Example #1
def graphs(request):
    with NamedTemporaryFile(mode="w+", suffix=".csv") as graph_tf:
        graph_tf.writelines(request.param)
        graph_tf.seek(0)

        nx_G = nx.read_weighted_edgelist(graph_tf.name, delimiter=',')
        cudf_df = cudf.read_csv(graph_tf.name,
                                names=["src", "dst", "data"],
                                delimiter=",",
                                dtype=["int32", "int32", "float64"])
        cugraph_G = cugraph.Graph()
        cugraph_G.from_cudf_edgelist(cudf_df,
                                     source="src",
                                     destination="dst",
                                     edge_attr="data")

        # construct cupy coo_matrix graph
        i = []
        j = []
        weights = []
        for index in range(cudf_df.shape[0]):
            vertex1 = cudf_df.iloc[index]["src"]
            vertex2 = cudf_df.iloc[index]["dst"]
            weight = cudf_df.iloc[index]["data"]
            i += [vertex1, vertex2]
            j += [vertex2, vertex1]
            weights += [weight, weight]
        i = cupy.array(i)
        j = cupy.array(j)
        weights = cupy.array(weights)
        largest_vertex = max(cupy.amax(i), cupy.amax(j))
        cupy_df = cupy_coo_matrix(
            (weights, (i, j)), shape=(largest_vertex + 1, largest_vertex + 1))

        yield cugraph_G, nx_G, cupy_df
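A hedged usage sketch for the fixture above (assuming it is registered with @pytest.fixture and receives a CSV edge-list string via request.param; the test name and edge data below are illustrative):

import pytest

CSV_EDGES = "0,1,1.0\n1,2,2.0\n"  # illustrative weighted edge list

@pytest.mark.parametrize("graphs", [CSV_EDGES], indirect=True)
def test_backends_agree(graphs):
    cugraph_G, nx_G, cupy_coo = graphs
    # all three constructions should describe the same edge set
    assert cugraph_G.number_of_edges() == nx_G.number_of_edges()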
Example #2
def multigrid(H):
    global N
    global vcCnt
    global rConv
    global pAnlt
    global pData, rData

    rData[0] = H
    chMat = cp.zeros(N[0])
    rConv = cp.zeros(vcCnt)

    for i in range(vcCnt):
        v_cycle()

        chMat = laplace(pData[0])
        resVal = float(cp.amax(cp.abs(H[1:-1, 1:-1, 1:-1] - chMat)))
        rConv[i] = resVal

        print("Residual after V-Cycle {0:2d} is {1:.4e}".format(i + 1, resVal))

    errVal = float(
        cp.amax(cp.abs(pAnlt[1:-1, 1:-1, 1:-1] - pData[0][1:-1, 1:-1, 1:-1])))
    print("Error after V-Cycle {0:2d} is {1:4e}\n".format(i + 1, errVal))

    return pData[0]
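The convergence test above is just the infinity norm of the V-cycle residual; a minimal standalone sketch of that measurement (the interior-point slicing matches the function above):

import cupy as cp

def residual_inf_norm(H, chMat):
    # largest absolute defect over the interior grid points
    return float(cp.amax(cp.abs(H[1:-1, 1:-1, 1:-1] - chMat)))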
Example #3
def scf_per_batch(Np, L, xs):  # xs shape (bs,1024,2)
    B = Np//2
    s = scd_fam(xs, Np, L)  # shape (batch, alpha, f_k)
    f = cp.absolute(s)
    alpha = cp.amax(f, axis=-1)
    freq = cp.amax(f, axis=-2)
    (bs, my, mx) = f.shape
    freq = freq[:, (mx//2 - B):(mx//2 + B)]

    return alpha, freq   # should be (bs,Np) (bs,Np)
Example #4
 def __init__(self,
              xx,
              yy,
              minimum=xp.nan,
              maximum=xp.nan,
              name=None,
              latex_label=None,
              unit=None,
              boundary=None):
     self.xx = xp.asarray(xx)
     self.min_limit = float(xp.amin(self.xx))
     self.max_limit = float(xp.amax(self.xx))
     # In order to use np/cp.interp, we need to make sure that xx is ordered
     sorted_idxs = xp.argsort(self.xx)
     self.xx = self.xx[sorted_idxs]
     self._yy = xp.asarray(yy)[sorted_idxs]
     if self._yy.ndim != 1:
         raise TypeError("yy must be 1D. A {}-D array given.".format(
             self._yy.ndim))
     self.YY = None
     self.probability_density = None
     self.cumulative_distribution = None
     self.inverse_cumulative_distribution = None
     self.__all_interpolated = Interp(self.xx, self._yy)
     minimum = float(xp.nanmax(xp.array([self.min_limit, minimum])))
     maximum = float(xp.nanmin(xp.array([self.max_limit, maximum])))
     bilby.core.prior.Prior.__init__(self,
                                     name=name,
                                     latex_label=latex_label,
                                     unit=unit,
                                     minimum=minimum,
                                     maximum=maximum,
                                     boundary=boundary)
     self._update_instance()
Example #5
File: _softmax.py  Project: takagi/cupy
def softmax(x, axis=None):
    """Softmax function.

    The softmax function transforms each element of a
    collection by computing the exponential of each element
    divided by the sum of the exponentials of all the elements.

    Parameters
    ----------
    x : array-like
        The input array
    axis : int or tuple of ints, optional
        Axis to compute values along. Default is None

    Returns
    -------
    s : cupy.ndarray
        Returns an array with same shape as input. The result
        will sum to 1 along the provided axis

    """

    x_max = cupy.amax(x, axis=axis, keepdims=True)
    exp_x_shifted = cupy.exp(x - x_max)
    return exp_x_shifted / cupy.sum(exp_x_shifted, axis=axis, keepdims=True)
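A quick usage sketch for the function above (assuming CuPy and a CUDA device are available):

import cupy

x = cupy.array([[1.0, 2.0, 3.0], [2.0, 4.0, 8.0]])
s = softmax(x, axis=1)
print(s.sum(axis=1))  # each row sums to 1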
Example #6
    def forward(self, inputs, target_array):
        # Get hidden units
        self.n_h = np.dot(inputs, self.w_h)
        # Sigmoid
        self.n_h = self.activate(self.n_h)
        # Add bias
        self.n_h = np.hstack((self.n_h, 1))
        #self.n_h = np.append(self.n_h, 1)
        # Get outputs
        n_o = np.dot(self.n_h, self.w_o)

        # 1 Store the activations of n_o in the predictions array.
        #     * predictions array is used to compute the confusion matrix.
        predictions = self.activate(n_o)
        # 2 Activate on output neurons.
        n_o = self.activate(n_o)
        # 3 Find the max of each sample in predictions and give it a 1 there, 0 otherwise.
        predictions = np.where(predictions>=np.amax(predictions),1,0)
        target_k = predictions * target_array
        # 4 Turn target array into array of 0.9s and 0.1s.
        #     * for calculating deltas.
        target_k = np.where(target_k==1,0.9,0.1)
        return n_o, target_k, predictions
Example #7
def max_pool(inp, kernel_size, stride, padding) -> 'Tensor':
    _check_tensors(inp)
    engine = _get_engine(inp)

    def save_mask(x, cords):
        mask = engine.zeros_like(x)
        n, c, h, w = x.shape
        x = x.reshape(n, h * w, c)
        idx = engine.argmax(x, axis=1)

        n_idx, c_idx = engine.indices((n, c))
        mask.reshape((n, h * w, c))[n_idx, idx, c_idx] = 1
        cache[cords] = mask

    cache = {}

    padded_input_array = engine.pad(inp.data, ((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1])), mode='constant')

    _, _, output_height, output_width = _calculate_output_dims(inp.shape, (0, 0, kernel_size[0], kernel_size[1]), padding, stride)
    kernel_height, kernel_width = kernel_size
    batch_size, channels, _, _ = padded_input_array.shape

    output_array = engine.zeros((batch_size, channels, output_height, output_width))

    for row in range(output_height):
        for column in range(output_width):
            padded_input_slice = padded_input_array[:, :, row * stride:row * stride + kernel_height, column * stride:column * stride + kernel_width]
            save_mask(x=padded_input_slice, cords=(row, column))
            output_array[:, :, row, column] = engine.amax(padded_input_slice, axis=(2, 3))

    return _create_tensor(
        inp,
        data=output_array,
        func=wrapped_partial(max_pool_backward, inp=inp, kernel_size=kernel_size, stride=stride, padding=padding, cache=cache)
    )
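The save_mask helper records which element won each pooling window so the backward pass can route gradients only there; a standalone sketch of that argmax-mask trick with numpy standing in for engine (single channel, so the (n, h*w, c) reshape is safe):

import numpy as np

x = np.array([[[[1., 5.], [3., 2.]]]])        # one (n=1, c=1, 2x2) window
flat = x.reshape(1, 4, 1)                     # (n, h*w, c)
idx = np.argmax(flat, axis=1)                 # index of the max per (n, c)
mask = np.zeros_like(x)
n_idx, c_idx = np.indices((1, 1))
mask.reshape(1, 4, 1)[n_idx, idx, c_idx] = 1  # 1 at the max, 0 elsewhere
print(mask.squeeze())                         # [[0. 1.] [0. 0.]]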
Example #8
File: jfake.py  Project: ThShepard/jfake
def compute_psnr(diff_img):
    """Takes a diff image as input and returns the PSNR and minimum SNR for each color channel."""
    diff = np.asarray(diff_img)
    # PIL yields an (height, width, 3) array; regroup into one row per channel
    diff = diff.reshape(diff_img.width * diff_img.height, 3).T
    mse_rgb = np.mean(diff**2, axis=1)
    psnr_rgb = 10 * np.log10(255**2 / mse_rgb)
    # the largest per-channel deviation drives the minimum SNR
    max_err_rgb = np.amax(diff, axis=1)
    min_snr_rgb = 10 * np.log10(255**2 / max_err_rgb)
    return  psnr_rgb[0], psnr_rgb[1], psnr_rgb[2], min_snr_rgb[0], min_snr_rgb[1], min_snr_rgb[2]
Example #9
def logsumexp(x):
    if _GPU_ENABLED:
        # NOTE This is a quick-and-dirty implementation
        # FIXME Should contribute to the cupy codebase
        xmax = cp.amax(x)
        t = cp.exp(x - xmax)
        return cp.asnumpy(cp.log(cp.sum(t)) + xmax)
    else:
        return scipy.special.logsumexp(x)
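A quick check sketch (values chosen to overflow a naive exp-sum-log; assumes scipy is installed and, on the GPU path, cupy):

import numpy as np

x = np.array([1000.0, 1000.0])
print(logsumexp(x))  # 1000.6931..., no overflow thanks to the max shift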
Example #10
    def cg_ptycho(self, data, init, h, lamd, rho, piter, model):
        # minimization functional
        def minf(psi, fpsi):
            if model == 'gaussian':
                f = cp.linalg.norm(cp.abs(fpsi) - cp.sqrt(data))**2
            elif model == 'poisson':
                f = cp.sum(
                    cp.abs(fpsi)**2 - 2 * data * self.mlog(cp.abs(fpsi)))
            f += rho * cp.linalg.norm(h - psi + lamd / rho)**2
            return f

        psi = init.copy()
        gamma = 2  # init gamma as a large value
        for i in range(piter):
            fpsi = self.fwd_ptycho(psi)
            if model == 'gaussian':
                grad = self.adj_ptycho(fpsi - cp.sqrt(data) *
                                       cp.exp(1j * cp.angle(fpsi)))
            elif model == 'poisson':
                grad = self.adj_ptycho(fpsi - data * fpsi /
                                       (cp.abs(fpsi)**2 + 1e-32))
            grad -= rho * (h - psi + lamd / rho)
            # Dai-Yuan direction
            if i == 0:
                d = -grad
            else:
                d = -grad + d * cp.linalg.norm(grad)**2 / \
                    cp.sum(cp.conj(d) * (grad - grad0))
            grad0 = grad
            # line search
            fd = self.fwd_ptycho(d)
            gamma = self.line_search(minf, gamma, psi, fpsi, d, fd)
            psi = psi + gamma * d
            ##print(gamma,minf(psi, fpsi))
        if (cp.amax(cp.abs(cp.angle(psi))) > 3.14):
            print('possible phase wrap, max computed angle',
                  cp.amax(cp.abs(cp.angle(psi))))

        return psi
Example #11
    def forward(self, data, is_training=True):
        # print(data[0])
        if (len(data.shape) != 2):
            raise ValueError(
                'data shape is not compatible. Expected [batch_size, nums_score]'
            )
        logits = np.exp(data - np.amax(data, axis=1, keepdims=True))
        logits = logits / np.sum(logits, axis=1, keepdims=True)
        if is_training:
            self.cache['logits'] = np.copy(logits)

        # print(logits[0])
        return logits
Example #12
    def amax(a, axis=None, out=None, keepdims=None, initial=None, where=None):
        '''Try first to run as a CuPy array; if that fails, fall back to NumPy.'''
        try:
            z = cp.amax(a, axis, out, keepdims)
        except Exception:
            z = np.amax(a, axis, out, keepdims, initial, where)
        # Synchronize the GPU stream as a preventative measure against race conditions
        cp.cuda.Stream.null.synchronize()

        if out is not None:
            return out

        return z
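A hedged usage sketch (assuming the wrapper is exposed as a plain function and CuPy is importable):

import cupy as cp

x = cp.arange(6).reshape(2, 3)
print(amax(x, axis=1))  # [2 5], computed on the GPU when possible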
Example #13
    def constructLocalMat(self, src_pts, grids, scale_factor):
        '''
        Computes a local homography for every grid via moving DLT: matching
        pairs are weighted by a Gaussian of their distance to the grid centre,
        and grids whose largest weight falls below gamma reuse the global
        homography.
        src_pts : An N-by-2 matrix. N is the number of matching pairs.
        grids : An instance of the Grids class.
        '''

        gamma = 0.0025
        src_pts = cp.asarray(src_pts)
        grids_center_coordi = cp.asarray(
            grids.center_lst)  # A M by 2 matrix, M is number of grids.
        grid_num = len(grids.center_lst)
        A = cp.asarray(self.A)
        C1 = cp.asarray(self.C1)
        C2 = cp.asarray(self.C2)
        matchingPairNum = src_pts.shape[0]
        skip = 0
        global_H = cp.asarray(np.copy(self.globalHomoMat))
        local_homo_mat_lst = cp.zeros((grid_num, 3, 3))

        change_mask = []
        for idx in range(grid_num):
            grid_coordi = grids_center_coordi[idx]

            weight = cp.exp((-1) * cp.sum(
                (src_pts - grid_coordi)**2, axis=1) / scale_factor**2)

            print(
                f'SVD {idx+1:8d}/{grid_num}({(idx+1)/(grid_num)*100:8.1f}%)  Current skip {skip} times. Current Skip rate is {skip/grid_num:5.3%}',
                end='\r')

            if cp.amax(weight) < gamma:
                skip += 1
                local_homo_mat_lst[idx, :, :] = global_H
                continue

            weight = cp.repeat(weight, 2)
            weight[weight < gamma] = gamma
            weight = weight.reshape((2 * matchingPairNum, 1))
            weighted_A = cp.multiply(weight, A)
            u, s, v = cp.linalg.svd(weighted_A)
            H = v[-1, :].reshape((3, 3))
            H = cp.linalg.inv(C2) @ H @ C1
            H = H / H[-1, -1]
            local_homo_mat_lst[idx, :, :] = H
            change_mask.append(idx)
        print()

        self.non_global_homo_mat_lst = change_mask
        self.localHomoMat_lst = cp.asnumpy(local_homo_mat_lst)
Example #14
    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------

        X : array-like of shape (n_samples, n_features)


        Returns
        -------

        C : array-like of shape (n_samples, n_classes)
            Returns the log-probability of the samples for each class in the
            model. The columns correspond to the classes in sorted order, as
            they appear in the attribute classes_.
        """

        if isinstance(X, np.ndarray) or isinstance(X, cp.ndarray):
            X = cp.asarray(X, X.dtype)
        elif scipy.sparse.isspmatrix(X) or cp.sparse.isspmatrix(X):
            X = X.tocoo()
            rows = cp.asarray(X.row, dtype=X.row.dtype)
            cols = cp.asarray(X.col, dtype=X.col.dtype)
            data = cp.asarray(X.data, dtype=X.data.dtype)
            X = cp.sparse.coo_matrix((data, (rows, cols)), shape=X.shape)

        jll = self._joint_log_likelihood(X)

        # normalize by P(X) = P(f_1, ..., f_n)

        # Compute log(sum(exp()))

        # Subtract max in exp to prevent inf
        a_max = cp.amax(jll, axis=1, keepdims=True)

        exp = cp.exp(jll - a_max)
        logsumexp = cp.log(cp.sum(exp, axis=1))

        a_max = cp.squeeze(a_max, axis=1)

        log_prob_x = a_max + logsumexp

        if log_prob_x.ndim < 2:
            log_prob_x = log_prob_x.reshape((1, log_prob_x.shape[0]))
        return jll - log_prob_x.T
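The block above is the standard max-shifted log-sum-exp normalization; a minimal standalone sketch of the same arithmetic (illustrative values):

import cupy as cp

jll = cp.array([[1.0, 2.0], [3.0, 1.0]])
a_max = cp.amax(jll, axis=1, keepdims=True)
log_prob_x = cp.squeeze(a_max, axis=1) + cp.log(cp.sum(cp.exp(jll - a_max), axis=1))
log_proba = jll - log_prob_x[:, None]
print(cp.exp(log_proba).sum(axis=1))  # each row of probabilities sums to 1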
Example #15
def _terrain_cupy(data: cupy.ndarray, seed: int, x_range_scaled: tuple,
                  y_range_scaled: tuple, zfactor: int) -> cupy.ndarray:

    data = data * 0

    data[:] = _terrain_gpu(data,
                           seed,
                           x_range=x_range_scaled,
                           y_range=y_range_scaled)
    minimum = cupy.amin(data)
    maximum = cupy.amax(data)

    data[:] = (data - minimum) / (maximum - minimum)
    data[data < 0.3] = 0  # create water
    data *= zfactor

    return data
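The middle of the function is a plain min-max rescale to [0, 1] before the water cutoff; a minimal sketch of that step:

import cupy

data = cupy.array([2.0, 5.0, 11.0])
data = (data - cupy.amin(data)) / (cupy.amax(data) - cupy.amin(data))
print(data)  # [0.         0.33333333 1.        ]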
Example #16
    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        """
        out_type = self._get_output_type(X)

        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.common.import_utils import dummy_function_always_false \
                as scipy_sparse_isspmatrix

        # todo: use a sparse CumlArray style approach when ready
        # https://github.com/rapidsai/cuml/issues/2216
        if scipy_sparse_isspmatrix(X) or cupyx.scipy.sparse.isspmatrix(X):
            X = X.tocoo()
            rows = cp.asarray(X.row, dtype=X.row.dtype)
            cols = cp.asarray(X.col, dtype=X.col.dtype)
            data = cp.asarray(X.data, dtype=X.data.dtype)
            X = cupyx.scipy.sparse.coo_matrix((data, (rows, cols)),
                                              shape=X.shape)
        else:
            X = input_to_cuml_array(X, order='K').array.to_output('cupy')

        jll = self._joint_log_likelihood(X)

        # normalize by P(X) = P(f_1, ..., f_n)

        # Compute log(sum(exp()))

        # Subtract max in exp to prevent inf
        a_max = cp.amax(jll, axis=1, keepdims=True)

        exp = cp.exp(jll - a_max)
        logsumexp = cp.log(cp.sum(exp, axis=1))

        a_max = cp.squeeze(a_max, axis=1)

        log_prob_x = a_max + logsumexp

        if log_prob_x.ndim < 2:
            log_prob_x = log_prob_x.reshape((1, log_prob_x.shape[0]))
        result = jll - log_prob_x.T
        return CumlArray(result).to_output(out_type)
Example #17
def _perlin_cupy(data: cupy.ndarray, freq: tuple, seed: int) -> cupy.ndarray:

    # cupy.random.seed(seed)
    # p = cupy.random.permutation(2**20)

    # use numpy.random then transfer data to GPU to ensure the same result
    # when running numpy backed and cupy backed data array.
    np.random.seed(seed)
    p = cupy.asarray(np.random.permutation(2**20))
    p = cupy.append(p, p)

    griddim, blockdim = cuda_args(data.shape)
    _perlin_gpu[griddim, blockdim](p, 0, freq[0], 0, freq[1], 1, data)

    minimum = cupy.amin(data)
    maximum = cupy.amax(data)
    data[:] = (data - minimum) / (maximum - minimum)
    return data
Example #18
def pinv(a, rcond=1e-15):
    """Compute the Moore-Penrose pseudoinverse of a matrix.

    It computes a pseudoinverse of a matrix ``a``, which is a generalization
    of the inverse matrix with Singular Value Decomposition (SVD).
    Note that it automatically removes small singular values for stability.

    Args:
        a (cupy.ndarray): The matrix with dimension ``(..., M, N)``
        rcond (float or cupy.ndarray): Cutoff parameter for small singular
            values. For stability it computes the largest singular value
            denoted by ``s``, and sets all singular values smaller than
            ``rcond * s`` to zero. Broadcasts against the stack of matrices.

    Returns:
        cupy.ndarray: The pseudoinverse of ``a`` with dimension
        ``(..., N, M)``.

    .. warning::
        This function calls one or more cuSOLVER routine(s) which may yield
        invalid results if input conditions are not met.
        To detect these invalid results, you can set the `linalg`
        configuration to a value that is not `ignore` in
        :func:`cupyx.errstate` or :func:`cupyx.seterr`.

    .. seealso:: :func:`numpy.linalg.pinv`
    """
    _util._assert_cupy_array(a)
    if a.size == 0:
        _, out_dtype = _util.linalg_common_type(a)
        m, n = a.shape[-2:]
        if m == 0 or n == 0:
            out_dtype = a.dtype  # NumPy bug?
        return cupy.empty(a.shape[:-2] + (n, m), dtype=out_dtype)

    u, s, vt = _decomposition.svd(a.conj(), full_matrices=False)

    # discard small singular values
    cutoff = rcond * cupy.amax(s, axis=-1)
    leq = s <= cutoff[..., None]
    cupy.reciprocal(s, out=s)
    s[leq] = 0

    return cupy.matmul(vt.swapaxes(-2, -1), s[..., None] * u.swapaxes(-2, -1))
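A quick consistency sketch (assuming a CUDA device; the function above matches cupy.linalg.pinv, so it can be exercised through that public entry point):

import cupy
import numpy as np

a = cupy.random.rand(4, 3)
a_pinv = cupy.linalg.pinv(a)
# Moore-Penrose identity: a @ pinv(a) @ a == a
np.testing.assert_allclose(cupy.asnumpy(a @ a_pinv @ a), cupy.asnumpy(a), atol=1e-6)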
Example #19
    def takexi(self, psi, phi, lamd, mu, rho, tau):
        # bg subtraction parameters
        r = self.prb.shape[0] // 2  # integer half-width so it can be used as a slice index
        m1 = cp.mean(cp.angle(psi[:, :, r:2 * r]))
        m2 = cp.mean(cp.angle(psi[:, :,
                                  psi.shape[2] - 2 * r:psi.shape[2] - r]))
        pshift = (m1 + m2) / 2

        t = psi - lamd / rho
        t *= cp.exp(-1j * pshift)
        logt = self.mlog(t)

        # K, xi0, xi1
        K = 1j * self.voxelsize * self.wavenumber() * t / self.coeftomo
        K = K / cp.amax(cp.abs(K))  # normalization
        xi0 = K * (-1j * (logt) /
                   (self.voxelsize * self.wavenumber())) * self.coeftomo
        xi1 = phi - mu / tau
        return xi0, xi1, K, pshift
Example #20
def smear(f, fb, pairs):
    """
    build smear matrix B for bp

    Parameters
    ----------
    f : NDArray
        potential on nodes
    fb : NDArray
        potential on adjacent electrodes
    pairs : NDArray
        electrodes numbering pairs

    Returns
    -------
    NDArray
        back-projection matrix
    """
    f = cp.array(f)
    fb = cp.array(fb)
    pairs = cp.array(pairs)
    min_fb = cp.amin(fb[pairs], axis=1)
    max_fb = cp.amax(fb[pairs], axis=1)
    # vectorized form of the per-pair loop: mark the nodes whose potential
    # lies between the two electrode potentials, min_fb < f <= max_fb
    b_matrix = ((min_fb[:, None] < f[None]) &
                (f[None] <= max_fb[:, None])).astype(cp.float64)
    return cp.asnumpy(b_matrix)
Example #21
    def predict_log_proba(self, X) -> CumlArray:
        """
        Return log-probability estimates for the test vector X.

        """
        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.common.import_utils import dummy_function_always_false \
                as scipy_sparse_isspmatrix

        # todo: use a sparse CumlArray style approach when ready
        # https://github.com/rapidsai/cuml/issues/2216
        if scipy_sparse_isspmatrix(X) or cupyx.scipy.sparse.isspmatrix(X):
            X = _convert_x_sparse(X)
        else:
            X = input_to_cupy_array(
                X, order='K', check_dtype=[cp.float32, cp.float64,
                                           cp.int32]).array

        jll = self._joint_log_likelihood(X)

        # normalize by P(X) = P(f_1, ..., f_n)

        # Compute log(sum(exp()))

        # Subtract max in exp to prevent inf
        a_max = cp.amax(jll, axis=1, keepdims=True)

        exp = cp.exp(jll - a_max)
        logsumexp = cp.log(cp.sum(exp, axis=1))

        a_max = cp.squeeze(a_max, axis=1)

        log_prob_x = a_max + logsumexp

        if log_prob_x.ndim < 2:
            log_prob_x = log_prob_x.reshape((1, log_prob_x.shape[0]))
        result = jll - log_prob_x.T
        return result
Example #22
def log_softmax(x, axis=None):
    """Compute logarithm of softmax function

    Parameters
    ----------
    x : array-like
        Input array
    axis : int or tuple of ints, optional
        Axis to compute values along. Default is None and softmax
        will be computed over the entire array `x`

    Returns
    -------
    s : cupy.ndarray
        An array with the same shape as `x`. Exponential of the
        result will sum to 1 along the specified axis. If `x` is a
        scalar, a scalar is returned

    """

    x_max = cp.amax(x, axis=axis, keepdims=True)

    if x_max.ndim > 0:
        x_max[~cp.isfinite(x_max)] = 0
    elif not cp.isfinite(x_max):
        x_max = 0

    tmp = x - x_max

    if tmp.dtype.kind in 'iu':
        for out_dtype in [cp.float16, cp.float32, cp.float64]:
            if cp.can_cast(tmp.dtype, out_dtype):
                tmp = tmp.astype(out_dtype)
                break

    out = _log_softmax_kernel(tmp, axis=axis, keepdims=True)

    out = tmp - out
    return out
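A quick usage sketch (assuming a recent CuPy, where this function is exposed as cupyx.scipy.special.log_softmax):

import cupy as cp
from cupyx.scipy.special import log_softmax

x = cp.array([1000.0, 1000.0])
print(log_softmax(x))  # [-0.6931... -0.6931...], stable despite the large inputs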
Example #23
def create_triangulation(raster, optix):
    datahash = np.uint64(hash(str(raster.data.get())))
    optixhash = np.uint64(optix.getHash())

    # Calculate a scale factor for the height that maintains the ratio
    # width/height
    H, W = raster.shape

    # Scale the terrain so that the width is proportional to the height
    # Thus the terrain would be neither too flat nor too steep and
    # raytracing will give best accuracy
    maxH = float(cupy.amax(raster.data))
    maxDim = max(H, W)
    scale = maxDim / maxH

    if optixhash != datahash:
        num_tris = (H - 1) * (W - 1) * 2
        verts = cupy.empty(H * W * 3, np.float32)
        triangles = cupy.empty(num_tris * 3, np.int32)
        # Generate a mesh from the terrain (buffers are on the GPU, so
        # generation happens also on GPU)
        res = _triangulate_terrain(verts, triangles, raster, scale)
        if res:
            raise RuntimeError(
                f"Failed to generate mesh from terrain, error code: {res}")

        res = optix.build(datahash, verts, triangles)
        if res:
            raise RuntimeError(f"OptiX failed to build GAS, error code: {res}")

        # Enable for debug purposes
        if False:
            write("mesh.stl", verts, triangles)
        # Clear some GPU memory that we no longer need
        verts = None
        triangles = None
        cupy.get_default_memory_pool().free_all_blocks()
    return scale
Example #24
def max(tensor, axis=None, keepdims=False):
    """Returns the maximum of an array or the maximum along a given axis.

    Note::
       When at least one element is NaN, the corresponding max value will be
       NaN.

    Args:
        tensor (ndarray): Array to take the maximum of.
        axis (int): Along which axis to take the maximum. The flattened array
            is used by default. Defaults to None.
        keepdims (bool): If ``True``, the axis is kept as an axis of
            size one. Defaults to False.

    Returns:
        ndarray: The maximum of ``tensor``, along the axis if specified.
    """

    # cupy doesn't support keepdims.
    if keepdims:
        return numpy.amax(tensor, axis=axis, keepdims=keepdims)
    else:
        return cp.amax(tensor, axis=axis, keepdims=keepdims)
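For reference, current CuPy releases do accept keepdims in cp.amax, so the NumPy fallback above mainly matters on older versions; a quick check sketch:

import cupy as cp

x = cp.arange(6).reshape(2, 3)
print(cp.amax(x, axis=1, keepdims=True).shape)  # (2, 1)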
Example #25
	if(sys.argv[i]=="--help"):
		print("command:python3 amax_test.py [-finame]")
		exit()

for i in range(n_parameter):
	if(flag_list[i]==0):
		print("please input parameter : [" + parameter_name_list[i] + "]")
		input_parameter=1
if(input_parameter==1):
	exit()

with mrcfile.open(finame, permissive=True) as mrc:
	cp_temp = cp.asarray(mrc.data, dtype="float32")
# the with-block closes the file, so no explicit close is needed

amax_axis0=cp.amax(cp_temp,axis=(0,1))
amax_axis1=cp.amax(cp_temp,axis=(1,2))
amax_axis2=cp.amax(cp_temp,axis=(0,2))
amax=cp.amax(cp_temp)

print("axis 01 = " + str(amax_axis0))
print("axis 12 = " + str(amax_axis1))
print("axis 02 = " + str(amax_axis2))
print("axis 012 = " + str(amax))
print()
print("axis 01 shape = " + str(amax_axis0.shape))
print("axis 12 shape = " + str(amax_axis1.shape))
print("axis 02 shape = " + str(amax_axis2.shape))
print()
print("cp_temp shape = " + str(cp_temp.shape))
Example #26
                cp_structure_factor_bk.imag = cp_G_kernel * cp_structure_factor_bk.imag

                cp_structure_factor_bk = cp.fft.ifftshift(
                    cp_structure_factor_bk)
                G_dens = cp.fft.ifft2(cp_structure_factor_bk, norm="ortho")

                #				np_G_dens = cp.asnumpy(cp.abs(cp_structure_factor_bk))
                #				tifffile.imsave(header + "_" + str(i+1).zfill(6) + '_cp_structure_factor_bk.tif' ,np_G_dens)

                #				np_G_dens = cp.asnumpy(G_dens.real)
                #				tifffile.imsave(header + "_" + str(i+1).zfill(6) + '_G_dens.tif' ,np_G_dens)

                G_dens_real = G_dens.real
                #				G_dens_real_average=cp.average(G_dens_real)
                #				threshold = float(SW_delta)*(cp.amax(G_dens_real)-G_dens_real_average) + G_dens_real_average
                threshold = float(SW_delta) * cp.amax(G_dens_real)

                cp_sup = cp.where(G_dens_real >= threshold, float(1), float(0))
                cp_sup = cp_sup.astype(cp.float32)

                SW_ips = SW_ips * float(SW_ips_step)

                if (SW_sup_output_flag == 1):
                    np_sup = cp.asnumpy(cp_sup)
                    tifffile.imsave(
                        header + "_" + str(i + 1).zfill(6) + '_sup.tif',
                        np_sup)

    # real-space constraint

    cp_dens_bk = cp.real(cp_dens)
    index_orientation = cp.argmax(correlation_orientation)

    #	print(correlation[0,:,i_shift,i_shift])
    print("i_dens_obs = " + str(i_dens_obs))
    #	print("max correlation = " + str(cp.amax(correlation)))
    print("max correlation = " +
          str(correlation_orientation[index_orientation]))
    print("index_orientation = " + str(index_orientation))
    print("index_rot = " + str(index_rot[index_orientation]))
    print("index_x = " + str(index_x[index_orientation]))
    print("index_y = " + str(index_y[index_orientation]))
    print("")
    with open(log_path, mode='a') as log:
        log.write(
            str(i_dens_obs) + "," + str(index_orientation) + "," +
            str(cp.amax(correlation)) + "," +
            str(index_rot[index_orientation]) + "," +
            str(index_x[index_orientation]) + "," +
            str(index_y[index_orientation]) + "\n")

#n=0
#print(correlation[:,n,:,:])
#print("index = " + str(index[n]))
#print("index_rot = " + str(index_rot[n]))
#print("index_x = " + str(index_x[n]))
#print("index_y = " + str(index_y[n]))
#print(correlation[index_rot[n],n,index_x[n],index_y[n]])
#print(correlation[:,n,:,:].shape)

# convert the cupy array to a numpy array
#cp_density_stack = cp.asnumpy(cp_density_stack)
Example #28
    def scan_image_calc_color(self, file_path, height, pts_cp, downsampling_xy):

        ### Open Image
        img_src = self.open_image(file_path)
        
        w, h = img_src.size

        ### DownSampling
        ww = int(w / downsampling_xy)
        hh = int(h / downsampling_xy)
        img = img_src.resize((ww, hh), Image.LANCZOS)

        ### Read Shape
        px = img.getdata()
        px_cp = cp.array(px)
        # print("px_cp.shape :", px_cp.shape)

        ### Create Result Canvas
        img_tmp = self.create_canvas_alpha(ww)
        img_result = self.create_canvas_alpha(w)

        ### Segment Contour True/False
        px_seg_0 = cp.amax(px_cp)


        ### Contour : False
        if px_seg_0 < 127:
            ### Export None-Image
            px_result = [(0, 0, 0, 0) for i in range(w) for j in range(h)]
            img_result.putdata(tuple(px_result))
            return img_result


        ### Contour : True
        else:

            ### Running on Cuda
            # print("Running on Cuda !!")


            ################################################################################################


            ###########################
            ###                     ###
            ###    Calc Distance    ###
            ###                     ###
            ###########################

            # print("Distance")

            ### [X] Calc Distance
            # dist_list = self.gen_disctance_list(w, h, height, pts_cp)

            ### [O] Calc Distance with DownSampling
            dist_list = self.gen_disctance_list_ds(ww, hh, height, downsampling_xy, pts_cp)


            ################################################################################################


            ############################################
            ###                                      ###
            ###     Generate Color From Distance     ###
            ###                                      ###
            ############################################
            
            # print("Color")

            ### Define Colors

            ################################################################################################

            ### Offset Pattern (Small)

            dist_src = dist_list.tolist()
            # print("len(dist_src) :", len(dist_src))

            clrs = []
            amp = 1 / 2

            for d in dist_src:

                c = int((math.sin(d * amp) + 1) * (1 / 2) * 255)
                cc = 255 - c

                clrs.append([c, c, cc, 255])
                
            clrs_tuple = tuple(map(tuple, clrs))
            
            ### Generate New Image
            img_tmp.putdata(tuple(clrs_tuple))

            ################################################################################################

            """
            ### Offset Pattern (Large)

            dist_src = dist_list.tolist()
            # print("len(dist_src) :", len(dist_src))

            clrs = []

            for d in dist_src:

                th = 30

                if d < (th * 1):
                    clrs.append([255, 0, 0, 255])
                
                elif d < (th * 2):
                    clrs.append([0, 255, 0, 255])

                elif d < (th * 3):
                    clrs.append([0, 0, 255, 255])
                
                else:
                    clrs.append([255, 255, 255, 255])

            clrs_tuple = tuple(map(tuple, clrs))
            
            ### Generate New Image
            img_tmp.putdata(tuple(clrs_tuple))
            """

            ################################################################################################

            """
            ### Test Distance Map

            dist_remap = self.remap_number_cp(dist_list, 0, 200, 0, 255)
            dist_remap = dist_remap.astype('int64')

            # print("dist_remap.shape :", dist_remap.shape)
            
            ### Fill Array (255)
            alpha_array = cp.ones(dist_list.shape) * 255
            alpha_array = alpha_array.astype('int64')


            dist_img = cp.stack([dist_remap, dist_remap, dist_remap, alpha_array])
            dist_img = dist_img.T
            # print("dist_img.shape :", dist_img.shape)

            # print(dist_img)

            dist_4 = dist_img.tolist()
            dist_4 = tuple(map(tuple, dist_4))

            # print("type(dist_4) :", type(dist_4))

            ### Generate New Image
            img_tmp.putdata(tuple(dist_4))
            """

            ################################################################################################


            #########################
            ###                   ###
            ###     Composite     ###
            ###                   ###
            #########################

            # print("Composite")

            ### Scaling
            img_dist = img_tmp.resize((w, h), Image.LANCZOS)

            ### Create Canvas for Composite
            img_canvas = self.create_canvas_alpha(w)

            ### Define Mask
            img_mask = img_src.convert("L")

            ### Composite
            img_result = Image.composite(img_dist, img_canvas, img_mask)

            ### Flip
            ### Image coordinates >> Rhino coordinates
            img_flip = ImageOps.flip(img_result)

            return img_flip
def getNextPrediction(fileJac: str, measuring_electrodes: np.ndarray, voltages: np.ndarray, 
              num_returned: int=10, n_el: int=20, n_per_el: int=3, n_pix: int=64, pert: float=0.5, 
              p_influence: float=-10., p_rec: float=10., p: float=0.2, lamb:float=0.1) -> np.ndarray:
    # extract const permittivity jacobian and voltage (& other)
    file = h5.File(fileJac, 'r')

    meas = file['meas'][()]
    new_ind = file['new_ind'][()]
    nodes = file['p'][()]  # renamed: don't shadow the regularisation parameter p
    t = file['t'][()]
    file.close()
    # initialise const permittivity and el_pos variables
    perm = np.ones(t.shape[0], dtype=np.float32)
    el_pos = np.arange(n_el * n_per_el).astype(np.int16)
    mesh_obj = {'element': t,
        'node':    nodes,
        'perm':    perm}
    # list all possible active/measuring electrode permutations of this measurement
    meas = cp.array(meas)
    # find their indices in the already calculated const. permittivity Jacobian (CPJ)
    measuring_electrodes = cp.array(measuring_electrodes)
    measurements_0 = cp.amin(measuring_electrodes[:, :2], axis=1)
    measurements_1 = cp.amax(measuring_electrodes[:, :2], axis=1)
    measurements_2 = cp.amin(measuring_electrodes[:, 2:], axis=1)
    measurements_3 = cp.amax(measuring_electrodes[:, 2:], axis=1)
    measuring_electrodes = cp.empty((len(measuring_electrodes), 4))
    measuring_electrodes[:, 0] = measurements_0
    measuring_electrodes[:, 1] = measurements_1
    measuring_electrodes[:, 2] = measurements_2
    measuring_electrodes[:, 3] = measurements_3
    index = (cp.sum(cp.equal(measuring_electrodes[:, None, :], meas[None, :, :]), axis=2) == 4)
    index = cp.where(index)
    #print(index)
    ind = cp.unique(index[1])
    #print(ind)
    i = cp.asnumpy(ind)
    j = index[0]
    mask = np.zeros(len(meas), dtype=int)
    mask[i] = 1
    mask = mask.astype(bool)
    # take a slice of Jacobian, voltage readings and B matrix (the one corresponding to the performed measurements)
    file = h5.File(fileJac, 'r')
    jac = file['jac'][mask, :][()]
    v = file['v'][mask][()]
    b = file['b'][mask, :][()]
    file.close()
    # put them in the form desired by the GREIT function
    pde_result = train.namedtuple("pde_result", ['jac', 'v', 'b_matrix'])
    f = pde_result(jac=jac,
           v=v,
           b_matrix=b)
    
    # now we can use the real voltage readings and the GREIT algorithm to reconstruct
    greit = train.greit.GREIT(mesh_obj, el_pos, f=f, ex_mat=(meas[index[1], :2]), step=None)
    greit.setup(p=p, lamb=lamb, n=n_pix)
    h_mat = greit.H
    reconstruction = greit.solve(voltages, f.v).reshape(n_pix, n_pix)
    # fix_electrodes_multiple is in meshing.py
    _, el_coords = train.fix_electrodes_multiple(centre=None, edgeX=0.1, edgeY=0.1, a=2, b=2, ppl=n_el, el_width=0.02, num_per_el=3)
    # find the distances between each existing electrode pair and the pixels lying on the line that connects them
    pixel_indices, voltage_all_possible = measopt.find_all_distances(reconstruction, h_mat, el_coords, n_el, cutoff=0.8)
    # call function get_total_map that generates the influence map, the gradient map and the log-reconstruction
    total_map, grad_mat, rec_log = np.abs(measopt.get_total_map(reconstruction, voltages, h_mat, pert=pert, p_influence=p_influence, p_rec=p_rec))
    # get the indices of the total map along the lines connecting each possible electrode pair
    total_maps_along_lines = total_map[None] * pixel_indices
    # find how close each connecting line passes to the boundary of an anomaly (where gradient supposed to be higher)
    proximity_to_boundary = np.sum(total_maps_along_lines, axis=(1, 2)) / np.sum(pixel_indices, axis=(1, 2))
    # rate the possible src-sink pairs by their proximity to existing anomalies
    proposed_ex_line = voltage_all_possible[np.argsort(proximity_to_boundary)[::-1]][:num_returned]

    number_of_voltages = 10
    # generate the voltage measuring electrodes for this current driver pair
    proposed_voltage_pairs = measopt.findNextVoltagePair(proposed_ex_line[0], fileJac, total_map, number_of_voltages, 0, npix=n_pix, cutoff=0.97)
    return proposed_ex_line, proposed_voltage_pairs, reconstruction, total_map
def simulateMeasurements(fileJac, anomaly=0, measurements=None, v_meas=None, n_el=20, n_per_el=3, n_pix=64, a=2.):
	# extract const permittivity jacobian and voltage (& other)
	file = h5.File(fileJac, 'r')

	meas = file['meas'][()]
	new_ind = file['new_ind'][()]
	p = file['p'][()]
	t = file['t'][()]
	file.close()
	# initialise const permittivity and el_pos variables
	perm = np.ones(t.shape[0], dtype=np.float32)
	el_pos = np.arange(n_el * n_per_el).astype(np.int16)
	mesh_obj = {'element': t,
				'node':	p,
				'perm':	perm}

	#for testing
	if measurements is None:
		el_dist = np.random.randint(1, 20)
		ex_mat = (cp.concatenate((cp.arange(20)[None], (cp.arange(20) + el_dist)[None])) % 20).T
		#print(ex_mat.shape)
		fem_all = Forward(mesh_obj, el_pos)
		measurements = fem_all.voltMeter(ex_mat)
		#ex_mat = measurements[1]
		measurements = cp.concatenate((measurements[1], measurements[0]), axis=1)
		#print(measurements.shape)
	# list all possible active/measuring electrode permutations of this measurement
	meas = cp.array(meas)
	# find their indices in the already calculated const. permittivity Jacobian (CPJ)
	measurements = cp.array(measurements)
	measurements_0 = cp.amin(measurements[:, :2], axis=1)
	measurements_1 = cp.amax(measurements[:, :2], axis=1)
	measurements_2 = cp.amin(measurements[:, 2:], axis=1)
	measurements_3 = cp.amax(measurements[:, 2:], axis=1)
	measurements = cp.empty((len(measurements), 4))
	measurements[:, 0] = measurements_0
	measurements[:, 1] = measurements_1
	measurements[:, 2] = measurements_2
	measurements[:, 3] = measurements_3
	index = (cp.sum(cp.equal(measurements[:, None, :], meas[None, :, :]), axis=2) == 4)
	index = cp.where(index)
	ind = cp.unique(index[1])
	i = cp.asnumpy(ind)
	j = index[0]
	mask = np.zeros(len(meas), dtype=int)
	mask[i] = 1
	mask = mask.astype(bool)
	# take a slice of Jacobian, voltage readings and B matrix
	file = h5.File(fileJac, 'r')
	jac = file['jac'][mask, :][()]
	v = file['v'][mask][()]
	b = file['b'][mask, :][()]
	file.close()
	pde_result = train.namedtuple("pde_result", ['jac', 'v', 'b_matrix'])
	f = pde_result(jac=jac,
				   v=v,
				   b_matrix=b)
	
	# simulate voltage readings if not given
	if v_meas is None:
		if np.isscalar(anomaly):
			print("generating new anomaly")
			anomaly = train.generate_anoms(a, a)
		true = train.generate_examplary_output(a, int(n_pix), anomaly)
		mesh_new = train.set_perm(mesh_obj, anomaly=anomaly, background=1)
		fem = FEM(mesh_obj, el_pos, n_el)
		new_ind = cp.array(new_ind)
		f2, raw = fem.solve_eit(volt_mat_all=meas[ind, 2:], new_ind=new_ind[ind], ex_mat=meas[ind, :2], parser=None, perm=mesh_new['perm'].astype('f8'))
		v_meas = f2.v
		'''
		#plot
		fig = plt.figure(3)
		x, y = p[:, 0], p[:, 1]
		ax1 = fig.add_subplot(111)
		# draw equi-potential lines
		print(raw.shape)
		raw = cp.asnumpy(raw[5]).ravel()
		vf = np.linspace(min(raw), max(raw), 32)
		ax1.tricontour(x, y, t, raw, vf, cmap=plt.cm.viridis)
		# draw mesh structure
		ax1.tripcolor(x, y, t, np.real(perm),
					  edgecolors='k', shading='flat', alpha=0.5,
					  cmap=plt.cm.Greys)

		ax1.plot(x[el_pos], y[el_pos], 'ro')
		for i, e in enumerate(el_pos):
			ax1.text(x[e], y[e], str(i+1), size=12)
		ax1.set_title('Equipotential Lines of Uniform Permittivity')
		# clean up
		ax1.set_aspect('equal')
		ax1.set_ylim([-1.2, 1.2])
		ax1.set_xlim([-1.2, 1.2])
		fig.set_size_inches(6, 6)
		#plt.show()'''
	elif len(measurements) == len(v_meas):
		measurements = np.array(measurements)
		v_meas = np.array(v_meas[j[:len(ind)]])
	else:
		raise ValueError('Sizes of arrays do not match (have to have voltage reading for each measurement). If you don\'t have readings, leave empty for simulation.')
	print('Number of measurements:', len(v_meas), len(f.v))

	# now we can use the real voltage readings and the GREIT algorithm to reconstruct
	greit = train.greit.GREIT(mesh_obj, el_pos, f=f, ex_mat=(meas[index[1], :2]), step=None)
	greit.setup(p=0.2, lamb=0.01, n=n_pix)
	h_mat = greit.H
	reconstruction = greit.solve(v_meas, f.v).reshape(n_pix, n_pix)
	
	# optional: see reconstruction
	'''
	plt.figure(1)
	im1 = plt.imshow(reconstruction, cmap=plt.cm.viridis, origin='lower', extent=[-1, 1, -1, 1])
	plt.title("Reconstruction")
	plt.colorbar(im1)
	plt.figure(2)
	im2 = plt.imshow(true, cmap=plt.cm.viridis, origin='lower', extent=[-1, 1, -1, 1])
	plt.colorbar(im2)
	plt.title("True Image")
	plt.show()
	'''
	return reconstruction, h_mat, v_meas, f.v, true, len(v_meas)