Example #1
    def explain(self, x, scaled=True):
        """
        Return explanation of the anomalies based on t-scores.
        """
        if cp.ndim(x) < 2:
            x = x.reshape(1, -1)
        ranked_feature_importance = cp.zeros([x.shape[1], 1])

        for feature in range(x.shape[1]):
            # find all projections without the feature j and with feature j
            index_selected_feature = cp.where(
                self.projections[:, feature] != 0)[0]
            index_not_selected_feature = cp.where(
                self.projections[:, feature] == 0)[0]
            scores_with_feature = self.instance_score(x,
                                                      index_selected_feature)
            scores_without_feature = self.instance_score(
                x, index_not_selected_feature)
            ranked_feature_importance[feature, 0] = self.t_test(
                scores_with_feature, scores_without_feature)

        if scaled:
            assert cp.max(ranked_feature_importance) != cp.min(
                ranked_feature_importance)
            normalized_score = (ranked_feature_importance - cp.min(
                ranked_feature_importance)) / (
                cp.max(ranked_feature_importance) - cp.min(
                    ranked_feature_importance))
            return normalized_score
        else:
            return ranked_feature_importance
Example #2
 def get_evidmap(self):
     evidmap = cp.zeros(shape=(self.xPixelResol,self.yPixelResol))
     if cp.max(self.evidTime)-cp.min(self.evidTime)==0:
         div = 1
     else:
         div = cp.max(self.evidTime)-cp.min(self.evidTime)
     evidmap = evidmap + (self.evidTime-cp.mean(self.evidTime))/(div)*255
     return evidmap
Example #3
def test_adapthist_constant():
    """Test constant image, float and uint"""
    img = cp.zeros((8, 8))
    img += 2
    img = img.astype(np.uint16)
    adapted = exposure.equalize_adapthist(img, 3)
    assert cp.min(adapted) == cp.max(adapted)

    img = cp.zeros((8, 8))
    img += 0.1
    img = img.astype(np.float64)
    adapted = exposure.equalize_adapthist(img, 3)
    assert cp.min(adapted) == cp.max(adapted)
Example #4
def curvature_to_height(image, h2, iterations=2000):
    f = image[..., 0]
    A = image[..., 3]
    u = cup.ones_like(f) * 0.5

    k = 1
    t = np.empty_like(u, dtype=np.float32)

    # periodic gauss seidel iteration
    for ic in range(iterations):
        if ic % 100 == 0:
            print(ic)

        # roll k, axis=0
        t[:-k, :] = u[k:, :]
        t[-k:, :] = u[:k, :]
        # roll -k, axis=0
        t[k:, :] += u[:-k, :]
        t[:k, :] += u[-k:, :]
        # roll k, axis=1
        t[:, :-k] += u[:, k:]
        t[:, -k:] += u[:, :k]
        # roll -k, axis=1
        t[:, k:] += u[:, :-k]
        t[:, :k] += u[:, -k:]

        t -= h2 * f
        t *= 0.25
        u = t * A

    u = -u
    u -= cup.min(u)
    u /= cup.max(u)

    return cup.dstack([u, u, u, image[..., 3]])
Example #5
def FSITM(HDR, LDR, alpha=None):

    NumPixels = LDR.size

    if alpha is None:
        r = cp.floor(NumPixels / (2.**18))
        if r > 1.:
            alpha = 1. - (1. / r)
        else:
            alpha = 0.

    minNonzero = cp.min(HDR[HDR > 0])
    LogH = cp.log(cp.maximum(HDR, minNonzero))

    # float is needed for further calculation
    LogH = cp.around((LogH - LogH.min()) * 255. /
                     (LogH.max() - LogH.min())).astype(cp.float64)

    if alpha > 0.:
        PhaseHDR_CH = phasecong100(HDR, 2, 2, 8, 8)
        PhaseLDR_CH8 = phasecong100(LDR, 2, 2, 8, 8)
    else:  # so, if image size is smaller than 512x512?
        PhaseHDR_CH = 0
        PhaseLDR_CH8 = 0

    PhaseLogH = phasecong100(LogH, 2, 2, 2, 2)
    PhaseH = alpha * PhaseHDR_CH + (1 - alpha) * PhaseLogH

    PhaseLDR_CH2 = phasecong100(LDR, 2, 2, 2, 2)
    PhaseL = alpha * PhaseLDR_CH8 + (1 - alpha) * PhaseLDR_CH2
    Q = cp.sum(
        cp.logical_or(cp.logical_and(PhaseL <= 0, PhaseH <= 0),
                      cp.logical_and(PhaseL > 0, PhaseH > 0))) / NumPixels
    return Q
Example #6
def normalize(pix, save_alpha=False):
    if save_alpha:
        A = pix[..., 3]
    t = pix - cup.min(pix)
    t = t / cup.max(t)
    if save_alpha:
        t[..., 3] = A
    return t
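A minimal usage sketch for the normalize helper above, assuming cup is CuPy imported under that alias and pix is an RGBA float image:

import cupy as cup

pix = cup.random.rand(32, 32, 4).astype(cup.float32)  # hypothetical RGBA image
out = normalize(pix, save_alpha=True)                 # RGB rescaled to [0, 1], alpha preserved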
Example #7
 def gpd(points, fpoints=fpoints, cell=cell, radii=radii):
     points = cp.asarray(points)
     
     diff = cp.expand_dims(points, axis=1)-cp.expand_dims(fpoints, axis=0)
     diff = (diff - cp.around(diff)).reshape(-1,3)
     diff = cp.dot(cell, diff.T).T
     diff = cp.linalg.norm(diff, axis=1).reshape(-1,fpoints.shape[0])-radii.T
     return cp.min(diff, axis=1)
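A hedged usage sketch for gpd: the framework data bound as default arguments is not shown above, so the values below (a cubic cell, two framework sites in fractional coordinates, and per-site radii) are illustrative assumptions only.

import cupy as cp

cell = cp.eye(3) * 10.0                                     # assumed 10 x 10 x 10 cell
fpoints = cp.asarray([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])    # fractional framework coordinates
radii = cp.asarray([1.2, 1.5])                              # assumed per-site radii
probe = cp.asarray([[0.25, 0.25, 0.25]])                    # fractional probe point
print(gpd(probe, fpoints=fpoints, cell=cell, radii=radii))  # distance to the nearest site surface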
Example #8
    def explain(self, anomaly, scaled=True):
        """
        Explain anomaly based on contributions (t-scores) of each feature across histograms.

        :param anomaly: selected anomaly from input dataset
        :type anomaly: cupy.ndarray
        :param scaled: set to scale output feature importance scores
        :type scaled: boolean

        Examples
        --------
        >>> loda_ad.explain(x[5]) # x[5] is found anomaly
        array([[1.        ],
        [0.        ],
        [0.69850349],
        [0.91081035],
        [0.78774349]])
        """
        if cp.ndim(anomaly) < 2:
            anomaly = anomaly.reshape(1, -1)
        ranked_feature_importance = cp.zeros([anomaly.shape[1], 1])

        for feature in range(anomaly.shape[1]):
            # find all projections without the feature j and with feature j
            index_selected_feature = cp.where(
                self._projections[:, feature] != 0)[0]
            index_not_selected_feature = cp.where(
                self._projections[:, feature] == 0)[0]
            scores_with_feature = self._instance_score(anomaly,
                                                       index_selected_feature)
            scores_without_feature = self._instance_score(
                anomaly, index_not_selected_feature)
            ranked_feature_importance[feature, 0] = self._t_test(
                scores_with_feature, scores_without_feature)

        if scaled:
            assert cp.max(ranked_feature_importance) != cp.min(
                ranked_feature_importance)
            normalized_score = (ranked_feature_importance -
                                cp.min(ranked_feature_importance)) / (
                                    cp.max(ranked_feature_importance) -
                                    cp.min(ranked_feature_importance))
            return normalized_score
        else:
            return ranked_feature_importance
Example #9
def test_peak_float_out_of_range_dtype():
    im = cp.asarray([10, 100], dtype=np.float16)
    nbins = 10
    frequencies, bin_centers = exposure.histogram(im,
                                                  nbins=nbins,
                                                  source_range="dtype")
    assert_almost_equal(cp.min(bin_centers).get(), -0.9, 3)
    assert_almost_equal(cp.max(bin_centers).get(), 0.9, 3)
    assert len(bin_centers) == 10
Example #10
def cumulative_distribution(data, bins):
    assert cup.min(data) >= 0.0 and cup.max(data) <= 1.0
    hg_av, hg_a = cup.unique(cup.floor(data * (bins - 1)), return_index=True)
    hg_a = cup.float32(hg_a)
    hgs = cup.sum(hg_a)
    hg_a /= hgs
    res = cup.zeros((bins, ))
    res[cup.int64(hg_av)] = hg_a
    return cup.cumsum(res)
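A small usage sketch, assuming cup is CuPy; the input must already lie in [0, 1], as the assert at the top of the function enforces.

import cupy as cup

data = cup.random.rand(10000)                  # values already in [0, 1]
cdf = cumulative_distribution(data, bins=256)  # cumulative sum over 256 bins, shape (256,)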
Example #11
def select_mating_pool(pop, fitness, num_parents):
    # Select the best individuals of the current generation as parents for producing the next generation's offspring.
    parents = cp.empty((num_parents, pop.shape[1]))
    for parent_num in range(num_parents):
        min_fitness_idx = cp.where(fitness == cp.min(fitness))
        min_fitness_idx = min_fitness_idx[0][0]
        parents[parent_num, :] = pop[min_fitness_idx, :]
        fitness[min_fitness_idx] = 99999999999
    return parents
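A short usage sketch; the function treats lower fitness as better and overwrites the selected entries in fitness, so pass a copy if the original array is still needed.

import cupy as cp

pop = cp.random.rand(8, 5)      # 8 candidate solutions with 5 genes each
fitness = cp.random.rand(8)     # lower values are selected first
parents = select_mating_pool(pop, fitness.copy(), num_parents=3)
print(parents.shape)            # (3, 5)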
Example #12
def peak_signal_noise_ratio(image_true, image_test, *, data_range=None):
    """
    Compute the peak signal to noise ratio (PSNR) for an image.

    Parameters
    ----------
    image_true : ndarray
        Ground-truth image, same shape as image_test.
    image_test : ndarray
        Test image.
    data_range : int, optional
        The data range of the input image (distance between minimum and
        maximum possible values).  By default, this is estimated from the image
        data-type.

    Returns
    -------
    psnr : float
        The PSNR metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_psnr`` to
        ``skimage.metrics.peak_signal_noise_ratio``.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    """
    check_shape_equality(image_true, image_test)

    if data_range is None:
        if image_true.dtype != image_test.dtype:
            warn(
                "Inputs have mismatched dtype.  Setting data_range based on "
                "im_true.",
                stacklevel=2,
            )
        dmin, dmax = dtype_range[image_true.dtype.type]
        true_min, true_max = cp.min(image_true), cp.max(image_true)
        if true_max > dmax or true_min < dmin:
            raise ValueError(
                "im_true has intensity values outside the range expected for "
                "its data type.  Please manually specify the data_range")
        if true_min >= 0:
            # most common case (255 for uint8, 1 for float)
            data_range = dmax
        else:
            data_range = dmax - dmin

    image_true, image_test = _as_floats(image_true, image_test)

    err = mean_squared_error(image_true, image_test)
    return 10 * cp.log10((data_range * data_range) / err)
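The docstring above defines PSNR in terms of the data range and the mean squared error; a minimal standalone sketch of the same formula on CuPy arrays (assuming 8-bit data, so data_range = 255) could look like this:

import cupy as cp

a = cp.random.randint(0, 256, (64, 64), dtype=cp.uint8)       # reference image
b = cp.clip(a + cp.random.normal(0.0, 5.0, a.shape), 0, 255)  # noisy test image
mse = cp.mean((a.astype(cp.float64) - b) ** 2)
psnr = 10 * cp.log10(255.0 ** 2 / mse)
print(float(psnr))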
Example #13
def test_denoise_tv_chambolle_float_result_range():
    # astronaut image
    img = astro_gray
    int_astro = cp.multiply(img, 255).astype(np.uint8)
    assert cp.max(int_astro) > 1
    denoised_int_astro = restoration.denoise_tv_chambolle(int_astro, weight=0.1)
    # test if the value range of output float data is within [0.0:1.0]
    assert denoised_int_astro.dtype == np.float64
    assert cp.max(denoised_int_astro) <= 1.0
    assert cp.min(denoised_int_astro) >= 0.0
Example #14
    def rand_jitter(self, arr):
        """
        Introduces random displacements to spread the points
        """
        stdev = .023 * cupy.subtract(cupy.max(arr), cupy.min(arr))
        for i in range(arr.shape[1]):
            rnd = cupy.multiply(cupy.random.randn(len(arr)), stdev)
            arr[:, i] = cupy.add(arr[:, i], rnd)

        return arr
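The same jitter can be sketched outside the class; the 0.023 factor scales the noise to the overall value range of the array (an illustrative standalone version, not part of the original class):

import cupy

arr = cupy.random.rand(200, 2) * 100.0
stdev = 0.023 * (cupy.max(arr) - cupy.min(arr))
for i in range(arr.shape[1]):
    arr[:, i] += cupy.random.randn(len(arr)) * stdev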
Example #15
def test_end_to_end(nrows, ncols, nclusters, n_parts, delayed_predict,
                    input_type, client):

    from cuml.dask.cluster import KMeans as cumlKMeans

    from cuml.dask.datasets import make_blobs

    X, y = make_blobs(n_samples=int(nrows),
                      n_features=ncols,
                      centers=nclusters,
                      n_parts=n_parts,
                      cluster_std=0.01,
                      random_state=10)

    if input_type == "dataframe":
        X_train = to_dask_cudf(X)
        y_train = to_dask_cudf(y)
    elif input_type == "array":
        X_train, y_train = X, y

    cumlModel = cumlKMeans(init="k-means||",
                           n_clusters=nclusters,
                           random_state=10)

    cumlModel.fit(X_train)
    cumlLabels = cumlModel.predict(X_train, delayed=delayed_predict)

    n_workers = len(list(client.has_what().keys()))

    # Verifying we are grouping partitions. This should be changed soon.
    if n_parts is not None:
        parts_len = n_parts
    else:
        parts_len = n_workers

    if input_type == "dataframe":
        assert cumlLabels.npartitions == parts_len
        cumlPred = cumlLabels.compute().values
        labels = y_train.compute().values
    elif input_type == "array":
        assert len(cumlLabels.chunks[0]) == parts_len
        cumlPred = cp.array(cumlLabels.compute())
        labels = cp.squeeze(y_train.compute())

    assert cumlPred.shape[0] == nrows
    assert cp.max(cumlPred) == nclusters - 1
    assert cp.min(cumlPred) == 0

    score = adjusted_rand_score(labels, cumlPred)

    print(str(score))

    assert 1.0 == score
Example #16
    def forward(self, inputs):
        e1 = array.as_mat(inputs[0])
        #print 'e1.shape',
        #print e1.shape
        e2 = array.as_mat(inputs[1])
        #print 'e2.shape',
        #print e2.shape
        W = inputs[2]
        #print 'W.shape',
        #print W.shape

        # modified algorithm
        y = e1 + e2 - e2.sum(1).reshape(len(e2), 1) / len(e2[0])
        #print 'y.dtype',
        #print y.dtype
        print('cupy.max(e1) = ', cupy.max(e1))
        print('cupy.min(e1) = ', cupy.min(e1))
        print('cupy.max(e2) = ', cupy.max(e2))
        print('cupy.min(e2) = ', cupy.min(e2))
        print('cupy.max(y) = ', cupy.max(y))
        print('cupy.min(y) = ', cupy.min(y))
        #sum_e1e2.astype(dtype=e1.dtype, copy=False)
        
        #print 'y.shape',  
        #print y.shape
        #print 'e2.sum(1).reshape(len(e2), 1).shape',
        #print e2.sum(1).reshape(len(e2), 1).shape

        
        '''
        xp = cuda.get_array_module(*inputs)
        if xp is numpy:
            y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
        else:
            i_len, j_len = e1.shape
            k_len = e2.shape[1]
            # 'ij,ik->ijk'
            e1e2 = e1[:, :, None] * e2[:, None, :]
            # ijk->i[jk]
            e1e2 = e1e2.reshape(i_len, j_len * k_len)
            # jkl->[jk]l
            W_mat = W.reshape(-1, W.shape[2])
            # 'i[jk],[jk]l->il'
            y = e1e2.dot(W_mat)
       
        if len(inputs) == 6:
            V1, V2, b = inputs[3:]
            y += e1.dot(V1)
            y += e2.dot(V2)
            y += b
        '''
        #print 'y.shape',
        #print y.shape
        return y,
Example #17
def normals_to_height(image, grid_steps, iterations=2000, intensity=1.0):
    # A = image[..., 3]
    ih, iw = image.shape[0], image.shape[1]
    u = cup.ones((ih, iw), dtype=np.float32) * 0.5

    vectors = nmap_to_vectors(image)
    # vectors[..., 0] = 0.5 - image[..., 0]
    # vectors[..., 1] = image[..., 1] - 0.5

    vectors *= intensity

    t = np.empty_like(u, dtype=np.float32)

    for k in range(grid_steps, -1, -1):
        # multigrid
        k = 2**k
        print("grid step:", k)

        n = cup.roll(vectors[..., 0], k, axis=1)
        n -= cup.roll(vectors[..., 0], -k, axis=1)
        n += cup.roll(vectors[..., 1], k, axis=0)
        n -= cup.roll(vectors[..., 1], -k, axis=0)
        n *= 0.125

        for ic in range(iterations):
            if ic % 100 == 0:
                print(ic)

            # roll k, axis=0
            t[:-k, :] = u[k:, :]
            t[-k:, :] = u[:k, :]
            # roll -k, axis=0
            t[k:, :] += u[:-k, :]
            t[:k, :] += u[-k:, :]
            # roll k, axis=1
            t[:, :-k] += u[:, k:]
            t[:, -k:] += u[:, :k]
            # roll -k, axis=1
            t[:, k:] += u[:, :-k]
            t[:, :k] += u[:, -k:]

            t *= 0.25
            u = t + n
            # zero alpha = zero height
            # u = u * A + cup.max(u) * (1 - A)

    u = -u
    u -= cup.min(u)
    u /= cup.max(u)

    return cup.dstack([u, u, u, image[..., 3]])
Example #18
    def forward(self, inputs):
        e1 = array.as_mat(inputs[0])
        #print 'e1.shape',
        #print e1.shape
        e2 = array.as_mat(inputs[1])
        #print 'e2.shape',
        #print e2.shape
        W = inputs[2]
        #print 'W.shape',
        #print W.shape

        # modified algorithm
        y = e1 + e2 - e2.sum(1).reshape(len(e2), 1) / len(e2[0])
        #print 'y.dtype',
        #print y.dtype
        print('cupy.max(e1) = ', cupy.max(e1))
        print('cupy.min(e1) = ', cupy.min(e1))
        print('cupy.max(e2) = ', cupy.max(e2))
        print('cupy.min(e2) = ', cupy.min(e2))
        print('cupy.max(y) = ', cupy.max(y))
        print('cupy.min(y) = ', cupy.min(y))
        #sum_e1e2.astype(dtype=e1.dtype, copy=False)

        #print 'y.shape',
        #print y.shape
        #print 'e2.sum(1).reshape(len(e2), 1).shape',
        #print e2.sum(1).reshape(len(e2), 1).shape
        '''
        xp = cuda.get_array_module(*inputs)
        if xp is numpy:
            y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
        else:
            i_len, j_len = e1.shape
            k_len = e2.shape[1]
            # 'ij,ik->ijk'
            e1e2 = e1[:, :, None] * e2[:, None, :]
            # ijk->i[jk]
            e1e2 = e1e2.reshape(i_len, j_len * k_len)
            # jkl->[jk]l
            W_mat = W.reshape(-1, W.shape[2])
            # 'i[jk],[jk]l->il'
            y = e1e2.dot(W_mat)
       
        if len(inputs) == 6:
            V1, V2, b = inputs[3:]
            y += e1.dot(V1)
            y += e2.dot(V2)
            y += b
        '''
        #print 'y.shape',
        #print y.shape
        return y,
Example #19
def ts_min(x, window):
    if window > len(x):
        return cp.full(len(x), cp.nan)
    # fill missing values with +inf
    x = cp.where(cp.isinf(x) | cp.isnan(x), cp.inf, x)

    prefix = cp.full(window - 1, cp.nan)
    x_rolling_array = cp_rolling_window(x, window)
    result = cp.min(x_rolling_array, axis=1)
    # if the result contains inf, every value in that window was missing, so return NaN
    result = result.astype(float)

    result = cp.where(cp.isinf(result), cp.nan, result)
    return cp.concatenate((prefix, result))
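ts_min relies on a cp_rolling_window helper that is not shown here; a minimal sketch of one possible implementation using a strided view (an assumption, not the original helper) would be:

import cupy as cp

def cp_rolling_window(x, window):
    # non-copying view of shape (len(x) - window + 1, window)
    shape = (x.shape[0] - window + 1, window)
    strides = (x.strides[0], x.strides[0])
    return cp.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)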
Example #20
        def _pl(self, image, context):
            tmp = image.copy()

            # squared error
            gcr = gaussian_repeat(tmp, self.gA)
            error = (tmp - gcr)**2
            mask = -gaussian_repeat(error, self.gB)
            mask -= cup.min(mask)
            mask /= cup.max(mask)
            mask = (mask - 0.5) * self.strength + 1.0
            res = gcr + mask * (tmp - gcr)

            res[..., 3] = tmp[..., 3]
            return res
Example #21
    def update_core(self):
        # loss
        loss = 0

        # get the Iterator and the Optimizer
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")

        # get the neural network
        model = optimizer.target

        # fetch one batch
        x = train_iter.__next__()
        # get the image vectors and the caption sentences
        vectors = [s[0] for s in x]
        words = [s[1] for s in x]

        # reset the RNN state
        model.reset_state()

        # feed in the image vectors
        for i in range(5):
            v = [s[i] for s in vectors]
            model.encode(cp.array(v, dtype=cp.float32))

        # reset the RNN state
        c, h = model.get_state()
        model.set_state(c, h)

        # train the RNN repeatedly over the length of the sentence
        for i in range(len(words[0]) - 1):
            # arrange into an array for batch processing
            batch = cp.array([s[i] for s in words], dtype=cp.int32)
            # array of ground-truth data
            t = cp.array([s[i + 1] for s in words], dtype=cp.int32)
            # stop when every token is the end-of-sentence character
            if cp.min(batch) == 1 and cp.max(batch) == 1:
                break
            # run the RNN
            y = model.decode(batch)
            # compare with the target
            loss += F.softmax_cross_entropy(y, t)

        # reset the gradients
        optimizer.target.cleargrads()
        # backpropagate the error
        loss.backward()
        # update with the new weights
        optimizer.update()
Example #22
def normalize(image, kind_of_normalization=0, return_numpy=True):
    """
    Normalize given line profile by using a normalization technique based on
    the kind_of_normalization parameter.

    0 : Scale line profile to be between 0 and 1

    1 : Divide line profile through its mean value

    Arguments:
        image: Full SLI measurement (series of images) which is
               prepared for the pipeline using the SLIX toolbox methods.
        kind_of_normalization: Normalization technique which will be used for
        the calculation
        return_numpy:  Specifies if a CuPy or Numpy array will be returned.

    Returns:
        numpy.array -- Image where each pixel is normalized by the last axis
        of the image
    """
    gpu_image = cupy.array(image, dtype='float32')
    if kind_of_normalization == 0:
        gpu_image = (gpu_image - cupy.min(gpu_image, axis=-1)[..., None]) / \
                    cupy.maximum(1e-15, (cupy.max(gpu_image, axis=-1)
                                         [..., None] -
                                         cupy.min(gpu_image, axis=-1)
                                         [..., None]))
    else:
        gpu_image = gpu_image / cupy.mean(gpu_image, axis=-1)[..., None]

    if return_numpy:
        gpu_image_cpu = cupy.asnumpy(gpu_image)
        del gpu_image
        return gpu_image_cpu
    else:
        return gpu_image
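A brief usage sketch for the normalization above, assuming a stack of SLI images with the measurement series along the last axis:

import numpy as np

stack = np.random.rand(64, 64, 24).astype(np.float32)  # hypothetical SLI series per pixel
norm = normalize(stack, kind_of_normalization=0)        # each pixel profile rescaled to [0, 1]
print(norm.min(), norm.max())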
Example #23
def delight_simple(image, dd, iterations=500):
    A = image[..., 3]
    u = cup.ones_like(image[..., 0])

    grads = cup.zeros((image.shape[0], image.shape[1], 2), dtype=cup.float32)
    grads[..., 0] = (cup.roll(image[..., 0], 1, axis=0) - image[..., 0]) * dd
    grads[..., 1] = (image[..., 0] - cup.roll(image[..., 0], 1, axis=1)) * dd
    # grads[..., 0] = (image[..., 0] - 0.5) * (dd)
    # grads[..., 1] = (image[..., 0] - 0.5) * (dd)
    for k in range(5, -1, -1):
        # multigrid
        k = 2**k
        print("grid step:", k)

        n = cup.roll(grads[..., 0], k, axis=1)
        n -= cup.roll(grads[..., 0], -k, axis=1)
        n += cup.roll(grads[..., 1], k, axis=0)
        n -= cup.roll(grads[..., 1], -k, axis=0)
        n *= 0.125 * image[..., 3]

        for ic in range(iterations):
            if ic % 100 == 0:
                print(ic)
            t = cup.roll(u, -k, axis=0)
            t += cup.roll(u, k, axis=0)
            t += cup.roll(u, -k, axis=1)
            t += cup.roll(u, k, axis=1)
            t *= 0.25

            # zero alpha = zero height
            u = t + n
            u = u * A + cup.max(u) * (1 - A)

    u = -u
    u -= cup.min(u)
    u /= cup.max(u)

    # u *= image[..., 3]

    # u -= cup.mean(u)
    # u /= max(abs(cup.min(u)), abs(cup.max(u)))
    # u *= 0.5
    # u += 0.5
    # u = 1.0 - u

    # return cup.dstack([(u - image[..., 0]) * 0.5 + 0.5, u, u, image[..., 3]])
    u = (image[..., 0] - u) * 0.5 + 0.5
    return cup.dstack([u, u, u, image[..., 3]])
Example #24
 def test_probe_support(self):
     """Finite probe support penalty function is within expected bounds."""
     penalty = tike.ptycho.probe.finite_probe_support(
         probe=cp.zeros((101, 101)),  # must be odd shaped for min to be 0
         radius=0.5 * 0.4,
         degree=1.0,  # must have degree >= 1 for upper bound to be p
         p=2.345,
     )
     try:
         import tifffile
         os.makedirs(resultdir, exist_ok=True)
         tifffile.imsave(os.path.join(resultdir, 'penalty.tiff'),
                         penalty.astype('float32').get())
     except ImportError:
         pass
     assert cp.around(cp.min(penalty), 3) == 0.000
     assert cp.around(cp.max(penalty), 3) == 2.345
Example #25
def PDcalculate(x, y, data2):
    lon1 = x
    lat1 = y
    lon1 = cp.asarray(lon1)
    lat1 = cp.asarray(lat1)
    lon2 = data2["CLUSTERLONGITUDE"]
    lat2 = data2["CLUSTERLATITUDE"]
    lon3 = lon2.values
    lat3 = lat2.values
    lon4 = cp.asarray(lon3)
    lat4 = cp.asarray(lat3)
    shortdistance = geodistance_cp(lon1, lat1, lon4, lat4)

    indexmin = cp.argmin(shortdistance)
    indexmin = int(indexmin)
    targetcID = data2.at[indexmin, "CLUSTERID"]
    mindistance = int(cp.min(shortdistance))
    return mindistance, targetcID
Example #26
    def calc_boundary_avoidance_v(self):
        distance_from_bounds = cp.abs(
            cp.vstack(
                (self.state[:, 0] - self.x_min, self.state[:, 0] - self.x_max,
                 self.state[:, 1] - self.y_min,
                 self.state[:, 1] - self.y_max)).T)

        closest_bound_inds = cp.argmin(distance_from_bounds, axis=1)
        min_distance_to_bound = cp.min(distance_from_bounds, axis=1)
        bound_changes = (cp.ones(
            (2, self.n_obj)) / min_distance_to_bound).T * (
                (-1)**(closest_bound_inds.reshape(-1, 1) % 2))
        bound_changes[:, 0] = bound_changes[:, 0] * (closest_bound_inds < 2)
        bound_changes[:, 1] = bound_changes[:, 1] * (closest_bound_inds >= 2)

        close_to_bound = min_distance_to_bound < self.bound_threshold

        return close_to_bound, bound_changes
Example #27
def normals_to_curvature(pix):
    intensity = 1.0
    curve = cup.zeros((pix.shape[0], pix.shape[1]), dtype=cup.float32)
    vectors = nmap_to_vectors(pix)

    # y_vec = cup.array([1, 0, 0], dtype=cup.float32)
    # x_vec = cup.array([0, 1, 0], dtype=cup.float32)

    # yd = vectors.dot(x_vec)
    # xd = vectors.dot(y_vec)

    xd = vectors[:, :, 0]
    yd = vectors[:, :, 1]

    # curve[0,0] = yd[1,0]
    curve[:-1, :] += yd[1:, :]
    curve[-1, :] += yd[0, :]

    # curve[0,0] = yd[-1,0]
    curve[1:, :] -= yd[:-1, :]
    curve[0, :] -= yd[-1, :]

    # curve[0,0] = xd[1,0]
    curve[:, :-1] += xd[:, 1:]
    curve[:, -1] += xd[:, 0]

    # curve[0,0] = xd[-1,0]
    curve[:, 1:] -= xd[:, :-1]
    curve[:, 0] -= xd[:, -1]

    # normalize
    dv = max(abs(cup.min(curve)), abs(cup.max(curve)))
    curve /= dv

    # 0 = 0.5 grey
    curve = curve * intensity + 0.5

    pix[..., 0] = curve
    pix[..., 1] = curve
    pix[..., 2] = curve
    return pix
Example #28
def clusterSingleBatches(ctx,
                         sanity_plots=False,
                         plot_widgets=None,
                         plot_pos=0):
    """
    outputs an ordering of the batches according to drift
    for each batch, it extracts spikes as threshold crossings and clusters them with kmeans
    the resulting cluster means are then compared for all pairs of batches, and a dissimilarity
    score is assigned to each pair
    the matrix of similarity scores is then re-ordered so that low dissimilaity is along
    the diagonal
    """
    Nbatch = ctx.intermediate.Nbatch
    params = ctx.params
    probe = ctx.probe
    raw_data = ctx.raw_data
    ir = ctx.intermediate
    proc = ir.proc

    if not params.reorder:
        # if reordering is turned off, return consecutive order
        iorig = np.arange(Nbatch)
        return iorig, None, None

    nPCs = params.nPCs
    Nfilt = ceil(probe.Nchan / 2)

    # extract PCA waveforms pooled over channels
    wPCA = extractPCfromSnippets(proc,
                                 probe=probe,
                                 params=params,
                                 Nbatch=Nbatch)

    Nchan = probe.Nchan
    # TODO: move_to_config
    niter = 10  # iterations for k-means. we won't run it to convergence to save time

    nBatches = Nbatch
    NchanNear = min(Nchan, 2 * 8 + 1)

    # initialize big arrays on the GPU to hold the results from each batch
    # this holds the unit norm templates
    Ws = cp.zeros((nPCs, NchanNear, Nfilt, nBatches),
                  dtype=np.float32,
                  order='F')
    # this holds the scalings
    mus = cp.zeros((Nfilt, nBatches), dtype=np.float32, order='F')
    # this holds the number of spikes for that cluster
    ns = cp.zeros((Nfilt, nBatches), dtype=np.float32, order='F')
    # this holds the center channel for each template
    Whs = cp.ones((Nfilt, nBatches), dtype=np.int32, order='F')

    i0 = 0
    # TODO: move_to_config
    NrankPC = 3  # I am not sure if this gets used, but it goes into the function

    # return an array of closest channels for each channel
    iC = getClosestChannels(probe, params.sigmaMask, NchanNear)[0]

    for ibatch in tqdm(range(nBatches), desc="Clustering spikes"):
        enough_spikes = False

        # extract spikes using PCA waveforms
        uproj, call = extractPCbatch2(proc, params, probe, wPCA,
                                      min(nBatches - 2, ibatch), iC, Nbatch)

        if cp.sum(cp.isnan(uproj)) > 0:
            break  # I am not sure what case this safeguards against....

        if uproj.shape[1] > Nfilt:
            enough_spikes = True

            # this initializes the k-means
            W, mu, Wheights, irand = initializeWdata2(call, uproj, Nchan, nPCs,
                                                      Nfilt, iC)

            # Params is a whole bunch of parameters sent to the C++ scripts inside a float64 vector
            Params = [
                uproj.shape[1], NrankPC, Nfilt, 0, W.shape[0], 0, NchanNear,
                Nchan
            ]

            for i in range(niter):

                Wheights = Wheights.reshape((1, 1, -1), order='F')
                iC = cp.atleast_3d(iC)

                # we only compute distances to clusters on the same channels
                # this tells us which spikes and which clusters might match
                iMatch = cp.min(cp.abs(iC - Wheights), axis=0) < .1

                # get iclust and update W
                # CUDA script to efficiently compute distances for pairs in which iMatch is 1
                dWU, iclust, dx, nsp, dV = mexClustering2(
                    Params, uproj, W, mu, call, iMatch, iC)

                dWU = dWU / (
                    1e-5 + nsp.T
                )  # divide the cumulative waveform by the number of spikes

                mu = cp.sqrt(cp.sum(dWU**2,
                                    axis=0))  # norm of cluster template
                W = dWU / (1e-5 + mu)  # unit normalize templates

                W = W.reshape((nPCs, Nchan, Nfilt), order='F')
                nW = W[
                    0,
                    ...]**2  # compute best channel from the square of the first PC feature
                W = W.reshape((Nchan * nPCs, Nfilt), order='F')

                Wheights = cp.argmax(
                    nW,
                    axis=0)  # the new best channel of each cluster template

            # carefully keep track of cluster templates in dense format
            W = W.reshape((nPCs, Nchan, Nfilt), order='F')
            W0 = cp.zeros((nPCs, NchanNear, Nfilt),
                          dtype=np.float32,
                          order='F')
            for t in range(Nfilt):
                W0[..., t] = W[:, iC[:, Wheights[t]], t].squeeze()
            # I don't really know why this needs another normalization
            W0 = W0 / (1e-5 + cp.sum(cp.sum(W0**2, axis=0)[np.newaxis, ...],
                                     axis=1)**.5)

        # if a batch doesn't have enough spikes, it gets the cluster templates of the previous batch
        if enough_spikes:
            Ws[..., ibatch] = W0
            mus[:, ibatch] = mu
            ns[:, ibatch] = nsp
            Whs[:, ibatch] = Wheights.astype(np.int32)
        else:
            logger.warning('Data batch #%d only had %d spikes.', ibatch,
                           uproj.shape[1])

        i0 = i0 + Nfilt

    # another one of these Params variables transporting parameters to the C++ code
    Params = [1, NrankPC, Nfilt, 0, W.shape[0], 0, NchanNear, Nchan]
    # the total number of templates is the number of templates per batch times the number of batches
    Params[0] = Ws.shape[2] * Ws.shape[3]

    # initialize dissimilarity matrix
    ccb = cp.zeros((nBatches, nBatches), dtype=np.float32, order='F')

    for ibatch in tqdm(range(nBatches), desc="Computing distances"):
        # for every batch, compute in parallel its dissimilarity to ALL other batches
        Wh0 = Whs[:, ibatch]  # this one is the primary batch
        W0 = Ws[..., ibatch]
        mu = mus[..., ibatch]

        # embed the templates from the primary batch back into a full, sparse representation
        W = cp.zeros((nPCs, Nchan, Nfilt), dtype=np.float32, order='F')
        for t in range(Nfilt):
            W[:, iC[:, Wh0[t]], t] = cp.atleast_3d(Ws[:, :, t, ibatch])

        # pairs of templates that live on the same channels are potential "matches"
        # TODO: move_to_config? is the 0.1 here important?
        iMatch = cp.min(cp.abs(iC - Wh0.reshape((1, 1, -1), order='F')),
                        axis=0) < .1

        # compute dissimilarities for iMatch = 1
        iclust, ds = mexDistances2(Params, Ws, W, iMatch, iC, Whs, mus, mu)

        # ds are squared Euclidean distances
        ds = ds.reshape((Nfilt, -1),
                        order='F')  # this should just be an Nfilt-long vector
        ds = cp.maximum(0, ds)

        # weigh the distances according to number of spikes in cluster
        ccb[ibatch, :] = cp.mean(cp.sqrt(ds) * ns, axis=0) / cp.mean(ns,
                                                                     axis=0)

    # ccb = cp.asnumpy(ccb)
    # some normalization steps are needed: zscoring, and symmetrizing ccb
    ccb0 = zscore(ccb, axis=0)
    ccb0 = ccb0 + ccb0.T

    # sort by manifold embedding algorithm
    # iorig is the sorting of the batches
    # ccbsort is the resorted matrix (useful for diagnosing drift)
    ccbsort, iorig = sortBatches2(ccb0)

    logger.info("Finished clustering.")

    if sanity_plots:
        assert plot_widgets is not None, "if sanity_plots is set, then plot_widgets cannot be None"
        plot_dissimilarity_matrices(ccb0, ccbsort, plot_widgets[plot_pos])

    return Bunch(iorig=iorig, ccb0=ccb0, ccbsort=ccbsort)
Example #29
def select_min_next_cupy(X, gains, current_values, idxs):
    gains[:] = cupy.sum(cupy.min(current_values + X, 1), axis=1)[idxs]
Example #30
 def min_intensity(self):
     return cp.min(self.intensity_image[self.image])
Example #31
 def min(self, axis=None, out=None, keepdims=False):
     return cupy.min(self, axis=axis, out=out, keepdims=keepdims)
Example #32
    def backward(self, inputs, grad_outputs):
        e1 = array.as_mat(inputs[0])
        e2 = array.as_mat(inputs[1])
        W = inputs[2]
        gy = grad_outputs[0]
        print('cupy.max(gy) = ', cupy.max(gy))
        print('cupy.min(gy) = ', cupy.min(gy))
        #print 'backward'
        #print 'gy.shape',
        #print gy.shape
        '''
        xp = cuda.get_array_module(*inputs)
        if xp is numpy:
            gW = numpy.einsum('ij,ik,il->jkl', e1, e2, gy)
            ge1 = numpy.einsum('ik,jkl,il->ij', e2, W, gy)
            ge2 = numpy.einsum('ij,jkl,il->ik', e1, W, gy)
        else:
            kern = cuda.reduce('T in0, T in1, T in2', 'T out',
                               'in0 * in1 * in2', 'a + b', 'out = a', 0,
                               'bilinear_product')

            e1_b = e1[:, :, None, None]  # ij
            e2_b = e2[:, None, :, None]  # ik
            gy_b = gy[:, None, None, :]  # il
            W_b = W[None, :, :, :]  # jkl

            gW = kern(e1_b, e2_b, gy_b, axis=0)  # 'ij,ik,il->jkl'
            ge1 = kern(e2_b, W_b, gy_b, axis=(2, 3))  # 'ik,jkl,il->ij'
            ge2 = kern(e1_b, W_b, gy_b, axis=(1, 3))  # 'ij,jkl,il->ik'
        '''
        #ge1_ext = e1*gy.astype(dtype=gy.dtype, copy=False) #Hadamard product
        #print 'ge1_ext.shape',
        #print ge1_ext.shape
        #ge1 = cupy.sum(ge1_ext, axis=1).astype(dtype=gy.dtype, copy=False)
        #print 'ge1.shape',
        #print ge1.shape

        ge1 = cupy.sum(gy, axis=1).reshape(len(gy), 1).astype(dtype=gy.dtype, copy=False)
        print('cupy.max(ge1) = ', cupy.max(ge1))
        print('cupy.min(ge1) = ', cupy.min(ge1))

        gy_sum = cupy.sum(gy, axis=1).reshape(len(gy), 1).astype(dtype=gy.dtype, copy=False)
        #print 'gy_sum.shape',
        #print gy_sum.shape
        gy_tile = cupy.tile(gy_sum, len(gy[0])).astype(dtype=gy.dtype, copy=False)
        #print 'gy_tile.shape',
        #print gy_tile.shape
        #print 'gy.shape',
        #print gy.shape
        #print 'gy_tile.shape',
        #print gy_tile.shape
        #print 'gy_tile / len(gy[0]).dtype',
        #print (gy_tile / len(gy[0])).dtype
        #ge_tmp1 = gy_tile / len(gy[0])
        #ge_tmp2 = gy - gy_tile

        ge2 = (gy - gy_tile / len(gy[0])).astype(dtype=gy.dtype, copy=False)
        #print 'ge2.shape',
        #print ge2.shape
        print('cupy.max(ge2) = ', cupy.max(ge2))
        print('cupy.min(ge2) = ', cupy.min(ge2))
  
        gW = cupy.zeros(len(e1[0])*len(e2[0])*len(e2[0])).reshape(len(e1[0]), len(e2[0]), len(e2[0])).astype(dtype=gy.dtype, copy=False)
        #print 'gW.shape',
        #print gW.shape

        ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
        if len(inputs) == 6:
            V1, V2, b = inputs[3:]
            gV1 = e1.T.dot(gy)
            gV2 = e2.T.dot(gy)
            gb = gy.sum(0)
            ge1 += gy.dot(V1.T)
            ge2 += gy.dot(V2.T)
            ret += gV1, gV2, gb
        #print 'len(ret)',
        #print len(ret)
        #print 'ret[0].shape',
        #print ret[0].shape
        #print 'ret[1].shape',
        #print ret[1].shape
        #print 'ret[2].shape',
        #print ret[2].shape

        return ret