Example #1
    def helperGetLikeChangeGPU(t):
        t = cp.array(t)
        start = time.time()
        tt = t.item()
        struc_index = cp.where(ts == t)[0][0].item()
        if float(t) in taos:
            change_a_t, change_b_t = getLikeChangeGPU(hic_dist_tao, row_tao,
                                                      col_tao, struc_t,
                                                      changea, changeb, tt,
                                                      struc_index, n_max,
                                                      n_min)
        else:
            lower = taos[taos - tt < 0]
            upper = taos[taos - tt > 0]
            a1 = lower[cp.argmin(cp.abs(lower - tt))]
            a2 = upper[cp.argmin(cp.abs(upper - tt))]
            w2 = (tt - a1) / cp.abs(a1 - a2)
            w1 = (a2 - tt) / cp.abs(a1 - a2)
            changea1, changeb1 = getLikeChangeGPU(hic_dist_tao, row_tao,
                                                  col_tao,
                                                  struc_t, changea, changeb,
                                                  a1.item(), struc_index,
                                                  n_max, n_min)

            changea2, changeb2 = getLikeChangeGPU(hic_dist_tao, row_tao,
                                                  col_tao,
                                                  struc_t, changea, changeb,
                                                  a2.item(), struc_index,
                                                  n_max, n_min)
            change_a_t = w1 * changea1 + w2 * changea2
            change_b_t = w1 * changeb1 + w2 * changeb2
        change_t = change_a_t + change_b_t
        print("indiv", str(start - time.time()))
        return cp.asnumpy(change_t)
Example #2
def nn_gpu(ref, query):
    import cupy
    import math  # needed below for math.ceil

    with open(cu_file) as f:
        kernel = cupy.RawKernel(f.read(), "cuComputeDistanceGlobal")

    ref_nb, ref_dim = ref.shape
    query_nb, query_dim = query.shape
    assert ref_dim == query_dim
    dim = ref_dim

    ref = ref.transpose(1, 0)
    query = query.transpose(1, 0)
    ref = cupy.ascontiguousarray(ref)
    query = cupy.ascontiguousarray(query)

    dist = cupy.empty((ref_nb, query_nb), dtype=cupy.float32)

    BLOCK_DIM = 16
    grid = (
        int(math.ceil(query_nb / BLOCK_DIM)),
        int(math.ceil(ref_nb / BLOCK_DIM)),
        1,
    )
    block = (BLOCK_DIM, BLOCK_DIM, 1)
    args = (ref, ref_nb, query, query_nb, dim, dist)
    shared_mem = BLOCK_DIM * BLOCK_DIM + BLOCK_DIM * BLOCK_DIM + 5

    kernel(grid, block, args=args, shared_mem=shared_mem)

    indices = cupy.argmin(dist, axis=0)
    return indices
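A minimal smoke test for nn_gpu, assuming the global `cu_file` points at CUDA source defining `cuComputeDistanceGlobal` (neither is shown here), might look like this sketch:

import cupy

# Hypothetical usage: 1000 reference points and 200 queries in 3-D.
# nn_gpu fills a (ref_nb, query_nb) distance matrix on the GPU, so
# cupy.argmin(dist, axis=0) yields one nearest-reference index per query.
ref = cupy.random.rand(1000, 3).astype(cupy.float32)
query = cupy.random.rand(200, 3).astype(cupy.float32)
indices = nn_gpu(ref, query)
assert indices.shape == (200,)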
Example #3
def fit_custom(X, n_clusters, max_iter):
    assert X.ndim == 2

    n_samples = len(X)

    pred = cupy.zeros(n_samples)

    initial_indexes = cupy.random.choice(n_samples, n_clusters, replace=False)
    centers = X[initial_indexes]

    for _ in range(max_iter):
        distances = var_kernel(X[:, None, 0], X[:, None, 1],
                               centers[None, :, 1], centers[None, :, 0])
        new_pred = cupy.argmin(distances, axis=1)
        if cupy.all(new_pred == pred):
            break
        pred = new_pred

        i = cupy.arange(n_clusters)
        mask = pred == i[:, None]
        sums = sum_kernel(X, mask[:, :, None], axis=1)
        counts = count_kernel(mask, axis=1).reshape((n_clusters, 1))
        centers = sums / counts

    return centers, pred
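The helper kernels `var_kernel`, `sum_kernel`, and `count_kernel` are defined elsewhere. A sketch consistent with the call sites above (assuming 2-D points), using CuPy's ElementwiseKernel and ReductionKernel factories, could be:

import cupy

# Squared Euclidean distance between points and centers. Note the call
# above passes the center coordinates as (centers[..., 1], centers[..., 0]),
# so c0 holds the second coordinate and c1 the first; the pairing below
# keeps the distance Euclidean.
var_kernel = cupy.ElementwiseKernel(
    'T x0, T x1, T c0, T c1', 'T out',
    'out = (x0 - c1) * (x0 - c1) + (x1 - c0) * (x1 - c0)',
    'var_kernel')

# Masked sum: accumulate x only where the cluster mask is set.
sum_kernel = cupy.ReductionKernel(
    'T x, S mask', 'T out',
    'mask ? x : (T)0', 'a + b', 'out = a', '0', 'sum_kernel')

# Count the entries selected by the mask.
count_kernel = cupy.ReductionKernel(
    'T mask', 'float32 out',
    'mask ? 1.0 : 0.0', 'a + b', 'out = a', '0.0', 'count_kernel')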
Example #4
def test_transform(nrows, ncols, nclusters, n_parts, input_type, cluster):

    client = None

    try:

        client = Client(cluster)

        from cuml.dask.cluster import KMeans as cumlKMeans

        from cuml.dask.datasets import make_blobs

        X, y = make_blobs(n_samples=int(nrows),
                          n_features=ncols,
                          centers=nclusters,
                          n_parts=n_parts,
                          cluster_std=0.01,
                          shuffle=False,
                          random_state=10)
        y = y.astype('int64')

        wait(X)
        if input_type == "dataframe":
            X_train = to_dask_cudf(X)
            y_train = to_dask_cudf(y)
            labels = cp.squeeze(y_train.compute().to_pandas().values)
        elif input_type == "array":
            X_train, y_train = X, y
            labels = cp.squeeze(y_train.compute())

        cumlModel = cumlKMeans(init="k-means||",
                               n_clusters=nclusters,
                               random_state=10)

        cumlModel.fit(X_train)

        xformed = cumlModel.transform(X_train).compute()
        if input_type == "dataframe":
            xformed = cp.array(xformed
                               if len(xformed.shape) == 1
                               else xformed.as_gpu_matrix())

        if nclusters == 1:
            # series shape is (nrows,) not (nrows, 1) but both are valid
            # and equivalent for this test
            assert xformed.shape in [(nrows, nclusters), (nrows,)]
        else:
            assert xformed.shape == (nrows, nclusters)

        # The argmin of the transformed values should be equal to the labels
        # reshape is a quick manner of dealing with (nrows,) is not (nrows, 1)
        xformed_labels = cp.argmin(xformed.reshape((int(nrows),
                                                    int(nclusters))), axis=1)

        assert sk_adjusted_rand_score(cp.asnumpy(labels),
                                      cp.asnumpy(xformed_labels))

    finally:
        if client is not None:
            client.close()
Example #5
    def predict(self, Xtest, use_gpu=False):
        """Predict using the linear model
        
        Let :math:`B^k` be the basis vectors of class :math:`k`, and
        :math:`x` be the RCDT space feature vector of an input; the NS
        method performs classification by
        
        .. math::
            \arg\min_k \| B^k (B^k)^T x - x \|^2
        
        Parameters
        ----------
        Xtest : array-like, shape (n_samples, n_rows, n_columns)
            Image data for testing.
        use_gpu : bool, default=False
            If True, use the GPU for calculations.
            
        Returns
        -------
        ndarray of shape (n_samples,)
           Predicted target values per element in Xtest.
           
        """

        # calculate the RCDT using parallel CPUs
        print('\nCalculating RCDTs for testing images ...')
        Xrcdt = self.rcdt_parallel(Xtest)

        # vectorize RCDT matrix
        X = Xrcdt.reshape([Xrcdt.shape[0], -1])

        # import cupy for using GPU
        if use_gpu:
            import cupy as cp
            X = cp.array(X)

        # find nearest subspace for each test sample
        print('Finding nearest subspace for each test sample ...')
        D = []
        for class_idx in range(self.num_classes):
            basis = self.subspaces[class_idx]
            basis = basis[:self.len_subspace, :]

            if use_gpu:
                D.append(
                    cp.linalg.norm(cp.matmul(cp.matmul(X,
                                                       cp.array(basis).T),
                                             cp.array(basis)) - X,
                                   axis=1))
            else:
                proj = X @ basis.T  # (n_samples, n_basis)
                projR = proj @ basis  # (n_samples, n_features)
                D.append(LA.norm(projR - X, axis=1))
        if use_gpu:
            preds = cp.argmin(cp.stack(D, axis=0), axis=0)
            return cp.asnumpy(preds)
        else:
            D = np.stack(D, axis=0)
            preds = np.argmin(D, axis=0)
            return preds
Example #6
 def test_iamin(self):
     x = self._make_random_vector()
     ref = cupy.argmin(cupy.absolute(x.real) + cupy.absolute(x.imag))
     out = self._make_out('i')
     res = cublas.iamin(x, out=out)
     self._check_pointer(res, out)
     # Note: iamin returns 1-based index
     cupy.testing.assert_array_equal(res - 1, ref)
Example #7
def min_col(cover, active_rows, active_cols):
    """
    :return: (column, count) tuple such that column contains the least number of 1s
    compared to other columns.
    """
    counts = col_counts(cover, active_rows)
    argmin = active_cols[int(cp.argmin(counts[active_cols]))]
    return argmin, counts[argmin]
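`col_counts` is an external helper; given the docstring, a minimal sketch (an assumption, not the original) is a masked column sum:

import cupy as cp

def col_counts(cover, active_rows):
    # Count the 1s in each column of the 0/1 cover matrix,
    # restricted to the currently active rows.
    return cp.sum(cover[active_rows], axis=0)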
Example #8
def getGlobalBestPath(particlesN,globalBestCost):
    # TODO: parallelize once this runs on the GPU
    particles = cupy.asarray(particlesN)
    shortestPathIndex = cupy.argmin(particles[:,BEST])
    cost = particles[shortestPathIndex, BEST]
    if cost < globalBestCost:
        return int(shortestPathIndex)
    else:
        return -1
Example #9
def test_transform(nrows, ncols, nclusters, n_parts, cluster):

    client = None

    try:

        client = Client(cluster)

        from cuml.dask.cluster import KMeans as cumlKMeans

        from cuml.dask.datasets import make_blobs

        X_cudf, y = make_blobs(nrows,
                               ncols,
                               nclusters,
                               n_parts,
                               cluster_std=0.01,
                               verbose=False,
                               shuffle=False,
                               random_state=10)

        wait(X_cudf)

        cumlModel = cumlKMeans(verbose=0,
                               init="k-means||",
                               n_clusters=nclusters,
                               random_state=10)

        cumlModel.fit(X_cudf)

        labels = np.squeeze(y.compute().to_pandas().values)

        xformed = cumlModel.transform(X_cudf).compute()

        if nclusters == 1:
            # series shape is (nrows,) not (nrows, 1) but both are valid
            # and equivalent for this test
            assert xformed.shape in [(nrows, nclusters), (nrows, )]
        else:
            assert xformed.shape == (nrows, nclusters)

        xformed = cp.array(xformed if len(xformed.shape) ==
                           1 else xformed.as_gpu_matrix())

        # The argmin of the transformed values should be equal to the labels
        # reshape is a quick manner of dealing with (nrows,) is not (nrows, 1)
        xformed_labels = cp.argmin(xformed.reshape(
            (int(nrows), int(nclusters))),
                                   axis=1)

        assert adjusted_rand_score(labels, cp.squeeze(xformed_labels.get()))

    finally:
        if client is not None:
            client.close()
Example #10
def my_kmedoids(image_data, K, threshold=0):
    """
    This is the cuda implementation of my_kmedoids. Requires cuda to function properly.
    """

    N = image_data.shape[0]
    p = image_data.shape[1]

    medoids = np.zeros((K, p))
    _, medoids = my_kmeans(image_data, K)

    medoids = cp.asarray(medoids)
    medoids_old = cp.zeros(medoids.shape)
    medoids_new = deepcopy(medoids)
    error = cp.linalg.norm(medoids_new - medoids_old)
    image_data = cp.asarray(image_data)
    labels = cp.zeros(N)

    DisMat = cp.zeros((N, K))
    iter_ct = 0
    while error > threshold:
        iter_ct += 1
        #print('K-medoids iteration {}'.format(iter_ct))
        medoids_old = deepcopy(medoids_new)
        for i in range(K):  # assign image_data points to closest centroids
            DisMat[:, i] = cp.linalg.norm(image_data - medoids_new[i], axis=1)
        labels = cp.argmin(DisMat, axis=1)
        for i in range(K):  # update the medoid of each cluster
            cluster = image_data[labels == i]
            DMC = cp.sum(cp.linalg.norm(cluster - medoids_new[i], axis=1))
            DMP = cp.zeros(cluster.shape[0])
            if cluster.shape[0] == 0:
                medoids_new[i] = medoids_old[i]
            else:
                for j in range(cluster.shape[0]):
                    DMP[j] = cp.sum(cp.linalg.norm(cluster - cluster[j],
                                                   axis=1))
                small_cost_idx = cp.argmin(DMP)
                if DMP[small_cost_idx] < DMC:
                    medoids_new[i] = cluster[small_cost_idx]
        error = cp.linalg.norm(medoids_new - medoids_old)
    return cp.asnumpy(labels.astype(int)), cp.asnumpy(medoids_new)
Example #11
def likelihoodlossGPU(hic_dist_tao, row_tao, col_tao, struc_t, ts, taos, n_max,
                      n_min):
    start = time.time()
    change = cp.zeros(struc_t.shape)
    changea = cp.zeros(struc_t.shape)
    changeb = cp.zeros(struc_t.shape)
    print("timea:", time.time() - start)
    for t in ts:
        start = time.time()
        tt = t.item()
        struc_index = cp.where(ts == t)[0][0].item()
        if float(t) in taos:
            changea[struc_index], changeb[struc_index] = getLikeChangeGPU(
                hic_dist_tao, row_tao, col_tao, struc_t, changea, changeb, tt,
                struc_index, n_max, n_min)
        else:
            lower = taos[taos - tt < 0]
            upper = taos[taos - tt > 0]
            a1 = lower[cp.argmin(cp.abs(lower - tt))]
            a2 = upper[cp.argmin(cp.abs(upper - tt))]
            w2 = (tt - a1) / cp.abs(a1 - a2)
            w1 = (a2 - tt) / cp.abs(a1 - a2)
            #print(a1,a2,w1,w2)
            changea1, changeb1 = getLikeChangeGPU(hic_dist_tao, row_tao,
                                                  col_tao,
                                                  struc_t, changea, changeb,
                                                  a1.item(), struc_index,
                                                  n_max, n_min)

            changea2, changeb2 = getLikeChangeGPU(hic_dist_tao, row_tao,
                                                  col_tao,
                                                  struc_t, changea, changeb,
                                                  a2.item(), struc_index,
                                                  n_max, n_min)
            changea[struc_index] += w1 * changea1 + w2 * changea2
            changeb[struc_index] += w1 * changeb1 + w2 * changeb2
        #print("timeb", str(time.time()-start))
    change = changea + changeb
    return -change
Example #12
def ts_argmin(x, window):
    if window > len(x):
        return cp.full(len(x), cp.nan)
    # fill NaN and -inf with +inf
    x = cp.where(cp.isinf(x) | cp.isnan(x), cp.inf, x)

    prefix = cp.full(window - 1, cp.nan)
    x_rolling_array = cp_rolling_window(x, window)
    result = cp.argmin(x_rolling_array, axis=1)

    # where a window was entirely -inf, fill the result with NaN
    result = result.astype(float)
    result = cp.where(cp.isinf(x_rolling_array).all(axis=1), cp.nan, result)
    result += 1
    return cp.concatenate((prefix, result))
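`cp_rolling_window` is not shown; the usual stride-trick implementation of a 1-D rolling window (a sketch, assuming a contiguous input) would be:

import cupy as cp

def cp_rolling_window(a, window):
    # View a 1-D array as overlapping windows of length `window`,
    # shape (len(a) - window + 1, window), without copying data.
    shape = (a.size - window + 1, window)
    strides = (a.strides[0], a.strides[0])
    return cp.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)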
Example #13
    def test(self):
        test_mean = np.mean(self.testset, axis=1)
        y = self.__project(self.testset, test_mean)

        pred = []

        for i in range(y.shape[1]):
            tmp = np.sum(np.square((np.expand_dims(y[:, i], axis=1) - self.trainset_project)), axis=0)
            min_i = np.argmin(tmp, axis=0)
            pred.append(self.trainlabel[min_i])

        pred = np.asarray(pred)
        # print(pred)
        acc = np.sum((pred == self.testslabel)) / len(pred)
        print("accuracy: ", acc)
        return pred, acc
Example #14
 def predict(self, Xtest, use_gpu=False):
     """Predict using the linear model
     Parameters
     ----------
     Xtest : array-like, shape (n_samples, n_columns)
         1D data for testing.
     use_gpu : bool, default=False
         If True, use the GPU for calculations.
         
     Returns
     -------
     ndarray of shape (n_samples,)
        Predicted target values per sample in Xtest.
     """
     
     # calculate the CDT using parallel CPUs
     print('\nCalculating CDTs for testing samples ...')
     X = self.cdt_parallel(Xtest)
     
     # import cupy for using GPU
     if use_gpu:
         import cupy as cp
         X = cp.array(X)
     
     # find nearest subspace for each test sample
     print('Finding nearest subspace for each test sample ...')
     D = []
     for class_idx in range(self.num_classes):
         basis = self.subspaces[class_idx]
         basis = basis[:self.len_subspace,:]
         
         if use_gpu:
          D.append(cp.linalg.norm(cp.matmul(cp.matmul(X, cp.array(basis).T),
                                            cp.array(basis)) - X, axis=1))
         else:
             proj = X @ basis.T  # (n_samples, n_basis)
             projR = proj @ basis  # (n_samples, n_features)
             D.append(LA.norm(projR - X, axis=1))
     if use_gpu:
         preds = cp.argmin(cp.stack(D, axis=0), axis=0)
         return cp.asnumpy(preds)
     else:
         D = np.stack(D, axis=0)
         preds = np.argmin(D, axis=0)
         return preds
Example #15
    def calc_boundary_avoidance_v(self):
        distance_from_bounds = cp.abs(
            cp.vstack(
                (self.state[:, 0] - self.x_min, self.state[:, 0] - self.x_max,
                 self.state[:, 1] - self.y_min,
                 self.state[:, 1] - self.y_max)).T)

        closest_bound_inds = cp.argmin(distance_from_bounds, axis=1)
        min_distance_to_bound = cp.min(distance_from_bounds, axis=1)
        bound_changes = (cp.ones(
            (2, self.n_obj)) / min_distance_to_bound).T * (
                (-1)**(closest_bound_inds.reshape(-1, 1) % 2))
        bound_changes[:, 0] = bound_changes[:, 0] * (closest_bound_inds < 2)
        bound_changes[:, 1] = bound_changes[:, 1] * (closest_bound_inds >= 2)

        close_to_bound = min_distance_to_bound < self.bound_threshold

        return close_to_bound, bound_changes
Example #16
def PDcalculate(x, y, data2):
    lon1 = x
    lat1 = y
    lon1 = cp.asarray(lon1)
    lat1 = cp.asarray(lat1)
    lon2 = data2["CLUSTERLONGITUDE"]
    lat2 = data2["CLUSTERLATITUDE"]
    lon3 = lon2.values
    lat3 = lat2.values
    lon4 = cp.asarray(lon3)
    lat4 = cp.asarray(lat3)
    shortdistance = geodistance_cp(lon1, lat1, lon4, lat4)

    indexmin = int(cp.argmin(shortdistance))  # 0-d device array -> Python int
    targetcID = data2.at[indexmin, "CLUSTERID"]
    mindistance = int(cp.min(shortdistance))
    return mindistance, targetcID
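`geodistance_cp` is defined elsewhere; a plausible haversine-style stand-in (an assumption: inputs in degrees, result in meters) is:

import cupy as cp

def geodistance_cp(lon1, lat1, lon2, lat2):
    # Great-circle (haversine) distance on a sphere of Earth's mean radius.
    lon1, lat1, lon2, lat2 = map(cp.radians, (lon1, lat1, lon2, lat2))
    a = (cp.sin((lat2 - lat1) / 2) ** 2
         + cp.cos(lat1) * cp.cos(lat2) * cp.sin((lon2 - lon1) / 2) ** 2)
    return 2 * 6371000.0 * cp.arcsin(cp.sqrt(a))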
Example #17
    def predict(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : array-like, sparse matrix, shape (n_samples, n_proj, n_angles)
        Returns
        -------
        ndarray of shape (n_samples,)
           Predicted target values per element in X.
        """
        X = X.reshape([X.shape[0], -1])
        D = []
        for class_idx in range(self.num_classes):
            basis = self.subspaces[class_idx]
            basis = basis[:self.len_subspace, :]

            if __GPU__:
                D.append(
                    cp.linalg.norm(cp.matmul(cp.matmul(X,
                                                       cp.array(basis).T),
                                             cp.array(basis)) - X,
                                   axis=1))
                #basis = cp.array(basis)
                #proj = cp.matmul(X,basis.T)
                #projR = cp.matmul(proj,basis)
                #D.append(cp.linalg.norm(projR - X, axis=1))
            else:
                proj = X @ basis.T  # (n_samples, n_basis)
                projR = proj @ basis  # (n_samples, n_features)
                D.append(LA.norm(projR - X, axis=1))
        if __GPU__:
            preds = cp.argmin(cp.stack(D, axis=0), axis=0)
            #D = cp.stack(D, axis=0)  # (num_classes, n_samples)
            #preds = cp.argmin(D, axis=0)  # n_samples
            return cp.asnumpy(preds)
        else:
            D = np.stack(D, axis=0)
            preds = np.argmin(D, axis=0)
            return preds
Example #18
    def predict(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : array-like, sparse matrix, shape (n_samples, n_proj, n_angles)
        Returns
        -------
        ndarray of shape (n_samples,)
           Predicted target values per element in X.
        """
        if self.use_gpu:
            import cupy as cp
        X = X.reshape([X.shape[0], -1])
        #X = np.transpose(X,(0,2,1)).reshape(X.shape[0],-1)
        print('Len basis: {}'.format(self.len_subspace))
        D = []
        for class_idx in range(self.num_classes):
            basis = self.subspaces[class_idx]
            basis = basis[:self.len_subspace, :]

            if self.use_gpu:
                D.append(
                    cp.linalg.norm(cp.matmul(cp.matmul(X,
                                                       cp.array(basis).T),
                                             cp.array(basis)) - X,
                                   axis=1))
            else:
                proj = X @ basis.T  # (n_samples, n_basis)
                projR = proj @ basis  # (n_samples, n_features)
                D.append(LA.norm(projR - X, axis=1))
        if self.use_gpu:
            preds = cp.argmin(cp.stack(D, axis=0), axis=0)
            self.preds_label = [self.label[i] for i in cp.asnumpy(preds)]
            return self.preds_label
        else:
            D = np.stack(D, axis=0)
            preds = np.argmin(D, axis=0)
            self.preds_label = [self.label[i] for i in preds]
            return self.preds_label
Example #19
    def test_cuda(self):
        import cupy as cp
        # convert numpy.ndarray to cupy.ndarray
        self.pca_vector = cp.asarray(self.pca_vector)
        self.testset = cp.asarray(self.testset)
        self.trainset_project = cp.asarray(self.trainset_project)
        self.testslabel = cp.asarray(self.testslabel)
        self.trainlabel = cp.asarray(self.trainlabel)

        def project(data, mean):
            y = cp.matmul(self.pca_vector.transpose(), data.transpose() - mean)
            return y

        s = time.time()
        test_mean = cp.mean(self.testset, axis=1)
        y = project(self.testset, test_mean)
        pred = []
        for i in range(y.shape[1]):
            tmp = y[:, i].reshape((y.shape[0], 1))
            tmp = tmp - self.trainset_project
            tmp = cp.sum(cp.square(tmp), axis=0).flatten()
            min_i = cp.argmin(tmp, axis=0)
            pred.append(self.trainlabel[min_i])

        pred = cp.asarray(pred)
        # print(pred)
        acc = cp.sum((pred == self.testslabel)) / len(pred)
        print("accuracy: ", acc)
        e = time.time()
        tim = e - s
        print("Total time of PAC_ORL test: ", tim)

        self.pca_vector = cp.asnumpy(self.pca_vector)
        self.testset = cp.asnumpy(self.testset)
        self.trainset_project = cp.asnumpy(self.trainset_project)
        self.testslabel = cp.asnumpy(self.testslabel)
        self.trainlabel = cp.asnumpy(self.trainlabel)
        return pred, acc
Example #20
def evaluate_chunks(
        results: [cp.ndarray, cp.ndarray,
                  cp.ndarray],  # closest triangle, distance, projection
        all_pts: cp.ndarray = None,
        vertices: cp.ndarray = None,
        edges: cp.ndarray = None,
        edge_norms: cp.ndarray = None,
        edge_normssq: cp.ndarray = None,
        normals: cp.ndarray = None,
        norms: cp.ndarray = None,
        normssq: cp.ndarray = None,
        zero_tensor: cp.ndarray = None,
        one_tensor: cp.ndarray = None,
        tris: cp.ndarray = None,
        vertex_normals: cp.ndarray = None,
        bounding_box: dict = None,
        chunk_size: int = None,
        num_verts: int = None) -> None:

    #
    # Expand vertex normals if non-empty
    if vertex_normals is not None:
        vertex_normals = vertex_normals[tris]
        vertex_normals = cp.tile(cp.expand_dims(vertex_normals, axis=2),
                                 (1, 1, chunk_size, 1))

    # begin = time.time()
    #
    # Load and extend the batch
    num_chunks = all_pts.shape[0] // chunk_size
    for i in range(num_chunks):
        #
        # Get subset of the query points
        start_index = i * chunk_size
        end_index = (i + 1) * chunk_size
        pts = all_pts[start_index:end_index, :]

        #
        # Match the dimensions to those assumed above.
        #    REPEATED       REPEATED
        # [triangle_index, vert_index, querypoint_index, coordinates]
        pts = cp.tile(cp.expand_dims(pts, axis=(0, 1)), (num_verts, 3, 1, 1))

        #
        # Compute the differences between
        # vertices on each triangle and the
        # points of interest
        #
        # [triangle_index, vert_index, querypoint_index, coordinates]
        # ===================
        # [:,0,:,:] = p - p1
        # [:,1,:,:] = p - p2
        # [:,2,:,:] = p - p3
        diff_vectors = pts - vertices

        #
        # Compute alpha, beta, gamma
        barycentric = cp.empty(diff_vectors.shape)

        #
        # gamma = u x (p - p1)
        barycentric[:, 2, :, :] = cp.cross(edges[:, 0, :, :],
                                           diff_vectors[:, 0, :, :])
        # beta = (p - p1) x v
        barycentric[:, 1, :, :] = cp.cross(diff_vectors[:, 0, :, :],
                                           edges[:, 1, :, :])
        # alpha = w x (p - p2)
        barycentric[:, 0, :, :] = cp.cross(edges[:, 2, :, :],
                                           diff_vectors[:, 1, :, :])
        barycentric = cp.divide(
            cp.sum(cp.multiply(barycentric, normals), axis=3), normssq)

        #
        # Test conditions
        less_than_one = cp.less_equal(barycentric, one_tensor)
        more_than_zero = cp.greater_equal(barycentric, zero_tensor)

        #
        #     if 0 <= gamma and gamma <= 1
        #    and 0 <= beta and beta <= 1
        #    and 0 <= alpha and alpha <= 1:
        cond1 = cp.logical_and(less_than_one, more_than_zero)

        #
        #     if gamma <= 0:
        cond2 = cp.logical_not(more_than_zero[:, 2, :])
        cond2 = cp.tile(cp.expand_dims(cond2, axis=1), (1, 3, 1))

        #
        #     if beta <= 0:
        cond3 = cp.logical_not(more_than_zero[:, 1, :])
        cond3 = cp.tile(cp.expand_dims(cond3, axis=1), (1, 3, 1))

        #
        #     if alpha <= 0:
        cond4 = cp.logical_not(more_than_zero[:, 0, :])
        cond4 = cp.tile(cp.expand_dims(cond4, axis=1), (1, 3, 1))

        #
        # Get the projections for each case
        xi = cp.empty(barycentric.shape)
        barycentric_ext = cp.tile(cp.expand_dims(barycentric, axis=3),
                                  (1, 1, 1, 3))
        proj = cp.sum(cp.multiply(barycentric_ext, vertices), axis=1)
        #
        #     if 0 <= gamma and gamma <= 1
        #    and 0 <= beta and beta <= 1
        #    and 0 <= alpha and alpha <= 1:
        xi[cond1] = barycentric[cond1]

        #
        # if gamma <= 0:
        #  x = p - p1
        #  u = p2 - p1
        #  a = p1
        #  b = p2
        t2 = cp.divide(
            #
            # u.dot(x)
            cp.sum(cp.multiply(edges[:, 0, :, :], diff_vectors[:, 0, :, :]),
                   axis=2),
            edge_normssq[:, 0])
        xi2 = cp.zeros((t2.shape[0], 3, t2.shape[1]))
        xi2[:, 0, :] = -t2 + 1
        xi2[:, 1, :] = t2
        #
        t2 = cp.tile(cp.expand_dims(t2, axis=2), (1, 1, 3))
        lz = cp.less(t2, cp.zeros(t2.shape))
        go = cp.greater(t2, cp.ones(t2.shape))
        proj2 = vertices[:, 0, :, :] + cp.multiply(t2, edges[:, 0, :, :])
        proj2[lz] = vertices[:, 0, :, :][lz]
        proj2[go] = vertices[:, 1, :, :][go]
        #
        xi[cond2] = xi2[cond2]
        proj[cp.swapaxes(cond2, 1, 2)] = proj2[cp.swapaxes(cond2, 1, 2)]

        #
        # if beta <= 0:
        #  x = p - p1
        #  v = p3 - p1
        #  a = p1
        #  b = p3
        t3 = cp.divide(
            #
            # v.dot(x)
            cp.sum(cp.multiply(edges[:, 1, :, :], diff_vectors[:, 0, :, :]),
                   axis=2),
            edge_normssq[:, 1])
        xi3 = cp.zeros((t3.shape[0], 3, t3.shape[1]))
        xi3[:, 0, :] = -t3 + 1
        xi3[:, 2, :] = t3
        #
        t3 = cp.tile(cp.expand_dims(t3, axis=2), (1, 1, 3))
        lz = cp.less(t3, cp.zeros(t3.shape))
        go = cp.greater(t3, cp.ones(t3.shape))
        proj3 = vertices[:, 0, :, :] + cp.multiply(t3, edges[:, 1, :, :])
        proj3[lz] = vertices[:, 0, :, :][lz]
        proj3[go] = vertices[:, 2, :, :][go]
        #
        xi[cond3] = xi3[cond3]
        proj[cp.swapaxes(cond3, 1, 2)] = proj3[cp.swapaxes(cond3, 1, 2)]

        #
        #     if alpha <= 0:
        #  y = p - p2
        #  w = p3 - p2
        #  a = p2
        #  b = p3
        t4 = cp.divide(
            #
            # w.dot(y)
            cp.sum(cp.multiply(edges[:, 2, :, :], diff_vectors[:, 1, :, :]),
                   axis=2),
            edge_normssq[:, 2])
        xi4 = cp.zeros((t4.shape[0], 3, t4.shape[1]))
        xi4[:, 1, :] = -t4 + 1
        xi4[:, 2, :] = t4
        #
        t4 = cp.tile(cp.expand_dims(t4, axis=2), (1, 1, 3))
        lz = cp.less(t4, cp.zeros(t4.shape))
        go = cp.greater(t4, cp.ones(t4.shape))
        proj4 = vertices[:, 1, :, :] + cp.multiply(t4, edges[:, 2, :, :])
        proj4[lz] = vertices[:, 1, :, :][lz]
        proj4[go] = vertices[:, 2, :, :][go]
        #
        xi[cond4] = xi4[cond4]
        proj[cp.swapaxes(cond4, 1, 2)] = proj4[cp.swapaxes(cond4, 1, 2)]

        vec_to_point = pts[:, 0, :, :] - proj
        distances = cp.linalg.norm(vec_to_point, axis=2)

        # n = "\n"
        # print(f"{pts[:,0,:,:]=}")
        # print(f"{proj=}")
        # print(f"{pts[:,0,:,:] - proj=}")
        # print(f"{distances=}")

        min_distances = cp.min(distances, axis=0)

        closest_triangles = cp.argmin(distances, axis=0)

        projections = proj[closest_triangles, np.arange(chunk_size), :]

        #
        # Distinguish close triangles
        is_close = cp.isclose(distances, min_distances)

        #
        # Determine sign
        signed_normal = normals[:, 0, :, :]
        if vertex_normals is not None:
            signed_normal = cp.sum(vertex_normals.transpose() * xi.transpose(),
                                   axis=2).transpose()

        is_negative = cp.less_equal(
            cp.sum(cp.multiply(vec_to_point, signed_normal), axis=2), 0.)

        #
        # Combine
        is_close_and_negative = cp.logical_and(is_close, is_negative)

        #
        # Determine if inside
        is_inside = cp.all(cp.logical_or(is_close_and_negative,
                                         cp.logical_not(is_close)),
                           axis=0)

        #
        # Overwrite the signs of points
        # that are outside of the box
        if bounding_box is not None:
            #
            # Extract
            rotation_matrix = cp.asarray(bounding_box['rotation_matrix'])
            translation_vector = cp.asarray(bounding_box['translation_vector'])
            size = cp.asarray(bounding_box['size'])
            #
            # Transform
            transformed_pts = cp.dot(
                all_pts[start_index:end_index, :] - translation_vector,
                rotation_matrix)

            #
            # Determine if outside bbox
            inside_bbox = cp.all(cp.logical_and(
                cp.less_equal(0., transformed_pts),
                cp.less_equal(transformed_pts, size)),
                                 axis=1)

            #
            # Treat points outside bbox as
            # being outside of lumen
            print(f"{inside_bbox=}")
            is_inside = cp.logical_and(is_inside, inside_bbox)

        #
        # Apply sign to indicate whether the distance is
        # inside or outside the mesh.
        min_distances[is_inside] = -1 * min_distances[is_inside]

        #
        # Emplace results
        # [triangle_index, vert_index, querypoint_index, coordinates]
        results[0][start_index:end_index] = closest_triangles
        results[1][start_index:end_index] = min_distances
        results[2][start_index:end_index, :] = projections
Example #21
	# compute gamma

	if(i+1==int(iteration)+int(additional_iteration)):
		if(memory_transfer_flag==1):
			cp_dens_pre = cp.asarray(cp_dens_pre, dtype=cp.complex64)  # numpy array -> cupy array
		rdens_in=cp_sup*cp.absolute(cp_dens_pre)
		rdens_out=-(cp_sup-1.0)*cp.absolute(cp_dens_pre)
		if(memory_transfer_flag==1):
			cp_dens_pre = cp.asnumpy(cp_dens_pre)  # cupy array -> numpy array

		rdens_in_sum=cp.sum(rdens_in, axis=(1,2))
		rdens_out_sum=cp.sum(rdens_out, axis=(1,2))

		gamma = rdens_out_sum / ((OS_ratio - 1.0) * rdens_in_sum)

		n_min_gamma=cp.argmin(gamma)
		min_gamma=cp.min(gamma)

	# compute the center of mass

	if(i+1==int(iteration)+int(additional_iteration)):
		if(memory_transfer_flag==1):
			cp_dens_pre = cp.asarray(cp_dens_pre, dtype=cp.complex64)  # numpy array -> cupy array
		R_dens=cp_dens_pre
		if(memory_transfer_flag==1):
			cp_dens_pre = cp.asnumpy(cp_dens_pre)  # cupy array -> numpy array
		R_dens = cp.asnumpy(R_dens)
		cp_sup = cp.asnumpy(cp_sup)
		R_dens.real=R_dens.real*cp_sup

		if(DFPR_flag == 1):
Example #22
def pred_drs(Q_input, Q1_LUT, Q2_LUT, Q3_LUT, args, nan_th=np.inf):
    """
    観測Qから
    d: 物体距離
    r: 物体反射率
    s: 霧の消滅係数
    を推定する.
    各推定値の範囲はargs参照
    
    Parameters
    ==========
    Q_input: cp.array([row*col, 3])
        観測値Q1, Q2, Q3
        
    Q1_LUT: cp.array(args.sigma_size)
        sとQ1の対応を保持したLookup table
        
    Q2_LUT, Q3_LUT: cp.array([args.dist_size, args.ref_size, args.sigma_size])
        それぞれdrsとQ2, drsとQ3の対応を保持したLookup table
        
    args: Namespace
        LUTを作るのに用いた設定 (引数)
        
        
    Returns
    =======
    drs_pred: np.array([row*col, 3])
        推定したdrs
    
    """
    drs_pred = cp.zeros(Q_input.shape)
    intensity_defogged = cp.zeros(Q_input.shape[0])
    Q_pred = cp.zeros(Q_input.shape)
    Q_error = cp.zeros(Q_input.shape)

    for i, (Q1_obs, Q2_obs, Q3_obs) in enumerate(tqdm(Q_input)):

        s_error = (Q1_LUT - Q1_obs)**2
        s_index = cp.argmin(s_error)
        drs_pred[i, 2] = index2value(s_index, args.sigma_max, args.sigma_min,
                                     args.sigma_size)

        Q_pred[i, 0] = Q1_LUT[s_index]
        Q_error[i, 0] = s_error[s_index]

        Q2Q3_obs = cp.array([Q2_obs, Q3_obs])
        Q2Q3_LUT = cp.stack([Q2_LUT[:, :, s_index], Q3_LUT[:, :, s_index]],
                            axis=2).reshape(args.dist_size * args.ref_size, -1)

        dr_error_vec = (Q2Q3_LUT - Q2Q3_obs)**2
        dr_error = cp.sum(dr_error_vec, axis=1)

        if dr_error.min() < nan_th:
            dr_index = cp.argmin(dr_error)
            d_index = dr_index // args.ref_size
            r_index = dr_index % args.ref_size

            Q_error[i, 1], Q_error[i, 2] = dr_error_vec[dr_index]

            drs_pred[i, 0] = index2value(d_index,
                                         args.dist_max * args.scene_scale,
                                         args.dist_min * args.scene_scale,
                                         args.dist_size)
            drs_pred[i, 1] = index2value(r_index, args.ref_max, args.ref_min,
                                         args.ref_size)

            Q_pred[i, 1] = Q2Q3_LUT[dr_index][0]
            Q_pred[i, 2] = Q2Q3_LUT[dr_index][1]

            intensity_defogged[i] = Q2_LUT[d_index, r_index,
                                           0] + Q3_LUT[d_index, r_index, 0]

        else:
            drs_pred[i, 0] = nan_th
            drs_pred[i, 1] = nan_th

            Q_pred[i, 1] = nan_th
            Q_pred[i, 2] = nan_th

            intensity_defogged[i] = nan_th

    return cp.asnumpy(drs_pred), cp.asnumpy(intensity_defogged), cp.asnumpy(
        Q_pred), cp.asnumpy(Q_error)
Example #23
def const_reverse_LUT(Q1_LUT,
                      Q2_LUT,
                      Q3_LUT,
                      args,
                      Q1_max=250,
                      Q1_min=0,
                      Q1_size=25,
                      Q2_max=3000,
                      Q2_min=0,
                      Q2_size=60,
                      Q3_max=2000,
                      Q3_min=0,
                      Q3_size=40,
                      nan_th=np.inf):

    RevLUT = cp.zeros([Q1_size, Q2_size, Q3_size, 4])
    Q2Q3_vec = cp.array([[
        int(index2value(Q2_index, Q2_max, Q2_min, Q2_size)),
        int(index2value(Q3_index, Q3_max, Q3_min, Q3_size))
    ] for Q2_index in tqdm(range(Q2_size)) for Q3_index in range(Q3_size)])

    for Q1 in tqdm(range(Q1_size)):
        s_error = (Q1_LUT - index2value(Q1, Q1_max, Q1_min, Q1_size))**2
        s_index = cp.argmin(s_error)
        RevLUT[Q1, :, :, 2] = index2value(s_index, args.sigma_max,
                                          args.sigma_min, args.sigma_size)

        Q2Q3_LUT = cp.stack([Q2_LUT[:, :, s_index], Q3_LUT[:, :, s_index]],
                            axis=2).reshape(args.dist_size * args.ref_size, -1)

        for Q2Q3_real_scale in Q2Q3_vec:

            Q2_revlut_scale = value2index(Q2Q3_real_scale[0], Q2_max, Q2_min,
                                          Q2_size)
            Q3_revlut_scale = value2index(Q2Q3_real_scale[1], Q3_max, Q3_min,
                                          Q3_size)
            dr_error_vec = (Q2Q3_LUT - Q2Q3_real_scale)**2
            dr_error = cp.sum(dr_error_vec, axis=1)

            if dr_error.min() < nan_th:
                dr_index = cp.argmin(dr_error)
                d_index = dr_index // args.ref_size
                r_index = dr_index % args.ref_size

                RevLUT[Q1, Q2_revlut_scale, Q3_revlut_scale,
                       0] = index2value(d_index,
                                        args.dist_max * args.scene_scale,
                                        args.dist_min * args.scene_scale,
                                        args.dist_size)
                RevLUT[Q1, Q2_revlut_scale, Q3_revlut_scale,
                       1] = index2value(r_index, args.ref_max, args.ref_min,
                                        args.ref_size)
                RevLUT[Q1, Q2_revlut_scale, Q3_revlut_scale,
                       3] = Q2_LUT[d_index, r_index, 0] + Q3_LUT[d_index,
                                                                 r_index, 0]

            else:
                RevLUT[Q1, Q2_revlut_scale, Q3_revlut_scale, 0] = cp.nan
                RevLUT[Q1, Q2_revlut_scale, Q3_revlut_scale, 1] = cp.nan
                RevLUT[Q1, Q2_revlut_scale, Q3_revlut_scale, 3] = cp.nan

    return cp.asnumpy(RevLUT)
Example #24
    kmeans = KMeans(n_clusters=k).fit(X_train)
    print("done.")
    clst_label = []
    clst_data = []
    for i in np.unique(kmeans.labels_):
        idx = np.where(kmeans.labels_ == i)[0]
        clst_label.append(y_train[idx])
        clst_data.append(X_train[idx])
        
    # Predict with Nearest Neighbor
    temp_time = []
    correct = 0
    for i, te in enumerate(X_test):
        print("test # %d" % i)
        start_time = time.perf_counter()
        target_clst = cp.asnumpy(cp.argmin(cp.linalg.norm(cp.array(kmeans.cluster_centers_-X_test[i]), axis=1)))
        nearest = cp.asnumpy(cp.argmin(cp.linalg.norm(cp.array(clst_data[target_clst]-X_test[i]), axis=1)))
        pred = clst_label[target_clst][nearest]
        end_time = time.perf_counter()
        temp_time.append(end_time-start_time)
        correct += (y_test[i, 0] == pred)
    times.append(np.mean(temp_time))
    times[-1] = times[-1]/times[0]
    accuracies.append(correct*100/X_test.shape[0])
    print('Accuracy: %.3f %%' % (correct*100/X_test.shape[0]))
    print('Estimation Time: %.10f sec' % np.mean(temp_time))
    
accuracies = [np.round(a, decimals=1) for a in accuracies]
times = [np.round(t, decimals=10) for t in times]
print('Accuracies: [%s %%]' % ','.join(map(str, accuracies)))
print('Estimation Time / Estimation Time without Clustering: [%s %%]' % ','.join(map(str, times)))
Example #25
def kmeans_sampling(X, k, round_values=True, detailed=False, random_state=0):
    """
    Adapted from :
    https://github.com/slundberg/shap/blob/9411b68e8057a6c6f3621765b89b24d82bee13d4/shap/utils/_legacy.py
    Summarize a dataset (X) using weighted k-means.

    Parameters
    ----------
    X : cuDF or Pandas DataFrame/Series, numpy arrays or cuda_array_interface
        compliant device array.
        Data to be summarized, shape (n_samples, n_features)
    k : int
        Number of means to use for approximation.
    round_values : bool; default=True
        For all i, round the ith dimension of each mean sample to match the
        nearest value from X[:,i]. This ensures discrete features always get
        a valid value.
    detailed: bool; default=False
        To return details of group names and cluster labels of all data points
    random_state: int; default=0
        Sets the random state.

    Returns
    -------
    summary : Summary of the data, shape (k, n_features)
    group_names : Names of the features
    labels : Cluster labels of the data points in the original dataset,
             shape (n_samples, 1)
    """
    output_dtype = get_supported_input_type(X)
    _output_dtype_str = determine_array_type(X)
    cuml.internals.set_api_output_type(_output_dtype_str)

    if output_dtype is None:
        raise TypeError(f"Type of input {type(X)} is not supported. Supported \
                        dtypes: cuDF DataFrame, cuDF Series, cupy, numba,\
                        numpy, pandas DataFrame, pandas Series")

    if "DataFrame" in str(output_dtype):
        group_names = X.columns
        X = cp.array(X.values, copy=False)
    if "Series" in str(output_dtype):
        group_names = X.name
        X = cp.array(X.values.reshape(-1, 1), copy=False)
    else:
        # it's either numpy, cupy or numba
        X = cp.array(X, copy=False)
        try:
            # more than one column
            group_names = [str(i) for i in range(X.shape[1])]
        except IndexError:
            # one column
            X = X.reshape(-1, 1)
            group_names = ['0']

    # in case there are any missing values in data impute them
    imp = SimpleImputer(missing_values=cp.nan,
                        strategy='mean',
                        output_type=_output_dtype_str)
    X = imp.fit_transform(X)

    kmeans = KMeans(n_clusters=k,
                    random_state=random_state,
                    output_type=_output_dtype_str).fit(X)

    if round_values:
        for i in range(k):
            for j in range(X.shape[1]):
                xj = X[:, j].toarray().flatten() if issparse(
                    X) else X[:, j]  # sparse support courtesy of @PrimozGodec
                ind = cp.argmin(cp.abs(xj - kmeans.cluster_centers_[i, j]))
                kmeans.cluster_centers_[i, j] = X[ind, j]
    summary = kmeans.cluster_centers_
    labels = kmeans.labels_

    if detailed:
        return summary, group_names, labels
    else:
        return summary
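A hypothetical call, assuming a CuPy array of purely numeric features:

import cupy as cp

# Summarize 10,000 random samples into 8 weighted means; with
# detailed=True the cluster labels of every sample come back too.
X = cp.random.rand(10000, 5)
summary, group_names, labels = kmeans_sampling(X, k=8, detailed=True)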
Example #26
def threshold_minimum(image=None, nbins=256, max_iter=10000, *, hist=None):
    """Return threshold value based on minimum method.

    The histogram of the input ``image`` is computed if not provided and
    smoothed until there are only two maxima. Then the minimum in between is
    the threshold value.

    Either image or hist must be provided. In case hist is given, the actual
    histogram of the image is ignored.

    Parameters
    ----------
    image : (M, N) ndarray, optional
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    max_iter : int, optional
        Maximum number of iterations to smooth the histogram.
    hist : array, or 2-tuple of arrays, optional
        Histogram to determine the threshold from and a corresponding array
        of bin center intensities. Alternatively, only the histogram can be
        passed.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    Raises
    ------
    RuntimeError
        If unable to find two local maxima in the histogram or if the
        smoothing takes more than 1e4 iterations.

    References
    ----------
    .. [1] C. A. Glasbey, "An analysis of histogram-based thresholding
           algorithms," CVGIP: Graphical Models and Image Processing,
           vol. 55, pp. 532-537, 1993.
    .. [2] Prewitt, JMS & Mendelsohn, ML (1966), "The analysis of cell
           images", Annals of the New York Academy of Sciences 128: 1035-1053
           :DOI:`10.1111/j.1749-6632.1965.tb11715.x`

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_minimum(image)
    >>> binary = image > thresh
    """
    def find_local_maxima_idx(hist):
        # We can't use scipy.signal.argrelmax
        # as it fails on plateaus
        maximum_idxs = list()
        direction = 1

        # TODO: better to transfer hist back to cpu?
        hist = cp.asnumpy(hist)  # device synchronize

        for i in range(hist.shape[0] - 1):
            if direction > 0:
                if hist[i + 1] < hist[i]:
                    direction = -1
                    maximum_idxs.append(i)
            else:
                if hist[i + 1] > hist[i]:
                    direction = 1

        return maximum_idxs

    counts, bin_centers = _validate_image_histogram(image, hist, nbins)

    smooth_hist = counts.astype(cp.float64, copy=False)

    for counter in range(max_iter):
        smooth_hist = ndi.uniform_filter1d(smooth_hist, 3)
        maximum_idxs = find_local_maxima_idx(smooth_hist)
        if len(maximum_idxs) < 3:
            break

    if len(maximum_idxs) != 2:
        raise RuntimeError('Unable to find two maxima in histogram')
    elif counter == max_iter - 1:
        raise RuntimeError('Maximum iteration reached for histogram '
                           'smoothing')

    # Find lowest point between the maxima
    threshold_idx = cp.argmin(smooth_hist[maximum_idxs[0]:maximum_idxs[1] + 1])

    return bin_centers[maximum_idxs[0] + int(threshold_idx)]
Example #27
    return Array._new(
        np.asarray(np.argmax(x._array, axis=axis, keepdims=keepdims)))


def argmin(x: Array,
           /,
           *,
           axis: Optional[int] = None,
           keepdims: bool = False) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.argmin <numpy.argmin>`.

    See its docstring for more information.
    """
    return Array._new(
        np.asarray(np.argmin(x._array, axis=axis, keepdims=keepdims)))


def nonzero(x: Array, /) -> Tuple[Array, ...]:
    """
    Array API compatible wrapper for :py:func:`np.nonzero <numpy.nonzero>`.

    See its docstring for more information.
    """
    return tuple(Array._new(i) for i in np.nonzero(x._array))


def where(condition: Array, x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.where <numpy.where>`.
Example #28
def reconstruct_alt(imgs,
                    discs,
                    hres_size,
                    row,
                    n_iters=1,
                    o_f_init=None,
                    del_1=1000,
                    del_2=1,
                    round_values=True,
                    plot_per_frame=False,
                    show_interval=None,
                    subtract_bg=False,
                    out_path=None):
    """The main reconstruction algorithm. Adapted from Tian et. al."""
    # Put input images on GPU, estimate background noise
    imgs = [cp.array(img) for img in imgs]
    bgs = get_bg(imgs) if subtract_bg else cp.zeros(len(imgs))

    IMAGESIZE = imgs[0].shape[0]
    CUTOFF_FREQ_px = get_cutoff(row)
    FRAMES = len(imgs)

    orig = IMAGESIZE // 2 - 1  # Low-res origin
    lres_size = (IMAGESIZE, IMAGESIZE)
    m1, n1 = lres_size
    m, n = hres_size

    losses = []  # Reconstruction Loss
    convs = []  # Inverse Convergence index

    # Initial high-res guess
    if lres_size == hres_size:  # Initialize with ones
        # Use old algorithm
        F = lambda x: cp.fft.fftshift(cp.fft.fft2(x))
        Ft = lambda x: cp.fft.ifft2(cp.fft.ifftshift(x))
        o = cp.ones(hres_size)
        o_f = F(o)
    elif o_f_init is not None:  # Initialize with given initialization
        F = lambda x: cp.fft.fftshift(cp.fft.fft2(cp.fft.ifftshift(x)))
        Ft = lambda x: cp.fft.fftshift(cp.fft.ifft2(cp.fft.ifftshift(x)))
        o = cp.zeros_like(o_f_init)
        o_f = o_f_init
    else:  # Initialize with resized first frame from imgs
        F = lambda x: cp.fft.fftshift(cp.fft.fft2(cp.fft.ifftshift(x)))
        Ft = lambda x: cp.fft.fftshift(cp.fft.ifft2(cp.fft.ifftshift(x)))
        o = cp.sqrt(
            cp.array(cv2.resize(cp.asnumpy(imgs[0] - bgs[0]), hres_size)))
        o_f = Ft(o)

    # Pupil Function
    p = cp.zeros(lres_size)
    p = cp.array(cv2.circle(cp.asnumpy(p), (orig, orig), CUTOFF_FREQ_px, 1,
                            -1))
    ctf = p.copy()  # Ideal Pupil, for filtering later on

    # Main Loop
    log = tqdm(
        total=n_iters,
        desc=f'Starting...',
        bar_format=
        '{percentage:3.0f}% [{elapsed}<{remaining} ({rate_inv_fmt})]{bar}{desc}',
        leave=False,
        ascii=True)

    for j in range(n_iters):
        conv = []  # Convergence Index
        for i in range(FRAMES):

            if discs[i] == 0:  # Empty frame
                continue

            # Get k0x, k0y and hence, shifting values
            k0x, k0y = discs[i]

            # Construct auxiliary functions for the set of LEDs (= 1, here)
            if hres_size == lres_size:
                shift_x, shift_y = [
                    -round(k0x - orig), -round(k0y - orig)
                ] if round_values else [-(k0x - orig), -(k0y - orig)]

                if not round_values:
                    o_f_i = FourierShift2D(o_f,
                                           [shift_x, shift_y])  # O_i(k - k_m)
                else:
                    o_f_i = cp.roll(o_f, int(shift_y), axis=0)
                    o_f_i = cp.roll(o_f_i, int(shift_x), axis=1)

                yl, xl = 0, 0  # To reduce code later on

            else:  # Output size larger than individual frames
                _orig = hres_size[0] // 2 - 1

                del_x, del_y = k0x - orig, k0y - orig
                x, y = round(_orig - del_x), round(_orig - del_y)

                yl = int(y - m1 // 2)
                xl = int(x - n1 // 2)

                assert xl > 0 and yl > 0, 'Both should be > 0'
                o_f_i = o_f[yl:yl + n1, xl:xl + m1].copy()

            psi_k = o_f_i * p * ctf  #DEBUG: REPLACE * ctf with * p

            # Plot outputs after each frame, for debugging
            if plot_per_frame:
                o_i = Ft(o_f_i * p)
                plt.figure(figsize=(10, 2))
                plt.subplot(161)
                plt.imshow(cp.asnumpy(correct(abs(o_i))))
                plt.title(f'$I_{{l}}({i})$')
                opts()  #DEBUG
                plt.subplot(162)
                plt.imshow(
                    cp.asnumpy(
                        cv2.convertScaleAbs(
                            cp.asnumpy(20 * cp.log(1 + abs(o_f_i * p))))))
                plt.title(f'$S_{{l}}({i})$')
                opts()  #DEBUG

            # Impose intensity constraint and update auxiliary function
            psi_r = F(psi_k)  #DEBUG: CHANGE BACK TO F

            # Low-res estimate obtained from our reconstruction
            I_l = abs(psi_r)

            # Subtract background noise and clip values to avoid NaN
            I_hat = cp.clip(imgs[i] - bgs[i], a_min=0)
            phi_r = cp.sqrt(I_hat / (cp.abs(psi_r)**2)) * psi_r

            phi_k = Ft(phi_r)  #DEBUG: CHANGE BACK TO Ft

            # Update object and pupil estimates
            if hres_size == lres_size:
                if not round_values:
                    p_i = FourierShift2D(p, [-shift_x, -shift_y])  # P_i(k+k_m)
                else:
                    p_i = cp.roll(p, int(-shift_y), axis=0)
                    p_i = cp.roll(p_i, int(-shift_x), axis=1)

                if not round_values:
                    phi_k_i = FourierShift2D(
                        phi_k, [-shift_x, -shift_y])  # Phi_m_i(k+k_m)
                else:
                    phi_k_i = cp.roll(phi_k, int(-shift_y), axis=0)
                    phi_k_i = cp.roll(phi_k_i, int(-shift_x), axis=1)
            else:  # Output size larger than individual frames
                p_i = p.copy()
                phi_k_i = phi_k.copy()

            ## O_{i+1}(k)
            temp = o_f[yl:yl + n1, xl:xl + m1].copy() + ( cp.abs(p_i) * cp.conj(p_i) * (phi_k_i - o_f[yl:yl + n1, xl:xl + m1].copy() * p_i) ) / \
                        ( cp.abs(p).max() * (cp.abs(p_i) ** 2 + del_1) )

            ## P_{i+1}(k)
            p   =  p  + ( cp.abs(o_f_i) * cp.conj(o_f_i) * (phi_k - o_f_i * p) ) / \
                        ( cp.abs(o_f[yl:yl + n1, xl:xl + m1].copy()).max() * (cp.abs(o_f_i) ** 2 + del_2) )

            o_f[yl:yl + n1, xl:xl + m1] = temp.copy()

            ###### Using F here instead of Ft to get upright image
            o = F(o_f) if lres_size != hres_size else Ft(o_f)
            ######

            if plot_per_frame:
                plt.subplot(163)
                plt.imshow(cp.asnumpy(cp.mod(ctf * cp.angle(p), 2 * cp.pi)))
                plt.title(f'P({i})')
                opts()  #DEBUG
                plt.subplot(164)
                plt.imshow(cp.asnumpy(correct(abs(o))))
                plt.title(f'$I_{{h}}({i})$')
                opts()  #DEBUG
                plt.subplot(165)
                plt.imshow(cp.asnumpy(correct(cp.angle(o))))
                plt.title(f'$\\theta(I_{{h}}({i}))$')
                opts()  #DEBUG
                plt.subplot(166)
                plt.imshow(cp.asnumpy(show(cp.asnumpy(o_f))))
                plt.title(f'$S_{{h}}({i})$')
                opts()
                plt.show()  #DEBUG

            c = inv_conv_idx(I_l, imgs[i])
            conv.append(c)

        if not plot_per_frame and (show_interval is not None
                                   and j % show_interval == 0):
            o_i = Ft(o_f_i * p)  #DEBUG
            plt.figure(figsize=(10, 2))
            plt.subplot(161)
            plt.imshow(cp.asnumpy(correct(abs(o_i))))
            plt.title(f'$I_{{l}}({i})$')
            opts()  #DEBUG
            plt.subplot(162)
            plt.imshow(
                cp.asnumpy(
                    cv2.convertScaleAbs(
                        cp.asnumpy(20 * cp.log(1 + abs(o_f_i * p))))))
            plt.title(f'$S_{{l}}({i})$')
            opts()  #DEBUG
            plt.subplot(163)
            plt.imshow(cp.asnumpy(cp.mod(ctf * cp.angle(p), 2 * cp.pi)))
            plt.title(f'P({i})')
            opts()  #DEBUG
            plt.subplot(164)
            plt.imshow(cp.asnumpy(correct(abs(o))))
            plt.title(f'$I_{{h}}({i})$')
            opts()  #DEBUG
            plt.subplot(165)
            plt.imshow(cp.asnumpy(correct(cp.angle(o))))
            plt.title(f'$\\theta(I_{{h}}({i}))$')
            opts()  #DEBUG
            plt.subplot(166)
            plt.imshow(
                cp.asnumpy(
                    cv2.convertScaleAbs(cp.asnumpy(20 *
                                                   cp.log(1 + abs(o_f))))))
            plt.title(f'$S_{{h}}({i})$')
            opts()
            plt.show()  #DEBUG

        loss = metric_norm(imgs, o_f_i, p)
        losses.append(loss)
        conv = float(sum(conv) / len(conv))
        convs.append(conv)
        log.set_description_str(
            f'[Iteration {j + 1}] Convergence Loss: {cp.asnumpy(conv):e}')
        log.update(1)

    scale = 7
    plt.figure(figsize=(3 * scale, 4 * scale))

    plt.subplot(421)
    plt.plot(cp.asnumpy(cp.arange(len(losses))),
             cp.asnumpy(cp.clip(cp.array(losses), a_min=None, a_max=1e4)),
             'b-')
    plt.title('Loss Curve')
    plt.ylabel('Loss Value')
    plt.xlabel('Iteration')
    plt.subplot(422)
    plt.plot(cp.asnumpy(cp.arange(len(convs))),
             cp.asnumpy(cp.clip(cp.array(convs), a_min=None, a_max=1e14)),
             'b-')
    plt.title('Convergence Index Curve')
    plt.ylabel('Convergence Index')
    plt.xlabel('Iteration')

    amp = cp.array(cv2.resize(
        read_tiff(row.AMPLITUDE.values[0])[0], hres_size))
    phase = cp.array(cv2.resize(read_tiff(row.PHASE.values[0])[0], hres_size))

    plt.subplot(434)
    plt.title(f'amplitude (Scaled up from {lres_size})')
    plt.imshow(cp.asnumpy(to_uint8(amp)))
    opts()

    plt.subplot(435)
    plt.title(f'phase (Scaled up from {lres_size})')
    plt.imshow(cp.asnumpy(to_uint8(phase)))

    plt.subplot(436)
    rec = abs(cp.sqrt(amp) * cp.exp(1j * phase))
    plt.title(f'Ground Truth (Scaled up from {lres_size})')
    plt.imshow(cp.asnumpy(to_uint8(rec)))

    plt.subplot(437)
    plt.title('Reconstruction Amplitude')
    amp = abs(o)
    if lres_size == hres_size:
        amp = correct(amp)
    plt.imshow(cp.asnumpy(to_uint8((amp))))

    plt.subplot(438)
    plt.title('Reconstruction Phase')
    phase = cp.angle(o)
    if lres_size == hres_size:
        phase = correct(phase)
    plt.imshow(cp.asnumpy(to_uint8(phase)))

    plt.subplot(439)
    plt.title('Reconstructed Image')
    rec = abs(cp.sqrt(amp) * cp.exp(1j * phase))
    plt.imshow(cp.asnumpy(to_uint8(rec)))

    plt.subplot(427)
    plt.title(f'Recovered Pupil')
    p_show = cp.mod(ctf * cp.angle(p), 2 * cp.pi)
    p_show = (p_show / p_show.max() * 255).astype(np.uint8)
    plt.imshow(cp.asnumpy(p_show), cmap='nipy_spectral')

    plt.subplot(428)
    plt.title(f'Raw frames\' mean (Scaled up from {lres_size})')
    plt.imshow(cv2.resize(cp.asnumpy(cp.array(imgs).mean(axis=0)), hres_size))

    if out_path is None:
        plt.show()
    else:
        plt.savefig(out_path, bbox_inches='tight')
        plt.close('all')

    # Ignore early noise and print where the error is lowest
    if n_iters > 10:
        it = cp.argmin(cp.array(convs[10:])) + 11
        if out_path is not None:
            print(f'Convergence index lowest at {it}th iteration.')
    else:
        it = cp.argmin(cp.array(convs)) + 1
        if out_path is not None:
            print(f'Convergence index lowest at {it}th iteration.')

    if lres_size == hres_size:
        o = correct(o)
    return o, p, it
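`FourierShift2D` (used for the non-integer shifts above) is not included. A standard phase-ramp implementation of a subpixel shift, consistent with the integer `cp.roll` fallback, might look like this sketch; treat it as an assumption rather than the original helper:

import cupy as cp

def FourierShift2D(x, delta):
    # Shift a 2-D complex array by (delta[0], delta[1]) = (shift_x, shift_y)
    # pixels via the Fourier shift theorem; a +1 integer shift matches
    # cp.roll(x, 1) along the corresponding axis.
    rows, cols = x.shape
    ky = cp.fft.fftfreq(rows)[:, None]
    kx = cp.fft.fftfreq(cols)[None, :]
    ramp = cp.exp(-2j * cp.pi * (ky * delta[1] + kx * delta[0]))
    return cp.fft.ifft2(cp.fft.fft2(x) * ramp)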
Example #29
def qmf(hk):
    """
    Return high-pass qmf filter from low-pass

    Parameters
    ----------
    hk : array_like
        Coefficients of the low-pass filter.

    """
    N = len(hk) - 1
    asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
    return hk[::-1] * cp.array(asgn)


def cascade(hk, J=7):
    """
    Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.

    Parameters
    ----------
    hk : array_like
        Coefficients of low-pass filter.
    J : int, optional
        Values will be computed at grid points ``K/2**J``. Default is 7.

    Returns
    -------
    x : ndarray
        The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
        ``len(hk) = len(gk) = N+1``.
    phi : ndarray
        The scaling function ``phi(x)`` at `x`:
        ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
    psi : ndarray, optional
        The wavelet function ``psi(x)`` at `x`:
        ``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
        `psi` is only returned if `gk` is not None.

    Notes
    -----
    The algorithm uses the vector cascade algorithm described by Strang and
    Nguyen in "Wavelets and Filter Banks".  It builds a dictionary of values
    and slices for quick reuse.  Then inserts vectors into final vector at the
    end.

    """
    N = len(hk) - 1

    if (J > 30 - cp.log2(N + 1)):
        raise ValueError("Too many levels.")
    if (J < 1):
        raise ValueError("Too few levels.")

    # construct matrices needed
    nn, kk = cp.ogrid[:N, :N]
    s2 = cp.sqrt(2)
    # append a zero so that take works
    thk = cp.r_[hk, 0]
    gk = qmf(hk)
    tgk = cp.r_[gk, 0]

    indx1 = cp.clip(2 * nn - kk, -1, N + 1)
    indx2 = cp.clip(2 * nn - kk + 1, -1, N + 1)
    m = cp.zeros((2, 2, N, N), 'd')
    m[0, 0] = cp.take(thk, indx1, 0)
    m[0, 1] = cp.take(thk, indx2, 0)
    m[1, 0] = cp.take(tgk, indx1, 0)
    m[1, 1] = cp.take(tgk, indx2, 0)
    m *= s2

    # construct the grid of points
    x = cp.arange(0, N * (1 << J), dtype=float) / (1 << J)
    phi = 0 * x

    psi = 0 * x

    # find phi0, and phi1
    lam, v = eig(m[0, 0])
    ind = cp.argmin(cp.absolute(lam - 1))
    # a dictionary with a binary representation of the
    #   evaluation points x < 1 -- i.e. position is 0.xxxx
    v = cp.real(v[:, ind])
    # need scaling function to integrate to 1 so find
    #  eigenvector normalized to sum(v,axis=0)=1
    sm = cp.sum(v)
    if sm < 0:  # need scaling function to integrate to 1
        v = -v
        sm = -sm
    bitdic = {'0': v / sm}
    bitdic['1'] = cp.dot(m[0, 1], bitdic['0'])
    step = 1 << J
    phi[::step] = bitdic['0']
    phi[(1 << (J - 1))::step] = bitdic['1']
    psi[::step] = cp.dot(m[1, 0], bitdic['0'])
    psi[(1 << (J - 1))::step] = cp.dot(m[1, 1], bitdic['0'])
    # descend down the levels inserting more and more values
    #  into bitdic -- store the values in the correct location once we
    #  have computed them -- stored in the dictionary
    #  for quicker use later.
    prevkeys = ['1']
    for level in range(2, J + 1):
        newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
        fac = 1 << (J - level)
        for key in newkeys:
            # convert key to number
            num = 0
            for pos in range(level):
                if key[pos] == '1':
                    num += (1 << (level - 1 - pos))
            pastphi = bitdic[key[1:]]
            ii = int(key[0])
            temp = cp.dot(m[0, ii], pastphi)
            bitdic[key] = temp
            phi[num * fac::step] = temp
            psi[num * fac::step] = cp.dot(m[1, ii], pastphi)
        prevkeys = newkeys

    return x, phi, psi
Example #30
 def nn_naive(ref, query):
     dist = ((ref[None, :, :] - query[:, None, :])**2).sum(axis=2)
     # indices of ref for each query point
     indices = cupy.argmin(dist, axis=1)
     return indices
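Note the axis convention: `nn_naive` builds a (query_nb, ref_nb) distance matrix, so its `argmin` over `axis=1` returns one reference index per query, matching `nn_gpu` from Example #2 (which takes `argmin` over `axis=0` of the transposed layout). A quick sanity check:

import cupy

ref = cupy.random.rand(100, 3).astype(cupy.float32)
query = cupy.random.rand(20, 3).astype(cupy.float32)
indices = nn_naive(ref, query)
assert indices.shape == (20,)  # one nearest-reference index per query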