def distance_field(self, img: np.ndarray) -> np.ndarray:
    field = np.zeros_like(img)
    for batch in range(len(img)):
        fg_mask = img[batch] > 0.5
        if fg_mask.any():
            bg_mask = ~fg_mask
            # distance of each foreground pixel to the nearest background pixel ...
            fg_dist = edt(fg_mask)
            # ... plus distance of each background pixel to the nearest foreground pixel
            bg_dist = edt(bg_mask)
            field[batch] = fg_dist + bg_dist
    return field
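Here, edt is assumed to be scipy.ndimage.distance_transform_edt and img a batch of soft masks. A minimal usage sketch under those assumptions (BoundaryLoss is a hypothetical host class, used only so the method's self is satisfied):

import numpy as np
from scipy.ndimage import distance_transform_edt as edt

class BoundaryLoss:  # hypothetical host class, for illustration only
    distance_field = distance_field

masks = (np.random.rand(2, 16, 16) > 0.7).astype(np.float32)  # batch of 2 binary masks
field = BoundaryLoss().distance_field(masks)
# field[b] sums, at every pixel, the distances to the nearest background and
# foreground pixels, i.e. an unsigned distance to the mask boundary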
Example #2
def test():
    for i_test in range(1000):
        h = np.random.randint(1, 100)
        w = np.random.randint(1, 100)
        mask = np.random.rand(h, w) < 0.02

        if np.sum(mask) == 0:
            continue

        t = Timer()

        df_scipy = edt(np.logical_not(mask))

        t.stop("scipy   ")

        df_meijster = calculate_df_meijster(mask)

        t.stop("meijster")

        df_felz = calculate_df_felz(mask)

        t.stop("felz    ")
        print("")

        #df_true = calculate_df_naive(mask)

        assert (np.max(np.abs(df_scipy - df_meijster)) < 1e-10)
        assert (np.max(np.abs(df_scipy - df_felz)) < 1e-10)

        if i_test % 100 == 0:
            print(i_test)

    print("tests passed")
Example #3
def score_hypothesis(object_dimg, Q, R, trans):
    """
    Points need to be in camera coordinates.
    All distances are in meters.
    """
    # Subsample the point cloud to at most 3000 points
    if Q.points.shape[0] > 3000:
        indices = np.random.randint(0, Q.points.shape[0], [3000])
        Q = mc.PointCloud(Q.points[indices])
    Q_img = Q.back_project(R, trans).dimg  # the raw ndarray is used here
    object_dimg = object_dimg.dimg

    # Points outside mask
    if np.sum((Q_img > 0) & (object_dimg == 0)) > 0:
        comb = (object_dimg > 0) | (Q_img > 0)
        slice_x, slice_y = find_objects(comb)[0]
        # distance from each pixel to the object mask, within the bounding box
        dist = edt(object_dimg[slice_x, slice_y] == 0)
        q_slice = Q_img[slice_x, slice_y]
        o_slice = object_dimg[slice_x, slice_y]
        first_dim, second_dim = np.nonzero((q_slice > 0) & (o_slice == 0))
        score1 = np.mean(dist[first_dim, second_dim])
    else:
        score1 = 0

    # Points in front of mask
    nonzero_closer = (Q_img != 0) & (object_dimg > Q_img)
    score2 = object_dimg[nonzero_closer] - Q_img[nonzero_closer]
    if score2.shape[0] > 0:
        score2 = np.mean(score2)
    else:
        score2 = 0

    return score1 + 2000 * score2
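A toy illustration of the score1 term: the EDT of the mask's complement is 0 on the object and grows with distance outside it, so projected points that miss the mask are penalized by how far they land from it (all names below are illustrative):

import numpy as np
from scipy.ndimage import distance_transform_edt as edt

object_mask = np.zeros((8, 8), dtype=bool)
object_mask[2:6, 2:6] = True

dist = edt(object_mask == 0)              # 0 on the object, distance to it elsewhere
stray = [(0, 0), (7, 7)]                  # projected points that missed the mask
print(np.mean([dist[p] for p in stray]))  # ~2.83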
Example #4
def compute_uncertain(sample, prediction, model):
    """
    Computes an uncertainty map for a given sample and its prediction with a given model,
    based on the number of stochastic forward passes defined in the constants file.
    :param sample: input sample.
    :param prediction: prediction for the input sample.
    :param model: unet model with Dropout layers.
    :return: uncertainty map.
    """
    X = np.zeros([1, img_rows, img_cols])

    # Collect nb_step_predictions stochastic predictions (Dropout active at inference).
    # A separate name is used so the input `prediction` is not overwritten.
    for t in range(nb_step_predictions):
        step_pred = model.predict(sample, verbose=0).reshape([1, img_rows, img_cols])
        X = np.concatenate((X, step_pred))

    X = np.delete(X, [0], 0)  # drop the zero placeholder row

    if apply_edt:
        # apply distance transform normalization: weight the per-pixel variance
        # by a range-normalized distance transform of the input prediction.
        var = np.var(X, axis=0)
        transform = range_transform(edt(prediction))
        return np.sum(var * transform)
    else:
        return np.sum(np.var(X, axis=0))
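range_transform is an external helper; judging by its use it presumably rescales the distance map to a fixed range. A minimal min-max sketch (an assumption, not necessarily the project's implementation):

import numpy as np

def range_transform(x):
    """Hypothetical min-max normalization of x to [0, 1]."""
    lo, hi = x.min(), x.max()
    return (x - lo) / (hi - lo) if hi > lo else np.zeros_like(x)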
Example #5
    def __init__(self):
        self.current_pose = np.zeros(3)  # we start at [0, 0] unless told otherwise
        self.target_pose = np.zeros(3)
        self.map_msg = self.get_omap()
        self.pixel_to_meters = self.map_msg.info.resolution
        self.height = self.map_msg.info.height
        self.width = self.map_msg.info.width
        self.map = np.array(self.map_msg.data).reshape((self.height, self.width))
        # binarize the occupancy grid: 1 = free, 0 = occupied/unknown
        self.map[self.map == 0] = 1
        self.map[self.map != 1] = 0
        self.original_map = self.map
        # erode the map so planned paths keep a safety margin from walls
        self.map = erosion(self.map, disk(1.0 / 3 / self.pixel_to_meters))
        # at every point, the distance to the nearest wall; a useful heuristic,
        # since we want to stay as far from walls as possible
        self.safe_map = edt(self.map)

        self.pose_sub = rospy.Subscriber("/initialpose", PoseWithCovarianceStamped, self.clicked_pose, queue_size=1)
        self.click_sub = rospy.Subscriber("/clicked_point", PointStamped, self.clicked_pose, queue_size=1)
        self.odom_sub = rospy.Subscriber("/pf/pose/odom", Odometry, self.set_current_pose, queue_size=1)
        self.path_pub = rospy.Publisher("/path", Path, queue_size=1)

        self.search_alg = self.RRT  # self.Astar_circle or self.Astar or self.RRT
Example #6
    def compute_dist_map(self):
        """ Compute distance map (can be time consuming) """
        # Build a hypersurface with three spatial dimensions + 1 dose dimension
        hypSurfDim = self.ref_img.shape + (self.ndbins,)
        hypSurf = np.ones(hypSurfDim)

        # Fill each layer of the dose axis
        # Dose points are set to 0

        lookup = np.digitize(self.ref_img, self.dbins) - 1  # index of the dose bin for each voxel

        for i in range(self.ndbins):
            dose_points = lookup == i
            if self.ndim == 3:
                hypSurf[:, :, :, i][dose_points] = 0
                # simple (naive) interpolation. See Fig. 2 in Chen 2009
                hypSurf = self._interp_dose_along_ax3(hypSurf, lookup, 0)
                hypSurf = self._interp_dose_along_ax3(hypSurf, lookup, 1)
                hypSurf = self._interp_dose_along_ax3(hypSurf, lookup, 2)
                # Here, we could try to mask, layer by layer, all positions of pixels
                # below threshold to speed up calculation (only w/ skfmm)
            elif self.ndim == 2:
                hypSurf[:, :, i][dose_points] = 0
                # simple (naive) interpolation. See Fig. 2 in Chen 2009
                hypSurf = self._interp_dose_along_ax2(hypSurf, lookup, 0)
                hypSurf = self._interp_dose_along_ax2(hypSurf, lookup, 1)
                # Here, we could try to mask, layer by layer, all positions of pixels
                # below threshold to speed up calculation (only w/ skfmm)
            else:
                raise IndexError('Only 2 and 3 spatial dimensions are supported at this moment')

        dst = edt(hypSurf, sampling=self.delta)
        # dst = skfmm.distance(hypSurf)

        self.dist_map = dst
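For reference, a standalone sketch of the dose-binning step: np.digitize maps each voxel's dose to a bin index exactly as lookup is built above (the dose values and bin edges here are illustrative):

import numpy as np

ref_img = np.array([[0.0, 1.2], [2.7, 3.9]])  # illustrative dose values
dbins = np.linspace(0.0, 4.0, 5)              # bin edges 0, 1, 2, 3, 4

lookup = np.digitize(ref_img, dbins) - 1      # 0-based dose-bin index per voxel
print(lookup)                                 # [[0 1] [2 3]]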
Example #7
    def hd_distance(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Directed Hausdorff distance from the foreground of x to the foreground of y."""
        if np.count_nonzero(x) == 0 or np.count_nonzero(y) == 0:
            return np.array([np.inf])

        indexes = np.nonzero(x)
        # distance of every pixel to the nearest foreground pixel of y
        distances = edt(np.logical_not(y))

        return np.array(np.max(distances[indexes]))
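hd_distance is directed (from the foreground of x toward y); the usual symmetric Hausdorff distance is the maximum over both directions. A usage sketch, with metric standing in for an instance of the (unnamed) host class:

import numpy as np

a = np.zeros((16, 16)); a[4:8, 4:8] = 1
b = np.zeros((16, 16)); b[6:10, 6:10] = 1

hd = max(metric.hd_distance(a, b), metric.hd_distance(b, a))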
Example #8
def misclasification_distance(gt, pr, decimals=0):
    '''
    Counts the number of false positives and false negatives as a function of
    distance from the edge of the ground truth.

            Parameters:
                    gt (np array) : The ground truth image
                    pr (np array) : The prediction image
                    decimals (int): Number of decimals the distances are rounded to

            Returns:
                    res (dict): false-positive/false-negative distances and counts,
                                plus the confusion map
    '''

    # distance to the object edge, inside (dpos) and outside (dneg) the ground truth
    dpos = np.around(edt(gt != 0), decimals=decimals)
    dneg = np.around(edt(gt == 0), decimals=decimals)

    # confusion map: 4 = true positive, 3 = false positive,
    # 2 = false negative, 1 = true negative
    m = 4 * gt * pr + 2 * gt * (1 - pr) + 3 * (1 - gt) * pr + (1 - gt) * (1 - pr)

    # false-positive distances and per-distance counts
    dneg = dneg * (m == 3)
    duneg = np.unique(dneg)
    fpcnt = [(dneg == d).sum() for d in duneg]

    # false-negative distances and per-distance counts
    dpos = dpos * (m == 2)
    dupos = np.unique(dpos)
    fncnt = [(dpos == d).sum() for d in dupos]

    res = {
        'fp_dist': duneg,
        'fp_count': fpcnt,
        'fn_dist': dupos,
        'fn_count': fncnt,
        'confusion_map': m
    }

    return res
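A usage sketch, assuming binary gt and pr arrays of the same shape:

import numpy as np

gt = np.zeros((32, 32), dtype=int); gt[8:24, 8:24] = 1
pr = np.zeros((32, 32), dtype=int); pr[10:28, 10:28] = 1

res = misclasification_distance(gt, pr)
for d, c in zip(res['fp_dist'], res['fp_count']):
    print("false positives at distance %g from the gt edge: %d" % (d, c))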
Example #9
def main():
    np.random.seed(0)

    test()
    exit(0)  # remove this line to run the visual comparison below

    n = 32

    mask = np.random.rand(n, n) < 0.02

    df_true = calculate_df_naive(mask)

    neighbors = np.array([[-1, -1], [0, -1], [1, -1], [-1, 0]])

    df_sweep = calculate_df_sweep(mask, neighbors)

    df_scipy = edt(np.logical_not(mask))

    df_meijster = calculate_df_meijster(mask)

    df_felz = calculate_df_felz(mask)

    dfs = [
        (df_sweep, "df_sweep"),
        (df_scipy, "df_scipy"),
        (df_meijster, "df_meijster"),
        (df_felz, "df_felz"),
    ]

    for df, name in dfs:
        print(name, "mean error: %e" % np.mean(np.abs(df - df_true)))

    images = [
        (mask, "mask"),
        (df_true, "df_true"),
        (df_meijster, "df_meijster"),
        (df_felz, "df_felz"),
        (np.abs(df_sweep - df_true), "|sweep - true|"),
        (np.abs(df_scipy - df_true), "|scipy - true|"),
        (np.abs(df_meijster - df_true), "|meijster - true|"),
        (np.abs(df_felz - df_true), "|felz - true|"),
    ]

    for i, (image, name) in enumerate(images):
        plt.subplot(2, 4, 1 + i)
        plt.title(name)
        plt.imshow(image, cmap='gray', interpolation='nearest')
        plt.axis('off')

    plt.show()
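calculate_df_naive is not shown here; a brute-force reference consistent with how it is used (the distance from every pixel to the nearest True pixel of mask, matching edt(np.logical_not(mask))) might look like this, at O(h*w*k) cost:

import numpy as np

def calculate_df_naive(mask):
    """Hypothetical brute-force reference distance field."""
    ys, xs = np.nonzero(mask)
    points = np.stack([ys, xs], axis=1)                 # (k, 2) foreground coords
    gy, gx = np.mgrid[:mask.shape[0], :mask.shape[1]]
    grid = np.stack([gy, gx], axis=-1)                  # (h, w, 2) pixel coords
    # distance from every pixel to every foreground point, then the minimum
    d = np.linalg.norm(grid[:, :, None, :] - points[None, None, :, :], axis=-1)
    return d.min(axis=2)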
Example #10
def track2volume(track, fieldshape, ds=[1, 1], objectids=None, edtgain=1.):
    """
    Converts a track array (as returned by ctrax2np, say) to a volumetric block with EDT
    :type track: numpy.ndarray
    :param track: Track array

    :type fieldshape: list or tuple
    :param fieldshape: Shape of the tracking field

    :type ds: list or tuple
    :param ds: Downsampling ratio for lossless downsampling of track volume.

    :type objectids: list
    :param objectids: List of IDs of objects to be tracked

    :type edtgain: float
    :param edtgain: Gain of the negative exponential of euclidean distance transform

    :rtype: numpy.ndarray
    """

    assert ds[0] == ds[1], "Only square downsampling is supported for now."

    # Preallocate (floor division keeps the shape integral under Python 3)
    trackvol = np.ones(shape=(track.shape[0], fieldshape[0] // ds[0],
                              fieldshape[1] // ds[1]))

    # Default for objectids: one entry per tracked object (track is time x objects x 2)
    objectids = range(track.shape[1]) if objectids is None else objectids

    # Round track
    track = np.round(track / ds[0]).astype('int64')

    # Write to volume
    for obj in objectids:
        trackvol[range(track.shape[0]), track[range(track.shape[0]), obj, 0],
                 track[range(track.shape[0]), obj, 1]] = 0.

    # This should have generated thread-like structures in the track volume. Run a negative exponential distance
    # transform on it and return
    return np.exp(
        np.array(
            [-edtgain * ds[0] * edt(trackplane) for trackplane in trackvol]))
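A small usage sketch, assuming a track array of shape (time, objects, 2) holding (y, x) positions:

import numpy as np

# two frames, one object, positions (y, x) on a 16x16 field
track = np.array([[[4.0, 4.0]], [[8.0, 8.0]]])

vol = track2volume(track, fieldshape=(16, 16))
# vol equals 1.0 at the tracked positions and decays exponentially away from them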
Example #11
def drawAlpha(X, filtration, r, draw_balls=False):
    """
    Draw the delaunay triangulation in dotted lines, with the alpha faces at
    a particular scale
    Parameters
    ----------
    X: ndarray(N, 2)
        A 2D point cloud
    filtration: list of [(idxs, d)]
        List of simplices in the filtration, listed by idxs, which indexes into
        X, and with an associated scale d at which the simplex enters the filtration
    r: float
        The radius/scale up to which to plot balls/simplices
    draw_balls: boolean
        Whether to draw the balls (discs intersected with voronoi regions)
    """

    # Determine limits of plot
    pad = 0.3
    xlims = [np.min(X[:, 0]), np.max(X[:, 0])]
    xr = xlims[1] - xlims[0]
    ylims = [np.min(X[:, 1]), np.max(X[:, 1])]
    yr = ylims[1] - ylims[0]
    xlims[0] -= xr * pad
    xlims[1] += xr * pad
    ylims[0] -= yr * pad
    ylims[1] += yr * pad

    if draw_balls:
        resol = 2000
        xr = np.linspace(xlims[0], xlims[1], resol)
        yr = np.linspace(ylims[0], ylims[1], resol)
        xpix, ypix = np.meshgrid(xr, yr)
        P = np.ones((xpix.shape[0], xpix.shape[1], 4))
        PComponent = np.ones_like(xpix)
        PBound = np.zeros_like(PComponent)
        # First make balls
        XPix = np.array([xpix.flatten(), ypix.flatten()]).T
        D = pairwise_distances(X, XPix)
        for i in range(X.shape[0]):
            # First make the ball part
            ballPart = (xpix - X[i, 0])**2 + (ypix - X[i, 1])**2 <= r**2
            # Now make the Voronoi part
            voronoiPart = np.reshape(np.argmin(D, axis=0) == i, ballPart.shape)
            Pi = ballPart * voronoiPart
            PComponent[Pi == 1] = 0
            # Make the boundary stroke part
            e = edt(1 - Pi)
            e[e > 10] = 0
            e[e > 0] = 1.0 / e[e > 0]
            PBound = np.maximum(e, PBound)
        # Now make Voronoi regions
        P[:, :, 0] = PComponent
        P[:, :, 1] = PComponent
        P[:, :, 3] = 0.2 + 0.8 * PBound
        plt.imshow(np.flipud(P),
                   cmap='magma',
                   extent=(xlims[0], xlims[1], ylims[0], ylims[1]))

    # Plot simplices
    patches = []
    for (idxs, d) in filtration:
        if len(idxs) == 2:
            if d < r:
                plt.plot(X[idxs, 0], X[idxs, 1], 'k', linewidth=2)
            else:
                plt.plot(X[idxs, 0],
                         X[idxs, 1],
                         'gray',
                         linestyle='--',
                         linewidth=1)
        elif len(idxs) == 3 and d < r:
            patches.append(Polygon(X[idxs, :]))
    ax = plt.gca()
    p = PatchCollection(patches, alpha=0.2, facecolors='C1')
    ax.add_collection(p)
    plt.scatter(X[:, 0], X[:, 1], zorder=0)
    plt.xlim(xlims[0], xlims[1])
    plt.ylim(ylims[0], ylims[1])
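The boundary stroke above uses a small inverse-EDT trick; a standalone sketch of just that step:

import numpy as np
from scipy.ndimage import distance_transform_edt as edt

Pi = np.zeros((32, 32))
Pi[12:20, 12:20] = 1       # a filled region

e = edt(1 - Pi)            # distance of each outside pixel to the region
e[e > 10] = 0              # keep only a 10-pixel halo
e[e > 0] = 1.0 / e[e > 0]  # stroke gets brighter toward the boundary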
Example #12
    data_mask = data_binary.copy()
    data_mask = data_mask.astype(np.uint16)
    data_tmp = block_reduce(data_mask, (bkrad, bkrad), np.sum)
    data_mask = (
        resize(
            data_tmp,
            data_mask.shape,
            order=0,
            preserve_range=True,
            anti_aliasing=False,
            mode="reflect",
        )
        < bkrad
    ).astype(np.uint8)
    data_label_fine = label(data_mask)
    data_label_coarse = label(edt(data_mask > 0) > bkrad)
    for lb_c in np.unique(data_label_coarse.reshape(-1)):
        if lb_c > 0:
            y, x = np.where(data_label_coarse == lb_c)
            lb_f = np.unique(data_label_fine[y, x])
            # a coarse blob may overlap several fine labels; mark them all
            data_label_fine[np.isin(data_label_fine, lb_f)] = -1
    data_mask = (data_label_fine < 0).astype(np.uint8)

    # CNN inference
    data_probs = classifier.predict_image_sliding(
        raw_im=data, stride=8, batch_size=120, interp_order=1, device=device
    )

    # Reordering the classes to
    # 1 - Diffuse/others
    # 2 - Fibers