Example #1
    def runTest(self):
        # Build a tree from the fixture points, interpolate it into a
        # skeleton, and inflate it into a neuron with radii in [5, 50].
        tree = Tree(self.points)
        skeleton = Skeleton(tree, [1, 1, 1], "random")
        neuron = Neuron(skeleton, 5, 50)

        # Render the neuron into the smallest canvas that contains it.
        canvas, offset = neuron.get_minimal_canvas()
        canvas = neuron.draw(canvas, offset)

        # Write the rendered volume to HDF5; the context manager
        # guarantees the file is closed even if a write fails.
        with h5py.File("./small_neuron_random.h5", "w") as f:
            f.create_dataset("neuron", data=canvas)
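
To check the result, the written dataset can be read back with h5py (a minimal sketch; the path matches the test above):

import h5py

# Read the rendered neuron volume back from the file written above.
with h5py.File("./small_neuron_random.h5", "r") as f:
    canvas = f["neuron"][:]
print(canvas.shape, canvas.dtype)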
Example #2
    def runTest(self):
        tree = Tree(self.points)
        self.assertEqual(tree.get_number_of_vertices(), self.unique_points)
        self.assertEqual(tree.get_number_of_edges(), self.expected_edges)

        # Collect the incident edges of every vertex.
        incident_edges = []
        for v in tree.get_vertex_iterator():
            incident_edges.append(tree.get_incident_edges(v))

        # A vertex with exactly one incident edge is a leaf; one with
        # exactly three incident edges is a branch point.
        n_leaves = len([1 for es in incident_edges if len(es) == 1])
        n_branches = len([1 for es in incident_edges if len(es) == 3])
        self.assertEqual(n_leaves, self.expected_leaves)
        self.assertEqual(n_branches, self.expected_branches)
        tree.to_nml("./small_tree.nml")
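
For orientation, a minimal sketch of the same Tree API outside the test fixture; the point sampling here is illustrative, and only methods used above appear:

import numpy as np
from skelerator import Tree

# Build a tree over 20 random 3D points and inspect its topology.
points = np.stack([np.random.randint(0, 100, 20) for _ in range(3)], axis=1)
tree = Tree(points)
print(tree.get_number_of_vertices(), "vertices,",
      tree.get_number_of_edges(), "edges")

# Vertices with exactly one incident edge are leaves.
degrees = [len(tree.get_incident_edges(v)) for v in tree.get_vertex_iterator()]
print(sum(1 for d in degrees if d == 1), "leaf vertices")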
Example #3
    def runTest(self):
        tree = Tree(self.points)
        skeleton = Skeleton(tree, [1, 1, 1], "random")

        self.min_radius = 5
        self.max_radius = 50
        neuron = Neuron(skeleton, self.min_radius, self.max_radius)

        for v in neuron.get_vertex_iterator():
            # Every vertex radius must stay within the configured bounds.
            radius_v = neuron.get_radius(v)
            self.assertTrue(radius_v <= self.max_radius)
            self.assertTrue(radius_v >= self.min_radius)

            # Radii may differ by at most 1 between neighbouring vertices,
            # so the neuron thickness varies smoothly along the skeleton.
            nbs = neuron.get_neighbours(v)
            for u in nbs:
                radius_u = neuron.get_radius(u)
                self.assertTrue(np.abs(radius_u - radius_v) <= 1)
Example #4
def create_segmentation(shape,
                        n_objects,
                        points_per_skeleton,
                        interpolation,
                        smoothness,
                        write_to=None,
                        seed=0):
    """
    Creates a toy segmentation containing skeletons.

    Args:

    shape: Size of the desired dataset
    
    n_objects: The number of skeleton/neurons to generate in the given volume

    points_per_skeleton: The number of potential branch points that are sampled per skeleton. 
                         Higher numbers lead to more complex shapes.

    interpolation: Method of interpolation between two sample points. Can be either linear or
                   random (constrained random walk).

    smoothness: Controls the smoothness of the initial noise map used to generate object boundaries.
    """
    try:
        shape = np.array(shape)
        if len(shape) != 3:
            raise ValueError("Provide 3D shape.")

        if np.any(shape % 2 != 0):
            raise ValueError("All shape dimensions have to be even.")

        # Derive a per-process seed so that parallel workers do not all
        # draw identical samples; _identity is empty in the main process,
        # so fall back to 0 there. Seed before any sampling so the noise
        # map is reproducible as well.
        identity = mp.current_process()._identity
        pid = identity[0] if identity else 0
        np.random.seed(pid * 3 + seed)

        noise = np.abs(np.random.randn(*shape))
        smoothed_noise = gaussian_filter(noise, sigma=smoothness)

        # Sample one tree for each object and generate its skeleton. The
        # virtual volume is twice as large as requested to avoid border
        # effects; to keep the density of points per unit volume constant,
        # the number of sampled points is scaled by 2**3 = 8.
        seeds = np.zeros(2 * shape, dtype=int)
        for i in range(n_objects):
            points = np.stack([
                np.random.randint(0, 2 * shape[2 - dim],
                                  (2**3) * points_per_skeleton)
                for dim in range(3)
            ], axis=1)
            tree = Tree(points)
            skeleton = Skeleton(tree, [1, 1, 1],
                                interpolation,
                                generate_graph=False)
            seeds = skeleton.draw(seeds, np.array([0, 0, 0]), i + 1)
        """
        Cut the volume to original size.
        """
        seeds = seeds[int(shape[0] / 2):int(3 * shape[0] / 2),
                      int(shape[1] / 2):int(3 * shape[1] / 2),
                      int(shape[2] / 2):int(3 * shape[2] / 2)]
        """
        We generate an artificial segmentation by first filtering
        skeleton points that are too close to each other via a non max supression
        to avoid artifacts. A distance transform of the skeletons plus smoothed noise
        is then used to calculate a watershed transformation with the skeletons as seeds
        resulting in the final segmentation.
        """
        seeds[maximum_filter(seeds, size=4) != seeds] = 0
        seeds_dt = distance_transform_edt(seeds == 0) + 5. * smoothed_noise
        segmentation = cwatershed(seeds_dt, seeds)
        boundaries = find_boundaries(segmentation)

        if write_to is not None:
            # The context manager guarantees the file is closed.
            with h5py.File(write_to, "w") as f:
                f.create_dataset("segmentation",
                                 data=segmentation.astype(np.uint64))
                f.create_dataset("skeletons", data=seeds.astype(np.uint64))
                f.create_dataset("boundaries",
                                 data=boundaries.astype(np.uint64))
                f.create_dataset("smoothed_noise", data=smoothed_noise)
                f.create_dataset("distance_transform", data=seeds_dt)

        data = {
            "segmentation": segmentation,
            "skeletons": seeds,
            "raw": boundaries
        }

    except Exception:
        # Re-raise with the full traceback string so errors surface
        # properly when this function runs in a worker process.
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))

    return data
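
For context, a minimal call of create_segmentation (all parameter values here are illustrative, not from the source):

# Generate a 64^3 toy volume with five skeleton objects.
data = create_segmentation(shape=(64, 64, 64),
                           n_objects=5,
                           points_per_skeleton=10,
                           interpolation="linear",
                           smoothness=2.0,
                           write_to="toy_segmentation.h5")
print(data["segmentation"].shape)  # (64, 64, 64)
print(data["raw"].shape)           # boundary map of the same shape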
Example #5
    def provide(self, request):

        timing = Timing(self)
        timing.start()

        # skelerator is required to draw the synthetic skeletons; fail
        # early if it is not available.
        try:
            from skelerator import Tree, Skeleton
        except ImportError:
            logger.error("skelerator cannot be imported, please check!")
            raise

        batch = Batch()
        n_objects = np.random.randint(self.numinst_min, self.numinst_max + 1)
        raw_spec = request.array_specs[self.raw].copy()
        shape = list(raw_spec.roi.get_shape())

        # create background noise: low-amplitude uniform noise plus sparse
        # high-intensity "pepper" noise, smoothed with a Gaussian filter
        random_noise = np.random.uniform(0, 0.005, [3] + shape)
        pepper_noise = (np.random.uniform(0, 1, [3] + shape) > 0.9999).astype(
            np.uint8)
        pepper_noise = pepper_noise * np.random.uniform(
            0.4, 1, pepper_noise.shape) * 2
        mixed_noise = random_noise + pepper_noise
        smoothed_noise = gaussian_filter(mixed_noise, sigma=1)

        # sample one tree for each object and generate its skeleton
        instances = []
        for i in range(n_objects):
            points_per_skeleton = np.random.randint(
                self.points_per_skeleton_min, self.points_per_skeleton_max + 1)
            instance = np.zeros(shape, dtype=int)
            points = np.stack([
                np.random.randint(0, shape[dim], points_per_skeleton)
                for dim in range(3)
            ], axis=1)
            tree = Tree(points)
            skeleton = Skeleton(tree, [1, 1, 1],
                                self.interpolation,
                                generate_graph=False)
            instance = skeleton.draw(instance, np.array([0, 0, 0]), 1)
            instances.append(instance)

        # process instances: dilate each binary skeleton and derive a
        # smoothed version that is added to the raw channels as signal
        for i in range(len(instances)):
            instances[i] = ndimage.binary_dilation(instances[i] > 0).astype(
                np.uint8)
            instance_smoothed = ndimage.gaussian_filter(
                instances[i].astype(np.float32), 1)

            # distribute the instance over the three channels: the first
            # (randomly chosen) channel always receives signal, the second
            # with probability 0.5, the third with probability 0.1, each
            # with its own random intensity
            channel = np.random.permutation([0, 1, 2])
            prob = [
                True,
                np.random.uniform(0, 1) > 0.5,
                np.random.uniform(0, 1) > 0.9
            ]
            intensity = [
                np.random.uniform(0.5, 1.0),
                np.random.uniform(0, 1),
                np.random.uniform(0, 1)
            ]
            mask = instances[i] > 0
            for c, cp, ci in zip(channel, prob, intensity):
                if cp:
                    smoothed_noise[c][mask] += instance_smoothed[mask] * ci

        smoothed_noise = np.clip(smoothed_noise, 0, 1)

        # relabel instance masks
        for i in range(len(instances)):
            instances[i] *= (i + 1)

        instances = np.stack(instances, axis=0).astype(np.uint16)
        smoothed_noise = smoothed_noise.astype(np.float32)
        spec = self.array_specs[self.raw].copy()
        spec.roi = raw_spec.roi.copy()
        batch[self.raw] = Array(data=smoothed_noise, spec=spec)

        gt_spec = self.array_specs[self.gt].copy()
        gt_spec.roi = spec.roi.copy()
        gt_request = request[self.gt]
        gt_array = Array(data=instances, spec=gt_spec)
        batch[self.gt] = gt_array.crop(gt_request.roi)

        timing.stop()
        batch.profiling_stats.add(timing)

        # write data
        #sample_name = datetime.now().strftime("%d_%b_%Y_%H_%M_%S_%f")
        #outfn = "/nrs/saalfeld/maisl/ppp/simulated/" + sample_name
        #f = h5py.File(outfn + '.hdf', "w")
        #f.create_dataset("instances", data=instances.astype(np.uint16))
        #f.create_dataset("raw", data=smoothed_noise.astype(np.float32))
        #f.close()

        #mip = (np.max(smoothed_noise.astype(np.float32), axis=1) * 255).astype(
        #    np.uint8)
        #io.imsave(outfn + '.png', np.moveaxis(mip, 0, -1))

        return batch
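
The provide method above follows the gunpowder BatchProvider interface. Below is a hypothetical sketch of requesting a batch from such a node; the class name SyntheticSource and both array keys are assumptions, not from the source:

import gunpowder as gp

raw = gp.ArrayKey("RAW")
gt = gp.ArrayKey("GT_INSTANCES")

# SyntheticSource is assumed to be the node defining provide() above,
# configured elsewhere with the ranges it reads from self.
pipeline = SyntheticSource(raw=raw, gt=gt)

request = gp.BatchRequest()
request[raw] = gp.ArraySpec(roi=gp.Roi((0, 0, 0), (64, 64, 64)))
request[gt] = gp.ArraySpec(roi=gp.Roi((0, 0, 0), (64, 64, 64)))

with gp.build(pipeline):
    batch = pipeline.request_batch(request)

print(batch[raw].data.shape)  # (3, 64, 64, 64): three raw channels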