Example #1
def get_candidates(pipeline, setup_config, foreground):

    # Data properties
    voxel_size = gp.Coordinate(setup_config["VOXEL_SIZE"])
    micron_scale = voxel_size[0]

    # Config options
    window_size = gp.Coordinate(setup_config["NMS_WINDOW_SIZE"]) * micron_scale
    threshold = setup_config["NMS_THRESHOLD"]

    # Candidate mode
    mode = setup_config["CANDIDATE_MODE"]

    # New array key
    maxima = gp.ArrayKey("MAXIMA")

    if mode == "skel":
        pipeline = pipeline + Skeletonize(
            foreground, maxima, min(window_size), threshold=threshold)
    else:
        pipeline = (
            pipeline + UnSqueeze([foreground]) +
            NonMaxSuppression(foreground, maxima, window_size, threshold) +
            Squeeze([foreground, maxima]))

    return pipeline, maxima
Example #2
    def process(self, batch, request):

        if self.client is None:
            self.client = pymongo.MongoClient(host=self.db_host)
            self.db = self.client[self.db_name]
            create_indices = 'nodes' not in self.db.list_collection_names()
            self.cells = self.db['nodes']
            if create_indices:
                self.cells.create_index([(loc, pymongo.ASCENDING)
                                         for loc in ['t', 'z', 'y', 'x']],
                                        name='position')
                self.cells.create_index([('id', pymongo.ASCENDING)],
                                        name='id',
                                        unique=True)

        roi = batch[self.maxima].spec.roi
        voxel_size = batch[self.maxima].spec.voxel_size

        maxima = batch[self.maxima].data
        cell_indicator = batch[self.cell_indicator].data
        parent_vectors = batch[self.parent_vectors].data

        cells = []
        for index in np.argwhere(
                maxima * cell_indicator > self.score_threshold):
            index = gp.Coordinate(index)
            logger.debug("Getting parent vector at index %s" % str(index))

            score = cell_indicator[index]
            if self.edge_length == 1:
                parent_vector = tuple(
                    float(x) for x in parent_vectors[(Ellipsis, ) + index])
            else:
                parent_vector = WriteCells.get_avg_pv(parent_vectors, index,
                                                      self.edge_length)
            position = roi.get_begin() + voxel_size * index
            if self.volume_shape is not None and \
               np.any(np.greater_equal(
                   position,
                   gp.Coordinate(self.volume_shape) * voxel_size)):
                continue

            cell_id = int(
                math.cantor_number(roi.get_begin() / voxel_size + index))

            cells.append({
                'id': cell_id,
                'score': float(score),
                't': position[0],
                'z': position[1],
                'y': position[2],
                'x': position[3],
                'parent_vector': parent_vector
            })

            logger.debug("ID=%d, score=%f, parent_vector=%s" %
                         (cell_id, score, parent_vector))

        if len(cells) > 0:
            self.cells.insert_many(cells)
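The `math.cantor_number` call above presumably refers to a project-local helper (the stdlib `math` module has no such function); it derives a unique integer ID from a voxel coordinate. A minimal sketch of what such an n-dimensional Cantor pairing might look like, as an assumption about that helper:

def cantor_pair(a, b):
    # Classic Cantor pairing: a bijection from pairs of non-negative
    # integers to the non-negative integers.
    return (a + b) * (a + b + 1) // 2 + b

def cantor_number(coordinate):
    # Hypothetical n-dimensional variant: fold the 2D pairing over the
    # (t, z, y, x) components. Assumes non-negative components.
    result = int(coordinate[0])
    for component in coordinate[1:]:
        result = cantor_pair(result, int(component))
    return result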
Example #3
    def setup(self):

        self.ndims = self.data.shape[1]

        if self.points_spec is not None:
            self.provides(self.points, self.points_spec)
        elif isinstance(self.points, gp.ArrayKey):
            self.provides(self.points, gp.ArraySpec(voxel_size=(1,)))
        elif isinstance(self.points, gp.GraphKey):
            logger.debug("ndims: %s", self.ndims)
            min_bb = gp.Coordinate(
                np.floor(np.amin(self.data[:, :self.ndims], 0)))
            max_bb = gp.Coordinate(
                np.ceil(np.amax(self.data[:, :self.ndims], 0)) + 1)

            roi = gp.Roi(min_bb, max_bb - min_bb)
            logger.debug(f"Bounding Box: {roi}")

            self.provides(self.points, gp.GraphSpec(roi=roi))

        if self.labels is not None:
            assert isinstance(self.labels, gp.ArrayKey), \
                f"Label key must be an ArrayKey, was given {type(self.labels)}"

            if self.labels_spec is not None:
                self.provides(self.labels, self.labels_spec)
            else:
                self.provides(self.labels, gp.ArraySpec(voxel_size=(1,)))
Example #4
    def setup(self):

        self.provides(
            self.array_key,
            gp.ArraySpec(
                roi=gp.Roi(
                    offset=gp.Coordinate((-10000, -10000, -10000)),
                    shape=gp.Coordinate((20000, 20000, 20000))),
                voxel_size=(1, 1, 1)))
Example #5
    def test_context(self):
        d_pred = gp.ArrayKeys.D_PRED
        m_pred = gp.ArrayKeys.M_PRED
        presyn = gp.PointsKeys.PRESYN
        postsyn = gp.PointsKeys.POSTSYN

        outdir = tempfile.mkdtemp()

        voxel_size = gp.Coordinate((10, 10, 10))
        size = (200, 200, 200)
        # Check that the score of the entire cube is measured, even though
        # the cube around the border point lies partially outside the
        # request ROI.
        context = 40
        shape = gp.Coordinate(size) / voxel_size
        m_predar = np.zeros(shape, dtype=np.float32)
        outsidepoint = gp.Coordinate((13, 13, 13))
        borderpoint = (4, 4, 4)
        m_predar[3:5, 3:5, 3:5] = 1
        m_predar[outsidepoint] = 1

        d_predar = np.zeros((3, shape[0], shape[1], shape[2]))

        pipeline = (TestSource(m_predar, d_predar, voxel_size=voxel_size) +
                    ExtractSynapses(m_pred,
                                    d_pred,
                                    presyn,
                                    postsyn,
                                    out_dir=outdir,
                                    settings=parameters,
                                    context=context) +
                    gp.PrintProfilingStats())

        request = gp.BatchRequest()

        roi = gp.Roi((40, 40, 40), (80, 80, 80))

        request[presyn] = gp.PointsSpec(roi=roi)
        request[postsyn] = gp.PointsSpec(roi=roi)
        with gp.build(pipeline):
            batch = pipeline.request_batch(request)

        synapsefile = os.path.join(outdir, "40", "40", "40.npz")
        with np.load(synapsefile) as data:
            data = dict(data)

        self.assertTrue(len(data['ids']) == 1)
        self.assertEqual(data['scores'][0], 2.0**3)  # Size of the cube.
        for ii in range(len(voxel_size)):
            self.assertEqual(data['positions'][0][0][ii],
                             borderpoint[ii] * voxel_size[ii])

        for ii in range(len(voxel_size)):
            self.assertEqual(data['positions'][0][1][ii],
                             borderpoint[ii] * voxel_size[ii] + 0)
        shutil.rmtree(outdir)
Example #6
    def test_output_basics(self):
        d_pred = gp.ArrayKeys.D_PRED
        m_pred = gp.ArrayKeys.M_PRED
        presyn = gp.PointsKeys.PRESYN
        postsyn = gp.PointsKeys.POSTSYN

        voxel_size = gp.Coordinate((10, 10, 10))
        size = (200, 200, 200)
        context = 40
        shape = gp.Coordinate(size) / voxel_size
        m_predar = np.zeros(shape, dtype=np.float32)
        insidepoint = gp.Coordinate((10, 10, 10))
        outsidepoint = gp.Coordinate((15, 15, 15))
        m_predar[insidepoint] = 1
        m_predar[outsidepoint] = 1

        d_predar = np.ones((3, shape[0], shape[1], shape[2])) * 10

        outdir = tempfile.mkdtemp()

        pipeline = (TestSource(m_predar, d_predar, voxel_size=voxel_size) +
                    ExtractSynapses(m_pred,
                                    d_pred,
                                    presyn,
                                    postsyn,
                                    out_dir=outdir,
                                    settings=parameters,
                                    context=context))

        request = gp.BatchRequest()

        roi = gp.Roi((40, 40, 40), (80, 80, 80))

        request[presyn] = gp.PointsSpec(roi=roi)
        request[postsyn] = gp.PointsSpec(roi=roi)
        with gp.build(pipeline):
            batch = pipeline.request_batch(request)
        synapsefile = os.path.join(outdir, "40", "40", "40.npz")
        with np.load(synapsefile) as data:
            data = dict(data)

        self.assertTrue(len(data['ids']) == 1)
        self.assertEqual(data['scores'][0], 1.0)  # Score of a single voxel.
        for ii in range(len(voxel_size)):
            self.assertEqual(data['positions'][0][1][ii],
                             insidepoint[ii] * voxel_size[ii])

        for ii in range(len(voxel_size)):
            self.assertEqual(data['positions'][0][0][ii],
                             insidepoint[ii] * voxel_size[ii] + 10)
        shutil.rmtree(outdir)
Example #7
    def get_avg_pv(parent_vectors, index, edge_length):
        ''' Computes the average parent vector offset from the parent vectors
        in a cube centered at index. Accounts for the fact that each parent
        vector is a relative offset from its source location, not from index.

        Args:

            parent_vectors (``np.array``):

                A numpy array of parent vectors with dimensions
                (channels, time, z, y, x).

            index (``gp.Coordinate``):

                A 4D coordinate (t, z, y, x) indicating the target
                location to get the average parent vector for.

            edge_length (``int``):

                Length of each side of the cube within which the
                parent vectors are averaged.

        '''
        radius = (edge_length - 1) // 2
        logger.debug("Getting average parent vectors with radius"
                     " %d around index %s" % (radius, str(index)))
        offsets = []
        pv_shape = parent_vectors.shape
        # channels, t, z, y, x
        assert len(pv_shape) == 5
        pv_max_z = pv_shape[2]
        pv_max_y = pv_shape[3]
        pv_max_x = pv_shape[4]
        logger.debug("Type of index[1]: %s   index[1] %s" %
                     (str(type(index[1])), str(index[1])))
        for z in range(max(0, index[1] - radius),
                       min(index[1] + radius + 1, pv_max_z)):
            for y in range(max(0, index[2] - radius),
                           min(index[2] + radius + 1, pv_max_y)):
                for x in range(max(0, index[3] - radius),
                               min(index[3] + radius + 1, pv_max_x)):
                    c = gp.Coordinate((z, y, x))
                    c_with_time = gp.Coordinate((index[0], z, y, x))
                    relative_pos = c - index[1:]
                    offset_relative_to_c = parent_vectors[(Ellipsis, ) +
                                                          c_with_time]
                    offsets.append(offset_relative_to_c + relative_pos)
        logger.debug("Offsets to average: %s" + str(offsets))
        parent_vector = tuple(
            float(sum(col) / len(col)) for col in zip(*offsets))
        return parent_vector
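As a quick, hypothetical sanity check of the docstring's claim, reusing the WriteCells class from Example #2: with a constant parent-vector field and an interior index, the relative-position corrections over the symmetric cube sum to zero, so the average equals the field value.

import numpy as np
import gunpowder as gp

# Constant field of (2, 2, 2) vectors, layout (channels, t, z, y, x).
parent_vectors = np.full((3, 1, 10, 10, 10), 2.0, dtype=np.float32)
index = gp.Coordinate((0, 5, 5, 5))  # (t, z, y, x), well inside the volume
avg = WriteCells.get_avg_pv(parent_vectors, index, edge_length=3)
assert avg == (2.0, 2.0, 2.0)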
Example #8
    def test_delete_points_in_context(self):
        points = gp.PointsKey("POINTS")
        pv_array = gp.ArrayKey("PARENT_VECTORS")
        mask = gp.ArrayKey("MASK")
        radius = [0.1, 0.1, 0.1, 0.1]
        ts = TracksSource(TEST_FILE, points)
        apv = AddParentVectors(points, pv_array, mask, radius)
        request = gp.BatchRequest()
        request.add(points, gp.Coordinate((1, 4, 4, 4)))
        request.add(pv_array, gp.Coordinate((1, 4, 4, 4)))
        request.add(mask, gp.Coordinate((1, 4, 4, 4)))

        pipeline = (ts + gp.Pad(points, None) + apv)
        with gp.build(pipeline):
            pipeline.request_batch(request)
Example #9
    def _updated_spec(self, ref_spec):
        spec = ref_spec.copy()
        spec.dtype = np.float32
        # if stardists are on a downsampled grid, voxel_size needs to be
        # adapted
        if self.grid != (1, 1, 1):
            spec.voxel_size *= gp.Coordinate(self.grid)
        return spec
Example #10
    def prepare(self, request):

        logger.debug("request: %s", request.array_specs)
        logger.debug("my spec: %s", self.spec)

        shift_roi = self.__get_possible_shifts(request)
        if request.array_specs.keys():

            lcm_voxel_size = self.spec.get_lcm_voxel_size(
                request.array_specs.keys())
            shift_roi = shift_roi.snap_to_grid(lcm_voxel_size, mode='shrink')
            lcm_shift_roi = shift_roi / lcm_voxel_size
            logger.debug("lcm voxel size: %s", lcm_voxel_size)

            logger.debug(
                "restricting random locations to multiples of voxel size %s",
                lcm_voxel_size)

        else:

            lcm_voxel_size = gp.Coordinate((1,)*shift_roi.dims())
            lcm_shift_roi = shift_roi

        random_shift = self.__select_random_shift(
            request,
            lcm_shift_roi,
            lcm_voxel_size)

        self.random_shift = random_shift
        self.__shift_request(request, random_shift)
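For intuition about the grid snapping above, a toy illustration (assuming gunpowder's Roi semantics; not part of the node itself):

import gunpowder as gp

roi = gp.Roi(offset=(1, 3), shape=(10, 10))   # spans (1, 3) to (11, 13)
lcm_voxel_size = gp.Coordinate((2, 4))
snapped = roi.snap_to_grid(lcm_voxel_size, mode='shrink')
# 'shrink' rounds the begin up and the end down to voxel-size multiples:
# snapped == Roi((2, 4), (8, 8)), i.e. Roi((1, 1), (4, 2)) in voxel units.
print(snapped / lcm_voxel_size)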
Example #11
    def test_shift_points5(self):
        data = {
            0: gp.Point([3, 0]),
            1: gp.Point([3, 2]),
            2: gp.Point([3, 4]),
            3: gp.Point([3, 6]),
            4: gp.Point([3, 8])
        }
        spec = gp.PointsSpec(gp.Roi(offset=(0, 0), shape=(15, 10)))
        points = gp.Points(data, spec)
        request_roi = gp.Roi(offset=(3, 0), shape=(9, 10))
        shift_array = np.array([[3, 0], [-3, 0], [0, 0], [-3, 0], [3, 0]],
                               dtype=int)

        lcm_voxel_size = gp.Coordinate((3, 2))
        shifted_data = {
            0: gp.Point([6, 0]),
            2: gp.Point([3, 4]),
            4: gp.Point([6, 8])
        }
        result = gp.ShiftAugment.shift_points(points,
                                              request_roi,
                                              shift_array,
                                              shift_axis=1,
                                              lcm_voxel_size=lcm_voxel_size)
        # print("test 4", result.data, shifted_data)
        self.assertTrue(self.points_equal(result.data, shifted_data))
        self.assertTrue(result.spec == gp.PointsSpec(request_roi))
Example #12
    def build_source(self):
        data = daisy.open_ds(self.filename, self.key)

        if self.time_window is None:
            source_roi = gp.Roi(data.roi.get_offset(), data.roi.get_shape())
        else:
            offs = list(data.roi.get_offset())
            offs[1] += self.time_window[0]
            sh = list(data.roi.get_shape())
            sh[1] = self.time_window[1] - self.time_window[0]
            source_roi = gp.Roi(tuple(offs), tuple(sh))

        voxel_size = gp.Coordinate(data.voxel_size)

        return gp.ZarrSource(filename,
                             {
                                 self.raw_0: key,
                                 self.raw_1: key
                             },
                             array_specs={
                                 self.raw_0: gp.ArraySpec(
                                     roi=source_roi,
                                     voxel_size=voxel_size,
                                     interpolatable=True),
                                 self.raw_1: gp.ArraySpec(
                                     roi=source_roi,
                                     voxel_size=voxel_size,
                                     interpolatable=True)
                             })
Example #13
    def __init__(self, filename, ds_name):

        self.filename = filename
        self.ds_name = ds_name

        zarr_container = zarr.open(filename)
        ds = zarr_container[ds_name]

        # necessary fields:
        self._shape = gp.Coordinate(ds.shape)
        if "axes" in ds.attrs:
            self._axes = {d: a for d, a in enumerate(ds.attrs["axes"])}
            self._inv_axes = {a: d for d, a in self._axes.items()}
            self._channel_dim = self._inv_axes.get("c")
            self._sample_dim = self._inv_axes.get("s")
            self._spatial_axes = sorted([
                i for i in self._axes.keys()
                if i != self.channel_dim and i != self.sample_dim
            ])
        else:
            raise DacapoConventionError(
                "Dacapo expects zarr arrays to come with axis labels. "
                "Note that label 'c' is reserved for the (optional) channel dimension. "
                "Label 's' is reserved for the (optional) sample dimension. "
                "Any other label will be treated as a spatial dimension.")

        # optional fields
        if "resolution" in ds.attrs:
            self._voxel_size = gp.Coordinate(ds.attrs["resolution"])
        else:
            self._voxel_size = gp.Coordinate(
                tuple(1 for i in self._spatial_axes))
        if "offset" in ds.attrs:
            self._offset = gp.Coordinate(ds.attrs["offset"])
        else:
            self._offset = gp.Coordinate(tuple(0 for i in self._spatial_axes))

        # more optional fields: gt specific
        if "num_classes" in ds.attrs:
            self._num_classes = ds.attrs["num_classes"]
        else:
            self._num_classes = 0
        if "background_label" in ds.attrs:
            self._background_label = ds.attrs["background_label"]
        else:
            self._background_label = None
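A minimal sketch of writing a zarr array that satisfies this convention (dataset name, shape, and attribute values are illustrative assumptions, not from the original source):

import numpy as np
import zarr

# Hypothetical dataset following the axis-label convention above.
container = zarr.open("example.zarr", mode="a")
ds = container.create_dataset(
    "volumes/raw", data=np.zeros((2, 100, 100, 100), dtype=np.float32))
ds.attrs["axes"] = ["c", "z", "y", "x"]   # 'c' marks the channel dimension
ds.attrs["resolution"] = (4, 4, 4)        # voxel size of the spatial axes
ds.attrs["offset"] = (0, 0, 0)            # world offset of the spatial axes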
Example #14
    def test_pipeline3(self):
        array_key = gp.ArrayKey("TEST_ARRAY")
        points_key = gp.PointsKey("TEST_POINTS")
        voxel_size = gp.Coordinate((1, 1))
        spec = gp.ArraySpec(voxel_size=voxel_size, interpolatable=True)

        hdf5_source = gp.Hdf5Source(self.fake_data_file,
                                    {array_key: 'testdata'},
                                    array_specs={array_key: spec})
        csv_source = gp.CsvPointsSource(
            self.fake_points_file, points_key,
            gp.PointsSpec(
                roi=gp.Roi(shape=gp.Coordinate((100, 100)), offset=(0, 0))))

        request = gp.BatchRequest()
        shape = gp.Coordinate((60, 60))
        request.add(array_key, shape, voxel_size=gp.Coordinate((1, 1)))
        request.add(points_key, shape)

        shift_node = gp.ShiftAugment(prob_slip=0.2,
                                     prob_shift=0.2,
                                     sigma=5,
                                     shift_axis=0)
        pipeline = ((hdf5_source, csv_source) + gp.MergeProvider() +
                    gp.RandomLocation(ensure_nonempty=points_key) + shift_node)
        with gp.build(pipeline) as b:
            batch = b.request_batch(request)

        target_vals = [
            self.fake_data[point[0]][point[1]] for point in self.fake_points
        ]
        result_data = batch[array_key].data
        result_points = batch[points_key].data
        result_vals = [
            result_data[int(point.location[0])][int(point.location[1])]
            for point in result_points.values()
        ]

        for result_val in result_vals:
            self.assertTrue(
                result_val in target_vals,
                msg="result value {} at points {} not in target values {} "
                    "at points {}".format(result_val,
                                          list(result_points.values()),
                                          target_vals, self.fake_points))
Example #15
    def test_prepare1(self):

        key = gp.ArrayKey("TEST_ARRAY")
        spec = gp.ArraySpec(voxel_size=gp.Coordinate((1, 1)),
                            interpolatable=True)

        hdf5_source = gp.Hdf5Source(self.fake_data_file, {key: 'testdata'},
                                    array_specs={key: spec})

        request = gp.BatchRequest()
        shape = gp.Coordinate((3, 3))
        request.add(key, shape, voxel_size=gp.Coordinate((1, 1)))

        shift_node = gp.ShiftAugment(sigma=1, shift_axis=0)
        with gp.build((hdf5_source + shift_node)):
            shift_node.prepare(request)
            self.assertTrue(shift_node.ndim == 2)
            self.assertTrue(shift_node.shift_sigmas == tuple([0.0, 1.0]))
Example #16
    def test_squeeze_not_possible(self):
        raw = gp.ArrayKey("RAW")
        labels = gp.ArrayKey("LABELS")

        voxel_size = gp.Coordinate((50, 5, 5))
        input_voxels = gp.Coordinate((5, 5, 5))
        input_size = input_voxels * voxel_size

        request = gp.BatchRequest()
        request.add(raw, input_size)
        request.add(labels, input_size)

        pipeline = (ExampleSourceSqueeze(voxel_size) +
                    gp.Squeeze([raw], axis=2))

        with self.assertRaises(gp.PipelineRequestError):
            with gp.build(pipeline) as p:
                batch = p.request_batch(request)
Example #17
def predict(iteration, path_to_dataGP):

    input_size = (8, 96, 96)
    output_size = (4, 64, 64)
    amount_size = gp.Coordinate((2, 16, 16))
    model = SpineUNet(crop_output='output_size')

    raw = gp.ArrayKey('RAW')
    affs_predicted = gp.ArrayKey('AFFS_PREDICTED')

    reference_request = gp.BatchRequest()
    reference_request.add(raw, input_size)
    reference_request.add(affs_predicted, output_size)

    source = gp.ZarrSource(
        path_to_dataGP,
        {
            raw: 'validate/sample1/raw'
        }
    )

    with gp.build(source):
        source_roi = source.spec[raw].roi
    request = gp.BatchRequest()
    request[raw] = gp.ArraySpec(roi=source_roi)
    request[affs_predicted] = gp.ArraySpec(roi=source_roi)

    pipeline = (
        source +
        gp.Pad(raw, amount_size) +
        gp.Normalize(raw) +
        # raw: (d, h, w)
        gp.Stack(1) +
        # raw: (1, d, h, w)
        AddChannelDim(raw) +
        # raw: (1, 1, d, h, w)
        gp_torch.Predict(
            model,
            inputs={'x': raw},
            outputs={0: affs_predicted},
            checkpoint=f'C:/Users/filip/spine_yodl/model_checkpoint_{iteration}') +
        RemoveChannelDim(raw) +
        RemoveChannelDim(raw) +
        RemoveChannelDim(affs_predicted) +
        # raw: (d, h, w)
        # affs_predicted: (3, d, h, w)
        gp.Scan(reference_request)
    )

    with gp.build(pipeline):
        prediction = pipeline.request_batch(request)

    return prediction[raw].data, prediction[affs_predicted].data
Example #18
    def test_pipeline2(self):

        key = gp.ArrayKey("TEST_ARRAY")
        spec = gp.ArraySpec(voxel_size=gp.Coordinate((3, 1)),
                            interpolatable=True)

        hdf5_source = gp.Hdf5Source(self.fake_data_file, {key: 'testdata'},
                                    array_specs={key: spec})

        request = gp.BatchRequest()
        shape = gp.Coordinate((3, 3))
        request.add(key, shape, voxel_size=gp.Coordinate((3, 1)))

        shift_node = gp.ShiftAugment(prob_slip=0.2,
                                     prob_shift=0.2,
                                     sigma=1,
                                     shift_axis=0)
        with gp.build((hdf5_source + shift_node)) as b:
            b.request_batch(request)
Example #19
def get_roi_from_swc(swc_cube, transform_file, voxel_size):
    graph = parse_swc(
        filename=swc_cube,
        transform=transform_file,
        offset=np.array([0, 0, 0]),
        resolution=np.array(voxel_size),
        transpose=[2, 1, 0],
    )
    mins = np.array([float("inf")] * 3)
    maxs = np.array([-float("inf")] * 3)
    for attrs in graph.nodes.values():
        u_loc = attrs["location"]
        min_stack = np.stack([mins, np.array(u_loc)])
        max_stack = np.stack([maxs, np.array(u_loc)])
        mins = np.min(min_stack, axis=0)
        maxs = np.max(max_stack, axis=0)

    return gp.Roi(gp.Coordinate(tuple(mins)),
                  gp.Coordinate(tuple(maxs - mins)))
Example #20
    def __select_random_location(self, lcm_shift_roi, lcm_voxel_size):

        # select a random point inside ROI
        random_shift = gp.Coordinate(
            randint(int(begin), int(end-1))
            for begin, end in zip(lcm_shift_roi.get_begin(), lcm_shift_roi.get_end()))

        random_shift *= lcm_voxel_size

        return random_shift
Example #21
    def test_squeeze(self):
        raw = gp.ArrayKey("RAW")
        labels = gp.ArrayKey("LABELS")

        voxel_size = gp.Coordinate((50, 5, 5))
        input_voxels = gp.Coordinate((5, 5, 5))
        input_size = input_voxels * voxel_size

        request = gp.BatchRequest()
        request.add(raw, input_size)
        request.add(labels, input_size)

        pipeline = (ExampleSourceSqueeze(voxel_size) +
                    gp.Squeeze([raw], axis=1) + gp.Squeeze([raw, labels]))

        with gp.build(pipeline) as p:
            batch = p.request_batch(request)
            assert batch[raw].data.shape == input_voxels
            assert batch[labels].data.shape == input_voxels
Example #22
def get_foreground_pipelines(config, blocks):
    voxel_size = gp.Coordinate(config["VOXEL_SIZE"])
    input_shape = gp.Coordinate(config["INPUT_SHAPE"])
    output_shape = gp.Coordinate(config["OUTPUT_SHAPE"])
    input_size = voxel_size * input_shape
    output_size = voxel_size * output_shape

    raw_pipelines, (raw, ) = get_raw_snapshot_source(config, blocks)
    labels_pipelines, (labels, gt) = get_labels_snapshot_source(config, blocks)
    fg_pipelines, (fg_pred, ) = add_fg_preds(config, raw_pipelines, raw)
    fg_pipelines = add_scans(fg_pipelines, {
        raw: input_size,
        fg_pred: output_size
    })
    pipelines = merge_pipelines(fg_pipelines, labels_pipelines)

    requests = get_requests(config, blocks, raw, fg_pred, labels, gt)

    return pipelines, requests, (raw, fg_pred, labels, gt)
Example #23
    def provide(self, request):

        voxel_size = self.spec[self.raw].voxel_size
        shape = gp.Coordinate((1, ) + request[self.raw].roi.get_shape())

        noise = np.abs(np.random.randn(*shape))
        smoothed_noise = gaussian_filter(noise, sigma=self.smoothness)

        seeds = np.zeros(shape, dtype=int)
        for i in range(self.n_objects):
            if i == 0:
                num_points = 100
            else:
                num_points = self.points_per_skeleton
            points = np.stack(
                [
                    np.random.randint(0, shape[dim], num_points)
                    for dim in range(3)
                ],
                axis=1,
            )
            tree = skelerator.Tree(points)
            skeleton = skelerator.Skeleton(tree, [1, 1, 1],
                                           "linear",
                                           generate_graph=False)
            seeds = skeleton.draw(seeds, np.array([0, 0, 0]), i + 1)

        seeds[maximum_filter(seeds, size=4) != seeds] = 0
        seeds_dt = distance_transform_edt(seeds == 0) + 5.0 * smoothed_noise
        gt_data = cwatershed(seeds_dt, seeds).astype(np.uint64)[0] - 1

        labels = np.unique(gt_data)

        raw_data = np.zeros_like(gt_data, dtype=np.uint8)
        value = 0
        for label in labels:
            raw_data[gt_data == label] = value
            value += 255.0 / self.n_objects

        spec = request[self.raw].copy()
        spec.voxel_size = (1, 1)
        raw = gp.Array(raw_data, spec)

        spec = request[self.gt].copy()
        spec.voxel_size = (1, 1)
        gt_crop = (request[self.gt].roi -
                   request[self.raw].roi.get_begin()) / voxel_size
        gt_crop = gt_crop.to_slices()
        gt = gp.Array(gt_data[gt_crop], spec)

        batch = gp.Batch()
        batch[self.raw] = raw
        batch[self.gt] = gt

        return batch
Example #24
    def prepare(self, request):

        context = self.context
        dims = request[self.srcpoints].roi.dims()

        assert isinstance(context, list)
        if len(context) == 1:
            context = context * dims

        # request array in a larger area to get predictions from outside
        # write roi
        m_roi = request[self.srcpoints].roi.grow(gp.Coordinate(context),
                                                 gp.Coordinate(context))

        # however, restrict the request to the array actually provided
        # m_roi = m_roi.intersect(self.spec[self.m_array].roi)
        request[self.m_array] = gp.ArraySpec(roi=m_roi)

        # Do the same for the direction vector array.
        request[self.d_array] = gp.ArraySpec(roi=m_roi)
Example #25
def get_requests(config, blocks, raw, emb_pred, labels, gt):
    voxel_size = gp.Coordinate(config["VOXEL_SIZE"])
    input_shape = gp.Coordinate(config["INPUT_SHAPE"])
    output_shape = gp.Coordinate(config["OUTPUT_SHAPE"])
    input_size = voxel_size * input_shape
    output_size = voxel_size * output_shape
    diff = input_size - output_size

    cube_rois = [get_cube_roi(config, block) for block in blocks]

    requests = []
    for cube_roi in cube_rois:
        context_roi = cube_roi.grow(diff // 2, diff // 2)
        request = gp.BatchRequest()
        request[raw] = gp.ArraySpec(roi=context_roi)
        request[emb_pred] = gp.ArraySpec(roi=cube_roi)
        request[labels] = gp.ArraySpec(roi=cube_roi)
        request[gt] = gp.GraphSpec(roi=cube_roi)
        requests.append(request)
    return requests
Example #26
    def __init__(self, filename,
                 key,
                 density=None,
                 channels=0,
                 shape=(16, 256, 256),
                 time_window=None,
                 add_sparse_mosaic_channel=True,
                 random_rot=False):

        self.filename = filename
        self.key = key
        self.shape = shape
        self.density = density
        self.raw = gp.ArrayKey('RAW_0')
        self.add_sparse_mosaic_channel = add_sparse_mosaic_channel
        self.random_rot = random_rot
        self.channels = channels

        data = daisy.open_ds(filename, key)

        if time_window is None:
            source_roi = gp.Roi(data.roi.get_offset(), data.roi.get_shape())
        else:
            offs = list(data.roi.get_offset())
            offs[1] += time_window[0]
            sh = list(data.roi.get_shape())
            sh[1] = time_window[1] - time_window[0]
            source_roi = gp.Roi(tuple(offs), tuple(sh))

        voxel_size = gp.Coordinate(data.voxel_size)

        self.pipeline = gp.ZarrSource(
            filename,
            {
                self.raw: key
            },
            array_specs={
                self.raw: gp.ArraySpec(
                    roi=source_roi,
                    voxel_size=voxel_size,
                    interpolatable=True)
            }) + gp.RandomLocation() + IntensityDiffFilter(
                self.raw, 0, min_distance=0.1, channels=slice(None))

        # add augmentations
        self.pipeline = self.pipeline + gp.ElasticAugment([40, 40],
                                                          [2, 2],
                                                          [0, math.pi / 2.0],
                                                          prob_slip=-1,
                                                          spatial_dims=2)

        self.pipeline.setup()
        np.random.seed(os.getpid() + int(time.time()))
Example #27
    def __read_spec(self, array_key):

        if array_key in self.array_specs:
            spec = self.array_specs[array_key].copy()
        else:
            spec = gp.ArraySpec()
        assert spec.voxel_size is not None, "Voxel size needs to be given"

        self.ndims = len(spec.voxel_size)

        if spec.roi is None:
            roi = gp.Roi(gp.Coordinate((0, ) * self.ndims),
                         shape=gp.Coordinate((1, ) * self.ndims))
            roi.set_shape(None)
            spec.roi = roi

        arr = self.func((2, ) * self.ndims)
        if spec.dtype is not None:
            assert spec.dtype == arr.dtype, (
                "dtype %s provided in array_specs for %s, "
                "but differs from function output %s dtype %s" %
                (self.array_specs[array_key].dtype, array_key, self.func,
                 arr.dtype))
        else:
            spec.dtype = arr.dtype

        if spec.interpolatable is None:
            spec.interpolatable = spec.dtype in [
                float,
                np.float32,
                np.float64,
                np.float128,
                np.uint8  # assuming this is not used for labels
            ]
            logger.warning(
                "You didn't set 'interpolatable' for %s (func %s). "
                "Based on the dtype %s, it has been set to %s. "
                "This might not be what you want.", array_key,
                self.func, spec.dtype, spec.interpolatable)

        return spec
Example #28
    def test_get_sub_shift_array2(self):
        total_roi = gp.Roi(offset=(0, 0), shape=(6, 6))
        item_roi = gp.Roi(offset=(1, 2), shape=(3, 3))
        shift_array = np.arange(12).reshape(6, 2).astype(int)
        shift_axis = 0
        lcm_voxel_size = gp.Coordinate((1, 1))

        sub_shift_array = np.array([[2, 3], [4, 5], [6, 7]], dtype=int)
        result = gp.ShiftAugment.get_sub_shift_array(total_roi, item_roi,
                                                     shift_array, shift_axis,
                                                     lcm_voxel_size)
        self.assertTrue(np.array_equal(result, sub_shift_array))
Example #29
def grow_labels(pipeline, setup_config, labels):

    # Data Properties
    voxel_size = gp.Coordinate(setup_config["VOXEL_SIZE"])
    micron_scale = voxel_size[0]

    label_radius = setup_config["NEURON_RADIUS"]

    pipeline = pipeline + GrowLabels(labels,
                                     radii=[label_radius * micron_scale])

    return pipeline
Example #30
def validation_data_sources_from_snapshots(config, blocks):
    validation_blocks = Path(config["VALIDATION_BLOCKS"])

    raw = gp.ArrayKey("RAW")
    ground_truth = gp.GraphKey("GROUND_TRUTH")
    labels = gp.ArrayKey("LABELS")

    voxel_size = gp.Coordinate(config["VOXEL_SIZE"])
    input_shape = gp.Coordinate(config["INPUT_SHAPE"])
    output_shape = gp.Coordinate(config["OUTPUT_SHAPE"])
    input_size = voxel_size * input_shape
    output_size = voxel_size * output_shape

    block_pipelines = []
    for block in blocks:

        pipelines = (
            SnapshotSource(
                validation_blocks / f"block_{block}.hdf",
                {
                    labels: "volumes/labels",
                    ground_truth: "points/gt"
                },
                directed={ground_truth: True},
            ),
            SnapshotSource(validation_blocks / f"block_{block}.hdf",
                           {raw: "volumes/raw"}),
        )

        cube_roi = get_cube_roi(config, block)

        request = gp.BatchRequest()
        input_roi = cube_roi.grow((input_size - output_size) // 2,
                                  (input_size - output_size) // 2)
        request[raw] = gp.ArraySpec(input_roi)
        request[ground_truth] = gp.GraphSpec(cube_roi)
        request[labels] = gp.ArraySpec(cube_roi)

        block_pipelines.append((pipelines, request))
    return block_pipelines, (raw, labels, ground_truth)