Example #1
def get_nailed_region(tgt, pred_tgt, sensitivity=0.5):
    aligned_mask = get_aligned_mask(tgt, pred_tgt, sensitivity)
    cc_labels = cc3d.connected_components(aligned_mask)
    segids, counts = np.unique(cc_labels, return_counts=True)
    segids = [segid for segid, ct in zip(segids, counts) if ct > 50]
    filtered_mask = fastremap.mask_except(cc_labels, segids,
                                          in_place=True) != 0
    filtered_mask = masks.closing(filtered_mask, n=4)
    return filtered_mask
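
A minimal standalone sketch of the same filtering pattern (size-thresholded connected components, then fastremap.mask_except) on a synthetic volume; the get_aligned_mask and masks.closing helpers above are project-specific and omitted here:

import numpy as np
import cc3d
import fastremap

aligned_mask = np.zeros((64, 64, 64), dtype=bool)
aligned_mask[:8, :8, :8] = True    # 512-voxel blob, survives the ct > 50 filter
aligned_mask[32, 32, 32] = True    # single-voxel speck, removed

cc_labels = cc3d.connected_components(aligned_mask)
segids, counts = np.unique(cc_labels, return_counts=True)
keep = [segid for segid, ct in zip(segids, counts) if segid != 0 and ct > 50]
filtered_mask = fastremap.mask_except(cc_labels, keep, in_place=True) != 0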
Example #2
    def download(self,
                 bbox,
                 mip=None,
                 parallel=None,
                 root_ids=None,
                 mask_base=True):
        """
    Graphene slicing is distinguished from Precomputed in two ways:

    1) All input bounding boxes are in mip 0 coordinates.
    2) The final position of the input slices may optionally be
      a list of root ids that describe how to remap the base
      watershed coloring.

    If mask_base is set, segids that are not remapped to a root id are
    masked out with zeros.

    """
        bbox = Bbox.create(bbox,
                           context=self.meta.bounds(0),
                           bounded=self.bounded)

        if bbox.subvoxel():
            raise exceptions.EmptyRequestException(
                "Requested {} is smaller than a voxel.".format(bbox))

        if mip is None:
            mip = self.mip

        bbox = self.bbox_to_mip(bbox, mip=0, to_mip=mip)

        if root_ids is None and mask_base:
            return np.zeros(bbox.size(), dtype=self.dtype)

        img = super(CloudVolumeGraphene, self).download(bbox,
                                                        mip=mip,
                                                        parallel=parallel)

        if root_ids is None:
            if mask_base:
                img[:] = 0
            return img

        root_ids = toiter(root_ids)

        remapping = {}
        for root_id in root_ids:
            leaves = self._get_leaves(root_id, bbox, mip)
            remapping.update({leaf: root_id for leaf in leaves})

        img = fastremap.remap(img,
                              remapping,
                              preserve_missing_labels=True,
                              in_place=True)
        if mask_base:
            img = fastremap.mask_except(img, root_ids, in_place=True)

        return img
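
The root-id path above boils down to remapping leaf (watershed) ids to their root followed by masking; a small self-contained sketch, with the graph lookup replaced by a hard-coded leaf list:

import numpy as np
import fastremap

img = np.array([[1, 2], [3, 7]], dtype=np.uint64)   # base watershed labels
root_id = 100
leaves = [1, 2, 3]   # stand-in for self._get_leaves(root_id, bbox, mip)

remapping = {leaf: root_id for leaf in leaves}
img = fastremap.remap(img, remapping, preserve_missing_labels=True, in_place=True)
img = fastremap.mask_except(img, [root_id], in_place=True)   # mask_base=True
# img is now [[100, 100], [100, 0]]; label 7 had no root and was masked out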
Example #3
  def execute(self):
    self._volume = CloudVolume(
      self.layer_path, self.options['mip'], bounded=False,
      parallel=self.options['parallel_download'], 
      fill_missing=self.options['fill_missing']
    )
    self._bounds = Bbox(self.offset, self.shape + self.offset)
    self._bounds = Bbox.clamp(self._bounds, self._volume.bounds)

    self.progress = bool(self.options['progress'])

    self._mesher = zmesh.Mesher(self._volume.resolution)

    # Marching cubes loves its 1vx overlaps.
    # This avoids lines appearing between
    # adjacent chunks.
    data_bounds = self._bounds.clone()
    data_bounds.minpt -= self.options['low_padding']
    data_bounds.maxpt += self.options['high_padding']

    self._mesh_dir = self.get_mesh_dir()

    if self.options['encoding'] == 'draco':
      self.draco_encoding_settings = draco_encoding_settings(
        shape=(self.shape + self.options['low_padding'] + self.options['high_padding']),
        offset=self.offset,
        resolution=self._volume.resolution,
        compression_level=self.options["draco_compression_level"],
        create_metadata=self.options['draco_create_metadata'],
        uses_new_draco_bin_size=False,
      )

    # chunk_position includes the overlap specified by low_padding/high_padding
    # agglomerate, timestamp, stop_layer only applies to graphene volumes, 
    # no-op for precomputed
    data = self._volume.download(
      data_bounds, 
      agglomerate=self.options['agglomerate'], 
      timestamp=self.options['timestamp'], 
      stop_layer=self.options['stop_layer']
    )

    if not np.any(data):
      if self.options['spatial_index']:
        self._upload_spatial_index(self._bounds, {})
      return

    data = self._remove_dust(data, self.options['dust_threshold'])
    data = self._remap(data)

    if self.options['object_ids']:
      data = fastremap.mask_except(data, self.options['object_ids'], in_place=True)

    data, renumbermap = fastremap.renumber(data, in_place=True)
    renumbermap = { v:k for k,v in renumbermap.items() }
    self.compute_meshes(data, renumbermap)
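
The final renumber-and-invert step is worth reading in isolation; a minimal sketch, assuming only numpy and fastremap:

import numpy as np
import fastremap

data = np.array([0, 9000, 5000, 9000], dtype=np.uint64)
data, renumbermap = fastremap.renumber(data, in_place=True)
# fastremap.renumber relabels into a compact range and returns the
# original -> compact mapping; inverting it gives compact -> original,
# which lets downstream code (here, the mesher) report original ids.
renumbermap = {v: k for k, v in renumbermap.items()}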
Example #4
    def _only_keep_selected(self, seg: np.ndarray):
        if self.verbose:
            print('only keep selected segment ids, and remove others.')

        # precompute the remap function
        if self.ids:
            #segids = set(np.unique(seg))
            #ids = self.ids.intersection( segids )
            seg = fastremap.mask_except(seg, list(self.ids), in_place=True)
        return seg
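
A short usage sketch, since fastremap.mask_except expects a sequence of labels (hence the list(self.ids) conversion above):

import numpy as np
import fastremap

seg = np.array([1, 2, 3, 4], dtype=np.uint32)
ids = {2, 4}                      # selected segment ids, kept as a set
seg = fastremap.mask_except(seg, list(ids), in_place=True)
# seg is now [0, 2, 0, 4]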
Example #5
    def download(self,
                 bbox,
                 mip=None,
                 parallel=None,
                 segids=None,
                 preserve_zeros=False):
        """
    Downloads segmentation from the indicated cutout
    region.

    bbox: specifies cutout to fetch
    mip: which resolution level to get (default self.mip)
    parallel: what parallel level to use (default self.parallel)

    segids: agglomerate the leaves of these segids from the graph 
      server and label them with the given segid.
    preserve_zeros: If segids is not None:
      False: mask other segids with zero
      True: mask other segids with the largest integer value
        contained by the image data type and leave zero as is.

    Returns: img
    """
        bbox = Bbox.create(bbox,
                           context=self.bounds,
                           bounded=self.bounded,
                           autocrop=self.autocrop)

        if mip is None:
            mip = self.mip

        if parallel is None:
            parallel = self.parallel

        img = self.image.download(bbox, mip, parallel=parallel)

        if segids is None:
            return img

        mask_value = 0
        if preserve_zeros:
            mask_value = np.inf
            if np.issubdtype(self.dtype, np.integer):
                mask_value = np.iinfo(self.dtype).max

            segids.append(0)

        img = fastremap.mask_except(img,
                                    segids,
                                    in_place=True,
                                    value=mask_value)

        return VolumeCutout.from_volume(self.meta, mip, img, bbox)
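
The preserve_zeros branch is easiest to see with a small integer volume; a sketch of just that masking step, under the assumption of a uint8 dtype:

import numpy as np
import fastremap

img = np.array([0, 7, 9, 12], dtype=np.uint8)
segids = [9]

mask_value = np.iinfo(img.dtype).max    # 255 for uint8
segids.append(0)                        # zero is kept as zero, not overwritten

img = fastremap.mask_except(img, segids, in_place=True, value=mask_value)
# img is now [0, 255, 9, 255]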
Example #6
def apply_object_mask(all_labels, object_ids):
    if object_ids is None:
        return all_labels

    if len(object_ids) == 1:
        all_labels = kimimaro.skeletontricks.zero_out_all_except(
            all_labels, object_ids[0])  # faster
    else:
        all_labels = fastremap.mask_except(all_labels,
                                           object_ids,
                                           in_place=True)

    return all_labels
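
A minimal sketch of the multi-id branch (the single-id path defers to kimimaro's specialized routine instead):

import numpy as np
import fastremap

all_labels = np.array([[1, 2], [3, 4]], dtype=np.uint32)

# multi-id path: identical to the else branch above
kept = fastremap.mask_except(all_labels, [2, 3], in_place=True)
# kept is [[0, 2], [3, 0]]; a single id would instead use
# kimimaro.skeletontricks.zero_out_all_except, which is faster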
Example #7
  def _remap(self, data):
    if self.options['remap_table'] is None:
      return data 

    self.options['remap_table'] = {
      int(k): int(v) for k, v in self.options['remap_table'].items()
    }

    remap = self.options['remap_table']
    remap[0] = 0

    data = fastremap.mask_except(data, list(remap.keys()), in_place=True)
    return fastremap.remap(data, remap, in_place=True)
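
A small numeric sketch of the remap-table path, assuming a table that folds labels 4 and 5 into 2 (everything absent from the table's keys is dropped first):

import numpy as np
import fastremap

data = np.array([0, 2, 4, 5, 9], dtype=np.uint32)
remap = {4: 2, 5: 2}
remap[0] = 0   # background always maps to background

data = fastremap.mask_except(data, list(remap.keys()), in_place=True)
data = fastremap.remap(data, remap, in_place=True)
# data is now [0, 0, 2, 2, 0]: labels 2 and 9 were not table keys and were masked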
Example #8
    def mask_except(self, selected_obj_ids: Union[str, list, set]):
        if selected_obj_ids is None:
            logging.warning('empty selected object ids to mask, do nothing!')
            return

        if isinstance(selected_obj_ids,
                      str) and selected_obj_ids.endswith('.json'):
            # assume that ids is a json file in the storage path
            json_storage = Storage(os.path.dirname(selected_obj_ids))
            ids_str = json_storage.get_file(os.path.basename(selected_obj_ids))
            selected_obj_ids = set(json.loads(ids_str))
            assert len(selected_obj_ids) > 0
            logging.info(
                f'number of selected objects: {len(selected_obj_ids)}')
        elif isinstance(selected_obj_ids, str):
            # a simple string, like "34,45,56,23"
            # this is used when there are only a few object ids
            selected_obj_ids = set(
                [int(id) for id in selected_obj_ids.split(',')])

        self.array = fastremap.mask_except(self.array, list(selected_obj_ids))
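
Of the two string forms, the comma-separated one is the simplest to verify by hand; a minimal sketch (the JSON-in-storage branch needs a cloud path and is skipped here):

selected_obj_ids = "34,45,56,23"
ids = set(int(i) for i in selected_obj_ids.split(','))
# ids == {34, 45, 56, 23}; these are then passed to
# fastremap.mask_except(self.array, list(ids)) as in the last line above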
Example #9
    def download(
        self,
        bbox,
        mip=None,
        parallel=None,
        segids=None,
        preserve_zeros=False,
        agglomerate=None,
        timestamp=None,
        stop_layer=None,
        renumber=False,
        coord_resolution=None,
    ):
        """
    Downloads base segmentation and optionally agglomerates
    labels based on information in the graph server.

    bbox: specifies cutout to fetch
    mip: which resolution level to get (default self.mip)
    parallel: what parallel level to use (default self.parallel)
    coord_resolution: (rx,ry,rz) the coordinate resolution of the input point.
      Sometimes Neuroglancer is working in the resolution of another
      higher res layer and this can help correct that.

    agglomerate: if true, remap all watershed ids in the volume
      and return a flat segmentation.

    if agglomerate is true these options are available:

    timestamp: (agglomerate only) get the roots from this date and time
      formats accepted:
        int: unix timestamp
        datetime: self explanatory
        string: ISO 8601 date
    stop_layer: (agglomerate only) (int) if specified, return the lowest 
      parent at or above that layer. If not specified, go all the way 
      to the root id. 
        Layer 1: Watershed
        Layer 2: Within-Chunk Agglomeration
        Layer 2+: Between chunk interconnections (skip connections possible)

    If agglomerate is None, then cv.meta.agglomerate controls its value.

    If agglomerate is false, these other options come into play:

    segids: agglomerate the leaves of these segids from the graph 
      server and label them with the given segid.
    preserve_zeros: If segids is not None:
      False: mask other segids with zero
      True: mask other segids with the largest integer value
        contained by the image data type and leave zero as is.

    Returns: img as a VolumeCutout
    """
        agglomerate = agglomerate if agglomerate is not None else self.agglomerate

        bbox = Bbox.create(bbox,
                           context=self.bounds,
                           bounded=(self.bounded and coord_resolution is None),
                           autocrop=self.autocrop)

        if mip is None:
            mip = self.mip

        if coord_resolution is not None:
            factor = self.meta.resolution(mip) / coord_resolution
            bbox /= factor
            if self.bounded and not self.meta.bounds(mip).contains_bbox(bbox):
                raise exceptions.OutOfBoundsError(
                    f"Computed {bbox} is not contained within bounds {self.meta.bounds(mip)}"
                )

        if bbox.subvoxel():
            raise exceptions.EmptyRequestException(
                "Requested {} is smaller than a voxel.".format(bbox))

        if (agglomerate and stop_layer is not None) and (
                stop_layer <= 0 or stop_layer > self.meta.n_layers):
            raise ValueError(
                "Stop layer {} must be 1 <= stop_layer <= {} or None.".format(
                    stop_layer, self.meta.n_layers))

        mip0_bbox = self.bbox_to_mip(bbox, mip=mip, to_mip=0)
        # Only ever necessary to make requests within the bounding box
        # to the server. We can fill black in other situations.
        mip0_bbox = bbox.intersection(self.meta.bounds(0), mip0_bbox)

        renumber_return = renumber
        if renumber and (segids or agglomerate):
            renumber = False  # no point

        img = super(CloudVolumeGraphene, self).download(bbox,
                                                        mip=mip,
                                                        parallel=parallel,
                                                        renumber=renumber)
        renumber_remap = None
        if renumber:
            img, renumber_remap = img

        if agglomerate:
            img = self.agglomerate_cutout(img,
                                          timestamp=timestamp,
                                          stop_layer=stop_layer)
            img = VolumeCutout.from_volume(self.meta, mip, img, bbox)

        if segids is None or agglomerate:
            if renumber_return:
                return img, renumber_remap
            return img

        segids = list(toiter(segids))

        remapping = {}
        for segid in segids:
            leaves = self.get_leaves(segid, mip0_bbox, 0)
            remapping.update({leaf: segid for leaf in leaves})

        # Issue #434: Do not write img = fastremap.FN(in_place=True) as this allows
        # the underlying buffer to get garbage collected. Make sure to carefully
        # manage the buffer's references when making any changes.
        fastremap.remap(img,
                        remapping,
                        preserve_missing_labels=True,
                        in_place=True)

        mask_value = 0
        if preserve_zeros:
            mask_value = np.inf
            if np.issubdtype(self.dtype, np.integer):
                mask_value = np.iinfo(self.dtype).max

            segids.append(0)

        fastremap.mask_except(img, segids, in_place=True, value=mask_value)
        if renumber_return:
            return img, renumber_remap
        return img
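
The coord_resolution correction near the top of this method is a plain per-axis rescale of the bounding box; a small numeric sketch, assuming a mip resolution of (8, 8, 40) and input coordinates given at (4, 4, 40):

import numpy as np

mip_resolution = np.array([8, 8, 40])
coord_resolution = np.array([4, 4, 40])

factor = mip_resolution / coord_resolution      # [2., 2., 1.]
point = np.array([1000, 1000, 100]) / factor    # [500., 500., 100.] in mip voxels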
Example #10
def remap_segmentation(cv,
                       chunk_x,
                       chunk_y,
                       chunk_z,
                       mip=2,
                       overlap_vx=1,
                       time_stamp=None,
                       progress=False):
    ws_cv = CloudVolume(cv.meta.cloudpath,
                        mip=mip,
                        progress=progress,
                        fill_missing=cv.fill_missing)
    mip_diff = mip - cv.meta.watershed_mip

    mip_chunk_size = np.array(cv.meta.graph_chunk_size,
                              dtype=int) / np.array(
                                  [2**mip_diff, 2**mip_diff, 1])
    mip_chunk_size = mip_chunk_size.astype(int)

    offset = Vec(chunk_x, chunk_y, chunk_z) * mip_chunk_size
    bbx = Bbox(offset, offset + mip_chunk_size + overlap_vx)
    if cv.meta.chunks_start_at_voxel_offset:
        bbx += ws_cv.voxel_offset
    bbx = Bbox.clamp(bbx, ws_cv.bounds)

    seg = ws_cv[bbx][..., 0]

    if not np.any(seg):
        return seg

    sv_remapping, unsafe_dict = get_lx_overlapping_remappings(
        cv,
        chunk_x,
        chunk_y,
        chunk_z,
        time_stamp=time_stamp,
        progress=progress)

    seg = fastremap.mask_except(seg, list(sv_remapping.keys()), in_place=True)
    fastremap.remap(seg,
                    sv_remapping,
                    preserve_missing_labels=True,
                    in_place=True)

    for unsafe_root_id in tqdm(unsafe_dict.keys(),
                               desc="Unsafe Relabel",
                               disable=(not progress)):
        bin_seg = seg == unsafe_root_id

        if np.sum(bin_seg) == 0:
            continue

        l2_edges = []
        cc_seg = cc3d.connected_components(bin_seg)
        for i_cc in range(1, np.max(cc_seg) + 1):
            bin_cc_seg = cc_seg == i_cc

            overlaps = []
            overlaps.extend(np.unique(seg[-2, :, :][bin_cc_seg[-1, :, :]]))
            overlaps.extend(np.unique(seg[:, -2, :][bin_cc_seg[:, -1, :]]))
            overlaps.extend(np.unique(seg[:, :, -2][bin_cc_seg[:, :, -1]]))
            overlaps = np.unique(overlaps)

            linked_l2_ids = overlaps[np.in1d(overlaps,
                                             unsafe_dict[unsafe_root_id])]

            if len(linked_l2_ids) == 0:
                seg[bin_cc_seg] = 0
            elif len(linked_l2_ids) == 1:
                seg[bin_cc_seg] = linked_l2_ids[0]
            else:
                seg[bin_cc_seg] = linked_l2_ids[0]

                for i_l2_id in range(len(linked_l2_ids) - 1):
                    for j_l2_id in range(i_l2_id + 1, len(linked_l2_ids)):
                        l2_edges.append(
                            [linked_l2_ids[i_l2_id], linked_l2_ids[j_l2_id]])

        if len(l2_edges) > 0:
            g = nx.Graph()
            g.add_edges_from(l2_edges)

            ccs = nx.connected_components(g)

            for cc in ccs:
                cc_ids = np.sort(list(cc))
                seg[np.in1d(seg, cc_ids[1:]).reshape(seg.shape)] = cc_ids[0]

    return seg
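
The unsafe-relabel merge at the end collapses each connected component of the l2 edge graph onto its smallest id; a self-contained sketch of that final loop on a 1-D toy segmentation:

import numpy as np
import networkx as nx

seg = np.array([10, 11, 12, 20], dtype=np.uint64)
l2_edges = [[10, 11], [11, 12]]   # ids seen touching across the chunk overlap

g = nx.Graph()
g.add_edges_from(l2_edges)

for cc in nx.connected_components(g):
    cc_ids = np.sort(list(cc))                          # e.g. [10, 11, 12]
    seg[np.in1d(seg, cc_ids[1:]).reshape(seg.shape)] = cc_ids[0]
# seg is now [10, 10, 10, 20]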
Example #11
  def download(
    self, bbox, mip=None, 
    parallel=None, segids=None,
    preserve_zeros=False,
    agglomerate=False, timestamp=None,
    stop_layer=None
  ):
    """
    Downloads base segmentation and optionally agglomerates
    labels based on information in the graph server.

    bbox: specifies cutout to fetch
    mip: which resolution level to get (default self.mip)
    parallel: what parallel level to use (default self.parallel)

    agglomerate: if true, remap all watershed ids in the volume
      and return a flat segmentation.

    if agglomerate is true these options are available:

    timestamp: (agglomerate only) get the roots from this date and time
      formats accepted:
        int: unix timestamp
        datetime: self explanatory
        string: ISO 8601 date
    stop_layer: (agglomerate only) (int) if specified, return the lowest 
      parent at or above that layer. If not specified, go all the way 
      to the root id. 
        Layer 1: Watershed
        Layer 2: Within-Chunk Agglomeration
        Layer 2+: Between chunk interconnections (skip connections possible)

    if agglomerate is false, these other options come into play:

    segids: agglomerate the leaves of these segids from the graph 
      server and label them with the given segid.
    preserve_zeros: If segids is not None:
      False: mask other segids with zero
      True: mask other segids with the largest integer value
        contained by the image data type and leave zero as is.

    Returns: img as a VolumeCutout
    """
    if type(bbox) is Vec:
      bbox = Bbox(bbox, bbox+1)
    
    bbox = Bbox.create(
      bbox, context=self.bounds, 
      bounded=self.bounded, 
      autocrop=self.autocrop
    )
  
    if bbox.subvoxel():
      raise exceptions.EmptyRequestException("Requested {} is smaller than a voxel.".format(bbox))

    if mip is None:
      mip = self.mip

    mip0_bbox = self.bbox_to_mip(bbox, mip=mip, to_mip=0)
    # Only ever necessary to make requests within the bounding box
    # to the server. We can fill black in other situations.
    mip0_bbox = bbox.intersection(self.meta.bounds(0), mip0_bbox)

    img = super(CloudVolumeGraphene, self).download(bbox, mip=mip, parallel=parallel)

    if agglomerate:
      img = self.agglomerate_cutout(img, timestamp=timestamp, stop_layer=stop_layer)
      return VolumeCutout.from_volume(self.meta, mip, img, bbox)

    if segids is None:
      return img

    segids = list(toiter(segids))

    remapping = {}
    for segid in segids:
      leaves = self.get_leaves(segid, mip0_bbox, 0)
      remapping.update({ leaf: segid for leaf in leaves })
    
    img = fastremap.remap(img, remapping, preserve_missing_labels=True, in_place=True)

    mask_value = 0
    if preserve_zeros:
      mask_value = np.inf
      if np.issubdtype(self.dtype, np.integer):
        mask_value = np.iinfo(self.dtype).max

      segids.append(0)

    img = fastremap.mask_except(img, segids, in_place=True, value=mask_value)

    return VolumeCutout.from_volume(
      self.meta, mip, img, bbox 
    )
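
The Vec special case at the top turns a single point into a one-voxel cutout; a tiny sketch using cloudvolume's Bbox and Vec helpers:

from cloudvolume import Bbox, Vec

pt = Vec(10, 20, 30)
bbox = Bbox(pt, pt + 1)   # a 1x1x1 voxel request at (10, 20, 30)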
Example #12
  def download(
      self, bbox, mip=None, parallel=None,
      segids=None, preserve_zeros=False,
      
      # Absorbing polymorphic Graphene calls
      agglomerate=None, timestamp=None, stop_layer=None,

      # new download arguments
      renumber=False
    ):
    """
    Downloads segmentation from the indicated cutout
    region.

    bbox: specifies cutout to fetch
    mip: which resolution level to get (default self.mip)
    parallel: what parallel level to use (default self.parallel)

    segids: agglomerate the leaves of these segids from the graph 
      server and label them with the given segid.
    preserve_zeros: If segids is not None:
      False: mask other segids with zero
      True: mask other segids with the largest integer value
        contained by the image data type and leave zero as is.
    renumber: dynamically rewrite downloaded segmentation into
      a more compact data type. Only compatible with single-process
      non-sharded download.

    agglomerate, timestamp, and stop_layer are just there to 
    absorb arguments to what could be a graphene frontend.

    Returns: img
    """  
    bbox = Bbox.create(
      bbox, context=self.bounds, 
      bounded=self.bounded, 
      autocrop=self.autocrop
    )

    if mip is None:
      mip = self.mip

    if parallel is None:
      parallel = self.parallel

    tup = self.image.download(bbox, mip, parallel=parallel, renumber=bool(renumber))
    if renumber:
      img, remap = tup
    else:
      remap = {}
      img = tup

    if segids is None:
      return tup

    mask_value = 0
    if preserve_zeros:
      mask_value = np.inf
      if np.issubdtype(self.dtype, np.integer):
        mask_value = np.iinfo(self.dtype).max

      segids.append(0)

    img = fastremap.mask_except(img, segids, in_place=True, value=mask_value)

    img = VolumeCutout.from_volume(
      self.meta, mip, img, bbox
    )
    if renumber:
      return img, remap
    else:
      return img
Example #13
    def download(
        self,
        bbox: BboxLikeType,
        mip: Optional[int] = None,
        parallel: Optional[int] = None,
        segids: Optional[Sequence[int]] = None,
        preserve_zeros: bool = False,

        # Absorbing polymorphic Graphene calls
        agglomerate: Optional[bool] = None,
        timestamp: Optional[int] = None,
        stop_layer: Optional[int] = None,

        # new download arguments
        renumber: bool = False,
        coord_resolution: Optional[Sequence[int]] = None,
    ) -> VolumeCutout:
        """
    Downloads segmentation from the indicated cutout
    region.

    bbox: specifies cutout to fetch
    mip: which resolution level to get (default self.mip)
    parallel: what parallel level to use (default self.parallel)

    segids: agglomerate the leaves of these segids from the graph 
      server and label them with the given segid.
    preserve_zeros: If segids is not None:
      False: mask other segids with zero
      True: mask other segids with the largest integer value
        contained by the image data type and leave zero as is.
    renumber: dynamically rewrite downloaded segmentation into
      a more compact data type. Only compatible with single-process
      non-sharded download.
    coord_resolution: (rx,ry,rz) the coordinate resolution of the input point.
      Sometimes Neuroglancer is working in the resolution of another
      higher res layer and this can help correct that.

    agglomerate, timestamp, and stop_layer are just there to 
    absorb arguments to what could be a graphene frontend.

    Returns: img
    """
        bbox = Bbox.create(bbox,
                           context=self.bounds,
                           bounded=(self.bounded and coord_resolution is None),
                           autocrop=self.autocrop)

        if mip is None:
            mip = self.mip

        if coord_resolution is not None:
            factor = self.meta.resolution(mip) / coord_resolution
            bbox /= factor
            if self.bounded and not self.meta.bounds(mip).contains_bbox(bbox):
                raise exceptions.OutOfBoundsError(
                    f"Computed {bbox} is not contained within bounds {self.meta.bounds(mip)}"
                )

        if parallel is None:
            parallel = self.parallel

        tup = self.image.download(bbox.astype(np.int64),
                                  mip,
                                  parallel=parallel,
                                  renumber=bool(renumber))
        if renumber:
            img, remap = tup
        else:
            remap = {}
            img = tup

        if segids is None:
            return tup

        mask_value = 0
        if preserve_zeros:
            mask_value = np.inf
            if np.issubdtype(self.dtype, np.integer):
                mask_value = np.iinfo(self.dtype).max

            segids.append(0)

        img = fastremap.mask_except(img,
                                    segids,
                                    in_place=True,
                                    value=mask_value)

        img = VolumeCutout.from_volume(self.meta, mip, img, bbox)
        if renumber:
            return img, remap
        else:
            return img
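
How the mask sentinel is chosen in the preserve_zeros branch, factored into a hypothetical helper for clarity (the name mask_sentinel is not part of the library):

import numpy as np

def mask_sentinel(dtype, preserve_zeros):
    # hypothetical helper mirroring the branch above
    if not preserve_zeros:
        return 0
    if np.issubdtype(dtype, np.integer):
        return np.iinfo(dtype).max   # e.g. 65535 for uint16
    return np.inf                    # floating-point volumes keep infinity

assert mask_sentinel(np.uint16, True) == 65535
assert mask_sentinel(np.float32, True) == np.inf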
Example #14
    def download(self,
                 bbox,
                 mip=None,
                 parallel=None,
                 segids=None,
                 preserve_zeros=False):
        """
    Downloads base segmentation and optionally agglomerates
    labels based on information in the graph server.

    bbox: specifies cutout to fetch
    mip: which resolution level to get (default self.mip)
    parallel: what parallel level to use (default self.parallel)

    segids: agglomerate the leaves of these segids from the graph 
      server and label them with the given segid.
    preserve_zeros: If segids is not None:
      False: mask other segids with zero
      True: mask other segids with the largest integer value
        contained by the image data type and leave zero as is.

    Returns: img
    """
        if type(bbox) is Vec:
            bbox = Bbox(bbox, bbox + 1)

        bbox = Bbox.create(bbox,
                           context=self.bounds,
                           bounded=self.bounded,
                           autocrop=self.autocrop)

        if bbox.subvoxel():
            raise exceptions.EmptyRequestException(
                "Requested {} is smaller than a voxel.".format(bbox))

        if mip is None:
            mip = self.mip

        mip0_bbox = self.bbox_to_mip(bbox, mip=mip, to_mip=0)
        # Only ever necessary to make requests within the bounding box
        # to the server. We can fill black in other situations.
        mip0_bbox = bbox.intersection(self.meta.bounds(0), mip0_bbox)

        img = super(CloudVolumeGraphene, self).download(bbox,
                                                        mip=mip,
                                                        parallel=parallel)

        if segids is None:
            return img

        segids = list(toiter(segids))

        remapping = {}
        for segid in segids:
            leaves = self.get_leaves(segid, mip0_bbox, 0)
            remapping.update({leaf: segid for leaf in leaves})

        img = fastremap.remap(img,
                              remapping,
                              preserve_missing_labels=True,
                              in_place=True)

        mask_value = 0
        if preserve_zeros:
            mask_value = np.inf
            if np.issubdtype(self.dtype, np.integer):
                mask_value = np.iinfo(self.dtype).max

            segids.append(0)

        img = fastremap.mask_except(img,
                                    segids,
                                    in_place=True,
                                    value=mask_value)

        return VolumeCutout.from_volume(self.meta, mip, img, bbox)