Example No. 1
def test_2d_renumber():
    for dtype in DTYPES:
        data = np.array([
            [5, 5, 5, 2],
            [3, 5, 5, 0],
            [1, 2, 4, 1],
            [20, 19, 20, 1],
        ],
                        dtype=dtype)

        data2 = np.copy(data, order='C')
        data2, remapdict = fastremap.renumber(data2, preserve_zero=True)

        assert np.all(data2 == [
            [1, 1, 1, 2],
            [3, 1, 1, 0],
            [4, 2, 5, 4],
            [6, 7, 6, 4],
        ])

        data2 = np.copy(data, order='F')
        data2, remapdict = fastremap.renumber(data2, preserve_zero=True)

        assert np.all(data2 == [
            [1, 1, 1, 5],
            [2, 1, 1, 0],
            [3, 5, 7, 3],
            [4, 6, 4, 3],
        ])
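
Note on the two assertions above: renumber assigns new ids in memory-traversal order, so a C-order (row-major) copy and an F-order (column-major) copy of the same array yield different labelings. A minimal sketch of that behavior (illustrative, not part of the test suite):

import numpy as np
import fastremap

labels = np.array([[9, 9], [4, 9]], dtype=np.uint32)
relabeled, mapping = fastremap.renumber(labels, preserve_zero=True)
print(relabeled)  # [[1 1]
                  #  [2 1]]  -- row-major traversal sees 9 first, then 4
print(mapping)    # {9: 1, 4: 2}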
Example No. 2
def test_1d_renumber():
    for dtype in DTYPES:
        print(dtype)
        data = np.arange(8).astype(dtype)
        data = np.flip(data)

        data2 = np.copy(data)
        data2, remapdict = fastremap.renumber(data2, preserve_zero=False)

        assert np.all(data2 == np.arange(1, 9))
        assert len(remapdict) > 0

        data2 = np.copy(data)
        data2, remapdict = fastremap.renumber(data2, preserve_zero=True)

        assert data2[-1] == 0
        assert np.all(data2 == [1, 2, 3, 4, 5, 6, 7, 0])
        assert len(remapdict) > 0

    data = np.arange(8).astype(bool)  # np.bool was removed in NumPy 1.24
    data = np.flip(data)

    data2 = np.copy(data)
    data2, remapdict = fastremap.renumber(data2, preserve_zero=False)

    assert np.all(data2 == [1, 1, 1, 1, 1, 1, 1, 2])
    assert len(remapdict) > 0

    data2 = np.copy(data)
    data2, remapdict = fastremap.renumber(data2, preserve_zero=True)

    assert np.all(data2 == [1, 1, 1, 1, 1, 1, 1, 0])
    assert len(remapdict) > 0
Example No. 3
def test_empty_renumber():
    for dtype in DTYPES:
        data = np.array([], dtype=dtype)
        data2, remapdict = fastremap.renumber(data, preserve_zero=False)

        assert np.all(data2 == [])
        assert remapdict == {}
Example No. 4
def postprocessing(raster, S):
    """Post processing function to enforce connectivity.

    :param raster: Labelled image.
    :type raster: numpy.ndarray

    :param S: Spacing between superpixels.
    :type S: int

    :returns final: Labelled image with connectivity enforced.
    """
    import cc3d
    import fastremap
    import numpy
    import rasterio
    from rasterio import features

    for i in range(10):

        raster, remapping = fastremap.renumber(raster, in_place=True)

        # Remove spurious regions generated during segmentation
        cc = cc3d.connected_components(raster.astype(dtype=numpy.uint16),
                                       connectivity=6)

        T = int((S**2) / 2)

        # Use Connectivity as 4 to avoid undesired connections
        raster = features.sieve(cc.astype(dtype=rasterio.int32),
                                T,
                                out=numpy.zeros(cc.shape,
                                                dtype=rasterio.int32),
                                connectivity=4)

    return raster
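
A hypothetical call site for the function above; `segment` is a placeholder for whatever produces the initial superpixel labelling, and S is the superpixel spacing that sets the sieve threshold T = S**2 / 2:

labelled = segment(image)                 # hypothetical labelling step
cleaned = postprocessing(labelled, S=25)  # sieve threshold T = int(25**2 / 2) = 312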
Example No. 5
    def build_batch(data: torch_geometric.data.Batch,
                    id2graphlet: Dict[int, Subgraph],
                    common_file=None) -> "Batch":
        # Check if graphlet_id == 0 exists in x because of how remap works
        graphlet_id_zero = (data.x == 0).any().item()

        # remap graphlet_ids to (0..len(data.x.unique()))
        remapped_graphlet_ids, mapping = renumber(
            data.x.numpy(),
            start=0 + int(graphlet_id_zero),
            in_place=False,
            preserve_zero=graphlet_id_zero)

        batch_graphlet_indices = torch.tensor(remapped_graphlet_ids.flatten(),
                                              dtype=torch.int64)

        graphlet_ids_sorted_by_new_id = (
            list(
                map(
                    lambda e: e[0],  # get key of
                    # (key, value) sorted by value
                    sorted(mapping.items(), key=lambda e: e[1]))))

        xs = []
        edge_indices = []

        # create list of xs and edge_indices where
        # xs[i] are the features of the graphlet that was mapped to
        # new_id == i etc.
        for i, graphlet_id in enumerate(graphlet_ids_sorted_by_new_id):
            graphlet = id2graphlet[graphlet_id]
            xs.append(graphlet.x)
            edge_indices.append(graphlet.edge_index + i * graphlet.x.size(0))

        if common_file is not None:
            common = np.loadtxt(str(common_file), dtype=np.int64)
            common = torch.tensor(
                list(map(lambda x: mapping.get(x, -100), common)))
            common = (batch_graphlet_indices == common.reshape(-1, 1)).any(0)
            data.estimates[~common] = 0

        # Sparse matrix where each row represents a graph
        # and each column a graphlet where
        # m[graph][graphlet] == count of graphlet in graph
        graph_has_graphlet = SparseTensor(row=data.batch,
                                          col=batch_graphlet_indices,
                                          value=data.estimates)

        if graph_has_graphlet.density() > 0.75:  # FIXME: update parameter if necessary
            graph_has_graphlet = graph_has_graphlet.to_dense()

        return Batch(x=torch.cat(xs, dim=0),
                     edge_index=torch.cat(edge_indices, dim=1),
                     graph_has_graphlet=graph_has_graphlet,
                     graphlet_ids=graphlet_ids_sorted_by_new_id,
                     y=data.y)
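
This example relies on renumber's start and preserve_zero parameters to produce a dense id range beginning at 0 (or at 1 with 0 left fixed, when graphlet id 0 occurs). A small sketch of that interplay on toy data:

import numpy as np
import fastremap

x = np.array([7, 3, 7, 5], dtype=np.int64)
y, mapping = fastremap.renumber(x, start=0, preserve_zero=False)
print(y)        # [0 1 0 2]
print(mapping)  # {7: 0, 3: 1, 5: 2}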
Example No. 6
def test_3d_renumber():
    for dtype in DTYPES:
        bits = np.dtype(dtype).itemsize * 8
        big = (2**(bits - 1)) - 1  # cover ints and uints
        data = np.array([
            [
                [big, 0],
                [2, big],
            ],
            [
                [big - 5, big - 1],
                [big - 7, big - 3],
            ],
        ],
                        dtype=dtype)

        data2 = np.copy(data, order='C')
        data2, remapdict = fastremap.renumber(data2, preserve_zero=False)

        assert np.all(data2 == [
            [[1, 2], [3, 1]],
            [
                [4, 5],
                [6, 7],
            ],
        ])

        data2 = np.copy(data, order='F')
        data2, remapdict = fastremap.renumber(data2, preserve_zero=False)

        assert np.all(data2 == [
            [[1, 5], [3, 1]],
            [
                [2, 6],
                [4, 7],
            ],
        ])

    big = np.random.randint(0, (2**64) - 1,
                            size=(512, 512, 100),
                            dtype=np.uint64)
    big, remapdict = fastremap.renumber(big, preserve_zero=True, in_place=True)
    assert np.dtype(big.dtype).itemsize <= 4
    assert np.dtype(big.dtype).itemsize > 1
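
The final two assertions exercise a side effect of renumber: after relabeling, the values fit a much smaller range, so the result comes back in a narrower dtype (here at most 32 bits for ~26 million voxels, despite the uint64 input). A compact illustration on toy data:

import numpy as np
import fastremap

arr = np.array([2**40, 2**41, 2**40], dtype=np.uint64)
arr, mapping = fastremap.renumber(arr, preserve_zero=True)
print(arr)        # [1 2 1]
print(arr.dtype)  # a small unsigned type, e.g. uint8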
Example No. 7
  def execute(self):
    self._volume = CloudVolume(
      self.layer_path, self.options['mip'], bounded=False,
      parallel=self.options['parallel_download'], 
      fill_missing=self.options['fill_missing']
    )
    self._bounds = Bbox(self.offset, self.shape + self.offset)
    self._bounds = Bbox.clamp(self._bounds, self._volume.bounds)

    self.progress = bool(self.options['progress'])

    self._mesher = zmesh.Mesher(self._volume.resolution)

    # Marching cubes loves its 1vx overlaps.
    # This avoids lines appearing between
    # adjacent chunks.
    data_bounds = self._bounds.clone()
    data_bounds.minpt -= self.options['low_padding']
    data_bounds.maxpt += self.options['high_padding']

    self._mesh_dir = self.get_mesh_dir()

    if self.options['encoding'] == 'draco':
      self.draco_encoding_settings = draco_encoding_settings(
        shape=(self.shape + self.options['low_padding'] + self.options['high_padding']),
        offset=self.offset,
        resolution=self._volume.resolution,
        compression_level=self.options["draco_compression_level"],
        create_metadata=self.options['draco_create_metadata'],
        uses_new_draco_bin_size=False,
      )

    # chunk_position includes the overlap specified by low_padding/high_padding
    # agglomerate, timestamp, stop_layer only apply to graphene volumes;
    # they are no-ops for precomputed
    data = self._volume.download(
      data_bounds, 
      agglomerate=self.options['agglomerate'], 
      timestamp=self.options['timestamp'], 
      stop_layer=self.options['stop_layer']
    )

    if not np.any(data):
      if self.options['spatial_index']:
        self._upload_spatial_index(self._bounds, {})
      return

    data = self._remove_dust(data, self.options['dust_threshold'])
    data = self._remap(data)

    if self.options['object_ids']:
      data = fastremap.mask_except(data, self.options['object_ids'], in_place=True)

    data, renumbermap = fastremap.renumber(data, in_place=True)
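    # renumber returns {original_id: new_id}; invert it so the meshes
    # computed on renumbered data are filed under their original ids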
    renumbermap = { v:k for k,v in renumbermap.items() }
    self.compute_meshes(data, renumbermap)
Example No. 8
def compute_cc_labels(all_labels):
    tmp_labels = all_labels
    if np.dtype(all_labels.dtype).itemsize > 1:
        tmp_labels, remapping = fastremap.renumber(all_labels, in_place=False)

    cc_labels = cc3d.connected_components(tmp_labels)
    cc_labels = fastremap.refit(cc_labels)

    del tmp_labels
    remapping = kimimaro.skeletontricks.get_mapping(all_labels, cc_labels)
    return cc_labels, remapping
Example No. 9
def _masks_to_gui(parent, masks, outlines=None):
    """ masks loaded into GUI """
    # get unique values
    shape = masks.shape

    fastremap.renumber(masks, in_place=True)
    masks = np.reshape(masks, shape)
    masks = masks.astype(np.uint16) if masks.max() < 2**16 - 1 else masks.astype(np.uint32)
    parent.cellpix = masks

    # get outlines
    if outlines is None:  # parent.outlinesOn
        parent.outpix = np.zeros_like(masks)
        for z in range(parent.NZ):
            outlines = utils.masks_to_outlines(masks[z])
            parent.outpix[z] = outlines * masks[z]
            if z % 50 == 0 and parent.NZ > 1:
                print('GUI_INFO: plane %d outlines processed' % z)
    else:
        parent.outpix = outlines
        shape = parent.outpix.shape
        _, parent.outpix = np.unique(parent.outpix, return_inverse=True)
        parent.outpix = np.reshape(parent.outpix, shape)

    parent.ncells = parent.cellpix.max()
    colors = parent.colormap[:parent.ncells, :3]

    parent.cellcolors = list(
        np.concatenate((np.array([[255, 255, 255]]), colors),
                       axis=0).astype(np.uint8))
    parent.draw_masks()
    # redraw to obey the outline/mask setting upon recomputing
    parent.redraw_masks(masks=parent.masksOn, outlines=parent.outlinesOn)
    if parent.ncells > 0:
        parent.toggle_mask_ops()
    parent.ismanual = np.zeros(parent.ncells, bool)
    parent.zdraw = list(-1 * np.ones(parent.ncells, np.int16))
    parent.update_plot()
Example No. 10
def engage_avocado_protection(cc_labels, all_dbf, remapping,
                              soma_detection_threshold, edtfn, progress):
    orig_cc_labels = np.copy(cc_labels, order='F')

    unchanged = set()
    max_iterations = max(fastremap.unique(cc_labels))

    # This loop handles nested avocados
    # Unless there are deeply nested double avocados,
    # this should complete in 2-3 passes. We limit it
    # to 20 just to make sure this loop terminates no matter what.
    # Avocados aren't the end of the world.
    for _ in tqdm(range(20), disable=(not progress), desc="Avocado Pass"):
        # Note: divide soma_detection_threshold by a bit more than 2 because the nuclei
        # are going to be about a factor of 2 or less smaller than what we'd expect from
        # a cell. For example, in one avocado the DBF of the nucleus was 499 when the
        # detection threshold was set to 1100.
        candidates = set(
            fastremap.unique(cc_labels *
                             (all_dbf > soma_detection_threshold / 2.5)))
        candidates -= unchanged
        candidates.discard(0)

        cc_labels, unchanged_this_cycle, changes = engage_avocado_protection_single_pass(
            cc_labels,
            all_dbf,
            candidates=candidates,
            progress=progress,
        )
        unchanged |= unchanged_this_cycle

        if len(changes) == 0:
            break

        all_dbf = edtfn(cc_labels)

    # Downstream logic assumes cc_labels is contiguously numbered
    cc_labels, _ = fastremap.renumber(cc_labels, in_place=True)
    cc_remapping = kimimaro.skeletontricks.get_mapping(orig_cc_labels,
                                                       cc_labels)

    adjusted_remapping = {}
    for new_cc, cc in cc_remapping.items():
        if cc in remapping:
            adjusted_remapping[new_cc] = remapping[cc]

    return cc_labels, all_dbf, adjusted_remapping
Example No. 11
def compute_cc_labels(all_labels, cc_safety_factor):
    if cc_safety_factor <= 0 or cc_safety_factor > 1:
        raise ValueError(
            "cc_safety_factor must be greater than zero and less than or equal to one. Got: "
            + str(cc_safety_factor))

    tmp_labels = all_labels
    if np.dtype(all_labels.dtype).itemsize > 1:
        tmp_labels, remapping = fastremap.renumber(all_labels, in_place=True)

    cc_labels = cc3d.connected_components(tmp_labels,
                                          max_labels=int(tmp_labels.size *
                                                         cc_safety_factor))

    del tmp_labels
    remapping = kimimaro.skeletontricks.get_mapping(all_labels, cc_labels)
    return cc_labels, remapping
Example No. 12
def labels_to_flows(labels, files=None, use_gpu=False, device=None, redo_flows=False):
    """ convert labels (list of masks or flows) to flows for training model 

    if files is not None, flows are saved to files to be reused

    Parameters
    --------------

    labels: list of ND-arrays
        labels[k] can be 2D or 3D, if [3 x Ly x Lx] then it is assumed that flows were precomputed.
        Otherwise labels[k][0] or labels[k] (if 2D) is used to create flows and cell probabilities.

    Returns
    --------------

    flows: list of [4 x Ly x Lx] arrays
        flows[k][0] is labels[k], flows[k][1] is the binary cell mask (labels > 0.5),
        flows[k][2] is Y flow, and flows[k][3] is X flow

    """
    nimg = len(labels)
    if labels[0].ndim < 3:
        labels = [labels[n][np.newaxis,:,:] for n in range(nimg)]

    if labels[0].shape[0] == 1 or labels[0].ndim < 3 or redo_flows: # flows need to be recomputed
        
        dynamics_logger.info('computing flows for labels')
        
        # compute flows; labels are fixed here to be unique, so they need to be passed back
        # make sure labels are unique!
        labels = [fastremap.renumber(label, in_place=True)[0] for label in labels]
        veci = [masks_to_flows(labels[n][0],use_gpu=use_gpu, device=device) for n in trange(nimg)]
        
        # concatenate labels, distance transform, vector flows, heat (boundary and mask are computed in augmentations)
        flows = [np.concatenate((labels[n], labels[n]>0.5, veci[n]), axis=0).astype(np.float32)
                    for n in range(nimg)]
        if files is not None:
            for flow, file in zip(flows, files):
                file_name = os.path.splitext(file)[0]
                tifffile.imsave(file_name+'_flows.tif', flow)
    else:
        dynamics_logger.info('flows precomputed')
        flows = [labels[n].astype(np.float32) for n in range(nimg)]
    return flows
Example No. 13
def cc3d_test(labels):
    labels, remap = fastremap.renumber(labels)
    res = cc3d.connected_components(labels)
    N = np.max(labels)
    for segid in tqdm(range(1, N + 1)):
        extracted = (res == segid)
Example No. 14
    def remap(self, start_id: int):
        fastremap.renumber(self.array, preserve_zero=True, in_place=True)
        seg = self.astype(np.uint64)
        seg.array[seg.array > 0] += start_id
        start_id = seg.max()
        return seg, start_id
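
A hypothetical way this method gets used: stitching several chunks into one global id space by threading start_id through successive calls (`chunks` is a placeholder iterable of such objects):

start_id = 0
for chunk in chunks:                       # hypothetical iterable
    seg, start_id = chunk.remap(start_id)  # each chunk's labels land above the last's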
Example No. 15
def get_masks(p, iscell=None, rpad=20, flows=None, threshold=0.4, use_gpu=False, device=None):
    """ create masks using pixel convergence after running dynamics
    
    Makes a histogram of final pixel locations p, initializes masks 
    at peaks of histogram and extends the masks from the peaks so that
    they include all pixels with more than 2 final pixels p. Discards 
    masks with flow errors greater than the threshold. 
    Parameters
    ----------------
    p: float32, 3D or 4D array
        final locations of each pixel after dynamics,
        size [axis x Ly x Lx] or [axis x Lz x Ly x Lx].
    iscell: bool, 2D or 3D array
        if iscell is not None, set pixels that are 
        iscell False to stay in their original location.
    rpad: int (optional, default 20)
        histogram edge padding
    threshold: float (optional, default 0.4)
        masks with flow error greater than threshold are discarded 
        (if flows is not None)
    flows: float, 3D or 4D array (optional, default None)
        flows [axis x Ly x Lx] or [axis x Lz x Ly x Lx]. If flows
        is not None, then masks with inconsistent flows are removed using 
        `remove_bad_flow_masks`.
    Returns
    ---------------
    M0: int, 2D or 3D array
        masks with inconsistent flow masks removed, 
        0=NO masks; 1,2,...=mask labels,
        size [Ly x Lx] or [Lz x Ly x Lx]
    
    """
    
    pflows = []
    edges = []
    shape0 = p.shape[1:]
    dims = len(p)
    if iscell is not None:
        if dims==3:
            inds = np.meshgrid(np.arange(shape0[0]), np.arange(shape0[1]),
                np.arange(shape0[2]), indexing='ij')
        elif dims==2:
            inds = np.meshgrid(np.arange(shape0[0]), np.arange(shape0[1]),
                     indexing='ij')
        for i in range(dims):
            p[i, ~iscell] = inds[i][~iscell]

    for i in range(dims):
        pflows.append(p[i].flatten().astype('int32'))
        edges.append(np.arange(-.5-rpad, shape0[i]+.5+rpad, 1))

    h,_ = np.histogramdd(tuple(pflows), bins=edges)
    hmax = h.copy()
    for i in range(dims):
        hmax = maximum_filter1d(hmax, 5, axis=i)

    seeds = np.nonzero(np.logical_and(h-hmax>-1e-6, h>10))
    Nmax = h[seeds]
    isort = np.argsort(Nmax)[::-1]
    seeds = tuple(s[isort] for s in seeds)  # sort seeds by peak height, descending

    pix = list(np.array(seeds).T)

    shape = h.shape
    if dims==3:
        expand = np.nonzero(np.ones((3,3,3)))
    else:
        expand = np.nonzero(np.ones((3,3)))
    # the ±1 neighborhood offsets in `expand` are applied to each seed below

    for iter in range(5):
        for k in range(len(pix)):
            if iter==0:
                pix[k] = list(pix[k])
            newpix = []
            iin = []
            for i,e in enumerate(expand):
                epix = e[:,np.newaxis] + np.expand_dims(pix[k][i], 0) - 1
                epix = epix.flatten()
                iin.append(np.logical_and(epix>=0, epix<shape[i]))
                newpix.append(epix)
            iin = np.all(tuple(iin), axis=0)
            newpix = [p[iin] for p in newpix]  # drop out-of-bounds neighbors
            newpix = tuple(newpix)
            igood = h[newpix]>2
            for i in range(dims):
                pix[k][i] = newpix[i][igood]
            if iter==4:
                pix[k] = tuple(pix[k])
    
    M = np.zeros(h.shape, np.uint32)
    for k in range(len(pix)):
        M[pix[k]] = 1+k
        
    for i in range(dims):
        pflows[i] = pflows[i] + rpad
    M0 = M[tuple(pflows)]

    # remove big masks
    uniq, counts = fastremap.unique(M0, return_counts=True)
    big = np.prod(shape0) * 0.4
    bigc = uniq[counts > big]
    if len(bigc) > 0 and (len(bigc)>1 or bigc[0]!=0):
        M0 = fastremap.mask(M0, bigc)
    fastremap.renumber(M0, in_place=True) #convenient to guarantee non-skipped labels
    M0 = np.reshape(M0, shape0)
    return M0
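
The mask-removal block near the end combines three fastremap helpers. A minimal sketch of their behavior on toy 1D data:

import numpy as np
import fastremap

M0 = np.array([0, 3, 3, 3, 7], dtype=np.uint32)
uniq, counts = fastremap.unique(M0, return_counts=True)  # like np.unique, but faster
M0 = fastremap.mask(M0, uniq[counts > 2])                # zero out label 3 (count 3)
fastremap.renumber(M0, in_place=True)                    # relabel 7 -> 1, 0 preserved
print(M0)  # [0 0 0 0 1]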