Example #1
def test_concatenate_and_rechunk__shape_mismatch():
    z1 = zarr.zeros((5, 3), chunks=(2, 3), dtype="i4")
    z2 = zarr.zeros((5, 4), chunks=(2, 4), dtype="i4")
    zarrs = [z1, z2]

    with pytest.raises(ValueError, match="Zarr arrays must have matching shapes"):
        concatenate_and_rechunk(zarrs)
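For orientation, here is a minimal sketch of a concatenate_and_rechunk helper consistent with these tests; the behavior and error message are inferred from the tests above, not the library's actual implementation:

import dask.array as da

def concatenate_and_rechunk_sketch(zarrs):
    # All inputs must agree on every dimension except the first.
    if len({z.shape[1:] for z in zarrs}) > 1:
        raise ValueError("Zarr arrays must have matching shapes")
    # Concatenate lazily along axis 0, then restore the per-array chunking.
    out = da.concatenate([da.from_zarr(z) for z in zarrs], axis=0)
    return out.rechunk(zarrs[0].chunks)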
Example #2
def test_run_simulator_with_threads_and_zarr_memory_store():
    """
    If the store is in memory (here a Zarr MemoryStore) and the Dask workers
    share memory with the client (i.e. we have a threads-based cluster),
    collect_in_memory can be set to False.
    """
    cluster = LocalCluster(n_workers=2, processes=False, threads_per_worker=1)
    simulator = Simulator(model, sim_shapes=dict(x=(10, )), cluster=cluster)

    pars = zarr.zeros((100, 2))
    pars[:, :] = np.random.random(pars.shape)
    x = zarr.zeros((100, 10))
    sims = dict(x=x.oindex)
    sim_status = zarr.full(100, SimulationStatus.RUNNING, dtype="int")

    # the following is non-blocking (it immediately returns)
    simulator.run(
        pars=pars,
        sims=sims,
        sim_status=sim_status.oindex,
        indices=np.arange(100, dtype=int),
        collect_in_memory=False,
        batch_size=20,
    )

    # need to wait for tasks to be completed
    _wait_for_all_tasks()

    assert np.all(sim_status[:] == SimulationStatus.FINISHED)
    assert not np.all(np.isclose(sims["x"][:, :].sum(axis=1), 0.0))
    simulator.client.close()
    cluster.close()
Example #3
def test_run_simulator_with_processes_and_zarr_memory_store():
    """
    If the store is in memory (here a Zarr MemoryStore) and the Dask workers do
    not share memory with the client (i.e. we have a processes-based cluster),
    collect_in_memory must be set to True.
    """
    cluster = LocalCluster(n_workers=2, processes=True, threads_per_worker=1)
    simulator = Simulator(model, sim_shapes=dict(x=(10, )), cluster=cluster)

    pars = zarr.zeros((100, 2))
    pars[:, :] = np.random.random(pars.shape)
    x = zarr.zeros((100, 10))
    sims = dict(x=x.oindex)
    sim_status = zarr.full(100, SimulationStatus.RUNNING, dtype="int")

    simulator.run(
        pars=pars,
        sims=sims,
        sim_status=sim_status.oindex,
        indices=np.arange(100, dtype=int),
        collect_in_memory=True,
        batch_size=20,
    )

    assert np.all(sim_status[:] == SimulationStatus.FINISHED)
    assert not np.all(np.isclose(sims["x"][:, :].sum(axis=1), 0.0))
    simulator.client.close()
    cluster.close()
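The distinction the two docstrings draw can be reproduced with plain Dask: a zarr array backed by an in-memory store is pickled into each worker process, so writes made under a processes-based cluster never reach the client's copy. A minimal illustration (assuming only zarr and dask.distributed):

import zarr
from dask.distributed import Client, LocalCluster

z = zarr.zeros((4,))  # in-memory store, owned by the client process

def write(i):
    z[i] = 1.0  # under processes=True this mutates a pickled copy
    return float(z[i])

if __name__ == "__main__":
    cluster = LocalCluster(n_workers=2, processes=True, threads_per_worker=1)
    client = Client(cluster)
    client.gather(client.map(write, range(4)))
    print(z[:])  # still all zeros; hence collect_in_memory=True above
    client.close()
    cluster.close()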
Example #4
def test_fancy_indexing_fallback_on_get_setitem():
    z = zarr.zeros((20, 20))
    z[[1, 2, 3], [1, 2, 3]] = 1
    np.testing.assert_array_equal(
        z[:4, :4],
        [
            [0, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ],
    )
    np.testing.assert_array_equal(
        z[[1, 2, 3], [1, 2, 3]], 1
    )
    # test broadcasting
    np.testing.assert_array_equal(
        z[1, [1, 2, 3]], [1, 0, 0]
    )
    # test 1D fancy indexing
    z2 = zarr.zeros(5)
    z2[[1, 2, 3]] = 1
    np.testing.assert_array_equal(
        z2, [0, 1, 1, 1, 0]
    )
Example #5
    def _create_stores(self, p):
        """
        extract all singlepage tiffs at each coordinate and place them in a zarr array
        coordinates are of shape = (pos, time, channel, z)
        arrays are of shape = (time, channel, z, height, width)

        Parameters
        ----------
        p:      (int) position (aka ome-tiff scene) to extract

        Returns
        -------

        """

        self.log.info("")
        z = zarr.zeros(shape=(self.frames, self.channels, self.slices,
                              self.height, self.width),
                       chunks=(1, 1, 1, self.height, self.width))
        for c, fn in self.coord_to_filename.items():
            if c[0] == p:
                self.log.info(f"reading coord = {c} from filename = {fn}")
                z[c[1], c[2], c[3]] = zarr.open(tiff.imread(fn, aszarr=True))

        # check that the array was assigned; comparing with `==` would build
        # an element-wise boolean array, so test for any nonzero entry instead
        if not np.any(z[:]):
            raise IOError(f"array at position {p} cannot be found")

        self.positions[p] = z
Example #6
def test__assert_calendars_same(source_attr, target_attr, expected_error):
    source_array = zarr.zeros((5))
    for k, v in source_attr.items():
        source_array.attrs[k] = v
    target_array = zarr.zeros((5))
    for k, v in target_attr.items():
        target_array.attrs[k] = v
    with pytest.raises(expected_error):
        append._assert_calendars_same(source_array, target_array)
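A minimal sketch of what _assert_calendars_same might check, consistent with this test; the attribute name "calendar" and the exact error types are assumptions:

def _assert_calendars_same_sketch(source_array, target_array):
    # "calendar" is a hypothetical attribute name for this sketch.
    src = source_array.attrs.get("calendar")
    tgt = target_array.attrs.get("calendar")
    if src is None or tgt is None:
        raise AttributeError("both arrays must carry a 'calendar' attribute")
    if src != tgt:
        raise ValueError(f"calendar mismatch: {src!r} != {tgt!r}")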
Example #7
    def reset_states(self, input_shape=None):
        """Initialize the state space

        This method initializes the layer and resets any previously held data.
        The zarr array is initialized in this method.

        Args:
            input_shape (TensorShape, tuple, list): Shape of the input.
        """

        if input_shape is not None:
            self._input_shape = input_shape

        if self._input_shape is None:
            raise ValueError(
                'The input_shape is None, and no previous input '
                'shape information was provided. The first time '
                'reset_states is called, an input_shape must be '
                'provided.')

        # Try to keep chunks to about 2**22 bytes (~4 MiB with dtype 'B')
        ncols = int(np.ceil(self._input_shape[self._channel_index] / 8))
        nrows = 2**22 // ncols

        # Initialize internal variables related to state space
        self._state_ids = None
        self._edges = None
        self._index = None
        self._counts = None
        self._entropy = None
        self._threads = []
        self._chunk_size = (nrows, ncols)
        self._state_shape = list(self._chunk_size)
        self._state_count = 0

        if self._raw_states is not None:
            # Zero out states and resize if zarr already open
            self._raw_states.resize(self._state_shape)
            self._raw_states[:] = 0
        else:
            # Initialize the zarr array
            if self._zarr_path is not None:
                if self._zarr_path.is_file():
                    self._zarr_path.unlink()

                self._raw_states = zarr.zeros(
                    shape=self._state_shape,
                    chunks=self._chunk_size,
                    dtype='B',
                    synchronizer=zarr.ThreadSynchronizer(),
                    store=str(self._zarr_path.absolute()))
            else:
                self._raw_states = zarr.zeros(
                    shape=self._state_shape,
                    chunks=self._chunk_size,
                    dtype='B',
                    synchronizer=zarr.ThreadSynchronizer())
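A quick check of the chunking arithmetic above: with dtype 'B' (one byte per element), a chunk of (nrows, ncols) holds at most 2**22 bytes. The channel count below is a made-up value for illustration:

import numpy as np

channels = 1024                      # hypothetical channel count
ncols = int(np.ceil(channels / 8))   # 128 bit-packed columns
nrows = 2**22 // ncols               # 32768 rows
print(nrows * ncols)                 # 4194304 elements == 4 MiB of 'B'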
Example #8
def test_concatenate_and_rechunk__2d():
    z1 = zarr.zeros((5, 3), chunks=(2, 3), dtype="i4")
    z1[:] = np.arange(15).reshape(5, 3)

    z2 = zarr.zeros((5, 3), chunks=(2, 3), dtype="i4")
    z2[:] = np.arange(15, 30).reshape(5, 3)

    zarrs = [z1, z2]

    out = concatenate_and_rechunk(zarrs)

    assert out.chunks == ((2, 2, 2, 2, 2), (3,))
    np.testing.assert_array_equal(out.compute(), np.arange(30).reshape(10, 3))
Example #9
def test_concatenate_and_rechunk__1d():
    z1 = zarr.zeros(5, chunks=2, dtype="i4")
    z1[:] = np.arange(5)

    z2 = zarr.zeros(5, chunks=2, dtype="i4")
    z2[:] = np.arange(5, 10)

    zarrs = [z1, z2]

    out = concatenate_and_rechunk(zarrs)

    assert out.chunks == ((2, 2, 2, 2, 2),)
    np.testing.assert_array_equal(out.compute(), np.arange(10))
Example #10
    def _start_flood_fill(self, pos):
        self._stop_flood_fill()
        inf_results = zarr.zeros(
            self.gt_vol.bounds.to_list()[3:][::-1], chunks=(64, 64, 64), dtype=np.uint8)
        inf_volume = neuroglancer.LocalVolume(
            data=inf_results, voxel_size=list(self.gt_vol.resolution))

        with self.viewer.txn() as s:
            s.layers['points'] = neuroglancer.AnnotationLayer()
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
        self.flood_fill_event = threading.Event()
        t = threading.Thread(
            target=self._do_flood_fill,
            kwargs=dict(
                initial_pos=pos,
                inf_results=inf_results,
                inf_volume=inf_volume,
                event=self.flood_fill_event,
            ))
        t.daemon = True
        t.start()
Example #11
def test_cache():
    store = SlowStore()
    store = Cache(store, max_size=1000000)
    store.flush()
    for i in range(10):
        z = zarr.zeros(
            (1000, 1000),
            chunks=(100, 100),
            path=posixpath.realpath(f"./data/test/test_cache/first{i}"),
            store=store,
            overwrite=True,
        )

        z[...] = i
        store.invalidate()

        t1 = time.time()
        z[...]
        t2 = time.time()
        z[...]
        t3 = time.time()
        assert z[0, 0] == i
        # print(t2 - t1, t3 - t2)
        assert t2 - t1 > t3 - t2
    store.close()
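SlowStore and Cache are project-specific test doubles/wrappers (note the hub-style flush/invalidate calls). A hedged sketch of the SlowStore idea, just enough to make the timing assertion meaningful:

import time

class SlowStoreSketch(dict):
    # A mutable mapping that sleeps on reads, so cached re-reads win the race.
    def __getitem__(self, key):
        time.sleep(0.01)  # simulate slow backing storage (assumed behavior)
        return super().__getitem__(key)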
Example #12
    def _start_flood_fill(self, pos):
        self._stop_flood_fill()
        inf_results = zarr.zeros(self.gt_vol.bounds.to_list()[3:],
                                 chunks=(64, 64, 64),
                                 dtype=np.uint8)
        inf_volume = neuroglancer.LocalVolume(data=inf_results,
                                              dimensions=self.dimensions)

        with self.viewer.txn() as s:
            s.layers['points'] = neuroglancer.LocalAnnotationLayer(
                self.dimensions)
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
        self.flood_fill_event = threading.Event()
        t = threading.Thread(target=self._do_flood_fill,
                             kwargs=dict(
                                 initial_pos=pos,
                                 inf_results=inf_results,
                                 inf_volume=inf_volume,
                                 event=self.flood_fill_event,
                             ))
        t.daemon = True
        t.start()
Example #13
def test_fancy_indexing_doesnt_mix_with_slicing():
    z = zarr.zeros((20, 20))
    with pytest.raises(IndexError):
        z[[1, 2, 3], :] = 2
    with pytest.raises(IndexError):
        np.testing.assert_array_equal(
            z[[1, 2, 3], :], 0
        )
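When plain indexing raises here, zarr's orthogonal indexer is the supported way to mix integer arrays with slices:

import zarr

z = zarr.zeros((20, 20))
z.oindex[[1, 2, 3], :] = 2  # rows 1-3 across all columns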
Example #14
def test_concatenate_and_rechunk__tiny_file():
    z1 = zarr.zeros(4, chunks=3, dtype="i4")
    z1[:] = np.arange(4)

    # this zarr array lies entirely within the second chunk
    z2 = zarr.zeros(1, chunks=3, dtype="i4")
    z2[:] = np.arange(4, 5)

    z3 = zarr.zeros(5, chunks=3, dtype="i4")
    z3[:] = np.arange(5, 10)

    zarrs = [z1, z2, z3]

    out = concatenate_and_rechunk(zarrs)

    assert out.chunks == ((3, 3, 3, 1),)
    np.testing.assert_array_equal(out.compute(), np.arange(10))
Example #15
def test_zarr_dask_2D(viewer_factory):
    """Test adding 2D dask image."""
    view, viewer = viewer_factory()

    data = zarr.zeros((200, 100), chunks=(40, 20))
    data[53:63, 10:20] = 1
    zdata = da.from_zarr(data)
    viewer.add_image(zdata)
    assert np.all(viewer.layers[0].data == zdata)
Example #16
def test_zarr_nD():
    """Test adding nD zarr image."""
    viewer = ViewerModel()

    data = zarr.zeros((200, 100, 50), chunks=(40, 20, 10))
    data[53:63, 10:20, :] = 1
    # If passing a zarr file directly, must pass contrast_limits
    viewer.add_image(data, contrast_limits=[0, 1])
    assert np.all(viewer.layers[0].data == data)
Example #17
def test_zarr_2D(viewer_factory):
    """Test adding 2D zarr image."""
    view, viewer = viewer_factory()

    data = zarr.zeros((200, 100), chunks=(40, 20))
    data[53:63, 10:20] = 1
    # If passing a zarr file directly, must pass contrast_limits
    viewer.add_image(data, contrast_limits=[0, 1])
    assert np.all(viewer.layers[0].data == data)
Example #18
    def setUp(self):
        self.z = zarr.zeros((25, 25), chunks=(5, 5), dtype=np.float32)
        self.z[:] = np.arange(25**2).reshape((25, 25))
        self.f_zarr = double_elements

        self.shm = shm
        with shm.txn() as arr:
            arr[:] = self.z[:]
        self.f_shm = double_elements_shm
Example #19
def test_zarr_dask_nD():
    """Test adding nD zarr image."""
    viewer = ViewerModel()

    data = zarr.zeros((200, 100, 50), chunks=(40, 20, 10))
    data[53:63, 10:20, :] = 1
    zdata = da.from_zarr(data)
    viewer.add_image(zdata)
    assert np.all(viewer.layers[0].data == zdata)
Example #20
def test_zeros_like():
    z = zeros(100, 10)
    z2 = zeros_like(z)
    eq(z.shape, z2.shape)
    eq(z.chunks, z2.chunks)
    eq(z.dtype, z2.dtype)
    eq(z.cname, z2.cname)
    eq(z.clevel, z2.clevel)
    eq(z.shuffle, z2.shuffle)
    eq(z.fill_value, z2.fill_value)
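The cname/clevel/shuffle attributes date from an early zarr API in which Blosc options lived on the array itself; in current zarr releases they live on the compressor, and the equivalent round-trip looks roughly like this:

import zarr

z = zarr.zeros(100, chunks=10)
z2 = zarr.zeros_like(z)
assert z.shape == z2.shape
assert z.chunks == z2.chunks
assert z.dtype == z2.dtype
assert z.fill_value == z2.fill_value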
Example #21
def test_ensure_dask_array():
    np_arr = np.zeros((128, 128, 3), dtype=np.uint8)
    da_arr = da.zeros((128, 128, 3), dtype=np.uint8)
    za_arr = zarr.zeros((128, 128, 3), dtype=np.uint8)

    np_out = ensure_dask_array(np_arr)
    da_out = ensure_dask_array(da_arr)
    za_out = ensure_dask_array(za_arr)

    assert isinstance(np_out, da.Array)
    assert isinstance(da_out, da.Array)
    assert isinstance(za_out, da.Array)
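A minimal sketch of an ensure_dask_array helper consistent with this test (an assumption; the real implementation may differ):

import dask.array as da

def ensure_dask_array_sketch(array, chunks="auto"):
    if isinstance(array, da.Array):
        return array  # already lazy; pass through unchanged
    # NumPy and zarr arrays both expose the interface da.from_array needs.
    return da.from_array(array, chunks=chunks)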
Example #22
def write_n5(path, shape, block_size, compressor):
  store = zarr.N5Store(path)
  data = np.arange(np.prod(shape), dtype=np.uint16)
  data = data.reshape(shape)
  data_transpose = data.transpose()
  z = zarr.zeros(
      data_transpose.shape,
      chunks=block_size[::-1],
      store=store,
      dtype=data.dtype,
      overwrite=True,
      compressor=compressor)
  z[...] = data_transpose
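Example invocation, assuming numcodecs is installed; the path and sizes are hypothetical. The transposes in write_n5 account for N5's column-major chunk layout:

from numcodecs import GZip

write_n5("/tmp/example.n5", shape=(4, 5, 6), block_size=[2, 3, 4],
         compressor=GZip())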
Example #23
def test_zarr_nD(qtbot):
    """Test adding nD zarr image."""
    viewer = Viewer()
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)

    data = zarr.zeros((200, 100, 50), chunks=(40, 20, 10))
    data[53:63, 10:20, :] = 1
    # If passing a zarr file directly, must pass contrast_limits
    viewer.add_image(data, contrast_limits=[0, 1])
    assert np.all(viewer.layers[0].data == data)

    # Close the viewer
    viewer.window.close()
Example #24
def test_zarr_dask_nD(qtbot):
    """Test adding nD zarr image."""
    viewer = Viewer()
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)

    data = zarr.zeros((200, 100, 50), chunks=(40, 20, 10))
    data[53:63, 10:20, :] = 1
    zdata = da.from_zarr(data)
    viewer.add_image(zdata)
    assert np.all(viewer.layers[0].data == zdata)

    # Close the viewer
    viewer.window.close()
Example #25
    def dummy_matrix_sequential(self):
        """Convert COO matrix format in Pytables to zarr array with
        One-Hot-Encoding or Dummy matrix format

        BAD PERFORMANCE

        Args:
            None

        Returns:
            zarr.array

        """
        def assign(za, chunk):
            for row in chunk:
                za[row['row'], row['col']] = 1

        h5file = pt.open_file(self.pytables_filepath, mode='r')
        ncol = h5file.root.kmers.nrows
        nrow = h5file.root.genomes.nrows
        idxtable = h5file.root.indexes
        print(nrow, ncol)

        za = zarr.zeros((nrow, ncol), chunks=(nrow, 1000), dtype='u1')

        bsize = 1000
        t = h5file.root.indexes.nrows
        t0 = time.time()
        for b in range(0, t, bsize):
            assign(za, idxtable[b:(bsize + b)])
            if b % 100000 == 0:
                t1 = time.time()
                diff = int(t1 - t0)
                minutes, seconds = diff // 60, diff % 60
                print("{} complete.\tElapsed time {}:{}".format(
                    round(b / t * 100, 1), str(minutes),
                    str(seconds).zfill(2)))

        # Write to file
        # store = zarr.DirectoryStore(self.zarr_filepath)
        # group=zarr.hierarchy.group(store=store,overwrite=True,synchronizer=zarr.ThreadSynchronizer())
        # za2=group.empty('ohe',shape=za.shape,dtype=za.dtype,chunks=za.chunks)
        # za2[...]=za[...]

        print(za[:10, :10])

        return za
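Given the BAD PERFORMANCE note, a hedged sketch of a vectorized alternative: each batch of COO index rows is written in a single call through zarr's coordinate indexer rather than one element at a time (same PyTables inputs assumed as above):

import zarr

def dummy_matrix_vectorized_sketch(idxtable, nrow, ncol, bsize=100_000):
    za = zarr.zeros((nrow, ncol), chunks=(nrow, 1000), dtype='u1')
    for b in range(0, idxtable.nrows, bsize):
        chunk = idxtable[b:b + bsize]  # structured array with 'row'/'col' fields
        za.vindex[chunk['row'], chunk['col']] = 1
    return za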
Example #26
def mergeTstats(randout, tstatout, contrast):
    import numpy as np
    from glob import glob
    import re, zarr
    zarrf = '%s/TestTval_%s.zarr' % (tstatout, contrast)
    print('Merging Test-T Values from contrast: %s' % contrast)

    # randomise results: randout/rowSlice/chunk/chunk*
    # TODO: This line seems RAM heavy:
    testTvals = glob('%s/*/*/*[0-9]_tstat%s.nii.gz' % (randout, contrast))

    # key: tstat filename
    # value: list of chunk coordinates (SourceStart, SourceEnd, TargetStart, TargetEnd)
    filedict = {}
    for ifile in testTvals:
        # RowStart-RowEnd_ColStart-ColEnd (inclusive)
        research = re.search('.*/(.*?)-(.*?)_(.*?)-(.*?)_done/', ifile)
        chunk_coords = [
            research.group(1),
            research.group(2),
            research.group(3),
            research.group(4)
        ]
        chunk_coords = [int(k) for k in chunk_coords]
        filedict[ifile] = chunk_coords

    # Find max Source/Target based on created niftis:
    tmp = np.array(list(filedict.values()))
    tmp = np.max(tmp, axis=0)
    shape = [
        tmp[i] + 1 for i in [1, 3]
    ]  # shape of final array needs to include maximums for source and target
    print('Contrast %s Final Shape: %s' % (contrast, shape))

    # create zarr file to output final matrix (2D)
    testTvals = zarr.zeros(store=zarrf, shape=shape, chunks=(128, 128))

    # loop through dictionary of input files to assign final values to testT:
    # i: tstat filename
    # j: chunk coords
    for i, j in filedict.items():
        tmpnii = getNII(i)
        # Assign nifti Test T-values to testTvals:
        # Add one because chunk coordinates from filename are inclusive:
        testTvals.oindex[j[0]:j[1] + 1, j[2]:j[3] + 1] = tmpnii
    print('### Test T-values Merged: %s' % zarrf)
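getNII is a helper assumed by this function; a plausible sketch using nibabel (an assumption, not necessarily the project's implementation):

import nibabel as nib

def getNII(path):
    # Load a NIfTI file and return its voxel data as a NumPy array.
    return nib.load(path).get_fdata()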
Example #27
        def delayed_assign(chunk, ncol, j):
            zrow = zarr.zeros((1, ncol), chunks=(1, 1000), dtype='u1')
            t = chunk.shape[0]
            t0 = time.time()
            i = 0
            for row in chunk:
                zrow[0, row['col']] = 1
                i += 1
                if i % 100000 == 0 and j == 0:
                    t1 = time.time()
                    diff = int(t1 - t0)
                    minutes, seconds = diff // 60, diff % 60
                    print("Job {} is {} complete.\tElapsed time {}:{}".format(
                        j, round(i / t * 100, 1), str(minutes),
                        str(seconds).zfill(2)))

            return zrow
Example #28
    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('inference', self._do_inference)
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=self.gt_vol.resolution,
        )
        self.inf_results = zarr.zeros(self.gt_vol.bounds.to_list()[3:],
                                      chunks=(64, 64, 64),
                                      dtype=np.uint8)
        self.inf_volume = neuroglancer.LocalVolume(data=self.inf_results,
                                                   dimensions=self.dimensions)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'inference'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=self.inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
Example #29
def new_zarr(path, shape, chunks, dtype, in_memory=False, **kwargs):
    """
    Create new Zarr NestedDirectoryStore at `path`.

    **NOTE:** Persistent Zarr arrays are stored on disk. To avoid data loss, be careful when calling `new_zarr`
    on a path with an existing array.

    Parameters
    ----------
    path : str
        Path to new zarr array
    shape : tuple
        Overall shape of the zarr array
    chunks : tuple
        Shape of each chunk for the zarr array
    dtype : str
        Data type of the zarr array
    in_memory : bool, optional
        If True, create the array in memory instead of in a NestedDirectoryStore
    kwargs : dict
        Keyword args to pass to zarr.open()

    Returns
    -------
    arr : zarr Array
        Reference to open zarr array
    """
    compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.BITSHUFFLE)
    if in_memory:
        z_arr_out = zarr.zeros(shape=shape,
                               chunks=chunks,
                               dtype=dtype,
                               compressor=compressor,
                               **kwargs)
    else:
        store = zarr.NestedDirectoryStore(path)
        z_arr_out = zarr.open(store,
                              mode='w',
                              shape=shape,
                              chunks=chunks,
                              dtype=dtype,
                              compressor=compressor,
                              **kwargs)
    return z_arr_out
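Example usage; the path is hypothetical, and note that Blosc above comes from numcodecs (from numcodecs import Blosc):

arr = new_zarr('/tmp/example_nested.zarr', shape=(64, 64, 64),
               chunks=(32, 32, 32), dtype='uint16')
arr[:] = 0  # chunks are written beneath the NestedDirectoryStore path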
Example #30
def test_fancy_indexing_doesnt_mix_with_implicit_slicing():
    z2 = zarr.zeros((5, 5, 5))
    with pytest.raises(IndexError):
        z2[[1, 2, 3], [1, 2, 3]] = 2
    with pytest.raises(IndexError):
        np.testing.assert_array_equal(
            z2[[1, 2, 3], [1, 2, 3]], 0
        )
    with pytest.raises(IndexError):
        z2[[1, 2, 3]] = 2
    with pytest.raises(IndexError):
        np.testing.assert_array_equal(
            z2[[1, 2, 3]], 0
        )
    with pytest.raises(IndexError):
        z2[..., [1, 2, 3]] = 2
    with pytest.raises(IndexError):
        np.testing.assert_array_equal(
            z2[..., [1, 2, 3]], 0
        )
Example #31
    def __init__(
        self,
        url: str,
        shape: typing.Tuple[int, ...] = None,
        dtype="float32",
        creds=None,
        memcache: float = None,
    ):
        if shape is not None:
            self._zarr = zarr.zeros(
                shape,
                dtype=dtype,
                chunks=self._determine_chunksizes(shape, dtype),
                store=hub.areal.store.get_storage_map(url, creds, memcache),
                overwrite=True,
            )
        else:
            self._zarr = zarr.open_array(
                hub.areal.store.get_storage_map(url, creds, memcache))
        self._shape = self._zarr.shape
        self._chunks = self._zarr.chunks
        self._dtype = self._zarr.dtype
        self._memcache = memcache
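_determine_chunksizes is internal to the class; one plausible heuristic, sketched under the assumption of a fixed byte budget per chunk (hub's real logic may differ):

import numpy as np

def determine_chunksizes_sketch(shape, dtype, target_bytes=2**24):
    chunks = list(shape)
    itemsize = np.dtype(dtype).itemsize
    # Halve the leading dimension until a chunk fits the ~16 MiB budget.
    while int(np.prod(chunks)) * itemsize > target_bytes and chunks[0] > 1:
        chunks[0] = (chunks[0] + 1) // 2
    return tuple(chunks)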
Example #32
    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('inference', self._do_inference)
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        self.inf_results = zarr.zeros(
            self.gt_vol.bounds.to_list()[3:][::-1], chunks=(64, 64, 64), dtype=np.uint8)
        self.inf_volume = neuroglancer.LocalVolume(
            data=self.inf_results, voxel_size=list(self.gt_vol.resolution))
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'inference'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=self.inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
Example #33
def test_zeros():
    z = zeros(100, 10)
    eq((100,), z.shape)
    eq((10,), z.chunks)
    assert_array_equal(np.zeros(100), z[:])
Example #34
    def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event):
        initial_pos = (int(initial_pos[0]), int(initial_pos[1]), int(initial_pos[2]))

        gt_vol_zarr = zarr.zeros(
            self.gt_vol.bounds.to_list()[3:][::-1], chunks=(64, 64, 64), dtype=np.uint64)

        gt_blocks_seen = set()

        block_size = np.array((64, 64, 64), np.int64)

        def fetch_gt_block(block):
            spos = block * block_size
            epos = spos + block_size
            slice_expr = np.s_[int(spos[0]):int(epos[0]),
                               int(spos[1]):int(epos[1]),
                               int(spos[2]):int(epos[2])]
            rev_slice_expr = np.s_[int(spos[2]):int(epos[2]),
                                   int(spos[1]):int(epos[1]),
                                   int(spos[0]):int(epos[0])]
            gt_data = np.transpose(self.gt_vol[slice_expr][..., 0], (2, 1, 0))
            gt_vol_zarr[rev_slice_expr] = gt_data

        def get_patch(spos, epos):
            spos = np.array(spos)
            epos = np.array(epos)
            sblock = spos // block_size
            eblock = (epos - 1) // block_size
            for blockoff in np.ndindex(tuple(eblock - sblock + 1)):
                block = np.array(blockoff) + sblock
                block_tuple = tuple(block)
                if block_tuple in gt_blocks_seen: continue
                gt_blocks_seen.add(block_tuple)
                fetch_gt_block(block)
            rev_slice_expr = np.s_[int(spos[2]):int(epos[2]),
                                   int(spos[1]):int(epos[1]),
                                   int(spos[0]):int(epos[0])]
            result = gt_vol_zarr[rev_slice_expr]
            return result

        segment_id = self.gt_vol[initial_pos][0]

        patch_size = np.array((33, ) * 3, np.int64)
        lower_bound = patch_size // 2
        upper_bound = np.array(self.gt_vol.bounds.to_list()[3:]) - patch_size + patch_size // 2
        d = 8

        seen = set()
        q = []

        last_invalidate = [time.time()]
        invalidate_interval = 3

        def enqueue(pos):
            if np.any(pos < lower_bound) or np.any(pos >= upper_bound): return
            if pos in seen: return
            seen.add(pos)
            q.append(pos)

        def update_view():
            if event.is_set():
                return
            cur_time = time.time()
            if cur_time < last_invalidate[0] + invalidate_interval:
                return
            last_invalidate[0] = cur_time
            inf_volume.invalidate()
            with self.viewer.txn() as s:
                s.layers['points'].annotations = [
                    neuroglancer.PointAnnotation(id=repr(pos), point=pos) for pos in list(seen)
                ]

        def process_pos(pos):
            spos = pos - patch_size // 2
            epos = spos + patch_size
            rev_slice_expr = np.s_[int(spos[2]):int(epos[2]),
                                   int(spos[1]):int(epos[1]),
                                   int(spos[0]):int(epos[0])]
            gt_data = get_patch(spos, epos)
            mask = gt_data == segment_id
            for offset in ((0, 0, d), (0, 0, -d), (0, d, 0), (0, -d, 0), (d, 0, 0), (-d, 0, 0)):
                if not mask[tuple(patch_size // 2 + offset)[::-1]]: continue
                new_pos = np.array(pos) + np.array(offset)
                enqueue(tuple(new_pos))

            dist_transform = scipy.ndimage.distance_transform_edt(~mask)
            inf_results[rev_slice_expr] = 1 + (
                np.minimum(dist_transform, 5) / 5.0 * 254).astype(np.uint8)

            self.viewer.defer_callback(update_view)

        enqueue(initial_pos)

        while len(q) > 0 and not event.is_set():
            i = random.randint(0, len(q) - 1)
            pos = q[i]
            q[i] = q[-1]
            del q[-1]
            process_pos(pos)
        self.viewer.defer_callback(update_view)