Example #1
def test_ingest_segmentation():
    delete_layer()
    storage, data = create_layer(size=(256, 256, 128, 1),
                                 offset=(0, 0, 0),
                                 layer_type='segmentation')
    cv = CloudVolume(storage.layer_path)
    assert len(cv.scales) == 3
    assert len(cv.available_mips) == 3

    slice64 = np.s_[0:64, 0:64, 0:64]

    cv.mip = 0
    assert np.all(cv[slice64] == data[slice64])

    assert len(cv.available_mips) == 3
    assert np.array_equal(cv.mip_volume_size(0), [256, 256, 128])
    assert np.array_equal(cv.mip_volume_size(1), [128, 128, 128])
    assert np.array_equal(cv.mip_volume_size(2), [64, 64, 128])

    data_ds1, = tinybrain.downsample_segmentation(data, factor=[2, 2, 1])
    cv.mip = 1
    assert np.all(cv[slice64] == data_ds1[slice64])

    data_ds2, = tinybrain.downsample_segmentation(data, factor=[4, 4, 1])
    cv.mip = 2
    assert np.all(cv[slice64] == data_ds2[slice64])
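A note on the trailing-comma unpacking above: tinybrain's downsample functions return a list containing one array per generated mip level, so with the default num_mips=1 the list holds exactly one element. A minimal sketch, reusing the data array from the test:

mips = tinybrain.downsample_segmentation(data, factor=(2, 2, 1), num_mips=2)
assert len(mips) == 2  # one array per mip: the 2x and the 4x reduction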
Example #2
    def __call__(self, chunk):
        assert chunk.ndim == 3
        voxel_offset = chunk.voxel_offset

        num_mips = self.stop_mip - self.chunk_mip
        # tinybrain uses F (Fortran) order and requires a 4D array,
        # so reverse the axis order and add a trailing channel dimension
        chunk2 = np.transpose(chunk)
        chunk2 = np.expand_dims(chunk2, 3)

        # treat float and uint8 data as images (averaging); everything else
        # is assumed to be segmentation labels (mode pooling)
        if np.issubdtype(chunk.dtype, np.floating) or chunk.dtype == np.uint8:
            pyramid = tinybrain.downsample_with_averaging(
                chunk2, factor=self.factor[::-1], num_mips=num_mips)
        else:
            pyramid = tinybrain.downsample_segmentation(
                chunk2, factor=self.factor[::-1], num_mips=num_mips)

        for mip in range(self.start_mip, self.stop_mip):
            # pyramid[0] is already one mip below chunk_mip, hence the extra -1
            downsampled_chunk = pyramid[mip - self.chunk_mip - 1]
            # scale the offset by each axis' cumulative downsample factor;
            # the z factor is typically 1, so only the x,y coordinates shrink
            offset = np.divide(
                voxel_offset,
                np.asarray([
                    self.factor[0]**(mip - self.chunk_mip),
                    self.factor[1]**(mip - self.chunk_mip),
                    self.factor[2]**(mip - self.chunk_mip)
                ]))
            bbox = Bbox.from_delta(offset, downsampled_chunk.shape[0:3][::-1])
            # upload the downsampled chunk; the volume is indexed in F order,
            # so reverse the slice order
            self.vols[mip][bbox.to_slices()[::-1]] = downsampled_chunk
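The transpose at the top of __call__ is the key detail: the incoming chunk is indexed z,y,x (C order), while tinybrain expects x,y,z,channel in F order, which is also why the factor is reversed with factor[::-1]. A small self-contained illustration (the shape here is hypothetical):

import numpy as np

chunk = np.zeros((16, 32, 64), dtype=np.uint32)  # z, y, x as the operator receives it
chunk2 = np.expand_dims(np.transpose(chunk), 3)  # x, y, z, channel for tinybrain
assert chunk2.shape == (64, 32, 16, 1)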
Example #3
File: tasks.py Project: sandyhider/igneous
def downsample_and_upload(
    image, bounds, vol, ds_shape, 
    mip=0, axis='z', skip_first=False,
    sparse=False
  ):
    ds_shape = min2(vol.volume_size, ds_shape[:3])

    # Sometimes we downsample a base layer of 512x512 into underlying
    # chunks of 64x64, which permits more scales.
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)
    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    # Need to use ds_shape here. Using image bounds means truncated
    # edges won't generate as many mip levels
    fullscales = downsample_scales.compute_plane_downsampling_scales(
      size=ds_shape,
      preserve_axis=axis,
      max_downsampled_size=int(min(*underlying_shape)),
    )
    factors = downsample_scales.scale_series_to_downsample_factors(fullscales)

    if len(factors) == 0:
      print("No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}".format(
          image.shape, ds_shape, vol.volume_size, bounds)
      )

    vol.mip = mip
    if not skip_first:
      vol[bounds.to_slices()] = image

    if len(factors) == 0:
      return

    num_mips = len(factors)

    mips = []
    if vol.layer_type == 'image':
      mips = tinybrain.downsample_with_averaging(image, factors[0], num_mips=num_mips)
    elif vol.layer_type == 'segmentation':
      mips = tinybrain.downsample_segmentation(
        image, factors[0], 
        num_mips=num_mips, sparse=sparse
      )
    else:
      mips = tinybrain.downsample_with_striding(image, factors[0], num_mips=num_mips)

    new_bounds = bounds.clone()
   
    for factor3 in factors:
      vol.mip += 1
      new_bounds //= factor3
      mipped = mips.pop(0)
      new_bounds.maxpt = new_bounds.minpt + Vec(*mipped.shape[:3])
      vol[new_bounds] = mipped
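The loop at the end walks down the mip chain: the bounding box is divided in place by each factor, then maxpt is recomputed from the actual array shape so that truncated edge chunks keep consistent bounds. A sketch of that bookkeeping, assuming cloudvolume.lib's Bbox and Vec behave as in the snippet above (the numbers are hypothetical):

from cloudvolume.lib import Bbox, Vec

bounds = Bbox((0, 0, 0), (500, 500, 64))  # a truncated edge chunk
bounds //= Vec(2, 2, 1)                   # same region one mip down
# resync maxpt with the shape of the actual downsampled array
bounds.maxpt = bounds.minpt + Vec(250, 250, 64)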
Example #4
def downsample_and_upload(image,
                          bounds,
                          vol,
                          ds_shape,
                          mip=0,
                          axis='z',
                          skip_first=False,
                          sparse=False,
                          factor=None):
    ds_shape = min2(vol.volume_size, ds_shape[:3])
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    chunk_size = vol.meta.chunk_size(underlying_mip).astype(np.float32)

    if factor is None:
        factor = downsample_scales.axis_to_factor(axis)
    factors = downsample_scales.compute_factors(ds_shape, factor, chunk_size)

    if len(factors) == 0:
        print(
            "No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}"
            .format(image.shape, ds_shape, vol.volume_size, bounds))

    vol.mip = mip
    if not skip_first:
        vol[bounds.to_slices()] = image

    if len(factors) == 0:
        return

    num_mips = len(factors)

    mips = []
    if vol.layer_type == 'image':
        mips = tinybrain.downsample_with_averaging(image,
                                                   factors[0],
                                                   num_mips=num_mips)
    elif vol.layer_type == 'segmentation':
        mips = tinybrain.downsample_segmentation(image,
                                                 factors[0],
                                                 num_mips=num_mips,
                                                 sparse=sparse)
    else:
        mips = tinybrain.downsample_with_striding(image,
                                                  factors[0],
                                                  num_mips=num_mips)

    new_bounds = bounds.clone()

    for factor3 in factors:
        vol.mip += 1
        new_bounds //= factor3
        mipped = mips.pop(0)
        new_bounds.maxpt = new_bounds.minpt + Vec(*mipped.shape[:3])
        vol[new_bounds] = mipped
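Example #4 differs from Example #3 mainly in how the factor list is built: when no factor is passed, one is derived from the preserved axis. A hedged sketch of that path; the import path and the (2, 2, 1) return value are assumptions, not confirmed by the snippet:

import numpy as np
from igneous import downsample_scales  # assumed import path

ds_shape = (512, 512, 64)
chunk_size = np.array([64, 64, 64], dtype=np.float32)

# assumption: axis_to_factor preserves the named axis, e.g. 'z' -> (2, 2, 1)
factor = downsample_scales.axis_to_factor('z')
factors = downsample_scales.compute_factors(ds_shape, factor, chunk_size)
# e.g. three (2, 2, 1) steps: 512 -> 256 -> 128 -> 64 in x and y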
Example #5
def test_accelerated_vs_numpy_mode_pooling():
    image = np.random.randint(0, 255, size=(512, 512, 6, 1), dtype=np.uint8)

    accimg = tinybrain.accelerated.mode_pooling_2x2(image)
    npimg = tinybrain.downsample.countless2d(image)
    assert np.all(accimg == npimg)

    # The accelerated version and the numpy version handle edges
    # slightly differently, so we only compare an even power-of-two
    # size where no edge is involved. We also can't run iterated
    # downsamples of the bare numpy version because it would suffer
    # integer truncation, and we can't compare above mip 4 because
    # the accelerated version exhibits integer truncation there.

    mips = tinybrain.downsample_segmentation(image, (2, 2, 1), num_mips=4)
    npimg = tinybrain.downsample.downsample_segmentation_2d(image, (16, 16, 1),
                                                            sparse=False)

    assert np.all(mips[-1] == npimg)
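Why comparing mips[-1] against a single (16, 16, 1) downsample is valid: four successive (2, 2, 1) reductions compose to (2**4, 2**4, 1) = (16, 16, 1). A quick shape check:

import numpy as np
import tinybrain

image = np.random.randint(0, 255, size=(512, 512, 6, 1), dtype=np.uint8)
mips = tinybrain.downsample_segmentation(image, (2, 2, 1), num_mips=4)
assert mips[-1].shape == (32, 32, 6, 1)  # 512 / 2**4 == 32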