Example #1
def test_ingest_image():
    delete_layer()
    storage, data = create_layer(size=(256, 256, 128, 1),
                                 offset=(0, 0, 0),
                                 layer_type='image')
    cv = CloudVolume(storage.layer_path)
    assert len(cv.scales) == 3
    assert len(cv.available_mips) == 3

    slice64 = np.s_[0:64, 0:64, 0:64]

    cv.mip = 0
    assert np.all(cv[slice64] == data[slice64])

    assert len(cv.available_mips) == 3
    assert np.array_equal(cv.mip_volume_size(0), [256, 256, 128])
    assert np.array_equal(cv.mip_volume_size(1), [128, 128, 128])
    assert np.array_equal(cv.mip_volume_size(2), [64, 64, 128])

    slice64 = np.s_[0:64, 0:64, 0:64]

    cv.mip = 0
    assert np.all(cv[slice64] == data[slice64])

    data_ds1, = tinybrain.downsample_with_averaging(data, factor=[2, 2, 1, 1])
    cv.mip = 1
    assert np.all(cv[slice64] == data_ds1[slice64])

    data_ds2, = tinybrain.downsample_with_averaging(data, factor=[4, 4, 1, 1])
    cv.mip = 2
    assert np.all(cv[slice64] == data_ds2[slice64])
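The `data_ds1, =` unpacking used above works because tinybrain.downsample_with_averaging returns a list of mip levels; with the default num_mips=1 there is exactly one entry to unpack. A minimal standalone sketch of that pattern (the array shape is illustrative, not taken from the test):

import numpy as np
import tinybrain

# downsample_with_averaging returns a list of mips; the trailing comma
# unpacks the single entry produced by the default num_mips=1.
volume = np.zeros((256, 256, 128, 1), dtype=np.uint8, order='F')
mip1, = tinybrain.downsample_with_averaging(volume, factor=[2, 2, 1, 1])
print(mip1.shape)  # (128, 128, 128, 1)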
Example #2
def process_slice(bias_slice, z, data_orig_path, data_bc_path):
    data_vol = CloudVolume(data_orig_path,
                           parallel=False,
                           progress=False,
                           fill_missing=True)
    data_vol_bc = CloudVolume(data_bc_path,
                              parallel=False,
                              progress=False,
                              fill_missing=True)
    data_vols_bc = [
        CloudVolume(data_bc_path, mip=i, parallel=False)
        for i in range(len(data_vol_bc.scales))
    ]
    # convert spacing from nm to um
    new_spacing = np.array(data_vol.scales[0]['resolution'][:2]) / 1000
    bias_upsampled_sitk = imgResample(bias_slice,
                                      new_spacing,
                                      size=data_vol.scales[0]['size'][:2])
    bias_upsampled = sitk.GetArrayFromImage(bias_upsampled_sitk)
    data_native = np.squeeze(data_vol[:, :, z]).T
    data_corrected = data_native * bias_upsampled
    img_pyramid = tinybrain.downsample_with_averaging(
        data_corrected.T[:, :, None],
        factor=(2, 2, 1),
        num_mips=len(data_vol_bc.scales) - 1)
    data_vol_bc[:, :, z] = data_corrected.T.astype('uint16')[:, :, None]
    for i in range(len(data_vols_bc) - 1):
        data_vols_bc[i + 1][:, :, z] = img_pyramid[i].astype('uint16')
Example #3
File: cutout.py Project: aixioma/chunkflow
    def _validate_chunk(self, chunk):
        """
        check that all the input voxels was downloaded without black region  
        We have found some black regions in previous inference run, 
        so hopefully this will solve the problem.
        """
        if chunk.ndim == 4 and chunk.shape[0] > 1:
            chunk = chunk[0, :, :, :]

        chunk_mip = self.mip
        if self.verbose:
            print('validate chunk in mip {}'.format(self.validate_mip))
        assert self.validate_mip >= chunk_mip
        # only use the region that corresponds to the higher mip level
        # clamp the surrounding regions in XY plane
        # this assumes that the input dataset was downsampled starting from the
        # beginning offset in the info file
        global_offset = chunk.global_offset

        # factor3 follows xyz order in CloudVolume
        factor3 = np.array([
            2**(self.validate_mip - chunk_mip),
            2**(self.validate_mip - chunk_mip),
            1,
        ], dtype=np.int32)
        clamped_offset = tuple(go + f - (go - vo) % f for go, vo, f in zip(
            global_offset[::-1], self.vol.voxel_offset, factor3))
        clamped_stop = tuple(
            go + s - (go + s - vo) % f
            for go, s, vo, f in zip(global_offset[::-1], chunk.shape[::-1],
                                    self.vol.voxel_offset, factor3))
        clamped_slices = tuple(
            slice(o, s) for o, s in zip(clamped_offset, clamped_stop))
        clamped_bbox = Bbox.from_slices(clamped_slices)
        clamped_input = chunk.cutout(clamped_slices[::-1])
        # transform to xyz order
        clamped_input = np.transpose(clamped_input)
        # get the corresponding bounding box for validation
        validate_bbox = self.vol.bbox_to_mip(clamped_bbox,
                                             mip=chunk_mip,
                                             to_mip=self.validate_mip)
        #validate_bbox = clamped_bbox // factor3

        # downsample the input using averaging
        # keep the z as it is since the mip only applies to the xy plane
        # recursively downsample the input
        # if we downsampled directly, the result would not match the recursive one
        # because of the rounding error of integer division
        for _ in range(self.validate_mip - chunk_mip):
            clamped_input = downsample_with_averaging(clamped_input, (2, 2, 1))

        # validation by template matching
        assert validate_by_template_matching(clamped_input)

        validate_input = self.validate_vol[validate_bbox.to_slices()]
        if validate_input.shape[3] == 1:
            validate_input = np.squeeze(validate_input, axis=3)

        # use the validate input to check the downloaded input
        assert np.alltrue(validate_input == clamped_input)
Example #4
    def __call__(self, chunk):
        assert 3 == chunk.ndim
        voxel_offset = chunk.voxel_offset

        num_mips = self.stop_mip - self.chunk_mip
        # tinybrain uses F order and requires a 4D array!
        chunk2 = np.transpose(chunk)
        # chunk2 = np.reshape(chunk2, (*chunk2.shape, 1))
        chunk2 = np.expand_dims(chunk2, 3)

        if np.issubdtype(chunk.dtype, np.floating) or chunk.dtype == np.uint8:
            pyramid = tinybrain.downsample_with_averaging(
                chunk2, factor=self.factor[::-1], num_mips=num_mips)
        else:
            pyramid = tinybrain.downsample_segmentation(
                chunk2, factor=self.factor[::-1], num_mips=num_mips)

        for mip in range(self.start_mip, self.stop_mip):
            # the first chunk in pyramid is already downsampled!
            downsampled_chunk = pyramid[mip - self.chunk_mip - 1]
            # compute new offset, only downsample the y,x dimensions
            offset = np.divide(
                voxel_offset,
                np.asarray([
                    self.factor[0]**(mip - self.chunk_mip),
                    self.factor[1]**(mip - self.chunk_mip),
                    self.factor[2]**(mip - self.chunk_mip)
                ]))
            bbox = Bbox.from_delta(offset, downsampled_chunk.shape[0:3][::-1])
            # upload downsampled chunk, note that we should use F order in the indexing
            self.vols[mip][bbox.to_slices()[::-1]] = downsampled_chunk
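A minimal standalone sketch of the transpose-and-reverse pattern used above, assuming a zyx chunk and a zyx downsample factor of (1, 2, 2); the shape, dtype, and factor are illustrative, not taken from the operator:

import numpy as np
import tinybrain

# hypothetical zyx chunk and zyx downsample factor, mirroring the operator above
chunk = np.random.randint(0, 1000, size=(64, 128, 128), dtype=np.uint32)
factor = (1, 2, 2)  # zyx

# transpose to xyz (which also yields F order) and add the 4th dimension
chunk2 = np.expand_dims(np.transpose(chunk), 3)
pyramid = tinybrain.downsample_segmentation(chunk2, factor=factor[::-1], num_mips=2)
print([m.shape for m in pyramid])  # [(64, 64, 64, 1), (32, 32, 64, 1)]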
Example #5
def test_downsample_no_offset(compression_method):
    delete_layer()
    storage, data = create_layer(size=(1024, 1024, 128, 1), offset=(0, 0, 0))
    cv = CloudVolume(storage.layer_path)
    assert len(cv.scales) == 1
    assert len(cv.available_mips) == 1

    cv.commit_info()

    tq = MockTaskQueue()
    tasks = create_downsampling_tasks(storage.layer_path,
                                      mip=0,
                                      num_mips=4,
                                      compress=compression_method)
    tq.insert_all(tasks)

    cv.refresh_info()

    assert len(cv.available_mips) == 5
    assert np.array_equal(cv.mip_volume_size(0), [1024, 1024, 128])
    assert np.array_equal(cv.mip_volume_size(1), [512, 512, 128])
    assert np.array_equal(cv.mip_volume_size(2), [256, 256, 128])
    assert np.array_equal(cv.mip_volume_size(3), [128, 128, 128])
    assert np.array_equal(cv.mip_volume_size(4), [64, 64, 128])

    slice64 = np.s_[0:64, 0:64, 0:64]

    cv.mip = 0
    assert np.all(cv[slice64] == data[slice64])

    data_ds1, = tinybrain.downsample_with_averaging(data, factor=[2, 2, 1, 1])
    cv.mip = 1
    assert np.all(cv[slice64] == data_ds1[slice64])

    data_ds2, = tinybrain.downsample_with_averaging(data, factor=[4, 4, 1, 1])
    cv.mip = 2
    assert np.all(cv[slice64] == data_ds2[slice64])

    data_ds3, = tinybrain.downsample_with_averaging(data, factor=[8, 8, 1, 1])
    cv.mip = 3
    assert np.all(cv[slice64] == data_ds3[slice64])

    data_ds4, = tinybrain.downsample_with_averaging(data,
                                                    factor=[16, 16, 1, 1])
    cv.mip = 4
    assert np.all(cv[slice64] == data_ds4[slice64])
Example #6
File: tasks.py Project: sandyhider/igneous
def downsample_and_upload(
    image, bounds, vol, ds_shape, 
    mip=0, axis='z', skip_first=False,
    sparse=False
  ):
    ds_shape = min2(vol.volume_size, ds_shape[:3])

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)
    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    # Need to use ds_shape here. Using image bounds means truncated
    # edges won't generate as many mip levels
    fullscales = downsample_scales.compute_plane_downsampling_scales(
      size=ds_shape,
      preserve_axis=axis,
      max_downsampled_size=int(min(*underlying_shape)),
    )
    factors = downsample_scales.scale_series_to_downsample_factors(fullscales)

    if len(factors) == 0:
      print("No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}".format(
          image.shape, ds_shape, vol.volume_size, bounds)
      )

    vol.mip = mip
    if not skip_first:
      vol[bounds.to_slices()] = image

    if len(factors) == 0:
      return

    num_mips = len(factors)

    mips = []
    if vol.layer_type == 'image':
      mips = tinybrain.downsample_with_averaging(image, factors[0], num_mips=num_mips)
    elif vol.layer_type == 'segmentation':
      mips = tinybrain.downsample_segmentation(
        image, factors[0], 
        num_mips=num_mips, sparse=sparse
      )
    else:
      mips = tinybrain.downsample_with_striding(image, factors[0], num_mips=num_mips)

    new_bounds = bounds.clone()
   
    for factor3 in factors:
      vol.mip += 1
      new_bounds //= factor3
      mipped = mips.pop(0)
      new_bounds.maxpt = new_bounds.minpt + Vec(*mipped.shape[:3])
      vol[new_bounds] = mipped
Example #7
def downsample_and_upload(image,
                          bounds,
                          vol,
                          ds_shape,
                          mip=0,
                          axis='z',
                          skip_first=False,
                          sparse=False,
                          factor=None):
    ds_shape = min2(vol.volume_size, ds_shape[:3])
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    chunk_size = vol.meta.chunk_size(underlying_mip).astype(np.float32)

    if factor is None:
        factor = downsample_scales.axis_to_factor(axis)
    factors = downsample_scales.compute_factors(ds_shape, factor, chunk_size)

    if len(factors) == 0:
        print(
            "No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}"
            .format(image.shape, ds_shape, vol.volume_size, bounds))

    vol.mip = mip
    if not skip_first:
        vol[bounds.to_slices()] = image

    if len(factors) == 0:
        return

    num_mips = len(factors)

    mips = []
    if vol.layer_type == 'image':
        mips = tinybrain.downsample_with_averaging(image,
                                                   factors[0],
                                                   num_mips=num_mips)
    elif vol.layer_type == 'segmentation':
        mips = tinybrain.downsample_segmentation(image,
                                                 factors[0],
                                                 num_mips=num_mips,
                                                 sparse=sparse)
    else:
        mips = tinybrain.downsample_with_striding(image,
                                                  factors[0],
                                                  num_mips=num_mips)

    new_bounds = bounds.clone()

    for factor3 in factors:
        vol.mip += 1
        new_bounds //= factor3
        mipped = mips.pop(0)
        new_bounds.maxpt = new_bounds.minpt + Vec(*mipped.shape[:3])
        vol[new_bounds] = mipped
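A hedged usage sketch of downsample_and_upload as defined above; the cloud path, bounds, and image contents are placeholders, not taken from the project:

import numpy as np
from cloudvolume import CloudVolume, Bbox

# placeholder layer path and region; assumes the layer's info file already exists
vol = CloudVolume('gs://example-bucket/example-layer', mip=0)
bounds = Bbox((0, 0, 0), (512, 512, 64))
image = np.zeros((512, 512, 64, 1), dtype=vol.dtype)

# writes the base image at mip 0, then uploads the downsampled mips above it
downsample_and_upload(image, bounds, vol, ds_shape=image.shape[:3], mip=0, axis='z')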
Example #8
def test_downsample_no_offset_2x2x2():
    delete_layer()
    cf, data = create_layer(size=(512,512,512,1), offset=(0,0,0))
    cv = CloudVolume(cf.cloudpath)
    assert len(cv.scales) == 1
    assert len(cv.available_mips) == 1

    cv.commit_info()

    tq = MockTaskQueue()
    tasks = create_downsampling_tasks(
        cf.cloudpath, mip=0, num_mips=3, 
        compress=None, factor=(2,2,2)
    )
    tq.insert_all(tasks)

    cv.refresh_info()

    assert len(cv.available_mips) == 4
    assert np.array_equal(cv.mip_volume_size(0), [ 512, 512, 512 ])
    assert np.array_equal(cv.mip_volume_size(1), [ 256, 256, 256 ])
    assert np.array_equal(cv.mip_volume_size(2), [ 128, 128, 128 ])
    assert np.array_equal(cv.mip_volume_size(3), [  64,  64,  64 ])
    
    slice64 = np.s_[0:64, 0:64, 0:64]

    cv.mip = 0
    assert np.all(cv[slice64] == data[slice64])

    data_ds1, = tinybrain.downsample_with_averaging(data, factor=[2, 2, 2, 1])
    cv.mip = 1
    assert np.all(cv[slice64] == data_ds1[slice64])

    data_ds2, = tinybrain.downsample_with_averaging(data, factor=[4, 4, 4, 1])
    cv.mip = 2
    assert np.all(cv[slice64] == data_ds2[slice64])

    data_ds3, = tinybrain.downsample_with_averaging(data, factor=[8, 8, 8, 1])
    cv.mip = 3
    assert np.all(cv[slice64] == data_ds3[slice64])
Example #9
def test_downsample_with_offset():
    delete_layer()
    storage, data = create_layer(size=(512, 512, 128, 1), offset=(3, 7, 11))
    cv = CloudVolume(storage.layer_path)
    assert len(cv.scales) == 1
    assert len(cv.available_mips) == 1

    cv.commit_info()

    tq = MockTaskQueue()
    tasks = create_downsampling_tasks(storage.layer_path, mip=0, num_mips=3)
    tq.insert_all(tasks)

    cv.refresh_info()

    assert len(cv.available_mips) == 4
    assert np.array_equal(cv.mip_volume_size(0), [512, 512, 128])
    assert np.array_equal(cv.mip_volume_size(1), [256, 256, 128])
    assert np.array_equal(cv.mip_volume_size(2), [128, 128, 128])
    assert np.array_equal(cv.mip_volume_size(3), [64, 64, 128])

    assert np.all(cv.mip_voxel_offset(3) == (0, 0, 11))

    cv.mip = 0
    assert np.all(cv[3:67, 7:71, 11:75] == data[0:64, 0:64, 0:64])

    data_ds1, = tinybrain.downsample_with_averaging(data, factor=[2, 2, 1, 1])
    cv.mip = 1
    assert np.all(cv[1:33, 3:35, 11:75] == data_ds1[0:32, 0:32, 0:64])

    data_ds2, = tinybrain.downsample_with_averaging(data, factor=[4, 4, 1, 1])
    cv.mip = 2
    assert np.all(cv[0:16, 1:17, 11:75] == data_ds2[0:16, 0:16, 0:64])

    data_ds3, = tinybrain.downsample_with_averaging(data, factor=[8, 8, 1, 1])
    cv.mip = 3
    assert np.all(cv[0:8, 0:8, 11:75] == data_ds3[0:8, 0:8, 0:64])
Example #10
def test_accelerated_vs_numpy_avg_pooling_2x2(dtype):
    image = np.random.randint(0, 255, size=(512, 512, 6),
                              dtype=np.uint8).astype(dtype)
    imagef = np.asfortranarray(image)

    accimg = tinybrain.accelerated.average_pooling_2x2(imagef)
    npimg = tinybrain.downsample.downsample_with_averaging_numpy(
        imagef, (2, 2, 1))
    assert np.all(accimg == npimg)

    # There are slight differences in how the accelerated version and
    # the numpy version handle the edge so we only compare a nice
    # even power of two where there's no edge. We also can't do iterated
    # downsamples of the (naked) numpy version because it will result in
    # integer truncation. We can't compare above mip 4 because the accelerated
    # version will exhibit integer truncation.

    mips = tinybrain.downsample_with_averaging(imagef, (2, 2, 1), num_mips=4)
    npimg = tinybrain.downsample.downsample_with_averaging_numpy(
        imagef, (16, 16, 1))

    assert np.all(mips[-1] == npimg)
Example #11
def transfer_data(request):
    if not CHECKER:
        data = np.random.randint(0xFF, size=request.param, dtype=np.uint8)
    else:
        data = np.zeros((512, 512, 128), dtype=np.uint8)
        i = 1
        for x in range(8):
            for y in range(8):
                for z in range(2):
                    data[64 * x:64 * (x + 1), 64 * y:64 * (y + 1),
                         64 * z:64 * (z + 1)] = i
                    i += 1

    ds_data = tinybrain.downsample_with_averaging(data,
                                                  factor=[2, 2, 1, 1],
                                                  num_mips=5)
    ds_data.insert(0, data)

    for i in range(len(ds_data)):
        while ds_data[i].ndim < 4:
            ds_data[i] = ds_data[i][..., np.newaxis]

    return ds_data
Example #12
def process_slice(bias_slice, z, data_orig_path, data_bc_path):
    """Correct and upload a single slice of data

    Args:
        bias_slice (sitk.Image): Slice of illumination correction
        z (int): Z slice of data to apply correction to
        data_orig_path (str): S3 path to source data that needs to be corrected
        data_bc_path (str): S3 path where corrected data will be stored
    """
    data_vol = CloudVolume(data_orig_path,
                           parallel=False,
                           progress=False,
                           fill_missing=True)
    data_vol_bc = CloudVolume(data_bc_path,
                              parallel=False,
                              progress=False,
                              fill_missing=True)
    data_vols_bc = [
        CloudVolume(data_bc_path, mip=i, parallel=False)
        for i in range(len(data_vol_bc.scales))
    ]
    # convert spacing from nm to um
    new_spacing = np.array(data_vol.scales[0]["resolution"][:2]) / 1000
    bias_upsampled_sitk = imgResample(bias_slice,
                                      new_spacing,
                                      size=data_vol.scales[0]["size"][:2])
    bias_upsampled = sitk.GetArrayFromImage(bias_upsampled_sitk)
    data_native = np.squeeze(data_vol[:, :, z]).T
    data_corrected = data_native * bias_upsampled
    img_pyramid = tinybrain.downsample_with_averaging(
        data_corrected.T[:, :, None],
        factor=(2, 2, 1),
        num_mips=len(data_vol_bc.scales) - 1,
    )
    data_vol_bc[:, :, z] = data_corrected.T.astype("uint16")[:, :, None]
    for i in range(len(data_vols_bc) - 1):
        data_vols_bc[i + 1][:, :, z] = img_pyramid[i].astype("uint16")
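A hedged sketch of how process_slice might be driven over every z slice; the S3 paths are placeholders and bias_slices is an assumed per-z list of sitk.Image objects produced by an earlier illumination-estimation step:

from cloudvolume import CloudVolume

# placeholder S3 paths; bias_slices is an assumed list of sitk.Image objects
data_orig_path = 's3://example-bucket/raw'
data_bc_path = 's3://example-bucket/corrected'

vol = CloudVolume(data_orig_path, parallel=False, progress=False, fill_missing=True)
num_slices = vol.scales[0]['size'][2]

for z in range(num_slices):
    process_slice(bias_slices[z], z, data_orig_path, data_bc_path)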