Example No. 1
    def test_downsample_upload(self):
        print('test downsample and upload...')
        # compute parameters
        mip = 0
        size = (16, 512, 512)

        # create image dataset using cloud-volume
        img = np.random.randint(np.iinfo(np.uint32).max, size=size)
        img = img.astype(np.uint32)
        chunk = Chunk(img, global_offset=[2, 32, 32])
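        # note: Chunk's global_offset is in z, y, x order while CloudVolume's voxel_offset is x, y, z,
        # hence the np.transpose and the reversed offset in the call below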
        # save the input to disk
        volume_path = 'file:///tmp/test/cutout/' + generate_random_string()
        CloudVolume.from_numpy(np.transpose(img),
                               vol_path=volume_path,
                               voxel_offset=(32, 32, 2),
                               chunk_size=(32, 32, 4),
                               max_mip=4,
                               layer_type='segmentation')

        operator = DownsampleUploadOperator(volume_path,
                                            chunk_mip=0,
                                            start_mip=1,
                                            stop_mip=4)
        operator(chunk)
        shutil.rmtree('/tmp/test')
Example No. 2
def test_from_numpy():
    arr = np.random.randint(0, high=256, size=(128, 128, 128))
    arr = np.asarray(arr, dtype=np.uint8)
    vol = CloudVolume.from_numpy(arr, max_mip=1)
    arr2 = vol[:, :, :]
    assert np.all(arr == np.squeeze(arr2))  # squeeze drops the trailing channel axis before comparing

    arr = np.random.randn(128, 128, 128, 3)
    arr = np.asarray(arr, dtype=np.float32)
    vol = CloudVolume.from_numpy(arr, max_mip=1)
    arr2 = vol[:, :, :]
    assert np.all(arr == arr2)
    shutil.rmtree('/tmp/image')
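
The cleanup above removes /tmp/image because that is from_numpy's default target path. A minimal round-trip sketch in the same spirit (the path and array size are illustrative, not from these tests): from_numpy writes the array as a precomputed layer and returns a CloudVolume that can be read back.

import shutil
import numpy as np
from cloudvolume import CloudVolume

arr = np.random.randint(0, 256, size=(64, 64, 64), dtype=np.uint8)
vol = CloudVolume.from_numpy(arr, vol_path='file:///tmp/from_numpy_demo')
# cutouts carry a trailing channel axis; squeeze it before comparing
assert np.all(arr == np.squeeze(vol[:, :, :]))
shutil.rmtree('/tmp/from_numpy_demo')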
Example No. 3
def test_save_image():
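    # `size` and `voxel_offset` are module-level constants in the original test file (not shown in this excerpt)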

    chunk = Chunk.create(size=size, dtype=np.uint8, voxel_offset=voxel_offset)
    tempdir = tempfile.mkdtemp()
    volume_path = 'file://' + tempdir
    print('construct volume from numpy array in ', tempdir)
    vol = CloudVolume.from_numpy(chunk.transpose(),
                                 vol_path=volume_path,
                                 voxel_offset=voxel_offset[::-1],
                                 chunk_size=(32, 32, 4),
                                 max_mip=4,
                                 layer_type='image')

    print('construct save operator')
    op = WritePrecomputedOperator(volume_path,
                                  0,
                                  upload_log=True,
                                  create_thumbnail=False,
                                  name='save')

    print('really save the chunk.')
    op(chunk, log={'timer': {'save': 43}})

    sleep(2)
    shutil.rmtree(tempdir)
Example No. 4
def test_cache_compression_setting():
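    # `TEST_NUMBER` and `delete_layer` are helpers defined elsewhere in the original test module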
    image = np.zeros(shape=(128, 128, 128, 1), dtype=np.uint8)
    dirpath = '/tmp/cloudvolume/caching-validity-' + str(TEST_NUMBER)
    layer_path = 'file://' + dirpath

    vol = CloudVolume.from_numpy(image,
                                 voxel_offset=(1, 1, 1),
                                 vol_path=layer_path,
                                 layer_type='image',
                                 resolution=(1, 1, 1),
                                 encoding='raw')
    vol.cache.enabled = True
    vol.cache.flush()
    vol.commit_info()

    vol.cache.compress = None
    vol[:] = image
    assert all([os.path.splitext(x)[1] == '.gz' for x in vol.cache.list()])
    vol.cache.flush()

    vol.cache.compress = True
    vol[:] = image
    assert all([os.path.splitext(x)[1] == '.gz' for x in vol.cache.list()])
    vol.cache.flush()

    vol.cache.compress = False
    vol[:] = image
    assert all([os.path.splitext(x)[1] == '' for x in vol.cache.list()])
    vol.cache.flush()

    delete_layer(dirpath)
Example No. 5
def test_info_provenance_cache():
    image = np.zeros(shape=(128, 128, 128, 1), dtype=np.uint8)
    vol = CloudVolume.from_numpy(
        image,
        voxel_offset=(0, 0, 0),
        vol_path='gs://seunglab-test/cloudvolume/caching',
        layer_type='image',
        resolution=(1, 1, 1),
        encoding='raw')

    # Test Info
    vol.cache.enabled = True
    vol.cache.flush()
    info = vol.refresh_info()
    assert info is not None

    with open(os.path.join(vol.cache.path, 'info'), 'r') as infof:
        info = infof.read()
        info = json.loads(info)

    with open(os.path.join(vol.cache.path, 'info'), 'w') as infof:
        infof.write(json.dumps({'wow': 'amaze'}))

    info = vol.refresh_info()
    assert info == {'wow': 'amaze'}
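    # with the cache disabled, refresh_info refetches from the source instead of using the tampered cached copy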
    vol.cache.enabled = False
    info = vol.refresh_info()
    assert info != {'wow': 'amaze'}

    infopath = os.path.join(vol.cache.path, 'info')
    assert os.path.exists(infopath)

    vol.cache.flush_info()
    assert not os.path.exists(infopath)
    vol.cache.flush_info()  # assert no error by double delete

    # Test Provenance
    vol.cache.enabled = True
    vol.cache.flush()
    prov = vol.refresh_provenance()
    assert prov is not None

    with open(os.path.join(vol.cache.path, 'provenance'), 'r') as provf:
        prov = provf.read()
        prov = json.loads(prov)

    with open(os.path.join(vol.cache.path, 'provenance'), 'w') as provf:
        prov['description'] = 'wow'
        provf.write(json.dumps(prov))

    prov = vol.refresh_provenance()
    assert prov['description'] == 'wow'
    vol.cache.enabled = False
    prov = vol.refresh_provenance()
    assert prov['description'] == ''

    provpath = os.path.join(vol.cache.path, 'provenance')
    vol.cache.flush_provenance()
    assert not os.path.exists(provpath)
    vol.cache.flush_provenance()  # assert no error by double delete
Example No. 6
def hierarchical_downsample(chunk, layer_type='segmentation'):
    # save the input to disk
    tempdir = tempfile.mkdtemp()
    volume_path = 'file://' + tempdir
    CloudVolume.from_numpy(chunk.transpose(),
                           vol_path=volume_path,
                           voxel_offset=(32, 32, 2),
                           chunk_size=(32, 32, 4),
                           max_mip=4,
                           layer_type=layer_type)

    operator = DownsampleUploadOperator(volume_path,
                                        chunk_mip=0,
                                        start_mip=1,
                                        stop_mip=4)
    operator(chunk)
    shutil.rmtree(tempdir)
Example No. 7
    def setUp(self):
        print('test volume cutout...')
        # compute parameters
        self.mip = 0
        self.size = (36, 448, 448)

        # create image dataset using cloud-volume
        img = np.random.randint(0, 256, size=self.size)
        self.img = img.astype(np.uint8)
        # save the input to disk
        self.volume_path = 'file:///tmp/test/cutout/' + generate_random_string()
        CloudVolume.from_numpy(np.transpose(self.img),
                               vol_path=self.volume_path)

        # prepare blackout section ids
        self.blackout_section_ids = [17, 20]
        ids = {'section_ids': self.blackout_section_ids}
        with Storage(self.volume_path) as stor:
            stor.put_json('blackout_section_ids.json', ids)
Example No. 8
def src_cv(transfer_data):
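    # `transfer_data`, `srcpath`, `rmsrc`, and `rmdest` are fixtures/helpers defined elsewhere in the original test module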
    rmsrc()
    rmdest()
    return CloudVolume.from_numpy(
        transfer_data[0],
        vol_path=srcpath,
        resolution=(1, 1, 1),
        voxel_offset=(0, 0, 0),
        chunk_size=(64, 64, 64),
        layer_type="image",
        max_mip=0,
    )
Example No. 9
def create_layer(size,
                 offset,
                 layer_type="image",
                 layer_name='layer',
                 dtype=None):

    default = lambda dt: dtype or dt

    if layer_type == "image":
        random_data = np.random.randint(255,
                                        size=size,
                                        dtype=default(np.uint8))
    elif layer_type == 'affinities':
        random_data = np.random.uniform(low=0, high=1,
                                        size=size).astype(default(np.float32))
    elif layer_type == "segmentation":
        random_data = np.random.randint(0xFFFFFF, size=size, dtype=np.uint32)
    else:
        high = np.array([0], dtype=default(np.uint32)) - 1
        random_data = np.random.randint(high[0],
                                        size=size,
                                        dtype=default(np.uint32))
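    # `create_storage` and `layer_path` are defined elsewhere in the original test module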

    storage = create_storage(layer_name)

    CloudVolume.from_numpy(
        random_data,
        vol_path='file://' + layer_path + '/' + layer_name,
        resolution=(1, 1, 1),
        voxel_offset=offset,
        chunk_size=(64, 64, 64),
        layer_type=layer_type,
        max_mip=0,
    )

    return storage, random_data
Example No. 10
def upload_image(image, offset, layer_type, layer_name, encoding):
    lpath = 'file://{}'.format(os.path.join(layer_path, layer_name))
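    # `layer_path` and `find_closest_divisor` are defined elsewhere; the helper presumably picks a
    # chunk size near 64x64x64 that evenly divides the image shape so chunks tile the volume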
    
    neuroglancer_chunk_size = find_closest_divisor(image.shape[:3], closest_to=[64,64,64])

    # Jpeg encoding is lossy so it won't work
    vol = CloudVolume.from_numpy(
      image, 
      vol_path=lpath,
      resolution=(1,1,1), 
      voxel_offset=offset, 
      chunk_size=neuroglancer_chunk_size, 
      layer_type=layer_type, 
      encoding=encoding, 
    )
    
    return vol
Example No. 11
def test_skeletonization_task():
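    # `delete_layer` and `MockTaskQueue` are test helpers defined elsewhere; `tc` is presumably igneous.task_creation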
    directory = '/tmp/removeme/skeleton/'
    layer_path = 'file://' + directory
    delete_layer(layer_path)

    img = np.ones((256,256,256), dtype=np.uint64)
    img[:,:,:] = 2
    cv = CloudVolume.from_numpy(
        img,
        layer_type='segmentation',
        vol_path=layer_path, 
    )

    tq = MockTaskQueue()
    tasks = tc.create_skeletonizing_tasks(layer_path, mip=0, teasar_params={
        'scale': 10,
        'const': 10,
    })
    tq.insert_all(tasks)
Example No. 12
def start_server():

    ## add some error checking
    if not os.path.isfile('/mnt/data/info'):
        logging.info('no valid volume found, using test data')
        arr = np.random.randint(0, high=256, size=(128, 128, 128))  # random_integers was removed from NumPy; randint's high is exclusive
        arr = np.asarray(arr, dtype=np.uint8)
        vol = CloudVolume.from_numpy(arr, max_mip=1)
    else:
        logging.info('using mounted dataset')
        vol = CloudVolume('file:///mnt/data', parallel=2, cache=True)

    logging.info('volume created: {}'.format(vol[1,1,1]))

    logging.info('patching viewer to allow connections on all IPs')
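    # `localviewer` (defined elsewhere) replaces the bound viewer method so the HTTP server listens on all interfaces, not just localhost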
    funcType = types.MethodType
    vol.viewer = funcType(localviewer, vol)

    logging.info('starting cloudvolume service')

    vol.viewer(port=1337)
Example No. 13
def test_cache_validity():
    image = np.zeros(shape=(128, 128, 128, 1), dtype=np.uint8)
    dirpath = '/tmp/cloudvolume/caching-validity-' + str(TEST_NUMBER)
    layer_path = 'file://' + dirpath

    vol = CloudVolume.from_numpy(image,
                                 voxel_offset=(1, 1, 1),
                                 vol_path=layer_path,
                                 layer_type='image',
                                 resolution=(1, 1, 1),
                                 encoding='raw')
    vol.cache.enabled = True
    vol.cache.flush()
    vol.commit_info()

    def test_with_mock_cache_info(info, shoulderror):
        finfo = os.path.join(vol.cache.path, 'info')
        with open(finfo, 'w') as f:
            f.write(json.dumps(info))

        if shoulderror:
            try:
                CloudVolume(vol.cloudpath, cache=True)
            except ValueError:
                pass
            else:
                assert False
        else:
            CloudVolume(vol.cloudpath, cache=True)

    test_with_mock_cache_info(vol.info, shoulderror=False)

    info = vol.info.copy()
    info['scales'][0]['size'][0] = 666
    test_with_mock_cache_info(info, shoulderror=False)
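    # a changed dataset size alone is tolerated; the field changes below must match the cached info or CloudVolume raises ValueError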

    test_with_mock_cache_info({'zomg': 'wow'}, shoulderror=True)

    def tiny_change(key, val):
        info = vol.info.copy()
        info[key] = val
        test_with_mock_cache_info(info, shoulderror=True)

    tiny_change('type', 'zoolander')
    tiny_change('data_type', 'uint32')
    tiny_change('num_channels', 2)
    tiny_change('mesh', 'mesh')

    def scale_change(key, val, mip=0):
        info = vol.info.copy()
        info['scales'][mip][key] = val
        test_with_mock_cache_info(info, shoulderror=True)

    scale_change('voxel_offset', [1, 2, 3])
    scale_change('resolution', [1, 2, 3])
    scale_change('encoding', 'npz')

    vol.cache.flush()

    # Test no info file at all
    CloudVolume(vol.cloudpath, cache=True)

    vol.cache.flush()
Example No. 14
def test_caching():
    image = np.zeros(shape=(128, 128, 128, 1), dtype=np.uint8)
    image[0:64, 0:64, 0:64] = 1
    image[64:128, 0:64, 0:64] = 2
    image[0:64, 64:128, 0:64] = 3
    image[0:64, 0:64, 64:128] = 4
    image[64:128, 64:128, 0:64] = 5
    image[64:128, 0:64, 64:128] = 6
    image[0:64, 64:128, 64:128] = 7
    image[64:128, 64:128, 64:128] = 8

    dirpath = '/tmp/cloudvolume/caching-volume-' + str(TEST_NUMBER)
    layer_path = 'file://' + dirpath

    vol = CloudVolume.from_numpy(
        image,
        voxel_offset=(0, 0, 0),
        vol_path=layer_path,
        layer_type='image',
        resolution=(1, 1, 1),
        encoding='raw',
        chunk_size=(64, 64, 64),
    )

    vol.cache.enabled = True
    vol.cache.flush()

    # Test that reading populates the cache
    read1 = vol[:, :, :]
    assert np.all(read1 == image)

    read2 = vol[:, :, :]
    assert np.all(read2 == image)

    assert len(vol.cache.list()) > 0

    files = vol.cache.list()
    validation_set = [
        '0-64_0-64_0-64', '64-128_0-64_0-64', '0-64_64-128_0-64',
        '0-64_0-64_64-128', '64-128_64-128_0-64', '64-128_0-64_64-128',
        '0-64_64-128_64-128', '64-128_64-128_64-128'
    ]
    assert set([os.path.splitext(fname)[0]
                for fname in files]) == set(validation_set)

    for i in range(8):
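        # each cached chunk is a gzip-compressed, raw-encoded file named by its bounding box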
        fname = os.path.join(vol.cache.path, vol.key,
                             validation_set[i]) + '.gz'
        with gzip.GzipFile(fname, mode='rb') as gfile:
            chunk = gfile.read()
        img3d = chunks.decode(chunk, 'raw', (64, 64, 64, 1), np.uint8)
        assert np.all(img3d == (i + 1))

    vol.cache.flush()
    assert not os.path.exists(vol.cache.path)

    # Test that writing populates the cache
    vol[:, :, :] = image

    assert os.path.exists(vol.cache.path)
    assert np.all(vol[:, :, :] == image)

    vol.cache.flush()

    # Test that partial reads work too
    result = vol[0:64, 0:64, :]
    assert np.all(result == image[0:64, 0:64, :])
    files = vol.cache.list()
    assert len(files) == 2
    result = vol[:, :, :]
    assert np.all(result == image)
    files = vol.cache.list()
    assert len(files) == 8

    vol.cache.flush()

    # Test Non-standard Cache Destination
    dirpath = '/tmp/cloudvolume/caching-cache-' + str(TEST_NUMBER)
    vol.cache.enabled = True
    vol.cache.path = dirpath
    vol[:, :, :] = image

    assert len(os.listdir(os.path.join(dirpath, vol.key))) == 8

    vol.cache.flush()

    # Test that caching doesn't occur when cache is not set
    vol.cache.enabled = False
    result = vol[:, :, :]
    if os.path.exists(vol.cache.path):
        files = vol.cache.list()
        assert len(files) == 0

    vol[:, :, :] = image
    if os.path.exists(vol.cache.path):
        files = vol.cache.list()
        assert len(files) == 0

    vol.cache.flush()

    # Test that deletion works too
    vol.cache.enabled = True
    vol[:, :, :] = image
    files = vol.cache.list()
    assert len(files) == 8
    vol.delete(np.s_[:, :, :])
    files = vol.cache.list()
    assert len(files) == 0

    vol.cache.flush()

    vol[:, :, :] = image
    files = vol.cache.list()
    assert len(files) == 8
    vol.cache.flush(preserve=np.s_[:, :, :])
    files = vol.cache.list()
    assert len(files) == 8
    vol.cache.flush(preserve=np.s_[:64, :64, :])
    files = vol.cache.list()
    assert len(files) == 2

    vol.cache.flush()

    vol[:, :, :] = image
    files = vol.cache.list()
    assert len(files) == 8
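    # the bbox spans both x and y halves but only the lower z half, so it touches 4 of the 8 cached chunks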
    vol.cache.flush_region(Bbox((50, 50, 0), (100, 100, 10)))
    files = vol.cache.list()
    assert len(files) == 4

    vol.cache.flush()

    vol[:, :, :] = image
    files = vol.cache.list()
    assert len(files) == 8
    vol.cache.flush_region(np.s_[50:100, 50:100, 0:10])
    files = vol.cache.list()
    assert len(files) == 4

    vol.cache.flush()
Example No. 15
    def setUp(self):
        # compute parameters
        self.mip = 0
        self.input_size = (36, 448, 448)
        self.patch_size = (20, 256, 256)
        self.cropping_margin_size = (4, 64, 64)
        self.patch_overlap = (4, 64, 64)
        self.input_mask_mip = 1
        self.input_mask_size = (
            self.input_size[0],
            self.input_size[1] // (2**self.input_mask_mip),
            self.input_size[2] // (2**self.input_mask_mip),
        )
        self.output_size = tuple(
            i - 2 * c
            for i, c in zip(self.input_size, self.cropping_margin_size))
        self.output_mask_mip = 2
        self.output_mask_size = (
            self.output_size[0],
            self.output_size[1] // (2**self.output_mask_mip),
            self.output_size[2] // (2**self.output_mask_mip),
        )
        self.output_bbox = Bbox.from_slices(
            tuple(
                slice(c, i - c)
                for i, c in zip(self.input_size, self.cropping_margin_size)))
        #output_size = np.asarray(self.output_size)
        #output_start = np.asarray((4,64,64))
        #output_stop = output_start + output_size
        #output_bbox = Bbox.from_list([*output_start, *output_stop])

        # create image dataset using cloud-volume
        img = np.random.randint(0, 256, size=self.input_size)
        self.img = img.astype(np.uint8)
        # save the input to disk
        self.input_volume_path = 'file:///tmp/input/' + generate_random_string()
        CloudVolume.from_numpy(np.transpose(self.img),
                               vol_path=self.input_volume_path)

        # create input mask volume
        input_mask = np.ones(self.input_size, dtype=bool)  # np.bool was removed from NumPy; the builtin bool is equivalent
        self.input_mask_volume_path = 'file:///tmp/input-mask/' + generate_random_string()
        CloudVolume.from_numpy(np.transpose(input_mask),
                               vol_path=self.input_mask_volume_path,
                               max_mip=self.input_mask_mip)
        input_mask = np.ones(self.input_mask_size, dtype=bool)
        # will mask out the [:2, :8, :8] since it is in mip 1
        input_mask[:(4 + 2), :(64 // 2 + 8 // 2), :(64 // 2 + 8 // 2)] = False
        input_mask_vol = CloudVolume(self.input_mask_volume_path,
                                     mip=self.input_mask_mip)
        input_mask_vol[:, :, :] = np.transpose(input_mask)

        # create output layer
        out = np.random.rand(3, *self.output_size).astype(np.float32)
        self.output_volume_path = 'file:///tmp/output/' + generate_random_string()
        self.output_vol = CloudVolume.from_numpy(
            np.transpose(out),
            vol_path=self.output_volume_path,
            voxel_offset=self.cropping_margin_size[::-1])

        # create output mask volume
        # this is the mip 0 size, so it should be the same as the output
        # it was only used to create the volume
        # TODO: delete this step by creating a mip parameter in from_numpy function
        output_mask = np.ones(self.output_size, dtype=bool)
        self.output_mask_volume_path = 'file:///tmp/output-mask/' + generate_random_string()
        CloudVolume.from_numpy(np.transpose(output_mask),
                               vol_path=self.output_mask_volume_path,
                               max_mip=self.output_mask_mip,
                               voxel_offset=self.cropping_margin_size[::-1])
        # this is the higher mip level mask, so this time we are using the real size
        output_mask = np.ones(self.output_mask_size, dtype=bool)
        # will mask out the [-2:, -8:, -8:] since it is in mip 2
        output_mask[-2:, -8 // 4:, -8 // 4:] = False
        output_mask_vol = CloudVolume(self.output_mask_volume_path,
                                      mip=self.output_mask_mip)
        output_mask_vol[:, :, :] = np.transpose(output_mask)

        # create volume for output thumbnail
        self.thumbnail_volume_path = os.path.join(self.output_volume_path,
                                                  'thumbnail')
        thumbnail = np.asarray(out, dtype='uint8')
        CloudVolume.from_numpy(np.transpose(thumbnail),
                               vol_path=self.thumbnail_volume_path,
                               voxel_offset=self.cropping_margin_size[::-1],
                               max_mip=4)
Example No. 16
def test_inference_pipeline():
    # compute parameters
    mip = 0
    input_size = (36, 448, 448)
    patch_size = (20, 256, 256)
    cropping_margin_size = (4, 64, 64)
    patch_overlap = (4, 64, 64)
    input_mask_mip = 1
    input_mask_size = (
        input_size[0],
        input_size[1] // (2**input_mask_mip),
        input_size[2] // (2**input_mask_mip),
    )
    output_size = tuple(
        i - 2 * c
        for i, c in zip(input_size, cropping_margin_size))
    output_mask_mip = 2
    output_mask_size = (
        output_size[0],
        output_size[1] // (2**output_mask_mip),
        output_size[2] // (2**output_mask_mip),
    )
    output_bbox = Bbox.from_slices(
        tuple(slice(c, i - c)
            for i, c in zip(input_size, cropping_margin_size)))

    # create image dataset using cloud-volume
    img = np.random.randint(0, 256, size=input_size)
    img = img.astype(np.uint8)
    # save the input to disk
    input_volume_path = 'file:///tmp/input/' + generate_random_string()
    CloudVolume.from_numpy(np.transpose(img),
                           vol_path=input_volume_path)

    # create input mask volume
    input_mask = np.ones(input_size, dtype=bool)
    input_mask_volume_path = 'file:///tmp/input-mask/' + generate_random_string()
    CloudVolume.from_numpy(np.transpose(input_mask),
                           vol_path=input_mask_volume_path,
                           max_mip=input_mask_mip)
    input_mask = np.ones(input_mask_size, dtype=bool)
    # will mask out the [:2, :8, :8] since it is in mip 1
    input_mask[:(4 + 2), :(64 // 2 + 8 // 2), :(64 // 2 + 8 // 2)] = False
    input_mask_vol = CloudVolume(input_mask_volume_path,
                                 mip=input_mask_mip)
    input_mask_vol[:, :, :] = np.transpose(input_mask)

    # create output layer
    out = np.random.rand(3, *output_size).astype(np.float32)
    output_volume_path = 'file:///tmp/output/' + generate_random_string()
    output_vol = CloudVolume.from_numpy(
        np.transpose(out),
        vol_path=output_volume_path,
        voxel_offset=cropping_margin_size[::-1])

    # create output mask volume
    # this is the mip 0 size, so it should be the same as the output
    # it was only used to create the volume
    # TODO: delete this step by creating a mip parameter in from_numpy function
    output_mask = np.ones(output_size, dtype=bool)
    output_mask_volume_path = 'file:///tmp/output-mask/' + generate_random_string()
    CloudVolume.from_numpy(np.transpose(output_mask),
                           vol_path=output_mask_volume_path,
                           max_mip=output_mask_mip,
                           voxel_offset=cropping_margin_size[::-1])
    # this is the higher mip level mask, so this time we are using the real size
    output_mask = np.ones(output_mask_size, dtype=bool)
    # will mask out the [-2:, -8:, -8:] since it is in mip 2
    output_mask[-2:, -8 // 4:, -8 // 4:] = False
    output_mask_vol = CloudVolume(output_mask_volume_path,
                                  mip=output_mask_mip)
    output_mask_vol[:, :, :] = np.transpose(output_mask)

    # create volume for output thumbnail
    thumbnail_volume_path = os.path.join(output_volume_path, 'thumbnail')
    thumbnail = np.asarray(out, dtype='uint8')
    CloudVolume.from_numpy(np.transpose(thumbnail),
                           vol_path=thumbnail_volume_path,
                           voxel_offset=cropping_margin_size[::-1],
                           max_mip=4)

    # run pipeline by composing functions
    print('cutout image chunk...')
    cutout_operator = ReadPrecomputedOperator(
        input_volume_path,
        mip=mip,
        expand_margin_size=cropping_margin_size)
    chunk = cutout_operator(output_bbox)

    print('mask input...')
    mask_input_operator = MaskOperator(input_mask_volume_path,
                                       input_mask_mip,
                                       mip,
                                       inverse=False)
    chunk = mask_input_operator(chunk)

    print('run convnet inference...')
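    # framework='identity' makes the inferencer a pass-through (no real model), so the
    # float output should mirror the normalized input, as the evaluation below expects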
    with Inferencer(None, None, patch_size,
                    num_output_channels=3,
                    input_size=chunk.shape,
                    output_patch_overlap=patch_overlap,
                    framework='identity',
                    batch_size=5,
                    dtype='float32') as inferencer:
        print(inferencer.compute_device)
        chunk = inferencer(chunk)
    print('after inference: {}'.format(chunk.slices))
    print('crop the margin...')
    chunk = chunk.crop_margin(output_bbox=output_bbox)
    print('after crop: {}'.format(chunk.slices))

    print('mask the output...')
    mask_output_operator = MaskOperator(output_mask_volume_path,
                                        output_mask_mip,
                                        mip,
                                        inverse=False)
    chunk = mask_output_operator(chunk)
    print('after masking: {}'.format(chunk.slices))

    print('save to output volume...')
    save_operator = WritePrecomputedOperator(output_volume_path,
                                             mip,
                                             upload_log=True,
                                             create_thumbnail=True)
    save_operator(chunk, log={'timer': {'save': 34}})
    print('after saving: {}'.format(chunk.slices))

    # evaluate the output
    print('start evaluation...')
    out = output_vol[output_bbox.to_slices()[::-1] + (slice(0, 3), )]
    out = np.asarray(out)
    out = out[:, :, :, 0] * 255
    out = out.astype(np.uint8)
    out = np.transpose(out)

    # ignore the patch overlap around the border
    img = img[4:-4, 64:-64, 64:-64]

    # check that the masked regions are all zero
    # input mask validation
    np.testing.assert_array_equal(out[:2, :8, :8], 0)
    # output mask validation
    np.testing.assert_array_equal(out[-2:, -8:, -8:], 0)

    # ignore the masked part of output
    img = img[2:-2, 8:-8, 8:-8]
    out = out[2:-2, 8:-8, 8:-8]

    # the values can only be approximately equal because of masking effects
    # abs(desired - actual) < 1.5 * 10**(-decimal)
    np.testing.assert_array_almost_equal(img, out, decimal=0)

    # clean up
    shutil.rmtree('/tmp/input')
    shutil.rmtree('/tmp/input-mask')
    shutil.rmtree('/tmp/output-mask')
    shutil.rmtree('/tmp/output')