def setupOutputs(self):
    self.ReturnCode.meta.dtype = bool
    self.ReturnCode.meta.shape = (1,)

    self._closeFiles()
    self._primaryBlockwiseFileset = BlockwiseFileset(self.OutputFilesetDescription.value, 'a')
def setUp(self):
    """
    Create a blockwise fileset to test with.
    """
    if platform.system() == 'Windows':
        # On windows, there are errors, and we make no attempt to solve them (at the moment).
        raise nose.SkipTest
    try:
        BlockwiseFileset._prepare_system()
    except ValueError:
        # If the system isn't configured to allow lots of open files, we can't run this test.
        raise nose.SkipTest

    testConfig = """
    {
        "_schema_name" : "blockwise-fileset-description",
        "_schema_version" : 1.0,
        "name" : "synapse_small",
        "format" : "hdf5",
        "axes" : "txyzc",
        "shape" : [1,400,400,100,1],
        "dtype" : "numpy.uint8",
        "block_shape" : [1, 50, 50, 50, 100],
        "block_file_name_format" : "cube{roiString}.h5/volume/data"
    }
    """
    self.tempDir = tempfile.mkdtemp()
    self.configpath = os.path.join(self.tempDir, "config.json")
    logger.debug("Loading config file...")
    with open(self.configpath, 'w') as f:
        f.write(testConfig)

    logger.debug("Creating random test data...")
    bfs = BlockwiseFileset(self.configpath, 'a')
    dataShape = tuple(bfs.description.shape)
    self.data = numpy.random.randint(255, size=dataShape).astype(numpy.uint8)

    logger.debug("Writing test data...")
    datasetRoi = ([0, 0, 0, 0, 0], dataShape)
    bfs.writeData(datasetRoi, self.data)
    block_starts = getIntersectingBlocks(bfs.description.block_shape, datasetRoi)
    for block_start in block_starts:
        bfs.setBlockStatus(block_start, BlockwiseFileset.BLOCK_AVAILABLE)
    bfs.close()
def setupOutputs(self):
    if not os.path.exists(self.DescriptionFilePath.value):
        raise OpBlockwiseFilesetReader.MissingDatasetError(
            "Dataset description not found: {}".format(self.DescriptionFilePath.value))

    # Load up the class that does the real work
    self._blockwiseFileset = BlockwiseFileset(self.DescriptionFilePath.value)

    # Check for errors in the description file
    descriptionFields = self._blockwiseFileset.description
    axes = descriptionFields.axes
    assert all(a in 'txyzc' for a in axes), \
        "Unknown axis type. Known axes: txyzc  Your axes: {}".format(axes)

    self.Output.meta.shape = tuple(descriptionFields.view_shape)
    self.Output.meta.dtype = descriptionFields.dtype
    self.Output.meta.axistags = vigra.defaultAxistags(descriptionFields.axes)
    drange = descriptionFields.drange
    if drange is not None:
        self.Output.meta.drange = drange
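# Hedged usage sketch (not part of the original source): one way this operator
# might be wired into a lazyflow graph and read from. The function name and the
# description path are hypothetical placeholders.
def _example_read_with_operator():
    from lazyflow.graph import Graph

    op = OpBlockwiseFilesetReader(graph=Graph())
    op.DescriptionFilePath.setValue("/tmp/volume_description.json")  # hypothetical path
    data = op.Output[:].wait()  # request and read the entire view_shape
    op.cleanUp()  # release the underlying BlockwiseFileset
    return data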
def setup_class(cls): if platform.system() == "Windows": # On windows, there are errors, and we make no attempt to solve them (at the moment). pytest.skip("Windows") try: BlockwiseFileset._prepare_system() except ValueError: # If the system isn't configured to allow lots of open files, we can't run this test. pytest.skip( "System is not configured to allow opening a lot of files") testConfig = """ { "_schema_name" : "blockwise-fileset-description", "_schema_version" : 1.0, "name" : "synapse_small", "format" : "hdf5", "axes" : "txyzc", "shape" : [1,10,20,5,1], "dtype" : "object", "block_shape" : [1, 5, 4, 1, 100], "block_file_name_format" : "cube{roiString}.h5/volume/data" } """ cls.tempDir = tempfile.mkdtemp() cls.description_path = os.path.join(cls.tempDir, "config.json") with open(cls.description_path, "w") as f: f.write(testConfig) logger.debug("Loading config file...") cls.bfs = BlockwiseFileset(cls.description_path, "a") cls.dataShape = tuple(cls.bfs.description.shape) def make_dummy_dict(x): return {str(x): numpy.array([x, x])} vec_make_dummy_dict = numpy.vectorize(make_dummy_dict) int_data = numpy.random.randint(255, size=cls.dataShape).astype(numpy.uint8) dict_data = vec_make_dummy_dict(int_data) cls.data = dict_data
def setup_class(cls):
    if platform.system() == 'Windows':
        # On windows, there are errors, and we make no attempt to solve them (at the moment).
        raise nose.SkipTest
    try:
        BlockwiseFileset._prepare_system()
    except ValueError:
        # If the system isn't configured to allow lots of open files, we can't run this test.
        raise nose.SkipTest

    testConfig = """
    {
        "_schema_name" : "blockwise-fileset-description",
        "_schema_version" : 1.0,
        "name" : "synapse_small",
        "format" : "hdf5",
        "axes" : "txyzc",
        "shape" : [1,400,400,200,1],
        "dtype" : "numpy.uint8",
        "compression" : "lzf",
        "block_shape" : [1, 50, 50, 50, 100],
        "block_file_name_format" : "cube{roiString}.h5/volume/data"
    }
    """
    cls.tempDir = tempfile.mkdtemp()
    cls.description_path = os.path.join(cls.tempDir, "description.json")
    with open(cls.description_path, 'w') as f:
        f.write(testConfig)

    logger.debug("Loading config file...")
    cls.bfs = BlockwiseFileset(cls.description_path, 'a')
    cls.dataShape = tuple(cls.bfs.description.shape)

    logger.debug("Creating random test data...")
    cls.data = numpy.random.randint(255, size=cls.dataShape).astype(numpy.uint8)
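# Hedged sketch (not one of the original tests): reading a sub-region back from
# the fileset created above. readData() takes a (start, stop) roi in view
# coordinates and fills a pre-allocated array in place; the roi below is an
# arbitrary example within the [1,400,400,200,1] volume.
def _example_read_subregion(bfs):
    roi = ([0, 100, 100, 50, 0], [1, 150, 150, 100, 1])
    out = numpy.zeros((1, 50, 50, 50, 1), dtype=numpy.uint8)
    bfs.readData(roi, out)  # fills 'out' in place (and also returns it)
    return out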
def test_6_TestExportSubset(self):
    roi = ((0, 0, 50, 100, 0), (1, 100, 200, 200, 1))
    exportDir = tempfile.mkdtemp()
    self.bfs.close()
    self.bfs.reopen("r")
    exported_description_path = self.bfs.exportSubset(roi, exportDir)

    try:
        # Check for the description file before attempting to open the exported fileset.
        assert os.path.exists(exported_description_path), "Couldn't even find the exported description file."
        exported_bfs = BlockwiseFileset(exported_description_path, "r")
        read_data = exported_bfs.readData(roi)
        expected_data = self.data[roiToSlice(*roi)]
        assert read_data.shape == expected_data.shape, "Exported data had wrong shape"
        assert read_data.dtype == expected_data.dtype, "Exported data had wrong dtype"
        assert (read_data == expected_data).all(), "Exported data did not match expected data"
    finally:
        shutil.rmtree(exportDir)
def test_9_TestView(self):
    """
    Load some of the dataset again; this time with an offset view.
    Note: The original blockwise fileset must be closed before this test starts.
    """
    # Create a copy of the original description, but specify a translated (and smaller) view
    desc = BlockwiseFileset.readDescription(self.description_path)
    desc.view_origin = [0, 300, 200, 100, 0]
    desc.view_shape = [1, 50, 50, 50, 1]
    offsetConfigPath = self.description_path + "_offset"
    BlockwiseFileset.writeDescription(offsetConfigPath, desc)

    # Open the fileset using the special description file
    bfs = BlockwiseFileset(offsetConfigPath, "r")
    try:
        assert (bfs.description.view_origin == desc.view_origin).all()
        assert (bfs.description.view_shape == desc.view_shape).all()

        # Read some data
        logger.debug("Reading data...")
        disk_slicing = numpy.s_[:, 300:350, 200:250, 100:150, :]
        view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
        roi = sliceToRoi(view_slicing, self.dataShape)
        roiShape = roi[1] - roi[0]
        read_data = numpy.zeros(tuple(roiShape), dtype=numpy.uint8)
        bfs.readData(roi, read_data)

        # The data we read should match the correct part of the original dataset.
        logger.debug("Checking data...")
        assert self.data[disk_slicing].shape == read_data.shape
        assert (self.data[disk_slicing] == read_data).all(), "Data didn't match."
    finally:
        bfs.close()
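# Hedged sketch (helper is hypothetical, not from the source): the coordinate
# translation the view mechanism performs. A roi in view coordinates maps to
# disk coordinates by adding view_origin, which is why view_slicing
# [0:50, ...] above reads the same voxels as disk_slicing [300:350, ...].
def _view_roi_to_disk_roi(view_roi, view_origin):
    start, stop = view_roi
    disk_start = [v + o for v, o in zip(start, view_origin)]
    disk_stop = [v + o for v, o in zip(stop, view_origin)]
    return (disk_start, disk_stop)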
def _prepareDestination(self):
    """
    - If the result file doesn't exist yet, create it (and the dataset)
    - If the result file already exists, return a list of the rois that
      are NOT needed (their data already exists in the final output)
    """
    originalDescription = BlockwiseFileset.readDescription(self.OutputDatasetDescription.value)
    datasetDescription = copy.deepcopy(originalDescription)

    # Modify description fields as needed
    # -- axes
    datasetDescription.axes = "".join(list(self.Input.meta.getTaggedShape().keys()))
    assert set(originalDescription.axes) == set(datasetDescription.axes), (
        "Can't prepare destination dataset: original dataset description listed "
        "axes as {}, but actual output axes are {}".format(originalDescription.axes, datasetDescription.axes)
    )

    # -- shape
    datasetDescription.view_shape = list(self.Input.meta.shape)

    # -- block_shape
    assert originalDescription.block_shape is not None
    originalBlockDims = collections.OrderedDict(
        list(zip(originalDescription.axes, originalDescription.block_shape))
    )
    datasetDescription.block_shape = [originalBlockDims[a] for a in datasetDescription.axes]
    datasetDescription.block_shape = list(
        map(min, list(zip(datasetDescription.block_shape, self.Input.meta.shape)))
    )

    # -- chunks
    if originalDescription.chunks is not None:
        originalChunkDims = collections.OrderedDict(
            list(zip(originalDescription.axes, originalDescription.chunks))
        )
        datasetDescription.chunks = [originalChunkDims[a] for a in datasetDescription.axes]
        datasetDescription.chunks = list(map(min, list(zip(datasetDescription.chunks, self.Input.meta.shape))))

    # -- dtype
    if datasetDescription.dtype != self.Input.meta.dtype:
        dtype = self.Input.meta.dtype
        if type(dtype) is numpy.dtype:
            dtype = dtype.type
        datasetDescription.dtype = dtype().__class__.__name__

    # Create a unique hash for this blocking scheme.
    # If it changes, we can't use any previous data.
    # (hashlib requires bytes, so encode the strings before hashing.)
    sha = hashlib.sha1()
    sha.update(str(tuple(datasetDescription.block_shape)).encode("utf-8"))
    sha.update(datasetDescription.axes.encode("utf-8"))
    sha.update(datasetDescription.block_file_name_format.encode("utf-8"))
    datasetDescription.hash_id = sha.hexdigest()

    if datasetDescription != originalDescription:
        descriptionFilePath = self.OutputDatasetDescription.value
        logger.info("Overwriting dataset description: {}".format(descriptionFilePath))
        BlockwiseFileset.writeDescription(descriptionFilePath, datasetDescription)
        with open(descriptionFilePath, "r") as f:
            logger.info(f.read())

    # Now open the dataset
    blockwiseFileset = BlockwiseFileset(self.OutputDatasetDescription.value)
    taskInfos = self._prepareTaskInfos(blockwiseFileset.getAllBlockRois())

    if blockwiseFileset.description.hash_id != originalDescription.hash_id:
        # Something about our blocking scheme changed.
        # Make sure all blocks are marked as NOT available.
        # (Just in case some were left over from a previous run.)
        for roi in list(taskInfos.keys()):
            blockwiseFileset.setBlockStatus(roi[0], BlockwiseFileset.BLOCK_NOT_AVAILABLE)

    return blockwiseFileset, taskInfos
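# Hedged sketch (helper and example values are illustrative, not from the
# source): the block_shape handling above, isolated. Block dimensions are
# re-ordered to match the output axes and clamped so no block exceeds the
# dataset shape along any axis.
def _adapt_block_shape(orig_axes, orig_block_shape, new_axes, new_shape):
    block_dims = dict(zip(orig_axes, orig_block_shape))
    reordered = [block_dims[a] for a in new_axes]
    return [min(b, s) for b, s in zip(reordered, new_shape)]

# e.g. _adapt_block_shape("txyzc", [1, 50, 50, 50, 100], "xyzct", [400, 400, 100, 1, 1])
# -> [50, 50, 50, 1, 1]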