def test_9_TestView(self):
    """
    Load some of the dataset again; this time with an offset view.

    Note: The original blockwise fileset must be closed before this test starts.
    """
    # Copy the original description, then translate and shrink its view.
    view_desc = BlockwiseFileset.readDescription(self.description_path)
    view_desc.view_origin = [0, 300, 200, 100, 0]
    view_desc.view_shape = [1, 50, 50, 50, 1]
    offset_desc_path = self.description_path + "_offset"
    BlockwiseFileset.writeDescription(offset_desc_path, view_desc)

    # Open the fileset through the translated description file.
    offset_fileset = BlockwiseFileset(offset_desc_path, "r")
    try:
        assert (offset_fileset.description.view_origin == view_desc.view_origin).all()
        assert (offset_fileset.description.view_shape == view_desc.view_shape).all()

        # Read a region through the offset view.
        logger.debug("Reading data...")
        disk_slicing = numpy.s_[:, 300:350, 200:250, 100:150, :]
        view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
        view_roi = sliceToRoi(view_slicing, self.dataShape)
        result_shape = view_roi[1] - view_roi[0]
        fetched = numpy.zeros(tuple(result_shape), dtype=numpy.uint8)
        offset_fileset.readData(view_roi, fetched)

        # What we read through the view must equal the corresponding
        # (absolute-coordinate) slice of the original dataset.
        logger.debug("Checking data...")
        assert self.data[disk_slicing].shape == fetched.shape
        assert (self.data[disk_slicing] == fetched).all(), "Data didn't match."
    finally:
        offset_fileset.close()
class OpBlockwiseFilesetReader(Operator):
    """
    Adapter that provides an operator interface to the BlockwiseFileset class for reading ONLY.
    """

    name = "OpBlockwiseFilesetReader"

    # Path to the blockwise-fileset description JSON file.
    DescriptionFilePath = InputSlot(stype="filestring")
    Output = OutputSlot()

    class MissingDatasetError(Exception):
        """Raised when the description file does not exist on disk."""

        pass

    def __init__(self, *args, **kwargs):
        super(OpBlockwiseFilesetReader, self).__init__(*args, **kwargs)
        self._blockwiseFileset = None
        # Fallback operator used to satisfy requests for blocks that aren't ready yet.
        self._opDummyData = OpDummyData(parent=self)

    def setupOutputs(self):
        if not os.path.exists(self.DescriptionFilePath.value):
            raise OpBlockwiseFilesetReader.MissingDatasetError(
                "Dataset description not found: {}".format(self.DescriptionFilePath.value)
            )

        # Load up the class that does the real work
        self._blockwiseFileset = BlockwiseFileset(self.DescriptionFilePath.value)

        # Check for errors in the description file.
        descriptionFields = self._blockwiseFileset.description
        axes = descriptionFields.axes
        # BUGFIX: the original message lacked a "{}" placeholder, so
        # .format(axes) silently dropped the offending axes from the message.
        assert all(a in "txyzc" for a in axes), "Unknown axis type. Known axes: txyzc Your axes: {}".format(axes)

        self.Output.meta.shape = tuple(descriptionFields.view_shape)
        self.Output.meta.dtype = descriptionFields.dtype
        self.Output.meta.axistags = vigra.defaultAxistags(str(descriptionFields.axes))
        drange = descriptionFields.drange
        if drange is not None:
            self.Output.meta.drange = drange

    def execute(self, slot, subindex, roi, result):
        assert slot == self.Output, "Unknown output slot"
        try:
            self._blockwiseFileset.readData((roi.start, roi.stop), result)
        except BlockwiseFileset.BlockNotReadyError:
            # Block not on disk yet: substitute dummy data rather than failing.
            result[:] = self._opDummyData.execute(slot, subindex, roi, result)
        return result

    def propagateDirty(self, slot, subindex, roi):
        assert slot == self.DescriptionFilePath, "Unknown input slot."
        self.Output.setDirty(slice(None))

    def cleanUp(self):
        # Close the underlying fileset (if one was opened) before tearing down.
        if self._blockwiseFileset is not None:
            self._blockwiseFileset.close()
        super(OpBlockwiseFilesetReader, self).cleanUp()
class OpBlockwiseFilesetReader(Operator):
    """
    Adapter that provides an operator interface to the BlockwiseFileset class for reading ONLY.
    """

    name = "OpBlockwiseFilesetReader"

    # Path to the blockwise-fileset description JSON file.
    DescriptionFilePath = InputSlot(stype="filestring")
    Output = OutputSlot()

    class MissingDatasetError(Exception):
        """Raised when the description file does not exist on disk."""

        pass

    def __init__(self, *args, **kwargs):
        super(OpBlockwiseFilesetReader, self).__init__(*args, **kwargs)
        self._blockwiseFileset = None
        # Fallback operator used to satisfy requests for blocks that aren't ready yet.
        self._opDummyData = OpDummyData(parent=self)

    def setupOutputs(self):
        if not os.path.exists(self.DescriptionFilePath.value):
            raise OpBlockwiseFilesetReader.MissingDatasetError(
                "Dataset description not found: {}".format(self.DescriptionFilePath.value)
            )

        # Load up the class that does the real work
        self._blockwiseFileset = BlockwiseFileset(self.DescriptionFilePath.value)

        # Check for errors in the description file.
        descriptionFields = self._blockwiseFileset.description
        axes = descriptionFields.axes
        # BUGFIX: the original message lacked a "{}" placeholder, so
        # .format(axes) silently dropped the offending axes from the message.
        assert all(a in "txyzc" for a in axes), "Unknown axis type. Known axes: txyzc Your axes: {}".format(axes)

        self.Output.meta.shape = tuple(descriptionFields.view_shape)
        self.Output.meta.dtype = descriptionFields.dtype
        self.Output.meta.axistags = vigra.defaultAxistags(str(descriptionFields.axes))
        drange = descriptionFields.drange
        if drange is not None:
            self.Output.meta.drange = drange

    def execute(self, slot, subindex, roi, result):
        assert slot == self.Output, "Unknown output slot"
        try:
            self._blockwiseFileset.readData((roi.start, roi.stop), result)
        except BlockwiseFileset.BlockNotReadyError:
            # Block not on disk yet: substitute dummy data rather than failing.
            result[:] = self._opDummyData.execute(slot, subindex, roi, result)
        return result

    def propagateDirty(self, slot, subindex, roi):
        assert slot == self.DescriptionFilePath, "Unknown input slot."
        self.Output.setDirty(slice(None))

    def cleanUp(self):
        # Close the underlying fileset (if one was opened) before tearing down.
        if self._blockwiseFileset is not None:
            self._blockwiseFileset.close()
        super(OpBlockwiseFilesetReader, self).cleanUp()
def test_9_TestView(self):
    """
    Load some of the dataset again; this time with an offset view.

    Note: The original blockwise fileset must be closed before this test starts.
    """
    # Build a translated, smaller view on top of the original description.
    shifted = BlockwiseFileset.readDescription(self.description_path)
    shifted.view_origin = [0, 300, 200, 100, 0]
    shifted.view_shape = [1, 50, 50, 50, 1]
    shifted_path = self.description_path + "_offset"
    BlockwiseFileset.writeDescription(shifted_path, shifted)

    # Re-open the fileset using the special description file.
    fileset = BlockwiseFileset(shifted_path, "r")
    try:
        assert (fileset.description.view_origin == shifted.view_origin).all()
        assert (fileset.description.view_shape == shifted.view_shape).all()

        logger.debug("Reading data...")
        disk_slicing = numpy.s_[:, 300:350, 200:250, 100:150, :]
        view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
        roi_pair = sliceToRoi(view_slicing, self.dataShape)
        out_shape = roi_pair[1] - roi_pair[0]
        out_buffer = numpy.zeros(tuple(out_shape), dtype=numpy.uint8)
        fileset.readData(roi_pair, out_buffer)

        # Data read through the view must match the same region of the
        # original dataset addressed in absolute (disk) coordinates.
        logger.debug("Checking data...")
        assert self.data[disk_slicing].shape == out_buffer.shape
        assert (self.data[disk_slicing] == out_buffer).all(), "Data didn't match."
    finally:
        fileset.close()
def test_6_TestExportSubset(self):
    """
    Export a subset ROI of the fileset into a fresh directory and verify
    that the exported data matches the original data for that ROI.
    """
    roi = ((0, 0, 50, 100, 0), (1, 100, 200, 200, 1))
    exportDir = tempfile.mkdtemp()
    # Re-open read-only before exporting.
    self.bfs.close()
    self.bfs.reopen("r")
    exported_description_path = self.bfs.exportSubset(roi, exportDir)

    try:
        assert os.path.exists(exported_description_path), "Couldn't even find the exported description file."
        exported_bfs = BlockwiseFileset(exported_description_path, "r")
        try:
            read_data = exported_bfs.readData(roi)
            expected_data = self.data[roiToSlice(*roi)]
            assert read_data.shape == expected_data.shape, "Exported data had wrong shape"
            assert read_data.dtype == expected_data.dtype, "Exported data had wrong dtype"
            assert (read_data == expected_data).all(), "Exported data did not match expected data"
        finally:
            # BUGFIX: the exported fileset was never closed before rmtree
            # deleted the directory it reads from; close it first so no
            # open handles outlive the export directory.
            exported_bfs.close()
    finally:
        shutil.rmtree(exportDir)
def test_6_TestExportSubset(self):
    """
    Export a subset ROI of the fileset into a fresh directory and verify
    that the exported data matches the original data for that ROI.
    """
    roi = ((0, 0, 50, 100, 0), (1, 100, 200, 200, 1))
    exportDir = tempfile.mkdtemp()
    # Re-open read-only before exporting.
    self.bfs.close()
    self.bfs.reopen("r")
    exported_description_path = self.bfs.exportSubset(roi, exportDir)

    try:
        assert os.path.exists(exported_description_path), "Couldn't even find the exported description file."
        exported_bfs = BlockwiseFileset(exported_description_path, "r")
        try:
            read_data = exported_bfs.readData(roi)
            expected_data = self.data[roiToSlice(*roi)]
            assert read_data.shape == expected_data.shape, "Exported data had wrong shape"
            assert read_data.dtype == expected_data.dtype, "Exported data had wrong dtype"
            assert (read_data == expected_data).all(), "Exported data did not match expected data"
        finally:
            # BUGFIX: the exported fileset was never closed before rmtree
            # deleted the directory it reads from; close it first so no
            # open handles outlive the export directory.
            exported_bfs.close()
    finally:
        shutil.rmtree(exportDir)