def test_9_TestView(self):
    """
    Load some of the dataset again; this time with an offset view.
    Note: The original blockwise fileset must be closed before this test starts.
    """
    # Create a copy of the original description, but specify a translated (and smaller) view
    desc = BlockwiseFileset.readDescription(self.description_path)
    desc.view_origin = [0, 300, 200, 100, 0]
    desc.view_shape = [1, 50, 50, 50, 1]
    offsetConfigPath = self.description_path + '_offset'
    BlockwiseFileset.writeDescription(offsetConfigPath, desc)

    # Open the fileset using the special description file
    bfs = BlockwiseFileset(offsetConfigPath, 'r')
    try:
        assert (bfs.description.view_origin == desc.view_origin).all()
        assert (bfs.description.view_shape == desc.view_shape).all()

        # Read some data
        logger.debug("Reading data...")
        disk_slicing = numpy.s_[:, 300:350, 200:250, 100:150, :]
        view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
        roi = sliceToRoi(view_slicing, self.dataShape)
        roiShape = roi[1] - roi[0]
        read_data = numpy.zeros(tuple(roiShape), dtype=numpy.uint8)
        bfs.readData(roi, read_data)

        # The data we read should match the correct part of the original dataset.
        logger.debug("Checking data...")
        assert self.data[disk_slicing].shape == read_data.shape
        assert (self.data[disk_slicing] == read_data).all(), "Data didn't match."
    finally:
        bfs.close()
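
# A minimal sketch (not part of the original suite) of the coordinate
# translation that the offset view above relies on: a roi expressed in view
# coordinates maps to disk coordinates by adding view_origin elementwise.
# The helper name and hard-coded values are illustrative; they mirror the
# view_origin and slicings used in test_9_TestView.
def _example_view_to_disk_roi():
    import numpy
    view_origin = numpy.array([0, 300, 200, 100, 0])
    # The test reads the entire view: view roi [0,0,0,0,0] -> [1,50,50,50,1]
    view_roi_start = numpy.array([0, 0, 0, 0, 0])
    view_roi_stop = numpy.array([1, 50, 50, 50, 1])
    # Translate into the coordinate space of the underlying on-disk dataset.
    disk_roi_start = view_roi_start + view_origin
    disk_roi_stop = view_roi_stop + view_origin
    # These disk coordinates correspond to the test's disk_slicing:
    # numpy.s_[:, 300:350, 200:250, 100:150, :]
    assert (disk_roi_start == [0, 300, 200, 100, 0]).all()
    assert (disk_roi_stop == [1, 350, 250, 150, 1]).all()
    return disk_roi_start, disk_roi_stop
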
def _prepareDestination(self):
    """
    - If the result file doesn't exist yet, create it (and the dataset)
    - If the result file already exists, return a list of the rois that
      are NOT needed (their data already exists in the final output)
    """
    originalDescription = BlockwiseFileset.readDescription(self.OutputDatasetDescription.value)
    datasetDescription = copy.deepcopy(originalDescription)

    # Modify description fields as needed
    # -- axes
    datasetDescription.axes = "".join(self.Input.meta.getTaggedShape().keys())
    assert set(originalDescription.axes) == set(datasetDescription.axes), \
        "Can't prepare destination dataset: original dataset description listed " \
        "axes as {}, but actual output axes are {}".format(
            originalDescription.axes, datasetDescription.axes)

    # -- shape
    datasetDescription.view_shape = list(self.Input.meta.shape)

    # -- block_shape
    assert originalDescription.block_shape is not None
    originalBlockDims = collections.OrderedDict(
        zip(originalDescription.axes, originalDescription.block_shape))
    # Reorder the block shape to match the output axes, then clip each
    # dimension to the actual dataset shape.
    datasetDescription.block_shape = [originalBlockDims[a] for a in datasetDescription.axes]
    datasetDescription.block_shape = [min(a, b) for a, b in
                                      zip(datasetDescription.block_shape, self.Input.meta.shape)]

    # -- chunks
    if originalDescription.chunks is not None:
        originalChunkDims = collections.OrderedDict(
            zip(originalDescription.axes, originalDescription.chunks))
        datasetDescription.chunks = [originalChunkDims[a] for a in datasetDescription.axes]
        datasetDescription.chunks = [min(a, b) for a, b in
                                     zip(datasetDescription.chunks, self.Input.meta.shape)]

    # -- dtype
    if datasetDescription.dtype != self.Input.meta.dtype:
        dtype = self.Input.meta.dtype
        if type(dtype) is numpy.dtype:
            dtype = dtype.type
        datasetDescription.dtype = dtype().__class__.__name__

    # Create a unique hash for this blocking scheme.
    # If it changes, we can't use any previous data.
    sha = hashlib.sha1()
    sha.update(str(tuple(datasetDescription.block_shape)).encode('utf-8'))
    sha.update(datasetDescription.axes.encode('utf-8'))
    sha.update(datasetDescription.block_file_name_format.encode('utf-8'))
    datasetDescription.hash_id = sha.hexdigest()

    if datasetDescription != originalDescription:
        descriptionFilePath = self.OutputDatasetDescription.value
        logger.info("Overwriting dataset description: {}".format(descriptionFilePath))
        BlockwiseFileset.writeDescription(descriptionFilePath, datasetDescription)
        with open(descriptionFilePath, 'r') as f:
            logger.info(f.read())

    # Now open the dataset
    blockwiseFileset = BlockwiseFileset(self.OutputDatasetDescription.value)
    taskInfos = self._prepareTaskInfos(blockwiseFileset.getAllBlockRois())

    if blockwiseFileset.description.hash_id != originalDescription.hash_id:
        # Something about our blocking scheme changed.
        # Make sure all blocks are marked as NOT available.
        # (Just in case some were left over from a previous run.)
        for roi in taskInfos.keys():
            blockwiseFileset.setBlockStatus(roi[0], BlockwiseFileset.BLOCK_NOT_AVAILABLE)

    return blockwiseFileset, taskInfos
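
# A minimal sketch (not part of the original operator) of the blocking-scheme
# fingerprint computed in _prepareDestination: hashing block_shape, axes, and
# block_file_name_format yields an id that changes whenever the blocking
# scheme changes, which is how stale blocks from a previous run are detected
# and invalidated above. The function name and example arguments below are
# hypothetical.
def _example_blocking_hash(block_shape, axes, block_file_name_format):
    import hashlib
    sha = hashlib.sha1()
    # Encode to bytes so the sketch also runs under Python 3.
    sha.update(str(tuple(block_shape)).encode('utf-8'))
    sha.update(axes.encode('utf-8'))
    sha.update(block_file_name_format.encode('utf-8'))
    return sha.hexdigest()

# Any change to the blocking scheme produces a different hash_id, so blocks
# written under the old scheme can be marked BLOCK_NOT_AVAILABLE:
assert (_example_blocking_hash([100, 100, 100], 'zyx', 'block_{roiString}.h5')
        != _example_blocking_hash([50, 100, 100], 'zyx', 'block_{roiString}.h5'))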