Code example #1
File: clusterOps.py Project: burcin/ilastik
    def setupOutputs(self):
        self.ReturnCode.meta.dtype = bool
        self.ReturnCode.meta.shape = (1,)

        # Check for errors
        primaryOutputDescription = BlockwiseFileset.readDescription(self.OutputDatasetDescription.value)
        primary_block_shape = primaryOutputDescription.block_shape
        primary_sub_block_shape = primaryOutputDescription.sub_block_shape
        assert primary_sub_block_shape is not None, "Primary output description file must specify a sub_block_shape"

        # Ratio of blocks to sub-blocks for all secondaries must match the primary.
        primary_sub_block_factor = (primary_block_shape + primary_sub_block_shape - 1) / primary_sub_block_shape
        
        for i, slot in enumerate( self.SecondaryOutputDescriptions ):
            descriptionPath = slot.value
            secondaryDescription = BlockwiseFileset.readDescription(descriptionPath)
            block_shape = secondaryDescription.block_shape
            sub_block_shape = secondaryDescription.sub_block_shape
            assert sub_block_shape is not None, "Secondary output description #{} doesn't specify a sub_block_shape".format( i )
            
            sub_block_factor = (block_shape + sub_block_shape - 1) / sub_block_shape
            if (tuple(primary_sub_block_factor) != tuple(sub_block_factor)):
                msg = "Error: Ratio of sub_block_shape to block_shape must be the same in the primary output dataset and in all secondary datasets.\n"
                msg += "Secondary dataset {} has a factor of {}, which doesn't match primary factor of {}".format( i, sub_block_factor, primary_sub_block_factor )
                raise RuntimeError(msg)
Code example #2
File: clusterOps.py Project: christophdecker/ilastik
    def setupOutputs(self):
        self.ReturnCode.meta.dtype = bool
        self.ReturnCode.meta.shape = (1, )

        # Check for errors
        primaryOutputDescription = BlockwiseFileset.readDescription(
            self.OutputDatasetDescription.value)
        primary_block_shape = primaryOutputDescription.block_shape
        primary_sub_block_shape = primaryOutputDescription.sub_block_shape
        assert primary_sub_block_shape is not None, "Primary output description file must specify a sub_block_shape"

        # Ratio of blocks to sub-blocks for all secondaries must match the primary.
        primary_sub_block_factor = (primary_block_shape +
                                    primary_sub_block_shape -
                                    1) / primary_sub_block_shape

        for i, slot in enumerate(self.SecondaryOutputDescriptions):
            descriptionPath = slot.value
            secondaryDescription = BlockwiseFileset.readDescription(
                descriptionPath)
            block_shape = secondaryDescription.block_shape
            sub_block_shape = secondaryDescription.sub_block_shape
            assert sub_block_shape is not None, "Secondary output description #{} doesn't specify a sub_block_shape".format(
                i)

            sub_block_factor = (block_shape + sub_block_shape -
                                1) / sub_block_shape
            if (tuple(primary_sub_block_factor) != tuple(sub_block_factor)):
                msg = "Error: Ratio of sub_block_shape to block_shape must be the same in the primary output dataset and in all secondary datasets.\n"
                msg += "Secondary dataset {} has a factor of {}, which doesn't match primary factor of {}".format(
                    i, sub_block_factor, primary_sub_block_factor)
                raise RuntimeError(msg)
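The factor comparison in examples #1 and #2 is per-axis ceiling division. A minimal standalone sketch of that check follows; the shapes and the sub_block_factor helper are made up for illustration, and `//` is used because the examples above are Python 2 code, where `/` on integer arrays already truncates:

import numpy as np

def sub_block_factor(block_shape, sub_block_shape):
    # Number of sub-blocks per block along each axis, rounded up (ceiling division).
    return (block_shape + sub_block_shape - 1) // sub_block_shape

# Hypothetical 5-D blocking schemes:
primary_block_shape       = np.array([1, 256, 256, 256, 1])
primary_sub_block_shape   = np.array([1, 64, 64, 64, 1])
secondary_block_shape     = np.array([1, 128, 128, 128, 1])
secondary_sub_block_shape = np.array([1, 32, 32, 32, 1])

primary_factor   = sub_block_factor(primary_block_shape, primary_sub_block_shape)
secondary_factor = sub_block_factor(secondary_block_shape, secondary_sub_block_shape)

# Both factors come out as (1, 4, 4, 4, 1), so this secondary blocking would pass the check.
assert tuple(primary_factor) == tuple(secondary_factor)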
Code example #3
    def test_9_TestView(self):
        """
        Load some of the dataset again; this time with an offset view.
        Note: The original blockwise fileset must be closed before this test starts.
        """
        # Create a copy of the original description, but specify a translated (and smaller) view
        desc = BlockwiseFileset.readDescription(self.description_path)
        desc.view_origin = [0, 300, 200, 100, 0]
        desc.view_shape = [1, 50, 50, 50, 1]
        offsetConfigPath = self.description_path + '_offset'
        BlockwiseFileset.writeDescription(offsetConfigPath, desc)

        # Open the fileset using the special description file
        bfs = BlockwiseFileset( offsetConfigPath, 'r' )
        try:
            assert (bfs.description.view_origin == desc.view_origin).all()
            assert (bfs.description.view_shape == desc.view_shape).all()

            # Read some data
            logger.debug( "Reading data..." )
            disk_slicing = numpy.s_[:, 300:350, 200:250, 100:150, :]
            view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
            roi = sliceToRoi( view_slicing, self.dataShape )
            roiShape = roi[1] - roi[0]
            read_data = numpy.zeros( tuple(roiShape), dtype=numpy.uint8 )

            bfs.readData( roi, read_data )

            # The data we read should match the correct part of the original dataset.
            logger.debug( "Checking data..." )
            assert self.data[disk_slicing].shape == read_data.shape
            assert (self.data[disk_slicing] == read_data).all(), "Data didn't match."

        finally:
            bfs.close()
Code example #4
    def test_9_TestView(self):
        """
        Load some of the dataset again; this time with an offset view.
        Note: The original blockwise fileset must be closed before this test starts.
        """
        # Create a copy of the original description, but specify a translated (and smaller) view
        desc = BlockwiseFileset.readDescription(self.description_path)
        desc.view_origin = [0, 300, 200, 100, 0]
        desc.view_shape = [1, 50, 50, 50, 1]
        offsetConfigPath = self.description_path + '_offset'
        BlockwiseFileset.writeDescription(offsetConfigPath, desc)

        # Open the fileset using the special description file
        bfs = BlockwiseFileset(offsetConfigPath, 'r')
        try:
            assert (bfs.description.view_origin == desc.view_origin).all()
            assert (bfs.description.view_shape == desc.view_shape).all()

            # Read some data
            logger.debug("Reading data...")
            disk_slicing = numpy.s_[:, 300:350, 200:250, 100:150, :]
            view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
            roi = sliceToRoi(view_slicing, self.dataShape)
            roiShape = roi[1] - roi[0]
            read_data = numpy.zeros(tuple(roiShape), dtype=numpy.uint8)

            bfs.readData(roi, read_data)

            # The data we read should match the correct part of the original dataset.
            logger.debug("Checking data...")
            assert self.data[disk_slicing].shape == read_data.shape
            assert (self.data[disk_slicing] == read_data
                    ).all(), "Data didn't match."

        finally:
            bfs.close()
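The offset-view test in examples #3 and #4 relies on the fact that coordinates inside the view plus view_origin give coordinates on disk. A rough standalone sketch of that arithmetic follows; the slicing_to_roi helper is a simplified stand-in for lazyflow's sliceToRoi, not its actual implementation:

import numpy

def slicing_to_roi(slicing, shape):
    # Expand each slice with defaults taken from the full shape,
    # returning (start, stop) coordinate arrays.
    starts = [s.start if s.start is not None else 0 for s in slicing]
    stops = [s.stop if s.stop is not None else dim for s, dim in zip(slicing, shape)]
    return numpy.array(starts), numpy.array(stops)

view_shape = (1, 50, 50, 50, 1)                   # view_shape from the description above
view_origin = numpy.array([0, 300, 200, 100, 0])  # view_origin from the description above

view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
start, stop = slicing_to_roi(view_slicing, view_shape)

# Translating the view roi by view_origin recovers the on-disk slicing used
# for the comparison above: [0:1, 300:350, 200:250, 100:150, 0:1]
print(start + view_origin, stop + view_origin)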
Code example #5
File: clusterOps.py Project: burcin/ilastik
    def _prepareDestination(self):
        """
        - If the result file doesn't exist yet, create it (and the dataset)
        - If the result file already exists, return a list of the rois that 
        are NOT needed (their data already exists in the final output)
        """
        originalDescription = BlockwiseFileset.readDescription(self.OutputDatasetDescription.value)
        datasetDescription = copy.deepcopy(originalDescription)

        # Modify description fields as needed
        # -- axes
        datasetDescription.axes = "".join( self.Input.meta.getTaggedShape().keys() )
        assert set(originalDescription.axes) == set( datasetDescription.axes ), \
            "Can't prepare destination dataset: original dataset description listed " \
            "axes as {}, but actual output axes are {}".format( originalDescription.axes, datasetDescription.axes )

        # -- shape
        datasetDescription.view_shape = list(self.Input.meta.shape)
        # -- block_shape
        assert originalDescription.block_shape is not None
        originalBlockDims = collections.OrderedDict( zip( originalDescription.axes, originalDescription.block_shape ) )
        datasetDescription.block_shape = map( lambda a: originalBlockDims[a], datasetDescription.axes )
        datasetDescription.block_shape = map( min, zip( datasetDescription.block_shape, self.Input.meta.shape ) )
        # -- chunks
        if originalDescription.chunks is not None:
            originalChunkDims = collections.OrderedDict( zip( originalDescription.axes, originalDescription.chunks ) )
            datasetDescription.chunks = map( lambda a: originalChunkDims[a], datasetDescription.axes )
            datasetDescription.chunks = map( min, zip( datasetDescription.chunks, self.Input.meta.shape ) )
        # -- dtype
        if datasetDescription.dtype != self.Input.meta.dtype:
            dtype = self.Input.meta.dtype
            if type(dtype) is numpy.dtype:
                dtype = dtype.type
            datasetDescription.dtype = dtype().__class__.__name__

        # Create a unique hash for this blocking scheme.
        # If it changes, we can't use any previous data.
        sha = hashlib.sha1()
        sha.update( str( tuple( datasetDescription.block_shape) ) )
        sha.update( datasetDescription.axes )
        sha.update( datasetDescription.block_file_name_format )

        datasetDescription.hash_id = sha.hexdigest()

        if datasetDescription != originalDescription:
            descriptionFilePath = self.OutputDatasetDescription.value
            logger.info( "Overwriting dataset description: {}".format( descriptionFilePath ) )
            BlockwiseFileset.writeDescription(descriptionFilePath, datasetDescription)
            with open( descriptionFilePath, 'r' ) as f:
                logger.info( f.read() )

        # Now open the dataset
        blockwiseFileset = BlockwiseFileset( self.OutputDatasetDescription.value )
        
        taskInfos = self._prepareTaskInfos( blockwiseFileset.getAllBlockRois() )
        
        if blockwiseFileset.description.hash_id != originalDescription.hash_id:
            # Something about our blocking scheme changed.
            # Make sure all blocks are marked as NOT available.
            # (Just in case some were left over from a previous run.)
            for roi in taskInfos.keys():
                blockwiseFileset.setBlockStatus( roi[0], BlockwiseFileset.BLOCK_NOT_AVAILABLE )

        return blockwiseFileset, taskInfos
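Two details of the block_shape/chunks handling above are easy to miss: the original dimensions are re-ordered to match the actual output axes, and each dimension is clamped so a block never exceeds the dataset itself. A standalone sketch with made-up shapes; list comprehensions replace the map()/zip() calls, which only return lists under Python 2:

import collections

original_axes        = "txyzc"
original_block_shape = (1, 256, 256, 256, 1)
output_axes          = "tzyxc"                # hypothetical output axis order
output_shape         = (1, 100, 512, 512, 2)  # hypothetical output dataset shape

original_block_dims = collections.OrderedDict(zip(original_axes, original_block_shape))

# Re-order the block shape to the output axis order ...
block_shape = [original_block_dims[a] for a in output_axes]
# ... then clamp each dimension so a block never exceeds the dataset.
block_shape = [min(b, s) for b, s in zip(block_shape, output_shape)]

print(block_shape)  # [1, 100, 256, 256, 1]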
Code example #6
File: clusterOps.py Project: christophdecker/ilastik
    def _prepareDestination(self):
        """
        - If the result file doesn't exist yet, create it (and the dataset)
        - If the result file already exists, return a list of the rois that 
        are NOT needed (their data already exists in the final output)
        """
        originalDescription = BlockwiseFileset.readDescription(
            self.OutputDatasetDescription.value)
        datasetDescription = copy.deepcopy(originalDescription)

        # Modify description fields as needed
        # -- axes
        datasetDescription.axes = "".join(
            self.Input.meta.getTaggedShape().keys())
        assert set(originalDescription.axes) == set( datasetDescription.axes ), \
            "Can't prepare destination dataset: original dataset description listed " \
            "axes as {}, but actual output axes are {}".format( originalDescription.axes, datasetDescription.axes )

        # -- shape
        datasetDescription.view_shape = list(self.Input.meta.shape)
        # -- block_shape
        assert originalDescription.block_shape is not None
        originalBlockDims = collections.OrderedDict(
            zip(originalDescription.axes, originalDescription.block_shape))
        datasetDescription.block_shape = map(lambda a: originalBlockDims[a],
                                             datasetDescription.axes)
        datasetDescription.block_shape = map(
            min, zip(datasetDescription.block_shape, self.Input.meta.shape))
        # -- chunks
        if originalDescription.chunks is not None:
            originalChunkDims = collections.OrderedDict(
                zip(originalDescription.axes, originalDescription.chunks))
            datasetDescription.chunks = map(lambda a: originalChunkDims[a],
                                            datasetDescription.axes)
            datasetDescription.chunks = map(
                min, zip(datasetDescription.chunks, self.Input.meta.shape))
        # -- dtype
        if datasetDescription.dtype != self.Input.meta.dtype:
            dtype = self.Input.meta.dtype
            if type(dtype) is numpy.dtype:
                dtype = dtype.type
            datasetDescription.dtype = dtype().__class__.__name__

        # Create a unique hash for this blocking scheme.
        # If it changes, we can't use any previous data.
        sha = hashlib.sha1()
        sha.update(str(tuple(datasetDescription.block_shape)))
        sha.update(datasetDescription.axes)
        sha.update(datasetDescription.block_file_name_format)

        datasetDescription.hash_id = sha.hexdigest()

        if datasetDescription != originalDescription:
            descriptionFilePath = self.OutputDatasetDescription.value
            logger.info("Overwriting dataset description: {}".format(
                descriptionFilePath))
            BlockwiseFileset.writeDescription(descriptionFilePath,
                                              datasetDescription)
            with open(descriptionFilePath, 'r') as f:
                logger.info(f.read())

        # Now open the dataset
        blockwiseFileset = BlockwiseFileset(
            self.OutputDatasetDescription.value)

        taskInfos = self._prepareTaskInfos(blockwiseFileset.getAllBlockRois())

        if blockwiseFileset.description.hash_id != originalDescription.hash_id:
            # Something about our blocking scheme changed.
            # Make sure all blocks are marked as NOT available.
            # (Just in case some were left over from a previous run.)
            for roi in taskInfos.keys():
                blockwiseFileset.setBlockStatus(
                    roi[0], BlockwiseFileset.BLOCK_NOT_AVAILABLE)

        return blockwiseFileset, taskInfos
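The blocking-scheme fingerprint is just a SHA-1 over the block shape, axis order, and block file name format; if any of them change, previously written blocks are treated as stale. A sketch with assumed values (the format string is hypothetical); note that hashlib's update() needs bytes under Python 3, whereas the Python 2 code above passes str:

import hashlib

block_shape = (1, 256, 256, 256, 1)
axes = "tzyxc"
block_file_name_format = "block_{roiString}.h5/data"  # hypothetical value

sha = hashlib.sha1()
sha.update(str(tuple(block_shape)).encode("utf-8"))
sha.update(axes.encode("utf-8"))
sha.update(block_file_name_format.encode("utf-8"))

hash_id = sha.hexdigest()
# A mismatch with the hash_id stored in an existing description means the
# blocking scheme changed, so all blocks get marked BLOCK_NOT_AVAILABLE.
print(hash_id)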