Example #1
    def setupOutputs(self):
        self.ReturnCode.meta.dtype = bool
        self.ReturnCode.meta.shape = (1,)

        self._closeFiles()
        self._primaryBlockwiseFileset = BlockwiseFileset(
            self.OutputFilesetDescription.value, 'a')
Example #2
class OpBlockwiseFilesetReader(Operator):
    """
    Adapter that provides an operator interface to the BlockwiseFileset class for reading ONLY.
    """

    name = "OpBlockwiseFilesetReader"

    DescriptionFilePath = InputSlot(stype="filestring")
    Output = OutputSlot()

    class MissingDatasetError(Exception):
        pass

    def __init__(self, *args, **kwargs):
        super(OpBlockwiseFilesetReader, self).__init__(*args, **kwargs)
        self._blockwiseFileset = None
        self._opDummyData = OpDummyData(parent=self)

    def setupOutputs(self):
        if not os.path.exists(self.DescriptionFilePath.value):
            raise OpBlockwiseFilesetReader.MissingDatasetError(
                "Dataset description not found: {}".format(
                    self.DescriptionFilePath.value))

        # Load up the class that does the real work
        self._blockwiseFileset = BlockwiseFileset(
            self.DescriptionFilePath.value)

        # Check for errors in the description file
        descriptionFields = self._blockwiseFileset.description
        axes = descriptionFields.axes
        assert all(
            a in "txyzc" for a in axes
        ), "Unknown axis type.  Known axes: txyzc  Your axes: {}".format(axes)

        self.Output.meta.shape = tuple(descriptionFields.view_shape)
        self.Output.meta.dtype = descriptionFields.dtype
        self.Output.meta.axistags = vigra.defaultAxistags(
            str(descriptionFields.axes))
        drange = descriptionFields.drange
        if drange is not None:
            self.Output.meta.drange = drange

    def execute(self, slot, subindex, roi, result):
        assert slot == self.Output, "Unknown output slot"
        try:
            self._blockwiseFileset.readData((roi.start, roi.stop), result)
        except BlockwiseFileset.BlockNotReadyError:
            result[:] = self._opDummyData.execute(slot, subindex, roi, result)
        return result

    def propagateDirty(self, slot, subindex, roi):
        assert slot == self.DescriptionFilePath, "Unknown input slot."
        self.Output.setDirty(slice(None))

    def cleanUp(self):
        if self._blockwiseFileset is not None:
            self._blockwiseFileset.close()
        super(OpBlockwiseFilesetReader, self).cleanUp()
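
A minimal wiring sketch for the reader above, assuming the standard lazyflow Graph/slot API; the description path and the 5D slicing bounds are illustrative, not taken from the example:

# Hedged usage sketch: instantiate the reader and pull a roi through Output.
from lazyflow.graph import Graph

op = OpBlockwiseFilesetReader(graph=Graph())
op.DescriptionFilePath.setValue("/path/to/description.json")  # illustrative path
data = op.Output[0:1, 0:50, 0:50, 0:50, 0:1].wait()  # bounds must fit view_shape
op.cleanUp()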
Example #3
class OpBlockwiseFilesetReader(Operator):
    """
    Adapter that provides an operator interface to the BlockwiseFileset class for reading ONLY.
    """

    name = "OpBlockwiseFilesetReader"

    DescriptionFilePath = InputSlot(stype="filestring")
    Output = OutputSlot()

    class MissingDatasetError(Exception):
        pass

    def __init__(self, *args, **kwargs):
        super(OpBlockwiseFilesetReader, self).__init__(*args, **kwargs)
        self._blockwiseFileset = None
        self._opDummyData = OpDummyData(parent=self)

    def setupOutputs(self):
        if not os.path.exists(self.DescriptionFilePath.value):
            raise OpBlockwiseFilesetReader.MissingDatasetError(
                "Dataset description not found: {}".format(self.DescriptionFilePath.value)
            )

        # Load up the class that does the real work
        self._blockwiseFileset = BlockwiseFileset(self.DescriptionFilePath.value)

        # Check for errors in the description file
        descriptionFields = self._blockwiseFileset.description
        axes = descriptionFields.axes
        assert all(a in "txyzc" for a in axes), "Unknown axis type.  Known axes: txyzc  Your axes: {}".format(
            axes
        )

        self.Output.meta.shape = tuple(descriptionFields.view_shape)
        self.Output.meta.dtype = descriptionFields.dtype
        self.Output.meta.axistags = vigra.defaultAxistags(str(descriptionFields.axes))
        drange = descriptionFields.drange
        if drange is not None:
            self.Output.meta.drange = drange

    def execute(self, slot, subindex, roi, result):
        assert slot == self.Output, "Unknown output slot"
        try:
            self._blockwiseFileset.readData((roi.start, roi.stop), result)
        except BlockwiseFileset.BlockNotReadyError:
            result[:] = self._opDummyData.execute(slot, subindex, roi, result)
        return result

    def propagateDirty(self, slot, subindex, roi):
        assert slot == self.DescriptionFilePath, "Unknown input slot."
        self.Output.setDirty(slice(None))

    def cleanUp(self):
        if self._blockwiseFileset is not None:
            self._blockwiseFileset.close()
        super(OpBlockwiseFilesetReader, self).cleanUp()
Example #4
    def setUp(self):
        """
        Create a blockwise fileset to test with.
        """
        if platform.system() == 'Windows':
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            raise nose.SkipTest

        try:
            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            raise nose.SkipTest

        testConfig = \
        """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,

            "name" : "synapse_small",
            "format" : "hdf5",
            "axes" : "txyzc",
            "shape" : [1,400,400,100,1],
            "dtype" : "numpy.uint8",
            "block_shape" : [1, 50, 50, 50, 100],
            "block_file_name_format" : "cube{roiString}.h5/volume/data"
        }
        """
        self.tempDir = tempfile.mkdtemp()
        self.configpath = os.path.join(self.tempDir, "config.json")

        logger.debug("Loading config file...")
        with open(self.configpath, 'w') as f:
            f.write(testConfig)

        logger.debug("Creating random test data...")
        bfs = BlockwiseFileset(self.configpath, 'a')
        dataShape = tuple(bfs.description.shape)
        self.data = numpy.random.randint(255,
                                         size=dataShape).astype(numpy.uint8)

        logger.debug("Writing test data...")
        datasetRoi = ([0, 0, 0, 0, 0], dataShape)
        bfs.writeData(datasetRoi, self.data)
        block_starts = getIntersectingBlocks(bfs.description.block_shape,
                                             datasetRoi)
        for block_start in block_starts:
            bfs.setBlockStatus(block_start, BlockwiseFileset.BLOCK_AVAILABLE)
        bfs.close()
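
Reading the data back is symmetric to the write in setUp. A hedged sketch, meant to live inside the same test class and using only calls shown in these examples (the roi is illustrative):

# Read back one block's worth of the data written above.
bfs = BlockwiseFileset(self.configpath, 'r')
roi = ([0, 0, 0, 0, 0], [1, 50, 50, 50, 1])
read_back = numpy.zeros((1, 50, 50, 50, 1), dtype=numpy.uint8)
bfs.readData(roi, read_back)  # fills the caller-allocated array for (start, stop)
assert (read_back == self.data[:1, :50, :50, :50, :1]).all()
bfs.close()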
Example #5
    def test_9_TestView(self):
        """
        Load some of the dataset again; this time with an offset view.
        Note: The original blockwise fileset must be closed before this test starts.
        """
        # Create a copy of the original description, but specify a translated (and smaller) view
        desc = BlockwiseFileset.readDescription(self.description_path)
        desc.view_origin = [0, 300, 200, 100, 0]
        desc.view_shape = [1, 50, 50, 50, 1]
        offsetConfigPath = self.description_path + "_offset"
        BlockwiseFileset.writeDescription(offsetConfigPath, desc)

        # Open the fileset using the special description file
        bfs = BlockwiseFileset(offsetConfigPath, "r")
        try:
            assert (bfs.description.view_origin == desc.view_origin).all()
            assert (bfs.description.view_shape == desc.view_shape).all()

            # Read some data
            logger.debug("Reading data...")
            disk_slicing = numpy.s_[:, 300:350, 200:250, 100:150, :]
            view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
            roi = sliceToRoi(view_slicing, self.dataShape)
            roiShape = roi[1] - roi[0]
            read_data = numpy.zeros(tuple(roiShape), dtype=numpy.uint8)

            bfs.readData(roi, read_data)

            # The data we read should match the correct part of the original dataset.
            logger.debug("Checking data...")
            assert self.data[disk_slicing].shape == read_data.shape
            assert (self.data[disk_slicing] == read_data).all(), "Data didn't match."

        finally:
            bfs.close()
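
The view arithmetic this test relies on: a coordinate in the offset view corresponds to view_origin plus that coordinate on disk, which is why disk_slicing [300:350, 200:250, 100:150] pairs with view_slicing [0:50, 0:50, 0:50]. A standalone check with illustrative values:

import numpy

view_origin = numpy.array([0, 300, 200, 100, 0])
view_coord = numpy.array([0, 10, 20, 30, 0])
# A view coordinate maps to view_origin + coordinate in disk space.
assert tuple(view_origin + view_coord) == (0, 310, 220, 130, 0)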
Example #6
    def setupOutputs(self):
        if not os.path.exists(self.DescriptionFilePath.value):
            raise OpBlockwiseFilesetReader.MissingDatasetError("Dataset description not found: {}".format( self.DescriptionFilePath.value ) )

        # Load up the class that does the real work
        self._blockwiseFileset = BlockwiseFileset( self.DescriptionFilePath.value )

        # Check for errors in the description file
        descriptionFields = self._blockwiseFileset.description
        axes = descriptionFields.axes
        assert all(a in 'txyzc' for a in axes), "Unknown axis type.  Known axes: txyzc  Your axes: {}".format(axes)

        self.Output.meta.shape = tuple(descriptionFields.view_shape)
        self.Output.meta.dtype = descriptionFields.dtype
        self.Output.meta.axistags = vigra.defaultAxistags(descriptionFields.axes)
        drange = descriptionFields.drange
        if drange is not None:
            self.Output.meta.drange = drange
Example #7
    def setUp(self):
        """
        Create a blockwise fileset to test with.
        """
        if platform.system() == 'Windows':
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            raise nose.SkipTest
        
        try:
            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            raise nose.SkipTest
        
        testConfig = \
        """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,

            "name" : "synapse_small",
            "format" : "hdf5",
            "axes" : "txyzc",
            "shape" : [1,400,400,100,1],
            "dtype" : "numpy.uint8",
            "block_shape" : [1, 50, 50, 50, 100],
            "block_file_name_format" : "cube{roiString}.h5/volume/data"
        }
        """
        self.tempDir = tempfile.mkdtemp()
        self.configpath = os.path.join(self.tempDir, "config.json")

        logger.debug( "Loading config file..." )
        with open(self.configpath, 'w') as f:
            f.write(testConfig)
        
        logger.debug( "Creating random test data..." )
        bfs = BlockwiseFileset( self.configpath, 'a' )
        dataShape = tuple(bfs.description.shape)
        self.data = numpy.random.randint( 255, size=dataShape ).astype(numpy.uint8)
        
        logger.debug( "Writing test data..." )
        datasetRoi = ([0,0,0,0,0], dataShape)
        bfs.writeData( datasetRoi, self.data )
        block_starts = getIntersectingBlocks(bfs.description.block_shape, datasetRoi)
        for block_start in block_starts:
            bfs.setBlockStatus(block_start, BlockwiseFileset.BLOCK_AVAILABLE)
        bfs.close()
Example #8
    def setup_class(cls):
        if platform.system() == "Windows":
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            pytest.skip("Windows")

        try:
            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            pytest.skip(
                "System is not configured to allow opening a lot of files")

        testConfig = """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,
            "name" : "synapse_small",
            "format" : "hdf5",
            "axes" : "txyzc",
            "shape" : [1,10,20,5,1],
            "dtype" : "object",
            "block_shape" : [1, 5, 4, 1, 100],
            "block_file_name_format" : "cube{roiString}.h5/volume/data"
        }
        """
        cls.tempDir = tempfile.mkdtemp()
        cls.description_path = os.path.join(cls.tempDir, "config.json")
        with open(cls.description_path, "w") as f:
            f.write(testConfig)

        logger.debug("Loading config file...")
        cls.bfs = BlockwiseFileset(cls.description_path, "a")
        cls.dataShape = tuple(cls.bfs.description.shape)

        def make_dummy_dict(x):
            return {str(x): numpy.array([x, x])}

        vec_make_dummy_dict = numpy.vectorize(make_dummy_dict)

        int_data = numpy.random.randint(255,
                                        size=cls.dataShape).astype(numpy.uint8)
        dict_data = vec_make_dummy_dict(int_data)
        cls.data = dict_data
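
The numpy.vectorize trick above is what makes the "object" dtype in the description work: a vectorized function that returns a dict produces an object-dtype array of dicts with the same shape as its input. A standalone illustration:

import numpy

# vectorize infers an object otype because the function returns a dict.
make = numpy.vectorize(lambda x: {str(x): numpy.array([x, x])})
arr = make(numpy.arange(4).reshape(2, 2))
assert arr.shape == (2, 2) and arr.dtype == object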
Example #9
    def test_6_TestExportSubset(self):
        roi = ((0, 0, 50, 100, 0), (1, 100, 200, 200, 1))
        exportDir = tempfile.mkdtemp()
        self.bfs.close()
        self.bfs.reopen("r")
        exported_description_path = self.bfs.exportSubset(roi, exportDir)

        try:
            exported_bfs = BlockwiseFileset(exported_description_path, "r")
            assert os.path.exists(exported_description_path), "Couldn't even find the exported description file."

            read_data = exported_bfs.readData(roi)
            expected_data = self.data[roiToSlice(*roi)]

            assert read_data.shape == expected_data.shape, "Exported data had wrong shape"
            assert read_data.dtype == expected_data.dtype, "Exported data had wrong dtype"
            assert (read_data == expected_data).all(), "Exported data did not match expected data"

        finally:
            shutil.rmtree(exportDir)
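
roiToSlice is only used here to index a numpy array with a (start, stop) roi. A minimal stand-in with the behavior assumed by this test (hypothetical; not the lazyflow implementation):

def roiToSlice(start, stop):
    # Convert a (start, stop) roi into a tuple of slices for numpy indexing.
    return tuple(slice(a, b) for a, b in zip(start, stop))

roi = ((0, 0, 50, 100, 0), (1, 100, 200, 200, 1))
assert roiToSlice(*roi)[2] == slice(50, 200)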
Example #10
    def setup_class(cls):
        if platform.system() == "Windows":
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            raise nose.SkipTest

        try:
            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            raise nose.SkipTest

        testConfig = """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,
            "name" : "synapse_small",
            "format" : "hdf5",
            "axes" : "txyzc",
            "shape" : [1,10,20,5,1],
            "dtype" : "object",
            "block_shape" : [1, 5, 4, 1, 100],
            "block_file_name_format" : "cube{roiString}.h5/volume/data"
        }
        """
        cls.tempDir = tempfile.mkdtemp()
        cls.description_path = os.path.join(cls.tempDir, "config.json")
        with open(cls.description_path, "w") as f:
            f.write(testConfig)

        logger.debug("Loading config file...")
        cls.bfs = BlockwiseFileset(cls.description_path, "a")
        cls.dataShape = tuple(cls.bfs.description.shape)

        def make_dummy_dict(x):
            return {str(x): numpy.array([x, x])}

        vec_make_dummy_dict = numpy.vectorize(make_dummy_dict)

        int_data = numpy.random.randint(255, size=cls.dataShape).astype(numpy.uint8)
        dict_data = vec_make_dummy_dict(int_data)
        cls.data = dict_data
Example #11
    def setup_class(cls):
        if platform.system() == 'Windows':
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            raise nose.SkipTest

        try:
            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            raise nose.SkipTest

        testConfig = \
        """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,
            "name" : "synapse_small",
            "format" : "hdf5",
            "axes" : "txyzc",
            "shape" : [1,400,400,200,1],
            "dtype" : "numpy.uint8",
            "compression" : "lzf",
            "block_shape" : [1, 50, 50, 50, 100],
            "block_file_name_format" : "cube{roiString}.h5/volume/data"
        }
        """
        cls.tempDir = tempfile.mkdtemp()
        cls.description_path = os.path.join(cls.tempDir, "description.json")
        with open(cls.description_path, 'w') as f:
            f.write(testConfig)

        logger.debug("Loading config file...")
        cls.bfs = BlockwiseFileset(cls.description_path, 'a')
        cls.dataShape = tuple(cls.bfs.description.shape)

        logger.debug("Creating random test data...")
        cls.data = numpy.random.randint(255,
                                        size=cls.dataShape).astype(numpy.uint8)
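
The "compression" : "lzf" field corresponds to HDF5's lzf filter. With h5py, the equivalent standalone dataset creation looks like this (illustrative only; the fileset class handles this internally):

import h5py
import numpy

with h5py.File("/tmp/lzf_example.h5", "w") as f:
    # Same dataset layout as the block files above: group "volume", dataset "data".
    f.create_dataset("volume/data", data=numpy.zeros((50, 50), dtype=numpy.uint8), compression="lzf")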
Example #12
    def test_6_TestExportSubset(self):
        roi = ((0, 0, 50, 100, 0), (1, 100, 200, 200, 1))
        exportDir = tempfile.mkdtemp()
        self.bfs.close()
        self.bfs.reopen("r")
        exported_description_path = self.bfs.exportSubset(roi, exportDir)

        try:
            exported_bfs = BlockwiseFileset(exported_description_path, "r")
            assert os.path.exists(
                exported_description_path
            ), "Couldn't even find the exported description file."

            read_data = exported_bfs.readData(roi)
            expected_data = self.data[roiToSlice(*roi)]

            assert read_data.shape == expected_data.shape, "Exported data had wrong shape"
            assert read_data.dtype == expected_data.dtype, "Exported data had wrong dtype"
            assert (read_data == expected_data
                    ).all(), "Exported data did not match expected data"

        finally:
            shutil.rmtree(exportDir)
Example #13
    def setupClass(cls):
        if platform.system() == 'Windows':
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            raise nose.SkipTest

        try:
            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            raise nose.SkipTest

        testConfig = """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,
            "name" : "synapse_small",
            "format" : "hdf5",
            "axes" : "txyzc",
            "shape" : [1,400,400,200,1],
            "dtype" : "numpy.uint8",
            "compression" : "lzf",
            "block_shape" : [1, 50, 50, 50, 100],
            "block_file_name_format" : "cube{roiString}.h5/volume/data"
        }
        """
        cls.tempDir = tempfile.mkdtemp()
        cls.description_path = os.path.join(cls.tempDir, "description.json")
        with open(cls.description_path, 'w') as f:
            f.write(testConfig)

        logger.debug("Loading config file...")
        cls.bfs = BlockwiseFileset(cls.description_path, 'a')
        cls.dataShape = tuple(cls.bfs.description.shape)

        logger.debug("Creating random test data...")
        cls.data = numpy.random.randint(255, size=cls.dataShape).astype(numpy.uint8)
Example #14
    def test_9_TestView(self):
        """
        Load some of the dataset again; this time with an offset view.
        Note: The original blockwise fileset must be closed before this test starts.
        """
        # Create a copy of the original description, but specify a translated (and smaller) view
        desc = BlockwiseFileset.readDescription(self.description_path)
        desc.view_origin = [0, 300, 200, 100, 0]
        desc.view_shape = [1, 50, 50, 50, 1]
        offsetConfigPath = self.description_path + "_offset"
        BlockwiseFileset.writeDescription(offsetConfigPath, desc)

        # Open the fileset using the special description file
        bfs = BlockwiseFileset(offsetConfigPath, "r")
        try:
            assert (bfs.description.view_origin == desc.view_origin).all()
            assert (bfs.description.view_shape == desc.view_shape).all()

            # Read some data
            logger.debug("Reading data...")
            disk_slicing = numpy.s_[:, 300:350, 200:250, 100:150, :]
            view_slicing = numpy.s_[:, 0:50, 0:50, 0:50, :]
            roi = sliceToRoi(view_slicing, self.dataShape)
            roiShape = roi[1] - roi[0]
            read_data = numpy.zeros(tuple(roiShape), dtype=numpy.uint8)

            bfs.readData(roi, read_data)

            # The data we read should match the correct part of the original dataset.
            logger.debug("Checking data...")
            assert self.data[disk_slicing].shape == read_data.shape
            assert (self.data[disk_slicing] == read_data
                    ).all(), "Data didn't match."

        finally:
            bfs.close()
Example #15
    def setup_class(cls):
        # The openconnectome site appears to be down at the moment.
        # This test fails when that happens...
        raise nose.SkipTest

        if platform.system() == "Windows":
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            raise nose.SkipTest

        try:
            from lazyflow.utility.io_util.blockwiseFileset import BlockwiseFileset

            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            raise nose.SkipTest

        cls.tempDir = tempfile.mkdtemp()
        logger.debug("Working in {}".format(cls.tempDir))

        # Create the two sub-descriptions
        Bock11VolumeDescription = """
        {
            "_schema_name" : "RESTful-volume-description",
            "_schema_version" : 1.0,

            "name" : "Bock11-level0",
            "format" : "hdf5",
            "axes" : "zyx",
            "## NOTE 1": "The first z-slice of the bock dataset is 2917, so the origin_offset must be at least 2917",
            "## NOTE 2": "The website says that the data goes up to plane 4156, but it actually errors out past 4150",
            "origin_offset" : [2917, 0, 0],
            "bounds" : [4150, 135424, 119808],
            "dtype" : "numpy.uint8",
            "url_format" : "http://openconnecto.me/ocp/ca/bock11/hdf5/0/{x_start},{x_stop}/{y_start},{y_stop}/{z_start},{z_stop}/",
            "hdf5_dataset" : "CUTOUT"
        }
        """

        blockwiseFilesetDescription = """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,

            "name" : "bock11-blocks",
            "format" : "hdf5",
            "axes" : "zyx",
            "shape" : [40,40,40],
            "dtype" : "numpy.uint8",
            "block_shape" : [20, 20, 20],
            "block_file_name_format" : "block-{roiString}.h5/CUTOUT"
        }
        """

        # Combine them into the composite description (see RESTfulBlockwiseFileset.DescriptionFields)
        compositeDescription = """
        {{
            "_schema_name" : "RESTful-blockwise-fileset-description",
            "_schema_version" : 1.0,

            "remote_description" : {remote_description},
            "local_description" : {local_description}
        }}
        """.format(
            remote_description=Bock11VolumeDescription, local_description=blockwiseFilesetDescription
        )

        # Create the description file
        cls.descriptionFilePath = os.path.join(cls.tempDir, "description.json")
        with open(cls.descriptionFilePath, "w") as f:
            f.write(compositeDescription)
Example #16
    def setup_class(cls):
        # The openconnectome site appears to be down at the moment.
        # This test fails when that happens...
        raise nose.SkipTest

        if platform.system() == "Windows":
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            raise nose.SkipTest

        try:
            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            raise nose.SkipTest

        cls.tempDir = tempfile.mkdtemp()
        logger.debug("Working in {}".format(cls.tempDir))

        # Create the two sub-descriptions
        Bock11VolumeDescription = """
        {
            "_schema_name" : "RESTful-volume-description",
            "_schema_version" : 1.0,

            "name" : "Bock11-level0",
            "format" : "hdf5",
            "axes" : "zyx",
            "##NOTE":"The first z-slice of the bock dataset is 2917, so the origin_offset must be at least 2917",
            "origin_offset" : [2917, 50000, 50000],
            "bounds" : [4156, 135424, 119808],
            "dtype" : "numpy.uint8",
            "url_format" : "http://openconnecto.me/ocp/ca/bock11/hdf5/0/{x_start},{x_stop}/{y_start},{y_stop}/{z_start},{z_stop}/",
            "hdf5_dataset" : "CUTOUT"
        }
        """

        blockwiseFilesetDescription = """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,

            "name" : "bock11-blocks",
            "format" : "hdf5",
            "axes" : "zyx",
            "shape" : [40,40,40],
            "dtype" : "numpy.uint8",
            "block_shape" : [20, 20, 20],
            "block_file_name_format" : "block-{roiString}.h5/CUTOUT",
            "dataset_root_dir" : "blocks"
        }
        """

        # Combine them into the composite description (see RESTfulBlockwiseFileset.DescriptionFields)
        compositeDescription = """
        {{
            "_schema_name" : "RESTful-blockwise-fileset-description",
            "_schema_version" : 1.0,

            "remote_description" : {remote_description},
            "local_description" : {local_description}
        }}
        """.format(
            remote_description=Bock11VolumeDescription, local_description=blockwiseFilesetDescription
        )

        # Create the description file
        cls.descriptionFilePath = os.path.join(cls.tempDir, "description.json")
        with open(cls.descriptionFilePath, "w") as f:
            f.write(compositeDescription)

        # Create a new fileset that views the same data and stores it the
        #  same way locally, but this time we'll use an offset 'view'
        # Start with a copy of the non-offset description
        offsetDescription = RESTfulBlockwiseFileset.readDescription(cls.descriptionFilePath)
        offsetDescription.local_description.view_origin = numpy.array([0, 20, 0])
        offsetDescription.local_description.dataset_root_dir = "offset_blocks"
        cls.descriptionFilePath_offset = os.path.join(cls.tempDir, "description_offset.json")
        RESTfulBlockwiseFileset.writeDescription(cls.descriptionFilePath_offset, offsetDescription)
Example #17
    def setupClass(cls):
        # The openconnectome site appears to be down at the moment.
        # This test fails when that happens...
        raise nose.SkipTest

        if platform.system() == 'Windows':
            # On windows, there are errors, and we make no attempt to solve them (at the moment).
            raise nose.SkipTest

        try:
            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            raise nose.SkipTest

        cls.tempDir = tempfile.mkdtemp()
        logger.debug("Working in {}".format(cls.tempDir))

        # Create the two sub-descriptions
        Bock11VolumeDescription = """
        {
            "_schema_name" : "RESTful-volume-description",
            "_schema_version" : 1.0,
        
            "name" : "Bock11-level0",
            "format" : "hdf5",
            "axes" : "zyx",
            "##NOTE":"The first z-slice of the bock dataset is 2917, so the origin_offset must be at least 2917",
            "origin_offset" : [2917, 50000, 50000],
            "bounds" : [4156, 135424, 119808],
            "dtype" : "numpy.uint8",
            "url_format" : "http://openconnecto.me/ocp/ca/bock11/hdf5/0/{x_start},{x_stop}/{y_start},{y_stop}/{z_start},{z_stop}/",
            "hdf5_dataset" : "CUTOUT"
        }
        """

        blockwiseFilesetDescription = \
        """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,

            "name" : "bock11-blocks",
            "format" : "hdf5",
            "axes" : "zyx",
            "shape" : [40,40,40],
            "dtype" : "numpy.uint8",
            "block_shape" : [20, 20, 20],
            "block_file_name_format" : "block-{roiString}.h5/CUTOUT",
            "dataset_root_dir" : "blocks"
        }
        """

        # Combine them into the composite description (see RESTfulBlockwiseFileset.DescriptionFields)
        compositeDescription = \
        """
        {{
            "_schema_name" : "RESTful-blockwise-fileset-description",
            "_schema_version" : 1.0,

            "remote_description" : {remote_description},
            "local_description" : {local_description}        
        }}
        """.format( remote_description=Bock11VolumeDescription, local_description=blockwiseFilesetDescription )

        # Create the description file
        cls.descriptionFilePath = os.path.join(cls.tempDir, "description.json")
        with open(cls.descriptionFilePath, 'w') as f:
            f.write(compositeDescription)

        # Create a new fileset that views the same data and stores it the
        #  same way locally, but this time we'll use an offset 'view'
        # Start with a copy of the non-offset description
        offsetDescription = RESTfulBlockwiseFileset.readDescription(
            cls.descriptionFilePath)
        offsetDescription.local_description.view_origin = numpy.array(
            [0, 20, 0])
        offsetDescription.local_description.dataset_root_dir = "offset_blocks"
        cls.descriptionFilePath_offset = os.path.join(
            cls.tempDir, "description_offset.json")
        RESTfulBlockwiseFileset.writeDescription(
            cls.descriptionFilePath_offset, offsetDescription)
Example #18
    def _prepareDestination(self):
        """
        - If the result file doesn't exist yet, create it (and the dataset)
        - If the result file already exists, return a list of the rois that
        are NOT needed (their data already exists in the final output)
        """
        originalDescription = BlockwiseFileset.readDescription(self.OutputDatasetDescription.value)
        datasetDescription = copy.deepcopy(originalDescription)

        # Modify description fields as needed
        # -- axes
        datasetDescription.axes = "".join(list(self.Input.meta.getTaggedShape().keys()))
        assert set(originalDescription.axes) == set(datasetDescription.axes), (
            "Can't prepare destination dataset: original dataset description listed "
            "axes as {}, but actual output axes are {}".format(originalDescription.axes, datasetDescription.axes)
        )

        # -- shape
        datasetDescription.view_shape = list(self.Input.meta.shape)
        # -- block_shape
        assert originalDescription.block_shape is not None
        originalBlockDims = collections.OrderedDict(
            list(zip(originalDescription.axes, originalDescription.block_shape))
        )
        datasetDescription.block_shape = [originalBlockDims[a] for a in datasetDescription.axes]
        datasetDescription.block_shape = list(
            map(min, list(zip(datasetDescription.block_shape, self.Input.meta.shape)))
        )
        # -- chunks
        if originalDescription.chunks is not None:
            originalChunkDims = collections.OrderedDict(list(zip(originalDescription.axes, originalDescription.chunks)))
            datasetDescription.chunks = [originalChunkDims[a] for a in datasetDescription.axes]
            datasetDescription.chunks = list(map(min, list(zip(datasetDescription.chunks, self.Input.meta.shape))))
        # -- dtype
        if datasetDescription.dtype != self.Input.meta.dtype:
            dtype = self.Input.meta.dtype
            if type(dtype) is numpy.dtype:
                dtype = dtype.type
            datasetDescription.dtype = dtype().__class__.__name__

        # Create a unique hash for this blocking scheme.
        # If it changes, we can't use any previous data.
        sha = hashlib.sha1()
        sha.update(str(tuple(datasetDescription.block_shape)).encode("utf-8"))
        sha.update(datasetDescription.axes.encode("utf-8"))
        sha.update(datasetDescription.block_file_name_format.encode("utf-8"))

        datasetDescription.hash_id = sha.hexdigest()

        if datasetDescription != originalDescription:
            descriptionFilePath = self.OutputDatasetDescription.value
            logger.info("Overwriting dataset description: {}".format(descriptionFilePath))
            BlockwiseFileset.writeDescription(descriptionFilePath, datasetDescription)
            with open(descriptionFilePath, "r") as f:
                logger.info(f.read())

        # Now open the dataset
        blockwiseFileset = BlockwiseFileset(self.OutputDatasetDescription.value)

        taskInfos = self._prepareTaskInfos(blockwiseFileset.getAllBlockRois())

        if blockwiseFileset.description.hash_id != originalDescription.hash_id:
            # Something about our blocking scheme changed.
            # Make sure all blocks are marked as NOT available.
            # (Just in case some were left over from a previous run.)
            for roi in list(taskInfos.keys()):
                blockwiseFileset.setBlockStatus(roi[0], BlockwiseFileset.BLOCK_NOT_AVAILABLE)

        return blockwiseFileset, taskInfos
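
A note on the .encode() calls in the hashing step above: in Python 3, hashlib digests operate on bytes, so every str must be encoded before update(). Standalone, with illustrative values:

import hashlib

sha = hashlib.sha1()
sha.update(str((1, 50, 50, 50, 100)).encode("utf-8"))  # block_shape
sha.update("txyzc".encode("utf-8"))                    # axes
print(sha.hexdigest())  # a stable id for this blocking scheme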
Example #19
    def setupOutputs(self):
        self.ReturnCode.meta.dtype = bool
        self.ReturnCode.meta.shape = (1,)

        self._closeFiles()
        self._primaryBlockwiseFileset = BlockwiseFileset(self.OutputFilesetDescription.value, 'a')
Example #20
class OpTaskWorker(Operator):
    Input = InputSlot()
    RoiString = InputSlot(stype="string")
    TaskName = InputSlot(stype="string")
    ConfigFilePath = InputSlot(stype="filestring")
    OutputFilesetDescription = InputSlot(stype="filestring")

    ReturnCode = OutputSlot()

    def __init__(self, *args, **kwargs):
        super(OpTaskWorker, self).__init__(*args, **kwargs)
        self.progressSignal = OrderedSignal()
        self._primaryBlockwiseFileset = None

    def setupOutputs(self):
        self.ReturnCode.meta.dtype = bool
        self.ReturnCode.meta.shape = (1,)

        self._closeFiles()
        self._primaryBlockwiseFileset = BlockwiseFileset(self.OutputFilesetDescription.value, "a")

    def cleanUp(self):
        self._closeFiles()
        super(OpTaskWorker, self).cleanUp()

    def _closeFiles(self):
        if self._primaryBlockwiseFileset is not None:
            self._primaryBlockwiseFileset.close()
        self._primaryBlockwiseFileset = None

    def execute(self, slot, subindex, ignored_roi, result):
        configFilePath = self.ConfigFilePath.value
        config = parseClusterConfigFile(configFilePath)

        blockwiseFileset = self._primaryBlockwiseFileset

        # Check axis compatibility
        inputAxes = list(self.Input.meta.getTaggedShape().keys())
        outputAxes = list(blockwiseFileset.description.axes)
        assert set(inputAxes) == set(
            outputAxes
        ), "Output dataset has the wrong set of axes.  Input axes: {}, Output axes: {}".format(
            "".join(inputAxes), "".join(outputAxes)
        )

        roiString = self.RoiString.value
        roi = Roi.loads(roiString)
        if len(roi.start) != len(self.Input.meta.shape):
            assert (
                False
            ), "Task roi: {} is not valid for this input.  Did the master launch this task correctly?".format(roiString)

        logger.info("Executing for roi: {}".format(roi))

        if config.use_node_local_scratch:
            assert False, "FIXME."

        assert (
            blockwiseFileset.getEntireBlockRoi(roi.start)[1] == roi.stop
        ).all(), "Each task must execute exactly one full block.  ({},{}) is not a valid block roi.".format(
            roi.start, roi.stop
        )
        assert self.Input.ready()

        with Timer() as computeTimer:
            # Stream the data out to disk.
            request_blockshape = (
                self._primaryBlockwiseFileset.description.sub_block_shape
            )  # Could be None.  That's okay.
            streamer = BigRequestStreamer(self.Input, (roi.start, roi.stop), request_blockshape)
            streamer.progressSignal.subscribe(self.progressSignal)
            streamer.resultSignal.subscribe(self._handlePrimaryResultBlock)
            streamer.execute()

            # Now the block is ready.  Update the status.
            blockwiseFileset.setBlockStatus(roi.start, BlockwiseFileset.BLOCK_AVAILABLE)

        logger.info("Finished task in {} seconds".format(computeTimer.seconds()))
        result[0] = True
        return result

    def propagateDirty(self, slot, subindex, roi):
        self.ReturnCode.setDirty(slice(None))

    def _handlePrimaryResultBlock(self, roi, result):
        # First write the primary
        self._primaryBlockwiseFileset.writeData(roi, result)

        # Ask the workflow if there is any special post-processing to do...
        self.get_workflow().postprocessClusterSubResult(roi, result, self._primaryBlockwiseFileset)

    def get_workflow(self):
        op = self
        while not isinstance(op, Workflow):
            op = op.parent
        return op
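
The streaming pattern in execute() stands on its own: BigRequestStreamer splits a large roi into block-sized requests and fires resultSignal once per finished sub-block. A hedged sketch of that pattern, with the function name and parameters as placeholders:

def stream_roi_to_fileset(source_slot, roi_start, roi_stop, blockshape, fileset):
    # Hedged sketch of OpTaskWorker.execute() above: stream a large roi to
    # disk block by block, writing each sub-result as it arrives.
    streamer = BigRequestStreamer(source_slot, (roi_start, roi_stop), blockshape)
    streamer.resultSignal.subscribe(lambda roi, result: fileset.writeData(roi, result))
    streamer.execute()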
Example #21
    def _prepareDestination(self):
        """
        - If the result file doesn't exist yet, create it (and the dataset)
        - If the result file already exists, return a list of the rois that 
        are NOT needed (their data already exists in the final output)
        """
        originalDescription = BlockwiseFileset.readDescription(self.OutputDatasetDescription.value)
        datasetDescription = copy.deepcopy(originalDescription)

        # Modify description fields as needed
        # -- axes
        datasetDescription.axes = "".join( list(self.Input.meta.getTaggedShape().keys()) )
        assert set(originalDescription.axes) == set( datasetDescription.axes ), \
            "Can't prepare destination dataset: original dataset description listed " \
            "axes as {}, but actual output axes are {}".format( originalDescription.axes, datasetDescription.axes )

        # -- shape
        datasetDescription.view_shape = list(self.Input.meta.shape)
        # -- block_shape
        assert originalDescription.block_shape is not None
        originalBlockDims = collections.OrderedDict( list(zip( originalDescription.axes, originalDescription.block_shape )) )
        datasetDescription.block_shape = [originalBlockDims[a] for a in datasetDescription.axes]
        datasetDescription.block_shape = list(map( min, list(zip( datasetDescription.block_shape, self.Input.meta.shape )) ))
        # -- chunks
        if originalDescription.chunks is not None:
            originalChunkDims = collections.OrderedDict( list(zip( originalDescription.axes, originalDescription.chunks )) )
            datasetDescription.chunks = [originalChunkDims[a] for a in datasetDescription.axes]
            datasetDescription.chunks = list(map( min, list(zip( datasetDescription.chunks, self.Input.meta.shape )) ))
        # -- dtype
        if datasetDescription.dtype != self.Input.meta.dtype:
            dtype = self.Input.meta.dtype
            if type(dtype) is numpy.dtype:
                dtype = dtype.type
            datasetDescription.dtype = dtype().__class__.__name__

        # Create a unique hash for this blocking scheme.
        # If it changes, we can't use any previous data.
        sha = hashlib.sha1()
        sha.update(str(tuple(datasetDescription.block_shape)).encode("utf-8"))
        sha.update(datasetDescription.axes.encode("utf-8"))
        sha.update(datasetDescription.block_file_name_format.encode("utf-8"))

        datasetDescription.hash_id = sha.hexdigest()

        if datasetDescription != originalDescription:
            descriptionFilePath = self.OutputDatasetDescription.value
            logger.info( "Overwriting dataset description: {}".format( descriptionFilePath ) )
            BlockwiseFileset.writeDescription(descriptionFilePath, datasetDescription)
            with open( descriptionFilePath, 'r' ) as f:
                logger.info( f.read() )

        # Now open the dataset
        blockwiseFileset = BlockwiseFileset( self.OutputDatasetDescription.value )
        
        taskInfos = self._prepareTaskInfos( blockwiseFileset.getAllBlockRois() )
        
        if blockwiseFileset.description.hash_id != originalDescription.hash_id:
            # Something about our blocking scheme changed.
            # Make sure all blocks are marked as NOT available.
            # (Just in case some were left over from a previous run.)
            for roi in list(taskInfos.keys()):
                blockwiseFileset.setBlockStatus( roi[0], BlockwiseFileset.BLOCK_NOT_AVAILABLE )

        return blockwiseFileset, taskInfos
Example #22
    def setup_class(cls):
        if platform.system() == "Windows":
            pytest.skip("Windows")

        try:
            from lazyflow.utility.io_util.blockwiseFileset import BlockwiseFileset

            BlockwiseFileset._prepare_system()
        except ValueError:
            # If the system isn't configured to allow lots of open files, we can't run this test.
            pytest.skip(
                "System is not configured to allow opening a lot of files")

        cls.tempDir = tempfile.mkdtemp()
        logger.debug("Working in {}".format(cls.tempDir))

        # Create the two sub-descriptions
        Bock11VolumeDescription = """
        {
            "_schema_name" : "RESTful-volume-description",
            "_schema_version" : 1.0,

            "name" : "Bock11-level0",
            "format" : "hdf5",
            "axes" : "zyx",
            "## NOTE 1": "The first z-slice of the bock dataset is 2917, so the origin_offset must be at least 2917",
            "## NOTE 2": "The website says that the data goes up to plane 4156, but it actually errors out past 4150",
            "origin_offset" : [2917, 0, 0],
            "bounds" : [4150, 135424, 119808],
            "dtype" : "numpy.uint8",
            "url_format" : "http://openconnecto.me/ocp/ca/bock11/hdf5/0/{x_start},{x_stop}/{y_start},{y_stop}/{z_start},{z_stop}/",
            "hdf5_dataset" : "CUTOUT"
        }
        """

        blockwiseFilesetDescription = """
        {
            "_schema_name" : "blockwise-fileset-description",
            "_schema_version" : 1.0,

            "name" : "bock11-blocks",
            "format" : "hdf5",
            "axes" : "zyx",
            "shape" : [40,40,40],
            "dtype" : "numpy.uint8",
            "block_shape" : [20, 20, 20],
            "block_file_name_format" : "block-{roiString}.h5/CUTOUT"
        }
        """

        # Combine them into the composite description (see RESTfulBlockwiseFileset.DescriptionFields)
        compositeDescription = """
        {{
            "_schema_name" : "RESTful-blockwise-fileset-description",
            "_schema_version" : 1.0,

            "remote_description" : {remote_description},
            "local_description" : {local_description}
        }}
        """.format(remote_description=Bock11VolumeDescription,
                   local_description=blockwiseFilesetDescription)

        # Create the description file
        cls.descriptionFilePath = os.path.join(cls.tempDir, "description.json")
        with open(cls.descriptionFilePath, "w") as f:
            f.write(compositeDescription)
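
Since the composite description embeds both sub-descriptions verbatim (the doubled braces escape the outer object for str.format), the result can be sanity-checked before writing:

import json

json.loads(compositeDescription)  # raises ValueError if the composed JSON is malformed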
Example #23
class OpTaskWorker(Operator):
    Input = InputSlot()
    RoiString = InputSlot(stype='string')
    TaskName = InputSlot(stype='string')
    ConfigFilePath = InputSlot(stype='filestring')
    OutputFilesetDescription = InputSlot(stype='filestring')

    ReturnCode = OutputSlot()

    def __init__(self, *args, **kwargs):
        super( OpTaskWorker, self ).__init__( *args, **kwargs )
        self.progressSignal = OrderedSignal()
        self._primaryBlockwiseFileset = None

    def setupOutputs(self):
        self.ReturnCode.meta.dtype = bool
        self.ReturnCode.meta.shape = (1,)
        
        self._closeFiles()
        self._primaryBlockwiseFileset = BlockwiseFileset( self.OutputFilesetDescription.value, 'a' )        
    
    def cleanUp(self):
        self._closeFiles()
        super( OpTaskWorker, self ).cleanUp()

    def _closeFiles(self):
        if self._primaryBlockwiseFileset is not None:
            self._primaryBlockwiseFileset.close()
        self._primaryBlockwiseFileset = None

    def execute(self, slot, subindex, ignored_roi, result):
        configFilePath = self.ConfigFilePath.value
        config = parseClusterConfigFile( configFilePath )        
        
        blockwiseFileset = self._primaryBlockwiseFileset
        
        # Check axis compatibility
        inputAxes = list(self.Input.meta.getTaggedShape().keys())
        outputAxes = list(blockwiseFileset.description.axes)
        assert set(inputAxes) == set(outputAxes), \
            "Output dataset has the wrong set of axes.  Input axes: {}, Output axes: {}".format( "".join(inputAxes), "".join(outputAxes) )
        
        roiString = self.RoiString.value
        roi = Roi.loads(roiString)
        if len( roi.start ) != len( self.Input.meta.shape ):
            assert False, "Task roi: {} is not valid for this input.  Did the master launch this task correctly?".format( roiString )

        logger.info( "Executing for roi: {}".format(roi) )

        if config.use_node_local_scratch:
            assert False, "FIXME."

        assert (blockwiseFileset.getEntireBlockRoi( roi.start )[1] == roi.stop).all(), "Each task must execute exactly one full block.  ({},{}) is not a valid block roi.".format( roi.start, roi.stop )
        assert self.Input.ready()

        with Timer() as computeTimer:
            # Stream the data out to disk.
            request_blockshape = self._primaryBlockwiseFileset.description.sub_block_shape # Could be None.  That's okay.
            streamer = BigRequestStreamer(self.Input, (roi.start, roi.stop), request_blockshape )
            streamer.progressSignal.subscribe( self.progressSignal )
            streamer.resultSignal.subscribe( self._handlePrimaryResultBlock )
            streamer.execute()

            # Now the block is ready.  Update the status.
            blockwiseFileset.setBlockStatus( roi.start, BlockwiseFileset.BLOCK_AVAILABLE )

        logger.info( "Finished task in {} seconds".format( computeTimer.seconds() ) )
        result[0] = True
        return result

    def propagateDirty(self, slot, subindex, roi):
        self.ReturnCode.setDirty( slice(None) )
        
    def _handlePrimaryResultBlock(self, roi, result):
        # First write the primary
        self._primaryBlockwiseFileset.writeData(roi, result)

        # Ask the workflow if there is any special post-processing to do...
        self.get_workflow().postprocessClusterSubResult(roi, result, self._primaryBlockwiseFileset)

    def get_workflow(self):
        op = self
        while not isinstance(op, Workflow):
            op = op.parent
        return op