def _create_grayscale_instances(self, volume_config):
        """
        Create the grayscale instance(s) in DVID for the given volume configuration.

        In DVID, grayscale data is stored in instances of type 'uint8blk',
        which has no concept of scale.

        Instead, multi-scale volumes are represented by creating multiple instances,
        with the scale indicated by a suffix (except for scale 0).

        For example:
            - grayscale # scale 0
            - grayscale_1
            - grayscale_2
            - grayscale_3
            - ...
        """
        settings = volume_config["dvid"]["creation-settings"]

        block_width = volume_config["geometry"]["block-width"]

        pyramid_depth = settings["max-scale"]
        if pyramid_depth == -1:
            pyramid_depth = choose_pyramid_depth(self.bounding_box_zyx, 512)

        repo_instances = fetch_repo_instances(self.server, self.uuid)

        # Bottom level of pyramid is listed as neuroglancer-compatible
        extend_list_value(self.server, self.uuid, '.meta', 'neuroglancer',
                          [self.instance_name])

        for scale in range(pyramid_depth + 1):
            scaled_output_box_zyx = round_box(self.bounding_box_zyx, 2**scale,
                                              'out') // 2**scale

            if scale == 0:
                scaled_instance_name = self.instance_name
            else:
                scaled_instance_name = f"{self.instance_name}_{scale}"

            if scaled_instance_name in repo_instances:
                logger.info(
                    f"'{scaled_instance_name}' already exists, skipping creation"
                )
            else:
                create_voxel_instance(
                    self.server, self.uuid, scaled_instance_name, 'uint8blk',
                    settings["versioned"], settings["compression"],
                    settings["tags"], block_width, settings["voxel-size"],
                    settings["voxel-units"], settings["background"])

            update_extents(self.server, self.uuid, scaled_instance_name,
                           scaled_output_box_zyx)

            # Higher levels of the pyramid should not appear in the DVID console.
            extend_list_value(self.server, self.uuid, '.meta', 'restrictions',
                              [scaled_instance_name])

    def _create_segmentation_instance(self, volume_config):
        """
        Create a segmentation volume in DVID according to the given configuration.
        In DVID, segmentation instances are stored via a special instance type, 'labelmap',
        which has several features, including built-in multiscale support, supervoxel-to-body
        mapping, and sparse retrieval of body locations.
        """
        if self.instance_name in fetch_repo_instances(self.server, self.uuid):
            logger.info(
                f"'{self.instance_name}' already exists, skipping creation")
            return

        settings = volume_config["dvid"]["creation-settings"]
        block_width = volume_config["geometry"]["block-width"]

        pyramid_depth = settings["max-scale"]
        if pyramid_depth == -1:
            pyramid_depth = choose_pyramid_depth(self.bounding_box_zyx, 512)

        if settings["compression"] != DvidInstanceCreationSettingsSchema[
                "properties"]["compression"]["default"]:
            raise RuntimeError(
                "Alternative compression methods are not permitted on labelmap instances. "
                "Please remove the 'compression' setting from your config.")

        if settings["background"] != 0:
            raise RuntimeError(
                "Labelmap instances do not support custom background values. "
                "Please remove 'background' from your config.")

        create_labelmap_instance(self.server, self.uuid, self.instance_name,
                                 settings["versioned"], settings["tags"],
                                 block_width, settings["voxel-size"],
                                 settings["voxel-units"],
                                 settings["enable-index"], pyramid_depth)

        # Workaround for https://github.com/janelia-flyem/dvid/issues/344
        # Write an empty block to the first and last blocks to set the
        # bounding-box in DVID now, without any concurrency issues.
        empty_block = np.zeros((64, 64, 64), np.uint64)
        first_block_start, last_block_stop = round_box(self.bounding_box_zyx,
                                                       64, 'out')
        last_block_start = last_block_stop - 64
        self.write_subvolume(empty_block, first_block_start)
        self.write_subvolume(empty_block, last_block_start)
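
Both methods above read their parameters from the same nested config dict. The sketch below lists only the keys they actually access; the values are illustrative placeholders, not recommended settings, and the real schema (DvidInstanceCreationSettingsSchema) may define additional keys and defaults.

# Illustrative only: a minimal volume_config covering the fields read by
# _create_grayscale_instances() and _create_segmentation_instance() above.
example_volume_config = {
    "dvid": {
        "creation-settings": {
            "max-scale": -1,        # -1 => auto-choose via choose_pyramid_depth()
            "versioned": True,
            "compression": "",      # must equal the schema default for labelmap; "" is a placeholder
            "tags": [],
            "voxel-size": 8.0,      # placeholder value
            "voxel-units": "nanometers",
            "background": 0,        # labelmap requires 0
            "enable-index": True,   # used only by the labelmap (segmentation) path
        }
    },
    "geometry": {
        "block-width": 64,
    },
}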
Example #3
    def _ensure_datasets_exist(self, volume_config):
        dtype = volume_config["zarr"]["creation-settings"]["dtype"]
        create_if_necessary = volume_config["zarr"]["create-if-necessary"]
        writable = volume_config["zarr"]["writable"]
        if writable is None:
            writable = create_if_necessary

        mode = 'r'
        if writable:
            mode = 'a'
        self._filemode = mode

        block_shape = volume_config["zarr"]["creation-settings"]["chunk-shape"][::-1]

        global_offset = volume_config["zarr"]["global-offset"][::-1]
        bounding_box_zyx = np.array(volume_config["geometry"]["bounding-box"])[:,::-1]
        creation_shape = np.array(volume_config["zarr"]["creation-settings"]["shape"][::-1])
        replace_default_entries(creation_shape, bounding_box_zyx[1] - global_offset)

        compression = volume_config["zarr"]["creation-settings"]["compression"]
        if compression == 'gzip':
            compressor = numcodecs.GZip()
        elif compression.startswith('blosc-'):
            cname = compression[len('blosc-'):]
            compressor = numcodecs.Blosc(cname)
        else:
            assert compression == "", f"Unimplemented compression: {compression}"
            compressor = None  # zarr treats compressor=None as 'no compression'

        if create_if_necessary:
            max_scale = volume_config["zarr"]["creation-settings"]["max-scale"]
            if max_scale == -1:
                if -1 in creation_shape:
                    raise RuntimeError("Can't auto-determine the appropriate max-scale to create "
                                       "(or extend) the data with, because you didn't specify a "
                                       "volume creation shape (or bounding box")
                max_scale = choose_pyramid_depth(creation_shape, 512)

            available_scales = [*range(1+max_scale)]
        else:
            available_scales = volume_config["geometry"]["available-scales"]

            if not os.path.exists(self._path):
                raise RuntimeError(f"File does not exist: {self._path}\n"
                                   "You did not specify 'create-if-necessary' in the config, so I won't create it.:\n")

            if self._dataset_name and not os.path.exists(f"{self._path}/{self._dataset_name}"):
                raise RuntimeError(f"File does not exist: {self._path}/{self._dataset_name}\n"
                                   "You did not specify 'create-if-necessary' in the config, so I won't create it.:\n")

        for scale in available_scales:
            if scale == 0:
                name = self._dataset_name
            else:
                name = self._dataset_name[:-1] + f'{scale}'

            if name not in self.zarr_file:
                if not writable:
                    raise RuntimeError(f"Dataset for scale {scale} does not exist, and you "
                                       "didn't specify 'writable' in the config, so I won't create it.")

                if dtype == "auto":
                    raise RuntimeError(f"Can't create Zarr array {self._path}/{self._dataset_name}: "
                                       "No dtype specified in the config.")

                # Use 128 if the user didn't specify a chunk shape
                replace_default_entries(block_shape, 3*[128])

                # zarr misbehaves if the chunks are larger than the shape,
                # which could happen here if we aren't careful (for higher scales).
                scaled_shape = (creation_shape // (2**scale))
                chunks = np.minimum(scaled_shape, block_shape).tolist()
                if (chunks != block_shape) and (scale == 0):
                    logger.warning(f"Block shape ({block_shape}) is too small for "
                                   f"the dataset shape ({creation_shape}). Shrinking block shape.")

                self._zarr_datasets[scale] = self.zarr_file.create_dataset( name,
                                                                            shape=scaled_shape.tolist(),
                                                                            dtype=np.dtype(dtype),
                                                                            chunks=chunks,
                                                                            compressor=compressor )
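
The chunk handling near the end of this example is easier to see with concrete numbers. The snippet below is a standalone sketch, using hypothetical shapes (not taken from any real config), of how the requested block shape gets clamped to the downscaled volume shape at higher scales.

import numpy as np

# Hypothetical full-resolution shape and requested chunk shape (zyx).
creation_shape = np.array([1024, 1024, 1024])
block_shape = np.array([128, 128, 128])

for scale in range(6):
    scaled_shape = creation_shape // (2**scale)
    chunks = np.minimum(scaled_shape, block_shape).tolist()
    print(scale, scaled_shape.tolist(), chunks)

# Scales 0-3 keep chunks of [128, 128, 128]; at scale 4 the volume is only
# 64 voxels per side, so chunks shrink to [64, 64, 64], and at scale 5 to [32, 32, 32].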
Example #4
    def _ensure_datasets_exist(self, volume_config):
        dtype = volume_config["n5"]["creation-settings"]["dtype"]
        create_if_necessary = volume_config["n5"]["create-if-necessary"]
        writable = volume_config["n5"]["writable"]
        if writable is None:
            writable = create_if_necessary
        
        mode = 'r'
        if writable:
            mode = 'a'
        self._filemode = mode

        block_shape = volume_config["n5"]["creation-settings"]["block-shape"]

        bounding_box_zyx = np.array(volume_config["geometry"]["bounding-box"])[:,::-1]
        creation_shape = np.array(volume_config["n5"]["creation-settings"]["shape"][::-1])
        replace_default_entries(creation_shape, bounding_box_zyx[1])
        
        compression = volume_config["n5"]["creation-settings"]["compression"]
        compression_options = {}
        if compression != "raw":
            compression_options['level'] = volume_config["n5"]["creation-settings"]["compression-level"]

        if create_if_necessary:
            max_scale = volume_config["n5"]["creation-settings"]["max-scale"]
            if max_scale == -1:
                if -1 in creation_shape:
                    raise RuntimeError("Can't auto-determine the appropriate max-scale to create "
                                       "(or extend) the data with, because you didn't specify a "
                                       "volume creation shape (or bounding box")
                max_scale = choose_pyramid_depth(creation_shape, 512)
            
            available_scales = [*range(1+max_scale)]
        else:
            available_scales = volume_config["geometry"]["available-scales"]

            if not os.path.exists(self._path):
                raise RuntimeError(f"File does not exist: {self._path}\n"
                                   "You did not specify 'writable' in the config, so I won't create it.:\n")

            if self._dataset_name and not os.path.exists(f"{self._path}/{self._dataset_name}"):
                raise RuntimeError(f"File does not exist: {self._path}/{self._dataset_name}\n"
                                   "You did not specify 'writable' in the config, so I won't create it.:\n")

        for scale in available_scales:
            if scale == 0:
                name = self._dataset_name
            else:
                name = self._dataset_name[:-1] + f'{scale}'

            if name not in self.n5_file:
                if not writable:
                    raise RuntimeError(f"Dataset for scale {scale} does not exist, and you "
                                       "didn't specify 'writable' in the config, so I won't create it.")

                if dtype == "auto":
                    raise RuntimeError(f"Can't create N5 array {self._path}/{self._dataset_name}: "
                                       "No dtype specified in the config.")

                # Use 128 if the user didn't specify a chunk shape
                replace_default_entries(block_shape, 3*[128])

                # z5py complains if the chunks are larger than the shape,
                # which could happen here if we aren't careful (for higher scales).
                scaled_shape = (creation_shape // (2**scale))
                chunks = np.minimum(scaled_shape, block_shape).tolist()
                if (chunks != block_shape) and (scale == 0):
                    logger.warning(f"Block shape ({block_shape}) is too small for "
                                   f"the dataset shape ({creation_shape}). Shrinking block shape.")
                
                self._n5_datasets[scale] = self.n5_file.create_dataset( name,
                                                                        scaled_shape.tolist(),
                                                                        np.dtype(dtype),
                                                                        chunks=chunks,
                                                                        compression=compression,
                                                                        **compression_options )
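
Both _ensure_datasets_exist() variants above derive higher-scale dataset names by replacing the final character of the configured name with the scale index, so the scale-0 name is expected to end in '0' (e.g. 's0'). The snippet below is a standalone illustration of that convention, using a hypothetical dataset name.

# Hypothetical dataset name; only the trailing '0' matters for the convention.
dataset_name = "volume/s0"

for scale in range(4):
    if scale == 0:
        name = dataset_name
    else:
        name = dataset_name[:-1] + f'{scale}'
    print(name)

# Prints volume/s0, volume/s1, volume/s2, volume/s3 (one per line).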