    def get_single_image(self, train_id, tag, step):
        """
        Returns the actual image bytes for a given image.

        Args:
            train_id (str): The ID of the events data the image belongs to.
            tag (str): The name of the tag the image belongs to.
            step (int): The step of the image in the current reservoir. If step is -1, the image of the final step is returned.

        Returns:
            bytes, a byte string of the raw image bytes.

        """
        Validation.check_param_empty(train_id=train_id, tag=tag, step=step)
        step = to_int(step, "step")

        try:
            tensors = self._data_manager.list_tensors(train_id, tag)
        except ParamValueError as ex:
            raise ImageNotExistError(ex.message)

        image = _find_image(tensors, step)
        if image is None:
            raise ImageNotExistError(
                "Can not find the step with given train job id and tag.")

        return image
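
The snippet above delegates the step lookup to a module-level `_find_image` helper that is not shown here. Below is a minimal sketch of what such a helper could look like, based on the inline loop in the next example and the docstring note about step = -1; the helper's exact signature and the -1 handling are assumptions.

def _find_image(tensors, step):
    """Return the encoded image bytes for the given step, or None if absent (sketch)."""
    if step == -1 and tensors:
        # Assumption taken from the docstring: step -1 means "the final step".
        return tensors[-1].value.encoded_image
    for tensor in tensors:
        if tensor.step == step:
            # Default value for a protobuf bytes field is the empty byte string.
            return tensor.value.encoded_image
    return None
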
    def get_single_image(self, train_id, tag, step):
        """
        Returns the actual image bytes for a given image.

        Args:
            train_id (str): The ID of the events data the image belongs to.
            tag (str): The name of the tag the image belongs to.
            step (int): The step of the image in the current reservoir.

        Returns:
            bytes, a byte string of the raw image bytes.

        """
        Validation.check_param_empty(train_id=train_id, tag=tag, step=step)
        step = to_int(step, "step")

        try:
            tensors = self._data_manager.list_tensors(train_id, tag)
        except ParamValueError as ex:
            raise ImageNotExistError(ex.message)

        image = None
        for tensor in tensors:
            if tensor.step == step:
                # Default value for bytes field is empty byte string normally,
                # see also "Optional Fields And Default Values" in protobuf
                # documentation.
                image = tensor.value.encoded_image
                break

        if image is None:
            raise ImageNotExistError("Can not find the step with given train job id and tag.")

        return image
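
A hypothetical caller for the method above; the `processor` instance, the train job ID, and the tag value are illustrative and not taken from the snippet.

# Fetch the raw bytes of the image logged at step 3 under the tag "input_images"
# for the illustrative train job "./train_job_01", then write them to disk.
# The ".png" extension is for illustration only; the actual encoding depends on
# how the summary data was written.
image_bytes = processor.get_single_image("./train_job_01", "input_images", step=3)
with open("step_3_image.png", "wb") as out:
    out.write(image_bytes)
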
    def _get_hoc_image(self, image_path, train_id):
        """Get hoc image for image data demonstration in UI."""

        # str.strip(".jpg") strips a set of characters rather than a suffix; use
        # splitext to drop the ".jpg" extension safely before splitting.
        sample_id, label, layer = os.path.splitext(image_path)[0].split("_")
        layer = int(layer)
        job = self.job_manager.get_job(train_id)
        samples = job.samples
        label_idx = job.labels.index(label)

        chosen_sample = samples[int(sample_id)]
        original_path_image = chosen_sample['image']
        abs_image_path = os.path.join(self.job_manager.summary_base_dir,
                                      _clean_train_id_b4_join(train_id),
                                      original_path_image)
        if self._is_forbidden(abs_image_path):
            raise FileSystemPermissionError("Forbidden.")

        image_type = ImageQueryTypes.OUTCOME.value
        try:
            image = Image.open(abs_image_path)
        except FileNotFoundError:
            raise ImageNotExistError(
                f"train_id:{train_id} path:{image_path} type:{image_type}")
        except PermissionError:
            raise FileSystemPermissionError(
                f"train_id:{train_id} path:{image_path} type:{image_type}")
        except OSError:
            raise UnknownError(
                f"Invalid image file: train_id:{train_id} path:{image_path} type:{image_type}"
            )

        edit_steps = []
        boxes = chosen_sample["hierarchical_occlusion"][label_idx][
            "hoc_layers"][layer]["boxes"]
        mask = chosen_sample["hierarchical_occlusion"][label_idx]["mask"]

        for box in boxes:
            edit_steps.append(EditStep(layer, *box))
        image_cp = pil_apply_edit_steps(image, mask, edit_steps)
        buffer = io.BytesIO()
        image_cp.save(buffer, format=_PNG_FORMAT)

        return buffer.getvalue()
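
The `image_path` layout is implied by the split above but never stated. A small illustration of the assumed "<sample_id>_<label>_<layer>.jpg" format follows; the concrete values are made up.

import os

# Assumed layout: "<sample_id>_<label>_<layer>.jpg", e.g. an outcome image for
# sample 7, label "cat", hierarchical-occlusion layer 2.
sample_id, label, layer = os.path.splitext("7_cat_2.jpg")[0].split("_")
# sample_id == "7", label == "cat", layer == "2" (converted with int() above).
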
    def get_metadata_list(self, train_id, tag):
        """
        Builds a JSON-serializable object with information about images.

        Args:
            train_id (str): The ID of the events data.
            tag (str): The name of the tag the images all belong to.

        Returns:
            dict, a dictionary with a single key `metadatas` whose value is a list of
                dictionaries containing the `wall_time`, `step`, `width`, and `height`
                for each image.
                    {
                        "metadatas": [
                            {
                                "wall_time": ****,
                                "step": ****,
                                "width": ****,
                                "height": ****,
                            },
                            {...}
                        ]
                    }

        """
        Validation.check_param_empty(train_id=train_id, tag=tag)
        result = []
        try:
            tensors = self._data_manager.list_tensors(train_id, tag)
        except ParamValueError as ex:
            raise ImageNotExistError(ex.message)

        for tensor in tensors:
            # no tensor_proto in TensorEvent
            (width, height) = (tensor.value.width, tensor.value.height)
            result.append({
                'wall_time': tensor.wall_time,
                'step': tensor.step,
                'width': int(width),
                'height': int(height),
            })
        return dict(metadatas=result)
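
A hypothetical call showing the shape of the returned payload; the `processor` instance and the train job ID / tag values are illustrative.

payload = processor.get_metadata_list("./train_job_01", "input_images")
# The per-image records sit under the "metadatas" key.
for meta in payload["metadatas"]:
    print(meta["step"], meta["wall_time"], meta["width"], meta["height"])
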
    def query_image_binary(self, train_id, image_path, image_type):
        """
        Query image binary content.

        Args:
            train_id (str): Job ID.
            image_path (str): Image path relative to explain job's summary directory.
            image_type (str): Image type, 'original' or 'overlay'.

        Returns:
            bytes, image binary.
        """

        abs_image_path = os.path.join(self.job_manager.summary_base_dir,
                                      _clean_train_id_b4_join(train_id),
                                      image_path)

        if self._is_forbidden(abs_image_path):
            raise FileSystemPermissionError("Forbidden.")

        try:
            if image_type != "overlay":
                # no need to convert
                with open(abs_image_path, "rb") as fp:
                    return fp.read()

            image = Image.open(abs_image_path)

            if image.mode == _RGBA_MODE:
                # It is RGBA already, do not convert.
                with open(abs_image_path, "rb") as fp:
                    return fp.read()

        except FileNotFoundError:
            raise ImageNotExistError(
                f"train_id:{train_id} path:{image_path} type:{image_type}")
        except PermissionError:
            raise FileSystemPermissionError(
                f"train_id:{train_id} path:{image_path} type:{image_type}")
        except OSError:
            raise UnknownError(
                f"Invalid image file: train_id:{train_id} path:{image_path} type:{image_type}"
            )

        if image.mode == _SINGLE_CHANNEL_MODE:
            saliency = np.asarray(image) / _UINT8_MAX
        elif image.mode == _RGB_MODE:
            saliency = np.asarray(image)
            saliency = saliency[:, :, 0] / _UINT8_MAX
        else:
            raise UnknownError(f"Invalid overlay image mode:{image.mode}.")

        saliency_stack = np.empty((saliency.shape[0], saliency.shape[1], 4))
        for c in range(3):
            saliency_stack[:, :, c] = saliency
        rgba = saliency_stack * _SALIENCY_CMAP_HI
        rgba += (1 - saliency_stack) * _SALIENCY_CMAP_LOW
        rgba[:, :, 3] = saliency * _UINT8_MAX

        overlay = Image.fromarray(np.uint8(rgba), mode=_RGBA_MODE)
        buffer = io.BytesIO()
        overlay.save(buffer, format=_PNG_FORMAT)

        return buffer.getvalue()
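
A small standalone sketch of the overlay colour math used above. The real `_SALIENCY_CMAP_LOW` / `_SALIENCY_CMAP_HI` values are not shown in this snippet, so the RGBA constants below are placeholders chosen for illustration.

import numpy as np

UINT8_MAX = 255
CMAP_LOW = np.array([0, 0, 255, 0], dtype=float)   # assumed low-saliency colour
CMAP_HI = np.array([255, 0, 0, 0], dtype=float)    # assumed high-saliency colour

saliency = np.array([[0.0, 0.25], [0.5, 1.0]])     # toy 2x2 saliency map in [0, 1]
stack = np.repeat(saliency[:, :, np.newaxis], 4, axis=2)

# Linear blend per pixel: high saliency pulls towards CMAP_HI, low towards CMAP_LOW.
rgba = stack * CMAP_HI + (1 - stack) * CMAP_LOW
# Alpha is driven by saliency alone, so salient regions are more opaque.
rgba[:, :, 3] = saliency * UINT8_MAX
print(np.uint8(rgba))
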