Example #1
    def test_encapsulate_single_fragment_per_frame_bot(self):
        """Test encapsulating single fragment per frame with BOT values."""
        ds = dcmread(JP2K_10FRAME_NOBOT)
        frames = decode_data_sequence(ds.PixelData)
        assert len(frames) == 10

        data = encapsulate(frames, fragments_per_frame=1, has_bot=True)
        test_frames = decode_data_sequence(data)
        for a, b in zip(test_frames, frames):
            assert a == b

        fp = DicomBytesIO(data)
        fp.is_little_endian = True
        length, offsets = get_frame_offsets(fp)
        assert offsets == [
            0x0000,  # 0
            0x0eee,  # 3822
            0x1df6,  # 7670
            0x2cf8,  # 11512
            0x3bfc,  # 15356
            0x4ade,  # 19166
            0x59a2,  # 22946
            0x6834,  # 26676
            0x76e2,  # 30434
            0x8594  # 34196
        ]
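For reference, the round trip in this test can be reproduced without the JP2K test file. The three synthetic frames below are an assumption, not pydicom test data; encapsulate prepends a Basic Offset Table (BOT) item and wraps each frame in an even-length item:

    from pydicom.encaps import decode_data_sequence, encapsulate

    # Three even-length stand-in frames (real use passes compressed codestreams)
    frames = [b'\x00' * 10, b'\x01' * 12, b'\x02' * 14]

    data = encapsulate(frames, fragments_per_frame=1, has_bot=True)
    assert decode_data_sequence(data) == frames  # the BOT item is skipped on decode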
Example #2
    def test_encapsulate_single_fragment_per_frame_bot(self):
        """Test encapsulating single fragment per frame with BOT values."""
        ds = dcmread(JP2K_10FRAME_NOBOT)
        frames = decode_data_sequence(ds.PixelData)
        assert len(frames) == 10

        data = encapsulate(frames, fragments_per_frame=1, has_bot=True)
        test_frames = decode_data_sequence(data)
        for a, b in zip(test_frames, frames):
            assert a == b

        fp = DicomBytesIO(data)
        fp.is_little_endian = True
        offsets = get_frame_offsets(fp)
        assert offsets == [
            0x0000,  # 0
            0x0eee,  # 3822
            0x1df6,  # 7670
            0x2cf8,  # 11512
            0x3bfc,  # 15356
            0x4ade,  # 19166
            0x59a2,  # 22946
            0x6834,  # 26676
            0x76e2,  # 30434
            0x8594  # 34196
        ]
Example #3
    def slice_dicom(self, dcm: U) -> U:
        r"""Slices a DICOM object input according to :func:`get_indices`.

        .. note::
            Unlike :func:`slice_array`, this function can slice compressed DICOMs
            without needing to decompress all frames, which can provide a
            substantial performance gain.

        """
        # copy dicom and read key tags
        dcm = deepcopy(dcm)
        num_frames: Optional[SupportsInt] = dcm.get("NumberOfFrames", None)
        num_frames = int(num_frames) if num_frames is not None else None
        is_compressed: bool = dcm.file_meta.TransferSyntaxUID.is_compressed

        start, stop, stride = self.get_indices(num_frames)

        # read data
        if is_compressed:
            frame_iterator: Iterator = generate_pixel_data_frame(
                dcm.PixelData, num_frames)
            frames = list(islice(frame_iterator, start, stop, stride))
            new_pixel_data = encapsulate(frames)
        else:
            all_frames: np.ndarray = dcm.pixel_array
            frames = all_frames[start:stop:stride]
            new_pixel_data = frames.tobytes()

        out_frames = len(frames)
        dcm.NumberOfFrames = out_frames
        dcm.PixelData = new_pixel_data
        return dcm
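A minimal standalone version of the compressed branch, assuming a hypothetical compressed multi-frame file "multiframe.dcm":

    from itertools import islice

    from pydicom import dcmread
    from pydicom.encaps import encapsulate, generate_pixel_data_frame

    ds = dcmread("multiframe.dcm")  # placeholder path
    frame_iter = generate_pixel_data_frame(ds.PixelData, int(ds.NumberOfFrames))
    kept = list(islice(frame_iter, 0, 5, 2))  # keep frames 0, 2 and 4

    ds.PixelData = encapsulate(kept)
    ds.NumberOfFrames = len(kept)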
Example #4
    def convert(self):
        # create file meta information
        file_meta = Dataset()
        file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.77.1.2'  # VL Microscopic Image Storage
        file_meta.MediaStorageSOPInstanceUID = "1.2.276.0.7230010.3.1.4.296485376.1.1484917438.721089"
        file_meta.ImplementationClassUID = "1.2.3.4"
        file_meta.FileMetaInformationVersion = b'\x00\x01'
        file_meta.FileMetaInformationGroupLength = len(file_meta)
        if self.JPEG_COMPRESS:
            # file_meta.TransferSyntaxUID = '1.2.840.10008.1.2.4.80'  # JPEG 2k
            # file_meta.TransferSyntaxUID = '1.2.840.10008.1.2.4.70'  # JPEG
            file_meta.TransferSyntaxUID = '1.2.840.10008.1.2.4.50'  # JPEG baseline
        else:
            file_meta.TransferSyntaxUID = '1.2.840.10008.1.2'  # default uncompressed

        # write data into Dicom instances
        self.instance_cnt = 0
        for frame_items_info in self.frame_items_info_list:
            print("Saving to instance %d/%d" %
                  (self.instance_cnt, len(self.frame_items_info_list)))
            # update relevant tags
            self.dcm_instance.InstanceNumber = self.instance_cnt
            self.dcm_instance.SeriesInstanceUID = '1.2.276.0.7230010.3.1.3.296485376.1.1484917433.721085.' + str(
                frame_items_info.img_level)
            self.dcm_instance.SeriesNumber = frame_items_info.img_level
            print(frame_items_info.img_level)
            # self.dcm_instance.SOPInstanceUID = self.dcm_instance.SOPInstanceUID + str(self.instance_cnt)
            self.dcm_instance.SOPInstanceUID = '1.2.276.0.7230010.3.1.4.296485376.1.1484917438.721089.' + str(
                self.instance_cnt)
            self.dcm_instance.NumberOfFrames = len(frame_items_info.locations)
            self.dcm_instance.TotalPixelMatrixColumns, self.dcm_instance.TotalPixelMatrixRows = self.wsi_obj.level_dimensions[
                frame_items_info.img_level]
            self.add_Frame_Sequence_data(frame_items_info)
            # create encoded pixel data
            PixelData_encoded = self.add_PixelData(frame_items_info)
            if self.JPEG_COMPRESS:
                filename = os.path.join(
                    self.save_to_dir,
                    "compressed_instance_" + str(self.instance_cnt) + ".dcm")
                data_elem_tag = pydicom.tag.TupleTag((0x7FE0, 0x0010))
                enc_frames = encapsulate(PixelData_encoded, has_bot=True)
                pd_ele = DataElement(data_elem_tag,
                                     'OB',
                                     enc_frames,
                                     is_undefined_length=True)
                self.dcm_instance.add(pd_ele)
            else:
                filename = os.path.join(
                    self.save_to_dir,
                    "instance_" + str(self.instance_cnt) + ".dcm")
                self.dcm_instance.PixelData = PixelData_encoded

            self.dcm_instance.file_meta = file_meta
            self.dcm_instance.save_as(filename, write_like_original=False)
            self.instance_cnt += 1
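On pydicom >= 2.0 the file meta group above is more safely built with FileMetaDataset, which validates group 0002 elements; pydicom also recomputes FileMetaInformationGroupLength as a byte count on save, so assigning len(file_meta) to it is redundant. A sketch, keeping the SOP Class UID from the example and generating the instance UID:

    from pydicom.dataset import FileMetaDataset
    from pydicom.uid import generate_uid

    file_meta = FileMetaDataset()
    file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.77.1.2'
    file_meta.MediaStorageSOPInstanceUID = generate_uid()
    file_meta.TransferSyntaxUID = '1.2.840.10008.1.2.4.50'  # JPEG baseline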
Example #5
    def test_encapsulate_single_fragment_per_frame_no_bot(self):
        """Test encapsulating single fragment per frame with no BOT values."""
        ds = dcmread(JP2K_10FRAME_NOBOT)
        frames = decode_data_sequence(ds.PixelData)
        assert len(frames) == 10

        data = encapsulate(frames, fragments_per_frame=1, has_bot=False)
        test_frames = decode_data_sequence(data)
        for a, b in zip(test_frames, frames):
            assert a == b

        # Original data has no BOT values
        assert data == ds.PixelData
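Even with has_bot=False the stream starts with a Basic Offset Table item, just with zero length, which is why the output can match the original bytes here. A quick sketch with one synthetic frame:

    from pydicom.encaps import encapsulate

    data = encapsulate([b'\x00' * 10], has_bot=False)
    # Item tag (FFFE,E000) in little endian, followed by a zero item length
    assert data[:8] == b'\xfe\xff\x00\xe0\x00\x00\x00\x00'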
Example #6
 def buildDataSetJPEG2000(image):
     from io import BytesIO
     from PIL import Image as PImage
     dataset = image.dataset
     pixels = image.pixelData
     frame_data = []
     with BytesIO() as output:
         image = PImage.fromarray(pixels)
         image.save(output, format="JPEG2000")
         frame_data.append(output.getvalue())
     dataset.PixelData = encapsulate(frame_data)
     dataset.file_meta.TransferSyntaxUID = JPEG2000
     dataset.is_implicit_VR = False
     return dataset
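Hypothetical usage, assuming an image wrapper object exposing .dataset and .pixelData as the function expects. Note that pydicom's JPEG2000 UID ('1.2.840.10008.1.2.4.91') permits lossy encoding, while Pillow's irreversible=False keeps the wavelet transform lossless:

    ds = buildDataSetJPEG2000(image)
    ds.save_as('encoded.dcm', write_like_original=False)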
Example #7
    def func(num_frames, syntax=ExplicitVRLittleEndian):
        file_meta.TransferSyntaxUID = syntax
        old_data = dcm.PixelData
        dcm.NumberOfFrames = num_frames

        if syntax.is_compressed:
            new_data = encapsulate([old_data for _ in range(num_frames)],
                                   has_bot=False)
            dcm.PixelData = new_data
            dcm.compress(syntax)

        else:
            new_data = b"".join(old_data for _ in range(num_frames))
            dcm.PixelData = new_data

        dcm.file_meta = file_meta
        return dcm
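Dataset.compress (pydicom >= 2.2) performs the encoding and encapsulation in one step; a sketch against a hypothetical uncompressed file:

    from pydicom import dcmread
    from pydicom.uid import RLELossless

    ds = dcmread("uncompressed.dcm")  # placeholder path
    ds.compress(RLELossless)  # re-encodes ds.pixel_array and encapsulates the frames
    ds.save_as("compressed.dcm")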
Example #8
    def test_encapsulate_bot(self):
        """Test the Basic Offset Table is correct."""
        ds = dcmread(JP2K_10FRAME_NOBOT)
        frames = decode_data_sequence(ds.PixelData)
        assert len(frames) == 10

        data = encapsulate(frames, fragments_per_frame=1, has_bot=True)
        assert data[:56] == (
            b'\xfe\xff\x00\xe0'  # Basic offset table item tag
            b'\x28\x00\x00\x00'  # Basic offset table length
            b'\x00\x00\x00\x00'  # First offset
            b'\xee\x0e\x00\x00'
            b'\xf6\x1d\x00\x00'
            b'\xf8\x2c\x00\x00'
            b'\xfc\x3b\x00\x00'
            b'\xde\x4a\x00\x00'
            b'\xa2\x59\x00\x00'
            b'\x34\x68\x00\x00'
            b'\xe2\x76\x00\x00'
            b'\x94\x85\x00\x00'  # Last offset
            b'\xfe\xff\x00\xe0'  # Next item tag
            b'\xe6\x0e\x00\x00'  # Next item length
        )
Example #9
    def add_segments(
        self,
        pixel_array: np.ndarray,
        segment_descriptions: Sequence[SegmentDescription],
        plane_positions: Optional[Sequence[PlanePositionSequence]] = None
    ) -> None:
        """Adds one or more segments to the segmentation image.

        Parameters
        ----------
        pixel_array: numpy.ndarray
            Array of segmentation pixel data of boolean, unsigned integer or
            floating point data type representing a mask image. If `pixel_array`
            is a floating-point array or a binary array (containing only the
            values ``True`` and ``False`` or ``0`` and ``1``), the segment
            number used to encode the segment is taken from
            `segment_descriptions`.
            Otherwise, if `pixel_array` contains multiple integer values, each
            value is treated as a different segment whose segment number is
            that integer value. In this case, all segments found in the array
            must be described in `segment_descriptions`. Note that this is
            valid for both ``"BINARY"`` and ``"FRACTIONAL"`` segmentations.
            For ``"FRACTIONAL"`` segmentations, values either encode the
            probability of a given pixel belonging to a segment
            (if `fractional_type` is ``"PROBABILITY"``)
            or the extent to which a segment occupies the pixel
            (if `fractional_type` is ``"OCCUPANCY"``).
            When `pixel_array` has a floating point data type, only one segment
            can be encoded. Additional segments can be subsequently
            added to the `Segmentation` instance using the ``add_segments()``
            method.
            If `pixel_array` represents a 3D image, the first dimension
            represents individual 2D planes and these planes must be ordered
            based on their position in the three-dimensional patient
            coordinate system (first along the X axis, second along the Y axis,
            and third along the Z axis).
            If `pixel_array` represents a tiled 2D image, the first dimension
            represents individual 2D tiles (for one channel and z-stack) and
            these tiles must be ordered based on their position in the tiled
            total pixel matrix (first along the row dimension and second along
            the column dimension, which are defined in the three-dimensional
            slide coordinate system by the direction cosines encoded by the
            *Image Orientation (Slide)* attribute).
        segment_descriptions: Sequence[highdicom.seg.content.SegmentDescription]
            Description of each segment encoded in `pixel_array`. In the case of
            pixel arrays with multiple integer values, the segment description
            with the corresponding segment number is used to describe each
            segment.
        plane_positions: Sequence[highdicom.content.PlanePositionSequence], optional
            Position of each plane in `pixel_array` relative to the
            three-dimensional patient or slide coordinate system.

        Raises
        ------
        ValueError
            When
                - The pixel array is not a 2D or 3D numpy array
                - The shape of the pixel array does not match the source images
                - The numbering of the segment descriptions is not
                  monotonically increasing by 1
                - The numbering of the segment descriptions does
                  not begin at 1 (for the first segments added to the instance)
                  or at one greater than the last added segment (for
                  subsequent segments)
                - One or more segments already exist within the
                  segmentation instance
                - The segmentation is binary and the pixel array contains
                  integer values that belong to segments that are not described
                  in the segment descriptions
                - The segmentation is binary and pixel array has floating point
                  values not equal to 0.0 or 1.0
                - The segmentation is fractional and pixel array has floating
                  point values outside the range 0.0 to 1.0
                - Plane positions are provided but the length of the array
                  does not match the number of frames in the pixel array
        TypeError
            When the dtype of the pixel array is invalid


        Note
        ----
        Segments must be sorted by segment number in ascending order and
        increase by 1.  Additionally, the first segment description must have a
        segment number one greater than the segment number of the last segment
        added to the segmentation, or 1 if this is the first segment added.

        In case `segmentation_type` is ``"BINARY"``, the number of items in
        `segment_descriptions` must be greater than or equal to the number of
        unique positive pixel values in `pixel_array`. It is possible for some
        segments described in `segment_descriptions` not to appear in the
        `pixel_array`. In case `segmentation_type` is ``"FRACTIONAL"``, only
        one segment can be encoded by `pixel_array` and hence only one item is
        permitted in `segment_descriptions`.

        """  # noqa
        if pixel_array.ndim == 2:
            pixel_array = pixel_array[np.newaxis, ...]
        if pixel_array.ndim != 3:
            raise ValueError('Pixel array must be a 2D or 3D array.')

        if pixel_array.shape[1:3] != (self.Rows, self.Columns):
            raise ValueError(
                'Pixel array representing segments has the wrong number of '
                'rows and columns.')

        # Determine the expected starting number of the segments to ensure
        # they will be continuous with existing segments
        if self._segment_inventory:
            # Next segment number is one greater than the largest existing
            # segment number
            seg_num_start = max(self._segment_inventory) + 1
        else:
            # No existing segments so start at 1
            seg_num_start = 1

        # Check segment numbers
        # Check the existing descriptions
        described_segment_numbers = np.array(
            [int(item.SegmentNumber) for item in segment_descriptions])
        # Check segment numbers in the segment descriptions are
        # monotonically increasing by 1
        if not (np.diff(described_segment_numbers) == 1).all():
            raise ValueError(
                'Segment descriptions must be sorted by segment number '
                'and monotonically increasing by 1.')
        if described_segment_numbers[0] != seg_num_start:
            if seg_num_start == 1:
                msg = ('Segment descriptions should be numbered starting '
                       f'from 1. Found {described_segment_numbers[0]}. ')
            else:
                msg = ('Segment descriptions should be numbered to '
                       'continue from existing segments. Expected the first '
                       f'segment to be numbered {seg_num_start} but found '
                       f'{described_segment_numbers[0]}.')
            raise ValueError(msg)

        if pixel_array.dtype in (np.bool_, np.uint8, np.uint16):
            segments_present = np.unique(pixel_array[pixel_array > 0].astype(
                np.uint16))

            # Special case where the mask is binary and there is a single
            # segment description. Mark the positive segment with
            # the correct segment number
            if (np.array_equal(segments_present, np.array([1]))
                    and len(segment_descriptions) == 1):
                pixel_array = pixel_array.astype(np.uint8)
                pixel_array *= described_segment_numbers.item()

            # Otherwise, the pixel values in the pixel array must all belong to
            # a described segment
            else:
                if not np.all(
                        np.in1d(segments_present, described_segment_numbers)):
                    raise ValueError('Pixel array contains segments that lack '
                                     'descriptions.')

        elif (pixel_array.dtype in (np.float_, np.float32, np.float64)):
            unique_values = np.unique(pixel_array)
            if np.min(unique_values) < 0.0 or np.max(unique_values) > 1.0:
                raise ValueError(
                    'Floating point pixel array values must be in the '
                    'range [0, 1].')
            if len(segment_descriptions) != 1:
                raise ValueError(
                    'When providing a float-valued pixel array, provide only '
                    'a single segment description')
            if self.SegmentationType == SegmentationTypeValues.BINARY.value:
                non_boolean_values = np.logical_and(unique_values > 0.0,
                                                    unique_values < 1.0)
                if np.any(non_boolean_values):
                    raise ValueError(
                        'Floating point pixel array values must be either '
                        '0.0 or 1.0 in case of BINARY segmentation type.')
                pixel_array = pixel_array.astype(np.bool_)
        else:
            raise TypeError('Pixel array has an invalid data type.')

        # Check that the new segments do not already exist
        if len(set(described_segment_numbers) & self._segment_inventory) > 0:
            raise ValueError(
                'Segment with given segment number already exists')

        # Set the optional tag value SegmentsOverlapValues to NO to indicate
        # that the segments do not overlap. We can know this for sure if it's
        # the first segment (or set of segments) to be added because they are
        # contained within a single pixel array.
        if len(self._segment_inventory) == 0:
            self.SegmentsOverlap = SegmentsOverlapValues.NO.value
        else:
            # If this is not the first set of segments to be added, we cannot
            # be sure whether there is overlap with the existing segments
            self.SegmentsOverlap = SegmentsOverlapValues.UNDEFINED.value

        src_image = self._source_images[0]
        is_multiframe = hasattr(src_image, 'NumberOfFrames')
        if is_multiframe:
            source_plane_positions = \
                self.DimensionIndexSequence.get_plane_positions_of_image(
                    src_image
                )
        else:
            source_plane_positions = \
                self.DimensionIndexSequence.get_plane_positions_of_series(
                    self._source_images
                )

        if plane_positions is None:
            if pixel_array.shape[0] != len(source_plane_positions):
                if is_multiframe:
                    raise ValueError(
                        'Number of frames in pixel array does not match number '
                        'of frames in source image.')
                else:
                    raise ValueError(
                        'Number of frames in pixel array does not match number '
                        'of source images.')
            plane_positions = source_plane_positions
        else:
            if pixel_array.shape[0] != len(plane_positions):
                raise ValueError(
                    'Number of pixel array planes does not match number of '
                    'provided plane positions.')

        plane_position_values, plane_sort_index = \
            self.DimensionIndexSequence.get_index_values(plane_positions)

        are_spatial_locations_preserved = (
            all(plane_positions[i] == source_plane_positions[i]
                for i in range(len(plane_positions)))
            and self._plane_orientation == self._source_plane_orientation)

        # Get unique values of attributes in the Plane Position Sequence or
        # Plane Position Slide Sequence, which define the position of the plane
        # with respect to the three dimensional patient or slide coordinate
        # system, respectively. These can subsequently be used to look up the
        # relative position of a plane relative to the indexed dimension.
        dimension_position_values = [
            np.unique(plane_position_values[:, index], axis=0)
            for index in range(plane_position_values.shape[1])
        ]

        # In certain circumstances, we can add new pixels without unpacking the
        # previous ones, which is more efficient. This can be done when using
        # non-encapsulated transfer syntaxes when there is no padding required
        # for each frame to be a multiple of 8 bits.
        framewise_encoding = False
        is_encaps = self.file_meta.TransferSyntaxUID.is_encapsulated
        if not is_encaps:
            if self.SegmentationType == SegmentationTypeValues.FRACTIONAL.value:
                framewise_encoding = True
            elif self.SegmentationType == SegmentationTypeValues.BINARY.value:
                # Framewise encoding can only be used if there is no padding
                # This requires the number of pixels in each frame to be
                # multiple of 8
                if (self.Rows * self.Columns * self.SamplesPerPixel) % 8 == 0:
                    framewise_encoding = True
                else:
                    logger.warning(
                        'pixel data needs to be re-encoded for binary '
                        'bitpacking - consider using FRACTIONAL instead of '
                        'BINARY segmentation type')

        if framewise_encoding:
            # Before adding new pixel data, remove trailing null padding byte
            if len(self.PixelData) == get_expected_length(self) + 1:
                self.PixelData = self.PixelData[:-1]
        else:
            # In the case of encapsulated transfer syntaxes, we will accumulate
            # a list of encoded frames to re-encapsulate at the end
            if is_encaps:
                if hasattr(self, 'PixelData') and len(self.PixelData) > 0:
                    # Undo the encapsulation but not the encoding within each
                    # frame
                    full_frames_list = decode_data_sequence(self.PixelData)
                else:
                    full_frames_list = []
            else:
                if hasattr(self, 'PixelData') and len(self.PixelData) > 0:
                    full_pixel_array = self.pixel_array.flatten()
                else:
                    full_pixel_array = np.array([], np.bool_)

        for i, segment_number in enumerate(described_segment_numbers):
            if pixel_array.dtype in (np.float_, np.float32, np.float64):
                # Floating-point numbers must be mapped to 8-bit integers in
                # the range [0, max_fractional_value].
                planes = np.around(pixel_array *
                                   float(self.MaximumFractionalValue))
                planes = planes.astype(np.uint8)
            elif pixel_array.dtype in (np.uint8, np.uint16):
                # Labeled masks must be converted to binary masks.
                planes = np.zeros(pixel_array.shape, dtype=np.bool_)
                planes[pixel_array == segment_number] = True
            elif pixel_array.dtype == np.bool_:
                planes = pixel_array
            else:
                raise TypeError('Pixel array has an invalid data type.')

            contained_plane_index = []
            for j in plane_sort_index:
                if np.sum(planes[j]) == 0:
                    logger.info('skip empty plane {} of segment #{}'.format(
                        j, segment_number))
                    continue
                contained_plane_index.append(j)
                logger.info('add plane #{} for segment #{}'.format(
                    j, segment_number))

                pffp_item = Dataset()
                frame_content_item = Dataset()
                frame_content_item.DimensionIndexValues = [segment_number]

                # Look up the position of the plane relative to the indexed
                # dimension.
                try:
                    if self._coordinate_system == CoordinateSystemNames.SLIDE:
                        index_values = [
                            np.where((dimension_position_values[idx]
                                      == pos))[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                    else:
                        # In case of the patient coordinate system, the
                        # value of the attribute the Dimension Index Sequence
                        # points to (Image Position Patient) has a value
                        # multiplicity greater than one.
                        index_values = [
                            np.where((dimension_position_values[idx]
                                      == pos).all(axis=1))[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                except IndexError as error:
                    raise IndexError(
                        'Could not determine position of plane #{} in '
                        'three dimensional coordinate system based on '
                        'dimension index values: {}'.format(j, error))
                frame_content_item.DimensionIndexValues.extend(index_values)
                pffp_item.FrameContentSequence = [frame_content_item]
                if self._coordinate_system == CoordinateSystemNames.SLIDE:
                    pffp_item.PlanePositionSlideSequence = plane_positions[j]
                else:
                    pffp_item.PlanePositionSequence = plane_positions[j]

                # Determining the source images that map to the frame is not
                # always trivial. Since DerivationImageSequence is a type 2
                # attribute, we leave its value empty.
                pffp_item.DerivationImageSequence = []

                if are_spatial_locations_preserved:
                    derivation_image_item = Dataset()
                    derivation_code = codes.cid7203.Segmentation
                    derivation_image_item.DerivationCodeSequence = [
                        CodedConcept(derivation_code.value,
                                     derivation_code.scheme_designator,
                                     derivation_code.meaning,
                                     derivation_code.scheme_version),
                    ]

                    derivation_src_img_item = Dataset()
                    if len(plane_sort_index) > len(self._source_images):
                        # A single multi-frame source image
                        src_img_item = self.SourceImageSequence[0]
                        # Frame numbers are one-based
                        derivation_src_img_item.ReferencedFrameNumber = j + 1
                    else:
                        # Multiple single-frame source images
                        src_img_item = self.SourceImageSequence[j]
                    derivation_src_img_item.ReferencedSOPClassUID = \
                        src_img_item.ReferencedSOPClassUID
                    derivation_src_img_item.ReferencedSOPInstanceUID = \
                        src_img_item.ReferencedSOPInstanceUID
                    purpose_code = \
                        codes.cid7202.SourceImageForImageProcessingOperation
                    derivation_src_img_item.PurposeOfReferenceCodeSequence = [
                        CodedConcept(purpose_code.value,
                                     purpose_code.scheme_designator,
                                     purpose_code.meaning,
                                     purpose_code.scheme_version),
                    ]
                    derivation_src_img_item.SpatialLocationsPreserved = 'YES'
                    derivation_image_item.SourceImageSequence = [
                        derivation_src_img_item,
                    ]
                    pffp_item.DerivationImageSequence.append(
                        derivation_image_item)
                else:
                    logger.warning('spatial locations not preserved')

                identification = Dataset()
                identification.ReferencedSegmentNumber = segment_number
                pffp_item.SegmentIdentificationSequence = [
                    identification,
                ]
                self.PerFrameFunctionalGroupsSequence.append(pffp_item)
                self.NumberOfFrames += 1

            if framewise_encoding:
                # Straightforward concatenation of the binary data
                self.PixelData += self._encode_pixels(
                    planes[contained_plane_index])
            else:
                if is_encaps:
                    # Encode this frame and add to the list for encapsulation
                    # at the end
                    for f in contained_plane_index:
                        full_frames_list.append(self._encode_pixels(planes[f]))
                else:
                    # Concatenate the 1D array for re-encoding at the end
                    full_pixel_array = np.concatenate([
                        full_pixel_array,
                        planes[contained_plane_index].flatten()
                    ])

            # In case of a tiled Total Pixel Matrix pixel data for the same
            # segment may be added.
            if segment_number not in self._segment_inventory:
                self.SegmentSequence.append(segment_descriptions[i])
            self._segment_inventory.add(segment_number)

        # Re-encode the whole pixel array at once if necessary
        if not framewise_encoding:
            if is_encaps:
                self.PixelData = encapsulate(full_frames_list)
            else:
                self.PixelData = self._encode_pixels(full_pixel_array)

        # Add back the null trailing byte if required
        if len(self.PixelData) % 2 == 1:
            self.PixelData += b'\x00'
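The accumulate-then-encapsulate pattern used above reduces to a few lines when appending one frame to an existing encapsulated dataset; seg and new_frame below are placeholders:

    from pydicom.encaps import decode_data_sequence, encapsulate

    frames = decode_data_sequence(seg.PixelData)  # per-frame encoding stays intact
    frames.append(new_frame)  # one additional, already-encoded frame
    seg.PixelData = encapsulate(frames)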
Example #10
 def time_encapsulate_ten_nobot(self):
     """Time encapsulating frames with 10 fragments per frame."""
     for ii in range(self.no_runs):
         encapsulate(self.test_data, 10, has_bot=False)
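An equivalent measurement with the standard library, using synthetic frames in place of self.test_data (an assumption about its contents):

    import timeit

    from pydicom.encaps import encapsulate

    frames = [b'\x00' * 4096 for _ in range(10)]
    print(timeit.timeit(lambda: encapsulate(frames, 10, has_bot=False), number=100))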
Example #11
import cv2
import PIL.Image

from pydicom import dcmread
from pydicom.encaps import encapsulate
from pydicom.uid import JPEG2000

import matplotlib.pyplot as plt

PIL.Image.MAX_IMAGE_PIXELS = None
im_source = 'MicroDicomimage-00000.tif'
im_name = im_source.replace('.tif', '')
im = PIL.Image.open(im_source)
im.save(im_name + '.j2k', irreversible=False)

# Template file or whatever
ds = dcmread('CT_small.dcm')
with open((im_name + '.j2k'), 'rb') as f:
    # Image is only a single frame
    ds.PixelData = encapsulate([f.read()])

img = cv2.imread(im_source)
arr = img

ds.file_meta.TransferSyntaxUID = JPEG2000  # encapsulated pixel data requires an encapsulated transfer syntax
ds.Rows, ds.Columns, _ = arr.shape
ds.PhotometricInterpretation = "MONOCHROME1"
ds.SamplesPerPixel = 1
ds.BitsStored = 8
ds.BitsAllocated = 8
ds.HighBit = 7
ds.PixelRepresentation = 0
ds.save_as(im_name + '_tifresult.dcm')

# plt.imshow(ds.pixel_array, cmap=plt.cm.gray)
Example #12
    def __init__(
            self,
            source_images: Sequence[Dataset],
            pixel_array: np.ndarray,
            segmentation_type: Union[str, SegmentationTypeValues],
            segment_descriptions: Sequence[SegmentDescription],
            series_instance_uid: str,
            series_number: int,
            sop_instance_uid: str,
            instance_number: int,
            manufacturer: str,
            manufacturer_model_name: str,
            software_versions: Union[str, Tuple[str]],
            device_serial_number: str,
            fractional_type: Optional[
                Union[str, SegmentationFractionalTypeValues]
            ] = SegmentationFractionalTypeValues.PROBABILITY,
            max_fractional_value: int = 255,
            content_description: Optional[str] = None,
            content_creator_name: Optional[Union[str, PersonName]] = None,
            transfer_syntax_uid: Union[str, UID] = ImplicitVRLittleEndian,
            pixel_measures: Optional[PixelMeasuresSequence] = None,
            plane_orientation: Optional[PlaneOrientationSequence] = None,
            plane_positions: Optional[Sequence[PlanePositionSequence]] = None,
            omit_empty_frames: bool = True,
            **kwargs: Any) -> None:
        """
        Parameters
        ----------
        source_images: Sequence[pydicom.dataset.Dataset]
            One or more single- or multi-frame images (or metadata of images)
            from which the segmentation was derived
        pixel_array: numpy.ndarray
            Array of segmentation pixel data of boolean, unsigned integer or
            floating point data type representing a mask image. The array may
            be a 2D, 3D or 4D numpy array.

            If it is a 2D numpy array, it represents the segmentation of a
            single frame image, such as a planar x-ray or single instance from
            a CT or MR series.

            If it is a 3D array, it represents the segmentation of either a
            series of source images (such as a series of CT or MR images) a
            single 3D multi-frame image (such as a multi-frame CT/MR image), or
            a single 2D tiled image (such as a slide microscopy image).

            If ``pixel_array`` represents the segmentation of a 3D image, the
            first dimension represents individual 2D planes. Unless the
            ``plane_positions`` parameter is provided, the frame in
            ``pixel_array[i, ...]`` should correspond to either
            ``source_images[i]`` (if ``source_images`` is a list of single
            frame instances) or ``source_images[0].pixel_array[i, ...]`` if
            ``source_images`` is a single multiframe instance.

            Similarly, if ``pixel_array`` is a 3D array representing the
            segmentation of a tiled 2D image, the first dimension represents
            individual 2D tiles (for one channel and z-stack) and these tiles
            correspond to the frames in the source image dataset.

            If ``pixel_array`` is an unsigned integer or boolean array with
            binary data (containing only the values ``True`` and ``False`` or
            ``0`` and ``1``) or a floating-point array, it represents a single
            segment. In the case of a floating-point array, values must be in
            the range 0.0 to 1.0.

            Otherwise, if ``pixel_array`` is a 2D or 3D array containing multiple
            unsigned integer values, each value is treated as a different
            segment whose segment number is that integer value. This is
            referred to as a *label map* style segmentation.  In this case, all
            segments from 1 through ``pixel_array.max()`` (inclusive) must be
            described in `segment_descriptions`, regardless of whether they are
            present in the image.  Note that this is valid for segmentations
            encoded using the ``"BINARY"`` or ``"FRACTIONAL"`` methods.

            Note that a 2D numpy array and a 3D numpy array with a
            single frame along the first dimension may be used interchangeably
            as segmentations of a single frame, regardless of their data type.

            If ``pixel_array`` is a 4D numpy array, the first three dimensions
            are used in the same way as the 3D case and the fourth dimension
            represents multiple segments. In this case
            ``pixel_array[:, :, :, i]`` represents segment number ``i + 1``
            (since numpy indexing is 0-based but segment numbering is 1-based),
            and all segments from 1 through ``pixel_array.shape[-1]`` must
            be described in ``segment_descriptions``.

            Furthermore, a 4D array with unsigned integer data type must
            contain only binary data (``True`` and ``False`` or ``0`` and
            ``1``). In other words, a 4D array is incompatible with the *label
            map* style encoding of the segmentation.

            Where there are multiple segments that are mutually exclusive (do
            not overlap) and binary, they may be passed using either a *label
            map* style array or a 4D array. A 4D array is required if either
            there are multiple segments and they are not mutually exclusive
            (i.e. they overlap) or there are multiple segments and the
            segmentation is fractional.

            Note that if the segmentation of a single source image with
            multiple stacked segments is required, it is necessary to include
            the singleton first dimension in order to give a 4D array.

            For ``"FRACTIONAL"`` segmentations, values either encode the
            probability of a given pixel belonging to a segment
            (if `fractional_type` is ``"PROBABILITY"``)
            or the extent to which a segment occupies the pixel
            (if `fractional_type` is ``"OCCUPANCY"``).

        segmentation_type: Union[str, highdicom.seg.SegmentationTypeValues]
            Type of segmentation, either ``"BINARY"`` or ``"FRACTIONAL"``
        segment_descriptions: Sequence[highdicom.seg.SegmentDescription]
            Description of each segment encoded in `pixel_array`. In the case of
            pixel arrays with multiple integer values, the segment description
            with the corresponding segment number is used to describe each segment.
        series_instance_uid: str
            UID of the series
        series_number: Union[int, None]
            Number of the series within the study
        sop_instance_uid: str
            UID that should be assigned to the instance
        instance_number: int
            Number that should be assigned to the instance
        manufacturer: str
            Name of the manufacturer of the device (developer of the software)
            that creates the instance
        manufacturer_model_name: str
            Name of the device model (name of the software library or
            application) that creates the instance
        software_versions: Union[str, Tuple[str]]
            Version(s) of the software that creates the instance
        device_serial_number: str
            Manufacturer's serial number of the device
        fractional_type: Union[str, highdicom.seg.SegmentationFractionalTypeValues], optional
            Type of fractional segmentation that indicates how pixel data
            should be interpreted
        max_fractional_value: int, optional
            Maximum value that indicates probability or occupancy of 1 that
            a pixel represents a given segment
        content_description: str, optional
            Description of the segmentation
        content_creator_name: Optional[Union[str, PersonName]], optional
            Name of the creator of the segmentation
        transfer_syntax_uid: str, optional
            UID of transfer syntax that should be used for encoding of
            data elements. The following lossless compressed transfer syntaxes
            are supported for encapsulated format encoding in case of
            FRACTIONAL segmentation type:
            RLE Lossless (``"1.2.840.10008.1.2.5"``) and
            JPEG 2000 Lossless (``"1.2.840.10008.1.2.4.90"``).
        pixel_measures: PixelMeasures, optional
            Physical spacing of image pixels in `pixel_array`.
            If ``None``, it will be assumed that the segmentation image has the
            same pixel measures as the source image(s).
        plane_orientation: highdicom.PlaneOrientationSequence, optional
            Orientation of planes in `pixel_array` relative to axes of
            three-dimensional patient or slide coordinate space.
            If ``None``, it will be assumed that the segmentation image has the
            same plane orientation as the source image(s).
        plane_positions: Sequence[highdicom.PlanePositionSequence], optional
            Position of each plane in `pixel_array` in the three-dimensional
            patient or slide coordinate space.
            If ``None``, it will be assumed that the segmentation image has the
            same plane position as the source image(s). However, this will only
            work when the first dimension of `pixel_array` matches the number
            of frames in `source_images` (in case of multi-frame source images)
            or the number of `source_images` (in case of single-frame source
            images).
        omit_empty_frames: bool, optional
            If True (default), frames with no non-zero pixels are omitted from
            the segmentation image. If False, all frames are included.
        **kwargs: Any, optional
            Additional keyword arguments that will be passed to the constructor
            of `highdicom.base.SOPClass`

        Raises
        ------
        ValueError
            When
                * Length of `source_images` is zero.
                * Items of `source_images` are not all part of the same study
                  and series.
                * Items of `source_images` have different number of rows and
                  columns.
                * Length of `plane_positions` does not match number of segments
                  encoded in `pixel_array`.
                * Length of `plane_positions` does not match number of 2D planes
                  in `pixel_array` (size of first array dimension).

        Note
        ----
        The assumption is made that segments in `pixel_array` are defined in
        the same frame of reference as `source_images`.


        """  # noqa
        if len(source_images) == 0:
            raise ValueError('At least one source image is required.')

        uniqueness_criteria = set((
            image.StudyInstanceUID,
            image.SeriesInstanceUID,
            image.Rows,
            image.Columns,
        ) for image in source_images)
        if len(uniqueness_criteria) > 1:
            raise ValueError(
                'Source images must all be part of the same series and must '
                'have the same image dimensions (number of rows/columns).')

        src_img = source_images[0]
        is_multiframe = hasattr(src_img, 'NumberOfFrames')
        if is_multiframe and len(source_images) > 1:
            raise ValueError(
                'Only one source image should be provided in case images '
                'are multi-frame images.')
        supported_transfer_syntaxes = {
            ImplicitVRLittleEndian,
            ExplicitVRLittleEndian,
            JPEG2000Lossless,
            RLELossless,
        }
        if transfer_syntax_uid not in supported_transfer_syntaxes:
            raise ValueError('Transfer syntax "{}" is not supported'.format(
                transfer_syntax_uid))

        if pixel_array.ndim == 2:
            pixel_array = pixel_array[np.newaxis, ...]

        super().__init__(study_instance_uid=src_img.StudyInstanceUID,
                         series_instance_uid=series_instance_uid,
                         series_number=series_number,
                         sop_instance_uid=sop_instance_uid,
                         instance_number=instance_number,
                         sop_class_uid='1.2.840.10008.5.1.4.1.1.66.4',
                         manufacturer=manufacturer,
                         modality='SEG',
                         transfer_syntax_uid=transfer_syntax_uid,
                         patient_id=src_img.PatientID,
                         patient_name=src_img.PatientName,
                         patient_birth_date=src_img.PatientBirthDate,
                         patient_sex=src_img.PatientSex,
                         accession_number=src_img.AccessionNumber,
                         study_id=src_img.StudyID,
                         study_date=src_img.StudyDate,
                         study_time=src_img.StudyTime,
                         referring_physician_name=getattr(
                             src_img, 'ReferringPhysicianName', None),
                         **kwargs)

        # Using Container Type Code Sequence attribute would be more elegant,
        # but unfortunately it is a type 2 attribute.
        if (hasattr(src_img, 'ImageOrientationSlide')
                or hasattr(src_img, 'ImageCenterPointCoordinatesSequence')):
            self._coordinate_system = CoordinateSystemNames.SLIDE
        else:
            self._coordinate_system = CoordinateSystemNames.PATIENT

        # Frame of Reference
        self.FrameOfReferenceUID = src_img.FrameOfReferenceUID
        self.PositionReferenceIndicator = getattr(
            src_img, 'PositionReferenceIndicator', None)

        # (Enhanced) General Equipment
        self.DeviceSerialNumber = device_serial_number
        self.ManufacturerModelName = manufacturer_model_name
        self.SoftwareVersions = software_versions

        # General Reference
        self.SourceImageSequence: List[Dataset] = []
        referenced_series: Dict[str, List[Dataset]] = defaultdict(list)
        for s_img in source_images:
            ref = Dataset()
            ref.ReferencedSOPClassUID = s_img.SOPClassUID
            ref.ReferencedSOPInstanceUID = s_img.SOPInstanceUID
            self.SourceImageSequence.append(ref)
            referenced_series[s_img.SeriesInstanceUID].append(ref)

        # Common Instance Reference
        self.ReferencedSeriesSequence: List[Dataset] = []
        for series_instance_uid, referenced_images in referenced_series.items():
            ref = Dataset()
            ref.SeriesInstanceUID = series_instance_uid
            ref.ReferencedInstanceSequence = referenced_images
            self.ReferencedSeriesSequence.append(ref)

        # Image Pixel
        self.Rows = pixel_array.shape[1]
        self.Columns = pixel_array.shape[2]

        # Segmentation Image
        self.ImageType = ['DERIVED', 'PRIMARY']
        self.SamplesPerPixel = 1
        self.PhotometricInterpretation = 'MONOCHROME2'
        self.PixelRepresentation = 0
        self.ContentLabel = 'ISO_IR 192'  # UTF-8
        self.ContentDescription = content_description
        if content_creator_name is not None:
            check_person_name(content_creator_name)
        self.ContentCreatorName = content_creator_name

        segmentation_type = SegmentationTypeValues(segmentation_type)
        self.SegmentationType = segmentation_type.value
        if self.SegmentationType == SegmentationTypeValues.BINARY.value:
            self.BitsAllocated = 1
            self.HighBit = 0
            if self.file_meta.TransferSyntaxUID.is_encapsulated:
                raise ValueError(
                    'The chosen transfer syntax '
                    f'{self.file_meta.TransferSyntaxUID} '
                    'is not compatible with the BINARY segmentation type')
        elif self.SegmentationType == SegmentationTypeValues.FRACTIONAL.value:
            self.BitsAllocated = 8
            self.HighBit = 7
            segmentation_fractional_type = SegmentationFractionalTypeValues(
                fractional_type)
            self.SegmentationFractionalType = segmentation_fractional_type.value
            if max_fractional_value > 2**8 - 1:
                raise ValueError(
                    'Maximum fractional value must not exceed image bit depth.'
                )
            self.MaximumFractionalValue = max_fractional_value
        else:
            raise ValueError(
                'Unknown segmentation type "{}"'.format(segmentation_type))

        self.BitsStored = self.BitsAllocated
        self.LossyImageCompression = getattr(src_img, 'LossyImageCompression',
                                             '00')
        if self.LossyImageCompression == '01':
            self.LossyImageCompressionRatio = \
                src_img.LossyImageCompressionRatio
            self.LossyImageCompressionMethod = \
                src_img.LossyImageCompressionMethod

        self.SegmentSequence: List[Dataset] = []

        # Multi-Frame Functional Groups and Multi-Frame Dimensions
        shared_func_groups = Dataset()
        if pixel_measures is None:
            if is_multiframe:
                src_shared_fg = src_img.SharedFunctionalGroupsSequence[0]
                pixel_measures = src_shared_fg.PixelMeasuresSequence
            else:
                pixel_measures = PixelMeasuresSequence(
                    pixel_spacing=src_img.PixelSpacing,
                    slice_thickness=src_img.SliceThickness,
                    spacing_between_slices=src_img.get('SpacingBetweenSlices',
                                                       None))
            # TODO: ensure derived segmentation image and original image have
            # same physical dimensions
            # seg_row_dim = self.Rows * pixel_measures[0].PixelSpacing[0]
            # seg_col_dim = self.Columns * pixel_measures[0].PixelSpacing[1]
            # src_row_dim = src_img.Rows

        if is_multiframe:
            if self._coordinate_system == CoordinateSystemNames.SLIDE:
                source_plane_orientation = PlaneOrientationSequence(
                    coordinate_system=self._coordinate_system,
                    image_orientation=src_img.ImageOrientationSlide)
            else:
                src_sfg = src_img.SharedFunctionalGroupsSequence[0]
                source_plane_orientation = src_sfg.PlaneOrientationSequence
        else:
            source_plane_orientation = PlaneOrientationSequence(
                coordinate_system=self._coordinate_system,
                image_orientation=src_img.ImageOrientationPatient)
        if plane_orientation is None:
            plane_orientation = source_plane_orientation

        self.DimensionIndexSequence = DimensionIndexSequence(
            coordinate_system=self._coordinate_system)
        dimension_organization = Dataset()
        dimension_organization.DimensionOrganizationUID = \
            self.DimensionIndexSequence[0].DimensionOrganizationUID
        self.DimensionOrganizationSequence = [dimension_organization]

        if is_multiframe:
            source_plane_positions = \
                self.DimensionIndexSequence.get_plane_positions_of_image(
                    source_images[0]
                )
        else:
            source_plane_positions = \
                self.DimensionIndexSequence.get_plane_positions_of_series(
                    source_images
                )

        shared_func_groups.PixelMeasuresSequence = pixel_measures
        shared_func_groups.PlaneOrientationSequence = plane_orientation
        self.SharedFunctionalGroupsSequence = [shared_func_groups]

        # NOTE: Information about individual frames will be updated below
        self.NumberOfFrames = 0
        self.PerFrameFunctionalGroupsSequence: List[Dataset] = []

        if pixel_array.ndim == 2:
            pixel_array = pixel_array[np.newaxis, ...]
        if pixel_array.ndim not in [3, 4]:
            raise ValueError('Pixel array must be a 2D, 3D, or 4D array.')

        if pixel_array.shape[1:3] != (self.Rows, self.Columns):
            raise ValueError(
                'Pixel array representing segments has the wrong number of '
                'rows and columns.')

        # Check segment numbers
        described_segment_numbers = np.array(
            [int(item.SegmentNumber) for item in segment_descriptions])
        self._check_segment_numbers(described_segment_numbers)

        # Checks on pixels and overlap
        pixel_array, segments_overlap = self._check_pixel_array(
            pixel_array, described_segment_numbers, segmentation_type)
        self.SegmentsOverlap = segments_overlap.value

        if plane_positions is None:
            if pixel_array.shape[0] != len(source_plane_positions):
                raise ValueError(
                    'Number of frames in pixel array does not match number '
                    'of source image frames.')
            plane_positions = source_plane_positions
        else:
            if pixel_array.shape[0] != len(plane_positions):
                raise ValueError(
                    'Number of pixel array planes does not match number of '
                    'provided plane positions.')

        are_spatial_locations_preserved = (
            all(plane_positions[i] == source_plane_positions[i]
                for i in range(len(plane_positions)))
            and plane_orientation == source_plane_orientation)

        # Remove empty slices
        if omit_empty_frames:
            pixel_array, plane_positions, source_image_indices = \
                self._omit_empty_frames(pixel_array, plane_positions)
        else:
            source_image_indices = list(range(pixel_array.shape[0]))

        plane_position_values, plane_sort_index = \
            self.DimensionIndexSequence.get_index_values(plane_positions)

        # Get unique values of attributes in the Plane Position Sequence or
        # Plane Position Slide Sequence, which define the position of the plane
        # with respect to the three dimensional patient or slide coordinate
        # system, respectively. These can subsequently be used to look up the
        # relative position of a plane relative to the indexed dimension.
        dimension_position_values = [
            np.unique(plane_position_values[:, index], axis=0)
            for index in range(plane_position_values.shape[1])
        ]

        is_encaps = self.file_meta.TransferSyntaxUID.is_encapsulated
        if is_encaps:
            # In the case of encapsulated transfer syntaxes, we will accumulate
            # a list of encoded frames to encapsulate at the end
            full_frames_list = []
        else:
            # In the case of non-encapsulated (uncompressed) transfer syntaxes
            # we will accumulate a 1D array of pixels from all frames for
            # bitpacking at the end
            full_pixel_array = np.array([], np.bool_)

        for i, segment_number in enumerate(described_segment_numbers):
            # Pixel array for just this segment
            if pixel_array.dtype in (np.float_, np.float32, np.float64):
                # Floating-point numbers must be mapped to 8-bit integers in
                # the range [0, max_fractional_value].
                if pixel_array.ndim == 4:
                    segment_array = pixel_array[:, :, :, segment_number - 1]
                else:
                    segment_array = pixel_array
                planes = np.around(segment_array *
                                   float(self.MaximumFractionalValue))
                planes = planes.astype(np.uint8)
            elif pixel_array.dtype in (np.uint8, np.uint16):
                # Note that integer arrays with segments stacked down the last
                # dimension will already have been converted to bool, leaving
                # only "label maps" here, which must be converted to binary
                # masks.
                planes = np.zeros(pixel_array.shape, dtype=np.bool_)
                planes[pixel_array == segment_number] = True
            elif pixel_array.dtype == np.bool_:
                if pixel_array.ndim == 4:
                    planes = pixel_array[:, :, :, segment_number - 1]
                else:
                    planes = pixel_array
            else:
                raise TypeError('Pixel array has an invalid data type.')

            contained_plane_index = []
            for j in plane_sort_index:
                # Index of this frame in the original list of source indices
                source_image_index = source_image_indices[j]

                # Even though completely empty slices were removed earlier,
                # there may still be slices in which this specific segment is
                # absent. Such frames should be removed
                if omit_empty_frames and np.sum(planes[j]) == 0:
                    logger.info('skip empty plane #{} of segment #{}'.format(
                        j, segment_number))
                    continue
                contained_plane_index.append(j)
                logger.info('add plane #{} for segment #{}'.format(
                    j, segment_number))

                pffp_item = Dataset()
                frame_content_item = Dataset()
                frame_content_item.DimensionIndexValues = [segment_number]

                # Look up the position of the plane relative to the indexed
                # dimension.
                try:
                    if self._coordinate_system == CoordinateSystemNames.SLIDE:
                        index_values = [
                            np.where((dimension_position_values[idx]
                                      == pos))[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                    else:
                        # In case of the patient coordinate system, the
                        # value of the attribute the Dimension Index Sequence
                        # points to (Image Position Patient) has a value
                        # multiplicity greater than one.
                        index_values = [
                            np.where((dimension_position_values[idx]
                                      == pos).all(axis=1))[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                except IndexError as error:
                    raise IndexError(
                        'Could not determine position of plane #{} in '
                        'three dimensional coordinate system based on '
                        'dimension index values: {}'.format(j, error))
                frame_content_item.DimensionIndexValues.extend(index_values)
                pffp_item.FrameContentSequence = [frame_content_item]
                if self._coordinate_system == CoordinateSystemNames.SLIDE:
                    pffp_item.PlanePositionSlideSequence = plane_positions[j]
                else:
                    pffp_item.PlanePositionSequence = plane_positions[j]

                # Determining the source images that map to the frame is not
                # always trivial. Since DerivationImageSequence is a type 2
                # attribute, we leave its value empty.
                pffp_item.DerivationImageSequence = []

                if are_spatial_locations_preserved:
                    derivation_image_item = Dataset()
                    derivation_code = codes.cid7203.Segmentation
                    derivation_image_item.DerivationCodeSequence = [
                        CodedConcept(derivation_code.value,
                                     derivation_code.scheme_designator,
                                     derivation_code.meaning,
                                     derivation_code.scheme_version),
                    ]

                    derivation_src_img_item = Dataset()
                    if hasattr(source_images[0], 'NumberOfFrames'):
                        # A single multi-frame source image
                        src_img_item = self.SourceImageSequence[0]
                        # Frame numbers are one-based
                        derivation_src_img_item.ReferencedFrameNumber = (
                            source_image_index + 1)
                    else:
                        # Multiple single-frame source images
                        src_img_item = self.SourceImageSequence[
                            source_image_index]
                    derivation_src_img_item.ReferencedSOPClassUID = \
                        src_img_item.ReferencedSOPClassUID
                    derivation_src_img_item.ReferencedSOPInstanceUID = \
                        src_img_item.ReferencedSOPInstanceUID
                    purpose_code = \
                        codes.cid7202.SourceImageForImageProcessingOperation
                    derivation_src_img_item.PurposeOfReferenceCodeSequence = [
                        CodedConcept(purpose_code.value,
                                     purpose_code.scheme_designator,
                                     purpose_code.meaning,
                                     purpose_code.scheme_version),
                    ]
                    derivation_src_img_item.SpatialLocationsPreserved = 'YES'
                    derivation_image_item.SourceImageSequence = [
                        derivation_src_img_item,
                    ]
                    pffp_item.DerivationImageSequence.append(
                        derivation_image_item)
                else:
                    logger.warning('spatial locations not preserved')

                identification = Dataset()
                identification.ReferencedSegmentNumber = segment_number
                pffp_item.SegmentIdentificationSequence = [
                    identification,
                ]
                self.PerFrameFunctionalGroupsSequence.append(pffp_item)
                self.NumberOfFrames += 1

            if is_encaps:
                # Encode this frame and add to the list for encapsulation
                # at the end
                for f in contained_plane_index:
                    full_frames_list.append(self._encode_pixels(planes[f]))
            else:
                # Concatenate the 1D array for re-encoding at the end
                full_pixel_array = np.concatenate([
                    full_pixel_array, planes[contained_plane_index].flatten()
                ])

            self.SegmentSequence.append(segment_descriptions[i])

        if is_encaps:
            # Encapsulate all pre-compressed frames
            self.PixelData = encapsulate(full_frames_list)
        else:
            # Encode the whole pixel array at once
            # This allows for correct bit-packing in cases where
            # number of pixels per frame is not a multiple of 8
            self.PixelData = self._encode_pixels(full_pixel_array)

        # Add a null trailing byte if required to give the pixel data even
        # length, as required by DICOM
        if len(self.PixelData) % 2 == 1:
            self.PixelData += b'\x00'

        self.copy_specimen_information(src_img)
        self.copy_patient_and_study_information(src_img)
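A minimal sketch (not part of the class above) to make the bit-packing comment concrete: numpy's ``packbits`` pads each call's output to a whole byte, so packing each 1-bit frame separately would insert stray padding bits whenever the number of pixels per frame is not a multiple of 8. Concatenating the frames first keeps them bit-contiguous.

import numpy as np

frame = np.ones(9, dtype=np.uint8)  # 9 one-bit pixels: not a multiple of 8

# Packing the frames separately pads each one to whole bytes,
# leaving 7 stray bits between them:
separate = np.packbits(frame, bitorder='little').tobytes() * 2
assert len(separate) == 4  # 2 bytes per frame

# Packing the concatenated array keeps the frames bit-contiguous,
# as the native 1-bit encoding requires:
together = np.packbits(
    np.concatenate([frame, frame]), bitorder='little').tobytes()
assert len(together) == 3  # ceil(18 / 8)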
Example No. 15
0
def decode_frame(
    value: bytes,
    transfer_syntax_uid: str,
    rows: int,
    columns: int,
    samples_per_pixel: int,
    bits_allocated: int,
    bits_stored: int,
    photometric_interpretation: Union[PhotometricInterpretationValues, str],
    pixel_representation: Union[PixelRepresentationValues, int] = 0,
    planar_configuration: Optional[Union[PlanarConfigurationValues,
                                         int]] = None
) -> np.ndarray:
    """Decodes pixel data of an individual frame.

    Parameters
    ----------
    value: bytes
        Pixel data of a frame (potentially compressed in case
        of encapsulated format encoding, depending on the transfer syntax)
    transfer_syntax_uid: str
        Transfer Syntax UID
    rows: int
        Number of pixel rows in the frame
    columns: int
        Number of pixel columns in the frame
    samples_per_pixel: int
        Number of (color) samples per pixel
    bits_allocated: int
        Number of bits that need to be allocated per pixel sample
    bits_stored: int
        Number of bits that are required to store a pixel sample
    photometric_interpretation: Union[str, PhotometricInterpretationValues]
        Photometric interpretation
    pixel_representation: Union[int, PixelRepresentationValues], optional
        Whether pixel samples are represented as unsigned integers or as
        2's complement signed integers
    planar_configuration: Union[int, PlanarConfigurationValues], optional
        Whether color samples are encoded by pixel (``R1G1B1R2G2B2...``) or
        by plane (``R1R2...G1G2...B1B2...``).

    Returns
    -------
    numpy.ndarray
        Decoded pixel data

    Raises
    ------
    ValueError
        When transfer syntax is not supported.

    """
    pixel_representation = PixelRepresentationValues(
        pixel_representation).value
    photometric_interpretation = PhotometricInterpretationValues(
        photometric_interpretation).value
    if samples_per_pixel > 1:
        if planar_configuration is None:
            raise ValueError(
                'Planar configuration needs to be specified for decoding of '
                'color image frames.')
        planar_configuration = PlanarConfigurationValues(
            planar_configuration).value

    # The pydicom library does not currently support decoding individual
    # frames. This hack creates a small dataset containing only a single
    # frame, which can then be decoded using the pydicom API.
    file_meta = Dataset()
    file_meta.TransferSyntaxUID = transfer_syntax_uid
    ds = Dataset()
    ds.file_meta = file_meta
    ds.Rows = rows
    ds.Columns = columns
    ds.SamplesPerPixel = samples_per_pixel
    ds.PhotometricInterpretation = photometric_interpretation
    ds.PixelRepresentation = pixel_representation
    ds.PlanarConfiguration = planar_configuration
    ds.BitsAllocated = bits_allocated
    ds.BitsStored = bits_stored
    ds.HighBit = bits_stored - 1

    if UID(file_meta.TransferSyntaxUID).is_encapsulated:
        if (transfer_syntax_uid == JPEGBaseline
                and photometric_interpretation == 'RGB'):
            # RGB color images, which were not transformed into YCbCr color
            # space upon JPEG compression, need to be handled separately.
            # Pillow assumes that images were transformed into YCbCr color
            # space prior to JPEG compression. However, with photometric
            # interpretation RGB, no color transformation was performed.
            # Setting the value of "mode" to YCbCr signals Pillow to not
            # apply any color transformation upon decompression.
            image = Image.open(BytesIO(value))
            color_mode = 'YCbCr'
            image.tile = [(
                'jpeg',
                image.tile[0][1],
                image.tile[0][2],
                (color_mode, ''),
            )]
            image.mode = color_mode
            image.rawmode = color_mode
            return np.asarray(image)
        else:
            ds.PixelData = encapsulate(frames=[value])
    else:
        ds.PixelData = value

    return ds.pixel_array
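A hypothetical usage sketch for the function above: read an encapsulated multi-frame dataset with pydicom, pull out its first frame, and decode it. The file path is a placeholder.

from pydicom import dcmread
from pydicom.encaps import generate_pixel_data_frame

ds = dcmread('multiframe.dcm')  # placeholder path
first_frame = next(generate_pixel_data_frame(ds.PixelData))
array = decode_frame(
    value=first_frame,
    transfer_syntax_uid=ds.file_meta.TransferSyntaxUID,
    rows=ds.Rows,
    columns=ds.Columns,
    samples_per_pixel=ds.SamplesPerPixel,
    bits_allocated=ds.BitsAllocated,
    bits_stored=ds.BitsStored,
    photometric_interpretation=ds.PhotometricInterpretation,
    pixel_representation=ds.PixelRepresentation,
    planar_configuration=getattr(ds, 'PlanarConfiguration', None))
assert array.shape[:2] == (ds.Rows, ds.Columns)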
Example No. 16
0
 def time_encapsulate_single_bot(self):
     """Time encapsulating frames with 1 fragment per frame."""
     for ii in range(self.no_runs):
         encapsulate(self.test_data, 1, has_bot=True)
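This timing method assumes a harness that defines ``self.test_data`` and ``self.no_runs``. Below is a hedged sketch of such a setup in the style of an asv benchmark suite; the class name, file path, and run count are illustrative, not taken from the original suite.

from pydicom import dcmread
from pydicom.encaps import decode_data_sequence, encapsulate

class TimeEncapsulate:
    def setup(self):
        # Placeholder: any encapsulated multi-frame dataset will do
        ds = dcmread('ten_frame_multiframe.dcm')
        self.test_data = decode_data_sequence(ds.PixelData)
        self.no_runs = 100

    def time_encapsulate_single_bot(self):
        """Time encapsulating frames with 1 fragment per frame."""
        for ii in range(self.no_runs):
            encapsulate(self.test_data, 1, has_bot=True)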
Example No. 17
0
    def __init__(
            self,
            pixel_array: np.ndarray,
            photometric_interpretation: Union[str,
                                              PhotometricInterpretationValues],
            bits_allocated: int,
            coordinate_system: Union[str, CoordinateSystemNames],
            study_instance_uid: str,
            series_instance_uid: str,
            series_number: int,
            sop_instance_uid: str,
            instance_number: int,
            manufacturer: str,
            patient_id: Optional[str] = None,
            patient_name: Optional[Union[str, PersonName]] = None,
            patient_birth_date: Optional[str] = None,
            patient_sex: Optional[str] = None,
            accession_number: Optional[str] = None,
            study_id: Optional[str] = None,
            study_date: Optional[Union[str, datetime.date]] = None,
            study_time: Optional[Union[str, datetime.time]] = None,
            referring_physician_name: Optional[Union[str, PersonName]] = None,
            pixel_spacing: Optional[Tuple[int, int]] = None,
            laterality: Optional[Union[str, LateralityValues]] = None,
            patient_orientation: Optional[
                Union[Tuple[str, str], Tuple[PatientOrientationValuesBiped,
                                             PatientOrientationValuesBiped, ],
                      Tuple[PatientOrientationValuesQuadruped,
                            PatientOrientationValuesQuadruped, ]]] = None,
            anatomical_orientation_type: Optional[Union[
                str, AnatomicalOrientationTypeValues]] = None,
            container_identifier: Optional[str] = None,
            issuer_of_container_identifier: Optional[
                IssuerOfIdentifier] = None,
            specimen_descriptions: Optional[
                Sequence[SpecimenDescription]] = None,
            transfer_syntax_uid: str = ImplicitVRLittleEndian,
            **kwargs: Any):
        """

        Parameters
        ----------
        pixel_array: numpy.ndarray
            Array of unsigned integer pixel values representing a single-frame
            image; either a 2D grayscale image or a 3D color image
            (RGB color space)
        photometric_interpretation: Union[str, highdicom.enum.PhotometricInterpretationValues]
            Interpretation of pixel data; either ``"MONOCHROME1"`` or
            ``"MONOCHROME2"`` for 2D grayscale images or ``"RGB"`` or
            ``"YBR_FULL"`` for 3D color images
        bits_allocated: int
            Number of bits that should be allocated per pixel value
        coordinate_system: Union[str, highdicom.enum.CoordinateSystemNames]
            Subject (``"PATIENT"`` or ``"SLIDE"``) that was the target of
            imaging
        study_instance_uid: str
            Study Instance UID
        series_instance_uid: str
            Series Instance UID of the SC image series
        series_number: int
            Series Number of the SC image series
        sop_instance_uid: str
            SOP instance UID that should be assigned to the SC image instance
        instance_number: int
            Number that should be assigned to this SC image instance
        manufacturer: str
            Name of the manufacturer of the device that creates the SC image
            instance (in a research setting this is typically the same
            as `institution_name`)
        patient_id: str, optional
           ID of the patient (medical record number)
        patient_name: Union[str, PersonName], optional
           Name of the patient
        patient_birth_date: str, optional
           Patient's birth date
        patient_sex: str, optional
           Patient's sex
        study_id: str, optional
           ID of the study
        accession_number: str, optional
           Accession number of the study
        study_date: Union[str, datetime.date], optional
           Date of study creation
        study_time: Union[str, datetime.time], optional
           Time of study creation
        referring_physician_name: Union[str, PersonName], optional
            Name of the referring physician
        pixel_spacing: Tuple[int, int], optional
            Physical spacing in millimeter between pixels along the row and
            column dimension
        laterality: Union[str, highdicom.enum.LateralityValues], optional
            Laterality of the examined body part
        patient_orientation:
                Union[Tuple[str, str], Tuple[highdicom.enum.PatientOrientationValuesBiped, highdicom.enum.PatientOrientationValuesBiped], Tuple[highdicom.enum.PatientOrientationValuesQuadruped, highdicom.enum.PatientOrientationValuesQuadruped]], optional
            Orientation of the patient along the row and column axes of the
            image (required if `coordinate_system` is ``"PATIENT"``)
        anatomical_orientation_type: Union[str, highdicom.enum.AnatomicalOrientationTypeValues], optional
            Type of anatomical orientation of patient relative to image (may
            be provided if `coordinate_system` is ``"PATIENT"`` and patient
            is an animal)
        container_identifier: str, optional
            Identifier of the container holding the specimen (required if
            `coordinate_system` is ``"SLIDE"``)
        issuer_of_container_identifier: highdicom.IssuerOfIdentifier, optional
            Issuer of `container_identifier`
        specimen_descriptions: Sequence[highdicom.SpecimenDescription], optional
            Description of each examined specimen (required if
            `coordinate_system` is ``"SLIDE"``)
        transfer_syntax_uid: str, optional
            UID of transfer syntax that should be used for encoding of
            data elements. The only compressed transfer syntax supported
            is RLE Lossless (``"1.2.840.10008.1.2.5"``), which is lossless.
        **kwargs: Any, optional
            Additional keyword arguments that will be passed to the constructor
            of `highdicom.base.SOPClass`

        """  # noqa
        supported_transfer_syntaxes = {
            ImplicitVRLittleEndian,
            ExplicitVRLittleEndian,
            RLELossless,
        }
        if transfer_syntax_uid not in supported_transfer_syntaxes:
            raise ValueError(
                f'Transfer syntax "{transfer_syntax_uid}" is not supported')

        # Check names
        if patient_name is not None:
            check_person_name(patient_name)
        if referring_physician_name is not None:
            check_person_name(referring_physician_name)

        super().__init__(study_instance_uid=study_instance_uid,
                         series_instance_uid=series_instance_uid,
                         series_number=series_number,
                         sop_instance_uid=sop_instance_uid,
                         sop_class_uid=SecondaryCaptureImageStorage,
                         instance_number=instance_number,
                         manufacturer=manufacturer,
                         modality='OT',
                         transfer_syntax_uid=transfer_syntax_uid,
                         patient_id=patient_id,
                         patient_name=patient_name,
                         patient_birth_date=patient_birth_date,
                         patient_sex=patient_sex,
                         accession_number=accession_number,
                         study_id=study_id,
                         study_date=study_date,
                         study_time=study_time,
                         referring_physician_name=referring_physician_name,
                         **kwargs)

        coordinate_system = CoordinateSystemNames(coordinate_system)
        if coordinate_system == CoordinateSystemNames.PATIENT:
            if patient_orientation is None:
                raise TypeError(
                    'Patient orientation is required if coordinate system '
                    'is "PATIENT".')

            # General Series
            if laterality is not None:
                laterality = LateralityValues(laterality)
                self.Laterality = laterality.value

            # General Image
            if anatomical_orientation_type is not None:
                anatomical_orientation_type = AnatomicalOrientationTypeValues(
                    anatomical_orientation_type)
                self.AnatomicalOrientationType = \
                    anatomical_orientation_type.value
            else:
                anatomical_orientation_type = \
                    AnatomicalOrientationTypeValues.BIPED

            row_orientation, col_orientation = patient_orientation
            if (anatomical_orientation_type ==
                    AnatomicalOrientationTypeValues.BIPED):
                patient_orientation = (
                    PatientOrientationValuesBiped(row_orientation).value,
                    PatientOrientationValuesBiped(col_orientation).value,
                )
            else:
                patient_orientation = (
                    PatientOrientationValuesQuadruped(row_orientation).value,
                    PatientOrientationValuesQuadruped(col_orientation).value,
                )
            self.PatientOrientation = list(patient_orientation)

        elif coordinate_system == CoordinateSystemNames.SLIDE:
            if container_identifier is None:
                raise TypeError(
                    'Container identifier is required if coordinate system '
                    'is "SLIDE".')
            if specimen_descriptions is None:
                raise TypeError(
                    'Specimen descriptions are required if coordinate system '
                    'is "SLIDE".')

            # Specimen
            self.ContainerIdentifier = container_identifier
            self.IssuerOfTheContainerIdentifierSequence: List[Dataset] = []
            if issuer_of_container_identifier is not None:
                self.IssuerOfTheContainerIdentifierSequence.append(
                    issuer_of_container_identifier)
            container_type_item = CodedConcept(*codes.SCT.MicroscopeSlide)
            self.ContainerTypeCodeSequence = [container_type_item]
            self.SpecimenDescriptionSequence = specimen_descriptions

        # SC Equipment
        self.ConversionType = ConversionTypeValues.DI.value

        # SC Image
        now = datetime.datetime.now()
        self.DateOfSecondaryCapture = DA(now.date())
        self.TimeOfSecondaryCapture = TM(now.time())

        # Image Pixel
        self.ImageType = ['DERIVED', 'SECONDARY', 'OTHER']
        self.Rows = pixel_array.shape[0]
        self.Columns = pixel_array.shape[1]
        allowed_types = [np.bool_, np.uint8, np.uint16]
        if not any(pixel_array.dtype == t for t in allowed_types):
            raise TypeError(
                'Pixel array must be of type np.bool_, np.uint8 or np.uint16. '
                f'Found {pixel_array.dtype}.')
        wrong_bit_depth_assignment = (
            pixel_array.dtype == np.bool_ and bits_allocated != 1,
            pixel_array.dtype == np.uint8 and bits_allocated != 8,
            pixel_array.dtype == np.uint16 and bits_allocated not in (12, 16),
        )
        if any(wrong_bit_depth_assignment):
            raise ValueError('Pixel array has an unexpected bit depth.')
        if bits_allocated not in (1, 8, 12, 16):
            raise ValueError('Unexpected number of bits allocated.')
        if transfer_syntax_uid == RLELossless and bits_allocated % 8 != 0:
            raise ValueError(
                'When using run length encoding, bits allocated must be a '
                'multiple of 8')
        self.BitsAllocated = bits_allocated
        self.HighBit = self.BitsAllocated - 1
        self.BitsStored = self.BitsAllocated
        self.PixelRepresentation = 0
        photometric_interpretation = PhotometricInterpretationValues(
            photometric_interpretation)
        if pixel_array.ndim == 3:
            accepted_interpretations = {
                PhotometricInterpretationValues.RGB.value,
                PhotometricInterpretationValues.YBR_FULL.value,
                PhotometricInterpretationValues.YBR_FULL_422.value,
                PhotometricInterpretationValues.YBR_PARTIAL_420.value,
            }
            if photometric_interpretation.value not in accepted_interpretations:
                raise ValueError(
                    'Pixel array has an unexpected photometric interpretation.'
                )
            if pixel_array.shape[-1] != 3:
                raise ValueError(
                    'Pixel array has an unexpected number of color channels.')
            if bits_allocated != 8:
                raise ValueError('Color images must be 8-bit.')
            if pixel_array.dtype != np.uint8:
                raise TypeError(
                    'Pixel array must have 8-bit unsigned integer data type '
                    'in case of a color image.')
            self.PhotometricInterpretation = photometric_interpretation.value
            self.SamplesPerPixel = 3
            self.PlanarConfiguration = 0
        elif pixel_array.ndim == 2:
            accepted_interpretations = {
                PhotometricInterpretationValues.MONOCHROME1.value,
                PhotometricInterpretationValues.MONOCHROME2.value,
            }
            if photometric_interpretation.value not in accepted_interpretations:
                raise ValueError(
                    'Pixel array has an unexpected photometric interpretation.'
                )
            self.PhotometricInterpretation = photometric_interpretation.value
            self.SamplesPerPixel = 1
        else:
            raise ValueError(
                'Pixel array has an unexpected number of dimensions.')
        if pixel_spacing is not None:
            self.PixelSpacing = pixel_spacing

        encoded_frame = encode_frame(
            pixel_array,
            transfer_syntax_uid=self.file_meta.TransferSyntaxUID,
            bits_allocated=self.BitsAllocated,
            bits_stored=self.BitsStored,
            photometric_interpretation=self.PhotometricInterpretation,
            pixel_representation=self.PixelRepresentation,
            planar_configuration=getattr(self, 'PlanarConfiguration', None))
        if self.file_meta.TransferSyntaxUID.is_encapsulated:
            self.PixelData = encapsulate([encoded_frame])
        else:
            self.PixelData = encoded_frame
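A hedged usage sketch, assuming the constructor above belongs to highdicom's ``SCImage`` class; the UIDs and metadata values are illustrative placeholders.

import numpy as np
from highdicom import UID
from highdicom.sc import SCImage

frame = np.zeros((64, 64), dtype=np.uint8)  # a single grayscale frame
sc = SCImage(
    pixel_array=frame,
    photometric_interpretation='MONOCHROME2',
    bits_allocated=8,
    coordinate_system='PATIENT',
    study_instance_uid=UID(),
    series_instance_uid=UID(),
    series_number=1,
    sop_instance_uid=UID(),
    instance_number=1,
    manufacturer='ExampleVendor',    # placeholder
    patient_orientation=('L', 'P'))  # required for "PATIENT"
sc.save_as('sc_image.dcm')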
Example No. 18
0
 def time_encapsulate_ten_nobot(self):
     """Time encapsulating frames with 10 fragments per frame."""
     for ii in range(self.no_runs):
         encapsulate(self.test_data, 10, has_bot=False)