Example #1
    def __init__(
            self,
            value: Optional[Union[int, float]],
            unit: Optional[Union[CodedConcept, Code]] = None,
            event_type: Optional[Union[CodedConcept, Code]] = None) -> None:
        """
        Parameters
        ----------
        value: Union[int, float], optional
            offset in time from a particular event of significance
        unit: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code], optional
            unit of time, e.g., "Days" or "Seconds"
        event_type: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code], optional
            type of event to which offset is relative,
            e.g., "Baseline" or "Enrollment"

        """  # noqa
        super().__init__(
            name=CodedConcept(
                value='128740',
                meaning='Longitudinal Temporal Offset from Event',
                scheme_designator='DCM'),
            value=value,
            unit=unit,
            relationship_type=RelationshipTypeValues.HAS_OBS_CONTEXT)
        event_type_item = CodeContentItem(
            name=CodedConcept(value='128741',
                              meaning='Longitudinal Temporal Event Type',
                              scheme_designator='DCM'),
            value=event_type,
            relationship_type=RelationshipTypeValues.HAS_CONCEPT_MOD)
        self.ContentSequence = ContentSequence([event_type_item])
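
A minimal usage sketch, assuming the initializer above belongs to highdicom.sr.content.LongitudinalTemporalOffsetFromEvent; the event-type code value below is a placeholder (substitute a real DCM event-type concept):

from pydicom.sr.coding import Code
from highdicom.sr.content import LongitudinalTemporalOffsetFromEvent

offset_item = LongitudinalTemporalOffsetFromEvent(
    value=30,
    unit=Code('d', 'UCUM', 'Days'),  # UCUM code for days
    # Although annotated as optional, event_type is passed unconditionally
    # into a CodeContentItem, so a coded value is effectively required:
    event_type=Code('0000', 'DCM', 'Baseline'),  # placeholder code value
)

Example #2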
    def __init__(
        self,
        anatomic_location: Union[CodedConcept, Code],
        laterality: Optional[Union[CodedConcept, Code]] = None,
        topographical_modifier: Optional[Union[CodedConcept, Code]] = None
    ) -> None:
        """
        Parameters
        ----------
        anatomic_location: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]
            coded anatomic location (region or structure)
        laterality: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code], optional
            coded laterality (see :dcm:`CID 244 <part16/sect_CID_244.html>`
            "Laterality" for options)
        topographical_modifier: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code], optional
            coded modifier of anatomic location

        """  # noqa
        super().__init__(
            name=CodedConcept(
                value='363698007',
                meaning='Finding Site',
                scheme_designator='SCT'
            ),
            value=anatomic_location,
            relationship_type=RelationshipTypeValues.HAS_CONCEPT_MOD
        )
        if laterality is not None or topographical_modifier is not None:
            self.ContentSequence = ContentSequence()
            if laterality is not None:
                laterality_item = CodeContentItem(
                    name=CodedConcept(
                        value='272741003',
                        meaning='Laterality',
                        scheme_designator='SCT'
                    ),
                    value=laterality,
                    relationship_type=RelationshipTypeValues.HAS_CONCEPT_MOD
                )
                self.ContentSequence.append(laterality_item)
            if topographical_modifier is not None:
                modifier_item = CodeContentItem(
                    name=CodedConcept(
                        value='106233006',
                        meaning='Topographical Modifier',
                        scheme_designator='SCT'
                    ),
                    value=topographical_modifier,
                    relationship_type=RelationshipTypeValues.HAS_CONCEPT_MOD
                )
                self.ContentSequence.append(modifier_item)
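
A usage sketch, assuming the initializer above belongs to highdicom.sr.content.FindingSite; the SNOMED codes are standard anatomy and laterality concepts:

from pydicom.sr.coding import Code
from highdicom.sr.content import FindingSite

finding_site = FindingSite(
    anatomic_location=Code('39607008', 'SCT', 'Lung'),
    laterality=Code('7771000', 'SCT', 'Left'),  # from CID 244 "Laterality"
)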
Example #3
    def __init__(
        self,
        graphic_type: Union[GraphicTypeValues3D, str],
        graphic_data: np.ndarray,
        frame_of_reference_uid: str
    ) -> None:
        """
        Parameters
        ----------
        graphic_type: Union[highdicom.sr.enum.GraphicTypeValues3D, str]
            name of the graphic type
        graphic_data: numpy.ndarray
            array of ordered spatial coordinates, where each row of the array
            represents a (x, y, z) coordinate triplet
        frame_of_reference_uid: str
            UID of the frame of reference

        """  # noqa
        graphic_type = GraphicTypeValues3D(graphic_type)
        if graphic_type == GraphicTypeValues3D.MULTIPOINT:
            raise ValueError(
                'Graphic type "MULTIPOINT" is not valid for region.')
        if graphic_type == GraphicTypeValues3D.ELLIPSOID:
            raise ValueError(
                'Graphic type "ELLIPSOID" is not valid for region.')
        super().__init__(name=CodedConcept(value='111030',
                                           meaning='Image Region',
                                           scheme_designator='DCM'),
                         graphic_type=graphic_type,
                         graphic_data=graphic_data,
                         frame_of_reference_uid=frame_of_reference_uid,
                         relationship_type=RelationshipTypeValues.CONTAINS)
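
A sketch, assuming the class above is highdicom.sr.content.ImageRegion3D; the frame of reference UID is hypothetical:

import numpy as np
from highdicom.sr.content import ImageRegion3D
from highdicom.sr.enum import GraphicTypeValues3D

region = ImageRegion3D(
    graphic_type=GraphicTypeValues3D.POINT,
    graphic_data=np.array([[12.5, 41.0, 78.0]]),  # one (x, y, z) triplet
    frame_of_reference_uid='1.2.826.0.1.3680043.8.498.1',  # hypothetical
)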
Example #4
    def __init__(
            self,
            referenced_sop_class_uid: str,
            referenced_sop_instance_uid: str,
            referenced_frame_numbers: Optional[Sequence[int]] = None) -> None:
        """
        Parameters
        ----------
        referenced_sop_class_uid: str
            SOP Class UID of the referenced image object
        referenced_sop_instance_uid: str
            SOP Instance UID of the referenced image object
        referenced_frame_numbers: Sequence[int], optional
            numbers of the frames to which the reference applies in case the
            referenced image is a multi-frame image

        """
        super().__init__(
            name=CodedConcept(value='121233',
                              meaning='Source Image for Segmentation',
                              scheme_designator='DCM'),
            referenced_sop_class_uid=referenced_sop_class_uid,
            referenced_sop_instance_uid=referenced_sop_instance_uid,
            referenced_frame_numbers=referenced_frame_numbers,
            relationship_type=RelationshipTypeValues.CONTAINS)
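
For example (assuming highdicom.sr.content.SourceImageForSegmentation; the SOP Instance UID is hypothetical, while the SOP Class UID is CT Image Storage):

from highdicom.sr.content import SourceImageForSegmentation

source_image = SourceImageForSegmentation(
    referenced_sop_class_uid='1.2.840.10008.5.1.4.1.1.2',  # CT Image Storage
    referenced_sop_instance_uid='1.2.826.0.1.3680043.8.498.2',  # hypothetical
)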
Example #5
    def __init__(
        self,
        name: Union[Code, CodedConcept],
        value: Union[Code, CodedConcept],
        relationship_type: Optional[Union[str, RelationshipTypeValues]] = None
    ) -> None:
        """
        Parameters
        ----------
        name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code]
            concept name
        value: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code]
            coded value or an enumerated item representing a coded value
        relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional
            type of relationship with parent content item

        """  # noqa
        super(CodeContentItem, self).__init__(ValueTypeValues.CODE, name,
                                              relationship_type)
        if not isinstance(value, (
                CodedConcept,
                Code,
        )):
            raise TypeError(
                'Argument "value" must have type CodedConcept or Code.')
        if isinstance(value, Code):
            value = CodedConcept(*value)
        self.ConceptCodeSequence = [value]
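
A usage sketch with concepts from the DCM and SCT schemes:

from pydicom.sr.coding import Code
from highdicom.sr.value_types import CodeContentItem
from highdicom.sr.enum import RelationshipTypeValues

item = CodeContentItem(
    name=Code('121071', 'DCM', 'Finding'),
    value=Code('108369006', 'SCT', 'Neoplasm'),
    relationship_type=RelationshipTypeValues.CONTAINS,
)

Example #6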
    def __init__(
        self,
        name: Union[Code, CodedConcept],
        value: Union[Code, CodedConcept],
        relationship_type: Union[str, RelationshipTypeValues, None] = None,
    ) -> None:
        """
        Parameters
        ----------
        name: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]
            concept name
        value: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]
            coded value or an enumerated item representing a coded value
        relationship_type: Union[highdicom.sr.RelationshipTypeValues, str]
            type of relationship with parent content item

        """  # noqa
        if relationship_type is None:
            warnings.warn(
                'A future release will require that relationship types be '
                f'provided for items of type {self.__class__.__name__}.',
                DeprecationWarning)
        super(CodeContentItem, self).__init__(ValueTypeValues.CODE, name,
                                              relationship_type)
        if not isinstance(value, (
                CodedConcept,
                Code,
        )):
            raise TypeError(
                'Argument "value" must have type CodedConcept or Code.')
        if isinstance(value, Code):
            value = CodedConcept(*value)
        self.ConceptCodeSequence = [value]
Example #7
    def __init__(
        self,
        value_type: Union[str, ValueTypeValues],
        name: Union[Code, CodedConcept],
        relationship_type: Optional[Union[str, RelationshipTypeValues]] = None
    ) -> None:
        """
        Parameters
        ----------
        value_type: Union[str, highdicom.sr.enum.ValueTypeValues]
            type of value encoded in a content item
        name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code]
            coded name or an enumerated item representing a coded name
        relationship_type: Union[str, highdicom.sr.enum.RelationshipTypeValues], optional
            type of relationship with parent content item

        """  # noqa
        super(ContentItem, self).__init__()
        value_type = ValueTypeValues(value_type)
        self.ValueType = value_type.value
        if not isinstance(name, (
                CodedConcept,
                Code,
        )):
            raise TypeError(
                'Argument "name" must have type CodedConcept or Code.')
        if isinstance(name, Code):
            name = CodedConcept(*name)
        self.ConceptNameCodeSequence = [name]
        if relationship_type is not None:
            relationship_type = RelationshipTypeValues(relationship_type)
            self.RelationshipType = relationship_type.value
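
Subclasses normally call this base initializer with their fixed value type; a direct-construction sketch:

from pydicom.sr.coding import Code
from highdicom.sr.value_types import ContentItem
from highdicom.sr.enum import ValueTypeValues, RelationshipTypeValues

item = ContentItem(
    value_type=ValueTypeValues.TEXT,
    name=Code('112039', 'DCM', 'Tracking Identifier'),
    relationship_type=RelationshipTypeValues.HAS_OBS_CONTEXT,
)
assert item.ValueType == 'TEXT'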
Example #8
File: utils.py, Project: pieper/highdicom
def search_tree(node, name, value_type, relationship_type, recursive):
    matched_content_items = []
    for content_item in node.ContentSequence:
        name_code = content_item.ConceptNameCodeSequence[0]
        item = ContentItem(
            value_type=content_item.ValueType,
            name=CodedConcept(
                value=name_code.CodeValue,
                scheme_designator=name_code.CodingSchemeDesignator,
                meaning=name_code.CodeMeaning
            ),
            relationship_type=content_item.get('RelationshipType', None)
        )
        if (has_name(item, name) and
                has_value_type(item, value_type) and
                has_relationship_type(item, relationship_type)):
            matched_content_items.append(content_item)
        if hasattr(content_item, 'ContentSequence') and recursive:
            matched_content_items += search_tree(
                node=content_item,
                name=name,
                value_type=value_type,
                relationship_type=relationship_type,
                recursive=recursive
            )
    return matched_content_items
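
This helper backs the public find_content_items function in highdicom.sr.utils; a hedged usage sketch (the file name is hypothetical, and the keyword arguments are assumed to match that wrapper's signature):

from pydicom import dcmread
from pydicom.sr.coding import Code
from highdicom.sr.enum import ValueTypeValues
from highdicom.sr.utils import find_content_items

sr_dataset = dcmread('sr_document.dcm')  # hypothetical file
items = find_content_items(
    sr_dataset,
    name=Code('121071', 'DCM', 'Finding'),
    value_type=ValueTypeValues.CODE,
    recursive=True,
)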
Example #9
    def __init__(
            self,
            referenced_sop_class_uid: str,
            referenced_sop_instance_uid: str,
            referenced_frame_numbers: Optional[Sequence[int]] = None) -> None:
        """
        Parameters
        ----------
        referenced_sop_class_uid: str
            SOP Class UID of the referenced image object
        referenced_sop_instance_uid: str
            SOP Instance UID of the referenced image object
        referenced_frame_numbers: Sequence[int], optional
            numbers of the frames to which the reference applies in case the
            referenced image is a multi-frame image

        Raises
        ------
        ValueError
            If any referenced frame number is not a positive integer

        """
        if referenced_frame_numbers is not None:
            if any(f < 1 for f in referenced_frame_numbers):
                raise ValueError(
                    'Referenced frame numbers must be >= 1. Frame indexing is '
                    '1-based.')
        super().__init__(
            name=CodedConcept(value='121233',
                              meaning='Source Image for Segmentation',
                              scheme_designator='DCM'),
            referenced_sop_class_uid=referenced_sop_class_uid,
            referenced_sop_instance_uid=referenced_sop_instance_uid,
            referenced_frame_numbers=referenced_frame_numbers,
            relationship_type=RelationshipTypeValues.CONTAINS)
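
The added check rejects 0-based frame indices up front; a sketch (UIDs hypothetical):

from highdicom.sr.content import SourceImageForSegmentation

SourceImageForSegmentation(
    referenced_sop_class_uid='1.2.840.10008.5.1.4.1.1.4',  # MR Image Storage
    referenced_sop_instance_uid='1.2.826.0.1.3680043.8.498.3',  # hypothetical
    referenced_frame_numbers=[0],  # raises ValueError: frames are 1-based
)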
Example #10
    def __init__(self, sop_class_uid: str, sop_instance_uid: str,
                 frame_number: int, segment_number: int,
                 source_image: SourceImageForSegmentation) -> None:
        """
        Parameters
        ----------
        sop_class_uid: str
            SOP Class UID of the referenced image object
        sop_instance_uid: str
            SOP Instance UID of the referenced image object
        frame_number: int
            number of the frame to which the reference applies
        segment_number: int
            number of the segment to which the reference applies
        source_image: highdicom.sr.content.SourceImageForSegmentation
            source image for segmentation

        """
        super().__init__()
        segmentation_item = ImageContentItem(
            name=CodedConcept(value='121214',
                              meaning='Referenced Segmentation Frame',
                              scheme_designator='DCM'),
            referenced_sop_class_uid=sop_class_uid,
            referenced_sop_instance_uid=sop_instance_uid,
            referenced_frame_numbers=frame_number,
            referenced_segment_numbers=segment_number,
            relationship_type=RelationshipTypeValues.CONTAINS)
        self.append(segmentation_item)
        if not isinstance(source_image, SourceImageForSegmentation):
            raise TypeError('Argument "source_image" must have type '
                            'SourceImageForSegmentation.')
        self.append(source_image)
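
A construction sketch, assuming highdicom.sr.content.ReferencedSegmentationFrame (UIDs hypothetical):

from highdicom.sr.content import (
    ReferencedSegmentationFrame,
    SourceImageForSegmentation,
)

ref_frame = ReferencedSegmentationFrame(
    sop_class_uid='1.2.840.10008.5.1.4.1.1.66.4',  # Segmentation Storage
    sop_instance_uid='1.2.826.0.1.3680043.8.498.4',  # hypothetical
    frame_number=1,
    segment_number=1,
    source_image=SourceImageForSegmentation(
        referenced_sop_class_uid='1.2.840.10008.5.1.4.1.1.2',
        referenced_sop_instance_uid='1.2.826.0.1.3680043.8.498.5',
    ),
)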
Example #11
    def __init__(
            self,
            sop_class_uid: str,
            sop_instance_uid: str,
            segment_number: int,
            frame_numbers: Optional[Sequence[int]] = None,
            source_images: Optional[
                Sequence[SourceImageForSegmentation]] = None,
            source_series: Optional[SourceSeriesForSegmentation] = None
    ) -> None:
        """
        Parameters
        ----------
        sop_class_uid: str
            SOP Class UID of the referenced segmentation object
        sop_instance_uid: str
            SOP Instance UID of the referenced segmentation object
        segment_number: int
            number of the segment to which the reference applies
        frame_numbers: Sequence[int], optional
            numbers of the frames to which the reference applies
            (in case a segmentation instance is referenced)
        source_images: Sequence[highdicom.sr.content.SourceImageForSegmentation], optional
            source images for segmentation
        source_series: highdicom.sr.content.SourceSeriesForSegmentation, optional
            source series for segmentation

        Note
        ----
        Either `source_images` or `source_series` must be provided.

        """  # noqa
        super().__init__()
        segment_item = ImageContentItem(
            name=CodedConcept(value='121191',
                              meaning='Referenced Segment',
                              scheme_designator='DCM'),
            referenced_sop_class_uid=sop_class_uid,
            referenced_sop_instance_uid=sop_instance_uid,
            referenced_frame_numbers=frame_numbers,
            referenced_segment_numbers=segment_number,
            relationship_type=RelationshipTypeValues.CONTAINS)
        self.append(segment_item)
        if source_images is not None:
            for image in source_images:
                if not isinstance(image, SourceImageForSegmentation):
                    raise TypeError(
                        'Items of argument "source_images" must have type '
                        'SourceImageForSegmentation.')
                self.append(image)
        elif source_series is not None:
            if not isinstance(source_series, SourceSeriesForSegmentation):
                raise TypeError('Argument "source_series" must have type '
                                'SourceSeriesForSegmentation.')
            self.append(source_series)
        else:
            raise ValueError(
                'One of the following two arguments must be provided: '
                '"source_images" or "source_series".')
Example #12
    def __init__(
        self,
        graphic_type: Union[GraphicTypeValues, str],
        graphic_data: np.ndarray,
        source_image: SourceImageForRegion,
        pixel_origin_interpretation: Optional[
            Union[PixelOriginInterpretationValues, str]
        ] = None
    ) -> None:
        """
        Parameters
        ----------
        graphic_type: Union[highdicom.sr.GraphicTypeValues, str]
            name of the graphic type
        graphic_data: numpy.ndarray
            array of ordered spatial coordinates, where each row of the array
            represents a (column, row) coordinate pair
        source_image: highdicom.sr.content.SourceImageForRegion
            source image to which `graphic_data` relates
        pixel_origin_interpretation: Union[highdicom.sr.PixelOriginInterpretationValues, str], optional
            whether pixel coordinates specified by `graphic_data` are defined
            relative to the total pixel matrix
            (``highdicom.sr.PixelOriginInterpretationValues.VOLUME``) or
            relative to an individual frame
            (``highdicom.sr.PixelOriginInterpretationValues.FRAME``)
            of the source image
            (default: ``highdicom.sr.PixelOriginInterpretationValues.VOLUME``)

        """  # noqa
        graphic_type = GraphicTypeValues(graphic_type)
        if graphic_type == GraphicTypeValues.MULTIPOINT:
            raise ValueError(
                'Graphic type "MULTIPOINT" is not valid for region.'
            )
        if not isinstance(source_image, SourceImageForRegion):
            raise TypeError(
                'Argument "source_image" must have type SourceImageForRegion.'
            )
        if pixel_origin_interpretation is None:
            pixel_origin_interpretation = PixelOriginInterpretationValues.VOLUME
        if pixel_origin_interpretation == PixelOriginInterpretationValues.FRAME:
            if (not hasattr(source_image, 'ReferencedFrameNumber') or
                    source_image.ReferencedFrameNumber is None):
                raise ValueError(
                    'Frame number of source image must be specified when value '
                    'of argument "pixel_origin_interpretation" is "FRAME".'
                )
        super().__init__(
            name=CodedConcept(
                value='111030',
                meaning='Image Region',
                scheme_designator='DCM'
            ),
            graphic_type=graphic_type,
            graphic_data=graphic_data,
            pixel_origin_interpretation=pixel_origin_interpretation,
            relationship_type=RelationshipTypeValues.CONTAINS
        )
        self.ContentSequence = [source_image]
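
A sketch of a closed planar region in 2D pixel coordinates, assuming highdicom.sr.content.ImageRegion and that SourceImageForRegion takes the same arguments as the other source-image items here (UIDs hypothetical):

import numpy as np
from highdicom.sr.content import ImageRegion, SourceImageForRegion
from highdicom.sr.enum import GraphicTypeValues

region = ImageRegion(
    graphic_type=GraphicTypeValues.POLYLINE,
    graphic_data=np.array([
        [10.0, 10.0], [40.0, 10.0], [40.0, 30.0], [10.0, 30.0], [10.0, 10.0],
    ]),  # (column, row) pairs; the repeated first point closes the outline
    source_image=SourceImageForRegion(
        referenced_sop_class_uid='1.2.840.10008.5.1.4.1.1.2',
        referenced_sop_instance_uid='1.2.826.0.1.3680043.8.498.8',
    ),
)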
Example #13
    def __init__(
            self,
            graphic_type: Union[GraphicTypeValues3D, str],
            graphic_data: np.ndarray,
            frame_of_reference_uid: str,
            source_images: Optional[
                Sequence[SourceImageForSegmentation]] = None,
            source_series: Optional[SourceSeriesForSegmentation] = None
    ) -> None:
        """
        Parameters
        ----------
        graphic_type: Union[highdicom.sr.enum.GraphicTypeValues3D, str]
            name of the graphic type
        graphic_data: numpy.ndarray
            array of ordered spatial coordinates, where each row of the array
            represents an (x, y, z) coordinate triplet
        frame_of_reference_uid: str
            unique identifier of the frame of reference within which the
            coordinates are defined
        source_images: Sequence[highdicom.sr.content.SourceImageForSegmentation], optional
            source images for segmentation
        source_series: highdicom.sr.content.SourceSeriesForSegmentation, optional
            source series for segmentation

        Note
        ----
        Either one or more source images or one source series must be provided.

        """  # noqa
        graphic_type = GraphicTypeValues3D(graphic_type)
        if graphic_type != GraphicTypeValues3D.ELLIPSOID:
            raise ValueError(
                'Graphic type for volume surface must be "ELLIPSOID".')
        super().__init__(name=CodedConcept(value='121231',
                                           meaning='Volume Surface',
                                           scheme_designator='DCM'),
                         frame_of_reference_uid=frame_of_reference_uid,
                         graphic_type=graphic_type,
                         graphic_data=graphic_data,
                         relationship_type=RelationshipTypeValues.CONTAINS)
        self.ContentSequence = ContentSequence()
        if source_images is not None:
            for image in source_images:
                if not isinstance(image, SourceImageForSegmentation):
                    raise TypeError(
                        'Items of argument "source_images" must have type '
                        'SourceImageForSegmentation.')
                self.ContentSequence.append(image)
        elif source_series is not None:
            if not isinstance(source_series, SourceSeriesForSegmentation):
                raise TypeError('Argument "source_series" must have type '
                                'SourceSeriesForSegmentation.')
            self.ContentSequence.append(source_series)
        else:
            raise ValueError(
                'One of the following two arguments must be provided: '
                '"source_images" or "source_series".')
Example #14
File: test_sr.py, Project: pieper/highdicom
    def test_construction_args(self):
        c = CodedConcept(self._value, self._scheme_designator, self._meaning)
        assert c.value == self._value
        assert c.scheme_designator == self._scheme_designator
        assert c.meaning == self._meaning
        assert c.scheme_version is None
        assert c.CodeValue == self._value
        assert c.CodingSchemeDesignator == self._scheme_designator
        assert c.CodeMeaning == self._meaning
        with pytest.raises(AttributeError):
            assert c.CodingSchemeVersion
Example #15
File: test_sr.py, Project: pieper/highdicom
    def test_construction_args_optional(self):
        version = 'v1.0'
        c = CodedConcept(self._value, self._scheme_designator, self._meaning,
                         version)
        assert c.value == self._value
        assert c.scheme_designator == self._scheme_designator
        assert c.meaning == self._meaning
        assert c.scheme_version == version
        assert c.CodeValue == self._value
        assert c.CodingSchemeDesignator == self._scheme_designator
        assert c.CodeMeaning == self._meaning
        assert c.CodingSchemeVersion == version
Example #16
    def __init__(self, referenced_series_instance_uid: str):
        """
        Parameters
        ----------
        referenced_series_instance_uid: str
            Series Instance UID

        """
        super().__init__(
            name=CodedConcept(
                value='121232',
                meaning='Source Series for Segmentation',
                scheme_designator='DCM'),
            value=referenced_series_instance_uid,
            relationship_type=RelationshipTypeValues.CONTAINS)
Example #17
    def __init__(self, referenced_sop_instance_uid: str):
        """
        Parameters
        ----------
        referenced_sop_instance_uid: str
            SOP Instance UID of the referenced object

        """
        super().__init__(
            name=CodedConcept(
                value='126100',
                meaning='Real World Value Map used for measurement',
                scheme_designator='DCM'),
            referenced_sop_class_uid='1.2.840.10008.5.1.4.1.1.67',
            referenced_sop_instance_uid=referenced_sop_instance_uid,
            relationship_type=RelationshipTypeValues.CONTAINS)
Example #18
    def __init__(
            self,
            name: str,
            family: Union[Code, CodedConcept],
            version: str,
            source: Optional[str] = None,
            parameters: Optional[Dict[str, str]] = None
        ):
        """
        Parameters
        ----------
        name: str
            Name of the algorithm
        family: Union[pydicom.sr.coding.Code, highdicom.sr.coding.CodedConcept]
            Kind of algorithm family
        version: str
            Version of the algorithm
        source: str, optional
            Source of the algorithm, e.g. name of the algorithm manufacturer
        parameters: Dict[str, str], optional
            Name and actual value of the parameters with which the algorithm
            was invoked

        """  # noqa
        super().__init__()
        item = Dataset()
        item.AlgorithmName = name
        item.AlgorithmVersion = version
        item.AlgorithmFamilyCodeSequence = [
            CodedConcept(
                family.value,
                family.scheme_designator,
                family.meaning,
                family.scheme_version,
            ),
        ]
        if source is not None:
            item.AlgorithmSource = source
        if parameters is not None:
            item.AlgorithmParameters = ','.join([
                '='.join([key, value])
                for key, value in parameters.items()
            ])
        self.append(item)
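
A sketch; the name, source, and parameter values are hypothetical, and codes.cid7162.ArtificialIntelligence is assumed to be available in pydicom's code dictionary:

from pydicom.sr.codedict import codes
from highdicom.content import AlgorithmIdentificationSequence

algorithm_identification = AlgorithmIdentificationSequence(
    name='AutoSeg',  # hypothetical
    family=codes.cid7162.ArtificialIntelligence,  # assumed codedict entry
    version='1.0',
    source='Example Labs',  # hypothetical
    parameters={'threshold': '0.5'},
)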
Example #19
def get_coded_value(item: Dataset) -> CodedConcept:
    """Gets the value of a SR Content Item with Value Type CODE.

    Parameter
    ---------
    item: pydicom.dataset.Dataset
        Content Item

    Returns
    -------
    highdicom.sr.coding.CodedConcept
        Value

    """
    try:
        value = item.ConceptCodeSequence[0]
    except AttributeError:
        raise AttributeError(
            'Dataset does not contain attribute "ConceptCodeSequence" and '
            'thus doesn\'t represent a SR Content Item of Value Type CODE.')
    return CodedConcept(value=value.CodeValue,
                        scheme_designator=value.CodingSchemeDesignator,
                        meaning=value.CodeMeaning,
                        scheme_version=value.get('CodingSchemeVersion', None))
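
For example, reading back the value of a CODE item like the one in Example #5:

from pydicom.sr.coding import Code
from highdicom.sr.value_types import CodeContentItem

item = CodeContentItem(
    name=Code('121071', 'DCM', 'Finding'),
    value=Code('108369006', 'SCT', 'Neoplasm'),
)
concept = get_coded_value(item)
assert concept.value == '108369006'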
Example #20
    def __init__(self,
                 referenced_sop_class_uid: str,
                 referenced_sop_instance_uid: str,
                 referenced_frame_numbers: Optional[Sequence[int]] = None):
        """
        Parameters
        ----------
        referenced_sop_class_uid: str
            SOP Class UID of the referenced image object
        referenced_sop_instance_uid: str
            SOP Instance UID of the referenced image object
        referenced_frame_numbers: Sequence[int], optional
            numbers of the frames to which the reference applies in case the
            referenced image is a multi-frame image

        """
        super().__init__(
            name=CodedConcept(value='111040',
                              meaning='Original Source',
                              scheme_designator='DCM'),
            referenced_sop_class_uid=referenced_sop_class_uid,
            referenced_sop_instance_uid=referenced_sop_instance_uid,
            referenced_frame_numbers=referenced_frame_numbers,
            relationship_type=RelationshipTypeValues.SELECTED_FROM)
Example #21
def get_coded_name(item: Dataset) -> CodedConcept:
    """Gets the concept name of a SR Content Item.

    Parameters
    ----------
    item: pydicom.dataset.Dataset
        Content Item

    Returns
    -------
    highdicom.sr.CodedConcept
        Concept name

    """
    try:
        name = item.ConceptNameCodeSequence[0]
    except AttributeError:
        raise AttributeError(
            'Dataset does not contain attribute "ConceptNameCodeSequence" and '
            'thus doesn\'t represent a SR Content Item.')
    return CodedConcept(value=name.CodeValue,
                        scheme_designator=name.CodingSchemeDesignator,
                        meaning=name.CodeMeaning,
                        scheme_version=name.get('CodingSchemeVersion', None))
Example #22
    def __init__(
        self,
        segment_number: int,
        segment_label: str,
        segmented_property_category: Union[Code, CodedConcept],
        segmented_property_type: Union[Code, CodedConcept],
        algorithm_type: Union[SegmentAlgorithmTypeValues, str],
        algorithm_identification: AlgorithmIdentificationSequence,
        tracking_uid: Optional[str] = None,
        tracking_id: Optional[str] = None,
        anatomic_regions: Optional[Sequence[Union[Code, CodedConcept]]] = None,
        primary_anatomic_structures: Optional[Sequence[Union[
            Code, CodedConcept]]] = None
    ) -> None:
        """
        Parameters
        ----------
        segment_number: int
            Number of the segment
        segment_label: str
            Label of the segment
        segmented_property_category: Union[pydicom.sr.coding.Code, highdicom.sr.coding.CodedConcept]
            Category of the property the segment represents,
            e.g. ``Code("49755003", "SCT", "Morphologically Abnormal Structure")``
            (see CID 7150 Segmentation Property Categories)
        segmented_property_type: Union[pydicom.sr.coding.Code, highdicom.sr.coding.CodedConcept]
            Property the segment represents,
            e.g. ``Code("108369006", "SCT", "Neoplasm")``
            (see CID 7151 Segmentation Property Types)
        algorithm_type: Union[str, highdicom.seg.enum.SegmentAlgorithmTypeValues]
            Type of algorithm
        algorithm_identification: highdicom.content.AlgorithmIdentificationSequence
            Information useful for identification of the algorithm, such
            as its name or version
        tracking_uid: str, optional
            Unique tracking identifier (universally unique)
        tracking_id: str, optional
            Tracking identifier (unique only within the domain of use)
        anatomic_regions: Sequence[Union[pydicom.sr.coding.Code, highdicom.sr.coding.CodedConcept]], optional
            Anatomic region(s) into which segment falls,
            e.g. ``Code("41216001", "SCT", "Prostate")``
            (see CID 4 Anatomic Region, CID 4031 Common Anatomic Regions,
            as well as other CIDs for domain-specific anatomic regions)
        primary_anatomic_structures: Sequence[Union[pydicom.sr.coding.Code, highdicom.sr.coding.CodedConcept]], optional
            Anatomic structure(s) the segment represents
            (see CIDs for domain-specific primary anatomic structures)

        """  # noqa
        super().__init__()
        self.SegmentNumber = segment_number
        self.SegmentLabel = segment_label
        self.SegmentedPropertyCategoryCodeSequence = [
            CodedConcept(segmented_property_category.value,
                         segmented_property_category.scheme_designator,
                         segmented_property_category.meaning,
                         segmented_property_category.scheme_version),
        ]
        self.SegmentedPropertyTypeCodeSequence = [
            CodedConcept(segmented_property_type.value,
                         segmented_property_type.scheme_designator,
                         segmented_property_type.meaning,
                         segmented_property_type.scheme_version),
        ]
        algorithm_type = SegmentAlgorithmTypeValues(algorithm_type)
        self.SegmentAlgorithmType = algorithm_type.value
        self.SegmentAlgorithmName = algorithm_identification[0].AlgorithmName
        self.SegmentationAlgorithmIdentificationSequence = \
            algorithm_identification
        num_given_tracking_identifiers = sum(
            [tracking_id is not None, tracking_uid is not None])
        if num_given_tracking_identifiers == 2:
            self.TrackingID = tracking_id
            self.TrackingUID = tracking_uid
        elif num_given_tracking_identifiers == 1:
            raise TypeError(
                'Tracking ID and Tracking UID must both be provided.')
        if anatomic_regions is not None:
            self.AnatomicRegionSequence = [
                CodedConcept(region.value, region.scheme_designator,
                             region.meaning, region.scheme_version)
                for region in anatomic_regions
            ]
        if primary_anatomic_structures is not None:
            self.PrimaryAnatomicStructureSequence = [
                CodedConcept(structure.value, structure.scheme_designator,
                             structure.meaning, structure.scheme_version)
                for structure in primary_anatomic_structures
            ]
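
A construction sketch using the property codes quoted in the docstring above; the label is hypothetical and the algorithm-family code is assumed to exist in pydicom's code dictionary:

from pydicom.sr.coding import Code
from pydicom.sr.codedict import codes
from highdicom.content import AlgorithmIdentificationSequence
from highdicom.seg.content import SegmentDescription
from highdicom.seg.enum import SegmentAlgorithmTypeValues

description = SegmentDescription(
    segment_number=1,
    segment_label='tumor',  # hypothetical
    segmented_property_category=Code(
        '49755003', 'SCT', 'Morphologically Abnormal Structure'),
    segmented_property_type=Code('108369006', 'SCT', 'Neoplasm'),
    algorithm_type=SegmentAlgorithmTypeValues.AUTOMATIC,
    algorithm_identification=AlgorithmIdentificationSequence(
        name='AutoSeg',  # hypothetical
        family=codes.cid7162.ArtificialIntelligence,  # assumed codedict entry
        version='1.0',
    ),
)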
Example #23
File: test_sr.py, Project: pieper/highdicom
    def test_equal_ignore_meaning(self):
        c1 = CodedConcept(self._value, self._scheme_designator, self._meaning)
        c2 = CodedConcept(self._value, self._scheme_designator, 'bla bla bla')
        assert c1 == c2
Example #24
File: test_sr.py, Project: pieper/highdicom
    def test_equal_equivalent_coding(self):
        c1 = CodedConcept(self._value, self._scheme_designator, self._meaning)
        c2 = CodedConcept('R-00317', 'SRT', self._meaning)
        assert c1 == c2
Example #25
File: sop.py, Project: fedorov/highdicom
    def add_segments(
        self,
        pixel_array: np.ndarray,
        segment_descriptions: Sequence[SegmentDescription],
        plane_positions: Optional[Sequence[PlanePositionSequence]] = None
    ) -> None:
        """Adds one or more segments to the segmentation image.

        Parameters
        ----------
        pixel_array: numpy.ndarray
            Array of segmentation pixel data of boolean, unsigned integer or
            floating point data type representing a mask image. If `pixel_array`
            is a floating-point array or a binary array (containing only the
            values ``True`` and ``False`` or ``0`` and ``1``), the segment
            number used to encode the segment is taken from
            `segment_descriptions`.
            Otherwise, if `pixel_array` contains multiple integer values, each
            value is treated as a different segment whose segment number is
            that integer value. In this case, all segments found in the array
            must be described in `segment_descriptions`. Note that this is
            valid for both ``"BINARY"`` and ``"FRACTIONAL"`` segmentations.
            For ``"FRACTIONAL"`` segmentations, values either encode the
            probability of a given pixel belonging to a segment
            (if `fractional_type` is ``"PROBABILITY"``)
            or the extent to which a segment occupies the pixel
            (if `fractional_type` is ``"OCCUPANCY"``).
            When `pixel_array` has a floating point data type, only one segment
            can be encoded. Additional segments can be subsequently
            added to the `Segmentation` instance using the ``add_segments()``
            method.
            If `pixel_array` represents a 3D image, the first dimension
            represents individual 2D planes and these planes must be ordered
            based on their position in the three-dimensional patient
            coordinate system (first along the X axis, second along the Y axis,
            and third along the Z axis).
            If `pixel_array` represents a tiled 2D image, the first dimension
            represents individual 2D tiles (for one channel and z-stack) and
            these tiles must be ordered based on their position in the tiled
            total pixel matrix (first along the row dimension and second along
            the column dimension, which are defined in the three-dimensional
            slide coordinate system by the direction cosines encoded by the
            *Image Orientation (Slide)* attribute).
        segment_descriptions: Sequence[highdicom.seg.content.SegmentDescription]
            Description of each segment encoded in `pixel_array`. In the case of
            pixel arrays with multiple integer values, the segment description
            with the corresponding segment number is used to describe each
            segment.
        plane_positions: Sequence[highdicom.content.PlanePositionSequence], optional
            Position of each plane in `pixel_array` relative to the
            three-dimensional patient or slide coordinate system.

        Raises
        ------
        ValueError
            When
                - The pixel array is not a 2D or 3D numpy array
                - The shape of the pixel array does not match the source images
                - The numbering of the segment descriptions is not
                  monotonically increasing by 1
                - The numbering of the segment descriptions does
                  not begin at 1 (for the first segments added to the instance)
                  or at one greater than the last added segment (for
                  subsequent segments)
                - One or more segments already exist within the
                  segmentation instance
                - The segmentation is binary and the pixel array contains
                  integer values that belong to segments that are not described
                  in the segment descriptions
                - The segmentation is binary and pixel array has floating point
                  values not equal to 0.0 or 1.0
                - The segmentation is fractional and pixel array has floating
                  point values outside the range 0.0 to 1.0
                - Plane positions are provided but the length of the array
                  does not match the number of frames in the pixel array
        TypeError
            When the dtype of the pixel array is invalid


        Note
        ----
        Segments must be sorted by segment number in ascending order and
        increase by 1.  Additionally, the first segment description must have a
        segment number one greater than the segment number of the last segment
        added to the segmentation, or 1 if this is the first segment added.

        In case `segmentation_type` is ``"BINARY"``, the number of items in
        `segment_descriptions` must be greater than or equal to the number of
        unique positive pixel values in `pixel_array`. It is possible for some
        segments described in `segment_descriptions` not to appear in the
        `pixel_array`. In case `segmentation_type` is ``"FRACTIONAL"``, only
        one segment can be encoded by `pixel_array` and hence only one item is
        permitted in `segment_descriptions`.

        """  # noqa
        if pixel_array.ndim == 2:
            pixel_array = pixel_array[np.newaxis, ...]
        if pixel_array.ndim != 3:
            raise ValueError('Pixel array must be a 2D or 3D array.')

        if pixel_array.shape[1:3] != (self.Rows, self.Columns):
            raise ValueError(
                'Pixel array representing segments has the wrong number of '
                'rows and columns.')

        # Determine the expected starting number of the segments to ensure
        # they will be continuous with existing segments
        if self._segment_inventory:
            # Next segment number is one greater than the largest existing
            # segment number
            seg_num_start = max(self._segment_inventory) + 1
        else:
            # No existing segments so start at 1
            seg_num_start = 1

        # Check segment numbers
        # Check the existing descriptions
        described_segment_numbers = np.array(
            [int(item.SegmentNumber) for item in segment_descriptions])
        # Check segment numbers in the segment descriptions are
        # monotonically increasing by 1
        if not (np.diff(described_segment_numbers) == 1).all():
            raise ValueError(
                'Segment descriptions must be sorted by segment number '
                'and monotonically increasing by 1.')
        if described_segment_numbers[0] != seg_num_start:
            if seg_num_start == 1:
                msg = ('Segment descriptions should be numbered starting '
                       f'from 1. Found {described_segment_numbers[0]}. ')
            else:
                msg = ('Segment descriptions should be numbered to '
                       'continue from existing segments. Expected the first '
                       f'segment to be numbered {seg_num_start} but found '
                       f'{described_segment_numbers[0]}.')
            raise ValueError(msg)

        if pixel_array.dtype in (np.bool_, np.uint8, np.uint16):
            segments_present = np.unique(pixel_array[pixel_array > 0].astype(
                np.uint16))

            # Special case where the mask is binary and there is a single
            # segment description. Mark the positive segment with
            # the correct segment number
            if (np.array_equal(segments_present, np.array([1]))
                    and len(segment_descriptions) == 1):
                pixel_array = pixel_array.astype(np.uint8)
                pixel_array *= described_segment_numbers.item()

            # Otherwise, the pixel values in the pixel array must all belong to
            # a described segment
            else:
                if not np.all(
                        np.in1d(segments_present, described_segment_numbers)):
                    raise ValueError('Pixel array contains segments that lack '
                                     'descriptions.')

        elif (pixel_array.dtype in (np.float_, np.float32, np.float64)):
            unique_values = np.unique(pixel_array)
            if np.min(unique_values) < 0.0 or np.max(unique_values) > 1.0:
                raise ValueError(
                    'Floating point pixel array values must be in the '
                    'range [0, 1].')
            if len(segment_descriptions) != 1:
                raise ValueError(
                    'When providing a float-valued pixel array, provide only '
                    'a single segment description')
            if self.SegmentationType == SegmentationTypeValues.BINARY.value:
                non_boolean_values = np.logical_and(unique_values > 0.0,
                                                    unique_values < 1.0)
                if np.any(non_boolean_values):
                    raise ValueError(
                        'Floating point pixel array values must be either '
                        '0.0 or 1.0 in case of BINARY segmentation type.')
                pixel_array = pixel_array.astype(np.bool_)
        else:
            raise TypeError('Pixel array has an invalid data type.')

        # Check that the new segments do not already exist
        if len(set(described_segment_numbers) & self._segment_inventory) > 0:
            raise ValueError(
                'Segment with given segment number already exists')

        # Set the optional tag value SegmentsOverlapValues to NO to indicate
        # that the segments do not overlap. We can know this for sure if it's
        # the first segment (or set of segments) to be added because they are
        # contained within a single pixel array.
        if len(self._segment_inventory) == 0:
            self.SegmentsOverlap = SegmentsOverlapValues.NO.value
        else:
            # If this is not the first set of segments to be added, we cannot
            # be sure whether there is overlap with the existing segments
            self.SegmentsOverlap = SegmentsOverlapValues.UNDEFINED.value

        src_image = self._source_images[0]
        is_multiframe = hasattr(src_image, 'NumberOfFrames')
        if is_multiframe:
            source_plane_positions = \
                self.DimensionIndexSequence.get_plane_positions_of_image(
                    src_image
                )
        else:
            source_plane_positions = \
                self.DimensionIndexSequence.get_plane_positions_of_series(
                    self._source_images
                )

        if plane_positions is None:
            if pixel_array.shape[0] != len(source_plane_positions):
                if is_multiframe:
                    raise ValueError(
                        'Number of frames in pixel array does not match '
                        'number of frames in source image.')
                else:
                    raise ValueError(
                        'Number of frames in pixel array does not match number '
                        'of source images.')
            plane_positions = source_plane_positions
        else:
            if pixel_array.shape[0] != len(plane_positions):
                raise ValueError(
                    'Number of pixel array planes does not match number of '
                    'provided plane positions.')

        plane_position_values, plane_sort_index = \
            self.DimensionIndexSequence.get_index_values(plane_positions)

        are_spatial_locations_preserved = (
            all(plane_positions[i] == source_plane_positions[i]
                for i in range(len(plane_positions)))
            and self._plane_orientation == self._source_plane_orientation)

        # Get unique values of attributes in the Plane Position Sequence or
        # Plane Position Slide Sequence, which define the position of the plane
        # with respect to the three dimensional patient or slide coordinate
        # system, respectively. These can subsequently be used to look up the
        # relative position of a plane relative to the indexed dimension.
        dimension_position_values = [
            np.unique(plane_position_values[:, index], axis=0)
            for index in range(plane_position_values.shape[1])
        ]

        # In certain circumstances, we can add new pixels without unpacking the
        # previous ones, which is more efficient. This can be done when using
        # non-encapsulated transfer syntaxes when there is no padding required
        # for each frame to be a multiple of 8 bits.
        framewise_encoding = False
        is_encaps = self.file_meta.TransferSyntaxUID.is_encapsulated
        if not is_encaps:
            if self.SegmentationType == SegmentationTypeValues.FRACTIONAL.value:
                framewise_encoding = True
            elif self.SegmentationType == SegmentationTypeValues.BINARY.value:
                # Framewise encoding can only be used if there is no padding
                # This requires the number of pixels in each frame to be
                # multiple of 8
                if (self.Rows * self.Columns * self.SamplesPerPixel) % 8 == 0:
                    framewise_encoding = True
                else:
                    logger.warning(
                        'pixel data needs to be re-encoded for binary '
                        'bitpacking - consider using FRACTIONAL instead of '
                        'BINARY segmentation type')

        if framewise_encoding:
            # Before adding new pixel data, remove trailing null padding byte
            if len(self.PixelData) == get_expected_length(self) + 1:
                self.PixelData = self.PixelData[:-1]
        else:
            # In the case of encapsulated transfer syntaxes, we will accumulate
            # a list of encoded frames to re-encapsulate at the end
            if is_encaps:
                if hasattr(self, 'PixelData') and len(self.PixelData) > 0:
                    # Undo the encapsulation but not the encoding within each
                    # frame
                    full_frames_list = decode_data_sequence(self.PixelData)
                else:
                    full_frames_list = []
            else:
                if hasattr(self, 'PixelData') and len(self.PixelData) > 0:
                    full_pixel_array = self.pixel_array.flatten()
                else:
                    full_pixel_array = np.array([], np.bool_)

        for i, segment_number in enumerate(described_segment_numbers):
            if pixel_array.dtype in (np.float_, np.float32, np.float64):
                # Floating-point numbers must be mapped to 8-bit integers in
                # the range [0, max_fractional_value].
                planes = np.around(pixel_array *
                                   float(self.MaximumFractionalValue))
                planes = planes.astype(np.uint8)
            elif pixel_array.dtype in (np.uint8, np.uint16):
                # Labeled masks must be converted to binary masks.
                planes = np.zeros(pixel_array.shape, dtype=np.bool_)
                planes[pixel_array == segment_number] = True
            elif pixel_array.dtype == np.bool_:
                planes = pixel_array
            else:
                raise TypeError('Pixel array has an invalid data type.')

            contained_plane_index = []
            for j in plane_sort_index:
                if np.sum(planes[j]) == 0:
                    logger.info('skip empty plane {} of segment #{}'.format(
                        j, segment_number))
                    continue
                contained_plane_index.append(j)
                logger.info('add plane #{} for segment #{}'.format(
                    j, segment_number))

                pffp_item = Dataset()
                frame_content_item = Dataset()
                frame_content_item.DimensionIndexValues = [segment_number]

                # Look up the position of the plane relative to the indexed
                # dimension.
                try:
                    if self._coordinate_system == CoordinateSystemNames.SLIDE:
                        index_values = [
                            np.where((dimension_position_values[idx]
                                      == pos))[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                    else:
                        # In case of the patient coordinate system, the
                        # value of the attribute the Dimension Index Sequence
                        # points to (Image Position Patient) has a value
                        # multiplicity greater than one.
                        index_values = [
                            np.where((dimension_position_values[idx]
                                      == pos).all(axis=1))[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                except IndexError as error:
                    raise IndexError(
                        'Could not determine position of plane #{} in '
                        'three dimensional coordinate system based on '
                        'dimension index values: {}'.format(j, error))
                frame_content_item.DimensionIndexValues.extend(index_values)
                pffp_item.FrameContentSequence = [frame_content_item]
                if self._coordinate_system == CoordinateSystemNames.SLIDE:
                    pffp_item.PlanePositionSlideSequence = plane_positions[j]
                else:
                    pffp_item.PlanePositionSequence = plane_positions[j]

                # Determining the source images that map to the frame is not
                # always trivial. Since DerivationImageSequence is a type 2
                # attribute, we leave its value empty.
                pffp_item.DerivationImageSequence = []

                if are_spatial_locations_preserved:
                    derivation_image_item = Dataset()
                    derivation_code = codes.cid7203.Segmentation
                    derivation_image_item.DerivationCodeSequence = [
                        CodedConcept(derivation_code.value,
                                     derivation_code.scheme_designator,
                                     derivation_code.meaning,
                                     derivation_code.scheme_version),
                    ]

                    derivation_src_img_item = Dataset()
                    if len(plane_sort_index) > len(self._source_images):
                        # A single multi-frame source image
                        src_img_item = self.SourceImageSequence[0]
                        # Frame numbers are one-based
                        derivation_src_img_item.ReferencedFrameNumber = j + 1
                    else:
                        # Multiple single-frame source images
                        src_img_item = self.SourceImageSequence[j]
                    derivation_src_img_item.ReferencedSOPClassUID = \
                        src_img_item.ReferencedSOPClassUID
                    derivation_src_img_item.ReferencedSOPInstanceUID = \
                        src_img_item.ReferencedSOPInstanceUID
                    purpose_code = \
                        codes.cid7202.SourceImageForImageProcessingOperation
                    derivation_src_img_item.PurposeOfReferenceCodeSequence = [
                        CodedConcept(purpose_code.value,
                                     purpose_code.scheme_designator,
                                     purpose_code.meaning,
                                     purpose_code.scheme_version),
                    ]
                    derivation_src_img_item.SpatialLocationsPreserved = 'YES'
                    derivation_image_item.SourceImageSequence = [
                        derivation_src_img_item,
                    ]
                    pffp_item.DerivationImageSequence.append(
                        derivation_image_item)
                else:
                    logger.warning('spatial locations not preserved')

                identification = Dataset()
                identification.ReferencedSegmentNumber = segment_number
                pffp_item.SegmentIdentificationSequence = [
                    identification,
                ]
                self.PerFrameFunctionalGroupsSequence.append(pffp_item)
                self.NumberOfFrames += 1

            if framewise_encoding:
                # Straightforward concatenation of the binary data
                self.PixelData += self._encode_pixels(
                    planes[contained_plane_index])
            else:
                if is_encaps:
                    # Encode this frame and add to the list for encapsulation
                    # at the end
                    for f in contained_plane_index:
                        full_frames_list.append(self._encode_pixels(planes[f]))
                else:
                    # Concatenate the 1D array for re-encoding at the end
                    full_pixel_array = np.concatenate([
                        full_pixel_array,
                        planes[contained_plane_index].flatten()
                    ])

            # In case of a tiled Total Pixel Matrix pixel data for the same
            # segment may be added.
            if segment_number not in self._segment_inventory:
                self.SegmentSequence.append(segment_descriptions[i])
            self._segment_inventory.add(segment_number)

        # Re-encode the whole pixel array at once if necessary
        if not framewise_encoding:
            if is_encaps:
                self.PixelData = encapsulate(full_frames_list)
            else:
                self.PixelData = self._encode_pixels(full_pixel_array)

        # Add back the null trailing byte if required
        if len(self.PixelData) % 2 == 1:
            self.PixelData += b'\x00'
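
A hedged sketch of calling this method on an existing Segmentation instance seg (hypothetical; the mask must contain one frame per source image or source frame):

import numpy as np

mask = np.zeros((1, seg.Rows, seg.Columns), dtype=np.bool_)
mask[0, 100:200, 100:200] = True  # hypothetical region of interest
seg.add_segments(
    pixel_array=mask,
    segment_descriptions=[description],  # e.g. from Example #22
)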
Example #26
    def __init__(
        self,
        name: Union[Code, CodedConcept],
        value: Optional[Union[int, float]] = None,
        unit: Optional[Union[Code, CodedConcept]] = None,
        qualifier: Optional[Union[Code, CodedConcept]] = None,
        relationship_type: Optional[Union[str, RelationshipTypeValues]] = None
    ) -> None:
        """
        Parameters
        ----------
        name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code]
            concept name
        value: Union[int, float], optional
            numeric value
        unit: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code], optional
            coded units of measurement (see CID 7181 "Abstract Multi-dimensional
            Image Model Component Units")
        qualifier: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code], optional
            qualification of numeric value or as an alternative to
            numeric value, e.g., reason for absence of numeric value
            (see CID 42 "Numeric Value Qualifier" for options)
        relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional
            type of relationship with parent content item

        Note
        ----
        Either `value` and `unit` or `qualifier` must be specified.

        """ # noqa
        super(NumContentItem, self).__init__(ValueTypeValues.NUM, name,
                                             relationship_type)
        if value is not None:
            self.MeasuredValueSequence: List[Dataset] = []
            measured_value_sequence_item = Dataset()
            if not isinstance(value, (
                    int,
                    float,
            )):
                raise TypeError(
                    'Argument "value" must have type "int" or "float".')
            measured_value_sequence_item.NumericValue = value
            if isinstance(value, float):
                measured_value_sequence_item.FloatingPointValue = value
            if not isinstance(unit, (
                    CodedConcept,
                    Code,
            )):
                raise TypeError(
                    'Argument "unit" must have type CodedConcept or Code.')
            if isinstance(unit, Code):
                unit = CodedConcept(*unit)
            measured_value_sequence_item.MeasurementUnitsCodeSequence = [unit]
            self.MeasuredValueSequence.append(measured_value_sequence_item)
        elif qualifier is not None:
            if not isinstance(qualifier, (
                    CodedConcept,
                    Code,
            )):
                raise TypeError(
                    'Argument "qualifier" must have type "CodedConcept" or '
                    '"Code".')
            if isinstance(qualifier, Code):
                qualifier = CodedConcept(*qualifier)
            self.NumericValueQualifierCodeSequence = [qualifier]
        else:
            raise ValueError(
                'Either argument "value" or "qualifier" must be specified '
                'upon creation of NumContentItem.')
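
A hedged usage sketch for this constructor follows; the import paths are assumed from the docstrings in this document, and the codes are illustrative (SCT 410668003 "Length", UCUM "mm", and DCM 114000 "Not a Number" from CID 42).

from pydicom.sr.coding import Code
from highdicom.sr.coding import CodedConcept
from highdicom.sr.enum import RelationshipTypeValues
from highdicom.sr.value_types import NumContentItem  # assumed module path

# Numeric measurement with a value and a unit
length_item = NumContentItem(
    name=CodedConcept(
        value='410668003', meaning='Length', scheme_designator='SCT'),
    value=42.0,
    unit=Code('mm', 'UCUM', 'Millimeter'),
    relationship_type=RelationshipTypeValues.CONTAINS
)

# Alternatively, a qualifier explains the absence of a numeric value
missing_item = NumContentItem(
    name=CodedConcept(
        value='410668003', meaning='Length', scheme_designator='SCT'),
    qualifier=Code('114000', 'DCM', 'Not a Number'),  # assumed CID 42 entry
    relationship_type=RelationshipTypeValues.CONTAINS
)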
Example #27
    def __init__(
            self,
            source_images: Sequence[Dataset],
            pixel_array: np.ndarray,
            segmentation_type: Union[str, SegmentationTypeValues],
            segment_descriptions: Sequence[SegmentDescription],
            series_instance_uid: str,
            series_number: int,
            sop_instance_uid: str,
            instance_number: int,
            manufacturer: str,
            manufacturer_model_name: str,
            software_versions: Union[str, Tuple[str]],
            device_serial_number: str,
            fractional_type: Optional[
                Union[str, SegmentationFractionalTypeValues]
            ] = SegmentationFractionalTypeValues.PROBABILITY,
            max_fractional_value: int = 255,
            content_description: Optional[str] = None,
            content_creator_name: Optional[Union[str, PersonName]] = None,
            transfer_syntax_uid: Union[str, UID] = ImplicitVRLittleEndian,
            pixel_measures: Optional[PixelMeasuresSequence] = None,
            plane_orientation: Optional[PlaneOrientationSequence] = None,
            plane_positions: Optional[Sequence[PlanePositionSequence]] = None,
            omit_empty_frames: bool = True,
            **kwargs: Any) -> None:
        """
        Parameters
        ----------
        source_images: Sequence[pydicom.dataset.Dataset]
            One or more single- or multi-frame images (or metadata of images)
            from which the segmentation was derived
        pixel_array: numpy.ndarray
            Array of segmentation pixel data of boolean, unsigned integer or
            floating point data type representing a mask image. The array may
            be a 2D, 3D or 4D numpy array.

            If it is a 2D numpy array, it represents the segmentation of a
            single frame image, such as a planar x-ray or single instance from
            a CT or MR series.

            If it is a 3D array, it represents the segmentation of either a
            series of source images (such as a series of CT or MR images), a
            single 3D multi-frame image (such as a multi-frame CT/MR image), or
            a single 2D tiled image (such as a slide microscopy image).

            If ``pixel_array`` represents the segmentation of a 3D image, the
            first dimension represents individual 2D planes. Unless the
            ``plane_positions`` parameter is provided, the frame in
            ``pixel_array[i, ...]`` should correspond to either
            ``source_images[i]`` (if ``source_images`` is a list of single
            frame instances) or ``source_images[0].pixel_array[i, ...]`` if
            ``source_images`` is a single multiframe instance.

            Similarly, if ``pixel_array`` is a 3D array representing the
            segmentation of a tiled 2D image, the first dimension represents
            individual 2D tiles (for one channel and z-stack) and these tiles
            correspond to the frames in the source image dataset.

            If ``pixel_array`` is an unsigned integer or boolean array with
            binary data (containing only the values ``True`` and ``False`` or
            ``0`` and ``1``) or a floating-point array, it represents a single
            segment. In the case of a floating-point array, values must be in
            the range 0.0 to 1.0.

            Otherwise, if ``pixel_array`` is a 2D or 3D array containing multiple
            unsigned integer values, each value is treated as a different
            segment whose segment number is that integer value. This is
            referred to as a *label map* style segmentation.  In this case, all
            segments from 1 through ``pixel_array.max()`` (inclusive) must be
            described in `segment_descriptions`, regardless of whether they are
            present in the image.  Note that this is valid for segmentations
            encoded using the ``"BINARY"`` or ``"FRACTIONAL"`` methods.

            Note that a 2D numpy array and a 3D numpy array with a
            single frame along the first dimension may be used interchangeably
            as segmentations of a single frame, regardless of their data type.

            If ``pixel_array`` is a 4D numpy array, the first three dimensions
            are used in the same way as the 3D case and the fourth dimension
            represents multiple segments. In this case
            ``pixel_array[:, :, :, i]`` represents segment number ``i + 1``
            (since numpy indexing is 0-based but segment numbering is 1-based),
            and all segments from 1 through ``pixel_array.shape[-1]`` must
            be described in ``segment_descriptions``.

            Furthermore, a 4D array with unsigned integer data type must
            contain only binary data (``True`` and ``False`` or ``0`` and
            ``1``). In other words, a 4D array is incompatible with the *label
            map* style encoding of the segmentation.

            Where there are multiple segments that are mutually exclusive (do
            not overlap) and binary, they may be passed using either a *label
            map* style array or a 4D array. A 4D array is required if either
            there are multiple segments and they are not mutually exclusive
            (i.e. they overlap) or there are multiple segments and the
            segmentation is fractional.

            Note that if the segmentation of a single source image with
            multiple stacked segments is required, it is necessary to include
            the singleton first dimension in order to give a 4D array.

            For ``"FRACTIONAL"`` segmentations, values either encode the
            probability of a given pixel belonging to a segment
            (if `fractional_type` is ``"PROBABILITY"``)
            or the extent to which a segment occupies the pixel
            (if `fractional_type` is ``"OCCUPANCY"``).

        segmentation_type: Union[str, highdicom.seg.SegmentationTypeValues]
            Type of segmentation, either ``"BINARY"`` or ``"FRACTIONAL"``
        segment_descriptions: Sequence[highdicom.seg.SegmentDescription]
            Description of each segment encoded in `pixel_array`. In the case of
            pixel arrays with multiple integer values, the segment description
            with the corresponding segment number is used to describe each segment.
        series_instance_uid: str
            UID of the series
        series_number: int
            Number of the series within the study
        sop_instance_uid: str
            UID that should be assigned to the instance
        instance_number: int
            Number that should be assigned to the instance
        manufacturer: str
            Name of the manufacturer of the device (developer of the software)
            that creates the instance
        manufacturer_model_name: str
            Name of the device model (name of the software library or
            application) that creates the instance
        software_versions: Union[str, Tuple[str]]
            Version(s) of the software that creates the instance
        device_serial_number: str
            Manufacturer's serial number of the device
        fractional_type: Union[str, highdicom.seg.SegmentationFractionalTypeValues], optional
            Type of fractional segmentation that indicates how pixel data
            should be interpreted
        max_fractional_value: int, optional
            Maximum value that indicates probability or occupancy of 1 that
            a pixel represents a given segment
        content_description: str, optional
            Description of the segmentation
        content_creator_name: Union[str, PersonName], optional
            Name of the creator of the segmentation
        transfer_syntax_uid: str, optional
            UID of transfer syntax that should be used for encoding of
            data elements. The following lossless compressed transfer syntaxes
            are supported for encapsulated format encoding in case of
            FRACTIONAL segmentation type:
            RLE Lossless (``"1.2.840.10008.1.2.5"``) and
            JPEG 2000 Lossless (``"1.2.840.10008.1.2.4.90"``).
        pixel_measures: highdicom.PixelMeasuresSequence, optional
            Physical spacing of image pixels in `pixel_array`.
            If ``None``, it will be assumed that the segmentation image has the
            same pixel measures as the source image(s).
        plane_orientation: highdicom.PlaneOrientationSequence, optional
            Orientation of planes in `pixel_array` relative to axes of
            three-dimensional patient or slide coordinate space.
            If ``None``, it will be assumed that the segmentation image has the
            same plane orientation as the source image(s).
        plane_positions: Sequence[highdicom.PlanePositionSequence], optional
            Position of each plane in `pixel_array` in the three-dimensional
            patient or slide coordinate space.
            If ``None``, it will be assumed that the segmentation image has the
            same plane position as the source image(s). However, this will only
            work when the first dimension of `pixel_array` matches the number
            of frames in `source_images` (in case of multi-frame source images)
            or the number of `source_images` (in case of single-frame source
            images).
        omit_empty_frames: bool, optional
            If True (default), frames with no non-zero pixels are omitted from
            the segmentation image. If False, all frames are included.
        **kwargs: Any, optional
            Additional keyword arguments that will be passed to the constructor
            of `highdicom.base.SOPClass`

        Raises
        ------
        ValueError
            When
                * Length of `source_images` is zero.
                * Items of `source_images` are not all part of the same study
                  and series.
                * Items of `source_images` have different number of rows and
                  columns.
                * Length of `plane_positions` does not match number of 2D planes
                  in `pixel_array` (size of first array dimension).

        Note
        ----
        The assumption is made that segments in `pixel_array` are defined in
        the same frame of reference as `source_images`.


        """  # noqa
        if len(source_images) == 0:
            raise ValueError('At least one source image is required.')

        uniqueness_criteria = set((
            image.StudyInstanceUID,
            image.SeriesInstanceUID,
            image.Rows,
            image.Columns,
        ) for image in source_images)
        if len(uniqueness_criteria) > 1:
            raise ValueError(
                'Source images must all be part of the same series and must '
                'have the same image dimensions (number of rows/columns).')

        src_img = source_images[0]
        is_multiframe = hasattr(src_img, 'NumberOfFrames')
        if is_multiframe and len(source_images) > 1:
            raise ValueError(
                'Only one source image should be provided in case images '
                'are multi-frame images.')
        supported_transfer_syntaxes = {
            ImplicitVRLittleEndian,
            ExplicitVRLittleEndian,
            JPEG2000Lossless,
            RLELossless,
        }
        if transfer_syntax_uid not in supported_transfer_syntaxes:
            raise ValueError('Transfer syntax "{}" is not supported'.format(
                transfer_syntax_uid))

        if pixel_array.ndim == 2:
            pixel_array = pixel_array[np.newaxis, ...]

        super().__init__(study_instance_uid=src_img.StudyInstanceUID,
                         series_instance_uid=series_instance_uid,
                         series_number=series_number,
                         sop_instance_uid=sop_instance_uid,
                         instance_number=instance_number,
                         sop_class_uid='1.2.840.10008.5.1.4.1.1.66.4',
                         manufacturer=manufacturer,
                         modality='SEG',
                         transfer_syntax_uid=transfer_syntax_uid,
                         patient_id=src_img.PatientID,
                         patient_name=src_img.PatientName,
                         patient_birth_date=src_img.PatientBirthDate,
                         patient_sex=src_img.PatientSex,
                         accession_number=src_img.AccessionNumber,
                         study_id=src_img.StudyID,
                         study_date=src_img.StudyDate,
                         study_time=src_img.StudyTime,
                         referring_physician_name=getattr(
                             src_img, 'ReferringPhysicianName', None),
                         **kwargs)

        # Using Container Type Code Sequence attribute would be more elegant,
        # but unfortunately it is a type 2 attribute.
        if (hasattr(src_img, 'ImageOrientationSlide')
                or hasattr(src_img, 'ImageCenterPointCoordinatesSequence')):
            self._coordinate_system = CoordinateSystemNames.SLIDE
        else:
            self._coordinate_system = CoordinateSystemNames.PATIENT

        # Frame of Reference
        self.FrameOfReferenceUID = src_img.FrameOfReferenceUID
        self.PositionReferenceIndicator = getattr(
            src_img, 'PositionReferenceIndicator', None)

        # (Enhanced) General Equipment
        self.DeviceSerialNumber = device_serial_number
        self.ManufacturerModelName = manufacturer_model_name
        self.SoftwareVersions = software_versions

        # General Reference
        self.SourceImageSequence: List[Dataset] = []
        referenced_series: Dict[str, List[Dataset]] = defaultdict(list)
        for s_img in source_images:
            ref = Dataset()
            ref.ReferencedSOPClassUID = s_img.SOPClassUID
            ref.ReferencedSOPInstanceUID = s_img.SOPInstanceUID
            self.SourceImageSequence.append(ref)
            referenced_series[s_img.SeriesInstanceUID].append(ref)

        # Common Instance Reference
        self.ReferencedSeriesSequence: List[Dataset] = []
        for series_instance_uid, referenced_images in referenced_series.items(
        ):
            ref = Dataset()
            ref.SeriesInstanceUID = series_instance_uid
            ref.ReferencedInstanceSequence = referenced_images
            self.ReferencedSeriesSequence.append(ref)

        # Image Pixel
        self.Rows = pixel_array.shape[1]
        self.Columns = pixel_array.shape[2]

        # Segmentation Image
        self.ImageType = ['DERIVED', 'PRIMARY']
        self.SamplesPerPixel = 1
        self.PhotometricInterpretation = 'MONOCHROME2'
        self.PixelRepresentation = 0
        self.ContentLabel = 'ISO_IR 192'  # UTF-8
        self.ContentDescription = content_description
        if content_creator_name is not None:
            check_person_name(content_creator_name)
        self.ContentCreatorName = content_creator_name

        segmentation_type = SegmentationTypeValues(segmentation_type)
        self.SegmentationType = segmentation_type.value
        if self.SegmentationType == SegmentationTypeValues.BINARY.value:
            self.BitsAllocated = 1
            self.HighBit = 0
            if self.file_meta.TransferSyntaxUID.is_encapsulated:
                raise ValueError(
                    'The chosen transfer syntax '
                    f'{self.file_meta.TransferSyntaxUID} '
                    'is not compatible with the BINARY segmentation type')
        elif self.SegmentationType == SegmentationTypeValues.FRACTIONAL.value:
            self.BitsAllocated = 8
            self.HighBit = 7
            segmentation_fractional_type = SegmentationFractionalTypeValues(
                fractional_type)
            self.SegmentationFractionalType = segmentation_fractional_type.value
            if max_fractional_value > 2**8 - 1:
                raise ValueError(
                    'Maximum fractional value must not exceed image bit depth.'
                )
            self.MaximumFractionalValue = max_fractional_value
        else:
            raise ValueError(
                'Unknown segmentation type "{}"'.format(segmentation_type))

        self.BitsStored = self.BitsAllocated
        self.LossyImageCompression = getattr(src_img, 'LossyImageCompression',
                                             '00')
        if self.LossyImageCompression == '01':
            self.LossyImageCompressionRatio = \
                src_img.LossyImageCompressionRatio
            self.LossyImageCompressionMethod = \
                src_img.LossyImageCompressionMethod

        self.SegmentSequence: List[Dataset] = []

        # Multi-Frame Functional Groups and Multi-Frame Dimensions
        shared_func_groups = Dataset()
        if pixel_measures is None:
            if is_multiframe:
                src_shared_fg = src_img.SharedFunctionalGroupsSequence[0]
                pixel_measures = src_shared_fg.PixelMeasuresSequence
            else:
                pixel_measures = PixelMeasuresSequence(
                    pixel_spacing=src_img.PixelSpacing,
                    slice_thickness=src_img.SliceThickness,
                    spacing_between_slices=src_img.get('SpacingBetweenSlices',
                                                       None))
            # TODO: ensure derived segmentation image and original image have
            # same physical dimensions
            # seg_row_dim = self.Rows * pixel_measures[0].PixelSpacing[0]
            # seg_col_dim = self.Columns * pixel_measures[0].PixelSpacing[1]
            # src_row_dim = src_img.Rows

        if is_multiframe:
            if self._coordinate_system == CoordinateSystemNames.SLIDE:
                source_plane_orientation = PlaneOrientationSequence(
                    coordinate_system=self._coordinate_system,
                    image_orientation=src_img.ImageOrientationSlide)
            else:
                src_sfg = src_img.SharedFunctionalGroupsSequence[0]
                source_plane_orientation = src_sfg.PlaneOrientationSequence
        else:
            source_plane_orientation = PlaneOrientationSequence(
                coordinate_system=self._coordinate_system,
                image_orientation=src_img.ImageOrientationPatient)
        if plane_orientation is None:
            plane_orientation = source_plane_orientation

        self.DimensionIndexSequence = DimensionIndexSequence(
            coordinate_system=self._coordinate_system)
        dimension_organization = Dataset()
        dimension_organization.DimensionOrganizationUID = \
            self.DimensionIndexSequence[0].DimensionOrganizationUID
        self.DimensionOrganizationSequence = [dimension_organization]

        if is_multiframe:
            source_plane_positions = \
                self.DimensionIndexSequence.get_plane_positions_of_image(
                    source_images[0]
                )
        else:
            source_plane_positions = \
                self.DimensionIndexSequence.get_plane_positions_of_series(
                    source_images
                )

        shared_func_groups.PixelMeasuresSequence = pixel_measures
        shared_func_groups.PlaneOrientationSequence = plane_orientation
        self.SharedFunctionalGroupsSequence = [shared_func_groups]

        # NOTE: Information about individual frames will be updated below
        self.NumberOfFrames = 0
        self.PerFrameFunctionalGroupsSequence: List[Dataset] = []

        # (A 2D array was already expanded to a single-frame 3D array above)
        if pixel_array.ndim not in [3, 4]:
            raise ValueError('Pixel array must be a 2D, 3D, or 4D array.')

        if pixel_array.shape[1:3] != (self.Rows, self.Columns):
            raise ValueError(
                'Pixel array representing segments has the wrong number of '
                'rows and columns.')

        # Check segment numbers
        described_segment_numbers = np.array(
            [int(item.SegmentNumber) for item in segment_descriptions])
        self._check_segment_numbers(described_segment_numbers)

        # Checks on pixels and overlap
        pixel_array, segments_overlap = self._check_pixel_array(
            pixel_array, described_segment_numbers, segmentation_type)
        self.SegmentsOverlap = segments_overlap.value

        if plane_positions is None:
            if pixel_array.shape[0] != len(source_plane_positions):
                raise ValueError(
                    'Number of frames in pixel array does not match number '
                    'of source image frames.')
            plane_positions = source_plane_positions
        else:
            if pixel_array.shape[0] != len(plane_positions):
                raise ValueError(
                    'Number of pixel array planes does not match number of '
                    'provided plane positions.')

        are_spatial_locations_preserved = (
            all(plane_positions[i] == source_plane_positions[i]
                for i in range(len(plane_positions)))
            and plane_orientation == source_plane_orientation)

        # Remove empty slices
        if omit_empty_frames:
            pixel_array, plane_positions, source_image_indices = \
                self._omit_empty_frames(pixel_array, plane_positions)
        else:
            source_image_indices = list(range(pixel_array.shape[0]))

        plane_position_values, plane_sort_index = \
            self.DimensionIndexSequence.get_index_values(plane_positions)

        # Get unique values of attributes in the Plane Position Sequence or
        # Plane Position Slide Sequence, which define the position of the plane
        # with respect to the three dimensional patient or slide coordinate
        # system, respectively. These can subsequently be used to look up the
        # relative position of a plane relative to the indexed dimension.
        dimension_position_values = [
            np.unique(plane_position_values[:, index], axis=0)
            for index in range(plane_position_values.shape[1])
        ]
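
        # Illustrative note (hypothetical values): if the unique values of one
        # indexed dimension across all planes are [0.0, 1.0, 2.0], a plane
        # whose value is 1.0 receives dimension index value 2, since dimension
        # index values are one-based positions within these unique arrays.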

        is_encaps = self.file_meta.TransferSyntaxUID.is_encapsulated
        if is_encaps:
            # In the case of encapsulated transfer syntaxes, we will accumulate
            # a list of encoded frames to encapsulate at the end
            full_frames_list = []
        else:
            # In the case of non-encapsulated (uncompressed) transfer syntaxes
            # we will accumulate a 1D array of pixels from all frames for
            # bitpacking at the end
            full_pixel_array = np.array([], np.bool_)

        for i, segment_number in enumerate(described_segment_numbers):
            # Pixel array for just this segment
            if pixel_array.dtype in (np.float32, np.float64):
                # Floating-point numbers must be mapped to 8-bit integers in
                # the range [0, max_fractional_value].
                if pixel_array.ndim == 4:
                    segment_array = pixel_array[:, :, :, segment_number - 1]
                else:
                    segment_array = pixel_array
                planes = np.around(segment_array *
                                   float(self.MaximumFractionalValue))
                planes = planes.astype(np.uint8)
            elif pixel_array.dtype in (np.uint8, np.uint16):
                # Note that integer arrays with segments stacked down the last
                # dimension will already have been converted to bool, leaving
                # only "label maps" here, which must be converted to binary
                # masks.
                planes = np.zeros(pixel_array.shape, dtype=np.bool_)
                planes[pixel_array == segment_number] = True
            elif pixel_array.dtype == np.bool_:
                if pixel_array.ndim == 4:
                    planes = pixel_array[:, :, :, segment_number - 1]
                else:
                    planes = pixel_array
            else:
                raise TypeError('Pixel array has an invalid data type.')

            contained_plane_index = []
            for j in plane_sort_index:
                # Index of this frame in the original list of source indices
                source_image_index = source_image_indices[j]

                # Even though completely empty slices were removed earlier,
                # there may still be slices in which this specific segment is
                # absent. Such frames should be removed
                if omit_empty_frames and np.sum(planes[j]) == 0:
                    logger.info('skip empty plane {} of segment #{}'.format(
                        j, segment_number))
                    continue
                contained_plane_index.append(j)
                logger.info('add plane #{} for segment #{}'.format(
                    j, segment_number))

                pffp_item = Dataset()
                frame_content_item = Dataset()
                frame_content_item.DimensionIndexValues = [segment_number]

                # Look up the position of the plane relative to the indexed
                # dimension.
                try:
                    if self._coordinate_system == CoordinateSystemNames.SLIDE:
                        index_values = [
                            np.where((dimension_position_values[idx]
                                      == pos))[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                    else:
                        # In case of the patient coordinate system, the
                        # value of the attribute the Dimension Index Sequence
                        # points to (Image Position Patient) has a value
                        # multiplicity greater than one.
                        index_values = [
                            np.where((dimension_position_values[idx]
                                      == pos).all(axis=1))[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                except IndexError as error:
                    raise IndexError(
                        'Could not determine position of plane #{} in '
                        'three dimensional coordinate system based on '
                        'dimension index values: {}'.format(j, error))
                frame_content_item.DimensionIndexValues.extend(index_values)
                pffp_item.FrameContentSequence = [frame_content_item]
                if self._coordinate_system == CoordinateSystemNames.SLIDE:
                    pffp_item.PlanePositionSlideSequence = plane_positions[j]
                else:
                    pffp_item.PlanePositionSequence = plane_positions[j]

                # Determining the source images that map to the frame is not
                # always trivial. Since DerivationImageSequence is a type 2
                # attribute, we leave its value empty.
                pffp_item.DerivationImageSequence = []

                if are_spatial_locations_preserved:
                    derivation_image_item = Dataset()
                    derivation_code = codes.cid7203.Segmentation
                    derivation_image_item.DerivationCodeSequence = [
                        CodedConcept(derivation_code.value,
                                     derivation_code.scheme_designator,
                                     derivation_code.meaning,
                                     derivation_code.scheme_version),
                    ]

                    derivation_src_img_item = Dataset()
                    if hasattr(source_images[0], 'NumberOfFrames'):
                        # A single multi-frame source image
                        src_img_item = self.SourceImageSequence[0]
                        # Frame numbers are one-based
                        derivation_src_img_item.ReferencedFrameNumber = (
                            source_image_index + 1)
                    else:
                        # Multiple single-frame source images
                        src_img_item = self.SourceImageSequence[
                            source_image_index]
                    derivation_src_img_item.ReferencedSOPClassUID = \
                        src_img_item.ReferencedSOPClassUID
                    derivation_src_img_item.ReferencedSOPInstanceUID = \
                        src_img_item.ReferencedSOPInstanceUID
                    purpose_code = \
                        codes.cid7202.SourceImageForImageProcessingOperation
                    derivation_src_img_item.PurposeOfReferenceCodeSequence = [
                        CodedConcept(purpose_code.value,
                                     purpose_code.scheme_designator,
                                     purpose_code.meaning,
                                     purpose_code.scheme_version),
                    ]
                    derivation_src_img_item.SpatialLocationsPreserved = 'YES'
                    derivation_image_item.SourceImageSequence = [
                        derivation_src_img_item,
                    ]
                    pffp_item.DerivationImageSequence.append(
                        derivation_image_item)
                else:
                    logger.warning('spatial locations not preserved')

                identification = Dataset()
                identification.ReferencedSegmentNumber = segment_number
                pffp_item.SegmentIdentificationSequence = [
                    identification,
                ]
                self.PerFrameFunctionalGroupsSequence.append(pffp_item)
                self.NumberOfFrames += 1

            if is_encaps:
                # Encode this frame and add to the list for encapsulation
                # at the end
                for f in contained_plane_index:
                    full_frames_list.append(self._encode_pixels(planes[f]))
            else:
                # Concatenate the 1D array for re-encoding at the end
                full_pixel_array = np.concatenate([
                    full_pixel_array, planes[contained_plane_index].flatten()
                ])

            self.SegmentSequence.append(segment_descriptions[i])

        if is_encaps:
            # Encapsulate all pre-compressed frames
            self.PixelData = encapsulate(full_frames_list)
        else:
            # Encode the whole pixel array at once
            # This allows for correct bit-packing in cases where
            # number of pixels per frame is not a multiple of 8
            self.PixelData = self._encode_pixels(full_pixel_array)

        # Add a null trailing byte if required
        if len(self.PixelData) % 2 == 1:
            self.PixelData += b'\x00'

        self.copy_specimen_information(src_img)
        self.copy_patient_and_study_information(src_img)
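
To make the ``pixel_array`` conventions described in the docstring above concrete, here is a hedged sketch of the two equivalent ways to encode two mutually exclusive binary segments; all names and dimensions are illustrative.

import numpy as np

# Hypothetical dimensions: 4 source frames of 64 x 64 pixels, 2 segments
num_frames, rows, cols = 4, 64, 64

# Label map style: a single 3D array whose pixel values are segment
# numbers (0 means background / no segment)
label_map = np.zeros((num_frames, rows, cols), dtype=np.uint8)
label_map[:, 10:20, 10:20] = 1  # segment 1
label_map[:, 30:40, 30:40] = 2  # segment 2

# Equivalent 4D style: one binary plane per segment stacked along the
# last axis; segment number i + 1 lives at index i
stacked = np.stack([label_map == 1, label_map == 2], axis=-1)
assert stacked.shape == (num_frames, rows, cols, 2)

# Either form must be accompanied by segment_descriptions for segment
# numbers 1 and 2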
Example #28
File: sop.py  Project: aashish24/highdicom
    def add_segments(
            self,
            pixel_array: np.ndarray,
            segment_descriptions: Sequence[SegmentDescription],
            plane_positions: Optional[Sequence[PlanePositionSequence]] = None
        ) -> None:
        """Adds one or more segments to the segmentation image.

        Parameters
        ----------
        pixel_array: numpy.ndarray
            Array of segmentation pixel data of boolean, unsigned integer or
            floating point data type representing a mask image. If `pixel_array`
            is a floating-point array or a binary array (containing only the
            values ``True`` and ``False`` or ``0`` and ``1``), the segment number
            used to encode the segment is taken from `segment_descriptions`.
            Otherwise, if `pixel_array` contains multiple integer values, each
            value is treated as a different segment whose segment number is
            that integer value. In this case, all segments found in the array
            must be described in `segment_descriptions`. Note that this is
            valid for both ``"BINARY"``
            and ``"FRACTIONAL"`` segmentations.
            For ``"FRACTIONAL"`` segmentations, values either encode the probability
            of a given pixel belonging to a segment
            (if `fractional_type` is ``"PROBABILITY"``)
            or the extent to which a segment occupies the pixel
            (if `fractional_type` is ``"OCCUPANCY"``).
            When `pixel_array` has a floating point data type, only one
            segment can be encoded per call. Additional segments can be
            encoded by subsequent calls to ``add_segments()``.
            If `pixel_array` represents a 3D image, the first dimension
            represents individual 2D planes and these planes must be ordered
            based on their position in the three-dimensional patient
            coordinate system (first along the X axis, second along the Y axis,
            and third along the Z axis).
            If `pixel_array` represents a tiled 2D image, the first dimension
            represents individual 2D tiles (for one channel and z-stack) and
            these tiles must be ordered based on their position in the tiled
            total pixel matrix (first along the row dimension and second along
            the column dimension, which are defined in the three-dimensional
            slide coordinate system by the direction cosines encoded by the
            *Image Orientation (Slide)* attribute).
        segment_descriptions: Sequence[highdicom.seg.content.SegmentDescription]
            Description of each segment encoded in `pixel_array`. In the case of
            pixel arrays with multiple integer values, the segment description
            with the corresponding segment number is used to describe each
            segment.
        plane_positions: Sequence[highdicom.content.PlanePositionSequence], optional
            Position of each plane in `pixel_array` relative to the
            three-dimensional patient or slide coordinate system.

        Note
        ----
        Items of `segment_descriptions` must be sorted by segment number in
        ascending order.
        In case `segmentation_type` is ``"BINARY"``, the number of items per
        sequence must match the number of unique positive pixel values in
        `pixel_array`. In case `segmentation_type` is ``"FRACTIONAL"``, only
        one segment can be encoded by `pixel_array` and hence only one item is
        permitted per sequence.

        """  # noqa
        if pixel_array.ndim == 2:
            pixel_array = pixel_array[np.newaxis, ...]
        if pixel_array.ndim != 3:
            raise ValueError('Pixel array must be a 2D or 3D array.')

        if pixel_array.shape[1:3] != (self.Rows, self.Columns):
            raise ValueError(
                'Pixel array representing segments has the wrong number of '
                'rows and columns.'
            )

        described_segment_numbers = np.array([
            int(item.SegmentNumber)
            for item in segment_descriptions
        ])
        # Check that there are no duplicated segment numbers in the segment
        # descriptions
        if not (np.diff(described_segment_numbers) > 0).all():
            raise ValueError(
                'Segment descriptions must be sorted by segment number.'
            )

        if pixel_array.dtype in (np.bool_, np.uint8, np.uint16):
            segments_present = np.unique(
                pixel_array[pixel_array > 0].astype(np.uint16)
            )

            # Special case where the mask is binary and there is a single
            # segment description: mark the positive pixels with the described
            # segment number.
            if (np.array_equal(segments_present, np.array([1])) and
                    len(segment_descriptions) == 1):
                pixel_array = pixel_array.astype(np.uint8)
                pixel_array *= described_segment_numbers.item()

            # Otherwise, the pixel values in the pixel array must all belong to
            # a described segment
            else:
                if not np.all(
                    np.in1d(segments_present, described_segment_numbers)
                ):
                    raise ValueError(
                        'Pixel array contains segments that lack descriptions.'
                    )

        elif pixel_array.dtype in (np.float32, np.float64):
            unique_values = np.unique(pixel_array)
            if np.min(unique_values) < 0.0 or np.max(unique_values) > 1.0:
                raise ValueError(
                    'Floating point pixel array values must be in the '
                    'range [0, 1].'
                )
            if len(segment_descriptions) != 1:
                raise ValueError(
                    'When providing a float-valued pixel array, provide only '
                    'a single segment description'
                )
            if self.SegmentationType == SegmentationTypeValues.BINARY.value:
                non_boolean_values = np.logical_and(
                    unique_values > 0.0,
                    unique_values < 1.0
                )
                if np.any(non_boolean_values):
                    raise ValueError(
                        'Floating point pixel array values must be either '
                        '0.0 or 1.0 in case of BINARY segmentation type.'
                    )
                pixel_array = pixel_array.astype(np.bool_)
        else:
            raise TypeError('Pixel array has an invalid data type.')

        # Check that the new segments do not already exist
        if len(set(described_segment_numbers) & self._segment_inventory) > 0:
            raise ValueError('Segment with given segment number already exists')

        # Set the optional tag value SegmentsOverlapValues to NO to indicate
        # that the segments do not overlap. We can know this for sure if it's
        # the first segment (or set of segments) to be added because they are
        # contained within a single pixel array.
        if len(self._segment_inventory) == 0:
            self.SegmentsOverlap = SegmentsOverlapValues.NO.value
        else:
            # If this is not the first set of segments to be added, we cannot
            # be sure whether there is overlap with the existing segments
            self.SegmentsOverlap = SegmentsOverlapValues.UNDEFINED.value

        src_img = self._source_images[0]
        is_multiframe = hasattr(src_img, 'NumberOfFrames')
        if self._coordinate_system == CoordinateSystemNames.SLIDE:
            if hasattr(src_img, 'PerFrameFunctionalGroupsSequence'):
                source_plane_positions = [
                    item.PlanePositionSlideSequence
                    for item in src_img.PerFrameFunctionalGroupsSequence
                ]
            else:
                # If Dimension Organization Type is TILED_FULL, plane
                # positions are implicit and need to be computed.
                image_origin = src_img.TotalPixelMatrixOriginSequence[0]
                orientation = tuple(
                    float(v) for v in src_img.ImageOrientationSlide
                )
                tiles_per_column = int(
                    np.ceil(
                        src_img.TotalPixelMatrixRows /
                        src_img.Rows
                    )
                )
                tiles_per_row = int(
                    np.ceil(
                        src_img.TotalPixelMatrixColumns /
                        src_img.Columns
                    )
                )
                num_focal_planes = getattr(
                    src_img,
                    'NumberOfFocalPlanes',
                    1
                )
                row_range = range(1, tiles_per_column + 1)
                column_range = range(1, tiles_per_row + 1)
                depth_range = range(1, num_focal_planes + 1)

                shared_fg = self.SharedFunctionalGroupsSequence[0]
                pixel_measures = shared_fg.PixelMeasuresSequence[0]
                pixel_spacing = tuple(
                    float(v) for v in pixel_measures.PixelSpacing
                )
                slice_thickness = getattr(
                    pixel_measures,
                    'SliceThickness',
                    1.0
                )
                spacing_between_slices = getattr(
                    pixel_measures,
                    'SpacingBetweenSlices',
                    1.0
                )
                source_plane_positions = [
                    compute_plane_positions_tiled_full(
                        row_index=r,
                        column_index=c,
                        depth_index=d,
                        x_offset=image_origin.XOffsetInSlideCoordinateSystem,
                        y_offset=image_origin.YOffsetInSlideCoordinateSystem,
                        z_offset=1.0,  # TODO
                        rows=self.Rows,
                        columns=self.Columns,
                        image_orientation=orientation,
                        pixel_spacing=pixel_spacing,
                        slice_thickness=slice_thickness,
                        spacing_between_slices=spacing_between_slices
                    )
                    for r, c, d in itertools.product(
                        row_range,
                        column_range,
                        depth_range
                    )
                ]
        else:
            if is_multiframe:
                source_plane_positions = [
                    item.PlanePositionSequence
                    for item in src_img.PerFrameFunctionalGroupsSequence
                ]
            else:
                source_plane_positions = [
                    PlanePositionSequence(
                        coordinate_system=CoordinateSystemNames.PATIENT,
                        image_position=src_img.ImagePositionPatient
                    )
                    for src_img in self._source_images
                ]

        if plane_positions is None:
            plane_positions = source_plane_positions
        are_spatial_locations_preserved = (
            all(
                plane_positions[i] == source_plane_positions[i]
                for i in range(len(plane_positions))
            ) and
            self._plane_orientation == self._source_plane_orientation
        )

        if pixel_array.shape[0] != len(plane_positions):
            raise ValueError(
                'Number of pixel array planes does not match number of '
                'provided image positions.'
            )

        # For each dimension other than the Referenced Segment Number,
        # obtain the value of the attribute that the Dimension Index Pointer
        # points to in the element of the Plane Position Sequence or
        # Plane Position Slide Sequence.
        # Per definition, this is the Image Position Patient attribute
        # in case of the patient coordinate system, or the
        # X/Y/Z Offset In Slide Coordinate System and the Column/Row
        # Position in Total Image Pixel Matrix attributes in case of the
        # slide coordinate system.
        plane_position_values = np.array([
            [
                np.array(p[0][indexer.DimensionIndexPointer].value)
                for indexer in self.DimensionIndexSequence[1:]
            ]
            for p in plane_positions
        ])

        # Planes need to be sorted according to the Dimension Index Value
        # based on the order of the items in the Dimension Index Sequence.
        # Here we construct an index vector that we can subsequently use to
        # sort planes before adding them to the Pixel Data element.
        _, plane_sort_index = np.unique(
            plane_position_values,
            axis=0,
            return_index=True
        )

        # Get unique values of attributes in the Plane Position Sequence or
        # Plane Position Slide Sequence, which define the position of the plane
        # with respect to the three dimensional patient or slide coordinate
        # system, respectively. These can subsequently be used to look up the
        # relative position of a plane relative to the indexed dimension.
        dimension_position_values = [
            np.unique(plane_position_values[:, index], axis=0)
            for index in range(plane_position_values.shape[1])
        ]

        # When using the binary segmentation type, the previously encoded
        # frames may have been bit-packed across byte boundaries (this happens
        # whenever the number of pixels per frame is not a multiple of 8). In
        # that case, the existing pixel data must be decoded, the new pixels
        # appended, and everything re-encoded. This process is avoided where
        # possible since it is inefficient.
        if (self.SegmentationType == SegmentationTypeValues.BINARY.value and
                ((self.Rows * self.Columns * self.SamplesPerPixel) % 8) > 0):
            re_encode_pixel_data = True
            logger.warning(
                'pixel data needs to be re-encoded for binary bitpacking - '
                'consider using FRACTIONAL instead of BINARY segmentation type'
            )
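            # Worked example of the bit-packing arithmetic (illustrative
            # values): a 10 x 10 binary frame occupies 100 bits = 12.5 bytes,
            # so frame boundaries fall mid-byte and new frames cannot simply
            # be appended to the already packed byte string.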
            # If this is the first segment added, the pixel array is empty
            if hasattr(self, 'PixelData') and len(self.PixelData) > 0:
                full_pixel_array = self.pixel_array.flatten()
            else:
                full_pixel_array = np.array([], np.bool_)
        else:
            re_encode_pixel_data = False

            # Before adding new pixel data, remove trailing null padding byte
            if len(self.PixelData) == get_expected_length(self) + 1:
                self.PixelData = self.PixelData[:-1]

        for i, segment_number in enumerate(described_segment_numbers):
            if pixel_array.dtype in (np.float32, np.float64):
                # Floating-point numbers must be mapped to 8-bit integers in
                # the range [0, max_fractional_value].
                planes = np.around(
                    pixel_array * float(self.MaximumFractionalValue)
                )
                planes = planes.astype(np.uint8)
            elif pixel_array.dtype in (np.uint8, np.uint16):
                # Labeled masks must be converted to binary masks.
                planes = np.zeros(pixel_array.shape, dtype=np.bool_)
                planes[pixel_array == segment_number] = True
            elif pixel_array.dtype == np.bool_:
                planes = pixel_array

            contained_plane_index = []
            for j in plane_sort_index:
                if np.sum(planes[j]) == 0:
                    logger.info(
                        'skip empty plane {} of segment #{}'.format(
                            j, segment_number
                        )
                    )
                    continue
                contained_plane_index.append(j)
                logger.info(
                    'add plane #{} for segment #{}'.format(
                        j, segment_number
                    )
                )

                pffp_item = Dataset()
                frame_content_item = Dataset()
                frame_content_item.DimensionIndexValues = [segment_number]

                # Look up the position of the plane relative to the indexed
                # dimension.
                try:
                    if self._coordinate_system == CoordinateSystemNames.SLIDE:
                        index_values = [
                            np.where(
                                (dimension_position_values[idx] == pos)
                            )[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                    else:
                        # In case of the patient coordinate system, the
                        # value of the attribute the Dimension Index Sequence
                        # points to (Image Position Patient) has a value
                        # multiplicity greater than one.
                        index_values = [
                            np.where(
                                (dimension_position_values[idx] == pos).all(
                                    axis=1
                                )
                            )[0][0] + 1
                            for idx, pos in enumerate(plane_position_values[j])
                        ]
                except IndexError as error:
                    raise IndexError(
                        'Could not determine position of plane #{} in '
                        'three dimensional coordinate system based on '
                        'dimension index values: {}'.format(j, error)
                    )
                frame_content_item.DimensionIndexValues.extend(index_values)
                pffp_item.FrameContentSequence = [frame_content_item]
                if self._coordinate_system == CoordinateSystemNames.SLIDE:
                    pffp_item.PlanePositionSlideSequence = plane_positions[j]
                else:
                    pffp_item.PlanePositionSequence = plane_positions[j]

                # Determining the source images that map to the frame is not
                # always trivial. Since DerivationImageSequence is a type 2
                # attribute, we leave its value empty.
                pffp_item.DerivationImageSequence = []

                if are_spatial_locations_preserved:
                    derivation_image_item = Dataset()
                    derivation_code = codes.cid7203.Segmentation
                    derivation_image_item.DerivationCodeSequence = [
                        CodedConcept(
                            derivation_code.value,
                            derivation_code.scheme_designator,
                            derivation_code.meaning,
                            derivation_code.scheme_version
                        ),
                    ]

                    derivation_src_img_item = Dataset()
                    if len(plane_sort_index) > len(self._source_images):
                        # A single multi-frame source image
                        src_img_item = self.SourceImageSequence[0]
                        # Frame numbers are one-based
                        derivation_src_img_item.ReferencedFrameNumber = j + 1
                    else:
                        # Multiple single-frame source images
                        src_img_item = self.SourceImageSequence[j]
                    derivation_src_img_item.ReferencedSOPClassUID = \
                        src_img_item.ReferencedSOPClassUID
                    derivation_src_img_item.ReferencedSOPInstanceUID = \
                        src_img_item.ReferencedSOPInstanceUID
                    purpose_code = \
                        codes.cid7202.SourceImageForImageProcessingOperation
                    derivation_src_img_item.PurposeOfReferenceCodeSequence = [
                        CodedConcept(
                            purpose_code.value,
                            purpose_code.scheme_designator,
                            purpose_code.meaning,
                            purpose_code.scheme_version
                        ),
                    ]
                    derivation_src_img_item.SpatialLocationsPreserved = 'YES'
                    derivation_image_item.SourceImageSequence = [
                        derivation_src_img_item,
                    ]
                    pffp_item.DerivationImageSequence.append(
                        derivation_image_item
                    )
                else:
                    logger.warning('spatial locations not preserved')

                identification = Dataset()
                identification.ReferencedSegmentNumber = segment_number
                pffp_item.SegmentIdentificationSequence = [
                    identification,
                ]
                self.PerFrameFunctionalGroupsSequence.append(pffp_item)
                self.NumberOfFrames += 1

            contained_plane_index = np.array(contained_plane_index, dtype=int)
            if re_encode_pixel_data:
                full_pixel_array = np.concatenate([
                    full_pixel_array,
                    planes[contained_plane_index].flatten()
                ])
            else:
                self.PixelData += self._encode_pixels(
                    planes[contained_plane_index]
                )

            # In the case of a tiled Total Pixel Matrix, pixel data for the
            # same segment may be added more than once.
            if segment_number not in self._segment_inventory:
                self.SegmentSequence.append(segment_descriptions[i])
            self._segment_inventory.add(segment_number)

        # Re-encode the whole pixel array at once if necessary
        if re_encode_pixel_data:
            self.PixelData = self._encode_pixels(full_pixel_array)

        # Add back the null trailing byte if required
        if len(self.PixelData) % 2 == 1:
            self.PixelData += b'\x00'
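
A hedged usage sketch for ``add_segments()``; ``seg`` is assumed to be an existing ``Segmentation`` instance derived from four single-frame source images, and ``new_description`` a ``SegmentDescription`` whose segment number is not yet in use.

import numpy as np

# Hypothetical binary mask for one new segment, one plane per source image
new_mask = np.zeros((4, seg.Rows, seg.Columns), dtype=np.bool_)
new_mask[:, 5:15, 5:15] = True

# A single-segment binary mask is re-labeled internally with the segment
# number taken from the (single) description, as handled above
seg.add_segments(
    pixel_array=new_mask,
    segment_descriptions=[new_description],
)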
Example #29
    def __init__(
            self,
            pixel_array: np.ndarray,
            photometric_interpretation: Union[str,
                                              PhotometricInterpretationValues],
            bits_allocated: int,
            coordinate_system: Union[str, CoordinateSystemNames],
            study_instance_uid: str,
            series_instance_uid: str,
            series_number: int,
            sop_instance_uid: str,
            instance_number: int,
            manufacturer: str,
            patient_id: Optional[str] = None,
            patient_name: Optional[Union[str, PersonName]] = None,
            patient_birth_date: Optional[str] = None,
            patient_sex: Optional[str] = None,
            accession_number: Optional[str] = None,
            study_id: Optional[str] = None,
            study_date: Optional[Union[str, datetime.date]] = None,
            study_time: Optional[Union[str, datetime.time]] = None,
            referring_physician_name: Optional[Union[str, PersonName]] = None,
            pixel_spacing: Optional[Tuple[float, float]] = None,
            laterality: Optional[Union[str, LateralityValues]] = None,
            patient_orientation: Optional[
                Union[Tuple[str, str], Tuple[PatientOrientationValuesBiped,
                                             PatientOrientationValuesBiped, ],
                      Tuple[PatientOrientationValuesQuadruped,
                            PatientOrientationValuesQuadruped, ]]] = None,
            anatomical_orientation_type: Optional[Union[
                str, AnatomicalOrientationTypeValues]] = None,
            container_identifier: Optional[str] = None,
            issuer_of_container_identifier: Optional[
                IssuerOfIdentifier] = None,
            specimen_descriptions: Optional[
                Sequence[SpecimenDescription]] = None,
            transfer_syntax_uid: str = ImplicitVRLittleEndian,
            **kwargs: Any):
        """

        Parameters
        ----------
        pixel_array: numpy.ndarray
            Array of unsigned integer pixel values representing a single-frame
            image; either a 2D grayscale image or a 3D color image
            (RGB color space)
        photometric_interpretation: Union[str, highdicom.enum.PhotometricInterpretationValues]
            Interpretation of pixel data; either ``"MONOCHROME1"`` or
            ``"MONOCHROME2"`` for 2D grayscale images or ``"RGB"`` or
            ``"YBR_FULL"`` for 3D color images
        bits_allocated: int
            Number of bits that should be allocated per pixel value
        coordinate_system: Union[str, highdicom.enum.CoordinateSystemNames]
            Subject (``"PATIENT"`` or ``"SLIDE"``) that was the target of
            imaging
        study_instance_uid: str
            Study Instance UID
        series_instance_uid: str
            Series Instance UID of the SC image series
        series_number: int
            Series Number of the SC image series
        sop_instance_uid: str
            SOP instance UID that should be assigned to the SC image instance
        instance_number: int
            Number that should be assigned to this SC image instance
        manufacturer: str
            Name of the manufacturer of the device that creates the SC image
            instance (in a research setting this is typically the same
            as `institution_name`)
        patient_id: str, optional
           ID of the patient (medical record number)
        patient_name: Union[str, PersonName], optional
           Name of the patient
        patient_birth_date: str, optional
           Patient's birth date
        patient_sex: str, optional
           Patient's sex
        study_id: str, optional
           ID of the study
        accession_number: str, optional
           Accession number of the study
        study_date: Union[str, datetime.date], optional
           Date of study creation
        study_time: Union[str, datetime.time], optional
           Time of study creation
        referring_physician_name: Union[str, PersonName], optional
            Name of the referring physician
        pixel_spacing: Tuple[float, float], optional
            Physical spacing in millimeters between pixels along the row and
            column dimensions
        laterality: Union[str, highdicom.enum.LateralityValues], optional
            Laterality of the examined body part
        patient_orientation:
                Union[Tuple[str, str], Tuple[highdicom.enum.PatientOrientationValuesBiped, highdicom.enum.PatientOrientationValuesBiped], Tuple[highdicom.enum.PatientOrientationValuesQuadruped, highdicom.enum.PatientOrientationValuesQuadruped]], optional
            Orientation of the patient along the row and column axes of the
            image (required if `coordinate_system` is ``"PATIENT"``)
        anatomical_orientation_type: Union[str, highdicom.enum.AnatomicalOrientationTypeValues], optional
            Type of anatomical orientation of patient relative to image (may
            be provided if `coordinate_system` is ``"PATIENT"`` and patient is
            an animal)
        container_identifier: str, optional
            Identifier of the container holding the specimen (required if
            `coordinate_system` is ``"SLIDE"``)
        issuer_of_container_identifier: highdicom.IssuerOfIdentifier, optional
            Issuer of `container_identifier`
        specimen_descriptions: Sequence[highdicom.SpecimenDescription], optional
            Description of each examined specimen (required if
            `coordinate_system` is ``"SLIDE"``)
        transfer_syntax_uid: str, optional
            UID of transfer syntax that should be used for encoding of
            data elements. The following lossless compressed transfer syntaxes
            are supported: RLE Lossless (``"1.2.840.10008.1.2.5"``).
        **kwargs: Any, optional
            Additional keyword arguments that will be passed to the constructor
            of `highdicom.base.SOPClass`

        """  # noqa
        supported_transfer_syntaxes = {
            ImplicitVRLittleEndian,
            ExplicitVRLittleEndian,
            RLELossless,
        }
        if transfer_syntax_uid not in supported_transfer_syntaxes:
            raise ValueError(
                f'Transfer syntax "{transfer_syntax_uid}" is not supported')

        # Check names
        if patient_name is not None:
            check_person_name(patient_name)
        if referring_physician_name is not None:
            check_person_name(referring_physician_name)

        super().__init__(study_instance_uid=study_instance_uid,
                         series_instance_uid=series_instance_uid,
                         series_number=series_number,
                         sop_instance_uid=sop_instance_uid,
                         sop_class_uid=SecondaryCaptureImageStorage,
                         instance_number=instance_number,
                         manufacturer=manufacturer,
                         modality='OT',
                         transfer_syntax_uid=transfer_syntax_uid,
                         patient_id=patient_id,
                         patient_name=patient_name,
                         patient_birth_date=patient_birth_date,
                         patient_sex=patient_sex,
                         accession_number=accession_number,
                         study_id=study_id,
                         study_date=study_date,
                         study_time=study_time,
                         referring_physician_name=referring_physician_name,
                         **kwargs)

        coordinate_system = CoordinateSystemNames(coordinate_system)
        if coordinate_system == CoordinateSystemNames.PATIENT:
            if patient_orientation is None:
                raise TypeError(
                    'Patient orientation is required if coordinate system '
                    'is "PATIENT".')

            # General Series
            if laterality is not None:
                laterality = LateralityValues(laterality)
                self.Laterality = laterality.value

            # General Image
            if anatomical_orientation_type is not None:
                anatomical_orientation_type = AnatomicalOrientationTypeValues(
                    anatomical_orientation_type)
                self.AnatomicalOrientationType = \
                    anatomical_orientation_type.value
            else:
                anatomical_orientation_type = \
                    AnatomicalOrientationTypeValues.BIPED

            row_orientation, col_orientation = patient_orientation
            if (anatomical_orientation_type ==
                    AnatomicalOrientationTypeValues.BIPED):
                patient_orientation = (
                    PatientOrientationValuesBiped(row_orientation).value,
                    PatientOrientationValuesBiped(col_orientation).value,
                )
            else:
                patient_orientation = (
                    PatientOrientationValuesQuadruped(row_orientation).value,
                    PatientOrientationValuesQuadruped(col_orientation).value,
                )
            self.PatientOrientation = list(patient_orientation)

        elif coordinate_system == CoordinateSystemNames.SLIDE:
            if container_identifier is None:
                raise TypeError(
                    'Container identifier is required if coordinate system '
                    'is "SLIDE".')
            if specimen_descriptions is None:
                raise TypeError(
                    'Specimen descriptions are required if coordinate system '
                    'is "SLIDE".')

            # Specimen
            self.ContainerIdentifier = container_identifier
            self.IssuerOfTheContainerIdentifierSequence: List[Dataset] = []
            if issuer_of_container_identifier is not None:
                self.IssuerOfTheContainerIdentifierSequence.append(
                    issuer_of_container_identifier)
            container_type_item = CodedConcept(*codes.SCT.MicroscopeSlide)
            self.ContainerTypeCodeSequence = [container_type_item]
            self.SpecimenDescriptionSequence = specimen_descriptions

        # SC Equipment
        self.ConversionType = ConversionTypeValues.DI.value

        # SC Image
        now = datetime.datetime.now()
        self.DateOfSecondaryCapture = DA(now.date())
        self.TimeOfSecondaryCapture = TM(now.time())

        # Image Pixel
        self.ImageType = ['DERIVED', 'SECONDARY', 'OTHER']
        self.Rows = pixel_array.shape[0]
        self.Columns = pixel_array.shape[1]
        allowed_types = [np.bool_, np.uint8, np.uint16]
        if not any(pixel_array.dtype == t for t in allowed_types):
            raise TypeError(
                'Pixel array must be of type np.bool_, np.uint8 or np.uint16. '
                f'Found {pixel_array.dtype}.')
        wrong_bit_depth_assignment = (
            pixel_array.dtype == np.bool_ and bits_allocated != 1,
            pixel_array.dtype == np.uint8 and bits_allocated != 8,
            pixel_array.dtype == np.uint16 and bits_allocated not in (12, 16),
        )
        if any(wrong_bit_depth_assignment):
            raise ValueError('Pixel array has an unexpected bit depth.')
        if bits_allocated not in (1, 8, 12, 16):
            raise ValueError('Unexpected number of bits allocated.')
        if transfer_syntax_uid == RLELossless and bits_allocated % 8 != 0:
            raise ValueError(
                'When using run length encoding, bits allocated must be a '
                'multiple of 8')
        self.BitsAllocated = bits_allocated
        self.HighBit = self.BitsAllocated - 1
        self.BitsStored = self.BitsAllocated
        self.PixelRepresentation = 0
        photometric_interpretation = PhotometricInterpretationValues(
            photometric_interpretation)
        if pixel_array.ndim == 3:
            accepted_interpretations = {
                PhotometricInterpretationValues.RGB.value,
                PhotometricInterpretationValues.YBR_FULL.value,
                PhotometricInterpretationValues.YBR_FULL_422.value,
                PhotometricInterpretationValues.YBR_PARTIAL_420.value,
            }
            if photometric_interpretation.value not in accepted_interpretations:
                raise ValueError(
                    'Pixel array has an unexpected photometric interpretation.'
                )
            if pixel_array.shape[-1] != 3:
                raise ValueError(
                    'Pixel array has an unexpected number of color channels.')
            if bits_allocated != 8:
                raise ValueError('Color images must be 8-bit.')
            if pixel_array.dtype != np.uint8:
                raise TypeError(
                    'Pixel array must have 8-bit unsigned integer data type '
                    'in case of a color image.')
            self.PhotometricInterpretation = photometric_interpretation.value
            self.SamplesPerPixel = 3
            self.PlanarConfiguration = 0
        elif pixel_array.ndim == 2:
            accepted_interpretations = {
                PhotometricInterpretationValues.MONOCHROME1.value,
                PhotometricInterpretationValues.MONOCHROME2.value,
            }
            if photometric_interpretation.value not in accepted_interpretations:
                raise ValueError(
                    'Pixel array has an unexpected photometric interpretation.'
                )
            self.PhotometricInterpretation = photometric_interpretation.value
            self.SamplesPerPixel = 1
        else:
            raise ValueError(
                'Pixel array has an unexpected number of dimensions.')
        if pixel_spacing is not None:
            self.PixelSpacing = pixel_spacing

        encoded_frame = encode_frame(
            pixel_array,
            transfer_syntax_uid=self.file_meta.TransferSyntaxUID,
            bits_allocated=self.BitsAllocated,
            bits_stored=self.BitsStored,
            photometric_interpretation=self.PhotometricInterpretation,
            pixel_representation=self.PixelRepresentation,
            planar_configuration=getattr(self, 'PlanarConfiguration', None))
        if self.file_meta.TransferSyntaxUID.is_encapsulated:
            self.PixelData = encapsulate([encoded_frame])
        else:
            self.PixelData = encoded_frame
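
A minimal usage sketch for the constructor above. It assumes the class is highdicom's SCImage (the class name is not visible in this excerpt), that it inherits pydicom's save_as method via highdicom.base.SOPClass, and that the manufacturer string and output filename are placeholders:

import numpy as np
from pydicom.uid import generate_uid

# A single 2D grayscale frame of unsigned 8-bit integers
frame = np.zeros((512, 512), dtype=np.uint8)

sc_image = SCImage(
    pixel_array=frame,
    photometric_interpretation='MONOCHROME2',
    bits_allocated=8,
    coordinate_system='PATIENT',
    study_instance_uid=generate_uid(),
    series_instance_uid=generate_uid(),
    series_number=1,
    sop_instance_uid=generate_uid(),
    instance_number=1,
    manufacturer='ExampleVendor',          # placeholder value
    # Required because coordinate_system is "PATIENT"
    patient_orientation=('L', 'P'),
)
sc_image.save_as('sc_image.dcm')           # placeholder filename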
Example #30
File: test_sr.py Project: pieper/highdicom
    def test_not_equal(self):
        c1 = CodedConcept(self._value, self._scheme_designator, self._meaning)
        c2 = CodedConcept('373099004', 'SCT', 'Median Value of population')
        assert c1 != c2
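
A companion sketch of the positive case, under the assumption that CodedConcept equality compares the code value and coding scheme designator rather than object identity:

# Assumed equality semantics: same value and scheme designator compare equal
c2 = CodedConcept('373099004', 'SCT', 'Median Value of population')
c3 = CodedConcept('373099004', 'SCT', 'Median Value of population')
assert c2 == c3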