Example #1
def _DT_from_str(value: str) -> DT:
    value = value.rstrip()
    length = len(value)
    if length < 4 or length > 26:
        logger.warn(f"Expected length between 4 and 26, got length {length}")

    return DT(value)
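A minimal sketch of how this helper behaves, assuming a well-formed DICOM DT string (the value below is illustrative):

from pydicom.valuerep import DT

raw = "20230131143052.000000+0100"    # illustrative YYYYMMDDHHMMSS.FFFFFF&ZZXX value
value = _DT_from_str(raw + "  ")      # trailing padding is stripped before parsing
print(value.year, value.tzinfo)       # DT subclasses datetime.datetime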
Example #2
    def __init__(
        self,
        name: Union[Code, CodedConcept],
        value: Union[str, datetime.datetime, DT],
        relationship_type: Union[str, RelationshipTypeValues, None] = None
    ) -> None:
        """
        Parameters
        ----------
        name: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]
            concept name
        value: Union[str, datetime.datetime, pydicom.valuerep.DT]
            datetime
        relationship_type: Union[highdicom.sr.RelationshipTypeValues, str]
            type of relationship with parent content item

        """  # noqa
        if relationship_type is None:
            warnings.warn(
                'A future release will require that relationship types be '
                f'provided for items of type {self.__class__.__name__}.',
                DeprecationWarning)
        super(DateTimeContentItem, self).__init__(ValueTypeValues.DATETIME,
                                                  name, relationship_type)
        self.DateTime = DT(value)
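A usage sketch for this constructor, assuming DateTimeContentItem and RelationshipTypeValues are importable from highdicom.sr as in current releases (the concept code is just an example):

import datetime
from pydicom.sr.codedict import codes
from highdicom.sr import DateTimeContentItem, RelationshipTypeValues

item = DateTimeContentItem(
    name=codes.DCM.ImagingStartDatetime,
    value=datetime.datetime.now(),
    relationship_type=RelationshipTypeValues.CONTAINS,
)
print(item.DateTime)  # stored as pydicom.valuerep.DT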
Example #3
    def test_datetime_item_construction_from_datetime(self):
        name = codes.DCM.ImagingStartDatetime
        value = datetime.now()
        i = DateTimeContentItem(name=name, value=value)
        assert i.ValueType == 'DATETIME'
        assert i.ConceptNameCodeSequence[0] == name
        assert i.DateTime == DT(value)
        with pytest.raises(AttributeError):
            assert i.RelationshipType
Example #4
def convert_DT_string(byte_string, is_little_endian, struct_format=None):
    """Read and return a DT value"""
    if datetime_conversion:
        if not in_py2:
            byte_string = byte_string.decode(encoding)
        length = len(byte_string)
        if length < 14 or length > 26:
            logger.warn("Expected length between 14 and 26, got length %d", length)
        return DT(byte_string)
    else:
        return convert_string(byte_string, is_little_endian, struct_format)
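This legacy reader hook only fires when pydicom's global datetime_conversion flag is enabled; a sketch of the public way to get the same behaviour (the file path is a placeholder):

import pydicom
from pydicom import config

config.datetime_conversion = True    # DA/DT/TM elements are decoded into DA/DT/TM objects
ds = pydicom.dcmread("example.dcm")  # placeholder path
print(type(ds.AcquisitionDateTime))  # pydicom.valuerep.DT, if the element is present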
Example #5
    def __init__(
        self,
        name: Union[Code, CodedConcept],
        temporal_range_type: Union[str, TemporalRangeTypeValues],
        referenced_sample_positions: Optional[Sequence[int]] = None,
        referenced_time_offsets: Optional[Sequence[float]] = None,
        referenced_date_time: Optional[Sequence[datetime.datetime]] = None,
        relationship_type: Union[str, RelationshipTypeValues, None] = None
    ) -> None:
        """
        Parameters
        ----------
        name: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]
            concept name
        temporal_range_type: Union[highdicom.sr.TemporalRangeTypeValues, str]
            name of the temporal range type
        referenced_sample_positions: Sequence[int], optional
            one-based relative sample position of acquired time points
            within the time series
        referenced_time_offsets: Sequence[float], optional
            seconds after start of the acquisition of the time series
        referenced_date_time: Sequence[datetime.datetime], optional
            absolute time points
        relationship_type: Union[highdicom.sr.RelationshipTypeValues, str]
            type of relationship with parent content item

        """  # noqa
        if relationship_type is None:
            warnings.warn(
                'A future release will require that relationship types be '
                f'provided for items of type {self.__class__.__name__}.',
                DeprecationWarning)
        super(TcoordContentItem, self).__init__(ValueTypeValues.TCOORD, name,
                                                relationship_type)
        temporal_range_type = TemporalRangeTypeValues(temporal_range_type)
        self.TemporalRangeType = temporal_range_type.value
        if referenced_sample_positions is not None:
            self.ReferencedSamplePositions = [
                int(v) for v in referenced_sample_positions
            ]
        elif referenced_time_offsets is not None:
            self.ReferencedTimeOffsets = [
                float(v) for v in referenced_time_offsets
            ]
        elif referenced_date_time is not None:
            self.ReferencedDateTime = [DT(v) for v in referenced_date_time]
        else:
            raise ValueError(
                'One of the following arguments is required: "{}"'.format(
                    '", "'.join([
                        'referenced_sample_positions',
                        'referenced_time_offsets', 'referenced_date_time'
                    ])))
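A usage sketch for this constructor, using the absolute date-time variant of the three mutually exclusive referenced_* arguments (imports assume current highdicom.sr exports; the values are illustrative):

import datetime
from pydicom.sr.codedict import codes
from highdicom.sr import (
    TcoordContentItem,
    TemporalRangeTypeValues,
    RelationshipTypeValues,
)

item = TcoordContentItem(
    name=codes.DCM.ImagingStartDatetime,
    temporal_range_type=TemporalRangeTypeValues.POINT,
    referenced_date_time=[datetime.datetime(2023, 1, 31, 14, 30, 52)],
    relationship_type=RelationshipTypeValues.CONTAINS,
)
print(item.ReferencedDateTime)  # list of pydicom.valuerep.DT values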
Example #6
    def __init__(
        self,
        name: Union[Code, CodedConcept],
        value: Union[str, datetime.datetime, DT],
        relationship_type: Optional[Union[str, RelationshipTypeValues]] = None
    ) -> None:
        """
        Parameters
        ----------
        name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code]
            concept name
        value: Union[str, datetime.datetime, pydicom.valuerep.DT]
            datetime
        relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional
            type of relationship with parent content item

        """  # noqa
        super(DateTimeContentItem, self).__init__(ValueTypeValues.DATETIME,
                                                  name, relationship_type)
        self.DateTime = DT(value)
Example #7
def _DT_from_byte_string(byte_string):
    byte_string = byte_string.rstrip()
    length = len(byte_string)
    if length < 4 or length > 26:
        logger.warn("Expected length between 4 and 26, got length %d", length)
    return DT(byte_string)
Example #8
    def __init__(
            self,
            evidence: Sequence[Dataset],
            content: Dataset,
            series_instance_uid: str,
            series_number: int,
            sop_instance_uid: str,
            instance_number: int,
            manufacturer: str,
            is_complete: bool = False,
            is_final: bool = False,
            is_verified: bool = False,
            institution_name: Optional[str] = None,
            institutional_department_name: Optional[str] = None,
            verifying_observer_name: Optional[str] = None,
            verifying_organization: Optional[str] = None,
            performed_procedure_codes: Optional[
                Sequence[Union[Code, CodedConcept]]
            ] = None,
            requested_procedures: Optional[Sequence[Dataset]] = None,
            previous_versions: Optional[Sequence[Dataset]] = None,
            record_evidence: bool = True,
            **kwargs
        ):
        """
        Parameters
        ----------
        evidence: Sequence[pydicom.dataset.Dataset]
            Instances that are referenced in the content tree and from which
            the created SR document instance should inherit patient and study
            information
        content: pydicom.dataset.Dataset
            Root container content items that should be included in the
            SR document
        series_instance_uid: str
            Series Instance UID of the SR document series
        series_number: Union[int, None]
            Series Number of the SR document series
        sop_instance_uid: str
            SOP instance UID that should be assigned to the SR document instance
        instance_number: int
            Number that should be assigned to this SR document instance
        manufacturer: str
            Name of the manufacturer of the device that creates the SR document
            instance (in a research setting this is typically the same
            as `institution_name`)
        is_complete: bool, optional
            Whether the content is complete (default: ``False``)
        is_final: bool, optional
            Whether the report is the definitive means of communicating the
            findings (default: ``False``)
        is_verified: bool, optional
            Whether the report has been verified by an observer accountable
            for its content (default: ``False``)
        institution_name: str, optional
            Name of the institution of the person or device that creates the
            SR document instance
        institutional_department_name: str, optional
            Name of the department of the person or device that creates the
            SR document instance
        verifying_observer_name: Union[str, None], optional
            Name of the person that verified the SR document
            (required if `is_verified`)
        verifying_organization: str
            Name of the organization that verified the SR document
            (required if `is_verified`)
        performed_procedure_codes: List[pydicom.sr.coding.CodedConcept]
            Codes of the performed procedures that resulted in the SR document
        requested_procedures: List[pydicom.dataset.Dataset]
            Requested procedures that are being fulfilled by creation of the
            SR document
        previous_versions: List[pydicom.dataset.Dataset]
            Instances representing previous versions of the SR document
        record_evidence: bool, optional
            Whether provided `evidence` should be recorded, i.e. included
            in Current Requested Procedure Evidence Sequence or Pertinent
            Other Evidence Sequence (default: ``True``)
        **kwargs: Dict[str, Any], optional
            Additional keyword arguments that will be passed to the constructor
            of `highdicom.base.SOPClass`

        Note
        ----
        Each dataset in `evidence` must be part of the same study.

        """
        super().__init__(
            study_instance_uid=evidence[0].StudyInstanceUID,
            series_instance_uid=series_instance_uid,
            series_number=series_number,
            sop_instance_uid=sop_instance_uid,
            sop_class_uid=Comprehensive3DSRStorage,
            instance_number=instance_number,
            manufacturer=manufacturer,
            modality='SR',
            transfer_syntax_uid=None,
            patient_id=evidence[0].PatientID,
            patient_name=evidence[0].PatientName,
            patient_birth_date=evidence[0].PatientBirthDate,
            patient_sex=evidence[0].PatientSex,
            accession_number=evidence[0].AccessionNumber,
            study_id=evidence[0].StudyID,
            study_date=evidence[0].StudyDate,
            study_time=evidence[0].StudyTime,
            referring_physician_name=evidence[0].ReferringPhysicianName,
            **kwargs
        )

        if institution_name is not None:
            self.InstitutionName = institution_name
            if institutional_department_name is not None:
                self.InstitutionalDepartmentName = institutional_department_name

        now = datetime.datetime.now()
        if is_complete:
            self.CompletionFlag = 'COMPLETE'
        else:
            self.CompletionFlag = 'PARTIAL'
        if is_verified:
            if verifying_observer_name is None:
                raise ValueError(
                    'Verifying Observer Name must be specified if SR document '
                    'has been verified.'
                )
            if verifying_organization is None:
                raise ValueError(
                    'Verifying Organization must be specified if SR document '
                    'has been verified.'
                )
            self.VerificationFlag = 'VERIFIED'
            observer_item = Dataset()
            observer_item.VerifyingObserverName = verifying_observer_name
            observer_item.VerifyingOrganization = verifying_organization
            observer_item.VerificationDateTime = DT(now)
            self.VerifyingObserverSequence = [observer_item]
        else:
            self.VerificationFlag = 'UNVERIFIED'
        if is_final:
            self.PreliminaryFlag = 'FINAL'
        else:
            self.PreliminaryFlag = 'PRELIMINARY'

        # Add content to dataset
        for tag, value in content.items():
            self[tag] = value

        evd_collection = collections.defaultdict(list)
        for evd in evidence:
            if evd.StudyInstanceUID != evidence[0].StudyInstanceUID:
                raise ValueError(
                    'Referenced data sets must all belong to the same study.'
                )
            evd_instance_item = Dataset()
            evd_instance_item.ReferencedSOPClassUID = evd.SOPClassUID
            evd_instance_item.ReferencedSOPInstanceUID = evd.SOPInstanceUID
            evd_collection[evd.SeriesInstanceUID].append(
                evd_instance_item
            )
        evd_study_item = Dataset()
        evd_study_item.StudyInstanceUID = evidence[0].StudyInstanceUID
        evd_study_item.ReferencedSeriesSequence = []
        for evd_series_uid, evd_instance_items in evd_collection.items():
            evd_series_item = Dataset()
            evd_series_item.SeriesInstanceUID = evd_series_uid
            evd_series_item.ReferencedSOPSequence = evd_instance_items
            evd_study_item.ReferencedSeriesSequence.append(evd_series_item)
        if requested_procedures is not None:
            self.ReferencedRequestSequence = requested_procedures
            self.CurrentRequestedProcedureEvidenceSequence = [evd_study_item]
        else:
            if record_evidence:
                self.PertinentOtherEvidenceSequence = [evd_study_item]

        if previous_versions is not None:
            pre_collection = collections.defaultdict(list)
            for pre in previous_versions:
                if pre.StudyInstanceUID != evidence[0].StudyInstanceUID:
                    raise ValueError(
                        'Previous version data sets must belong to the '
                        'same study.'
                    )
                pre_instance_item = Dataset()
                pre_instance_item.ReferencedSOPClassUID = pre.SOPClassUID
                pre_instance_item.ReferencedSOPInstanceUID = pre.SOPInstanceUID
                pre_collection[pre.SeriesInstanceUID].append(
                    pre_instance_item
                )
            pre_study_item = Dataset()
            pre_study_item.StudyInstanceUID = pre.StudyInstanceUID
            pre_study_item.ReferencedSeriesSequence = []
            for pre_series_uid, pre_instance_items in pre_collection.items():
                pre_series_item = Dataset()
                pre_series_item.SeriesInstanceUID = pre_series_uid
                pre_series_item.ReferencedSOPSequence = pre_instance_items
                pre_study_item.ReferencedSeriesSequence.append(pre_series_item)
            self.PredecessorDocumentsSequence = [pre_study_item]

        if performed_procedure_codes is not None:
            self.PerformedProcedureCodeSequence = performed_procedure_codes
        else:
            self.PerformedProcedureCodeSequence = []

        # TODO
        self.ReferencedPerformedProcedureStepSequence = []

        self.copy_patient_and_study_information(evidence[0])
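This constructor hard-codes Comprehensive3DSRStorage as the SOP class, so it appears to belong to highdicom's Comprehensive3DSR. A compact usage sketch under that assumption, where evidence is a list of datasets from one study and content is a prepared root container dataset (UIDs are freshly generated placeholders):

from pydicom.uid import generate_uid
from highdicom.sr import Comprehensive3DSR  # assuming the excerpt above is this class

report = Comprehensive3DSR(
    evidence=evidence,                  # assumed: referenced source datasets
    content=content,                    # assumed: root CONTAINER content item dataset
    series_instance_uid=generate_uid(),
    series_number=1,
    sop_instance_uid=generate_uid(),
    instance_number=1,
    manufacturer='Example Lab',
    is_complete=True,
    is_final=True,
    is_verified=True,
    verifying_observer_name='Doe^Jane',
    verifying_organization='Example Hospital',
)
report.save_as('report.dcm')            # placeholder output path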
Example #9
def main():
    parser = argparse.ArgumentParser(description='argparse example.')
    parser.add_argument('input', help="Root directory", metavar='<input>')
    parser.add_argument('output', help="Output directory", metavar='<output>')
    parser.add_argument('--ext',
                        help='File extension. default: %(default)s',
                        metavar='str',
                        default='.mha')
    parser.add_argument(
        '--prefix',
        help='Prefix of the output filename. default: %(default)s',
        metavar='str',
        default='SE')
    parser.add_argument('--compress',
                        help='Compress the output image. default: %(default)s',
                        type=str,
                        choices=['auto', 'true', 'false'],
                        default='auto')
    parser.add_argument('--offset',
                        help='Offset to the number. default: %(default)s',
                        type=int,
                        metavar='int',
                        default=1)
    parser.add_argument('--logdir',
                        help='Directory to store logs. default: %(default)s',
                        metavar='str',
                        default=None)
    parser.add_argument('--verbose',
                        help='Verbosity. default: %(default)s',
                        type=int,
                        metavar='level',
                        default=0)

    args = parser.parse_args()

    logger.setLevel(verbosity_to_level(args.verbose))
    if args.logdir is not None:
        logdir = Path(args.logdir)
        logdir.mkdir(parents=True, exist_ok=True)
        handler = FileHandler(
            logdir /
            '{}.log'.format(datetime.today().strftime("%y%m%d_%H%M%S")))
        handler.setLevel(verbosity_to_level(args.verbose))
        handler.setFormatter(log_format)
        logger.addHandler(handler)

    root_dir = Path(args.input)
    out_dir = Path(args.output)

    compression = {'auto': None, 'true': True, 'false': False}[args.compress]
    dtype = None
    prefix = args.prefix
    ext = args.ext
    offset = args.offset

    logger.info('Collect dicom information')
    all_files = [
        str(e) for e in tqdm.tqdm(root_dir.glob('**/*'), desc='list all files')
        if e.is_file()
    ]

    key_tags = [
        'PatientID', 'SeriesInstanceUID', 'AcquisitionDate', 'AcquisitionTime',
        'ImageOrientationPatient', 'ImagePositionPatient'
    ]
    dcm_files = []
    for fn in tqdm.tqdm(all_files):
        try:
            dcm = pydicom.dcmread(fn, stop_before_pixels=True)
            dcm_files.append([fn] + [dcm.get(tag) for tag in key_tags])
        except Exception as e:
            logger.warning({'filename': fn, 'exception': e})

    df = pd.DataFrame(dcm_files, columns=['filepath'] + key_tags)

    logger.info('Convert dicom files')

    def sort_dicom(df):
        orientation = np.array(df['ImageOrientationPatient'].iloc[0]).reshape(
            (2, 3))
        third_axis = np.cross(orientation[0], orientation[1])
        locs = df['ImagePositionPatient'].map(lambda p: np.dot(third_axis, p))
        sorted_index = np.argsort(locs)
        return df.iloc[sorted_index]

    FLOAT_TYPES = set([
        sitk.sitkFloat32, sitk.sitkFloat64, sitk.sitkVectorFloat32,
        sitk.sitkVectorFloat64
    ])

    for patient_id, df_patient in df.groupby('PatientID'):
        logger.info(patient_id)
        sids, times = [], []
        for series_id, df_series in df_patient.groupby('SeriesInstanceUID'):
            sids.append(series_id)
            dts = df_series.apply(
                lambda row: DT(row.AcquisitionDate + row.AcquisitionTime),
                axis=1).tolist()
            if len(df_series) <= 2:
                times.append(dts[0])
            else:
                dts.sort()
                times.append(dts[len(dts) // 2])
        nums = np.argsort(np.argsort(times))
        series_id2series_number = dict(zip(sids, nums))

        for series_id, df_series in df_patient.groupby('SeriesInstanceUID'):
            logger.debug(series_id)
            output_filename = out_dir / patient_id / (prefix + '{:d}'.format(
                series_id2series_number[series_id] + offset) + ext)
            output_filename.parent.mkdir(parents=True, exist_ok=True)
            filenames = sort_dicom(df_series)['filepath'].tolist()
            reader = sitk.ImageSeriesReader()
            reader.SetFileNames(filenames)
            image = reader.Execute()
            if image.GetPixelID() == sitk.sitkFloat64 and dtype is None:
                f = sitk.CastImageFilter()
                f.SetOutputPixelType(sitk.sitkFloat32)
                image = f.Execute(image)
            writer = sitk.ImageFileWriter()
            if compression is None:
                compression = image.GetPixelID() not in FLOAT_TYPES
            writer.SetUseCompression(compression)
            writer.SetFileName(str(output_filename))
            writer.Execute(image)

    logger.info('End')
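The DT-specific trick in this script is concatenating AcquisitionDate (DA) and AcquisitionTime (TM) into a single sortable DT value and ordering series by the median acquisition time; a minimal sketch of that idea with illustrative values:

from pydicom.valuerep import DT

acquisitions = [
    ("20230131", "141500"),          # illustrative (AcquisitionDate, AcquisitionTime) pairs
    ("20230131", "093000"),
    ("20230131", "120000.250000"),
]
# DT subclasses datetime.datetime, so the concatenated values sort chronologically.
times = sorted(DT(date + time) for date, time in acquisitions)
print(times[len(times) // 2])        # the median, as used for series ordering above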
Example #10
    def __init__(self,
                 evidence: Sequence[Dataset],
                 content: Dataset,
                 series_instance_uid: str,
                 series_number: int,
                 sop_instance_uid: str,
                 sop_class_uid: str,
                 instance_number: int,
                 manufacturer: Optional[str] = None,
                 is_complete: bool = False,
                 is_final: bool = False,
                 is_verified: bool = False,
                 institution_name: Optional[str] = None,
                 institutional_department_name: Optional[str] = None,
                 verifying_observer_name: Optional[Union[str,
                                                         PersonName]] = None,
                 verifying_organization: Optional[str] = None,
                 performed_procedure_codes: Optional[Sequence[Union[
                     Code, CodedConcept]]] = None,
                 requested_procedures: Optional[Sequence[Dataset]] = None,
                 previous_versions: Optional[Sequence[Dataset]] = None,
                 record_evidence: bool = True,
                 **kwargs: Any) -> None:
        """
        Parameters
        ----------
        evidence: Sequence[pydicom.dataset.Dataset]
            Instances that are referenced in the content tree and from which
            the created SR document instance should inherit patient and study
            information
        content: pydicom.dataset.Dataset
            Root container content items that should be included in the
            SR document
        series_instance_uid: str
            Series Instance UID of the SR document series
        series_number: Union[int, None]
            Series Number of the SR document series
        sop_instance_uid: str
            SOP Instance UID that should be assigned to the SR document instance
        sop_class_uid: str
            SOP Class UID for the SR document type
        instance_number: int
            Number that should be assigned to this SR document instance
        manufacturer: str, optional
            Name of the manufacturer of the device that creates the SR document
            instance (in a research setting this is typically the same
            as `institution_name`)
        is_complete: bool, optional
            Whether the content is complete (default: ``False``)
        is_final: bool, optional
            Whether the report is the definitive means of communicating the
            findings (default: ``False``)
        is_verified: bool, optional
            Whether the report has been verified by an observer accountable
            for its content (default: ``False``)
        institution_name: str, optional
            Name of the institution of the person or device that creates the
            SR document instance
        institutional_department_name: str, optional
            Name of the department of the person or device that creates the
            SR document instance
        verifying_observer_name: Union[str, pydicom.valuerep.PersonName, None], optional
            Name of the person that verified the SR document
            (required if `is_verified`)
        verifying_organization: str, optional
            Name of the organization that verified the SR document
            (required if `is_verified`)
        performed_procedure_codes: List[highdicom.sr.CodedConcept], optional
            Codes of the performed procedures that resulted in the SR document
        requested_procedures: List[pydicom.dataset.Dataset], optional
            Requested procedures that are being fulfilled by creation of the
            SR document
        previous_versions: List[pydicom.dataset.Dataset], optional
            Instances representing previous versions of the SR document
        record_evidence: bool, optional
            Whether provided `evidence` should be recorded (i.e. included
            in Pertinent Other Evidence Sequence) even if not referenced by
            content items in the document tree (default: ``True``)
        **kwargs: Any, optional
            Additional keyword arguments that will be passed to the constructor
            of `highdicom.base.SOPClass`

        Raises
        ------
        ValueError
            When no `evidence` is provided

        """  # noqa: E501
        if len(evidence) == 0:
            raise ValueError('No evidence was provided.')
        super().__init__(study_instance_uid=evidence[0].StudyInstanceUID,
                         series_instance_uid=series_instance_uid,
                         series_number=series_number,
                         sop_instance_uid=sop_instance_uid,
                         sop_class_uid=sop_class_uid,
                         instance_number=instance_number,
                         manufacturer=manufacturer,
                         modality='SR',
                         transfer_syntax_uid=None,
                         patient_id=evidence[0].PatientID,
                         patient_name=evidence[0].PatientName,
                         patient_birth_date=evidence[0].PatientBirthDate,
                         patient_sex=evidence[0].PatientSex,
                         accession_number=evidence[0].AccessionNumber,
                         study_id=evidence[0].StudyID,
                         study_date=evidence[0].StudyDate,
                         study_time=evidence[0].StudyTime,
                         referring_physician_name=getattr(
                             evidence[0], 'ReferringPhysicianName', None),
                         **kwargs)

        if institution_name is not None:
            self.InstitutionName = institution_name
            if institutional_department_name is not None:
                self.InstitutionalDepartmentName = institutional_department_name

        now = datetime.datetime.now()
        if is_complete:
            self.CompletionFlag = 'COMPLETE'
        else:
            self.CompletionFlag = 'PARTIAL'
        if is_verified:
            if verifying_observer_name is None:
                raise ValueError(
                    'Verifying Observer Name must be specified if SR document '
                    'has been verified.')
            if verifying_organization is None:
                raise ValueError(
                    'Verifying Organization must be specified if SR document '
                    'has been verified.')
            self.VerificationFlag = 'VERIFIED'
            observer_item = Dataset()
            check_person_name(verifying_observer_name)
            observer_item.VerifyingObserverName = verifying_observer_name
            observer_item.VerifyingOrganization = verifying_organization
            observer_item.VerificationDateTime = DT(now)

            #  Type 2 attribute - we will leave empty
            observer_item.VerifyingObserverIdentificationCodeSequence = []

            self.VerifyingObserverSequence = [observer_item]
        else:
            self.VerificationFlag = 'UNVERIFIED'
        if is_final:
            self.PreliminaryFlag = 'FINAL'
        else:
            self.PreliminaryFlag = 'PRELIMINARY'

        # Add content to dataset
        for tag, value in content.items():
            self[tag] = value

        ref_items, unref_items = self._collect_evidence(evidence, content)
        if len(ref_items) > 0:
            self.CurrentRequestedProcedureEvidenceSequence = ref_items
        if len(unref_items) > 0 and record_evidence:
            self.PertinentOtherEvidenceSequence = unref_items

        if requested_procedures is not None:
            self.ReferencedRequestSequence = requested_procedures

        if previous_versions is not None:
            pre_items = self._collect_predecessors(previous_versions)
            self.PredecessorDocumentsSequence = pre_items

        if performed_procedure_codes is not None:
            self.PerformedProcedureCodeSequence = performed_procedure_codes
        else:
            self.PerformedProcedureCodeSequence = []

        # TODO: unclear how this would work
        self.ReferencedPerformedProcedureStepSequence: List[Dataset] = []

        self.copy_patient_and_study_information(evidence[0])
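Unlike Example #8, this variant takes the SOP Class UID as an argument, so the same constructor can back several SR IODs. A sketch of a call to it, with SRDocumentBase standing in as a purely hypothetical name for the class that defines this __init__, and Enhanced SR Storage used for sop_class_uid:

from pydicom.uid import generate_uid

doc = SRDocumentBase(                   # hypothetical name for the class defining this __init__
    evidence=evidence,                  # assumed: source datasets, at least one required
    content=content,                    # assumed: root container content item dataset
    series_instance_uid=generate_uid(),
    series_number=1,
    sop_instance_uid=generate_uid(),
    sop_class_uid='1.2.840.10008.5.1.4.1.1.88.22',  # Enhanced SR Storage
    instance_number=1,
    manufacturer='Example Lab',
)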