Esempio n. 1
0
    def setUp(self):
        """Build a small dataset with a two-level nested sequence.

        Stored on self.ds so every test method can use it.
        """
        ds = Dataset()
        ds.PatientName = "Name^Patient"

        # Innermost level: two contour items.
        inner_a = Dataset()
        inner_a.ContourNumber = 1
        inner_a.ContourData = ['2', '4', '8', '16']

        inner_b = Dataset()
        inner_b.ContourNumber = 2
        inner_b.ContourData = ['32', '64', '128', '196']

        # Middle level: wrap both contour items in a ContourSequence.
        middle = Dataset()
        middle.ContourSequence = Sequence([inner_a, inner_b])

        # Top level: a one-item ROIContourSequence.
        ds.ROIContourSequence = Sequence([middle])

        self.ds = ds
Esempio n. 2
0
def set_referenced_image_info(dataset, series_instance_uid, sop_class_uid,
                              sop_instance_uid):
    """Attach a single-series, single-image ReferencedSeries sequence.

    :param dataset: Dataset to receive the ReferencedSeries attribute
    :param series_instance_uid: UID of the referenced series
    :param sop_class_uid: Referenced SOP Class UID of the image
    :param sop_instance_uid: Referenced SOP Instance UID of the image
    """
    # Build the inner image item first, then nest it in the series item.
    image_item = Dataset()
    image_item.ReferencedSOPClassUID = sop_class_uid
    image_item.ReferencedSOPInstanceUID = sop_instance_uid

    series_item = Dataset()
    series_item.SeriesInstanceUID = series_instance_uid
    series_item.ReferencedImages = Sequence([image_item])

    dataset.ReferencedSeries = Sequence([series_item])
Esempio n. 3
0
    def create_dicom_plan(self):
        """ Create a dummy DICOM RT-plan object.

        The only data which is forwarded to this object, is self.patient_name.
        :returns: a DICOM RT-plan object.
        """
        # File meta information: CT Image Storage, implicit VR little endian.
        file_meta = Dataset()
        file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage
        file_meta.MediaStorageSOPInstanceUID = "1.2.3"
        file_meta.ImplementationClassUID = "1.2.3.4"
        file_meta.TransferSyntaxUID = UID.ImplicitVRLittleEndian  # default transfer syntax

        rt_plan = FileDataset("file", {}, file_meta=file_meta, preamble=b"\0" * 128)

        # Patient module.
        rt_plan.PatientsName = self.patient_name
        if self.patient_id is None or self.patient_id == '':
            # No ID supplied: derive a unique one from the current timestamp.
            rt_plan.PatientID = datetime.datetime.today().strftime('%Y%m%d-%H%M%S')
        else:
            rt_plan.PatientID = self.patient_id  # (0010,0020), type LO - Long String
        rt_plan.PatientsSex = ''  # (0010,0040), type CS; enumerated: M = male, F = female, O = other
        rt_plan.PatientsBirthDate = '19010101'
        rt_plan.SpecificCharacterSet = 'ISO_IR 100'
        rt_plan.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage

        # Study Instance UID (0020,000D): set either in __init__ when creating
        # a new object or when importing a DICOM file; shared with the CT study.
        rt_plan.StudyInstanceUID = self._dicom_study_instance_uid

        # Series Instance UID (0020,000E): may differ from the CT series UID.
        rt_plan.SeriesInstanceUID = self._plan_dicom_series_instance_uid

        # RT Plan module.
        rt_plan.Modality = "RTPLAN"
        rt_plan.SeriesDescription = 'RT Plan'
        rt_plan.RTPlanDate = datetime.datetime.today().strftime('%Y%m%d')
        rt_plan.RTPlanGeometry = ''
        rt_plan.RTPlanLabel = 'B1'
        rt_plan.RTPlanTime = datetime.datetime.today().strftime('%H%M%S')

        # Reference to the RT Structure Set this plan belongs to.
        structure_set_ref = Dataset()
        structure_set_ref.RefdSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3'  # RT Structure Set Storage
        structure_set_ref.RefdSOPInstanceUID = '1.2.3'
        rt_plan.RefdStructureSets = Sequence([structure_set_ref])

        # Single prescription dose reference targeting the tumor.
        dose_reference = Dataset()
        dose_reference.DoseReferenceNumber = 1
        dose_reference.DoseReferenceStructureType = 'SITE'
        dose_reference.DoseReferenceType = 'TARGET'
        dose_reference.TargetPrescriptionDose = self.target_dose
        dose_reference.DoseReferenceDescription = "TUMOR"
        rt_plan.DoseReferences = Sequence([dose_reference])
        return rt_plan
Esempio n. 4
0
def get_graphic_annotation(sop_class, sop_instance_uid, layer_name,
                           graphic_objects, text_objects):
    """Build one Graphic Annotation Sequence item referencing a single image.

    :param sop_class: Referenced SOP Class UID of the annotated image
    :param sop_instance_uid: Referenced SOP Instance UID of the annotated image
    :param layer_name: name of the graphic layer this annotation belongs to
    :param graphic_objects: iterable of graphic-object Datasets, or None
    :param text_objects: iterable of text-object Datasets, or None
    :returns: a Dataset suitable as a GraphicAnnotationSequence item
    """
    ds_graphic_annotation = Dataset()
    referenced_sequence_dataset = Dataset()
    referenced_sequence_dataset.ReferencedSOPClassUID = sop_class
    referenced_sequence_dataset.ReferencedSOPInstanceUID = sop_instance_uid
    ds_graphic_annotation.ReferencedImageSequence = Sequence(
        [referenced_sequence_dataset])
    ds_graphic_annotation.GraphicLayer = layer_name
    # Truthiness covers both None and empty containers in a single check.
    if graphic_objects:
        ds_graphic_annotation.GraphicObjects = Sequence(graphic_objects)
    if text_objects:
        ds_graphic_annotation.TextObjects = Sequence(text_objects)
    return ds_graphic_annotation
Esempio n. 5
0
 def testInvalidAssignment(self):
     """Sequence: validate exception for invalid assignment"""
     seq = Sequence([Dataset()])
     # Replacing a Sequence element with a non-Dataset (an int here)
     # must raise TypeError.
     self.assertRaises(TypeError, seq.__setitem__, 0, 1)
Esempio n. 6
0
    def create_dicom_contours(self):
        """ Creates and returns a list of Dicom CONTOUR objects from self.

        For Eclipse compatibility each contour is linked to the CT slice it
        lies on via that slice's SOP Instance UID.

        :returns: list of Dataset objects, one per contour in self.contour
        """
        # Default UID used when the CT slice cannot be identified
        # (i.e. the CT cube is not loaded).
        ref_sop_instance_uid = '1.2.3'

        if self.cube is not None:
            # Extract the DICOM representation of the cube (one dataset per
            # slice) and select the one matching this slice's position.
            # NOTE: regenerating the full DICOM representation on every call
            # is time consuming (improve!).
            candidates = [dcm for dcm in self.cube.create_dicom()
                          if dcm.SliceLocation == self.get_position()]
            if candidates:  # truthiness instead of len(...) > 0
                ref_sop_instance_uid = candidates[0].SOPInstanceUID

        contour_list = []
        for item in self.contour:
            con = Dataset()
            # Flatten the (x, y, z) points into one DICOM ContourData list.
            contour = []
            for p in item.contour:
                contour.extend([p[0], p[1], p[2]])
            con.ContourData = contour
            con.ContourGeometricType = 'CLOSED_PLANAR'
            # NOTE(review): standard keyword is NumberOfContourPoints
            # (capital O); kept as-is since the old dicom package accepted
            # this spelling — confirm before changing.
            con.NumberofContourPoints = item.number_of_points()
            cont_image_item = Dataset()
            cont_image_item.ReferencedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage SOP Class
            cont_image_item.ReferencedSOPInstanceUID = ref_sop_instance_uid  # CT slice Instance UID
            con.ContourImageSequence = Sequence([cont_image_item])
            contour_list.append(con)
        return contour_list
Esempio n. 7
0
def add_presentation_lut(dicom):  # LUT - Look Up Table for colors
    """Attach a 256-entry, 12-bit Presentation LUT (gamma 1.0) to *dicom*.

    The LUT maps 256 input values linearly onto the 12-bit range 0..4095.
    """
    ds_presentation_lut = Dataset()
    # LUTDescriptor: 256 entries, first mapped input 0, 12 bits per entry.
    ds_presentation_lut.LUTDescriptor = [256, 0, 12]
    # Force VR to US (unsigned short) for the descriptor element.
    ds_presentation_lut.data_element("LUTDescriptor").VR = "US"
    ds_presentation_lut.LUTExplanation = "LUT with gamma 1.0, descriptor 256/0/12"
    # 256 precomputed, monotonically increasing values spanning 0..4095.
    ds_presentation_lut.LUTData = [
        0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240,
        256, 273, 289, 305, 321, 337, 353, 369, 385, 401, 417, 433, 449, 465,
        481, 497, 513, 529, 546, 562, 578, 594, 610, 626, 642, 658, 674, 690,
        706, 722, 738, 754, 770, 786, 802, 819, 835, 851, 867, 883, 899, 915,
        931, 947, 963, 979, 995, 1011, 1027, 1043, 1059, 1075, 1092, 1108,
        1124, 1140, 1156, 1172, 1188, 1204, 1220, 1236, 1252, 1268, 1284, 1300,
        1316, 1332, 1348, 1365, 1381, 1397, 1413, 1429, 1445, 1461, 1477, 1493,
        1509, 1525, 1541, 1557, 1573, 1589, 1605, 1621, 1638, 1654, 1670, 1686,
        1702, 1718, 1734, 1750, 1766, 1782, 1798, 1814, 1830, 1846, 1862, 1878,
        1894, 1911, 1927, 1943, 1959, 1975, 1991, 2007, 2023, 2039, 2055, 2071,
        2087, 2103, 2119, 2135, 2151, 2167, 2184, 2200, 2216, 2232, 2248, 2264,
        2280, 2296, 2312, 2328, 2344, 2360, 2376, 2392, 2408, 2424, 2440, 2457,
        2473, 2489, 2505, 2521, 2537, 2553, 2569, 2585, 2601, 2617, 2633, 2649,
        2665, 2681, 2697, 2713, 2730, 2746, 2762, 2778, 2794, 2810, 2826, 2842,
        2858, 2874, 2890, 2906, 2922, 2938, 2954, 2970, 2986, 3003, 3019, 3035,
        3051, 3067, 3083, 3099, 3115, 3131, 3147, 3163, 3179, 3195, 3211, 3227,
        3243, 3259, 3276, 3292, 3308, 3324, 3340, 3356, 3372, 3388, 3404, 3420,
        3436, 3452, 3468, 3484, 3500, 3516, 3532, 3549, 3565, 3581, 3597, 3613,
        3629, 3645, 3661, 3677, 3693, 3709, 3725, 3741, 3757, 3773, 3789, 3805,
        3822, 3838, 3854, 3870, 3886, 3902, 3918, 3934, 3950, 3966, 3982, 3998,
        4014, 4030, 4046, 4062, 4078, 4095
    ]
    # Force VR to US for the LUT data as well.
    ds_presentation_lut.data_element("LUTData").VR = "US"
    dicom.PresentationLUTSequence = Sequence([ds_presentation_lut])
Esempio n. 8
0
def read_sequence(fp,
                  is_implicit_VR,
                  is_little_endian,
                  bytelength,
                  encoding,
                  offset=0):
    """Read and return a Sequence -- i.e. a list of Datasets.

    :param fp: file-like object positioned at the start of the SQ value
    :param is_implicit_VR: True if the stream uses implicit VR encoding
    :param is_little_endian: True if the stream is little endian
    :param bytelength: declared length of the SQ value in bytes; 0 means an
        empty sequence, 0xffffffff means undefined length (terminated by a
        Sequence Delimiter item)
    :param encoding: character encoding for the contained datasets
    :param offset: value added to the stored per-item file positions
    :returns: a Sequence with ``is_undefined_length`` set accordingly
    """
    seq = []  # use builtin list to start for speed, convert to Sequence at end
    is_undefined_length = False
    if bytelength != 0:  # SQ of length 0 possible (PS 3.5-2008 7.5.1a (p.40)
        if bytelength == 0xffffffff:
            is_undefined_length = True
            bytelength = None
        fp_tell = fp.tell  # hoist the bound method for speed in loop
        fp_start = fp_tell()
        while (not bytelength) or (fp_tell() - fp_start < bytelength):
            # Use the hoisted fp_tell consistently (original called fp.tell()
            # here, defeating the hoist).
            file_tell = fp_tell()
            dataset = read_sequence_item(fp, is_implicit_VR, is_little_endian,
                                         encoding, offset)
            if dataset is None:  # None is returned if hit Sequence Delimiter
                break
            dataset.file_tell = file_tell + offset
            seq.append(dataset)
    seq = Sequence(seq)
    seq.is_undefined_length = is_undefined_length
    return seq
Esempio n. 9
0
    def __init__(self, suid, showProgress=False, parent=None):
        """Default constructor.

        :param suid: Series Instance UID identifying this series
        :param showProgress: progress-reporting flag/callback stored as-is
        :param parent: forwarded to the superclass constructor
        """
        super(DicomSeries, self).__init__(suid, parent)

        # Init dataset list and the callback
        self._datasets = Sequence()
        self._showProgress = showProgress

        # Init properties (filled in later; empty/None until then)
        self._suid = suid
        self._modality = ""
        self._info = None
        self._shape = None
        self._sampling = None
        self._description = ""
        self._newDescription = ""
        self._date = ""
        self._time = ""

        # Study/file bookkeeping and report approval state
        self._studyInstanceUid = None
        self._files = []
        self._dsrDocuments = []
        self._approvedReportText = ""
        self._isApproved = False
Esempio n. 10
0
def add_displayed_area_selection(dicom, columns, rows):
    """Attach a DisplayedAreaSelections sequence covering the whole image.

    :param dicom: Dataset to receive the sequence
    :param columns: image width in pixels (bottom-right corner column)
    :param rows: image height in pixels (bottom-right corner row)
    """
    selection = Dataset()
    # Select the full image: top-left (1,1) through bottom-right (cols, rows).
    selection.DisplayedAreaTopLeftHandCorner = [1, 1]
    selection.DisplayedAreaBottomRightHandCorner = [columns, rows]
    selection.PresentationSizeMode = "SCALE TO FIT"
    selection.PresentationPixelAspectRatio = [1, 1]
    dicom.DisplayedAreaSelections = Sequence([selection])
Esempio n. 11
0
    def create_dicom(self):
        """ Creates a Dicom RT-Dose object from self.

        This function can be used to convert a TRiP98 Dose file to Dicom format.

        :returns: a Dicom RT-Dose object.
        :raises ModuleNotLoadedError: if the dicom module is not available
        :raises InputError: if no header has been loaded
        """

        # Both the dicom module and a parsed header are prerequisites.
        if not _dicom_loaded:
            raise ModuleNotLoadedError("Dicom")
        if not self.header_set:
            raise InputError("Header not loaded")

        # Start from the common base dataset, then fill in RTDOSE specifics.
        ds = self.create_dicom_base()
        ds.Modality = 'RTDOSE'
        ds.SamplesperPixel = 1  # NOTE(review): standard keyword is SamplesPerPixel — confirm this spelling is intended
        ds.BitsAllocated = self.num_bytes * 8
        ds.BitsStored = self.num_bytes * 8
        ds.AccessionNumber = ''
        ds.SeriesDescription = 'RT Dose'
        ds.DoseUnits = 'GY'
        ds.DoseType = 'PHYSICAL'
        ds.DoseGridScaling = self.target_dose / 10**5
        ds.DoseSummationType = 'PLAN'
        ds.SliceThickness = ''
        ds.InstanceCreationDate = '19010101'
        ds.InstanceCreationTime = '000000'
        ds.NumberOfFrames = len(self.cube)
        ds.PixelRepresentation = 0
        ds.StudyID = '1'
        ds.SeriesNumber = 14
        # One z-offset per slice, relative to the first frame.
        ds.GridFrameOffsetVector = [
            x * self.slice_distance for x in range(self.dimz)
        ]
        ds.InstanceNumber = ''
        # NOTE(review): duplicate of NumberOfFrames above with different
        # capitalization — likely only one is needed; confirm before removing.
        ds.NumberofFrames = len(self.cube)
        ds.PositionReferenceIndicator = "RF"
        ds.TissueHeterogeneityCorrection = ['IMAGE', 'ROI_OVERRIDE']
        # Patient-space position of the first voxel, as millimetre strings.
        ds.ImagePositionPatient = [
            "%.3f" % (self.xoffset * self.pixel_size),
            "%.3f" % (self.yoffset * self.pixel_size),
            "%.3f" % (self.slice_pos[0])
        ]
        ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.481.2'
        ds.SOPInstanceUID = '1.2.246.352.71.7.320687012.47206.20090603085223'
        ds.SeriesInstanceUID = '1.2.246.352.71.2.320687012.28240.20090603082420'

        # Bind to rtplan
        rt_set = Dataset()
        rt_set.RefdSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.5'
        rt_set.RefdSOPInstanceUID = '1.2.3'
        ds.ReferencedRTPlans = Sequence([rt_set])
        # Copy the dose cube into the pixel data buffer, slice by slice.
        pixel_array = np.zeros((len(self.cube), ds.Rows, ds.Columns),
                               dtype=self.pydata_type)
        pixel_array[:][:][:] = self.cube[:][:][:]
        # NOTE(review): ndarray.tostring() is a deprecated alias of tobytes().
        ds.PixelData = pixel_array.tostring()
        return ds
Esempio n. 12
0
    def testValidAssignment(self):
        """Sequence: ensure ability to assign a Dataset to a Sequence item"""
        ds = Dataset()
        ds.add_new((1, 1), 'IS', 1)

        # Start from a one-element Sequence, then replace that element.
        seq = Sequence([Dataset()])
        seq[0] = ds

        self.assertEqual(seq[0], ds, "Dataset modified during assignment")
Esempio n. 13
0
def add_graphic_layer(dicom, layer_name, layer_description, layer_order):
    """Append a GraphicLayer item, creating the sequence on first use.

    :param dicom: Dataset holding (or to receive) the GraphicLayers sequence
    :param layer_name: GraphicLayer identifier
    :param layer_description: free-text description of the layer
    :param layer_order: display order of the layer
    """
    layer = Dataset()
    layer.GraphicLayer = layer_name
    layer.GraphicLayerOrder = layer_order
    layer.GraphicLayerRecommendedDisplayGrayscaleValue = 65535
    layer.GraphicLayerDescription = layer_description

    existing = dicom.get("GraphicLayers")
    if existing:
        existing.append(layer)
    else:
        dicom.GraphicLayers = Sequence([layer])
Esempio n. 14
0
    def __init__(self, suid, showProgress):
        """Initialize the series with its UID and a progress callback.

        :param suid: Series Instance UID identifying this series
        :param showProgress: progress-reporting flag/callback stored as-is
        """
        # Init dataset list and the callback
        self._datasets = Sequence()
        self._showProgress = showProgress

        # Init props (filled in later; None until then)
        self._suid = suid
        self._info = None
        self._shape = None
        self._sampling = None
Esempio n. 15
0
    def create_dicom_plan(self):
        """ Create a dummy Dicom RT-plan object.

        The only data which is forwarded to this object, is self.patient_name.
        :returns: a Dicom RT-plan object.
        """
        # File meta: CT Image Storage, encoded as implicit VR little endian.
        file_meta = Dataset()
        file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage
        file_meta.MediaStorageSOPInstanceUID = "1.2.3"
        file_meta.ImplementationClassUID = "1.2.3.4"
        file_meta.TransferSyntaxUID = UID.ImplicitVRLittleEndian  # default transfer syntax

        plan = FileDataset("file", {}, file_meta=file_meta, preamble=b"\0" * 128)

        # Dummy patient demographics and study identification.
        plan.PatientsName = self.patient_name
        plan.PatientID = "123456"
        plan.PatientsSex = '0'
        plan.PatientsBirthDate = '19010101'
        plan.SpecificCharacterSet = 'ISO_IR 100'
        plan.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage
        plan.StudyInstanceUID = '1.2.3'
        plan.SOPInstanceUID = '1.2.3'

        # RT Plan series attributes.
        plan.Modality = "RTPLAN"
        plan.SeriesDescription = 'RT Plan'
        plan.SeriesInstanceUID = '2.16.840.1.113662.2.12.0.3057.1241703565.43'
        plan.RTPlanDate = '19010101'
        plan.RTPlanGeometry = ''
        plan.RTPlanLabel = 'B1'
        plan.RTPlanTime = '000000'

        # Reference to the RT Structure Set this plan belongs to.
        structure_set_ref = Dataset()
        structure_set_ref.RefdSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3'  # RT Structure Set Storage
        structure_set_ref.RefdSOPInstanceUID = '1.2.3'
        plan.RefdStructureSets = Sequence([structure_set_ref])

        # Single target dose reference for the tumor.
        dose_reference = Dataset()
        dose_reference.DoseReferenceNumber = 1
        dose_reference.DoseReferenceStructureType = 'SITE'
        dose_reference.DoseReferenceType = 'TARGET'
        dose_reference.TargetPrescriptionDose = self.target_dose
        dose_reference.DoseReferenceDescription = "TUMOR"
        plan.DoseReferences = Sequence([dose_reference])
        return plan
Esempio n. 16
0
    def testValidInitialization(self):
        """Sequence: Ensure valid creation of Sequences using Dataset inputs"""
        patient_setup = Dataset()
        patient_setup.update({'PatientPosition': 'HFS',
                              'PatientSetupNumber': '1',
                              'SetupTechniqueDescription': ''})

        # Wrap the dataset in a Sequence and verify it is stored untouched.
        seq = Sequence((patient_setup,))
        self.assertTrue(isinstance(seq[0], Dataset),
                        "Dataset modified during Sequence creation")
Esempio n. 17
0
    def create_dicom_contour_data(self, i):
        """ Based on self.slices, Dicom contours are generated for the Dicom ROI.

        :param i: index of the VOI, used to select the display color
        :returns: Dicom ROI_CONTOURS
        """
        roi_contours = Dataset()
        contours = []
        # 'voi_slice' rather than 'slice' so the builtin is not shadowed.
        for voi_slice in self.slices.values():
            contours.extend(voi_slice.create_dicom_contours())
        roi_contours.Contours = Sequence(contours)
        roi_contours.ROIDisplayColor = self.get_color(i)

        return roi_contours
    def _convert_value(self, val):
        """Convert Dicom string values if possible to e.g. numbers.

        Handles the case of multiple value data_elements: list-like values
        are converted element by element; sequences are passed through.
        """
        if self.VR == 'SQ':  # a sequence - leave it alone
            from dicom.sequence import Sequence
            return val if isinstance(val, Sequence) else Sequence(val)

        # List-like values (anything with .append) convert element-wise;
        # scalars go straight through _convert.
        try:
            val.append
        except AttributeError:  # not a list
            return self._convert(val)
        else:
            return [self._convert(item) for item in val]
Esempio n. 19
0
    def __init__(self, suid, showProgress):
        """Initialize the series with its UID and a progress callback.

        :param suid: Series Instance UID identifying this series
        :param showProgress: progress-reporting flag/callback stored as-is
        """
        # Init dataset list and the callback
        self._datasets = Sequence()
        self._showProgress = showProgress

        # Init props (filled in later; None until then)
        self._suid = suid
        self._info = None
        self._shape = None
        self._sampling = None

        # RJH: uniform-orientation flag, used to aid in classifying series.
        self._uniform_orientation = None

        # RJH: norm(IPP[last] - IPP[first]) — a 3D analog of
        # ImageOrientationPatient describing the relative position between
        # voxels of the same row and column in adjacent slices.
        self._slice_direction = None
Esempio n. 20
0
    def _storeIdentity(self, dcmFile):
        """Encrypt identifying attributes of ``dcmFile`` into an Encrypted
        Attributes Sequence (0400,0500) and mark the file de-identified.

        The original attribute values are serialized with pickle, encrypted
        via ``self._svcCrypto`` and stored back on the dataset so identity
        can later be restored by the reverse operation.

        :param dcmFile: pydicom FileDataset to de-identify in place
        """
        # Collect the attributes to keep and encrypt. The table replaces six
        # copy-pasted if-blocks; iteration order preserves the original order.
        protectedAttributes = []
        for keyword, tag in (
                ("FrameOfReferenceUID", (0x0020, 0x0052)),
                ("PatientID", (0x0010, 0x0020)),
                ("PatientName", (0x0010, 0x0010)),
                ("PatientBirthDate", (0x0010, 0x0030)),
                ("SOPInstanceUID", (0x0008, 0x0018)),
                ("StudyInstanceUID", (0x0020, 0x000D))):
            if keyword in dcmFile:
                # One single-attribute Dataset per protected element.
                protected = Dataset()
                protected[tag] = dcmFile[tag]
                protectedAttributes.append(protected)

        # Instance of Encrypted Attributes Data Set; the protected values go
        # into its Modified Attributes Sequence (0400,0550).
        encryptedAttributesDs = Dataset()
        t = dicom.tag.Tag((0x400, 0x550))
        encryptedAttributesDs[t] = dicom.dataelem.DataElement(
            t, "SQ", Sequence(protectedAttributes))

        # Serialize the original DICOM data, then encrypt the bytes.
        # NOTE(review): pickle is used for serialization here — the matching
        # restore path must only ever unpickle self-produced, decrypted data.
        encryptedDicomAttributes = pickle.dumps(encryptedAttributesDs)
        encryptedData = self._svcCrypto.encrypt(encryptedDicomAttributes)

        # Encrypted Attributes Sequence item with two attributes:
        item = Dataset()

        # Encrypted Content Transfer Syntax UID (0400,0510): the transfer
        # syntax used to encode the Encrypted Attributes Data Set instance.
        t = dicom.tag.Tag((0x400, 0x510))
        item[t] = dicom.dataelem.DataElement(
            t, "UI", dcmFile.file_meta[0x0002, 0x0010].value)

        # Encrypted Content (0400,0520): the encrypted serialized dataset.
        t = dicom.tag.Tag((0x400, 0x520))
        item[t] = dicom.dataelem.DataElement(t, "OB", encryptedData)

        # Encrypted Attributes Sequence (0400,0500) wraps the item above.
        t = dicom.tag.Tag((0x400, 0x500))
        dcmFile[t] = dicom.dataelem.DataElement(t, "SQ", Sequence([item]))

        # Patient Identity Removed (0012,0062) = YES.
        t = dicom.tag.Tag((0x12, 0x62))
        dcmFile[t] = dicom.dataelem.DataElement(t, "CS", "YES")

        # Dataset carrying the codes of the applied de-identification
        # profiles and options.
        profilesOptionsDs = Dataset()

        # De-identification Method Coding Scheme Designator (0008,0102).
        # NOTE(review): created with VR "DS" here; the standard VR for
        # (0008,0102) is SH — confirm this is intentional before changing.
        t = dicom.tag.Tag((0x8, 0x102))
        profilesOptionsDs[t] = dicom.dataelem.DataElement(
            t, "DS",
            MultiValue(dicom.valuerep.DS,
                       self._deidentConfig.GetAppliedMethodCodes()))

        # De-identification Method Code Sequence (0012,0064) holds the
        # dataset of profile/option codes.
        t = dicom.tag.Tag((0x12, 0x64))
        dcmFile[t] = dicom.dataelem.DataElement(t, "SQ",
                                                Sequence([profilesOptionsDs]))
Esempio n. 21
0
    def create_dicom(self):
        """ Generates and returns Dicom RTSTRUCT object, which holds all VOIs.

        :returns: a Dicom RTSTRUCT object holding any VOIs.
        :raises ModuleNotLoadedError: if the dicom module is not available
        """
        if _dicom_loaded is False:
            raise ModuleNotLoadedError("Dicom")
        # File meta: RT Structure Set Storage, implicit VR little endian.
        meta = Dataset()
        meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3'  # RT Structure Set Storage SOP Class
        # see https://github.com/darcymason/pydicom/blob/master/pydicom/_uid_dict.py
        meta.MediaStorageSOPInstanceUID = "1.2.3"
        meta.ImplementationClassUID = "1.2.3.4"
        meta.TransferSyntaxUID = UID.ImplicitVRLittleEndian  # Implicit VR Little Endian - Default Transfer Syntax
        ds = FileDataset("file", {}, file_meta=meta, preamble=b"\0" * 128)
        # Patient name is taken from the CT cube when one is loaded.
        if self.cube is not None:
            ds.PatientsName = self.cube.patient_name
        else:
            ds.PatientsName = ""
        ds.PatientID = "123456"
        ds.PatientsSex = '0'
        ds.PatientsBirthDate = '19010101'
        ds.SpecificCharacterSet = 'ISO_IR 100'
        ds.AccessionNumber = ''
        ds.is_little_endian = True
        ds.is_implicit_VR = True
        ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3'  # RT Structure Set Storage SOP Class
        # Placeholder UIDs, dates and times below — replace with real values.
        ds.SOPInstanceUID = '1.2.3'
        ds.StudyInstanceUID = '1.2.3'
        ds.SeriesInstanceUID = '1.2.3'
        ds.FrameofReferenceUID = '1.2.3'
        ds.SeriesDate = '19010101'
        ds.ContentDate = '19010101'
        ds.StudyDate = '19010101'
        ds.SeriesTime = '000000'
        ds.StudyTime = '000000'
        ds.ContentTime = '000000'
        ds.StructureSetLabel = 'pyTRiP plan'
        ds.StructureSetDate = '19010101'
        ds.StructureSetTime = '000000'
        ds.StructureSetName = 'ROI'
        ds.Modality = 'RTSTRUCT'
        roi_label_list = []
        roi_data_list = []
        roi_structure_roi_list = []

        # To get DICOM which can be loaded in Eclipse we need to store
        # information about the UIDs of all slices in the CT; only possible
        # when the DICOM cube is loaded.
        if self.cube is not None:
            rt_ref_series_data = Dataset()
            rt_ref_series_data.SeriesInstanceUID = '1.2.3.4.5'
            rt_ref_series_data.ContourImageSequence = Sequence([])

            # each CT slice corresponds to one DICOM file
            for slice_dicom in self.cube.create_dicom():
                slice_dataset = Dataset()
                slice_dataset.ReferencedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage SOP Class
                slice_dataset.ReferencedSOPInstanceUID = slice_dicom.SOPInstanceUID  # most important - slice UID
                rt_ref_series_data.ContourImageSequence.append(slice_dataset)

            rt_ref_study_seq_data = Dataset()
            rt_ref_study_seq_data.ReferencedSOPClassUID = '1.2.840.10008.3.1.2.3.2'  # Study Component Management Class
            rt_ref_study_seq_data.ReferencedSOPInstanceUID = '1.2.3.4.5'
            rt_ref_study_seq_data.RTReferencedSeriesSequence = Sequence([rt_ref_series_data])

            rt_ref_frame_study_data = Dataset()
            rt_ref_frame_study_data.RTReferencedStudySequence = Sequence([rt_ref_study_seq_data])
            rt_ref_frame_study_data.FrameOfReferenceUID = '1.2.3.4.5'
            ds.ReferencedFrameOfReferenceSequence = Sequence([rt_ref_frame_study_data])

        # Build the three parallel per-VOI lists; ROI numbering is 1-based.
        for i in range(self.number_of_vois()):
            roi_label = self.vois[i].create_dicom_label()
            roi_label.ObservationNumber = str(i + 1)
            roi_label.ReferencedROINumber = str(i + 1)
            roi_label.RefdROINumber = str(i + 1)
            roi_contours = self.vois[i].create_dicom_contour_data(i)
            roi_contours.RefdROINumber = str(i + 1)
            roi_contours.ReferencedROINumber = str(i + 1)

            roi_structure_roi = self.vois[i].create_dicom_structure_roi()
            roi_structure_roi.ROINumber = str(i + 1)

            roi_structure_roi_list.append(roi_structure_roi)
            roi_label_list.append(roi_label)
            roi_data_list.append(roi_contours)
        ds.RTROIObservations = Sequence(roi_label_list)
        ds.ROIContours = Sequence(roi_data_list)
        ds.StructureSetROIs = Sequence(roi_structure_roi_list)
        return ds
Esempio n. 22
0
def add_graphic_annotations(dicom, graphic_annotations):
    """Store the given annotation datasets as the GraphicAnnotations sequence.

    :param dicom: Dataset to receive the sequence
    :param graphic_annotations: iterable of graphic-annotation Datasets
    """
    dicom.GraphicAnnotations = Sequence(graphic_annotations)
Esempio n. 23
0
def ParseFDF(ds, fdf_properties, procpar, args):
    """
    ParseFDF modify the dicom dataset structure based on FDF
    header information.

    Comment text copied from VNMRJ Programming.pdf

    :param ds:       Dicom dataset
    :param fdf_properties: Dict of fdf header label/value pairs
    :param procpar:  Dict of procpar label/value pairs
    :param args:     Argparse object
    :return ds:      Return updated dicom dataset struct
    :return fdfrank: Number of dimensions (rank) of fdf file
    :return fdf_size_matrix: In-plane (and through-plane, for 3D) matrix
        size taken from the fdf header
    :return ImageTransformationMatrix: Transformation matrix returned by
        ProcparToDicomMap.CalcTransMatrix
    """

    # if procpar['recon'] == 'external' and fdf_properties['rank'] == '3'
    # and procpar:
    #     fdf_tmp = fdf_properties['roi']
    #     fdf_properties['roi'][0:1] = fdf_tmp[1:2]
    #     fdf_properties['roi'][2] = fdf_tmp[0]
    #     fdf_tmp = fdf_properties['matrix']
    #     fdf_properties['matrix'][0:1] = fdf_tmp[1:2]
    #     fdf_properties['matrix'][2] = fdf_tmp[0]

    #----------------------------------------------------------
    # General implementation checks
    filename = fdf_properties['filename']

    # File dimensionality or Rank fields
    # rank is a positive integer value `(1, 2, 3, 4,...) giving the
    # number of dimensions in the data file (e.g., int rank=2;).
    fdfrank = fdf_properties['rank']
    acqndims = procpar['acqdim']
    CommentStr = '''Acquisition dimensionality (ie 2D or 3D) does not
    match between fdf and procpar'''
    AssumptionStr = '''Procpar nv2 > 0 indicates 3D acquisition and
    fdf rank property indicates dimensionality.\n''' +\
        'Using local FDF value ' + \
        str(fdfrank) + ' instead of procpar value ' + str(acqndims) + '.'
    if args.verbose:
        print 'Acqdim (type): ' + ds.MRAcquisitionType + " acqndims " + str(
            acqndims)

    AssertImplementation(acqndims != fdfrank, filename, CommentStr,
                         AssumptionStr)

    # matrix is a set of rank integers giving the number of data
    # points in each dimension (e.g., for rank=2, float
    # matrix[]={256,256};)
    if fdfrank == 3:
        fdf_size_matrix = fdf_properties['matrix'][0:3]
    else:
        fdf_size_matrix = fdf_properties['matrix'][0:2]
    if args.verbose:
        print "FDF size matrix ", fdf_size_matrix, type(fdf_size_matrix)
    #fdf_size_matrix = numpy.array(fdf_matrix)

    # spatial_rank is a string ("none", "voxel", "1dfov", "2dfov",
    # "3dfov") for the type of data (e.g., char
    # *spatial_rank="2dfov";).
    spatial_rank = fdf_properties['spatial_rank']

    #  0018,0023 MR Acquisition Type (optional)
    # Identification of spatial data encoding scheme.
    # Defined Terms: 1D 2D 3D
    fdf_MRAcquisitionType = '2D'
    if spatial_rank == "3dfov":
        fdf_MRAcquisitionType = '3D'
    CommentStr = 'MR Acquisition type does not match between fdf and procpar'
    AssumptionStr = '''In fdf, MR Acquisition type defined by spatial_rank
    and matrix. \n
        For 2D, spatial_rank="2dfov" and matrix has two elements eg.
    {256,256}. \n
        For 3D, spatial_rank="3dfov" and matrix has three elements.\n
        In procpar, MR Acquisition type is defined by nv2 > 0 or lpe2 > 0.\n
        Using local FDF value ''' + fdf_MRAcquisitionType + \
        ' instead of procpar value ' + ds.MRAcquisitionType + '.'
    AssertImplementation(ds.MRAcquisitionType != fdf_MRAcquisitionType,
                         filename, CommentStr, AssumptionStr)
    ds.MRAcquisitionType = fdf_MRAcquisitionType

    # Data Content Fields
    # The following entries define the data type and size.
    #  - storage is a string ("integer", "float") that defines the data
    # type (e.g., char *storage="float";).
    #  - bits is an integer (8, 16, 32, or 64) that defines the size of the
    # data (e.g., float bits=32;).
    # - type is a string ("real", "imag", "absval", "complex") that defines the
    # numerical data type (e.g., char *type="absval";).

    # roi is the size of the acquired data volume (three floating
    # point values), in centimeters, in the user's coordinate frame,
    # not the magnet frame (e.g., float roi[]={10.0,15.0,0.208};). Do
    # not confuse this roi with ROIs that might be specified inside
    # the data set.
    if fdfrank == 3:
        roi = fdf_properties['roi'][0:3]
    else:
        roi = fdf_properties['roi'][0:2]
    if args.verbose:
        print "FDF roi ", roi, type(roi)
    #roi = numpy.array(roi_text)

    # PixelSpacing - 0028,0030 Pixel Spacing (mandatory)
    # roi is in cm; *10 converts to mm per DICOM convention.
    PixelSpacing = map(lambda x, y: x * 10.0 / y, roi, fdf_size_matrix)
    if PixelSpacing[0] != ds.PixelSpacing[0] or \
       PixelSpacing[1] != ds.PixelSpacing[1]:
        print "Pixel spacing mismatch, procpar ", ds.PixelSpacing, " fdf spacing ", str(
            PixelSpacing[0]), ', ', str(PixelSpacing[1])
    if args.verbose:
        print "Pixel Spacing : Procpar   ", ds.PixelSpacing
        print "Pixel Spacing : FDF props ", PixelSpacing
    # (0028,0030) Pixel Spacing
    ds.PixelSpacing = [str(PixelSpacing[0]), str(PixelSpacing[1])]

    # FDF slice thickness
    if fdfrank == 3:
        fdfthk = fdf_properties['roi'][2] / fdf_properties['matrix'][2] * 10
    else:
        fdfthk = fdf_properties['roi'][2] * 10.0

    CommentStr = 'Slice thickness does not match between fdf and procpar'
    AssumptionStr = '''In fdf, slice thickness defined by roi[2] for 2D or
    roi[2]/matrix[2].\n
        In procpar, slice thickness defined by thk (2D) or lpe2*10/(fn2/2) or
    lpe2*10/nv2.\n
        Using local FDF value ''' + str(
        fdfthk) + ' instead of procpar value ' + str(ds.SliceThickness) + '.'
    if args.verbose:
        print 'fdfthk : ' + str(fdfthk)
        print 'SliceThinkness: ' + str(ds.SliceThickness)

    SliceThickness = float(ds.SliceThickness)

    # fix me Quick hack to avoid assert errors for diffusion and 3D magnitude
    # images
    # if not ('diff' in procpar.keys() and procpar["diff"] == 'y'):
    #	 if MRAcquisitionType == '3D':
    #	     print 'Not testing slicethickness in diffusion and 3D MR FDFs'
    #	else:
    AssertImplementation(SliceThickness != fdfthk, filename, CommentStr,
                         AssumptionStr)

    # Slice Thickness 0018,0050 Slice Thickness (optional)
    if fdfrank == 3:
        if len(PixelSpacing) != 3:
            print "Slice thickness: 3D procpar spacing not available"
            print " fdfthk ", fdfthk
        else:
            if PixelSpacing[2] != ds.SliceThickness:
                print "Slice Thickness mismatch, procpar ", ds.SliceThickness, " fdf spacing ", PixelSpacing[
                    2], fdfthk

    # Force slice thickness to be from fdf props
    ds.SliceThickness = str(fdfthk)
    SliceThickness = fdfthk

    #-------------------------------------------------------------------------
    # GROUP 0020: Relationship

    ds.ImageComments = A2D.FDF2DCM_Image_Comments + \
        '\n' + fdf_properties['filetext']

    # NOTE(review): location/span are in cm in the fdf header; *10 converts
    # to mm before the transformation-matrix calculation.
    orientation = numpy.array(fdf_properties['orientation']).reshape(3, 3)
    location = numpy.array(fdf_properties['location']) * 10.0
    span = numpy.array(numpy.append(fdf_properties['span'], 0) * 10.0)

    if args.verbose:
        print "FDF Span: ", span, span.shape
        print "FDF Location: ", location, location.shape

    ds, ImageTransformationMatrix = ProcparToDicomMap.CalcTransMatrix(
        ds, orientation, location, span, fdfrank, PixelSpacing, SliceThickness)

    # Nuclear Data Fields
    # Data fields may contain data generated by
    # interactions between more than one nucleus (e.g., a 2D chemical shift
    # correlation map between protons and carbon). Such data requires
    # interpreting the term ppm for the specific nucleus, if ppm to frequency
    # conversions are necessary, and properly labeling axes arising from
    # different nuclei. To properly interpret ppm and label axes, the identity
    # of the nucleus in question and the corresponding nuclear resonance
    # frequency are needed. These fields are related to the abscissa values
    # "ppm1", "ppm2", and "ppm3" in that the 1, 2, and 3 are indices into the
    # nucleus and nucfreq fields. That is, the nucleus for the axis with
    # abscissa string "ppm1" is the first entry in the nucleus field.  -
    # nucleus is one entry ("H1", "F19", same as VNMR tn parameter) for each rf
    # channel (e.g., char *nucleus[]={"H1","H1"};).  - nucfreq is the nuclear
    # frequency (floating point) used for each rf channel (e.g., float
    # nucfreq[]={200.067,200.067};).

    if fdf_properties['nucleus'][0] != ds.ImagedNucleus:
        print 'Imaged nucleus mismatch: ', fdf_properties['nucleus'], \
            ds.ImagedNucleus
    if math.fabs(fdf_properties['nucfreq'][0] -
                 float(ds.ImagingFrequency)) > 0.01:
        print 'Imaging frequency mismatch: ', fdf_properties['nucfreq'], \
            ds.ImagingFrequency

    # Change patient position and orientation in
    # if procpar['recon'] == 'external' and fdf_properties['rank'] == '3':

    #-------------------------------------------------------------------------
    # GROUP 0028: Image Presentation
    # A good short description of this section can be found here:
    # http://dicomiseasy.blogspot.com.au/2012/08/chapter-12-pixel-data.html

    # Implementation check
    CommentStr = 'Number of rows does not match between fdf and procpar'
    AssumptionStr = '''In FDF, number of rows is defined by
        matrix[1]. \n In procpar, for 3D datasets number of rows is
        either fn1/2 or nv (%s ,%s).\n For 2D datasets, number of
        rows is fn/2.0 or np (%s , %s).\n Using local FDF value %s
        instead of procpar value %s.
    ''' % (str(procpar['fn1'] / 2.0), str(
        procpar['nv']), str(procpar['fn'] / 2.0), str(
            procpar['np']), str(fdf_properties['matrix'][1]), str(ds.Rows))
    AssertImplementation(
        int(float(ds.Rows)) != int(fdf_properties['matrix'][1]), filename,
        CommentStr, AssumptionStr)
    if args.verbose:
        print 'Rows ', procpar['fn'] / 2.0, procpar['fn1'] / 2.0, \
            procpar['nv'], procpar['np'] / 2.0
        print '   Procpar: rows ', ds.Rows
        print '   FDF prop rows ', fdf_properties['matrix'][1]
    ds.Rows = fdf_properties['matrix'][1]  # (0028,0010) Rows

    # Implementation check
    CommentStr = 'Number of columns does not match between fdf and procpar'
    AssumptionStr = '''In FDF, number of columns is defined by
    matrix[0]. \n In procpar, for 3D datasets number of columns is
    either fn/2 or np (%s,%s).\n For 2D datasets, number of rows is
    fn1/2.0 or nv (%s ,%s).\n Using local FDF value %s instead of
    procpar value %s.
    ''' % (str(procpar['fn'] / 2.0), str(
        procpar['np']), str(procpar['fn1'] / 2.0), str(
            procpar['nv']), str(fdf_properties['matrix'][0]), str(ds.Columns))
    AssertImplementation(
        int(float(ds.Columns)) != int(fdf_properties['matrix'][0]), filename,
        CommentStr, AssumptionStr)
    if args.verbose:
        print 'Columns ', procpar['fn'] / 2.0, procpar['fn1'] / 2.0, \
            procpar['nv'], procpar['np'] / 2.0, fdf_properties['matrix'][0]
        print '   Procpar: Cols ', ds.Rows
        print '   FDF prop Cols ', fdf_properties['matrix'][0]
    ds.Columns = fdf_properties['matrix'][0]  # (0028,0011) Columns

    #-------------------------------------------------------------------------
    # Number of frames
    # DICOMHDR code:
    #	  elseif $tag='(0028,0008)' then	" no of frames "
    #	    $dim = 2  "default 2D"
    #	    exists('nv2','parameter'):$ex
    #	    if($ex > 0) then
    #	      if(nv2 > 0) then
    #		on('fn2'):$on		"3D data"
    #		if($on) then
    #		  $pe2 = fn2/2.0
    #		else
    #		  $pe2 = nv2
    #		endif
    #		$dim = 3
    #	      endif
    #	    endif
    #
    #	    if ($dim = 3) then
    #	      $f = $pe2	   "no of frames for 3D"
    #	    else
    #	      substr(seqcon,3,1):$spe1
    #	      if($spe1 = 's') then
    #		$f = (ns * (arraydim/nv) * ne)	 "sems type"
    #	      else
    #		$f = (ns * arraydim * ne)	"compressed gems type"
    #	      endif
    #	      if($imagesout='single') then
    #		$f = $f	"single image output: frames=(no_of_slices * \
    #    array_size * ne)"
    #	      else
    #		$f = 1				" single frame"
    #	      endif
    #	    endif
    #	   $fs=''
    #	    format($f,0,0):$fs
    #	    $value='['+$fs+']'
    #	    if $DEBUG then write('alpha','    new value = "%s"',$value) endif

    # if fdfrank == 3:
    #	 ds.NumberOfFrames = fdf_properties['matrix'][2]

    # dicom3tool uses frames to create enhanced MR
    # ds.NumberOfFrames = fdf_properties['slices']
    # ds.FrameAcquisitionNumber = fdf_properties['slice_no']

    #    if 'ne' in procpar.keys() and procpar['ne'] > 1:
    #	 print 'Processing multi-echo sequence image'

    # NOTE(review): 'volume' is assigned here but never used in this
    # function -- presumably leftover from an earlier revision.
    if 'echo_no' in fdf_properties.keys():
        volume = fdf_properties['echo_no']

    if (len(ds.ImageType) >= 3 and ds.ImageType[2] == "MULTIECHO") and \
       ('echoes' in fdf_properties.keys() and fdf_properties['echoes'] > 1):
        print 'Multi-echo sequence'
        # TE 0018,0081 Echo Time (in ms) (optional)
        if 'TE' in fdf_properties.keys():
            if ds.EchoTime != str(fdf_properties['TE']):
                print "Echo Time mismatch: ", ds.EchoTime, fdf_properties['TE']
            ds.EchoTime = str(fdf_properties['TE'])
        # 0018,0086 Echo Number (optional)
    if 'echo_no' in fdf_properties.keys():
        if ds.EchoNumber != fdf_properties['echo_no']:
            print "Echo Number mismatch: ", ds.EchoNumber,\
                fdf_properties['echo_no']
        ds.EchoNumber = fdf_properties['echo_no']

    if len(ds.ImageType) >= 3 and ds.ImageType[2] == "ASL":
        ds = ParseASL(ds, procpar, fdf_properties)

    # if 'echoes' in fdf_properties.keys() and fdf_properties['echoes'] > 1 \
    #    and fdf_properties['array_dim'] == 1:
    #    ds.AcquisitionNumber = fdf_properties['echo_no']
    #    ds.ImagesInAcquisition = fdf_properties['echoes']
    # else:

    ds.AcquisitionNumber = fdf_properties['array_index']
    if 'array_dim' in fdf_properties.keys():
        ds.ImagesInAcquisition = fdf_properties['array_dim']
    else:
        ds.ImagesInAcquisition = 1

    # if len(ds.ImageType) >= 3 and
    # NOTE(review): unlike the checks above, this access is unguarded and
    # will raise IndexError when ImageType has fewer than 3 entries --
    # the 'len >= 3' guard was left commented out.
    if ds.ImageType[2] == 'DIFFUSION':
        ds = ParseDiffusionFDF(ds, procpar, fdf_properties, args)

    # Multi dimension Organisation and Index module
    DimOrgSeq = Dataset()
    # ds.add_new((0x0020,0x9164), 'UI', DimensionOrganizationUID)

    # or SEQUENCE == "Diffusion":
    if (len(ds.ImageType) >= 3 and ds.ImageType[2] == "MULTIECHO") or (
            ds.ImageType[2] == "DIFFUSION" and ds.AcquisitionNumber == 1):
        DimensionOrganizationUID = [
            ProcparToDicomMap.CreateUID(A2D.UID_Type_DimensionIndex1, [], [],
                                        args.verbose),
            ProcparToDicomMap.CreateUID(A2D.UID_Type_DimensionIndex2, [], [],
                                        args.verbose)
        ]
        DimOrgSeq.add_new((0x0020, 0x9164), 'UI', DimensionOrganizationUID)
        ds.DimensionOrganizationType = '3D_TEMPORAL'  # or 3D_TEMPORAL
    else:
        DimensionOrganizationUID = ProcparToDicomMap.CreateUID(
            A2D.UID_Type_DimensionIndex1, [], [], args.verbose)
        # if args.verbose:
        #    print "DimUID", DimensionOrganizationUID
        DimOrgSeq.add_new((0x0020, 0x9164), 'UI', [DimensionOrganizationUID])
        ds.DimensionOrganizationType = '3D'  # or 3D_TEMPORAL

    ds.DimensionOrganizationSequence = Sequence([DimOrgSeq])

    if len(ds.ImageType) >= 3 and ds.ImageType[2] == 'MULTIECHO':
        DimIndexSeq1 = Dataset()
        # Image position patient 20,32 or 20,12
        DimIndexSeq1.DimensionIndexPointer = (0x0020, 0x0032)

        # #DimIndexSeq1.DimensionIndexPrivateCreator=
        # #DimIndexSeq1.FunctionalGroupPointer=
        # #DimIndexSeq1.FunctionalGroupPrivateCreator=
        DimIndexSeq1.add_new((0x0020, 0x9164), 'UI',
                             DimOrgSeq.DimensionOrganizationUID[0])
        DimIndexSeq1.DimensionDescriptionLabel = 'Third Spatial dimension'

        DimIndexSeq2 = Dataset()
        DimIndexSeq2.DimensionIndexPointer = (0x0018, 0x0081)  # Echo Time
        # DimIndexSeq2.DimensionIndexPrivateCreator=
        # DimIndexSeq2.FunctionalGroupPointer=
        # DimIndexSeq2.FunctionalGroupPrivateCreator=
        DimIndexSeq2.add_new((0x0020, 0x9164), 'UI',
                             DimOrgSeq.DimensionOrganizationUID[1])
        DimIndexSeq2.DimensionDescriptionLabel = 'Fourth dimension (multiecho)'
        ds.DimensionIndexSequence = Sequence([DimIndexSeq2, DimIndexSeq1])

    elif (ds.ImageType[2] == "DIFFUSION" and ds.AcquisitionNumber == 1):
        DimIndexSeq1 = Dataset()
        # Image position patient 20,32 or 20,12
        DimIndexSeq1.DimensionIndexPointer = (0x0020, 0x0032)

        # #DimIndexSeq1.DimensionIndexPrivateCreator=
        # #DimIndexSeq1.FunctionalGroupPointer=
        # #DimIndexSeq1.FunctionalGroupPrivateCreator=
        DimIndexSeq1.add_new((0x0020, 0x9164), 'UI',
                             DimOrgSeq.DimensionOrganizationUID[0])
        DimIndexSeq1.DimensionDescriptionLabel = 'Third Spatial dimension'

        DimIndexSeq2 = Dataset()
        DimIndexSeq2.DimensionIndexPointer = (0x0018, 0x9087
                                              )  # Diffusion b-value
        # DimIndexSeq2.DimensionIndexPrivateCreator=
        # DimIndexSeq2.FunctionalGroupPointer=
        # DimIndexSeq2.FunctionalGroupPrivateCreator=
        DimIndexSeq2.add_new((0x0020, 0x9164), 'UI',
                             DimOrgSeq.DimensionOrganizationUID[1])
        DimIndexSeq2.DimensionDescriptionLabel = 'Fourth dimension (diffusion b value)'
        ds.DimensionIndexSequence = Sequence([DimIndexSeq2, DimIndexSeq1])
    else:
        DimIndexSeq1 = Dataset()
        # Image position patient 20,32 or 20,12
        DimIndexSeq1.DimensionIndexPointer = (0x0020, 0x0032)
        # #DimIndexSeq1.DimensionIndexPrivateCreator=
        # #DimIndexSeq1.FunctionalGroupPointer=
        # #DimIndexSeq1.FunctionalGroupPrivateCreator=
        DimIndexSeq1.add_new((0x0020, 0x9164), 'UI',
                             [DimensionOrganizationUID])
        DimIndexSeq1.DimensionDescriptionLabel = 'Third Spatial dimension'
        ds.DimensionIndexSequence = Sequence([DimIndexSeq1])

        # Module: Image Pixel (mandatory)
        # Reference: DICOM Part 3: Information Object Definitions C.7.6.3
        # ds.Rows                     # 0028,0010 Rows (mandatory)
        # ds.Columns                  # 0028,0011 Columns (mandatory)
        # ds.BitsStored               # 0028,0101 (mandatory)
        # ds.HighBit                  # 0028,0102 (mandatory)
        # ds.PixelRepresentation# 0028,0103 Pixel Representation (mandatory)
        # ds.PixelData               #
        # 7fe0,0010 Pixel Data (mandatory)

    FrameContentSequence = Dataset()
    # FrameContentSequence.FrameAcquisitionNumber = '1'
    # fdf_properties['slice_no']
    # FrameContentSequence.FrameReferenceDateTime
    # FrameContentSequence.FrameAcquisitionDateTime
    # FrameContentSequence.FrameAcquisitionDuration
    # FrameContentSequence.CardiacCyclePosition
    # FrameContentSequence.RespiratoryCyclePosition
    # FrameContentSequence.DimensionIndexValues = 1 #islice
    # --- fdf_properties['array_no']
    # FrameContentSequence.TemporalPositionIndex = 1
    FrameContentSequence.StackID = [str(1)]  # fourthdimid
    FrameContentSequence.InStackPositionNumber = [int(1)]  # fourthdimindex
    FrameContentSequence.FrameComments = fdf_properties['filetext']
    FrameContentSequence.FrameLabel = 'DimX'
    ds.FrameContentSequence = Sequence([FrameContentSequence])

    return ds, fdfrank, fdf_size_matrix, ImageTransformationMatrix
Esempio n. 24
0
 def testDefaultInitialization(self):
     """Sequence: Ensure a valid Sequence is created"""
     # A no-arg Sequence must start out with no items.
     fresh_sequence = Sequence()
     self.assertTrue(len(fresh_sequence) == 0, "Non-empty Sequence created")
Esempio n. 25
0
def ParseDiffusionFDF(ds, procpar, fdf_properties, args):
    """ParseDiffusionFDF

    :param ds: Dicom dataset
    :param procpar: Procpar dictionary tag/value pairs
    :param fdf_properties: Tag/value pairs of local fdf file

    :param args: Input arguments
    :returns: Dicom struct
    """
    if args.verbose:
        print 'Processing diffusion image'

    # Get procpar diffusion parameters
    bvalue = procpar['bvalue']  # 64 element array
    bvaluesortidx = numpy.argsort(bvalue)
    bvalSave = procpar['bvalSave']
    # if 'bvalvs' in procpar.keys():
    #    BvalVS = procpar['bvalvs']
    # excluded in external recons by vnmrj, unused here
    bvalueRS = procpar['bvalrs']  # 64
    bvalueRR = procpar['bvalrr']  # 64
    bvalueRP = procpar['bvalrp']  # 64
    bvaluePP = procpar['bvalpp']  # 64
    bvalueSP = procpar['bvalsp']  # 64
    bvalueSS = procpar['bvalss']  # 64

    if procpar['recon'] == 'external':
        diffusion_idx = 0
        while True:
            if math.fabs(bvalue[diffusion_idx] -
                         fdf_properties['bvalue']) < 0.005:
                break
            diffusion_idx += 1
        # diffusion_idx = fdf_properties['array_index'] - 1
    else:
        diffusion_idx = fdf_properties['array_index'] * 2

    if diffusion_idx > len(bvalue):
        print '''Procpar bvalue does not contain enough values
        determined by fdf_properties array_index'''

    if args.verbose:
        print 'Diffusion index ', diffusion_idx, ' arrary index ',
        fdf_properties['array_index']

    # Sort diffusion based on sorted index of bvalue instead of
    # fdf_properties['array_index']
    if ds.MRAcquisitionType == '2D':
        ds.AcquisitionNumber = fdf_properties['array_index']
        ds.FrameAcquisitionNumber = fdf_properties['array_index']
        loc = numpy.array(fdf_properties['location'], dtype='|S9')
        ds.ImagePositionPatient = [loc[0], loc[1], loc[2]]
        orient = numpy.array(fdf_properties['orientation'], dtype='|S9')
        ds.ImageOrientationPatient = [
            orient[0], orient[1], orient[2], orient[3], orient[4], orient[5],
            orient[6], orient[7], orient[8]
        ]
    else:
        ds.AcquisitionNumber = bvaluesortidx[diffusion_idx]

    if math.fabs(bvalue[diffusion_idx] - fdf_properties['bvalue']) > 0.005:
        print 'Procpar and fdf B-value mismatch: procpar value ',
        bvalue[diffusion_idx], ' and  local fdf value ',
        fdf_properties['bvalue'], ' array idx ', fdf_properties['array_index']

    # MR Diffusion Sequence (0018,9117) see DiffusionMacro.txt
    # B0 scan does not need the MR Diffusion Gradient Direction Sequence macro
    # and its directionality should be set to NONE the remaining scans relate
    # to particular directions hence need the direction macro
    diffusionseq = Dataset()
    if fdf_properties['bvalue'] < 20:
        diffusionseq.DiffusionBValue = 0
        diffusionseq.DiffusionDirectionality = 'NONE'
    else:
        diffusionseq.DiffusionBValue = int(fdf_properties['bvalue'])
        # TODO  One of: DIRECTIONAL,  BMATRIX, ISOTROPIC, NONE
        diffusionseq.DiffusionDirectionality = 'BMATRIX'

        # Diffusion Gradient Direction Sequence (0018,9076)
        diffusiongraddirseq = Dataset()
        # Diffusion Gradient Orientation  (0018,9089)
        # diffusiongraddirseq.add_new((0x0018,0x9089), 'FD',[
        # fdf_properties['dro'],  fdf_properties['dpe'],
        # fdf_properties['dsl']])
        diffusiongraddirseq.DiffusionGradientOrientation = [
            fdf_properties['dro'], fdf_properties['dpe'], fdf_properties['dsl']
        ]
        diffusionseq.DiffusionGradientDirectionSequence = Sequence(
            [diffusiongraddirseq])
        # diffusionseq.add_new((0x0018,0x9076), 'SQ',
        # Sequence([diffusiongraddirseq]))

        # Diffusion b-matrix Sequence (0018,9601)
        diffbmatseq = Dataset()
        diffbmatseq.DiffusionBValueXX = bvalueRR[diffusion_idx] / bvalSave
        diffbmatseq.DiffusionBValueXY = bvalueRS[diffusion_idx] / bvalSave
        diffbmatseq.DiffusionBValueXZ = bvalueRP[diffusion_idx] / bvalSave
        diffbmatseq.DiffusionBValueYY = bvalueSS[diffusion_idx] / bvalSave
        diffbmatseq.DiffusionBValueYZ = bvalueSP[diffusion_idx] / bvalSave
        diffbmatseq.DiffusionBValueZZ = bvaluePP[diffusion_idx] / bvalSave
        diffusionseq.DiffusionBMatrixSequence = Sequence([diffbmatseq])

    # TODO  One of: FRACTIONAL, RELATIVE, VOLUME_RATIO
    diffusionseq.DiffusionAnisotropyType = 'FRACTIONAL'
    ds.MRDiffusionSequence = Sequence([diffusionseq])

    MRImageFrameType = Dataset()
    MRImageFrameType.FrameType = ["ORIGINAL", "PRIMARY", "DIFFUSION",
                                  "NONE"]  # same as ds.ImageType
    MRImageFrameType.PixelPresentation = ["MONOCHROME"]
    MRImageFrameType.VolumetrixProperties = ["VOLUME"]
    MRImageFrameType.VolumeBasedCalculationTechnique = ["NONE"]
    MRImageFrameType.ComplexImageComponent = ["MAGNITUDE"]
    MRImageFrameType.AcquisitionContrast = ["DIFFUSION"]
    ds.MRImageFrameTypeSequence = Sequence([MRImageFrameType])

    return ds
Esempio n. 26
0
    def run(self, ident_dir, clean_dir):
        """Walk *ident_dir*, anonymize each DICOM file found and write the
        cleaned copy under *clean_dir* (mirroring the directory layout via
        self.destination).

        Files that cannot be parsed, or that self.check_quarantine flags,
        are moved aside via self.quarantine_file.  Optionally shifts dates
        relative to each tag's first-seen date (self.relative_dates) and
        preserves Siemens CSA private headers (self.keep_csa_headers).

        :param ident_dir: root directory of identified (input) DICOM files
        :param clean_dir: destination root for de-identified output
        :returns: False on a fatal read/write error; None when the walk
            completes.
        """
        # Get first date for tags set in relative_dates
        date_adjust = None
        audit_date_correct = None
        if self.relative_dates is not None:
            # Offset of each tag's first-seen date from the Unix epoch;
            # subtracted from per-file dates below so intervals are kept.
            date_adjust = {
                tag: first_date - datetime(1970, 1, 1)
                for tag, first_date in self.get_first_date(
                    ident_dir, self.relative_dates).items()
            }
        for root, _, files in os.walk(ident_dir):
            for filename in files:
                # Skip hidden files (e.g. .DS_Store).
                if filename.startswith('.'):
                    continue
                source_path = os.path.join(root, filename)
                try:
                    ds = dicom.read_file(source_path)
                except IOError:
                    logger.error('Error reading file %s' % source_path)
                    self.close_all()
                    return False
                except InvalidDicomError:  # DICOM formatting error
                    self.quarantine_file(source_path, ident_dir,
                                         'Could not read DICOM file.')
                    continue

                move, reason = self.check_quarantine(ds)

                if move:
                    self.quarantine_file(source_path, ident_dir, reason)
                    continue

                # Store adjusted dates for recovery
                obfusc_dates = None
                if self.relative_dates is not None:
                    obfusc_dates = {
                        tag: datetime.strptime(ds[tag].value, '%Y%m%d') -
                        date_adjust[tag]
                        for tag in self.relative_dates
                    }

                # Keep CSA Headers (Siemens private group 0x0029) so they
                # can be restored after anonymization strips them.
                csa_headers = dict()
                if self.keep_csa_headers and (0x29, 0x10) in ds:
                    csa_headers[(0x29, 0x10)] = ds[(0x29, 0x10)]
                    for offset in [0x10, 0x20]:
                        elno = (0x10 * 0x0100) + offset
                        csa_headers[(0x29, elno)] = ds[(0x29, elno)]

                destination_dir = self.destination(source_path, clean_dir,
                                                   ident_dir)
                if not os.path.exists(destination_dir):
                    os.makedirs(destination_dir)
                try:
                    ds, study_pk = self.anonymize(ds)
                # NOTE: Python 2 'except ..., e' syntax.
                except ValueError, e:
                    self.quarantine_file(
                        source_path, ident_dir,
                        'Error running anonymize function. There may be a '
                        'DICOM element value that does not match the specified'
                        ' Value Representation (VR). Error was: %s' % e)
                    continue

                # Recover relative dates; audit each tag only once per study
                # (audit_date_correct tracks the last audited study_pk).
                if self.relative_dates is not None:
                    for tag in self.relative_dates:
                        if audit_date_correct != study_pk and tag in AUDIT.keys(
                        ):
                            self.audit.update(
                                ds[tag], obfusc_dates[tag].strftime('%Y%m%d'),
                                study_pk)
                        ds[tag].value = obfusc_dates[tag].strftime('%Y%m%d')
                    audit_date_correct = study_pk

                # Restore CSA Header
                if len(csa_headers) > 0:
                    for tag in csa_headers:
                        ds[tag] = csa_headers[tag]

                # Set Patient Identity Removed to YES
                t = Tag((0x12, 0x62))
                ds[t] = DataElement(t, 'CS', 'YES')

                # Set the De-identification method code sequence
                # (0012,0064): 113100 = Basic Profile, 113105 = Clean
                # Descriptors Option.
                method_ds = Dataset()
                t = dicom.tag.Tag((0x8, 0x102))
                if self.profile == 'clean':
                    method_ds[t] = DataElement(
                        t, 'DS', MultiValue(DS, ['113100', '113105']))
                else:
                    method_ds[t] = DataElement(t, 'DS',
                                               MultiValue(DS, ['113100']))
                t = dicom.tag.Tag((0x12, 0x64))
                ds[t] = DataElement(t, 'SQ', Sequence([method_ds]))

                out_filename = ds[
                    SOP_INSTANCE_UID].value if self.rename else filename
                clean_name = os.path.join(destination_dir, out_filename)
                try:
                    ds.save_as(clean_name)
                except IOError:
                    logger.error('Error writing file %s' % clean_name)
                    self.close_all()
                    return False
Esempio n. 27
0
def _splitSerieIfRequired(serie, series):
    """ _splitSerieIfRequired(serie, series)
    Split the serie in multiple series if this is required.
    The choice is based on examining the image position relative to
    the previous image. If it differs too much, it is assumed
    that there is a new dataset. This can happen for example in
    unsplit gated CT data.

    Mutates *series* in place: when a split occurs, *serie* is replaced
    at its position by the new sub-series.
    """

    # Sort the original list and get local name
    serie._sort()
    L = serie._datasets

    # Init previous slice
    ds1 = L[0]

    # Check whether we can do this
    if not "ImagePositionPatient" in ds1:
        return

    # Initialize a list of new lists
    L2 = [[ds1]]

    # Init slice distance estimate (0 means "no estimate yet")
    distance = 0

    for index in range(1, len(L)):

        # Get current slice
        ds2 = L[index]

        # Get positions -- only the z component (index 2) is compared
        pos1 = float(ds1.ImagePositionPatient[2])
        pos2 = float(ds2.ImagePositionPatient[2])

        # Get distances
        newDist = abs(pos1 - pos2)
        #deltaDist = abs(firstPos-pos2)

        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset.
        if distance and newDist > 2.1 * distance:
            L2.append([])
            # Reset the estimate so it is re-learned in the new sub-series.
            distance = 0
        else:
            # Test missing file
            if distance and newDist > 1.5 * distance:
                print 'Warning: missing file after "%s"' % ds1.filename
            distance = newDist

        # Add to last list
        L2[-1].append(ds2)

        # Store previous
        ds1 = ds2

    # Split if we should
    if len(L2) > 1:

        # At what position are we now?
        i = series.index(serie)

        # Create new series -- one DicomSeries per sub-list, same suid.
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, serie._showProgress)
            newSerie._datasets = Sequence(L)
            series2insert.append(newSerie)

        # Insert series and remove self
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
Esempio n. 28
0
def create_rtstruct(RS_File, im_mask_ax, im_mask_sag, im_mask_cor):
    """Append a new ROI, contoured from a binary mask, to an existing DICOM
    RT Structure Set and save the result as a new file.

    Parameters
    ----------
    RS_File : str
        Path to the existing RT Structure Set (RS) DICOM file.
    im_mask_ax : ndarray
        Axial binary mask volume (rows, cols, slices); contours are
        extracted from this mask slice by slice.
    im_mask_sag : ndarray
        Sagittal mask; its bounding box crops the axial mask, and slices
        with no sagittal signal are skipped.
    im_mask_cor : ndarray
        Unused here; kept so the call signature stays unchanged.

    Side effects
    ------------
    Reads the RS file and the image series it references from
    FLAGS.data_dir, then writes a new RS file named
    '<FLAGS.structure><YYYYMMDD>.dcm' in the same directory as RS_File.
    """
    # Crop the axial mask to the sagittal mask's bounding box so only voxels
    # supported by both views survive.
    rmin, rmax, cmin, cmax, zmin, zmax = bbox2_3D(im_mask_sag, 0)
    rmin = max(rmin, 0)
    cmin = max(cmin, 0)
    zmin = max(zmin, 0)

    im_mask_ax_adj = np.zeros(np.shape(im_mask_ax))
    im_mask_ax_adj[rmin:rmax, cmin:cmax,
                   zmin:zmax] = im_mask_ax[rmin:rmax, cmin:cmax, zmin:zmax]

    ss = dicom.read_file(RS_File)
    contour_name = FLAGS.structure
    data_path = FLAGS.data_dir

    # One timestamp for UID, name and label so they agree; 19 chars keeps
    # the UID component well inside DICOM's 64-char UID limit.
    now = datetime.datetime.now()
    timestamp = now.strftime("%Y%m%d%H%M%S%f")[0:19]

    # Give the modified structure set a fresh SOP Instance UID by replacing
    # the last UID component with the timestamp.
    uid_parts = ss.SOPInstanceUID.split('.')
    uid_parts[-1] = timestamp
    ss.SOPInstanceUID = '.'.join(uid_parts)
    ss.StructureSetName = FLAGS.structure_match + '_DLV3_' + timestamp
    ss.StructureSetLabel = timestamp
    ss.InstanceCreationDate = now.strftime("%Y%m%d")
    ss.InstanceCreationTime = now.strftime("%H%M%S.%f")

    # ROI numbers already in use; the new ROI gets max + 1.
    # (np.int is removed in NumPy >= 1.24; builtin int is identical here.)
    ROINumList = [s.ReferencedROINumber for s in ss.ROIContourSequence]
    new_roi_number = int(max(ROINumList)) + 1

    ## Add StructureSetROISequence entry
    roi_ds = Dataset()
    roi_ds.ROINumber = new_roi_number
    roi_ds.ReferencedFrameOfReferenceUID = ss.StructureSetROISequence[
        len(ROINumList) - 1].ReferencedFrameOfReferenceUID
    roi_ds.ROIName = FLAGS.structure_match + '_DLV3'
    roi_ds.ROIDescription = ''
    roi_ds.ROIGenerationAlgorithm = 'MANUAL'
    ss.StructureSetROISequence.append(roi_ds)

    ## Add RTROIObservationsSequence entry
    obs_ds = Dataset()
    obs_ds.ObservationNumber = new_roi_number
    obs_ds.ReferencedROINumber = new_roi_number
    obs_ds.ROIObservationDescription = 'Type:Soft, Range:*/*, Fill:0, Opacity:0.0, Thickness:1, LineThickness:2'
    obs_ds.RTROIInterpretedType = ''
    obs_ds.ROIInterpreter = ''
    ss.RTROIObservationsSequence.append(obs_ds)

    ## New ROIContourSequence entry; contours are appended below.
    ss_new = Dataset()
    ss_new.ReferencedROINumber = new_roi_number
    ss_new.ROIDisplayColor = ['255', '0', '0']
    ss_new.ContourSequence = Sequence()

    k = 0
    # NOTE(review): 'ROIContours'/'Contours' are legacy pydicom 0.9.x names
    # (ROIContourSequence/ContourSequence in pydicom >= 1.0) — kept as-is for
    # the 'dicom' module this file targets; confirm before upgrading pydicom.
    ss_referenceclass = ss.ROIContours[0].Contours[0].ContourImageSequence[
        0].ReferencedSOPClassUID
    for item in ss.StructureSetROISequence[:]:
        ## Check if structure is equal to specified structure name
        if item.ROIName == FLAGS.structure_match:
            # Recover the image series the contour references by globbing for
            # files whose names contain the referenced SOP Instance UID prefix
            # (last two components and three trailing chars dropped to widen
            # the match).
            pattern = ss.ROIContours[k].Contours[0].ContourImageSequence[
                0].ReferencedSOPInstanceUID
            pattern = '*' + '.'.join(pattern.split('.')[:-2])
            pattern = pattern[:-3] + '*'
            CT_files = find(pattern, data_path)
            try:
                CT_files.remove(RS_File)
            except ValueError:
                # The RS file did not match the pattern; nothing to remove.
                print('RS not found in CT list')

            if CT_files:
                ## Open first image to get size, then locate the series'
                ## minimum z position ("z0"), since files are not in
                ## spatial order.
                ct_maxslice = len(CT_files)
                img = dicom.read_file(CT_files[0])
                z0 = img.ImagePositionPatient[2]
                for ct_idx in range(0, ct_maxslice):
                    img = dicom.read_file(CT_files[ct_idx])
                    if 'RS' in CT_files[ct_idx]:
                        print('not structure')
                    else:
                        if z0 > img.ImagePositionPatient[2]:
                            z0 = img.ImagePositionPatient[2]

                # BUG FIX: this loop was previously outside the
                # "if CT_files:" guard, so an empty CT_files list raised
                # NameError on ct_maxslice (and z0). It is now guarded.
                for ct_idx in range(0, ct_maxslice):
                    if 'RS' in CT_files[ct_idx]:
                        print('not structure')
                    else:
                        img = dicom.read_file(CT_files[ct_idx])
                        x_y = np.array(img.ImagePositionPatient)
                        xsp_ysp = np.array(img.PixelSpacing)
                        z_prime = float(img.ImagePositionPatient[2])
                        zsp = float(img.SliceThickness)
                        # Physical z position -> mask slice index.
                        z = int((z_prime - z0) / zsp)
                        if np.max(im_mask_ax_adj[:, :, z]) > 0 and (np.max(
                                im_mask_sag[:, :, z] > 0)):
                            r = im_mask_ax_adj[:, :, z]
                            # Marching-squares contours at the 0.5 iso-level.
                            contours = measure.find_contours(r, 0.5)
                            for contour in contours:
                                pointList = []
                                contour_dicom = Dataset()
                                contour_dicom.ContourGeometricType = 'CLOSED_PLANAR'
                                for i in range(0, len(contour)):
                                    y = contour[i][0]
                                    x = contour[i][1]
                                    # Pixel indices -> patient coords (mm).
                                    x_prime = x * xsp_ysp[0] + x_y[0]
                                    y_prime = y * xsp_ysp[1] + x_y[1]
                                    pointList.append(x_prime)
                                    pointList.append(y_prime)
                                    pointList.append(z_prime)

                                if len(pointList) > 0:
                                    contour_dicom.NumberOfContourPoints = len(
                                        contour)
                                    contour_dicom.ContourData = pointList
                                    contour_dicom.ContourImageSequence = Sequence()
                                    img_seq = Dataset()
                                    img_seq.ReferencedSOPClassUID = ss_referenceclass
                                    img_seq.ReferencedSOPInstanceUID = CT_files[
                                        ct_idx].split(os.sep)[-1].replace(
                                            '.dcm', '').replace('MR.', '')
                                    contour_dicom.ContourImageSequence.append(
                                        img_seq)
                                    ss_new.ContourSequence.append(contour_dicom)
            # NOTE(review): k only advances on a name match, so ROIContours[k]
            # assumes matching ROIs lead the sequence — confirm the intended
            # pairing before relying on multiple matches.
            k = k + 1

    ss.ROIContourSequence.append(ss_new)
    filename = RS_File.split(os.sep)
    filename[-1] = contour_name + now.strftime("%Y%m%d") + '.dcm'
    print(filename)
    ss.save_as(os.sep.join(filename))

    return