Example #1
def write_ATvalue(fp, data_element):
    """Write a data_element tag to a file."""
    try:
        iter(data_element.value)  # see if it is a multi-valued AT
        # Note: this will fail if Tag is ever derived from a true tuple rather than from an int
    except TypeError:
        tag = Tag(data_element.value)  # make sure it is expressed as a Tag instance
        fp.write_tag(tag)
    else:
        tags = [Tag(tag) for tag in data_element.value]
        for tag in tags:
            fp.write_tag(tag)
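For orientation, here is a minimal sketch (assuming pydicom is installed; the tag numbers are illustrative) of the two AT-value shapes this function distinguishes:

from pydicom.tag import Tag

single = Tag(0x0054, 0x0010)                          # single AT value; Tag is int-derived, so not iterable
multi = [Tag(0x0054, 0x0010), Tag(0x0054, 0x0020)]    # multi-valued AT

for value in (single, multi):
    try:
        iter(value)                        # multi-valued branch: value is a sequence
    except TypeError:
        tags = [Tag(value)]                # single-value branch
    else:
        tags = [Tag(item) for item in value]
    print(tags)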
Example #2
def saveImage(image, m, b, minMax):
    image['0008', '0008'].value[0] = 'DERIVED'  # Image Type
    image['0008', '0008'].value[1] = 'SECONDARY'  # Image Type
    image['0008', '103E'].value = 'Description'  # Series description
    image.add_new(Tag(['0008', '103F']), 'LO', 'Series description code')  # Series description
    image.add_new(Tag(['0020', '0016']), 'LO', uuid.uuid4().hex)  # Sop Instance
    image.add_new(Tag(['0002', '0003']), 'LO', image['0020', '0016'].value)  # Sop Instance
    image['0020', '000E'].value = uuid.uuid4().hex  # Series instance UID
    image.add_new(Tag(['0028', '0106']), 'FL', minMax[0])  # Smallest Image Pixel Value
    image.add_new(Tag(['0028', '0107']), 'FL', minMax[1])  # Largest Image Pixel Value
    image['0028', '1052'].value = b  # Rescale intercept
    image['0028', '1053'].value = m  # Rescale slope
    image['0028', '0100'].value = 16  # Bits Allocated
    image.save_as("newfilename.dcm")
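As a side note, a hedged sketch of the Dataset.add_new(tag, VR, value) call this example leans on, built on an in-memory dataset (the tags and values below are illustrative):

from pydicom.dataset import Dataset
from pydicom.tag import Tag

ds = Dataset()
ds.add_new(Tag(0x0010, 0x0010), 'PN', 'Doe^Jane')        # Patient's Name
ds.add_new(Tag(0x0008, 0x103E), 'LO', 'Derived series')  # Series Description
print(ds)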
Example #3
    def testGetFromRaw(self):
        """Dataset: get(tag) returns same object as ds[tag] for raw element.."""
        # This came from issue 88, where get(tag#) returned a RawDataElement,
        #     while get(name) converted to a true DataElement
        test_tag = 0x100010
        test_elem = RawDataElement(Tag(test_tag), 'PN', 4, 'test', 0, True,
                                   True)
        ds = Dataset({Tag(test_tag): test_elem})
        by_get = ds.get(test_tag)
        by_item = ds[test_tag]

        # self.assertEqual(type(elem_get), type(name_get), "Dataset.get() returned different type for name vs tag access")
        msg = "Dataset.get() returned different objects for ds.get(tag) and ds[tag]:\nBy get():%r\nBy ds[tag]:%r\n"
        self.assertEqual(by_get, by_item, msg % (by_get, by_item))
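A minimal sketch of the behaviour under test, using an ordinary (non-raw) element; it assumes only that pydicom is installed:

from pydicom.dataset import Dataset
from pydicom.tag import Tag

ds = Dataset()
ds.PatientName = 'Doe^Jane'
# get(tag) and ds[tag] should yield the same element once the key is a Tag
assert ds.get(Tag(0x0010, 0x0010)) == ds[0x0010, 0x0010]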
Example #4
def _is_bval_type_b(grouped_dicoms):
    """
    Check if the bvals are stored in the second of 2 currently known ways for single frame dti
    """
    bval_tag = Tag(0x0018, 0x9087)
    bvec_tag = Tag(0x0018, 0x9089)
    for group in grouped_dicoms:
        if bvec_tag in group[0] and bval_tag in group[0]:
            bvec = common.get_fd_array_value(group[0][bvec_tag], 3)
            bval = common.get_fd_value(group[0][bval_tag])
            if _is_float(bvec[0]) and _is_float(bvec[1]) and _is_float(
                    bvec[2]) and _is_float(bval) and bval != 0:
                return True
    return False
Example #5
def _create_bvecs(sorted_dicoms, bvec_file):
    """
    Calculate the bvecs and write them to a bvec file
    # inspired by dicom2nii from mricron
    # see  http://users.fmrib.ox.ac.uk/~robson/internal/Dicom2Nifti111.m
    """
    if type(sorted_dicoms[0]) is list:
        dicom_headers = sorted_dicoms[0][0]
    else:
        dicom_headers = sorted_dicoms[0]

    # get the patient orientation
    image_orientation = dicom_headers.ImageOrientationPatient
    read_vector = numpy.array([float(image_orientation[0]), float(image_orientation[1]), float(image_orientation[2])])
    phase_vector = numpy.array([float(image_orientation[3]), float(image_orientation[4]), float(image_orientation[5])])
    mosaic_vector = numpy.cross(read_vector, phase_vector)

    # normalize the vectors
    read_vector /= numpy.linalg.norm(read_vector)
    phase_vector /= numpy.linalg.norm(phase_vector)
    mosaic_vector /= numpy.linalg.norm(mosaic_vector)
    # create an empty array for the new bvecs
    bvecs = numpy.zeros([len(sorted_dicoms), 3])
    # for each slice calculate the new bvec
    for index in range(0, len(sorted_dicoms)):
        if type(sorted_dicoms[0]) is list:
            dicom_headers = sorted_dicoms[index][0]
        else:
            dicom_headers = sorted_dicoms[index]

        # get the bval, as this is needed in some checks
        bval = common.get_is_value(dicom_headers[Tag(0x0019, 0x100c)])
        # get the bvec if it exists in the headers
        bvec = numpy.array([0, 0, 0])
        if Tag(0x0019, 0x100e) in dicom_headers:
            # in case of implicit VR the private field cannot be split into an array, so we do that here
            bvec = numpy.array(common.get_fd_array_value(dicom_headers[Tag(0x0019, 0x100e)], 3))
        # if bval is 0 or the vector is 0 no projection is needed and the vector is 0,0,0
        new_bvec = numpy.array([0, 0, 0])

        if bval > 0 and not (bvec == [0, 0, 0]).all():
            # project the bvec and invert the y direction
            new_bvec = numpy.array(
                [numpy.dot(bvec, read_vector), -numpy.dot(bvec, phase_vector), numpy.dot(bvec, mosaic_vector)])
            # normalize the bvec
            new_bvec /= numpy.linalg.norm(new_bvec)
        bvecs[index, :] = new_bvec
    # save the found bvecs to the file
    common.write_bvec_file(bvecs, bvec_file)
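For clarity, a hedged sketch of just the projection step above, with an illustrative axial ImageOrientationPatient and gradient vector (numpy only):

import numpy

image_orientation = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]   # illustrative axial orientation
read_vector = numpy.array(image_orientation[:3])
phase_vector = numpy.array(image_orientation[3:])
mosaic_vector = numpy.cross(read_vector, phase_vector)

bvec = numpy.array([0.5, 0.5, 0.7071])               # illustrative gradient direction
new_bvec = numpy.array([numpy.dot(bvec, read_vector),
                        -numpy.dot(bvec, phase_vector),   # y inverted, as above
                        numpy.dot(bvec, mosaic_vector)])
new_bvec /= numpy.linalg.norm(new_bvec)
print(new_bvec)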
Example #6
def _write_file_meta_info(fp, meta_dataset):
    """Write the dicom group 2 dicom storage File Meta Information to the file.

    The file should already be positioned past the 128 byte preamble.
    Raises ValueError if the required data_elements (elements 2,3,0x10,0x12)
    are not in the dataset. If the dataset came from a file read with
    read_file(), then the required data_elements should already be there.
    """
    fp.write(b'DICM')

    # File meta info is always Little Endian, Explicit VR. Afterwards these will be
    #    changed to the transfer syntax values set in the meta info
    fp.is_little_endian = True
    fp.is_implicit_VR = False

    if Tag((2, 1)) not in meta_dataset:
        meta_dataset.add_new((2, 1), 'OB',
                             b"\0\1")  # file meta information version

    # Now check that required meta info tags are present:
    missing = []
    for element in [2, 3, 0x10, 0x12]:
        if Tag((2, element)) not in meta_dataset:
            missing.append(Tag((2, element)))
    if missing:
        raise ValueError(
            "Missing required tags {0} for file meta information".format(
                str(missing)))

    # Put in temp number for required group length, save current location to come back
    meta_dataset[(2, 0)] = DataElement((2, 0), 'UL', 0)  # put 0 to start
    group_length_data_element_size = 12  # !based on DICOM std ExplVR
    group_length_tell = fp.tell()

    # Write the file meta dataset, including the temporary group length
    length = write_dataset(fp, meta_dataset)
    group_length = length - group_length_data_element_size  # counts from end of that

    # Save end of file meta to go back to
    end_of_file_meta = fp.tell()

    # Go back and write the actual group length
    fp.seek(group_length_tell)
    group_length_data_element = DataElement((2, 0), 'UL', group_length)
    write_data_element(fp, group_length_data_element)

    # Return to end of file meta, ready to write remainder of the file
    fp.seek(end_of_file_meta)
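The group-length handling above follows a common back-patch pattern; a minimal sketch of it on a plain byte stream (standard library only, no DICOM specifics):

import io
import struct

fp = io.BytesIO()
length_tell = fp.tell()
fp.write(struct.pack('<I', 0))            # temporary length, little endian UL
start = fp.tell()
fp.write(b'example payload bytes')        # stand-in for the file meta elements
end = fp.tell()

fp.seek(length_tell)
fp.write(struct.pack('<I', end - start))  # go back and write the real length
fp.seek(end)                              # return to the end, ready to continue
print(fp.getvalue())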
Example #7
    def prep_data(self, PathDicom):
        self.PathDicom = PathDicom
        self.lstFilesDCM = []
        self.lstRSFile = []
        self.Dicom_info = []

        fileList = []
        for dirName, dirs, fileList in os.walk(PathDicom):
            break
        if len(fileList) < 10:  # If there are too few files, bail out
            return None
        for filename in fileList:
            try:
                ds = dicom.read_file(os.path.join(dirName, filename))
                if ds.Modality == 'CT' or ds.Modality == 'MR':  # check whether the file is a CT or MR DICOM
                    self.lstFilesDCM.append(os.path.join(dirName, filename))
                    self.Dicom_info.append(ds)
                elif ds.Modality == 'RTSTRUCT':
                    self.lstRSFile = os.path.join(dirName, filename)
            except:
                # if filename.find('Iteration_') == 0:
                #     os.remove(PathDicom+filename)
                continue
        self.RefDs = dicom.read_file(self.lstFilesDCM[0])
        self.mask_exist = False
        if self.lstRSFile:
            self.RS_struct = dicom.read_file(self.lstRSFile)
            if Tag((0x3006, 0x020)) in self.RS_struct.keys():
                self.ROI_Structure = self.RS_struct.StructureSetROISequence
            else:
                self.ROI_Structure = []
            self.rois_in_case = []
            for Structures in self.ROI_Structure:
                self.rois_in_case.append(Structures.ROIName)
Example #8
    def update(self, dictionary):
        """Extend dict.update() to handle *named tags*."""
        for key, value in dictionary.items():
            if is_stringlike(key):
                setattr(self, key, value)
            else:
                self[Tag(key)] = value
Example #9
    def __init__(self,
                 tag,
                 VR,
                 value,
                 file_value_tell=None,
                 is_undefined_length=False,
                 already_converted=False):
        """Create a data element instance.

        Most user code should instead use DICOM keywords
        to create data_elements, for which only the value is supplied,
        and the VR and tag are determined from the dicom dictionary.

        tag -- dicom (group, element) tag in any form accepted by Tag().
        VR -- dicom value representation (see DICOM standard part 6)
        value -- the value of the data element. One of the following:
            - a single string value
            - a number
            - a list or tuple with all strings or all numbers
            - a multi-value string with backslash separator
        file_value_tell -- used internally by Dataset, to store the write
            position for ReplaceDataElementValue method
        is_undefined_length -- used internally to store whether the length
            field in this data element was 0xFFFFFFFFL, i.e. "undefined length"

        """
        self.tag = Tag(tag)
        self.VR = VR  # Note!: you must set VR before setting value
        if already_converted:
            self._value = value
        else:
            self.value = value  # calls property setter which will convert
        self.file_tell = file_value_tell
        self.is_undefined_length = is_undefined_length
Example #10
def read_delimiter_item(fp, delimiter):
    """Read and ignore an expected delimiter.

    If the delimiter is not found or correctly formed, a warning is logged.
    """
    found = fp.read(4)
    if found != delimiter:
        logger.warn("Expected delimitor %s, got %s at file position 0x%x",
                    Tag(delimiter), Tag(found),
                    fp.tell() - 4)
    length = fp.read_UL()
    if length != 0:
        logger.warn(
            "Expected delimiter item to have length 0, got %d at file position 0x%x",
            length,
            fp.tell() - 4)
Example #11
def _get_grouped_dicoms(dicom_input):
    """
    Search all dicoms in the dicom directory, sort and validate them

    fast_read = True will only read the headers not the data
    """

    # Order all dicom files by InstanceNumber
    dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)

    # now group per stack
    grouped_dicoms = [[]]  # list with first element a list
    stack_index = 0
    previous_stack_position = -1

    # loop over all sorted dicoms
    stack_position_tag = Tag(0x0020, 0x9057)  # construct once outside the loop; this is slow and used a lot
    for index in range(0, len(dicoms)):
        dicom_ = dicoms[index]
        # if the stack number decreases we moved to the next stack
        stack_position = 0
        if stack_position_tag in dicom_:
            stack_position = dicom_[stack_position_tag].value
        if previous_stack_position > stack_position:
            stack_index += 1
            grouped_dicoms.append([])
        grouped_dicoms[stack_index].append(dicom_)
        previous_stack_position = stack_position

    return grouped_dicoms
Example #12
def _get_grouped_dicoms(dicom_input):
    """
    Search all dicoms in the dicom directory, sort and validate them

    fast_read = True will only read the headers not the data
    """
    # Order all dicom files by InstanceNumber
    dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)

    # now group per stack
    grouped_dicoms = [[]]  # list with first element a list
    timepoint_index = 0
    previous_stack_position = -1

    # loop over all sorted dicoms
    stack_position_tag = Tag(0x2001, 0x100a)  # construct once outside the loop; this is slow and used a lot
    for index in range(0, len(dicoms)):
        dicom_ = dicoms[index]
        stack_position = 0
        if stack_position_tag in dicom_:
            stack_position = common.get_is_value(dicom_[stack_position_tag])
        if previous_stack_position == stack_position:
            # if the stack number is the same we move to the next timepoint
            timepoint_index += 1
            if len(grouped_dicoms) <= timepoint_index:
                grouped_dicoms.append([])
        else:
            # if it changes move back to the first timepoint
            timepoint_index = 0
        grouped_dicoms[timepoint_index].append(dicom_)
        previous_stack_position = stack_position

    return grouped_dicoms
Example #13
def get_private_entry(tag, private_creator):
    """Return the tuple (VR, VM, name, is_retired) from a private dictionary"""
    tag = Tag(tag)
    try:
        private_dict = private_dictionaries[private_creator]
    except KeyError:
        raise KeyError("Private creator {0} not in private dictionary".format(
            private_creator))

    # private elements are usually agnostic for "block" (see PS3.5-2008 7.8.1 p44)
    # Some elements in _private_dict are explicit; most have "xx" for high-byte of element
    # Try exact key first, but then try with "xx" in block position
    try:
        dict_entry = private_dict[tag]
    except KeyError:
        # so put "xx" in the block position of the key to look up
        group_str = "%04x" % tag.group
        elem_str = "%04x" % tag.elem
        key = "%sxx%s" % (group_str, elem_str[-2:])
        if key not in private_dict:
            raise KeyError(
                "Tag {0} not in private dictionary for private creator {1}".
                format(key, private_creator))
        dict_entry = private_dict[key]
    return dict_entry
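A small sketch of the "xx" block masking used in the fallback lookup above (the tag is illustrative):

from pydicom.tag import Tag

tag = Tag(0x0019, 0x1211)
group_str = "%04x" % tag.group
elem_str = "%04x" % tag.elem
key = "%sxx%s" % (group_str, elem_str[-2:])
print(key)   # '0019xx11' -- the private block byte is masked out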
Example #14
def _is_bval_type_a(grouped_dicoms):
    """
    Check if the bvals are stored in the first of 2 currently known ways for single frame dti
    """
    bval_tag = Tag(0x2001, 0x1003)
    bvec_x_tag = Tag(0x2005, 0x10b0)
    bvec_y_tag = Tag(0x2005, 0x10b1)
    bvec_z_tag = Tag(0x2005, 0x10b2)
    for group in grouped_dicoms:
        if bvec_x_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_x_tag])) and \
                        bvec_y_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_y_tag])) and \
                        bvec_z_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_z_tag])) and \
                        bval_tag in group[0] and _is_float(common.get_fl_value(group[0][bval_tag])) and \
                        common.get_fl_value(group[0][bval_tag]) != 0:
            return True
    return False
Example #15
def _classic_get_grouped_dicoms(dicom_input):
    """
    Search all dicoms in the dicom directory, sort and validate them

    fast_read = True will only read the headers not the data
    """
    # Loop over all files and build dict
    # Order all dicom files by InstanceNumber
    dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)

    # now group per stack
    grouped_dicoms = []

    # loop over all sorted dicoms
    stack_position_tag = Tag(0x0020, 0x0012)  # in this case it is the acquisition number
    for index in range(0, len(dicoms)):
        dicom_ = dicoms[index]
        if stack_position_tag not in dicom_:
            stack_index = 0
        else:
            stack_index = dicom_[stack_position_tag].value - 1
        while len(grouped_dicoms) <= stack_index:
            grouped_dicoms.append([])
        grouped_dicoms[stack_index].append(dicom_)

    return grouped_dicoms
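The bucket-filling logic above can be seen in isolation with plain integers standing in for acquisition numbers; a minimal sketch:

grouped = []
for acquisition_number in [1, 1, 2, 2, 3]:   # illustrative 1-based stack indices
    stack_index = acquisition_number - 1
    while len(grouped) <= stack_index:       # grow the list of stacks as needed
        grouped.append([])
    grouped[stack_index].append(acquisition_number)
print(grouped)   # [[1, 1], [2, 2], [3]]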
Example #16
    def update(self, dictionary):
        """Extend dict.update() to handle DICOM keywords."""
        for key, value in dictionary.items():
            if isinstance(key, (str, unicode)):
                setattr(self, key, value)
            else:
                self[Tag(key)] = value
Example #17
    def testGetExists4(self):
        """Dataset: dataset.get() returns an existing item by Tag............"""
        ds = self.dummy_dataset()
        unit = ds.get(Tag(0x300A00B2), None).value
        self.assertEqual(
            unit, 'unit001',
            "dataset.get() did not return existing member by Tag")
Example #18
    def testGetDefault4(self):
        """Dataset: dataset.get() returns default for non-existing Tag......."""
        ds = self.dummy_dataset()
        not_there = ds.get(Tag(0x99999999), "not-there")
        self.assertEqual(
            not_there, "not-there",
            "dataset.get() did not return default value for non-member by Tag")
Example #19
def CleanName(tag):
    """Return the dictionary descriptive text string but without bad characters.
    
    Used for e.g. *named tags* of Dataset instances (before DICOM keywords were
    part of the standard)
    
    """
    tag = Tag(tag)
    if tag not in DicomDictionary:
        if tag.element == 0:  # 0=implied group length in DICOM versions < 3
            return "GroupLength"
        else:
            return ""
    s = dictionary_description(tag)  # Descriptive name in dictionary
    # remove blanks and nasty characters
    s = s.translate(normTable, r""" !@#$%^&*(),;:.?\|{}[]+-="'’/""")

    # Take "Sequence" out of name as more natural sounding
    # e.g. "BeamSequence"->"Beams"; "ReferencedImageBoxSequence"->"ReferencedImageBoxes"
    # 'Other Patient ID' exists as single value AND as sequence so check for it and leave 'Sequence' in
    if dictionaryVR(tag) == "SQ" and not s.startswith("OtherPatientIDs"):
        if s.endswith("Sequence"):
            s = s[:-8] + "s"
            if s.endswith("ss"):
                s = s[:-1]
            if s.endswith("xs"):
                s = s[:-1] + "es"
            if s.endswith("Studys"):
                s = s[:-2] + "ies"
    return s
Example #20
    def testJPEG2000(self):
        """JPEG2000: Returns correct values for sample data elements............"""
        expected = [Tag(0x0054, 0x0010),
                    Tag(0x0054, 0x0020)
                    ]  # XX also tests multiple-valued AT data element
        got = self.jpeg.FrameIncrementPointer
        self.assertEqual(
            got, expected,
            "JPEG2000 file, Frame Increment Pointer: expected %s, got %s" %
            (expected, got))

        got = self.jpeg.DerivationCodeSequence[0].CodeMeaning
        expected = 'Lossy Compression'
        self.assertEqual(
            got, expected,
            "JPEG200 file, Code Meaning got %s, expected %s" % (got, expected))
Example #21
    def __delitem__(self, key):
        """Intercept requests to delete an attribute by key, e.g. del ds[tag]"""
        # Assume it is a standard tag (for speed in the common case)
        try:
            dict.__delitem__(self, key)
        # If not a standard tag, then convert to Tag and try again
        except KeyError:
            tag = Tag(key)
            dict.__delitem__(self, tag)
Example #22
def _is_multiframe_4d(dicom_input):
    """
    Use this function to detect if a dicom series is a philips multiframe 4D dataset
    """
    # check if it is multi frame dicom
    if not is_multiframe_dicom(dicom_input):
        return False

    header = dicom_input[0]

    # check if there are multiple stacks
    number_of_stack_slices = common.get_ss_value(header[Tag(
        0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
    number_of_stacks = int(int(header.NumberOfFrames) / number_of_stack_slices)
    if number_of_stacks <= 1:
        return False

    return True
Example #23
def _get_asconv_headers(mosaic):
    """
    Getter for the asconv headers (ASCII header info stored in the dicom)
    """
    asconv_headers = re.findall(r'### ASCCONV BEGIN(.*)### ASCCONV END ###',
                                mosaic[Tag(0x0029, 0x1020)].value.decode(encoding='ISO-8859-1'),
                                re.DOTALL)[0]

    return asconv_headers
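A hedged sketch of the ASCCONV extraction on a synthetic blob (the payload is made up; only the regular expression mirrors the code above):

import re

blob = b'prefix ### ASCCONV BEGIN ###\nulVersion = 1\n### ASCCONV END ### suffix'
text = blob.decode(encoding='ISO-8859-1')
asconv = re.findall(r'### ASCCONV BEGIN(.*)### ASCCONV END ###', text, re.DOTALL)[0]
print(asconv)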
Example #24
def _is_4d(grouped_dicoms):
    """
    Use this function to detect if a dicom series is a ge 4d dataset
    NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
    (containing one series)
    """
    # read dicom header
    header = grouped_dicoms[0][0]

    # check if the dicom contains stack information
    if Tag(0x0020, 0x9056) not in header or Tag(0x0020, 0x9057) not in header:
        return False

    # check if contains multiple stacks
    if len(grouped_dicoms) <= 1:
        return False

    return True
Example #25
def _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nifti,
                        nifti_file):
    """
    Write the bvals from the sorted dicom files to a bval file
    Inspired by https://github.com/IBIC/ibicUtils/blob/master/ibicBvalsBvecs.py
    """

    # create the empty arrays
    number_of_stack_slices = common.get_ss_value(multiframe_dicom[Tag(
        0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
    number_of_stacks = int(
        int(multiframe_dicom.NumberOfFrames) / number_of_stack_slices)

    bvals = numpy.zeros([number_of_stacks], dtype=numpy.int32)
    bvecs = numpy.zeros([number_of_stacks, 3])

    # loop over all timepoints and create a list with all bvals and bvecs
    for stack_index in range(0, number_of_stacks):
        stack = multiframe_dicom[Tag(0x5200, 0x9230)][stack_index]
        if str(stack[Tag(0x0018,
                         0x9117)][0][Tag(0x0018,
                                         0x9075)].value) == 'DIRECTIONAL':
            bvals[stack_index] = common.get_fd_value(stack[Tag(
                0x0018, 0x9117)][0][Tag(0x0018, 0x9087)])
            bvecs[stack_index, :] = common.get_fd_array_value(
                stack[Tag(0x0018,
                          0x9117)][0][Tag(0x0018,
                                          0x9076)][0][Tag(0x0018, 0x9089)], 3)

    # truncate nifti if needed
    nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti,
                                                nifti_file)

    # save the found bvecs to the file
    if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
        common.write_bval_file(bvals, bval_file)
        common.write_bvec_file(bvecs, bvec_file)
    else:
        bval_file = None
        bvec_file = None

    return bval_file, bvec_file
Example #26
def _get_bvals_bvecs(grouped_dicoms):
    """
    Write the bvals from the sorted dicom files to a bval file
    """
    # loop over all timepoints and create a list with all bvals and bvecs
    bvals = numpy.zeros([len(grouped_dicoms)], dtype=numpy.int32)
    bvecs = numpy.zeros([len(grouped_dicoms), 3])

    for group_index in range(0, len(grouped_dicoms)):
        dicom_ = grouped_dicoms[group_index][0]
        # 0019:10bb: Diffusion X
        # 0019:10bc: Diffusion Y
        # 0019:10bd: Diffusion Z
        # 0043:1039: B-values (4 values, 1st value is actual B value)

        # bval can be stored in either string or number format in dicom, so handle both
        # some workarounds needed for implicit transfer syntax to work
        if isinstance(dicom_[Tag(0x0043, 0x1039)].value,
                      string_types):  # this works for python2.7
            original_bval = float(dicom_[Tag(0x0043,
                                             0x1039)].value.split('\\')[0])
        elif isinstance(dicom_[Tag(0x0043, 0x1039)].value,
                        bytes):  # this works for python3
            original_bval = float(dicom_[Tag(
                0x0043, 0x1039)].value.decode("utf-8").split('\\')[0])
        else:
            original_bval = dicom_[Tag(0x0043, 0x1039)][0]
        original_bvec = numpy.array([0, 0, 0], dtype=float)
        original_bvec[0] = -float(dicom_[Tag(
            0x0019, 0x10bb)].value)  # invert based upon mricron output
        original_bvec[1] = float(dicom_[Tag(0x0019, 0x10bc)].value)
        original_bvec[2] = float(dicom_[Tag(0x0019, 0x10bd)].value)

        # Add calculated B Value
        if original_bval != 0:  # only normalize if there is a value
            corrected_bval = original_bval * pow(
                numpy.linalg.norm(original_bvec), 2)
            if numpy.linalg.norm(original_bvec) != 0:
                normalized_bvec = original_bvec / numpy.linalg.norm(
                    original_bvec)
            else:
                normalized_bvec = original_bvec
        else:
            corrected_bval = original_bval
            normalized_bvec = original_bvec

        bvals[group_index] = int(round(corrected_bval))  # we want the original numbers back as in the protocol
        bvecs[group_index, :] = normalized_bvec

    return bvals, bvecs
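The b-value correction above scales the stored value by the squared norm of the gradient vector and then normalizes the direction; a minimal numpy sketch with illustrative numbers:

import numpy

original_bval = 1000.0
original_bvec = numpy.array([0.5, 0.5, 0.7071])

corrected_bval = original_bval * numpy.linalg.norm(original_bvec) ** 2
normalized_bvec = original_bvec / numpy.linalg.norm(original_bvec)
print(int(round(corrected_bval)), normalized_bvec)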
Example #27
    def __setitem__(self, key, value):
        """Operator for dataset[key]=value. Check consistency, and deal with private tags"""
        if not isinstance(value, (DataElement, RawDataElement)):  # ok if is subclass, e.g. DeferredDataElement
            raise TypeError("Dataset contents must be DataElement instances.\n"
                            "To set a data_element value use data_element.value=val")
        tag = Tag(value.tag)
        if key != tag:
            raise ValueError("data_element.tag must match the dictionary key")

        data_element = value
        if tag.is_private:
            # See PS 3.5-2008 section 7.8.1 (p. 44) for how blocks are reserved
            logger.debug("Setting private tag %r" % tag)
            private_block = tag.elem >> 8
            private_creator_tag = Tag(tag.group, private_block)
            if private_creator_tag in self and tag != private_creator_tag:
                if isinstance(data_element, RawDataElement):
                    data_element = DataElement_from_raw(data_element, self._character_set)
                data_element.private_creator = self[private_creator_tag].value
        dict.__setitem__(self, tag, data_element)
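A short sketch of the private-block arithmetic used above: the high byte of a private element points back at the creator element that reserved the block (tags illustrative, pydicom assumed installed):

from pydicom.tag import Tag

tag = Tag(0x0009, 0x1005)
private_creator_tag = Tag(tag.group, tag.elem >> 8)
print(tag.is_private, private_creator_tag)   # True (0009, 0010)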
Example #28
def _is_multiframe_anatomical(dicom_input):
    """
    Use this function to detect if a dicom series is a philips multiframe anatomical dataset
    NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
    (containing one series)
    """
    # check if it is multi frame dicom
    if not is_multiframe_dicom(dicom_input):
        return False

    header = dicom_input[0]

    # check if there are multiple stacks
    number_of_stack_slices = common.get_ss_value(header[Tag(
        0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
    number_of_stacks = int(int(header.NumberOfFrames) / number_of_stack_slices)

    if number_of_stacks > 1:
        return False

    return True
Example #29
    def testRTDose(self):
        """Returns correct values for sample data elements in test RT Dose file"""
        dose = read_file(rtdose_name)
        self.assertEqual(dose.FrameIncrementPointer, Tag((0x3004, 0x000c)),
                         "Frame Increment Pointer not the expected value")
        self.assertEqual(dose.FrameIncrementPointer, dose[0x28, 9].value,
                         "FrameIncrementPointer does not match the value accessed by tag number")

        # try a value that is nested the deepest (so deep I break it into two steps!)
        fract = dose.ReferencedRTPlanSequence[0].ReferencedFractionGroupSequence[0]
        beamnum = fract.ReferencedBeamSequence[0].ReferencedBeamNumber
        self.assertEqual(beamnum, 1, "Beam number not the expected value")
Example #30
def _is_diffusion_imaging(header_input):
    """
    Use this function to detect if a dicom series is a siemens dti dataset
    NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
    (containing one series)
    """

    # bval and bvec should be present
    if Tag(0x0019, 0x100c) not in header_input:
        return False

    return True
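Finally, a hedged sketch of the tag-membership test this check relies on, on an in-memory dataset (the private element is illustrative):

from pydicom.dataset import Dataset
from pydicom.tag import Tag

ds = Dataset()
ds.add_new(Tag(0x0019, 0x100C), 'IS', '1000')   # illustrative b-value element
print(Tag(0x0019, 0x100C) in ds)                # True
print(Tag(0x0019, 0x100E) in ds)                # False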