def __setitem__(self, key, value):
    """Operator for dataset[key] = value.

    Check consistency, and deal with private tags.
    """
    if not isinstance(value, (DataElement, RawDataElement)):
        # ok if is subclass, e.g. DeferredDataElement
        raise TypeError("Dataset contents must be DataElement instances.\n"
                        "To set a data_element value use data_element.value=val")
    tag = Tag(value.tag)
    if key != tag:
        raise ValueError("data_element.tag must match the dictionary key")

    data_element = value
    if tag.is_private:
        # See PS 3.5-2008 section 7.8.1 (p. 44) for how blocks are reserved
        logger.debug("Setting private tag %r" % tag)
        private_block = tag.elem >> 8
        private_creator_tag = Tag(tag.group, private_block)
        if private_creator_tag in self and tag != private_creator_tag:
            if isinstance(data_element, RawDataElement):
                data_element = DataElement_from_raw(data_element, self._character_set)
            data_element.private_creator = self[private_creator_tag].value
    dict.__setitem__(self, tag, data_element)

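# A minimal sketch of the private-block convention handled above (PS 3.5 7.8.1).
# Group 0x0041, block 0x10 and the creator string 'ACME 1.1' are made-up
# illustration values; the imports assume the older "dicom" package layout used
# elsewhere in this repo (use the "pydicom" namespace on current versions).
from dicom.dataset import Dataset
from dicom.dataelem import DataElement
from dicom.tag import Tag

ds = Dataset()
ds.add_new(Tag(0x0041, 0x0010), 'LO', 'ACME 1.1')  # reserves private block 0x10 of group 0x0041
ds[Tag(0x0041, 0x1001)] = DataElement(Tag(0x0041, 0x1001), 'LO', 'some value')
# __setitem__ sees element 0x1001, derives block 0x1001 >> 8 == 0x10, finds the
# creator element (0041,0010) and records 'ACME 1.1' as the private_creator.
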
def get_frame_ds(frame, ds):
    # "frame" starts from 1, not 0. Need to subtract 1 for correct indexing.
    slicenum = frame - 1
    n_frames = ds.NumberOfFrames
    print("Extracting frame " + str(frame) + " of " + str(n_frames))

    # Copy the original dataset, then strip the multiframe functional group sequences
    ds_new = copy.deepcopy(ds)
    del ds_new[Tag(0x5200, 0x9230)]  # Per-frame Functional Groups Sequence
    del ds_new[Tag(0x5200, 0x9229)]  # Shared Functional Groups Sequence

    # Re-add the shared tags, then the per-frame tags for this slice
    ds_new = add_all(ds_new, ds[0x5200, 0x9229][0])
    ds_new = add_all(ds_new, ds[0x5200, 0x9230][slicenum])

    # Copy this frame's pixel data (assumes 2 bytes per pixel, hence the * 2)
    rows = int(ds_new.Rows)
    cols = int(ds_new.Columns)
    ds_new.PixelData = ds.PixelData[slicenum * (rows * cols * 2):(slicenum + 1) * (rows * cols * 2)]

    # Replace the instance number and frame count for the split dataset
    ds_new.InstanceNumber = frame
    ds_new.NumberOfFrames = 1
    ds_new._character_set = ds._character_set
    return ds_new

def apply_scaling(data, dicom_headers):
    """
    Rescale the data based on the RescaleSlope and RescaleIntercept
    Based on the scaling from pydicomseries

    :param dicom_headers: dicom headers to use to retrieve the scaling factors
    :param data: the input data
    """
    # Apply the rescaling if needed
    private_scale_slope_tag = Tag(0x2005, 0x100E)
    private_scale_intercept_tag = Tag(0x2005, 0x100D)
    if 'RescaleSlope' in dicom_headers or 'RescaleIntercept' in dicom_headers \
            or private_scale_slope_tag in dicom_headers or private_scale_intercept_tag in dicom_headers:
        rescale_slope = 1
        rescale_intercept = 0
        private_scale_slope = 1.0
        private_scale_intercept = 0.0
        if 'RescaleSlope' in dicom_headers:
            rescale_slope = dicom_headers.RescaleSlope
        if 'RescaleIntercept' in dicom_headers:
            rescale_intercept = dicom_headers.RescaleIntercept
        try:
            # this section can sometimes fail due to unknown private fields
            if private_scale_slope_tag in dicom_headers:
                private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)
            if private_scale_intercept_tag in dicom_headers:
                private_scale_intercept = float(dicom_headers[private_scale_intercept_tag].value)
        except Exception:
            pass
        return do_scaling(data, rescale_slope, rescale_intercept, private_scale_slope, private_scale_intercept)
    else:
        return data

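# Hypothetical sketch of a do_scaling helper, only to show how the factors
# gathered above could be combined.  The real do_scaling is not shown in this
# file and may differ; the public DICOM rescale below is standard, while how
# the Philips private factors (2005,100E)/(2005,100D) are folded in is
# vendor-specific and deliberately left as a comment rather than guessed at.
import numpy

def do_scaling_sketch(data, rescale_slope, rescale_intercept,
                      private_scale_slope, private_scale_intercept):
    data = numpy.asarray(data, dtype=numpy.float64)
    # Standard DICOM rescale: output = stored_value * RescaleSlope + RescaleIntercept
    data = data * rescale_slope + rescale_intercept
    # private_scale_slope / private_scale_intercept would be applied here per
    # the Philips convention used by the real do_scaling.
    return data
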
def setUp(self):
    self.int1 = 0x300a00b0
    self.tup1 = (0x300a, 0xb0)
    self.tup3 = (0xFFFE, 0xFFFC)
    self.t1 = Tag(self.int1)
    self.t2 = Tag(self.tup1)
    self.t3 = Tag(self.tup3)

def testNameFinding(self):
    """dicom_dictionary: get long and short names for a data_element name"""
    names = all_names_for_tag(Tag(0x300a00b2))  # Treatment Machine Name
    expected = ['TreatmentMachineName']
    self.assertEqual(names, expected, "Expected %s, got %s" % (expected, names))
    names = all_names_for_tag(Tag(0x300A0120))
    expected = ['BeamLimitingDeviceAngle', 'BLDAngle']
    self.assertEqual(names, expected, "Expected %s, got %s" % (expected, names))

def testJPEG2000(self):
    """JPEG2000: Returns correct values for sample data elements............"""
    expected = [Tag(0x0054, 0x0010), Tag(0x0054, 0x0020)]  # XX also tests multiple-valued AT data element
    got = self.jpeg.FrameIncrementPointer
    self.assertEqual(got, expected,
                     "JPEG2000 file, Frame Increment Pointer: expected %s, got %s" % (expected, got))
    got = self.jpeg.DerivationCodeSequence[0].CodeMeaning
    expected = 'Lossy Compression'
    self.assertEqual(got, expected,
                     "JPEG2000 file, Code Meaning got %s, expected %s" % (got, expected))

def dicom_to_nifti(dicom_input, output_file):
    """
    This function will convert an anatomical dicom series to a nifti

    Examples: See unit test

    :param output_file: filepath to the output nifti
    :param dicom_input: directory with the dicom files for a single scan, or list of read in dicoms
    """
    if len(dicom_input) <= 0:
        raise ConversionError('NO_DICOM_FILES_FOUND')

    dicom_input = sorted(dicom_input, key=lambda k: k.InstanceNumber)

    # remove localizers based on image type
    dicom_input = _remove_localizers_by_imagetype(dicom_input)
    if settings.validate_slicecount:
        # remove localizers based on image orientation (only valid if slicecount is validated)
        dicom_input = _remove_localizers_by_orientation(dicom_input)
        # validate that the remaining files form a complete stack of slices
        common.validate_slicecount(dicom_input)
    if settings.validate_orientation:
        # validate that all slices have the same orientation
        common.validate_orientation(dicom_input)
    if settings.validate_orthogonal:
        # validate that we have an orthogonal image (to detect gantry tilting etc)
        common.validate_orthogonal(dicom_input)

    dicom_input = sorted(dicom_input, key=lambda k: k.InstanceNumber)

    if settings.validate_sliceincrement:
        # validate that all slices have a consistent slice increment
        common.validate_sliceincrement(dicom_input)

    # Get data; originally z,y,x, transposed to x,y,z
    data = common.get_volume_pixeldata(dicom_input)
    affine = common.create_affine(dicom_input)

    # Convert to nifti
    img = nibabel.Nifti1Image(data, affine)

    # Set TR and TE if available: (0018,0080) Repetition Time, (0018,0081) Echo Time
    if Tag(0x0018, 0x0080) in dicom_input[0] and Tag(0x0018, 0x0081) in dicom_input[0]:
        common.set_tr_te(img, float(dicom_input[0].RepetitionTime), float(dicom_input[0].EchoTime))

    # Save to disk
    print('Saving nifti to disk %s' % output_file)
    img.to_filename(output_file)

    return {'NII_FILE': output_file}

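# Illustrative call only: the directory path and output filename are
# placeholders.  dicom_to_nifti expects the series to be read in already
# (a list of pydicom datasets), done here with the older "dicom" package
# import style used elsewhere in this repo.
import os
import dicom

dicom_directory = '/data/t1_series'  # hypothetical input directory
dicoms = [dicom.read_file(os.path.join(dicom_directory, f))
          for f in os.listdir(dicom_directory)]
result = dicom_to_nifti(dicoms, '/tmp/t1_series.nii.gz')
print(result['NII_FILE'])
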
def write_ATvalue(fp, data_element):
    """Write a data_element tag to a file."""
    try:
        iter(data_element.value)  # see if is multi-valued AT;
        # Note will fail if Tag ever derived from true tuple rather than being a long
    except TypeError:
        tag = Tag(data_element.value)  # make sure is expressed as a Tag instance
        fp.write_tag(tag)
    else:
        tags = [Tag(tag) for tag in data_element.value]
        for tag in tags:
            fp.write_tag(tag)

def testNoPixelsRead(self):
    """Returns all data elements before pixels using stop_before_pixels=True"""
    # Just check the tags, and a couple of values
    ctpartial = read_file(ct_name, stop_before_pixels=True)
    ctpartial_tags = sorted(ctpartial.keys())
    ctfull = read_file(ct_name)
    ctfull_tags = sorted(ctfull.keys())
    msg = "Tag list of partial CT read (except pixel tag and padding) did not match full read"
    msg += "\nExpected: %r\nGot %r" % (ctfull_tags[:-2], ctpartial_tags)
    missing = [Tag(0x7fe0, 0x10), Tag(0xfffc, 0xfffc)]
    self.assertEqual(ctfull_tags, ctpartial_tags + missing, msg)

def testGetFromRaw(self):
    """Dataset: get(tag) returns same object as ds[tag] for raw element.."""
    # This came from issue 88, where get(tag#) returned a RawDataElement,
    # while get(name) converted to a true DataElement
    test_tag = 0x100010
    test_elem = RawDataElement(Tag(test_tag), 'PN', 4, 'test', 0, True, True)
    ds = Dataset({Tag(test_tag): test_elem})
    by_get = ds.get(test_tag)
    by_item = ds[test_tag]
    # self.assertEqual(type(elem_get), type(name_get),
    #                  "Dataset.get() returned different type for name vs tag access")
    msg = ("Dataset.get() returned different objects for ds.get(tag) and ds[tag]:\n"
           "By get():%r\nBy ds[tag]:%r\n")
    self.assertEqual(by_get, by_item, msg % (by_get, by_item))

def _is_bval_type_b(grouped_dicoms):
    """
    Check if the bvals are stored in the second of 2 currently known ways for single frame dti
    """
    bval_tag = Tag(0x0018, 0x9087)
    bvec_tag = Tag(0x0018, 0x9089)
    for group in grouped_dicoms:
        if bvec_tag in group[0] and bval_tag in group[0]:
            bvec = common.get_fd_array_value(group[0][bvec_tag], 3)
            bval = common.get_fd_value(group[0][bval_tag])
            if _is_float(bvec[0]) and _is_float(bvec[1]) and _is_float(bvec[2]) \
                    and _is_float(bval) and bval != 0:
                return True
    return False

def saveImage(m, b, minMax):
    # 'image' is presumably a module-level pydicom dataset loaded elsewhere
    image['0008', '0008'].value[0] = 'DERIVED'    # Image Type
    image['0008', '0008'].value[1] = 'SECONDARY'  # Image Type
    image['0008', '103E'].value = 'Description'   # Series Description
    image.add_new(Tag(['0008', '103F']), 'LO', 'Series description code')    # Series Description Code
    image.add_new(Tag(['0020', '0016']), 'LO', uuid.uuid4().hex)             # SOP Instance UID
    image.add_new(Tag(['0002', '0003']), 'LO', image['0020', '0016'].value)  # Media Storage SOP Instance UID
    image['0020', '000E'].value = uuid.uuid4().hex  # Series Instance UID
    image.add_new(Tag(['0028', '0106']), 'FL', minMax[0])  # Smallest Image Pixel Value
    image.add_new(Tag(['0028', '0107']), 'FL', minMax[1])  # Largest Image Pixel Value
    image['0028', '1052'].value = b   # Rescale Intercept
    image['0028', '1053'].value = m   # Rescale Slope
    image['0028', '0100'].value = 16  # Bits Allocated
    image.save_as("newfilename.dcm")

def _create_bvecs(sorted_dicoms, bvec_file):
    """
    Calculate the bvecs and write them to a bvec file
    # inspired by dicom2nii from mricron
    # see http://users.fmrib.ox.ac.uk/~robson/internal/Dicom2Nifti111.m
    """
    if type(sorted_dicoms[0]) is list:
        dicom_headers = sorted_dicoms[0][0]
    else:
        dicom_headers = sorted_dicoms[0]

    # get the patient orientation
    image_orientation = dicom_headers.ImageOrientationPatient
    read_vector = numpy.array([float(image_orientation[0]), float(image_orientation[1]), float(image_orientation[2])])
    phase_vector = numpy.array([float(image_orientation[3]), float(image_orientation[4]), float(image_orientation[5])])
    mosaic_vector = numpy.cross(read_vector, phase_vector)

    # normalize the vectors
    read_vector /= numpy.linalg.norm(read_vector)
    phase_vector /= numpy.linalg.norm(phase_vector)
    mosaic_vector /= numpy.linalg.norm(mosaic_vector)

    # create an empty array for the new bvecs
    bvecs = numpy.zeros([len(sorted_dicoms), 3])

    # for each slice calculate the new bvec
    for index in range(0, len(sorted_dicoms)):
        if type(sorted_dicoms[0]) is list:
            dicom_headers = sorted_dicoms[index][0]
        else:
            dicom_headers = sorted_dicoms[index]

        # get the bval as this is needed in some checks
        bval = common.get_is_value(dicom_headers[Tag(0x0019, 0x100c)])
        # get the bvec if it exists in the headers
        bvec = numpy.array([0, 0, 0])
        if Tag(0x0019, 0x100e) in dicom_headers:
            # in case of implicit VR the private field cannot be split into an array, we do this here
            bvec = numpy.array(common.get_fd_array_value(dicom_headers[Tag(0x0019, 0x100e)], 3))

        # if bval is 0 or the vector is 0 no projection is needed and the vector is 0,0,0
        new_bvec = numpy.array([0, 0, 0])
        if bval > 0 and not (bvec == [0, 0, 0]).all():
            # project the bvec and invert the y direction
            new_bvec = numpy.array(
                [numpy.dot(bvec, read_vector), -numpy.dot(bvec, phase_vector), numpy.dot(bvec, mosaic_vector)])
            # normalize the bvec
            new_bvec /= numpy.linalg.norm(new_bvec)
        bvecs[index, :] = new_bvec

    # save the found bvecs to the file
    common.write_bvec_file(bvecs, bvec_file)

def _write_file_meta_info(fp, meta_dataset):
    """Write the DICOM group 2 File Meta Information to the file.

    The file should already be positioned past the 128 byte preamble.
    Raises ValueError if the required data_elements (elements 2,3,0x10,0x12)
    are not in the dataset. If the dataset came from a file read with
    read_file(), then the required data_elements should already be there.
    """
    fp.write(b'DICM')

    # File meta info is always LittleEndian, Explicit VR. After will change these
    # to the transfer syntax values set in the meta info
    fp.is_little_endian = True
    fp.is_implicit_VR = False

    if Tag((2, 1)) not in meta_dataset:
        meta_dataset.add_new((2, 1), 'OB', b"\0\1")  # file meta information version

    # Now check that required meta info tags are present:
    missing = []
    for element in [2, 3, 0x10, 0x12]:
        if Tag((2, element)) not in meta_dataset:
            missing.append(Tag((2, element)))
    if missing:
        raise ValueError(
            "Missing required tags {0} for file meta information".format(str(missing)))

    # Put in temp number for required group length, save current location to come back
    meta_dataset[(2, 0)] = DataElement((2, 0), 'UL', 0)  # put 0 to start
    group_length_data_element_size = 12  # !based on DICOM std ExplVR
    group_length_tell = fp.tell()

    # Write the file meta dataset, including temp group length
    length = write_dataset(fp, meta_dataset)
    group_length = length - group_length_data_element_size  # counts from end of that

    # Save end of file meta to go back to
    end_of_file_meta = fp.tell()

    # Go back and write the actual group length
    fp.seek(group_length_tell)
    group_length_data_element = DataElement((2, 0), 'UL', group_length)
    write_data_element(fp, group_length_data_element)

    # Return to end of file meta, ready to write remainder of the file
    fp.seek(end_of_file_meta)

def _get_grouped_dicoms(dicom_input):
    """
    Search all dicoms in the dicom directory, sort and validate them
    """
    # Order all dicom files by InstanceNumber
    dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)

    # now group per stack
    grouped_dicoms = [[]]  # list with first element a list
    timepoint_index = 0
    previous_stack_position = -1

    # loop over all sorted dicoms
    stack_position_tag = Tag(0x2001, 0x100a)  # hoisted out of the loop, as this lookup is slow and used a lot
    for index in range(0, len(dicoms)):
        dicom_ = dicoms[index]
        stack_position = 0
        if stack_position_tag in dicom_:
            stack_position = common.get_is_value(dicom_[stack_position_tag])
        if previous_stack_position == stack_position:
            # if the stack number is the same we move to the next timepoint
            timepoint_index += 1
            if len(grouped_dicoms) <= timepoint_index:
                grouped_dicoms.append([])
        else:
            # if it changes move back to the first timepoint
            timepoint_index = 0
        grouped_dicoms[timepoint_index].append(dicom_)
        previous_stack_position = stack_position

    return grouped_dicoms

def read_delimiter_item(fp, delimiter):
    """Read and ignore an expected delimiter.

    If the delimiter is not found or correctly formed, a warning is logged.
    """
    found = fp.read(4)
    if found != delimiter:
        logger.warn("Expected delimiter %s, got %s at file position 0x%x",
                    Tag(delimiter), Tag(found), fp.tell() - 4)
    length = fp.read_UL()
    if length != 0:
        logger.warn("Expected delimiter item to have length 0, got %d at file position 0x%x",
                    length, fp.tell() - 4)

def update(self, dictionary): """Extend dict.update() to handle *named tags*.""" for key, value in dictionary.items(): if is_stringlike(key): setattr(self, key, value) else: self[Tag(key)] = value
def get_private_entry(tag, private_creator):
    """Return the tuple (VR, VM, name, is_retired) from a private dictionary"""
    tag = Tag(tag)
    try:
        private_dict = private_dictionaries[private_creator]
    except KeyError:
        raise KeyError("Private creator {0} not in private dictionary".format(private_creator))

    # private elements are usually agnostic for "block" (see PS3.5-2008 7.8.1 p44)
    # Some elements in _private_dict are explicit; most have "xx" for high-byte of element
    # Try exact key first, but then try with "xx" in block position
    try:
        dict_entry = private_dict[tag]
    except KeyError:
        # so here put in the "xx" in the block position for key to look up
        group_str = "%04x" % tag.group
        elem_str = "%04x" % tag.elem
        key = "%sxx%s" % (group_str, elem_str[-2:])
        if key not in private_dict:
            raise KeyError(
                "Tag {0} not in private dictionary for private creator {1}".format(key, private_creator))
        dict_entry = private_dict[key]
    return dict_entry

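# Illustrative lookup only (the creator string is a placeholder, not taken from
# this codebase): thanks to the "xx" block-agnostic keys above, a tag such as
# (2005,100E) stored in any reserved block is matched under the key "2005xx0e".
#
#   vr, vm, name, is_retired = get_private_entry(
#       Tag(0x2005, 0x100E), 'SOME PRIVATE CREATOR')
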
def prep_data(self, PathDicom):
    self.PathDicom = PathDicom
    self.lstFilesDCM = []
    self.lstRSFile = []
    self.Dicom_info = []
    fileList = []
    for dirName, dirs, fileList in os.walk(PathDicom):
        break
    if len(fileList) < 10:  # fewer than 10 files: treat as no usable series and bail out
        return None
    for filename in fileList:
        try:
            ds = dicom.read_file(os.path.join(dirName, filename))
            if ds.Modality == 'CT' or ds.Modality == 'MR':  # check whether the file is a CT or MR image
                self.lstFilesDCM.append(os.path.join(dirName, filename))
                self.Dicom_info.append(ds)
            elif ds.Modality == 'RTSTRUCT':
                self.lstRSFile = os.path.join(dirName, filename)
        except:
            # skip files that are not readable DICOM
            continue
    self.RefDs = dicom.read_file(self.lstFilesDCM[0])
    self.mask_exist = False
    if self.lstRSFile:
        self.RS_struct = dicom.read_file(self.lstRSFile)
        if Tag((0x3006, 0x020)) in self.RS_struct.keys():
            self.ROI_Structure = self.RS_struct.StructureSetROISequence
        else:
            self.ROI_Structure = []
        self.rois_in_case = []
        for Structures in self.ROI_Structure:
            self.rois_in_case.append(Structures.ROIName)

def _is_bval_type_a(grouped_dicoms):
    """
    Check if the bvals are stored in the first of 2 currently known ways for single frame dti
    """
    bval_tag = Tag(0x2001, 0x1003)
    bvec_x_tag = Tag(0x2005, 0x10b0)
    bvec_y_tag = Tag(0x2005, 0x10b1)
    bvec_z_tag = Tag(0x2005, 0x10b2)
    for group in grouped_dicoms:
        if bvec_x_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_x_tag])) and \
                bvec_y_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_y_tag])) and \
                bvec_z_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_z_tag])) and \
                bval_tag in group[0] and _is_float(common.get_fl_value(group[0][bval_tag])) and \
                common.get_fl_value(group[0][bval_tag]) != 0:
            return True
    return False

def _classic_get_grouped_dicoms(dicom_input):
    """
    Search all dicoms in the dicom directory, sort and validate them
    """
    # Order all dicom files by InstanceNumber
    dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)

    # now group per stack
    grouped_dicoms = []

    # loop over all sorted dicoms
    stack_position_tag = Tag(0x0020, 0x0012)  # in this case it is the acquisition number
    for index in range(0, len(dicoms)):
        dicom_ = dicoms[index]
        if stack_position_tag not in dicom_:
            stack_index = 0
        else:
            stack_index = dicom_[stack_position_tag].value - 1
        while len(grouped_dicoms) <= stack_index:
            grouped_dicoms.append([])
        grouped_dicoms[stack_index].append(dicom_)

    return grouped_dicoms

def CleanName(tag):
    """Return the dictionary descriptive text string but without bad characters.

    Used for e.g. *named tags* of Dataset instances
    (before DICOM keywords were part of the standard)
    """
    tag = Tag(tag)
    if tag not in DicomDictionary:
        if tag.element == 0:  # 0=implied group length in DICOM versions < 3
            return "GroupLength"
        else:
            return ""
    s = dictionary_description(tag)  # Descriptive name in dictionary
    # remove blanks and nasty characters
    s = s.translate(normTable, r""" !@#$%^&*(),;:.?\|{}[]+-="'’/""")

    # Take "Sequence" out of name as more natural sounding
    # e.g. "BeamSequence"->"Beams"; "ReferencedImageBoxSequence"->"ReferencedImageBoxes"
    # 'Other Patient ID' exists as single value AND as sequence so check for it and leave 'Sequence' in
    if dictionaryVR(tag) == "SQ" and not s.startswith("OtherPatientIDs"):
        if s.endswith("Sequence"):
            s = s[:-8] + "s"
            if s.endswith("ss"):
                s = s[:-1]
            if s.endswith("xs"):
                s = s[:-1] + "es"
            if s.endswith("Studys"):
                s = s[:-2] + "ies"
    return s

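# Quick illustration of the renaming above, assuming the standard dictionary
# entries (results not verified against this exact dictionary version):
#   CleanName(0x300a00b0)  ->  "Beams"          (Beam Sequence, VR SQ)
#   CleanName(0x00100010)  ->  "PatientsName"   (non-SQ, only bad characters stripped)
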
def update(self, dictionary): """Extend dict.update() to handle DICOM keywords.""" for key, value in dictionary.items(): if isinstance(key, (str, unicode)): setattr(self, key, value) else: self[Tag(key)] = value
def __init__(self, tag, VR, value, file_value_tell=None,
             is_undefined_length=False, already_converted=False):
    """Create a data element instance.

    Most user code should instead use DICOM keywords to create data_elements,
    for which only the value is supplied, and the VR and tag are determined
    from the dicom dictionary.

    tag -- dicom (group, element) tag in any form accepted by Tag().
    VR -- dicom value representation (see DICOM standard part 6)
    value -- the value of the data element. One of the following:
        - a single string value
        - a number
        - a list or tuple with all strings or all numbers
        - a multi-value string with backslash separator
    file_value_tell -- used internally by Dataset, to store the write
        position for ReplaceDataElementValue method
    is_undefined_length -- used internally to store whether the length
        field in this data element was 0xFFFFFFFFL, i.e. "undefined length"
    """
    self.tag = Tag(tag)
    self.VR = VR  # Note!: you must set VR before setting value
    if already_converted:
        self._value = value
    else:
        self.value = value  # calls property setter which will convert
    self.file_tell = file_value_tell
    self.is_undefined_length = is_undefined_length

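# Examples of the value forms accepted above (tags and values are arbitrary
# illustrations, constructed directly with DataElement as in the docstring):
elem_str = DataElement(0x00100010, 'PN', 'Doe^John')              # single string (Patient Name)
elem_num = DataElement(0x00280010, 'US', 512)                     # a number (Rows)
elem_list = DataElement(0x00280030, 'DS', ['0.5', '0.5'])         # list of strings (Pixel Spacing)
elem_multi = DataElement(0x00080008, 'CS', r'ORIGINAL\PRIMARY')   # backslash-separated multi-value (Image Type)
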
def _get_grouped_dicoms(dicom_input):
    """
    Search all dicoms in the dicom directory, sort and validate them
    """
    # Order all dicom files by InstanceNumber
    dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)

    # now group per stack
    grouped_dicoms = [[]]  # list with first element a list
    stack_index = 0
    previous_stack_position = -1

    # loop over all sorted dicoms
    stack_position_tag = Tag(0x0020, 0x9057)  # hoisted out of the loop, as this lookup is slow and used a lot
    for index in range(0, len(dicoms)):
        dicom_ = dicoms[index]
        # if the stack number decreases we moved to the next stack
        stack_position = 0
        if stack_position_tag in dicom_:
            stack_position = dicom_[stack_position_tag].value
        if previous_stack_position > stack_position:
            stack_index += 1
            grouped_dicoms.append([])
        grouped_dicoms[stack_index].append(dicom_)
        previous_stack_position = stack_position

    return grouped_dicoms

def testGetDefault4(self):
    """Dataset: dataset.get() returns default for non-existing Tag......."""
    ds = self.dummy_dataset()
    not_there = ds.get(Tag(0x99999999), "not-there")
    self.assertEqual(not_there, "not-there",
                     "dataset.get() did not return default value for non-member by Tag")

def testGetExists4(self):
    """Dataset: dataset.get() returns an existing item by Tag............"""
    ds = self.dummy_dataset()
    unit = ds.get(Tag(0x300A00B2), None).value
    self.assertEqual(unit, 'unit001',
                     "dataset.get() did not return existing member by Tag")

def _is_multiframe_4d(dicom_input):
    """
    Use this function to detect if a dicom series is a philips multiframe 4D dataset
    """
    # check if it is multi frame dicom
    if not is_multiframe_dicom(dicom_input):
        return False

    header = dicom_input[0]

    # check if there are multiple stacks
    number_of_stack_slices = common.get_ss_value(header[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
    number_of_stacks = int(int(header.NumberOfFrames) / number_of_stack_slices)
    if number_of_stacks <= 1:
        return False

    return True

def __delitem__(self, key):
    """Intercept requests to delete an attribute by key, e.g. del ds[tag]"""
    # Assume it is a standard tag (for speed in the common case)
    try:
        dict.__delitem__(self, key)
    # If not a standard tag, then convert to Tag and try again
    except KeyError:
        tag = Tag(key)
        dict.__delitem__(self, tag)

def _get_asconv_headers(mosaic):
    """
    Getter for the ASCCONV headers (ASCII header info stored in the dicom)
    """
    asconv_headers = re.findall(r'### ASCCONV BEGIN(.*)### ASCCONV END ###',
                                mosaic[Tag(0x0029, 0x1020)].value.decode(encoding='ISO-8859-1'),
                                re.DOTALL)[0]
    return asconv_headers
