def _create_empty_pixel_measures_sequence(self) -> pydicom.Dataset:
    ds = pydicom.Dataset()
    ds.SharedFunctionalGroupsSequence = pydicom.Sequence([pydicom.Dataset()])
    ds.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence = pydicom.Sequence(
        [pydicom.Dataset()]
    )
    return ds
def test_dicom_from_dict():
    baseline_dataset = pydicom.Dataset()
    baseline_dataset.Manufacturer = 'PyMedPhys'
    beam_sequence = pydicom.Dataset()
    beam_sequence.Manufacturer = 'PyMedPhys'
    baseline_dataset.BeamSequence = [beam_sequence]

    created_dataset = dicom_dataset_from_dict({
        'Manufacturer': 'PyMedPhys',
        'BeamSequence': [{
            'Manufacturer': 'PyMedPhys'
        }]
    })

    assert created_dataset == baseline_dataset
def gen_file(self, fn=None):
    # print("Setting file meta information...")
    # Populate required values for file meta information
    file_meta = pydicom.Dataset()
    file_meta.FileMetaInformationGroupLength = 60  # Will be rewritten but must exist
    file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT
    file_meta.MediaStorageSOPInstanceUID = "1.2.3"
    file_meta.ImplementationClassUID = "1.2.3.4"

    # Wants all of these to be legit:
    #   * (0002,0000) FileMetaInformationGroupLength, UL, 4
    #   * (0002,0001) FileMetaInformationVersion, OB, 2
    # X * (0002,0002) MediaStorageSOPClassUID, UI, N
    # X * (0002,0003) MediaStorageSOPInstanceUID, UI, N
    #   * (0002,0010) TransferSyntaxUID, UI, N
    # X * (0002,0012) ImplementationClassUID, UI, N

    ds = pydicom.FileDataset(fn, self.as_pydicom_ds(),
                             file_meta=file_meta, preamble=b"\0" * 128)
    # print(ds)

    with NamedTemporaryFile() as f:
        ds.save_as(filename=f.name, write_like_original=True)
        self.file = f.read()
def _encode_response(instance, response_attrs: list, encoding: str):
    """Creates a C-FIND response dataset

    :param instance: database model instance
    :type instance: peewee.Model
    :param response_attrs: list of response attributes (tag, attribute name in
        the database model, VR and optional conversion function)
    :type response_attrs: list
    :param encoding: response encoding
    :type encoding: str
    :return: C-FIND-RSP dataset
    :rtype: pydicom.Dataset
    """
    rsp = pydicom.Dataset()
    rsp.SpecificCharacterSet = encoding
    for tag, attr_name, vr, func in response_attrs:
        if attr_name is None:
            # Attribute not supported
            rsp.add_new(tag, vr, None)
        else:
            if not isinstance(attr_name, tuple):
                attr = getattr(instance, attr_name)
            else:
                # Tuple means a chain of related-model attributes to walk
                attr = instance
                for field in attr_name:
                    attr = getattr(attr, field)
            if func:
                attr = func(attr)
            rsp.add_new(tag, vr, attr)
    return rsp
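# Hedged usage sketch for _encode_response above: any object whose attributes match the
# configured names can stand in for the peewee model instance; SimpleNamespace and the
# attribute names below are purely illustrative.
from types import SimpleNamespace

row = SimpleNamespace(patient_name='Doe^John', patient_id='12345')
response_attrs = [
    (0x00100010, 'patient_name', 'PN', None),
    (0x00100020, 'patient_id', 'LO', None),
    (0x00100040, None, 'CS', None),  # attribute not supported -> returned empty
]
rsp = _encode_response(row, response_attrs, 'ISO_IR 192')
assert rsp.PatientID == '12345'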
def set_metadata(self, key, value, force: bool = False):
    """Sets metadata for all headers.

    Args:
        key (str or pydicom.BaseTag): Metadata field to access.
        value (Any): The value.
        force (bool, optional): If ``True``, force the header to set key even
            if key does not exist in header.

    Raises:
        ValueError: If ``self._headers`` is ``None`` and ``force`` is ``False``.
    """
    if self._headers is None:
        if not force:
            raise ValueError(
                "No headers found. To generate headers and write keys, `force` must be True."
            )
        self._headers = self._validate_and_format_headers([pydicom.Dataset()])
        warnings.warn(
            "Headers were generated and may not contain all attributes "
            "required to save the volume in DICOM format."
        )

    # Fallback VRs used when the key is unknown to the header.
    # Note: "LT" (long text) is used for strings because "LS" is not a valid DICOM VR.
    VR_registry = {float: "DS", int: "IS", str: "LT"}
    for h in self.headers(flatten=True):
        if force and key not in h:
            try:
                setattr(h, key, value)
            except TypeError:
                h.add_new(key, VR_registry[type(value)], value)
        else:
            h[key].value = value
def test_add_private_tag(self):
    ds = pydicom.Dataset()
    tagger.add_private_tag(ds, self.group, self.identifier, self.tag_value)
    identifier_elem = ds.get((self.group, 0x0010))
    private_elem = ds.get((self.group, 0x1000))
    self.assertTrue(identifier_elem.value == "Flywheel")
    self.assertTrue(private_elem.value == "DICOM Send")
def mk_dcm(dcm_path, slice_data, meta):
    file_meta = pydicom.Dataset()
    file_meta.TransferSyntaxUID = '1.2.840.10008.1.2'
    # Secondary Capture Image Storage (SOP Class must be a UID, not the class name)
    file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.7'
    file_meta.MediaStorageSOPInstanceUID = '1.3.6.1.4.1.9590.100.1.1.111165684411017669021768385720736873780'
    file_meta.ImplementationClassUID = '1.3.6.1.4.1.9590.100.1.0.100.4.0'

    ds = pydicom.FileDataset(dcm_path, {}, file_meta=file_meta, preamble=b'\0' * 128)
    ds.Modality = 'WSD'
    ds.ContentDate = str(datetime.date.today()).replace('-', '')
    ds.ContentTime = str(time.time())  # seconds since the epoch
    ds.StudyInstanceUID = '1.3.6.1.4.1.9590.100.1.1.124313977412360175234271287472804872093'
    ds.SeriesInstanceUID = '1.3.6.1.4.1.9590.100.1.1.369231118011061003403421859172643143649'
    ds.SOPInstanceUID = '1.3.6.1.4.1.9590.100.1.1.111165684411017669021768385720736873780'
    ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.7'  # Secondary Capture Image Storage
    ds.SecondaryCaptureDeviceManufacturer = 'Python 3.6'

    # TAGS NECESSARY TO CONTAIN IMAGE DATA:
    ds.SamplesPerPixel = 1
    ds.PhotometricInterpretation = "MONOCHROME2"
    ds.PixelRepresentation = 1
    ds.HighBit = 15
    ds.BitsStored = 16
    ds.BitsAllocated = 16
    ds.RescaleIntercept = 0
    ds.RescaleSlope = 1
    ds.WindowCenter = 80
    ds.WindowWidth = 600
    # numpy arrays are indexed (rows, columns)
    ds.Rows = slice_data.shape[0]
    ds.Columns = slice_data.shape[1]
    ds.PixelData = slice_data.tobytes()

    for k, v in meta.items():
        ds.add_new(k, 'LO', v)

    ds.save_as(dcm_path)
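# Hedged usage sketch for mk_dcm above (assumes numpy is imported as np, matching the
# signed 16-bit MONOCHROME2 settings in the function); the extra tag and its value are
# illustrative only.
slice_data = np.zeros((512, 512), dtype=np.int16)
mk_dcm('slice_000.dcm', slice_data, {0x00511001: 'example-extra-value'})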
def set_shared_functional_groups_sequence(
    target: pydicom.Dataset, segmentation: sitk.Image
) -> None:
    spacing = segmentation.GetSpacing()

    dataset = pydicom.Dataset()
    dataset.PixelMeasuresSequence = [pydicom.Dataset()]
    dataset.PixelMeasuresSequence[0].PixelSpacing = [f"{x:e}" for x in spacing[:2]]
    dataset.PixelMeasuresSequence[0].SliceThickness = f"{spacing[2]:e}"
    dataset.PixelMeasuresSequence[0].SpacingBetweenSlices = f"{spacing[2]:e}"

    dataset.PlaneOrientationSequence = [pydicom.Dataset()]
    dataset.PlaneOrientationSequence[
        0
    ].ImageOrientationPatient = sitk_to_dcm_orientation(segmentation)

    target.SharedFunctionalGroupsSequence = pydicom.Sequence([dataset])
def test_identifier_with_unknown_vr():
    # The fundamental feature being tested is behaviour in response to a
    # programmer error.  The programmer error is specification of an identifier
    # that has a VR that has not been addressed in the strategy.  However,
    # because the strategy is only applied when the identifier is found in the
    # dataset, the error will only surface in that circumstance.
    replacement_strategy = pseudonymisation_api.pseudonymisation_dispatch
    logging.info("Using pseudonymisation strategy")
    identifying_keywords_with_vr_unknown_to_strategy = [
        "CodingSchemeURL",
        "PatientID",
    ]
    logging.info("Using keyword with VR = UR")

    ds_input = pydicom.Dataset()
    ds_input.PatientID = "ABC123"

    # Not expected to cause problems if the identifier with unknown VR is not in the data
    assert (
        anonymise_dataset(
            ds_input,
            replacement_strategy=replacement_strategy,
            identifying_keywords=identifying_keywords_with_vr_unknown_to_strategy,
        )
        is not None
    )

    # Should raise the error if the identifier with unknown VR is in the data
    with pytest.raises(KeyError):
        ds_input.CodingSchemeURL = "https://scheming.coders.co.nz"
        anonymise_dataset(
            ds_input,
            replacement_strategy=replacement_strategy,
            identifying_keywords=identifying_keywords_with_vr_unknown_to_strategy,
        )
def test_RuleHandler() -> None:
    ds = pydicom.Dataset()
    tag = 0x00000001
    ds[tag] = pydicom.DataElement(value=b"1", tag=tag, VR="CS")
    handler = RuleHandler(lambda _: "x")
    handler(ds, tag)
    assert ds[tag].value == "x"
def add_dimension_organization(
        self, dim_organization: DimensionOrganizationSequence) -> None:
    """Adds a dimension organization sequence to the dataset.

    This method registers the (0x0020, 0x9164) DimensionOrganizationUID and
    appends all items from the sequence to (0x0020, 0x9222)
    DimensionIndexSequence.

    Args:
        dim_organization: A `DimensionOrganizationSequence` with one or more
            dimension items configured.
    """
    if 'DimensionOrganizationSequence' not in self:
        self.DimensionOrganizationSequence = pydicom.Sequence()
        self.DimensionIndexSequence = pydicom.Sequence()

    for item in self.DimensionOrganizationSequence:
        if item.DimensionOrganizationUID == dim_organization[
                0].DimensionOrganizationUID:
            raise ValueError(
                'Dimension organization with UID '
                f'{item.DimensionOrganizationUID} already exists')

    item = pydicom.Dataset()
    item.DimensionOrganizationUID = dim_organization[
        0].DimensionOrganizationUID
    self.DimensionOrganizationSequence.append(item)
    self.DimensionIndexSequence.extend(dim_organization)
def add_dimension(
        self,
        dimension_index_pointer: Union[str, pydicom.tag.Tag],
        functional_group_pointer: Optional[Union[str, pydicom.tag.Tag]] = None):
    ds = pydicom.Dataset()

    if len(self) > 0:
        ds.DimensionOrganizationUID = self[0].DimensionOrganizationUID
    else:
        ds.DimensionOrganizationUID = pydicom.uid.generate_uid()

    if isinstance(dimension_index_pointer, str):
        dimension_index_pointer = pydicom.tag.Tag(
            pydicom.datadict.tag_for_keyword(dimension_index_pointer))
    ds.DimensionIndexPointer = dimension_index_pointer
    ds.DimensionDescriptionLabel = pydicom.datadict.keyword_for_tag(
        dimension_index_pointer
    ) or f'Unknown tag {dimension_index_pointer}'

    if functional_group_pointer is not None:
        if isinstance(functional_group_pointer, str):
            functional_group_pointer = pydicom.tag.Tag(
                pydicom.datadict.tag_for_keyword(functional_group_pointer))
        ds.FunctionalGroupPointer = functional_group_pointer

    self.append(ds)
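# Hedged usage sketch (assumes DimensionOrganizationSequence is the pydicom.Sequence
# subclass whose add_dimension method is shown above); the keyword pairs mirror the
# dimension organisation commonly used for DICOM SEG objects.
dim_seq = DimensionOrganizationSequence()
dim_seq.add_dimension('ReferencedSegmentNumber', 'SegmentIdentificationSequence')
dim_seq.add_dimension('ImagePositionPatient', 'PlanePositionSequence')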
def set_shared_functional_groups_sequence(target: pydicom.Dataset,
                                          segmentation: sitk.Image):
    spacing = segmentation.GetSpacing()

    dataset = pydicom.Dataset()
    dataset.PixelMeasuresSequence = [pydicom.Dataset()]
    dataset.PixelMeasuresSequence[0].PixelSpacing = [
        f'{x:e}' for x in spacing[:2]
    ]
    dataset.PixelMeasuresSequence[0].SliceThickness = f'{spacing[2]:e}'
    dataset.PixelMeasuresSequence[0].SpacingBetweenSlices = f'{spacing[2]:e}'

    dataset.PlaneOrientationSequence = [pydicom.Dataset()]
    dataset.PlaneOrientationSequence[0].ImageOrientationPatient = [
        f'{x:e}' for x in np.ravel(segmentation.GetDirection())[:6]
    ]

    target.SharedFunctionalGroupsSequence = pydicom.Sequence([dataset])
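# Hedged usage sketch (assumes SimpleITK is imported as sitk, as the function above
# requires): fill SharedFunctionalGroupsSequence on a fresh dataset from a small label
# image with 0.5 x 0.5 x 2.0 mm spacing.
segmentation = sitk.Image(64, 64, 16, sitk.sitkUInt8)
segmentation.SetSpacing((0.5, 0.5, 2.0))
target = pydicom.Dataset()
set_shared_functional_groups_sequence(target, segmentation)
print(target.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].PixelSpacing)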
def test_failure_storage(memory_storage: storage.InMemoryStorage):
    memory_storage.new_file('1.2.3.4', '1.2.3', '1.2.3.5', 'test')
    ds = pydicom.Dataset()
    ds.SOPInstanceUID = '1.2.3.4'
    memory_storage.bus.broadcast(storage.StorageChannels.ON_STORE_FAILURE, ds)
    with pytest.raises(storage.StorageFiles.DoesNotExist):  # pylint: disable=no-member
        storage.StorageFiles.get(
            storage.StorageFiles.sop_instance_uid == '1.2.3.4')
def test_successful_storage(memory_storage: storage.InMemoryStorage):
    memory_storage.new_file('1.2.3.4', '1.2.3', '1.2.3.5', 'test')
    ds = pydicom.Dataset()
    ds.SOPInstanceUID = '1.2.3.4'
    memory_storage.bus.broadcast(storage.StorageChannels.ON_STORE_DONE, ds)
    _file = storage.StorageFiles.get(
        storage.StorageFiles.sop_instance_uid == '1.2.3.4')
    assert _file.is_stored == True
def gen_dummy_sequence(ds: pydicom.Dataset, tag: str) -> pydicom.Dataset:
    """Generates a dummy sequence with 3 attributes"""
    ds.add_new(tag, 'SQ', [])
    ds[tag].value.append(pydicom.Dataset())
    ds[tag].value[0].add_new('0x00080100', 'SH', replace_with_dummy_str('SH'))
    ds[tag].value[0].add_new('0x00080102', 'SH', replace_with_dummy_str('SH'))
    ds[tag].value[0].add_new('0x00080104', 'LO', replace_with_dummy_str('LO'))
    return ds
def setUp(self):
    # bunch of test CT files (each from a different imaginary image series...)
    syscfg = get_sysconfig(filepath='./cfg/unit_test_system.cfg',
                           username="******",
                           want_logfile="")
    self.ct1 = pydicom.Dataset()
    self.ct1.SeriesDescription = "Foo Bar"
    self.ct1.KVP = "100"
    self.ct1.ConvolutionKernel = "UB"
    self.ct2 = pydicom.Dataset()
    self.ct2.SeriesDescription = "Bar Foo"
    self.ct2.KVP = "110"
    self.ct2.ConvolutionKernel = "UB"
    self.ct3 = pydicom.Dataset()
    self.ct3.SeriesDescription = "foo barr"
    self.ct3.KVP = "120"
    self.ct3.ConvolutionKernel = "UB"
    self.ct4 = pydicom.Dataset()
    self.ct4.SeriesDescription = "quux"
    self.ct4.KVP = "210"
    self.ct4.ConvolutionKernel = "BU"
    self.hlut_conf = tempfile.NamedTemporaryFile(prefix="test_hlut",
                                                 suffix=".conf",
                                                 delete=False)
    self.hlut_conf.write(b"""
[First test CT protocol]
density = Schneider2000DensitiesTable.txt
composition = Schneider2000MaterialsTable.txt
Series Description = Foo Bar

[Second test CT protocol]
-1024,-50 = G4_AIR
-50,5000 = G4_GRAPHITE
Series Description = Bar Foo
Convolution Kernel = UB
KVP = 110

[Third test CT protocol]
-1024,-50 = G4_AIR
-50,50 = G4_WATER
50,3000 = G4_STAINLESS-STEEL
Series Description = foo barr
""")
    self.hlut_conf.close()
    self.all_hluts = hlut_conf.getInstance(fname=self.hlut_conf.name)
def from_dcmqi_metainfo(metainfo: Union[dict, str]) -> pydicom.Dataset:
    """Converts a `metainfo.json` file from the dcmqi project to a
    `pydicom.Dataset` with the matching DICOM data elements set from JSON.

    Those JSON files can be easily created using the segmentation editor tool
    from QIICR/dcmqi: http://qiicr.org/dcmqi/#/seg

    When converting the JSON to a DICOM dataset, the validity of the provided
    JSON document is ensured using the official JSON schema files from the
    dcmqi project.

    Args:
        metainfo: Either a `str` for a file path to read from or a `dict` with
            the JSON already imported or constructed in source code.

    Returns:
        A `pydicom.Dataset` containing all values from the JSON document and
        some defaults if the elements were not available.
    """
    # Convenience loader if a JSON file path was given instead of a dict
    if isinstance(metainfo, str):
        with open(metainfo) as ifile:
            metainfo = json.load(ifile)
    assert isinstance(metainfo, dict)

    # Validate dictionary against dcmqi JSON schemas
    validator = _create_validator()
    if not validator.is_valid(metainfo):
        raise NotImplementedError()

    # Create dataset from provided JSON
    dataset = pydicom.Dataset()
    tags_with_defaults = [
        ("BodyPartExamined", ""),
        ("ClinicalTrialCoordinatingCenterName", ""),
        ("ClinicalTrialSeriesID", "Session1"),
        ("ClinicalTrialTimePointID", "1"),
        ("ContentCreatorName", "Reader1"),
        ("ContentDescription", "Image segmentation"),
        ("ContentLabel", "SEGMENTATION"),
        ("InstanceNumber", "1"),
        ("SeriesDescription", "Segmentation"),
        ("SeriesNumber", "300"),
    ]
    for tag_name, default_value in tags_with_defaults:
        setattr(dataset, tag_name, metainfo.get(tag_name, default_value))

    if len(metainfo["segmentAttributes"]) > 1:
        raise ValueError(
            "Only metainfo.json files written for single-file input are supported"
        )
    dataset.SegmentSequence = pydicom.Sequence(
        [_create_segment_dataset(x) for x in metainfo["segmentAttributes"][0]])

    return dataset
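# Hedged usage sketch for from_dcmqi_metainfo above ("seg-metainfo.json" is a placeholder
# path to a dcmqi metainfo document, e.g. one produced at http://qiicr.org/dcmqi/#/seg).
seg_dataset = from_dcmqi_metainfo("seg-metainfo.json")
print(seg_dataset.ContentLabel)          # "SEGMENTATION" unless overridden in the JSON
print(len(seg_dataset.SegmentSequence))  # one item per segment described in the JSON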
def test_tagged_once(self):
    ds = pydicom.Dataset()
    tagger.add_private_tag(ds, self.group, self.identifier, self.tag_value)
    for tag in range(0x0011, 0x00FF):
        self.assertFalse(ds.get((self.group, tag)))
    # Make sure dicom is only tagged once
    tagger.add_private_tag(ds, self.group, self.identifier, self.tag_value)
    for tag in range(0x0011, 0x00FF):
        self.assertFalse(ds.get((self.group, tag)))
def test_find_empty(pacs: server.Server, pacs_client: client.DICOMClient):
    request = pydicom.Dataset()
    request.PatientName = None
    request.PatientSex = None
    request.SpecificCharacterSet = 'ISO_IR 192'
    request.QueryRetrieveLevel = 'STUDY'
    request.NumberOfPatientRelatedStudies = None
    results = pacs_client.find(request)
    assert not list(results)
def save(self) -> None:
    """
    Save input data into DICOM file.

    :return: None
    """
    if not self.parseInput():
        return

    filename = QtWidgets.QFileDialog.getSaveFileName(self, "Save DICOM",
                                                     "../../..", "*.dcm")[0]
    if filename == '':
        return

    fm = pd.Dataset()
    # CT Image Storage
    fm.MediaStorageSOPClassUID = "1.2.840.10008.5.1.4.1.1.2"
    fm.MediaStorageSOPInstanceUID = pd.uid.generate_uid()
    fm.TransferSyntaxUID = pd.uid.ExplicitVRLittleEndian
    fm.ImplementationClassUID = pd.uid.generate_uid()

    ds = pd.FileDataset(None, {})
    ds.file_meta = fm
    # CT Image Storage
    ds.SOPClassUID = "1.2.840.10008.5.1.4.1.1.2"
    ds.SOPInstanceUID = pd.uid.generate_uid()
    ds.ContentDate = f"{self.date_time.year:04}{self.date_time.month:02}{self.date_time.day:02}"
    ds.ContentTime = f"{self.date_time.hour:02}{self.date_time.minute:02}{self.date_time.second:02}"
    ds.StudyInstanceUID = pd.uid.generate_uid()
    ds.SeriesInstanceUID = pd.uid.generate_uid()
    ds.Modality = "CT"
    ds.ConversionType = 'WSD'  # workstation
    ds.ImageType = r"ORIGINAL\PRIMARY\AXIAL"

    ds.PatientName = self.input_data["PatientName"]
    ds.PatientID = self.input_data["PatientID"]
    ds.PatientSex = self.input_data["PatientSex"]
    ds.ImageComments = self.input_data["ImageComments"]

    ds.PixelData = self.img.astype(np.uint8).tobytes()
    ds.Rows, ds.Columns = self.img.shape
    ds.SamplesPerPixel = 1
    ds.PixelRepresentation = 0
    ds.PhotometricInterpretation = "MONOCHROME2"
    ds.BitsAllocated, ds.BitsStored = 8, 8
    ds.HighBit = 7

    ds.is_little_endian = True
    ds.is_implicit_VR = False
    ds.save_as(f"{filename}.dcm", write_like_original=False)
    self.close()
def test_full_cycle(pacs: server.Server, pacs_client: client.DICOMClient,
                    test_ds: pydicom.Dataset):
    test_storage(pacs, pacs_client, test_ds)

    find_request = pydicom.Dataset()
    find_request.QueryRetrieveLevel = 'IMAGE'
    find_request.StudyInstanceUID = None
    find_request.SeriesInstanceUID = None
    find_request.SOPInstanceUID = None
    results = list(pacs_client.find(find_request))
    assert len(results) == 1

    ae = CStoreAE(test_ds, 'TEST_CLIENT', 11112)
    ae.add_scp(sopclass.storage_scp)
    with ae:
        move_request = pydicom.Dataset()
        move_request.QueryRetrieveLevel = 'IMAGE'
        move_request.StudyInstanceUID = test_ds.StudyInstanceUID
        move_request.SeriesInstanceUID = test_ds.SeriesInstanceUID
        move_request.SOPInstanceUID = test_ds.SOPInstanceUID
        pacs_client.move(move_request)
def __init__(self, volumes, foo="foo", bar="bar") -> None:
    self.volumes = volumes
    self._from_file_args = {}
    self.foo = foo
    self._bar = bar

    # Attributes that should not be serialized
    self._temp_path = "some/path"
    self.__some_attr__ = 1234
    self.__pydicom_header__ = pydicom.Dataset()
def test_get_files(memory_storage: storage.InMemoryStorage):
    ts = uid.ImplicitVRLittleEndian
    ctx = asceprovider.PContextDef(1, '1.2.3', ts)
    cmd_ds = pydicom.Dataset()
    cmd_ds.AffectedSOPClassUID = '1.2.3'
    cmd_ds.AffectedSOPInstanceUID = '1.2.3.4'
    fp, start = memory_storage.bus.send_one(ae.AEChannels.ON_GET_FILE, ctx,
                                            cmd_ds)
    ds = pydicom.Dataset()
    ds.SOPInstanceUID = '1.2.3.4'
    ds.SOPClassUID = '1.2.3'
    ds_stream = dsutils.encode(ds, ts.is_implicit_VR, ts.is_little_endian)
    fp.write(ds_stream)
    fp.seek(start)
    memory_storage.bus.broadcast(storage.StorageChannels.ON_STORE_DONE, ds)
    for sop_class_uid, _ts, ds in memory_storage.on_store_get_files(
            ['1.2.3.4']):
        assert sop_class_uid == '1.2.3'
        assert _ts == ts
        assert ds.SOPInstanceUID == '1.2.3.4'
def _create_code_sequence(data: dict) -> pydicom.Sequence:
    """Helper function for creating a DICOM sequence from JSON attributes.

    Returns:
        A `pydicom.Sequence` with a single `pydicom.Dataset` item containing
        all attributes from the JSON document.
    """
    dataset = pydicom.Dataset()
    for key in data:
        setattr(dataset, key, data[key])
    return pydicom.Sequence([dataset])
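# Hedged usage sketch for _create_code_sequence above: the dict keys are assumed to be
# valid DICOM keywords, as they are in dcmqi metainfo JSON; the code values are
# illustrative only.
ds = pydicom.Dataset()
ds.SegmentedPropertyCategoryCodeSequence = _create_code_sequence({
    "CodeValue": "T-D0050",
    "CodingSchemeDesignator": "SRT",
    "CodeMeaning": "Tissue",
})
print(ds.SegmentedPropertyCategoryCodeSequence[0].CodeMeaning)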
def write_dicom(output_name, data, image_number=0):
    # Write the data to a dicom file.
    # Dicom files are difficult to set up correctly, this file will likely
    # crash when trying to open it using dcm2nii. However, if it is loaded in
    # python (e.g., dicom.dcmread) then pixel_array contains the relevant
    # voxel data

    # Convert data from float to int
    dataInts = data.astype(np.int16)

    # Populate required values for file meta information
    file_meta = dicom.Dataset()
    file_meta.MediaStorageSOPClassUID = '1.2'  # '1.2.840.10008.5.1.4.1.1.2'
    file_meta.MediaStorageSOPInstanceUID = "1.2.3"
    file_meta.ImplementationClassUID = "1.2.3.4"
    file_meta.TransferSyntaxUID = '1.2.840.10008.1.2'

    # Create the FileDataset
    ds = dicom.FileDataset(output_name, {}, file_meta=file_meta,
                           preamble=b"\0" * 128)

    # Set image dimensions
    frames, rows, cols = dataInts.shape
    ds.Rows = rows
    ds.Columns = cols
    ds.NumberOfFrames = frames
    ds.SamplesPerPixel = 1
    ds.BitsAllocated = 16
    ds.BitsStored = 16
    ds.PixelRepresentation = 0
    ds.InstanceNumber = image_number
    ds.ImagePositionPatient = [0, 0, 0]
    ds.ImageOrientationPatient = [.01, 0, 0, 0, 0, 0]

    # Add the data elements -- not trying to set all required here. Check DICOM
    # standard
    ds.PatientName = "sim"
    ds.PatientID = "sim"

    # Set the transfer syntax
    ds.is_little_endian = True
    ds.is_implicit_VR = True

    # Set creation date/time
    image_datetime = script_datetime + datetime.timedelta(seconds=image_number)
    timeStr = image_datetime.strftime('%H%M%S')
    ds.ContentDate = image_datetime.strftime('%Y%m%d')
    ds.ContentTime = timeStr

    # Add the data
    ds.PixelData = dataInts.tobytes()

    ds.save_as(output_name)
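# Hedged usage sketch for write_dicom above (assumes numpy as np, datetime, and pydicom
# imported as dicom; the function reads `script_datetime` from module scope, so it is
# defined here before the call).
script_datetime = datetime.datetime.now()
volume = np.zeros((4, 32, 32), dtype=np.float32)
write_dicom('sim_000.dcm', volume, image_number=0)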
def test_identifier_but_no_tag(self):
    # Test when tag isn't there but identifier is
    ds = pydicom.Dataset()
    ds.add_new((0x0021, 0x0010), "LO", "Flywheel")
    tagger.add_private_tag(ds, self.group, self.identifier, self.tag_value)
    identifier_elem = ds.get((self.group, 0x0010))
    private_elem = ds.get((self.group, 0x1000))
    self.assertTrue(identifier_elem.value == "Flywheel")
    self.assertTrue(private_elem.value == "DICOM Send")
    for tag in range(0x0011, 0x00FF):
        self.assertFalse(ds.get((self.group, tag)))
def test_ds():
    ds = pydicom.Dataset()
    ds.PatientName = 'Test^Test^Test'
    ds.PatientSex = 'M'
    ds.PatientID = 'auto1'
    ds.SpecificCharacterSet = 'ISO_IR 192'
    ds.StudyInstanceUID = uid.generate_uid()
    ds.SeriesInstanceUID = uid.generate_uid()
    ds.SOPInstanceUID = uid.generate_uid()
    ds.SOPClassUID = uids.BASIC_TEXT_SR_STORAGE
    return ds
def flatten_data(d, new_dataset=None):
    if new_dataset is None:
        new_dataset = pydicom.Dataset()
        new_dataset.is_little_endian = d.is_little_endian
        new_dataset.is_implicit_VR = d.is_implicit_VR
    for element in d.iterall():
        if not isinstance(element.value, pydicom.sequence.Sequence):
            new_dataset.add(element)
        # else:
        #     print('Skipping', element)
    return new_dataset
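# Hedged usage sketch for flatten_data above: nested elements inside a sequence are
# copied up into a single, flat dataset (values are illustrative).
nested = pydicom.Dataset()
nested.is_little_endian = True
nested.is_implicit_VR = True
nested.PatientID = '12345'
nested.ReferencedStudySequence = [pydicom.Dataset()]
nested.ReferencedStudySequence[0].ReferencedSOPInstanceUID = '1.2.3'
flat = flatten_data(nested)
assert 'ReferencedSOPInstanceUID' in flat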