def gen_other_case(ds: pydicom.Dataset, attributes: list) -> pydicom.Dataset:
    """Generate a test dataset with critical mock values to REMOVE.

    Parameters
    ----------
    ds : pydicom.Dataset
        Dataset populated in place.
    attributes : list
        (tag, VR) pairs describing the attributes to mock.

    Returns
    -------
    pydicom.Dataset
        The same dataset with one mocked element per supported VR.
    """
    for attr in attributes:
        tag, vr = attr[0], attr[1]
        if vr in ('DA', 'DT'):
            attrvalue = gen_dummy_date()
        elif vr == 'TM':
            attrvalue = gen_dummy_hour()
        elif vr == 'OB':
            attrvalue = gen_uuid128('')
        elif vr in ('SH', 'LO'):
            attrvalue = replace_with_dummy_str(vr)
        elif vr == 'UI':
            attrvalue = gen_dicom_uid('', '')
        elif vr == 'SQ':
            # Sequences are written directly onto the dataset; nothing to add below.
            gen_dummy_sequence(ds, tag)
            continue
        elif vr == 'DS':
            attrvalue = float(random.randint(0, 999))
        elif vr == 'IS':
            attrvalue = random.randint(0, 999)
        elif vr == 'PN':
            attrvalue = "Dr. William MADIE"
        else:
            # BUG FIX: an unsupported VR previously fell through to add_new()
            # reusing a stale `attrvalue` from an earlier iteration (or raising
            # NameError on the first iteration) — skip unknown VRs instead.
            continue
        ds.add_new(tag, vr, attrvalue)
    return ds
def setup(self):
    """Run prior to each test: reset handles and build canonical C-FIND
    query identifiers for each Query/Retrieve level."""
    self.ae = None
    self.p = None
    self.func = None

    # Fresh temporary database file and instance directory per test.
    self.tfile = tempfile.NamedTemporaryFile()
    self.db_location = self.tfile.name
    self.instance_location = tempfile.TemporaryDirectory()

    patient = Dataset()
    patient.QueryRetrieveLevel = "PATIENT"
    patient.PatientID = None
    self.q_patient = patient

    study = Dataset()
    study.QueryRetrieveLevel = "STUDY"
    study.PatientID = None
    study.StudyInstanceUID = None
    self.q_study = study

    series = Dataset()
    series.QueryRetrieveLevel = "SERIES"
    series.PatientID = None
    series.StudyInstanceUID = None
    series.SeriesInstanceUID = None
    self.q_series = series

    image = Dataset()
    image.QueryRetrieveLevel = "IMAGE"
    image.PatientID = None
    image.StudyInstanceUID = None
    image.SeriesInstanceUID = None
    image.SOPInstanceUID = None
    self.q_image = image
def test_when_patient_birth_date(self):
    """A patient born 2016-12-10, studied 2018-12-17, is reported as '002Y'."""
    dicom = Dataset()
    dicom.StudyDate = '20181217'
    dicom.PatientBirthDate = '20161210'
    assert age_in_years(dicom) == '002Y'
def __init__(self, sequence_data):
    """Object initialization

    Parameters
    ----------
    sequence_data : list of dict
        Data used to generate each sequence item; each dict may itself
        contain nested sequence data (list of dictionaries).
    """
    super().__init__()
    for item_data in sequence_data:
        ds = Dataset()
        # Required DICOM attributes for every item.
        ds.SeriesInstanceUID = generate_uid()
        ds.ReferencedSOPSequence = generate_sequence(
            "ReferencedSOPSequence", dict())
        # Merge in any additional attributes supplied by the caller.
        ds = update_and_insert_additional_DICOM_attributes_in_ds(
            ds, item_data)
        # ReferencedSOPSequence and ReferencedImageSequence are mutually
        # exclusive: keep only the one the caller explicitly provided.
        caller_gave_sop = "ReferencedSOPSequence" in item_data
        caller_gave_image = "ReferencedImageSequence" in item_data
        if caller_gave_sop and "ReferencedImageSequence" in ds:
            del ds.ReferencedImageSequence
        elif caller_gave_image and "ReferencedSOPSequence" in ds:
            del ds.ReferencedSOPSequence
        self.sequence.append(ds)
def move_study_uid(study_uid, one_per_series=False):
    """Move a full study via C-MOVE.

    NOTE: `one_per_series` is accepted but not implemented yet.
    """
    query = Dataset()
    query.QueryRetrieveLevel = 'STUDY'
    query.StudyInstanceUID = study_uid
    return do_move(query)
def from_dataset(cls, model_instance=None, ds: Dataset = None):
    """Populate a Django model instance from a DICOM dataset.

    Relational, auto-generated and file/image fields are skipped; date
    fields are parsed from the DICOM YYYYMMDD format; time fields are not
    handled yet; everything else is copied verbatim via FIELDS_TO_TAG.
    """
    if model_instance is None:
        model_instance = cls()
    for field in cls._meta.get_fields():
        # Skip relations, auto PKs and file-backed fields.
        if isinstance(field, (ManyToOneRel, ForeignKey, OneToOneField,
                              models.AutoField, models.FileField,
                              models.ImageField)):
            continue
        if isinstance(field, models.DateField):
            raw_value = ds.get(FIELDS_TO_TAG[field.name], None)
            if raw_value is None:
                continue
            match = DICOM_DATE_REGEX.match(raw_value)
            if match:
                parsed = datetime.date(int(match.group(1)),
                                       int(match.group(2)),
                                       int(match.group(3)))
                setattr(model_instance, field.name, parsed)
        elif isinstance(field, models.TimeField):
            pass  # TODO: implement time parsing later
        else:
            setattr(model_instance, field.name,
                    ds.get(FIELDS_TO_TAG[field.name], None))
    return model_instance
def gen_dummy_sequence(ds: pydicom.Dataset, tag: str) -> pydicom.Dataset:
    """Generate a dummy sequence with 3 attributes under `ds[tag]`.

    BUG FIX: the return annotation said ``-> None`` although the function
    returns `ds`; the annotation now matches the actual behavior.

    Parameters
    ----------
    ds : pydicom.Dataset
        Dataset to attach the sequence to (modified in place).
    tag : str
        Tag of the SQ element to create.

    Returns
    -------
    pydicom.Dataset
        The same dataset, for chaining.
    """
    ds.add_new(tag, 'SQ', [])
    item = pydicom.Dataset()
    # (0008,0100/0102/0104) — presumably a code-sequence item; confirm intent.
    item.add_new('0x00080100', 'SH', replace_with_dummy_str('SH'))
    item.add_new('0x00080102', 'SH', replace_with_dummy_str('SH'))
    item.add_new('0x00080104', 'LO', replace_with_dummy_str('LO'))
    ds[tag].value.append(item)
    return ds
def gen_ui_case(ds: pydicom.Dataset) -> pydicom.Dataset:
    """Generate a test dataset with critical UI mock values.

    Each tag in `ui_tags` receives a unique UID built from a fixed prefix
    plus an incrementing numeric suffix.
    """
    prefix = '1.3.6.1.4.1.14519.5.2.1.2135.6389.'
    base_suffix = 799402065306178004127703292730
    for offset, tag in enumerate(ui_tags):
        ds.add_new(tag, 'UI', f"{prefix}{base_suffix + offset}")
    return ds
def test_study_find_no_patient_attrs(pacs_srv: pacs.PACS):
    """A STUDY-level C-FIND without any patient attributes still matches."""
    query = Dataset()
    query.SpecificCharacterSet = 'ISO_IR 192'
    query.QueryRetrieveLevel = 'STUDY'
    query.AccessionNumber = '1234'
    matches = list(pacs_srv.c_find(query))
    assert len(matches) == 1
    assert matches[0].AccessionNumber == '1234'
def test_basic(self):
    """Test basic operation of the QR get service."""
    # Start the QR SCP under test with the temp database/instance store.
    self.p = p = self.func([
        "--database-location",
        self.db_location,
        "--instance-location",
        self.instance_location.name,
        "-d",
    ])
    time.sleep(1)
    # Seed the SCP with test instances before querying.
    _send_datasets()
    time.sleep(1)
    # Patient-level C-MOVE identifier.
    query = Dataset()
    query.QueryRetrieveLevel = "PATIENT"
    query.PatientID = "1CT1"
    datasets = []

    def handle_store(event):
        # Collect every dataset the SCP pushes back to our store SCP.
        datasets.append(event.dataset)
        return 0x0000  # Success

    self.ae = ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    ae.add_supported_context(CTImageStorage)
    # Local store SCP ("STORESCP") that receives the moved instances.
    scp = ae.start_server(
        ("localhost", 11113),
        block=False,
        evt_handlers=[(evt.EVT_C_STORE, handle_store)],
    )
    model = PatientRootQueryRetrieveInformationModelMove
    ae.add_requested_context(model)
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    responses = assoc.send_c_move(query, "STORESCP", model)
    # One pending response while the sub-operation runs...
    status, ds = next(responses)
    assert status.Status == 0xFF00  # Pending
    assert ds is None
    # ...then the final success response, and no further responses.
    status, ds = next(responses)
    assert status.Status == 0x0000  # Success
    assert ds is None
    pytest.raises(StopIteration, next, responses)
    assoc.release()
    scp.shutdown()
    p.terminate()
    p.wait()
    # Exactly one CT instance should have been moved to us.
    assert 1 == len(datasets)
    assert "CompressedSamples^CT1" == datasets[0].PatientName
def create_dcm_file(self):
    """Create a synthetic DICOM file from this object's patient info and image.

    The dataset is written twice — Implicit VR Little Endian, then
    Explicit VR Big Endian — to two temporary files.

    Returns
    -------
    FileDataset
        The dataset that was written (in its big-endian final state).
    """
    suffix = '.dcm'
    filename_little_endian = tempfile.NamedTemporaryFile(suffix=suffix).name
    filename_big_endian = tempfile.NamedTemporaryFile(suffix=suffix).name

    print("Setting file meta information...")
    file_meta = Dataset()
    file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
    file_meta.MediaStorageSOPInstanceUID = "1.2.3"
    file_meta.ImplementationClassUID = "1.2.3.4"
    # BUG FIX: the transfer syntax was only declared before the big-endian
    # save; declare it for the initial implicit-VR little-endian save too.
    file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian

    print("Setting dataset values...")
    ds = FileDataset(filename_little_endian, {}, file_meta=file_meta,
                     preamble=b"\0" * 128)
    ds.PatientName = self.get_patient_name() + " " + self.get_patient_surname()
    ds.PatientID = self.get_patient_id()
    ds.PatientSex = self.get_patient_sex()
    ds.PatientAge = self.get_patient_age()
    ds.PatientWeight = self.get_patient_weight()
    # BUG FIX: the DICOM keyword is ImageComments (plural); 'ImageComment'
    # was stored as a plain Python attribute and never written to the file.
    ds.ImageComments = self.get_patient_comment()
    ds.PatientBirthDate = self.get_patient_birth()

    # Transfer-syntax flags for the first (implicit VR little endian) save.
    ds.is_little_endian = True
    ds.is_implicit_VR = True

    # Creation date/time.
    dt = datetime.datetime.now()
    ds.ContentDate = dt.strftime('%Y%m%d')
    ds.ContentTime = dt.strftime('%H%M%S.%f')  # long format with microseconds

    # Image pixel module.
    ds.BitsAllocated = 16
    ds.Rows = self.image.shape[0]
    ds.Columns = self.image.shape[1]
    ds.PixelRepresentation = 0
    ds.SamplesPerPixel = 1
    ds.PhotometricInterpretation = "MONOCHROME2"
    # BUG FIX: `image *= 255` mutated self.image in place, so calling this
    # method twice produced different pixel data; scale a copy instead.
    image = (self.image * 255).astype("uint16")
    ds.PixelData = Image.fromarray(image).tobytes()

    print("Writing test file", filename_little_endian)
    ds.save_as(filename_little_endian)
    print("File saved.")

    # Re-save as Explicit VR Big Endian.
    ds.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRBigEndian
    ds.is_little_endian = False
    ds.is_implicit_VR = False
    print("Writing test file as Big Endian Explicit VR", filename_big_endian)
    ds.save_as(filename_big_endian)
    return ds
def add_to_roi(rtss, roi_name, roi_coordinates, data_set):
    """
    Add new contour image sequence ROI to rtss

    :param rtss: dataset of RTSS
    :param roi_name: ROIName
    :param roi_coordinates: Coordinates of pixels for new ROI
    :param data_set: Data Set of selected DICOM image file
    :return: rtss, with added ROI
    """
    # Creating a new ROIContourSequence, ContourSequence, ContourImageSequence
    contour_sequence = Sequence([Dataset()])
    contour_image_sequence = Sequence([Dataset()])

    # Coordinates are a flat [x, y, z, x, y, z, ...] list — 3 values per point.
    number_of_contour_points = len(roi_coordinates) / 3
    referenced_sop_class_uid = data_set.SOPClassUID
    referenced_sop_instance_uid = data_set.SOPInstanceUID

    # Find the ROI number previously assigned to this ROI name.
    existing_roi_number = None
    for item in rtss["StructureSetROISequence"]:
        if item.ROIName == roi_name:
            existing_roi_number = item.ROINumber

    position = None

    # Get the index of the ROI
    # NOTE(review): `position` stays None if no contour references the ROI
    # number, which would make the indexing below raise — presumably callers
    # guarantee the ROI already exists; confirm.
    for index, contour in enumerate(rtss.ROIContourSequence):
        if contour.ReferencedROINumber == existing_roi_number:
            position = index

    new_contour_number = len(
        rtss.ROIContourSequence[position].ContourSequence) + 1

    # ROI Sequence (exactly one item was created above)
    for contour in contour_sequence:
        # if data_set.get("ReferencedImageSequence"):
        contour.add_new(Tag("ContourImageSequence"), "SQ",
                        contour_image_sequence)

        # Contour Sequence — reference the source image instance.
        for contour_image in contour_image_sequence:
            contour_image.add_new(Tag("ReferencedSOPClassUID"), "UI",
                                  referenced_sop_class_uid)  # CT Image Storage
            contour_image.add_new(Tag("ReferencedSOPInstanceUID"), "UI",
                                  referenced_sop_instance_uid)

        contour.add_new(Tag("ContourNumber"), "IS", new_contour_number)

        # An open contour keeps every point; a closed contour drops the final
        # (x, y, z) triple, which duplicates the first point.
        if not _is_closed_contour(roi_coordinates):
            contour.add_new(Tag("ContourGeometricType"), "CS", "OPEN_PLANAR")
            contour.add_new(Tag("NumberOfContourPoints"), "IS",
                            number_of_contour_points)
            contour.add_new(Tag("ContourData"), "DS", roi_coordinates)
        else:
            contour.add_new(Tag("ContourGeometricType"), "CS", "CLOSED_PLANAR")
            contour.add_new(Tag("NumberOfContourPoints"), "IS",
                            number_of_contour_points-1)
            contour.add_new(Tag("ContourData"), "DS", roi_coordinates[0:-3])

    # Append the newly built contour item to the existing ContourSequence.
    rtss.ROIContourSequence[position].ContourSequence.extend(contour_sequence)
    return rtss
def generate_dicom_scans(dst, num_scans=10, intercept=0, slope=1):
    """Generate random synthetic single-slice DICOM scans under `dst`.

    Each scan becomes a folder of per-slice DICOM files with random pixel
    data, spacing and origin.

    Parameters
    ----------
    dst : str
        Destination directory.
    num_scans : int
        Number of scans to generate.
    intercept, slope : number
        Rescale values; raw stored pixels are (data - intercept) / slope.
    """
    spacing = (0.4 + 0.4 * np.random.rand(num_scans, 3)
               + np.array([1 + 0.5 * np.random.rand(), 0, 0]))
    origin = np.random.randint(-200, 200, (num_scans, 3))
    for i in range(num_scans):
        num_slices = np.random.randint(128, 169)
        scan_id = np.random.randint(2**16)
        scan_data = np.random.randint(0, 256, (num_slices, 128, 128))
        folder = os.path.join(dst,
                              hex(scan_id).replace('x', '').upper().zfill(8))
        if not os.path.exists(folder):
            os.makedirs(folder)
        for k in range(num_slices):
            slice_name = hex(scan_id + k).replace('x', '').upper().zfill(8)
            filename = os.path.join(folder, slice_name)
            pixel_array = (scan_data[k, ...] - intercept) / slope
            locZ = float(origin[i, 0] + spacing[i, 0] * k)
            locY, locX = float(origin[i, 1]), float(origin[i, 2])

            file_meta = DicomDataset()
            file_meta.MediaStorageSOPClassUID = "Secondary Capture Image Storage"
            # BUG FIX: the attribute was misspelled "MediaStorateSOPInstanceUID",
            # so the Media Storage SOP Instance UID was never actually set.
            file_meta.MediaStorageSOPInstanceUID = (
                hex(scan_id).replace('x', '').upper().zfill(8))
            file_meta.ImplementationClassUID = slice_name

            dataset = DicomFileDataset(filename, {}, file_meta=file_meta,
                                       preamble=b"\0" * 128)
            # BUG FIX: ndarray.tostring() is deprecated; tobytes() is identical.
            dataset.PixelData = pixel_array.astype(np.uint16).tobytes()
            dataset.RescaleSlope = slope
            dataset.RescaleIntercept = intercept
            dataset.ImagePositionPatient = MultiValue(
                type_constructor=float, iterable=[locZ, locY, locX])
            dataset.PixelSpacing = MultiValue(
                type_constructor=float,
                iterable=[float(spacing[i, 1]), float(spacing[i, 2])])
            dataset.SliceThickness = float(spacing[i, 0])
            dataset.Modality = 'WSD'
            # BUG FIX: Rows is the first array axis, Columns the second — the
            # two were swapped (harmless here only because slices are square).
            dataset.Rows = pixel_array.shape[0]
            dataset.Columns = pixel_array.shape[1]
            dataset.file_meta.TransferSyntaxUID = \
                pydicom.uid.ImplicitVRLittleEndian
            dataset.PixelRepresentation = 1
            dataset.BitsAllocated = 16
            dataset.BitsStored = 16
            dataset.SamplesPerPixel = 1
            write_file(filename, dataset)
def edit_func(ds: Dataset) -> Dataset:
    """Apply `edit_dict` to `ds`: a None value deletes the attribute, any
    other value overwrites it; attributes absent from `ds` are untouched.
    Optionally regenerates UIDs via `update_uids_cb`."""
    # TODO: Handle nested attributes (VR of SQ)
    for name, new_value in edit_dict.items():
        if not hasattr(ds, name):
            continue
        if new_value is None:
            delattr(ds, name)
        else:
            setattr(ds, name, new_value)
    if update_uids:
        ds.walk(update_uids_cb)
        if hasattr(ds, "file_meta"):
            # Keep the file meta header consistent with the new SOP Instance UID.
            ds.file_meta.MediaStorageSOPInstanceUID = ds.SOPInstanceUID
    return ds
def set_file_meta(instance: Dataset):
    """Attach a minimal file-meta header (Implicit VR Little Endian) and a
    128-byte preamble to `instance`, in place."""
    meta = FileMetaDataset()
    # meta.FileMetaInformationGroupLength = ??
    # meta.FileMetaInformationVersion = b'\x00\x01'
    meta.MediaStorageSOPClassUID = instance.SOPClassUID
    meta.MediaStorageSOPInstanceUID = instance.SOPInstanceUID
    meta.TransferSyntaxUID = ImplicitVRLittleEndian
    # meta.ImplementationClassUID = ??
    # meta.ImplementationVersionName = ??
    # Encoding flags must agree with the transfer syntax declared above.
    instance.is_implicit_VR = True
    instance.is_little_endian = True
    instance.file_meta = meta
    instance.preamble = b'\x00' * 128
def anonymize_dataset(dataset: pydicom.Dataset,
                      extra_anonymization_rules: dict = None,
                      delete_private_tags: bool = True) -> None:
    """
    Anonymize a pydicom Dataset by using anonymization rules which link an
    action to a tag.

    :param dataset: Dataset to be anonymized (modified in place)
    :param extra_anonymization_rules: Rules applied on top of the defaults
    :param delete_private_tags: Whether private tags should be deleted
    """
    current_anonymization_actions = initialize_actions()
    if extra_anonymization_rules is not None:
        current_anonymization_actions.update(extra_anonymization_rules)

    private_tags = []
    for tag, action in current_anonymization_actions.items():
        # The meta header information is located in the `file_meta` dataset,
        # so for tags with group 0x0002 apply the action to `file_meta`.
        if tag[0] == 0x0002:
            action(dataset.file_meta, tag)
        else:
            action(dataset, tag)
        element = None
        try:
            # `get()` does not accept the 4-indices tags in `dicomfields.py`,
            # so only attempt the lookup for <=2-indices tags.
            if len(tag) <= 2:
                element = dataset.get(tag)
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch only genuine errors.
        except Exception:
            print("Cannot get element from tag: ", tag_to_hex_strings(tag))
        # Remember private elements so they can be restored after stripping.
        if element and element.tag.is_private:
            private_tags.append(get_private_tag(dataset, tag))

    # X - Private tags = (0xgggg, 0xeeee) where 0xgggg is odd
    if delete_private_tags:
        dataset.remove_private_tags()

    # Adding back private tags if specified in dictionary
    for private_tag in private_tags:
        creator = private_tag["creator"]
        element = private_tag["element"]
        block = dataset.private_block(creator["tagGroup"],
                                      creator["creatorName"], create=True)
        if element is not None:
            block.add_new(element["offset"], element["element"].VR,
                          element["element"].value)
def test_basic(self):
    """Test basic operation of the QR get service."""
    # Start the QR SCP under test with the temp database/instance store.
    self.p = p = self.func([
        '--database-location', self.db_location,
        '--instance-location', self.instance_location.name,
        '-d'
    ])
    time.sleep(1)
    # Seed the SCP with test instances before querying.
    _send_datasets()
    time.sleep(1)
    # Patient-level C-GET identifier.
    query = Dataset()
    query.QueryRetrieveLevel = 'PATIENT'
    query.PatientID = '1CT1'
    datasets = []

    def handle_store(event):
        # Collect every dataset delivered over the C-GET association.
        datasets.append(event.dataset)
        return 0x0000  # Success

    self.ae = ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    model = PatientRootQueryRetrieveInformationModelGet
    ae.add_requested_context(model)
    ae.add_requested_context(CTImageStorage)
    # For C-GET we must accept the Storage SCP role on our own association.
    role = build_role(CTImageStorage, scp_role=True)
    assoc = ae.associate('localhost', 11112, ext_neg=[role],
                         evt_handlers=[(evt.EVT_C_STORE, handle_store)])
    assert assoc.is_established
    responses = assoc.send_c_get(query, model)
    # One pending response while the sub-operation runs...
    status, ds = next(responses)
    assert status.Status == 0xFF00  # Pending
    assert ds is None
    # ...then the final success response, and no further responses.
    status, ds = next(responses)
    assert status.Status == 0x0000  # Success
    assert ds is None
    pytest.raises(StopIteration, next, responses)
    assoc.release()
    p.terminate()
    p.wait()
    # Exactly one CT instance should have been retrieved.
    assert 1 == len(datasets)
    assert "CompressedSamples^CT1" == datasets[0].PatientName
def process(self, ds: Dataset = None, *args, **kwargs):
    """Serialize the scalar DICOM tags of `ds` to a JSON string.

    Pixel data, sequence/multi-value and dict-valued elements are skipped.
    """
    tags = {}
    for keyword in ds.dir():
        if keyword == 'PixelData':
            continue
        element = ds.data_element(keyword)
        if not (hasattr(element, 'name') and hasattr(element, 'value')):
            continue
        value = element.value
        # Delete in future
        if isinstance(value, (Sequence, MultiValue, dict)):
            continue
        tags[element.name] = value
    return json.dumps(tags, cls=DicomJsonEncoder)
def add_key_documents(self, referenced_dcm_files, referenced_frames=None):
    """Add key documents referencing the given DICOM files.

    Arguments:
        referenced_dcm_files {list} -- paths of DICOM files to reference
        referenced_frames {list} -- optional per-file frame numbers; must
            have the same length as referenced_dcm_files when provided

    Raises:
        ValueError: if referenced_frames is given but its length does not
            match referenced_dcm_files.
    """
    if (referenced_frames is not None
            and len(referenced_dcm_files) != len(referenced_frames)):
        # BUG FIX: the original printed a message and then evaluated the bare
        # name `exit` — a no-op expression statement — so the mismatch was
        # silently ignored; fail loudly instead.
        raise ValueError(
            "Number of referenced DCM files is expected to correspond to the number of referenced frames"
        )

    if referenced_frames is None:
        pairs = ((dcm_file, None) for dcm_file in referenced_dcm_files)
    else:
        pairs = zip(referenced_dcm_files, referenced_frames)

    for referenced_dcm_file, referenced_frame_numbers in pairs:
        ds = Dataset()
        ds_ref = read_file(referenced_dcm_file)
        item = {
            "ReferencedSOPClassUID": ds_ref.SOPClassUID,
            "ReferencedSOPInstanceUID": ds_ref.SOPInstanceUID,
        }
        if referenced_frame_numbers is not None:
            item["ReferencedFrameNumber"] = referenced_frame_numbers
        ds.ReferencedSOPSequence = generate_sequence(
            "ReferencedSOPSequence", [item])
        ds.RelationshipType = ("CONTAINS",)
        ds.ValueType = "IMAGE"
        self.dataset.ContentSequence.append(ds)
def add_text_object(self, referenced_dcm_file, layer_name, text_value,
                    anchor_point, cielab_value=None, shadow_style=None):
    """Add a text annotation referencing an image.

    Arguments:
        referenced_dcm_file {[type]} -- [description]
        layer_name {[type]} -- [description]
        text_value {[type]} -- [description]
        anchor_point {[type]} -- [description]

    Keyword Arguments:
        cielab_value {[type]} -- [description] (default: {None})
        shadow_style {[type]} -- [description] (default: {None})
    """
    ds_ref = read_file(referenced_dcm_file)

    annotation = Dataset()
    annotation.ReferencedImageSequence = generate_sequence(
        "ReferencedImageSequence", [{
            "ReferencedSOPClassUID": ds_ref.SOPClassUID,
            "ReferencedSOPInstanceUID": ds_ref.SOPInstanceUID
        }])
    annotation.GraphicLayer = layer_name
    annotation.TextObjectSequence = generate_sequence(
        "TextObjectSequence", [{
            "AnchorPointAnnotationUnits": "PIXEL",
            "UnformattedTextValue": text_value,
            "AnchorPoint": anchor_point,
            "AnchorPointVisibility": "N",
        }])

    # Optional text styling (color and/or shadow).
    if cielab_value or shadow_style:
        text_object = annotation.TextObjectSequence[0]
        text_object.TextStyleSequence = generate_sequence(
            "TextStyleSequence", [{}])
        style = text_object.TextStyleSequence[0]
        if cielab_value:
            style.TextColorCIELabValue = cielab_value
        if shadow_style:
            style.ShadowStyle = shadow_style

    # Lazily create the top-level annotation sequence on first use.
    if "GraphicAnnotationSequence" not in self.dataset:
        self.dataset.GraphicAnnotationSequence = generate_sequence(
            "GraphicAnnotationSequence", [{}])
    self.dataset.GraphicAnnotationSequence.append(annotation)
def datasetFromJSON(data: dict):
    """Build a pydicom Dataset from a DICOM JSON model dictionary.

    Handles SQ values recursively, person names given as
    {'Alphabetic': ...}, multi-values, single values, and value-less
    (empty) elements.
    """
    # return Dataset.from_json(data)
    ds = Dataset()
    for key, entry in data.items():
        tag = Tag(key)
        try:
            vr = entry['vr']
            if 'Value' not in entry:
                # Element present but empty.
                ds[key] = DataElement(tag, vr, '')
            elif vr == 'SQ':
                items = [datasetFromJSON(subdata) for subdata in entry['Value']]
                ds[key] = DataElement(tag, vr, Sequence(items))
            elif (isinstance(entry['Value'][0], dict)
                  and 'Alphabetic' in entry['Value'][0]):
                # Person-name values are wrapped as {'Alphabetic': '...'}.
                ds[key] = DataElement(tag, vr, entry['Value'][0]['Alphabetic'])
            elif len(entry['Value']) > 1:
                ds[key] = DataElement(tag, vr, entry['Value'])
            else:
                ds[key] = DataElement(tag, vr, entry['Value'][0])
        except Exception:
            # BUG FIX: the original dropped into an interactive IPython
            # `embed()` shell here (debug leftover), hanging any
            # non-interactive run; skip the malformed element instead.
            continue
    return ds
def test_update_patient_result_raise_if_id_change(patient_dataset_factory):
    """A second slice with a different PatientID must raise ValueError."""
    first_slice = patient_dataset_factory(PatientID='1')
    second_slice = patient_dataset_factory(PatientID='2')
    result = Dataset()
    update_patient_result(result, first_slice)
    with pytest.raises(ValueError):
        update_patient_result(result, second_slice)
def test_update_patient_result_missing_study_date(patient_dataset_factory):
    """An empty StudyDate is recorded first, then replaced by a real date."""
    result = Dataset()

    update_patient_result(result, patient_dataset_factory(StudyDate=''))
    assert result.PatientMostRecentStudyDate == ''

    update_patient_result(
        result, patient_dataset_factory(StudyDate=date(2018, 1, 1)))
    assert result.PatientMostRecentStudyDate == date(2018, 1, 1)
def test_update_patient_result_additional_tags_present(
        patient_dataset_factory):
    """Requested additional tags are copied onto the patient result."""
    result = Dataset()
    source = patient_dataset_factory(PatientSex='M')
    update_patient_result(result, source, ['PatientSex'])
    assert result.PatientSex == 'M'
def dicom_dataset_from_dict(input_dict: dict, template_ds=None):
    """Create a pydicom DICOM object from a dictionary.

    Dict values recurse into nested datasets; lists must contain either
    only dicts (a sequence) or no dicts (a plain multi-value); every key
    must be a known DICOM keyword.
    """
    dataset = Dataset() if template_ds is None else deepcopy(template_ds)
    for key, value in input_dict.items():
        if key not in DICOM_NAMES:
            raise ValueError(
                "{} is not within the DICOM dictionary.".format(key))
        if isinstance(value, dict):
            setattr(dataset, key, dicom_dataset_from_dict(value))
        elif isinstance(value, list):
            is_dict_flags = [isinstance(item, dict) for item in value]
            if np.all([not flag for flag in is_dict_flags]):
                convert_nparray_and_set_key_value_in_dataset(
                    dataset, key, value)
            elif np.all(is_dict_flags):
                setattr(dataset, key,
                        [dicom_dataset_from_dict(item) for item in value])
            else:
                raise ValueError(
                    "{} should contain either only dictionaries, or no "
                    "dictionaries".format(key))
        else:
            convert_nparray_and_set_key_value_in_dataset(dataset, key, value)
    return dataset
def construct_metadata_from_DICOM_dictionary(
    dicom: pydicom.Dataset,
) -> Dict[str, object]:
    """
    Converts DICOM dictionary tags to a pythonic dictionary.

    Args:
        dicom: An open DICOM dataset.

    Returns:
        The original DICOM dictionary (in the DICOM sense) as a pythonic
        dictionary. The pixel data tag is ignored.
    """
    metadata = {}
    for element in dicom.values():
        try:
            if element.tag == constants.DICOM_PIXEL_TAG:
                continue
            value = element.value
            if type(value) in constants.DICOM_SPECIFIC_TYPES:
                # Convert DICOM type to Python type via the lookup table.
                converter = constants.DICOM_TYPE_CONVERSION[type(value)]
                metadata[element.description()] = converter(value)
            else:
                metadata[element.description()] = value
        except (KeyError, TypeError):
            # Just don't include problematic key-values.
            continue
    return metadata
def test_sr_query(self):
    """Test expected response from StudyRoot query."""
    self.ae = ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    model = StudyRootQueryRetrieveInformationModelFind
    ae.add_requested_context(model)
    # Start the QR SCP under test with the temp database/instance store.
    self.p = p = self.func(
        [
            "--database-location",
            self.db_location,
            "--instance-location",
            self.instance_location.name,
            "-d",
        ]
    )
    time.sleep(0.5)
    # Seed the SCP with test instances before querying.
    _send_datasets()
    time.sleep(1)
    # Study-level C-FIND identifier for patient "4MR1".
    ds = Dataset()
    ds.QueryRetrieveLevel = "STUDY"
    ds.PatientID = "4MR1"
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    responses = assoc.send_c_find(ds, model)
    # Two pending matches are expected, each echoing the identifier.
    for ii in range(2):
        status, ds = next(responses)
        assert status.Status == 0xFF00  # Pending
        assert "4MR1" == ds.PatientID
        assert ds.RetrieveAETitle == "QRSCP"
        assert ds.QueryRetrieveLevel == "STUDY"
        assert 3 == len(ds)
    # Final success response carries no dataset, then iteration ends.
    status, ds = next(responses)
    assert status.Status == 0x0000  # Success
    assert ds is None
    pytest.raises(StopIteration, next, responses)
    assoc.release()
    p.terminate()
    p.wait()
def test_update_patient_result_single_study(patient_dataset_factory):
    """Two slices of the same study yield a single StudyInstanceUID entry."""
    result = Dataset()
    for _ in range(2):
        update_patient_result(result,
                              patient_dataset_factory(StudyInstanceUID='1'))
    assert len(result.PatientStudyInstanceUIDs) == 1
    assert result.PatientStudyInstanceUIDs[0].name == '1'
def create_empty_iod(self):
    """Create an empty IOD with the required DICOM tags but no values."""
    super().create_empty_iod()
    self.copy_required_dicom_attributes(Dataset(), include_optional=True)
def test_update_patient_result_multiple_studys(patient_dataset_factory):
    """Slices from two studies produce two distinct StudyInstanceUID entries."""
    result = Dataset()
    for study_uid in ('1', '2'):
        update_patient_result(
            result, patient_dataset_factory(StudyInstanceUID=study_uid))
    assert len(result.PatientStudyInstanceUIDs) == 2
    assert {uid.name for uid in result.PatientStudyInstanceUIDs} == {'1', '2'}