def test_network_timeout(self):
    """Check AE network timeout change produces good value.

    Invalid assignments (``None``, negative, non-numeric) leave the
    timeout at its 60 s default; valid values, including 0, are kept.
    """
    ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
    # Invalid values fall back to the 60 s default
    # (assertEqual gives a useful diff on failure, unlike assertTrue(a == b))
    ae.network_timeout = None
    self.assertEqual(ae.network_timeout, 60)
    ae.network_timeout = -100
    self.assertEqual(ae.network_timeout, 60)
    ae.network_timeout = 'a'
    self.assertEqual(ae.network_timeout, 60)
    # Valid values are stored as-is (0 is allowed)
    ae.network_timeout = 0
    self.assertEqual(ae.network_timeout, 0)
    ae.network_timeout = 30
    self.assertEqual(ae.network_timeout, 30)
def test_recv_short_aborts(self):
    """Test receiving short PDU causes abort."""
    # Scripted peer: accept the association, then send a truncated PDU
    commands = [
        ('recv', None),  # recv a-associate-rq
        ('send', a_associate_ac),
        ('send', b"\x07\x00\x00\x00\x00\x04\x00\x00"),  # Send short PDU
        ('exit', None)
    ]
    self.scp = scp = start_server(commands)

    def handle(event):
        # Step the scripted peer through recv RQ + send AC
        scp.step()
        scp.step()

    hh = [(evt.EVT_REQUESTED, handle)]

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    # Short network timeout so the incomplete PDU triggers the abort quickly
    ae.network_timeout = 0.2
    ae.add_requested_context('1.2.840.10008.1.1')
    assoc = ae.associate('localhost', 11112, evt_handlers=hh)
    assert assoc.is_established
    scp.step()  # send short pdu
    # Need to wait for network timeout to expire
    timeout = 0
    while not assoc.is_aborted and timeout < 1:
        time.sleep(0.05)
        timeout += 0.05
    assert assoc.is_aborted
    scp.step()
    scp.shutdown()
def verify(self) -> bool:
    """Send a DIMSE C-ECHO to the configured peer.

    Returns True when the C-ECHO response status is success/pending,
    False otherwise.
    """
    ae = AE(ae_title=self.client_ae)
    ae.add_requested_context(Verification)
    # setting timeout here doesn't appear to have any effect
    ae.network_timeout = self.timeout
    with association(ae, self.pacs_url, self.pacs_port, self.remote_ae) as assoc:
        logger.debug('Association accepted by the peer')
        # Send a DIMSE C-ECHO request to the peer
        # status is a pydicom Dataset object with (at a minimum) a
        # (0000, 0900) Status element
        status = assoc.send_c_echo()
        # Output the response from the peer
        if status.Status in status_success_or_pending:
            logger.debug('C-ECHO Response: 0x{0:04x}'.format(
                status.Status))
            return True
        else:
            logger.warning('C-ECHO Failure Response: 0x{0:04x}'.format(
                status.Status))
            return False
    # NOTE(review): both branches above return, so this is only reached if
    # the `association` context manager suppresses an exception raised in
    # the with-block (e.g. a failed association) — confirm its __exit__.
    return False
def test_unknown_pdu_aborts(self):
    """Test that receiving a PDU with an unknown type causes an abort."""
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("send", b"\x53\x00\x00\x00\x00\x02"),  # 0x53 — unrecognised PDU type
        ("recv", None),
        ("exit", None),
    ]
    self.scp = scp = start_server(commands)

    def handle(event):
        # Step the scripted peer through recv RQ + send AC
        scp.step()
        scp.step()

    hh = [(evt.EVT_REQUESTED, handle)]

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    ae.add_requested_context("1.2.840.10008.1.1")
    assoc = ae.associate("localhost", 11112, evt_handlers=hh)
    assert assoc.is_established
    scp.step()  # send bad PDU
    time.sleep(0.1)
    scp.step()  # receive abort
    scp.step()
    scp.shutdown()
    assert assoc.is_aborted
def test_exception_in_reactor(self):
    """Test that an exception being raised in the DUL reactor kills
    the DUL and aborts the association.
    """
    commands = [
        ('recv', None),  # recv a-associate-rq
        ('send', a_associate_ac),
        ('wait', 0.1),  # Give some time to monkey patch
        ('send', a_release_rq),  # Trigger the exception
        ('recv', None),  # recv a-abort
        ('wait', 0.2),
    ]
    scp = start_server(commands)

    ae = AE()
    ae.network_timeout = 0.2
    ae.add_requested_context('1.2.840.10008.1.1')
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established

    def patch_read_pdu():
        raise NotImplementedError

    # Monkey-patch the DUL's PDU reader so the next received PDU raises
    # inside the reactor loop
    assoc.dul._read_pdu_data = patch_read_pdu
    time.sleep(0.4)
    assert assoc.is_aborted

    scp.shutdown()
def test_recv_bad_pdu_aborts(self):
    """Test receiving undecodable PDU causes abort."""
    commands = [
        ('recv', None),  # recv a-associate-rq
        ('send', a_associate_ac),
        ('send', b"\x07\x00\x00\x00\x00\x02\x00\x00"),  # undecodable PDU data
        ('recv', None),
        ('exit', None)
    ]
    self.scp = scp = start_server(commands)

    def handle(event):
        # Step the scripted peer through recv RQ + send AC
        scp.step()
        scp.step()

    hh = [(evt.EVT_REQUESTED, handle)]

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    ae.add_requested_context('1.2.840.10008.1.1')
    assoc = ae.associate('localhost', 11112, evt_handlers=hh)
    assert assoc.is_established
    scp.step()  # send bad PDU
    scp.step()  # receive abort
    assert assoc.is_aborted
    scp.step()
    scp.shutdown()
def get_implicit_assoc(host, port):
    """Request a Verification association using Implicit VR LE only."""
    ae = AE(ae_title=b'robot')
    # Short timeouts so an unresponsive peer fails fast
    ae.acse_timeout = 2
    ae.dimse_timeout = 2
    ae.network_timeout = 2
    ae.add_requested_context('1.2.840.10008.1.1', ImplicitVRLittleEndian)
    return ae.associate(host, int(port))
def main(args=None):
    """Run the application."""
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print(f"echoscu.py v{__version__}")
        sys.exit()

    APP_LOGGER = setup_logging(args, "echoscu")
    APP_LOGGER.debug("echoscu.py v%s", __version__)
    APP_LOGGER.debug("")

    # Choose which transfer syntaxes to propose
    if args.request_little:
        transfer_syntax = [ExplicitVRLittleEndian]
    elif args.request_big:
        transfer_syntax = [ExplicitVRBigEndian]
    elif args.request_implicit:
        transfer_syntax = [ImplicitVRLittleEndian]
    else:
        transfer_syntax = [
            ExplicitVRLittleEndian,
            ImplicitVRLittleEndian,
            DeflatedExplicitVRLittleEndian,
            ExplicitVRBigEndian,
        ]

    # Local AE proposing only Verification
    ae = AE(ae_title=args.calling_aet)
    ae.add_requested_context(Verification, transfer_syntax)

    # Set timeouts
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout
    ae.network_timeout = args.network_timeout

    # Request association with remote AE
    assoc = ae.associate(
        args.addr, args.port, ae_title=args.called_aet, max_pdu=args.max_pdu
    )

    # Failed to associate: timeout, refused, connection closed, aborted
    if not assoc.is_established:
        sys.exit(1)

    # Send the requested number of C-ECHOs, then end the association
    for _ in range(args.repeat):
        assoc.send_c_echo()

    if args.abort:
        assoc.abort()
    else:
        assoc.release()
def receive_store_simultaneous(nr_assoc, ds_per_assoc, use_yappi=False):
    """Run a Storage SCP and transfer datasets with simultaneous storescu's.

    Parameters
    ----------
    nr_assoc : int
        The number of simultaneous associations that will be made.
    ds_per_assoc : int
        The number of C-STORE requests sent per successful association.
    use_yappi : bool, optional
        True to use the yappi profiler, False otherwise (default).
    """
    if use_yappi:
        init_yappi()

    def handle(event):
        # Always report success to the C-STORE requestor
        return 0x0000

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    ae.maximum_associations = 15
    ae.add_supported_context(DATASET.SOPClassUID, ImplicitVRLittleEndian)
    server = ae.start_server(
        ('', 11112), block=False, evt_handlers=[(evt.EVT_C_STORE, handle)]
    )

    time.sleep(0.5)
    start_time = time.time()
    is_successful = True

    # Launch the storescu subprocesses, then busy-wait until they all exit
    # (fix: removed the unused `run_times` list)
    processes = []
    for ii in range(nr_assoc):
        processes.append(start_storescu(ds_per_assoc))

    while None in [pp.poll() for pp in processes]:
        pass

    # Success requires every subprocess to exit with code 0
    returncodes = list(set([pp.returncode for pp in processes]))
    if len(returncodes) != 1 or returncodes[0] != 0:
        is_successful = False

    if is_successful:
        print(
            "C-STORE SCP transferred {} total datasets over {} "
            "association(s) in {:.2f} s"
            .format(nr_assoc * ds_per_assoc, nr_assoc, time.time() - start_time)
        )
    else:
        print("C-STORE SCP benchmark failed")

    server.shutdown()
def receive_store_simultaneous(test_ds, nr_assoc, ds_per_assoc, use_yappi=False):
    """Run a Storage SCP and transfer datasets with simultaneous storescu's.

    Parameters
    ----------
    test_ds : pydicom.dataset.Dataset
        The test dataset to use
    nr_assoc : int
        The number of simultaneous associations that will be made.
    ds_per_assoc : int
        The number of C-STORE requests sent per successful association.
    use_yappi : bool, optional
        True to use the yappi profiler, False otherwise (default).
    """
    if use_yappi:
        init_yappi()

    def handle(event):
        # Always report success to the C-STORE requestor
        return 0x0000

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    ae.maximum_associations = 15
    ae.add_supported_context(test_ds.SOPClassUID, ImplicitVRLittleEndian)
    server = ae.start_server(
        ("localhost", 11112), block=False, evt_handlers=[(evt.EVT_C_STORE, handle)]
    )

    time.sleep(0.5)
    start_time = time.time()

    # One storescu subprocess per association; busy-wait until all exit
    workers = [start_storescu(test_ds, ds_per_assoc) for _ in range(nr_assoc)]
    while any(proc.poll() is None for proc in workers):
        pass

    # Success only when every subprocess exited with code 0
    is_successful = {proc.returncode for proc in workers} == {0}

    if is_successful:
        print(
            f"C-STORE SCP transferred {nr_assoc * ds_per_assoc} total "
            f"{os.path.basename(test_ds.filename)} datasets over "
            f"{nr_assoc} association{'' if nr_assoc == 1 else 's'} "
            f"in {time.time() - start_time:.2f} s"
        )
    else:
        print("C-STORE SCP benchmark failed")

    server.shutdown()
def main(args=None):
    """Run the application."""
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print('storescp.py v{}'.format(__version__))
        sys.exit()

    APP_LOGGER = setup_logging(args, 'storescp')
    APP_LOGGER.debug('storescp.py v{0!s}'.format(__version__))
    APP_LOGGER.debug('')

    # Order the transfer syntaxes according to the CLI preference flags
    syntaxes = ALL_TRANSFER_SYNTAXES[:]
    if args.prefer_uncompr:
        # Demote Implicit VR LE to last so uncompressed explicit wins
        syntaxes.remove(ImplicitVRLittleEndian)
        syntaxes.append(ImplicitVRLittleEndian)
    elif args.prefer_little:
        # Promote Explicit VR LE to first
        syntaxes.remove(ExplicitVRLittleEndian)
        syntaxes.insert(0, ExplicitVRLittleEndian)
    elif args.prefer_big:
        # Promote Explicit VR BE to first
        syntaxes.remove(ExplicitVRBigEndian)
        syntaxes.insert(0, ExplicitVRBigEndian)
    elif args.implicit:
        # Accept Implicit VR LE only
        syntaxes = [ImplicitVRLittleEndian]

    handlers = [(evt.EVT_C_STORE, handle_store, [args, APP_LOGGER])]

    # Create application entity
    ae = AE(ae_title=args.ae_title)

    # Support every storage SOP class, plus Verification unless disabled
    for cx in AllStoragePresentationContexts:
        ae.add_supported_context(cx.abstract_syntax, syntaxes)
    if not args.no_echo:
        for cx in VerificationPresentationContexts:
            ae.add_supported_context(cx.abstract_syntax, syntaxes)

    ae.maximum_pdu_size = args.max_pdu

    # Set timeouts
    ae.network_timeout = args.network_timeout
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout

    ae.start_server((args.bind_address, args.port), evt_handlers=handlers)
def test_exception_in_reactor(self):
    """Test that an exception being raised in the DUL reactor kills the
    DUL and aborts the association.
    """
    commands = [
        ("recv", None),  # recv a-associate-rq
        ("send", a_associate_ac),
        ("send", a_release_rq),  # Trigger the exception
        ("recv", None),  # recv a-abort
        ("exit", None),
    ]
    self.scp = scp = start_server(commands)

    def handle(event):
        # Step the scripted peer through recv RQ + send AC
        scp.step()
        scp.step()

    hh = [(evt.EVT_REQUESTED, handle)]

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    ae.add_requested_context("1.2.840.10008.1.1")
    assoc = ae.associate("localhost", 11112, evt_handlers=hh)
    assert assoc.is_established

    def patch_read_pdu():
        raise NotImplementedError

    # Monkey-patch the DUL's PDU reader so the next received PDU raises
    assoc.dul._read_pdu_data = patch_read_pdu
    scp.step()
    # Wait for the DUL reactor thread to die after the exception
    while assoc.dul.is_alive():
        time.sleep(0.001)

    scp.step()
    assert assoc.is_aborted
    scp.step()
    scp.shutdown()
def test_recv_failure_aborts(self):
    """Test connection close during PDU recv causes abort."""
    commands = [
        ('recv', None),  # recv a-associate-rq
        ('send', a_associate_ac),
        ('wait', 0.1),  # Don't want to accidentally kill the DUL
        # PDU header promising more data that never arrives before close
        ('send', b"\x07\x00\x00\x00\x00\x04"),
        ('wait', 0.3)
    ]
    scp = start_server(commands)

    ae = AE()
    # Short network timeout so the truncated recv aborts quickly
    ae.network_timeout = 0.2
    ae.add_requested_context('1.2.840.10008.1.1')
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    time.sleep(0.4)
    assert assoc.is_aborted

    scp.shutdown()
def main(args=None):
    """Run the application."""
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print(f"echoscp.py v{__version__}")
        sys.exit()

    APP_LOGGER = setup_logging(args, "echoscp")
    APP_LOGGER.debug(f"echoscp.py v{__version__}")
    APP_LOGGER.debug("")

    # Order the transfer syntaxes according to the CLI preference flags
    syntaxes = ALL_TRANSFER_SYNTAXES[:]
    if args.prefer_uncompr:
        # Demote Implicit VR LE to last so uncompressed explicit wins
        syntaxes.remove(str(ImplicitVRLittleEndian))
        syntaxes.append(ImplicitVRLittleEndian)
    elif args.prefer_little:
        # Promote Explicit VR LE to first
        syntaxes.remove(str(ExplicitVRLittleEndian))
        syntaxes.insert(0, ExplicitVRLittleEndian)
    elif args.prefer_big:
        # Promote Explicit VR BE to first
        syntaxes.remove(str(ExplicitVRBigEndian))
        syntaxes.insert(0, ExplicitVRBigEndian)
    elif args.implicit:
        # Accept Implicit VR LE only
        syntaxes = [ImplicitVRLittleEndian]

    # Create the AE; supports Verification only
    ae = AE(ae_title=args.ae_title)
    ae.add_supported_context(Verification, syntaxes)
    ae.maximum_pdu_size = args.max_pdu

    # Set timeouts
    ae.network_timeout = args.network_timeout
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout

    ae.start_server(
        (args.bind_address, args.port),
        evt_handlers=[(evt.EVT_C_ECHO, handle_echo)],
    )
def test_recv_short_aborts(self, caplog):
    """Test receiving short PDU causes abort."""
    with caplog.at_level(logging.ERROR, logger="pynetdicom"):
        commands = [
            ("recv", None),  # recv a-associate-rq
            ("send", a_associate_ac),
            ("send", b"\x07\x00\x00\x00\x00\x04"),  # Send first 6
            ("send", b"\x00\x00"),  # Send short remainder
            ("exit", None),
        ]
        self.scp = scp = start_server(commands)

        def handle(event):
            scp.step()  # recv A-ASSOCIATE-RQ
            scp.step()  # send A-ASSOCIATE-AC

        hh = [(evt.EVT_REQUESTED, handle)]

        ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        # Short network timeout so the truncated PDU triggers the abort
        ae.network_timeout = 0.5
        ae.add_requested_context("1.2.840.10008.1.1")
        assoc = ae.associate("localhost", 11112, evt_handlers=hh)
        assert assoc.is_established
        scp.step()  # send short pdu
        time.sleep(0.1)
        # Close our end of the socket mid-PDU
        assoc.dul.socket.socket.close()
        # Need to wait for network timeout to expire
        timeout = 0
        while not assoc.is_aborted and timeout < 1:
            time.sleep(0.05)
            timeout += 0.05

        scp.step()
        scp.step()  # exit
        assert assoc.is_aborted
        scp.shutdown()

        assert "Connection closed before the entire PDU was received" in caplog.text
def main():
    """Initialise global state, start the Storage SCP and run inference."""
    global inference_queue
    inference_queue = deque()
    global dicom_store
    dicom_store = {}

    # Parent folder to all storage requests
    os_helpers.make_directory(config.SCP_STORAGE_PATH)

    ae = AE()
    # No timeouts: associations may stay open indefinitely
    ae.network_timeout = None
    ae.acse_timeout = None
    ae.dimse_timeout = None
    ae.maximum_pdu_size = 0
    ae.maximum_associations = 14  # Tested with 14 threads

    # Accept every storage SOP class with any transfer syntax
    for cx in AllStoragePresentationContexts:
        ae.add_supported_context(cx.abstract_syntax, ALL_TRANSFER_SYNTAXES)

    handlers = [
        (evt.EVT_ACCEPTED, handle_accepted),
        (evt.EVT_C_STORE, handle_store),
        (evt.EVT_RELEASED, handle_release),
    ]

    ae.start_server(
        (config.SCP_IP, config.SCP_PORT), block=False, evt_handlers=handlers
    )
    print_listening()
    inference_loop()
def test_recv_failure_aborts(self, caplog):
    """Test connection close during PDU recv causes abort."""
    with caplog.at_level(logging.ERROR, logger="pynetdicom"):
        commands = [
            ("recv", None),  # recv a-associate-rq
            ("send", a_associate_ac),
            # PDU header promising 4 more bytes that never arrive
            ("send", b"\x07\x00\x00\x00\x00\x04"),
            ("exit", None),
        ]
        self.scp = scp = start_server(commands)

        def handle(event):
            # Step the scripted peer through recv RQ + send AC
            scp.step()
            scp.step()

        hh = [(evt.EVT_REQUESTED, handle)]

        ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 0.2
        ae.add_requested_context("1.2.840.10008.1.1")
        assoc = ae.associate("localhost", 11112, evt_handlers=hh)
        assert assoc.is_established
        scp.step()  # send short pdu
        scp.step()  # close connection
        scp.shutdown()
        # Need to wait for network timeout to expire
        timeout = 0
        while not assoc.is_aborted and timeout < 1:
            time.sleep(0.05)
            timeout += 0.05

        assert assoc.is_aborted
        assert (
            "The received PDU is shorter than expected (6 of 10 bytes received)"
        ) in caplog.text
def main():
    """Start a blocking Storage SCP accepting all storage SOP classes."""
    ae = AE()
    # No timeouts: wait indefinitely for the peer
    ae.network_timeout = None
    ae.acse_timeout = None
    ae.dimse_timeout = None
    ae.maximum_pdu_size = 0
    ae.maximum_associations = 1  # TODO Handle more than one

    # Accept every storage SOP class with any transfer syntax
    for cx in AllStoragePresentationContexts:
        ae.add_supported_context(cx.abstract_syntax, ALL_TRANSFER_SYNTAXES)

    handlers = [(evt.EVT_C_STORE, handle_store),
                (evt.EVT_RELEASED, handle_release)]

    print("\nListening for association request on port:", config.SCP_PORT)
    ae.start_server((config.SCP_IP, config.SCP_PORT), block=True,
                    evt_handlers=handlers)
def test_recv_bad_pdu_aborts(self, caplog):
    """Test receiving undecodable PDU causes abort."""
    with caplog.at_level(logging.ERROR, logger="pynetdicom"):
        commands = [
            ("recv", None),  # recv a-associate-rq
            ("send", a_associate_ac),
            ("send", b"\x07\x00\x00\x00\x00\x02\x00\x00"),  # undecodable PDU
            ("recv", None),
            ("exit", None),
        ]
        self.scp = scp = start_server(commands)

        def handle(event):
            # Step the scripted peer through recv RQ + send AC
            scp.step()
            scp.step()

        hh = [(evt.EVT_REQUESTED, handle)]

        ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context("1.2.840.10008.1.1")
        assoc = ae.associate("localhost", 11112, evt_handlers=hh)
        assert assoc.is_established
        scp.step()  # send bad PDU
        # Wait for the DUL reactor thread to die after the decode failure
        while assoc.dul.is_alive():
            time.sleep(0.001)

        scp.step()  # receive abort
        scp.step()
        scp.shutdown()

        assert assoc.is_aborted
        assert "Unable to decode the received PDU data" in caplog.text
def main(args=None):
    """Run the application."""
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print(f"qrscp.py v{__version__}")
        sys.exit()

    APP_LOGGER = setup_logging(args, "qrscp")
    APP_LOGGER.debug(f"qrscp.py v{__version__}")
    APP_LOGGER.debug("")

    APP_LOGGER.debug("Using configuration from:")
    APP_LOGGER.debug(f"  {args.config}")
    APP_LOGGER.debug("")
    config = ConfigParser()
    config.read(args.config)

    # CLI arguments override the corresponding config-file values
    if args.ae_title:
        config["DEFAULT"]["ae_title"] = args.ae_title
    if args.port:
        config["DEFAULT"]["port"] = args.port
    if args.max_pdu:
        config["DEFAULT"]["max_pdu"] = args.max_pdu
    if args.acse_timeout:
        config["DEFAULT"]["acse_timeout"] = args.acse_timeout
    if args.dimse_timeout:
        config["DEFAULT"]["dimse_timeout"] = args.dimse_timeout
    if args.network_timeout:
        config["DEFAULT"]["network_timeout"] = args.network_timeout
    if args.bind_address:
        config["DEFAULT"]["bind_address"] = args.bind_address
    if args.database_location:
        config["DEFAULT"]["database_location"] = args.database_location
    if args.instance_location:
        config["DEFAULT"]["instance_location"] = args.instance_location

    # Log configuration settings
    _log_config(config, APP_LOGGER)
    app_config = config["DEFAULT"]

    # Each non-DEFAULT config section describes a C-MOVE destination AE
    dests = {}
    for ae_title in config.sections():
        dest = config[ae_title]
        # Convert to bytes and validate the AE title
        ae_title = set_ae(ae_title, "ae_title", False, False)
        dests[ae_title] = (dest["address"], dest.getint("port"))

    # Use default or specified configuration file
    current_dir = os.path.abspath(os.path.dirname(__file__))
    instance_dir = os.path.join(current_dir, app_config["instance_location"])
    db_path = os.path.join(current_dir, app_config["database_location"])

    # The path to the database
    db_path = f"sqlite:///{db_path}"
    db.create(db_path)

    # Clean up the database and storage directory
    if args.clean:
        response = input(
            "This will delete all instances from both the storage directory "
            "and the database. Are you sure you wish to continue? [yes/no]: "
        )
        if response != "yes":
            sys.exit()

        if clean(db_path, instance_dir, APP_LOGGER):
            sys.exit()
        else:
            sys.exit(1)

    # Try to create the instance storage directory
    os.makedirs(instance_dir, exist_ok=True)

    ae = AE(app_config["ae_title"])
    ae.maximum_pdu_size = app_config.getint("max_pdu")
    ae.acse_timeout = app_config.getfloat("acse_timeout")
    ae.dimse_timeout = app_config.getfloat("dimse_timeout")
    ae.network_timeout = app_config.getfloat("network_timeout")

    ## Add supported presentation contexts
    # Verification SCP
    ae.add_supported_context(Verification, ALL_TRANSFER_SYNTAXES)

    # Storage SCP - support all transfer syntaxes
    for cx in AllStoragePresentationContexts:
        ae.add_supported_context(
            cx.abstract_syntax, ALL_TRANSFER_SYNTAXES, scp_role=True,
            scu_role=False
        )

    # Query/Retrieve SCP
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelFind)
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelMove)
    ae.add_supported_context(PatientRootQueryRetrieveInformationModelGet)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelFind)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelMove)
    ae.add_supported_context(StudyRootQueryRetrieveInformationModelGet)

    # Set our handler bindings
    handlers = [
        (evt.EVT_C_ECHO, handle_echo, [args, APP_LOGGER]),
        (evt.EVT_C_FIND, handle_find, [db_path, args, APP_LOGGER]),
        (evt.EVT_C_GET, handle_get, [db_path, args, APP_LOGGER]),
        (evt.EVT_C_MOVE, handle_move, [dests, db_path, args, APP_LOGGER]),
        (evt.EVT_C_STORE, handle_store, [instance_dir, db_path, args, APP_LOGGER]),
    ]

    # Listen for incoming association requests
    ae.start_server(
        (app_config["bind_address"], app_config.getint("port")),
        evt_handlers=handlers
    )
def receive_store_internal(
    test_ds, nr_assoc, ds_per_assoc, write_ds=0, use_yappi=False
):
    """Run a Storage SCP and transfer datasets with pynetdicom alone.

    Parameters
    ----------
    test_ds : pydicom.dataset.Dataset
        The test dataset to use
    nr_assoc : int
        The total number of (sequential) associations that will be made.
    ds_per_assoc : int
        The number of C-STORE requests sent per successful association.
    write_ds : int, optional
        ``0`` to not write to file (default),
        ``1`` to write the received dataset to file using event.dataset,
        ``2`` to write using the raw ``bytes``,
        ``3`` to write raw bytes with unlimited PDU size.
    use_yappi : bool, optional
        True to use the yappi profiler, False otherwise (default).
    """
    if use_yappi:
        init_yappi()

    def handle(event):
        if write_ds == 1:
            # Decode then re-encode via pydicom
            with tempfile.TemporaryFile("w+b") as tfile:
                ds = event.dataset
                ds.file_meta = event.file_meta
                ds.save_as(tfile)
        elif write_ds in (2, 3):
            # Write the raw encoded bytes directly (no decode)
            with tempfile.TemporaryFile("w+b") as tfile:
                tfile.write(b"\x00" * 128)
                tfile.write(b"DICM")
                write_file_meta_info(tfile, event.file_meta)
                tfile.write(event.request.DataSet.getvalue())

        return 0x0000

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    if write_ds == 3:
        ae.maximum_pdu_size = 0
    ae.add_supported_context(test_ds.SOPClassUID, ImplicitVRLittleEndian)
    ae.add_requested_context(test_ds.SOPClassUID, ImplicitVRLittleEndian)

    server = ae.start_server(
        ("localhost", 11112), block=False, evt_handlers=[(evt.EVT_C_STORE, handle)]
    )

    time.sleep(0.5)
    start_time = time.time()
    is_successful = True

    for ii in range(nr_assoc):
        assoc = ae.associate("127.0.0.1", 11112)
        if assoc.is_established:
            for jj in range(ds_per_assoc):
                assoc.send_c_store(test_ds)

            assoc.release()
        else:
            # Fix: a failed association was previously ignored and the
            # benchmark still reported success
            is_successful = False
            break

    if is_successful:
        write_msg = ["", " (write)", " (write fast)", " (write fastest)"][write_ds]
        print(
            f"C-STORE SCU/SCP transferred {nr_assoc * ds_per_assoc} total "
            f"{os.path.basename(test_ds.filename)} datasets over "
            f"{nr_assoc} association{'' if nr_assoc == 1 else 's'}{write_msg} "
            f"in {time.time() - start_time:.2f} s"
        )
    else:
        print("C-STORE SCU/SCP benchmark failed")

    server.shutdown()
def main(args=None):
    """Run the application."""
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print(f"findscu.py v{__version__}")
        sys.exit()

    APP_LOGGER = setup_logging(args, "findscu")
    APP_LOGGER.debug(f"findscu.py v{__version__}")
    APP_LOGGER.debug("")

    # Create query (identifier) dataset
    try:
        # If you're looking at this to see how QR Find works then `identifer`
        # is a pydicom Dataset instance with your query keys, e.g.:
        #     identifier = Dataset()
        #     identifier.QueryRetrieveLevel = 'PATIENT'
        #     identifier.PatientName = ''
        identifier = create_dataset(args, APP_LOGGER)
    except Exception as exc:
        APP_LOGGER.exception(exc)
        sys.exit(1)

    # Create application entity
    # Binding to port 0 lets the OS pick an available port
    ae = AE(ae_title=args.calling_aet)

    # Set timeouts
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout
    ae.network_timeout = args.network_timeout

    # Set the Presentation Contexts we are requesting the Find SCP support
    ae.requested_contexts = (
        QueryRetrievePresentationContexts
        + BasicWorklistManagementPresentationContexts
    )

    # Query/Retrieve Information Models
    if args.worklist:
        query_model = ModalityWorklistInformationFind
    elif args.study:
        query_model = StudyRootQueryRetrieveInformationModelFind
    elif args.psonly:
        query_model = PatientStudyOnlyQueryRetrieveInformationModelFind
    else:
        query_model = PatientRootQueryRetrieveInformationModelFind

    # Extended Negotiation
    ext_neg = []
    ext_opts = [
        args.relational_query,
        args.dt_matching,
        args.fuzzy_names,
        args.timezone_adj,
        args.enhanced_conversion,
    ]
    if not args.worklist and any(ext_opts):
        # One option byte per extended negotiation flag
        app_info = b""
        for option in ext_opts:
            app_info += b"\x01" if option else b"\x00"

        item = SOPClassExtendedNegotiation()
        item.sop_class_uid = query_model
        item.service_class_application_information = app_info
        ext_neg = [item]
    elif args.worklist and any([args.fuzzy_names, args.timezone_adj]):
        app_info = b"\x01\x01"
        for option in [args.fuzzy_names, args.timezone_adj]:
            app_info += b"\x01" if option else b"\x00"

        item = SOPClassExtendedNegotiation()
        item.sop_class_uid = query_model
        item.service_class_application_information = app_info
        ext_neg = [item]

    # Request association with (QR/BWM) Find SCP
    assoc = ae.associate(
        args.addr,
        args.port,
        ae_title=args.called_aet,
        max_pdu=args.max_pdu,
        ext_neg=ext_neg,
    )
    if assoc.is_established:
        # Send C-FIND request, `responses` is a generator
        responses = assoc.send_c_find(identifier, query_model)
        # Used to generate filenames if args.write used
        fname = generate_filename()
        for (status, rsp_identifier) in responses:
            # If `status.Status` is one of the 'Pending' statuses then
            # `rsp_identifier` is the C-FIND response's Identifier dataset
            if status and status.Status in [0xFF00, 0xFF01]:
                if args.write:
                    rsp_identifier.file_meta = get_file_meta(
                        assoc, query_model)
                    rsp_identifier.save_as(next(fname), write_like_original=False)

        # Release the association
        assoc.release()
    else:
        sys.exit(1)
except: dcm_files = [] yield 0 for dcm in dcm_files: ds = dcmread(dcm, force=True) yield 0xFF00, ds handlers = [(evt.EVT_C_GET, handle_get)] # Create application entity ae = AE(ae_title=args.aetitle) for context in StoragePresentationContexts: ae.add_supported_context(context.abstract_syntax, transfer_syntax, scp_role=True, scu_role=False) for context in QueryRetrievePresentationContexts: ae.add_supported_context(context.abstract_syntax, transfer_syntax) ae.maximum_pdu_size = args.max_pdu # Set timeouts ae.network_timeout = args.timeout ae.acse_timeout = args.acse_timeout ae.dimse_timeout = args.dimse_timeout ae.start_server(('', args.port), evt_handlers=handlers)
dcm_files = [os.path.join(basedir, x) for x in dcm_files] for dcm in dcm_files: data = read_file(dcm, force=True) d = Dataset() d.QueryRetrieveLevel = dataset.QueryRetrieveLevel d.RetrieveAETitle = args.aetitle d.PatientName = data.PatientName yield d # Create application entity ae = AE( ae_title=args.aetitle, port=args.port, scu_sop_class=[], scp_sop_class=QueryRetrieveSOPClassList, transfer_syntax=transfer_syntax, ) ae.maximum_pdu_size = args.max_pdu # Set timeouts ae.network_timeout = args.timeout ae.acse_timeout = args.acse_timeout ae.dimse_timeout = args.dimse_timeout ae.on_c_find = on_c_find ae.start()
def main(args=None):
    """Run the application."""
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print('storescu.py v{}'.format(__version__))
        sys.exit()

    APP_LOGGER = setup_logging(args, 'storescu')
    APP_LOGGER.debug('storescu.py v{0!s}'.format(__version__))
    APP_LOGGER.debug('')

    # Collect the DICOM files to send; inaccessible paths are logged only
    lfiles, badfiles = get_files(args.path, args.recurse)

    for bad in badfiles:
        APP_LOGGER.error("Cannot access path: {}".format(bad))

    ae = AE(ae_title=args.calling_aet)
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout
    ae.network_timeout = args.network_timeout

    if args.required_contexts:
        # Only propose required presentation contexts
        lfiles, contexts = get_contexts(lfiles, APP_LOGGER)
        # DICOM limits an A-ASSOCIATE-RQ to 128 presentation contexts
        if len(contexts) > 128:
            raise ValueError(
                "More than 128 presentation contexts required with the "
                "'--required-contexts' flag, please try again without it or "
                "with fewer files"
            )

        for abstract, transfer in contexts.items():
            ae.add_requested_context(abstract, transfer)
    else:
        # Propose the default presentation contexts
        if args.request_little:
            transfer_syntax = [ExplicitVRLittleEndian]
        elif args.request_big:
            transfer_syntax = [ExplicitVRBigEndian]
        elif args.request_implicit:
            transfer_syntax = [ImplicitVRLittleEndian]
        else:
            transfer_syntax = [
                ExplicitVRLittleEndian,
                ImplicitVRLittleEndian,
                DeflatedExplicitVRLittleEndian,
                ExplicitVRBigEndian
            ]

        for cx in StoragePresentationContexts:
            ae.add_requested_context(cx.abstract_syntax, transfer_syntax)

    if not lfiles:
        APP_LOGGER.warning("No suitable DICOM files found")
        sys.exit()

    # Request association with remote
    assoc = ae.associate(
        args.addr, args.port, ae_title=args.called_aet, max_pdu=args.max_pdu
    )
    if assoc.is_established:
        ii = 1
        for fpath in lfiles:
            APP_LOGGER.info('Sending file: {}'.format(fpath))
            try:
                ds = dcmread(fpath)
                status = assoc.send_c_store(ds, ii)
                ii += 1
            except InvalidDicomError:
                # Unparseable file — skip it and keep sending the rest
                APP_LOGGER.error('Bad DICOM file: {}'.format(fpath))
            except ValueError as exc:
                APP_LOGGER.error("Store failed: {}".format(fpath))
            except Exception as exc:
                APP_LOGGER.error("Store failed: {}".format(fpath))
                APP_LOGGER.exception(exc)

        assoc.release()
    else:
        sys.exit(1)
def send_store(nr_assoc, ds_per_assoc, use_yappi=False):
    """Send a number of sequential C-STORE requests.

    Parameters
    ----------
    nr_assoc : int
        The total number of (sequential) associations that will be made.
    ds_per_assoc : int
        The number of C-STORE requests sent per successful association.
    use_yappi : bool, optional
        True to use the yappi profiler, False otherwise (default).
    """
    if use_yappi:
        init_yappi()

    # Start SCP
    server = start_storescp()
    time.sleep(0.5)

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    ae.add_requested_context(DATASET.SOPClassUID, ImplicitVRLittleEndian)

    # Start timer
    # (fix: removed the `run_times` list, which was appended to but never read)
    start_time = time.time()
    is_successful = True

    for ii in range(nr_assoc):
        if not is_successful:
            break

        assoc = ae.associate('localhost', 11112)

        if assoc.is_established:
            for jj in range(ds_per_assoc):
                try:
                    status = assoc.send_c_store(DATASET)
                    # Any non-success status fails the benchmark
                    if status and status.Status != 0x0000:
                        is_successful = False
                        break
                except RuntimeError:
                    is_successful = False
                    break

            assoc.release()
        else:
            is_successful = False
            break

    if is_successful:
        print("C-STORE SCU transferred {} total datasets over {} "
              "association(s) in {:.2f} s".format(nr_assoc * ds_per_assoc,
                                                  nr_assoc,
                                                  time.time() - start_time))
    else:
        print("C-STORE SCU benchmark failed")

    server.terminate()
def receive_store_internal(nr_assoc, ds_per_assoc, write_ds=0, use_yappi=False):
    """Run a Storage SCP and transfer datasets with pynetdicom alone.

    Parameters
    ----------
    nr_assoc : int
        The total number of (sequential) associations that will be made.
    ds_per_assoc : int
        The number of C-STORE requests sent per successful association.
    write_ds : int, optional
        ``0`` to not write to file (default),
        ``1`` to write the received dataset to file using event.dataset,
        ``2`` to write using the raw ``bytes``,
        ``3`` to write raw bytes with unlimited PDU size.
    use_yappi : bool, optional
        True to use the yappi profiler, False otherwise (default).
    """
    if use_yappi:
        init_yappi()

    def handle(event):
        if write_ds == 1:
            # Decode then re-encode via pydicom
            with tempfile.TemporaryFile('w+b') as tfile:
                ds = event.dataset
                ds.file_meta = event.file_meta
                ds.save_as(tfile)
        elif write_ds in (2, 3):
            # Write the raw encoded bytes directly (no decode)
            with tempfile.TemporaryFile('w+b') as tfile:
                tfile.write(b'\x00' * 128)
                tfile.write(b'DICM')
                write_file_meta_info(tfile, event.file_meta)
                tfile.write(event.request.DataSet.getvalue())

        return 0x0000

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    if write_ds == 3:
        ae.maximum_pdu_size = 0
    ae.add_supported_context(DATASET.SOPClassUID, ImplicitVRLittleEndian)
    ae.add_requested_context(DATASET.SOPClassUID, ImplicitVRLittleEndian)

    server = ae.start_server(
        ('', 11112), block=False, evt_handlers=[(evt.EVT_C_STORE, handle)]
    )

    time.sleep(0.5)
    start_time = time.time()
    is_successful = True

    for ii in range(nr_assoc):
        assoc = ae.associate('127.0.0.1', 11112)
        if assoc.is_established:
            for jj in range(ds_per_assoc):
                assoc.send_c_store(DATASET)

            assoc.release()
        else:
            # Fix: a failed association was previously ignored and the
            # benchmark still reported success (also removed unused
            # `run_times` list)
            is_successful = False
            break

    if is_successful:
        print("C-STORE SCU/SCP transferred {} total datasets over {} "
              "association(s) in {:.2f} s".format(nr_assoc * ds_per_assoc,
                                                  nr_assoc,
                                                  time.time() - start_time))
    else:
        print("C-STORE SCU/SCP benchmark failed")

    server.shutdown()
def main(args=None):
    """Run the movescu application.

    Parses the command line, optionally starts a local Storage SCP to
    receive the moved datasets, associates with the Move SCP, sends a
    single C-MOVE request and exits with 0 on success or 1 on failure.

    Parameters
    ----------
    args : list of str, optional
        If not None, used to replace ``sys.argv`` before argument parsing
        (allows calling programmatically with a fake command line).
    """
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print(f"movescu.py v{__version__}")
        sys.exit()

    APP_LOGGER = setup_logging(args, "movescu")
    APP_LOGGER.debug(f"movescu.py v{__version__}")
    APP_LOGGER.debug("")

    # Create query (identifier) dataset
    try:
        # If you're looking at this to see how QR Move works then `identifer`
        # is a pydicom Dataset instance with your query keys, e.g.:
        #     identifier = Dataset()
        #     identifier.QueryRetrieveLevel = 'PATIENT'
        #     identifier.PatientName = '*'
        identifier = create_dataset(args, APP_LOGGER)
    except Exception as exc:
        APP_LOGGER.exception(exc)
        sys.exit(1)

    # Create application entity
    ae = AE()

    # Start the Store SCP (optional) - receives the datasets the Move SCP
    # sends to the move destination AE
    scp = None
    if args.store:
        transfer_syntax = ALL_TRANSFER_SYNTAXES[:]
        store_handlers = [(evt.EVT_C_STORE, handle_store, [args, APP_LOGGER])]
        # The SCP uses the store AE title; reset to the calling AE title below
        ae.ae_title = args.store_aet
        for cx in AllStoragePresentationContexts:
            ae.add_supported_context(cx.abstract_syntax, transfer_syntax)
        scp = ae.start_server(
            ("localhost", args.store_port),
            block=False,
            evt_handlers=store_handlers,
        )

    ae.ae_title = args.calling_aet
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout
    ae.network_timeout = args.network_timeout
    ae.requested_contexts = QueryRetrievePresentationContexts
    # Don't offer the storage contexts on the outgoing (requestor) association
    ae.supported_contexts = []

    # Query/Retrieve Information Models
    if args.study:
        query_model = StudyRootQueryRetrieveInformationModelMove
    elif args.psonly:
        query_model = PatientStudyOnlyQueryRetrieveInformationModelMove
    else:
        query_model = PatientRootQueryRetrieveInformationModelMove

    # Extended Negotiation
    ext_neg = []
    ext_opts = [args.relational_retrieval, args.enhanced_conversion]
    if any(ext_opts):
        # One byte per option: \x01 = requested, \x00 = not requested
        app_info = b""
        for option in ext_opts:
            app_info += b"\x01" if option else b"\x00"

        item = SOPClassExtendedNegotiation()
        item.sop_class_uid = query_model
        item.service_class_application_information = app_info
        ext_neg = [item]

    # Request association with remote AE
    assoc = ae.associate(
        args.addr,
        args.port,
        ae_title=args.called_aet,
        max_pdu=args.max_pdu,
        ext_neg=ext_neg,
    )
    if assoc.is_established:
        # Send query; default the move destination to our own AE title
        move_aet = args.move_aet or args.calling_aet
        responses = assoc.send_c_move(identifier, move_aet, query_model)
        for (status, rsp_identifier) in responses:
            # If `status.Status` is one of the 'Pending' statuses then
            # `rsp_identifier` is the C-MOVE response's Identifier dataset
            if status and status.Status in [0xFF00, 0xFF01]:
                # `rsp_identifier` is a pydicom Dataset containing a query
                # response. You may want to do something interesting here...
                pass

        assoc.release()
        _EXIT_VALUE = 0
    else:
        _EXIT_VALUE = 1

    # Shutdown the Storage SCP (if used)
    if scp:
        scp.shutdown()

    sys.exit(_EXIT_VALUE)
def main(args=None):
    """Run the getscu application.

    Parses the command line, associates with the Get SCP while also
    offering the storage contexts (acting as a Storage SCP over the same
    association via SCP/SCU Role Selection), sends a single C-GET request
    and exits with 1 if the association could not be established.

    Parameters
    ----------
    args : list of str, optional
        If not None, used to replace ``sys.argv`` before argument parsing
        (allows calling programmatically with a fake command line).
    """
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print(f"getscu.py v{__version__}")
        sys.exit()

    APP_LOGGER = setup_logging(args, "getscu")
    APP_LOGGER.debug(f"getscu.py v{__version__}")
    APP_LOGGER.debug("")

    # Create query (identifier) dataset
    try:
        # If you're looking at this to see how QR Get works then `identifer`
        # is a pydicom Dataset instance with your query keys, e.g.:
        #     identifier = Dataset()
        #     identifier.QueryRetrieveLevel = 'PATIENT'
        #     identifier.PatientName = '*'
        identifier = create_dataset(args, APP_LOGGER)
    except Exception as exc:
        APP_LOGGER.exception(exc)
        sys.exit(1)

    # Exclude these SOP Classes
    _exclusion = [
        EncapsulatedSTLStorage,
        EncapsulatedOBJStorage,
        EncapsulatedMTLStorage,
    ]
    store_contexts = [
        cx for cx in StoragePresentationContexts
        if cx.abstract_syntax not in _exclusion
    ]

    # Create application entity
    # Binding to port 0 lets the OS pick an available port
    ae = AE(ae_title=args.calling_aet)
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout
    ae.network_timeout = args.network_timeout

    # Extended Negotiation - SCP/SCU Role Selection
    ext_neg = []
    ae.add_requested_context(PatientRootQueryRetrieveInformationModelGet)
    ae.add_requested_context(StudyRootQueryRetrieveInformationModelGet)
    ae.add_requested_context(PatientStudyOnlyQueryRetrieveInformationModelGet)
    for cx in store_contexts:
        ae.add_requested_context(cx.abstract_syntax)
        # Add SCP/SCU Role Selection Negotiation to the extended negotiation
        # We want to act as a Storage SCP
        ext_neg.append(build_role(cx.abstract_syntax, scp_role=True))

    # Query/Retrieve Information Models
    if args.study:
        query_model = StudyRootQueryRetrieveInformationModelGet
    elif args.psonly:
        query_model = PatientStudyOnlyQueryRetrieveInformationModelGet
    else:
        query_model = PatientRootQueryRetrieveInformationModelGet

    # Extended Negotiation - SOP Class Extended
    ext_opts = [args.relational_retrieval, args.enhanced_conversion]
    if any(ext_opts):
        # One byte per option: \x01 = requested, \x00 = not requested
        app_info = b""
        for option in ext_opts:
            app_info += b"\x01" if option else b"\x00"

        item = SOPClassExtendedNegotiation()
        item.sop_class_uid = query_model
        item.service_class_application_information = app_info
        ext_neg.append(item)

    # Request association with remote
    assoc = ae.associate(
        args.addr,
        args.port,
        ae_title=args.called_aet,
        ext_neg=ext_neg,
        evt_handlers=[(evt.EVT_C_STORE, handle_store, [args, APP_LOGGER])],
        max_pdu=args.max_pdu,
    )
    if assoc.is_established:
        # Send query
        responses = assoc.send_c_get(identifier, query_model)
        for (status, rsp_identifier) in responses:
            # If `status.Status` is one of the 'Pending' statuses then
            # `rsp_identifier` is the C-GET response's Identifier dataset
            if status and status.Status in [0xFF00, 0xFF01]:
                # `rsp_identifier` is a pydicom Dataset containing a query
                # response. You may want to do something interesting here...
                pass

        assoc.release()
    else:
        sys.exit(1)
def main(args=None):
    """Run the findscu application.

    Parses the command line, associates with the (QR/BWM) Find SCP, sends
    a single C-FIND request and optionally writes the pending responses to
    file. Exits with 1 if the association could not be established.

    Parameters
    ----------
    args : list of str, optional
        If not None, used to replace ``sys.argv`` before argument parsing
        (allows calling programmatically with a fake command line).
    """
    if args is not None:
        sys.argv = args

    args = _setup_argparser()

    if args.version:
        print('findscu.py v{}'.format(__version__))
        sys.exit()

    APP_LOGGER = setup_logging(args, 'findscu')
    APP_LOGGER.debug('findscu.py v{0!s}'.format(__version__))
    APP_LOGGER.debug('')

    # Create query (identifier) dataset
    try:
        # If you're looking at this to see how QR Find works then `identifer`
        # is a pydicom Dataset instance with your query keys, e.g.:
        #     identifier = Dataset()
        #     identifier.QueryRetrieveLevel = 'PATIENT'
        #     identifier.PatientName = ''
        identifier = create_dataset(args, APP_LOGGER)
    except Exception as exc:
        # Fixed: removed a leftover `raise exc` that made the exit below
        # unreachable and dumped a traceback instead of exiting cleanly
        # like the other applications
        APP_LOGGER.exception(exc)
        sys.exit(1)

    # Create application entity
    # Binding to port 0 lets the OS pick an available port
    ae = AE(ae_title=args.calling_aet)

    # Set timeouts
    ae.acse_timeout = args.acse_timeout
    ae.dimse_timeout = args.dimse_timeout
    ae.network_timeout = args.network_timeout

    # Set the Presentation Contexts we are requesting the Find SCP support
    ae.requested_contexts = (
        QueryRetrievePresentationContexts
        + BasicWorklistManagementPresentationContexts
    )

    # Query/Retrieve Information Models
    if args.worklist:
        query_model = ModalityWorklistInformationFind
    elif args.study:
        query_model = StudyRootQueryRetrieveInformationModelFind
    elif args.psonly:
        query_model = PatientStudyOnlyQueryRetrieveInformationModelFind
    else:
        query_model = PatientRootQueryRetrieveInformationModelFind

    # Request association with (QR/BWM) Find SCP
    assoc = ae.associate(
        args.addr, args.port, ae_title=args.called_aet, max_pdu=args.max_pdu
    )
    if assoc.is_established:
        # Send C-FIND request, `responses` is a generator
        responses = assoc.send_c_find(identifier, query_model)
        # Used to generate filenames if args.write used
        fname = generate_filename()
        for (status, rsp_identifier) in responses:
            # If `status.Status` is one of the 'Pending' statuses then
            # `rsp_identifier` is the C-FIND response's Identifier dataset
            if status and status.Status in [0xFF00, 0xFF01]:
                if args.write:
                    rsp_identifier.file_meta = get_file_meta(
                        assoc, query_model
                    )
                    rsp_identifier.save_as(
                        next(fname), write_like_original=False
                    )

        # Release the association
        assoc.release()
    else:
        sys.exit(1)
def receive_store_internal(nr_assoc, ds_per_assoc, write_ds=False,
                           use_yappi=False):
    """Run a Storage SCP and transfer datasets with pynetdicom alone.

    Parameters
    ----------
    nr_assoc : int
        The total number of (sequential) associations that will be made.
    ds_per_assoc : int
        The number of C-STORE requests sent per successful association.
    write_ds : bool, optional
        True to write the received dataset to file, False otherwise (default).
    use_yappi : bool, optional
        True to use the yappi profiler, False otherwise (default).
    """
    if use_yappi:
        init_yappi()

    def handle(event):
        """Handler for evt.EVT_C_STORE: optionally write, then ack."""
        if write_ds:
            # TODO: optimise write using event.request.DataSet instead
            # Standard write using dataset decode and re-encode
            # Fixed: use a context manager so each temporary file is closed
            # (and deleted) immediately instead of leaking a file handle per
            # C-STORE until garbage collection
            with tempfile.TemporaryFile(mode='w+b') as tfile:
                ds = event.dataset
                ds.file_meta = event.file_meta
                ds.save_as(tfile)

        return 0x0000

    ae = AE()
    ae.acse_timeout = 5
    ae.dimse_timeout = 5
    ae.network_timeout = 5
    ae.add_supported_context(DATASET.SOPClassUID, ImplicitVRLittleEndian)
    ae.add_requested_context(DATASET.SOPClassUID, ImplicitVRLittleEndian)
    server = ae.start_server(
        ('', 11112), block=False, evt_handlers=[(evt.EVT_C_STORE, handle)]
    )

    time.sleep(0.5)

    start_time = time.time()
    is_successful = True

    for ii in range(nr_assoc):
        assoc = ae.associate('127.0.0.1', 11112)
        if assoc.is_established:
            for jj in range(ds_per_assoc):
                assoc.send_c_store(DATASET)
            assoc.release()
        else:
            # Fixed: previously `is_successful` was never set to False,
            # making the failure branch below unreachable
            is_successful = False
            break

    # Fixed: removed the dead `run_times` accumulator (never reported)
    if is_successful:
        print(
            "C-STORE SCU/SCP transferred {} total datasets over {} "
            "association(s) in {:.2f} s"
            .format(nr_assoc * ds_per_assoc, nr_assoc,
                    time.time() - start_time)
        )
    else:
        print("C-STORE SCU/SCP benchmark failed")

    server.shutdown()