def dispatch(args):
    """Main entry function."""
    if helper.is_terminated():
        return

    helper.g_log("events.run", 1)

    try:
        config.read_config()
    except Exception:
        logger.exception("Unable to read configuration. Skipping processing.")
        monitor.send_event(
            monitor.h_events.CONFIG_UPDATE,
            monitor.severity.WARNING,
            "Unable to read configuration (possibly locked)",
        )
        return

    success_folder = Path(config.mercure[mercure_folders.SUCCESS])
    error_folder = Path(config.mercure[mercure_folders.ERROR])
    retry_max = config.mercure["retry_max"]
    retry_delay = config.mercure["retry_delay"]

    # TODO: Sort list so that the oldest DICOMs get dispatched first
    with os.scandir(config.mercure[mercure_folders.OUTGOING]) as it:
        for entry in it:
            if entry.is_dir() and not has_been_send(entry.path) and is_ready_for_sending(entry.path):
                logger.info(f"Sending folder {entry.path}")
                execute(Path(entry.path), success_folder, error_folder, retry_max, retry_delay)

            # If termination is requested, stop processing series after the
            # active one has been completed
            if helper.is_terminated():
                break
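
# A minimal sketch (not part of the original source) for the TODO above: dispatch the
# oldest series first, assuming the folder's st_mtime is a reasonable proxy for DICOM age.
# Reuses has_been_send/is_ready_for_sending/execute from the dispatch() function above.
def dispatch_oldest_first(outgoing, success_folder, error_folder, retry_max, retry_delay):
    with os.scandir(outgoing) as it:
        candidates = [
            entry
            for entry in it
            if entry.is_dir() and not has_been_send(entry.path) and is_ready_for_sending(entry.path)
        ]
    # DirEntry.stat() is fetched once per entry to build the sort key
    for entry in sorted(candidates, key=lambda e: e.stat().st_mtime):
        logger.info(f"Sending folder {entry.path}")
        execute(Path(entry.path), success_folder, error_folder, retry_max, retry_delay)
        if helper.is_terminated():
            break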
def clean(args):
    """Main entry function."""
    if helper.is_terminated():
        return

    helper.g_log("events.run", 1)

    try:
        config.read_config()
    except Exception:
        logger.exception("Unable to read configuration. Skipping processing.")
        monitor.send_event(
            monitor.h_events.CONFIG_UPDATE,
            monitor.severity.WARNING,
            "Unable to read configuration (possibly locked)",
        )
        return

    # TODO: Adaptively reduce the retention time if the disk space is running low
    if _is_offpeak(
        config.mercure["offpeak_start"],
        config.mercure["offpeak_end"],
        datetime.now().time(),
    ):
        success_folder = config.mercure[mercure_folders.SUCCESS]
        discard_folder = config.mercure[mercure_folders.DISCARD]
        retention = timedelta(seconds=config.mercure["retention"])
        clean_dir(success_folder, retention)
        clean_dir(discard_folder, retention)
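
# _is_offpeak() is called above but not shown here. A minimal sketch of such a check
# (an assumption, not the actual implementation), expecting "HH:MM" strings and
# supporting windows that wrap past midnight:
def _is_offpeak_sketch(offpeak_start, offpeak_end, current_time):
    try:
        start = datetime.strptime(offpeak_start, "%H:%M").time()
        end = datetime.strptime(offpeak_end, "%H:%M").time()
    except ValueError:
        # Fail open so that a bad config value does not silently disable cleaning
        return True

    if start < end:
        # Window within a single day, e.g. 01:00 - 05:00
        return start <= current_time <= end
    # Window wrapping past midnight, e.g. 22:00 - 06:00
    return current_time >= start or current_time <= end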
def terminate_process(signalNumber, frame):
    """Triggers the shutdown of the service."""
    helper.g_log("events.shutdown", 1)
    logger.info("Shutdown requested")
    monitor.send_event(monitor.h_events.SHUTDOWN_REQUEST, monitor.severity.INFO)
    # Note: main_loop can be read here because it has been declared as a global variable
    if "main_loop" in globals() and main_loop.is_running:
        main_loop.stop()
    helper.trigger_terminate()
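
# For terminate_process() to run, it must be registered as a signal handler at startup.
# A minimal registration sketch (the exact set of signals hooked up is an assumption):
import signal

signal.signal(signal.SIGINT, terminate_process)   # Ctrl+C
signal.signal(signal.SIGTERM, terminate_process)  # stop request from systemd/docker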
def search_folder(counter):
    global processor_lockfile
    global processor_is_locked

    helper.g_log('events.run', 1)

    tasks = {}

    for entry in os.scandir(config.mercure['processing_folder']):
        if entry.is_dir() and is_ready_for_processing(entry.path):
            modification_time = entry.stat().st_mtime
            tasks[entry.path] = modification_time

    # Check if processing has been suspended via the UI
    if processor_lockfile.exists():
        if not processor_is_locked:
            processor_is_locked = True
            logger.info("Processing halted")
        return False
    else:
        if processor_is_locked:
            processor_is_locked = False
            logger.info("Processing resumed")

    # Return if no tasks have been found
    if not tasks:
        return False

    # Note: sorted() on the dict orders tasks by folder path; the collected
    # modification times are currently not used for ordering
    sorted_tasks = sorted(tasks)
    # TODO: Add priority sorting. However, do not honor the priority flag for every third run
    #       so that stagnation of cases is avoided

    # Only process one case at a time because the processing might take a while and
    # another instance might have processed the other entries already. So the folder
    # needs to be refreshed each time
    task = sorted_tasks[0]

    try:
        process_series(task)
        # Return true, so that the parent function will trigger another search of the folder
        return True
    except Exception:
        logger.exception(f'Problems while processing series {task}')
        monitor.send_series_event(monitor.s_events.ERROR, task, 0, "", "Exception while processing")
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR, "Exception while processing series")
        return False
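
# A minimal sketch (not part of the original source) for the priority TODO above:
# order tasks oldest-first and honor a priority flag except on every third run, so
# that non-urgent cases cannot stagnate. The URGENT marker-file convention and both
# helper names are hypothetical. tasks maps folder paths to mtimes as collected above.
def get_priority(path):
    # 0 = urgent, 1 = normal; hypothetical marker-file convention
    return 0 if (Path(path) / "URGENT").exists() else 1

def order_tasks(tasks, run_counter):
    if run_counter % 3 == 0:
        # Ignore the priority flag on every third run: pure oldest-first by mtime
        return sorted(tasks, key=tasks.get)
    return sorted(tasks, key=lambda p: (get_priority(p), tasks[p]))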
monitor.send_event(monitor.h_events.BOOT, monitor.severity.INFO, f'PID = {os.getpid()}')

if len(config.mercure['graphite_ip']) > 0:
    logger.info(f'Sending events to graphite server: {config.mercure["graphite_ip"]}')
    graphite_prefix = 'mercure.' + appliance_name + '.processor.' + instance_name
    graphyte.init(config.mercure['graphite_ip'], config.mercure['graphite_port'], prefix=graphite_prefix)

logger.info(f'Processing folder: {config.mercure["processing_folder"]}')

processor_lockfile = Path(config.mercure['processing_folder'] + '/HALT')

# Start the timer that will periodically trigger the scan of the processing folder
global main_loop
main_loop = helper.RepeatedTimer(config.mercure['dispatcher_scan_interval'], run_processor, exit_processor, {})
main_loop.start()

helper.g_log('events.boot', 1)

# Start the asyncio event loop for asynchronous function calls
helper.loop.run_forever()

# Process will exit here once the asyncio loop has been stopped
monitor.send_event(monitor.h_events.SHUTDOWN, monitor.severity.INFO)
logger.info('Going down now')
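
# helper.RepeatedTimer drives the periodic scans here and in the dispatcher below.
# A minimal sketch of what such a timer might look like on top of asyncio's
# call_later (an assumption about the helper's design, not its actual implementation):
class RepeatedTimerSketch:
    def __init__(self, interval, main_func, exit_func, args):
        self.interval = interval
        self.main_func = main_func
        self.exit_func = exit_func
        self.args = args
        self.is_running = False
        self._handle = None

    def _run(self):
        # Reschedule first only while running, then invoke the scan function
        if self.is_running:
            self.main_func(self.args)
            self._handle = helper.loop.call_later(self.interval, self._run)

    def start(self):
        self.is_running = True
        self._handle = helper.loop.call_later(self.interval, self._run)

    def stop(self):
        self.is_running = False
        if self._handle:
            self._handle.cancel()
        self.exit_func(self.args)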
logger.info(f"Appliance name = {appliance_name}") logger.info(f"Instance name = {instance_name}") logger.info(f"Instance PID = {os.getpid()}") logger.info(sys.version) monitor.configure("dispatcher", instance_name, config.mercure["bookkeeper"]) monitor.send_event(monitor.h_events.BOOT, monitor.severity.INFO, f"PID = {os.getpid()}") if len(config.mercure["graphite_ip"]) > 0: logging.info(f'Sending events to graphite server: {config.mercure["graphite_ip"]}') graphite_prefix = "mercure." + appliance_name + ".dispatcher." + instance_name graphyte.init( config.mercure["graphite_ip"], config.mercure["graphite_port"], prefix=graphite_prefix, ) logger.info(f"Dispatching folder: {config.mercure[mercure_folders.OUTGOING]}") global main_loop main_loop = helper.RepeatedTimer(config.mercure["dispatcher_scan_interval"], dispatch, exit_dispatcher, {}) main_loop.start() helper.g_log("events.boot", 1) # Start the asyncio event loop for asynchronous function calls helper.loop.run_forever() monitor.send_event(monitor.h_events.SHUTDOWN, monitor.severity.INFO) logging.info("Going down now")
def run_router(args):
    """Main processing function that is called every second."""
    if helper.is_terminated():
        return

    helper.g_log('events.run', 1)

    #logger.info('')
    #logger.info('Processing incoming folder...')

    try:
        config.read_config()
    except Exception:
        logger.exception("Unable to update configuration. Skipping processing.")
        monitor.send_event(monitor.h_events.CONFIG_UPDATE,
                           monitor.severity.WARNING,
                           "Unable to update configuration (possibly locked)")
        return

    filecount = 0
    series = {}
    complete_series = {}
    error_files_found = False

    # Check the incoming folder for completed series. To this end, generate a map of all
    # series in the folder with the timestamp of the latest DICOM file as value
    for entry in os.scandir(config.mercure['incoming_folder']):
        if entry.name.endswith(".tags") and not entry.is_dir():
            filecount += 1
            seriesString = entry.name.split('#', 1)[0]
            modificationTime = entry.stat().st_mtime
            if seriesString in series:
                if modificationTime > series[seriesString]:
                    series[seriesString] = modificationTime
            else:
                series[seriesString] = modificationTime
        # Check if at least one .error file exists. In that case, the incoming folder should
        # be searched for .error files at the end of the update run
        if (not error_files_found) and entry.name.endswith(".error"):
            error_files_found = True

    # Check if any of the series exceeds the "series complete" threshold
    for entry in series:
        if (time.time() - series[entry]) > config.mercure['series_complete_trigger']:
            complete_series[entry] = series[entry]

    #logger.info(f'Files found = {filecount}')
    #logger.info(f'Series found = {len(series)}')
    #logger.info(f'Complete series = {len(complete_series)}')
    helper.g_log('incoming.files', filecount)
    helper.g_log('incoming.series', len(series))

    # Process all complete series
    for entry in sorted(complete_series):
        try:
            route_series(entry)
        except Exception:
            logger.exception(f'Problems while processing series {entry}')
            monitor.send_series_event(monitor.s_events.ERROR, entry, 0, "", "Exception while processing")
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR, "Exception while processing series")
        # If termination is requested, stop processing series after the active one has been completed
        if helper.is_terminated():
            return

    if error_files_found:
        route_error_files()

    # Now, check if studies in the studies folder are ready for routing/processing
    route_studies()
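
# A minimal, self-contained sketch (not part of the original source) of the
# series-completeness detection used above: series are keyed by the part of the
# ".tags" filename before the first '#', and a series counts as complete once no
# new file has arrived for series_complete_trigger seconds. The filenames and the
# 60-second threshold below are made-up examples.
def find_complete_series(files, trigger_seconds, now):
    latest = {}
    for name, mtime in files.items():
        series_uid = name.split('#', 1)[0]
        latest[series_uid] = max(latest.get(series_uid, 0), mtime)
    return sorted(uid for uid, t in latest.items() if now - t > trigger_seconds)

# Example: series 1.2.840.1 has been idle for two minutes and is complete, while
# series 1.2.840.2 received a file 10 seconds ago and is still considered open.
_now = time.time()
print(find_complete_series(
    {
        "1.2.840.1#001.tags": _now - 300,
        "1.2.840.1#002.tags": _now - 120,
        "1.2.840.2#001.tags": _now - 10,
    },
    trigger_seconds=60,
    now=_now,
))  # -> ['1.2.840.1']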