def route_error_files():
    """Looks for error files, moves these files and the corresponding DICOM
    files to the error folder, and sends an alert to the bookkeeper instance."""
    error_files_found = 0

    for entry in os.scandir(config.mercure['incoming_folder']):
        if entry.name.endswith(".error") and not entry.is_dir():
            # Check if a lock file exists. If not, create one.
            lock_file = Path(config.mercure['incoming_folder'] + '/' + entry.name + mercure_names.LOCK)
            if lock_file.exists():
                continue
            try:
                lock = helper.FileLock(lock_file)
            except Exception:
                continue

            logger.error(f'Found incoming error file {entry.name}')
            error_files_found += 1

            shutil.move(config.mercure['incoming_folder'] + '/' + entry.name,
                        config.mercure['error_folder'] + '/' + entry.name)

            # Move the corresponding DICOM file (same name without the ".error" suffix)
            dicom_filename = entry.name[:-6]
            dicom_file = Path(config.mercure['incoming_folder'] + '/' + dicom_filename)
            if dicom_file.exists():
                shutil.move(config.mercure['incoming_folder'] + '/' + dicom_filename,
                            config.mercure['error_folder'] + '/' + dicom_filename)

            lock.free()

    if error_files_found > 0:
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f'Error parsing {error_files_found} incoming files')
    return

def push_files(file_list, target_path, copy_files):
    """Copies or moves the given files to the target path. If copy_files is
    True, files are copied, otherwise moved. Note that this function does not
    create a lock file (this needs to be done by the calling function)."""
    operation = shutil.copy if copy_files else shutil.move

    source_folder = config.mercure[mercure_folders.INCOMING] + "/"
    target_folder = target_path + "/"

    for entry in file_list:
        try:
            operation(source_folder + entry + mercure_names.DCM,
                      target_folder + entry + mercure_names.DCM)
            operation(source_folder + entry + mercure_names.TAGS,
                      target_folder + entry + mercure_names.TAGS)
        except Exception:
            logger.exception(f"Problem while pushing file to outgoing {entry}")
            logger.exception(f"Source folder {source_folder}")
            logger.exception(f"Target folder {target_folder}")
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                               f"Problem while pushing file to outgoing {entry}")
            return False

    return True

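# Usage sketch with made-up values: copy (rather than move) a two-file series
# into a rule folder while other triggered rules still need the files. The
# file-name prefix and target path below are hypothetical.
#
#   push_files(["series123#0001", "series123#0002"],
#              "/var/mercure/outgoing/series123_route_ct", copy_files=True)
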
def dispatch(args):
    """Main entry function."""
    if helper.is_terminated():
        return

    helper.g_log("events.run", 1)

    try:
        config.read_config()
    except Exception:
        logger.exception("Unable to read configuration. Skipping processing.")
        monitor.send_event(
            monitor.h_events.CONFIG_UPDATE,
            monitor.severity.WARNING,
            "Unable to read configuration (possibly locked)",
        )
        return

    success_folder = Path(config.mercure[mercure_folders.SUCCESS])
    error_folder = Path(config.mercure[mercure_folders.ERROR])
    retry_max = config.mercure["retry_max"]
    retry_delay = config.mercure["retry_delay"]

    # TODO: Sort list so that the oldest DICOMs get dispatched first
    with os.scandir(config.mercure[mercure_folders.OUTGOING]) as it:
        for entry in it:
            if entry.is_dir() and not has_been_send(entry.path) and is_ready_for_sending(entry.path):
                logger.info(f"Sending folder {entry.path}")
                execute(Path(entry.path), success_folder, error_folder, retry_max, retry_delay)

            # If termination is requested, stop processing series after the
            # active one has been completed
            if helper.is_terminated():
                break

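# The two folder checks used by dispatch() are defined elsewhere. A minimal
# sketch of the assumed semantics, for illustration only (the ".sent" marker
# name is an assumption):

def has_been_send(folder):
    """Returns True if the folder has already been dispatched (sketch)."""
    return (Path(folder) / ".sent").exists()  # assumed marker file

def is_ready_for_sending(folder):
    """Returns True if no other process is working on the folder (sketch)."""
    folder = Path(folder)
    return (not (folder / mercure_names.LOCK).exists()
            and not (folder / mercure_names.PROCESSING).exists())
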
def create_study_task(folder_name, applied_rule, study_UID, tags_list):
    """Generates the task file with information on the study."""
    task_filename = folder_name + mercure_names.TASKFILE

    study_info = {}
    study_info["study_uid"] = study_UID
    study_info["complete_trigger"] = config.mercure[mercure_config.RULES][applied_rule]["study_trigger_condition"]
    study_info["complete_required_series"] = config.mercure[mercure_config.RULES][applied_rule]["study_trigger_series"]
    study_info["creation_time"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    task_json = {}
    task_json[mercure_sections.STUDY] = study_info
    task_json.update(add_info(study_UID, mercure_options.STUDY, applied_rule, tags_list))

    try:
        with open(task_filename, 'w') as task_file:
            json.dump(task_json, task_file)
    except Exception:
        logger.error(f"Unable to create task file {task_filename}")
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f"Unable to create task file {task_filename}")
        return False

    return True

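# For illustration, the task file written above would contain a study section
# roughly like the following (all values are made-up examples, and "study"
# stands in for the actual value of mercure_sections.STUDY):
#
#   {
#       "study": {
#           "study_uid": "1.2.276.0.7230010.3.1.2.123456",
#           "complete_trigger": "received_series",
#           "complete_required_series": "T1, T2, FLAIR",
#           "creation_time": "2024-01-01 12:00:00"
#       },
#       ... entries added by add_info() ...
#   }
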
def _move_sent_directory(source_folder, destination_folder):
    """Moves the folder to the destination. If a folder with the same name
    already exists in the success folder, a new directory with a timestamp
    suffix is created instead."""
    try:
        if (destination_folder / source_folder.name).exists():
            target_folder = destination_folder / (source_folder.name + "_" + datetime.now().isoformat())
            logger.debug(f"Moving {source_folder} to {target_folder}")
            shutil.move(source_folder, target_folder, copy_function=shutil.copy2)
            (Path(target_folder) / mercure_names.PROCESSING).unlink()
        else:
            logger.debug(f"Moving {source_folder} to {destination_folder / source_folder.name}")
            shutil.move(source_folder, destination_folder / source_folder.name)
            (destination_folder / source_folder.name / mercure_names.PROCESSING).unlink()
    except Exception:
        logger.info(f"Error moving folder {source_folder} to {destination_folder}")
        send_event(h_events.PROCESSING, severity.ERROR,
                   f"Error moving {source_folder} to {destination_folder}")

def clean(args):
    """Main entry function."""
    if helper.is_terminated():
        return

    helper.g_log("events.run", 1)

    try:
        config.read_config()
    except Exception:
        logger.exception("Unable to read configuration. Skipping processing.")
        monitor.send_event(
            monitor.h_events.CONFIG_UPDATE,
            monitor.severity.WARNING,
            "Unable to read configuration (possibly locked)",
        )
        return

    # TODO: Adaptively reduce the retention time if the disk space is running low
    if _is_offpeak(
        config.mercure["offpeak_start"],
        config.mercure["offpeak_end"],
        datetime.now().time(),
    ):
        success_folder = config.mercure[mercure_folders.SUCCESS]
        discard_folder = config.mercure[mercure_folders.DISCARD]
        retention = timedelta(seconds=config.mercure["retention"])
        clean_dir(success_folder, retention)
        clean_dir(discard_folder, retention)

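# Sketch of the off-peak check used above, assuming the window boundaries are
# "HH:MM" strings and that the window may wrap around midnight:

def _is_offpeak(offpeak_start, offpeak_end, current_time):
    """Returns True if current_time falls inside the off-peak window (sketch)."""
    try:
        start_time = datetime.strptime(offpeak_start, "%H:%M").time()
        end_time = datetime.strptime(offpeak_end, "%H:%M").time()
    except Exception:
        logger.error(f"Unable to parse offpeak time: {offpeak_start}, {offpeak_end}")
        return True  # fail open so that cleaning is not blocked indefinitely
    if start_time < end_time:
        return start_time <= current_time <= end_time
    # Window wraps around midnight, e.g. 22:00 - 06:00
    return current_time >= start_time or current_time <= end_time
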
def route_studies():
    """Searches for completed studies and initiates the routing."""
    studies_ready = {}

    with os.scandir(config.mercure[mercure_folders.STUDIES]) as it:
        for entry in it:
            if (entry.is_dir() and not is_study_locked(entry.path)
                    and is_study_complete(entry.path)):
                modification_time = entry.stat().st_mtime
                studies_ready[entry.name] = modification_time

    # Process all complete studies
    for entry in sorted(studies_ready):
        try:
            route_study(entry)
        except Exception:
            logger.exception(f'Problems while processing study {entry}')
            # TODO: Add study events to bookkeeper
            # monitor.send_series_event(monitor.s_events.ERROR, entry, 0, "", "Exception while processing")
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                               f"Exception while processing study {entry}")

        # If termination is requested, stop processing after the active study has been completed
        if helper.is_terminated():
            return

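# Sketch of the lock check used above, assuming a study folder counts as
# locked while a ".lock" or ".processing" marker is present:

def is_study_locked(folder):
    """Returns True if another process is currently working on the folder (sketch)."""
    path = Path(folder)
    return ((path / mercure_names.LOCK).exists()
            or (path / mercure_names.PROCESSING).exists())
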
def write_configfile(json_content):
    """Rewrites the config file using the JSON data passed as argument.
    Used by the config editor of the webgui."""
    configuration_file = Path(configuration_filename)

    # Check for existence of lock file
    lock_file = Path(configuration_file.parent / configuration_file.stem).with_suffix(mercure_names.LOCK)
    if lock_file.exists():
        raise ResourceWarning(f"Configuration file locked: {lock_file}")

    try:
        lock = helper.FileLock(lock_file)
    except Exception:
        raise ResourceWarning(f"Unable to lock configuration file: {lock_file}")

    with open(configuration_file, "w") as json_file:
        json.dump(json_content, json_file, indent=4)

    monitor.send_event(monitor.h_events.CONFIG_UPDATE, monitor.severity.INFO,
                       "Wrote configuration file.")
    logger.info(f"Wrote configuration into: {configuration_file}")

    try:
        lock.free()
    except Exception:
        # Can't delete lock file, so something must be seriously wrong
        logger.error(f'Unable to remove lock file {lock_file}')
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f'Unable to remove lock file {lock_file}')
    return

def checkFolders():
    """Checks if all required folders for handling the DICOM files exist."""
    for entry in ['incoming_folder', 'studies_folder', 'outgoing_folder',
                  'success_folder', 'error_folder', 'discard_folder',
                  'processing_folder']:
        if not Path(mercure[entry]).exists():
            logger.error(f"Folder not found {mercure[entry]}")
            monitor.send_event(monitor.h_events.CONFIG_UPDATE, monitor.severity.CRITICAL,
                               "Folders are missing")
            return False
    return True

def terminate_process(signalNumber, frame):
    """Triggers the shutdown of the service."""
    helper.g_log("events.shutdown", 1)
    logger.info("Shutdown requested")
    monitor.send_event(monitor.h_events.SHUTDOWN_REQUEST, monitor.severity.INFO)
    # Note: main_loop can be read here because it has been declared as global variable
    if "main_loop" in globals() and main_loop.is_running:
        main_loop.stop()
    helper.trigger_terminate()

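# The handler above is meant to be registered for the usual termination
# signals during service startup, e.g. (a minimal sketch using the standard
# signal module):
#
#   signal.signal(signal.SIGINT, terminate_process)
#   signal.signal(signal.SIGTERM, terminate_process)
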
def read_config():
    """Reads the configuration settings (rules, targets, general settings)
    from the configuration file. The configuration will only be updated
    if the file has changed compared to the last function call. If the
    configuration file is locked by another process, an exception will
    be raised."""
    global mercure
    global configuration_timestamp
    configuration_file = Path(configuration_filename)

    # Check for existence of lock file
    lock_file = Path(configuration_file.parent / configuration_file.stem).with_suffix(mercure_names.LOCK)
    if lock_file.exists():
        raise ResourceWarning(f"Configuration file locked: {lock_file}")

    if configuration_file.exists():
        # Get the modification date/time of the configuration file
        stat = os.stat(configuration_filename)
        try:
            timestamp = stat.st_mtime
        except AttributeError:
            timestamp = 0

        # Check if the configuration file is newer than the version
        # loaded into memory. If not, return
        if timestamp <= configuration_timestamp:
            return mercure

        logger.info(f"Reading configuration from: {configuration_filename}")

        with open(configuration_file, "r") as json_file:
            loaded_config = json.load(json_file)

        # Reset configuration to default values (to ensure all needed keys are
        # present in the configuration). Use a copy of the defaults so that
        # mercure_defaults itself is not modified by the update below.
        mercure = dict(mercure_defaults)
        # Now merge with values loaded from configuration file
        mercure.update(loaded_config)

        # TODO: Check configuration for errors (esp targets and rules)

        # Check if directories exist
        if not checkFolders():
            raise FileNotFoundError("Configured folders missing")

        configuration_timestamp = timestamp
        monitor.send_event(monitor.h_events.CONFIG_UPDATE, monitor.severity.INFO,
                           "Configuration updated")
        return mercure
    else:
        raise FileNotFoundError(f"Configuration file not found: {configuration_file}")

def remove_series(file_list):
    """Deletes the given files from the incoming folder."""
    source_folder = config.mercure[mercure_folders.INCOMING] + '/'
    for entry in file_list:
        try:
            os.remove(source_folder + entry + mercure_names.TAGS)
            os.remove(source_folder + entry + mercure_names.DCM)
        except Exception:
            logger.exception(f'Error while removing file {entry}')
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                               f'Error while removing file {entry}')

def push_series_discard(fileList, series_UID, discard_series):
    """Discards the series by moving all files into the "discard" folder,
    which is periodically cleared."""
    # Define the source and target folder. Use UUID as name for the target
    # folder in the discard directory to avoid collisions
    discard_path = config.mercure['discard_folder'] + '/' + str(uuid.uuid1())
    discard_folder = discard_path + '/'
    source_folder = config.mercure['incoming_folder'] + '/'

    # Create subfolder in the discard directory and validate that it has been created
    try:
        os.mkdir(discard_path)
    except Exception:
        logger.exception(f'Unable to create discard folder {discard_path}')
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f'Unable to create discard folder {discard_path}')
        return
    if not Path(discard_path).exists():
        logger.error(f'Creating discard folder not possible {discard_path}')
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f'Creating discard folder not possible {discard_path}')
        return

    # Create lock file in destination folder (to prevent the cleaner module
    # from working on the folder). Note that the DICOM series in the incoming
    # folder has already been locked in the parent function.
    try:
        lock_file = Path(discard_path) / mercure_names.LOCK
        lock = helper.FileLock(lock_file)
    except Exception:
        # Can't create lock file, so something must be seriously wrong
        logger.error(f'Unable to create lock file {lock_file}')
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f'Unable to create lock file in discard folder {lock_file}')
        return

    info_text = ""
    if discard_series:
        info_text = "Discard by rule " + discard_series
    monitor.send_series_event(monitor.s_events.DISCARD, series_UID, len(fileList), "", info_text)

    for entry in fileList:
        try:
            shutil.move(source_folder + entry + mercure_names.DCM,
                        discard_folder + entry + mercure_names.DCM)
            shutil.move(source_folder + entry + mercure_names.TAGS,
                        discard_folder + entry + mercure_names.TAGS)
        except Exception:
            logger.exception(f'Problem while discarding file {entry}')
            logger.exception(f'Source folder {source_folder}')
            logger.exception(f'Target folder {discard_folder}')
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                               f'Problem during discarding file {entry}')

    monitor.send_series_event(monitor.s_events.MOVE, series_UID, len(fileList), discard_path, "")

    try:
        lock.free()
    except Exception:
        # Can't delete lock file, so something must be seriously wrong
        logger.error(f'Unable to remove lock file {lock_file}')
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f'Unable to remove lock file {lock_file}')
    return

def process_series(folder):
    """Processes the series contained in the given folder."""
    logger.info(f'Now processing = {folder}')

    lock_file = Path(folder) / mercure_names.PROCESSING
    if lock_file.exists():
        logger.warning(f"Folder already contains lockfile {folder}/{mercure_names.PROCESSING}")
        return
    try:
        lock = helper.FileLock(lock_file)
    except Exception:
        # Can't create lock file, so something must be seriously wrong
        logger.error(f'Unable to create lock file {lock_file}')
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f'Unable to create lock file in processing folder {lock_file}')
        return

    processing_success = True
    needs_dispatching = False

    # TODO: Perform the processing
    time.sleep(10)

    # TODO: Error handling

    # Create a new lock file to ensure that no other process picks up the folder while copying
    try:
        lock_file = Path(folder) / mercure_names.LOCK
        lock_file.touch()
    except Exception:
        logger.info(f"Error locking folder to be moved {folder}")
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f"Error locking folder to be moved {folder}")

    # Remove the processing lock
    lock.free()

    if not processing_success:
        move_folder(folder, config.mercure['error_folder'])
    else:
        if needs_dispatching:
            move_folder(folder, config.mercure['outgoing_folder'])
        else:
            move_folder(folder, config.mercure['success_folder'])

    logger.info('Done processing case')
    return

def search_folder(counter):
    """Searches the processing folder for tasks that are ready and processes
    the oldest one. Returns True if a task was processed."""
    global processor_lockfile
    global processor_is_locked

    helper.g_log('events.run', 1)

    tasks = {}

    for entry in os.scandir(config.mercure['processing_folder']):
        if entry.is_dir() and is_ready_for_processing(entry.path):
            modification_time = entry.stat().st_mtime
            tasks[entry.path] = modification_time

    # Check if processing has been suspended via the UI
    if processor_lockfile.exists():
        if not processor_is_locked:
            processor_is_locked = True
            logger.info("Processing halted")
        return False
    else:
        if processor_is_locked:
            processor_is_locked = False
            logger.info("Processing resumed")

    # Return if no tasks have been found
    if not len(tasks):
        return False

    sorted_tasks = sorted(tasks)
    # TODO: Add priority sorting. However, do not honor the priority flag for every
    #       third run so that stagnation of cases is avoided

    # Only process one case at a time because the processing might take a while and
    # another instance might have processed the other entries already. So the folder
    # needs to be refreshed each time
    task = sorted_tasks[0]

    try:
        process_series(task)
        # Return true, so that the parent function will trigger another search of the folder
        return True
    except Exception:
        logger.exception(f'Problems while processing series {task}')
        monitor.send_series_event(monitor.s_events.ERROR, task, 0, "", "Exception while processing")
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           "Exception while processing series")
        return False

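# Sketch of the readiness check used above, assuming a folder becomes ready
# once it contains a task file and carries no lock or processing marker:

def is_ready_for_processing(folder):
    """Returns True if the folder can be picked up for processing (sketch)."""
    path = Path(folder)
    return ((path / mercure_names.TASKFILE).exists()
            and not (path / mercure_names.LOCK).exists()
            and not (path / mercure_names.PROCESSING).exists())
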
def push_series_studylevel(triggered_rules, file_list, series_UID, tags_list):
    """Prepares study-level routing for the current series."""
    # Move series into individual study-level folder for every rule
    for current_rule in triggered_rules:
        if config.mercure[mercure_config.RULES][current_rule].get(
                mercure_rule.ACTION_TRIGGER, mercure_options.SERIES) == mercure_options.STUDY:
            first_series = False

            # Create folder to buffer the series until study completion
            study_UID = tags_list["StudyInstanceUID"]
            folder_name = study_UID + mercure_defs.SEPARATOR + current_rule
            if not os.path.exists(folder_name):
                try:
                    os.mkdir(folder_name)
                    first_series = True
                except Exception:
                    logger.error(f"Unable to create folder {folder_name}")
                    monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                                       f"Unable to create folder {folder_name}")
                    continue

            lock_file = Path(folder_name) / mercure_names.LOCK
            try:
                lock = helper.FileLock(lock_file)
            except Exception:
                # Can't create lock file, so something must be seriously wrong
                logger.error(f"Unable to create lock file {lock_file}")
                monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                                   f"Unable to create lock file {lock_file}")
                continue

            if first_series:
                # Create task file with information on complete criteria
                create_study_task(folder_name, current_rule, study_UID, tags_list)
            else:
                # Add data from latest series to task file
                update_study_task(folder_name, current_rule, study_UID, tags_list)

            # Copy (or move) the files into the study folder
            push_files(file_list, folder_name, (len(triggered_rules) > 1))
            lock.free()

def get_triggered_rules(tagList):
    """Evaluates the routing rules and returns a list with the triggered rules."""
    triggered_rules = {}
    discard_rule = ""
    fallback_rule = ""

    # Iterate over all defined processing rules
    for current_rule in config.mercure["rules"]:
        try:
            # Check if the current rule has been disabled
            if config.mercure["rules"][current_rule].get(mercure_rule.DISABLED, "False") == "True":
                continue

            # If the current rule is flagged as fallback rule, remember the name
            # (to avoid repeated iteration over the rules)
            if config.mercure["rules"][current_rule].get(mercure_rule.FALLBACK, "False") == "True":
                fallback_rule = current_rule

            # Check if the current rule is triggered for the provided tag set
            if rule_evaluation.parse_rule(
                    config.mercure["rules"][current_rule].get(mercure_rule.RULE, "False"), tagList):
                triggered_rules[current_rule] = current_rule
                if config.mercure["rules"][current_rule].get(
                        mercure_rule.ACTION, "") == mercure_actions.DISCARD:
                    discard_rule = current_rule
                    # If the triggered rule's action is to discard, stop further iteration over the rules
                    break
        except Exception as e:
            logger.error(e)
            logger.error(f"Invalid rule found: {current_rule}")
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                               f"Invalid rule: {current_rule}")
            continue

    # If no rule has triggered but a fallback rule exists, then apply this rule
    if (len(triggered_rules) == 0) and fallback_rule:
        triggered_rules[fallback_rule] = fallback_rule
        if config.mercure["rules"][fallback_rule].get(
                mercure_rule.ACTION, "") == mercure_actions.DISCARD:
            discard_rule = fallback_rule

    logger.info("Triggered rules:")
    logger.info(triggered_rules)
    return triggered_rules, discard_rule

def create_series_task_processing(folder_name, applied_rule, series_UID, tags_list):
    """Generates the task file with processing information for the series."""
    task_filename = folder_name + mercure_names.TASKFILE
    task_json = generate_taskfile_process(series_UID, mercure_options.SERIES,
                                          applied_rule, tags_list)
    try:
        with open(task_filename, 'w') as task_file:
            json.dump(task_json, task_file)
    except Exception:
        logger.error(f"Unable to create task file {task_filename}")
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f"Unable to create task file {task_filename}")
        return False

    return True

def parse_rule(rule, tags):
    """Parses the given rule, replaces all tag variables with values from
    the given tags dictionary, and evaluates the rule. If the rule is
    invalid, an exception will be raised."""
    try:
        logger.info(f"Rule: {rule}")
        rule = replace_tags(rule, tags)
        logger.info(f"Evaluated: {rule}")
        # Restrict the evaluation to the whitelisted commands in safe_eval_cmds
        result = eval(rule, {"__builtins__": {}}, safe_eval_cmds)
        logger.info(f"Result: {result}")
        return result
    except Exception as e:
        logger.error(f"ERROR: {e}")
        logger.warning(f"Invalid rule expression: {rule}")
        monitor.send_event(monitor.h_events.CONFIG_UPDATE, monitor.severity.ERROR,
                           f"Invalid rule encountered {rule}")
        return False

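# Sketch of the tag substitution assumed by parse_rule: every placeholder of
# the form @TagName@ is replaced with the quoted tag value, so that a rule
# like "'CT' in @Modality@" evaluates as "'CT' in 'CT'". The placeholder
# syntax and the quoting strategy here are assumptions for illustration:

def replace_tags(rule, tags):
    """Replaces all @TagName@ placeholders in the rule with tag values (sketch)."""
    for tag, value in tags.items():
        # Strip quotes from the value so the rule stays a valid expression
        rule = rule.replace("@" + tag + "@", "'" + str(value).replace("'", "") + "'")
    return rule
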
def move_folder(source_folder_str, destination_folder_str):
    """Moves the given folder to the destination, appending a timestamp to
    the folder name if the destination already exists."""
    source_folder = Path(source_folder_str)
    destination_folder = Path(destination_folder_str)

    target_folder = destination_folder / source_folder.name
    if target_folder.exists():
        target_folder = destination_folder / (source_folder.name + "_" + datetime.now().isoformat())

    logger.debug(f"Moving {source_folder} to {target_folder}")
    try:
        shutil.move(source_folder, target_folder)
        lockfile = target_folder / mercure_names.LOCK
        lockfile.unlink()
    except Exception:
        logger.info(f"Error moving folder {source_folder} to {destination_folder}")
        logger.error(traceback.format_exc())
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f"Error moving {source_folder} to {destination_folder}")

def is_study_complete(folder):
    """Returns True if the study in the given folder is ready for processing,
    i.e. if the completeness criteria of the triggered rule have been met."""
    try:
        # Read stored task file to determine completeness criteria
        with open(Path(folder) / mercure_names.TASKFILE, "r") as json_file:
            task = json.load(json_file)

        # Check if processing of the study has been enforced (e.g., via UI selection)
        if task.get(mercure_sections.STUDY, {}).get(mercure_study.COMPLETE_FORCE, "False") == "True":
            return True

        complete_trigger = task.get(mercure_sections.STUDY, {}).get(mercure_study.COMPLETE_TRIGGER, "")
        if not complete_trigger:
            error_text = f"Missing trigger condition in task file in study folder {folder}"
            logger.error(error_text)
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR, error_text)
            return False

        complete_required_series = task.get(mercure_sections.STUDY, {}).get(
            mercure_study.COMPLETE_REQUIRED_SERIES, "")

        # If the trigger condition is received series but the list of required
        # series is missing, then switch to timeout mode instead
        if (complete_trigger == mercure_rule.STUDY_TRIGGER_CONDITION_RECEIVED_SERIES) and (
                not complete_required_series):
            complete_trigger = mercure_rule.STUDY_TRIGGER_CONDITION_TIMEOUT
            warning_text = f"Missing series for trigger condition in study folder {folder}. Using timeout instead"
            logger.warning(warning_text)
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.WARNING, warning_text)

        # Check for trigger condition
        if complete_trigger == mercure_rule.STUDY_TRIGGER_CONDITION_TIMEOUT:
            return check_study_timeout(task)
        elif complete_trigger == mercure_rule.STUDY_TRIGGER_CONDITION_RECEIVED_SERIES:
            return check_study_series(task, complete_required_series)
        else:
            error_text = f"Invalid trigger condition in task file in study folder {folder}"
            logger.error(error_text)
            monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR, error_text)
            return False
    except Exception:
        error_text = f"Invalid task file in study folder {folder}"
        logger.exception(error_text)
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR, error_text)
        return False

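# Sketches of the two trigger checks called above. The "received_series" task
# key and the "study_complete_trigger_timeout" configuration key used here
# are assumptions for illustration:

def check_study_timeout(task):
    """Returns True if the study is older than the configured timeout (sketch)."""
    creation_time = datetime.strptime(
        task[mercure_sections.STUDY]["creation_time"], "%Y-%m-%d %H:%M:%S")
    timeout = timedelta(seconds=config.mercure.get("study_complete_trigger_timeout", 900))
    return datetime.now() > creation_time + timeout

def check_study_series(task, required_series):
    """Returns True if all required series have been received (sketch)."""
    received = task.get(mercure_sections.STUDY, {}).get("received_series", [])
    required = [item.strip() for item in required_series.split(",")]
    return all(item in received for item in required)
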
def delete_folder(entry):
    """Deletes the given folder."""
    delete_path = entry[0]  # the folder path is the first element of the entry tuple
    series_uid = find_series_uid(delete_path)
    try:
        rmtree(delete_path)
        logger.info(f"Deleted folder {delete_path} from {series_uid}")
        send_series_event(s_events.CLEAN, series_uid, 0, delete_path, "Deleted folder")
    except Exception as e:
        logger.info(f"Unable to delete folder {delete_path}")
        logger.exception(e)
        send_series_event(s_events.ERROR, series_uid, 0, delete_path, "Unable to delete folder")
        monitor.send_event(
            monitor.h_events.PROCESSING,
            monitor.severity.ERROR,
            f"Unable to delete folder {delete_path}",
        )

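# Sketch of the UID lookup used above, assuming the series UID can be read
# from one of the stored .tags files inside the folder (the JSON key name is
# an assumption):

def find_series_uid(folder):
    """Returns the series UID of the first tags file found in the folder (sketch)."""
    try:
        for entry in Path(folder).glob("*" + mercure_names.TAGS):
            with open(entry, "r") as json_file:
                return json.load(json_file).get("SeriesInstanceUID", "unknown")
    except Exception:
        pass
    return "unknown"
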
def create_series_task(folder_name, triggered_rules, series_UID, tags_list, target):
    """Creates the task file for the received series.
    For routing-only, triggered_rules is a dict and target is a string
    containing the target name. For processing-only and combined
    processing+routing, triggered_rules is a string and target is empty."""
    task_filename = folder_name + mercure_names.TASKFILE
    task_json = compose_task(series_UID, mercure_options.SERIES, triggered_rules, tags_list, target)

    try:
        with open(task_filename, "w") as task_file:
            json.dump(task_json, task_file)
    except Exception:
        logger.error(f"Unable to create task file {task_filename}")
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f"Unable to create task file {task_filename}")
        return False

    return True

def save_config():
    """Saves the current configuration in a file on the disk. Raises an
    exception if the file has been locked by another process."""
    global configuration_timestamp
    configuration_file = Path(configuration_filename)

    # Check for existence of lock file
    lock_file = Path(configuration_file.parent / configuration_file.stem).with_suffix(mercure_names.LOCK)
    if lock_file.exists():
        raise ResourceWarning(f"Configuration file locked: {lock_file}")

    try:
        lock = helper.FileLock(lock_file)
    except Exception:
        raise ResourceWarning(f"Unable to lock configuration file: {lock_file}")

    with open(configuration_file, "w") as json_file:
        json.dump(mercure, json_file, indent=4)

    try:
        stat = os.stat(configuration_file)
        configuration_timestamp = stat.st_mtime
    except AttributeError:
        configuration_timestamp = 0

    monitor.send_event(monitor.h_events.CONFIG_UPDATE, monitor.severity.INFO,
                       "Saved new configuration.")
    logger.info(f"Stored configuration into: {configuration_file}")

    try:
        lock.free()
    except Exception:
        # Can't delete lock file, so something must be seriously wrong
        logger.error(f'Unable to remove lock file {lock_file}')
        monitor.send_event(monitor.h_events.PROCESSING, monitor.severity.ERROR,
                           f'Unable to remove lock file {lock_file}')
    return

def run_processor(args):
    """Main processing function that is called every second."""
    if helper.is_terminated():
        return

    try:
        config.read_config()
    except Exception:
        logger.exception("Unable to update configuration. Skipping processing.")
        monitor.send_event(monitor.h_events.CONFIG_UPDATE, monitor.severity.WARNING,
                           "Unable to update configuration (possibly locked)")
        return

    call_counter = 0

    while search_folder(call_counter):
        call_counter += 1
        # If termination is requested, stop processing series after the active one has been completed
        if helper.is_terminated():
            return