def validate_file_of_sample_dirs(sample_dirs_file):
    """Verify each of the sample directories in the sample directory file is not
    empty and contains fastq files.

    Parameters
    ----------
    sample_dirs_file : str
        Path to the file of sample directories
    """
    found_error = False

    with open(sample_dirs_file) as f:
        for directory in f:
            directory = directory.strip()
            if not utils.verify_non_empty_directory("Sample directory", directory):
                found_error = True
            else:
                files = fastq.list_fastq_files(directory)
                if len(files) == 0:
                    utils.report_error("Sample directory %s does not contain any fastq files." % directory)
                    found_error = True

    if found_error:
        if os.environ.get("StopOnSampleError") == "true":
            sys.exit(1)
        else:
            log_error("================================================================================")
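# For illustration only (not part of the original module): the file of sample
# directories is expected to contain one directory path per line, and each
# directory must exist, be non-empty, and hold at least one fastq file, e.g.
#
#     /data/run1/sampleA
#     /data/run1/sampleB
#
# A hypothetical caller (the file name below is an assumption) could validate
# the file before launching any per-sample jobs:
#
#     validate_file_of_sample_dirs("sampleDirectories.txt")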
def handle_called_process_exception(exc_type, exc_value, exc_traceback):
    """This function handles exceptions in the child processes executed by
    the snp-pipeline.

    It logs the error and exits the SNP Pipeline if configured to do so.
    """
    global log_dir

    external_program_command = exc_value.cmd
    error_code = exc_value.returncode

    trace_entries = traceback.extract_tb(exc_traceback)
    file_name, line_number, function_name, code_text = trace_entries[-1]
    exc_type_name = exc_type.__name__

    stop_on_error_env = os.environ.get("StopOnSampleError")
    stop_on_error = stop_on_error_env is None or stop_on_error_env == "true"
    error_output_file = os.environ.get("errorOutputFile")

    # Error code 98 indicates an error affecting a single sample when the pipeline is configured to ignore such errors.
    # This error has already been reported, but it should not stop execution.
    if error_code == 98 and not stop_on_error:
        return

    # When configured to ignore single-sample errors, try to continue execution if
    # xargs returns 123 when executing an array of jobs, one per sample
    if error_code == 123 and not stop_on_error:
        return

    # A subprocess failed and was already trapped.
    # Actually, we cannot be 100% certain the error was trapped if the error code is 123.  This
    # indicates an error in an array of sample jobs launched in parallel by xargs; but since
    # StopOnSampleError is true, we will assume the error was already trapped.
    if error_code == 100 or (stop_on_error and error_code == 123):
        log_error("See also the log files in directory " + log_dir)
        log_error("Shutting down the SNP Pipeline.")
        log_error("================================================================================")

        # Send the error log contents to stderr
        if error_output_file:
            with open(error_output_file, "r") as err_log:
                shutil.copyfileobj(err_log, sys.stderr)
        sys.exit(1)

    # An error occurred and was not already trapped.
    # Error code 98 indicates an already handled error affecting a single sample when the pipeline is configured to ignore such errors
    if error_code != 98:
        external_program_command = external_program_command.replace("set -o pipefail; ", "")
        log_error("Error detected while running " + utils.program_name_with_command())
        log_error("")
        log_error("The error occurred while running:")
        log_error("    %s" % external_program_command)
        log_error("")
        log_error("Shutting down the SNP Pipeline.")
        log_error("================================================================================")

        # Send the error log contents to stderr
        if error_output_file:
            with open(error_output_file, "r") as err_log:
                shutil.copyfileobj(err_log, sys.stderr)
        sys.exit(1)
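# A minimal sketch (not part of the original module) of how the handler above
# could be installed as the interpreter's global exception hook.  The function
# name handle_exception and the excepthook wiring are assumptions for
# illustration; the handler only requires that exc_value carry .cmd and
# .returncode, as subprocess.CalledProcessError does.
def handle_exception(exc_type, exc_value, exc_traceback):
    """Route subprocess failures to handle_called_process_exception and defer
    everything else to the default hook.
    """
    import subprocess
    if issubclass(exc_type, subprocess.CalledProcessError):
        handle_called_process_exception(exc_type, exc_value, exc_traceback)
    else:
        sys.__excepthook__(exc_type, exc_value, exc_traceback)

# A caller would install the hook like this:
# sys.excepthook = handle_exception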