def _add_run_reference_dataset_report(p):
    """Attach debug-logging, report-output, and reference-dataset-input options to parser *p*."""
    # Compose the option-adding functions and apply the result directly.
    return compose(add_log_debug_option,
                   U.add_report_output,
                   U.add_ds_reference_input)(p)
def add_get_job_list_options(p):
    """Add base/SAL options plus a max-items option (default 25) to parser *p*."""
    add_all = compose(
        add_base_and_sal_options,
        add_max_items_option(25, "Max Number of jobs"),
    )
    return add_all(p)
def test_simple(self):
    """compose applies right-to-left: f(g(7)) == (7 + 2) * 2 == 18."""
    def double(n):
        return n * 2

    def plus_two(n):
        return n + 2

    assert compose(double, plus_two)(7) == 18
def get_parser():
    """Build the argument parser for running multiple testkit.cfg files in parallel.

    Returns the configured argparse.ArgumentParser with the runner set as
    the default ``func``.
    """
    desc = "Run multiple testkit.cfg files in parallel"
    p = get_default_argparser(__version__, desc)
    fs = [
        add_log_debug_option,
        TU.add_override_chunked_mode,
        TU.add_override_distribute_option,
        add_ignore_test_failures_option,
    ]
    p = compose(*fs)(p)
    p.add_argument(
        'testkit_cfg_fofn',
        type=validate_testkit_cfg_fofn,
        # FIX: the original help text had an unclosed parenthesis.
        help="File of butler.cfg file name relative to the current dir "
             "(e.g., RS_Resquencing/testkit.cfg)")
    p.add_argument('-n', '--nworkers', type=int, default=1,
                   help="Number of jobs to concurrently run.")
    p.add_argument("-j", "--junit-xml", dest="junit_out", action="store",
                   default="junit_combined_results.xml",
                   help="JUnit output file for all tests")
    p.set_defaults(func=_args_run_multi_testkit_cfg)
    return p
def add_base_options_with_emit_tool_contract(p):
    """Add base options plus resolved/emit tool-contract options to parser *p*."""
    return compose(
        add_base_options,
        add_resolved_tool_contract_option,
        add_emit_tool_contract_option,
    )(p)
def test_simple(self):
    """compose(f, g)(x) == f(g(x)); doubling after adding two maps 7 -> 18."""
    f = lambda x: x * 2
    g = lambda y: y + 2
    h = compose(f, g)
    value = h(7)
    # FIX: assertEquals is a long-deprecated alias; use assertEqual.
    self.assertEqual(value, 18)
def test_partial(self):
    """compose works with functools.partial-wrapped callables: (5 + 2) + 5 == 12."""
    def add(a, b):
        return a + b

    add_five = functools.partial(add, 5)
    add_two = functools.partial(add, 2)
    f = compose(add_five, add_two)
    value = f(5)
    # FIX: assertEquals is a long-deprecated alias; use assertEqual.
    self.assertEqual(value, 12)
def __add_pipeline_parser_options(p):
    """Common options for all running pipelines or tasks"""
    # Order matters for compose: the last adder runs first.
    option_adders = (
        TU.add_override_chunked_mode,
        TU.add_override_distribute_option,
        _add_webservice_config,
        _add_rc_preset_xml_option,
        _add_preset_json_option,
        _add_preset_xml_option,
        _add_output_dir_option,
        _add_entry_point_option,
        add_log_debug_option,
    )
    return compose(*option_adders)(p)
def add_task_parser_options(p):
    """Add the full set of task-running options (chunking, distribution,
    webservice config, presets, output dir, entry point, task id, logging)
    to parser *p*."""
    option_adders = (
        TU.add_override_chunked_mode,
        TU.add_override_distribute_option,
        _add_webservice_config,
        _add_rc_preset_xml_option,
        _add_preset_xml_option,
        _add_preset_json_option,
        _add_output_dir_option,
        _add_entry_point_single_task_option,
        _add_task_id_run_option,
        add_log_debug_option,
    )
    return compose(*option_adders)(p)
def get_parser():
    """Build the argument parser for the testkit tool that runs pbsmrtpipe jobs."""
    desc = "Testkit Tool to run pbsmrtpipe jobs."
    parser = get_default_argparser_with_base_opts(__version__, desc)
    option_adders = (
        TU.add_override_chunked_mode,
        TU.add_override_distribute_option,
        _add_config_file_option,
        add_tests_only_option,
        add_ignore_test_failures_option,
        add_output_xml_option,
    )
    return compose(*option_adders)(parser)
def get_parser():
    """Build the testkit argument parser for running pbsmrtpipe jobs."""
    desc = "Testkit Tool to run pbsmrtpipe jobs."
    # compose preserves the original right-to-left application order.
    add_all = compose(TU.add_override_chunked_mode,
                      TU.add_override_distribute_option,
                      _add_config_file_option,
                      add_tests_only_option,
                      add_ignore_test_failures_option,
                      add_output_xml_option)
    return add_all(get_default_argparser_with_base_opts(__version__, desc))
def _f():
    # NOTE(review): passes a single *list* to compose rather than unpacked
    # callables; presumably a fixture exercising compose's rejection of
    # non-callable/empty input (e.g. under assertRaises) — TODO confirm
    # against the calling test.
    return compose([])
return testkit_cfgs  # NOTE(review): tail of an enclosing function whose def starts outside this view


def testkit_cfg_fofn_to_files(fofn):
    """Resolve a FOFN of testkit.cfg entries to paths relative to the FOFN's own directory."""
    return _testkit_cfg_fofn_to_files(fofn, os.path.dirname(fofn))


def _validate_testkit_cfg_fofn(path):
    """Validate a testkit.cfg FOFN by resolving every listed file; return the absolute path."""
    p = os.path.abspath(path)
    # files will be relative the supplied fofn
    dir_name = os.path.dirname(p)
    # Called for its side effect: raises if any listed file is invalid/missing.
    _testkit_cfg_fofn_to_files(p, dir_name)
    return p


# Composed argparse-style validator: check the file exists first, then its contents.
validate_testkit_cfg_fofn = compose(_validate_testkit_cfg_fofn, validate_file)


def _merge_xml_results(xml_type, merge_function, testkit_cfgs, output_file, file_base):
    """Merge per-testkit XML outputs (named *file_base*, found next to each
    testkit.cfg) into *output_file* using *merge_function*; logs an error
    and writes nothing when no XML outputs exist."""
    log.info("Combining individual %s XML files", xml_type)
    xml_files = []
    for testkit_cfg in testkit_cfgs:
        xml_file = os.path.join(os.path.dirname(testkit_cfg), file_base)
        if os.path.exists(xml_file):
            xml_files.append(xml_file)
    if len(xml_files) == 0:
        log.error("No %s XML outputs found", xml_type)
    else:
        merge_function(output_file, xml_files)
def add_base_options(p):
    """Add the debug and log-level options to parser *p*."""
    return compose(add_debug_option, add_log_level_option)(p)
def add_sal_and_xml_dir_options(p):
    """Add common, SAL, and XML-or-directory options to parser *p*."""
    add_all = compose(add_common_options,
                      add_sal_options,
                      add_xml_or_dir_option)
    return add_all(p)
def add_base_and_sal_options(p):
    """Add the common options plus the SAL options to parser *p*."""
    return compose(add_common_options, add_sal_options)(p)
"Invalid DataSet(s) {p} {e}".format(p=path, e=e))  # NOTE(review): tail of a raise inside a closure defined before this view
    return wrapper


def _validate_xml(path):
    """Validate that *path* parses as XML; return the path or raise ArgumentTypeError."""
    try:
        p = ET.parse(path)
        _ = p.getroot()
        return path
    except ET.ParseError as e:
        raise argparse.ArgumentTypeError("Invalid XML file {p} {e}".format(
            p=path, e=e))


# Composed validators: validate_file checks existence first, then content checks run.
validate_xml_file = compose(_validate_xml, validate_file)
validate_subreadset = compose(_validate_dataset(SubreadSet), validate_xml_file)
# NOTE(review): "valdiate" is misspelled, but the name may be imported
# elsewhere — confirm callers before renaming.
valdiate_hdfsubreadset = compose(_validate_dataset(HdfSubreadSet),
                                 validate_xml_file)
validate_alignmentset = compose(_validate_dataset(AlignmentSet),
                                validate_xml_file)

# These are really 'options', but keeping the naming convention consistent
add_input_fofn_option = _add_input_file_option(
    'input_fofn', validate_fofn, "Path to input.fofn (File of File names)")
add_input_fasta_option = _add_input_file_option('fasta', validate_file,
                                                "Path to Fasta file.")
add_input_fasta_reference_option = _add_input_file_option(
    'fasta', validate_file, "Path to PacBio Reference Entry Fasta file.")
add_input_fastq_option = _add_input_file_option('fastq', validate_file,
def add_base_options_with_emit_tool_contract(p):
    """Add base options and the resolved/emit tool-contract options to parser *p*."""
    add_all = compose(add_base_options,
                      add_resolved_tool_contract_option,
                      add_emit_tool_contract_option)
    return add_all(p)
def add_get_job_list_options(p):
    """Add base/SAL options and a max-items option (default 25) to parser *p*."""
    return compose(add_base_and_sal_options,
                   add_max_items_option(25, "Max Number of jobs"))(p)
# If this raises, the error is a mangled from argparse and yields # a cryptic error message raise argparse.ArgumentTypeError("Invalid DataSet(s) {p} {e}".format(p=path, e=e)) return wrapper def _validate_xml(path): try: p = ET.parse(path) _ = p.getroot() return path except ET.ParseError as e: raise argparse.ArgumentTypeError("Invalid XML file {p} {e}".format(p=path, e=e)) validate_xml_file = compose(_validate_xml, validate_file) validate_subreadset = compose(_validate_dataset(SubreadSet), validate_xml_file) valdiate_hdfsubreadset = compose(_validate_dataset(HdfSubreadSet), validate_xml_file) validate_alignmentset = compose(_validate_dataset(AlignmentSet), validate_xml_file) # These are really 'options', but keeping the naming convention consistent add_input_fofn_option = _add_input_file_option('input_fofn', validate_fofn, "Path to input.fofn (File of File names)") add_input_fasta_option = _add_input_file_option('fasta', validate_file, "Path to Fasta file.") add_input_fasta_reference_option = _add_input_file_option('fasta', validate_file, "Path to PacBio Reference Entry Fasta file.") add_input_fastq_option = _add_input_file_option('fastq', validate_file, "Path to Fastq file") add_input_alignmentset_option = _add_input_file_option('alignmentset', validate_alignmentset, "Path to AlignmentSet XML file") add_input_hdfsubreadset_option = _add_input_file_option('hdfsubreadset', valdiate_hdfsubreadset, "Path to HdfSubreadSet XML file") add_input_subreadset_option = _add_input_file_option('subreadset', validate_subreadset, "Path to SubreadSet XML file") add_input_csv_option = _add_input_file_option('csv', validate_file, "Path to CSV")