def run_main(host, port, nprocesses, ntimes, profile_csv):
    """Run the smrtlink stress test against a server and write a profile CSV.

    Imports a referenceset, generates `ntimes` batches of work items, runs
    them through a multiprocessing pool of `nprocesses` workers, then records
    before/after server counts and timing into `profile_csv`.

    :param host: SMRT Link server host
    :param port: SMRT Link server port
    :param nprocesses: number of worker processes for the pool
    :param ntimes: number of times to repeat the generated workload
    :param profile_csv: output path for the profile summary CSV
    :return: 0 if every work item exited 0, else 1

    NOTE(review): a second ``run_main`` definition later in this file
    shadows this one at import time — confirm which is intended.
    """
    # logging.basicConfig(level=logging.DEBUG, file=sys.stdout)
    profile_d = {}
    started_at = time.time()
    log.info(FUNCS.keys())

    sal = ServiceAccessLayer(host, port)
    status = sal.get_status()
    log.info("Status {}".format(status))

    # Server state before the run, for before/after comparison in the profile
    profile_d['nprocesses'] = nprocesses
    profile_d["init_nsubreads"] = len(sal.get_subreadsets())
    profile_d['init_nreferences'] = len(sal.get_referencesets())
    profile_d['init_njobs'] = len(sal.get_analysis_jobs())

    chunksize = 6
    info = "{h}:{p} with ntimes:{n} with processors:{x}".format(
        h=host, p=port, n=ntimes, x=nprocesses)

    # FIXME. All paths are relative to smrtflow root
    def to_p(rpath):
        # Resolve a repo-relative path against the current working directory
        return os.path.join(os.getcwd(), rpath)

    # DataSet
    referenceset_path = to_p(
        "test-data/smrtserver-testdata/ds-references/mk-01/mk_name_01/referenceset.xml"
    )
    subreadset_path = to_p(
        "test-data/smrtserver-testdata/ds-subreads/PacBioTestData/m54006_160504_020705.tiny.subreadset.xml"
    )
    # Run Design
    run_design_path = to_p(
        "smrt-server-link/src/test/resources/runCreate2.xml")
    # Dev Diagnostic
    analysis_json = to_p(
        "smrt-server-link/src/test/resources/analysis-dev-diagnostic-stress-01.json"
    )

    output_dir_prefix = to_p("test-output")
    if not os.path.exists(output_dir_prefix):
        os.mkdir(output_dir_prefix)

    # import referenceset with original UUID for the dev_diagnostic run
    _run_cmd("{pbservice} import-dataset --host={h} --port={p} {x}".format(
        pbservice=pbservice, h=host, p=port, x=referenceset_path))

    xs = _generate_data(host, port, [referenceset_path, subreadset_path],
                        analysis_json, run_design_path, output_dir_prefix,
                        ntimes)

    log.info("Starting {i}".format(i=info))
    pool = multiprocessing.Pool(nprocesses)
    try:
        results = pool.map(runner, xs, chunksize=chunksize)
    finally:
        # FIX: the pool was previously never closed/joined, leaking worker
        # processes (especially if map() raised). Always shut it down.
        pool.close()
        pool.join()

    failed = [r for r in results if r.exit_code != 0]
    was_successful = len(failed) == 0
    for f in failed:
        log.error(f)

    log.debug("exiting {i}".format(i=info))
    if failed:
        log.error("Failed Results {r} of {x}".format(r=len(failed),
                                                     x=len(results)))

    run_time_sec = time.time() - started_at

    # Server state after the run plus overall outcome
    profile_d['nresults'] = len(results)
    profile_d['nfailed'] = len(failed)
    profile_d['was_successful'] = was_successful
    profile_d["final_nsubreads"] = len(sal.get_subreadsets())
    profile_d['final_nreferences'] = len(sal.get_referencesets())
    profile_d['final_njobs'] = len(sal.get_analysis_jobs())
    profile_d['run_time_sec'] = run_time_sec

    write_profile(profile_d, profile_csv)
    return 0 if was_successful else 1
def run_main(host, port, nprocesses, ntimes, profile_csv):
    """Run the smrtlink stress test against a server and write a profile CSV.

    Imports a referenceset via the `pbservice` CLI, generates `ntimes`
    batches of work items, runs them through a multiprocessing pool of
    `nprocesses` workers, then records before/after server counts and
    timing into `profile_csv`.

    :param host: SMRT Link server host
    :param port: SMRT Link server port
    :param nprocesses: number of worker processes for the pool
    :param ntimes: number of times to repeat the generated workload
    :param profile_csv: output path for the profile summary CSV
    :return: 0 if every work item exited 0, else 1

    NOTE(review): this redefines ``run_main`` and shadows an earlier,
    near-identical definition in this file — confirm which is intended.
    """
    # logging.basicConfig(level=logging.DEBUG, file=sys.stdout)
    profile_d = {}
    started_at = time.time()
    log.info(FUNCS.keys())

    sal = ServiceAccessLayer(host, port)
    status = sal.get_status()
    log.info("Status {}".format(status))

    # Server state before the run, for before/after comparison in the profile
    profile_d['nprocesses'] = nprocesses
    profile_d["init_nsubreads"] = len(sal.get_subreadsets())
    profile_d['init_nreferences'] = len(sal.get_referencesets())
    profile_d['init_njobs'] = len(sal.get_analysis_jobs())

    chunksize = 6
    info = "{h}:{p} with ntimes:{n} with processors:{x}".format(
        h=host, p=port, n=ntimes, x=nprocesses)

    # FIXME. All paths are relative to smrtflow root
    def to_p(rpath):
        # Resolve a repo-relative path against the current working directory
        return os.path.join(os.getcwd(), rpath)

    # DataSet
    referenceset_path = to_p(
        "test-data/smrtserver-testdata/ds-references/mk-01/mk_name_01/referenceset.xml"
    )
    subreadset_path = to_p(
        "test-data/smrtserver-testdata/ds-subreads/PacBioTestData/m54006_160504_020705.tiny.subreadset.xml"
    )
    # Run Design
    run_design_path = to_p(
        "smrt-server-link/src/test/resources/runCreate2.xml")
    # Dev Diagnostic
    analysis_json = to_p(
        "smrt-server-analysis/src/test/resources/analysis-dev-diagnostic-stress-01.json"
    )

    output_dir_prefix = to_p("test-output")
    if not os.path.exists(output_dir_prefix):
        os.mkdir(output_dir_prefix)

    # import referenceset with original UUID for the dev_diagnostic run
    _run_cmd("pbservice import-dataset --host={h} --port={p} {x}".format(
        h=host, p=port, x=referenceset_path))

    xs = _generate_data(host, port, [referenceset_path, subreadset_path],
                        analysis_json, run_design_path, output_dir_prefix,
                        ntimes)

    log.info("Starting {i}".format(i=info))
    pool = multiprocessing.Pool(nprocesses)
    try:
        results = pool.map(runner, xs, chunksize=chunksize)
    finally:
        # FIX: the pool was previously never closed/joined, leaking worker
        # processes (especially if map() raised). Always shut it down.
        pool.close()
        pool.join()

    failed = [r for r in results if r.exit_code != 0]
    was_successful = len(failed) == 0
    for f in failed:
        log.error(f)

    log.debug("exiting {i}".format(i=info))
    if failed:
        log.error("Failed Results {r} of {x}".format(r=len(failed),
                                                     x=len(results)))

    run_time_sec = time.time() - started_at

    # Server state after the run plus overall outcome
    profile_d['nresults'] = len(results)
    profile_d['nfailed'] = len(failed)
    profile_d['was_successful'] = was_successful
    profile_d["final_nsubreads"] = len(sal.get_subreadsets())
    profile_d['final_nreferences'] = len(sal.get_referencesets())
    profile_d['final_njobs'] = len(sal.get_analysis_jobs())
    profile_d['run_time_sec'] = run_time_sec

    write_profile(profile_d, profile_csv)
    return 0 if was_successful else 1