def main(parser):
    """Collate report attributes for a set of SMRTLink analysis jobs.

    Reads a CSV of jobs (expects at least 'jobName', 'host' and 'jobId'
    columns), pulls each job's report attributes from the services, and
    writes the collated table to <outDir>/<DEFAULTCSV>.csv and .xls.

    :param parser: configured argparse.ArgumentParser; parsed args must
        provide jobCsv, host, port and outDir
    :return: None

    Exits the process (sys.exit) if any listed job has not finished.
    """
    args = parser.parse_args()
    jobs = pd.read_csv(args.jobCsv)
    sal = ServiceAccessLayer(args.host, args.port)
    # get dicts of values for all jobs
    rpts = jobs.jobId.apply(sal.get_analysis_job_report_attrs).values
    # check for unfinished jobs, exit if any
    unfinished = [j for j in jobs.jobId
                  if sal.get_job_by_id(j).state not in FINISHED]
    if unfinished:
        # print() with a single argument is valid in both Python 2 and 3
        for j in unfinished:
            print('job %i still running' % j)
        print('Exiting')
        sys.exit()
    # put the reports together and index with (jobName, host, jobId, link)
    jobs['link'] = jobs[['host', 'jobId']].apply(LINKFMT, axis=1)
    columns = ['jobName', 'host', 'jobId', 'link']
    idx = pd.MultiIndex.from_arrays(jobs[columns].values.T, names=columns)
    collated = pd.DataFrame.from_records(rpts, index=idx).T
    # write the same table in both CSV and Excel formats
    for fmt, fnc in zip(['.csv', '.xls'],
                        [pd.DataFrame.to_csv, pd.DataFrame.to_excel]):
        ofile = '{d}/{name}{fmt}'.format(d=args.outDir, name=DEFAULTCSV,
                                         fmt=fmt)
        fnc(collated, ofile)  # float_format=FLOATFMT
        print('Wrote results to %s' % ofile)
    return None
def run_services_testkit_job(host, port, testkit_cfg,
                             xml_out="test-output.xml",
                             ignore_test_failures=False, time_out=1800,
                             sleep_time=2, import_only=False,
                             test_job_id=None):
    """
    Run a testkit job through the SMRTLink services and test its output.

    Given a testkit.cfg and host/port parameters:
      1. convert the .cfg to a JSON file
      2. connect to the SMRTLink services and start the job, then block
         until it finishes
      3. run the standard test suite on the job output

    :param host: SMRTLink services hostname
    :param port: SMRTLink services port
    :param testkit_cfg: path to the testkit.cfg file
    :param xml_out: output path for the XML test report
    :param ignore_test_failures: if True, return 0 whenever the services
        job itself succeeded, even if the butler tests failed
    :param time_out: maximum seconds to wait for job completion
    :param sleep_time: polling interval (seconds) for job status
    :param import_only: import the entry-point datasets and return without
        running the analysis job
    :param test_job_id: if not None, skip submission and run the tests
        against this existing services job
    :return: integer exit code (0 on success)
    """
    # NOTE(review): this function is defined twice in this module; the
    # later definition shadows this one at import time -- remove one copy.
    sal = ServiceAccessLayer(host, port, sleep_time=sleep_time)
    if test_job_id is not None:
        # re-test an existing services job instead of submitting a new one
        engine_job = sal.get_job_by_id(test_job_id)
        return run_butler_tests_from_cfg(testkit_cfg=testkit_cfg,
                                         output_dir=engine_job.path,
                                         output_xml=xml_out,
                                         service_access_layer=sal,
                                         services_job_id=test_job_id)
    entrypoints = get_entrypoints(testkit_cfg)
    pipeline_id = pipeline_id_from_testkit_cfg(testkit_cfg)
    job_id = job_id_from_testkit_cfg(testkit_cfg)
    log.info("job_id = {j}".format(j=job_id))
    log.info("pipeline_id = {p}".format(p=pipeline_id))
    log.info("url = {h}:{p}".format(h=host, p=port))
    task_options, workflow_options = get_task_and_workflow_options(testkit_cfg)
    service_entrypoints = [ServiceEntryPoint.from_d(x)
                           for x in entrypoints_dicts(entrypoints)]
    # .items() instead of .iteritems() keeps this Python 2/3 compatible
    for ep, dataset_xml in entrypoints.items():
        log.info("Importing {x}".format(x=dataset_xml))
        sal.run_import_local_dataset(dataset_xml)
    if import_only:
        log.info("Skipping job execution")
        return 0
    log.info("starting analysis job...")  # fixed typo: 'anaylsis'
    # XXX note that workflow options are currently ignored
    engine_job = run_analysis_job(sal, job_id, pipeline_id,
                                  service_entrypoints, block=True,
                                  time_out=time_out,
                                  task_options=task_options)
    exit_code = run_butler_tests_from_cfg(testkit_cfg=testkit_cfg,
                                          output_dir=engine_job.path,
                                          output_xml=xml_out,
                                          service_access_layer=sal,
                                          services_job_id=engine_job.id)
    if ignore_test_failures and engine_job.was_successful():
        return 0
    return exit_code
def run_services_testkit_job(host, port, testkit_cfg,
                             xml_out="test-output.xml",
                             ignore_test_failures=False, time_out=1800,
                             sleep_time=2, import_only=False,
                             test_job_id=None):
    """
    Drive a testkit run against the SMRTLink services.

    Converts the testkit.cfg to service inputs, imports the entry-point
    datasets, submits the analysis job (blocking until completion), and
    finally runs the butler test suite on the job output.  When
    test_job_id is given, submission is skipped and the tests are run
    against that existing services job instead.  Returns an integer
    exit code (0 on success); if ignore_test_failures is True, a
    successful services job yields 0 regardless of test results.
    """
    services = ServiceAccessLayer(host, port, sleep_time=sleep_time)
    # short-circuit: only run the test suite on an existing job
    if test_job_id is not None:
        existing = services.get_job_by_id(test_job_id)
        return run_butler_tests_from_cfg(
            testkit_cfg=testkit_cfg,
            output_dir=existing.path,
            output_xml=xml_out,
            service_access_layer=services,
            services_job_id=test_job_id)
    eps = get_entrypoints(testkit_cfg)
    pipeline = pipeline_id_from_testkit_cfg(testkit_cfg)
    job_name = job_id_from_testkit_cfg(testkit_cfg)
    log.info("job_id = {j}".format(j=job_name))
    log.info("pipeline_id = {p}".format(p=pipeline))
    log.info("url = {h}:{p}".format(h=host, p=port))
    task_opts, workflow_opts = get_task_and_workflow_options(testkit_cfg)
    service_eps = []
    for d in entrypoints_dicts(eps):
        service_eps.append(ServiceEntryPoint.from_d(d))
    # push every entry-point dataset into the services before running
    for _, dataset_xml in eps.iteritems():
        log.info("Importing {x}".format(x=dataset_xml))
        services.run_import_local_dataset(dataset_xml)
    if import_only:
        log.info("Skipping job execution")
        return 0
    log.info("starting anaylsis job...")
    # XXX note that workflow options are currently ignored
    job = run_analysis_job(services, job_name, pipeline, service_eps,
                           block=True, time_out=time_out,
                           task_options=task_opts)
    rc = run_butler_tests_from_cfg(
        testkit_cfg=testkit_cfg,
        output_dir=job.path,
        output_xml=xml_out,
        service_access_layer=services,
        services_job_id=job.id)
    if ignore_test_failures and job.was_successful():
        return 0
    return rc