Example #1
def args_emit_analysis_template(args):
    ep1 = ServiceEntryPoint("eid_ref_dataset", FileTypes.DS_REF.file_type_id, 1)
    ep1_d = ep1.to_d()
    ep1_d['_comment'] = "datasetId can be provided as the DataSet UUID or Int. The entryId(s) can be obtained by running 'pbsmrtpipe show-pipeline-templates {PIPELINE-ID}'"
    d = dict(name="Job name",
             pipelineId="pbsmrtpipe.pipelines.dev_diagnostic",
             entryPoints=[ep1_d],
             taskOptions=[],
             workflowOptions=[])

    sx = json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))
    print(sx)

    return 0
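For context, a minimal sketch of how this emitter could be wired into a CLI. The import paths are assumptions based on the pbcommand package, and `main` is a hypothetical entry point; the snippet above also needs the `json` import shown here.

import argparse
import json
import sys

from pbcommand.models import FileTypes  # assumed import path
from pbcommand.services import ServiceEntryPoint  # assumed import path


def main(argv=None):
    # args_emit_analysis_template ignores its argument beyond the signature,
    # so a bare parsed namespace suffices.
    p = argparse.ArgumentParser(description="Emit an analysis-job JSON template")
    return args_emit_analysis_template(p.parse_args(argv))


if __name__ == "__main__":
    sys.exit(main())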
Example #2
def run_analysis_job(sal, job_name, pipeline_id, service_entry_points, block=False, time_out=None, task_options=()):
    """Run analysis (pbsmrtpipe) job

    :rtype: ServiceJob
    """
    if time_out is None:
        time_out = sal.JOB_DEFAULT_TIMEOUT
    status = sal.get_status()
    log.info("System:{i} v:{v} Status:{x}".format(x=status['message'], i=status['id'], v=status['version']))

    resolved_service_entry_points = []
    for service_entry_point in service_entry_points:
        # Always lookup/resolve the dataset by looking up the id
        ds = sal.get_dataset_by_uuid(service_entry_point.resource)
        if ds is None:
            raise ValueError("Failed to find DataSet with id {r} {s}".format(s=service_entry_point, r=service_entry_point.resource))

        dataset_id = ds['id']
        ep = ServiceEntryPoint(service_entry_point.entry_id, service_entry_point.dataset_type, dataset_id)
        log.debug("Resolved dataset {e}".format(e=ep))
        resolved_service_entry_points.append(ep)

    if block:
        job_result = sal.run_by_pipeline_template_id(job_name, pipeline_id, resolved_service_entry_points, time_out=time_out, task_options=task_options)
        job_id = job_result.job.id
        # service job
        result = sal.get_analysis_job_by_id(job_id)
        if not result.was_successful():
            raise JobExeError("Job {i} failed:\n{e}".format(i=job_id, e=job_result.job.error_message))
    else:
        # service job or error
        result = sal.create_by_pipeline_template_id(job_name, pipeline_id, resolved_service_entry_points)

    log.info("Result {r}".format(r=result))
    return result
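A hedged usage sketch, assuming ServiceAccessLayer from pbcommand.services; the host, port, and DataSet UUID are placeholders.

from pbcommand.services import ServiceAccessLayer, ServiceEntryPoint  # assumed import path

sal = ServiceAccessLayer("smrtlink-host", 8081)  # placeholder host/port
# The resource is given as a DataSet UUID; run_analysis_job resolves it
# to the integer id the services expect.
ep = ServiceEntryPoint("eid_ref_dataset",
                       "PacBio.DataSet.ReferenceSet",
                       "11111111-2222-3333-4444-555555555555")  # hypothetical UUID
job = run_analysis_job(sal, "Dev diagnostic run",
                       "pbsmrtpipe.pipelines.dev_diagnostic",
                       [ep], block=True)

With block=True the call waits for the job to finish and raises JobExeError on failure; with block=False it returns as soon as the job is created.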
Example #3
def load_analysis_job_json(d):
    """Translate a dict to args for scenario runner inputs"""
    job_name = to_ascii(d['name'])
    pipeline_template_id = to_ascii(d["pipelineId"])
    service_epoints = [ServiceEntryPoint.from_d(x) for x in d['entryPoints']]
    tags = d.get('tags', [])
    return job_name, pipeline_template_id, service_epoints, tags
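For instance, fed a template like the one emitted in Example #1 (the entry-point keys entryId/fileTypeId/datasetId are assumed to match what ServiceEntryPoint.from_d expects):

import json

raw = """{
    "name": "Job name",
    "pipelineId": "pbsmrtpipe.pipelines.dev_diagnostic",
    "entryPoints": [
        {"entryId": "eid_ref_dataset",
         "fileTypeId": "PacBio.DataSet.ReferenceSet",
         "datasetId": 1}
    ],
    "taskOptions": [],
    "workflowOptions": []
}"""

job_name, pipeline_id, epoints, tags = load_analysis_job_json(json.loads(raw))
# tags falls back to [] because this template carries no "tags" field.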
Example #4
def run_services_testkit_job(host,
                             port,
                             testkit_cfg,
                             xml_out="test-output.xml",
                             ignore_test_failures=False,
                             time_out=1800,
                             sleep_time=2,
                             import_only=False,
                             test_job_id=None):
    """
    Given a testkit.cfg and host/port parameters:
        1. convert the .cfg to a JSON file
        2. connect to the SMRTLink services and start the job, then block
           until it finishes
        3. run the standard test suite on the job output
    """
    sal = ServiceAccessLayer(host, port, sleep_time=sleep_time)
    if test_job_id is not None:
        engine_job = sal.get_job_by_id(test_job_id)
        return run_butler_tests_from_cfg(testkit_cfg=testkit_cfg,
                                         output_dir=engine_job.path,
                                         output_xml=xml_out,
                                         service_access_layer=sal,
                                         services_job_id=test_job_id)
    entrypoints = get_entrypoints(testkit_cfg)
    pipeline_id = pipeline_id_from_testkit_cfg(testkit_cfg)
    job_id = job_id_from_testkit_cfg(testkit_cfg)
    log.info("job_id = {j}".format(j=job_id))
    log.info("pipeline_id = {p}".format(p=pipeline_id))
    log.info("url = {h}:{p}".format(h=host, p=port))
    task_options, workflow_options = get_task_and_workflow_options(testkit_cfg)
    service_entrypoints = [
        ServiceEntryPoint.from_d(x) for x in entrypoints_dicts(entrypoints)
    ]
    for ep, dataset_xml in entrypoints.items():
        log.info("Importing {x}".format(x=dataset_xml))
        sal.run_import_local_dataset(dataset_xml)
    if import_only:
        log.info("Skipping job execution")
        return 0
    log.info("starting anaylsis job...")
    # XXX note that workflow options are currently ignored
    engine_job = run_analysis_job(sal,
                                  job_id,
                                  pipeline_id,
                                  service_entrypoints,
                                  block=True,
                                  time_out=time_out,
                                  task_options=task_options)
    exit_code = run_butler_tests_from_cfg(testkit_cfg=testkit_cfg,
                                          output_dir=engine_job.path,
                                          output_xml=xml_out,
                                          service_access_layer=sal,
                                          services_job_id=engine_job.id)
    if ignore_test_failures and engine_job.was_successful():
        return 0
    return exit_code
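A minimal invocation sketch; the host, port, and config path are placeholders.

# Imports the datasets referenced by the .cfg, runs the pipeline to
# completion, then executes the butler test suite against the job output.
exit_code = run_services_testkit_job(
    host="localhost",                    # placeholder
    port=8081,                           # placeholder
    testkit_cfg="/path/to/testkit.cfg",  # placeholder path
    time_out=3600,
)

Passing import_only=True stops after the dataset import step, while test_job_id re-runs the test suite against an existing job without launching a new one.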
Example #5
def run_services_testkit_job(host, port, testkit_cfg,
                             xml_out="test-output.xml",
                             ignore_test_failures=False,
                             time_out=1800, sleep_time=2,
                             import_only=False, test_job_id=None):
    """
    Given a testkit.cfg and host/port parameters:
        1. convert the .cfg to a JSON file
        2. connect to the SMRTLink services and start the job, then block
           until it finishes
        3. run the standard test suite on the job output
    """
    sal = ServiceAccessLayer(host, port, sleep_time=sleep_time)
    if test_job_id is not None:
        engine_job = sal.get_job_by_id(test_job_id)
        return run_butler_tests_from_cfg(
            testkit_cfg=testkit_cfg,
            output_dir=engine_job.path,
            output_xml=xml_out,
            service_access_layer=sal,
            services_job_id=test_job_id)
    entrypoints = get_entrypoints(testkit_cfg)
    pipeline_id = pipeline_id_from_testkit_cfg(testkit_cfg)
    job_id = job_id_from_testkit_cfg(testkit_cfg)
    log.info("job_id = {j}".format(j=job_id))
    log.info("pipeline_id = {p}".format(p=pipeline_id))
    log.info("url = {h}:{p}".format(h=host, p=port))
    task_options, workflow_options = get_task_and_workflow_options(testkit_cfg)
    service_entrypoints = [ServiceEntryPoint.from_d(x) for x in
                           entrypoints_dicts(entrypoints)]
    for ep, dataset_xml in entrypoints.items():
        log.info("Importing {x}".format(x=dataset_xml))
        sal.run_import_local_dataset(dataset_xml)
    if import_only:
        log.info("Skipping job execution")
        return 0
    log.info("starting anaylsis job...")
    # XXX note that workflow options are currently ignored
    engine_job = run_analysis_job(sal, job_id, pipeline_id,
                                  service_entrypoints, block=True,
                                  time_out=time_out,
                                  task_options=task_options)
    exit_code = run_butler_tests_from_cfg(
        testkit_cfg=testkit_cfg,
        output_dir=engine_job.path,
        output_xml=xml_out,
        service_access_layer=sal,
        services_job_id=engine_job.id)
    if ignore_test_failures and engine_job.was_successful():
        return 0
    return exit_code
Example #6
def load_analysis_job_json(d):
    """Translate a dict to args for scenario runner inputs"""
    job_name = to_ascii(d['name'])
    pipeline_template_id = to_ascii(d["pipelineId"])
    service_epoints = [ServiceEntryPoint.from_d(x) for x in d['entryPoints']]
    return job_name, pipeline_template_id, service_epoints
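Note that this variant matches Example #3 except that it omits the optional tags field, so it returns a three-tuple rather than a four-tuple; callers that expect the tags list should use the earlier version.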