Example #1
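All snippets below assume the imports sketched here. The standard-library imports are certain from usage; the caper module paths and the test-helper names are assumptions inferred from how they are used, not confirmed against the real test suite.

import json
import os
import sys
import time
from io import StringIO

from autouri import AutoURI

# caper module paths below are assumed from the class names used in the tests
from caper.caper_labels import CaperLabels
from caper.cromwell import Cromwell
from caper.cromwell_metadata import CromwellMetadata
from caper.cromwell_rest_api import CromwellRestAPI
from caper.wdl_parser import WDLParser

# Fixtures and constants (make_directory_with_wdls,
# make_directory_with_failing_wdls, WRONG_WDL, BACKEND_CONF_CONTENTS,
# TIMEOUT_SERVER_SPIN_UP, TIMEOUT_SERVER_RUN_WORKFLOW) are expected to come
# from the test suite's own helper module.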
def test_on_failed_workflow(tmp_path, cromwell, womtool):
    fileobj_stdout = sys.stdout

    make_directory_with_failing_wdls(str(tmp_path / 'failed'))

    # Run Cromwell on a workflow that is designed to fail in a subworkflow
    # and collect the resulting metadata JSON
    c = Cromwell(cromwell=cromwell, womtool=womtool)
    th = c.run(
        wdl=str(tmp_path / 'failed' / 'main.wdl'),
        inputs=str(tmp_path / 'failed' / 'inputs.json'),
        fileobj_stdout=fileobj_stdout,
        cwd=str(tmp_path / 'failed'),
    )
    th.join()

    # check that the workflow failed
    assert th.returncode
    metadata = th.returnvalue
    assert metadata
    cm = CromwellMetadata(metadata)

    assert cm.failures == metadata['failures']
    assert cm.calls == metadata['calls']

    # test troubleshoot()
    fileobj = StringIO()
    cm.troubleshoot(fileobj=fileobj)

    fileobj.seek(0)
    s = fileobj.read()
    assert '* Found failures JSON object' in s
    assert 'NAME=sub.t2_failing' in s
    assert 'INTENTED_ERROR: command not found' in s
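For context, here is a minimal sketch of what make_directory_with_failing_wdls() presumably generates, based only on the names asserted above (sub.t2_failing, INTENTED_ERROR); the real fixture may differ.

def make_directory_with_failing_wdls_sketch(directory):
    """Hypothetical stand-in for the real fixture: a main workflow calling a
    subworkflow whose task runs an intentionally undefined command."""
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, 'main.wdl'), 'w') as fp:
        fp.write(
            'version 1.0\n'
            'import "sub.wdl" as sub\n'
            'workflow main {\n'
            '    call sub.sub\n'
            '}\n'
        )
    with open(os.path.join(directory, 'sub.wdl'), 'w') as fp:
        # INTENTED_ERROR is not a real command, so bash fails with
        # 'INTENTED_ERROR: command not found'.
        fp.write(
            'version 1.0\n'
            'workflow sub {\n'
            '    call t2_failing\n'
            '}\n'
            'task t2_failing {\n'
            '    command {\n'
            '        INTENTED_ERROR\n'
            '    }\n'
            '}\n'
        )
    with open(os.path.join(directory, 'inputs.json'), 'w') as fp:
        fp.write('{}\n')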
Example #2
def test_on_successful_workflow(tmp_path, cromwell, womtool):
    fileobj_stdout = sys.stdout

    make_directory_with_wdls(str(tmp_path / 'successful'))

    # Run Cromwell to get metadata JSON
    c = Cromwell(cromwell=cromwell, womtool=womtool)
    th = c.run(
        wdl=str(tmp_path / 'successful' / 'main.wdl'),
        inputs=str(tmp_path / 'successful' / 'inputs.json'),
        fileobj_stdout=fileobj_stdout,
        cwd=str(tmp_path / 'successful'),
    )
    th.join()
    metadata = th.returnvalue
    assert metadata

    cm = CromwellMetadata(metadata)
    # test all properties
    assert cm.data == metadata
    assert cm.metadata == metadata
    assert CromwellMetadata(metadata).data == metadata
    assert cm.workflow_id == metadata['id']
    assert cm.workflow_status == metadata['status']
    # no failures for successful workflow's metadata
    assert cm.failures is None
    assert cm.calls == metadata['calls']
    assert sorted(call_name for call_name, _, _ in cm.recursed_calls) == sorted(
        ['main.t1', 'sub.t2', 'sub_sub.t3']
    )

    # test recurse_calls() with a simple callback function
    def fnc(call_name, call, parent_call_names):
        assert call_name in ('main.t1', 'sub.t2', 'sub_sub.t3')
        assert call['executionStatus'] == 'Done'
        if call_name == 'main.t1':
            assert not parent_call_names
        elif call_name == 'sub.t2':
            assert parent_call_names == ('main.sub', )
        elif call_name == 'sub_sub.t3':
            assert parent_call_names == ('main.sub', 'sub.sub_sub')
        else:
            raise ValueError('Wrong call_name: {name}'.format(name=call_name))

    list(cm.recurse_calls(fnc))

    # test write_on_workflow_root()
    m_file_on_root = os.path.join(cm.metadata['workflowRoot'], 'metadata.json')
    u = AutoURI(m_file_on_root)
    u.rm()
    assert not u.exists

    cm.write_on_workflow_root()
    assert os.path.exists(m_file_on_root)
    assert CromwellMetadata(m_file_on_root).metadata == cm.metadata
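As list(cm.recurse_calls(fnc)) above suggests, recurse_calls() appears to be a generator that yields each callback's return value. Assuming so, a one-pass status summary would look like this (hypothetical usage, not part of the test):

def name_and_status(call_name, call, parent_call_names):
    # same signature as fnc() above
    return call_name, call['executionStatus']

statuses = dict(cm.recurse_calls(name_and_status))
# e.g. {'main.t1': 'Done', 'sub.t2': 'Done', 'sub_sub.t3': 'Done'}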
Example #3
def test_validate(tmp_path, cromwell, womtool):
    c = Cromwell(cromwell=cromwell, womtool=womtool)

    wdl = tmp_path / 'wrong.wdl'
    wdl.write_text(WRONG_WDL)
    assert not c.validate(str(wdl))

    make_directory_with_wdls(str(tmp_path / 'successful'))
    wdl = tmp_path / 'successful' / 'main.wdl'
    inputs = tmp_path / 'successful' / 'inputs.json'
    assert c.validate(str(wdl), str(inputs))

    # zip subworkflows for later use
    p = WDLParser(str(wdl))
    imports = p.zip_subworkflows(str(tmp_path / 'imports.zip'))

    # test with imports.zip
    make_directory_with_wdls(str(tmp_path / 'wo_sub_wdls'), no_sub_wdl=True)
    wdl = tmp_path / 'wo_sub_wdls' / 'main.wdl'
    inputs = tmp_path / 'wo_sub_wdls' / 'inputs.json'
    assert c.validate(str(wdl), str(inputs), imports)
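Here, imports is the path of the zip written by zip_subworkflows(); its member paths presumably mirror the WDL import paths so that womtool can resolve them. A quick way to inspect the archive (member names below are an assumption):

import zipfile

with zipfile.ZipFile(imports) as zf:
    print(zf.namelist())  # e.g. ['sub.wdl', 'sub_sub.wdl'] (names assumed)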
Example #4
def test_run(tmp_path, cromwell, womtool):
    fileobj_stdout = sys.stdout

    c = Cromwell(cromwell=cromwell, womtool=womtool)

    make_directory_with_wdls(str(tmp_path))

    o_dir = tmp_path / 'output'
    o_dir.mkdir()
    work_dir = tmp_path / 'work_dir'
    work_dir.mkdir()

    backend_conf = tmp_path / 'backend.conf'
    backend_conf.write_text(BACKEND_CONF_CONTENTS.format(root=o_dir))

    # (no try/finally here: if c.run() itself raised, `th` would be unbound
    # and th.join() would mask the original error with a NameError)
    th = c.run(
        backend_conf=str(backend_conf),
        wdl=str(tmp_path / 'main.wdl'),
        inputs=str(tmp_path / 'inputs.json'),
        metadata=str(tmp_path / 'metadata.json'),
        fileobj_stdout=fileobj_stdout,
        work_dir=str(work_dir),
        cwd=str(tmp_path),
    )
    th.join()
    assert th.returncode == 0

    # check that metadata.json was written both to the specified location
    # (tmp_path/metadata.json) and to the workflow's root directory
    metadata_dict = th.returnvalue
    root_dir = metadata_dict['workflowRoot']

    with open(os.path.join(root_dir, 'metadata.json')) as fp:
        metadata_contents_on_root = fp.read()
    metadata_dict_on_root = json.loads(metadata_contents_on_root)

    assert metadata_dict == metadata_dict_on_root
    # check if backend_conf's change of root directory worked
    assert root_dir.startswith(str(o_dir))

    # zip subworkflows for later use
    p = WDLParser(str(tmp_path / 'main.wdl'))
    imports = p.zip_subworkflows(str(tmp_path / 'imports.zip'))

    # test without sub WDLs but with imports.zip
    # test run without work_dir
    make_directory_with_wdls(str(tmp_path / 'wo_sub_wdls'), no_sub_wdl=True)

    th = c.run(
        wdl=str(tmp_path / 'wo_sub_wdls' / 'main.wdl'),
        inputs=str(tmp_path / 'wo_sub_wdls' / 'inputs.json'),
        imports=imports,
        fileobj_stdout=fileobj_stdout,
        cwd=str(tmp_path / 'wo_sub_wdls'),
    )
    th.join()
    assert th.returncode == 0
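The assertion root_dir.startswith(str(o_dir)) above holds because backend.conf relocates the Local backend's execution root. BACKEND_CONF_CONTENTS is defined in the test helpers; a plausible definition is sketched below (an assumption; note the doubled braces so that .format(root=...) survives HOCON's brace syntax).

BACKEND_CONF_CONTENTS = '''
include required(classpath("application"))

backend {{
  providers {{
    Local {{
      config {{
        # execution root; each workflow's workflowRoot lands under this
        root = "{root}"
      }}
    }}
  }}
}}
'''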
Example #5
def test_server(tmp_path, cromwell, womtool):
    """Test Cromwell.server() method, which returns a Thread object.
    """
    server_port = 8005
    fileobj_stdout = sys.stdout

    c = Cromwell(cromwell=cromwell, womtool=womtool)

    o_dir = tmp_path / 'output'
    o_dir.mkdir()

    backend_conf = tmp_path / 'backend.conf'
    backend_conf.write_text(BACKEND_CONF_CONTENTS.format(root=o_dir))

    is_server_started = False

    def on_server_start():
        nonlocal is_server_started
        is_server_started = True

    workflow_id = None
    is_workflow_done = False

    def on_status_change(metadata):
        nonlocal workflow_id
        nonlocal is_workflow_done

        if metadata:
            if metadata['id'] == workflow_id:
                if metadata['status'] in ('Succeeded', 'Failed'):
                    is_workflow_done = True

    # also tests the two callback functions
    # (bind `th` before the try block so that the finally clause below can
    # always stop and join the server thread)
    th = c.server(
        server_port=server_port,
        backend_conf=str(backend_conf),
        embed_subworkflow=True,
        fileobj_stdout=fileobj_stdout,
        on_server_start=on_server_start,
        on_status_change=on_status_change,
        cwd=str(tmp_path),
    )
    try:
        assert th.status is None

        # wait until server is ready to take submissions
        t_start = time.time()
        while not is_server_started:
            time.sleep(1)
            if time.time() - t_start > TIMEOUT_SERVER_SPIN_UP:
                raise TimeoutError('Timed out waiting for Cromwell server spin-up.')

        # another way of checking that the server has started
        assert th.status

        # make WDLs and imports
        wdl = tmp_path / 'main.wdl'
        make_directory_with_wdls(str(tmp_path))
        # zip subworkflows for later use
        p = WDLParser(str(wdl))
        imports = p.zip_subworkflows(str(tmp_path / 'imports.zip'))

        cra = CromwellRestAPI(hostname='localhost', port=server_port)
        r = cra.submit(
            source=str(wdl), dependencies=imports, inputs=str(tmp_path / 'inputs.json')
        )
        workflow_id = r['id']

        t_start = time.time()
        while not is_workflow_done:
            time.sleep(1)
            print('polling: ', workflow_id, is_workflow_done)
            if time.time() - t_start > TIMEOUT_SERVER_RUN_WORKFLOW:
                raise TimeoutError('Timed out waiting for the workflow to finish.')

        metadata = cra.get_metadata([workflow_id], embed_subworkflow=True)[0]

        # check that the metadata JSON was written to the workflow's root directory.
        root_dir = metadata['workflowRoot']
        metadata_file = os.path.join(root_dir, 'metadata.json')
        assert os.path.exists(metadata_file)

        # check if subworkflow is embedded.
        with open(metadata_file) as fp:
            metadata_from_file = json.loads(fp.read())
        assert metadata == metadata_from_file

    finally:
        th.stop()
        th.join()
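The spin-up and completion waits above share one polling pattern. A small generic helper (hypothetical, not part of the test suite) makes the pattern explicit:

def wait_for(predicate, timeout, interval=1.0, what='condition'):
    """Poll predicate() every `interval` seconds until it returns True,
    raising TimeoutError after `timeout` seconds."""
    t_start = time.time()
    while not predicate():
        time.sleep(interval)
        if time.time() - t_start > timeout:
            raise TimeoutError('Timed out waiting for {}.'.format(what))

# equivalent to the loops above:
# wait_for(lambda: is_server_started, TIMEOUT_SERVER_SPIN_UP, what='server spin-up')
# wait_for(lambda: is_workflow_done, TIMEOUT_SERVER_RUN_WORKFLOW, what='workflow completion')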
Example #6
def test_all(tmp_path, cromwell, womtool):
    """Test Cromwell.server() method, which returns a Thread object.
    """
    server_port = 8010
    fileobj_stdout = sys.stdout
    test_label = 'test_label'

    c = Cromwell(cromwell=cromwell, womtool=womtool)

    o_dir = tmp_path / 'output'
    o_dir.mkdir()

    labels_file = CaperLabels().create_file(directory=str(tmp_path),
                                            str_label=test_label)

    is_server_started = False

    def on_server_start():
        nonlocal is_server_started
        is_server_started = True

    workflow_id = None
    is_workflow_done = False

    def on_status_change(metadata):
        nonlocal workflow_id
        nonlocal is_workflow_done

        if metadata:
            if metadata['id'] == workflow_id:
                if metadata['status'] in ('Succeeded', 'Failed'):
                    is_workflow_done = True

    # also tests the two callback functions
    th = c.server(
        server_port=server_port,
        embed_subworkflow=True,
        fileobj_stdout=fileobj_stdout,
        on_server_start=on_server_start,
        on_status_change=on_status_change,
        cwd=str(tmp_path),
    )
    try:
        assert th.status is None

        # wait until server is ready to take submissions
        t_start = time.time()
        while not is_server_started:
            time.sleep(1)
            if time.time() - t_start > TIMEOUT_SERVER_SPIN_UP:
                raise TimeoutError('Timed out waiting for Cromwell server spin-up.')

        # another way of checking that the server has started
        assert th.status

        # make WDLs and imports
        wdl = tmp_path / 'main.wdl'
        make_directory_with_wdls(str(tmp_path))
        # zip subworkflows for later use
        p = WDLParser(str(wdl))
        imports = p.zip_subworkflows(str(tmp_path / 'imports.zip'))

        cra = CromwellRestAPI(hostname='localhost', port=server_port)
        # no workflows submitted yet
        assert not cra.find(workflow_ids=['*'])

        # put a hold on a workflow when submitting
        r = cra.submit(
            source=str(wdl),
            dependencies=imports,
            inputs=str(tmp_path / 'inputs.json'),
            labels=labels_file,
            on_hold=True,
        )
        workflow_id = r['id']
        time.sleep(10)
        # find by workflow ID
        workflow_by_id = cra.find(workflow_ids=[workflow_id])[0]
        # find by label
        workflow_by_label = cra.find(
            labels=[('caper-str-label', test_label)])[0]
        # find by workflow ID with wildcard *
        workflow_by_id_with_wildcard = cra.find(
            workflow_ids=[workflow_id[:-10] + '*'])[0]
        # find by label with wildcard ?
        workflow_by_label_with_wildcard = cra.find(
            labels=[('caper-str-label', test_label[:-1] + '?')])[0]

        assert workflow_by_label['id'] == workflow_id
        assert workflow_by_id['id'] == workflow_id
        assert workflow_by_id_with_wildcard['id'] == workflow_id
        assert workflow_by_label_with_wildcard['id'] == workflow_id
        assert workflow_by_id['status'] == 'On Hold'

        cra.release_hold([workflow_id])
        time.sleep(3)

        assert cra.get_label(workflow_id, 'caper-str-label') == test_label
        assert cra.get_labels(workflow_id)['caper-str-label'] == test_label

        # abort it
        assert cra.find([workflow_id])[0]['status'] in ('Submitted', 'On Hold')
        cra.abort([workflow_id])
        time.sleep(5)
        assert cra.find([workflow_id])[0]['status'] == 'Aborted'

        # submit another workflow
        r = cra.submit(
            source=str(wdl),
            dependencies=imports,
            inputs=str(tmp_path / 'inputs.json'),
            on_hold=False,
        )
        is_workflow_done = False
        workflow_id = r['id']
        time.sleep(5)

        t_start = time.time()
        while not is_workflow_done:
            time.sleep(1)
            print('polling: ', workflow_id, is_workflow_done)
            if time.time() - t_start > TIMEOUT_SERVER_RUN_WORKFLOW:
                raise TimeoutError('Timed out waiting for the workflow to finish.')

        metadata = cra.get_metadata([workflow_id], embed_subworkflow=True)[0]
        metadata_wo_sub = cra.get_metadata(
            [workflow_id], embed_subworkflow=False)[0]

        assert 'subWorkflowMetadata' not in (
            metadata_wo_sub['calls']['main.sub'][0])
        subworkflow = metadata['calls']['main.sub'][0]
        assert 'subWorkflowMetadata' in subworkflow
        assert 'subWorkflowMetadata' in (
            subworkflow['subWorkflowMetadata']['calls']['sub.sub_sub'][0])

        # check server's properties before closing it
        assert cra.get_default_backend() == 'Local'
        assert cra.get_backends()['supportedBackends'] == ['Local']

    finally:
        th.stop()
        th.join()
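The assertions on subWorkflowMetadata above show that embed_subworkflow=True replaces each subworkflow call's metadata reference with the subworkflow's full metadata, recursively. Inside the test one could drill down to the innermost call like this (call names taken from the earlier examples):

sub_meta = metadata['calls']['main.sub'][0]['subWorkflowMetadata']
sub_sub_meta = sub_meta['calls']['sub.sub_sub'][0]['subWorkflowMetadata']
assert sub_sub_meta['calls']['sub_sub.t3'][0]['executionStatus'] == 'Done'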