Example #1
0
async def test_exec_rp(pilot_description, rp_venv, cleandir):
    """Test that we are able to launch and shut down a RP dispatched execution session.

    Submits two trivial ``/bin/echo`` commands through the RADICAL Pilot
    dispatcher — one added to the workflow before entering the dispatching
    scope, one inside it — then verifies both completed and that their
    staged output files exist on disk.
    """
    # Hopefully, this requirement is temporary.
    if rp_venv is None:
        pytest.skip('This test requires a user-provided static RP venv.')

    # Remember the active context so we can assert it is restored at the end.
    original_context = scalems.context.get_context()
    loop = asyncio.get_event_loop()
    # Enable verbose asyncio diagnostics for the duration of the test.
    loop.set_debug(True)
    logging.getLogger("asyncio").setLevel(logging.DEBUG)

    # Configure module.
    # Pilot placement comes from the test fixture; the access schema is
    # forwarded under the RP 'PilotDescription' key.
    params = scalems.radical.runtime.Configuration(
        execution_target=pilot_description.resource,
        target_venv=rp_venv,
        rp_resource_params={
            'PilotDescription': {
                'access_schema': pilot_description.access_schema
            }
        })

    # Test RPDispatcher context
    manager = scalems.radical.workflow_manager(loop)

    # This sleep doesn't cost too much waiting, but seems to effectively work around
    # some sort of race condition as resources are freed when running the full test suite.
    time.sleep(10)
    # TODO: Try to find a better way to wait for previous resources to be released.

    with scalems.context.scope(manager):
        assert not loop.is_closed()
        # Enter the async context manager for the default dispatcher
        # cmd1 is added before dispatching begins; it should still be
        # executed once the dispatcher is active.
        cmd1 = scalems.executable(('/bin/echo', ))
        async with manager.dispatch(params=params):
            cmd2 = scalems.executable(('/bin/echo', 'hello', 'world'))
            # TODO: Clarify whether/how result() method should work in this scope.
            # TODO: Make scalems.wait(cmd) work as expected in this scope.
        # Leaving the dispatch scope implies completion of both tasks.
        assert cmd1.done() and cmd2.done()
        logger.debug(cmd1.result())
        logger.debug(cmd2.result())

    # TODO: Output typing.
    # Each result carries its task description; confirm that every declared
    # output-staging target was actually produced.
    out1: dict = cmd1.result()
    for output in out1['description']['output_staging']:
        assert os.path.exists(output['target'])
    out2: dict = cmd2.result()
    for output in out2['description']['output_staging']:
        assert os.path.exists(output['target'])
        if output['target'].endswith('stdout'):
            with open(output['target'], 'r') as fh:
                line = fh.readline()
                assert line.rstrip() == 'hello world'

    # Test active context scoping.
    assert scalems.context.get_context() is original_context
    assert not loop.is_closed()
Example #2
0
async def main(execution_context, words: typing.Iterable[str] = ()):
    """Run ``cat`` on a joined word list and return the path of its stdout file.

    Args:
        execution_context: workflow context providing an async ``dispatch()`` scope.
        words: strings joined with spaces and fed to the command's stdin.

    Returns:
        pathlib.Path of the command's captured stdout file (``outfile.txt``).
    """
    # Using `cat` instead of `echo` to demonstrate availability of stdin.
    # (The original wrapped this in `try: ... except Exception: raise`, which
    # only re-raised unchanged — removed as a no-op.)
    cmd = executable(('/bin/cat', '-'),
                     stdin=(' '.join(words) + '\n\n', ),
                     stdout='outfile.txt')
    assert isinstance(cmd, scalems.workflow.ItemView)
    # TODO: Future interface allows client to force resolution of dependencies.
    # cmd.result()
    # TODO: #82
    # scalems.run(cmd)
    # TODO: Remove Session.run() from public interface (use scalems.run())
    # await context.run()
    async with execution_context.dispatch():
        ...
    # WARNING: If task appears to have already run, it is not re-executed,
    # but we have not yet implemented task state restoration from existing output,
    # so running multiple times without cleaning up output directories will cause
    # scalems.context.InvalidStateError: Called result() on a Task that is not done.
    result = cmd.result()  # type: scalems.subprocess.SubprocessResult
    assert result.stdout.name == 'outfile.txt'
    path = Path(result.stdout)
    assert path.exists()
    return path
Example #3
0
async def test_exec_rp():
    """Dispatch a trivial command via RPWorkflowContext and check scope restoration."""
    # Remember the currently active context to verify it is restored afterwards.
    outer_context = scalems.context.get_context()

    # Turn on verbose asyncio diagnostics for this test.
    event_loop = asyncio.get_event_loop()
    event_loop.set_debug(True)
    logging.getLogger("asyncio").setLevel(logging.DEBUG)

    # Exercise the RPDispatcher-backed workflow context.
    rp_context = scalems.radical.RPWorkflowContext()
    with scalems.context.scope(rp_context):
        async with rp_context.dispatch():
            echo = scalems.executable(('/bin/echo', ))

    # Leaving the scope must restore the previously active context.
    assert scalems.context.get_context() is outer_context
Example #4
0
def make_input(simulation_parameters=None,
               topology=None,
               conformation=None,
               wrapper_name='gmx'):
    """Preprocess simulation input with ``gmx grompp`` and load the resulting TPR.

    Args:
        simulation_parameters: list of .mdp parameter files (default ``['md.mdp']``).
        topology: list of .top topology files (default ``['md.top']``).
        conformation: list of .gro coordinate files (default ``['md.gro']``).
        wrapper_name: name of the GROMACS wrapper executable.

    Returns:
        The object produced by ``gmxapi.read_tpr`` for the generated .tpr file.
    """
    # Use None sentinels instead of mutable list defaults so each call gets a
    # fresh list (the original shared one list object across all calls).
    if simulation_parameters is None:
        simulation_parameters = ['md.mdp']
    if topology is None:
        topology = ['md.top']
    if conformation is None:
        conformation = ['md.gro']

    preprocess = scalems.executable(
        (wrapper_name, 'grompp'),
        inputs={
            '-f': simulation_parameters,
            '-p': topology,
            '-c': conformation
        },
        outputs={'-o': scalems.OutputFile(suffix='.tpr')})

    return gmxapi.read_tpr(preprocess.output.files['-o'])
Example #5
0
def main():
    """Echo this script's command-line arguments, capturing stdout to a file."""
    # Forward every CLI argument after the script name to /bin/echo.
    echo_argv = ['/bin/echo'] + sys.argv[1:]
    cmd = scalems.executable(argv=echo_argv,
                             stdout='stdout')
Example #6
0
def collect_coordinates(trajectories):
    """Concatenate a collection of trajectories into a single .trr file.

    Args:
        trajectories: iterable of trajectory references to be gathered.

    Returns:
        Reference to the concatenated .trr output file.
    """
    # NOTE(review): the GROMACS tool is spelled `trjcat`; confirm whether
    # 'trajcat' is an intentional wrapper subcommand before changing it.
    allframes = scalems.executable(
        ('gmx', 'trajcat'),
        inputs={'-f': scalems.gather(trajectories)},
        outputs={'-o': scalems.OutputFile(suffix='.trr')})
    # Sibling examples access command outputs via `output.files[...]`;
    # the original `output.file[...]` here was an inconsistent (likely typo) accessor.
    return allframes.output.files['-o']
Example #7
0
def internal_to_pdb(structure):
    """Convert a structure file to PDB format with ``gmx editconf``."""
    # Declare a placeholder for the generated .pdb file.
    pdb_file = scalems.OutputFile(suffix='.pdb')
    editconf = scalems.executable(
        ('gmx', 'editconf'),
        inputs={'-f': structure},
        outputs={'-o': pdb_file})
    return editconf.output.files['-o']