Example No. 1
def success_run(database: DB, fs: FileStore,
                basedir: str) -> Tuple[str, str, str, str]:
    """Create a successful run with two result files:

        - A.json
        - run/results/B.json

    Returns the identifiers of the created workflow, group, run, and user.
    """
    # Setup temporary run folder.
    tmprundir = os.path.join(basedir, 'tmprun')
    tmpresultsdir = os.path.join(tmprundir, 'run', 'results')
    os.makedirs(tmprundir)
    os.makedirs(tmpresultsdir)
    f1 = os.path.join(tmprundir, 'A.json')
    util.write_object(f1, {'A': 1})
    f2 = os.path.join(tmpresultsdir, 'B.json')
    util.write_object(f2, {'B': 1})
    with database.session() as session:
        user_id = create_user(session, active=True)
        workflow_id = create_workflow(session)
        group_id = create_group(session, workflow_id, users=[user_id])
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        run = runs.create_run(group=groups.get_group(group_id))
        run_id = run.run_id
        state = run.state()
        runs.update_run(
            run_id,
            state.start().success(files=['A.json', 'run/results/B.json']),
            rundir=tmprundir)
    assert not os.path.exists(tmprundir)
    return workflow_id, group_id, run_id, user_id
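
The helper above drives the run through flowserv's state objects. A minimal sketch of that transition chain in isolation, assuming the state module alias st and a no-argument st.StatePending() constructor as in Example No. 17:

state = st.StatePending()    # fresh runs start out pending
running = state.start()      # pending -> running
done = running.success(files=['A.json', 'run/results/B.json'])
assert done.is_success() and not done.is_active()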
Example No. 2
def success_run(database: DB, fs: StorageVolume,
                basedir: str) -> Tuple[str, str, str, str]:
    """Create a successful run with two result files:

        - A.json
        - results/B.json

    Returns the identifiers of the created workflow, group, run, and user.
    """
    # Setup temporary run folder.
    runfs = FileSystemStorage(basedir=os.path.join(basedir, 'tmprun'))
    runfs.store(file=io_file({'A': 1}), dst='A.json')
    runfs.store(file=io_file({'B': 1}), dst=util.join('results', 'B.json'))
    with database.session() as session:
        user_id = create_user(session, active=True)
        workflow_id = create_workflow(session)
        group_id = create_group(session, workflow_id, users=[user_id])
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        run = runs.create_run(group=groups.get_group(group_id))
        run_id = run.run_id
        state = run.state()
        runs.update_run(
            run_id=run_id,
            state=state.start().success(files=['A.json', 'results/B.json']),
            runstore=runfs)
    return workflow_id, group_id, run_id, user_id
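
Compared with Example No. 1, the result files here are staged on a StorageVolume and handed to update_run via runstore= instead of a plain rundir path. A minimal staging sketch under that assumption (the basedir below is hypothetical):

runfs = FileSystemStorage(basedir='/tmp/demo-run')
runfs.store(file=io_file({'B': 1}), dst=util.join('results', 'B.json'))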
Example No. 3
def run_postproc_workflow(postproc_spec: Dict, workflow: WorkflowObject,
                          ranking: List, runs: List, run_manager: RunManager,
                          backend: WorkflowController):
    """Run post-processing workflow for a workflow template."""
    workflow_spec = postproc_spec.get('workflow')
    pp_inputs = postproc_spec.get('inputs', {})
    pp_files = pp_inputs.get('files', [])
    # Prepare a temporary directory with result files for all
    # runs in the ranking. The created directory is the only
    # run argument.
    strace = None
    try:
        datadir = postutil.prepare_postproc_data(input_files=pp_files,
                                                 ranking=ranking,
                                                 run_manager=run_manager)
        dst = pp_inputs.get('runs', postbase.RUNS_DIR)
        run_args = {
            postbase.PARA_RUNS: InputFile(source=FSFile(datadir), target=dst)
        }
        arg_list = [
            serialize_arg(postbase.PARA_RUNS, serialize_fh(datadir, dst))
        ]
    except Exception as ex:
        logging.error(ex)
        strace = util.stacktrace(ex)
        run_args = dict()
        arg_list = []
    # Create a new run for the workflow. The identifier for the run group is
    # None.
    run = run_manager.create_run(workflow=workflow,
                                 arguments=arg_list,
                                 runs=runs)
    if strace is not None:
        # If there were data preparation errors set the created run into an
        # error state and return.
        run_manager.update_run(run_id=run.run_id,
                               state=run.state().error(messages=strace))
    else:
        # Execute the post-processing workflow asynchronously if
        # there were no data preparation errors.
        postproc_state, rundir = backend.exec_workflow(
            run=run,
            template=WorkflowTemplate(workflow_spec=workflow_spec,
                                      parameters=postbase.PARAMETERS),
            arguments=run_args,
            config=workflow.engine_config)
        # Update the post-processing workflow run state if it is
        # no longer pending for execution.
        if not postproc_state.is_pending():
            run_manager.update_run(run_id=run.run_id,
                                   state=postproc_state,
                                   rundir=rundir)
        # Remove the temporary input folder
        shutil.rmtree(datadir)
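
The data-preparation guard in this function follows a reusable pattern: capture the stack trace on failure and attach the error to the run record instead of raising. A standalone sketch (util.stacktrace as used above; the failing call is a hypothetical stand-in):

strace = None
try:
    datadir = prepare_inputs()  # hypothetical stand-in for prepare_postproc_data
except Exception as ex:
    logging.error(ex)
    strace = util.stacktrace(ex)
# If strace is set, the run is put into an error state instead of being executed.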
Example No. 4
def test_cancel_run(fscls, database, tmpdir):
    """Test setting run state to canceled."""
    # -- Setup ----------------------------------------------------------------
    fs = fscls(env=Config().basedir(tmpdir))
    with database.session() as session:
        user_id = model.create_user(session, active=True)
        workflow_id = model.create_workflow(session)
        group_id = model.create_group(session, workflow_id, users=[user_id])
    # -- Test set run to canceled state ---------------------------------------
    with database.session() as session:
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        run = runs.create_run(group=groups.get_group(group_id))
        run_id = run.run_id
        state = run.state()
        runs.update_run(run_id=run_id, state=state.cancel())
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        run = runs.get_run(run_id)
        state = run.state()
        assert not state.is_active()
        assert not state.is_pending()
        assert not state.is_running()
        assert state.is_canceled()
        assert not state.is_error()
        assert not state.is_success()
        assert len(state.messages) == 1
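
The single message asserted at the end comes from the default message that the cancel transition attaches. A sketch of that behavior as exercised here, assuming the state module alias st as in Example No. 17 (the exact message text is not shown in this excerpt):

state = st.StatePending().cancel()
assert state.is_canceled()
assert len(state.messages) == 1  # default cancellation message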
Example No. 5
def test_multi_success_runs(database, tmpdir):
    """Test rankings for workflows where each group has multiple successful
    runs.
    """
    # -- Setup ----------------------------------------------------------------
    # Create database with two workflows and four groups each. Each group has
    # three active runs. Then set all runs for the first workflow into success
    # state. Increase a counter for the avg_len value as we update runs.
    workflows = init(database, tmpdir)
    fs = FileSystemStorage(basedir=tmpdir)
    workflow_id, groups = workflows[0]
    count = 0
    asc_order = list()
    count_order = list()
    desc_order = list()
    with database.session() as session:
        for group_id, runs in groups:
            for i, run_id in enumerate(runs):
                tmprundir = os.path.join(tmpdir, 'runs', run_id)
                run_success(run_manager=RunManager(session=session, fs=fs),
                            run_id=run_id,
                            store=fs.get_store_for_folder(key=tmprundir),
                            values={
                                'count': count,
                                'avg': 1.0,
                                'name': run_id
                            })
                count += 1
                if i == 0:
                    asc_order.append(run_id)
                count_order.append(run_id)
            desc_order.append(run_id)
    # -- Test get ranking with one result per group ---------------------------
    with database.session() as session:
        wfrepo = WorkflowManager(session=session, fs=fs)
        rankings = RankingManager(session=session)
        wf = wfrepo.get_workflow(workflow_id)
        ranking = rankings.get_ranking(wf)
        rank_order = [e.run_id for e in ranking]
        assert rank_order == desc_order[::-1]
        ranking = rankings.get_ranking(
            wf, order_by=[SortColumn(column_id='count', sort_desc=False)])
        rank_order = [e.run_id for e in ranking]
        assert rank_order == asc_order
        # Run execution time
        assert isinstance(ranking[0].exectime(), timedelta)
    # -- Test get ranking with all results per group --------------------------
    with database.session() as session:
        wfrepo = WorkflowManager(session=session, fs=fs)
        rankings = RankingManager(session=session)
        wf = wfrepo.get_workflow(workflow_id)
        ranking = rankings.get_ranking(wf, include_all=True)
        rank_order = [e.run_id for e in ranking]
        assert rank_order == count_order[::-1]
        ranking = rankings.get_ranking(
            wf,
            order_by=[SortColumn(column_id='count', sort_desc=False)],
            include_all=True)
        rank_order = [e.run_id for e in ranking]
        assert rank_order == count_order
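
Why rank_order equals desc_order[::-1] in the first assertion: with one result per group, each group is represented by its latest (highest 'count') run, and the default ordering is descending on the result value. A toy illustration of that bookkeeping, independent of flowserv:

# Best 'count' per group, assigned in group creation order.
best_per_group = {'g1': 2, 'g2': 5, 'g3': 8}
ranking = sorted(best_per_group, key=best_per_group.get, reverse=True)
assert ranking == ['g3', 'g2', 'g1']  # reverse of creation order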
Example No. 6
def test_error_run(fscls, database, tmpdir):
    """Test setting run state to error."""
    # -- Setup ----------------------------------------------------------------
    fs = fscls(env=Config().basedir(tmpdir))
    messages = ['There', 'were', 'many errors']
    _, _, run_id = error_run(database, fs, messages)
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        run = runs.get_run(run_id)
        state = run.state()
        assert not state.is_active()
        assert not state.is_pending()
        assert not state.is_running()
        assert not state.is_canceled()
        assert state.is_error()
        assert not state.is_success()
        assert state.messages == messages
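
The error state asserted above can be produced directly from a pending state, as the error_run helper in Example No. 13 does. A minimal sketch, assuming the state module alias st as in Example No. 17:

messages = ['There', 'were', 'many errors']
state = st.StatePending().error(messages)
assert state.is_error() and state.messages == messages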
Example No. 7
def test_success_run(database, tmpdir):
    """Test life cycle for a successful run."""
    # -- Setup ----------------------------------------------------------------
    fs = FileSystemStorage(basedir=tmpdir)
    workflow_id, _, run_id, _ = success_run(database, fs, tmpdir)
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        run = runs.get_run(run_id)
        state = run.state()
        assert not state.is_active()
        assert not state.is_pending()
        assert not state.is_running()
        assert not state.is_canceled()
        assert not state.is_error()
        assert state.is_success()
        assert len(state.files) == 2
        with runs.get_runfile(run_id=run_id, key='A.json').open() as f:
            assert json.load(f) == {'A': 1}
        with runs.get_runfile(run_id=run_id, key='results/B.json').open() as f:
            assert json.load(f) == {'B': 1}
Example No. 8
def test_run_parameters(database, tmpdir):
    """Test creating run with template arguments."""
    # -- Setup ----------------------------------------------------------------
    fs = FileSystemStorage(basedir=tmpdir)
    with database.session() as session:
        user_id = model.create_user(session, active=True)
        workflow_id = model.create_workflow(session)
        group_id = model.create_group(session, workflow_id, users=[user_id])
    # Prepare run arguments
    filename = os.path.join(str(tmpdir), 'results.json')
    util.write_object(filename=filename, obj={'A': 1})
    arguments = [{'id': 'A', 'value': 10}, {'id': 'B', 'value': True}]
    # -- Test create run with arguments ---------------------------------------
    with database.session() as session:
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        run = runs.create_run(
            group=groups.get_group(group_id),
            arguments=arguments
        )
        run_id = run.run_id
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        run = runs.get_run(run_id)
        assert run.arguments == arguments
Example No. 9
def test_delete_run(fscls, database, tmpdir):
    """Test deleting a run."""
    # -- Setup ----------------------------------------------------------------
    fs = fscls(env=Config().basedir(tmpdir))
    _, _, run_id, _ = success_run(database, fs, tmpdir)
    # -- Test delete run ------------------------------------------------------
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        runs.delete_run(run_id)
    # -- Error cases ----------------------------------------------------------
    with database.session() as session:
        # Error when deleting an unknown run.
        runs = RunManager(session=session, fs=fs)
        with pytest.raises(err.UnknownRunError):
            runs.delete_run(run_id)
Example No. 10
def test_success_run(fscls, database, tmpdir):
    """Test life cycle for a successful run."""
    # -- Setup ----------------------------------------------------------------
    fs = fscls(env=Config().basedir(tmpdir))
    workflow_id, _, run_id, _ = success_run(database, fs, tmpdir)
    rundir = fs.run_basedir(workflow_id=workflow_id, run_id=run_id)
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        run = runs.get_run(run_id)
        state = run.state()
        assert not state.is_active()
        assert not state.is_pending()
        assert not state.is_running()
        assert not state.is_canceled()
        assert not state.is_error()
        assert state.is_success()
        assert len(state.files) == 2
        key = run.get_file(by_key='A.json').key
        with fs.load_file(key=os.path.join(rundir, key)).open() as f:
            assert json.load(f) == {'A': 1}
        key = run.get_file(by_key='run/results/B.json').key
        with fs.load_file(key=os.path.join(rundir, key)).open() as f:
            assert json.load(f) == {'B': 1}
Example No. 11
def prepare_postproc_data(input_files: List[str], ranking: List[RunResult],
                          run_manager: RunManager, store: StorageVolume):
    """Create input files for post-processing steps for a given set of runs.

    Creates files for a post-processing run in a given base directory on a
    storage volume. The resulting directory contains files for each run in the
    given ranking. For each run, a sub-folder named after the run identifier is
    created. Each folder contains copies of those result files of the run that
    are listed in the input files list. A file ``runs.json`` in the base
    directory lists the runs in the ranking together with their group names.

    Parameters
    ----------
    input_files: list(string)
        List of identifiers for benchmark run output files that are copied
        into the input directory for each submission.
    ranking: list(flowserv.model.ranking.RunResult)
        List of runs in the current result ranking
    run_manager: flowserv.model.run.RunManager
        Manager for workflow runs
    store: flowserv.volume.base.StorageVolume
        Target storage volume where the created post-processing files are
        stored.
    """
    # Collect information about runs and their result files.
    runs = list()
    for entry in ranking:
        run_id = entry.run_id
        group_name = entry.group_name
        # Create a sub-folder for the run in the output directory. Then copy
        # all given files into the created directory.
        rundir = run_id
        for key in input_files:
            # Copy run file to target file.
            file = run_manager.get_runfile(run_id=run_id, key=key)
            dst = util.join(rundir, key)
            store.store(file=file, dst=dst)
        runs.append({
            LABEL_ID: run_id,
            LABEL_NAME: group_name,
            LABEL_FILES: input_files
        })
    store.store(file=io_file(runs), dst=RUNS_FILE)
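
For a concrete picture: with two ranked runs and input_files=['results/B.json'], the target volume would end up with a layout along these lines (run identifiers and group names are hypothetical; the exact keys in runs.json come from the LABEL_* constants above):

<store basedir>/
    run-0001/results/B.json    # copy of the first run's result file
    run-0002/results/B.json    # copy of the second run's result file
    runs.json                  # one entry per run: id, group name, file list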
Example No. 12
def test_create_run_errors(fscls, database, tmpdir):
    """Test error cases for create_run parameter combinations."""
    # -- Setup ----------------------------------------------------------------
    fs = fscls(env=Config().basedir(tmpdir))
    with database.session() as session:
        user_id = model.create_user(session, active=True)
        workflow_id = model.create_workflow(session)
        group_id = model.create_group(session, workflow_id, users=[user_id])
    # -- Test create_run with invalid arguments -------------------------------
    with database.session() as session:
        wfrepo = WorkflowManager(session=session, fs=fs)
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        workflow = wfrepo.get_workflow(workflow_id)
        group = groups.get_group(group_id)
        with pytest.raises(ValueError):
            runs.create_run()
        with pytest.raises(ValueError):
            runs.create_run(workflow=workflow, group=group)
        with pytest.raises(ValueError):
            runs.create_run(group=group, runs=['A'])
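
The contract these errors exercise: create_run expects exactly one of group= or workflow=, and runs= is only meaningful for post-processing runs created with workflow= (as in Examples No. 3 and No. 16). A sketch of the two valid call shapes, using the names from this test:

runs.create_run(group=group)                    # run for a workflow group
runs.create_run(workflow=workflow, runs=['A'])  # post-processing run over run ids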
Example No. 13
def error_run(database, fs, messages):
    """Create a run that is in error state. Returns the identifiers of the
    created workflow, group, and run.
    """
    with database.session() as session:
    with database.session() as session:
        user_id = model.create_user(session, active=True)
        workflow_id = model.create_workflow(session)
        group_id = model.create_group(session, workflow_id, users=[user_id])
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        run = runs.create_run(group=groups.get_group(group_id))
        run_id = run.run_id
        state = run.state()
        runs.update_run(run_id=run_id, state=state)
        runs.update_run(run_id=run_id, state=state.error(messages))
    return workflow_id, group_id, run_id
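
Usage as in Example No. 6:

messages = ['There', 'were', 'many errors']
_, _, run_id = error_run(database, fs, messages)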
Example No. 14
def __enter__(self) -> API:
    """Create a new instance of the local API when the context manager is
    entered.
    """
    # Open a new database session.
    self._session = self._db.session()
    session = self._session.open()
    # Shortcuts for local variables.
    env = self._env
    fs = self._fs
    engine = self._engine
    # Start by creating the authorization component and setting the
    # identifier for an authenticated user.
    user_id = self._user_id
    username = None
    if env[AUTH] == config.AUTH_OPEN:
        auth = OpenAccessAuth(session)
        user_id = config.DEFAULT_USER if user_id is None else user_id
    else:
        auth = DefaultAuthPolicy(session)
        access_token = self._access_token
        if access_token is None:
            access_token = env.get(ACCESS_TOKEN)
        if access_token and user_id is None:
            # If an access token is given we retrieve the user that is
            # associated with the token. Authentication may raise an error.
            # Here, we ignore that error since the token may be an outdated
            # token that is stored in the environment.
            try:
                user = auth.authenticate(access_token)
                # Set the user name for the authenticated user (to be
                # included in the service descriptor).
                username = user.name
                user_id = user.user_id
            except err.UnauthenticatedAccessError:
                pass
    # Create the individual components of the API.
    ttl = env.get(config.FLOWSERV_AUTH_LOGINTTL, config.DEFAULT_LOGINTTL)
    user_manager = UserManager(session=session, token_timeout=ttl)
    run_manager = RunManager(session=session, fs=fs)
    group_manager = WorkflowGroupManager(session=session,
                                         fs=fs,
                                         users=user_manager)
    ranking_manager = RankingManager(session=session)
    workflow_repo = WorkflowManager(session=session, fs=fs)
    return API(
        service=ServiceDescriptor.from_config(env=env, username=username),
        workflow_service=LocalWorkflowService(
            workflow_repo=workflow_repo,
            ranking_manager=ranking_manager,
            group_manager=group_manager,
            run_manager=run_manager,
            user_id=user_id),
        group_service=LocalWorkflowGroupService(
            group_manager=group_manager,
            workflow_repo=workflow_repo,
            backend=engine,
            run_manager=run_manager,
            auth=auth,
            user_id=user_id),
        upload_service=LocalUploadFileService(group_manager=group_manager,
                                              auth=auth,
                                              user_id=user_id),
        run_service=LocalRunService(run_manager=run_manager,
                                    group_manager=group_manager,
                                    ranking_manager=ranking_manager,
                                    backend=engine,
                                    auth=auth,
                                    user_id=user_id),
        user_service=LocalUserService(manager=user_manager, auth=auth))
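
A hedged usage sketch for the context manager this method belongs to; the factory name service() is an assumption, since the surrounding class and its constructor are not part of this excerpt:

with service() as api:    # __enter__ opens a database session and builds the API
    ...                   # use the API components while the session is open
# A matching __exit__ (not shown here) is expected to close the session again.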
Example No. 15
def test_run_serialization(database, tmpdir):
    """Test serialization of run handles and run listings."""
    config = Config().basedir(tmpdir)
    view = RunSerializer()
    fs = FileSystemStore(config)
    # Setup temporary run folder.
    tmprundir = os.path.join(tmpdir, 'tmprun')
    tmpresultsdir = os.path.join(tmprundir, 'run', 'results')
    os.makedirs(tmprundir)
    os.makedirs(tmpresultsdir)
    f1 = os.path.join(tmprundir, 'A.json')
    util.write_object(f1, {'A': 1})
    f2 = os.path.join(tmpresultsdir, 'B.json')
    util.write_object(f2, {'B': 1})
    # Create runs.
    with database.session() as session:
        user_id = model.create_user(session, active=True)
        workflow_id = model.create_workflow(session)
        group_id = model.create_group(session, workflow_id, users=[user_id])
        # Create successful run.
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        run = runs.create_run(group=groups.get_group(group_id))
        run_id = run.run_id
        state = run.state()
        runs.update_run(
            run_id,
            state.start().success(files=['A.json', 'run/results/B.json']),
            rundir=tmprundir)
        run = runs.get_run(run_id)
        doc = view.run_handle(run)
        validator('RunHandle').validate(doc)
        # Create error run.
        run = runs.create_run(group=groups.get_group(group_id))
        run_id = run.run_id
        state = run.state()
        runs.update_run(run_id=run_id, state=state)
        messages = ['There', 'were', 'many errors']
        runs.update_run(run_id=run_id, state=state.error(messages))
        run = runs.get_run(run_id)
        doc = view.run_handle(run)
        validator('RunHandle').validate(doc)
        # Validate run listing.
        doc = view.run_listing(runs=runs.list_runs(group_id))
        validator('RunListing').validate(doc)
        assert len(doc[labels.RUN_LIST]) == 2
Example No. 16
def run_postproc_workflow(workflow: WorkflowObject, ranking: List[RunResult],
                          keys: List[str], run_manager: RunManager,
                          tmpstore: StorageVolume, staticfs: StorageVolume,
                          backend: WorkflowController):
    """Run post-processing workflow for a workflow template.

    Parameters
    ----------
    workflow: flowserv.model.base.WorkflowObject
        Handle for the workflow that triggered the post-processing workflow run.
    ranking: list(flowserv.model.ranking.RunResult)
        List of runs in the current result ranking.
    keys: list(string)
        Sorted list of run identifiers for runs in the ranking.
    run_manager: flowserv.model.run.RunManager
        Manager for workflow runs
    tmpstore: flowserv.volume.base.StorageVolume
        Temporary storage volume where the created post-processing files are
        stored. This volume will be erased after the workflow is started.
    staticfs: flowserv.volume.base.StorageVolume
        Storage volume that contains the static files from the workflow
        template.
    backend: flowserv.controller.base.WorkflowController
        Backend that is used to execute the post-processing workflow.
    """
    # Get workflow specification and the list of input files from the
    # post-processing statement.
    postproc_spec = workflow.postproc_spec
    workflow_spec = postproc_spec.get('workflow')
    pp_inputs = postproc_spec.get('inputs', {})
    pp_files = pp_inputs.get('files', [])
    # Prepare a temporary directory with result files for all
    # runs in the ranking. The created directory is the only
    # run argument.
    strace = None
    try:
        prepare_postproc_data(input_files=pp_files,
                              ranking=ranking,
                              run_manager=run_manager,
                              store=tmpstore)
        dst = pp_inputs.get('runs', RUNS_DIR)
        run_args = {PARA_RUNS: InputDirectory(store=tmpstore, target=RUNS_DIR)}
        arg_list = [serialize_arg(PARA_RUNS, dst)]
    except Exception as ex:
        logging.error(ex, exc_info=True)
        strace = util.stacktrace(ex)
        run_args = dict()
        arg_list = []
    # Create a new run for the workflow. The identifier for the run group is
    # None.
    run = run_manager.create_run(workflow=workflow,
                                 arguments=arg_list,
                                 runs=keys)
    if strace is not None:
        # If there were data preparation errors set the created run into an
        # error state and return.
        run_manager.update_run(run_id=run.run_id,
                               state=run.state().error(messages=strace))
    else:
        # Execute the post-processing workflow asynchronously if
        # there were no data preparation errors.
        try:
            postproc_state, runstore = backend.exec_workflow(
                run=run,
                template=WorkflowTemplate(workflow_spec=workflow_spec,
                                          parameters=PARAMETERS),
                arguments=run_args,
                staticfs=staticfs,
                config=workflow.engine_config)
        except Exception as ex:
            # Make sure to catch exceptions and set the run into an error state.
            postproc_state = run.state().error(messages=util.stacktrace(ex))
            runstore = None
        # Update the post-processing workflow run state if it is
        # no longer pending for execution.
        if not postproc_state.is_pending():
            run_manager.update_run(run_id=run.run_id,
                                   state=postproc_state,
                                   runstore=runstore)
        # Erase the temporary storage volume.
        tmpstore.erase()
Example No. 17
def test_invalid_state_transitions(fscls, database, tmpdir):
    """Test error cases for invalid state transitions."""
    # -- Setup ----------------------------------------------------------------
    fs = fscls(env=Config().basedir(tmpdir))
    with database.session() as session:
        user_id = model.create_user(session, active=True)
        workflow_id = model.create_workflow(session)
        group_id = model.create_group(session, workflow_id, users=[user_id])
    # -- Test set active run to pending ---------------------------------------
    with database.session() as session:
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        run = runs.create_run(group=groups.get_group(group_id))
        run_id = run.run_id
        state = run.state()
        runs.update_run(run_id=run_id, state=state.start())
        with pytest.raises(err.ConstraintViolationError):
            runs.update_run(run_id=run_id, state=st.StatePending())
    # Cancel run
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        runs.update_run(run_id=run_id, state=state.cancel())
    # -- Test cannot set run to any of the inactive states --------------------
    with database.session() as session:
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        assert runs.update_run(run_id=run_id, state=state.cancel()) is None
        with pytest.raises(err.ConstraintViolationError):
            runs.update_run(run_id=run_id, state=state.error())
        with pytest.raises(err.ConstraintViolationError):
            runs.update_run(run_id=run_id, state=state.success())
Example No. 18
def test_list_runs(fscls, database, tmpdir):
    """Test retrieving a list of run descriptors."""
    # -- Setup ----------------------------------------------------------------
    #
    # Create two runs: one in running state and one in error state.
    fs = fscls(env=Config().basedir(tmpdir))
    with database.session() as session:
        user_id = model.create_user(session, active=True)
        workflow_id = model.create_workflow(session)
        group_id = model.create_group(session, workflow_id, users=[user_id])
        groups = WorkflowGroupManager(session=session, fs=fs)
        runs = RunManager(session=session, fs=fs)
        group = groups.get_group(group_id)
        # Run 1 in running state
        r = runs.create_run(group=group)
        run_1 = r.run_id
        runs.update_run(run_id=run_1, state=r.state().start())
        # Run 2 in error state
        r = runs.create_run(group=group)
        run_2 = r.run_id
        runs.update_run(run_id=run_2, state=r.state().error())
    # -- Test get listing -----------------------------------------------------
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        run_index = dict()
        for run in runs.list_runs(group_id):
            run_index[run.run_id] = run
        assert len(run_index) == 2
        assert run_index[run_1].state().is_running()
        assert run_index[run_2].state().is_error()
    # -- Test polling runs ----------------------------------------------------
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        assert len(runs.list_runs(group_id)) == 2
        assert len(runs.list_runs(group_id, state=st.STATE_ERROR)) == 1
        assert len(runs.list_runs(group_id, state=st.STATE_SUCCESS)) == 0
Example No. 19
def test_obsolete_runs(fscls, database, tmpdir):
    """Test deleting runs that were created before a given date."""
    # -- Setup ----------------------------------------------------------------
    fs = fscls(env=Config().basedir(tmpdir))
    # Create two runs (one SUCCESS and one ERROR) before a timestamp t1
    _, _, run_1, _ = success_run(database, fs, tmpdir)
    _, _, run_2 = error_run(database, fs, ['There were errors'])
    time.sleep(1)
    t1 = util.utc_now()
    # Create another SUCCESS run after timestamp t1
    _, _, run_3, _ = success_run(database, fs, tmpdir)
    # -- Test delete run with state filter ------------------------------------
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        assert runs.delete_obsolete_runs(date=t1, state=st.STATE_ERROR) == 1
        # After deleting the error run, the two success runs still exist.
        runs.get_run(run_id=run_1)
        with pytest.raises(err.UnknownRunError):
            runs.get_run(run_id=run_2)
        runs.get_run(run_id=run_3)
    # -- Test delete all runs prior to a given date ---------------------------
    with database.session() as session:
        runs = RunManager(session=session, fs=fs)
        assert runs.delete_obsolete_runs(date=t1) == 1
        # After deleting the older success run, only the most recent run still exists.
        with pytest.raises(err.UnknownRunError):
            runs.get_run(run_id=run_1)
        runs.get_run(run_id=run_3)
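
In summary: with a state filter, delete_obsolete_runs removes only matching runs created before the given date; without one, every older run goes. Restated against this test's timeline (same names as above):

# Before t1: run_1 (SUCCESS), run_2 (ERROR). After t1: run_3 (SUCCESS).
runs.delete_obsolete_runs(date=t1, state=st.STATE_ERROR)  # removes run_2 only
runs.delete_obsolete_runs(date=t1)                        # removes run_1; run_3 survives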