def test_workflow_result_ranking(local_service, hello_world):
    """Test creating rankings from multiple workflow runs."""
    # -- Setup ----------------------------------------------------------------
    #
    # Create four groups for the 'Hello World' workflow with one successful
    # run each.
    with local_service() as api:
        user_1 = create_user(api)
        workflow_id = hello_world(api).workflow_id
    with local_service(user_id=user_1) as api:
        groups = create_ranking(api, workflow_id, 4)
    # -- Get ranking in decreasing order of avg_count -------------------------
    with local_service() as api:
        r = api.workflows().get_ranking(workflow_id=workflow_id,
                                        order_by=[SortColumn('avg_count')],
                                        include_all=False)
        serialize.validate_ranking(r)
        ranking = [e['group']['id'] for e in r['ranking']]
        assert groups == ranking[::-1]
    # -- Get ranking in decreasing order of max_len ---------------------------
    with local_service() as api:
        r = api.workflows().get_ranking(workflow_id=workflow_id,
                                        order_by=[SortColumn('max_len')],
                                        include_all=False)
        serialize.validate_ranking(r)
        ranking = [e['group']['id'] for e in r['ranking']]
        assert groups == ranking
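
# A minimal sketch (assumed structure) of the serialized ranking document that
# the assertions above unpack. Only the 'ranking' list and the nested group
# identifier are inferred from the accesses in the test; the real document
# returned by the service may carry additional fields.
example_ranking = {
    'ranking': [
        {'group': {'id': 'G-2'}},
        {'group': {'id': 'G-1'}}
    ]
}
assert [e['group']['id'] for e in example_ranking['ranking']] == ['G-2', 'G-1']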
Example #2
def test_get_ranking_remote(remote_service, mock_response):
    """Test getting leaderboard from remote service."""
    remote_service.workflows().get_ranking(workflow_id='0000')
    remote_service.workflows().get_ranking(
        workflow_id='0000',
        order_by=[SortColumn('A'), SortColumn('B', sort_desc=False)]
    )
def test_multi_success_runs(database, tmpdir):
    """Test rankings for workflows where each group has multiple successful
    runs.
    """
    # -- Setup ----------------------------------------------------------------
    # Create database with two workflows and four groups each. Each group has
    # three active runs. Then set all runs for the first workflow into success
    # state. Increase a counter for the 'count' value as we update runs.
    workflows = init(database, tmpdir)
    fs = FileSystemStorage(basedir=tmpdir)
    workflow_id, groups = workflows[0]
    count = 0
    asc_order = list()
    count_order = list()
    desc_order = list()
    with database.session() as session:
        for group_id, runs in groups:
            for i, run_id in enumerate(runs):
                tmprundir = os.path.join(tmpdir, 'runs', run_id)
                run_success(run_manager=RunManager(session=session, fs=fs),
                            run_id=run_id,
                            store=fs.get_store_for_folder(key=tmprundir),
                            values={
                                'count': count,
                                'avg': 1.0,
                                'name': run_id
                            })
                count += 1
                if i == 0:
                    asc_order.append(run_id)
                count_order.append(run_id)
            desc_order.append(run_id)
    # -- Test get ranking with one result per group ---------------------------
    with database.session() as session:
        wfrepo = WorkflowManager(session=session, fs=fs)
        rankings = RankingManager(session=session)
        wf = wfrepo.get_workflow(workflow_id)
        ranking = rankings.get_ranking(wf)
        rank_order = [e.run_id for e in ranking]
        assert rank_order == desc_order[::-1]
        ranking = rankings.get_ranking(
            wf, order_by=[SortColumn(column_id='count', sort_desc=False)])
        rank_order = [e.run_id for e in ranking]
        assert rank_order == asc_order
        # Run execution time
        assert type(ranking[0].exectime()) == timedelta
    # -- Test get ranking with all results per group --------------------------
    with database.session() as session:
        wfrepo = WorkflowManager(session=session, fs=fs)
        rankings = RankingManager(session=session)
        wf = wfrepo.get_workflow(workflow_id)
        ranking = rankings.get_ranking(wf, include_all=True)
        rank_order = [e.run_id for e in ranking]
        assert rank_order == count_order[::-1]
        ranking = rankings.get_ranking(
            wf,
            order_by=[SortColumn(column_id='count', sort_desc=False)],
            include_all=True)
        rank_order = [e.run_id for e in ranking]
        assert rank_order == count_order
Example #4
def test_sort_column_serialization():
    """Test serailization of sore columns."""
    doc = {'name': '0', 'sortDesc': False}
    col = SortColumn.from_dict(SortColumn.from_dict(doc).to_dict())
    assert col.column_id == '0'
    assert not col.sort_desc
    # Default sort order is desc.
    doc = {'name': '0'}
    col = SortColumn.from_dict(SortColumn.from_dict(doc).to_dict())
    assert col.column_id == '0'
    assert col.sort_desc
    # Invalid column serialization.
    with pytest.raises(KeyError):
        SortColumn.from_dict({}, validate=False)
    with pytest.raises(err.InvalidTemplateError):
        SortColumn.from_dict({})
Example #5
def get_leaderboard(workflow_id):
    """Get leader board for a given benchmark. Benchmarks and their results are
    available to everyone, independent of whether they are authenticated or
    not.
    """
    # The orderBy argument may contain a comma-separated list of column names.
    # Each column name may be suffixed with the sort order (':asc' for
    # ascending; the default is descending).
    order_by = request.args.get('orderBy')
    if order_by is not None:
        sort_columns = list()
        for col in order_by.split(','):
            sort_desc = None
            pos = col.find(':')
            if pos > -1:
                if col[pos + 1:].lower() == 'asc':
                    sort_desc = False
                col = col[:pos]
            sort_columns.append(SortColumn(col, sort_desc=sort_desc))
    else:
        sort_columns = None
    # The includeAll argument is a flag. If it is given without a value the
    # default is True. Otherwise, the value is interpreted as True only if it
    # equals 'true' (case-insensitive).
    include_all = request.args.get('includeAll')
    if include_all is not None:
        if include_all == '':
            include_all = True
        else:
            include_all = include_all.lower() == 'true'
    # Get serialization of the result ranking
    from robflask.service import service
    with service() as api:
        r = api.workflows().get_ranking(
            workflow_id,
            order_by=sort_columns,
            include_all=include_all
        )
    return make_response(jsonify(r), 200)
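
# A standalone sketch of how the 'orderBy' query parameter above is parsed
# (illustration only; the route in get_leaderboard is authoritative). Each
# comma-separated column may carry an optional ':asc' suffix; any other suffix,
# or none, keeps the default descending order.
def parse_order_by_sketch(order_by):
    columns = []
    for col in order_by.split(','):
        sort_desc = None
        pos = col.find(':')
        if pos > -1:
            if col[pos + 1:].lower() == 'asc':
                sort_desc = False
            col = col[:pos]
        columns.append((col, sort_desc))
    return columns


# 'count:asc,avg' requests ascending 'count' followed by 'avg' in default order.
assert parse_order_by_sketch('count:asc,avg') == [('count', False), ('avg', None)]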
import flowserv.model.workflow.state as st
import flowserv.util as util
import flowserv.tests.model as model
"""Result schema for the test workflows."""
RESULT_FILE_ID = 'results.json'

SCHEMA_1 = ResultSchema(result_file=RESULT_FILE_ID,
                        columns=[
                            ResultColumn('count', 'Total Count', PARA_INT),
                            ResultColumn('avg', 'avg', PARA_FLOAT),
                            ResultColumn('name',
                                         'name',
                                         PARA_STRING,
                                         required=False)
                        ],
                        order_by=[SortColumn(column_id='count')])

SCHEMA_2 = ResultSchema(
    result_file=RESULT_FILE_ID,
    columns=[
        ResultColumn('min', 'min', PARA_INT, path='values/min'),
        ResultColumn('max', 'max', PARA_INT, path='max')
    ],
    order_by=[SortColumn(column_id='min', sort_desc=False)])
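
# Sketch of result documents (an assumed layout, for illustration) that the two
# schemas above could be evaluated against: each column value is read from the
# result file under its column identifier, or under the nested 'path' given for
# the column (e.g. 'values/min' for SCHEMA_2).
EXAMPLE_RESULT_1 = {'count': 10, 'avg': 1.0, 'name': 'my-run'}  # 'name' is optional
EXAMPLE_RESULT_2 = {'values': {'min': 3}, 'max': 7}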


def init(database, basedir):
    """Create a fresh database with one user, two workflows, and four groups
    for each workflow. Each group has three active runs. Returns a list of
    tuples with workflow_id, groups, and runs.
    """