Example #1
def process_answer(update, context):
    # Ignore answers that belong to a stale poll: only the most recently
    # created test message is active.
    if update.callback_query.message.message_id != context.user_data.get(
            'active_test_message_id'):
        update.callback_query.answer(
            'Опрос неактивен т.к. был создан более новый')  # "This poll is inactive: a newer one was created"
        return
    context.user_data['answers'].append(
        int(update.callback_query.data.split('_')[-1]))
    if len(context.user_data['answers']) < 25:
        update.callback_query.edit_message_text(
            **TestMessage(len(context.user_data['answers'])))
    else:
        result = sum(context.user_data['answers'])
        # Map the total score onto a severity label. (English translations
        # of the Russian labels are given in the trailing comments.)
        status = None
        if 0 <= result <= 5:
            status = 'депрессия отсутствует'  # no depression
        elif 6 <= result <= 10:
            status = 'нормальное, но несчастливое состояние'  # normal but unhappy state
        elif 11 <= result <= 25:
            status = 'слабо выраженная депрессия'  # mild depression
        elif 26 <= result <= 50:
            status = 'умеренная депрессия'  # moderate depression
        elif 51 <= result <= 75:
            status = 'сильно выраженная депрессия'  # severe depression
        elif 76 <= result <= 100:
            status = 'крайняя степень депрессии'  # extreme depression
        # NOTE: `status` is computed but never used below; ResultMessage
        # presumably derives the label from `result` on its own.
        update.callback_query.edit_message_text(**ResultMessage(result))
        TestResult.create(value=result,
                          user_id=update.effective_user.id,
                          data=context.user_data['answers'])
        Base.session.commit()
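
The TestMessage and ResultMessage helpers are not part of this example. Since they are unpacked with ** into edit_message_text, they must return mappings of keyword arguments such as text and reply_markup. A minimal hypothetical sketch, assuming python-telegram-bot:

    # Hypothetical helpers, inferred only from how they are called above.
    from telegram import InlineKeyboardButton, InlineKeyboardMarkup

    def TestMessage(answered_count):
        # One row of score buttons; callback_data ends with the score,
        # which matches the split('_')[-1] parsing in process_answer.
        buttons = [[InlineKeyboardButton(str(score), callback_data='answer_%d' % score)
                    for score in range(5)]]
        return {
            'text': 'Question %d of 25' % (answered_count + 1),
            'reply_markup': InlineKeyboardMarkup(buttons),
        }

    def ResultMessage(result):
        return {'text': 'Your total score: %d' % result}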
Example #2
    def test_merge(self):
        branch, platform, builder = _create_some_builder()
        some_build = _create_build(branch, platform, builder)
        some_result = TestResult.get_or_insert_from_parsed_json('some-test', some_build, 50)
        some_test = Test.update_or_insert('some-test', branch, platform)

        other_build = _create_build(branch, platform, builder, 'other-build')
        other_result = TestResult.get_or_insert_from_parsed_json('other-test', other_build, 30)
        other_test = Test.update_or_insert('other-test', branch, platform)

        self.assertOnlyInstances([some_result, other_result])
        self.assertNotEqual(some_result.key(), other_result.key())
        self.assertOnlyInstances([some_test, other_test])

        self.assertRaises(AssertionError, some_test.merge, some_test)
        self.assertOnlyInstances([some_test, other_test])

        some_test.merge(other_test)
        results_for_some_test = TestResult.all()
        results_for_some_test.filter('name =', 'some-test')
        results_for_some_test = results_for_some_test.fetch(5)
        self.assertEqual(len(results_for_some_test), 2)

        self.assertEqual(results_for_some_test[0].name, 'some-test')
        self.assertEqual(results_for_some_test[1].name, 'some-test')

        if results_for_some_test[0].value == 50:
            self.assertEqual(results_for_some_test[1].value, 30)
        else:
            self.assertEqual(results_for_some_test[1].value, 50)
Example #3
    def post(self):
        self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'

        log_id = int(self.request.get('id', 0))

        log = ReportLog.get_by_id(log_id)
        if not log or not log.commit:
            self.response.out.write("Not processed")
            return

        branch = log.branch()
        platform = log.platform()
        build = Build.get_or_insert_from_log(log)

        for test_name, result in log.results().iteritems():
            test = Test.update_or_insert(test_name, branch, platform)
            TestResult.get_or_insert_from_parsed_json(test_name, build, result)
            schedule_runs_update(test.id, branch.id, platform.id)

        log = ReportLog.get(log.key())
        log.delete()

        # We need to update the dashboard and manifest because they are affected by the existence of test results.
        schedule_dashboard_update()
        schedule_manifest_update()

        self.response.out.write('OK')
Example #4
    def _create_results(self, branch, platform, builder, test_name, values, timestamps=None):
        results = []
        for i, value in enumerate(values):
            build = Build(branch=branch, platform=platform, builder=builder,
                buildNumber=i, revision=100 + i, timestamp=timestamps[i] if timestamps else datetime.now())
            build.put()
            result = TestResult(name=test_name, build=build, value=value)
            result.put()
            results.append(result)
        return results
Example #5
def _create_results(branch, platform, builder, test_name, values):
    results = []
    for i, value in enumerate(values):
        build = Build(branch=branch, platform=platform, builder=builder,
            buildNumber=i, revision=100 + i, timestamp=datetime.now())
        build.put()
        result = TestResult(name=test_name, build=build, value=value)
        result.put()
        Test.update_or_insert(test_name, branch, platform)
        results.append(result)
    return results
Example #6
    def _create_results(self, branch, platform, builder, test_name, values, timestamps=None, starting_revision=100):
        builds = []
        results = []
        for i, value in enumerate(values):
            build = Build(branch=branch, platform=platform, builder=builder,
                buildNumber=i, revision=starting_revision + i, timestamp=timestamps[i] if timestamps else datetime.now())
            build.put()
            result = TestResult(name=test_name, build=build, value=value)
            result.put()
            builds.append(build)
            results.append(result)
        return builds, results
Example #7
    def test_replace_to_change_test_name_overrides_conflicting_result(self):
        branch, platform, builder = _create_some_builder()
        build = _create_build(branch, platform, builder)
        self.assertThereIsNoInstanceOf(TestResult)
        result = TestResult.get_or_insert_from_parsed_json('some-test', build, 20)
        self.assertOnlyInstance(result)

        conflicting_result = TestResult.get_or_insert_from_parsed_json('other-test', build, 10)

        new_result = result.replace_to_change_test_name('other-test')
        self.assertNotEqual(result, new_result)
        self.assertOnlyInstance(new_result)

        self.assertEqual(new_result.name, 'other-test')
        self.assertEqual(TestResult.get(conflicting_result.key()).value, 20)
Example #8
def test_rounded_percentages():
    tests_output_file_name: str = 'test_rounded_percentages.out'
    output_file_folder: str = './tests/.out/'
    input_data = PageGenInputData(
        PageGenSettings(
            ref_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='13a4c1dca0dd58d62acc741866fb945f3fe81592',
            ),
            comp_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='614c0134750071ffe08dc376e9cc8caf210974bf',
            ),
        ),
        [
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-03-4-rom_c18",
                Diff("Exit value", 0, 0, 0, 0),
                [Diff("bound", 13, 12, -1, -7.6923),
                 Diff("time", 0, 0, 0, 0)]),
        ],
    )

    generate_page(tests_output_file_name, output_file_folder, input_data)

    f = open("./tests/.out/test_rounded_percentages.md", "r", encoding="utf-8")
    file_content = f.read()
    assert "7.692" not in file_content
    assert "7.69" in file_content
Example #9
    def test_replace_to_change_test_name_with_stat_value(self):
        branch, platform, builder = _create_some_builder()
        build = _create_build(branch, platform, builder)
        self.assertThereIsNoInstanceOf(TestResult)
        result = TestResult.get_or_insert_from_parsed_json(
            'some-test', build, {
                "avg": 40,
                "median": "40.1",
                "stdev": 3.25,
                "min": 30.5,
                "max": 45
            })
        self.assertOnlyInstance(result)
        self.assertEqual(result.name, 'some-test')

        new_result = result.replace_to_change_test_name('other-test')
        self.assertNotEqual(result, new_result)
        self.assertOnlyInstance(new_result)

        self.assertEqual(new_result.name, 'other-test')
        self.assertEqual(new_result.build.key(), result.build.key())
        self.assertEqual(new_result.value, result.value)
        self.assertEqual(result.value, 40.0)
        self.assertEqual(result.valueMedian, 40.1)
        self.assertEqual(result.valueStdev, 3.25)
        self.assertEqual(result.valueMin, 30.5)
        self.assertEqual(result.valueMax, 45)
Example #10
def test_table_hidden_if_no_result():
    tests_output_file_name: str = 'test_table_hidden_if_no_result.out'
    output_file_folder: str = './tests/.out/'
    input_data = PageGenInputData(
        PageGenSettings(
            ref_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='13a4c1dca0dd58d62acc741866fb945f3fe81592',
            ),
            comp_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='614c0134750071ffe08dc376e9cc8caf210974bf',
            ),
        ),
        [
            TestResult(
                '/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-03-4-rom_c18',
                Diff('Exit value 1', 0, 0, 0, 0), []),
        ],
    )

    generate_page(tests_output_file_name, output_file_folder, input_data)
    with open('./tests/.out/test_table_hidden_if_no_result.md', 'r',
              encoding="utf-8") as f:
        file_content = f.read()
    assert '| Measure' not in file_content
    assert '*The test generated no result.*' in file_content
Example #11
    def post(self):
        self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'

        log_id = int(self.request.get('id', 0))

        log = ReportLog.get_by_id(log_id)
        if not log or not log.commit:
            self.response.out.write("Not processed")
            return

        branch = log.branch()
        platform = log.platform()
        build = Build.get_or_insert_from_log(log)

        for test_name, result_value in log.results().iteritems():
            unit = result_value.get('unit') if isinstance(result_value, dict) else None
            test = Test.update_or_insert(test_name, branch, platform, unit)
            result = TestResult.get_or_insert_from_parsed_json(test_name, build, result_value)
            if not result:
                continue
            runs = Runs.get_by_objects(branch, platform, test)
            regenerate_runs = True
            if runs:
                runs.update_incrementally(build, result)
                regenerate_runs = False
            schedule_runs_update(test.id, branch.id, platform.id, regenerate_runs)

        log = ReportLog.get(log.key())
        log.delete()

        # We need to update the dashboard and manifest because they are affected by the existence of test results.
        schedule_dashboard_update()
        schedule_manifest_update()

        self.response.out.write('OK')
Example #12
    def get(self):
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'

        try:
            testId = int(self.request.get('id', 0))
            branchId = int(self.request.get('branchid', 0))
            platformId = int(self.request.get('platformid', 0))
        except (TypeError, ValueError):  # int() raises ValueError on malformed input
            # FIXME: Output an error here
            testId = 0
            branchId = 0
            platformId = 0

        # FIXME: Just fetch builds specified by "days"
        # days = self.request.get('days', 365)

        cacheKey = Test.cacheKey(testId, branchId, platformId)
        cache = memcache.get(cacheKey)
        if cache:
            self.response.out.write(cache)
            return

        builds = Build.all()
        builds.filter('branch =', modelFromNumericId(branchId, Branch))
        builds.filter('platform =', modelFromNumericId(platformId, Platform))

        test = modelFromNumericId(testId, Test)
        testName = test.name if test else None
        test_runs = []
        averages = {}
        values = []
        timestamps = []

        for build in builds:
            results = TestResult.all()
            results.filter('name =', testName)
            results.filter('build =', build)
            for result in results:
                builderId = build.builder.key().id()
                posixTimestamp = mktime(build.timestamp.timetuple())
                test_runs.append([result.key().id(),
                    [build.key().id(), build.buildNumber, build.revision],
                    posixTimestamp, result.value, 0, [], builderId])
                # FIXME: Calculate the average; in practice, we wouldn't have more than one value for a given revision
                averages[build.revision] = result.value
                values.append(result.value)
                timestamps.append(posixTimestamp)

        result = json.dumps({
            'test_runs': test_runs,
            'averages': averages,
            'min': min(values) if values else None,
            'max': max(values) if values else None,
            'date_range': [min(timestamps), max(timestamps)] if timestamps else None,
            'stat': 'ok'})
        self.response.out.write(result)
        memcache.add(cacheKey, result)
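
For reference, the JSON assembled above has roughly the following shape. Each test_runs entry is [result id, [build id, build number, revision], POSIX timestamp, value, run number, annotations, builder id]; the concrete values below are illustrative only:

    {
        "test_runs": [[1, [5, 123, 2500], 1334000000.0, 456.0, 0, [], 7]],
        "averages": {"2500": 456.0},
        "min": 456.0,
        "max": 456.0,
        "date_range": [1334000000.0, 1334000000.0],
        "stat": "ok"
    }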
Example #13
    def post(self):
        self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'

        try:
            test_id = int(self.request.get('id', 0))
            branch_id = int(self.request.get('branchid', 0))
            platform_id = int(self.request.get('platformid', 0))
        except (TypeError, ValueError):  # int() raises ValueError on malformed input
            # FIXME: Output an error here
            test_id = 0
            branch_id = 0
            platform_id = 0

        # FIXME: Just fetch builds specified by "days"
        # days = self.request.get('days', 365)

        builds = Build.all()
        builds.filter('branch =', modelFromNumericId(branch_id, Branch))
        builds.filter('platform =', modelFromNumericId(platform_id, Platform))

        test = modelFromNumericId(test_id, Test)
        test_name = test.name if test else None
        test_runs = []
        averages = {}
        values = []
        timestamps = []

        for build in builds:
            results = TestResult.all()
            results.filter('name =', test_name)
            results.filter('build =', build)
            for result in results:
                builderId = build.builder.key().id()
                posixTimestamp = mktime(build.timestamp.timetuple())
                statistics = None
                if result.valueStdev is not None and result.valueMin is not None and result.valueMax is not None:
                    statistics = {'stdev': result.valueStdev, 'min': result.valueMin, 'max': result.valueMax}
                test_runs.append([result.key().id(),
                    [build.key().id(), build.buildNumber, build.revision],
                    posixTimestamp, result.value, 0,  # runNumber
                    [],  # annotations
                    builderId, statistics])
                # FIXME: Calculate the average; in practice, we wouldn't have more than one value for a given revision
                averages[build.revision] = result.value
                values.append(result.value)
                timestamps.append(posixTimestamp)

        result = json.dumps({
            'test_runs': test_runs,
            'averages': averages,
            'min': min(values) if values else None,
            'max': max(values) if values else None,
            'date_range': [min(timestamps), max(timestamps)] if timestamps else None,
            'stat': 'ok'})
        cache_runs(test_id, branch_id, platform_id, result)
        self.response.out.write('OK')
Example #14
def test_with_data():
    tests_output_file_name: str = 'test_with_data.out'
    output_file_folder: str = './tests/.out/'
    input_data = PageGenInputData(
        PageGenSettings(
            ref_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='13a4c1dca0dd58d62acc741866fb945f3fe81592',
            ),
            comp_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='614c0134750071ffe08dc376e9cc8caf210974bf',
            ),
        ),
        [
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-03-4-rom_c18",
                Diff("Exit value", 0, 0, 0, 0),
                [Diff("bound", 13, 12, -1, -7.6923),
                 Diff("time", 0, 0, 0, 0)]),
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-04-4-rom_c18",
                Diff("Exit value", 1, 1, 0, 0), []),
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-07-4-rom_c18",
                Diff("Exit value", -1, -1, 0, 0), []),
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP18/NurseRostering/NurseRostering-17_c18",
                Diff("Exit value", -1, -1, 0, 0), []),
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP18/NurseRostering/NurseRostering-20_c18",
                Diff("Exit value", -1, -1, 0, 0), []),
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP18/Rlfap/Rlfap-opt/Rlfap-scen-03-opt_c18",
                Diff("Exit value", -1, -1, 0, 0), []),
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP18/Rlfap/Rlfap-opt/Rlfap-scen-06-opt_c18",
                Diff("Exit value", -1, -1, 0, 0), []),
            TestResult(
                "/home/evaluation/evaluation/pub/bench/XCSP3/Filters-ar_1_2.xml",
                Diff("Exit value", -1, -1, 0, 0), []),
        ],
    )

    generate_page(tests_output_file_name, output_file_folder, input_data)
Example #15
    def _generate_runs(branch, platform, test_name):
        builds = Build.all()
        builds.filter('branch =', branch)
        builds.filter('platform =', platform)

        for build in builds:
            results = TestResult.all()
            results.filter('name =', test_name)
            results.filter('build =', build)
            for result in results:
                yield build, result
Example #16
def test_failure_color():
    tests_output_file_name: str = 'test_failure_color.out'
    output_file_folder: str = './tests/.out/'
    input_data = PageGenInputData(
        PageGenSettings(
            ref_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='13a4c1dca0dd58d62acc741866fb945f3fe81592',
            ),
            comp_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='614c0134750071ffe08dc376e9cc8caf210974bf',
            ),
        ),
        [
            TestResult(
                '/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-03-4-rom_c18',
                Diff('Exit value 1', -1, -1, 0, 0), []),
            TestResult(
                '/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-04-4-rom_c18',
                Diff('Exit value 2', 2, -1, -3, -150), []),
            TestResult(
                '/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-07-4-rom_c18',
                Diff('Exit value 3', 0, -1, -1, 0), []),
        ],
    )

    generate_page(tests_output_file_name, output_file_folder, input_data)

    with open('./tests/.out/test_failure_color.md', 'r', encoding="utf-8") as f:
        file_content = f.read()
    assert '**Exit value 1:** `-1` <span style="color: red">⨯ (was `-1`)</span>' in file_content
    assert '**Exit value 2:** `-1` <span style="color: red">⨯ (was `2`)</span>' in file_content
    assert '**Exit value 3:** `-1` <span style="color: red">⨯ (was `0`)</span>' in file_content
Example #17
    def test_get_or_insert_value(self):
        branch, platform, builder = _create_some_builder()
        build = _create_build(branch, platform, builder)
        self.assertThereIsNoInstanceOf(TestResult)
        result = TestResult.get_or_insert_from_parsed_json('some-test', build, 50)
        self.assertOnlyInstance(result)
        self.assertEqual(result.name, 'some-test')
        self.assertEqual(result.build.key(), build.key())
        self.assertEqual(result.value, 50.0)
        self.assertEqual(result.valueMedian, None)
        self.assertEqual(result.valueStdev, None)
        self.assertEqual(result.valueMin, None)
        self.assertEqual(result.valueMax, None)
Example #18
    def test_get_or_insert_stat_value(self):
        branch, platform, builder = _create_some_builder()
        build = _create_build(branch, platform, builder)
        self.assertThereIsNoInstanceOf(TestResult)
        result = TestResult.get_or_insert_from_parsed_json('some-test', build,
            {"avg": 40, "median": "40.1", "stdev": 3.25, "min": 30.5, "max": 45})
        self.assertOnlyInstance(result)
        self.assertEqual(result.name, 'some-test')
        self.assertEqual(result.build.key(), build.key())
        self.assertEqual(result.value, 40.0)
        self.assertEqual(result.valueMedian, 40.1)
        self.assertEqual(result.valueStdev, 3.25)
        self.assertEqual(result.valueMin, 30.5)
        self.assertEqual(result.valueMax, 45)
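
get_or_insert_from_parsed_json itself is not shown, but the assertions imply that it accepts either a bare number or a dict of statistics and coerces each one to float (note that the median arrives as the string "40.1"). A hypothetical sketch of that coercion, using the field names seen in these tests:

    # Hypothetical parsing step inferred from the assertions above; the
    # real method also creates and stores the datastore entity.
    def _parse_result_value(parsed_json):
        if not isinstance(parsed_json, dict):
            return {'value': float(parsed_json)}
        return {
            'value': float(parsed_json['avg']),
            'valueMedian': float(parsed_json['median']),  # "40.1" -> 40.1
            'valueStdev': float(parsed_json['stdev']),
            'valueMin': float(parsed_json['min']),
            'valueMax': float(parsed_json['max']),
        }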
Example #19
    def post(self):
        """
        Create a new test result
        :return: status of the request
        :raise HTTPError
        """

        # check for request payload
        if self.json_args is None:
            raise HTTPError(HTTP_BAD_REQUEST)

        # check for missing parameters in the request payload
        if self.json_args.get("project_name") is None:
            raise HTTPError(HTTP_BAD_REQUEST)
        if self.json_args.get("case_name") is None:
            raise HTTPError(HTTP_BAD_REQUEST)
        # check for pod_name instead of id,
        # keeping id for current implementations
        if self.json_args.get("pod_name") is None:
            raise HTTPError(HTTP_BAD_REQUEST)

        # TODO : replace checks with jsonschema
        # check for project
        mongo_dict = yield self.db.test_projects.find_one({"name": self.json_args.get("project_name")})
        if mongo_dict is None:
            raise HTTPError(HTTP_NOT_FOUND, "Could not find project [{}] ".format(self.json_args.get("project_name")))

        # check for case
        mongo_dict = yield self.db.test_cases.find_one({"name": self.json_args.get("case_name")})
        if mongo_dict is None:
            raise HTTPError(HTTP_NOT_FOUND, "Could not find case [{}] ".format(self.json_args.get("case_name")))

        # check for pod
        mongo_dict = yield self.db.pod.find_one({"name": self.json_args.get("pod_name")})
        if mongo_dict is None:
            raise HTTPError(HTTP_NOT_FOUND, "Could not find POD [{}] ".format(self.json_args.get("pod_name")))

        # convert payload to object
        test_result = TestResult.test_result_from_dict(self.json_args)
        test_result.creation_date = datetime.now()

        future = self.db.test_results.insert(test_result.format(), check_keys=False)
        result = yield future
        test_result._id = result

        self.finish_request(test_result.format_http())
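
The handler rejects any request whose JSON body lacks project_name, case_name or pod_name, and each of the three must name an existing document. A valid request could therefore look like this (the field values are borrowed from the GET docstring later in this listing and are illustrative):

    POST /results
    {
        "project_name": "functest",
        "case_name": "vPing",
        "pod_name": "pod_name",
        "version": "Arno-R1",
        "installer": "fuel"
    }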
Example #20
    def test_replace_to_change_test_name(self):
        branch, platform, builder = _create_some_builder()
        build = _create_build(branch, platform, builder)
        self.assertThereIsNoInstanceOf(TestResult)
        result = TestResult.get_or_insert_from_parsed_json('some-test', build, 50)
        self.assertOnlyInstance(result)
        self.assertEqual(result.name, 'some-test')

        new_result = result.replace_to_change_test_name('other-test')
        self.assertNotEqual(result, new_result)
        self.assertOnlyInstance(new_result)

        self.assertEqual(new_result.name, 'other-test')
        self.assertEqual(new_result.build.key(), result.build.key())
        self.assertEqual(new_result.value, result.value)
        self.assertEqual(new_result.valueMedian, None)
        self.assertEqual(new_result.valueStdev, None)
        self.assertEqual(new_result.valueMin, None)
        self.assertEqual(new_result.valueMax, None)
Example #21
    def persist_data_to_db(self, payload):
        try:
            data = json.loads(payload)

            for record in data['students']:
                student = Student(name=record['firstName'],
                                  surname=record['lastName'])
                db.session.add(student)
                # Flush so the autogenerated primary key is available
                # before the TestResult rows reference it.
                db.session.flush()

                for test_name, score in record['tests'].items():
                    db.session.add(TestResult(test=test_name,
                                              result=score,
                                              student_id=student.id))
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            return str(e)
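
The payload shape this method expects, inferred from the keys it reads (the concrete values are illustrative):

    {
        "students": [
            {
                "firstName": "Ada",
                "lastName": "Lovelace",
                "tests": {"math": 95, "history": 88}
            }
        ]
    }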
Example #22
def get_stats(update, context):
    if update.message:
        # Direct command: default to the current month (times are shifted
        # to UTC+3, presumably the bot's local timezone).
        now = datetime.datetime.utcnow() + datetime.timedelta(hours=3)
        from_time = now.date().replace(day=1)
        from_time = datetime.datetime(from_time.year, from_time.month,
                                      from_time.day)
        to_time = now
    else:
        # Callback button: the month and year are encoded in the callback data.
        month, year = update.callback_query.data.split('_')[-2:]
        from_time = datetime.datetime(int(year), int(month), 1)
        to_time = from_time + relativedelta(months=1, days=-1)
    items = TestResult.where(
        user_id=update.effective_user.id,
        created_on__ge=from_time,
        created_on__le=to_time,
    ).all()
    answer = StatsMessage(from_time, items)
    if update.message:
        update.message.reply_text(**answer)
    else:
        update.callback_query.edit_message_text(**answer)
Example #23
    def compare(self):
        comp_results = []
        for result_ref in self.dataref[RawDataKeys().results_key]:
            try:
                result_comp = self.pair(result_ref)
            except Exception as e:
                print(
                    f'Could not pair results for {result_ref[RawDataKeys().result_input_file_path_key]}: {e}. Skipping.'
                )
                continue

            data_ref = RawData.from_result(result_ref)
            data_comp = RawData.from_result(result_comp)

            result = TestResult(
                data_ref.path,
                Comparator.make_exit_diff(data_ref, data_comp),
                Comparator.make_diffs(data_ref, data_comp),
            )
            comp_results.append(result)

        return comp_results
Example #24
    def post(self):
        self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'

        merge = Test.get_by_key_name(self.request.get('merge'))
        into = Test.get_by_key_name(self.request.get('into'))
        if not merge or not into:
            self.response.out.write('Invalid test names')
            return

        mergedResults = TestResult.all()
        mergedResults.filter('name =', merge.name)
        for result in mergedResults:
            result.name = into.name
            result.put()

        # Just flush everything since we rarely merge tests, and we need to flush
        # the dashboard, manifest, and all runs for this test here.
        memcache.flush_all()

        deleteModelWithNumericIdHolder(merge)

        self.response.out.write('OK')
Example #25
def test_metadata_are_used_to_generate_front_matter():
    tests_output_file_name: str = 'test_metadata_are_used_to_generate_front_matter.out'
    output_file_folder: str = './tests/.out/'
    input_data = PageGenInputData(
        PageGenSettings(
            ref_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='https://github.com/chocoteam/choco-solver',
                code_commit='abcdefghij',
            ),
            comp_test_metadata=TestOutputMetadata(
                test_folder_path='/home/evaluation/evaluation/pub/bench/',
                page_title='Title',
                page_description='Description',
                repository_url='http://wesite.com/repository/',
                code_commit='1234567890',
            ),
            similar_percent_limit=50,
        ),
        [
            TestResult(
                '/home/evaluation/evaluation/pub/bench/XCSP18/CrosswordDesign/CrosswordDesign-03-4-rom_c18',
                Diff('Exit value 1', 50, 40, -10, -20), []),
        ],
    )

    generate_page(tests_output_file_name, output_file_folder, input_data)
    with open('./tests/.out/test_metadata_are_used_to_generate_front_matter.md',
              'r', encoding="utf-8") as f:
        file_content = f.read()
    assert 'title: "Title"' in file_content
    assert 'description: >\n  Description\n\n  Results of' in file_content
    assert 'Results of [`1234567`](http://wesite.com/repository/commit/1234567890) are compared with [`abcdefg`](https://github.com/chocoteam/choco-solver/commit/abcdefghij).' in file_content
    assert '≈ `-10` (`-20%`)' in file_content
Example #26
    def process_request(self, request):
        if not AB_TEST_ACTIVE:
            return None
        # noinspection PyBroadException
        try:
            sessionTests = request.session.get(AB_TEST_SESSION_NAME, {})
            if sessionTests is None: sessionTests = {}
            newSessionTests = {}
            requestTests = ABTestRequest()
            for activeTest in Test.objects.filter(active=True):
                if activeTest.pk in sessionTests and TestResult.objects.filter(pk=sessionTests[activeTest.pk]).exists():
                    activeTestResult = TestResult.objects.get(pk=sessionTests[activeTest.pk])
                else:
                    activeTestResult = TestResult.chooseExperiment(request, activeTest)
                newSessionTests[activeTest.pk] = activeTestResult.pk
                requestTests[activeTest] = activeTestResult
            request.session[AB_TEST_SESSION_NAME] = newSessionTests
            setattr(request, AB_TEST_REQUEST_NAME, requestTests)
        except Exception as ex:
            getLogger(AB_TEST_LOGGER_MIDDLEWARE).error("error processing request: %s", ex)
            if not AB_TEST_FAIL_SILENT_MIDDLEWARE:
                raise

        return None
Example #27
def report_start(request, mac):
    host, _ = Host.objects.get_or_create(_mac=mac)
    new_result = TestResult(host=host)
    new_result.save()
    return HttpResponse(host)
Example #28
    def get(self, result_id=None):
        """
        Retrieve result(s) for a test project on a specific POD.
        Available filters for this request are :
         - project : project name
         - case : case name
         - pod : pod name
         - version : platform version (Arno-R1, ...)
         - installer (fuel, ...)
         - build_tag : Jenkins build tag name
         - period : x (x last days)


        :param result_id: Get a result by ID
        :raise HTTPError

        GET /results?project=functest&case=vPing&version=Arno-R1 \
        &pod=pod_name&period=15
        => get results with optional filters
        """

        project_arg = self.get_query_argument("project", None)
        case_arg = self.get_query_argument("case", None)
        pod_arg = self.get_query_argument("pod", None)
        version_arg = self.get_query_argument("version", None)
        installer_arg = self.get_query_argument("installer", None)
        build_tag_arg = self.get_query_argument("build_tag", None)
        period_arg = self.get_query_argument("period", None)

        # prepare request
        get_request = dict()
        if result_id is None:
            if project_arg is not None:
                get_request["project_name"] = project_arg

            if case_arg is not None:
                get_request["case_name"] = case_arg

            if pod_arg is not None:
                get_request["pod_name"] = pod_arg

            if version_arg is not None:
                get_request["version"] = version_arg

            if installer_arg is not None:
                get_request["installer"] = installer_arg

            if build_tag_arg is not None:
                get_request["build_tag"] = build_tag_arg

            if period_arg is not None:
                try:
                    period_arg = int(period_arg)
                except (TypeError, ValueError):
                    raise HTTPError(HTTP_BAD_REQUEST)

                if period_arg > 0:
                    period = datetime.now() - timedelta(days=period_arg)
                    obj = {"$gte": str(period)}
                    get_request["creation_date"] = obj
        else:
            get_request["_id"] = result_id

        res = []
        # fetching results
        cursor = self.db.test_results.find(get_request)
        while (yield cursor.fetch_next):
            test_result = TestResult.test_result_from_dict(cursor.next_object())
            res.append(test_result.format_http())

        # building meta object
        meta = dict()
        meta["total"] = len(res)

        # final response object
        answer = dict()
        answer["test_results"] = res
        answer["meta"] = meta
        self.finish_request(answer)
Example #29
    def test_run_from_build_and_result(self):
        branch = Branch.create_if_possible('some-branch', 'Some Branch')
        platform = Platform.create_if_possible('some-platform', 'Some Platform')
        builder = Builder.get(Builder.create('some-builder', 'Some Builder'))
        test_name = ' some-test'

        def create_build(build_number, revision):
            timestamp = datetime.now().replace(microsecond=0)
            build = Build(branch=branch, platform=platform, builder=builder, buildNumber=build_number,
                revision=revision, timestamp=timestamp)
            build.put()
            return build

        build = create_build(1, 101)
        result = TestResult(name=test_name, value=123.0, build=build)
        result.put()
        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 123.0)

        build = create_build(2, 102)
        result = TestResult(name=test_name, value=456.0, valueMedian=789.0, build=build)
        result.put()
        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)

        result.valueStdev = 7.0
        result.put()
        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)

        result.valueStdev = None
        result.valueMin = 123.0
        result.valueMax = 789.0
        result.put()
        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0)

        result.valueStdev = 8.0
        result.valueMin = 123.0
        result.valueMax = 789.0
        result.put()
        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0,
            statistics={'stdev': 8.0, 'min': 123.0, 'max': 789.0})

        result.valueMedian = 345.0  # Median is never used by the frontend.
        result.valueStdev = 8.0
        result.valueMin = 123.0
        result.valueMax = 789.0
        result.put()
        self._assert_entry(Runs._entry_from_build_and_result(build, result), build, result, 456.0,
            statistics={'stdev': 8.0, 'min': 123.0, 'max': 789.0})
Example #30
    def test_get_or_insert_stat_value_with_values(self):
        branch, platform, builder = _create_some_builder()
        build = _create_build(branch, platform, builder)
        result = TestResult.get_or_insert_from_parsed_json('some-test', build,
            {"avg": 40, "median": "40.1", "stdev": 3.25, "min": 30.5, "max": 45, "values": [1.0, 2.0, 3.0]})
        self.assertEqual(result.values, [1.0, 2.0, 3.0])
Example #31
    def get(self, result_id=None):
        """
        Retrieve dashboard ready result(s) for a test project
        Available filters for this request are :
         - project : project name
         - case : case name
         - pod : pod name
         - version : platform version (Arno-R1, ...)
         - installer (fuel, ...)
         - period : x (x last days)


        :param result_id: Get a result by ID
        :raise HTTPError

        GET /dashboard?project=functest&case=vPing&version=Arno-R1 \
        &pod=pod_name&period=15
        => get results with optional filters
        """

        project_arg = self.get_query_argument("project", None)
        case_arg = self.get_query_argument("case", None)
        pod_arg = self.get_query_argument("pod", None)
        version_arg = self.get_query_argument("version", None)
        installer_arg = self.get_query_argument("installer", None)
        period_arg = self.get_query_argument("period", None)

        # prepare request
        get_request = dict()

        # /dashboard?project=<>&pod=<>...
        if result_id is None:
            if project_arg is not None:
                get_request["project_name"] = project_arg

            if case_arg is not None:
                get_request["case_name"] = case_arg

            if pod_arg is not None:
                get_request["pod_name"] = pod_arg

            if version_arg is not None:
                get_request["version"] = version_arg

            if installer_arg is not None:
                get_request["installer"] = installer_arg

            if period_arg is not None:
                try:
                    period_arg = int(period_arg)
                except (TypeError, ValueError):
                    raise HTTPError(HTTP_BAD_REQUEST)
                if period_arg > 0:
                    period = datetime.now() - timedelta(days=period_arg)
                    obj = {"$gte": str(period)}
                    get_request["creation_date"] = obj
        else:
            get_request["_id"] = result_id

        dashboard = []

        # on /dashboard retrieve the list of projects and testcases
        # ready for dashboard
        if project_arg is None:
            raise HTTPError(HTTP_NOT_FOUND, "error:Project name missing")
        elif check_dashboard_ready_project(project_arg, "./dashboard"):
            res = []

            if case_arg is None:
                raise HTTPError(HTTP_NOT_FOUND, "error:Test case missing for project " + project_arg)

            # special case of status for project
            if case_arg == "status":
                del get_request["case_name"]
                # retention time to be agreed
                # last five days by default?
                # TODO move to DB
                period = datetime.now() - timedelta(days=5)
                get_request["creation_date"] = {"$gte": period}

            # fetching results
            cursor = self.db.test_results.find(get_request)
            while (yield cursor.fetch_next):
                test_result = TestResult.test_result_from_dict(cursor.next_object())
                res.append(test_result.format_http())

            if check_dashboard_ready_case(project_arg, case_arg):
                dashboard = get_dashboard_result(project_arg, case_arg, res)
            else:
                raise HTTPError(
                    HTTP_NOT_FOUND,
                    "error:" + case_arg + " test case not case dashboard ready on project " + project_arg,
                )

        else:
            dashboard.append({"error": "Project not recognized or not dashboard ready"})
            dashboard.append({"Dashboard-ready-projects": get_dashboard_cases("./dashboard")})
            raise HTTPError(HTTP_NOT_FOUND, "error: no dashboard ready data for this project")


        # building meta object
        meta = dict()

        # final response object
        answer = dict()
        answer["dashboard"] = dashboard
        answer["meta"] = meta
        self.finish_request(answer)