Example #1
def report_execution_json_download(project, execution, timestamp):
    report_data = exec_report.get_execution_data(project=project,
                                                 execution=execution,
                                                 timestamp=timestamp)
    json_report = json.dumps(report_data, indent=4)
    headers = {'Content-disposition': 'attachment; filename=report.json'}
    return Response(json_report, mimetype='application/json', headers=headers)
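Note: across these examples, the dictionary returned by get_execution_data exposes at least 'tests', 'total_tests', 'has_finished', 'totals_by_result', and 'net_elapsed_time'. A minimal sketch of consuming it outside a Flask view, assuming the same exec_report import used above (the helper name and arguments are placeholders, not part of Golem):

def print_execution_summary(project, execution, timestamp):
    # Hypothetical helper: summarize one execution report on the console.
    data = exec_report.get_execution_data(project=project,
                                          execution=execution,
                                          timestamp=timestamp)
    status = 'finished' if data['has_finished'] else 'still running'
    print(f"{data['total_tests']} tests ({status})")
    for result, count in data['totals_by_result'].items():
        print(f'  {result}: {count}')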
Example #2
 def test_run_suite_filter_by_invalid_tag_expression(
         self, _project_with_tags, test_utils, capsys):
     """When a invalid tag expression is used a message is displayed
     to the console, no tests are run, the report is generated,
     and the execution exists with status code 1
     """
     _, project = _project_with_tags.activate()
     suite_name = test_utils.random_numeric_string(10, 'suite')
     tests = [
         _project_with_tags.t.test_alfa_bravo,
         _project_with_tags.t.test_bravo_charlie
     ]
     test_utils.create_suite(project, suite_name, tests=tests)
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp,
                                                   tags=['sierra = tango'])
     execution_runner.project = project
     with pytest.raises(SystemExit):
         execution_runner.run_suite(suite_name)
     out, err = capsys.readouterr()
     expected = (
         "InvalidTagExpression: unknown expression <class '_ast.Assign'>, the "
         "only valid operators for tag expressions are: 'and', 'or' & 'not'"
     )
     assert expected in out
     data = exec_report.get_execution_data(project=project,
                                           suite=suite_name,
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 0
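Note: per the error message asserted above, tag expressions may only combine tags with 'and', 'or' and 'not'. A sketch of a valid expression under the same setup (tag names are taken from this fixture's tests; whether the expression selects any tests is not asserted here):

execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                              timestamp=timestamp,
                                              tags=['alfa and not charlie'])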
Example #3
def report_get_reports():
    user = _get_user_api_or_session()
    user_projects = user.project_list
    project = request.args.get('project')
    execution = request.args['execution']
    last_days = int(request.args['lastDays'])
    if project is None or project == 'null':
        project_list = user_projects
    else:
        _verify_permissions(Permissions.REPORTS_ONLY, project)
        project_list = [project]
    last_timestamps = report.get_last_execution_timestamps(project_list, execution=execution, last_days=last_days)

    result = []

    for proj, executions in last_timestamps.items():
        for exec_, timestamps in executions.items():
            for timestamp in timestamps:
                execution_data = exec_report.get_execution_data(project=proj, execution=exec_, timestamp=timestamp)
                result.append({
                    'project': proj,
                    'execution': exec_,
                    'timestamp': timestamp,
                    'report': execution_data
                })

    # sort executions from all projects together, newest first
    result = sorted(result, key=lambda x: x['timestamp'], reverse=True)

    return jsonify(result)
Example #4
    def test_run_with_not_existing_environments(self, project_function,
                                                test_utils, capsys):
        """Run tests with a not existing environment.
        It should throw an error and finish with status code 1
        """
        _, project = project_function.activate()
        test_utils.create_test(project, 'test01')
        timestamp = utils.get_timestamp()
        execution_runner = exc_runner.ExecutionRunner(
            browsers=['chrome'],
            timestamp=timestamp,
            environments=['not_existing'])
        execution_runner.project = project
        with pytest.raises(SystemExit) as wrapped_execution:
            execution_runner.run_directory('')

        assert wrapped_execution.value.code == 1
        out, err = capsys.readouterr()
        msg = (
            'ERROR: the following environments do not exist for project {}: '
            'not_existing'.format(project))
        assert msg in out
        data = exec_report.get_execution_data(project=project,
                                              suite='all',
                                              execution=timestamp)
        assert data['has_finished'] is True
        assert data['total_tests'] == 0
Example #5
def report_execution_json_download(project, suite, execution):
    report_data = exec_report.get_execution_data(project=project,
                                                 suite=suite,
                                                 execution=execution)
    json_report = json.dumps(report_data, indent=4)
    headers = {
        'Content-disposition': 'attachment; filename={}'.format('report.json')
    }
    return Response(json_report, mimetype='application/json', headers=headers)
Example #6
def report_execution():
    project = request.args['project']
    execution = request.args['execution']
    timestamp = request.args['timestamp']
    _verify_permissions(Permissions.REPORTS_ONLY, project)
    execution_data = exec_report.get_execution_data(project=project, execution=execution, timestamp=timestamp)
    response = jsonify(execution_data)
    if execution_data['has_finished']:
        response.cache_control.max_age = 60 * 60 * 24 * 7
        response.cache_control.public = True
    return response
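Note: the cache settings above mark finished execution reports as publicly cacheable for one week, presumably because a finished execution's report no longer changes. A quick check of the arithmetic (the header in the comment is roughly how Flask/Werkzeug serializes these settings):

max_age = 60 * 60 * 24 * 7    # seconds in one week
assert max_age == 604800      # e.g. 'Cache-Control: public, max-age=604800'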
Example #7
    def test_get_execution_data_from_report_json(self, project_function, test_utils):
        _, project = project_function.activate()
        suite_name = 'suite1'
        test_utils.create_test(project, name='test1')
        test_utils.create_suite(project, name=suite_name, tests=['test1'])
        timestamp = test_utils.run_suite(project, suite_name)

        exec_data = get_execution_data(project=project, suite=suite_name, execution=timestamp)

        assert len(exec_data['tests']) == 1
        assert exec_data['tests'][0]['name'] == 'test1'
        assert exec_data['total_tests'] == 1
        assert exec_data['has_finished'] is True
Example #8
def project_health():
    project = request.args['project']
    _verify_permissions(Permissions.REPORTS_ONLY, project)
    project_data = report.get_last_executions(projects=[project], suite=None, limit=1)
    health_data = {}
    for suite, executions in project_data[project].items():
        execution_data = exec_report.get_execution_data(project=project, suite=suite,
                                                        execution=executions[0])
        health_data[suite] = {
            'execution': executions[0],
            'total': execution_data['total_tests'],
            'totals_by_result': execution_data['totals_by_result']
        }
    return jsonify(health_data)
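Note: health_data maps each suite name to a summary of its most recent execution. A hypothetical payload, with the suite name, timestamp format and result labels invented purely for illustration:

health_data = {
    'smoke': {
        'execution': '2020.05.01.18.30.15.123456',
        'total': 12,
        'totals_by_result': {'success': 10, 'failure': 2}
    }
}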
Example #9
 def test_run_directory(self, _project_with_tags, capsys):
     _, project = _project_with_tags.activate()
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.project = project
     execution_runner.run_directory('foo')
     out, err = capsys.readouterr()
     assert 'Tests found: 2' in out
     data = exec_report.get_execution_data(project=project,
                                           suite='foo',
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 2
Example #10
    def test_get_execution_data_unfinished_execution(self, project_function, test_utils):
        _, project = project_function.activate()
        suite_name = 'suite1'
        test_utils.create_test(project, name='test1')
        test_utils.create_suite(project, name=suite_name, tests=['test1'])
        timestamp = test_utils.run_suite(project, suite_name)
        exec_dir = suite_execution_path(project, suite_name, timestamp)
        report_path = os.path.join(exec_dir, 'report.json')
        os.remove(report_path)

        exec_data = get_execution_data(project=project, suite=suite_name, execution=timestamp)

        assert len(exec_data['tests']) == 1
        assert exec_data['tests'][0]['name'] == 'test1'
        assert exec_data['total_tests'] == 1
        assert exec_data['has_finished'] is False
Example #11
 def test_run_directory_without_tests(self, _project_with_tags, capsys):
     _, project = _project_with_tags.activate()
     timestamp = utils.get_timestamp()
     dirname = 'empty'
     execution_runner = exc_runner.ExecutionRunner(project,
                                                   browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.run_directory(dirname)
     out, err = capsys.readouterr()
     expected = f"No tests were found in {os.path.join('tests', dirname)}"
     assert expected in out
     data = exec_report.get_execution_data(project=project,
                                           execution=dirname,
                                           timestamp=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 0
Example #12
 def test_run_directory_filter_by_tags(self, _project_with_tags, test_utils,
                                       capsys):
     _, project = _project_with_tags.activate()
     timestamp = utils.get_timestamp()
     dirname = 'foo'
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp,
                                                   tags=['alfa', 'bravo'])
     execution_runner.project = project
     execution_runner.run_directory(dirname)
     out, err = capsys.readouterr()
     assert 'Tests found: 1' in out
     data = exec_report.get_execution_data(project=project,
                                           suite=dirname,
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 1
Example #13
 def test_run_suite_without_tests(self, _project_with_tags, test_utils,
                                  capsys):
     _, project = _project_with_tags.activate()
     suite_name = test_utils.random_numeric_string(10, 'suite')
     test_utils.create_suite(project, suite_name, tests=[])
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.project = project
     execution_runner.run_suite(suite_name)
     out, err = capsys.readouterr()
     assert 'No tests found for suite {}'.format(suite_name) in out
     data = exec_report.get_execution_data(project=project,
                                           suite=suite_name,
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 0
Example #14
 def test_run_with_environments(self, project_function, test_utils, capsys):
     _, project = project_function.activate()
     environments = json.dumps({'test': {}, 'stage': {}})
     environment_manager.save_environments(project, environments)
     test_utils.create_test(project, 'test01')
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(
         browsers=['chrome'],
         timestamp=timestamp,
         environments=['test', 'stage'])
     execution_runner.project = project
     execution_runner.run_directory('')
     out, err = capsys.readouterr()
     assert 'Tests found: 1 (2 sets)' in out
     data = exec_report.get_execution_data(project=project,
                                           suite='all',
                                           execution=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 2
Example #15
 def test_run_suite(self, _project_with_tags, test_utils, capsys):
     _, project = _project_with_tags.activate()
     suite_name = test_utils.random_numeric_string(10, 'suite')
     tests = [
         _project_with_tags.t.test_alfa_bravo,
         _project_with_tags.t.test_bravo_charlie
     ]
     test_utils.create_suite(project, suite_name, tests=tests)
     timestamp = utils.get_timestamp()
     execution_runner = exc_runner.ExecutionRunner(project,
                                                   browsers=['chrome'],
                                                   timestamp=timestamp)
     execution_runner.run_suite(suite_name)
     out, err = capsys.readouterr()
     assert 'Tests found: 2' in out
     data = exec_report.get_execution_data(project=project,
                                           execution=suite_name,
                                           timestamp=timestamp)
     assert data['has_finished'] is True
     assert data['total_tests'] == 2
Example #16
 def execute_suite(project,
                   suite_name,
                   timestamp=None,
                   ignore_sys_exit=False):
     if not timestamp:
         timestamp = utils.get_timestamp()
     try:
         timestamp = TestUtils.run_suite(project, suite_name, timestamp)
     except SystemExit as e:
         if not ignore_sys_exit:
             raise e
     exec_data = get_execution_data(project=project,
                                    suite=suite_name,
                                    execution=timestamp)
     exec_dir = suite_execution_path(project, suite_name, timestamp)
     return {
         'exec_dir': exec_dir,
         'report_path': os.path.join(exec_dir, 'report.json'),
         'suite_name': suite_name,
         'timestamp': timestamp,
         'exec_data': exec_data
     }
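Note: a minimal sketch of how a test could use the execute_suite helper above, assuming the project_function and test_utils fixtures seen in the other examples (test and suite names are placeholders):

def test_report_json_is_written(project_function, test_utils):
    _, project = project_function.activate()
    test_utils.create_test(project, 'test01')
    test_utils.create_suite(project, 'suite01', tests=['test01'])
    result = execute_suite(project, 'suite01')
    assert os.path.isfile(result['report_path'])
    assert result['exec_data']['has_finished'] is True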
Example #17
def generate_html_report(project, execution, timestamp, destination_folder=None,
                         report_name=None, no_images=False):
    """Generate static HTML report.
    Report is generated in <report_directory>/<report_name>
    By default it's generated in <testdir>/projects/<project>/reports/<suite>/<timestamp>
    Default name is 'report.html' and 'report-no-images.html'
    """
    execution_directory = exec_report.execution_report_path(project, execution, timestamp)

    if destination_folder is None:
        destination_folder = execution_directory

    if not report_name:
        if no_images:
            report_name = 'report-no-images'
        else:
            report_name = 'report'

    formatted_date = utils.get_date_time_from_timestamp(timestamp)
    app = gui.create_app()
    static_folder = app.static_folder
    # css paths
    css_folder = os.path.join(static_folder, 'css')
    bootstrap_css = os.path.join(css_folder, 'bootstrap', 'bootstrap.min.css')
    main_css = os.path.join(css_folder, 'main.css')
    report_css = os.path.join(css_folder, 'report.css')
    # js paths
    js_folder = os.path.join(static_folder, 'js')
    main_js = os.path.join(js_folder, 'main.js')
    jquery_js = os.path.join(js_folder, 'external', 'jquery.min.js')
    datatables_js = os.path.join(js_folder, 'external', 'datatable', 'datatables.min.js')
    bootstrap_js = os.path.join(js_folder, 'external', 'bootstrap.min.js')
    report_execution_js = os.path.join(js_folder, 'report_execution.js')

    css = {
        'bootstrap': open(bootstrap_css, encoding='utf-8').read(),
        'main': open(main_css, encoding='utf-8').read(),
        'report': open(report_css, encoding='utf-8').read()
    }
    js = {
        'jquery': open(jquery_js, encoding='utf-8').read(),
        'datatables': open(datatables_js, encoding='utf-8').read(),
        'bootstrap': open(bootstrap_js, encoding='utf-8').read(),
        'main': open(main_js, encoding='utf-8').read(),
        'report_execution': open(report_execution_js, encoding='utf-8').read()
    }

    execution_data = exec_report.get_execution_data(execution_directory)
    detail_test_data = {}
    for test in execution_data['tests']:
        test_detail = exec_report.function_test_execution_result(
            project, execution, timestamp, test['test_file'], test['test'], test['set_name'],
            no_screenshots=no_images, encode_screenshots=True
        )
        # testId is test_file + test + set_name
        test_id = f"{test['test_file']}.{test['test']}"
        if test['set_name']:
            test_id = f"{test_id}.{test['set_name']}"
        detail_test_data[test_id] = test_detail
    with app.app_context():
        html_string = render_template(
            'report/report_execution_static.html', project=project, execution=execution,
            timestamp=timestamp, execution_data=execution_data,
            detail_test_data=detail_test_data, formatted_date=formatted_date,
            css=css, js=js, static=True
        )
    _, file_extension = os.path.splitext(report_name)
    if not file_extension:
        report_name = f'{report_name}.html'
    destination = os.path.join(destination_folder, report_name)

    if not os.path.exists(os.path.dirname(destination)):
        os.makedirs(os.path.dirname(destination), exist_ok=True)

    try:
        with open(destination, 'w', encoding='utf-8') as f:
            f.write(html_string)
    except IOError as e:
        if e.errno == errno.EACCES:
            print(f'ERROR: cannot write to {destination}, PermissionError (Errno 13)')
        else:
            print(f'ERROR: There was an error writing to {destination}')

    return html_string
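Note: a usage sketch for the function above; all argument values are placeholders:

html = generate_html_report('my_project', 'full', '2020.05.01.18.30.15.123456',
                            destination_folder='/tmp/reports', no_images=True)
# writes /tmp/reports/report-no-images.html and returns the rendered HTML string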
Example #18
def report_execution_json(project, execution, timestamp):
    json_report = exec_report.get_execution_data(project=project,
                                                 execution=execution,
                                                 timestamp=timestamp)
    return json_report
Example #19
def generate_junit_report(project_name, execution, timestamp, report_folder=None,
                          report_name=None):
    """Generate a report in JUnit XML format.

    Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
    src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
    """
    data = get_execution_data(project=project_name, execution=execution, timestamp=timestamp)

    totals = data['totals_by_result']
    errors = totals.get(Results.CODE_ERROR, 0)
    failures = totals.get(Results.FAILURE, 0) + totals.get(Results.ERROR, 0)
    skipped = totals.get(Results.SKIPPED, 0)

    testsuites_attrs = {
        'name': execution,
        'errors': str(errors),
        'failures': str(failures),
        'tests': str(data['total_tests']),
        'time': str(data['net_elapsed_time'])
    }
    testsuites = ET.Element('testsuites', testsuites_attrs)

    testsuites_attrs['timestamp'] = timestamp
    testsuites_attrs['skipped'] = str(skipped)
    testsuite = ET.SubElement(testsuites, 'testsuite', testsuites_attrs)

    for test in data['tests']:
        class_name = test['test_file']
        if test['set_name']:
            class_name = f"{class_name}_{test['set_name']}"
        test_attrs = {
            'name': test['test'],
            'classname': class_name,
            'time': str(test['elapsed_time'])
        }
        testcase = ET.SubElement(testsuite, 'testcase', test_attrs)

        # testcase nodes can contain 'failure', 'error', and 'skipped' sub-nodes
        # matching Golem 'error' and 'failure' to JUnit 'failure',
        # 'code error' to 'error', and 'skipped' to 'skipped'
        #
        # A Golem test can have 0 or more errors and 0 or 1 failures.
        # Correct mapping would be one sub-node for each of these.
        # The list of errors for a test is currently not available in the
        # execution json.
        if test['result'] in (Results.CODE_ERROR, Results.FAILURE, Results.ERROR, Results.SKIPPED):
            if test['result'] in [Results.ERROR, Results.FAILURE]:
                error_type = 'failure'
            elif test['result'] == Results.CODE_ERROR:
                error_type = 'error'
            else:
                error_type = 'skipped'
            error_data = {
                'type': test['result'],
                'message': str(test['test_data'])
            }
            error_message = ET.SubElement(testcase, error_type, error_data)

        # add debug log to /test/system-out node
        log_lines = get_test_debug_log(project_name, execution, timestamp,
                                       test['test_file'], test['set_name'])
        log_string = '\n'.join(log_lines)
        system_out = ET.SubElement(testcase, 'system-out')
        system_out.text = _clean_illegal_xml_chars(log_string)

    xmlstring = ET.tostring(testsuites)
    doc = minidom.parseString(xmlstring).toprettyxml(indent=' ' * 4, encoding='UTF-8')

    if not report_folder:
        report_folder = execution_report_path(project_name, execution, timestamp)
    if not report_name:
        report_name = 'report'
    report_path = os.path.join(report_folder, report_name + '.xml')
    if not os.path.exists(os.path.dirname(report_path)):
        os.makedirs(os.path.dirname(report_path), exist_ok=True)

    try:
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(doc.decode('UTF-8'))
    except IOError as e:
        if e.errno == errno.EACCES:
            print(f'ERROR: cannot write to {report_path}, PermissionError (Errno 13)')
        else:
            print(f'ERROR: There was an error writing to {report_path}')

    return doc
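Note: a rough usage sketch; generate_junit_report returns the pretty-printed XML as bytes, which can be parsed directly (argument values are placeholders):

import xml.etree.ElementTree as ET

doc = generate_junit_report('my_project', 'full', '2020.05.01.18.30.15.123456')
root = ET.fromstring(doc)
assert root.tag == 'testsuites'
assert root.find('testsuite') is not None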
Example #20
def generate_html_report(project, suite, execution, report_directory=None,
                         report_name=None, no_images=False):
    """Generate static HTML report.
    Report is generated in <report_directory>/<report_name>
    By default it's generated in <testdir>/projects/<project>/reports/<suite>/<timestamp>
    Default name is 'report.html' and 'report-no-images.html'
    """
    if not report_directory:
        report_directory = os.path.join(session.testdir, 'projects', project,
                                        'reports', suite, execution)
    if not report_name:
        if no_images:
            report_name = 'report-no-images'
        else:
            report_name = 'report'

    formatted_date = utils.get_date_time_from_timestamp(execution)
    app = gui.create_app()
    static_folder = app.static_folder
    # css paths
    css_folder = os.path.join(static_folder, 'css')
    bootstrap_css = os.path.join(css_folder, 'bootstrap', 'bootstrap.min.css')
    main_css = os.path.join(css_folder, 'main.css')
    report_css = os.path.join(css_folder, 'report.css')
    # js paths
    js_folder = os.path.join(static_folder, 'js')
    main_js = os.path.join(js_folder, 'main.js')
    jquery_js = os.path.join(js_folder, 'external', 'jquery.min.js')
    datatables_js = os.path.join(js_folder, 'external', 'datatable', 'datatables.min.js')
    bootstrap_js = os.path.join(js_folder, 'external', 'bootstrap.min.js')
    report_execution_js = os.path.join(js_folder, 'report_execution.js')

    css = {
        'bootstrap': open(bootstrap_css, encoding='utf-8').read(),
        'main': open(main_css, encoding='utf-8').read(),
        'report': open(report_css, encoding='utf-8').read()
    }
    js = {
        'jquery': open(jquery_js, encoding='utf-8').read(),
        'datatables': open(datatables_js, encoding='utf-8').read(),
        'bootstrap': open(bootstrap_js, encoding='utf-8').read(),
        'main': open(main_js, encoding='utf-8').read(),
        'report_execution': open(report_execution_js, encoding='utf-8').read()
    }

    execution_data = exec_report.get_execution_data(project=project, suite=suite, execution=execution)
    detail_test_data = {}
    for test in execution_data['tests']:
        test_detail = test_report.get_test_case_data(project, test['full_name'], suite=suite,
                                                     execution=execution, test_set=test['test_set'],
                                                     is_single=False, encode_screenshots=True,
                                                     no_screenshots=no_images)
        detail_test_data[test['test_set']] = test_detail
    with app.app_context():
        html_string = render_template('report/report_execution_static.html', project=project,
                                      suite=suite, execution=execution, execution_data=execution_data,
                                      detail_test_data=detail_test_data, formatted_date=formatted_date,
                                      css=css, js=js, static=True)
    _, file_extension = os.path.splitext(report_name)
    if not file_extension:
        report_name = '{}.html'.format(report_name)
    destination = os.path.join(report_directory, report_name)

    if not os.path.exists(os.path.dirname(destination)):
        os.makedirs(os.path.dirname(destination), exist_ok=True)

    try:
        with open(destination, 'w', encoding='utf-8') as f:
            f.write(html_string)
    except IOError as e:
        if e.errno == errno.EACCES:
            print('ERROR: cannot write to {}, PermissionError (Errno 13)'
                  .format(destination))
        else:
            print('ERROR: There was an error writing to {}'.format(destination))

    return html_string
Example #21
def report_execution_json(project, suite, execution):
    json_report = exec_report.get_execution_data(project=project,
                                                 suite=suite,
                                                 execution=execution)
    return jsonify(json_report)