Example 1
0
def delete_execution(project, suite, execution):
    """Delete the execution directory for the given project/suite/execution.

    Returns a list of error message strings; the list is empty on success.
    """
    errors = []
    path = suite_execution_path(project, suite, execution)
    if os.path.isdir(path):
        try:
            shutil.rmtree(path)
        except OSError as e:
            # Report the failure instead of silently swallowing it
            # (the original bare `except: pass` hid permission/lock errors).
            errors.append('Error removing execution directory {}: {}'
                          .format(path, e))
    else:
        errors.append('Execution for {} {} {} does not exist'.format(
            project, suite, execution))
    return errors
Example 2
0
    def test_parse_execution_data_exec_dir(self, project_function, test_utils):
        """_parse_execution_data reads results from an explicit execution dir."""
        _, project = project_function.activate()
        suite = 'suite1'
        test_utils.create_test(project, name='test1')
        test_utils.create_suite(project, name=suite, tests=['test1'])
        ts = test_utils.run_suite(project, suite)
        execution_dir = suite_execution_path(project, suite, ts)

        data = _parse_execution_data(execution_directory=execution_dir)

        assert data['total_tests'] == 1
        assert len(data['tests']) == 1
        assert data['tests'][0]['name'] == 'test1'
Example 3
0
def get_or_generate_junit_report(project, suite, timestamp):
    """Get the JUnit XML report as a string.

    If it does not exist, generate it first.
    Report is generated at:
      <testdir>/projects/<project>/reports/<suite>/<execution>/report.xml
    """
    report_filename = 'report'
    report_directory = suite_execution_path(project, suite, timestamp)
    report_filepath = os.path.join(report_directory, report_filename + '.xml')
    if os.path.isfile(report_filepath):
        # Use a context manager so the file handle is always closed
        # (the original `open(...).read()` leaked the descriptor).
        with open(report_filepath, encoding='utf-8') as f:
            xml_string = f.read()
    else:
        xml_string = generate_junit_report(project, suite, timestamp)
    return xml_string
Example 4
0
    def test_get_execution_data_unfinished_execution(self, project_function, test_utils):
        """Removing report.json makes the execution look unfinished."""
        _, project = project_function.activate()
        suite = 'suite1'
        test_utils.create_test(project, name='test1')
        test_utils.create_suite(project, name=suite, tests=['test1'])
        ts = test_utils.run_suite(project, suite)
        execution_dir = suite_execution_path(project, suite, ts)
        # simulate an in-progress run: the final report.json is not there yet
        os.remove(os.path.join(execution_dir, 'report.json'))

        data = get_execution_data(project=project, suite=suite, execution=ts)

        assert data['has_finished'] is False
        assert data['total_tests'] == 1
        assert len(data['tests']) == 1
        assert data['tests'][0]['name'] == 'test1'
Example 5
0
 def execute_suite(project,
                   suite_name,
                   timestamp=None,
                   ignore_sys_exit=False):
     """Run *suite_name* and return a dict describing the execution.

     When *timestamp* is None a fresh one is generated. If the runner exits
     via SystemExit and *ignore_sys_exit* is True, the exit is swallowed and
     the execution data is still collected.
     """
     if not timestamp:
         timestamp = utils.get_timestamp()
     try:
         timestamp = TestUtils.run_suite(project, suite_name, timestamp)
     except SystemExit:
         if not ignore_sys_exit:
             # bare `raise` re-raises the active exception and keeps its
             # original traceback (idiomatic vs `raise e`)
             raise
     exec_data = get_execution_data(project=project,
                                    suite=suite_name,
                                    execution=timestamp)
     exec_dir = suite_execution_path(project, suite_name, timestamp)
     return {
         'exec_dir': exec_dir,
         'report_path': os.path.join(exec_dir, 'report.json'),
         'suite_name': suite_name,
         'timestamp': timestamp,
         'exec_data': exec_data
     }
Example 6
0
def test_report_directory(project, suite, timestamp, test, test_set):
    """Return the report directory for one test set of an execution."""
    suite_dir = execution_report.suite_execution_path(project, suite, timestamp)
    return os.path.join(suite_dir, test, test_set)
Example 7
0
def generate_junit_report(project_name, suite_name, timestamp, report_folder=None,
                          report_name=None):
    """Generate a report in JUnit XML format.

    Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
    src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd

    The pretty-printed XML bytes are returned and also written to
    <report_folder>/<report_name>.xml (defaults: the suite execution path
    and 'report').
    """
    data = get_execution_data(project=project_name, suite=suite_name, execution=timestamp)

    totals = data['totals_by_result']
    errors = totals.get(Results.CODE_ERROR, 0)
    failures = totals.get(Results.FAILURE, 0) + totals.get(Results.ERROR, 0)
    skipped = totals.get(Results.SKIPPED, 0)

    testsuites_attrs = {
        'name': suite_name,
        'errors': str(errors),
        'failures': str(failures),
        'tests': str(data['total_tests']),
        'time': str(data['net_elapsed_time'])
    }
    testsuites = ET.Element('testsuites', testsuites_attrs)

    # ET.Element copies the attrib dict, so mutating it here only affects
    # the <testsuite> child created below.
    testsuites_attrs['timestamp'] = timestamp
    testsuites_attrs['skipped'] = str(skipped)
    testsuite = ET.SubElement(testsuites, 'testsuite', testsuites_attrs)

    for test in data['tests']:
        test_name = test['full_name']

        # if there are sets - append set name
        # if the sets' name is empty - use the generated name
        if test['data']:
            # BUG FIX: was `is not ""` (identity comparison with a literal,
            # SyntaxWarning on Python 3.8+); equality is the intended check.
            set_name = test['set_name'] if test['set_name'] != "" else test['test_set']
            test_name += '_' + set_name

        test_attrs = {
            'name': test_name,
            'classname': test['full_name'],
            'time': str(test['test_elapsed_time'])
        }
        testcase = ET.SubElement(testsuite, 'testcase', test_attrs)

        # testcase nodes can contain 'failure', 'error', and 'skipped' sub-nodes
        # matching Golem 'error' and 'failure' to JUnit 'failure',
        # 'code error' to 'error', and 'skipped' to 'skipped'
        #
        # A Golem test can have 0 or more errors and 0 or 1 failures.
        # Correct mapping would be one sub-node for each of these.
        # The list of errors for a test is currently not available in the
        # execution json.
        if test['result'] in (Results.CODE_ERROR, Results.FAILURE, Results.ERROR, Results.SKIPPED):
            if test['result'] in [Results.ERROR, Results.FAILURE]:
                error_type = 'failure'
            elif test['result'] == Results.CODE_ERROR:
                error_type = 'error'
            else:
                error_type = 'skipped'
            error_data = {
                'type': test['result'],
                'message': str(test['data'])
            }
            ET.SubElement(testcase, error_type, error_data)

        # add debug log to /test/system-out node
        log_text = get_test_debug_log(project_name, timestamp, test['full_name'],
                                      test['test_set'], suite_name)
        system_out = ET.SubElement(testcase, 'system-out')
        system_out.text = _clean_illegal_xml_chars(log_text)

    xmlstring = ET.tostring(testsuites)
    doc = minidom.parseString(xmlstring).toprettyxml(indent=' ' * 4, encoding='UTF-8')

    if not report_folder:
        report_folder = suite_execution_path(project_name, suite_name, timestamp)
    if not report_name:
        report_name = 'report'
    report_path = os.path.join(report_folder, report_name + '.xml')
    # exist_ok=True already makes this safe; no pre-check needed
    os.makedirs(os.path.dirname(report_path), exist_ok=True)

    try:
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(doc.decode('UTF-8'))
    except IOError as e:
        if e.errno == errno.EACCES:
            print('ERROR: cannot write to {}, PermissionError (Errno 13)'
                  .format(report_path))
        else:
            print('ERROR: There was an error writing to {}'.format(report_path))

    return doc