Example #1
    def create_report(thresholds, prefix):
        path = "/tmp/junit_report_{}.xml".format(prefix)
        test_suites = []
        threshold_test_cases = []
        mapping = {
            'response_time': 'ms',
            'throughput': 'req/s',
            'error_rate': '%'
        }
        for th in thresholds:
            threshold_test_cases.append(
                TestCase(
                    name=f'Threshold for {th["request_name"]}, target - {th["target"]}',
                    stdout=f'Value: {th["metric"]} {mapping.get(th["target"])}. '
                           f'Yellow threshold: {th["yellow"]} {mapping.get(th["target"])}, '
                           f'red threshold: {th["red"]} {mapping.get(th["target"])}')
            )

            if th['threshold'] != 'green':
                threshold_test_cases[-1].add_failure_info(
                    f'{th["target"]} for {th["request_name"]} exceeded '
                    f'{th["threshold"]} threshold of {th.get(th["threshold"])} '
                    f'{mapping.get(th["target"])}. Test result - '
                    f'{str(th["metric"])} {mapping.get(th["target"])}')

        test_suites.append(TestSuite("Thresholds ", threshold_test_cases))
        with open(path, 'w') as f:
            TestSuite.to_file(f, test_suites, prettyprint=True)
        return path
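
A minimal call sketch for the function above, assuming it is reachable as a plain function and that each threshold dict carries the keys the loop reads (request_name, target, metric, yellow, red, threshold); all values below are hypothetical:

# Hypothetical input; the key names mirror the lookups in create_report.
thresholds = [{
    "request_name": "login",
    "target": "response_time",   # must be a key of `mapping` to get a unit
    "metric": 870,               # measured value
    "yellow": 500,
    "red": 1000,
    "threshold": "yellow",       # anything other than "green" records a failure
}]
report_path = create_report(thresholds, prefix="smoke")
# -> "/tmp/junit_report_smoke.xml"
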
Example #2
def generate_reports(args, models):
    """
    Generate Report Portal, JUnit, JSON reports
    :param args: argparse.Namespace
        commandline arguments
    :param models: dict of BaseReport
    """
    repo = os.environ.get('REPO', '')
    branch = os.environ.get('BRANCH', 'develop')
    if repo.endswith('.git'):
        repo = repo[:-len('.git')]
    canonical = Converter(models, repo, branch)
    ti = canonical.get_rp_items()

    if ti:
        if args.reportportal:
            send_items_to_rp(ti)

        junit_items = canonical.get_junit_items()
        if os.path.exists(os.path.dirname(args.output)):
            if junit_items:
                with open(args.output, 'w') as f:
                    TestSuite.to_file(f, [junit_items], prettyprint=False)
        if os.path.exists(os.path.dirname(args.json_output)):
            json_items = canonical.get_json_items()
            if json_items:
                with open(args.json_output, 'w') as f:
                    json.dump(json_items, f, indent=4, sort_keys=True)
    else:
        logger.critical('There are no findings in report.')
Example #3
    def process_report(requests, thresholds):
        functional_test_cases, threshold_test_cases = [], []
        test_suites = []
        for req in requests:
            if requests[req]['KO'] != 0:
                functional_test_cases.append(TestCase(name=requests[req]['request_name'],
                                                      stdout="PASSED: {}. FAILED: {}".format(str(requests[req]['OK']),
                                                                                             str(requests[req]['KO'])),
                                                      stderr="FAILED: {}".format(str(requests[req]['KO']))))
                functional_test_cases[-1].add_failure_info("Request failed {} times".format(str(requests[req]['KO'])))
            else:
                functional_test_cases.append(
                    TestCase(name=requests[req]['request_name'], stdout="PASSED: {}".format(str(requests[req]['OK'])),
                             stderr="FAILED: {}".format(str(requests[req]['KO']))))

        test_suites.append(TestSuite("Functional errors ", functional_test_cases))

        for th in thresholds:
            threshold_test_cases.append(TestCase(name="Threshold for {}, target - {}".format(th['scope'], th['target']),
                                                 stdout="Value: {} {}. Threshold value: {} {}".format(str(th['value']),
                                                                                                th['metric'],
                                                                                                str(th['threshold']),
                                                                                                th['metric'])))
            if th['status'] == 'FAILED':
                threshold_test_cases[-1].add_failure_info("{} for {} exceeded threshold of {} {}. Test result - {} {}"
                                                          .format(th['target'], th['scope'], str(th['threshold']),
                                                                  th['metric'], str(th['value']), th['metric']))

        test_suites.append(TestSuite("Thresholds ", threshold_test_cases))
        with open("/tmp/reports/jmeter.xml", 'w') as f:
            TestSuite.to_file(f, test_suites, prettyprint=True)
Example #4
    def v2_playbook_on_stats(self, stats):
        """
        Implementation of the callback endpoint to be
        fired when a playbook is finished. As we are
        only running one playbook at a time, we know
        we are done logging and can aggregate the jUnit
        test suite and serialize it.

        :param stats: statistics about the run
        """
        suite = TestSuite(self.playbook_name, self.test_cases)

        base_dir = getenv('OCT_CONFIG_HOME',
                          abspath(join(expanduser('~'), '.config')))
        log_dir = abspath(join(base_dir, 'origin-ci-tool', 'logs', 'junit'))
        if not exists(log_dir):
            mkdir(log_dir)

        log_filename = ''
        for _ in range(10):
            log_basename = '{}.xml'.format(''.join(
                choice(ascii_letters) for i in range(10)))
            log_filename = join(log_dir, log_basename)
            if not exists(log_filename):
                # TODO: determine a better way to do this
                break

        with open(log_filename, 'w') as result_file:
            TestSuite.to_file(result_file, [suite])
Example #5
File: dev.py  Project: DarumasLegs/alba
def run_everything_else(xml = False):
    mega_suite = []
    tests = [
        run_test_arakoon_changes,
        run_tests_cli,
        run_test_big_object
    ]
    for x in tests:
        r = x ()
        mega_suite.append(r)

    if is_true(xml):
        from junit_xml import TestSuite, TestCase
        test_cases = []
        for (suite, results) in mega_suite:
            for (name,result, delta) in results:
                test_case = TestCase(name, suite, elapsed_sec = delta)
                if not result:
                    test_case.add_error_info(message = "failed")
                test_cases.append(test_case)

        ts = [TestSuite("run_everything_else", test_cases)]
        with open('./testresults.xml', 'w') as f:
            TestSuite.to_file(f,ts)
    else:
        print mega_suite
Example #6
def main():
    parser = argparse.ArgumentParser(description='dummy test')
    parser.add_argument('-classes', type=int, default=5, help='number of classes')
    parser.add_argument('-testcases', type=int, default=10, help='number of testcases')
    parser.add_argument('-pass_rate', type=int, default=75, help='pass rate')
    parser.add_argument('-error_rate', type=int, default=20, help='error rate')
    parser.add_argument('-failure_rate', type=int, default=10, help='failure rate')
    parser.add_argument('-skip_rate', type=int, default=10, help='skip rate')
    parser.add_argument('-outputfile', type=str, default='test_results.xml', help='output file')
    parser.add_argument('-print', action='store_true', help='print the test results')
    args = parser.parse_args()

    ts = TestSuite(name='my test suite', hostname=platform.node(), timestamp=datetime.now())
    for i in range(args.classes):
        for j in range(args.testcases):
            tc = TestCase(classname=f"myclass{i}",
                          name=f"mytest{j}",
                          elapsed_sec=random.randint(100, 1000),
                          stdout = "stdout output",
                          stderr = "stderr output")
            if random.randint(0, 100) < args.pass_rate:
                if random.randint(0, 100) < args.error_rate:
                    tc.add_error_info(message=f"error {i} {j}", output="error output message", error_type="ERR1")
                elif random.randint(0, 100) < args.failure_rate:
                    tc.add_failure_info(message=f"failure {i} {j}", output="failure output message", failure_type="FAIL1")
                elif random.randint(0, 100) < args.skip_rate:
                    tc.add_skipped_info(message=f"skipped {i} {j}", output="skipped output message")
            ts.test_cases.append(tc)

    # pretty printing is on by default but can be disabled using prettyprint=False
    if args.print:
        print(TestSuite.to_xml_string([ts]))

    with open(args.outputfile, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)
Example #7
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('swift', help='path to swift executable')
  parser.add_argument('output', help='where to write xUnit output')
  args = parser.parse_args()

  test_cases = [
      benchmark(
          TestCase('debug build'),
          [args.swift, 'build', '--product', 'TensorFlow']
      ),
      benchmark(
          TestCase('release build'),
          [args.swift, 'build', '-c', 'release', '--product', 'TensorFlow']
      ),

      # The point of "release build -Onone" is to compile TensorFlow in
      # "-whole-module-optimization" mode without "-O".
      benchmark(
          TestCase('release build -Onone'),
          [args.swift, 'build', '-c', 'release', '--product', 'TensorFlow',
           '-Xswiftc', '-Onone']
      ),
  ]

  test_suite = TestSuite('swift-apis compile time', test_cases)

  with open(args.output, 'w') as f:
    TestSuite.to_file(f, [test_suite])
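
The benchmark() helper is not shown in this example. A minimal sketch of what such a helper could look like, assuming it only times the subprocess and marks the test case as failed on a non-zero exit (the name and behaviour here are assumptions, not the project's actual implementation):

import subprocess
import time

def benchmark(test_case, command):
    # Hypothetical helper: run `command` and record its wall-clock time on `test_case`.
    start = time.time()
    completed = subprocess.run(command, capture_output=True, text=True)
    test_case.elapsed_sec = time.time() - start
    if completed.returncode != 0:
        test_case.add_failure_info(message='command failed', output=completed.stderr)
    return test_case
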
Example #8
def main():
    args = parse_args()
    spawn_func = None
    spawn_arg = None
    if args.port is not None:
        spawn_func = spawn_port
        spawn_arg = args.port
    elif args.executable is not None:
        spawn_func = spawn_exec
        spawn_arg = args.executable
    name = args.name or ""
    global debug
    if args.debug:
        debug = True
    if spawn_func is None:
        debug_print("Please specify port or executable", file=sys.stderr)
        return 1
    env_vars = []
    if args.env_file is not None:
        cfg = ConfigParser()
        cfg.optionxform = str
        with args.env_file as fp:
            cfg.readfp(fp)
        env_vars = cfg.items('global')
    mocks = {}
    if args.mock is not None:
        mocks_mod = imp.load_source('mocks', args.mock)
        mocks = mock_decorators.env
    with spawn_func(spawn_arg) as sp:
        ts = run_tests(sp, name, mocks, env_vars)
        if args.output:
            with open(args.output, "w") as f:
                TestSuite.to_file(f, [ts], encoding='raw_unicode_escape')
        return 0
Example #9
def writeJUnitSLAContent(slas, test, filepath):

    logger = logging.getLogger("root")
    if slas is None: return

    logger.info("writeJUnitSLAContent: " + test.id + " to " + filepath)

    try:
        indicators = slas['indicators']
        perrun = slas['perrun']
        perinterval = slas['perinterval']

        suites = []

        for sla in perrun:
            ts = getSLATestSuites(test, "PerRun", sla)
            suites.append(ts)

        for sla in perinterval:
            ts = getSLATestSuites(test, "PerInterval", sla)
            suites.append(ts)

        logger.debug(TestSuite.to_xml_string(suites))

        with open(filepath, 'w') as f:
            TestSuite.to_file(f, suites, prettyprint=True)

        return True
    except:
        logger.error("Unexpected error at 'writeJUnitSLAContent':",
                     sys.exc_info()[0])

    return False
Example #10
def serialize_and_read(test_suites, to_file=False, prettyprint=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        with os.fdopen(fd, 'w') as f:
            TestSuite.to_file(f, test_suites)

        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        if prettyprint is not None:
            xml_string = TestSuite.to_xml_string(test_suites,
                                                 prettyprint=prettyprint)
        else:
            xml_string = TestSuite.to_xml_string(test_suites)
        print("Serialized XML to string:\n%s" % xml_string)
        xmldoc = minidom.parseString(xml_string)

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
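
A hypothetical round-trip check built on the helper above, showing how the returned (suite element, test case elements) pairs might be asserted against in a test:

from junit_xml import TestSuite, TestCase

suite = TestSuite('demo', [TestCase('passes'), TestCase('also passes')])
(suite_element, case_elements), = serialize_and_read(suite, to_file=True)
assert suite_element.getAttribute('name') == 'demo'
assert len(case_elements) == 2
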
Example #11
File: helper.py  Project: viennt01/backend
def junit_report(results):
    test_cases = []
    for result in results:
        if result["result"] != 'Failed' and result["data"] != 'Failed':
            tc = TestCase(
                name="browser: {}, version: {}".format(result["browser"],
                                                       result["version"]),
                classname='TestCaseNum {}'.format(result["testCaseNum"]),
                elapsed_sec=round(result["elapsedTime"], 1),
                stdout="result: {}, data: {}".format(result["result"],
                                                     result["data"]))
        else:
            tc = TestCase(
                name="browser: {}, version: {}".format(result["browser"],
                                                       result["version"]),
                classname='TestCaseNum {}'.format(result["testCaseNum"]),
                elapsed_sec=round(result["elapsedTime"], 1),
                stdout="result: Failed, data: Failed")
            tc.add_failure_info("Failed")
        test_cases.append(tc)
    ts = TestSuite("autoupdate", test_cases)
    with open(
            os.path.abspath(os.path.dirname(__file__)) + '/junit_output.xml',
            'w') as f:
        TestSuite.to_file(f, [ts])
Example #12
def main():
    args = parse_args()
    spawn_func = None
    spawn_arg = None
    if args.port is not None:
        spawn_func = spawn_port
        spawn_arg = args.port
    elif args.executable is not None:
        spawn_func = spawn_exec
        spawn_arg = args.executable
    name = args.name or ""
    global debug
    if args.debug:
        debug = True
    if spawn_func is None:
        debug_print("Please specify port or executable", file=sys.stderr)
        return 1
    mocks = {}
    if args.mock is not None:
        mocks_mod = imp.load_source('mocks', args.mock)
        mocks = mock_decorators.env
    with spawn_func(spawn_arg) as sp:
        ts = run_tests(sp, name, mocks)
        if args.output:
            with open(args.output, "w") as f:
                TestSuite.to_file(f, [ts])
        return 0
Example #13
File: runner.py  Project: 4m1g0/Arduino
def main():
    args = parse_args()
    spawn_func = None
    spawn_arg = None
    if args.port is not None:
        spawn_func = spawn_port
        spawn_arg = args.port
    elif args.executable is not None:
        spawn_func = spawn_exec
        spawn_arg = args.executable
    name = args.name or ""
    global debug
    if args.debug:
        debug = True
    if spawn_func is None:
        debug_print("Please specify port or executable", file=sys.stderr)
        return 1
    env_vars = []
    if args.env_file is not None:
        cfg = ConfigParser()
        cfg.optionxform = str
        with args.env_file as fp:
            cfg.readfp(fp)
        env_vars = cfg.items('global')
    mocks = {}
    if args.mock is not None:
        mocks_mod = imp.load_source('mocks', args.mock)
        mocks = mock_decorators.env
    with spawn_func(spawn_arg) as sp:
        ts = run_tests(sp, name, mocks, env_vars)
        if args.output:
            with open(args.output, "w") as f:
                TestSuite.to_file(f, [ts])
        return 0
Example #14
def save_result_xml(exit_code, test_cases, result_file_path):
    if exit_code > 0:
        for testcase in test_cases:
            testcase.add_error_info('Test run failed.')
    ts = TestSuite("Azure Arc Conformance Suite", test_cases)
    with open(result_file_path, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=False)
Example #15
def main():
    args = parse_args()
    spawn_func = None
    spawn_arg = None
    if args.port is not None:
        spawn_func = spawn_port
        spawn_arg = args.port
    elif args.executable is not None:
        spawn_func = spawn_exec
        spawn_arg = args.executable
    name = args.name or ""
    global debug
    if args.debug:
        debug = True
    if spawn_func is None:
        debug_print("Please specify port or executable", file=sys.stderr)
        return 1
    mocks = {}
    if args.mock is not None:
        mocks_mod = imp.load_source('mocks', args.mock)
        mocks = mock_decorators.env
    with spawn_func(spawn_arg) as sp:
        ts = run_tests(sp, name, mocks)
        if args.output:
            with open(args.output, "w") as f:
                TestSuite.to_file(f, [ts])
        return 0
Example #16
def file_junit_report(rules, report):
    """
    Output file Junit xml report

    :param rules: set of rules to verify
    :param report: report generated by drheader
    :return: None
    """

    test_cases = []

    for header in rules:
        tc = []
        for item in report:
            if item.get('rule') == header:
                violation = item.copy()
                violation.pop('rule')
                message = violation.pop('message')
                tc = TestCase(name=header + ' :: ' + message)
                tc.add_failure_info(message, violation)
                test_cases.append(tc)
        if not tc:
            tc = TestCase(name=header)
            test_cases.append(tc)

    os.makedirs('reports', exist_ok=True)
    with open('reports/junit.xml', 'w') as f:
        TestSuite.to_file(f,
                          [TestSuite(name='DrHeader', test_cases=test_cases)],
                          prettyprint=False)
        f.close()
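
A call sketch with hypothetical drheader-style inputs, shaped the way the loop above reads them (a list of rule names plus a list of violation dicts carrying 'rule' and 'message' keys):

# Hypothetical inputs; only the 'rule' and 'message' keys are required by the code above.
rules = ['X-Frame-Options', 'Content-Security-Policy']
report = [{
    'rule': 'X-Frame-Options',
    'message': 'Header not included in response',
    'severity': 'high',
}]
file_junit_report(rules, report)   # writes reports/junit.xml
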
Example #17
File: output.py  Project: zuBux/drydock
 def write_xml_file(self):
     test_cases = []
     if os.path.isfile(self.output):
         logging.warn("File exists,deleting...")
         os.remove(self.output)
     with open(self.output, 'a') as f:
         for _, elements in self.log.items():
             for j in elements.viewitems():
                 if j[0] == 'date' or j[0] == 'profile' or j[0] == 'score':
                     # we really don't care
                     pass
                 else:
                     try:
                         test_case = TestCase(j[0], j[1]['descr'], '', '',
                                              '')
                         if j[1]['status'] == 'Fail':
                             test_case.add_failure_info(j[1]['output'])
                         else:
                             test_case = TestCase(j[0], '', '', '', '')
                         test_cases.append(test_case)
                     except KeyError:
                         # the world's smallest violin playin' for KeyError
                         pass
         ts = [TestSuite("Docker Security Benchmarks", test_cases)]
         TestSuite.to_file(f, ts)
Example #18
def generate_junit_report(test_name, total_thresholds, report_name):
    test_cases = []
    file_name = f"junit_report_{report_name}.xml"
    logger.info(f"Generate report {file_name}")

    for item in total_thresholds["details"]:
        message = item['message']
        test_case = TestCase(
            item['name'],
            classname=f"{item['scope']}",
            status="PASSED",
            stdout=f"{item['scope']} {item['name'].lower()} {item['aggregation']} {item['actual']} "
                   f"{item['rule']} {item['expected']}")
        if message:
            test_case.status = "FAILED"
            test_case.add_failure_info(message)
        test_cases.append(test_case)

    ts = TestSuite(test_name, test_cases)
    os.makedirs(f"{REPORT_PATH}/junit", exist_ok=True)
    with open(f"{REPORT_PATH}/junit/{file_name}", 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)

    return file_name
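
A call sketch with a hypothetical total_thresholds payload, mirroring the keys the loop above reads; REPORT_PATH and logger are module-level names the function assumes to exist:

total_thresholds = {
    "details": [{
        "name": "Response time",
        "scope": "login_request",
        "aggregation": "pct95",
        "actual": 870,
        "rule": ">",
        "expected": 500,
        "message": "response time above threshold",   # empty when the check passed
    }]
}
file_name = generate_junit_report("smoke test", total_thresholds, report_name="smoke")
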
Example #19
def serialize_and_read(test_suites, to_file=False, prettyprint=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        with os.fdopen(fd, 'w') as f:
            TestSuite.to_file(f, test_suites)

        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        if prettyprint is not None:
            xml_string = TestSuite.to_xml_string(test_suites, prettyprint=prettyprint)
        else:
            xml_string = TestSuite.to_xml_string(test_suites)
        print("Serialized XML to string:\n%s" % xml_string)
        xmldoc = minidom.parseString(xml_string)

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
Example #20
 def generateJUnitReport(self, lstRunResult, runResultDir):
     # Create a JUnit XML report file using junit-xml 1.4 (pip install junit-xml)
     resultFileName = runResultDir + os.path.sep + 'RunResult.xml'
     previousCaseModuleName = ''
     rowIndex = 0
     lstTestSuites = []
     testSuite = []
     for runResult in lstRunResult:
         #runResult (sheetName, moduleName, testCaseID, runResult, timeElapsedSec, failureMessage)
         #test
         testCaseName = runResult[2]
         className = runResult[1] + '.' + runResult[2]
         timeElapsedSec = runResult[4]
         failureMessage = runResult[5]
         testCase = TestCase(testCaseName, className, timeElapsedSec)
         testCase.add_failure_info(None, failureMessage)
         currTestCaseModuleName = runResult[1]
         if not currTestCaseModuleName == previousCaseModuleName:
             testSuite = TestSuite(currTestCaseModuleName)
             lstTestSuites.append(testSuite)
         testSuite.test_cases.append(testCase)
     #print TestSuite.to_xml_string(lstTestSuites)
     #Write the xml content to result file
     with open(runResultDir + os.path.sep + 'Result.xml', 'w') as f:
         TestSuite.to_file(f, lstTestSuites)
Example #21
def main():
    parser = argparse.ArgumentParser(description='generate report from finviz daily data')
    parser.add_argument('-input', type=str, help='input file')
    parser.add_argument('-output', type=str, default='daily_report.xml', help='output file')
    args = parser.parse_args()

    if args.input is None:
        filename = '../stock_data/raw_daily_finviz/finviz_' + str(datetime.date.today()) + '.csv'
    else:
        filename = args.input

    # generate report
    df = pd.read_csv(filename)
    df.set_index('Ticker', inplace=True)
    df.drop_duplicates(inplace=True)
    ts_list = []
    for sector in df.Sector.unique():
        ts = TestSuite(name=sector)
        df_sector = df[df['Sector'] == sector]
        for industry in df_sector.Industry.unique():
            for ticker in df.index[df['Industry'] == industry]:
                if df.loc[ticker,'Market Cap'].find('B') > 0:
                    print(sector, '-', industry, '-', ticker, '-', df.loc[ticker,'Change'])
                    tc = TestCase(classname=industry,
                                  name=ticker,
                                  elapsed_sec=df.loc[ticker,'Price'],
                                  stdout=df.loc[ticker,'Change'],
                                  stderr=df.loc[ticker,'Market Cap'])
                    if df.loc[ticker,'Change'].find('-') >= 0:
                        tc.add_error_info(message='lower')
                    ts.test_cases.append(tc)
        ts_list.append(ts)

    with open(args.output, 'w') as f:
        TestSuite.to_file(f, ts_list, prettyprint=True)
Example #22
def write_test_results(results, args):
    exit_code = ExitCodes.OK
    test_cases = []
    for test_result in results["result"]:
        test_case = TestCase(test_result.name,
                             elapsed_sec=test_result.elapsed_time,
                             timestamp=test_result.timestamp)
        if test_result.name in args.ignore or test_result.state in [
                TestStates.DISABLED, TestStates.UNCLEAR, TestStates.MANUAL,
                TestStates.NA, TestStates.OPTIONAL
        ]:
            test_case.add_skipped_info(test_result.detail)
        elif test_result.state in [TestStates.WARNING, TestStates.FAIL]:
            test_case.add_failure_info(test_result.detail,
                                       failure_type=str(test_result.state))
            if test_result.state == TestStates.FAIL:
                exit_code = max(exit_code, ExitCodes.FAIL)
            elif test_result.state == TestStates.WARNING:
                exit_code = max(exit_code, ExitCodes.WARNING)
        elif test_result.state != TestStates.PASS:
            test_case.add_error_info(test_result.detail,
                                     error_type=str(test_result.state))
        test_cases.append(test_case)

    ts = TestSuite(results["name"] + ": " + results["base_url"], test_cases)
    with open(args.output, "w") as f:
        TestSuite.to_file(f, [ts], prettyprint=False)
        print(" * Test results written to file: {}".format(args.output))
    return exit_code
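
ExitCodes and TestStates are not shown in this example. A sketch of enumerations that would satisfy the comparisons above (the max() calls need an ordered, int-like type); the member values are assumptions:

from enum import IntEnum

class ExitCodes(IntEnum):
    OK = 0
    WARNING = 1
    FAIL = 2

class TestStates(IntEnum):
    PASS = 0
    WARNING = 1
    FAIL = 2
    DISABLED = 3
    UNCLEAR = 4
    MANUAL = 5
    NA = 6
    OPTIONAL = 7
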
Example #23
def export_junit(retCode: int, msg: str, testName: str = 'Compile'):
    tc = TestCase(testName)

    if retCode != 0:
        tc.add_failure_info(message=msg)
    ts = TestSuite("ERA Tester", [tc])
    with open('result.xml', 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=False)
Example #24
File: alba.py  Project: fxfactorial/alba
def dump_junit_xml():
    from junit_xml import TestSuite, TestCase

    test_cases = [TestCase('testname', 'package.test', 123.345, 'I am stdout!', 'I am stderr!')]
    ts = [ TestSuite("stress test suite", test_cases) ]

    with open('./testresults.xml', mode='w') as f:
        TestSuite.to_file(f, ts)
Example #25
def callattr_ahead_of_alltests(request):
    a = request
    # print ("callattr_ahead_of_alltests called")
    yield "123"
    a = request
    # print('finished everything')

    with open(topPath + '/output.xml', 'w') as f:
        TestSuite.to_file(f, testSuiteList, prettyprint=True)
Example #26
def run_notebook(input_notebook,
                 add_nunit_attachment,
                 parameters=None,
                 kernel_name="ai-architecture-template",
                 root="."):
    """
    Used to run a notebook in the correct directory.

    Parameters
    ----------
    :param input_notebook: Name of Notebook to Test
    :param add_nunit_attachment:
    :param parameters:
    :param kernel_name: Jupyter Kernal
    :param root:
    """

    output_notebook = input_notebook.replace(".ipynb", NOTEBOOK_OUTPUT_EXT)
    try:
        results = pm.execute_notebook(os.path.join(root, input_notebook),
                                      os.path.join(root, output_notebook),
                                      parameters=parameters,
                                      kernel_name=kernel_name)

        for cell in results.cells:
            if cell.cell_type is "code":
                assert not cell.metadata.papermill.exception, "Error in Python Notebook"
    finally:
        with open(os.path.join(root, output_notebook)) as json_file:
            data = json.load(json_file)
            jupyter_output = nbformat.reads(json.dumps(data),
                                            as_version=nbformat.NO_CONVERT)

        export_md(jupyter_output,
                  output_notebook,
                  add_nunit_attachment,
                  file_ext=".txt",
                  root=root)

        regex = r'Deployed (.*) with name (.*). Took (.*) seconds.'

        with open(os.path.join(root, output_notebook), 'r') as file:
            data = file.read()

            test_cases = []
            for group in re.findall(regex, data):
                test_cases.append(
                    TestCase(name=group[0] + " creation",
                             classname=input_notebook,
                             elapsed_sec=float(group[2]),
                             status="Success"))

            ts = TestSuite("my test suite", test_cases)

            with open('test-timing-output.xml', 'w') as f:
                TestSuite.to_file(f, [ts], prettyprint=False)
Example #27
def generate_empty_report(report, test_type, case):
    """
    Generate empty junitxml report if no tests are run
    :param report: CodeBuild Report
    Returns: None
    """
    test_cases = [TestCase(test_type, case, 1, f"Skipped {test_type} on {case}", '')]
    ts = TestSuite(report, test_cases)
    with open(report, "w") as skip_file:
        TestSuite.to_file(skip_file, [ts], prettyprint=False)
Example #28
def dump_junit_xml():
    from junit_xml import TestSuite, TestCase

    test_cases = [
        TestCase('testname', 'package.test', 123.345, 'I am stdout!',
                 'I am stderr!')
    ]
    ts = [TestSuite("stress test suite", test_cases)]

    with open('./testresults.xml', mode='w') as f:
        TestSuite.to_file(f, ts)
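
For readability, the positional TestCase call above maps onto keyword arguments as follows (the leading positional parameters of junit_xml's TestCase are name, classname, elapsed_sec, stdout, stderr):

from junit_xml import TestCase

tc = TestCase(name='testname',
              classname='package.test',
              elapsed_sec=123.345,
              stdout='I am stdout!',
              stderr='I am stderr!')
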
Example #29
def print_result_junit(json_result, sla_json_test, sla_json_interval, sla_json_global, junit_file_path):
    junit_suites = []
    for sla in sla_json_global:
        junit_suites.append(__build_test_suite(json_result, __SLA_global, sla))
    for sla in sla_json_test:
        junit_suites.append(__build_test_suite(json_result, __SLA_test, sla))
    for sla in sla_json_interval:
        junit_suites.append(__build_test_suite(json_result, __SLA_interval, sla))
    with open(junit_file_path, 'w') as stream:
        TestSuite.to_file(stream, junit_suites, prettyprint=True)
    print('Report written to file %s' % junit_file_path)
Example #30
 def generate(self):
   """
   Generates the report
   """
   self._setup()
   for config_name in self.report_info.config_to_test_names_map.keys():
     config_dir = os.path.join(self.report_info.resource_dir, config_name)
     utils.makedirs(config_dir)
     testsuite = self._generate_junit_xml(config_name)
     with open(os.path.join(self.report_info.junit_xml_path, 'zopkio_junit_reports.xml'), 'w') as file:
         TestSuite.to_file(file, [testsuite], prettyprint=False)
Example #31
 def __init__(self, findings, config, report_path='/tmp/reports'):
     test_cases = []
     for finding in findings:
         test_cases.append(finding.junit_item())
     if not test_cases:
         return
     test_name = f'{config["project_name"]}-{config["environment"]}-{config["test_type"]}'
     self.report_name = path.join(report_path, f'TEST-{test_name}.xml')
     with open(self.report_name, 'w') as f:
         TestSuite.to_file(f, [TestSuite(test_name, test_cases)],
                           prettyprint=False)
     print(f"Generated report:  <reports folder>/TEST-{test_name}.xml")
Example #32
def write_test_results(results, endpoints, args):
    if args.output.endswith(".xml"):
        formatted = format_test_results(results, endpoints, "junit", args)
    else:
        formatted = format_test_results(results, endpoints, "json", args)
    with open(args.output, "w") as f:
        if args.output.endswith(".xml"):
            # pretty-print to help out Jenkins (and us humans), which struggles otherwise
            TestSuite.to_file(f, [formatted], prettyprint=True)
        else:
            f.write(formatted)
        print(" * Test results written to file: {}".format(args.output))
    return identify_exit_code(results, args)
Example #33
 def teardown(self):
     if len(self.failed_test):
         test_cases = self.failed_test
     else:
         test_cases = list()
         test_cases.append(TestCase(name='Fuzz test succeed',
                                    status='Pass'))
     if self.junit_report_path:
         with open(self.junit_report_path, 'w') as report_file:
             TestSuite.to_file(report_file,
                               [TestSuite("API Fuzzer", test_cases)],
                               prettyprint=True)
     super(ServerTarget, self).teardown()
Example #34
def generate_junit_report(args, reports, start_time, end_time, total,
                          junit_file):

    from junit_xml import TestSuite, TestCase
    import sys
    junit_log = []

    junit_prop = {}
    junit_prop['Command Line'] = ' '.join(args)
    junit_prop['Python'] = sys.version.replace('\n', '')
    junit_prop['test_groups'] = []
    junit_prop['Host'] = host.label(mode='all')
    junit_prop['passed_count'] = reports.passed
    junit_prop['failed_count'] = reports.failed
    junit_prop['user-input_count'] = reports.user_input
    junit_prop['expected-fail_count'] = reports.expected_fail
    junit_prop['indeterminate_count'] = reports.indeterminate
    junit_prop['benchmark_count'] = reports.benchmark
    junit_prop['timeout_count'] = reports.timeouts
    junit_prop['test-too-long_count'] = reports.test_too_long
    junit_prop['invalid_count'] = reports.invalids
    junit_prop['wrong-version_count'] = reports.wrong_version
    junit_prop['wrong-build_count'] = reports.wrong_build
    junit_prop['wrong-tools_count'] = reports.wrong_tools
    junit_prop['total_count'] = reports.total
    time_delta = end_time - start_time
    junit_prop['average_test_time'] = str(time_delta / total)
    junit_prop['testing_time'] = str(time_delta)

    for name in reports.results:
        result_type = reports.results[name]['result']
        test_parts = name.split('/')
        test_category = test_parts[-2]
        test_name = test_parts[-1]

        junit_result = TestCase(test_name.split('.')[0])
        junit_result.category = test_category
        if result_type == 'failed' or result_type == 'timeout':
            junit_result.add_failure_info(None,
                                          reports.results[name]['output'],
                                          result_type)

        junit_log.append(junit_result)

    ts = TestSuite('RTEMS Test Suite', junit_log)
    ts.properties = junit_prop
    ts.hostname = host.label(mode='all')

    # write out junit log
    with open(junit_file, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)
Example #35
def Run(conf, xmldir):
    logfile = "%s/pacemaker.log" % xmldir
    cluster_env = readClusterConf(conf)

    testcases = []
    #Name of Test Suite
    TestSuiteName = "Running pacemaker-cts"
    #Name of junit xml file
    JunitXML = "junit-pacemakerCTS-ha.xml"

    #Define testcases
    #testcases = [(TestcaseName, TestcaseClass, TestcaseFunction)]
    #eg.
    # ('PacemakerService', 'SetupCluster.service', runPackmakerService)
    #Define function runPackmakerService before using
    cases_def = [("Test Flip", "Flip.PacemakerCTS.service", get_result),
                 ("Test Restart", "Restart.PacemakerCTS.service", get_result),
                 ("Test Stonithd", "Stonithd.PacemakerCTS.service", get_result),
                 ("Test StartOnebyOne", "StartOnebyOne.PacemakerCTS.service", get_result),
                 ("Test SimulStart", "SimulStart.PacemakerCTS.service", get_result),
                 ("Test SimulStop", "SimulStop.PacemakerCTS.service", get_result),
                 ("Test StopOnebyOne", "StopOnebyOne.PacemakerCTS.service", get_result),
                 ("Test RestartOnebyOne", "RestartOnebyOne.PacemakerCTS.service", get_result),
                 ("Test PartialStart", "PartialStart.PacemakerCTS.service", get_result),
                 ("Test Standby", "Standby.PacemakerCTS.service", get_result),
                 ("Test MaintenanceMode", "MaintenanceMode.PacemakerCTS.service", get_result),
                 ("Test ResourceRecover", "ResourceRecover.PacemakerCTS.service", get_result),
                 ("Test ComponentFail", "ComponentFail.PacemakerCTS.service", get_result),
                 ("Test Reattach", "Reattach.PacemakerCTS.service", get_result),
                 ("Test SpecialTest1", "SpecialTest1.PacemakerCTS.service", get_result),
                 ("Test NearQuorumPoint", "NearQuorumPoint.PacemakerCTS.service", get_result),
                 ("Test RemoteBasic", "RemoteBasic.PacemakerCTS.service", get_result),
                 ("Test RemoteStonithd", "RemoteStonithd.PacemakerCTS.service", get_result),
                 ("Test RemoteMigrate", "RemoteMigrate.PacemakerCTS.service", get_result),
                 ("Test RemoteRscFailure","RemoteRscFailure.PacemakerCTS.service", get_result)]

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in cases_def:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)

        if skip_flag:
            skipCase(case, "Pacemaker service of the first node not started.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env, a_case[0], logfile)

    ts = TestSuite(TestSuiteName, testcases)

    with open(xmldir+"/"+JunitXML, "w") as f:
        ts.to_file(f, [ts])
Example #36
def create_junit_results(data, output=None, **kwargs):
    """
    Creates a Junit result, can write to a file if desired, or return xml string. (used by Jenkins)
    input either dict(dict(dict())) or dict(list(dict()))
    dict = {suite: {test: {stderr,stdout,time,class,err,fail,skip}}}
    list = {suite: [(test, {stderr,stdout,time,class,err,fail,skip})]}
    :param data: A dictionary with dict or list hierarchy
    :param output: A filename to write results to  /path/to/file/*.junit.xml
    :return: Returns an XML string if no output, else nothing.
    """
    log.debug('creating junit results: output={}'.format(output))
    stdout_format = kwargs.pop('stdout_format', None)
    test_class = kwargs.pop('test_class', None)
    package = kwargs.pop('package', None)
    from junit_xml import TestSuite, TestCase
    test_suites = []
    for suite, tests in data.items():
        test_cases = []
        for test, result in (tests if isinstance(tests, list) else tests.items()):
            tc = TestCase(test)
            stdout = result.get('stdout')
            if stdout_format is not None and callable(stdout_format):
                if hasattr(stdout_format, 'func_code') and 'kwargs' in stdout_format.func_code.co_varnames:
                    stdout = stdout_format(stdout, suite_name=suite, test_name=test, **kwargs)
                else:
                    stdout = stdout_format(stdout)
            tc.stdout = stdout
            tc.stderr = result.get('stderr')
            tc.elapsed_sec = result.get('time')
            tc.classname = result.get('class', test_class)
            err = result.get('err')
            if err:
                tc.add_error_info(*err if isinstance(err, (list, tuple)) else [err])
            fail = result.get('fail')
            if fail:
                tc.add_failure_info(*fail if isinstance(fail, (list, tuple)) else [fail])
            skip = result.get('skip')
            if skip:
                tc.add_skipped_info(*skip if isinstance(skip, (list, tuple)) else [skip])
            test_cases.append(tc)
        ts = TestSuite(suite, test_cases, package=package)
        test_suites.append(ts)

    if output:
        check_makedir(os.path.dirname(output))
        with open(output, 'w') as out:
            TestSuite.to_file(out, test_suites)
        return output
    else:
        return TestSuite.to_xml_string(test_suites)
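
A call sketch for the dict-of-dicts input shape described in the docstring above (the list shape would be {'suite': [('test', {...}), ...]}); all names and values are hypothetical:

data = {
    "smoke suite": {
        "login works": {"time": 0.8, "class": "smoke.Login"},
        "logout works": {"time": 0.3, "class": "smoke.Login",
                         "fail": ["expected 302, got 500"]},
    }
}
xml_string = create_junit_results(data)                    # returns the XML as a string
create_junit_results(data, output="out/smoke.junit.xml")   # or writes it to a file
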
Example #37
 def render(self, object_pairs_hook=OrderedDict, raise_errors=False):
     if self.output_filename:
         f = open(self.output_filename, 'w')
     try:
         for ret in super(JunitRenderer,
                          self).render(object_pairs_hook, raise_errors):
             yield ret
     finally:
         if self.output_filename:
             with f:
                 TestSuite.to_file(f, [
                     TestSuite(self.env.name or 'default', self.test_cases)
                 ],
                                   prettyprint=False)
Example #38
def serialize_and_read(test_suites,
                       to_file=False,
                       prettyprint=False,
                       encoding=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        os.close(fd)
        with codecs.open(filename, mode='w', encoding=encoding) as f:
            TestSuite.to_file(f,
                              test_suites,
                              prettyprint=prettyprint,
                              encoding=encoding)
        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        xml_string = TestSuite.to_xml_string(test_suites,
                                             prettyprint=prettyprint,
                                             encoding=encoding)
        if PY2:
            assert isinstance(xml_string, unicode)
        print("Serialized XML to string:\n%s" % xml_string)
        if encoding:
            xml_string = xml_string.encode(encoding)
        xmldoc = minidom.parseString(xml_string)

    def remove_blanks(node):
        for x in node.childNodes:
            if x.nodeType == minidom.Node.TEXT_NODE:
                if x.nodeValue:
                    x.nodeValue = x.nodeValue.strip()
            elif x.nodeType == minidom.Node.ELEMENT_NODE:
                remove_blanks(x)

    remove_blanks(xmldoc)
    xmldoc.normalize()

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
Example #39
    def _output_normal(self, test_result):
        # Need refactor
        if test_result == {}:
            print '[what?!] there are not any test result, what is the test case id?'
        else:
            print
            xml_test_suites = []
            summary_dict = self._get_summary_dict(test_result)
            self.report_create_time = str(time.strftime('%Y%m%d_%H%M%S', time.localtime()))
            for case_classify in test_result.keys():
                xml_test_cases = []
                if 'result' in test_result[case_classify].keys():
                    # Generate HTML report
                    self._generate_html_file(
                        case_classify, test_result[case_classify]['result'],
                        test_result[case_classify]['summary'])

                    # Save the result into the CSV
                    self._output_result_to_csv(test_result)

                    # Show in Console
                    print '{0} {1} {2}'.format('='*16, case_classify, '='*16)
                    test_case_result = test_result[case_classify]['result']
                    for case_id in test_case_result.keys():
                        print '[{0}][{1}] {2}, {3}, {4}'.format(case_classify, case_id,
                                                                test_case_result[case_id][0],
                                                                test_case_result[case_id][1],
                                                                str(test_case_result[case_id][2]))

                        # Produce xml file
                        test_case = TestCase(case_id, case_classify, int(test_case_result[case_id][2]))
                        if test_case_result[case_id][0] == 'Fail' or test_case_result[case_id][0] == 'Error':
                            try:
                                test_case.add_failure_info('msg' + test_case_result[case_id][1])
                            except:
                                test_case.add_failure_info('msg' + str(test_case_result[case_id]))

                        xml_test_cases.append(test_case)

                    xml_test_suites.append(TestSuite(case_classify, xml_test_cases))
                    with open(os.path.join(self.latest_reports_dir, case_classify + '.xml'), 'w') as f:
                        TestSuite.to_file(f, xml_test_suites, prettyprint=True)

            self._generate_summary_html_file(summary_dict)
            print '{0} {1} {2}'.format('='*16, 'Summary', '='*16)
            pprint.pprint(summary_dict)
Example #40
File: cli.py  Project: akabos/devpi-builder
    def build_packages(self, packages):
        self._results = []

        for package, version in packages:
            if self._should_package_be_build(package, version):
                logger.info('Building %s %s', package, version)
                try:
                    wheel_file = self._builder(package, version)
                    self._upload_package(package, version, wheel_file)
                    self._log_success(package, version)
                except wheeler.BuildError as e:
                    self._log_fail(e, package, version)

        if self._junit_xml:
            with open(self._junit_xml, 'w') as output:
                test_suite = TestSuite('devpi-builder results', self._results)
                TestSuite.to_file(output, [test_suite])
Example #41
def publish_result(file=None):

    ts = [TestSuite("Manifest launcher", test_cases)]
    if file:
        with open(file,  'w') as f:
            TestSuite.to_file(f, ts, prettyprint=True, encoding='utf-8')
    else:
        err = 0
        pas = 0
        for case in test_cases:
            if case.is_error() or case.is_failure():
                print case.name+':'+case.classname+' ... FAIL'
                print case.failure_message
                err+=1
            else:
                print case.name+':'+case.classname+'... PASS'
                pas+=1
        print "Passed: %d, failed: %d\n" % (pas, err)
Example #42
def parseLog(TestSuiteName, xmlfile, caseset, test_results, cluster_env):
	testcases = []

	#Not necessary to modify the lines below!
	skip_flag = False
	for a_case in caseset:
		case = TestCase(a_case[0], a_case[1])
		testcases.append(case)

		if skip_flag:
			skipCase(case, "This case is not scheduled.")
			continue
		skip_flag = assertCase(case, a_case[2], cluster_env, a_case[0], test_results)

	ts = TestSuite(TestSuiteName, testcases)

	with open(xmlfile, "w") as f:
		ts.to_file(f, [ts])
Example #43
def Run(conf, xmldir):
    cluster_env = readClusterConf(conf)

    testcases = []
    #Name of Test Suite
    TestSuiteName = "Setup HA Cluster"
    #Name of junit xml file
    JunitXML = "junit-setup-ha.xml"

    #Define testcases
    #testcases = [(TestcaseName, TestcaseClass, TestcaseFunction)]
    #eg.
    # ('PacemakerService', 'SetupCluster.service', runPackmakerService)
    #Define function runPackmakerService before using
    cases_def = [('PacemakerService', 'SetupCluster.service', runPackmakerService),
                 ('NodesNumber', 'SetupCluster.nodes', runNodesNumber),
                 ('NodesStatus', 'SetupCluster.nodes', runNodesStatus)]
                 #('ConfigureRes', 'SetupCluster.resources', runConfigureRes)]

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in cases_def:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)
        if skip_flag:
            skipCase(case, "Can not test!",
                     "Pacemaker service of the first node not started.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env)

    ts = TestSuite(TestSuiteName, testcases)

    with open(xmldir+"/"+JunitXML, "w") as f:
        ts.to_file(f, [ts])

    lines = os.popen("ssh root@%s crm_mon -1r" % cluster_env["IP_NODE1"]).readlines()
    with open(xmldir+"/"+"crm_mon", "w") as p:
        p.writelines(lines)

    lines = os.popen("ssh root@%s cat /etc/YaST2/*build*" % cluster_env["IP_NODE1"]).readlines()
    with open(xmldir+"/"+"host-build", "w") as p:
        p.writelines(lines)
Example #44
def serialize_and_read(test_suites, to_file=False, prettyprint=False, encoding=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        os.close(fd)
        with codecs.open(filename, mode='w', encoding=encoding) as f:
            TestSuite.to_file(f, test_suites, prettyprint=prettyprint, encoding=encoding)
        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        xml_string = TestSuite.to_xml_string(
            test_suites, prettyprint=prettyprint, encoding=encoding)
        if PY2:
            assert isinstance(xml_string, unicode)
        print("Serialized XML to string:\n%s" % xml_string)
        if encoding:
            xml_string = xml_string.encode(encoding)
        xmldoc = minidom.parseString(xml_string)

    def remove_blanks(node):
        for x in node.childNodes:
            if x.nodeType == minidom.Node.TEXT_NODE:
                if x.nodeValue:
                    x.nodeValue = x.nodeValue.strip()
            elif x.nodeType == minidom.Node.ELEMENT_NODE:
                remove_blanks(x)
    remove_blanks(xmldoc)
    xmldoc.normalize()

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
Example #45
def Run(conf, xmldir):
    cluster_env = readClusterConf(conf)

    testcases = []
    #Name of Test Suite
    TestSuiteName = "Setup HA Cluster"
    #Name of junit xml file
    JunitXML = "junit-drbd-pacemaker.xml"

    #Define testcases
    #testcases = [(TestcaseName, TestcaseClass, TestcaseFunction)]
    #eg.
    # ('PacemakerService', 'SetupCluster.service', runPackmakerService)
    #Define function runPackmakerService before using
    cases_def = [('drbdPacemakerRes', 'SetupCluster.drbd', configurePacemaker),
                 ('drbdUpToDateBefore', 'DRBD.disks', checkDRBDState),
                 ('drbdPrimaryBefore', 'DRBD.state', checkDRBDRole),
                 ('drbdShowInPacemaker', 'DRBD.pacemaker', checkPacemakerStatus),
                 ('drbdSwitchMaster', 'DRBD.pacemaker', switchDRBD),
                 ('drbdUpToDateAfter', 'DRBD.disks', checkDRBDState),
                 ('drbdPrimaryAfter', 'DRBD.state', checkDRBDRole),
                 ('drbdShowInPacemakerAfter', 'DRBD.pacemaker', checkPacemakerStatus)]
                 #('ConfigureRes', 'SetupCluster.resources', runConfigureRes)]

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in cases_def:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)
        if skip_flag:
            skipCase(case, "Can not test!",
                     "Pacemaker service of the first node not started or didn't configure DRBD.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env)
        sleep(3)

    ts = TestSuite(TestSuiteName, testcases)

    with open(xmldir+"/"+JunitXML, "w") as f:
        ts.to_file(f, [ts])
Example #46
def main():
    args = parse_args()
    spawn_func = None
    spawn_arg = None
    if args.port is not None:
        spawn_func = spawn_port
        spawn_arg = args.port
    elif args.executable is not None:
        spawn_func = spawn_exec
        spawn_arg = args.executable
    name = args.name or ""
    global debug
    if args.debug:
        debug = True
    if spawn_func is None:
        debug_print("Please specify port or executable", file=sys.stderr)
        return 1
    with spawn_func(spawn_arg) as sp:
        ts = run_tests(sp, name)
        if args.output:
            with open(args.output, "w") as f:
                TestSuite.to_file(f, [ts])
        return 0
Example #47
def Run(conf, xmldir):
    cluster_env = readClusterConf(conf)

    testcases = []
    #Name of Test Suite
    TestSuiteName = "Linbit DRBD Test"
    #Name of junit xml file
    JunitXML = "junit-linbit-drbd-test.xml"

    yml_file = "%s/Linbit-drbd-test.yml" % xmldir
    results = readFromYaml(yml_file)

    #Define testcases
    #testcases = [(TestcaseName, TestcaseClass, TestcaseFunction)]
    #eg.
    # ('PacemakerService', 'SetupCluster.service', runPackmakerService)
    #Define function runPackmakerService before using
    cases_def = []
    for c_name in results.keys():
        cases_def.append( (c_name, TESTCASES.get(c_name, CLASSIFY[0]),
                           parseResult) )

    #Not necessary to modify the lines below!
    skip_flag = False
    for a_case in cases_def:
        case = TestCase(a_case[0], a_case[1])
        testcases.append(case)
        if skip_flag:
            skipCase(case, "Can not test!",
                     "Case is skipped due to previous errors.")
            continue
        skip_flag = assertCase(case, a_case[2], cluster_env, a_case[0], results[a_case[0]])

    ts = TestSuite(TestSuiteName, testcases)

    with open(xmldir+"/"+JunitXML, "w") as f:
        ts.to_file(f, [ts])
Example #48
def generate_junit_xml(file_name='junit.xml'):
    results = monitor_runner.get_latest_status()
    print(results)

    test_suites = []

    # test_cases = [TestCase('Test1', 'some.class.name', 123.345, 'I am stdout!', 'I am stderr!')]
    # ts = TestSuite("my test suite", test_cases)
    for testsuite_name in results:
        test_cases = []
        for test_case_name in results[testsuite_name]:
            try:
                name = results[testsuite_name][test_case_name]['name']
            except:
                name = '.'

            success = results[testsuite_name][test_case_name]['success']

            try:
                elapsed_sec = results[testsuite_name][test_case_name]['response_time'].total_seconds()
            except:
                elapsed_sec = -1
            tc = TestCase(
                name=name,
                classname='{}.{}'.format(testsuite_name, test_case_name),
                elapsed_sec=elapsed_sec,
                stdout='{}'.format(success),
            )
            if success is False:
                tc.add_failure_info('Failed')
            test_cases.append(tc)
        ts = TestSuite(testsuite_name, test_cases)
        test_suites.append(ts)
    pass

    with open(file_name, "w", encoding='utf-8-sig') as f:
        TestSuite.to_file(f, test_suites, prettyprint=True)
Example #49
File: output.py  Project: thanasisk/drydock
 def write_xml_file(self):
     test_cases = []
     if os.path.isfile(self.output):
         logging.warn("File exists,deleting...")
         os.remove(self.output)
     with open(self.output, "a") as f:
         for _, elements in self.log.items():
             for j in elements.viewitems():
                 if j[0] == "date" or j[0] == "profile" or j[0] == "score":
                     # we really don't care
                     pass
                 else:
                     try:
                         test_case = TestCase(j[0], j[1]["descr"], "", "", "")
                         if j[1]["status"] == "Fail":
                             test_case.add_failure_info(j[1]["output"])
                         else:
                             test_case = TestCase(j[0], "", "", "", "")
                         test_cases.append(test_case)
                     except KeyError:
                         # the world's smallest violin playin' for KeyError
                         pass
         ts = [TestSuite("Docker Security Benchmarks", test_cases)]
         TestSuite.to_file(f, ts)
Example #50
File: runner.py  Project: sajedts/pipedrive
def write_xml(test_cases, class_name):
    myTestSuite = TestSuite(class_name, test_cases)
    with open(class_name + '.xml', 'w') as f:
        TestSuite.to_file(f, [myTestSuite], prettyprint=True)
Example #51
File: test.py  Project: jandegr/routing-qa
            else:
                navit.zoom_to_route()
            os.system("/usr/bin/import -window root "+gpx_directory+"/"+filename+export_suffix + ".png")
        else:
            print "No route found, last status : " + str(status) + ", duration : "+str(time.time() - start_time)

        test_cases = TestCase(filename, '', time.time() - start_time, '', '')
        if dataMap['success']['source'] == 'gpx' :
            doc = lxml.etree.parse(gpx_directory+"/"+filename+export_suffix + ".gpx")
            rtept_count = doc.xpath('count(//rtept)')
        
            if not(eval(str(rtept_count) + dataMap['success']['operator'] + str(dataMap['success']['value']))):
                test_cases.add_failure_info('navigation items count mismatch [ got ' + \
                    str(rtept_count) + ", expected " + dataMap['success']['operator'] + str(dataMap['success']['value']) ) 
        elif dataMap['success']['source'] == 'dbus' :
            if not(eval(dataMap['success']['item'] + dataMap['success']['operator'] + str(dataMap['success']['value']))):
                test_cases.add_failure_info('dbus result mismatch [ got ' + \
                    str(eval(str(dataMap['success']['item']))) + dataMap['success']['operator'] + str(dataMap['success']['value']) )
    except:
       # We had a failure, like navit crash, dbus timeout, ...

       print "This test failed. Maybe a missing map?"
       test_cases = TestCase(filename, '', time.time() - start_time, '', '')
       test_cases.add_error_info('test failed')
    tests.append(test_cases)

ts = [TestSuite("Navit routing tests", tests)]

with open(junit_directory+'output.xml', 'w+') as f:
    TestSuite.to_file(f, ts, prettyprint=False)
Example #52
    def do_build(self, args):
        try:
            # add arguments
            doParser = self.arg_build()
            doArgs = doParser.parse_args(shlex.split(args))

            # if the help command is called, parse_args returns None object
            if not doArgs:
                return 2

            # --
            template = validate(doArgs.file)
            if template is None:
                return 2

            if doArgs.id:
                myAppliance = self.api.Users(self.login).Appliances().Getall(Query="dbId==" + doArgs.id)
                myAppliance = myAppliance.appliances.appliance
            else:
                # Get template which corresponds to the template file
                myAppliance = (
                    self.api.Users(self.login)
                    .Appliances()
                    .Getall(
                        Query="name=='"
                        + template["stack"]["name"]
                        + "';version=='"
                        + template["stack"]["version"]
                        + "'"
                    )
                )
                myAppliance = myAppliance.appliances.appliance
            if myAppliance is None or len(myAppliance) != 1:
                printer.out("No template found on the plateform")
                return 0
            myAppliance = myAppliance[0]
            rInstallProfile = self.api.Users(self.login).Appliances(myAppliance.dbId).Installprofile("").Getdeprecated()
            if rInstallProfile is None:
                printer.out("No installation found on the template '" + template["stack"]["name"] + "'", printer.ERROR)
                return 0
            try:
                i = 1
                if doArgs.junit is not None:
                    test_results = []
                for builder in template["builders"]:
                    try:
                        printer.out(
                            "Generating '"
                            + builder["type"]
                            + "' image ("
                            + str(i)
                            + "/"
                            + str(len(template["builders"]))
                            + ")"
                        )
                        if doArgs.junit is not None:
                            test = TestCase("Generation " + builder["type"])
                            test_results.append(test)
                            start_time = time.time()

                        format_type = builder["type"]
                        targetFormat = generate_utils.get_target_format_object(self.api, self.login, format_type)
                        if targetFormat is None:
                            printer.out("Builder type unknown: " + format_type, printer.ERROR)
                            return 2

                        myimage = image()
                        myinstallProfile = installProfile()
                        if rInstallProfile.partitionAuto:
                            if "installation" in builder:
                                if "swapSize" in builder["installation"]:
                                    myinstallProfile.swapSize = builder["installation"]["swapSize"]
                                if "diskSize" in builder["installation"]:
                                    myinstallProfile.diskSize = builder["installation"]["diskSize"]
                            else:
                                myinstallProfile.swapSize = rInstallProfile.swapSize
                                myinstallProfile.diskSize = rInstallProfile.partitionTable.disks.disk[0].size

                        func = getattr(
                            generate_utils,
                            "generate_" + generics_utils.remove_special_chars(targetFormat.format.name),
                            None,
                        )
                        if func:
                            myimage, myinstallProfile = func(myimage, builder, myinstallProfile, self.api, self.login)
                        else:
                            printer.out("Builder type unknown: " + format_type, printer.ERROR)
                            return 2

                        if myimage is None:
                            return 2

                        myimage.targetFormat = targetFormat
                        myimage.installProfile = myinstallProfile
                        if doArgs.simulated is not None and doArgs.simulated:
                            myimage.simulated = True
                        if doArgs.forced is not None and doArgs.forced:
                            myimage.forceCheckingDeps = True

                        rImage = self.api.Users(self.login).Appliances(myAppliance.dbId).Images().Generate(myimage)

                        status = rImage.status
                        statusWidget = progressbar_widget.Status()
                        statusWidget.status = status
                        widgets = [Bar(">"), " ", statusWidget, " ", ReverseBar("<")]
                        progress = ProgressBar(widgets=widgets, maxval=100).start()
                        while not (status.complete or status.error or status.cancelled):
                            statusWidget.status = status
                            progress.update(status.percentage)
                            status = (
                                self.api.Users(self.login).Appliances(myAppliance.dbId).Images(rImage.dbId).Status.Get()
                            )
                            time.sleep(2)
                        statusWidget.status = status
                        progress.finish()
                        if status.error:
                            printer.out(
                                "Generation '"
                                + builder["type"]
                                + "' error: "
                                + status.message
                                + "\n"
                                + status.errorMessage,
                                printer.ERROR,
                            )
                            if status.detailedError:
                                printer.out(status.detailedErrorMsg)
                            if doArgs.junit is not None:
                                test.elapsed_sec = time.time() - start_time
                                test.add_error_info("Error", status.message + "\n" + status.errorMessage)
                        elif status.cancelled:
                            printer.out(
                                "Generation '" + builder["type"] + "' canceled: " + status.message, printer.WARNING
                            )
                            if doArgs.junit is not None:
                                test.elapsed_sec = time.time() - start_time
                                test.add_failure_info("Canceled", status.message)
                        else:
                            printer.out("Generation '" + builder["type"] + "' ok", printer.OK)
                            printer.out("Image URI: " + rImage.uri)
                            printer.out("Image Id : " + generics_utils.extract_id(rImage.uri))
                            if doArgs.junit is not None:
                                test.elapsed_sec = time.time() - start_time
                                # the downloadUri already contains downloadKey at the end
                                if rImage.downloadUri is not None:
                                    test.stdout = self.api._url + "/" + rImage.downloadUri
                        i += 1
                    except Exception as e:
                        if is_uforge_exception(e):
                            print_uforge_exception(e)
                            if doArgs.junit is not None and "test_results" in locals() and len(test_results) > 0:
                                test = test_results[len(test_results) - 1]
                                test.elapsed_sec = time.time() - start_time
                                test.add_error_info("Error", get_uforge_exception(e))
                        else:
                            raise
                if doArgs.junit is not None:
                    testName = myAppliance.distributionName + " " + myAppliance.archName
                    ts = TestSuite("Generation " + testName, test_results)
                    with open(doArgs.junit, "w") as f:
                        TestSuite.to_file(f, [ts], prettyprint=False)
                return 0
            except KeyError as e:
                printer.out("unknown error in template file", printer.ERROR)

        except ArgumentParserError as e:
            printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
            self.help_build()
        except KeyboardInterrupt:
            printer.out("\n")
            if generics_utils.query_yes_no("Do you want to cancel the job?"):
                if (
                    "myAppliance" in locals()
                    and "rImage" in locals()
                    and hasattr(myAppliance, "dbId")
                    and hasattr(rImage, "dbId")
                ):
                    self.api.Users(self.login).Appliances(myAppliance.dbId).Images(rImage.dbId).Status.Cancel()
                else:
                    printer.out("Impossible to cancel", printer.WARNING)
            else:
                printer.out("Exiting command")
        except Exception as e:
            print_uforge_exception(e)
            if doArgs.junit is not None and "test_results" in locals() and len(test_results) > 0:
                test = test_results[len(test_results) - 1]
                if "start_time" in locals():
                    elapse = time.time() - start_time
                else:
                    elapse = 0
                test.elapsed_sec = elapse
                test.add_error_info("Error", get_uforge_exception(e))
            else:
                return 2
        finally:
            if (
                "doArgs" in locals()
                and doArgs.junit is not None
                and "test_results" in locals()
                and len(test_results) > 0
            ):
                if "myAppliance" in locals():
                    testName = myAppliance.distributionName + " " + myAppliance.archName
                else:
                    testName = ""
                ts = TestSuite("Generation " + testName, test_results)
                with open(doArgs.junit, "w") as f:
                    TestSuite.to_file(f, [ts], prettyprint=False)
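
do_build above writes the JUnit file on the success path and again from the finally block so that partial results survive an exception; a condensed sketch of that pattern with hypothetical names, not the uforge implementation:

from junit_xml import TestCase, TestSuite

def run_builders(builders, junit_path):
    test_results = []
    try:
        for builder_type in builders:
            case = TestCase("Generation " + builder_type)
            test_results.append(case)
            # ... run the generation here, setting case.elapsed_sec and calling
            # case.add_failure_info() or case.add_error_info() as needed ...
    finally:
        if junit_path and test_results:
            with open(junit_path, "w") as f:
                TestSuite.to_file(f, [TestSuite("Generation", test_results)], prettyprint=False)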
Example #53
class TestHarness:
    def __init__(self, length="any", parallel="any", exclude_tags=None,
                 tags=None, file="", from_file=None,
                 verbose=True, justtest=False,
                 valgrind=False, genpbs=False, exit_fails=False, xml_outfile=""):
        self.tests = []
        self.verbose = verbose
        self.length = length
        self.parallel = parallel
        self.passcount = 0
        self.failcount = 0
        self.warncount = 0
        self.teststatus = []
        self.completed_tests = []
        self.justtest = justtest
        self.valgrind = valgrind
        self.genpbs = genpbs
        self.xml_parser=TestSuite('TestHarness',[])
        self.cwd=os.getcwd()
        self.xml_outfile=xml_outfile
        self.exit_fails=exit_fails

        fluidity_command = self.decide_fluidity_command()

        if file == "":
          print "Test criteria:"
          print "-" * 80
          print "length: ", length
          print "parallel: ", parallel
          print "tags to include: ", tags
          print "tags to exclude: ", exclude_tags
          print "-" * 80
          print 

        # step 1. form a list of all the xml files to be considered.

        xml_files = []
        rootdir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), os.pardir))
        dirnames = []
        testpaths = ["examples", "tests", "longtests"]
        for directory in testpaths:
          if os.path.exists(os.path.join(rootdir, directory)):
            dirnames.append(directory)
        testdirs = [ os.path.join( rootdir, x ) for x in dirnames ]
        for directory in testdirs:
          subdirs = [ os.path.join(directory, x) for x in os.listdir(directory)]
          for subdir in subdirs:
            g = glob.glob1(subdir, "*.xml")
            for xml_file in g:
              try:
                p = etree.parse(os.path.join(subdir, xml_file))
                x = p.getroot()
                if x.tag == "testproblem":
                  xml_files.append(os.path.join(subdir, xml_file))
              except xml.parsers.expat.ExpatError:
                print "Warning: %s mal-formed" % xml_file
                traceback.print_exc()

        # step 2. if the user has specified a particular file, let's use that.

        if file != "":
          files = [file]
        elif from_file:
          try:
            f = open(from_file, 'r')
            files = [line[:-1] for line in f.readlines()]
          except IOError as e:
            sys.stderr.write("Unable to read tests from file %s: %s" % (from_file, e))
            sys.exit(1)
          f.close()
        else:
          files = None

        if files:
          for (subdir, xml_file) in [os.path.split(x) for x in xml_files]:
            temp_files=files
            for file in temp_files:
              if xml_file == file:
                p = etree.parse(os.path.join(subdir,xml_file))
                prob_defn = p.findall("problem_definition")[0]
                prob_nprocs = int(prob_defn.attrib["nprocs"])                
                testprob = regressiontest.TestProblem(filename=os.path.join(subdir, xml_file),
                                                      verbose=self.verbose, replace=self.modify_command_line(prob_nprocs), genpbs=genpbs)
                self.tests.append((subdir, testprob))
                files.remove(xml_file)
          if files != []:
            print "Could not find the following specified test files:"
            for f in files:
              print f
            sys.exit(1)
          return

        # step 3. form a cut-down list of the xml files matching the correct length and the correct parallelism.
        working_set = []
        for xml_file in xml_files:
          p = etree.parse(xml_file)
          prob_defn = p.findall("problem_definition")[0]
          prob_length = prob_defn.attrib["length"]
          prob_nprocs = int(prob_defn.attrib["nprocs"])
          if prob_length == length or (length == "any" and prob_length not in ["special", "long"]):
            if self.parallel == "parallel":
              if prob_nprocs > 1:
                working_set.append(xml_file)
            elif self.parallel == "serial":
              if prob_nprocs == 1:
                working_set.append(xml_file)
            elif self.parallel == "any":
              working_set.append(xml_file)
                
        def get_xml_file_tags(xml_file):
          p = etree.parse(xml_file)
          p_tags = p.findall("tags")
          if len(p_tags) > 0 and not p_tags[0].text is None:
            xml_tags = p_tags[0].text.split()
          else:
            xml_tags = []
          
          return xml_tags
                
        # step 4. if there are any excluded tags, let's exclude tests that have
        # them
        if exclude_tags is not None:
          to_remove = []
          for xml_file in working_set:
            p_tags = get_xml_file_tags(xml_file)
            include = True
            for tag in exclude_tags:
              if tag in p_tags:
                include = False
                break
            if not include:
              to_remove.append(xml_file)
          for xml_file in to_remove:
            working_set.remove(xml_file)

        # step 5. if there are any tags, let's use them
        if tags is not None:
          tagged_set = []
          for xml_file in working_set:
            p_tags = get_xml_file_tags(xml_file)

            include = True
            for tag in tags:
              if tag not in p_tags:
                include = False

            if include is True:
              tagged_set.append(xml_file)
        else:
          tagged_set = working_set

        for (subdir, xml_file) in [os.path.split(x) for x in tagged_set]:
          # need to grab nprocs here to pass through to modify_command_line
          p = etree.parse(os.path.join(subdir,xml_file))
          prob_defn = p.findall("problem_definition")[0]
          prob_nprocs = int(prob_defn.attrib["nprocs"])
          testprob = regressiontest.TestProblem(filename=os.path.join(subdir, xml_file),
                       verbose=self.verbose, replace=self.modify_command_line(prob_nprocs))
          self.tests.append((subdir, testprob))

        if len(self.tests) == 0:
          print "Warning: no matching tests."

    def length_matches(self, filelength):
        if self.length == filelength: return True
        if self.length == "medium" and filelength == "short": return True
        return False

    def decide_fluidity_command(self):
        bindir = os.environ["PATH"].split(':')[0]
        
        for binaryBase in ["dfluidity", "fluidity"]:
          binary = binaryBase
          debugBinary = binaryBase + "-debug"
          try:
              fluidity_mtime = os.stat(os.path.join(bindir, binary))[-2]
              have_fluidity = True
          except OSError:
              fluidity_mtime = 1e30
              have_fluidity = False

          try:
              debug_mtime = os.stat(os.path.join(bindir, debugBinary))[-2]
              have_debug = True
          except OSError:
              debug_mtime = 1e30
              have_debug = False

          if have_fluidity is True or have_debug is True:
            if have_fluidity is False and have_debug is True:
                flucmd = debugBinary

            elif have_fluidity is True and have_debug is False:
                flucmd = binary

            elif fluidity_mtime > debug_mtime:
                flucmd = binary
            else:
                flucmd = debugBinary

            # no longer valid since debugging doesn't change the name - any suitable alternative tests?
            # if self.valgrind is True:
            #  if flucmd != debugBinary:
            #     print "Error: you really should compile with debugging for use with valgrind!"
            #     sys.exit(1)
                
            return flucmd
              
        return None

    def modify_command_line(self, nprocs):
      flucmd = self.decide_fluidity_command()
      print flucmd
      def f(s):
        if not flucmd in [None, "fluidity"]:
          s = s.replace('fluidity ', flucmd + ' ')

        if self.valgrind:
          s = "valgrind --tool=memcheck --leak-check=full -v" + \
              " --show-reachable=yes --num-callers=8 --error-limit=no " + \
              "--log-file=test.log " + s

        # when calling genpbs, genpbs should take care of inserting the right -n <NPROCS> magic
        if not self.genpbs:
          s = s.replace('mpiexec ', 'mpiexec -n %(nprocs)d ' % {'nprocs': nprocs})

        return s

      return f


    def log(self, str):
        if self.verbose == True:
            print str

    def clean(self):
      self.log(" ")
      for t in self.tests:
        os.chdir(t[0])
        t[1].clean()

      return

    def run(self):
        self.log(" ")
        if not self.justtest:
            threadlist=[]
            self.threadtests=regressiontest.ThreadIterator(self.tests)
            for i in range(options.thread_count):
                threadlist.append(threading.Thread(target=self.threadrun)) 
                threadlist[-1].start()
            for t in threadlist:
                '''Wait until all threads finish'''
                t.join()

            count = len(self.tests)
            while True:
                for t in self.tests:
                  if t is None: continue
                  test = t[1]
                  os.chdir(t[0])
                  if test.is_finished():
                      if test.length == "long":
                        test.fl_logs(nLogLines = 20)
                      else:
                        test.fl_logs(nLogLines = 0)
                      try:
                        self.teststatus += test.test()
                      except:
                        self.log("Error: %s raised an exception while testing:" % test.filename)
                        lines = traceback.format_exception( sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2] )
                        for line in lines:
                            self.log(line)
                        self.teststatus += ['F']
                        test.pass_status = ['F']
                      self.completed_tests += [test]
                      self.xml_parser.test_cases+=test.xml_reports
                      t = None
                      count -= 1

                if count == 0: break
                time.sleep(60)                  
        else:
          for t in self.tests:
            test = t[1]
            os.chdir(t[0])
            if self.length == "long":
              test.fl_logs(nLogLines = 20)
            else:
              test.fl_logs(nLogLines = 0)
            self.teststatus += test.test()
            self.completed_tests += [test]

            self.xml_parser.test_cases+=test.xml_reports

        self.passcount = self.teststatus.count('P')
        self.failcount = self.teststatus.count('F')
        self.warncount = self.teststatus.count('W')
        
        if self.failcount + self.warncount > 0:
            print
            print "Summary of test problems with failures or warnings:"
            for t in self.completed_tests:
                if t.pass_status.count('F')+t.warn_status.count('W')>0:
                    print t.filename+':', ''.join(t.pass_status+t.warn_status)
            print
        
        if self.passcount + self.failcount + self.warncount > 0:
            print "Passes:   %d" % self.passcount
            print "Failures: %d" % self.failcount
            print "Warnings: %d" % self.warncount

        if self.xml_outfile!="":
            fd=open(self.cwd+'/'+self.xml_outfile,'w')
            self.xml_parser.to_file(fd,[self.xml_parser])
            fd.close()

        if self.exit_fails:
            sys.exit(self.failcount)

          

    def threadrun(self):
        '''This is the portion of the loop which actually runs the
        tests. This is split out so that it can be threaded'''
        
        for (dir, test) in self.threadtests:
            try:
                runtime=test.run(dir)
                if self.length=="short" and runtime>30.0:
                    self.log("Warning: short test ran for %f seconds which"+
                             " is longer than the permitted 30s run time"%runtime)
                    self.teststatus += ['W']
                    test.pass_status = ['W']
                    
            except:
                self.log("Error: %s raised an exception while running:" % test.filename)
                lines = traceback.format_exception( sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2] )
                for line in lines:
                    self.log(line)
                self.tests.remove((dir, test))
                self.teststatus += ['F']
                test.pass_status = ['F']
                self.completed_tests += [test]

    def list(self):
      for (subdir, test) in self.tests:
        print os.path.join(subdir, test.filename)
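
TestHarness above accumulates report entries by appending to a single TestSuite's test_cases list and only serialises at the end; a minimal sketch of that incremental pattern, with invented case names:

from junit_xml import TestCase, TestSuite

suite = TestSuite('TestHarness', [])
suite.test_cases += [TestCase('flow_past_cylinder'), TestCase('lock_exchange')]  # invented names

with open('harness.xml', 'w') as fd:
    TestSuite.to_file(fd, [suite])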
Example #54
    def test_deploy_openstack_run_tempest(self, underlay, config,
                                          ccpcluster, k8s_actions, rally):
        """Deploy base environment

        Scenario:
        1. Revert snapshot
        2. Install ccp
        3. Deploy environment
        4. Run tempest

        Duration 35 min
        """
        remote = underlay.remote(host=config.k8s.kube_host)
        if settings.REGISTRY == "127.0.0.1:31500":
            k8s_actions.create_registry()
            ccpcluster.build()

        ccpcluster.deploy()
        post_os_deploy_checks.check_jobs_status(k8s_actions.api, timeout=4500)
        post_os_deploy_checks.check_pods_status(k8s_actions.api, timeout=4500)

        # prepare rally
        rally.prepare()
        rally.pull_image()
        rally.run()
        # run tempest
        rally.run_tempest()

        LOG.info('Storing tests results...')
        res_file_name = 'result.json'
        file_prefix = 'results_' + datetime.datetime.now().strftime(
            '%Y%m%d_%H%M%S') + '_'
        file_dst = '{0}/logs/{1}{2}'.format(
            settings.LOGS_DIR, file_prefix, res_file_name)
        remote.download(
            '/home/{0}/rally/{1}'.format(settings.SSH_LOGIN, res_file_name),
            file_dst)
        res = json.load(remote.open('/home/{}/rally/result.json'.format(
            settings.SSH_LOGIN)))
        formatted_tc = []
        failed_cases = [res['test_cases'][case]
                        for case in res['test_cases']
                        if res['test_cases'][case]['status'] == 'fail']
        for case in failed_cases:
            if case:
                tc = TestCase(case['name'])
                tc.add_failure_info(case['traceback'])
                formatted_tc.append(tc)

        skipped_cases = [res['test_cases'][case]
                         for case in res['test_cases']
                         if res['test_cases'][case]['status'] == 'skip']
        for case in skipped_cases:
            if case:
                tc = TestCase(case['name'])
                tc.add_skipped_info(case['reason'])
                formatted_tc.append(tc)

        error_cases = [res['test_cases'][case] for case in res['test_cases']
                       if res['test_cases'][case]['status'] == 'error']

        for case in error_cases:
            if case:
                tc = TestCase(case['name'])
                tc.add_error_info(case['traceback'])
                formatted_tc.append(tc)

        success = [res['test_cases'][case] for case in res['test_cases']
                   if res['test_cases'][case]['status'] == 'success']
        for case in success:
            if case:
                tc = TestCase(case['name'])
                formatted_tc.append(tc)

        ts = TestSuite("tempest", formatted_tc)
        with open('tempest.xml', 'w') as f:
            ts.to_file(f, [ts], prettyprint=False)
        fail_msg = 'Tempest verification fails {}'.format(res)
        assert res['failures'] == 0, fail_msg
Example #55
    def print(self, time_taken):
        super().print(time_taken)
        test_suite = TestSuite("Test Results", self.junit_cases)
        with open("output.xml", "w") as f:
            TestSuite.to_file(f, [test_suite], prettyprint=True)
Example #56
File: dev.py Project: DarumasLegs/alba
    failures = filter(lambda x: not x[1], results)
    t1 = time.time()
    delta = t1 - t0

    if is_true(xml):
        from junit_xml import TestSuite, TestCase
        test_cases = []
        for (name, result, delta) in results:
            test_case = TestCase(name, 'TestCompat', elapsed_sec=delta)
            if not result:
                test_case.add_error_info(message="failed")
            test_cases.append(test_case)

        ts = [TestSuite("compatibility", test_cases)]
        with open('./testresults.xml', 'w') as f:
            TestSuite.to_file(f,ts)
    else:
        print results

@task
def run_test_arakoon_changes ():
    def _inner():
        alba.demo_kill ()
        alba.demo_setup(acf = arakoon_config_file_2)
        # 2 node cluster, and arakoon_0 will be master.
        def stop_node(node_name):
            r = local("pgrep -a arakoon | grep '%s'" % node_name, capture = True)
            info = r.split()
            pid = info[0]
            local("kill %s" % pid)
Example #57
File: test.py Project: VINEELKONERU/ALM
from junit_xml import TestSuite, TestCase

test_cases1 = [TestCase('Test1', 'some.class.name', 123.345, 'I am stdout!', 'I am stderr!')]
test_cases2 = [TestCase('Test2', 'some.class.name', 123.345, 'I am stdout!', 'I am stderr!')]
test_cases3 = [TestCase('Test3', 'some.class.name', 'abc', 'I am stdout!', 'I am stderr!')]
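# note: elapsed_sec is expected to be numeric; a string such as 'abc' raises a TypeError
# once the suite is serialised, which is presumably why output3.xml below stays commented out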

ts1 = TestSuite("my test suite", test_cases1)
ts2 = TestSuite("my test suite", test_cases2)
ts3 = TestSuite("my test suite", test_cases3)
# pretty printing is on by default but can be disabled using prettyprint=False
print(TestSuite.to_xml_string([ts1]))
print(TestSuite.to_xml_string([ts2]))
print(TestSuite.to_xml_string([ts3]))
# you can also write the XML to a file and not pretty print it
with open('output1.xml', 'w') as f1:
    TestSuite.to_file(f1, [ts1], prettyprint=False)
with open('output2.xml', 'w') as f2:
    TestSuite.to_file(f2, [ts2], prettyprint=False)

# with open('output3.xml', 'w') as f3:
#     TestSuite.to_file(f3, [ts3], prettyprint=False)
Example #58
File: julep.py Project: sasvirco/julep
def main():

	levels = {
		'debug': logging.DEBUG,
		'info': logging.INFO,
		'warning': logging.WARNING,
		'error': logging.ERROR,
		'critical': logging.CRITICAL
	}
	

	parser = argparse.ArgumentParser(description = 'HP Operation Orchestration testing tool')
	parser.add_argument('--configfile', default = 'julep.yaml', help='Configfile with hpoo flow testcases')
	parser.add_argument('--loglevel', default = 'INFO', help='FATAL, ERROR, WARNING, INFO, DEBUG')
	parser.add_argument('--logfile', default = 'julep.log', help='Logfile to store messages (Default: julep.log)')
	parser.add_argument('--timeout', default = 3600, type = int, help='The time to wait for flow completion in seconds (Default: 3600 - 1hour)')
	parser.add_argument('--heartbeat', default = 120, type = int, help='Operation Orchestration polling interval (Default: 120 secs)')
	parser.add_argument('--quiet', action='store_true', help='Do not print logging to stdout')
	parser.add_argument('--trustcert', action='store_true', help='Trust self-signed certs')
	parser.add_argument('--configfmt', default = 'yaml', help="Configfile format - json or yaml. Default json.")
	parser.add_argument('--delay', default = 15, type = int, help="Delay in seconds to wait between starting flows")
	parser.add_argument('--junitoutput', default = 'julepout.xml', help="The location of the junit xml output. Default julepout.xml")

	args = parser.parse_args()
	loglevel = levels.get(args.loglevel, logging.NOTSET)
	logging.basicConfig(
		level= args.loglevel,
		format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
		datefmt='%m-%d %H:%M',
		filename= args.logfile,
		filemode='a')

	root = logging.getLogger()

	if args.quiet is False: 
		console = logging.StreamHandler()
		console.setLevel(args.loglevel)
		
		formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
		console.setFormatter(formatter)
		
		root.addHandler(console)

	logging.info("Want some blacksea julep?")
	config = parse_config(args.configfile, args.configfmt)
	config['general']['trustcert'] = args.trustcert

	testcases = {
		'running' : [],
		'finished' : [],
	}

	for flow in config['flows'] :
		test = oo.hpoo(config['general'], flow)
		name = test.run()
		if args.delay is not None:
			logging.info("sleeping between runs for %s secs", args.delay)
			time.sleep(args.delay)
		testcases['running'].append(test)

	root.debug(testcases)

	timeout = int(args.timeout)
	heartbeat = int(args.heartbeat)
    
	while timeout >= heartbeat :
		logging.info('Tracking testcases in running state')
		for test in testcases['running'] :
			
			if test.get_status() == 'RUNNING' :
				continue
			else :	
				testcases['finished'].append(test)
				testcases['running'].remove(test)

			logging.debug(testcases)

		if len(testcases['running']) == 0 :
			root.info("Running testcases list is zero, we are done")
			break

		logging.info('Waiting %s seconds for next heartbeat', str(heartbeat))
		timeout = timeout - heartbeat
		time.sleep(heartbeat)
	
	testresults = []
	logging.info("Generating junit xml output")	

	for test in testcases['finished'] :
		result = test.collect()
		flow = test.get_flow()
		testname = flow['name'] + " " + test.get_run_id()

		logging.info("Asserts for "+flow['name'])
		errors = []

		for k, v in flow['assert'].items():
			if all(item in result[k].items() for item in flow['assert'][k].items()) is False:
				errors.append("Failed to assert " + k)

		if errors:
			tc = TestCase(testname, flow['uuid'], None, '\n'.join(errors))
			tc.add_failure_info('\n'.join(errors))
			logging.info("Adding failed test")
			testresults.append(tc)
		else :
			logging.info("Adding succesfull test")
			duration = int(result['executionSummary']['endTime'] - result['executionSummary']['startTime'])
			tc = TestCase(testname,flow['uuid'],duration/1000.0,result['executionSummary']['resultStatusType'],'')
			testresults.append(tc)

	
	ts = TestSuite('ootests', testresults)
	with open(args.junitoutput, 'w') as f:
		TestSuite.to_file(f, [ts], prettyprint=True)
		logging.info("Writing output to "+args.junitoutput)
Example #59
    def run(self):
        # Clean up result file names
        results = []
        for target in self.targets:
            results.append(target.replace('\\', '/'))

        # Dig through each result file, looking for details on pass/fail:
        for result_file in results:
            with open(result_file, "r") as result_fd:
                lines = [line.rstrip() for line in result_fd.read().split('\n')]
            if len(lines) == 0:
                raise Exception("Empty test result file: %s" % result_file)

            # define an expression for your file reference
            entry_one = Combine(
                oneOf(list(alphas)) + ':/' +
                Word(alphanums + '_-./'))

            entry_two = Word(printables + ' ', excludeChars=':')
            entry = entry_one | entry_two

            delimiter = Literal(':').suppress()
            tc_result_line = Group(entry.setResultsName('tc_file_name') + delimiter + entry.setResultsName(
                'tc_line_nr') + delimiter + entry.setResultsName('tc_name') + delimiter + entry.setResultsName(
                'tc_status') + Optional(
                delimiter + entry.setResultsName('tc_msg'))).setResultsName("tc_line")

            eol = LineEnd().suppress()
            sol = LineStart().suppress()
            blank_line = sol + eol

            tc_summary_line = Group(Word(nums).setResultsName("num_of_tests") + "Tests" + Word(nums).setResultsName(
                "num_of_fail") + "Failures" + Word(nums).setResultsName("num_of_ignore") + "Ignored").setResultsName(
                "tc_summary")
            tc_end_line = Or([Literal("FAIL"), Literal('Ok')]).setResultsName("tc_result")

            # run it and see...
            pp1 = tc_result_line | Optional(tc_summary_line | tc_end_line)
            pp1.ignore(blank_line | OneOrMore("-"))

            result = list()
            for l in lines:
                result.append((pp1.parseString(l)).asDict())
            # delete empty results
            result = filter(None, result)

            tc_list = list()
            for r in result:
                if 'tc_line' in r:
                    tmp_tc_line = r['tc_line']

                    # get only the file name which will be used as the classname
                    file_name = tmp_tc_line['tc_file_name'].split('\\').pop().split('/').pop().rsplit('.', 1)[0]
                    tmp_tc = TestCase(name=tmp_tc_line['tc_name'], classname=file_name)
                    if 'tc_status' in tmp_tc_line:
                        if str(tmp_tc_line['tc_status']) == 'IGNORE':
                            if 'tc_msg' in tmp_tc_line:
                                tmp_tc.add_skipped_info(message=tmp_tc_line['tc_msg'],
                                                        output=r'[File]={0}, [Line]={1}'.format(
                                                            tmp_tc_line['tc_file_name'], tmp_tc_line['tc_line_nr']))
                            else:
                                tmp_tc.add_skipped_info(message=" ")
                        elif str(tmp_tc_line['tc_status']) == 'FAIL':
                            if 'tc_msg' in tmp_tc_line:
                                tmp_tc.add_failure_info(message=tmp_tc_line['tc_msg'],
                                                        output=r'[File]={0}, [Line]={1}'.format(
                                                            tmp_tc_line['tc_file_name'], tmp_tc_line['tc_line_nr']))
                            else:
                                tmp_tc.add_failure_info(message=" ")

                    tc_list.append((str(result_file), tmp_tc))

            for k, v in tc_list:
                try:
                    self.test_suites[k].append(v)
                except KeyError:
                    self.test_suites[k] = [v]
        ts = []
        for suite_name in self.test_suites:
            ts.append(TestSuite(suite_name, self.test_suites[suite_name]))

        with open('result.xml', 'w') as f:
            TestSuite.to_file(f, ts, prettyprint=True, encoding='utf-8')

        return self.report
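
The grammar above targets Unity-style result files; hypothetical input lines it is written to accept (file:line:test:status[:message], followed by a summary line and a final verdict):

tests/test_math.c:42:test_addition:PASS
tests/test_math.c:57:test_division:FAIL:Expected 2 Was 3
tests/test_io.c:13:test_open_missing_file:IGNORE:not implemented yet

3 Tests 1 Failures 1 Ignored
FAIL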
Example #60
                    break
            ts = TestSuite(path.basename(fn), test_cases)
            test_suites.append(ts)
        else:
            failed = 1

deduced_module_name = ''
if args.target == "LINUX":
    deduced_module_name = path.basename(file_list[0]).split('-test-')[0]
elif args.target == "K64F":
    try:
        op = subprocess.check_output(["git", "remote", "-v"])
        deduced_module_name = op.split(" ")[1].split('/')[-1].split('.')[0]
    except:
        deduced_module_name = path.basename(file_list[0]).lower().split('-tests-')[0]

module_name = os.getenv("CIRCLE_PROJECT_REPONAME", deduced_module_name)
reports_dir = os.getenv('CIRCLE_TEST_REPORTS', '')

if reports_dir != '' and not os.path.exists(reports_dir):
    os.makedirs(reports_dir)

report_fn_suffix = "result_junit.xml"
report_fn = path.join(reports_dir, '{}_{}_{}'.format(module_name, args.target, report_fn_suffix))

if len(test_suites) > 0:
    with open(report_fn, "w") as fd:
        TestSuite.to_file(fd, test_suites)

exit(failed)