Ejemplo n.º 1
0
def run_tests(modules, parallel):
    """Run the nose-driven automation suite for each module and print a summary.

    Args:
        modules: iterable of ``(name, _, test_path)`` triples; the middle
            element is unused here.
        parallel: forwarded to the nose runner to enable parallel execution.

    Returns:
        bool: True only if every module's test run passed.
    """
    print('\n\nRun automation')
    print('Modules: {}'.format(', '.join(name for name, _, _ in modules)))

    # create test results folder
    test_results_folder = get_test_results_dir(with_timestamp=True, prefix='tests')

    # get test runner
    run_nose = get_nose_runner(test_results_folder, xunit_report=True, exclude_integration=True,
                               parallel=parallel)

    # run tests
    passed = True
    module_results = []
    for name, _, test_path in modules:
        result, start, end, _ = run_nose(name, test_path)
        passed &= result
        # BUG FIX: '%D' is strftime's month/day/year shorthand (%m/%d/%y),
        # which mangled the start-time column; '%S' (seconds) was intended.
        record = (name, start.strftime('%H:%M:%S'), str((end - start).total_seconds()),
                  'Pass' if result else 'Fail')

        module_results.append(record)

    print_records(module_results, title='test results')

    return passed
Ejemplo n.º 2
0
def run_code_coverage(modules):
    """Run each module's tests under coverage and collect the data files.

    Args:
        modules: iterable of ``(name, _, test_path)`` triples; the middle
            element is unused here.

    Side effects:
        Moves each run's ``.coverage`` file (written to the CWD by the
        coverage tool) into the results folder as ``.coverage.<name>``.
    """
    # hoisted out of the loop: re-importing per iteration was redundant
    import shutil

    # create test results folder
    test_results_folder = automation_path.get_test_results_dir(with_timestamp=True, prefix="cover")

    # get test runner (coverage collection is incompatible with parallel runs)
    run_nose = automation_tests.get_nose_runner(test_results_folder, code_coverage=True, parallel=False)

    # run code coverage on each project
    for name, _, test_path in modules:
        with CoverageContext():
            run_nose(name, test_path)

        # suffix the data file per module so successive runs don't clobber it
        shutil.move(".coverage", os.path.join(test_results_folder, ".coverage.{}".format(name)))
Ejemplo n.º 3
0
def run_command_coverage(modules):
    """Run tests while recording executed CLI commands, then dump the list.

    Args:
        modules: iterable of ``(name, path)`` pairs to run under nose.

    Side effects:
        Writes command-coverage data under a timestamped results dir and
        echoes the full executed-command list to stdout.
    """
    test_result_dir = automation_path.get_test_results_dir(with_timestamp=True, prefix="cmdcov")
    data_file = os.path.join(test_result_dir, "cmdcov.data")

    # run tests to generate executed command list
    run_nose = automation_tests.get_nose_runner(test_result_dir, parallel=False)

    with CommandCoverageContext(data_file) as context:
        for name, path in modules:
            run_nose(name, path)

        print("BEGIN: Full executed commands list")
        # BUG FIX: the original iterated a bare open() and leaked the file
        # handle; use a context manager so it is closed deterministically.
        with open(context.coverage_file_path) as coverage_file:
            for line in coverage_file:
                sys.stdout.write(line)
        print("END: Full executed commands list")
Ejemplo n.º 4
0
def run_tests(modules, parallel, run_live):
    """Run the automation suite per module, optionally against live services.

    Args:
        modules: iterable of ``(name, _, test_path)`` triples.
        parallel: forwarded to the nose runner for parallel execution.
        run_live: when true, sets AZURE_CLI_TEST_RUN_LIVE and grants each
            test process a longer timeout.

    Returns:
        bool: True only if every module's run passed.
    """
    print('\n\nRun automation')
    print('Modules: {}'.format(', '.join(name for name, _, _ in modules)))

    # timestamped folder to collect xunit reports
    results_dir = get_test_results_dir(with_timestamp=True, prefix='tests')

    # live runs talk to real services: flag them via the environment
    if run_live:
        os.environ['AZURE_CLI_TEST_RUN_LIVE'] = 'True'

    # live tests are slower, so allow a much larger per-process timeout
    timeout = 3600 if run_live else 600
    nose = get_nose_runner(results_dir, xunit_report=True, exclude_integration=True,
                           parallel=parallel, process_timeout=timeout)

    # execute each module's tests, accumulating the overall verdict
    all_passed = True
    for name, _, test_path in modules:
        print('\n\n==== Test module {} ===='.format(name))
        module_passed, test_result = nose([test_path])
        all_passed &= module_passed
        print('==== Test module {} result ====\n{}\n==========\n'.format(name, test_result))

    return all_passed
Ejemplo n.º 5
0
def run_tests(modules, parallel, run_live, tests):
    """Run module tests, choosing nose or unittest based on --test.

    Args:
        modules: iterable of ``(name, _, test_path)`` triples.
        parallel: forwarded to the nose runner for parallel execution.
        run_live: when true, sets AZURE_TEST_RUN_LIVE and extends timeouts.
        tests: explicit test names; when given, unittest drives a single
            test instead of nose running whole modules.

    Returns:
        The runner's result for the collected module paths.
    """
    print('Run automation')
    print('Modules: {}'.format(', '.join(name for name, _, _ in modules)))

    # timestamped folder to collect results
    results_dir = get_test_results_dir(with_timestamp=True, prefix='tests')

    # flag live runs via the environment
    if run_live:
        os.environ['AZURE_TEST_RUN_LIVE'] = 'True'

    if tests:
        # the --test is given, use unittest to run single test
        print('Drive test by unittest')
        runner = get_unittest_runner(tests)
    else:
        # the --test is not given, use nosetests to run entire module
        print('Drive test by nosetests')
        timeout = 3600 if run_live else 600
        runner = get_nose_runner(results_dir, parallel=parallel, process_timeout=timeout)

    # hand every module's test path to the chosen runner
    module_paths = [path for _, _, path in modules]
    return runner(module_paths)