Example #1
def run_suite(opts, path, display_name, suffix='[!_]*.py'):
    '''
    Execute a unit test suite
    '''
    loader = saltunittest.TestLoader()
    if opts.name:
        tests = loader.loadTestsFromName(display_name)
    else:
        tests = loader.discover(path, suffix, TEST_DIR)

    header = '{0} Tests'.format(display_name)
    print_header('Starting {0}'.format(header))

    if opts.xmlout:
        # The XML runner writes its reports into XML_OUTPUT_DIR, so make
        # sure the directory exists before running.
        if not os.path.isdir(XML_OUTPUT_DIR):
            os.makedirs(XML_OUTPUT_DIR)
        runner = xmlrunner.XMLTestRunner(
            output=XML_OUTPUT_DIR,
            stream=sys.stdout,
            verbosity=opts.verbosity
        ).run(tests)
        TEST_RESULTS.append((header, runner))
    else:
        runner = saltunittest.TextTestRunner(
            stream=sys.stdout,
            verbosity=opts.verbosity
        ).run(tests)
        TEST_RESULTS.append((header, runner))

    return runner.wasSuccessful()
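
For context, here is a minimal usage sketch (not part of the original source): run_suite only needs an options object exposing name, xmlout and verbosity attributes, so a hand-built namespace is enough to drive it. TEST_DIR, XML_OUTPUT_DIR, saltunittest and TEST_RESULTS are assumed to be defined at module level as in the snippet above, and the 'tests/unit' path is purely illustrative.

from types import SimpleNamespace

# Hypothetical options object; the real runner builds this via parse_opts()
# (see the parse_opts examples further down).
opts = SimpleNamespace(
    name=[],        # empty -> fall through to loader.discover()
    xmlout=False,   # plain-text runner instead of XML reports
    verbosity=2,    # forwarded to the unittest runner
)
ok = run_suite(opts, 'tests/unit', 'Unit')
print('Suite passed' if ok else 'Suite had failures')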
Example #2
def run_integration_tests(opts):
    '''
    Execute the integration tests suite
    '''
    if opts.unit and not (opts.runner or opts.state or opts.module or opts.client):
        return [True]
    smax_open_files, hmax_open_files = resource.getrlimit(resource.RLIMIT_NOFILE)
    if smax_open_files < REQUIRED_OPEN_FILES:
        print('~' * PNUM)
        print('Max open files setting is too low ({0}) for running the tests'.format(smax_open_files))
        print('Trying to raise the limit to {0}'.format(REQUIRED_OPEN_FILES))
        if hmax_open_files < 4096:
            hmax_open_files = 4096  # Decent default?
        try:
            resource.setrlimit(
                resource.RLIMIT_NOFILE,
                (REQUIRED_OPEN_FILES, hmax_open_files)
            )
        except Exception as err:
            print('ERROR: Failed to raise the max open files setting -> {0}'.format(err))
            print('Please issue the following command on your console:')
            print('  ulimit -n {0}'.format(REQUIRED_OPEN_FILES))
            sys.exit(1)
        finally:
            print('~' * PNUM)

    print_header('Setting up Salt daemons to execute tests', top=False)
    status = []
    if not any([opts.client, opts.module, opts.runner,
                opts.shell, opts.state, opts.name]):
        return status
    with TestDaemon(opts=opts):
        if opts.name:
            for name in opts.name:
                results = run_suite(opts, '', name)
                status.append(results)
        if opts.runner:
            status.append(run_integration_suite(opts, 'runners', 'Runner'))
        if opts.module:
            status.append(run_integration_suite(opts, 'modules', 'Module'))
        if opts.state:
            status.append(run_integration_suite(opts, 'states', 'State'))
        if opts.client:
            status.append(run_integration_suite(opts, 'client', 'Client'))
        if opts.shell:
            status.append(run_integration_suite(opts, 'shell', 'Shell'))
    return status
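
As a stand-alone illustration of the rlimit pattern used above (the limit value here is illustrative; the real runner uses REQUIRED_OPEN_FILES), in a slightly more defensive variant that never asks for more than the current hard limit:

import resource

REQUIRED = 2048  # illustrative value
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < REQUIRED:
    # An unprivileged process may raise its soft limit only up to the hard
    # limit; raising the hard limit itself generally requires root.
    resource.setrlimit(resource.RLIMIT_NOFILE, (min(REQUIRED, hard), hard))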
Example #3
def run_suite(opts, path, display_name, suffix='[!_]*.py'):
    '''
    Execute a unit test suite
    '''
    loader = saltunittest.TestLoader()
    if opts.name:
        tests = loader.loadTestsFromName(display_name)
    else:
        tests = loader.discover(path, suffix, TEST_DIR)

    header = '{0} Tests'.format(display_name)
    print_header('Starting {0}'.format(header))

    if opts.xmlout:
        runner = xmlrunner.XMLTestRunner(output='test-reports').run(tests)
    else:
        runner = saltunittest.TextTestRunner(
            verbosity=opts.verbosity).run(tests)
        TEST_RESULTS.append((header, runner))
    return runner.wasSuccessful()
Example #4
def run_suite(opts, path, display_name, suffix='[!_]*.py'):
    '''
    Execute a unit test suite
    '''
    loader = saltunittest.TestLoader()
    if opts.name:
        tests = loader.loadTestsFromName(display_name)
    else:
        tests = loader.discover(path, suffix, TEST_DIR)

    header = '{0} Tests'.format(display_name)
    print_header('Starting {0}'.format(header))

    if opts.xmlout:
        runner = xmlrunner.XMLTestRunner(output='test-reports').run(tests)
    else:
        runner = saltunittest.TextTestRunner(
            verbosity=opts.verbosity
        ).run(tests)
        TEST_RESULTS.append((header, runner))
    return runner.wasSuccessful()
Example #5
def parse_opts():
    '''
    Parse command line options for running specific tests
    '''
    parser = optparse.OptionParser()

    parser.add_option(
        '--sysinfo',
        default=False,
        action='store_true',
        help='Print some system information.'
    )

    tests_select_group = optparse.OptionGroup(
        parser,
        "Tests Selection Options",
        "Select which tests are to be executed"
    )
    tests_select_group.add_option(
        '-m',
        '--module',
        '--module-tests',
        dest='module',
        default=False,
        action='store_true',
        help='Run tests for modules'
    )
    tests_select_group.add_option(
        '-S',
        '--state',
        '--state-tests',
        dest='state',
        default=False,
        action='store_true',
        help='Run tests for states'
    )
    tests_select_group.add_option(
        '-c',
        '--client',
        '--client-tests',
        dest='client',
        default=False,
        action='store_true',
        help='Run tests for client'
    )
    tests_select_group.add_option(
        '-s',
        '--shell',
        dest='shell',
        default=False,
        action='store_true',
        help='Run shell tests'
    )
    tests_select_group.add_option(
        '-r',
        '--runner',
        dest='runner',
        default=False,
        action='store_true',
        help='Run runner tests'
    )
    tests_select_group.add_option(
        '-u',
        '--unit',
        '--unit-tests',
        dest='unit',
        default=False,
        action='store_true',
        help='Run unit tests'
    )
    tests_select_group.add_option(
        '-n',
        '--name',
        dest='name',
        action='append',
        default=[],
        help='Specific test name to run'
    )
    tests_select_group.add_option(
        '--run-destructive',
        action='store_true',
        default=False,
        help='Run destructive tests. These tests can include adding or '
             'removing users from your system for example. Default: %default'
    )
    parser.add_option_group(tests_select_group)

    fs_cleanup_options_group = optparse.OptionGroup(
        parser, "File system cleanup Options"
    )
    fs_cleanup_options_group.add_option(
        '--clean',
        dest='clean',
        default=True,
        action='store_true',
        help='Clean up test environment before and after integration '
             'testing (default behaviour)'
    )
    fs_cleanup_options_group.add_option(
        '--no-clean',
        dest='clean',
        action='store_false',
        help='Don\'t clean up test environment before and after integration '
             'testing (speed up test process)'
    )
    parser.add_option_group(fs_cleanup_options_group)

    output_options_group = optparse.OptionGroup(parser, "Output Options")
    output_options_group.add_option(
        '-v',
        '--verbose',
        dest='verbosity',
        default=1,
        action='count',
        help='Verbose test runner output'
    )
    output_options_group.add_option(
        '-x',
        '--xml',
        dest='xmlout',
        default=False,
        action='store_true',
        help='XML test runner output (Output directory: {0})'.format(
            XML_OUTPUT_DIR
        )
    )
    output_options_group.add_option(
        '--no-report',
        default=False,
        action='store_true',
        help='Do NOT show the overall tests result'
    )
    output_options_group.add_option(
        '--coverage',
        default=False,
        action='store_true',
        help='Run tests and report code coverage'
    )
    output_options_group.add_option(
        '--no-colors',
        '--no-colours',
        default=False,
        action='store_true',
        help='Disable colour printing.'
    )
    parser.add_option_group(output_options_group)

    options, _ = parser.parse_args()

    if options.xmlout and xmlrunner is None:
        parser.error(
            '\'--xml\' is not available. The xmlrunner library is not '
            'installed.'
        )

    if options.coverage and code_coverage is None:
        parser.error(
            'Cannot run tests with coverage report. '
            'Please install coverage>=3.5.3'
        )
    elif options.coverage:
        coverage_version = tuple(
            [int(part) for part in
             re.search(r'([0-9.]+)', coverage.__version__).group(0).split('.')]
        )
        if coverage_version < (3, 5, 3):
            # Should we just print the error instead of exiting?
            parser.error(
                'Versions lower than 3.5.3 of the coverage library are known '
                'to produce incorrect results. Please consider upgrading...'
            )

        if any((options.module, options.client, options.shell, options.unit,
                options.state, options.runner, options.name,
                os.geteuid() != 0, not options.run_destructive)):
            parser.error(
                'No sense in generating the tests coverage report when not '
                'running the full test suite, including the destructive '
                'tests, as \'root\'. It would only produce incorrect '
                'results.'
            )

        # Update the environment so that any subprocess started during the
        # tests is also included in the coverage report
        os.environ['COVERAGE_PROCESS_START'] = '1'

    # Setup logging
    formatter = logging.Formatter(
        '%(asctime)s,%(msecs)03.0f [%(name)-5s:%(lineno)-4d]'
        '[%(levelname)-8s] %(message)s',
        datefmt='%H:%M:%S'
    )
    logfile = os.path.join(tempfile.gettempdir(), 'salt-runtests.log')
    filehandler = logging.FileHandler(
        mode='w',           # Not preserved between re-runs
        filename=logfile
    )
    filehandler.setLevel(logging.DEBUG)
    filehandler.setFormatter(formatter)
    logging.root.addHandler(filehandler)
    logging.root.setLevel(logging.DEBUG)

    print_header('Logging tests on {0}'.format(logfile), bottom=False)
    print_header(
        'Test suite is running under PID {0}'.format(os.getpid()), bottom=False
    )

    # With greater verbosity we can also log to the console
    if options.verbosity > 2:
        consolehandler = logging.StreamHandler(sys.stderr)
        consolehandler.setLevel(logging.INFO)       # -vv
        consolehandler.setFormatter(formatter)
        handled_levels = {
            3: logging.DEBUG,   # -vvv
            4: logging.TRACE,   # -vvvv   (salt's custom log level)
            5: logging.GARBAGE  # -vvvvv  (salt's custom log level)
        }
        if options.verbosity > 3:
            consolehandler.setLevel(
                handled_levels.get(
                    options.verbosity,
                    5 if options.verbosity > 5 else 3
                )
            )

        logging.root.addHandler(consolehandler)

    os.environ['DESTRUCTIVE_TESTS'] = str(options.run_destructive)

    if not any((options.module, options.client,
                options.shell, options.unit,
                options.state, options.runner,
                options.name)):
        options.module = True
        options.client = True
        options.shell = True
        options.unit = True
        options.runner = True
        options.state = True
    return options
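
For context, a minimal sketch of the assumed wiring (the actual entry point is not shown in these snippets) between the parsed options and the runner functions from the other examples:

if __name__ == '__main__':
    opts = parse_opts()
    overall_status = []
    # Integration suites (see run_integration_tests above)
    overall_status.extend(run_integration_tests(opts))
    # Unit suites; run_unit_tests appears in the report example below
    overall_status.extend(run_unit_tests(opts))
    sys.exit(1 if overall_status.count(False) > 0 else 0)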
Example #6
    status = run_unit_tests(opts)
    overall_status.extend(status)
    false_count = overall_status.count(False)

    if opts.no_report:
        if opts.coverage:
            code_coverage.stop()
            code_coverage.save()

        if false_count > 0:
            sys.exit(1)
        else:
            sys.exit(0)

    print()
    print_header(u'  Overall Tests Report  ', sep=u'=', centered=True, inline=True)

    no_problems_found = True
    for (name, results) in TEST_RESULTS:
        if not results.failures and not results.errors and not results.skipped:
            continue

        no_problems_found = False

        print_header(u'*** {0}  '.format(name), sep=u'*', inline=True)
        if results.skipped:
            print_header(u' --------  Skipped Tests  ', sep='-', inline=True)
            maxlen = len(max([tc.id() for (tc, reason) in results.skipped], key=len))
            fmt = u'   -> {0: <{maxlen}}  ->  {1}'
            for tc, reason in results.skipped:
                print(fmt.format(tc.id(), reason, maxlen=maxlen))
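
A quick illustration, with made-up data, of the alignment trick used above: measure the longest test id once, then pad every row to that width through a parameterized format spec:

skipped = [('test_pkg.test_install', 'requires root'),
           ('test_user.test_add', 'destructive tests disabled')]
maxlen = len(max([tc_id for (tc_id, reason) in skipped], key=len))
fmt = '   -> {0: <{maxlen}}  ->  {1}'
for tc_id, reason in skipped:
    print(fmt.format(tc_id, reason, maxlen=maxlen))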
Example #7
def parse_opts():
    '''
    Parse command line options for running specific tests
    '''
    parser = optparse.OptionParser()
    parser.add_option('-m',
                      '--module',
                      '--module-tests',
                      dest='module',
                      default=False,
                      action='store_true',
                      help='Run tests for modules')
    parser.add_option('-S',
                      '--state',
                      '--state-tests',
                      dest='state',
                      default=False,
                      action='store_true',
                      help='Run tests for states')
    parser.add_option('-c',
                      '--client',
                      '--client-tests',
                      dest='client',
                      default=False,
                      action='store_true',
                      help='Run tests for client')
    parser.add_option('-s',
                      '--shell',
                      dest='shell',
                      default=False,
                      action='store_true',
                      help='Run shell tests')
    parser.add_option('-r',
                      '--runner',
                      dest='runner',
                      default=False,
                      action='store_true',
                      help='Run runner tests')
    parser.add_option('-u',
                      '--unit',
                      '--unit-tests',
                      dest='unit',
                      default=False,
                      action='store_true',
                      help='Run unit tests')
    parser.add_option('-v',
                      '--verbose',
                      dest='verbosity',
                      default=1,
                      action='count',
                      help='Verbose test runner output')
    parser.add_option('-x',
                      '--xml',
                      dest='xmlout',
                      default=False,
                      action='store_true',
                      help='XML test runner output')
    parser.add_option('-n',
                      '--name',
                      dest='name',
                      action='append',
                      default=[],
                      help='Specific test name to run')
    parser.add_option('--clean',
                      dest='clean',
                      default=True,
                      action='store_true',
                      help=('Clean up test environment before and after '
                            'integration testing (default behaviour)'))
    parser.add_option(
        '--no-clean',
        dest='clean',
        action='store_false',
        help=('Don\'t clean up test environment before and after '
              'integration testing (speed up test process)'))
    parser.add_option(
        '--run-destructive',
        action='store_true',
        default=False,
        help='Run destructive tests. These tests can include adding or '
        'removing users from your system for example. Default: '
        '%default')
    parser.add_option('--no-report',
                      default=False,
                      action='store_true',
                      help='Do NOT show the overall tests result')
    parser.add_option('--coverage',
                      default=False,
                      action='store_true',
                      help='Run tests and report code coverage')
    parser.add_option('--sysinfo',
                      default=False,
                      action='store_true',
                      help='Print some system information.')
    parser.add_option('--no-colors',
                      default=False,
                      action='store_true',
                      help='Disable colour printing.')

    options, _ = parser.parse_args()

    if options.xmlout and xmlrunner is None:
        parser.error('\'--xml\' is not available. The xmlrunner library '
                     'is not installed.')

    if options.coverage and code_coverage is None:
        parser.error('Cannot run tests with coverage report. '
                     'Please install coverage>=3.5.3')
    elif options.coverage:
        coverage_version = tuple([
            int(part) for part in re.search(
                r'([0-9.]+)', coverage.__version__).group(0).split('.')
        ])
        if coverage_version < (3, 5, 3):
            # Should we just print the error instead of exiting?
            parser.error(
                'Versions lower than 3.5.3 of the coverage library are known '
                'to produce incorrect results. Please consider upgrading...')

        if any((options.module, options.client, options.shell, options.unit,
                options.state, options.runner, options.name,
                os.geteuid() != 0, not options.run_destructive)):
            parser.error(
                'No sense in generating the tests coverage report when not '
                'running the full test suite, including the destructive '
                'tests, as \'root\'. It would only produce incorrect '
                'results.')

        # Update the environment so that any subprocess started during the
        # tests is also included in the coverage report
        os.environ['COVERAGE_PROCESS_START'] = '1'

    # Setup logging
    formatter = logging.Formatter(
        '%(asctime)s,%(msecs)03.0f [%(name)-5s:%(lineno)-4d]'
        '[%(levelname)-8s] %(message)s',
        datefmt='%H:%M:%S')
    logfile = os.path.join(tempfile.gettempdir(), 'salt-runtests.log')
    filehandler = logging.FileHandler(
        mode='w',  # Not preserved between re-runs
        filename=logfile)
    filehandler.setLevel(logging.DEBUG)
    filehandler.setFormatter(formatter)
    logging.root.addHandler(filehandler)
    logging.root.setLevel(logging.DEBUG)

    print_header('Logging tests on {0}'.format(logfile), bottom=False)
    print_header('Test suite is running under PID {0}'.format(os.getpid()),
                 bottom=False)

    # With greater verbosity we can also log to the console
    if options.verbosity > 2:
        consolehandler = logging.StreamHandler(sys.stderr)
        consolehandler.setLevel(logging.INFO)  # -vv
        consolehandler.setFormatter(formatter)
        if options.verbosity > 3:
            consolehandler.setLevel(logging.DEBUG)  # -vvv

        logging.root.addHandler(consolehandler)

    os.environ['DESTRUCTIVE_TESTS'] = str(options.run_destructive)

    if not any((options.module, options.client, options.shell, options.unit,
                options.state, options.runner, options.name)):
        options.module = True
        options.client = True
        options.shell = True
        options.unit = True
        options.runner = True
        options.state = True
    return options
Example #8
        print('Trying to raise the limit to {0}'.format(REQUIRED_OPEN_FILES))
        if hmax_open_files < 4096:
            hmax_open_files = 4096  # Decent default?
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE,
                               (REQUIRED_OPEN_FILES, hmax_open_files))
        except Exception as err:
            print('ERROR: Failed to raise the max open files setting -> {0}'.
                  format(err))
            print('Please issue the following command on your console:')
            print('  ulimit -n {0}'.format(REQUIRED_OPEN_FILES))
            sys.exit(1)
        finally:
            print('~' * PNUM)

    print_header('Setting up Salt daemons to execute tests', top=False)
    status = []
    if not any([
            opts.client, opts.module, opts.runner, opts.shell, opts.state,
            opts.name
    ]):
        return status
    with TestDaemon(opts=opts):
        if opts.name:
            for name in opts.name:
                results = run_suite(opts, '', name)
                status.append(results)
        if opts.runner:
            status.append(run_integration_suite(opts, 'runners', 'Runner'))
        if opts.module:
            status.append(run_integration_suite(opts, 'modules', 'Module'))
Example #9
        if hmax_open_files < 4096:
            hmax_open_files = 4096  # Decent default?
        try:
            resource.setrlimit(
                resource.RLIMIT_NOFILE,
                (REQUIRED_OPEN_FILES, hmax_open_files)
            )
        except Exception as err:
            print('ERROR: Failed to raise the max open files setting -> {0}'.format(err))
            print('Please issue the following command on your console:')
            print('  ulimit -n {0}'.format(REQUIRED_OPEN_FILES))
            sys.exit(1)
        finally:
            print('~' * PNUM)

    print_header('Setting up Salt daemons to execute tests', top=False)
    status = []
    if not any([opts.client, opts.module, opts.runner,
                opts.shell, opts.state, opts.name]):
        return status
    with TestDaemon(opts=opts):
        if opts.name:
            for name in opts.name:
                results = run_suite(opts, '', name)
                status.append(results)
        if opts.runner:
            status.append(run_integration_suite(opts, 'runners', 'Runner'))
        if opts.module:
            status.append(run_integration_suite(opts, 'modules', 'Module'))
        if opts.state:
            status.append(run_integration_suite(opts, 'states', 'State'))