Example #1
    def test_mixing_binds(self):
        """ Ensure variables are set correctly when mixing explicit variable binds and generator binds """
        context = Context()
        context.add_generator('gen', count_gen())
        context.bind_variable('foo', '100')
        self.assertEqual(1, context.mod_count)
        context.bind_generator_next('foo', 'gen')
        self.assertEqual(1, context.get_value('foo'))
        self.assertEqual(2, context.mod_count)
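This test calls a count_gen helper that is not shown in the listing. Judging from the assertions (the first bound value is 1, and later draws return 2 and 3), a minimal sketch of it could look like the following; the body is inferred from those assertions, not taken from the project source:

    def count_gen():
        """ Hypothetical helper: yields 1, 2, 3, ... as the assertions above imply """
        i = 1
        while True:
            yield i
            i += 1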
Example #2
    def test_generator(self):
        """ Test adding a generator """
        context = Context()
        self.assertEqual(0, len(context.get_generators()))
        my_gen = count_gen()
        context.add_generator('gen', my_gen)

        self.assertEqual(1, len(context.get_generators()))
        self.assertIn('gen', context.get_generators())
        self.assertIsNotNone(context.get_generator('gen'))
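For readers without the library source at hand, the registry interface this test exercises can be pictured as a thin wrapper around a dict. This is only an illustrative sketch of the interface implied by the assertions, not the real Context implementation:

    class ContextSketch(object):
        """ Illustrative stand-in for the generator-registry part of Context """

        def __init__(self):
            self._generators = dict()  # Maps generator name -> generator object

        def add_generator(self, name, gen):
            self._generators[name] = gen

        def get_generators(self):
            return self._generators

        def get_generator(self, name):
            return self._generators.get(name)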
Example #3
    def test_generator_bind(self):
        """ Test generator setting to variables """
        context = Context()
        self.assertEqual(0, len(context.get_generators()))
        my_gen = count_gen()
        context.add_generator('gen', my_gen)

        context.bind_generator_next('foo', 'gen')
        self.assertEqual(1, context.mod_count)
        self.assertEqual(1, context.get_value('foo'))
        self.assertEqual(2, next(context.get_generator('gen')))
        self.assertEqual(3, next(my_gen))
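Note what the last two assertions establish: get_generator('gen') returns the very same generator object that was registered, so drawing through the registry also advances my_gen. A sketch of what bind_generator_next presumably does, stated only in terms of methods already used in these tests (illustrative, not the library source):

    def bind_generator_next_sketch(context, variable_name, generator_name):
        """ Illustrative: draw the next value from a named generator and bind it to a variable """
        gen = context.get_generator(generator_name)
        context.bind_variable(variable_name, next(gen))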
Example #4
    def test_update_context_generators(self):
        """ Test updating context variables using generator """
        test = Test()
        context = Context()
        context.bind_variable('foo', 'broken')
        test.variable_binds = {'foo': 'initial_value'}
        test.generator_binds = {'foo': 'gen'}
        context.add_generator('gen', generators.generator_basic_ids())

        test.update_context_before(context)
        self.assertEqual(1, context.get_value('foo'))
        test.update_context_before(context)
        self.assertEqual(2, context.get_value('foo'))
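The assertions pin down an ordering: each update_context_before call applies the static variable_binds first and then lets generator_binds overwrite them with a freshly drawn value, which is why 'foo' becomes 1 and then 2 rather than 'initial_value' or 'broken'. A sketch of that update step, assuming only the names already used above:

    def update_context_before_sketch(test, context):
        """ Illustrative ordering only: static binds first, then generator binds win """
        if test.variable_binds:
            context.bind_variables(test.variable_binds)
        if test.generator_binds:
            for variable_name, generator_name in test.generator_binds.items():
                context.bind_generator_next(variable_name, generator_name)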
Example #5
def run_testsets(testsets):
    """ Execute a set of tests, using given TestSet list input """
    group_results = dict()  # results, by group
    group_failure_counts = dict()
    total_failures = 0
    myinteractive = False

    for testset in testsets:
        mytests = testset.tests
        myconfig = testset.config
        mybenchmarks = testset.benchmarks
        context = Context()

        # Bind variables & add generators if pertinent
        if myconfig.variable_binds:
            context.bind_variables(myconfig.variable_binds)
        if myconfig.generators:
            for key, value in myconfig.generators.items():
                context.add_generator(key, value)

        # Make sure we actually have tests to execute
        if not mytests and not mybenchmarks:
            # No tests in this test set, probably just imports; skip to the next test set
            continue

        myinteractive = bool(myinteractive or myconfig.interactive)

        # Run tests, collecting statistics as needed
        for test in mytests:
            # Initialize the dictionaries to store test fail counts and results
            if test.group not in group_results:
                group_results[test.group] = list()
                group_failure_counts[test.group] = 0

            result = run_test(test, test_config=myconfig, context=context)
            result.body = None  # Remove the body, save some memory!

            if not result.passed: # Print failure, increase failure counts for that test group
                # Use result test URL to allow for templating
                logger.error('Test Failed: ' + test.name + ' URL=' + result.test.url +
                             ' Group=' + test.group + ' HTTP Status Code: ' + str(result.response_code))

                # Print test failure reasons
                if result.failures:
                    for failure in result.failures:
                        log_failure(failure, context=context, test_config=myconfig)

                # Increment the failure count for this test group (entry was initialized above)
                group_failure_counts[test.group] += 1

            else: # Test passed, print results
                logger.info('Test Succeeded: '+test.name+" URL="+test.url+" Group="+test.group)

            # Add results for this test group to the resultset
            group_results[test.group].append(result)

            # handle stop_on_failure flag
            if not result.passed and test.stop_on_failure:
                print('STOP ON FAILURE! stopping test set execution, continuing with other test sets')
                break

        for benchmark in mybenchmarks:  # Run benchmarks, analyze, write
            if not benchmark.metrics:
                logger.debug('Skipping benchmark, no metrics to collect')
                continue

            logger.info("Benchmark Starting: "+benchmark.name+" Group: "+benchmark.group)
            benchmark_result = run_benchmark(benchmark, myconfig, context=context)
            print(benchmark_result)
            logger.info("Benchmark Done: "+benchmark.name+" Group: "+benchmark.group)

            if benchmark.output_file:  # Write results to file
                logger.debug('Writing benchmark to file in format: ' + benchmark.output_format)
                write_method = OUTPUT_METHODS[benchmark.output_format]
                logger.debug("Benchmark writing to file: " + benchmark.output_file)
                with open(benchmark.output_file, 'w') as my_file:  # Overwrites any existing file
                    write_method(my_file, benchmark_result, benchmark, test_config=myconfig)

    if myinteractive:
        # a break for when interactive bits are complete, before summary data
        print "==================================="

    # Print summary results
    for group in sorted(group_results.keys()):
        test_count = len(group_results[group])
        failures = group_failure_counts[group]
        total_failures += failures
        passed_ratio = str(test_count - failures) + '/' + str(test_count)
        if failures > 0:
            print(u'Test Group ' + group + u' FAILED: ' + passed_ratio + u' Tests Passed!')
        else:
            print(u'Test Group ' + group + u' SUCCEEDED: ' + passed_ratio + u' Tests Passed!')

    return total_failures
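run_testsets only needs TestSet objects exposing tests, config and benchmarks, plus a config exposing variable_binds, generators and interactive, so a minimal driver might look like the following. The TestSet construction is sketched from the attributes read above; the bare constructor and the my_tests / my_config names are assumptions for illustration:

    import sys

    # Hypothetical driver: build one TestSet shaped the way run_testsets consumes it
    testset = TestSet()
    testset.tests = my_tests      # List of Test objects built elsewhere
    testset.benchmarks = []       # No benchmarks in this run
    testset.config = my_config    # Must expose variable_binds, generators, interactive

    failure_count = run_testsets([testset])
    sys.exit(1 if failure_count > 0 else 0)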