def test_content_file_template(self):
        """ Test file read and templating of read files in this directory """
        variables = {'id':1, 'login':'******'}
        context = Context()

        file_path = os.path.dirname(os.path.realpath(__file__))
        file_path = os.path.join(file_path, 'person_body_template.json')

        file_content = None
        with open(file_path, 'r') as f:
            file_content = f.read()

        # Test basic read
        handler = ContentHandler()
        handler.setup(file_path, is_file=True)
        self.assertEqual(file_content, handler.get_content())

        # Test templating of read content
        handler.setup(file_path, is_file=True, is_template_content=True)
        self.assertEqual(file_content, handler.get_content())
        self.assertEqual(file_content, handler.get_content(context))  # No substitution
        substituted = string.Template(file_content).safe_substitute(variables)
        context.bind_variables(variables)
        self.assertEqual(substituted, handler.get_content(context))

        # Test path templating
        templated_file_path = '$filepath'
        context.bind_variable('filepath', file_path)
        handler.setup(templated_file_path, is_file=True, is_template_path=True)
        self.assertEqual(file_content, handler.get_content(context))

        # Test double templating with files
        handler.setup(templated_file_path, is_file=True, is_template_path=True, is_template_content=True)
        self.assertEqual(substituted, handler.get_content(context=context))
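
All of the assertions above reduce to the standard library's string.Template: the test builds its expected value with safe_substitute and checks that ContentHandler.get_content(context) returns the same string. A minimal standalone illustration, independent of pyresttest (the template string and values here are hypothetical, not the contents of person_body_template.json):

import string

# Hypothetical template and values, for illustration only
template = '{"id": "$id", "login": "$login", "extra": "$unknown"}'
values = {'id': 1, 'login': 'example_user'}

# safe_substitute fills the placeholders it knows and leaves $unknown untouched,
# which is also why templating against an empty Context is a no-op above
print(string.Template(template).safe_substitute(values))
# {"id": "1", "login": "example_user", "extra": "$unknown"}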
Example #2
    def test_header_templating(self):
        test = Test()
        head_templated = {'$key': "$val"}
        context = Context()
        context.bind_variables({'key': 'cheese', 'val': 'gouda'})

        # No templating applied
        test.headers = head_templated
        head = test.get_headers()
        self.assertEqual(1, len(head))
        self.assertEqual('$val', head['$key'])

        test.set_headers(head_templated, isTemplate=True)
        self.assertTrue(test.templates)
        self.assertTrue(test.NAME_HEADERS in test.templates)

        # No context, no templating
        head = test.headers
        self.assertEqual(1, len(head))
        self.assertEqual('$val', head['$key'])

        # Templated with context
        head = test.get_headers(context=context)
        self.assertEqual(1, len(head))
        self.assertEqual('gouda', head['cheese'])
Example #4
    def test_parse_test_templated_headers(self):
        """ Test parsing with templated headers """

        heads = {"Accept": "Application/json", "$AuthHeader": "$AuthString"}
        templated_heads = {
            "Accept": "Application/json",
            "apikey": "magic_passWord"
        }
        context = Context()
        context.bind_variables({
            'AuthHeader': 'apikey',
            'AuthString': 'magic_passWord'
        })

        # If this doesn't throw errors we have silent failures
        input_invalid = {
            "url": "/ping",
            "method": "DELETE",
            "NAME": "foo",
            "group": "bar",
            "body": "<xml>input</xml>",
            "headers": 'goat'
        }
        try:
            test = Test.parse_test('', input_invalid)
            self.fail("Expected error not thrown")
        except TypeError:
            pass

        def assert_dict_eq(dict1, dict2):
            """ Test dicts are equal """
            self.assertEqual(2, len(set(dict1.items()) & set(dict2.items())))

        # Before templating is used
        input = {
            "url": "/ping",
            "method": "DELETE",
            "NAME": "foo",
            "group": "bar",
            "body": "<xml>input</xml>",
            "headers": heads
        }
        test = Test.parse_test('', input)
        assert_dict_eq(heads, test.headers)
        assert_dict_eq(heads, test.get_headers(context=context))

        # After templating applied
        input_templated = {
            "url": "/ping",
            "method": "DELETE",
            "NAME": "foo",
            "group": "bar",
            "body": "<xml>input</xml>",
            "headers": {
                'tEmplate': heads
            }
        }
        test2 = Test.parse_test('', input_templated)
        assert_dict_eq(heads, test2.get_headers())
        assert_dict_eq(templated_heads, test2.get_headers(context=context))
Example #6
    def test_test_content_templating(self):
        test = Test()
        handler = ContentHandler()
        handler.is_template_content = True
        handler.content = '{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "******"}'
        context = Context()
        context.bind_variables({'id': 9, 'login': '******'})
        test.set_body(handler)

        templated = test.realize(context=context)
        self.assertEqual(string.Template(handler.content).safe_substitute(context.get_values()),
                         templated.body)
Example #9
def run_testsets(testsets):
    """ Execute a set of tests, using given TestSet list input """
    group_results = dict()  # results, by group
    group_failure_counts = dict()
    total_failures = 0
    myinteractive = False

    for testset in testsets:
        mytests = testset.tests
        myconfig = testset.config
        mybenchmarks = testset.benchmarks
        context = Context()

        # Bind variables & add generators if pertinent
        if myconfig.variable_binds:
            context.bind_variables(myconfig.variable_binds)
        if myconfig.generators:
            for key, value in myconfig.generators.items():
                context.add_generator(key, value)

        # Make sure we actually have tests to execute
        if not mytests and not mybenchmarks:
            # No tests in this test set (probably just imports); skip to the next test set
            continue

        myinteractive = bool(myinteractive or myconfig.interactive)

        # Run tests, collecting statistics as needed
        for test in mytests:
            # Initialize the dictionaries to store test fail counts and results
            if test.group not in group_results:
                group_results[test.group] = list()
                group_failure_counts[test.group] = 0

            result = run_test(test, test_config = myconfig, context=context)
            result.body = None  # Remove the body, save some memory!

            if not result.passed: # Print failure, increase failure counts for that test group
                # Use result test URL to allow for templating
                logger.error('Test Failed: '+test.name+" URL="+result.test.url+" Group="+test.group+" HTTP Status Code: "+str(result.response_code))

                # Print test failure reasons
                if result.failures:
                    for failure in result.failures:
                        log_failure(failure, context=context, test_config=myconfig)

                # Increment the failure count for this test group (the entry was initialized above)
                group_failure_counts[test.group] += 1

            else: # Test passed, print results
                logger.info('Test Succeeded: '+test.name+" URL="+test.url+" Group="+test.group)

            # Add results for this test group to the resultset
            group_results[test.group].append(result)

            # handle stop_on_failure flag
            if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:
                print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'
                break

        for benchmark in mybenchmarks:  # Run benchmarks, analyze, write
            if not benchmark.metrics:
                logger.debug('Skipping benchmark, no metrics to collect')
                continue

            logger.info("Benchmark Starting: "+benchmark.name+" Group: "+benchmark.group)
            benchmark_result = run_benchmark(benchmark, myconfig, context=context)
            print benchmark_result
            logger.info("Benchmark Done: "+benchmark.name+" Group: "+benchmark.group)

            if benchmark.output_file:  # Write results to file
                logger.debug('Writing benchmark to file in format: ' + benchmark.output_format)
                logger.debug('Benchmark writing to file: ' + benchmark.output_file)
                write_method = OUTPUT_METHODS[benchmark.output_format]
                with open(benchmark.output_file, 'w') as my_file:  # Overwrites any existing file
                    write_method(my_file, benchmark_result, benchmark, test_config=myconfig)

    if myinteractive:
        # a break for when interactive bits are complete, before summary data
        print "==================================="

    # Print summary results
    for group in sorted(group_results.keys()):
        test_count = len(group_results[group])
        failures = group_failure_counts[group]
        total_failures = total_failures + failures
        if (failures > 0):
            print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'
        else:
            print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'

    return total_failures
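
run_testsets only touches a handful of attributes on the objects it is given, all visible in the loop above (testset.tests, testset.config, testset.benchmarks, plus config.interactive, config.variable_binds and config.generators). A stand-in sketch of that shape, using placeholder classes rather than pyresttest's real TestSet/TestConfig API (all names below are illustrative assumptions):

class StubConfig(object):
    """ Placeholder config: only the attributes run_testsets reads. """
    interactive = False
    variable_binds = None   # optionally a dict of variable name -> value to pre-bind
    generators = None       # optionally a dict of name -> generator to register

class StubTestSet(object):
    """ Placeholder test set: run_testsets iterates .tests and .benchmarks. """
    def __init__(self, tests, config=None, benchmarks=None):
        self.tests = tests            # Test objects; .group/.name/.url/.stop_on_failure are read above
        self.config = config or StubConfig()
        self.benchmarks = benchmarks or []

# Hypothetical call:
# total_failures = run_testsets([StubTestSet(tests=[some_test])])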