Example #1
0
    def test_header_templating(self):
        """ Header templating: plain dict is untouched, templated dict substitutes via context """
        mytest = Test()
        raw_headers = {'$key': "$val"}
        ctx = Context()
        ctx.bind_variables({'key': 'cheese', 'val': 'gouda'})

        # Plain assignment applies no templating
        mytest.headers = raw_headers
        result = mytest.get_headers()
        self.assertEqual(1, len(result))
        self.assertEqual('$val', result['$key'])

        # Mark the headers as a template
        mytest.set_headers(raw_headers, isTemplate=True)
        self.assertTrue(mytest.templates)
        self.assertTrue(mytest.NAME_HEADERS in mytest.templates)

        # Still no substitution without a context
        result = mytest.headers
        self.assertEqual(1, len(result))
        self.assertEqual('$val', result['$key'])

        # With a context both key and value are substituted
        result = mytest.get_headers(context=ctx)
        self.assertEqual(1, len(result))
        self.assertEqual('gouda', result['cheese'])
Example #2
0
    def test_parse_test_templated_headers(self):
        """ Test parsing with templated headers """

        heads = {"Accept": "Application/json", "$AuthHeader": "$AuthString"}
        templated_heads = {
            "Accept": "Application/json",
            "apikey": "magic_passWord"
        }
        context = Context()
        context.bind_variables({
            'AuthHeader': 'apikey',
            'AuthString': 'magic_passWord'
        })

        # A non-dict "headers" value must raise TypeError, not fail silently
        input_invalid = {
            "url": "/ping",
            "method": "DELETE",
            "NAME": "foo",
            "group": "bar",
            "body": "<xml>input</xml>",
            "headers": 'goat'
        }
        try:
            test = Test.parse_test('', input_invalid)
            # BUG FIX: was test.fail(...) -- fail() belongs to the TestCase
            # (self), not the parsed Test object
            self.fail("Expected error not thrown")
        except TypeError:
            pass

        def assert_dict_eq(dict1, dict2):
            """ Assert the two expected header pairs appear in both dicts """
            self.assertEqual(2, len(set(dict1.items()) & set(dict2.items())))

        # Before templating is used (renamed from 'input' to avoid shadowing the builtin)
        test_input = {
            "url": "/ping",
            "method": "DELETE",
            "NAME": "foo",
            "group": "bar",
            "body": "<xml>input</xml>",
            "headers": heads
        }
        test = Test.parse_test('', test_input)
        assert_dict_eq(heads, test.headers)
        assert_dict_eq(heads, test.get_headers(context=context))

        # After templating applied
        input_templated = {
            "url": "/ping",
            "method": "DELETE",
            "NAME": "foo",
            "group": "bar",
            "body": "<xml>input</xml>",
            "headers": {
                'tEmplate': heads
            }
        }
        test2 = Test.parse_test('', input_templated)
        assert_dict_eq(heads, test2.get_headers())
        assert_dict_eq(templated_heads, test2.get_headers(context=context))
    def test_content_file_template(self):
        """ Test file read and templating of read files in this directory """
        variables = {'id': 1, 'login': '******'}
        context = Context()

        file_path = os.path.dirname(os.path.realpath(__file__))
        file_path = os.path.join(file_path, 'person_body_template.json')

        file_content = None
        with open(file_path, 'r') as f:
            file_content = f.read()

        # Test basic read
        handler = ContentHandler()
        handler.setup(file_path, is_file=True)
        self.assertEqual(file_content, handler.get_content())

        # Test templating of read content
        handler.setup(file_path, is_file=True, is_template_content=True)
        self.assertEqual(file_content, handler.get_content())
        self.assertEqual(file_content, handler.get_content(context))  # No substitution
        substituted = string.Template(file_content).safe_substitute(variables)
        context.bind_variables(variables)
        self.assertEqual(substituted, handler.get_content(context))

        # Test path templating
        # BUG FIX: previously passed file_path here, so the templated path
        # '$filepath' was never exercised and the test was vacuous
        templated_file_path = '$filepath'
        context.bind_variable('filepath', file_path)
        handler.setup(templated_file_path, is_file=True, is_template_path=True)
        self.assertEqual(file_content, handler.get_content(context))

        # Test double templating with files (path AND content substitution)
        handler.setup(templated_file_path, is_file=True, is_template_path=True,
                      is_template_content=True)
        self.assertEqual(substituted, handler.get_content(context=context))
Example #4
0
    def test_parse_validator_comparator(self):
        """ Test parsing a comparator validator with a templated expected value """
        test_config = {
            'name':
            'Default',
            'url':
            '/api',
            'validators': [{
                'comparator': {
                    'jsonpath_mini': 'id',
                    'comparator': 'eq',
                    'expected': {
                        'template': '$id'
                    }
                }
            }]
        }
        test = Test.parse_test('', test_config)
        self.assertTrue(test.validators)
        self.assertEqual(1, len(test.validators))

        context = Context()
        context.bind_variable('id', 3)

        myjson = '{"id": "3"}'
        # Removed redundant duplicate validate() call whose result ('failure')
        # was assigned but never used
        self.assertTrue(test.validators[0].validate(myjson, context=context))
        # Without a context the $id template cannot resolve, so validation fails
        self.assertFalse(test.validators[0].validate(myjson))
Exemple #5
0
    def test_header_templating(self):
        """ Verify templated headers substitute only when flagged and given a context """
        mytest = Test()
        template_headers = {'$key': "$val"}
        ctx = Context()
        ctx.bind_variables({'key': 'cheese', 'val': 'gouda'})

        # Direct assignment: values pass through untemplated
        headers_out = mytest.get_headers() if False else None
        mytest.headers = template_headers
        headers_out = mytest.get_headers()
        self.assertEqual(1, len(headers_out))
        self.assertEqual('$val', headers_out['$key'])

        # Flag headers as templated
        mytest.set_headers(template_headers, isTemplate=True)
        self.assertTrue(mytest.templates)
        self.assertTrue(mytest.NAME_HEADERS in mytest.templates)

        # Accessing raw headers without a context leaves templates intact
        headers_out = mytest.headers
        self.assertEqual(1, len(headers_out))
        self.assertEqual('$val', headers_out['$key'])

        # Supplying a context substitutes both key and value
        headers_out = mytest.get_headers(context=ctx)
        self.assertEqual(1, len(headers_out))
        self.assertEqual('gouda', headers_out['cheese'])
Exemple #6
0
    def test_parse_test_templated_headers(self):
        """ Test parsing with templated headers """

        heads = {"Accept": "Application/json", "$AuthHeader": "$AuthString"}
        templated_heads = {"Accept": "Application/json", "apikey": "magic_passWord"}
        context = Context()
        context.bind_variables({'AuthHeader': 'apikey', 'AuthString': 'magic_passWord'})

        # A non-dict "headers" value must raise TypeError, not fail silently
        input_invalid = {"url": "/ping", "method": "DELETE", "NAME": "foo",
                         "group": "bar", "body": "<xml>input</xml>", "headers": 'goat'}
        try:
            test = Test.parse_test('', input_invalid)
            # BUG FIX: was test.fail(...) -- fail() belongs to the TestCase
            # (self), not the parsed Test object
            self.fail("Expected error not thrown")
        except TypeError:
            pass

        def assert_dict_eq(dict1, dict2):
            """ Assert the two expected header pairs appear in both dicts """
            self.assertEqual(2, len(set(dict1.items()) & set(dict2.items())))

        # Before templating is used (renamed from 'input' to avoid shadowing the builtin)
        test_input = {"url": "/ping", "method": "DELETE", "NAME": "foo",
                      "group": "bar", "body": "<xml>input</xml>", "headers": heads}
        test = Test.parse_test('', test_input)
        assert_dict_eq(heads, test.headers)
        assert_dict_eq(heads, test.get_headers(context=context))

        # After templating applied
        input_templated = {"url": "/ping", "method": "DELETE", "NAME": "foo",
                           "group": "bar", "body": "<xml>input</xml>",
                           "headers": {'tEmplate': heads}}
        test2 = Test.parse_test('', input_templated)
        assert_dict_eq(heads, test2.get_headers())
        assert_dict_eq(templated_heads, test2.get_headers(context=context))
Exemple #7
0
 def test_update_context_variables(self):
     """ A test's variable_binds overwrite existing context values and add new ones """
     mytest = Test()
     ctx = Context()
     ctx.bind_variable('foo', 'broken')
     mytest.variable_binds = {'foo': 'correct', 'test': 'value'}
     mytest.update_context_before(ctx)
     self.assertEqual('correct', ctx.get_value('foo'))
     self.assertEqual('value', ctx.get_value('test'))
    def test_abstract_extractor_templating(self):
        """ Abstract extractors only template their query when is_templated is set """
        extractor = validators.AbstractExtractor()
        extractor.query = '$val.vee'
        extractor.is_templated = True
        ctx = Context()
        ctx.bind_variable('val', 'foo')
        # Without a context the template stays literal; with one it substitutes
        self.assertEqual('$val.vee', extractor.templated_query())
        self.assertEqual('foo.vee', extractor.templated_query(context=ctx))

        # When the templated flag is off, the context is ignored entirely
        extractor.is_templated = False
        self.assertEqual('$val.vee', extractor.templated_query(context=ctx))
Exemple #9
0
    def test_test_content_templating(self):
        """ Realizing a test with a templated body substitutes context values """
        mytest = Test()
        handler = ContentHandler()
        handler.is_template_content = True
        handler.content = '{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "******"}'
        ctx = Context()
        ctx.bind_variables({'id': 9, 'login': '******'})
        mytest.set_body(handler)

        realized = mytest.realize(context=ctx)
        expected = string.Template(handler.content).safe_substitute(ctx.get_values())
        self.assertEqual(expected, realized.body)
 def test_parse_content_templated(self):
     """ A {'template': ...} node parses into dynamic, template-content handler """
     node = {'template': 'myval $var'}
     handler = ContentHandler.parse_content(node)
     ctx = Context()
     ctx.bind_variable('var', 'cheese')
     # Raw content is preserved; templating happens at get_content time
     self.assertEqual(node['template'], handler.content)
     self.assertEqual('myval cheese', handler.get_content(ctx))
     self.assertTrue(handler.is_dynamic())
     self.assertTrue(handler.is_template_content)
     self.assertFalse(handler.is_file)
     self.assertFalse(handler.is_template_path)
 def test_parse_content_templated(self):
     """ Parsing a template node yields content templated on get, not on parse """
     parsed = ContentHandler.parse_content({'template': 'myval $var'})
     ctx = Context()
     ctx.bind_variable('var', 'cheese')
     self.assertEqual('myval $var', parsed.content)
     self.assertEqual('myval cheese', parsed.get_content(ctx))
     # Flags: dynamic template content, not file-backed, path untemplated
     self.assertTrue(parsed.is_dynamic())
     self.assertFalse(parsed.is_file)
     self.assertFalse(parsed.is_template_path)
     self.assertTrue(parsed.is_template_content)
Exemple #12
0
    def test_test_content_templating(self):
        """ Test body content templating through Test.realize """
        mytest = Test()
        body_handler = ContentHandler()
        body_handler.is_template_content = True
        body_handler.content = '{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "******"}'
        ctx = Context()
        ctx.bind_variables({'id': 9, 'login': '******'})
        mytest.set_body(body_handler)

        realized_test = mytest.realize(context=ctx)
        expected_body = string.Template(body_handler.content).safe_substitute(ctx.get_values())
        self.assertEqual(expected_body, realized_test.body)
Exemple #13
0
 def test_mixing_binds(self):
     """ Ensure that variables are set correctly when mixing explicit declaration and variables """
     ctx = Context()
     ctx.add_generator('gen', count_gen())
     ctx.bind_variable('foo', '100')
     self.assertEqual(1, ctx.mod_count)
     # Generator bind overwrites the explicit value and counts as a modification
     ctx.bind_generator_next('foo', 'gen')
     self.assertEqual(1, ctx.get_value('foo'))
     self.assertEqual(2, ctx.mod_count)
Exemple #14
0
    def test_test_url_templating(self):
        """ A templated URL stays literal until realized against a context """
        mytest = Test()
        mytest.set_url('$cheese', isTemplate=True)
        self.assertTrue(mytest.is_dynamic())
        self.assertEqual('$cheese', mytest.get_url())
        self.assertTrue(mytest.templates['url'])

        ctx = Context()
        ctx.bind_variable('cheese', 'stilton')
        self.assertEqual('stilton', mytest.get_url(context=ctx))

        realized_test = mytest.realize(ctx)
        self.assertEqual('stilton', realized_test.url)
Exemple #15
0
    def test_test_url_templating(self):
        """ Setting a templated URL makes the test dynamic; context substitutes it """
        tst = Test()
        tst.set_url('$cheese', isTemplate=True)
        # Template flag is recorded and the raw template is returned sans context
        self.assertTrue(tst.templates['url'])
        self.assertTrue(tst.is_dynamic())
        self.assertEqual('$cheese', tst.get_url())

        ctx = Context()
        ctx.bind_variable('cheese', 'stilton')
        self.assertEqual('stilton', tst.get_url(context=ctx))

        # realize() bakes the substitution into the resulting test object
        self.assertEqual('stilton', tst.realize(ctx).url)
Exemple #16
0
    def test_parse_validator(self):
        """ Test basic parsing using registry """
        config = {"jsonpath_mini": "key.val", "comparator": "eq", "expected": 3}
        validator = validators.parse_validator("comparator", config)
        myjson = '{"key": {"val": 3}}'
        comp = validator.validate(body=myjson)
        # FIX: result was previously assigned but never asserted
        self.assertTrue(comp)

        # Try it with templating
        config["jsonpath_mini"] = {"template": "key.$node"}
        validator = validators.parse_validator("comparator", config)
        context = Context()
        context.bind_variable("node", "val")
        comp = validator.validate(myjson, context=context)
        # key.$node -> key.val -> 3 == expected 3
        self.assertTrue(comp)
    def test_content_templating(self):
        """ Test content and templating of it """
        handler = ContentHandler()
        body = '$variable value'
        context = Context()
        context.bind_variable('variable', 'bar')

        # No templating: context is ignored
        handler.setup(body, is_template_content=False)
        self.assertEqual(body, handler.get_content())
        self.assertEqual(body, handler.get_content(context))

        # Templating: literal without a context
        handler.setup(body, is_template_content=True)
        self.assertEqual(body, handler.get_content())
        # FIX: previously never verified substitution actually occurs
        self.assertEqual('bar value', handler.get_content(context))
    def test_content_templating(self):
        """ Test content and templating of it """
        handler = ContentHandler()
        body = '$variable value'
        context = Context()
        context.bind_variable('variable', 'bar')

        # No templating: context has no effect
        handler.setup(body, is_template_content=False)
        self.assertEqual(body, handler.get_content())
        self.assertEqual(body, handler.get_content(context))

        # Templating: raw without context
        handler.setup(body, is_template_content=True)
        self.assertEqual(body, handler.get_content())
        # FIX: added the missing positive check that a context substitutes
        self.assertEqual('bar value', handler.get_content(context))
Exemple #19
0
    def test_variables(self):
        """ Test bind/return of variables """
        ctx = Context()
        self.assertTrue(ctx.get_value('foo') is None)
        self.assertEqual(0, ctx.mod_count)

        # First bind stores the value and bumps the modification counter
        ctx.bind_variable('foo', 'bar')
        self.assertEqual('bar', ctx.get_value('foo'))
        self.assertEqual('bar', ctx.get_values()['foo'])
        self.assertEqual(1, ctx.mod_count)

        # Rebinding overwrites and counts as a second modification
        ctx.bind_variable('foo', 'bar2')
        self.assertEqual('bar2', ctx.get_value('foo'))
        self.assertEqual(2, ctx.mod_count)
Exemple #20
0
    def test_generator_bind(self):
        """ Test generator setting to variables """
        context = Context()
        self.assertEqual(0, len(context.get_generators()))
        my_gen = count_gen()
        context.add_generator('gen', my_gen)

        context.bind_generator_next('foo', 'gen')
        self.assertEqual(1, context.mod_count)
        self.assertEqual(1, context.get_value('foo'))
        # BUG FIX: assertTrue(2, ...) treated the second argument as a failure
        # message and always passed; these must assert the generator's values
        self.assertEqual(2, context.get_generator('gen').next())
        self.assertEqual(3, my_gen.next())
Exemple #21
0
    def test_variable_binding(self):
        """ Test that tests successfully bind variables """
        # Renamed from 'input' (shadowed the builtin); removed unused 'element'
        test_input = [{"url": "/ping"}, {"name": "cheese"},
                      {"expected_status": ["200", 204, "202"]}]
        test_input.append({"variable_binds": {'var': 'value'}})

        test = Test.parse_test('', test_input)
        binds = test.variable_binds
        self.assertEqual(1, len(binds))
        self.assertEqual('value', binds['var'])

        # Test that updates context correctly
        context = Context()
        test.update_context_before(context)
        self.assertEqual('value', context.get_value('var'))
        self.assertTrue(test.is_context_modifier())
Exemple #22
0
 def test_mixing_binds(self):
     """ Mixing an explicit variable bind with a generator bind on the same name """
     ctx = Context()
     ctx.add_generator('gen', count_gen())
     # Explicit bind first: one modification recorded
     ctx.bind_variable('foo', '100')
     self.assertEqual(1, ctx.mod_count)
     # Generator overwrites the explicit value, incrementing mod_count again
     ctx.bind_generator_next('foo', 'gen')
     self.assertEqual(1, ctx.get_value('foo'))
     self.assertEqual(2, ctx.mod_count)
Exemple #23
0
    def test_validator_comparator_templating(self):
        """ Try templating comparator validator """
        config = {"jsonpath_mini": {"template": "key.$node"}, "comparator": "eq", "expected": 3}
        context = Context()
        context.bind_variable("node", "val")
        # BUG FIX: myjson_pass was a redacted placeholder ('******'), which is
        # not valid JSON; restored a body satisfying key.val == 3 to mirror the
        # failing case below
        myjson_pass = '{"id": 3, "key": {"val": 3}}'
        myjson_fail = '{"id": 3, "key": {"val": 4}}'
        comp = validators.ComparatorValidator.parse(config)

        self.assertTrue(comp.validate(body=myjson_pass, context=context))
        self.assertFalse(comp.validate(body=myjson_fail, context=context))

        # Template expected
        config["expected"] = {"template": "$id"}
        context.bind_variable("id", 3)
        self.assertTrue(comp.validate(body=myjson_pass, context=context))
        self.assertFalse(comp.validate(body=myjson_fail, context=context))
Exemple #24
0
    def test_parse_validator(self):
        """ Test basic parsing using registry """
        config = {
            'jsonpath_mini': 'key.val',
            'comparator': 'eq',
            'expected': 3
        }
        validator = validators.parse_validator('comparator', config)
        myjson = '{"key": {"val": 3}}'
        comp = validator.validate(body=myjson)
        # FIX: result was previously assigned but never asserted
        self.assertTrue(comp)

        # Try it with templating
        config['jsonpath_mini'] = {'template': 'key.$node'}
        validator = validators.parse_validator('comparator', config)
        context = Context()
        context.bind_variable('node', 'val')
        comp = validator.validate(myjson, context=context)
        # key.$node -> key.val -> 3 == expected 3
        self.assertTrue(comp)
Exemple #25
0
    def test_generator(self):
        """ Registering a generator makes it retrievable by name """
        ctx = Context()
        self.assertEqual(0, len(ctx.get_generators()))
        gen = count_gen()
        ctx.add_generator('gen', gen)

        registered = ctx.get_generators()
        self.assertEqual(1, len(registered))
        self.assertTrue('gen' in registered)
        self.assertTrue(ctx.get_generator('gen') is not None)
Exemple #26
0
    def test_header_extraction(self):
        """ Header extractors bind response headers to context, ignoring case """
        mytest = Test()
        mytest.url = self.prefix + '/api/person/1/'
        lower_key = 'server-header'
        mixed_key = 'server-header-mixedcase'

        mytest.extract_binds = {
            lower_key: validators.HeaderExtractor.parse('server'),
            # Verify case-insensitive behavior
            mixed_key: validators.HeaderExtractor.parse('sErVer')
        }
        ctx = Context()
        test_response = resttest.run_test(mytest, context=ctx)
        lower_val = ctx.get_value(lower_key)
        mixed_val = ctx.get_value(mixed_key)
        self.assertEqual(lower_val, mixed_val)
        self.assertTrue('wsgi' in lower_val.lower())
        self.assertTrue('wsgi' in mixed_val.lower())
Exemple #27
0
    def test_header_extraction(self):
        """ Extracting the same header under two casings yields identical values """
        mytest = Test()
        mytest.url = self.prefix + "/api/person/1/"
        keys = ("server-header", "server-header-mixedcase")

        # Second extractor uses mixed case to verify case-insensitive lookup
        mytest.extract_binds = {
            keys[0]: validators.HeaderExtractor.parse("server"),
            keys[1]: validators.HeaderExtractor.parse("sErVer"),
        }
        ctx = Context()
        test_response = resttest.run_test(mytest, context=ctx)
        first = ctx.get_value(keys[0])
        second = ctx.get_value(keys[1])
        self.assertEqual(first, second)
        self.assertTrue("wsgi" in first.lower())
        self.assertTrue("wsgi" in second.lower())
Exemple #28
0
def run_test(mytest, test_config = TestConfig(), context = None):
    """ Put together test pieces: configure & run actual test, return results """
    # NOTE(review): the mutable default TestConfig() is shared across all calls
    # that omit test_config -- presumably it is treated as read-only; confirm.

    # Initialize a context if not supplied
    my_context = context
    if my_context is None:
        my_context = Context()

    # Apply the test's variable/generator binds, then realize (template-substitute)
    # the test and build a curl handle from the realized version
    mytest.update_context_before(my_context)
    templated_test = mytest.realize(my_context)
    curl = templated_test.configure_curl(timeout=test_config.timeout, context=my_context)
    result = TestResponse()
    result.test = templated_test

    # reset the body, it holds values from previous runs otherwise
    headers = StringIO()
    body = StringIO()
    curl.setopt(pycurl.WRITEDATA, body)
    curl.setopt(pycurl.HEADERFUNCTION, headers.write)

    result.passed = None

    # Interactive mode: show the request and wait for the user before firing it
    if test_config.interactive:
        print "==================================="
        print "%s" % mytest.name
        print "-----------------------------------"
        print "REQUEST:"
        print "%s %s" % (mytest.method, mytest.url)
        if mytest.body is not None:
            print "\n%s" % mytest.body
        raw_input("Press ENTER when ready: ")

    # Optional per-test delay before issuing the request
    if (mytest.delay > 0.0):
        logger.debug("Delay sleeping " + str(mytest.delay) + " before HTTP request")
        time.sleep(mytest.delay)

    # Retry loop: re-issue the request on HTTP 503 with exponential backoff,
    # up to test_config.retries attempts
    retries = test_config.retries
    retry_sleep = 1.0
    while True:
        try:
            curl.perform() #Run the actual call
        except Exception, e:
            # Curl exception occurred (network error), do not pass go, do not collect $200
            trace = traceback.format_exc()
            result.failures.append(Failure(message="Curl Exception: {0}".format(e), details=trace, failure_type=validators.FAILURE_CURL_EXCEPTION))
            result.passed = False
            curl.close()
            return result

        response_code = curl.getinfo(pycurl.RESPONSE_CODE)
        if (response_code == 503 and retries > 0):
            logger.debug("Retry sleeping " + str(retry_sleep) + " seconds after HTTP 503")
            time.sleep(retry_sleep)
            retry_sleep = retry_sleep * 2.0
            retries = retries - 1
        else:
            break
    # NOTE(review): this snippet appears truncated -- response validation and
    # result assembly continue past what is visible here.
Exemple #29
0
    def test_variables(self):
        """ Binding, reading, and rebinding context variables tracks mod_count """
        ctx = Context()
        # Unbound names read as None; nothing has been modified yet
        self.assertTrue(ctx.get_value('foo') is None)
        self.assertEqual(0, ctx.mod_count)

        ctx.bind_variable('foo', 'bar')
        self.assertEqual('bar', ctx.get_value('foo'))
        self.assertEqual('bar', ctx.get_values()['foo'])
        self.assertEqual(1, ctx.mod_count)

        # Overwriting an existing bind is a second modification
        ctx.bind_variable('foo', 'bar2')
        self.assertEqual('bar2', ctx.get_value('foo'))
        self.assertEqual(2, ctx.mod_count)
Exemple #30
0
def run_test(mytest, test_config=TestConfig(), context=None):
    """ Put together test pieces: configure & run actual test, return results """
    # NOTE(review): the mutable default TestConfig() is shared across all calls
    # that omit test_config -- presumably treated as read-only; confirm.

    # Initialize a context if not supplied
    my_context = context
    if my_context is None:
        my_context = Context()

    # Apply variable/generator binds, realize the templated test, and build
    # the curl handle from the realized version
    mytest.update_context_before(my_context)
    templated_test = mytest.realize(my_context)
    curl = templated_test.configure_curl(timeout=test_config.timeout,
                                         context=my_context)
    result = TestResponse()
    result.test = templated_test

    # reset the body, it holds values from previous runs otherwise
    headers = StringIO()
    body = StringIO()
    curl.setopt(pycurl.WRITEDATA, body)
    curl.setopt(pycurl.HEADERFUNCTION, headers.write)
    if test_config.verbose:
        curl.setopt(pycurl.VERBOSE, True)
    if test_config.ssl_insecure:
        # Explicitly configured to skip SSL certificate and hostname checks
        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
        curl.setopt(pycurl.SSL_VERIFYHOST, 0)

    result.passed = None

    # Interactive mode: display the realized request and wait for the user
    if test_config.interactive:
        print "==================================="
        print "%s" % mytest.name
        print "-----------------------------------"
        print "REQUEST:"
        print "%s %s" % (templated_test.method, templated_test.url)
        print "HEADERS:"
        print "%s" % (templated_test.headers)
        if mytest.body is not None:
            print "\n%s" % templated_test.body
        raw_input("Press ENTER when ready (%d): " % (mytest.delay))

    # Optional per-test delay before issuing the request
    if mytest.delay > 0:
        print "Delaying for %ds" % mytest.delay
        time.sleep(mytest.delay)

    try:
        curl.perform()  # Run the actual call
    except Exception, e:
        # Curl exception occurred (network error), do not pass go, do not collect $200
        trace = traceback.format_exc()
        result.failures.append(
            Failure(message="Curl Exception: {0}".format(e),
                    details=trace,
                    failure_type=validators.FAILURE_CURL_EXCEPTION))
        result.passed = False
        curl.close()
        return result
    # NOTE(review): this snippet appears truncated -- response processing
    # continues past what is visible here.
Exemple #31
0
    def test_generator_bind(self):
        """ Test generator setting to variables """
        context = Context()
        self.assertEqual(0, len(context.get_generators()))
        my_gen = count_gen()
        context.add_generator('gen', my_gen)

        context.bind_generator_next('foo', 'gen')
        self.assertEqual(1, context.mod_count)
        self.assertEqual(1, context.get_value('foo'))
        # BUG FIX: assertTrue(2, ...) treated the second argument as a failure
        # message and always passed; assert the actual generator values instead
        self.assertEqual(2, context.get_generator('gen').next())
        self.assertEqual(3, my_gen.next())
Exemple #32
0
    def test_update_context_generators(self):
        """ Generator binds advance the bound variable on each context update """
        mytest = Test()
        ctx = Context()
        ctx.bind_variable('foo', 'broken')
        mytest.variable_binds = {'foo': 'initial_value'}
        mytest.generator_binds = {'foo': 'gen'}
        ctx.add_generator('gen', generators.generator_basic_ids())

        # Generator bind wins over the static variable bind and advances per run
        mytest.update_context_before(ctx)
        self.assertEqual(1, ctx.get_value('foo'))
        mytest.update_context_before(ctx)
        self.assertEqual(2, ctx.get_value('foo'))
Exemple #33
0
    def test_parse_extractor_minijson(self):
        """ Parse and run the mini-JSON extractor, plain and templated """
        config = 'key.val'
        extractor = validators.MiniJsonExtractor.parse(config)
        myjson = '{"key": {"val": 3}}'
        context = Context()
        context.bind_variable('node', 'val')

        # A non-templated query ignores the context entirely
        extracted = extractor.extract(body=myjson)
        self.assertEqual(3, extracted)
        self.assertEqual(extracted, extractor.extract(body=myjson, context=context))

        try:
            # Removed unused 'val =' assignment; only the exception matters here
            extractor.extract(body='[31{]')
            self.fail("Should throw exception on invalid JSON")
        except ValueError:
            pass

        # Templating: key.$node -> key.val
        # (renamed from 'extract' for clarity -- it is an extractor, not a result)
        config = {'template': 'key.$node'}
        templated_extractor = validators.MiniJsonExtractor.parse(config)
        self.assertEqual(3, templated_extractor.extract(myjson, context=context))
Exemple #34
0
    def test_generator(self):
        """ Adding a generator registers it and makes it retrievable by name """
        ctx = Context()
        self.assertEqual(0, len(ctx.get_generators()))
        counter = count_gen()
        ctx.add_generator('gen', counter)

        gens = ctx.get_generators()
        self.assertEqual(1, len(gens))
        self.assertTrue('gen' in gens)
        self.assertTrue(ctx.get_generator('gen') is not None)
Exemple #35
0
    def test_abstract_extractor_readableconfig(self):
        """ Test human-readable extractor config string output """
        query_config = 'key.val'
        extractor = validators.parse_extractor('jsonpath_mini', query_config)
        expected_string = 'Extractor Type: jsonpath_mini,  Query: "key.val", Templated?: False'
        self.assertEqual(expected_string, extractor.get_readable_config())

        # An unused context or empty args dict leaves the output unchanged
        ctx = Context()
        self.assertEqual(expected_string, extractor.get_readable_config(context=ctx))
        ctx.bind_variable('foo', 'bar')
        self.assertEqual(expected_string, extractor.get_readable_config(context=ctx))
        extractor.args = dict()
        self.assertEqual(expected_string, extractor.get_readable_config(context=ctx))

        # Non-empty args are appended to the readable string
        extractor.args = {'caseSensitive': True}
        self.assertEqual(expected_string + ", Args: " + str(extractor.args),
                         extractor.get_readable_config(context=ctx))

        # Templated queries report Templated?: True with the realized query
        query_config = {'template': 'key.$templated'}
        ctx.bind_variable('templated', 'val')
        extractor = validators.parse_extractor('jsonpath_mini', query_config)
        expected_string = 'Extractor Type: jsonpath_mini,  Query: "key.val", Templated?: True'
        self.assertEqual(expected_string, extractor.get_readable_config(context=ctx))
Exemple #36
0
    def test_parse_validator_comparator(self):
        """ Test parsing a comparator validator with a templated expected value """
        test_config = {
            'name': 'Default',
            'url': '/api',
            'validators': [
                {'comparator': {'jsonpath_mini': 'id',
                                'comparator': 'eq',
                                'expected': {'template': '$id'}}}
            ]
        }
        test = Test.parse_test('', test_config)
        self.assertTrue(test.validators)
        self.assertEqual(1, len(test.validators))

        context = Context()
        context.bind_variable('id', 3)

        myjson = '{"id": "3"}'
        # Removed redundant duplicate validate() call whose result ('failure')
        # was assigned but never used
        self.assertTrue(test.validators[0].validate(myjson, context=context))
        # Without a context the $id template cannot resolve, so validation fails
        self.assertFalse(test.validators[0].validate(myjson))
Exemple #37
0
 def test_update_context_variables(self):
     """ update_context_before applies variable_binds, overriding prior values """
     sample = Test()
     ctx = Context()
     ctx.bind_variable('foo', 'broken')
     sample.variable_binds = {'foo': 'correct', 'test': 'value'}
     sample.update_context_before(ctx)
     # Existing binding is replaced and the new variable appears
     self.assertEqual('correct', ctx.get_value('foo'))
     self.assertEqual('value', ctx.get_value('test'))
Exemple #38
0
    def test_update_context_generators(self):
        """ Test updating context variables using generator """
        sample_test = Test()
        ctx = Context()
        ctx.bind_variable('foo', 'broken')
        sample_test.variable_binds = {'foo': 'initial_value'}
        sample_test.generator_binds = {'foo': 'gen'}
        ctx.add_generator('gen', generators.generator_basic_ids())

        # Each update pulls the next generator value, overriding the static bind
        for expected in (1, 2):
            sample_test.update_context_before(ctx)
            self.assertEqual(expected, ctx.get_value('foo'))
Exemple #39
0
    def test_validator_comparator_templating(self):
        """ Try templating comparator validator """
        config = {
            'jsonpath_mini': {'template': 'key.$node'},
            'comparator': 'eq',
            'expected': 3
        }
        context = Context()
        context.bind_variable('node', 'val')
        # BUG FIX: myjson_pass was a redacted placeholder ('******'), which is
        # not valid JSON; restored a body satisfying key.val == 3 to mirror the
        # failing case below
        myjson_pass = '{"id": 3, "key": {"val": 3}}'
        myjson_fail = '{"id": 3, "key": {"val": 4}}'
        comp = validators.ComparatorValidator.parse(config)

        self.assertTrue(comp.validate(body=myjson_pass, context=context))
        self.assertFalse(comp.validate(body=myjson_fail, context=context))

        # Template expected
        config['expected'] = {'template': '$id'}
        context.bind_variable('id', 3)
        self.assertTrue(comp.validate(body=myjson_pass, context=context))
        self.assertFalse(comp.validate(body=myjson_fail, context=context))
    def test_content_file_template(self):
        """ Test file read and templating of read files in this directory """
        variables = {'id': 1, 'login': '******'}
        context = Context()

        file_path = os.path.dirname(os.path.realpath(__file__))
        file_path = os.path.join(file_path, 'person_body_template.json')

        file_content = None
        with open(file_path, 'r') as f:
            file_content = f.read()

        # Test basic read
        handler = ContentHandler()
        handler.setup(file_path, is_file=True)
        self.assertEqual(file_content, handler.get_content())

        # Test templating of read content
        handler.setup(file_path, is_file=True, is_template_content=True)
        self.assertEqual(file_content, handler.get_content())
        self.assertEqual(file_content,
                         handler.get_content(context))  # No substitution
        substituted = string.Template(file_content).safe_substitute(variables)
        context.bind_variables(variables)
        self.assertEqual(substituted, handler.get_content(context))

        # Test path templating
        # BUG FIX: previously passed file_path here, so the templated path
        # '$filepath' was never exercised and the test was vacuous
        templated_file_path = '$filepath'
        context.bind_variable('filepath', file_path)
        handler.setup(templated_file_path, is_file=True, is_template_path=True)
        self.assertEqual(file_content, handler.get_content(context))

        # Test double templating with files (path AND content substitution)
        handler.setup(templated_file_path,
                      is_file=True,
                      is_template_path=True,
                      is_template_content=True)
        self.assertEqual(substituted, handler.get_content(context=context))
Exemple #41
0
def run_testsets(testsets):
    """ Execute a set of tests, using given TestSet list input """
    group_results = dict()  # results, by group
    group_failure_counts = dict()
    total_failures = 0
    myinteractive = False

    for testset in testsets:
        mytests = testset.tests
        myconfig = testset.config
        mybenchmarks = testset.benchmarks
        context = Context()

        # Bind variables & add generators if pertinent
        if myconfig.variable_binds:
            context.bind_variables(myconfig.variable_binds)
        if myconfig.generators:
            for key, value in myconfig.generators.items():
                context.add_generator(key, value)

        # Make sure we actually have tests to execute
        if not mytests and not mybenchmarks:
            # no tests in this test set, probably just imports.. skip to next test set
            break

        myinteractive = True if myinteractive or myconfig.interactive else False

        # Run tests, collecting statistics as needed
        for test in mytests:
            # Initialize the dictionaries to store test fail counts and results
            if test.group not in group_results:
                group_results[test.group] = list()
                group_failure_counts[test.group] = 0

            result = run_test(test, test_config = myconfig, context=context)
            result.body = None  # Remove the body, save some memory!

            if not result.passed: # Print failure, increase failure counts for that test group
                # Use result test URL to allow for templating
                logger.error('Test Failed: '+test.name+" URL="+result.test.url+" Group="+test.group+" HTTP Status Code: "+str(result.response_code))

                # Print test failure reasons
                if result.failures:
                    for failure in result.failures:
                        log_failure(failure, context=context, test_config=myconfig)

                # Increment test failure counts for that group (adding an entry if not present)
                failures = group_failure_counts[test.group]
                failures = failures + 1
                group_failure_counts[test.group] = failures

            else: # Test passed, print results
                logger.info('Test Succeeded: '+test.name+" URL="+test.url+" Group="+test.group)

            # Add results for this test group to the resultset
            group_results[test.group].append(result)

            # handle stop_on_failure flag
            if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:
                print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'
                break

        for benchmark in mybenchmarks:  # Run benchmarks, analyze, write
            if not benchmark.metrics:
                logger.debug('Skipping benchmark, no metrics to collect')
                continue

            logger.info("Benchmark Starting: "+benchmark.name+" Group: "+benchmark.group)
            benchmark_result = run_benchmark(benchmark, myconfig, context=context)
            print benchmark_result
            logger.info("Benchmark Done: "+benchmark.name+" Group: "+benchmark.group)

            if benchmark.output_file:  # Write file
                logger.debug('Writing benchmark to file in format: '+benchmark.output_format)
                write_method = OUTPUT_METHODS[benchmark.output_format]
                my_file =  open(benchmark.output_file, 'w')  # Overwrites file
                logger.debug("Benchmark writing to file: " + benchmark.output_file)
                write_method(my_file, benchmark_result, benchmark, test_config = myconfig)
                my_file.close()

    if myinteractive:
        # a break for when interactive bits are complete, before summary data
        print "==================================="

    # Print summary results
    for group in sorted(group_results.keys()):
        test_count = len(group_results[group])
        failures = group_failure_counts[group]
        total_failures = total_failures + failures
        if (failures > 0):
            print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'
        else:
            print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'

    return total_failures
Exemple #42
0
def run_testsets(testsets):
    """ Execute a set of tests, using given TestSet list input """
    group_results = dict() #results, by group
    group_failure_counts = dict()
    total_failures = 0
    myinteractive = False

    for testset in testsets:
        mytests = testset.tests
        myconfig = testset.config
        mybenchmarks = testset.benchmarks
        context = Context()

        # Bind variables & add generators if pertinent
        if myconfig.variable_binds:
            context.bind_variables(myconfig.variable_binds)
        if myconfig.generators:
            for key, value in myconfig.generators.items():
                context.add_generator(key, value)

        #Make sure we actually have tests to execute
        if not mytests and not mybenchmarks:
            # no tests in this test set, probably just imports.. skip to next test set
            break

        myinteractive = True if myinteractive or myconfig.interactive else False

        #Run tests, collecting statistics as needed
        for test in mytests:
            #Initialize the dictionaries to store test fail counts and results
            if test.group not in group_results:
                group_results[test.group] = list()
                group_failure_counts[test.group] = 0

            result = run_test(test, test_config = myconfig, context=context)
            result.body = None  # Remove the body, save some memory!

            if not result.passed: #Print failure, increase failure counts for that test group
                # Use result test URL to allow for templating
                logger.error('Test Failed: '+test.name+" URL="+result.test.url+" Group="+test.group+" HTTP Status Code: "+str(result.response_code))

                # Print test failure reasons
                if result.failures:
                    for failure in result.failures:
                        log_failure(failure, context=context, test_config=myconfig)

                #Increment test failure counts for that group (adding an entry if not present)
                failures = group_failure_counts[test.group]
                failures = failures + 1
                group_failure_counts[test.group] = failures

            else: #Test passed, print results
                logger.info('Test Succeeded: '+test.name+" URL="+test.url+" Group="+test.group)

            #Add results for this test group to the resultset
            group_results[test.group].append(result)

            # handle stop_on_failure flag
            if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:
                print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'
                break

        for benchmark in mybenchmarks:  # Run benchmarks, analyze, write
            if not benchmark.metrics:
                logger.debug('Skipping benchmark, no metrics to collect')
                continue

            logger.info("Benchmark Starting: "+benchmark.name+" Group: "+benchmark.group)
            benchmark_result = run_benchmark(benchmark, myconfig, context=context)
            print benchmark_result
            logger.info("Benchmark Done: "+benchmark.name+" Group: "+benchmark.group)

            if benchmark.output_file:  # Write file
                logger.debug('Writing benchmark to file in format: '+benchmark.output_format)
                write_method = OUTPUT_METHODS[benchmark.output_format]
                my_file =  open(benchmark.output_file, 'w')  # Overwrites file
                logger.debug("Benchmark writing to file: " + benchmark.output_file)
                write_method(my_file, benchmark_result, benchmark, test_config = myconfig)
                my_file.close()

    if myinteractive:
        # a break for when interactive bits are complete, before summary data
        print "==================================="

    #Print summary results
    for group in sorted(group_results.keys()):
        test_count = len(group_results[group])
        failures = group_failure_counts[group]
        total_failures = total_failures + failures
        if (failures > 0):
            print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'
        else:
            print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'

    return total_failures
Exemple #43
0
def run_benchmark(benchmark, test_config = TestConfig(), context = None):
    """ Perform a benchmark, (re)using a given, configured CURL call to do so
        The actual analysis of metrics is performed separately, to allow for testing

        Executes warmup_runs un-measured calls first (to allow caching/JIT on
        the client side), then benchmark_runs measured calls, collecting each
        configured curl metric per run. Failed curl calls increment
        output.failures and contribute no metric samples.
    """

    # Context handling: create a throwaway context if none supplied
    my_context = context
    if my_context is None:
        my_context = Context()

    warmup_runs = benchmark.warmup_runs
    benchmark_runs = benchmark.benchmark_runs
    message = benchmark.name  # Label for log lines (was '', making logs unlabeled)

    if benchmark_runs <= 0:
        # BUG FIX: was str + int concatenation, which raised TypeError
        # instead of the intended error message.
        raise Exception("Invalid number of benchmark runs, must be > 0 :" + str(benchmark_runs))

    result = TestResponse()

    # TODO create and use a curl-returning configuration function
    # TODO create and use a post-benchmark cleanup function
    # They should use is_dynamic/is_context_modifier to determine if they need to
    #  worry about context and re-reading/retemplating and only do it if needed
    #    - Also, they will need to be smart enough to handle extraction functions
    #  For performance reasons, we don't want to re-run templating/extraction if
    #   we do not need to, and do not want to save request bodies.

    # Initialize variables to store output
    output = BenchmarkResult()
    output.name = benchmark.name
    output.group = benchmark.group
    metricnames = list(benchmark.metrics)
    metricvalues = [METRICS[name] for name in metricnames]  # Metric variable for curl, to avoid hash lookup for every metric name
    results = [list() for x in xrange(0, len(metricnames))]  # Initialize arrays to store results for each metric
    curl = pycurl.Curl()

    # Benchmark warm-up to allow for caching, JIT compiling, on client
    logger.info('Warmup: ' + message + ' started')
    for x in xrange(0, warmup_runs):
        benchmark.update_context_before(my_context)
        templated = benchmark.realize(my_context)
        curl = templated.configure_curl(timeout=test_config.timeout, context=my_context, curl_handle=curl)
        curl.setopt(pycurl.WRITEFUNCTION, lambda x: None)  # Do not store actual response body at all.
        curl.perform()

    logger.info('Warmup: ' + message + ' finished')

    logger.info('Benchmark: ' + message + ' starting')

    for x in xrange(0, benchmark_runs):  # Run the actual benchmarks
        # Setup benchmark: re-template per run so dynamic context values apply
        benchmark.update_context_before(my_context)
        templated = benchmark.realize(my_context)
        curl = templated.configure_curl(timeout=test_config.timeout, context=my_context, curl_handle=curl)
        curl.setopt(pycurl.WRITEFUNCTION, lambda x: None)  # Do not store actual response body at all.

        try:  # Run the curl call, if it errors, then add to failure counts for benchmark
            curl.perform()
        except Exception:
            output.failures = output.failures + 1
            # Replace the handle: a failed handle may be in an unusable state
            curl.close()
            curl = pycurl.Curl()
            continue  # Skip metrics collection

        # Get all metrics values for this run, and store to metric lists
        for i in xrange(0, len(metricnames)):
            results[i].append(curl.getinfo(metricvalues[i]))

    curl.close()
    logger.info('Benchmark: ' + message + ' ending')

    # Re-key raw per-run samples by metric name for analysis
    temp_results = dict()
    for i in xrange(0, len(metricnames)):
        temp_results[metricnames[i]] = results[i]
    output.results = temp_results
    return analyze_benchmark_results(output, benchmark)
Exemple #44
0
def run_test(mytest, test_config = TestConfig(), context = None):
    """ Put together test pieces: configure & run actual test, return results

        Flow: template the test against the context, configure a pycurl
        handle, perform the HTTP call, then judge the result by (1) response
        code membership in mytest.expected_status, (2) header parseability,
        and (3) each configured validator. Context updates (extraction) run
        only when the test passed.

        Returns a TestResponse; result.passed is False on curl exceptions,
        unexpected status codes, header-parse errors, or validator failures.

        NOTE(review): test_config=TestConfig() is a mutable default argument
        shared across calls -- appears to be read-only here, but confirm.
    """

    # Initialize a context if not supplied
    my_context = context
    if my_context is None:
        my_context = Context()

    # Apply generators/extractors, then resolve all templates for this run
    mytest.update_context_before(my_context)
    templated_test = mytest.realize(my_context)
    curl = templated_test.configure_curl(timeout=test_config.timeout, context=my_context)
    result = TestResponse()
    # Keep the templated test so failure logging can show the resolved URL
    result.test = templated_test

    # reset the body, it holds values from previous runs otherwise
    headers = StringIO()
    body = StringIO()
    curl.setopt(pycurl.WRITEDATA, body)
    curl.setopt(pycurl.HEADERFUNCTION, headers.write)
    if test_config.verbose:
        curl.setopt(pycurl.VERBOSE,True)
    if test_config.ssl_insecure:
        # Disable both peer-cert and hostname verification
        curl.setopt(pycurl.SSL_VERIFYPEER,0)
        curl.setopt(pycurl.SSL_VERIFYHOST,0)

    result.passed = None

    # Interactive mode: show the request and wait for user confirmation
    if test_config.interactive:
        print("===================================")
        print("%s" % mytest.name)
        print("-----------------------------------")
        print("REQUEST:")
        print("%s %s" % (templated_test.method, templated_test.url))
        print("HEADERS:")
        print("%s" % (templated_test.headers))
        if mytest.body is not None:
            print("\n%s" % templated_test.body)
        raw_input("Press ENTER when ready (%d): " % (mytest.delay))

    if mytest.delay > 0:
        print("Delaying for %ds" % mytest.delay)
        time.sleep(mytest.delay)

    try:
        curl.perform()  # Run the actual call
    except Exception as e:
        # Curl exception occurred (network error), do not pass go, do not collect $200
        trace = traceback.format_exc()
        result.failures.append(Failure(message="Curl Exception: {0}".format(e), details=trace, failure_type=validators.FAILURE_CURL_EXCEPTION))
        result.passed = False
        curl.close()
        return result

    # Retrieve values and release the IO buffers
    result.body = body.getvalue()
    body.close()
    result.response_headers = headers.getvalue()
    headers.close()

    response_code = curl.getinfo(pycurl.RESPONSE_CODE)
    result.response_code = response_code

    logger.debug("Initial Test Result, based on expected response code: "+str(response_code in mytest.expected_status))

    if response_code in mytest.expected_status:
        result.passed = True
    else:
        # Invalid response code
        result.passed = False
        failure_message = "Invalid HTTP response code: response code {0} not in expected codes [{1}]".format(response_code, mytest.expected_status)
        result.failures.append(Failure(message=failure_message, details=None, failure_type=validators.FAILURE_INVALID_RESPONSE))

    # Parse HTTP headers (replaces the raw header string with parsed form)
    try:
        result.response_headers = parse_headers(result.response_headers)
    except Exception as e:
        trace = traceback.format_exc()
        result.failures.append(Failure(message="Header parsing exception: {0}".format(e), details=trace, failure_type=validators.FAILURE_TEST_EXCEPTION))
        result.passed = False
        curl.close()
        return result

    # print str(test_config.print_bodies) + ',' + str(not result.passed) + ' , ' + str(test_config.print_bodies or not result.passed)

    head = result.response_headers

    # execute validator on body -- only if the status-code check passed
    if result.passed is True:
        body = result.body
        if mytest.validators is not None and isinstance(mytest.validators, list):
            logger.debug("executing this many validators: " + str(len(mytest.validators)))
            failures = result.failures
            for validator in mytest.validators:
                validate_result = validator.validate(body=body, headers=head, context=my_context)
                if not validate_result:
                    result.passed = False
                # Proxy for checking if it is a Failure object, because of import issues with isinstance there
                if hasattr(validate_result, 'details'):
                    failures.append(validate_result)
                # TODO add printing of validation for interactive mode
        else:
            logger.debug("no validators found")

        # Only do context updates if test was successful
        mytest.update_context_after(result.body, my_context)

    # Print response body if override is set to print all *OR* if test failed (to capture maybe a stack trace)
    if test_config.print_bodies or not result.passed:
        if test_config.interactive:
            print("RESPONSE:")
        # "string-escape" codec is Python 2 only
        print(result.body.decode("string-escape"))

    # TODO add string escape on body output
    logger.debug(result)

    curl.close()
    return result