Example #1
    def test_get_validators(self):
        """ Test that validators work correctly """
        test = Test()
        test.url = self.prefix + "/api/person/"

        # Validators need library calls to configure them
        test.validators = list()
        cfg_exists = {"jsonpath_mini": "objects.0", "test": "exists"}
        test.validators.append(validators.parse_validator("extract_test", cfg_exists))
        cfg_exists_0 = {"jsonpath_mini": "meta.offset", "test": "exists"}
        test.validators.append(validators.parse_validator("extract_test", cfg_exists_0))
        cfg_not_exists = {"jsonpath_mini": "objects.100", "test": "not_exists"}
        test.validators.append(validators.parse_validator("extract_test", cfg_not_exists))
        cfg_compare_login = {"jsonpath_mini": "objects.0.login", "expected": "gbaltar"}
        test.validators.append(validators.parse_validator("compare", cfg_compare_login))
        cfg_compare_id = {"jsonpath_mini": "objects.1.id", "comparator": "gt", "expected": -1}
        test.validators.append(validators.parse_validator("compare", cfg_compare_id))

        test_response = resttest.run_test(test)
        for failure in test_response.failures:
            print("REAL FAILURE")
            print("Test Failure, failure type: {0}, Reason: {1}".format(failure.failure_type, failure.message))
            if failure.details:
                print("Validator/Error details: " + str(failure.details))
        self.assertFalse(test_response.failures)
        self.assertTrue(test_response.passed)
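
For comparison, the same validator chain can be written as the deserialized-YAML structure that Test.parse_test consumes (see the parse_testsets examples later in this listing). This is only a sketch: it assumes the surrounding pyresttest imports (Test, validators) and an illustrative base URL; the structure keys mirror the config dictionaries above.

# Hypothetical structure: the validators above as the list-of-dicts form
# produced by YAML parsing and accepted by Test.parse_test.
test_struct = [
    {"url": "/api/person/"},
    {"validators": [
        {"extract_test": {"jsonpath_mini": "objects.0", "test": "exists"}},
        {"compare": {"jsonpath_mini": "objects.0.login", "expected": "gbaltar"}},
        {"compare": {"jsonpath_mini": "objects.1.id", "comparator": "gt", "expected": -1}},
    ]},
]
parsed_test = Test.parse_test("http://localhost:8000", test_struct)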
Example #2
 def test_failed_get(self):
     """ Test GET that should fail """
     test = Test()
     test.url = self.prefix + "/api/person/500/"
     test_response = resttest.run_test(test)
     self.assertEqual(False, test_response.passed)
     self.assertEqual(404, test_response.response_code)
Example #3
 def test_get(self):
     """ Basic local get test """
     test = Test()
     test.url = self.prefix + "/api/person/"
     test_response = resttest.run_test(test)
     self.assertTrue(test_response.passed)
     self.assertEqual(200, test_response.response_code)
Example #4
    def test_get_validators(self):
        """ Test that validators work correctly """
        test = Test()
        test.url = self.prefix + '/api/person/'
        
        # Validators need library calls to configure them
        test.validators = list()
        cfg_exists = {'jsonpath_mini': "objects.0", 'test':'exists'}
        test.validators.append(validators.parse_validator('extract_test', cfg_exists))
        cfg_exists_0 = {'jsonpath_mini': "meta.offset", 'test':'exists'}
        test.validators.append(validators.parse_validator('extract_test', cfg_exists_0))
        cfg_not_exists = {'jsonpath_mini': "objects.100", 'test':'not_exists'}
        test.validators.append(validators.parse_validator('extract_test', cfg_not_exists))
        cfg_compare_login = {'jsonpath_mini': 'objects.0.login', 'expected': 'gbaltar'}
        test.validators.append(validators.parse_validator('compare', cfg_compare_login))
        cfg_compare_id = {'jsonpath_mini': 'objects.1.id', 'comparator':'gt', 'expected': -1}
        test.validators.append(validators.parse_validator('compare', cfg_compare_id))

        test_response = resttest.run_test(test)
        for failure in test_response.failures:
            print "REAL FAILURE"
            print "Test Failure, failure type: {0}, Reason: {1}".format(failure.failure_type, failure.message)
            if failure.details:
                print "Validator/Error details: "+str(failure.details)
        self.assertFalse(test_response.failures)
        self.assertTrue(test_response.passed)
Example #5
File: main.py Project: ismtabo/Test
def main():
	"""
	Main procedure of the program.
	TODO: infinite loop that lets you pick another test (or the same one) when the current one finishes
	"""
	# path = raw_input("Enter the name (or path) of your '.csv' test file: ")
	# path = 'testPOO.csv'
	path = 'testEDA.csv'
	try:
		io = Read(path)
	except IOError:
		correct_path = False
	else:
		correct_path = True
	while not correct_path:
		path = raw_input("Enter the file path: ")
		try:
			io = Read(path)
		except IOError:
			correct_path = False
		else:
			correct_path = True

	info, answers = io.getContent()

	test = Test(info, answers)

	run(test)

	print test.finalResult()

	return 0
Example #6
 def test_get_redirect(self):
     """ Basic local get test """
     test = Test()
     test.curl_options = {"FOLLOWLOCATION": True}
     test.url = self.prefix + "/api/person"
     test_response = resttest.run_test(test)
     self.assertTrue(test_response.passed)
     self.assertEqual(200, test_response.response_code)
Example #7
def parse_testsets(base_url, test_structure, test_files=set(), working_directory=None):
    """ Convert a Python datastructure read from validated YAML to a set of structured testsets
    The data stucture is assumed to be a list of dictionaries, each of which describes:
        - a tests (test structure)
        - a simple test (just a URL, and a minimal test is created)
        - or overall test configuration for this testset
        - an import (load another set of tests into this one, from a separate file)
            - For imports, these are recursive, and will use the parent config if none is present

    Note: test_files is used to track tests that import other tests, to avoid recursive loops

    This returns a list of testsets, corresponding to imported testsets and in-line multi-document sets
    """

    tests_out = list()
    test_config = TestConfig()
    testsets = list()
    benchmarks = list()

    if working_directory is None:
        working_directory = os.path.abspath(os.getcwd())

    # returns a testconfig and collection of tests
    for node in test_structure:  # Iterate through lists of test and configuration elements
        if isinstance(node, dict):  # Each config element is a miniature key-value dictionary
            node = lowercase_keys(node)
            for key in node:
                if key == u'import':
                    importfile = node[key]  # import another file
                    if importfile not in test_files:
                        logger.debug("Importing test sets: " + importfile)
                        test_files.add(importfile)
                        import_test_structure = read_test_file(importfile)
                        with cd(os.path.dirname(os.path.realpath(importfile))):
                            import_testsets = parse_testsets(base_url, import_test_structure, test_files)
                            testsets.extend(import_testsets)
                elif key == u'url':  # Simple test, just a GET to a URL
                    mytest = Test()
                    val = node[key]
                    assert isinstance(val, str) or isinstance(val, unicode)
                    mytest.url = base_url + val
                    tests_out.append(mytest)
                elif key == u'test':  # Complex test with additional parameters
                    with cd(working_directory):
                        child = node[key]
                        mytest = Test.parse_test(base_url, child)
                        tests_out.append(mytest)
                elif key == u'benchmark':
                    benchmark = parse_benchmark(base_url, node[key])
                    benchmarks.append(benchmark)
                elif key == u'config' or key == u'configuration':
                    test_config = parse_configuration(node[key])
    testset = TestSet()
    testset.tests = tests_out
    testset.config = test_config
    testset.benchmarks = benchmarks
    testsets.append(testset)
    return testsets
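
For context, a sketch of the input this function expects. The structure below mirrors what a YAML loader would produce for a small testset file, limited to the 'config', 'url', and 'test' keys handled in the loop above; the base URL, timeout value, and paths are illustrative assumptions.

# Hypothetical deserialized-YAML structure: one config node, one simple
# URL-only test, and one full test definition.
test_structure = [
    {"config": [{"timeout": 10}]},
    {"url": "/api/person/"},
    {"test": [{"url": "/api/person/1/"}, {"method": "GET"}]},
]

testsets = parse_testsets("http://localhost:8000", test_structure)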
Example #8
 def test_put_inplace(self):
     """ Test PUT where item already exists """
     test = Test()
     test.url = self.prefix + "/api/person/1/"
     test.method = u"PUT"
     test.body = '{"first_name": "Gaius","id": 1,"last_name": "Baltar","login": "******"}'
     test.headers = {u"Content-Type": u"application/json"}
     test_response = resttest.run_test(test)
     self.assertEqual(True, test_response.passed)
     self.assertEqual(200, test_response.response_code)
Example #9
    def test_header_validators(self):
        test = Test()
        test.url = self.prefix + "/api/person/1/"
        config = {"header": "server", "comparator": "contains", "expected": "WSGI"}
        test.validators = list()
        test.validators.append(validators.parse_validator("comparator", config))
        result = resttest.run_test(test)

        if result.failures:
            for fail in result.failures:
                print(fail)
        self.assertTrue(result.passed)
Example #10
 def test_get_validators_fail(self):
     """ Test validators that should fail """
     test = Test()
     test.url = self.prefix + '/api/person/'
     test.validators = list()
     cfg_exists = {'jsonpath_mini': "objects.500", 'test':'exists'}
     test.validators.append(validators.parse_validator('extract_test', cfg_exists))
     cfg_not_exists = {'jsonpath_mini': "objects.1", 'test':'not_exists'}
     test.validators.append(validators.parse_validator('extract_test', cfg_not_exists))
     cfg_compare = {'jsonpath_mini': "objects.1.last_name", 'expected':'NotJenkins'}
     test.validators.append(validators.parse_validator('compare', cfg_compare))
     test_response = resttest.run_test(test)
     self.assertFalse(test_response.passed)
     self.assertTrue(test_response.failures)
     self.assertEqual(3, len(test_response.failures))
Example #11
 def test_get_validators_fail(self):
     """ Test validators that should fail """
     test = Test()
     test.url = self.prefix + "/api/person/"
     test.validators = list()
     cfg_exists = {"jsonpath_mini": "objects.500", "test": "exists"}
     test.validators.append(validators.parse_validator("extract_test", cfg_exists))
     cfg_not_exists = {"jsonpath_mini": "objects.1", "test": "not_exists"}
     test.validators.append(validators.parse_validator("extract_test", cfg_not_exists))
     cfg_compare = {"jsonpath_mini": "objects.1.last_name", "expected": "NotJenkins"}
     test.validators.append(validators.parse_validator("compare", cfg_compare))
     test_response = resttest.run_test(test)
     self.assertFalse(test_response.passed)
     self.assertTrue(test_response.failures)
     self.assertEqual(3, len(test_response.failures))
Example #12
	def _runPlainR(self):
		""" Runs the tester in plain R mode, where it outputs the plain R version of the tests to a special file, rather than running the tests. """
		if (len(self._testRoots) != 1):
			error("When using --plainr mode, only one root can be selected")
		root = self._testRoots[0][0]
		lastFilename = ""
		outfile = None
		fileTests = 0
		print("Creating R-compatible raw tests. The following is a list of test file entered")
		print("and number of tests generated:\n")
		for t in Test.enumerate(self._testRoots, self._recursive):
			if (t.filename() != lastFilename):
				if (outfile != None):
					print("["+str(fileTests)+"]")
					outfile.close()
					fileTests = 0
				fname = os.path.join(self._plainROutput, t.filename()[len(root)+1:])
				dirname, filename = os.path.split(fname)
				print(strFormat(fname), end="")
				os.makedirs(dirname, exist_ok = True)
				outfile = open(fname, "w")
				lastFilename = t.filename()
			for c in t.commands():
				if (c.find("#! ") == 0):
					outfile.write("#! "+t.name()+"\n")
				elif (c.find("#!g") == 0):
					pass
				else:
					outfile.write(c.replace("\\\"",'"')+"\n")
			outfile.write(t.code()+"\n\n")
			fileTests += 1
		if (outfile != None):
			print("["+str(fileTests)+"]")
			outfile.close()
Example #13
    def test_header_validators(self):
        test = Test()
        test.url = self.prefix + '/api/person/1/'
        config = {
            'header': 'server',
            'comparator': 'contains',
            'expected': 'WSGI'
        }
        test.validators = list()
        test.validators.append(
            validators.parse_validator('comparator', config))
        result = resttest.run_test(test)

        if result.failures:
            for fail in result.failures:
                print(fail)
        self.assertTrue(result.passed)
Example #14
    def test_header_extraction(self):
        test = Test()
        test.url = self.prefix + "/api/person/1/"
        key1 = "server-header"
        key2 = "server-header-mixedcase"

        test.extract_binds = {
            key1: validators.HeaderExtractor.parse("server"),
            # Verify case-insensitive behavior
            key2: validators.HeaderExtractor.parse("sErVer"),
        }
        my_context = Context()
        test_response = resttest.run_test(test, context=my_context)
        val1 = my_context.get_value(key1)
        val2 = my_context.get_value(key2)
        self.assertEqual(val1, val2)
        self.assertTrue("wsgi" in val1.lower())
        self.assertTrue("wsgi" in val2.lower())
Example #15
def parse_benchmark(base_url, node):
    """ Try building a benchmark configuration from deserialized configuration root node """
    node = lowercase_keys(flatten_dictionaries(node))  # Make it usable

    benchmark = Benchmark()

    # Read & set basic test parameters
    benchmark = Test.parse_test(base_url, node, benchmark)

    # Complex parsing because of list/dictionary/singleton legal cases
    for key, value in node.items():
        if key == u'warmup_runs':
            benchmark.warmup_runs = int(value)
        elif key == u'benchmark_runs':
            benchmark.benchmark_runs = int(value)
        elif key == u'output_format':
            format = value.lower()
            if format in OUTPUT_FORMATS:
                benchmark.output_format = format
            else:
                raise Exception('Invalid benchmark output format: ' + format)
        elif key == u'output_file':
            if not isinstance(value, basestring):
                raise Exception("Invalid output file format")
            benchmark.output_file = value
        elif key == u'metrics':
            if isinstance(value, unicode) or isinstance(value,str):
                # Single value
                benchmark.add_metric(unicode(value, 'UTF-8'))
            elif isinstance(value, list) or isinstance(value, set):
                # List of single values or list of {metric: aggregate, ...}
                for metric in value:
                    if isinstance(metric, dict):
                        for metricname, aggregate in metric.items():
                            if not isinstance(metricname, basestring):
                                raise Exception("Invalid metric input: non-string metric name")
                            if not isinstance(aggregate, basestring):
                                raise Exception("Invalid aggregate input: non-string aggregate name")
                            # TODO unicode-safe this
                            benchmark.add_metric(unicode(metricname,'UTF-8'), unicode(aggregate,'UTF-8'))

                    elif isinstance(metric, unicode) or isinstance(metric, str):
                        benchmark.add_metric(unicode(metric,'UTF-8'))
            elif isinstance(value, dict):
                # Dictionary of metric-aggregate pairs
                for metricname, aggregate in value.items():
                    if not isinstance(metricname, basestring):
                        raise Exception("Invalid metric input: non-string metric name")
                    if not isinstance(aggregate, basestring):
                        raise Exception("Invalid aggregate input: non-string aggregate name")
                    benchmark.add_metric(unicode(metricname,'UTF-8'), unicode(aggregate,'UTF-8'))
            else:
                raise Exception("Invalid benchmark metric datatype: "+str(value))

    return benchmark
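
A sketch of a node this parser would accept, exercising the list form of 'metrics' with both a bare metric name and a metric-with-aggregate entry; the URL, run counts, and the specific metric/aggregate names ('total_time', 'mean') are assumptions used for illustration.

# Hypothetical deserialized 'benchmark' node.
node = {
    "url": "/api/person/",
    "warmup_runs": 5,
    "benchmark_runs": 100,
    "output_format": "csv",
    "metrics": [
        "total_time",              # single metric, raw values
        {"total_time": "mean"},    # metric with an aggregate function
    ],
}
benchmark = parse_benchmark("http://localhost:8000", node)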
Example #16
 def test_patch(self):
     """ Basic local get test """
     test = Test()
     test.url = self.prefix + "/api/person/2/"
     test.method = "PATCH"
     test.body = '{"login":"******"}'
     test.headers = {u"Content-Type": u"application/json", u"X-HTTP-Method-Override": u"PATCH"}
     test.expected_status = [202, 400]  # Django issues give a 400, sigh
     test_response = resttest.run_test(test)
     self.assertTrue(test_response.passed)
Example #17
    def test_post(self):
        """ Test POST to create an item """
        test = Test()
        test.url = self.prefix + "/api/person/"
        test.method = u"POST"
        test.expected_status = [200, 201, 204]
        test.body = '{"first_name": "Willim","last_name": "Adama","login": "******"}'
        test.headers = {u"Content-Type": u"application/json"}
        test_response = resttest.run_test(test)
        self.assertEqual(True, test_response.passed)
        self.assertEqual(201, test_response.response_code)

        # Test user was created
        test2 = Test()
        test2.url = self.prefix + "/api/person/?login=theadmiral"
        test_response2 = resttest.run_test(test2)
        self.assertTrue(test_response2.passed)
        obj = json.loads(str(test_response2.body))
        print(json.dumps(obj))
Example #18
    def test_put_created(self):
        """ Test PUT where item DOES NOT already exist """
        test = Test()
        test.url = self.prefix + "/api/person/100/"
        test.method = u"PUT"
        test.expected_status = [200, 201, 204]
        test.body = '{"first_name": "Willim","last_name": "Adama","login":"******", "id": 100}'
        test.headers = {u"Content-Type": u"application/json"}
        test_response = resttest.run_test(test)
        self.assertEqual(True, test_response.passed)
        self.assertEqual(201, test_response.response_code)

        # Test it was actually created
        test2 = Test()
        test2.url = test.url
        test_response2 = resttest.run_test(test2)
        self.assertTrue(test_response2.passed)
        self.assertTrue(u'"last_name": "Adama"' in test_response2.unicode_body())
        self.assertTrue(u'"login": "******"' in test_response2.unicode_body())
Example #19
	def run(self):
		""" Runs the test suite as specified, that is runs all tests in all files under all test roots. """
		if (self._plainROutput != None):
			return self._runPlainR()
		self._print("Initializing target %s " % self._target)
		self.target = __import__(self._target).Target(self._targetPath)
		self._print("    path:         %s" % self.target.path)
		self._print("    version:      %s" % self.target.version)
		self._print("    architecture: %s" % self.target.arch)
		tests = 0
		test_fails = 0
		run_fails = 0
		skipped = 0

		for t in Test.enumerate(self._testRoots, self._recursive):
			if (not self._reportOnlyErrors):
				print(strFormat(t.name()), end="")
			result = self._runTest(t)
			if (self._reportOnlyErrors):
				if (result[0] in (TestR.RUN_FAIL, TestR.TEST_FAIL)):
					print(strFormat(t.name()), end="")
					print(result[0])
			else:
				print(result[0])
			if (result[0] in (TestR.RUN_FAIL, TestR.TEST_FAIL)):
				print("    File: {0} [line {1}]".format(t.filename(), t.line()))
				print(strLineOffset(result[1]))
				self._print("    Code:")
				self._print(strLineOffset(t.code(), 8))
			tests += 1
			if (result[0] == TestR.RUN_FAIL):
				run_fails += 1
			elif (result[0] == TestR.TEST_FAIL):
				test_fails += 1
			elif (result[0] == TestR.NOT_RUN):
				skipped += 1

		print("\n----------------------------------------------------------------------------------------------------------\n")
		print("Total tests:    {0}".format(tests))
		print("Skipped:        {0}".format(skipped))
		print("Successful:     {0} {1}".format(tests-test_fails-run_fails-skipped, "OK" if (test_fails + run_fails == 0) else "FAIL"))
		print("Failed:         {0}".format(run_fails + test_fails))
		print("    execution:  {0}".format(run_fails))
		print("    checks:     {0}".format(test_fails))
Example #20
    def test_delete(self):
        """ Try removing an item """
        test = Test()
        test.url = self.prefix + "/api/person/1/"
        test.expected_status = [200, 202, 204]
        test.method = u"DELETE"
        test_response = resttest.run_test(test)
        self.assertEqual(True, test_response.passed)
        self.assertEqual(204, test_response.response_code)

        # Verify it's really gone
        test.method = u"GET"
        test.expected_status = [404]
        test_response = resttest.run_test(test)
        self.assertEqual(True, test_response.passed)
        self.assertEqual(404, test_response.response_code)

        # Check it's gone by name
        test2 = Test()
        test2.url = self.prefix + "/api/person/?first_name__contains=Gaius"
        test_response2 = resttest.run_test(test2)
        self.assertTrue(test_response2.passed)
        self.assertTrue(u'"objects": []' in test_response2.unicode_body())
Example #21
 def __init__(self, server, code):
     Test.__init__(self, "as", server)
     self.server = server
     self.code = code
Example #22
 def test_detailed_get(self):
     test = Test()
     test.url = self.prefix + '/api/person/1/'
     test_response = resttest.run_test(test)
     self.assertEqual(True, test_response.passed)
     self.assertEqual(200, test_response.response_code)
Example #23
 def setUp(self):
     Test.setUp(self)
Example #24
 def test_detailed_get(self):
     test = Test()
     test.url = self.prefix + "/api/person/1/"
     test_response = resttest.run_test(test)
     self.assertEqual(True, test_response.passed)
     self.assertEqual(200, test_response.response_code)
Example #25
def parse_testsets(base_url, test_structure, test_files=set(), working_directory=None, vars=None):
    """ Convert a Python data structure read from validated YAML to a set of structured testsets
    The data structure is assumed to be a list of dictionaries, each of which describes:
        - a test (test structure)
        - a simple test (just a URL, and a minimal test is created)
        - or overall test configuration for this testset
        - an import (load another set of tests into this one, from a separate file)
            - For imports, these are recursive, and will use the parent config if none is present

    Note: test_files is used to track tests that import other tests, to avoid recursive loops

    This returns a list of testsets, corresponding to imported testsets and in-line multi-document sets
    """

    tests_out = list()
    test_config = TestConfig()
    testsets = list()
    benchmarks = list()

    if working_directory is None:
        working_directory = os.path.abspath(os.getcwd())

    if vars and isinstance(vars, dict):
        test_config.variable_binds = vars

    # returns a testconfig and collection of tests
    for node in test_structure:  # Iterate through lists of test and configuration elements
        if isinstance(node, dict):  # Each config element is a miniature key-value dictionary
            node = lowercase_keys(node)
            for key in node:
                if key == u'import':
                    importfile = node[key]  # import another file
                    if importfile not in test_files:
                        logger.debug("Importing test sets: " + importfile)
                        test_files.add(importfile)
                        import_test_structure = read_test_file(importfile)
                        with cd(os.path.dirname(os.path.realpath(importfile))):
                            import_testsets = parse_testsets(
                                base_url, import_test_structure, test_files, vars=vars)
                            testsets.extend(import_testsets)
                elif key == u'url':  # Simple test, just a GET to a URL
                    mytest = Test()
                    val = node[key]
                    assert isinstance(val, str) or isinstance(val, unicode)
                    mytest.url = base_url + val
                    tests_out.append(mytest)
                elif key == u'test':  # Complex test with additional parameters
                    with cd(working_directory):
                        child = node[key]
                        mytest = Test.parse_test(base_url, child)
                        tests_out.append(mytest)
                elif key == u'benchmark':
                    benchmark = parse_benchmark(base_url, node[key])
                    benchmarks.append(benchmark)
                elif key == u'config' or key == u'configuration':
                    test_config = parse_configuration(
                        node[key], base_config=test_config)
    testset = TestSet()
    testset.tests = tests_out
    testset.config = test_config
    testset.benchmarks = benchmarks
    testsets.append(testset)
    return testsets
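
Relative to the earlier parse_testsets example, the added vars argument seeds testset-level variable bindings and is forwarded to imported testsets. A minimal sketch of a call, with the binding name and URLs as illustrative assumptions:

# Hypothetical call: the bindings land on test_config.variable_binds.
testsets = parse_testsets(
    "http://localhost:8000",
    [{"url": "/api/person/"}],
    vars={"user_login": "gbaltar"},
)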