Example #1
import cProfile

# Imports below assume pyresttest's standard module layout
from pyresttest import resttest
from pyresttest.benchmarks import Benchmark
from pyresttest.binding import Context
from pyresttest.contenthandling import ContentHandler
from pyresttest.generators import factory_generate_ids

test = Benchmark()
test.warmup_runs = 0
test.benchmark_runs = 1000
test.raw_metrics = set()
test.metrics = {'total_time'}
test.aggregated_metrics = {'total_time': ['total', 'mean']}

# Basic get test
test.url = 'http://localhost:8000/api/person/'
test.name = 'Basic GET'
print('Basic GET test')
#cProfile.run('resttest.run_benchmark(test)', sort='cumtime')

# Test a generator PUT method
test.method = 'PUT'
test.set_url('http://localhost:8000/api/person/$id/', isTemplate=True)
test.headers = {'Content-Type': 'application/json'}
handler = ContentHandler()
handler.setup(
    '{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "******"}',
    is_template_content=True)
test.body = handler
context = Context()
context.add_generator('gen', factory_generate_ids(starting_id=10)())
test.generator_binds = {'id': 'gen'}
print('Running templated PUT test')
cProfile.run('resttest.run_benchmark(test, context=context)', sort='cumtime')
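
To dig further into the hotspots, the profile can also be written to disk and examined with the standard-library pstats module. A minimal sketch, assuming the same test and context objects as above (the output filename is arbitrary):

import pstats

# Dump the profile to a file instead of printing it
cProfile.run('resttest.run_benchmark(test, context=context)', 'put_benchmark.prof')

# Load the saved profile and show the 20 most expensive calls by cumulative time
stats = pstats.Stats('put_benchmark.prof')
stats.sort_stats('cumulative').print_stats(20)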
Example #2
def run_testsets(testsets):
    """ Execute a set of tests, using given TestSet list input """
    group_results = dict()  # results, by group
    group_failure_counts = dict()
    total_failures = 0
    myinteractive = False
    curl_handle = pycurl.Curl()

    for testset in testsets:
        mytests = testset.tests
        myconfig = testset.config
        mybenchmarks = testset.benchmarks
        context = Context()

        # Bind variables & add generators if pertinent
        if myconfig.variable_binds:
            context.bind_variables(myconfig.variable_binds)
        if myconfig.generators:
            for key, value in myconfig.generators.items():
                context.add_generator(key, value)

        # Make sure we actually have tests to execute
        if not mytests and not mybenchmarks:
            # No tests in this test set (probably just imports); skip to the
            # next test set
            continue

        myinteractive = bool(myinteractive or myconfig.interactive)

        # Run tests, collecting statistics as needed
        for test in mytests:
            # Initialize the dictionaries to store test fail counts and results
            if test.group not in group_results:
                group_results[test.group] = list()
                group_failure_counts[test.group] = 0

            result = run_test(test, test_config=myconfig, context=context, curl_handle=curl_handle)
            result.body = None  # Remove the body, save some memory!

            if not result.passed:  # Print failure, increase failure counts for that test group
                # Use result test URL to allow for templating
                logger.error('Test Failed: ' + test.name + " URL=" + result.test.url +
                             " Group=" + test.group + " HTTP Status Code: " + str(result.response_code))

                # Print test failure reasons
                if result.failures:
                    for failure in result.failures:
                        log_failure(failure, context=context,
                                    test_config=myconfig)

                # Increment the failure count for this test group (the entry
                # was initialized above)
                group_failure_counts[test.group] += 1

            else:  # Test passed, print results
                logger.info('Test Succeeded: ' + test.name +
                            " URL=" + test.url + " Group=" + test.group)

            # Add results for this test group to the resultset
            group_results[test.group].append(result)

            # handle stop_on_failure flag
            if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:
                print(
                    'STOP ON FAILURE! stopping test set execution, continuing with other test sets')
                break

        for benchmark in mybenchmarks:  # Run benchmarks, analyze, write
            if not benchmark.metrics:
                logger.debug('Skipping benchmark, no metrics to collect')
                continue

            logger.info("Benchmark Starting: " + benchmark.name +
                        " Group: " + benchmark.group)
            benchmark_result = run_benchmark(
                benchmark, myconfig, context=context)
            print(benchmark_result)
            logger.info("Benchmark Done: " + benchmark.name +
                        " Group: " + benchmark.group)

            if benchmark.output_file:  # Write results to file
                logger.debug(
                    'Writing benchmark to file in format: ' + benchmark.output_format)
                write_method = OUTPUT_METHODS[benchmark.output_format]
                logger.debug("Benchmark writing to file: " +
                             benchmark.output_file)
                with open(benchmark.output_file, 'w') as my_file:  # Overwrites file
                    write_method(my_file, benchmark_result,
                                 benchmark, test_config=myconfig)

    if myinteractive:
        # a break for when interactive bits are complete, before summary data
        print("===================================")

    # Print summary results
    for group in sorted(group_results.keys()):
        test_count = len(group_results[group])
        failures = group_failure_counts[group]
        total_failures = total_failures + failures

        passfail = {True: u'SUCCEEDED: ', False: u'FAILED: '}
        output_string = "Test Group {0} {1}: {2}/{3} Tests Passed!".format(
            group, passfail[failures == 0], str(test_count - failures), str(test_count))

        if myconfig.skip_term_colors:
            print(output_string)
        else:
            # ANSI color codes: red for a group with failures, green for all-pass
            if failures > 0:
                print('\033[91m' + output_string + '\033[0m')
            else:
                print('\033[92m' + output_string + '\033[0m')

    return total_failures
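
For context, run_testsets expects a list of TestSet objects, which pyresttest normally builds from a parsed YAML test file. A rough sketch of driving it that way, assuming pyresttest's parse_testsets helper from the same module and a hypothetical test file name:

import yaml

# 'api_smoketest.yaml' is a placeholder for a pyresttest YAML testset file
with open('api_smoketest.yaml', 'r') as f:
    test_structure = yaml.safe_load(f)

# parse_testsets turns the parsed YAML structure into TestSet objects
testsets = parse_testsets('http://localhost:8000', test_structure,
                          working_directory='.')
print('Total failures: {0}'.format(run_testsets(testsets)))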
Example #3
import cProfile

# Imports below assume pyresttest's standard module layout
from pyresttest.benchmarks import Benchmark
from pyresttest.binding import Context
from pyresttest.contenthandling import ContentHandler
from pyresttest.generators import factory_generate_ids

test = Benchmark()
test.warmup_runs = 0
test.benchmark_runs = 1000
test.raw_metrics = set()
test.metrics = {'total_time'}
test.aggregated_metrics = {'total_time': ['total', 'mean']}

# Basic get test
test.url = 'http://localhost:8000/api/person/'
test.name = 'Basic GET'
print('Basic GET test')
#cProfile.run('resttest.run_benchmark(test)', sort='cumtime')


# Test a generator PUT method
test.method = 'PUT'
test.set_url('http://localhost:8000/api/person/$id/', isTemplate=True)
test.headers = {'Content-Type': 'application/json'}
handler = ContentHandler()
handler.setup('{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "******"}',
              is_template_content=True)
test.body = handler
context = Context()
context.add_generator('gen', factory_generate_ids(starting_id=10)())
test.generator_binds = {'id': 'gen'}
print('Running templated PUT test')
cProfile.run('test.execute_macro(context=context)', sort='cumtime')