Example #1
    def __init__(self, config):
        '''
        Initialize the benchmarker.
        '''
        self.config = config
        self.time_logger = TimeLogger()
        self.metadata = Metadata(self)
        self.audit = Audit(self)

        # a list of all tests for this run
        self.tests = self.metadata.tests_to_run()

        self.results = Results(self)
        self.docker_helper = DockerHelper(self)
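For context, this constructor is driven from the toolset's entry point, which builds a BenchmarkConfig from the parsed CLI arguments and wires the stop handler to SIGTERM/SIGINT (see Example #3 below). A minimal sketch of that wiring; the import paths are assumptions about the toolset's package layout:

import signal

from toolset.utils.benchmark_config import BenchmarkConfig  # assumed module path
from toolset.benchmark.benchmarker import Benchmarker       # assumed module path


def build_benchmarker(args):
    # args is the argparse.Namespace produced by the full parser in Example #3;
    # BenchmarkConfig expects every option that parser defines.
    config = BenchmarkConfig(args)
    benchmarker = Benchmarker(config)

    # stop docker containers cleanly on SIGTERM/SIGINT, as main() does
    signal.signal(signal.SIGTERM, benchmarker.stop)
    signal.signal(signal.SIGINT, benchmarker.stop)
    return benchmarker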
Example #3
def main(argv=None):
    '''
    Runs the toolset.
    '''
    # Default argv here rather than in the function signature, where the default would be bound once at function definition time
    if argv is None:
        argv = sys.argv

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # Suite options
    parser.add_argument('--audit',
                        action='store_true',
                        default=False,
                        help='Audits framework tests for inconsistencies')
    parser.add_argument('--clean',
                        action='store_true',
                        default=False,
                        help='Removes the results directory')
    parser.add_argument('--new',
                        action='store_true',
                        default=False,
                        help='Initialize a new framework test')
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help=
        'Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help=
        'A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )

    # Test options
    parser.add_argument('--test',
                        default=None,
                        nargs='+',
                        help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument(
        '--test-lang',
        nargs='+',
        dest='test_lang',
        help='name of language directory containing all tests to run')
    parser.add_argument('--exclude',
                        default=None,
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'cached_query',
                            'fortune', 'update', 'plaintext'
                        ],
                        nargs='+',
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the URLs, and shut down. debug mode will skip verification and leave the server running.'
    )
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument('--duration',
                        default=15,
                        help='Time in seconds that each test should run for.')
    parser.add_argument('--server-host',
                        default='tfb-server',
                        help='Hostname/IP for application server')
    parser.add_argument('--database-host',
                        default='tfb-database',
                        help='Hostname/IP for database server')
    parser.add_argument('--client-host',
                        default='',
                        help='Hostname/IP for client server')
    parser.add_argument('--concurrency-levels',
                        nargs='+',
                        default=[16, 32, 64, 128, 256, 512],
                        help='List of concurrencies to benchmark')
    parser.add_argument('--pipeline-concurrency-levels',
                        nargs='+',
                        default=[256, 1024, 4096, 16384],
                        help='List of pipeline concurrencies to benchmark')
    parser.add_argument('--query-levels',
                        nargs='+',
                        default=[1, 5, 10, 15, 20],
                        help='List of query levels to benchmark')
    parser.add_argument('--cached-query-levels',
                        nargs='+',
                        default=[1, 10, 20, 50, 100],
                        help='List of cached query levels to benchmark')

    # Network options
    parser.add_argument('--network-mode',
                        default=None,
                        help='The network mode to run docker in')

    args = parser.parse_args()

    config = BenchmarkConfig(args)
    benchmarker = Benchmarker(config)

    signal.signal(signal.SIGTERM, benchmarker.stop)
    signal.signal(signal.SIGINT, benchmarker.stop)

    try:
        if config.new:
            Scaffolding(benchmarker)

        elif config.audit:
            Audit(benchmarker).start_audit()

        elif config.clean:
            cleaner.clean(benchmarker.results)
            benchmarker.docker_helper.clean()

        elif config.list_tests:
            all_tests = benchmarker.metadata.gather_tests()

            for test in all_tests:
                log(test.name)

        elif config.parse:
            all_tests = benchmarker.metadata.gather_tests()

            for test in all_tests:
                test.parse_all()

            benchmarker.results.parse(all_tests)

        else:
            any_failed = benchmarker.run()
            if config.mode == "verify":
                return any_failed
    except Exception:
        tb = traceback.format_exc()
        log("A fatal error has occurred", color=Fore.RED)
        log(tb)
        # try one last time to stop docker containers on fatal error
        try:
            benchmarker.stop()
        except:
            sys.exit(1)

    return 0
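The epilog above describes the (type int-sequence) argument format only in prose. A minimal, self-contained helper implementing those semantics is sketched below; the function name is hypothetical, and the real toolset performs this conversion elsewhere:

def parse_int_sequence(value):
    '''Parse '5' -> [5], '1,3,6' -> [1, 3, 6], '1:3:15' -> [1, 4, 7, 10, 13].'''
    if ':' in value:
        # start:step:end, using the semantics of python's range function
        start, step, end = (int(part) for part in value.split(':'))
        return list(range(start, end, step))
    if ',' in value:
        return [int(part) for part in value.split(',')]
    return [int(value)]


assert parse_int_sequence('5') == [5]
assert parse_int_sequence('1,3,6') == [1, 3, 6]
assert parse_int_sequence('1:3:15') == [1, 4, 7, 10, 13]
assert parse_int_sequence('0:1:5') == [0, 1, 2, 3, 4]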
Example #4
class Benchmarker:
    def __init__(self, config):
        '''
        Initialize the benchmarker.
        '''
        self.config = config
        self.time_logger = TimeLogger()
        self.metadata = Metadata(self)
        self.audit = Audit(self)

        # a list of all tests for this run
        self.tests = self.metadata.tests_to_run()

        self.results = Results(self)
        self.docker_helper = DockerHelper(self)

    ##########################################################################################
    # Public methods
    ##########################################################################################

    def run(self):
        '''
        Sets up the client/server machines with any necessary changes,
        then goes through each test, running its docker build and run,
        verifying the URLs, and running benchmarks against it.
        '''
        # Generate metadata
        self.metadata.list_test_metadata()

        any_failed = False
        # Run tests
        log("Running Tests...", border='=')

        # build wrk and all databases needed for current run
        self.docker_helper.build_wrk()
        self.docker_helper.build_databases()

        with open(os.path.join(self.results.directory, 'benchmark.log'),
                  'w') as benchmark_log:
            for test in self.tests:
                log("Running Test: %s" % test.name, border='-')
                with self.config.quiet_out.enable():
                    if not self.__run_test(test, benchmark_log):
                        any_failed = True
                # Load intermediate result from child process
                self.results.load()

        # Parse results
        if self.config.mode == "benchmark":
            log("Parsing Results ...", border='=')
            self.results.parse(self.tests)

        self.results.set_completion_time()
        self.results.upload()
        self.results.finish()

        return any_failed

    def stop(self, signal=None, frame=None):
        log("Shutting down (may take a moment)")
        self.docker_helper.stop()
        sys.exit(0)

    ##########################################################################################
    # Private methods
    ##########################################################################################

    def __exit_test(self, success, prefix, file, message=None):
        if message:
            log(message,
                prefix=prefix,
                file=file,
                color=Fore.RED if not success else '')
        self.time_logger.log_test_end(log_prefix=prefix, file=file)
        return success

    def __run_test(self, test, benchmark_log):
        '''
        Runs the given test, verifies that the webapp is accepting requests,
        optionally benchmarks the webapp, and ultimately stops all services
        started for this test.
        '''

        log_prefix = "%s: " % test.name
        # Start timing the total test duration
        self.time_logger.mark_test_start()

        # If the test is in the excludes list, we skip it
        if self.config.exclude and test.name in self.config.exclude:
            message = "Test {name} has been added to the excludes list. Skipping.".format(
                name=test.name)
            self.results.write_intermediate(test.name, message)
            return self.__exit_test(success=False,
                                    message=message,
                                    prefix=log_prefix,
                                    file=benchmark_log)

        database_container = None
        try:
            # Start database container
            if test.database.lower() != "none":
                self.time_logger.mark_starting_database()
                database_container = self.docker_helper.start_database(
                    test.database.lower())
                if database_container is None:
                    message = "ERROR: Problem building/running database container"
                    return self.__exit_test(success=False,
                                            message=message,
                                            prefix=log_prefix,
                                            file=benchmark_log)
                self.time_logger.mark_started_database()

            # Start webapp
            container = test.start()
            self.time_logger.mark_test_starting()
            if container is None:
                self.docker_helper.stop([container, database_container])
                message = "ERROR: Problem starting {name}".format(
                    name=test.name)
                self.results.write_intermediate(test.name, message)
                return self.__exit_test(success=False,
                                        message=message,
                                        prefix=log_prefix,
                                        file=benchmark_log)

            max_time = time.time() + 60
            while True:
                accepting_requests = test.is_accepting_requests()
                if accepting_requests \
                        or time.time() >= max_time \
                        or not self.docker_helper.server_container_exists(container.id):
                    break
                time.sleep(1)

            if not accepting_requests:
                self.docker_helper.stop([container, database_container])
                message = "ERROR: Framework is not accepting requests from client machine"
                self.results.write_intermediate(test.name, message)
                return self.__exit_test(success=False,
                                        message=message,
                                        prefix=log_prefix,
                                        file=benchmark_log)

            self.time_logger.mark_test_accepting_requests()

            # Debug mode blocks execution here until ctrl+c
            if self.config.mode == "debug":
                log("Entering debug mode. Server has started. CTRL-c to stop.",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.YELLOW)
                while True:
                    time.sleep(1)

            # Verify URLs and audit
            log("Verifying framework URLs", prefix=log_prefix)
            self.time_logger.mark_verify_start()
            passed_verify = test.verify_urls()
            self.audit.audit_test_dir(test.directory)

            # Benchmark this test
            if self.config.mode == "benchmark":
                log("Benchmarking %s" % test.name,
                    file=benchmark_log,
                    border='-')
                self.time_logger.mark_benchmarking_start()
                self.__benchmark(test, benchmark_log)
                self.time_logger.log_benchmarking_end(log_prefix=log_prefix,
                                                      file=benchmark_log)

            # Log test timing stats
            self.time_logger.log_build_flush(benchmark_log)
            self.time_logger.log_database_start_time(log_prefix, benchmark_log)
            self.time_logger.log_test_accepting_requests(
                log_prefix, benchmark_log)
            self.time_logger.log_verify_end(log_prefix, benchmark_log)

            # Stop this test
            self.docker_helper.stop([container, database_container])

            # Save results thus far into the latest results directory
            self.results.write_intermediate(
                test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))

            # Upload the results thus far to another server (optional)
            self.results.upload()

            if self.config.mode == "verify" and not passed_verify:
                return self.__exit_test(success=False,
                                        message="Failed verify!",
                                        prefix=log_prefix,
                                        file=benchmark_log)
        except Exception as e:
            tb = traceback.format_exc()
            self.results.write_intermediate(test.name,
                                            "error during test: " + str(e))
            log(tb, prefix=log_prefix, file=benchmark_log)
            return self.__exit_test(success=False,
                                    message="Error during test: %s" %
                                    test.name,
                                    prefix=log_prefix,
                                    file=benchmark_log)

        return self.__exit_test(success=True,
                                prefix=log_prefix,
                                file=benchmark_log)

    def __benchmark(self, framework_test, benchmark_log):
        '''
        Runs the benchmark for each type of test that it implements
        '''
        def benchmark_type(test_type):
            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)

            test = framework_test.runTests[test_type]
            raw_file = self.results.get_raw_file(framework_test.name,
                                                 test_type)
            if not os.path.exists(raw_file):
                # Open to create the empty file
                with open(raw_file, 'w'):
                    pass

            if not test.failed:
                # Begin resource usage metrics collection
                self.__begin_logging(framework_test, test_type)

                script = self.config.types[test_type].get_script_name()
                script_variables = self.config.types[
                    test_type].get_script_variables(
                        test.name, "http://%s:%s%s" %
                        (self.config.server_host, framework_test.port,
                         test.get_url()))

                self.docker_helper.benchmark(script, script_variables,
                                             raw_file)

                # End resource usage metrics collection
                self.__end_logging()

            results = self.results.parse_test(framework_test, test_type)
            log("Benchmark results:", file=benchmark_log)
            # TODO move into log somehow
            pprint(results)

            self.results.report_benchmark_results(framework_test, test_type,
                                                  results['results'])
            log("Complete", file=benchmark_log)

        for test_type in framework_test.runTests:
            benchmark_type(test_type)

    def __begin_logging(self, framework_test, test_type):
        '''
        Starts a dstat subprocess to monitor resource usage, to be synced
        with the client's time.
        TODO: MySQL and InnoDB are possible. Figure out how to implement them.
        '''
        output_file = self.results.get_stats_file(framework_test.name,
                                                  test_type)
        dstat_string = "dstat -Tafilmprs --aio --fs --ipc --lock \
                                      --raw --socket --tcp --udp --unix --vm --disk-util \
                                      --rpc --rpcd --output {output_file}".format(
            output_file=output_file)
        cmd = shlex.split(dstat_string)
        self.subprocess_handle = subprocess.Popen(cmd,
                                                  stdout=FNULL,
                                                  stderr=subprocess.STDOUT)

    def __end_logging(self):
        '''
        Stops the dstat logger subprocess and blocks until shutdown is complete.
        '''
        self.subprocess_handle.terminate()
        self.subprocess_handle.communicate()
class Benchmarker:
    def __init__(self, config):
        '''
        Initialize the benchmarker.
        '''
        self.config = config
        self.time_logger = TimeLogger()
        self.metadata = Metadata(self)
        self.audit = Audit(self)

        # a list of all tests for this run
        self.tests = self.metadata.tests_to_run()

        self.results = Results(self)
        self.docker_helper = DockerHelper(self)

    ##########################################################################################
    # Public methods
    ##########################################################################################

    def run(self):
        '''
        Sets up the client/server machines with any necessary changes,
        then goes through each test, running its docker build and run,
        verifying the URLs, and running benchmarks against it.
        '''
        # Generate metadata
        self.metadata.list_test_metadata()

        any_failed = False
        # Run tests
        log("Running Tests...", border='=')

        # build wrk and all databases needed for current run
        self.docker_helper.build_wrk()
        self.docker_helper.build_databases()

        with open(os.path.join(self.results.directory, 'benchmark.log'),
                  'w') as benchmark_log:
            for test in self.tests:
                log("Running Test: %s" % test.name, border='-')
                with self.config.quiet_out.enable():
                    if not self.__run_test(test, benchmark_log):
                        any_failed = True
                # Load intermediate result from child process
                self.results.load()

        # Parse results
        if self.config.mode == "benchmark":
            log("Parsing Results ...", border='=')
            self.results.parse(self.tests)

        self.results.set_completion_time()
        self.results.upload()
        self.results.finish()

        return any_failed

    def stop(self, signal=None, frame=None):
        log("Shutting down (may take a moment)")
        self.docker_helper.stop()
        sys.exit(0)

    ##########################################################################################
    # Private methods
    ##########################################################################################

    def __exit_test(self, success, prefix, file, message=None):
        if message:
            log(message,
                prefix=prefix,
                file=file,
                color=Fore.RED if not success else '')
        self.time_logger.log_test_end(log_prefix=prefix, file=file)
        return success

    def __run_test(self, test, benchmark_log):
        '''
        Runs the given test, verifies that the webapp is accepting requests,
        optionally benchmarks the webapp, and ultimately stops all services
        started for this test.
        '''

        log_prefix = "%s: " % test.name
        # Start timing the total test duration
        self.time_logger.mark_test_start()

        # If the test is in the excludes list, we skip it
        if self.config.exclude and test.name in self.config.exclude:
            message = "Test {name} has been added to the excludes list. Skipping.".format(
                name=test.name)
            self.results.write_intermediate(test.name, message)
            return self.__exit_test(
                success=False,
                message=message,
                prefix=log_prefix,
                file=benchmark_log)

        database_container = None
        try:
            # Start database container
            if test.database.lower() != "none":
                self.time_logger.mark_starting_database()
                database_container = self.docker_helper.start_database(
                    test.database.lower())
                if database_container is None:
                    message = "ERROR: Problem building/running database container"
                    return self.__exit_test(
                        success=False,
                        message=message,
                        prefix=log_prefix,
                        file=benchmark_log)
                self.time_logger.mark_started_database()

            # Start webapp
            container = test.start()
            self.time_logger.mark_test_starting()
            if container is None:
                message = "ERROR: Problem starting {name}".format(
                    name=test.name)
                self.results.write_intermediate(test.name, message)
                return self.__exit_test(
                    success=False,
                    message=message,
                    prefix=log_prefix,
                    file=benchmark_log)

            max_time = time.time() + 60
            while True:
                accepting_requests = test.is_accepting_requests()
                if accepting_requests \
                        or time.time() >= max_time \
                        or not self.docker_helper.server_container_exists(container.id):
                    break
                time.sleep(1)

            if hasattr(test, 'wait_before_sending_requests') \
                    and isinstance(test.wait_before_sending_requests, numbers.Integral) \
                    and test.wait_before_sending_requests > 0:
                time.sleep(test.wait_before_sending_requests)

            if not accepting_requests:
                message = "ERROR: Framework is not accepting requests from client machine"
                self.results.write_intermediate(test.name, message)
                return self.__exit_test(
                    success=False,
                    message=message,
                    prefix=log_prefix,
                    file=benchmark_log)

            self.time_logger.mark_test_accepting_requests()

            # Debug mode blocks execution here until ctrl+c
            if self.config.mode == "debug":
                log("Entering debug mode. Server has started. CTRL-c to stop.",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.YELLOW)
                while True:
                    time.sleep(1)

            # Verify URLs and audit
            log("Verifying framework URLs", prefix=log_prefix)
            self.time_logger.mark_verify_start()
            passed_verify = test.verify_urls()
            self.audit.audit_test_dir(test.directory)

            # Benchmark this test
            if self.config.mode == "benchmark":
                log("Benchmarking %s" % test.name,
                    file=benchmark_log,
                    border='-')
                self.time_logger.mark_benchmarking_start()
                self.__benchmark(test, benchmark_log)
                self.time_logger.log_benchmarking_end(
                    log_prefix=log_prefix, file=benchmark_log)

            # Log test timing stats
            self.time_logger.log_build_flush(benchmark_log)
            self.time_logger.log_database_start_time(log_prefix, benchmark_log)
            self.time_logger.log_test_accepting_requests(
                log_prefix, benchmark_log)
            self.time_logger.log_verify_end(log_prefix, benchmark_log)

            # Save results thus far into the latest results directory
            self.results.write_intermediate(test.name,
                                            time.strftime(
                                                "%Y%m%d%H%M%S",
                                                time.localtime()))

            # Upload the results thus far to another server (optional)
            self.results.upload()

            if self.config.mode == "verify" and not passed_verify:
                return self.__exit_test(
                    success=False,
                    message="Failed verify!",
                    prefix=log_prefix,
                    file=benchmark_log)
        except Exception as e:
            tb = traceback.format_exc()
            self.results.write_intermediate(test.name,
                                            "error during test: " + str(e))
            log(tb, prefix=log_prefix, file=benchmark_log)
            return self.__exit_test(
                success=False,
                message="Error during test: %s" % test.name,
                prefix=log_prefix,
                file=benchmark_log)
        finally:
            self.docker_helper.stop()

        return self.__exit_test(
            success=True, prefix=log_prefix, file=benchmark_log)

    def __benchmark(self, framework_test, benchmark_log):
        '''
        Runs the benchmark for each type of test that it implements
        '''

        def benchmark_type(test_type):
            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)

            test = framework_test.runTests[test_type]
            raw_file = self.results.get_raw_file(framework_test.name,
                                                 test_type)
            if not os.path.exists(raw_file):
                # Open to create the empty file
                with open(raw_file, 'w'):
                    pass

            if not test.failed:
                # Begin resource usage metrics collection
                self.__begin_logging(framework_test, test_type)

                script = self.config.types[test_type].get_script_name()
                script_variables = self.config.types[
                    test_type].get_script_variables(
                        test.name, "http://%s:%s%s" % (self.config.server_host,
                                                       framework_test.port,
                                                       test.get_url()))

                self.docker_helper.benchmark(script, script_variables,
                                             raw_file)

                # End resource usage metrics collection
                self.__end_logging()

            results = self.results.parse_test(framework_test, test_type)
            log("Benchmark results:", file=benchmark_log)
            # TODO move into log somehow
            pprint(results)

            self.results.report_benchmark_results(framework_test, test_type,
                                                  results['results'])
            log("Complete", file=benchmark_log)

        for test_type in framework_test.runTests:
            benchmark_type(test_type)

    def __begin_logging(self, framework_test, test_type):
        '''
        Starts a dstat subprocess to monitor resource usage, to be synced
        with the client's time.
        TODO: MySQL and InnoDB are possible. Figure out how to implement them.
        '''
        output_file = self.results.get_stats_file(framework_test.name,
                                                  test_type)
        dstat_string = "dstat -Tafilmprs --aio --fs --ipc --lock \
                                      --raw --socket --tcp --udp --unix --vm --disk-util \
                                      --rpc --rpcd --output {output_file}".format(
            output_file=output_file)
        cmd = shlex.split(dstat_string)
        self.subprocess_handle = subprocess.Popen(
            cmd, stdout=FNULL, stderr=subprocess.STDOUT)

    def __end_logging(self):
        '''
        Stops the dstat logger subprocess and blocks until shutdown is complete.
        '''
        self.subprocess_handle.terminate()
        self.subprocess_handle.communicate()
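The resource-usage logging in __begin_logging and __end_logging boils down to running dstat in a child process for the duration of the benchmark and terminating it afterwards. A standalone sketch of that pattern, where the stats file name, the reduced flag set, and the sleep are placeholders rather than the toolset's actual values (dstat must be installed):

import os
import shlex
import subprocess
import time

output_file = 'stats.csv'  # placeholder for results.get_stats_file(...)
cmd = shlex.split('dstat -Tafilmprs --output {0}'.format(output_file))

with open(os.devnull, 'w') as devnull:  # the class writes to a module-level FNULL
    monitor = subprocess.Popen(cmd, stdout=devnull, stderr=subprocess.STDOUT)
    try:
        time.sleep(15)  # placeholder for the docker_helper.benchmark(...) run
    finally:
        monitor.terminate()    # __end_logging: stop dstat
        monitor.communicate()  # and block until it has exited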