Example #1
 def log_benchmarking_end(self, log_prefix, file):
     total = int(time.time() - self.benchmarking_start)
     self.benchmarking_total = self.benchmarking_total + total
     log("Benchmarking time: %s" % TimeLogger.output(total),
         prefix=log_prefix,
         file=file,
         color=Fore.YELLOW)
 def watch_container(docker_container, docker_file):
     with open(
             os.path.join(
                 run_log_dir, "%s.log" % docker_file.replace(
                     ".dockerfile", "").lower()), 'w') as run_log:
         for line in docker_container.logs(stream=True):
             log(line, prefix=log_prefix, file=run_log)
Example #3
 def log_build_flush(self, file):
     for b_log in self.build_logs:
         log(b_log['str'],
             prefix=b_log['log_prefix'],
             file=file,
             color=Fore.YELLOW)
     self.build_logs = []
Example #4
 def log_test_end(self, log_prefix, file):
     total = int(time.time() - self.test_start)
     log("Total test time: %s" % TimeLogger.output(total),
         prefix=log_prefix,
         file=file,
         color=Fore.YELLOW)
     log("Total time building so far: %s" % TimeLogger.output(
         self.build_total),
         prefix="tfb: ",
         file=file,
         color=Fore.YELLOW)
     log("Total time verifying so far: %s" % TimeLogger.output(
         self.verify_total),
         prefix="tfb: ",
         file=file,
         color=Fore.YELLOW)
     if self.benchmarking_total > 0:
         log("Total time benchmarking so far: %s" % TimeLogger.output(
             self.benchmarking_total),
             prefix="tfb: ",
             file=file,
             color=Fore.YELLOW)
     running_time = int(time.time() - self.start)
     log("Total execution time so far: %s" %
         TimeLogger.output(running_time),
         prefix="tfb: ",
         file=file,
         color=Fore.YELLOW)
Example #5
 def log_verify_end(self, log_prefix, file):
     total = int(time.time() - self.verify_start)
     self.verify_total = self.verify_total + total
     log("Verify time: %s" % TimeLogger.output(total),
         prefix=log_prefix,
         file=file,
         color=Fore.YELLOW)
Example #6
        def benchmark_type(test_type):
            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)

            test = framework_test.runTests[test_type]
            raw_file = self.results.get_raw_file(framework_test.name,
                                                 test_type)
            if not os.path.exists(raw_file):
                # Open to create the empty file
                with open(raw_file, "w"):
                    pass

            if not test.failed:
                # Begin resource usage metrics collection
                self.__begin_logging(framework_test, test_type)

                script = self.config.types[test_type].get_script_name()
                script_variables = self.config.types[
                    test_type].get_script_variables(
                        test.name,
                        "http://%s:%s%s" %
                        (self.config.server_host, framework_test.port,
                         test.get_url()),
                    )

                self.docker_helper.benchmark(script, script_variables,
                                             raw_file)

                # End resource usage metrics collection
                self.__end_logging()

            results = self.results.parse_test(framework_test, test_type)
            log("Benchmark results:", file=benchmark_log)
            # TODO move into log somehow
            pprint(results)

            self.results.report_benchmark_results(framework_test, test_type,
                                                  results["results"])
            log("Complete", file=benchmark_log)
Example #7
    def run(self):
        '''
        This process involves setting up the client/server machines
        with any necessary changes, then going through each test:
        running its docker build and run, verifying the URLs, and
        running benchmarks against it.
        '''
        # Generate metadata
        self.metadata.list_test_metadata()

        any_failed = False
        # Run tests
        log("Running Tests...", border='=')

        # build wrk and all databases needed for current run
        self.docker_helper.build_wrk()
        self.docker_helper.build_databases()

        with open(os.path.join(self.results.directory, 'benchmark.log'),
                  'w') as benchmark_log:
            for test in self.tests:
                log("Running Test: %s" % test.name, border='-')
                with self.config.quiet_out.enable():
                    if not self.__run_test(test, benchmark_log):
                        any_failed = True
                # Load intermediate result from child process
                self.results.load()

        # Parse results
        if self.config.mode == "benchmark":
            log("Parsing Results ...", border='=')
            self.results.parse(self.tests)

        self.results.set_completion_time()
        self.results.upload()
        self.results.finish()

        return any_failed
Example #8
def main(argv=None):
    '''
    Runs the toolset.
    '''
    # Do argv default this way, as doing it in the function declaration sets it at definition time
    if argv is None:
        argv = sys.argv

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # Suite options
    parser.add_argument('--audit',
                        action='store_true',
                        default=False,
                        help='Audits framework tests for inconsistencies')
    parser.add_argument('--clean',
                        action='store_true',
                        default=False,
                        help='Removes the results directory')
    parser.add_argument('--new',
                        action='store_true',
                        default=False,
                        help='Initialize a new framework test')
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help=
        'Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help=
        'A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )

    # Test options
    parser.add_argument('--test',
                        default=None,
                        nargs='+',
                        help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument(
        '--test-lang',
        nargs='+',
        dest='test_lang',
        help='name of language directory containing all tests to run')
    parser.add_argument('--exclude',
                        default=None,
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'cached_query',
                            'fortune', 'update', 'plaintext'
                        ],
                        nargs='+',
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.'
    )
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument('--duration',
                        default=15,
                        help='Time in seconds that each test should run for.')
    parser.add_argument('--server-host',
                        default='tfb-server',
                        help='Hostname/IP for application server')
    parser.add_argument('--database-host',
                        default='tfb-database',
                        help='Hostname/IP for database server')
    parser.add_argument('--client-host',
                        default='',
                        help='Hostname/IP for client server')
    parser.add_argument('--concurrency-levels',
                        nargs='+',
                        default=[16, 32, 64, 128, 256, 512],
                        help='List of concurrencies to benchmark')
    parser.add_argument('--pipeline-concurrency-levels',
                        nargs='+',
                        default=[256, 1024, 4096, 16384],
                        help='List of pipeline concurrencies to benchmark')
    parser.add_argument('--query-levels',
                        nargs='+',
                        default=[1, 5, 10, 15, 20],
                        help='List of query levels to benchmark')
    parser.add_argument('--cached-query-levels',
                        nargs='+',
                        default=[1, 10, 20, 50, 100],
                        help='List of cached query levels to benchmark')

    # Network options
    parser.add_argument('--network-mode',
                        default=None,
                        help='The network mode to run docker in')

    args = parser.parse_args()

    config = BenchmarkConfig(args)
    benchmarker = Benchmarker(config)

    signal.signal(signal.SIGTERM, benchmarker.stop)
    signal.signal(signal.SIGINT, benchmarker.stop)

    try:
        if config.new:
            Scaffolding(benchmarker)

        elif config.audit:
            Audit(benchmarker).start_audit()

        elif config.clean:
            cleaner.clean(benchmarker.results)
            benchmarker.docker_helper.clean()

        elif config.list_tests:
            all_tests = benchmarker.metadata.gather_tests()

            for test in all_tests:
                log(test.name)

        elif config.parse:
            all_tests = benchmarker.metadata.gather_tests()

            for test in all_tests:
                test.parse_all()

            benchmarker.results.parse(all_tests)

        else:
            any_failed = benchmarker.run()
            if config.mode == "verify":
                return any_failed
    except Exception:
        tb = traceback.format_exc()
        log("A fatal error has occurred", color=Fore.RED)
        log(tb)
        # try one last time to stop docker containers on fatal error
        try:
            benchmarker.stop()
        except:
            sys.exit(1)

    return 0
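
The epilog above describes the "(type int-sequence)" argument forms: a single integer, a comma-separated list, or start:step:end with Python range semantics. A minimal sketch of a parser for those forms follows; the helper name parse_int_sequence is hypothetical, and the toolset may handle this differently.

    def parse_int_sequence(value):
        # Hypothetical helper illustrating the "(type int-sequence)" forms from the epilog:
        # "5" -> [5]; "1,3,6" -> [1, 3, 6]; "1:3:15" -> [1, 4, 7, 10, 13] (range semantics).
        if ':' in value:
            start, step, end = (int(part) for part in value.split(':'))
            return list(range(start, end, step))
        if ',' in value:
            return [int(part) for part in value.split(',')]
        return [int(value)]
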
Example #9
    def __run_test(self, test, benchmark_log):
        '''
        Runs the given test, verifies that the webapp is accepting requests,
        optionally benchmarks the webapp, and ultimately stops all services
        started for this test.
        '''

        log_prefix = "%s: " % test.name
        # Start timing the total test duration
        self.time_logger.mark_test_start()

        # If the test is in the excludes list, we skip it
        if self.config.exclude and test.name in self.config.exclude:
            message = "Test {name} has been added to the excludes list. Skipping.".format(
                name=test.name)
            self.results.write_intermediate(test.name, message)
            return self.__exit_test(success=False,
                                    message=message,
                                    prefix=log_prefix,
                                    file=benchmark_log)

        database_container = None
        try:
            # Start database container
            if test.database.lower() != "none":
                self.time_logger.mark_starting_database()
                database_container = self.docker_helper.start_database(
                    test.database.lower())
                if database_container is None:
                    message = "ERROR: Problem building/running database container"
                    return self.__exit_test(success=False,
                                            message=message,
                                            prefix=log_prefix,
                                            file=benchmark_log)
                self.time_logger.mark_started_database()

            # Start webapp
            container = test.start()
            self.time_logger.mark_test_starting()
            if container is None:
                self.docker_helper.stop([container, database_container])
                message = "ERROR: Problem starting {name}".format(
                    name=test.name)
                self.results.write_intermediate(test.name, message)
                return self.__exit_test(success=False,
                                        message=message,
                                        prefix=log_prefix,
                                        file=benchmark_log)

            max_time = time.time() + 60
            while True:
                accepting_requests = test.is_accepting_requests()
                if accepting_requests \
                        or time.time() >= max_time \
                        or not self.docker_helper.server_container_exists(container.id):
                    break
                time.sleep(1)

            if not accepting_requests:
                self.docker_helper.stop([container, database_container])
                message = "ERROR: Framework is not accepting requests from client machine"
                self.results.write_intermediate(test.name, message)
                return self.__exit_test(success=False,
                                        message=message,
                                        prefix=log_prefix,
                                        file=benchmark_log)

            self.time_logger.mark_test_accepting_requests()

            # Debug mode blocks execution here until ctrl+c
            if self.config.mode == "debug":
                log("Entering debug mode. Server has started. CTRL-c to stop.",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.YELLOW)
                while True:
                    time.sleep(1)

            # Verify URLs and audit
            log("Verifying framework URLs", prefix=log_prefix)
            self.time_logger.mark_verify_start()
            passed_verify = test.verify_urls()
            self.audit.audit_test_dir(test.directory)

            # Benchmark this test
            if self.config.mode == "benchmark":
                log("Benchmarking %s" % test.name,
                    file=benchmark_log,
                    border='-')
                self.time_logger.mark_benchmarking_start()
                self.__benchmark(test, benchmark_log)
                self.time_logger.log_benchmarking_end(log_prefix=log_prefix,
                                                      file=benchmark_log)

            # Log test timing stats
            self.time_logger.log_build_flush(benchmark_log)
            self.time_logger.log_database_start_time(log_prefix, benchmark_log)
            self.time_logger.log_test_accepting_requests(
                log_prefix, benchmark_log)
            self.time_logger.log_verify_end(log_prefix, benchmark_log)

            # Stop this test
            self.docker_helper.stop([container, database_container])

            # Save results thus far into the latest results directory
            self.results.write_intermediate(
                test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))

            # Upload the results thus far to another server (optional)
            self.results.upload()

            if self.config.mode == "verify" and not passed_verify:
                return self.__exit_test(success=False,
                                        message="Failed verify!",
                                        prefix=log_prefix,
                                        file=benchmark_log)
        except Exception as e:
            tb = traceback.format_exc()
            self.results.write_intermediate(test.name,
                                            "error during test: " + str(e))
            log(tb, prefix=log_prefix, file=benchmark_log)
            return self.__exit_test(success=False,
                                    message="Error during test: %s" %
                                    test.name,
                                    prefix=log_prefix,
                                    file=benchmark_log)

        return self.__exit_test(success=True,
                                prefix=log_prefix,
                                file=benchmark_log)
Example #10
 def stop(self, signal=None, frame=None):
     log("Shutting down (may take a moment)")
     self.docker_helper.stop()
     sys.exit(0)
Example #11
 def log_database_start_time(self, log_prefix, file):
     log("Time starting database: %s" %
         TimeLogger.output(self.database_started),
         prefix=log_prefix,
         file=file,
         color=Fore.YELLOW)
Example #12
    def run(self, test, run_log_dir):
        """
		Run the given Docker container(s)
		"""

        log_prefix = "%s: " % test.name
        container = None

        try:

            def watch_container(docker_container, docker_file):
                with open(
                        os.path.join(
                            run_log_dir, "%s.log" %
                            docker_file.replace(".dockerfile", "").lower()),
                        'w') as run_log:
                    for line in docker_container.logs(stream=True):
                        log(line, prefix=log_prefix, file=run_log)

            extra_hosts = None
            name = "ssgberk-server"

            if self.benchmarker.config.network is None:
                extra_hosts = {
                    socket.gethostname():
                    str(self.benchmarker.config.server_host),
                    'ssgberk-server': str(self.benchmarker.config.server_host),
                }
                name = None

            sysctl = {'net.core.somaxconn': 65535}

            ulimit = [{
                'name': 'nofile',
                'hard': 200000,
                'soft': 200000
            }, {
                'name': 'rtprio',
                'hard': 99,
                'soft': 99
            }]

            container = self.server.containers.run(
                "matheusrv/ssgberk.test.%s" % test.name,
                name=name,
                network=self.benchmarker.config.network,
                network_mode=self.benchmarker.config.network_mode,
                stderr=True,
                detach=True,
                init=True,
                extra_hosts=extra_hosts,
                privileged=True,
                ulimits=ulimit,
                sysctls=sysctl,
                remove=True,
                log_config={'type': None})

            watch_thread = Thread(target=watch_container,
                                  args=(
                                      container,
                                      "%s.dockerfile" % test.name,
                                  ))
            watch_thread.daemon = True
            watch_thread.start()

        except Exception:
            with open(os.path.join(run_log_dir, "%s.log" % test.name.lower()),
                      'w') as run_log:
                tb = traceback.format_exc()
                log("Running docker cointainer: %s.dockerfile failed" %
                    test.name,
                    prefix=log_prefix,
                    file=run_log)
                log(tb, prefix=log_prefix, file=run_log)

        return container
Example #13
def verify_queries_count(self,
                         tbl_name,
                         url,
                         concurrency=512,
                         count=2,
                         expected_queries=1024,
                         expected_rows=1024,
                         check_updates=False):
    '''
    Checks that the number of executed queries, at the given concurrency level,
    corresponds to the total number of HTTP requests made times the number of queries per request.
    No margin is accepted on the number of queries, which appears to be reliable.
    On the number of rows read or updated, the database-related margin applies (1% by default, see cls.margin).
    On updates, if the use of bulk updates is detected (query count close to the expected number), a margin
    (5%, see bulk_margin) is allowed on the number of updated rows.
    '''
    log("VERIFYING QUERY COUNT FOR %s" % url,
        border='-',
        color=Fore.WHITE + Style.BRIGHT)

    problems = []

    queries, rows, rows_updated, margin, trans_failures = databases[
        self.database.lower()].verify_queries(self.config, tbl_name, url,
                                              concurrency, count,
                                              check_updates)

    isBulk = check_updates and (queries < 1.001 * expected_queries) and (
        queries > 0.999 * expected_queries)

    if check_updates and not isBulk:  # Restore the normal query count if bulk updates are not used
        expected_queries = (expected_queries - count * concurrency) * 2

    # Add a margin based on the number of cpu cores
    queries_margin = 1.015  # For a run on Travis
    if multiprocessing.cpu_count() > 2:
        queries_margin = 1  # real run (Citrine or Azure) -> no margin on queries
        # Check for transaction failures (socket errors...)
        if trans_failures > 0:
            problems.append(
                ("fail", "%s failed transactions." % trans_failures, url))

    problems.append(
        display_queries_count_result(queries * queries_margin,
                                     expected_queries, queries,
                                     "executed queries", url))

    problems.append(
        display_queries_count_result(rows, expected_rows, int(rows / margin),
                                     "rows read", url))

    if check_updates:
        bulk_margin = 1
        if isBulk:  # Special margin for bulk queries
            bulk_margin = 1.05
        problems.append(
            display_queries_count_result(rows_updated * bulk_margin,
                                         expected_rows,
                                         int(rows_updated / margin),
                                         "rows updated", url))

    return problems
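
A small numeric walk-through of the margin logic above; the values are illustrative assumptions, not measured results, and check_updates is taken to be True.

    import multiprocessing

    # Illustrative inputs mirroring the defaults of verify_queries_count.
    expected_queries, concurrency, count = 1024, 512, 2
    queries = 1023  # as reported by the database

    # Bulk updates are assumed when the query count is within 0.1% of the expectation.
    is_bulk = 0.999 * expected_queries < queries < 1.001 * expected_queries  # True here

    # Without bulk updates, the expected query count is rebuilt as in the code above.
    if not is_bulk:
        expected_queries = (expected_queries - count * concurrency) * 2

    # Runs with more than 2 CPU cores get no allowance on executed queries;
    # smaller (Travis-like) runs get 1.5%. Updated rows get an extra 5% when bulk.
    queries_margin = 1.015 if multiprocessing.cpu_count() <= 2 else 1
    bulk_margin = 1.05 if is_bulk else 1
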
Example #14
def gather_tests(include=[],
                 exclude=[],
                 benchmarker_config=None,
                 results=None):
    '''
    Given test names as strings, returns a list of FrameworkTest objects.
    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
    variables for checking the test directory, the test database OS, and
    other useful items.

    With no arguments, every test in this framework will be returned.
    With include, only tests with this exact name will be returned.
    With exclude, all tests but those excluded will be returned.

    A config is needed to construct full FrameworkTest objects. If
    one is not provided, a default config will be created.
    '''

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Old, hacky method to exclude all tests was to
    # request a test known to not exist, such as ''.
    # If test '' was requested, short-circuit and return
    # nothing immediately
    if len(include) == 1 and '' in include:
        return []

    # Search for configuration files
    config_files = []

    if benchmarker_config.test_lang:
        benchmarker_config.test_dir = []
        for lang in benchmarker_config.test_lang:
            if os.path.exists("{!s}/frameworks/{!s}".format(
                    benchmarker_config.fwroot, lang)):
                for test_dir in os.listdir("{!s}/frameworks/{!s}".format(
                        benchmarker_config.fwroot, lang)):
                    benchmarker_config.test_dir.append("{!s}/{!s}".format(
                        lang, test_dir))
            else:
                raise Exception(
                    "Unable to locate language directory: {!s}".format(lang))

    if benchmarker_config.test_dir:
        for test_dir in benchmarker_config.test_dir:
            dir_config_files = glob.glob(
                "{!s}/frameworks/{!s}/benchmark_config.json".format(
                    benchmarker_config.fwroot, test_dir))
            if len(dir_config_files):
                config_files.extend(dir_config_files)
            else:
                raise Exception(
                    "Unable to locate tests in test-dir: {!s}".format(
                        test_dir))
    else:
        config_files.extend(
            glob.glob("{!s}/frameworks/*/*/benchmark_config.json".format(
                benchmarker_config.fwroot)))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except ValueError:
                log("Error loading config: {!s}".format(config_file_name),
                    color=Fore.RED)
                raise Exception("Error loading config file")

        # Find all tests in the config file
        config_tests = parse_config(config, os.path.dirname(config_file_name),
                                    benchmarker_config, results)

        # Filter
        for test in config_tests:
            if len(include) == 0 and len(exclude) == 0:
                # No filters, we are running everything
                tests.append(test)
            elif test.name in exclude:
                continue
            elif test.name in include:
                tests.append(test)
            else:
                # An include list exists, but this test is
                # not listed there, so we ignore it
                pass

    # Ensure we were able to locate everything that was
    # explicitly included
    if 0 != len(include):
        names = {test.name for test in tests}
        if 0 != len(set(include) - set(names)):
            missing = list(set(include) - set(names))
            raise Exception("Unable to locate tests %s" % missing)

    tests.sort(key=lambda x: x.name)
    return tests
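
A brief usage sketch of the include/exclude behaviour described in the docstring; `config` stands in for a BenchmarkConfig-like object carrying the attributes gather_tests reads (fwroot, test_dir, test_lang), and the test name is only an example.

    every_test = gather_tests(benchmarker_config=config)
    only_one = gather_tests(include=['aspnet-mysql-raw'],
                            benchmarker_config=config)
    all_but_one = gather_tests(exclude=['aspnet-mysql-raw'],
                               benchmarker_config=config)
    nothing = gather_tests(include=[''], benchmarker_config=config)  # short-circuits to []
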
Example #15
import imp
import re

from colorama import Fore
from glob import glob
from toolset.utils.output_helper import log

databases = {}
db_folders = glob("/FrameworkBenchmarks/toolset/databases/*/")

# Loads all the databases from the folders in this directory
# and checks to see if they've implemented the required methods
for folder in db_folders:
    # regex that grabs the characters between "toolset/databases/"
    # and the final "/" in the db folder string to get the db name
    db_name = re.findall(r'.+\/(.+)\/$', folder, re.M)[0]
    db = imp.load_source("Database", "%s%s.py" % (folder, db_name))

    if not hasattr(db.Database, "get_current_world_table")\
            or not hasattr(db.Database, "test_connection"):
        log("Database %s does not implement the required methods" + db_name,
            color=Fore.RED)

    databases[db_name] = db.Database
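
A minimal sketch of a database module that would satisfy the loader above. Only the two attributes it checks for are shown; the method signatures and bodies are placeholders, not the real implementations.

    # Hypothetical /FrameworkBenchmarks/toolset/databases/<name>/<name>.py
    class Database:
        @staticmethod
        def get_current_world_table(config):
            # Placeholder: return the current World rows for update verification.
            return []

        @staticmethod
        def test_connection(config):
            # Placeholder: return True once the database accepts connections.
            return True
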
Example #16
 def log_build_end(self, log_prefix, file):
     total = int(time.time() - self.build_start)
     self.build_total = self.build_total + total
     log_str = "Build time: %s" % TimeLogger.output(total)
     self.build_logs.append({'log_prefix': log_prefix, 'str': log_str})
     log(log_str, prefix=log_prefix, file=file, color=Fore.YELLOW)
Example #17
def __build_dependencies(benchmarker_config,
                         test,
                         docker_buildargs,
                         build_log_dir=os.devnull):
    '''
    Builds all the dependency docker images for the given test.
    Does not build the test docker image.
    '''
    dependencies = OrderedSet(
        list(
            reversed(
                __gather_dependencies(
                    benchmarker_config,
                    os.path.join(test.directory,
                                 "%s.dockerfile" % test.name)))))

    docker_dir = os.path.join(benchmarker_config.fwroot, "toolset", "setup",
                              "docker")
    for dep in dependencies:
        log_prefix = dep + ": "
        pulled = False

        # Do not pull techempower/ images if we are building specifically
        if not benchmarker_config.build and 'techempower/' not in dep:
            client = docker.DockerClient(
                base_url=benchmarker_config.server_docker_host)
            try:
                # If we have it, use it
                client.images.get(dep)
                pulled = True
                log("Found published image; skipping build", prefix=log_prefix)
            except:
                # Pull the dependency image
                try:
                    log("Attempting docker pull for image (this can take some time)",
                        prefix=log_prefix)
                    client.images.pull(dep)
                    pulled = True
                    log("Found published image; skipping build",
                        prefix=log_prefix)
                except:
                    log("Docker pull failed; %s could not be found; terminating"
                        % dep,
                        prefix=log_prefix,
                        color=Fore.RED)
                    return 1

        if not pulled:
            dep_ref = dep.strip().split(':')[0].strip()
            dependency = dep_ref.split('/')[1]
            build_log_file = build_log_dir
            if build_log_dir is not os.devnull:
                build_log_file = os.path.join(build_log_dir,
                                              "%s.log" % dependency.lower())
            with open(build_log_file, 'w') as build_log:
                docker_file = os.path.join(test.directory,
                                           dependency + ".dockerfile")
                if not docker_file or not os.path.exists(docker_file):
                    docker_file = find(docker_dir, dependency + ".dockerfile")
                if not docker_file:
                    log("Docker build failed; %s could not be found; terminating"
                        % (dependency + ".dockerfile"),
                        prefix=log_prefix,
                        file=build_log,
                        color=Fore.RED)
                    return 1

                # Build the dependency image
                try:
                    for line in docker.APIClient(
                            base_url=benchmarker_config.server_docker_host
                    ).build(path=os.path.dirname(docker_file),
                            dockerfile="%s.dockerfile" % dependency,
                            tag=dep,
                            buildargs=docker_buildargs,
                            forcerm=True):
                        if line.startswith('{"stream":'):
                            line = json.loads(line)
                            line = line[line.keys()[0]].encode('utf-8')
                            log(line,
                                prefix=log_prefix,
                                file=build_log,
                                color=Fore.WHITE + Style.BRIGHT \
                                    if re.match(r'^Step \d+\/\d+', line) else '')
                except Exception:
                    tb = traceback.format_exc()
                    log("Docker dependency build failed; terminating",
                        prefix=log_prefix,
                        file=build_log,
                        color=Fore.RED)
                    log(tb, prefix=log_prefix, file=build_log)
                    return 1
Example #18
def start_database(benchmarker_config, test, database):
    '''
    Sets up a container for the given database and starts it.
    '''
    image_name = "techempower/%s:latest" % database
    log_prefix = image_name + ": "

    database_dir = os.path.join(benchmarker_config.fwroot, "toolset", "setup",
                                "docker", "databases", database)
    docker_file = "%s.dockerfile" % database

    pulled = False
    client = docker.DockerClient(
        base_url=benchmarker_config.database_docker_host)
    try:
        # Don't pull if we have it
        client.images.get(image_name)
        pulled = True
        log("Found published image; skipping build", prefix=log_prefix)
    except:
        # Pull the database image
        try:
            log("Attempting docker pull for image (this can take some time)",
                prefix=log_prefix)
            client.images.pull(image_name)
            pulled = True
            log("Found published image; skipping build", prefix=log_prefix)
        except:
            pass

    if not pulled:
        for line in docker.APIClient(
                base_url=benchmarker_config.database_docker_host).build(
                    path=database_dir,
                    dockerfile=docker_file,
                    tag="techempower/%s" % database):
            if line.startswith('{"stream":'):
                line = json.loads(line)
                line = line[line.keys()[0]].encode('utf-8')
                log(line,
                    prefix=log_prefix,
                    color=Fore.WHITE + Style.BRIGHT \
                        if re.match(r'^Step \d+\/\d+', line) else '')

    client = docker.DockerClient(
        base_url=benchmarker_config.database_docker_host)

    sysctl = {'net.core.somaxconn': 65535, 'kernel.sem': "250 32000 256 512"}

    ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]

    container = client.containers.run(
        "techempower/%s" % database,
        name="tfb-database",
        network=benchmarker_config.network,
        network_mode=benchmarker_config.network_mode,
        detach=True,
        ulimits=ulimit,
        sysctls=sysctl)

    # Sleep until the database accepts connections
    slept = 0
    max_sleep = 60
    database_ready = False
    while not database_ready and slept < max_sleep:
        time.sleep(1)
        slept += 1
        database_ready = test_database(benchmarker_config, database)

    if not database_ready:
        log("Database was not ready after startup", prefix=log_prefix)

    return container
Example #19
def run(benchmarker_config, test, run_log_dir):
    '''
    Run the given Docker container(s)
    '''
    client = docker.DockerClient(
        base_url=benchmarker_config.server_docker_host)
    containers = []

    log_prefix = "%s: " % test.name
    try:

        def watch_container(container, docker_file):
            with open(
                    os.path.join(
                        run_log_dir, "%s.log" %
                        docker_file.replace(".dockerfile", "").lower()),
                    'w') as run_log:
                for line in container.logs(stream=True):
                    log(line, prefix=log_prefix, file=run_log)

        extra_hosts = None
        name = "tfb-server"

        if benchmarker_config.network is None:
            extra_hosts = {
                socket.gethostname(): str(benchmarker_config.server_host),
                'tfb-server': str(benchmarker_config.server_host),
                'tfb-database': str(benchmarker_config.database_host)
            }
            name = None

        sysctl = {'net.core.somaxconn': 65535}

        ulimit = [{
            'name': 'nofile',
            'hard': 200000,
            'soft': 200000
        }, {
            'name': 'rtprio',
            'hard': 99,
            'soft': 99
        }]

        container = client.containers.run(
            "techempower/tfb.test.%s" % test.name,
            name=name,
            network=benchmarker_config.network,
            network_mode=benchmarker_config.network_mode,
            stderr=True,
            detach=True,
            init=True,
            extra_hosts=extra_hosts,
            privileged=True,
            ulimits=ulimit,
            sysctls=sysctl)

        containers.append(container)

        watch_thread = Thread(target=watch_container,
                              args=(
                                  container,
                                  "%s.dockerfile" % test.name,
                              ))
        watch_thread.daemon = True
        watch_thread.start()

    except Exception:
        with open(os.path.join(run_log_dir, "%s.log" % test.name.lower()),
                  'w') as run_log:
            tb = traceback.format_exc()
            log("Running docker cointainer: %s.dockerfile failed" % test.name,
                prefix=log_prefix,
                file=run_log)
            log(tb, prefix=log_prefix, file=run_log)

    return containers
Example #20
    def finish(self):
        '''
        Finishes these results.
        '''
        if not self.config.parse:
            # Normally you don't have to use Fore.BLUE before each line, but
            # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
            # or stream flush, so we have to ensure that the color code is printed repeatedly
            log("Verification Summary",
                border='=',
                border_bottom='-',
                color=Fore.CYAN)
            for test in self.benchmarker.tests:
                log(Fore.CYAN + "| {!s}".format(test.name))
                if test.name in self.verify.keys():
                    for test_type, result in self.verify[
                            test.name].iteritems():
                        if result.upper() == "PASS":
                            color = Fore.GREEN
                        elif result.upper() == "WARN":
                            color = Fore.YELLOW
                        else:
                            color = Fore.RED
                        log(Fore.CYAN + "|       " + test_type.ljust(13) +
                            ' : ' + color + result.upper())
                else:
                    log(Fore.CYAN + "|      " + Fore.RED +
                        "NO RESULTS (Did framework launch?)")
            log('', border='=', border_bottom='', color=Fore.CYAN)

        log("Results are saved in " + self.directory)
Example #21
    def __build(self, base_url, path, build_log_file, log_prefix, dockerfile,
                tag, buildargs={}):
        '''
        Builds docker images using the docker-py low-level API
        '''

        self.benchmarker.time_logger.mark_build_start()
        with open(build_log_file, 'w') as build_log:
            try:
                client = docker.APIClient(base_url=base_url)
                output = client.build(
                    path=path,
                    dockerfile=dockerfile,
                    tag=tag,
                    forcerm=True,
                    timeout=3600,
                    pull=True,
                    buildargs=buildargs
                )
                buffer = ""
                for token in output:
                    if token.startswith('{"stream":'):
                        token = json.loads(token)
                        token = token[token.keys()[0]].encode('utf-8')
                        buffer += token
                    elif token.startswith('{"errorDetail":'):
                        token = json.loads(token)
                        raise Exception(token['errorDetail']['message'])
                    while "\n" in buffer:
                        index = buffer.index("\n")
                        line = buffer[:index]
                        buffer = buffer[index + 1:]
                        log(line,
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.WHITE + Style.BRIGHT \
                                if re.match(r'^Step \d+\/\d+', line) else '')
                    # Kill docker builds if they exceed 60 mins. This will only
                    # catch builds that are still printing output.
                    if self.benchmarker.time_logger.time_since_start() > 3600:
                        log("Build time exceeded 60 minutes",
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.RED)
                        raise Exception

                if buffer:
                    log(buffer,
                        prefix=log_prefix,
                        file=build_log,
                        color=Fore.WHITE + Style.BRIGHT \
                            if re.match(r'^Step \d+\/\d+', buffer) else '')
            except Exception:
                tb = traceback.format_exc()
                log("Docker build failed; terminating",
                    prefix=log_prefix,
                    file=build_log,
                    color=Fore.RED)
                log(tb, prefix=log_prefix, file=build_log)
                self.benchmarker.time_logger.log_build_end(
                    log_prefix=log_prefix, file=build_log)
                raise

            self.benchmarker.time_logger.log_build_end(
                log_prefix=log_prefix, file=build_log)
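
For reference, the kind of tokens the low-level build API streams, which the buffering loop above splits on newlines; the contents are illustrative, but the "stream" and "errorDetail" keys are the ones the code checks for.

    # Illustrative tokens from docker.APIClient.build(); real output varies.
    stream_token = '{"stream":"Step 1/7 : FROM ubuntu\\n"}'
    error_token = '{"errorDetail":{"message":"example build failure"}}'
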
Example #22
 def __write_results(self):
     try:
         with open(self.file, 'w') as f:
             f.write(json.dumps(self.__to_jsonable(), indent=2))
     except (IOError):
         log("Error writing results.json")
Example #23
 def watch_container(docker_container, docker_file):
     with open(
             os.path.join(run_log_dir, "%s.log" % docker_file.replace(
                 ".dockerfile", "").lower()), 'w') as run_log:
         for line in docker_container.logs(stream=True):
             log(line, prefix=log_prefix, file=run_log)
Example #24
    def run(self, test, run_log_dir):
        '''
        Run the given Docker container(s)
        '''

        log_prefix = "%s: " % test.name
        container = None

        try:

            def watch_container(docker_container, docker_file):
                with open(
                        os.path.join(
                            run_log_dir, "%s.log" % docker_file.replace(
                                ".dockerfile", "").lower()), 'w') as run_log:
                    for line in docker_container.logs(stream=True):
                        log(line, prefix=log_prefix, file=run_log)

            extra_hosts = None
            name = "tfb-server"

            if self.benchmarker.config.network is None:
                extra_hosts = {
                    socket.gethostname():
                    str(self.benchmarker.config.server_host),
                    'tfb-server':
                    str(self.benchmarker.config.server_host),
                    'tfb-database':
                    str(self.benchmarker.config.database_host)
                }
                name = None

            sysctl = {'net.core.somaxconn': 65535}

            ulimit = [{
                'name': 'nofile',
                'hard': 200000,
                'soft': 200000
            }, {
                'name': 'rtprio',
                'hard': 99,
                'soft': 99
            }]

            docker_cmd = ''
            if hasattr(test, 'docker_cmd'):
                docker_cmd = test.docker_cmd

            # Expose ports in debugging mode
            ports = {}
            if self.benchmarker.config.mode == "debug":
                ports = {test.port: test.port}

            container = self.server.containers.run(
                "techempower/tfb.test.%s" % test.name,
                name=name,
                command=docker_cmd,
                network=self.benchmarker.config.network,
                network_mode=self.benchmarker.config.network_mode,
                ports=ports,
                stderr=True,
                detach=True,
                init=True,
                extra_hosts=extra_hosts,
                privileged=True,
                ulimits=ulimit,
                mem_limit=mem_limit,
                sysctls=sysctl,
                remove=True,
                log_config={'type': None})

            watch_thread = Thread(
                target=watch_container,
                args=(
                    container,
                    "%s.dockerfile" % test.name,
                ))
            watch_thread.daemon = True
            watch_thread.start()

        except Exception:
            with open(
                    os.path.join(run_log_dir, "%s.log" % test.name.lower()),
                    'w') as run_log:
                tb = traceback.format_exc()
                log("Running docker container: %s.dockerfile failed" %
                    test.name,
                    prefix=log_prefix,
                    file=run_log)
                log(tb, prefix=log_prefix, file=run_log)

        return container
Example #25
def build(benchmarker_config, test_names, build_log_dir=os.devnull):
    '''
    Builds the dependency chain as well as the test implementation docker images
    for the given tests.
    '''
    tests = gather_tests(
        include=test_names, benchmarker_config=benchmarker_config)

    for test in tests:
        log_prefix = "%s: " % test.name

        # Build the test image
        test_docker_file = "%s.dockerfile" % test.name
        build_log_file = build_log_dir
        if build_log_dir is not os.devnull:
            build_log_file = os.path.join(
                build_log_dir,
                "%s.log" % test_docker_file.replace(".dockerfile", "").lower())
        with open(build_log_file, 'w') as build_log:
            try:
                client = docker.APIClient(
                    base_url=benchmarker_config.server_docker_host)
                output = client.build(
                    path=test.directory,
                    dockerfile=test_docker_file,
                    tag="techempower/tfb.test.%s" %
                        test_docker_file.replace(".dockerfile", ""),
                    forcerm=True,
                    pull=True)
                buffer = ""
                for token in output:
                    if token.startswith('{"stream":'):
                        token = json.loads(token)
                        token = token[token.keys()[0]].encode('utf-8')
                    buffer += token
                    while "\n" in buffer:
                        index = buffer.index("\n")
                        line = buffer[:index]
                        buffer = buffer[index + 1:]
                        log(line,
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.WHITE + Style.BRIGHT \
                                if re.match(r'^Step \d+\/\d+', line) else '')

                if buffer:
                    log(buffer,
                        prefix=log_prefix,
                        file=build_log,
                        color=Fore.WHITE + Style.BRIGHT \
                            if re.match(r'^Step \d+\/\d+', buffer) else '')
            except Exception:
                tb = traceback.format_exc()
                log("Docker build failed; terminating",
                    prefix=log_prefix,
                    file=build_log,
                    color=Fore.RED)
                log(tb, prefix=log_prefix, file=build_log)
                return 1

    return 0
Example #26
    def parse_config(self, config, directory):
        """
        Parses a config file into a list of FrameworkTest objects
        """
        from toolset.benchmark.framework_test import FrameworkTest
        tests = []

        # The config object can specify multiple tests
        # Loop over them and parse each into a FrameworkTest
        for test in config['tests']:

            tests_to_run = [name for (name, keys) in test.iteritems()]

            if "default" not in tests_to_run:
                log("Framework %s does not define a default test in benchmark_config.json"
                    % config['framework'],
                    color=Fore.YELLOW)

            # Check that each test configuration is acceptable
            # Throw exceptions if a field is missing, with hints on how to improve it
            for test_name, test_keys in test.iteritems():
                # Validates and normalizes the benchmark_config entry
                test_keys = Metadata.validate_test(test_name, test_keys,
                                                   config['framework'],
                                                   directory)

                # Map test type to a parsed FrameworkTestType object
                runTests = dict()
                for type_name, type_obj in self.benchmarker.config.types.iteritems(
                ):
                    try:
                        # Makes a FrameworkTestType object using some of the keys in config
                        # e.g. JsonTestType uses "json_url"
                        runTests[type_name] = type_obj.copy().parse(test_keys)
                    except AttributeError:
                        # This is quite common - most tests don't support all types
                        # Quietly log it and move on (debug logging is on in Travis and this causes
                        # ~1500 lines of debug, so I'm totally ignoring it for now)
                        # log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
                        pass

                # We need to sort by test_type to run
                sortedTestKeys = sorted(runTests.keys(),
                                        key=Metadata.test_order)
                sortedRunTests = OrderedDict()
                for sortedTestKey in sortedTestKeys:
                    sortedRunTests[sortedTestKey] = runTests[sortedTestKey]

                # Prefix all test names with framework except 'default' test
                # Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
                if test_name == 'default':
                    test_name = config['framework']
                else:
                    test_name = "%s-%s" % (config['framework'], test_name)

                # By passing the entire set of keys, each FrameworkTest will have a member for each key
                tests.append(
                    FrameworkTest(test_name, directory, self.benchmarker,
                                  sortedRunTests, test_keys))

        return tests
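
A minimal sketch of the parsed benchmark_config.json structure that parse_config walks, written as the Python dict it receives after json.load. Only 'framework', the 'tests' list, and a 'default' entry are required by the code above; 'json_url' is the key named for the JSON test type, and the remaining keys ('port', 'database') are assumptions based on the test attributes used elsewhere in these examples.

    # Illustrative only; Metadata.validate_test checks and normalizes the real keys.
    config = {
        "framework": "exampleframework",
        "tests": [{
            "default": {              # becomes test "exampleframework"
                "json_url": "/json",  # consumed by the JSON test type
                "port": 8080,
                "database": "None"
            },
            "mysql": {                # becomes test "exampleframework-mysql"
                "json_url": "/json",
                "port": 8080,
                "database": "MySQL"
            }
        }]
    }
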
Example #27
def __stop(signal, frame):
    log("Shutting down (may take a moment)")
    docker_helper.stop(config)
    sys.exit(0)
Example #28
    def __run_test(self, test, benchmark_log):
        '''
        Runs the given test, verifies that the webapp is accepting requests,
        optionally benchmarks the webapp, and ultimately stops all services
        started for this test.
        '''
        log_prefix = "%s: " % test.name

        # If the test is in the excludes list, we skip it
        if self.config.exclude is not None and test.name in self.config.exclude:
            message = "Test {name} has been added to the excludes list. Skipping.".format(
                name=test.name)
            self.results.write_intermediate(test.name, message)
            log(message, prefix=log_prefix, file=benchmark_log)
            return False

        database_container = None
        try:
            if self.__is_port_bound(test.port):
                time.sleep(60)

            if self.__is_port_bound(test.port):
                # We gave it our all
                message = "Error: Port %s is not available, cannot start %s" % (
                    test.port, test.name)
                self.results.write_intermediate(test.name, message)
                log(message,
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False

            # Start database container
            if test.database.lower() != "none":
                database_container = docker_helper.start_database(
                    self.config, test, test.database.lower())
                if database_container is None:
                    message = "ERROR: Problem building/running database container"
                    self.results.write_intermediate(test.name, message)
                    log(message,
                        prefix=log_prefix,
                        file=benchmark_log,
                        color=Fore.RED)
                    return False

            # Start webapp
            container = test.start()
            if container is None:
                docker_helper.stop(self.config, container, database_container,
                                   test)
                message = "ERROR: Problem starting {name}".format(
                    name=test.name)
                self.results.write_intermediate(test.name, message)
                log(message,
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False

            slept = 0
            max_sleep = 60
            accepting_requests = False
            while not accepting_requests and slept < max_sleep:
                accepting_requests = test.is_accepting_requests()
                time.sleep(1)
                slept += 1

            if not accepting_requests:
                docker_helper.stop(self.config, container, database_container,
                                   test)
                message = "ERROR: Framework is not accepting requests from client machine"
                self.results.write_intermediate(test.name, message)
                log(message,
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False

            # Debug mode blocks execution here until ctrl+c
            if self.config.mode == "debug":
                log("Entering debug mode. Server has started. CTRL-c to stop.",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.YELLOW)
                while True:
                    time.sleep(1)

            # Verify URLs
            log("Verifying framework URLs", prefix=log_prefix)
            passed_verify = test.verify_urls()

            # Benchmark this test
            if self.config.mode == "benchmark":
                log("Benchmarking %s" % test.name,
                    file=benchmark_log,
                    border='-')
                self.__benchmark(test, benchmark_log)

            # Stop this test
            docker_helper.stop(self.config, container, database_container,
                               test)

            # Save results thus far into the latest results directory
            self.results.write_intermediate(
                test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))

            # Upload the results thus far to another server (optional)
            self.results.upload()

            if self.config.mode == "verify" and not passed_verify:
                log("Failed verify!",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False
        except (OSError, IOError, subprocess.CalledProcessError) as e:
            tb = traceback.format_exc()
            self.results.write_intermediate(test.name,
                                            "error during test: " + str(e))
            log("Subprocess Error %s" % test.name,
                file=benchmark_log,
                border='-',
                color=Fore.RED)
            log(tb, prefix=log_prefix, file=benchmark_log)
            return False

        return True
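
A minimal, self-contained sketch of the same wait-until-ready pattern used above, assuming only a probe callable rather than any TFB internals (the helper name wait_until_ready is illustrative, not part of the toolset):

import time

def wait_until_ready(probe, max_sleep=60, interval=1):
    """Poll probe() until it returns True or max_sleep seconds elapse.

    Returns True if the service became ready, False on timeout.
    """
    slept = 0
    while slept < max_sleep:
        if probe():
            return True
        time.sleep(interval)
        slept += interval
    return False

# Hypothetical usage mirroring the loop above:
#   accepting_requests = wait_until_ready(test.is_accepting_requests, max_sleep=60)
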
Example No. 29
 def log_test_accepting_requests(self, log_prefix, file):
     log("Time until accepting requests: %s" %
         TimeLogger.output(self.accepting_requests),
         prefix=log_prefix,
         file=file,
         color=Fore.YELLOW)
Example No. 30
    def get_current_world_table(self):
        '''
        Return a JSON object containing all 10,000 World items as they currently
        exist in the database. This is used for verifying that entries in the
        database have actually changed during an Update verification test.
        '''
        database_name = ""
        results_json = []
        try:
            database_name = self.database.lower()
        except AttributeError:
            pass

        if database_name == "mysql":
            try:
                db = MySQLdb.connect(self.config.database_host,
                                     "benchmarkdbuser", "benchmarkdbpass",
                                     "hello_world")
                cursor = db.cursor()
                cursor.execute("SELECT * FROM World")
                results = cursor.fetchall()
                results_json.append(json.loads(json.dumps(dict(results))))
                db.close()
            except Exception:
                tb = traceback.format_exc()
                log("ERROR: Unable to load current MySQL World table.",
                    color=Fore.RED)
                log(tb)
        elif database_name == "postgres":
            try:
                db = psycopg2.connect(host=self.config.database_host,
                                      port="5432",
                                      user="******",
                                      password="******",
                                      database="hello_world")
                cursor = db.cursor()
                cursor.execute("SELECT * FROM \"World\"")
                results = cursor.fetchall()
                results_json.append(json.loads(json.dumps(dict(results))))
                cursor = db.cursor()
                cursor.execute("SELECT * FROM \"world\"")
                results = cursor.fetchall()
                results_json.append(json.loads(json.dumps(dict(results))))
                db.close()
            except Exception:
                tb = traceback.format_exc()
                log("ERROR: Unable to load current Postgres World table.",
                    color=Fore.RED)
                log(tb)
        elif database_name == "mongodb":
            try:
                worlds_json = {}
                print("DATABASE_HOST: %s" % self.config.database_host)
                connection = pymongo.MongoClient(
                    host=self.config.database_host)
                db = connection.hello_world
                for world in db.world.find():
                    if "randomNumber" in world:
                        if "id" in world:
                            worlds_json[str(int(world["id"]))] = int(
                                world["randomNumber"])
                        elif "_id" in world:
                            worlds_json[str(int(world["_id"]))] = int(
                                world["randomNumber"])
                results_json.append(worlds_json)
                connection.close()
            except Exception:
                tb = traceback.format_exc()
                log("ERROR: Unable to load current MongoDB World table.",
                    color=Fore.RED)
                log(tb)
        elif database_name == "reindexer":
            try:
                worlds_json = {}
                resp = requests.get(
                    "http://" + self.config.database_host +
                    ":9088/api/v1/db/hello_world/namespaces/world/items?limit=10000"
                )
                for world in resp.json()["items"]:
                    if "randomNumber" in world:
                        worlds_json[str(int(world["id"]))] = int(
                            world["randomNumber"])
                # Append the snapshot once, after the loop, like the other branches
                results_json.append(worlds_json)
            except Exception:
                tb = traceback.format_exc()
                log("ERROR: Unable to load current Reindexer World table.",
                    color=Fore.RED)
                log(tb)
        else:
            raise ValueError(
                "Database: {!s} does not exist".format(database_name))

        return results_json
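
The docstring above says this snapshot exists so an Update verification can confirm rows really changed. A rough sketch of that comparison, assuming two snapshots taken with get_current_world_table() before and after the update run; the helper name count_changed_rows is illustrative and not part of TFB:

def count_changed_rows(before, after):
    """Count World ids whose randomNumber differs between two snapshots.

    Each snapshot is a list holding one dict that maps id (str) to
    randomNumber (int), matching the shape built above.
    """
    before_map = before[0] if before else {}
    after_map = after[0] if after else {}
    return sum(1 for key, value in after_map.items()
               if before_map.get(key) != value)

# Hypothetical usage:
#   before = test.get_current_world_table()
#   ... run the Update benchmark ...
#   after = test.get_current_world_table()
#   assert count_changed_rows(before, after) > 0
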
Example No. 31
 def output_headers_and_body(self):
     log(str(self.headers))
     log(self.body)
Example No. 32
    def gather_tests(self, include=None, exclude=None):
        '''
        Given test names as strings, returns a list of FrameworkTest objects.
        For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
        variables for checking the test directory, the test's database and OS,
        and other useful items.

        With no arguments, every test in this framework is returned.
        With include, only the tests named in that list are returned.
        With exclude, every test except those excluded is returned.
        '''

        # Help callers out a bit
        include = include or []
        exclude = exclude or []

        # Search for configuration files
        config_files = []

        if self.benchmarker.config.test_lang:
            self.benchmarker.config.test_dir = []
            for lang in self.benchmarker.config.test_lang:
                self.benchmarker.config.test_dir.extend(
                    self.gather_language_tests(lang))

        if self.benchmarker.config.test_dir:
            for test_dir in self.benchmarker.config.test_dir:
                config_files.append(self.get_framework_config(test_dir))
        else:
            config_files.extend(
                glob.glob("{!s}/*/*/benchmark_config.json".format(
                    self.benchmarker.config.lang_root)))

        tests = []
        for config_file_name in config_files:
            config = None
            with open(config_file_name, 'r') as config_file:
                try:
                    config = json.load(config_file)
                except ValueError:
                    log("Error loading config: {!s}".format(config_file_name),
                        color=Fore.RED)
                    raise Exception("Error loading config file")

            # Find all tests in the config file
            config_tests = self.parse_config(config,
                                             os.path.dirname(config_file_name))

            # Filter
            for test in config_tests:
                if len(include) > 0:
                    if test.name in include:
                        tests.append(test)
                elif test.name not in exclude:
                    tests.append(test)

        # Ensure we were able to locate everything that was
        # explicitly included
        if include:
            names = {test.name for test in tests}
            missing = set(include) - names
            if missing:
                raise Exception("Unable to locate tests %s" % list(missing))

        tests.sort(key=lambda x: x.name)

        return tests
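
A short usage sketch of the include/exclude behaviour described in the docstring; the scanner variable is a placeholder for whatever object owns gather_tests(), not real TFB wiring:

# Hypothetical driver code.
all_tests = scanner.gather_tests()                           # every discovered test
subset = scanner.gather_tests(include=['aspnet-mysql-raw'])  # only this exact name
rest = scanner.gather_tests(exclude=['aspnet-mysql-raw'])    # everything else

try:
    scanner.gather_tests(include=['no-such-test'])
except Exception as error:
    print("Lookup failed as expected: %s" % error)
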
Example No. 33
        def verify_type(test_type):
            verificationPath = os.path.join(log_path, test_type)
            try:
                os.makedirs(verificationPath)
            except OSError:
                pass
            with open(os.path.join(verificationPath, 'verification.txt'),
                      'w') as verification:
                test = self.runTests[test_type]
                log("VERIFYING %s" % test_type.upper(),
                    file=verification,
                    border='-',
                    color=Fore.WHITE + Style.BRIGHT)

                base_url = "http://%s:%s" % (
                    self.benchmarker_config.server_host, self.port)

                try:
                    # Verify headers from the server. This check is made from the
                    # app server using Python's requests module. A second check is
                    # then made from the client to make sure the server isn't only
                    # accepting connections from localhost in a multi-machine setup.
                    results = test.verify(base_url)

                    # Now verify that the url is reachable from the client machine, unless
                    # we're already failing
                    if not any(result == 'fail'
                               for (result, reason, url) in results):
                        docker_helper.test_client_connection(
                            self.benchmarker_config, base_url + test.get_url())
                except ConnectionError as e:
                    results = [('fail', "Server did not respond to request",
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e),
                        color=Fore.RED)
                except Exception as e:
                    results = [('fail', """Caused Exception in TFB
            This almost certainly means your return value is incorrect,
            but also that you have found a bug. Please submit an issue
            including this message: %s\n%s""" % (e, traceback.format_exc()),
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e),
                        color=Fore.RED)

                test.failed = any(
                    result == 'fail' for (result, reason, url) in results)
                test.warned = any(
                    result == 'warn' for (result, reason, url) in results)
                test.passed = all(
                    result == 'pass' for (result, reason, url) in results)

                def output_result(result, reason, url):
                    specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
                    color = Fore.GREEN
                    if result.upper() == "WARN":
                        color = Fore.YELLOW
                    elif result.upper() == "FAIL":
                        color = Fore.RED

                    log("   {!s}{!s}{!s} for {!s}".format(
                        color, result.upper(), Style.RESET_ALL, url),
                        file=verification)
                    if reason is not None and len(reason) != 0:
                        for line in reason.splitlines():
                            log("     " + line, file=verification)
                        if not test.passed:
                            log("     See {!s}".format(specific_rules_url),
                                file=verification)

                for (result, reason, url) in results:
                    output_result(result, reason, url)

                if test.failed:
                    self.results.report_verify_results(self, test_type, 'fail')
                elif test.warned:
                    self.results.report_verify_results(self, test_type, 'warn')
                elif test.passed:
                    self.results.report_verify_results(self, test_type, 'pass')
                else:
                    raise Exception(
                        "Unknown error - test did not pass,warn,or fail")
Example No. 34
 def watch_container(container):
     with open(raw_file, 'w') as benchmark_file:
         for line in container.logs(stream=True):
             log(line, file=benchmark_file)
Example No. 35
def build(benchmarker_config, test_names, build_log_dir=os.devnull):
    '''
    Builds the dependency chain as well as the test implementation docker images
    for the given tests.
    '''
    docker_buildargs = {
        'MAX_CONCURRENCY': str(max(benchmarker_config.concurrency_levels)),
        'TFB_DATABASE': str(benchmarker_config.database_host)
    }

    tests = gather_tests(include=test_names,
                         benchmarker_config=benchmarker_config)

    for test in tests:
        log_prefix = "%s: " % test.name

        test_docker_files = ["%s.dockerfile" % test.name]
        if test.docker_files is not None:
            if type(test.docker_files) is list:
                test_docker_files.extend(test.docker_files)
            else:
                raise Exception(
                    "docker_files in benchmark_config.json must be an array")

        if __build_dependencies(benchmarker_config, test, test_docker_files,
                                docker_buildargs, build_log_dir) > 0:
            return 1

        # Build the test images
        for test_docker_file in test_docker_files:
            build_log_file = build_log_dir
            if build_log_dir is not os.devnull:
                build_log_file = os.path.join(
                    build_log_dir, "%s.log" %
                    test_docker_file.replace(".dockerfile", "").lower())
            with open(build_log_file, 'w') as build_log:
                try:
                    for line in docker.APIClient(
                            base_url=benchmarker_config.server_docker_host
                    ).build(path=test.directory,
                            dockerfile=test_docker_file,
                            tag="techempower/tfb.test.%s" %
                            test_docker_file.replace(".dockerfile", ""),
                            buildargs=docker_buildargs,
                            forcerm=True):
                        if line.startswith('{"stream":'):
                            line = json.loads(line)
                            # Each streamed chunk is a one-key JSON object; take its
                            # text (next(iter(...)) also works on Python 3 dict views)
                            line = line[next(iter(line))].encode('utf-8')
                            log(line,
                                prefix=log_prefix,
                                file=build_log,
                                color=Fore.WHITE + Style.BRIGHT \
                                    if re.match(r'^Step \d+\/\d+', line) else '')
                except Exception:
                    tb = traceback.format_exc()
                    log("Docker build failed; terminating",
                        prefix=log_prefix,
                        file=build_log,
                        color=Fore.RED)
                    log(tb, prefix=log_prefix, file=build_log)
                    return 1

    return 0
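
build() returns 0 on success and 1 as soon as any dependency or test image fails to build, so callers can stop early. A hypothetical invocation; the config object and the log directory are placeholders for whatever the surrounding toolchain provides:

import os

build_log_dir = os.path.join("results", "build")
if not os.path.exists(build_log_dir):
    os.makedirs(build_log_dir)

if build(config, ['aspnet-mysql-raw'], build_log_dir=build_log_dir) != 0:
    raise SystemExit("Docker build failed; see logs in %s" % build_log_dir)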