def build(benchmarker_config, test_names, build_log_dir=os.devnull):
    '''
    Builds the test docker images
    '''
    tests = gather_tests(include=test_names,
                         benchmarker_config=benchmarker_config)

    for test in tests:
        log_prefix = "%s: " % test.name

        # Build the test image
        test_docker_file = "%s.dockerfile" % test.name
        build_log_file = build_log_dir
        if build_log_dir != os.devnull:
            build_log_file = os.path.join(
                build_log_dir,
                "%s.log" % test_docker_file.replace(".dockerfile", "").lower())

        try:
            __build(base_url=benchmarker_config.server_docker_host,
                    build_log_file=build_log_file,
                    log_prefix=log_prefix,
                    path=test.directory,
                    dockerfile=test_docker_file,
                    tag="techempower/tfb.test.%s" %
                    test_docker_file.replace(".dockerfile", ""))
        except Exception:
            return 1

    return 0
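
# A self-contained sketch of the naming conventions used above: each test's
# dockerfile name drives both the image tag ("techempower/tfb.test.<name>")
# and the per-test build log file name. "gemini" is a hypothetical test name
# and "/tmp/tfb-build-logs" a hypothetical build log directory.
import os

test_docker_file = "gemini.dockerfile"
name = test_docker_file.replace(".dockerfile", "")
print("techempower/tfb.test.%s" % name)                              # image tag
print(os.path.join("/tmp/tfb-build-logs", "%s.log" % name.lower()))  # build log
# -> techempower/tfb.test.gemini
# -> /tmp/tfb-build-logs/gemini.log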
Example #2
    def __run_list_test_metadata(self):
        '''
        Writes the metadata for all the available tests to test_metadata.json in the results directory
        '''
        all_tests = gather_tests(benchmarker_config=self.config)
        all_tests_json = json.dumps([{
            "name": test.name,
            "approach": test.approach,
            "classification": test.classification,
            "database": test.database,
            "framework": test.framework,
            "language": test.language,
            "orm": test.orm,
            "platform": test.platform,
            "webserver": test.webserver,
            "os": test.os,
            "database_os": test.database_os,
            "display_name": test.display_name,
            "notes": test.notes,
            "versus": test.versus
        } for test in all_tests])

        with open(os.path.join(self.results.directory, "test_metadata.json"),
                  "w") as f:
            f.write(all_tests_json)
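
# For reference, a single entry in the generated test_metadata.json has the
# shape produced by the mapping above. The field values below are hypothetical
# and shown only to illustrate the output format.
import json

example_entry = {
    "name": "gemini",
    "approach": "Realistic",
    "classification": "Fullstack",
    "database": "MySQL",
    "framework": "gemini",
    "language": "Java",
    "orm": "Micro",
    "platform": "Servlet",
    "webserver": "Resin",
    "os": "Linux",
    "database_os": "Linux",
    "display_name": "gemini",
    "notes": "",
    "versus": "servlet"
}
print(json.dumps([example_entry], indent=2))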
def publish(benchmarker_config):
    '''
    Builds fresh versions of all the docker container images known to the
    system and attempts to publish them to Docker Hub.
    Note: this requires `docker login` to have been performed prior to the
    call.
    '''
    start_time = time.time()

    docker_buildargs = {
        'CPU_COUNT': str(multiprocessing.cpu_count()),
        'MAX_CONCURRENCY': str(max(benchmarker_config.concurrency_levels)),
        'TFB_DATABASE': str(benchmarker_config.database_host)
    }

    # Force building instead of pulling
    benchmarker_config.build = True
    tests = gather_tests(benchmarker_config=benchmarker_config)
    for test in tests:
        __build_dependencies(benchmarker_config, test, docker_buildargs)

    client = docker.DockerClient(
        base_url=benchmarker_config.server_docker_host)

    client.login("USERNAME", "PASSWORD")

    for image in client.images.list():
        has_tags = len(image.tags) > 0
        if has_tags and 'techempower' in image.tags[0] \
                and 'tfb.test' not in image.tags[0]:
            log("Pushing docker image: %s" % image.tags[0].split(':')[0])
            client.images.push(image.tags[0].split(':')[0])

    log("Publish took: %s seconds" % (time.time() - start_time))
def build(benchmarker_config, test_names, build_log_dir=os.devnull):
    '''
    Builds the dependency chain as well as the test implementation docker images
    for the given tests.
    '''
    docker_buildargs = {
        'MAX_CONCURRENCY': str(max(benchmarker_config.concurrency_levels)),
        'TFB_DATABASE': str(benchmarker_config.database_host)
    }

    tests = gather_tests(include=test_names,
                         benchmarker_config=benchmarker_config)

    for test in tests:
        log_prefix = "%s: " % test.name

        if __build_dependencies(benchmarker_config, test, docker_buildargs,
                                build_log_dir) > 0:
            return 1

        # Build the test image
        test_docker_file = "%s.dockerfile" % test.name
        build_log_file = build_log_dir
        if build_log_dir != os.devnull:
            build_log_file = os.path.join(
                build_log_dir,
                "%s.log" % test_docker_file.replace(".dockerfile", "").lower())
        with open(build_log_file, 'w') as build_log:
            try:
                for line in docker.APIClient(
                        base_url=benchmarker_config.server_docker_host).build(
                            path=test.directory,
                            dockerfile=test_docker_file,
                            tag="techempower/tfb.test.%s" %
                            test_docker_file.replace(".dockerfile", ""),
                            buildargs=docker_buildargs,
                            forcerm=True):
                    if line.startswith('{"stream":'):
                        line = json.loads(line)['stream'].encode('utf-8')
                        log(line,
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.WHITE + Style.BRIGHT \
                                if re.match(r'^Step \d+\/\d+', line) else '')
            except Exception:
                tb = traceback.format_exc()
                log("Docker build failed; terminating",
                    prefix=log_prefix,
                    file=build_log,
                    color=Fore.RED)
                log(tb, prefix=log_prefix, file=build_log)
                return 1

    return 0
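
# A self-contained sketch of the JSON stream handling above: the low-level
# Docker build API yields JSON-encoded progress objects, and only the ones
# carrying a "stream" key are logged, with "Step N/M" lines highlighted.
# The sample output below is illustrative.
import json
import re

sample_output = [
    '{"stream":"Step 1/2 : FROM ubuntu:18.04\\n"}',
    '{"status":"Pulling from library/ubuntu"}',
    '{"stream":" ---\\u003e 4c108a37151f\\n"}',
]
for raw in sample_output:
    if raw.startswith('{"stream":'):
        text = json.loads(raw)["stream"]
        step = bool(re.match(r'^Step \d+/\d+', text))
        print(("[step] " if step else "       ") + text.rstrip())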
Example #5
def build(benchmarker_config, test_names, build_log_dir=os.devnull):
    '''
    Builds the dependency chain as well as the test implementation docker images
    for the given tests.
    '''
    tests = gather_tests(
        include=test_names, benchmarker_config=benchmarker_config)

    for test in tests:
        log_prefix = "%s: " % test.name

        # Build the test image
        test_docker_file = "%s.dockerfile" % test.name
        build_log_file = build_log_dir
        if build_log_dir != os.devnull:
            build_log_file = os.path.join(
                build_log_dir,
                "%s.log" % test_docker_file.replace(".dockerfile", "").lower())
        with open(build_log_file, 'w') as build_log:
            try:
                client = docker.APIClient(
                    base_url=benchmarker_config.server_docker_host)
                output = client.build(
                    path=test.directory,
                    dockerfile=test_docker_file,
                    tag="techempower/tfb.test.%s" %
                        test_docker_file.replace(".dockerfile", ""),
                    forcerm=True,
                    pull=True)
                buffer = ""
                for token in output:
                    if token.startswith('{"stream":'):
                        token = json.loads(token)['stream'].encode('utf-8')
                    buffer += token
                    while "\n" in buffer:
                        index = buffer.index("\n")
                        line = buffer[:index]
                        buffer = buffer[index + 1:]
                        log(line,
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.WHITE + Style.BRIGHT \
                                if re.match(r'^Step \d+\/\d+', line) else '')

                if buffer:
                    log(buffer,
                        prefix=log_prefix,
                        file=build_log,
                        color=Fore.WHITE + Style.BRIGHT \
                            if re.match(r'^Step \d+\/\d+', buffer) else '')
            except Exception:
                tb = traceback.format_exc()
                log("Docker build failed; terminating",
                    prefix=log_prefix,
                    file=build_log,
                    color=Fore.RED)
                log(tb, prefix=log_prefix, file=build_log)
                return 1

    return 0
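
# A self-contained sketch of the newline-buffering technique above: the build
# stream can yield partial lines, so chunks are accumulated and only complete
# lines are emitted, with any trailing remainder flushed at the end. The
# chunks below are illustrative.
chunks = ["Step 1/2 : FROM ubu",
          "ntu:18.04\n ---> 4c108a37151f\nStep 2/2",
          " : RUN true\n"]
pending = ""
for chunk in chunks:
    pending += chunk
    while "\n" in pending:
        line, pending = pending.split("\n", 1)
        print(line)
if pending:
    print(pending)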
Example #6
def main(argv=None):
    '''
    Runs the toolset.
    '''
    # Default argv here rather than in the function signature, since default argument values are evaluated only once, at function definition time
    if argv is None:
        argv = sys.argv

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4].
        ''')
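
    # The "(type int-sequence)" forms described in the epilog above map to
    # lists as follows; a hypothetical parsing sketch for this format appears
    # after main() below.
    #   "5"      -> [5]
    #   "1,3,6"  -> [1, 3, 6]
    #   "1:3:15" -> [1, 4, 7, 10, 13]   (range semantics: start:step:end)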

    # Suite options
    parser.add_argument('--build',
                        nargs='+',
                        help='Builds the dockerfile(s) for the given test(s)')
    parser.add_argument('--clean',
                        action='store_true',
                        default=False,
                        help='Removes the results directory')
    parser.add_argument('--new',
                        action='store_true',
                        default=False,
                        help='Initialize a new framework test')
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help=
        'Only print a limited set of messages to stdout; keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help=
        'A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )

    # Test options
    parser.add_argument('--test',
                        default=None,
                        nargs='+',
                        help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument(
        '--test-lang',
        nargs='+',
        dest='test_lang',
        help='name of language directory containing all tests to run')
    parser.add_argument('--exclude',
                        default=None,
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'cached_query',
                            'fortune', 'update', 'plaintext'
                        ],
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help=
        'Verify mode will only start up the tests, curl the URLs, and shut down. Debug mode will skip verification and leave the server running.'
    )
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument('--duration',
                        default=15,
                        help='Time in seconds that each test should run for.')
    parser.add_argument('--server-host',
                        default='tfb-server',
                        help='Hostname/IP for application server')
    parser.add_argument('--database-host',
                        default='tfb-database',
                        help='Hostname/IP for database server')
    parser.add_argument('--client-host',
                        default='',
                        help='Hostname/IP for client server')
    parser.add_argument('--concurrency-levels',
                        nargs='+',
                        default=[16, 32, 64, 128, 256, 512],
                        help='List of concurrencies to benchmark')
    parser.add_argument('--pipeline-concurrency-levels',
                        nargs='+',
                        default=[256, 1024, 4096, 16384],
                        help='List of pipeline concurrencies to benchmark')
    parser.add_argument('--query-levels',
                        nargs='+',
                        default=[1, 5, 10, 15, 20],
                        help='List of query levels to benchmark')
    parser.add_argument('--cached-query-levels',
                        nargs='+',
                        default=[1, 10, 20, 50, 100],
                        help='List of cached query levels to benchmark')

    # Network options
    parser.add_argument('--network-mode',
                        default=None,
                        help='The network mode to run docker in')

    args = parser.parse_args()

    global config
    config = BenchmarkConfig(args)
    results = Results(config)

    if config.new:
        Scaffolding(config)

    elif config.build:
        docker_helper.build(config, config.build)

    elif config.clean:
        cleaner.clean(results)
        docker_helper.clean(config)

    elif config.list_tests:
        all_tests = gather_tests(benchmarker_config=config)

        for test in all_tests:
            log(test.name)

    elif config.parse is not None:
        # TODO: broken
        all_tests = gather_tests(benchmarker_config=config)

        for test in all_tests:
            test.parse_all()

        results.parse(all_tests)

    else:
        benchmarker = Benchmarker(config, results)
        benchmarker.run()

    return 0
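
# The "(type int-sequence)" arguments described in the parser epilog above
# could be handled by a helper along these lines. This is a hypothetical
# sketch for illustration, not part of the toolset code shown here.
def parse_int_sequence(value):
    '''
    Parse "5", "1,3,6", or "start:step:end" into a list of ints.
    '''
    if ":" in value:
        start, step, end = [int(part) for part in value.split(":")]
        return list(range(start, end, step))
    return [int(part) for part in value.split(",")]

# parse_int_sequence("5")      -> [5]
# parse_int_sequence("1,3,6")  -> [1, 3, 6]
# parse_int_sequence("1:3:15") -> [1, 4, 7, 10, 13]
# parse_int_sequence("0:1:5")  -> [0, 1, 2, 3, 4]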