def start_b(data_set_name,
            file_name=None,
            models_to_run=None,
            models_to_run_2=MLP_MODELS,
            ensembles=ENSEMBLES):  # change to None to choose from console
    """ starts benchmark """
    if models_to_run == 'SLM_MODELS':
        models_to_run = SLM_MODELS
    if models_to_run == 'MLP_MODELS':
        models_to_run = MLP_MODELS
    if models_to_run_2 == 'SLM_MODELS':
        models_to_run_2 = SLM_MODELS
    if models_to_run_2 == 'MLP_MODELS':
        models_to_run_2 = MLP_MODELS
    # SLM MODELS
    if models_to_run is not None:
        benchmarker = Benchmarker(data_set_name,
                                  models=models_to_run,
                                  ensembles=ensembles,
                                  benchmark_id='slm')
        # benchmarker.run()
        benchmarker.run_nested_cv()
    # MLP MODELS
    if models_to_run_2 is not None:
        benchmarker = Benchmarker(data_set_name,
                                  models=models_to_run_2,
                                  ensembles=ensembles,
                                  benchmark_id='mlp')
        benchmarker.run_nested_cv()
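# A hypothetical invocation of start_b (the data set name is illustrative;
# SLM_MODELS and MLP_MODELS are assumed to be defined in this module).
# This runs only the SLM group and skips the MLP pass:
start_b('c_diabetes', models_to_run='SLM_MODELS', models_to_run_2=None)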
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments 
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    fwroot = setup_util.get_fwroot()
    if not fwroot:
        fwroot = os.getcwd()
    setup_util.replace_environ(config='config/benchmark_profile', root=fwroot)
    print "FWROOT is %s" % setup_util.get_fwroot()

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file',
        default='benchmark.cfg',
        metavar='FILE',
        help=
        'Optional configuration file to provide argument defaults. All config options can be overridden using the command line.'
    )
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = {"client-host": "localhost"}

    ##########################################################
    # Set up default values
    ##########################################################
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms. 
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those 
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a 
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # SSH options
    parser.add_argument('-s',
                        '--server-host',
                        default=serverHost,
                        help='The application server.')
    parser.add_argument('-c',
                        '--client-host',
                        default=clientHost,
                        help='The client / load generation server.')
    parser.add_argument(
        '-u',
        '--client-user',
        default=clientUser,
        help='The username to use for SSH to the client instance.')
    parser.add_argument('-i',
                        '--client-identity-file',
                        dest='client_identity_file',
                        default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument(
        '-d',
        '--database-host',
        default=databaHost,
        help=
        'The database server.  If not provided, defaults to the value of --client-host.'
    )
    parser.add_argument(
        '--database-user',
        default=databaUser,
        help=
        'The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.'
    )
    parser.add_argument(
        '--database-identity-file',
        default=dbIdenFile,
        dest='database_identity_file',
        help=
        'The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.'
    )
    parser.add_argument('-p',
                        dest='password_prompt',
                        action='store_true',
                        help='Prompt for password')

    # Install options
    parser.add_argument(
        '--install',
        choices=['client', 'database', 'server', 'all'],
        default=None,
        help=
        'Runs installation script(s) before continuing on to execute the tests.'
    )
    parser.add_argument(
        '--install-error-action',
        choices=['abort', 'continue'],
        default='continue',
        help='action to take in case of error during installation')
    parser.add_argument(
        '--install-strategy',
        choices=['unified', 'pertest'],
        default='unified',
        help=
        '''Affects where software is installed: with unified, all server software is installed into a single directory.
        With pertest, each test gets its own installs directory, but installation takes longer.'''
    )
    parser.add_argument(
        '--install-only',
        action='store_true',
        default=False,
        help='Do not run benchmark or verification, just install and exit')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude',
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'fortune', 'update',
                            'plaintext'
                        ],
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')
    parser.add_argument(
        '--list-test-metadata',
        action='store_true',
        default=False,
        help=
        'writes all the test metadata as a JSON file in the results directory')
    parser.add_argument(
        '--name',
        default="ec2",
        help=
        'The name to give this test. Results will be placed in a folder using this name.'
    )
    parser.add_argument(
        '--os',
        choices=['linux', 'windows'],
        default='linux',
        help=
        'The operating system of the application/framework server (the one running this binary).')
    parser.add_argument('--database-os',
                        choices=['linux', 'windows'],
                        default='linux',
                        help='The operating system of the database server.')

    # Benchmark options
    parser.add_argument(
        '--concurrency-levels',
        default=[8, 16, 32, 64, 128, 256],
        help=
        'Runs wrk benchmarker with different concurrency values (type int-sequence)',
        action=StoreSeqAction)
    parser.add_argument(
        '--query-levels',
        default=[1, 5, 10, 15, 20],
        help=
        'Database queries requested per HTTP connection, used during query test (type int-sequence)',
        action=StoreSeqAction)
    parser.add_argument(
        '--threads',
        default=maxThreads,
        help=
        'Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system',
        type=int)
    parser.add_argument('--duration',
                        default=15,
                        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--sleep',
        type=int,
        default=60,
        help=
        'the amount of time to sleep after starting each test to allow the server to start up.'
    )

    # Misc Options
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        default=False,
        help=
        'Causes the configuration to print before any other commands are executed.'
    )
    parser.set_defaults(
        **defaults
    )  # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    # Verify and massage options
    if args.client_user is None:
        print 'Usernames (e.g. --client-user and --database-user) are required!'
        print 'The system will SSH into the client and the database for the install stage'
        print 'Aborting'
        exit(1)

    if args.database_user is None:
        args.database_user = args.client_user

    if args.database_host is None:
        args.database_host = args.client_host

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks,
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif args.parse is not None:
        benchmarker.parse_timestamp()
    elif not args.install_only:
        return benchmarker.run()
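# The epilog above documents the "(type int-sequence)" argument forms. Below is
# a minimal sketch of an argparse action implementing those semantics; the
# project's actual StoreSeqAction may differ in details:
import argparse

class StoreSeqAction(argparse.Action):
    """Parses '5', '1,3,6', or 'start:step:end' into a list of ints."""

    def __call__(self, parser, namespace, values, option_string=None):
        if ':' in values:
            # 'start:step:end' follows range() semantics:
            # '1:3:15' -> range(1, 15, 3) -> [1, 4, 7, 10, 13]
            start, step, end = (int(p) for p in values.split(':'))
            seq = list(range(start, end, step))
        else:
            # '5' -> [5]; '1,3,6' -> [1, 3, 6]
            seq = [int(p) for p in values.split(',')]
        setattr(namespace, self.dest, seq)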
def main(argv=None):
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')

    # Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = { "client-host":"localhost"}

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description='Run the Framework Benchmarking test suite.',
        parents=[conf_parser])
    parser.add_argument('-s', '--server-host', default='localhost', help='The application server.')
    parser.add_argument('-c', '--client-host', default='localhost', help='The client / load generation server.')
    parser.add_argument('-u', '--client-user', help='The username to use for SSH to the client instance.')
    parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', help='The key to use for SSH to the client instance.')
    parser.add_argument('-d', '--database-host', help='The database server.  If not provided, defaults to the value of --client-host.')
    parser.add_argument('--database-user', help='The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.')
    parser.add_argument('--database-identity-file', dest='database_identity_file', help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.')
    parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
    parser.add_argument('--install-software', action='store_true', help='runs the installation script before running the rest of the commands')
    parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default='all', help='Allows you to only install the server, client, or database software')
    parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
    parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
    parser.add_argument('--max-concurrency', default=256, help='the maximum concurrency that the tests will run at. The query tests will run at this concurrency', type=int)
    parser.add_argument('--max-queries', default=20, help='The maximum number of queries to run during the query test', type=int)
    parser.add_argument('--query-interval', default=5, type=int)
    parser.add_argument('--max-threads', default=8, help='The max number of threads to run weighttp at; this should be set to the number of cores for your system.', type=int)
    parser.add_argument('--duration', default=60, help='Time in seconds that each test should run for.')
    parser.add_argument('--starting-concurrency', default=8, type=int)
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
    parser.add_argument('--os', choices=['linux', 'windows'], default='linux', help='The operating system of the application server.')
    parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))
    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    if benchmarker.list_tests:
        benchmarker.run_list_tests()
    elif benchmarker.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif benchmarker.parse is not None:
        benchmarker.parse_timestamp()
    else:
        benchmarker.run()
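# A minimal, self-contained sketch (Python 2) of the precedence scheme used
# above, with hypothetical option names and values: built-in argparse defaults
# lose to the [Defaults] section of benchmark.cfg, and explicit command line
# flags beat both.
import argparse
import ConfigParser
from ast import literal_eval

config = ConfigParser.SafeConfigParser()
config.read(['benchmark.cfg'])
defaults = dict(config.items('Defaults')) if config.has_section('Defaults') else {}
for k, v in defaults.iteritems():
    try:
        defaults[k] = literal_eval(v)  # '15' -> 15, '[8, 16]' -> [8, 16]
    except Exception:
        pass  # plain strings such as hostnames stay strings

parser = argparse.ArgumentParser()
parser.add_argument('--duration', default=15, type=int)  # built-in default
parser.set_defaults(**defaults)  # a config-file 'duration' replaces 15
args = parser.parse_args()       # an explicit --duration flag wins over both
print(args.duration)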
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    os.environ['FWROOT'] = setup_util.get_fwroot()
    os.environ['IROOT'] = os.environ['FWROOT'] + '/installs'
    # 'Ubuntu', '14.04', 'trusty' respectively
    os.environ['TFB_DISTRIB_ID'], os.environ['TFB_DISTRIB_RELEASE'], os.environ['TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
    # App server cpu count
    os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())

    print("FWROOT is {!s}.".format(os.environ['FWROOT']))

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file',
        default='benchmark.cfg',
        metavar='FILE',
        help=
        'Optional configuration file to provide argument defaults. All config options can be overridden using the command line.'
    )
    args, remaining_argv = conf_parser.parse_known_args()

    defaults = {}
    try:
        if not os.path.exists(
                os.path.join(os.environ['FWROOT'],
                             args.conf_file)) and not os.path.exists(
                                 os.path.join(os.environ['FWROOT'],
                                              'benchmark.cfg')):
            print("No config file found. Aborting!")
            exit(1)
        with open(os.path.join(os.environ['FWROOT'], args.conf_file)):
            config = ConfigParser.SafeConfigParser()
            config.read([os.path.join(os.environ['FWROOT'], args.conf_file)])
            defaults.update(dict(config.items("Defaults")))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        print("Configuration file not found!")
        exit(1)

    ##########################################################
    # Set up default values
    ##########################################################

    # Verify and massage options
    if defaults['client_user'] is None or defaults['client_host'] is None:
        print("client_user and client_host are required!")
        print("Please check your configuration file.")
        print("Aborting!")
        exit(1)

    if defaults['database_user'] is None:
        defaults['database_user'] = defaults['client_user']
    if defaults['database_host'] is None:
        defaults['database_host'] = defaults['client_host']
    if defaults['server_host'] is None:
        defaults['server_host'] = defaults['client_host']
    if defaults['ulimit'] is None:
        defaults['ulimit'] = 200000

    os.environ['ULIMIT'] = str(defaults['ulimit'])

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # Install options
    parser.add_argument('--clean',
                        action='store_true',
                        default=False,
                        help='Removes the results directory')
    parser.add_argument('--clean-all',
                        action='store_true',
                        dest='clean_all',
                        default=False,
                        help='Removes the results and installs directories')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument('--exclude',
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'cached_query',
                            'fortune', 'update', 'plaintext'
                        ],
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.'
    )
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument('--duration',
                        default=15,
                        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--sleep',
        type=int,
        default=60,
        help=
        'the amount of time to sleep after starting each test to allow the server to start up.'
    )

    # Misc Options
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help=
        'A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        default=False,
        help=
        'Causes the configuration to print before any other commands are executed.'
    )
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help=
        'Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--clear-tmp',
        action='store_true',
        default=False,
        help=
        'Clears files written to /tmp after each framework\'s tests complete.')
    parser.set_defaults(
        **defaults
    )  # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks,
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.parse is not None:
        benchmarker.parse_timestamp()
    else:
        return benchmarker.run()
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    os.environ['FWROOT'] = setup_util.get_fwroot()
    os.environ['IROOT'] = os.environ['FWROOT'] + '/installs'
    # 'Ubuntu', '14.04', 'trusty' respectively
    os.environ['TFB_DISTRIB_ID'], os.environ['TFB_DISTRIB_RELEASE'], os.environ['TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
    # App server cpu count
    os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())

    print("FWROOT is {!s}.".format(os.environ['FWROOT']))

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file', default='benchmark.cfg', metavar='FILE',
        help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()

    defaults = {}
    try:
        if not os.path.exists(os.path.join(os.environ['FWROOT'], args.conf_file)) and not os.path.exists(os.path.join(os.environ['FWROOT'], 'benchmark.cfg')):
            print("No config file found. Aborting!")
            exit(1)
        with open(os.path.join(os.environ['FWROOT'], args.conf_file)):
            config = ConfigParser.SafeConfigParser()
            config.read([os.path.join(os.environ['FWROOT'], args.conf_file)])
            defaults.update(dict(config.items("Defaults")))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        print("Configuration file not found!")
        exit(1)

    ##########################################################
    # Set up default values
    ##########################################################

    # Verify and massage options
    if defaults['client_user'] is None or defaults['client_host'] is None:
        print("client_user and client_host are required!")
        print("Please check your configuration file.")
        print("Aborting!")
        exit(1)

    if defaults['database_user'] is None:
        defaults['database_user'] = defaults['client_user']
    if defaults['database_host'] is None:
        defaults['database_host'] = defaults['client_host']
    if defaults['server_host'] is None:
        defaults['server_host'] = defaults['client_host']
    if defaults['ulimit'] is None:
        defaults['ulimit'] = 200000

    os.environ['ULIMIT'] = str(defaults['ulimit'])

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
                                     parents=[conf_parser],
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # Install options
    parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
    parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--test-dir', nargs='+', dest='test_dir', help='name of framework directory containing all tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')

    # Misc Options
    parser.add_argument('--results-name', help='Gives a name to this set of results, formatted as a date', default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument('--results-environment', help='Describes the environment in which these results were gathered', default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument('--results-upload-uri', default=None, help='A URI where the in-progress results.json file will be POSTed periodically')
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.add_argument('--quiet', action='store_true', default=False, help='Only print a limited set of messages to stdout, keep the bulk of messages in log files only')
    parser.add_argument('--clear-tmp', action='store_true', default=False, help='Clears files written to /tmp after each framework\'s tests complete.')
    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks,
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.parse is not None:
        benchmarker.parse_timestamp()
    else:
        return benchmarker.run()
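# How such a main() is typically wired up as a script entry point; this guard
# is illustrative and not part of the excerpt above:
if __name__ == '__main__':
    sys.exit(main())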
Example #6
def gather_tests(include=[], exclude=[], benchmarker=None):
    '''
    Given test names as strings, returns a list of FrameworkTest objects.
    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
    variables for checking the test directory, the test database os, and
    other useful items.

    With no arguments, every test in this framework will be returned.
    With include, only tests with this exact name will be returned.
    With exclude, all tests but those excluded will be returned.

    A benchmarker is needed to construct full FrameworkTest objects. If
    one is not provided, a default Benchmarker will be created.
    '''

    # Avoid setting up a circular import
    from benchmark import framework_test
    from benchmark.benchmarker import Benchmarker
    from setup.linux import setup_util

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Setup default Benchmarker using example configuration
    if benchmarker is None:
        default_config = setup_util.get_fwroot() + "/benchmark.cfg.example"
        config = ConfigParser.SafeConfigParser()
        config.readfp(open(default_config))
        defaults = dict(config.items("Defaults"))

        # Convert strings into proper python types
        for k, v in defaults.iteritems():
            try:
                defaults[k] = literal_eval(v)
            except Exception:
                pass

        # Ensure we only run the __init__ method of Benchmarker
        defaults['install'] = None

        benchmarker = Benchmarker(defaults)

    # Search in both old and new directories
    fwroot = setup_util.get_fwroot()
    config_files = glob.glob("%s/*/benchmark_config" % fwroot)
    config_files.extend(
        glob.glob("%s/frameworks/*/*/benchmark_config" % fwroot))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except:
                # User-friendly errors
                print("Error loading '%s'." % config_file_name)
                raise

        # Find all tests in the config file
        config_tests = framework_test.parse_config(
            config, os.path.dirname(config_file_name), benchmarker)

        # Filter
        for test in config_tests:
            if test.name in exclude:
                continue
            elif not include or test.name in include:
                tests.append(test)

    tests.sort(key=lambda x: x.name)
    return tests
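# Hypothetical gather_tests usage (the test names are illustrative only):
all_tests = gather_tests()
just_one = gather_tests(include=['aspnet-mysql-raw'])
the_rest = gather_tests(exclude=['aspnet-mysql-raw'])
print([t.name for t in just_one])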
def start_xcs(data_set_name, models=XCS, benchmark_id='xcs'):
    # benchmarker = Benchmarker(data_set_name, models=MLP_MODELS, ensembles=ensembles, benchmark_id=benchmark_id)
    benchmarker = Benchmarker(data_set_name,
                              models=models,
                              benchmark_id=benchmark_id)
    benchmarker.run_nested_cv_xcs()
Example #8
def start_slm(data_set_name, ensembles=ENSEMBLES, benchmark_id='slm'):
    benchmarker = Benchmarker(data_set_name,
                              models=SLM_MODELS,
                              ensembles=ensembles,
                              benchmark_id=benchmark_id)
    benchmarker.run_nested_cv()
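# Hypothetical invocations of the helpers above (data set names and benchmark
# ids are illustrative only):
start_xcs('c_diabetes', benchmark_id='xcs_run_1')
start_slm('c_diabetes', benchmark_id='slm_run_1')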
Example #9
def main(argv=None):
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')

    # Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file',
        default='benchmark.cfg',
        metavar='FILE',
        help=
        'Optional configuration file to provide argument defaults. All config options can be overridden using the command line.'
    )
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = {"client-host": "localhost"}

    ##########################################################
    # Set up default values
    ##########################################################
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description='Run the Framework Benchmarking test suite.',
        parents=[conf_parser])
    parser.add_argument('-s',
                        '--server-host',
                        default=serverHost,
                        help='The application server.')
    parser.add_argument('-c',
                        '--client-host',
                        default=clientHost,
                        help='The client / load generation server.')
    parser.add_argument(
        '-u',
        '--client-user',
        default=clientUser,
        help='The username to use for SSH to the client instance.')
    parser.add_argument('-i',
                        '--client-identity-file',
                        default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument(
        '-d',
        '--database-host',
        default=databaHost,
        help=
        'The database server.  If not provided, defaults to the value of --client-host.'
    )
    parser.add_argument(
        '--database-user',
        default=databaUser,
        help=
        'The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.'
    )
    parser.add_argument(
        '--database-identity-file',
        default=dbIdenFile,
        dest='database_identity_file',
        help=
        'The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.'
    )
    parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
    parser.add_argument(
        '--install-software',
        action='store_true',
        help=
        'runs the installation script before running the rest of the commands')
    parser.add_argument(
        '--install',
        choices=['client', 'database', 'server', 'all'],
        default='all',
        help=
        'Allows you to only install the server, client, or database software')
    parser.add_argument(
        '--install-error-action',
        choices=['abort', 'continue'],
        default='continue',
        help='action to take in case of error during installation')
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude',
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'fortune', 'update',
                            'plaintext'
                        ],
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')
    parser.add_argument(
        '--list-test-metadata',
        action='store_true',
        default=False,
        help=
        'writes all the test metadata as a JSON file in the results directory')
    parser.add_argument(
        '--max-concurrency',
        default=256,
        help=
        'the maximum concurrency that the tests will run at. The query tests will run at this concurrency',
        type=int)
    parser.add_argument(
        '--max-queries',
        default=20,
        help='The maximum number of queries to run during the query test',
        type=int)
    parser.add_argument('--query-interval', default=5, type=int)
    parser.add_argument(
        '--max-threads',
        default=maxThreads,
        help=
        'The max number of threads to run weighttp at; this should be set to the number of cores for your system.',
        type=int)
    parser.add_argument('--duration',
                        default=15,
                        help='Time in seconds that each test should run for.')
    parser.add_argument('--starting-concurrency', default=8, type=int)
    parser.add_argument(
        '--sleep',
        type=int,
        default=60,
        help=
        'the amount of time to sleep after starting each test to allow the server to start up.'
    )
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )
    parser.add_argument(
        '--name',
        default="ec2",
        help=
        'The name to give this test. Results will be placed in a folder using this name.'
    )
    parser.add_argument('--os',
                        choices=['linux', 'windows'],
                        default='linux',
                        help='The operating system of the application server.')
    parser.add_argument('--database-os',
                        choices=['linux', 'windows'],
                        default='linux',
                        help='The operating system of the database server.')
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        default=False,
        help=
        'Causes the configuration to print before any other commands are executed.'
    )
    parser.set_defaults(
        **defaults
    )  # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))
    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    if benchmarker.list_tests:
        benchmarker.run_list_tests()
    elif benchmarker.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif benchmarker.parse is not None:
        benchmarker.parse_timestamp()
    else:
        benchmarker.run()
Example #10
def main(argv=None):
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append(".")

    # Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append("toolset/setup/linux")

    conf_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False
    )
    conf_parser.add_argument(
        "--conf_file",
        default="benchmark.cfg",
        metavar="FILE",
        help="Optional configuration file to provide argument defaults. All config options can be overridden using the command line.",
    )
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + "/" + args.conf_file])
            defaults = dict(config.items("Defaults"))
    except IOError:
        if args.conf_file != "benchmark.cfg":
            print "Configuration file not found!"
        defaults = {"client-host": "localhost"}

    ##########################################################
    # Set up default values
    ##########################################################
    serverHost = os.environ.get("TFB_SERVER_HOST")
    clientHost = os.environ.get("TFB_CLIENT_HOST")
    clientUser = os.environ.get("TFB_CLIENT_USER")
    clientIden = os.environ.get("TFB_CLIENT_IDENTITY_FILE")
    databaHost = os.getenv("TFB_DATABASE_HOST", clientHost)
    databaUser = os.getenv("TFB_DATABASE_USER", clientUser)
    dbIdenFile = os.getenv("TFB_DATABASE_IDENTITY_FILE", clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description="Run the Framework Benchmarking test suite.", parents=[conf_parser])
    parser.add_argument("-s", "--server-host", default=serverHost, help="The application server.")
    parser.add_argument("-c", "--client-host", default=clientHost, help="The client / load generation server.")
    parser.add_argument(
        "-u", "--client-user", default=clientUser, help="The username to use for SSH to the client instance."
    )
    parser.add_argument(
        "-i", "--client-identity-file", default=clientIden, help="The key to use for SSH to the client instance."
    )
    parser.add_argument(
        "-d",
        "--database-host",
        default=databaHost,
        help="The database server.  If not provided, defaults to the value of --client-host.",
    )
    parser.add_argument(
        "--database-user",
        default=databaUser,
        help="The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.",
    )
    parser.add_argument(
        "--database-identity-file",
        default=dbIdenFile,
        dest="database_identity_file",
        help="The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.",
    )
    parser.add_argument("-p", dest="password_prompt", action="store_true")
    parser.add_argument(
        "--install-software",
        action="store_true",
        help="runs the installation script before running the rest of the commands",
    )
    parser.add_argument(
        "--install",
        choices=["client", "database", "server", "all"],
        default="all",
        help="Allows you to only install the server, client, or database software",
    )
    parser.add_argument(
        "--install-error-action",
        choices=["abort", "continue"],
        default="continue",
        help="action to take in case of error during installation",
    )
    parser.add_argument("--test", nargs="+", help="names of tests to run")
    parser.add_argument("--exclude", nargs="+", help="names of tests to exclude")
    parser.add_argument(
        "--type",
        choices=["all", "json", "db", "query", "fortune", "update", "plaintext"],
        default="all",
        help="which type of test to run",
    )
    parser.add_argument(
        "-m",
        "--mode",
        choices=["benchmark", "verify"],
        default="benchmark",
        help="verify mode will only start up the tests, curl the urls and shutdown",
    )
    parser.add_argument(
        "--list-tests", action="store_true", default=False, help="lists all the known tests that can run"
    )
    parser.add_argument(
        "--list-test-metadata",
        action="store_true",
        default=False,
        help="writes all the test metadata as a JSON file in the results directory",
    )
    parser.add_argument(
        "--max-concurrency",
        default=256,
        help="the maximum concurrency that the tests will run at. The query tests will run at this concurrency",
        type=int,
    )
    parser.add_argument(
        "--max-queries", default=20, help="The maximum number of queries to run during the query test", type=int
    )
    parser.add_argument("--query-interval", default=5, type=int)
    parser.add_argument(
        "--max-threads",
        default=maxThreads,
        help="The max number of threads to run weight at, this should be set to the number of cores for your system.",
        type=int,
    )
    parser.add_argument("--duration", default=15, help="Time in seconds that each test should run for.")
    parser.add_argument("--starting-concurrency", default=8, type=int)
    parser.add_argument(
        "--sleep",
        type=int,
        default=60,
        help="the amount of time to sleep after starting each test to allow the server to start up.",
    )
    parser.add_argument(
        "--parse", help="Parses the results of the given timestamp and merges that with the latest results"
    )
    parser.add_argument(
        "--name", default="ec2", help="The name to give this test. Results will be placed in a folder using this name."
    )
    parser.add_argument(
        "--os", choices=["linux", "windows"], default="linux", help="The operating system of the application server."
    )
    parser.add_argument(
        "--database-os",
        choices=["linux", "windows"],
        default="linux",
        help="The operating system of the database server.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        default=False,
        help="Causes the configuration to print before any other commands are executed.",
    )
    parser.set_defaults(
        **defaults
    )  # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    if args.verbose:
        print "Configuration options: "
        pprint(vars(args))
    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    if benchmarker.list_tests:
        benchmarker.run_list_tests()
    elif benchmarker.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif benchmarker.parse is not None:
        benchmarker.parse_timestamp()
    else:
        benchmarker.run()
Example #11
def start_b(data_set_name, file_name=None):
    benchmarker = Benchmarker(data_set_name)
    benchmarker.run()
Example #12
parser.add_argument('--test', nargs='+', help='names of tests to run')
parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
parser.add_argument('--max-concurrency', default=256, help='the maximum concurrency that the tests will run at. The query tests will run at this concurrency', type=int)
parser.add_argument('--max-queries', default=20, help='The maximum number of queries to run during the query test', type=int)
parser.add_argument('--query-interval', default=5, type=int)
parser.add_argument('--max-threads', default=8, help='The max number of threads to run weighttp at; this should be set to the number of cores for your system.', type=int)
parser.add_argument('--duration', default=60, help='Time in seconds that each test should run for.')
parser.add_argument('--starting-concurrency', default=8, type=int)
parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
parser.add_argument('--os', choices=['linux', 'windows'], default='linux', help='The operating system of the application server.')
parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')
args = parser.parse_args()

benchmarker = Benchmarker(vars(args))

# Run the benchmarker in the specified mode
if benchmarker.list_tests:
    benchmarker.run_list_tests()
elif benchmarker.list_test_metadata:
    benchmarker.run_list_test_metadata()
elif benchmarker.parse is not None:
    benchmarker.parse_timestamp()
else:
    benchmarker.run()
parser.add_argument('--install', choices=['client', 'server', 'all'], default='all', help='Allows you to only install the server or client software')
parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
parser.add_argument('--test', nargs='+', help='names of tests to run')
parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
parser.add_argument('--next-sort', action='store_true', default=False, help='displays the next value that can be used as a sort value')
parser.add_argument('--max-concurrency', default=256, help='the maximum concurrency that the tests will run at. The query tests will run at this concurrency', type=int)
parser.add_argument('--max-queries', default=20, help='The maximum number of queries to run during the query test', type=int)
parser.add_argument('--query-interval', default=5, type=int)
parser.add_argument('--max-threads', default=8, help='The max number of threads to run wrk at. This should be set to the number of cores for your system.', type=int)
parser.add_argument('--duration', default=60, help='Time in seconds that each test should run for.')
parser.add_argument('--starting-concurrency', default=8, type=int)
parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
args = parser.parse_args()

benchmarker = Benchmarker(vars(args))

# Run the benchmarker in the specified mode
if benchmarker.list_tests:
  benchmarker.run_list_tests()
elif benchmarker.next_sort:
  benchmarker.next_sort_value()
elif benchmarker.parse is not None:
  benchmarker.parse_timestamp()
else:
  benchmarker.run()
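
The Benchmarker class itself does not appear in these excerpts, but every snippet passes it vars(args) and then reads the CLI options back as attributes (benchmarker.list_tests, benchmarker.parse, and so on). A minimal stand-in consistent with that usage, purely an assumption for illustration rather than the project's real implementation:

class Benchmarker(object):
    # Hypothetical stub: copy the argparse options dict onto the instance,
    # so args.list_tests becomes benchmarker.list_tests, etc.
    def __init__(self, args):
        self.__dict__.update(args)

    # No-op stand-ins for the modes dispatched above.
    def run_list_tests(self):
        pass

    def run_list_test_metadata(self):
        pass

    def next_sort_value(self):
        pass

    def parse_timestamp(self):
        pass

    def run(self):
        pass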
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments 
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    fwroot = setup_util.get_fwroot()
    if not fwroot: 
        fwroot = os.getcwd()
    setup_util.replace_environ(config='config/benchmark_profile', root=fwroot)
    print "FWROOT is %s"%setup_util.get_fwroot()

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = {"client-host": "localhost"}

    ##########################################################
    # Set up default values
    ##########################################################        
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms. 
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those 
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a 
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # SSH options
    parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.')
    parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.')
    parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.')
    parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument('-d', '--database-host', default=databaHost,
                        help='The database server.  If not provided, defaults to the value of --client-host.')
    parser.add_argument('--database-user', default=databaUser,
                        help='The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.')
    parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file',
                        help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.')
    parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
    
    
    # Install options
    parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default=None,
                        help='Runs installation script(s) before continuing on to execute the tests.')
    parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
    parser.add_argument('--install-strategy', choices=['unified', 'pertest'], default='unified',
        help='''With unified, all server software is installed into a single directory.
        With pertest, each test gets its own install directory, but installation takes longer''')
    parser.add_argument('--install-only', action='store_true', default=False, help='Do not run benchmark or verification, just install and exit')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
    parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
    parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
    parser.add_argument('--os', choices=['linux', 'windows'], default='linux',
                        help='The operating system of the application/framework server (the one running this binary).')
    parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')

    # Benchmark options
    parser.add_argument('--concurrency-levels', default=[8, 16, 32, 64, 128, 256], help='Runs wrk benchmarker with different concurrency values (type int-sequence)', action=StoreSeqAction)
    parser.add_argument('--query-levels', default=[1, 5, 10, 15, 20], help='Database queries requested per HTTP connection, used during query test (type int-sequence)', action=StoreSeqAction)
    parser.add_argument('--threads', default=maxThreads, help='Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system', type=int)
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')

    # Misc Options
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    # Verify and massage options
    if args.client_user is None:
        print 'Usernames (e.g. --client-user and --database-user) are required!'
        print 'The system will SSH into the client and the database for the install stage'
        print 'Aborting'
        exit(1)

    if args.database_user is None:
        args.database_user = args.client_user

    if args.database_host is None:
        args.database_host = args.client_host

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks, 
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif args.parse is not None:
        benchmarker.parse_timestamp()
    elif not args.install_only:
        return benchmarker.run()
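
The StoreSeqAction used by --concurrency-levels and --query-levels is not included in these excerpts. Below is a sketch of an argparse action matching the int-sequence grammar described in the parser's epilog (a single value, a comma list, or start:step:end mapped onto range); it is an assumption based on that description, not the project's actual class:

import argparse

class StoreSeqAction(argparse.Action):
    """Hypothetical sketch: parse 'int-sequence' arguments as the epilog
    documents them: '5' -> [5]; '1,3,6' -> [1, 3, 6];
    '1:3:15' -> range(1, 15, 3) -> [1, 4, 7, 10, 13]."""

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self._parse(values))

    @staticmethod
    def _parse(value):
        if ':' in value:  # start:step:end, using range semantics
            start, step, end = (int(v) for v in value.split(':'))
            return list(range(start, end, step))
        if ',' in value:  # explicit comma-separated list
            return [int(v) for v in value.split(',')]
        return [int(value)]  # single value becomes a one-element list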
def main(argv=None):
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv
	
    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')

    # Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = {"client-host": "localhost"}

    ##########################################################
    # Set up default values
    ##########################################################        
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description='Run the Framework Benchmarking test suite.',
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # SSH options
    parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.')
    parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.')
    parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.')
    parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument('-d', '--database-host', default=databaHost,
                        help='The database server.  If not provided, defaults to the value of --client-host.')
    parser.add_argument('--database-user', default=databaUser,
                        help='The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.')
    parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file',
                        help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.')
    parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
    parser.add_argument('--install-software', action='store_true', help='runs the installation script before running the rest of the commands')
    
    # Install options
    parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default=None,
                        help='Runs installation script(s) before continuing on to execute the tests.')
    parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
    parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
    parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
    parser.add_argument('--os', choices=['linux', 'windows'], default='linux',
                        help='The operating system of the application/framework server (the one running this binary).')
    parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')

    # Benchmark options
    parser.add_argument('--max-concurrency', default=256, help='the maximum number of HTTP connections that wrk will keep open. The query tests will run at this maximum', type=int)
    parser.add_argument('--max-queries', default=20, help='The maximum number of queries to run during the query test', type=int)
    parser.add_argument('--query-interval', default=5, type=int, help='Query tests will go from 1 query to max queries in increments of interval queries')
    parser.add_argument('--max-threads', default=maxThreads, help='The max number of threads to run wrk at. This should be set to the number of cores for your client system.', type=int)
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
    parser.add_argument('--starting-concurrency', default=8, type=int)
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')

    # Misc Options
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    # Verify and massage options
    if args.client_user is None:
        print 'Usernames (e.g. --client-user and --database-user) are required!'
        print 'The system will SSH into the client and the database for the install stage'
        print 'Aborting'
        exit(1)

    if args.database_user is None:
        args.database_user = args.client_user

    if args.database_host is None:
        args.database_host = args.client_host

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    if benchmarker.list_tests:
        benchmarker.run_list_tests()
    elif benchmarker.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif benchmarker.parse is not None:
        benchmarker.parse_timestamp()
    else:
        benchmarker.run()
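
Both main variants seed parser defaults from the [Defaults] section of benchmark.cfg, and the first variant also coerces the raw strings with literal_eval so that numbers, booleans, and lists survive the round trip. A self-contained illustration of that coercion step (the option values here are made up for the example):

from ast import literal_eval

# ConfigParser always returns option values as strings.
defaults = {'client_host': 'localhost', 'duration': '60',
            'list_tests': 'False', 'concurrency_levels': '[8, 16, 32]'}

for k, v in defaults.items():
    try:
        defaults[k] = literal_eval(v)  # '60' -> 60, 'False' -> False, '[8, 16, 32]' -> [8, 16, 32]
    except (ValueError, SyntaxError):
        pass  # plain strings like 'localhost' stay as strings

# defaults can now be handed to parser.set_defaults(**defaults).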
def gather_tests(include=[], exclude=[], benchmarker=None):
    '''
    Given test names as strings, returns a list of FrameworkTest objects.
    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
    variables for checking the test directory, the test database os, and
    other useful items.

    With no arguments, every test in this framework will be returned.
    With include, only tests with this exact name will be returned.
    With exclude, all tests but those excluded will be returned.

    A benchmarker is needed to construct full FrameworkTest objects. If
    one is not provided, a default Benchmarker will be created.
    '''

    # Avoid setting up a circular import
    from benchmark import framework_test
    from benchmark.benchmarker import Benchmarker
    from setup.linux import setup_util

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Old, hacky method to exclude all tests was to
    # request a test known to not exist, such as ''.
    # If test '' was requested, short-circuit and return
    # nothing immediately
    if len(include) == 1 and '' in include:
        return []

    # Setup default Benchmarker using example configuration
    if benchmarker is None:
        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
        config = ConfigParser.SafeConfigParser()
        config.readfp(open(default_config))
        defaults = dict(config.items("Defaults"))

        # Convert strings into proper python types
        for k, v in defaults.iteritems():
            try:
                defaults[k] = literal_eval(v)
            except Exception:
                pass

        # Ensure we only run the __init__ method of Benchmarker
        defaults['install'] = None
        defaults['results_name'] = "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
        defaults['results_environment'] = "My Server Environment"
        defaults['test_dir'] = None
        defaults['test_lang'] = None
        defaults['quiet'] = True

        benchmarker = Benchmarker(defaults)

    # Search for configuration files
    fwroot = setup_util.get_fwroot()
    config_files = []

    if benchmarker.test_lang:
        benchmarker.test_dir = []
        for lang in benchmarker.test_lang:
            if os.path.exists("{!s}/frameworks/{!s}".format(fwroot, lang)):
                for test_dir in os.listdir("{!s}/frameworks/{!s}".format(
                        fwroot, lang)):
                    benchmarker.test_dir.append("{!s}/{!s}".format(
                        lang, test_dir))
            else:
                raise Exception(
                    "Unable to locate language directory: {!s}".format(lang))

    if benchmarker.test_dir:
        for test_dir in benchmarker.test_dir:
            dir_config_files = glob.glob(
                "{!s}/frameworks/{!s}/benchmark_config.json".format(
                    fwroot, test_dir))
            if dir_config_files:
                config_files.extend(dir_config_files)
            else:
                raise Exception(
                    "Unable to locate tests in test-dir: {!s}".format(
                        test_dir))
    else:
        config_files.extend(
            glob.glob(
                "{!s}/frameworks/*/*/benchmark_config.json".format(fwroot)))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except ValueError:
                # User-friendly errors
                print("Error loading '{!s}'.".format(config_file_name))
                raise

        # Find all tests in the config file
        config_tests = framework_test.parse_config(
            config, os.path.dirname(config_file_name), benchmarker)

        # Filter
        for test in config_tests:
            if len(include) == 0 and len(exclude) == 0:
                # No filters, we are running everything
                tests.append(test)
            elif test.name in exclude:
                continue
            elif test.name in include:
                tests.append(test)
            else:
                # An include list exists, but this test is
                # not listed there, so we ignore it
                pass

    # Ensure we were able to locate everything that was
    # explicitly included
    if include:
        names = {test.name for test in tests}
        missing = list(set(include) - names)
        if missing:
            raise Exception("Unable to locate tests %s" % missing)

    tests.sort(key=lambda x: x.name)
    return tests
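
A hedged usage sketch, assuming it is run from the framework root with a readable benchmark.cfg in place; the test name is the one used as an example in the docstring:

# Every known test, sorted by name.
all_tests = gather_tests()

# Only the named test (raises if it cannot be located).
just_one = gather_tests(include=['aspnet-mysql-raw'])

# Everything except the named test.
the_rest = gather_tests(exclude=['aspnet-mysql-raw'])

for test in just_one:
    print(test.name)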
Example #17
parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
args = parser.parse_args()

benchmarker = Benchmarker(vars(args))

# Run the benchmarker in the specified mode
if benchmarker.list_tests:
    benchmarker.run_list_tests()
elif benchmarker.next_sort:
    benchmarker.next_sort_value()
elif benchmarker.parse is not None:
    benchmarker.parse_timestamp()
else:
    benchmarker.run()
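
Several examples above wrap sys.stdout in Unbuffered so that this script's messages interleave correctly with subprocess output; the class itself is not part of these excerpts. A minimal sketch of the usual recipe, assumed rather than taken from the project:

class Unbuffered(object):
    """Flush the wrapped stream after every write so output appears
    immediately, in order, alongside subprocess output."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        self.stream.write(data)
        self.stream.flush()

    def __getattr__(self, attr):
        # Delegate everything else (fileno, isatty, ...) to the wrapped stream.
        return getattr(self.stream, attr)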