def gather_docker_dependencies(docker_file):
    '''
    Gathers all the known docker dependencies for the given docker image.
    '''
    # Avoid setting up a circular import
    from setup.linux import setup_util
    deps = []

    docker_dir = os.path.join(setup_util.get_fwroot(), "toolset", "setup",
                              "linux", "docker")

    if os.path.exists(docker_file):
        with open(docker_file) as fp:
            for line in fp.readlines():
                tokens = line.strip().split(' ')
                if tokens[0] == "FROM":
                    # ubuntu:16.04 is the root of our base image chain; stop recursing once we reach it
                    if tokens[1] != "ubuntu:16.04":
                        depToken = tokens[1].strip().split(
                            ':')[0].strip().split('/')[1]
                        deps.append(depToken)
                        dep_docker_file = os.path.join(
                            os.path.dirname(docker_file),
                            depToken + ".dockerfile")
                        if not os.path.exists(dep_docker_file):
                            dep_docker_file = find_docker_file(
                                docker_dir, depToken + ".dockerfile")
                        deps.extend(
                            gather_docker_dependencies(dep_docker_file))

    return deps
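# The find_docker_file helper used above is not part of this snippet. A
# minimal sketch of what it presumably does (an assumption for illustration,
# not the project's actual implementation) is a recursive filename search:
import os

def find_docker_file(docker_dir, docker_file):
    '''
    Searches docker_dir recursively and returns the full path of the first
    file whose name matches docker_file, or None if it is not found.
    '''
    for root, dirs, files in os.walk(docker_dir):
        if docker_file in files:
            return os.path.join(root, docker_file)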
Example #2
def initialize(args):
  fwroot = setup_util.get_fwroot()
  dbuser = args.database_user
  dbhost = args.database_host
  dbiden = args.database_identity_file
  cluser = args.client_user
  clhost = args.client_host
  cliden = args.client_identity_file
  aphost = args.server_host

  # test ssh connections to all the machines
  client_conn = __check_connection(cluser, clhost, cliden, aphost)
  database_conn = __check_connection(dbuser, dbhost, dbiden, aphost)

  conn_success = client_conn and database_conn
  if not conn_success and not args.quiet:
    return __print_failure()
  
  # set up client machine
  if not __init_client(fwroot, cluser, clhost, cliden, args.quiet) and not args.quiet:
    return __print_failure()


  # set up database software
  if not __init_database(fwroot, dbuser, dbhost, dbiden, args.quiet) and not args.quiet:
    return __print_failure()
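# __check_connection, __init_client, __init_database and __print_failure are
# module-private helpers that are not included in this snippet. A minimal
# sketch of the connection check, assuming it simply attempts a no-op SSH
# command (the real helpers may do more):
import subprocess

def __check_connection(user, host, identity_file, server_host):
    # server_host is accepted for parity with the call sites above but is not
    # needed for a simple reachability check.
    cmd = ["ssh", "-T", "-o", "StrictHostKeyChecking=no", "-o", "BatchMode=yes"]
    if identity_file:
        cmd.extend(["-i", identity_file])
    cmd.extend(["%s@%s" % (user, host), "exit 0"])
    return subprocess.call(cmd) == 0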
def gather_langauges():
    '''
    Gathers all the known languages in the suite via the folder names
    beneath FWROOT.
    '''
    # Avoid setting up a circular import
    from setup.linux import setup_util

    lang_dir = os.path.join(setup_util.get_fwroot(), "frameworks")
    langs = []
    for dir in glob.glob(os.path.join(lang_dir, "*")):
        langs.append(dir.replace(lang_dir, "")[1:])
    return langs
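# Note: dir.replace(lang_dir, "")[1:] strips the language directory prefix and
# the path separator; os.path.basename(dir) would yield the same folder name
# more directly.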
    def __init__(self, args):

        # Map type strings to their objects
        types = dict()
        types['json'] = JsonTestType()
        types['db'] = DBTestType()
        types['query'] = QueryTestType()
        types['fortune'] = FortuneTestType()
        types['update'] = UpdateTestType()
        types['plaintext'] = PlaintextTestType()

        # Turn type into a map instead of a string
        if args['type'] == 'all':
            args['types'] = types
        else:
            args['types'] = { args['type'] : types[args['type']] }
        del args['type']
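        # e.g. running with --type db leaves args['types'] == {'db': DBTestType()}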


        args['max_threads'] = args['threads']
        args['max_concurrency'] = max(args['concurrency_levels'])

        self.__dict__.update(args)
        # pprint(self.__dict__)

        self.quiet_out = QuietOutputStream(self.quiet)

        self.start_time = time.time()
        self.run_test_timeout_seconds = 7200

        # setup logging
        logging.basicConfig(stream=self.quiet_out, level=logging.INFO)

        # setup some additional variables
        if self.database_user == None: self.database_user = self.client_user
        if self.database_host == None: self.database_host = self.client_host
        if self.database_identity_file == None: self.database_identity_file = self.client_identity_file

        # Remember root directory
        self.fwroot = setup_util.get_fwroot()

        # setup current_benchmark.txt location
        self.current_benchmark = "/tmp/current_benchmark.txt"

        if hasattr(self, 'parse') and self.parse != None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        # setup results and latest_results directories
        self.result_directory = os.path.join(self.fwroot, "results")
        if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
            shutil.rmtree(os.path.join(self.fwroot, "results"))

        # remove installs directories if --clean-all provided
        self.install_root = "%s/%s" % (self.fwroot, "installs")
        if args['clean_all']:
            os.system("sudo rm -rf " + self.install_root)
            os.mkdir(self.install_root)

        self.results = None
        try:
            with open(os.path.join(self.full_results_directory(), 'results.json'), 'r') as f:
                #Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test not found.")

        if self.results == None:
            self.results = dict()
            self.results['uuid'] = str(uuid.uuid4())
            self.results['name'] = datetime.now().strftime(self.results_name)
            self.results['environmentDescription'] = self.results_environment
            self.results['startTime'] = int(round(time.time() * 1000))
            self.results['completionTime'] = None
            self.results['concurrencyLevels'] = self.concurrency_levels
            self.results['queryIntervals'] = self.query_levels
            self.results['frameworks'] = [t.name for t in self.__gather_tests]
            self.results['duration'] = self.duration
            self.results['rawData'] = dict()
            self.results['rawData']['json'] = dict()
            self.results['rawData']['db'] = dict()
            self.results['rawData']['query'] = dict()
            self.results['rawData']['fortune'] = dict()
            self.results['rawData']['update'] = dict()
            self.results['rawData']['plaintext'] = dict()
            self.results['completed'] = dict()
            self.results['succeeded'] = dict()
            self.results['succeeded']['json'] = []
            self.results['succeeded']['db'] = []
            self.results['succeeded']['query'] = []
            self.results['succeeded']['fortune'] = []
            self.results['succeeded']['update'] = []
            self.results['succeeded']['plaintext'] = []
            self.results['failed'] = dict()
            self.results['failed']['json'] = []
            self.results['failed']['db'] = []
            self.results['failed']['query'] = []
            self.results['failed']['fortune'] = []
            self.results['failed']['update'] = []
            self.results['failed']['plaintext'] = []
            self.results['verify'] = dict()
        else:
            #for x in self.__gather_tests():
            #  if x.name not in self.results['frameworks']:
            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results['frameworks'] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
        if self.database_identity_file != None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
        if self.client_identity_file != None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
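# The per-type rawData/succeeded/failed initialization above is deliberately
# explicit. A functionally equivalent sketch (not the project's actual code)
# that builds the same repeated parts with comprehensions:
def empty_results_skeleton(test_types=('json', 'db', 'query', 'fortune', 'update', 'plaintext')):
    # Returns the per-test-type skeleton used when no results.json exists yet.
    return {
        'rawData': dict((t, dict()) for t in test_types),
        'succeeded': dict((t, []) for t in test_types),
        'failed': dict((t, []) for t in test_types),
        'completed': dict(),
        'verify': dict(),
    }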
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments:
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so the config file trumps environment
    variables but command line flags have the final say.
    '''
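    # Precedence example (values illustrative): TFB_CLIENT_HOST in the
    # environment is overridden by a client-host entry in benchmark.cfg, which
    # is in turn overridden by --client-host on the command line.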
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout=Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    fwroot = setup_util.get_fwroot()
    if not fwroot: 
        fwroot = os.getcwd()
    setup_util.replace_environ(config='config/benchmark_profile', root=fwroot)
    print "FWROOT is %s" % setup_util.get_fwroot()

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
            # Convert strings into proper python types
            for k,v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = {"client-host": "localhost"}

    ##########################################################
    # Set up default values
    ##########################################################        
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms. 
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those 
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a 
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # SSH options
    parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.')
    parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.')
    parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.')
    parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument('-d', '--database-host', default=databaHost,
                        help='The database server.  If not provided, defaults to the value of --client-host.')
    parser.add_argument('--database-user', default=databaUser,
                        help='The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.')
    parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file',
                        help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.')
    parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
    
    
    # Install options
    parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default=None,
                        help='Runs installation script(s) before continuing on to execute the tests.')
    parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
    parser.add_argument('--install-strategy', choices=['unified', 'pertest'], default='unified',
        help='''Installation strategy. With unified, all server software is installed into a single directory.
        With pertest, each test gets its own installs directory, but installation takes longer.''')
    parser.add_argument('--install-only', action='store_true', default=False, help='Do not run benchmark or verification, just install and exit')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
    parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
    parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
    parser.add_argument('--os', choices=['linux', 'windows'], default='linux',
                        help='The operating system of the application/framework server (the one running this binary).')
    parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')

    # Benchmark options
    parser.add_argument('--concurrency-levels', default=[8, 16, 32, 64, 128, 256], help='Runs the wrk benchmarker with different concurrency values (type int-sequence)', action=StoreSeqAction)
    parser.add_argument('--query-levels', default=[1, 5, 10, 15, 20], help='Database queries requested per HTTP connection, used during the query test (type int-sequence)', action=StoreSeqAction)
    parser.add_argument('--threads', default=maxThreads, help='Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system', type=int)
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')

    # Misc Options
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    # Verify and massage options
    if args.client_user is None:
      print 'Usernames (e.g. --client-user and --database-user) are required!'
      print 'The system will SSH into the client and the database for the install stage'
      print 'Aborting'
      exit(1)

    if args.database_user is None:
      args.database_user = args.client_user

    if args.database_host is None:
      args.database_host = args.client_host

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks, 
    #   they are either str or bool based on the python version
    if args.list_tests:
      benchmarker.run_list_tests()
    elif args.list_test_metadata:
      benchmarker.run_list_test_metadata()
    elif args.parse != None:
      benchmarker.parse_timestamp()
    elif not args.install_only:
      return benchmarker.run()
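# The StoreSeqAction referenced by the parser above implements the
# "int-sequence" forms described in the epilog. A sketch of how such an
# argparse action could look (an illustrative assumption, not necessarily the
# project's StoreSeqAction):
import argparse

class IntSequenceAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # "5" -> [5]; "1,3,6" -> [1, 3, 6]; "1:3:15" -> [1, 4, 7, 10, 13]
        if ':' in values:
            start, step, end = [int(v) for v in values.split(':')]
            result = list(range(start, end, step))
        elif ',' in values:
            result = [int(v) for v in values.split(',')]
        else:
            result = [int(values)]
        setattr(namespace, self.dest, result)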
    def __init__(self, args):

        # Map type strings to their objects
        types = dict()
        types['json'] = JsonTestType()
        types['db'] = DBTestType()
        types['query'] = QueryTestType()
        types['fortune'] = FortuneTestType()
        types['update'] = UpdateTestType()
        types['plaintext'] = PlaintextTestType()

        # Turn type into a map instead of a string
        if args['type'] == 'all':
            args['types'] = types
        else:
            args['types'] = {args['type']: types[args['type']]}
        del args['type']

        args['max_threads'] = args['threads']
        args['max_concurrency'] = max(args['concurrency_levels'])

        self.__dict__.update(args)
        # pprint(self.__dict__)

        self.start_time = time.time()
        self.run_test_timeout_seconds = 3600

        # setup logging
        logging.basicConfig(stream=sys.stderr, level=logging.INFO)

        # setup some additional variables
        if self.database_user == None: self.database_user = self.client_user
        if self.database_host == None: self.database_host = self.client_host
        if self.database_identity_file == None:
            self.database_identity_file = self.client_identity_file

        # Remember root directory
        self.fwroot = setup_util.get_fwroot()

        # setup results and latest_results directories
        self.result_directory = os.path.join("results", self.name)
        self.latest_results_directory = self.latest_results_directory()

        if hasattr(self, 'parse') and self.parse != None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        # Load the latest data
        #self.latest = None
        #try:
        #  with open('toolset/benchmark/latest.json', 'r') as f:
        #    # Load json file into config object
        #    self.latest = json.load(f)
        #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
        #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
        #except IOError:
        #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
        #
        #self.results = None
        #try:
        #  if self.latest != None and self.name in self.latest.keys():
        #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
        #      # Load json file into config object
        #      self.results = json.load(f)
        #except IOError:
        #  pass

        self.results = None
        try:
            with open(
                    os.path.join(self.latest_results_directory,
                                 'results.json'), 'r') as f:
                #Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test %s not found.", self.name)

        if self.results == None:
            self.results = dict()
            self.results['name'] = self.name
            self.results['concurrencyLevels'] = self.concurrency_levels
            self.results['queryIntervals'] = self.query_levels
            self.results['frameworks'] = [t.name for t in self.__gather_tests]
            self.results['duration'] = self.duration
            self.results['rawData'] = dict()
            self.results['rawData']['json'] = dict()
            self.results['rawData']['db'] = dict()
            self.results['rawData']['query'] = dict()
            self.results['rawData']['fortune'] = dict()
            self.results['rawData']['update'] = dict()
            self.results['rawData']['plaintext'] = dict()
            self.results['completed'] = dict()
            self.results['succeeded'] = dict()
            self.results['succeeded']['json'] = []
            self.results['succeeded']['db'] = []
            self.results['succeeded']['query'] = []
            self.results['succeeded']['fortune'] = []
            self.results['succeeded']['update'] = []
            self.results['succeeded']['plaintext'] = []
            self.results['failed'] = dict()
            self.results['failed']['json'] = []
            self.results['failed']['db'] = []
            self.results['failed']['query'] = []
            self.results['failed']['fortune'] = []
            self.results['failed']['update'] = []
            self.results['failed']['plaintext'] = []
            self.results['verify'] = dict()
        else:
            #for x in self.__gather_tests():
            #  if x.name not in self.results['frameworks']:
            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results['frameworks'] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
        if self.database_identity_file != None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
        if self.client_identity_file != None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

        if self.install is not None:
            install = Installer(self, self.install_strategy)
            install.install_software()
Example #8
  def __init__(self, args):
    
    self.__dict__.update(args)
    self.start_time = time.time()
    self.run_test_timeout_seconds = 3600

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    
    # setup some additional variables
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file

    # Remember root directory
    self.fwroot = setup_util.get_fwroot()

    # setup results and latest_results directories 
    self.result_directory = os.path.join("results", self.name)
    self.latest_results_directory = self.latest_results_directory()
  
    if self.parse != None:
      self.timestamp = self.parse
    else:
      self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

    # Setup the concurrency levels array. This array goes from
    # starting_concurrency to max concurrency, doubling each time
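    # e.g. starting_concurrency=8 and max_concurrency=256 yields
    # [8, 16, 32, 64, 128, 256]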
    self.concurrency_levels = []
    concurrency = self.starting_concurrency
    while concurrency <= self.max_concurrency:
      self.concurrency_levels.append(concurrency)
      concurrency = concurrency * 2

    # Setup query interval array
    # starts at 1, and goes up to max_queries, using the query_interval
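    # e.g. max_queries=20 and query_interval=5 yields [1, 5, 10, 15, 20]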
    self.query_intervals = []
    queries = 1
    while queries <= self.max_queries:
      self.query_intervals.append(queries)
      if queries == 1:
        queries = 0

      queries = queries + self.query_interval
    
    # Load the latest data
    #self.latest = None
    #try:
    #  with open('toolset/benchmark/latest.json', 'r') as f:
    #    # Load json file into config object
    #    self.latest = json.load(f)
    #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
    #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
    #except IOError:
    #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
    #
    #self.results = None
    #try: 
    #  if self.latest != None and self.name in self.latest.keys():
    #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
    #      # Load json file into config object
    #      self.results = json.load(f)
    #except IOError:
    #  pass

    self.results = None
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
        #Load json file into results object
        self.results = json.load(f)
    except IOError:
      logging.warn("results.json for test %s not found.",self.name) 
    
    if self.results == None:
      self.results = dict()
      self.results['name'] = self.name
      self.results['concurrencyLevels'] = self.concurrency_levels
      self.results['queryIntervals'] = self.query_intervals
      self.results['frameworks'] = [t.name for t in self.__gather_tests]
      self.results['duration'] = self.duration
      self.results['rawData'] = dict()
      self.results['rawData']['json'] = dict()
      self.results['rawData']['db'] = dict()
      self.results['rawData']['query'] = dict()
      self.results['rawData']['fortune'] = dict()
      self.results['rawData']['update'] = dict()
      self.results['rawData']['plaintext'] = dict()
      self.results['completed'] = dict()
      self.results['succeeded'] = dict()
      self.results['succeeded']['json'] = []
      self.results['succeeded']['db'] = []
      self.results['succeeded']['query'] = []
      self.results['succeeded']['fortune'] = []
      self.results['succeeded']['update'] = []
      self.results['succeeded']['plaintext'] = []
      self.results['failed'] = dict()
      self.results['failed']['json'] = []
      self.results['failed']['db'] = []
      self.results['failed']['query'] = []
      self.results['failed']['fortune'] = []
      self.results['failed']['update'] = []
      self.results['failed']['plaintext'] = []
      self.results['verify'] = dict()
    else:
      #for x in self.__gather_tests():
      #  if x.name not in self.results['frameworks']:
      #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
      # Always overwrite framework list
      self.results['frameworks'] = [t.name for t in self.__gather_tests]

    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
      self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
      self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

    if self.install is not None:
      install = Installer(self, self.install_strategy)
      install.install_software()
Example #9
  def __init__(self, args):
    
    # Map type strings to their objects
    types = dict()
    types['json'] = JsonTestType()
    types['db'] = DBTestType()
    types['query'] = QueryTestType()
    types['fortune'] = FortuneTestType()
    types['update'] = UpdateTestType()
    types['plaintext'] = PlaintextTestType()

    # Turn type into a map instead of a string
    if args['type'] == 'all':
        args['types'] = types
    else:
        args['types'] = { args['type'] : types[args['type']] }
    del args['type']
    

    args['max_threads'] = args['threads']
    args['max_concurrency'] = max(args['concurrency_levels'])

    self.__dict__.update(args)
    # pprint(self.__dict__)

    self.start_time = time.time()
    self.run_test_timeout_seconds = 3600

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    
    # setup some additional variables
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file

    # Remember root directory
    self.fwroot = setup_util.get_fwroot()

    # setup results and latest_results directories 
    self.result_directory = os.path.join("results", self.name)
    self.latest_results_directory = self.latest_results_directory()
  
    if hasattr(self, 'parse') and self.parse != None:
      self.timestamp = self.parse
    else:
      self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

    self.results = None
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
        #Load json file into results object
        self.results = json.load(f)
    except IOError:
      logging.warn("results.json for test %s not found.",self.name) 
    
    if self.results == None:
      self.results = dict()
      self.results['name'] = self.name
      self.results['concurrencyLevels'] = self.concurrency_levels
      self.results['queryIntervals'] = self.query_levels
      self.results['frameworks'] = [t.name for t in self.__gather_tests]
      self.results['duration'] = self.duration
      self.results['rawData'] = dict()
      self.results['rawData']['json'] = dict()
      self.results['rawData']['db'] = dict()
      self.results['rawData']['query'] = dict()
      self.results['rawData']['fortune'] = dict()
      self.results['rawData']['update'] = dict()
      self.results['rawData']['plaintext'] = dict()
      self.results['completed'] = dict()
      self.results['succeeded'] = dict()
      self.results['succeeded']['json'] = []
      self.results['succeeded']['db'] = []
      self.results['succeeded']['query'] = []
      self.results['succeeded']['fortune'] = []
      self.results['succeeded']['update'] = []
      self.results['succeeded']['plaintext'] = []
      self.results['failed'] = dict()
      self.results['failed']['json'] = []
      self.results['failed']['db'] = []
      self.results['failed']['query'] = []
      self.results['failed']['fortune'] = []
      self.results['failed']['update'] = []
      self.results['failed']['plaintext'] = []
      self.results['verify'] = dict()
    else:
      #for x in self.__gather_tests():
      #  if x.name not in self.results['frameworks']:
      #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
      # Always overwrite framework list
      self.results['frameworks'] = [t.name for t in self.__gather_tests]

    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
      self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
      self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

    if self.install is not None:
      install = Installer(self, self.install_strategy)
      install.install_software()
Example #10
def gather_tests(include=[], exclude=[], benchmarker=None):
    '''
  Given test names as strings, returns a list of FrameworkTest objects. 
  For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
  variables for checking the test directory, the test database os, and 
  other useful items. 

  With no arguments, every test in this framework will be returned.  
  With include, only tests with this exact name will be returned. 
  With exclude, all tests but those excluded will be returned. 

  A benchmarker is needed to construct full FrameworkTest objects. If
  one is not provided, a default Benchmarker will be created. 
  '''

    # Avoid setting up a circular import
    from benchmark import framework_test
    from benchmark.benchmarker import Benchmarker
    from setup.linux import setup_util

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Setup default Benchmarker using example configuration
    if benchmarker is None:
        default_config = setup_util.get_fwroot() + "/benchmark.cfg.example"
        config = ConfigParser.SafeConfigParser()
        config.readfp(open(default_config))
        defaults = dict(config.items("Defaults"))

        # Convert strings into proper python types
        for k, v in defaults.iteritems():
            try:
                defaults[k] = literal_eval(v)
            except:
                pass

        # Ensure we only run the __init__ method of Benchmarker
        defaults['install'] = None

        benchmarker = Benchmarker(defaults)

    # Search in both old and new directories
    fwroot = setup_util.get_fwroot()
    config_files = glob.glob("%s/*/benchmark_config" % fwroot)
    config_files.extend(
        glob.glob("%s/frameworks/*/*/benchmark_config" % fwroot))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except:
                # User-friendly errors
                print("Error loading '%s'." % config_file_name)
                raise

        # Find all tests in the config file
        config_tests = framework_test.parse_config(
            config, os.path.dirname(config_file_name), benchmarker)

        # Filter
        for test in config_tests:
            if test.name in exclude:
                continue
            elif len(include) == 0 or test.name in include:
                tests.append(test)

    tests.sort(key=lambda x: x.name)
    return tests
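# Example usage (test name taken from the docstring above, for illustration):
#   tests = gather_tests(include=['aspnet-mysql-raw'])
#   print [t.name for t in tests]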
Example #11
          for line in out:
            log.info(line.rstrip('\n'))
      except IOError:
        log.error("No OUT file found")

    log.error("Running inside Travis-CI, so I will print a copy of the verification summary")

    results = None
    try:
      with open('results/ec2/latest/results.json', 'r') as f:
        results = json.load(f)
    except IOError:
      log.critical("No results.json found, unable to print verification summary") 
      sys.exit(retcode)

    target_dir = setup_util.get_fwroot() + '/frameworks/' + testdir
    dirtests = [t for t in gather_tests() if t.directory == target_dir]

    # Normally you don't have to use Fore.* before each line, but 
    # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
    # or stream flush, so we have to ensure that the color code is printed repeatedly
    prefix = Fore.CYAN
    for line in header("Verification Summary", top='=', bottom='').split('\n'):
      print prefix + line

    for test in dirtests:
      print prefix + "| Test: %s" % test.name
      if test.name not in runner.names:
        print prefix + "|      " + Fore.YELLOW + "Unable to verify in Travis-CI"
      elif test.name in results['verify'].keys():
        for test_type, result in results['verify'][test.name].iteritems():
Example #12
    def __init__(self, mode, testdir=None):
        '''
    mode = [cisetup|prereq|install|verify] for what we want to do
    testdir  = framework directory we are running
    '''

        self.directory = testdir
        self.mode = mode
        if mode == "cisetup":
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.basicConfig(level=logging.INFO)

        try:
            # NOTE: THIS IS VERY TRICKY TO GET RIGHT!
            #
            # Our goal: Look at the files changed and determine if we need to
            # run a verification for this folder. For a pull request, we want to
            # see the list of files changed by any commit in that PR. For a
            # push to master, we want to see a list of files changed by the pushed
            # commits. If this list of files contains the current directory, or
            # contains the toolset/ directory, then we need to run a verification
            #
            # If modifying, please consider:
            #  - the commit range for a pull request is the first PR commit to
            #    the github auto-merge commit
            #  - the commits in the commit range may include merge commits
            #    other than the auto-merge commit. A git log with -m
            #    will know that *all* the files in the merge were changed,
            #    but that is not the changeset that we care about
            #  - git diff shows differences, but we care about git log, which
            #    shows information on what was changed during commits
            #  - master can (and will!) move during a build. This is one
            #    of the biggest problems with using git diff - master will
            #    be updated, and those updates will include changes to toolset,
            #    and suddenly every job in the build will start to run instead
            #    of fast-failing
            #  - commit_range is not set if there was only one commit pushed,
            #    so be sure to test for that on both master and PR
            #  - commit_range and commit are set very differently for pushes
            #    to an owned branch versus pushes to a pull request, test
            #  - For merge commits, the TRAVIS_COMMIT and TRAVIS_COMMIT_RANGE
            #    will become invalid if additional commits are pushed while a job is
            #    building. See https://github.com/travis-ci/travis-ci/issues/2666
            #  - If you're really insane, consider that the last commit in a
            #    pull request could have been a merge commit. This means that
            #    the github auto-merge commit could have more than two parents
            #  - Travis cannot really support rebasing onto an owned branch, the
            #    commit_range they provide will include commits that are non-existent
            #    in the repo cloned on the workers. See https://github.com/travis-ci/travis-ci/issues/2668
            #
            #  - TEST ALL THESE OPTIONS:
            #      - On a branch you own (e.g. your fork's master)
            #          - single commit
            #          - multiple commits pushed at once
            #          - commit+push, then commit+push again before the first
            #            build has finished. Verify all jobs in the first build
            #            used the correct commit range
            #          - multiple commits, including a merge commit. Verify that
            #            the unrelated merge commit changes are not counted as
            #            changes the user made
            #      - On a pull request
            #          - repeat all above variations
            #
            #
            # ==== CURRENT SOLUTION FOR PRs ====
            #
            # For pull requests, we will examine Github's automerge commit to see
            # what files would be touched if we merged this into the current master.
            # You can't trust the travis variables here, as the automerge commit can
            # be different for jobs on the same build. See https://github.com/travis-ci/travis-ci/issues/2666
            # We instead use the FETCH_HEAD, which will always point to the SHA of
            # the latest merge commit. However, if we only used FETCH_HEAD then any
            # new commits to a pull request would instantly start affecting currently
            # running jobs and the list of changed files may become incorrect for
            # those affected jobs. The solution is to walk backward from the FETCH_HEAD
            # to the last commit in the pull request. Based on how github currently
            # does the automerge, this is the second parent of FETCH_HEAD, and
            # therefore we use FETCH_HEAD^2 below
            #
            # This may not work perfectly in situations where the user had advanced
            # merging happening in their PR. We correctly handle them merging in
            # from upstream, but if they do wild stuff then this will likely break
            # on that. However, it will also likely break by seeing a change in
            # toolset and triggering a full run when a partial run would be
            # acceptable
            #
            # ==== CURRENT SOLUTION FOR OWNED BRANCHES (e.g. master) ====
            #
            # This one is fairly simple. Find the commit or commit range, and
            # examine the log of files changes. If you encounter any merges,
            # then fully explode the two parent commits that made the merge
            # and look for the files changed there. This is an aggressive
            # strategy to ensure that commits to master are always tested
            # well
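            # (Concretely: a multi-commit pull request ends up with a range
            #  such as "--first-parent <first_commit>...<last_commit>", while
            #  a single commit pushed to master ends up with
            #  "--first-parent -m -1 <commit>", as built below.)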
            log.debug("TRAVIS_COMMIT_RANGE: %s",
                      os.environ['TRAVIS_COMMIT_RANGE'])
            log.debug("TRAVIS_COMMIT      : %s", os.environ['TRAVIS_COMMIT'])

            is_PR = (os.environ['TRAVIS_PULL_REQUEST'] != "false")
            if is_PR:
                log.debug('I am testing a pull request')
                first_commit = os.environ['TRAVIS_COMMIT_RANGE'].split(
                    '...')[0]
                last_commit = subprocess.check_output(
                    "git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n')
                log.debug("Guessing that first commit in PR is : %s",
                          first_commit)
                log.debug("Guessing that final commit in PR is : %s",
                          last_commit)

                if first_commit == "":
                    # Travis-CI is not yet passing a commit range for pull requests
                    # so we must use the automerge's changed file list. This has the
                    # negative effect that new pushes to the PR will immediately
                    # start affecting any new jobs, regardless of the build they are on
                    log.debug(
                        "No first commit, using Github's automerge commit")
                    self.commit_range = "--first-parent -1 -m FETCH_HEAD"
                elif first_commit == last_commit:
                    # There is only one commit in the pull request so far,
                    # or Travis-CI is not yet passing the commit range properly
                    # for pull requests. We examine just the one commit using -1
                    #
                    # On the oddball chance that it's a merge commit, we pray
                    # it's a merge from upstream and also pass --first-parent
                    log.debug("Only one commit in range, examining %s",
                              last_commit)
                    self.commit_range = "-m --first-parent -1 %s" % last_commit
                else:
                    # In case they merged in upstream, we only care about the first
                    # parent. For crazier merges, we hope
                    self.commit_range = "--first-parent %s...%s" % (
                        first_commit, last_commit)

            if not is_PR:
                log.debug('I am not testing a pull request')
                # Three main scenarios to consider
                #  - 1 One non-merge commit pushed to master
                #  - 2 One merge commit pushed to master (e.g. a PR was merged).
                #      This is an example of merging a topic branch
                #  - 3 Multiple commits pushed to master
                #
                #  1 and 2 are actually handled the same way, by showing the
                #  changes being brought into to master when that one commit
                #  was merged. Fairly simple, `git log -1 COMMIT`. To handle
                #  the potential merge of a topic branch you also include
                #  `--first-parent -m`.
                #
                #  3 needs to be handled by comparing all merge children for
                #  the entire commit range. The best solution here would *not*
                #  use --first-parent because there is no guarantee that it
                #  reflects changes brought into master. Unfortunately we have
                #  no good method inside Travis-CI to easily differentiate
                #  scenario 1/2 from scenario 3, so I cannot handle them all
                #  separately. 1/2 are the most common cases, 3 with a range
                #  of non-merge commits is the next most common, and 3 with
                #  a range including merge commits is the least common, so I
                #  am choosing to make our Travis-CI setup potentially not work
                #  properly on the least common case by always using
                #  --first-parent

                # Handle 3
                # Note: Also handles 2 because Travis-CI sets COMMIT_RANGE for
                # merged PR commits
                self.commit_range = "--first-parent -m %s" % os.environ[
                    'TRAVIS_COMMIT_RANGE']

                # Handle 1
                if self.commit_range == "":
                    self.commit_range = "--first-parent -m -1 %s" % os.environ[
                        'TRAVIS_COMMIT']

        except KeyError:
            log.warning(
                "I should only be used for automated integration tests e.g. Travis-CI"
            )
            log.warning("Were you looking for run-tests.py?")
            self.commit_range = "-m HEAD^...HEAD"

        #
        # Find the one test from benchmark_config.json that we are going to run
        #

        tests = gather_tests()
        self.fwroot = setup_util.get_fwroot()
        target_dir = self.fwroot + '/frameworks/' + testdir
        log.debug("Target directory is %s", target_dir)
        dirtests = [t for t in tests if t.directory == target_dir]

        # Travis-CI is linux only
        osvalidtests = [
            t for t in dirtests
            if t.os.lower() == "linux" and (t.database_os.lower() == "linux"
                                            or t.database_os.lower() == "none")
        ]

        # Our Travis-CI only has some databases supported
        validtests = [
            t for t in osvalidtests
            if t.database.lower() in self.SUPPORTED_DATABASES
        ]
        supported_databases = ','.join(self.SUPPORTED_DATABASES)
        log.info(
            "Found %s usable tests (%s valid for linux, %s valid for linux and {%s}) in directory '%s'",
            len(dirtests), len(osvalidtests), len(validtests),
            supported_databases, '$FWROOT/frameworks/' + testdir)
        if len(validtests) == 0:
            log.critical(
                "Found no test that is possible to run in Travis-CI! Aborting!"
            )
            if len(osvalidtests) != 0:
                log.critical(
                    "Note: Found these tests that could run in Travis-CI if more databases were supported"
                )
                log.critical("Note: %s", osvalidtests)
                databases_needed = [t.database for t in osvalidtests]
                databases_needed = list(set(databases_needed))
                log.critical("Note: Here are the needed databases:")
                log.critical("Note: %s", databases_needed)
            sys.exit(1)

        self.names = [t.name for t in validtests]
        log.info("Using tests %s to verify directory %s", self.names,
                 '$FWROOT/frameworks/' + testdir)
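# How the commit_range computed above is consumed is not shown in this
# snippet. A minimal sketch (an assumption, not the project's actual code) of
# listing the files touched by those commits:
import subprocess

def changed_files(commit_range):
    # Unique file paths named by `git log` for the given commit range/options.
    out = subprocess.check_output(
        'git log --name-only --pretty="format:" %s' % commit_range, shell=True)
    return sorted(set(line for line in out.splitlines() if line.strip()))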
Example #14
    def __init__(self, args):

        self.__dict__.update(args)
        self.start_time = time.time()
        self.run_test_timeout_seconds = 3600

        # setup logging
        logging.basicConfig(stream=sys.stderr, level=logging.INFO)

        # setup some additional variables
        if self.database_user == None: self.database_user = self.client_user
        if self.database_host == None: self.database_host = self.client_host
        if self.database_identity_file == None:
            self.database_identity_file = self.client_identity_file

        # Remember root directory
        self.fwroot = setup_util.get_fwroot()

        # setup results and latest_results directories
        self.result_directory = os.path.join("results", self.name)
        self.latest_results_directory = self.latest_results_directory()

        if self.parse != None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        # Setup the concurrency levels array. This array goes from
        # starting_concurrency to max concurrency, doubling each time
        self.concurrency_levels = []
        concurrency = self.starting_concurrency
        while concurrency <= self.max_concurrency:
            self.concurrency_levels.append(concurrency)
            concurrency = concurrency * 2

        # Setup query interval array
        # starts at 1, and goes up to max_queries, using the query_interval
        self.query_intervals = []
        queries = 1
        while queries <= self.max_queries:
            self.query_intervals.append(queries)
            if queries == 1:
                queries = 0

            queries = queries + self.query_interval

        # Load the latest data
        #self.latest = None
        #try:
        #  with open('toolset/benchmark/latest.json', 'r') as f:
        #    # Load json file into config object
        #    self.latest = json.load(f)
        #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
        #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
        #except IOError:
        #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
        #
        #self.results = None
        #try:
        #  if self.latest != None and self.name in self.latest.keys():
        #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
        #      # Load json file into config object
        #      self.results = json.load(f)
        #except IOError:
        #  pass

        self.results = None
        try:
            with open(
                    os.path.join(self.latest_results_directory,
                                 'results.json'), 'r') as f:
                #Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test %s not found.", self.name)

        if self.results == None:
            self.results = dict()
            self.results['name'] = self.name
            self.results['concurrencyLevels'] = self.concurrency_levels
            self.results['queryIntervals'] = self.query_intervals
            self.results['frameworks'] = [t.name for t in self.__gather_tests]
            self.results['duration'] = self.duration
            self.results['rawData'] = dict()
            self.results['rawData']['json'] = dict()
            self.results['rawData']['db'] = dict()
            self.results['rawData']['query'] = dict()
            self.results['rawData']['fortune'] = dict()
            self.results['rawData']['update'] = dict()
            self.results['rawData']['plaintext'] = dict()
            self.results['completed'] = dict()
            self.results['succeeded'] = dict()
            self.results['succeeded']['json'] = []
            self.results['succeeded']['db'] = []
            self.results['succeeded']['query'] = []
            self.results['succeeded']['fortune'] = []
            self.results['succeeded']['update'] = []
            self.results['succeeded']['plaintext'] = []
            self.results['failed'] = dict()
            self.results['failed']['json'] = []
            self.results['failed']['db'] = []
            self.results['failed']['query'] = []
            self.results['failed']['fortune'] = []
            self.results['failed']['update'] = []
            self.results['failed']['plaintext'] = []
            self.results['verify'] = dict()
        else:
            #for x in self.__gather_tests():
            #  if x.name not in self.results['frameworks']:
            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results['frameworks'] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
        if self.database_identity_file != None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
        if self.client_identity_file != None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

        if self.install is not None:
            install = Installer(self, self.install_strategy)
            install.install_software()
Example #15
  def __init__(self, mode, testdir=None):
    '''
    mode = [cisetup|prereq|install|verify] for what we want to do
    testdir  = framework directory we are running
    '''

    self.directory = testdir
    self.mode = mode
    if mode == "cisetup":
      logging.basicConfig(level=logging.DEBUG)
    else:
      logging.basicConfig(level=logging.INFO)

    try:
      # NOTE: THIS IS VERY TRICKY TO GET RIGHT!
      #
      # Our goal: Look at the files changed and determine if we need to 
      # run a verification for this folder. For a pull request, we want to 
      # see the list of files changed by any commit in that PR. For a 
      # push to master, we want to see a list of files changed by the pushed
      # commits. If this list of files contains the current directory, or 
      # contains the toolset/ directory, then we need to run a verification
      # 
      # If modifying, please consider: 
      #  - the commit range for a pull request is the first PR commit to 
      #    the github auto-merge commit
      #  - the commits in the commit range may include merge commits
      #    other than the auto-merge commit. A git log with -m 
      #    will report that *all* the files in the merge were changed, 
      #    but that is not the changeset that we care about
      #  - git diff shows differences, but we care about git log, which
      #    shows information on what was changed during commits
      #  - master can (and will!) move during a build. This is one 
      #    of the biggest problems with using git diff - master will 
      #    be updated, and those updates will include changes to toolset, 
      #    and suddenly every job in the build will start to run instead 
      #    of fast-failing
      #  - commit_range is not set if there was only one commit pushed, 
      #    so be sure to test for that on both master and PR
      #  - commit_range and commit are set very differently for pushes
      #    to an owned branch versus pushes to a pull request, so test both
      #  - For merge commits, the TRAVIS_COMMIT and TRAVIS_COMMIT_RANGE 
      #    will become invalid if additional commits are pushed while a job is 
      #    building. See https://github.com/travis-ci/travis-ci/issues/2666
      #  - If you're really insane, consider that the last commit in a 
      #    pull request could have been a merge commit. This means that 
      #    the github auto-merge commit could have more than two parents
      #  - Travis cannot really support rebasing onto an owned branch; the
      #    commit_range they provide will include commits that are non-existent
      #    in the repo cloned on the workers. See https://github.com/travis-ci/travis-ci/issues/2668
      #  
      #  - TEST ALL THESE OPTIONS: 
      #      - On a branch you own (e.g. your fork's master)
      #          - single commit
      #          - multiple commits pushed at once
      #          - commit+push, then commit+push again before the first
      #            build has finished. Verify all jobs in the first build 
      #            used the correct commit range
      #          - multiple commits, including a merge commit. Verify that
      #            the unrelated merge commit changes are not counted as 
      #            changes the user made
      #      - On a pull request
      #          - repeat all above variations
      #
      #
      # ==== CURRENT SOLUTION FOR PRs ====
      #
      # For pull requests, we will examine Github's automerge commit to see
      # what files would be touched if we merged this into the current master. 
      # You can't trust the travis variables here, as the automerge commit can
      # be different for jobs on the same build. See https://github.com/travis-ci/travis-ci/issues/2666
      # We instead use the FETCH_HEAD, which will always point to the SHA of
      # the latest merge commit. However, if we only used FETCH_HEAD then any
      # new commits to a pull request would instantly start affecting currently
      # running jobs and the list of changed files may become incorrect for
      # those affected jobs. The solution is to walk backward from the FETCH_HEAD
      # to the last commit in the pull request. Based on how github currently 
      # does the automerge, this is the second parent of FETCH_HEAD, and 
      # therefore we use FETCH_HEAD^2 below
      #
      # This may not work perfectly in situations where the user has done
      # advanced merging in their PR. We correctly handle merges in from
      # upstream, but anything more exotic will likely break this logic.
      # However, it is more likely to break by seeing a change in
      # toolset and triggering a full run when a partial run would have been
      # acceptable
      #
      # ==== CURRENT SOLUTION FOR OWNED BRANCHES (e.g. master) ====
      #
      # This one is fairly simple. Find the commit or commit range, and 
      # examine the log of file changes. If you encounter any merges,
      # then fully explode the two parent commits that made the merge
      # and look for the files changed there. This is an aggressive 
      # strategy to ensure that commits to master are always tested 
      # well
      log.debug("TRAVIS_COMMIT_RANGE: %s", os.environ['TRAVIS_COMMIT_RANGE'])
      log.debug("TRAVIS_COMMIT      : %s", os.environ['TRAVIS_COMMIT'])

      is_PR = (os.environ['TRAVIS_PULL_REQUEST'] != "false")
      if is_PR:
        log.debug('I am testing a pull request')
        first_commit = os.environ['TRAVIS_COMMIT_RANGE'].split('...')[0]
        last_commit = subprocess.check_output("git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n')
        log.debug("Guessing that first commit in PR is : %s", first_commit)
        log.debug("Guessing that final commit in PR is : %s", last_commit)

        if first_commit == "":
          # Travis-CI is not yet passing a commit range for pull requests
          # so we must use the automerge's changed file list. This has the 
          # negative effect that new pushes to the PR will immediately 
          # start affecting any new jobs, regardless of the build they are on
          log.debug("No first commit, using Github's automerge commit")
          self.commit_range = "--first-parent -1 -m FETCH_HEAD"
        elif first_commit == last_commit:
          # There is only one commit in the pull request so far, 
          # or Travis-CI is not yet passing the commit range properly 
          # for pull requests. We examine just the one commit using -1
          #
          # On the oddball chance that it's a merge commit, we pray  
          # it's a merge from upstream and also pass --first-parent 
          log.debug("Only one commit in range, examining %s", last_commit)
          self.commit_range = "-m --first-parent -1 %s" % last_commit
        else: 
          # In case they merged in upstream, we only care about the first 
          # parent. For crazier merges, we hope for the best
          self.commit_range = "--first-parent %s...%s" % (first_commit, last_commit)

      if not is_PR:
        log.debug('I am not testing a pull request')
        # If more than one commit was pushed, examine everything including 
        # all details on all merges
        self.commit_range = "-m %s" % os.environ['TRAVIS_COMMIT_RANGE']
        
        # If only one commit was pushed, examine that one. If it was a 
        # merge be sure to show all details
        if os.environ['TRAVIS_COMMIT_RANGE'] == "":
          self.commit_range = "-m -1 %s" % os.environ['TRAVIS_COMMIT']

    except KeyError:
      log.warning("I should only be used for automated integration tests e.g. Travis-CI")
      log.warning("Were you looking for run-tests.py?")
      self.commit_range = "-m HEAD^...HEAD"

    #
    # Find the one test from benchmark_config that we are going to run
    #

    tests = gather_tests()
    self.fwroot = setup_util.get_fwroot()
    target_dir = self.fwroot + '/frameworks/' + testdir
    log.debug("Target directory is %s", target_dir)
    dirtests = [t for t in tests if t.directory == target_dir]
    
    # Travis-CI is linux only
    osvalidtests = [t for t in dirtests if t.os.lower() == "linux"
                  and (t.database_os.lower() == "linux" or t.database_os.lower() == "none")]
    
    # Our Travis-CI only has some databases supported
    validtests = [t for t in osvalidtests if t.database.lower() == "mysql"
                  or t.database.lower() == "postgres"
                  or t.database.lower() == "mongodb"
                  or t.database.lower() == "none"]
    log.info("Found %s usable tests (%s valid for linux, %s valid for linux and {mysql,postgres,mongodb,none}) in directory '%s'", 
      len(dirtests), len(osvalidtests), len(validtests), '$FWROOT/frameworks/' + testdir)
    if len(validtests) == 0:
      log.critical("Found no test that is possible to run in Travis-CI! Aborting!")
      if len(osvalidtests) != 0:
        log.critical("Note: Found these tests that could run in Travis-CI if more databases were supported")
        log.critical("Note: %s", osvalidtests)
        databases_needed = [t.database for t in osvalidtests]
        databases_needed = list(set(databases_needed))
        log.critical("Note: Here are the needed databases:")
        log.critical("Note: %s", databases_needed)
      sys.exit(1)

    self.names = [t.name for t in validtests]
    log.info("Using tests %s to verify directory %s", self.names, '$FWROOT/frameworks/' + testdir)
Beispiel #16
0
def gather_tests(include = [], exclude=[], benchmarker=None):
    '''
    Given test names as strings, returns a list of FrameworkTest objects.
    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
    variables for checking the test directory, the test database os, and
    other useful items.

    With no arguments, every test in this framework will be returned.
    With include, only tests with this exact name will be returned.
    With exclude, all tests but those excluded will be returned.

    A benchmarker is needed to construct full FrameworkTest objects. If
    one is not provided, a default Benchmarker will be created.
    '''
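    # Usage sketch (the test name below comes from the docstring and is illustrative):
    #   gather_tests()                              -> every known test
    #   gather_tests(include=['aspnet-mysql-raw'])  -> only that test
    #   gather_tests(exclude=['aspnet-mysql-raw'])  -> every test except that one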

    # Avoid setting up a circular import
    from benchmark import framework_test
    from benchmark.benchmarker import Benchmarker
    from setup.linux import setup_util

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Old, hacky method to exclude all tests was to
    # request a test known to not exist, such as ''.
    # If test '' was requested, short-circuit and return
    # nothing immediately
    if len(include) == 1 and '' in include:
        return []

    # Setup default Benchmarker using example configuration
    if benchmarker is None:
        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
        config = ConfigParser.SafeConfigParser()
        config.readfp(open(default_config))
        defaults = dict(config.items("Defaults"))

        # Convert strings into proper python types
        for k,v in defaults.iteritems():
            try:
                defaults[k] = literal_eval(v)
            except Exception:
                pass

        # Ensure we only run the __init__ method of Benchmarker
        defaults['install'] = None
        defaults['results_name'] = "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
        defaults['results_environment'] = "My Server Environment"
        defaults['test_dir'] = None
        defaults['quiet'] = True

        benchmarker = Benchmarker(defaults)


    # Search for configuration files
    fwroot = setup_util.get_fwroot()
    config_files = []
    if benchmarker.test_dir:
        for test_dir in benchmarker.test_dir:
            dir_config_files = glob.glob("{!s}/frameworks/{!s}/benchmark_config.json".format(fwroot, test_dir))
            if len(dir_config_files):
                config_files.extend(dir_config_files)
            else:
                raise Exception("Unable to locate tests in test-dir: {!s}".format(test_dir))
    else:
        config_files.extend(glob.glob("{!s}/frameworks/*/*/benchmark_config.json".format(fwroot)))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except ValueError:
                # User-friendly errors
                print("Error loading '{!s}'.".format(config_file_name))
                raise

        # Find all tests in the config file
        config_tests = framework_test.parse_config(config,
                                                   os.path.dirname(config_file_name), benchmarker)

        # Filter
        for test in config_tests:
            if len(include) == 0 and len(exclude) == 0:
                # No filters, we are running everything
                tests.append(test)
            elif test.name in exclude:
                continue
            elif test.name in include:
                tests.append(test)
            else:
                # An include list exists, but this test is
                # not listed there, so we ignore it
                pass

    # Ensure we were able to locate everything that was
    # explicitly included
    if 0 != len(include):
        names = {test.name for test in tests}
        if 0 != len(set(include) - set(names)):
            missing = list(set(include) - set(names))
            raise Exception("Unable to locate tests %s" % missing)

    tests.sort(key=lambda x: x.name)
    return tests
Beispiel #17
0
    def __init__(self, args):

        # Map type strings to their objects
        types = dict()
        types['json'] = JsonTestType()
        types['db'] = DBTestType()
        types['query'] = QueryTestType()
        types['fortune'] = FortuneTestType()
        types['update'] = UpdateTestType()
        types['plaintext'] = PlaintextTestType()

        # Turn type into a map instead of a string
        if args['type'] == 'all':
            args['types'] = types
        else:
            args['types'] = {args['type']: types[args['type']]}
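        # e.g. args['type'] == 'db' turns args['types'] into {'db': <DBTestType instance>}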
        del args['type']

        args['max_threads'] = args['threads']
        args['max_concurrency'] = max(args['concurrency_levels'])

        self.__dict__.update(args)
        # pprint(self.__dict__)

        self.start_time = time.time()
        self.run_test_timeout_seconds = 7200

        # setup logging
        logging.basicConfig(stream=sys.stderr, level=logging.INFO)

        # setup some additional variables
        if self.database_user == None: self.database_user = self.client_user
        if self.database_host == None: self.database_host = self.client_host
        if self.database_identity_file == None:
            self.database_identity_file = self.client_identity_file

        # Remember root directory
        self.fwroot = setup_util.get_fwroot()

        # setup results and latest_results directories
        self.result_directory = os.path.join("results")
        if (args['clean'] or args['clean_all']) and os.path.exists(
                os.path.join(self.fwroot, "results")):
            shutil.rmtree(os.path.join(self.fwroot, "results"))
        self.latest_results_directory = self.latest_results_directory()

        # remove installs directories if --clean-all provided
        self.install_root = "%s/%s" % (self.fwroot, "installs")
        if args['clean_all']:
            os.system("sudo rm -rf " + self.install_root)
            os.mkdir(self.install_root)

        if hasattr(self, 'parse') and self.parse != None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        self.results = None
        try:
            with open(
                    os.path.join(self.latest_results_directory,
                                 'results.json'), 'r') as f:
                #Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test not found.")

        if self.results == None:
            self.results = dict()
            self.results['concurrencyLevels'] = self.concurrency_levels
            self.results['queryIntervals'] = self.query_levels
            self.results['frameworks'] = [t.name for t in self.__gather_tests]
            self.results['duration'] = self.duration
            self.results['rawData'] = dict()
            self.results['rawData']['json'] = dict()
            self.results['rawData']['db'] = dict()
            self.results['rawData']['query'] = dict()
            self.results['rawData']['fortune'] = dict()
            self.results['rawData']['update'] = dict()
            self.results['rawData']['plaintext'] = dict()
            self.results['completed'] = dict()
            self.results['succeeded'] = dict()
            self.results['succeeded']['json'] = []
            self.results['succeeded']['db'] = []
            self.results['succeeded']['query'] = []
            self.results['succeeded']['fortune'] = []
            self.results['succeeded']['update'] = []
            self.results['succeeded']['plaintext'] = []
            self.results['failed'] = dict()
            self.results['failed']['json'] = []
            self.results['failed']['db'] = []
            self.results['failed']['query'] = []
            self.results['failed']['fortune'] = []
            self.results['failed']['update'] = []
            self.results['failed']['plaintext'] = []
            self.results['verify'] = dict()
        else:
            #for x in self.__gather_tests():
            #  if x.name not in self.results['frameworks']:
            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results['frameworks'] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
        if self.database_identity_file != None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
        if self.client_identity_file != None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

        if self.install is not None:
            install = Installer(self, self.install_strategy)
            install.install_software()
    def __init__(self, args):

        # Map type strings to their objects
        types = dict()
        types['json'] = JsonTestType()
        types['db'] = DBTestType()
        types['query'] = QueryTestType()
        types['fortune'] = FortuneTestType()
        types['update'] = UpdateTestType()
        types['plaintext'] = PlaintextTestType()
        types['cached_query'] = CachedQueryTestType()

        # Turn type into a map instead of a string
        if args['type'] == 'all':
            args['types'] = types
        else:
            args['types'] = {args['type']: types[args['type']]}
        del args['type']

        args['max_concurrency'] = max(args['concurrency_levels'])
        if 'pipeline_concurrency_levels' not in args:
            args['pipeline_concurrency_levels'] = [256, 1024, 4096, 16384]

        self.__dict__.update(args)
        # pprint(self.__dict__)

        self.quiet_out = QuietOutputStream(self.quiet)

        self.start_time = time.time()
        self.run_test_timeout_seconds = 7200

        # setup logging
        logging.basicConfig(stream=self.quiet_out, level=logging.INFO)

        # setup some additional variables
        if self.database_user == None: self.database_user = self.client_user
        if self.database_host == None: self.database_host = self.client_host
        if self.database_identity_file == None:
            self.database_identity_file = self.client_identity_file

        # Remember root directory
        self.fwroot = setup_util.get_fwroot()

        # setup current_benchmark.txt location
        self.current_benchmark = "/tmp/current_benchmark.txt"

        if hasattr(self, 'parse') and self.parse != None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        # setup results and latest_results directories
        self.result_directory = os.path.join(self.fwroot, "results")
        if (args['clean'] or args['clean_all']) and os.path.exists(
                os.path.join(self.fwroot, "results")):
            os.system("sudo rm -rf " + self.result_directory + "/*")

        # remove installs directories if --clean-all provided
        self.install_root = "%s/%s" % (self.fwroot, "installs")
        if args['clean_all']:
            os.system("sudo rm -rf " + self.install_root)
            os.mkdir(self.install_root)

        self.results = None
        try:
            with open(
                    os.path.join(self.full_results_directory(),
                                 'results.json'), 'r') as f:
                #Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test not found.")

        if self.results == None:
            self.results = dict()
            self.results['uuid'] = str(uuid.uuid4())
            self.results['name'] = datetime.now().strftime(self.results_name)
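            # e.g. the default results_name "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
            # is rendered by strftime into "(unspecified, datetime = 2018-01-01 12:00:00)"
            # (date illustrative)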
            self.results['environmentDescription'] = self.results_environment
            self.results['startTime'] = int(round(time.time() * 1000))
            self.results['completionTime'] = None
            self.results['concurrencyLevels'] = self.concurrency_levels
            self.results[
                'pipelineConcurrencyLevels'] = self.pipeline_concurrency_levels
            self.results['queryIntervals'] = self.query_levels
            self.results['cachedQueryIntervals'] = self.cached_query_levels
            self.results['frameworks'] = [t.name for t in self.__gather_tests]
            self.results['duration'] = self.duration
            self.results['rawData'] = dict()
            self.results['rawData']['json'] = dict()
            self.results['rawData']['db'] = dict()
            self.results['rawData']['query'] = dict()
            self.results['rawData']['fortune'] = dict()
            self.results['rawData']['update'] = dict()
            self.results['rawData']['plaintext'] = dict()
            self.results['rawData']['cached_query'] = dict()
            self.results['completed'] = dict()
            self.results['succeeded'] = dict()
            self.results['succeeded']['json'] = []
            self.results['succeeded']['db'] = []
            self.results['succeeded']['query'] = []
            self.results['succeeded']['fortune'] = []
            self.results['succeeded']['update'] = []
            self.results['succeeded']['plaintext'] = []
            self.results['succeeded']['cached_query'] = []
            self.results['failed'] = dict()
            self.results['failed']['json'] = []
            self.results['failed']['db'] = []
            self.results['failed']['query'] = []
            self.results['failed']['fortune'] = []
            self.results['failed']['update'] = []
            self.results['failed']['plaintext'] = []
            self.results['failed']['cached_query'] = []
            self.results['verify'] = dict()
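            # Approximate shape of the freshly initialized dict, mirroring the
            # results.json loaded above (values illustrative):
            #   { "uuid": "...", "name": "...", "startTime": 1514764800000,
            #     "concurrencyLevels": [...], "queryIntervals": [...],
            #     "frameworks": ["..."], "rawData": {"json": {}, "db": {}, ...},
            #     "succeeded": {"json": [], ...}, "failed": {"json": [], ...},
            #     "verify": {} }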
        else:
            #for x in self.__gather_tests():
            #  if x.name not in self.results['frameworks']:
            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results['frameworks'] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
        if self.database_identity_file != None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
        if self.client_identity_file != None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
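        # Resulting command sketch (user, host and key path illustrative):
        #   ssh -T -o StrictHostKeyChecking=no tfb@10.0.0.3 -i /home/tfb/.ssh/id_rsa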

        self.__process = None
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
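    # Precedence illustration (values assumed): a duration supplied via a TFB_*
    # environment variable is overridden by duration=... in benchmark.cfg, which
    # is in turn overridden by --duration on the command line.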
    # Do argv default this way, as doing it in the function declaration sets it at definition time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout=Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    os.environ['FWROOT'] = setup_util.get_fwroot()
    os.environ['IROOT'] = os.environ['FWROOT'] + '/installs'
    # 'Ubuntu', '14.04', 'trusty' respectively
    os.environ['TFB_DISTRIB_ID'], os.environ['TFB_DISTRIB_RELEASE'], os.environ['TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
    # App server cpu count
    os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())

    print("FWROOT is {!s}.".format(os.environ['FWROOT']))

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file', default='benchmark.cfg', metavar='FILE',
        help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()

    defaults = {}
    try:
        if not os.path.exists(os.path.join(os.environ['FWROOT'], args.conf_file)) and not os.path.exists(os.path.join(os.environ['FWROOT'], 'benchmark.cfg')):
            print("No config file found. Aborting!")
            exit(1)
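        # benchmark.cfg is an INI-style file read by ConfigParser; a minimal
        # sketch (values illustrative):
        #   [Defaults]
        #   client_user=techempower
        #   client_host=10.0.0.2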
        with open (os.path.join(os.environ['FWROOT'], args.conf_file)):
            config = ConfigParser.SafeConfigParser()
            config.read([os.path.join(os.environ['FWROOT'], args.conf_file)])
            defaults.update(dict(config.items("Defaults")))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        print("Configuration file not found!")
        exit(1)

    ##########################################################
    # Set up default values
    ##########################################################

    # Verify and massage options
    if defaults['client_user'] is None or defaults['client_host'] is None:
        print("client_user and client_host are required!")
        print("Please check your configuration file.")
        print("Aborting!")
        exit(1)

    if defaults['database_user'] is None:
        defaults['database_user'] = defaults['client_user']
    if defaults['database_host'] is None:
        defaults['database_host'] = defaults['client_host']
    if defaults['server_host'] is None:
        defaults['server_host'] = defaults['client_host']
    if defaults['ulimit'] is None:
        defaults['ulimit'] = 200000

    os.environ['ULIMIT'] = str(defaults['ulimit'])

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
                                     parents=[conf_parser],
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # Install options
    parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
    parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--test-dir', nargs='+', dest='test_dir', help='name of framework directory containing all tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')

    # Misc Options
    parser.add_argument('--results-name', help='Gives a name to this set of results, formatted as a date', default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument('--results-environment', help='Describes the environment in which these results were gathered', default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument('--results-upload-uri', default=None, help='A URI where the in-progress results.json file will be POSTed periodically')
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.add_argument('--quiet', action='store_true', default=False, help='Only print a limited set of messages to stdout, keep the bulk of messages in log files only')
    parser.add_argument('--clear-tmp', action='store_true', default=False, help='Clears files written to /tmp after each framework\'s tests complete.')
    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks,
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.parse != None:
        benchmarker.parse_timestamp()
    else:
        return benchmarker.run()
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
    # Do argv default this way, as doing it in the function declaration sets it at definition time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    os.environ['FWROOT'] = setup_util.get_fwroot()
    os.environ['IROOT'] = os.environ['FWROOT'] + '/installs'
    # 'Ubuntu', '14.04', 'trusty' respectively
    os.environ['TFB_DISTRIB_ID'], os.environ[
        'TFB_DISTRIB_RELEASE'], os.environ[
            'TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
    # App server cpu count
    os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())

    print("FWROOT is {!s}.".format(os.environ['FWROOT']))

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file',
        default='benchmark.cfg',
        metavar='FILE',
        help=
        'Optional configuration file to provide argument defaults. All config options can be overridden using the command line.'
    )
    args, remaining_argv = conf_parser.parse_known_args()

    defaults = {}
    try:
        if not os.path.exists(
                os.path.join(
                    os.environ['FWROOT'],
                    args.conf_file)) and not os.path.exists(
                        os.path.join(os.environ['FWROOT'], 'benchmark.cfg')):
            print("No config file found. Aborting!")
            exit(1)
        with open(os.path.join(os.environ['FWROOT'], args.conf_file)):
            config = ConfigParser.SafeConfigParser()
            config.read([os.path.join(os.environ['FWROOT'], args.conf_file)])
            defaults.update(dict(config.items("Defaults")))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        print("Configuration file not found!")
        exit(1)

    ##########################################################
    # Set up default values
    ##########################################################

    # Verify and massage options
    if defaults['client_user'] is None or defaults['client_host'] is None:
        print("client_user and client_host are required!")
        print("Please check your configuration file.")
        print("Aborting!")
        exit(1)

    if defaults['database_user'] is None:
        defaults['database_user'] = defaults['client_user']
    if defaults['database_host'] is None:
        defaults['database_host'] = defaults['client_host']
    if defaults['server_host'] is None:
        defaults['server_host'] = defaults['client_host']
    if defaults['ulimit'] is None:
        defaults['ulimit'] = 200000

    os.environ['ULIMIT'] = str(defaults['ulimit'])

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # Install options
    parser.add_argument('--clean',
                        action='store_true',
                        default=False,
                        help='Removes the results directory')
    parser.add_argument('--clean-all',
                        action='store_true',
                        dest='clean_all',
                        default=False,
                        help='Removes the results and installs directories')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument('--exclude',
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'cached_query',
                            'fortune', 'update', 'plaintext'
                        ],
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.'
    )
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument('--duration',
                        default=15,
                        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--sleep',
        type=int,
        default=60,
        help=
        'the amount of time to sleep after starting each test to allow the server to start up.'
    )

    # Misc Options
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help=
        'A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        default=False,
        help=
        'Causes the configuration to print before any other commands are executed.'
    )
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help=
        'Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--clear-tmp',
        action='store_true',
        default=False,
        help=
        'Clears files written to /tmp after each framework\'s tests complete.')
    parser.set_defaults(
        **defaults
    )  # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks,
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.parse != None:
        benchmarker.parse_timestamp()
    else:
        return benchmarker.run()
  def start(self, out):

    # Setup environment variables
    logDir = os.path.join(self.fwroot, self.benchmarker.full_results_directory(), 'logs', self.name.lower())

    def tee_output(prefix, line):
      # Needs to be one atomic write
      # Explicitly use UTF-8 as it's the most common framework output
      # TODO improve encoding handling
      line = prefix.encode('utf-8') + line

      # Log to current terminal
      sys.stdout.write(line)
      sys.stdout.flush()

      out.write(line)
      out.flush()

    prefix = "Setup %s: " % self.name

    ###########################
    # Build the Docker images
    ##########################

    # Build the test docker file based on the test name
    # then build any additional docker files specified in the benchmark_config
    # Note - If you want to be able to stream the output of the build process you have
    # to use the low level API:
    #  https://docker-py.readthedocs.io/en/stable/api.html#module-docker.api.build

    self.prev_line = os.linesep
    def handle_build_output(line):
      if line.startswith('{"stream":'):
        line = json.loads(line)
        line = line[line.keys()[0]].encode('utf-8')
        # Only prepend the log prefix when the previous chunk ended the line;
        # continuation chunks are written bare so build output stays readable
        if self.prev_line.endswith(os.linesep):
          tee_output(prefix, line)
        else:
          tee_output("", line)
        self.prev_line = line

    docker_buildargs = { 'CPU_COUNT': str(multiprocessing.cpu_count()),
                         'MAX_CONCURRENCY': str(max(self.benchmarker.concurrency_levels)) }

    test_docker_files = ["%s.dockerfile" % self.name]
    if self.docker_files is not None:
      if type(self.docker_files) is list:
        test_docker_files.extend(self.docker_files)
      else:
        raise Exception("docker_files in benchmark_config.json must be an array")

    for test_docker_file in test_docker_files:
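      # Dependency images are built first: reversed() orders them base-most first,
      # and each is tagged "tfb/<dependency>" below so that a later dockerfile can
      # reference it from its FROM line, e.g. (illustrative) foo.dockerfile starting
      # with "FROM tfb/bar" needs bar.dockerfile built and tagged tfb/bar beforehand.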
      deps = list(reversed(gather_docker_dependencies(os.path.join(self.directory, test_docker_file))))

      docker_dir = os.path.join(setup_util.get_fwroot(), "toolset", "setup", "linux", "docker")

      for dependency in deps:
        docker_file = os.path.join(self.directory, dependency + ".dockerfile")
        if not docker_file or not os.path.exists(docker_file):
          docker_file = find_docker_file(docker_dir, dependency + ".dockerfile")
        if not docker_file:
          tee_output(prefix, "Docker build failed; %s could not be found; terminating\n" % (dependency + ".dockerfile"))
          return 1

        # Build the dependency image
        try:
          for line in docker.APIClient(base_url='unix://var/run/docker.sock').build(
            path=os.path.dirname(docker_file),
            dockerfile="%s.dockerfile" % dependency,
            tag="tfb/%s" % dependency,
            buildargs=docker_buildargs,
            forcerm=True
          ):
            handle_build_output(line)
        except Exception as e:
          tee_output(prefix, "Docker dependency build failed; terminating\n")
          print(e)
          return 1

    # Build the test images
    for test_docker_file in test_docker_files:
        try:
          for line in docker.APIClient(base_url='unix://var/run/docker.sock').build(
            path=self.directory,
            dockerfile=test_docker_file,
            tag="tfb/test/%s" % test_docker_file.replace(".dockerfile",""),
            buildargs=docker_buildargs,
            forcerm=True
          ):
            handle_build_output(line)
        except Exception as e:
          tee_output(prefix, "Docker build failed; terminating\n")
          print(e)
          return 1


    ##########################
    # Run the Docker container
    ##########################

    client = docker.from_env()

    for test_docker_file in test_docker_files:
      try:
        def watch_container(container, prefix):
          for line in container.logs(stream=True):
            tee_output(prefix, line)

        container = client.containers.run(
          "tfb/test/%s" % test_docker_file.replace(".dockerfile", ""),
          network_mode="host",
          privileged=True,
          stderr=True,
          detach=True)

        prefix = "Server %s: " % self.name
        watch_thread = Thread(target = watch_container, args=(container,prefix))
        watch_thread.daemon = True
        watch_thread.start()

      except Exception as e:
        tee_output(prefix, "Running docker cointainer: %s failed" % test_docker_file)
        print(e)
        return 1

    return 0
def gather_tests(include=[], exclude=[], benchmarker=None):
    '''
    Given test names as strings, returns a list of FrameworkTest objects.
    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
    variables for checking the test directory, the test database os, and
    other useful items.

    With no arguments, every test in this framework will be returned.
    With include, only tests with this exact name will be returned.
    With exclude, all tests but those excluded will be returned.

    A benchmarker is needed to construct full FrameworkTest objects. If
    one is not provided, a default Benchmarker will be created.
    '''

    # Avoid setting up a circular import
    from benchmark import framework_test
    from benchmark.benchmarker import Benchmarker
    from setup.linux import setup_util

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Old, hacky method to exclude all tests was to
    # request a test known to not exist, such as ''.
    # If test '' was requested, short-circuit and return
    # nothing immediately
    if len(include) == 1 and '' in include:
        return []

    # Setup default Benchmarker using example configuration
    if benchmarker is None:
        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
        config = ConfigParser.SafeConfigParser()
        config.readfp(open(default_config))
        defaults = dict(config.items("Defaults"))

        # Convert strings into proper python types
        for k, v in defaults.iteritems():
            try:
                defaults[k] = literal_eval(v)
            except Exception:
                pass

        # Ensure we only run the __init__ method of Benchmarker
        defaults['install'] = None
        defaults[
            'results_name'] = "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
        defaults['results_environment'] = "My Server Environment"
        defaults['test_dir'] = None
        defaults['test_lang'] = None
        defaults['quiet'] = True

        benchmarker = Benchmarker(defaults)

    # Search for configuration files
    fwroot = setup_util.get_fwroot()
    config_files = []

    if benchmarker.test_lang:
        benchmarker.test_dir = []
        for lang in benchmarker.test_lang:
            if os.path.exists("{!s}/frameworks/{!s}".format(fwroot, lang)):
                for test_dir in os.listdir("{!s}/frameworks/{!s}".format(
                        fwroot, lang)):
                    benchmarker.test_dir.append("{!s}/{!s}".format(
                        lang, test_dir))
            else:
                raise Exception(
                    "Unable to locate language directory: {!s}".format(lang))

    if benchmarker.test_dir:
        for test_dir in benchmarker.test_dir:
            dir_config_files = glob.glob(
                "{!s}/frameworks/{!s}/benchmark_config.json".format(
                    fwroot, test_dir))
            if len(dir_config_files):
                config_files.extend(dir_config_files)
            else:
                raise Exception(
                    "Unable to locate tests in test-dir: {!s}".format(
                        test_dir))
    else:
        config_files.extend(
            glob.glob(
                "{!s}/frameworks/*/*/benchmark_config.json".format(fwroot)))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except ValueError:
                # User-friendly errors
                print("Error loading '{!s}'.".format(config_file_name))
                raise

        # Find all tests in the config file
        config_tests = framework_test.parse_config(
            config, os.path.dirname(config_file_name), benchmarker)

        # Filter
        for test in config_tests:
            if len(include) == 0 and len(exclude) == 0:
                # No filters, we are running everything
                tests.append(test)
            elif test.name in exclude:
                continue
            elif test.name in include:
                tests.append(test)
            else:
                # An include list exists, but this test is
                # not listed there, so we ignore it
                pass

    # Ensure we were able to locate everything that was
    # explicitly included
    if 0 != len(include):
        names = {test.name for test in tests}
        if 0 != len(set(include) - set(names)):
            missing = list(set(include) - set(names))
            raise Exception("Unable to locate tests %s" % missing)

    tests.sort(key=lambda x: x.name)
    return tests
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments 
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
    # Do argv default this way, as doing it in the function declaration sets it at definition time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    fwroot = setup_util.get_fwroot()
    if not fwroot:
        fwroot = os.getcwd()
    setup_util.replace_environ(config='config/benchmark_profile', root=fwroot)
    print "FWROOT is %s" % setup_util.get_fwroot()

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file',
        default='benchmark.cfg',
        metavar='FILE',
        help=
        'Optional configuration file to provide argument defaults. All config options can be overridden using the command line.'
    )
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = {"client-host": "localhost"}

    ##########################################################
    # Set up default values
    ##########################################################
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms. 
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those 
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a 
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # SSH options
    parser.add_argument('-s',
                        '--server-host',
                        default=serverHost,
                        help='The application server.')
    parser.add_argument('-c',
                        '--client-host',
                        default=clientHost,
                        help='The client / load generation server.')
    parser.add_argument(
        '-u',
        '--client-user',
        default=clientUser,
        help='The username to use for SSH to the client instance.')
    parser.add_argument('-i',
                        '--client-identity-file',
                        dest='client_identity_file',
                        default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument(
        '-d',
        '--database-host',
        default=databaHost,
        help=
        'The database server.  If not provided, defaults to the value of --client-host.'
    )
    parser.add_argument(
        '--database-user',
        default=databaUser,
        help=
        'The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.'
    )
    parser.add_argument(
        '--database-identity-file',
        default=dbIdenFile,
        dest='database_identity_file',
        help=
        'The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.'
    )
    parser.add_argument('-p',
                        dest='password_prompt',
                        action='store_true',
                        help='Prompt for password')

    # Install options
    parser.add_argument(
        '--install',
        choices=['client', 'database', 'server', 'all'],
        default=None,
        help=
        'Runs installation script(s) before continuing on to execute the tests.'
    )
    parser.add_argument(
        '--install-error-action',
        choices=['abort', 'continue'],
        default='continue',
        help='action to take in case of error during installation')
    parser.add_argument(
        '--install-strategy',
        choices=['unified', 'pertest'],
        default='unified',
        help=
        '''Affects where software is installed: with unified, all server software is installed into a single directory. 
        With pertest, each test gets its own installs directory, but installation takes longer'''
    )
    parser.add_argument(
        '--install-only',
        action='store_true',
        default=False,
        help='Do not run benchmark or verification, just install and exit')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude',
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'fortune', 'update',
                            'plaintext'
                        ],
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')
    parser.add_argument(
        '--list-test-metadata',
        action='store_true',
        default=False,
        help=
        'writes all the test metadata as a JSON file in the results directory')
    parser.add_argument(
        '--name',
        default="ec2",
        help=
        'The name to give this test. Results will be placed in a folder using this name.'
    )
    parser.add_argument(
        '--os',
        choices=['linux', 'windows'],
        default='linux',
        help=
        'The operating system of the application/framework server (the one running '
        + 'this binary).')
    parser.add_argument('--database-os',
                        choices=['linux', 'windows'],
                        default='linux',
                        help='The operating system of the database server.')

    # Benchmark options
    parser.add_argument(
        '--concurrency-levels',
        default=[8, 16, 32, 64, 128, 256],
        help=
        'Runs the wrk benchmarker with different concurrency values (type int-sequence)',
        action=StoreSeqAction)
    parser.add_argument(
        '--query-levels',
        default=[1, 5, 10, 15, 20],
        help=
        'Database queries requested per HTTP connection, used during query test (type int-sequence)',
        action=StoreSeqAction)
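    # Note: the exact "int-sequence" grammar is defined by StoreSeqAction (not shown
    # here); a plain comma-separated list such as "8,16,32,64" is the safe assumption.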
    parser.add_argument(
        '--threads',
        default=maxThreads,
        help=
        'Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system',
        type=int)
    parser.add_argument('--duration',
                        default=15,
                        type=int,
                        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--sleep',
        type=int,
        default=60,
        help=
        'the amount of time to sleep after starting each test to allow the server to start up.'
    )

    # Misc Options
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        default=False,
        help=
        'Causes the configuration to print before any other commands are executed.'
    )
    parser.set_defaults(
        **defaults
    )  # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    # Verify and massage options
    if args.client_user is None:
        print 'Usernames (e.g. --client-user and --database-user) are required!'
        print 'The system will SSH into the client and the database for the install stage'
        print 'Aborting'
        exit(1)

    if args.database_user is None:
        args.database_user = args.client_user

    if args.database_host is None:
        args.database_host = args.client_host

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks,
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif args.parse is not None:
        benchmarker.parse_timestamp()
    elif not args.install_only:
        return benchmarker.run()
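    # A sketch of a typical invocation of this entry point (the script name and the
    # host values are illustrative; the flags are the ones defined above):
    #
    #   python toolset/run-tests.py --mode verify --test aspnet-mysql-raw \
    #       --client-host 10.0.0.2 --client-user benchuser \
    #       --database-host 10.0.0.3 --duration 30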
Beispiel #24
0
def gather_tests(include=None, exclude=None, benchmarker=None):
  '''
  Given test names as strings, returns a list of FrameworkTest objects. 
  For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
  variables for checking the test directory, the test database os, and 
  other useful items. 

  With no arguments, every test in this framework will be returned.  
  With include, only tests with this exact name will be returned. 
  With exclude, all tests but those excluded will be returned. 

  A benchmarker is needed to construct full FrameworkTest objects. If
  one is not provided, a default Benchmarker will be created. 
  '''
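  # Usage sketch (the test name below is the example from the docstring above):
  #   gather_tests()                                -> every known test
  #   gather_tests(include=['aspnet-mysql-raw'])    -> only that test
  #   gather_tests(exclude=['aspnet-mysql-raw'])    -> everything but that test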

  # Avoid setting up a circular import
  from benchmark import framework_test
  from benchmark.benchmarker import Benchmarker
  from setup.linux import setup_util

  # Help callers out a bit
  if include is None:
    include = []
  if exclude is None:
    exclude = []
  
  # Setup default Benchmarker using example configuration
  if benchmarker is None:
    print "Creating Benchmarker from benchmark.cfg.example"
    default_config = setup_util.get_fwroot() + "/benchmark.cfg.example"
    config = ConfigParser.SafeConfigParser()
    with open(default_config) as config_fp:
      config.readfp(config_fp)
    defaults = dict(config.items("Defaults"))
    
    # Convert strings into proper python types
    for k,v in defaults.iteritems():
      try:
        defaults[k] = literal_eval(v)
      except (ValueError, SyntaxError):
        # Leave values that are not Python literals as plain strings
        pass

    # Ensure we only run the __init__ method of Benchmarker
    defaults['install'] = None
    
    benchmarker = Benchmarker(defaults)

  
  # Search in both old and new directories
  fwroot = setup_util.get_fwroot() 
  config_files = glob.glob("%s/*/benchmark_config" % fwroot) 
  config_files.extend(glob.glob("%s/frameworks/*/*/benchmark_config" % fwroot))
  
  tests = []
  for config_file_name in config_files:
    config = None
    with open(config_file_name, 'r') as config_file:
      try:
        config = json.load(config_file)
      except ValueError:
        # User-friendly errors
        print("Error loading '%s'." % config_file_name)
        raise

    # Find all tests in the config file
    config_tests = framework_test.parse_config(config, 
      os.path.dirname(config_file_name), benchmarker)
    
    # Filter
    for test in config_tests:
      if test.name in exclude:
        continue
      elif len(include) == 0 or test.name in include:
        tests.append(test)

  tests.sort(key=lambda x: x.name)
  return tests
    def __init__(self, args):

        # Map type strings to their objects
        types = dict()
        types["json"] = JsonTestType()
        types["db"] = DBTestType()
        types["query"] = QueryTestType()
        types["fortune"] = FortuneTestType()
        types["update"] = UpdateTestType()
        types["plaintext"] = PlaintextTestType()

        # Turn type into a map instead of a string
        if args["type"] == "all":
            args["types"] = types
        else:
            args["types"] = {args["type"]: types[args["type"]]}
        del args["type"]

        args["max_threads"] = args["threads"]
        args["max_concurrency"] = max(args["concurrency_levels"])

        self.__dict__.update(args)
        # pprint(self.__dict__)

        self.start_time = time.time()
        self.run_test_timeout_seconds = 7200

        # setup logging
        logging.basicConfig(stream=sys.stderr, level=logging.INFO)

        # setup some additional variables
        if self.database_user is None:
            self.database_user = self.client_user
        if self.database_host is None:
            self.database_host = self.client_host
        if self.database_identity_file is None:
            self.database_identity_file = self.client_identity_file

        # Remember root directory
        self.fwroot = setup_util.get_fwroot()

        # setup results and latest_results directories
        self.result_directory = os.path.join("results", self.name)
        if args["clean"] or args["clean_all"]:
            shutil.rmtree(os.path.join(self.fwroot, "results"))
        self.latest_results_directory = self.latest_results_directory()

        # remove installs directories if --clean-all provided
        self.install_root = "%s/%s" % (self.fwroot, "installs")
        if args["clean_all"]:
            os.system("rm -rf " + self.install_root)
            os.mkdir(self.install_root)

        if hasattr(self, "parse") and self.parse != None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
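            # e.g. "20161005143000" for 2016-10-05 14:30:00 local time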

        self.results = None
        try:
            with open(os.path.join(self.latest_results_directory, "results.json"), "r") as f:
                # Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test %s not found.", self.name)

        if self.results is None:
            self.results = dict()
            self.results["name"] = self.name
            self.results["concurrencyLevels"] = self.concurrency_levels
            self.results["queryIntervals"] = self.query_levels
            self.results["frameworks"] = [t.name for t in self.__gather_tests]
            self.results["duration"] = self.duration
            # One bucket per test type for raw data and success/failure bookkeeping
            test_types = ["json", "db", "query", "fortune", "update", "plaintext"]
            self.results["rawData"] = {t: dict() for t in test_types}
            self.results["completed"] = dict()
            self.results["succeeded"] = {t: [] for t in test_types}
            self.results["failed"] = {t: [] for t in test_types}
            self.results["verify"] = dict()
        else:
            # for x in self.__gather_tests():
            #  if x.name not in self.results['frameworks']:
            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results["frameworks"] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
        if self.database_identity_file is not None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
        if self.client_identity_file is not None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
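        # The resulting command prefix looks like (illustrative values):
        #   ssh -T -o StrictHostKeyChecking=no benchuser@10.0.0.3 -i ~/.ssh/client.pem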

        if self.install is not None:
            install = Installer(self, self.install_strategy)
            install.install_software()