def stop(self, out, err):
    # Load profile for this installation
    profile="%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
      profile="$FWROOT/config/benchmark_profile"
    
    setup_util.replace_environ(config=profile, 
              command='export TROOT=%s && export IROOT=%s' %
              (self.directory, self.install_root))

    # Run the module stop (inside parent of TROOT)
    #     - we use the parent as a historical accident - a lot of tests
    #       use subprocess's cwd argument already
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module stop (cwd=%s)", os.path.dirname(self.troot))
    try:
      retcode = self.setup_module.stop(out, err)
      if retcode is None:
        retcode = 0
    except Exception:
      retcode = 1 
      st = traceback.format_exc()
      st = '\n'.join((4 * ' ') + x for x in st.splitlines())
      st = "Stop exception:\n%s\n" % st
      logging.info(st)
      err.write(st + '\n')
    os.chdir(previousDir)

    # Give processes sent a SIGTERM a moment to shut down gracefully
    time.sleep(5)

    return retcode
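
All of these examples lean on setup_util.replace_environ, which is not shown here. A minimal sketch of the behavior they appear to assume, with illustrative names only (not the canonical implementation): source the bash profile plus any extra exports in a subshell, then mirror the resulting environment back into os.environ.

import os
import subprocess

def replace_environ(config=None, root=None, command=''):
    # Sketch only. Make FWROOT visible so "$FWROOT/..." config paths
    # expand when bash sources the profile.
    if root:
        os.environ['FWROOT'] = root
    script = []
    if config:
        script.append('. %s' % config)  # source the bash profile
    if command:
        script.append(command)          # e.g. "export TROOT=... && export IROOT=..."
    script.append('env')                # dump the resulting environment
    output = subprocess.check_output(['bash', '-c', ' && '.join(script)])
    for line in output.splitlines():
        # Naive parse: assumes single-line variable values
        if '=' in line:
            key, _, value = line.partition('=')
            os.environ[key] = value
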
    def start(self, out, err):
        # Load profile for this installation
        profile = "%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            logging.warning("Directory %s does not have a bash_profile.sh" %
                            self.directory)
            profile = "$FWROOT/config/benchmark_profile"

        # Setup variables for TROOT and IROOT
        setup_util.replace_environ(
            config=profile,
            command='export TROOT=%s && export IROOT=%s' %
            (self.directory, self.install_root))

        # Because start can take so long, we periodically print a status
        # line to let the user know we are working
        class ProgressPrinterThread(Thread):
            def __init__(self, event):
                Thread.__init__(self)
                self.stopped = event

            def run(self):
                while not self.stopped.wait(20):
                    sys.stderr.write("Waiting for start to return...\n")

        stopFlag = Event()
        thread = ProgressPrinterThread(stopFlag)
        thread.start()

        # Run the module start (inside parent of TROOT)
        #     - we use the parent as a historical accident - a lot of tests
        #       use subprocess's cwd argument already
        previousDir = os.getcwd()
        os.chdir(os.path.dirname(self.troot))
        logging.info("Running setup module start (cwd=%s)",
                     os.path.dirname(self.troot))
        try:
            retcode = self.setup_module.start(self, out, err)
            if retcode is None:
                retcode = 0
        except Exception:
            retcode = 1
            st = traceback.format_exc()
            st = '\n'.join((4 * ' ') + x for x in st.splitlines())
            st = "Start exception:\n%s" % st
            logging.info(st)
            err.write(st + '\n')
        os.chdir(previousDir)

        # Stop the progress printer
        stopFlag.set()

        logging.info("Called setup.py start")

        return retcode
Example #3
  def start(self, out, err):
    # Load profile for this installation
    profile="%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logging.warning("Framework %s does not have a bash_profile" % self.name)
      profile="$FWROOT/config/benchmark_profile"
    
    set_iroot = "export IROOT=%s" % self.install_root
    setup_util.replace_environ(config=profile, command=set_iroot)

    return self.setup_module.start(self.benchmarker, out, err)
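
The self.setup_module these wrappers call into is a per-framework setup.py loaded at runtime; the loading code is not part of these examples. One plausible loader, with hypothetical names and paths:

import imp
import os

def load_setup_module(troot, setup_file='setup'):
    # Hypothetical: load <troot>/<setup_file>.py as a module so its
    # start()/stop() functions can be called directly.
    path = os.path.join(troot, '%s.py' % setup_file)
    return imp.load_source('setup_module', path)
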
  def start(self, out, err):
    # Load profile for this installation
    profile="%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
      profile="$FWROOT/config/benchmark_profile"

    # Setup variables for TROOT and IROOT
    setup_util.replace_environ(config=profile, 
              command='export TROOT=%s && export IROOT=%s' %
              (self.directory, self.install_root))

    # Because start can take so long, we periodically print a status
    # line to let the user know we are working
    class ProgressPrinterThread(Thread):
      def __init__(self, event):
          Thread.__init__(self)
          self.stopped = event

      def run(self):
        while not self.stopped.wait(20):
          sys.stderr.write("Waiting for start to return...\n")
    stopFlag = Event()
    thread = ProgressPrinterThread(stopFlag)
    thread.start()

    # Run the module start (inside parent of TROOT)
    #     - we use the parent as a historical accident - a lot of tests
    #       use subprocess's cwd argument already
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module start (cwd=%s)", os.path.dirname(self.troot))
    try:
      retcode = self.setup_module.start(self, out, err)
      if retcode is None:
        retcode = 0
    except Exception:
      retcode = 1
      st = traceback.format_exc()
      st = '\n'.join((4 * ' ') + x for x in st.splitlines())
      st = "Start exception:\n%s" % st
      logging.info(st)
      err.write(st + '\n')
    os.chdir(previousDir)

    # Stop the progress printer
    stopFlag.set()

    logging.info("Called setup.py start")

    return retcode
    def stop(self, out, err):
        # Load profile for this installation
        profile = "%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            logging.warning("Directory %s does not have a bash_profile.sh" %
                            self.directory)
            profile = "$FWROOT/config/benchmark_profile"

        setup_util.replace_environ(
            config=profile,
            command='export TROOT=%s && export IROOT=%s' %
            (self.directory, self.install_root))

        # Run the module stop (inside parent of TROOT)
        #     - we use the parent as a historical accident - a lot of tests
        #       use subprocess's cwd argument already
        previousDir = os.getcwd()
        os.chdir(os.path.dirname(self.troot))
        logging.info("Running setup module stop (cwd=%s)",
                     os.path.dirname(self.troot))
        try:
            retcode = self.setup_module.stop(out, err)
            if retcode is None:
                retcode = 0
        except Exception:
            retcode = 1
            st = traceback.format_exc()
            st = '\n'.join((4 * ' ') + x for x in st.splitlines())
            st = "Stop exception:\n%s\n" % st
            logging.info(st)
            err.write(st + '\n')
        os.chdir(previousDir)

        # Give processes sent a SIGTERM a moment to shut down gracefully
        time.sleep(5)

        return retcode
    def start(self, out, err):
        # Load profile for this installation
        profile = "%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            profile = "$FWROOT/config/benchmark_profile"

        # Setup variables for TROOT and IROOT
        setup_util.replace_environ(
            config=profile,
            command=
            'export TROOT=%s && export IROOT=%s && export DBHOST=%s && export MAX_THREADS=%s && export OUT=%s && export ERR=%s'
            % (self.directory, self.install_root, self.database_host,
               self.benchmarker.threads, os.path.join(self.fwroot, out.name),
               os.path.join(self.fwroot, err.name)))

        # Because start can take so long, we periodically print a status
        # line to let the user know we are working
        class ProgressPrinterThread(Thread):
            def __init__(self, event):
                Thread.__init__(self)
                self.stopped = event

            def run(self):
                while not self.stopped.wait(20):
                    sys.stderr.write("Waiting for start to return...\n")

        stopFlag = Event()
        thread = ProgressPrinterThread(stopFlag)
        thread.start()

        # Run the module start (inside parent of TROOT)
        #     - we use the parent as a historical accident - a lot of tests
        #       use subprocess's cwd argument already
        previousDir = os.getcwd()
        os.chdir(os.path.dirname(self.troot))
        logging.info("Running setup module start (cwd=%s)", self.directory)

        # Write the stderr to a temp file to be read and fed back
        # to the user via logging later.
        with open('temp', 'w') as errout:
            # Run the start script for the test as the "testrunner" user.
            # This requires superuser privs, so `sudo` is necessary.
            #   -u [username] The username
            #   -E Preserves the current environment variables
            #   -H Forces the home var (~) to be reset to the user specified
            #   -e Force bash to exit on first error
            # Note: check_call is a blocking call, so any startup scripts
            # run by the framework that need to keep running (read: the
            # server has started and needs to remain up) should be
            # executed in the background.
            try:
                retcode = subprocess.check_call(
                    'sudo -u %s -E -H bash -e %s.sh' %
                    (self.benchmarker.runner_user, self.setup_file),
                    cwd=self.directory,
                    shell=True,
                    stderr=errout,
                    stdout=out)
                if retcode is None:
                    retcode = 0
            except Exception:
                retcode = 1
        with open('temp', 'r') as errout:
            # Read out temp error output in its entirety
            body = errout.read()
            if len(body) > 0:
                # Log it to the user.
                logging.error(body)
                # Log it to our err.txt file
                err.write(body)
        # We are done with our temp file - delete it
        os.remove('temp')
        os.chdir(previousDir)

        # Stop the progress printer
        stopFlag.set()

        logging.info("Executed %s.sh", self.setup_file)

        return retcode
  def start(self, out, err):

    # Setup environment variables    
    logDir = os.path.join(self.fwroot, self.benchmarker.latest_results_directory, 'logs', self.name.lower())
    bash_functions_path = os.path.join(self.fwroot, 'toolset/setup/linux/bash_functions.sh')
    setup_util.replace_environ(config='$FWROOT/config/benchmark_profile', 
              command='''\
              export TROOT=%s       &&  \
              export IROOT=%s       &&  \
              export DBHOST=%s      &&  \
              export LOGDIR=%s      &&  \
              export MAX_THREADS=%s &&  \
              export MAX_CONCURRENCY=%s \
              ''' % (
                self.directory, 
                self.install_root, 
                self.database_host, 
                logDir,
                self.benchmarker.threads,
                max(self.benchmarker.concurrency_levels)))

    # Always ensure that IROOT belongs to the runner_user
    chown = "sudo chown -R %s:%s %s" % (self.benchmarker.runner_user,
      self.benchmarker.runner_user, os.path.join(self.fwroot, self.install_root))
    subprocess.check_call(chown, shell=True, cwd=self.fwroot, executable='/bin/bash')

    # Run the module start inside parent of TROOT
    #  - we use the parent as a historical accident, a number of tests
    #    still refer to their TROOT manually
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module start (cwd=%s)", self.directory)
      
    # Run the start script for the test as the "testrunner" user
    # 
    # `sudo` - Switching user requires superuser privs
    #   -u [username] The username
    #   -E Preserves the current environment variables
    #   -H Forces the home var (~) to be reset to the user specified
    # `stdbuf` - Disable buffering, send output to python ASAP
    #   -o0 zero-sized buffer for stdout
    #   -e0 zero-sized buffer for stderr
    # `bash` - Run the setup.sh script using bash
    #   -e Force bash to exit on first error
    #   -x Turn on bash tracing e.g. print commands before running
    #
    # Most servers do not output to stdout/stderr while serving 
    # requests so there is no performance hit from disabling 
    # output buffering. This disabling is necessary to 
    # a) allow TFB to show output in real time and b) avoid losing
    # output in the buffer when the testrunner processes are forcibly 
    # killed
    # 
    # See http://www.pixelbeat.org/programming/stdio_buffering/
    # See https://blogs.gnome.org/markmc/2013/06/04/async-io-and-python/
    # See http://eyalarubas.com/python-subproc-nonblock.html
    command = 'sudo -u %s -E -H stdbuf -o0 -e0 bash -exc "source %s && source %s.sh"' % (
      self.benchmarker.runner_user,
      bash_functions_path, 
      os.path.join(self.troot, self.setup_file))
    
    debug_command = '''\
      export FWROOT=%s          &&  \\
      export TROOT=%s           &&  \\
      export IROOT=%s           &&  \\
      export DBHOST=%s          &&  \\
      export LOGDIR=%s          &&  \\
      export MAX_THREADS=%s     &&  \\
      export MAX_CONCURRENCY=%s && \\
      cd %s && \\
      %s''' % (self.fwroot, 
        self.directory, 
        self.install_root, 
        self.database_host,
        logDir,
        self.benchmarker.threads, 
        max(self.benchmarker.concurrency_levels),
        self.directory,
        command)
    logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command)


    def tee_output(prefix, line):
      # Needs to be one atomic write
      # Explicitly use UTF-8 as it's the most common framework output 
      # TODO improve encoding handling 
      line = prefix.encode('utf-8') + line

      # Log to current terminal
      sys.stdout.write(line)
      sys.stdout.flush()
      # logging.error("".join([prefix, line]))

      out.write(line)
      out.flush()

    # Start the setup.sh command
    p = subprocess.Popen(command, cwd=self.directory, 
          shell=True, stdout=subprocess.PIPE, 
          stderr=subprocess.STDOUT)
    nbsr = setup_util.NonBlockingStreamReader(p.stdout, 
      "%s: %s.sh and framework processes have terminated" % (self.name, self.setup_file))

    # Set a limit on total execution time of setup.sh
    timeout = datetime.now() + timedelta(minutes = 105)
    time_remaining = timeout - datetime.now()

    # Travis-CI aborts the job after 10 minutes without output, so print
    # to stdout well within that window
    travis_timeout = datetime.now() + timedelta(minutes = 5)

    # Flush output until setup.sh work is finished. This is 
    # either a) when setup.sh exits b) when the port is bound
    # c) when we run out of time. Note that 'finished' doesn't 
    # guarantee setup.sh process is dead - the OS may choose to make 
    # setup.sh a zombie process if it still has living children
    #
    # Note: child processes forked (using &) will remain alive 
    # after setup.sh has exited. They will have inherited the 
    # stdout/stderr descriptors and will be directing their 
    # output to the pipes. 
    #
    prefix = "Setup %s: " % self.name
    while (p.poll() is None
      and not self.benchmarker.is_port_bound(self.port)
      and time_remaining.total_seconds() >= 0):
      
      # The conditions above are slow to check, so 
      # we will delay output substantially if we only
      # print one line per condition check. 
      # Adding a tight loop here mitigates the effect, 
      # ensuring that most of the output directly from 
      # setup.sh is sent to tee_output before the outer
      # loop exits and prints things like "setup.sh exited"
      # 
      for i in xrange(10):
        try:
          line = nbsr.readline(0.05)
          if line:
            tee_output(prefix, line)

            # Reset Travis-CI timer
            travis_timeout = datetime.now() + timedelta(minutes = 5)
        except setup_util.EndOfStream:
          tee_output(prefix, "Setup has terminated\n")
          break
      time_remaining = timeout - datetime.now()

      if (travis_timeout - datetime.now()).total_seconds() < 0:
        sys.stdout.write(prefix + 'Printing so Travis-CI does not time out\n')
        sys.stdout.write(prefix + "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
          p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
        sys.stdout.flush()
        travis_timeout = datetime.now() + timedelta(minutes = 5)

    # Did we time out?
    if time_remaining.total_seconds() < 0: 
      tee_output(prefix, "%s.sh timed out!! Aborting...\n" % self.setup_file)
      p.kill()
      return 1

    # What's our return code? 
    # If setup.sh has terminated, use that code
    # Otherwise, detect if the port was bound
    tee_output(prefix, "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
      p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
    retcode = (p.poll() if p.poll() is not None else 0 if self.benchmarker.is_port_bound(self.port) else 1)
    if p.poll() is not None:
      tee_output(prefix, "%s.sh process exited naturally with %s\n" % (self.setup_file, p.poll()))
    elif self.benchmarker.is_port_bound(self.port):
      tee_output(prefix, "Bound port detected on %s\n" % self.port)

    # Before we return control to the benchmarker, spin up a 
    # thread to keep an eye on the pipes in case the running 
    # framework uses stdout/stderr. Once all processes accessing
    # the subprocess.PIPEs are dead, this thread will terminate. 
    # Use a different prefix to indicate this is the framework 
    # speaking
    prefix = "Server %s: " % self.name
    def watch_child_pipes(nbsr, prefix):
      while True:
        try:
          line = nbsr.readline(60)
          if line:
            tee_output(prefix, line)
        except setup_util.EndOfStream:
          tee_output(prefix, "Framework processes have terminated\n")
          return

    watch_thread = Thread(target = watch_child_pipes,
      args = (nbsr, prefix))
    watch_thread.daemon = True
    watch_thread.start()

    logging.info("Executed %s.sh, returning %s", self.setup_file, retcode)
    os.chdir(previousDir)

    return retcode
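
The polling loops above depend on setup_util.NonBlockingStreamReader and setup_util.EndOfStream, neither of which is shown. A sketch of the contract they assume (readline(timeout) returns a line, returns None on timeout, raises EndOfStream once the pipe is closed and drained); this is an illustration, not the library's actual code:

from Queue import Queue, Empty
from threading import Thread

class EndOfStream(Exception):
    pass

class NonBlockingStreamReader(object):
    def __init__(self, stream, eof_message=None):
        self._queue = Queue()
        self._eof_message = eof_message
        self._done = False

        def pump():
            # Drain the pipe on a background thread so readers never block
            for line in iter(stream.readline, ''):
                self._queue.put(line)
            self._queue.put(None)  # sentinel: stream closed and drained

        t = Thread(target=pump)
        t.daemon = True
        t.start()

    def readline(self, timeout=None):
        if self._done:
            raise EndOfStream(self._eof_message)
        try:
            line = self._queue.get(timeout=timeout)
        except Empty:
            return None  # nothing available within the timeout
        if line is None:
            self._done = True
            raise EndOfStream(self._eof_message)
        return line
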
Example #8
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments:
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
    # Default argv this way: a default in the function declaration is evaluated only once, at definition time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    fwroot = setup_util.get_fwroot()
    if not fwroot: 
        fwroot = os.getcwd()
    setup_util.replace_environ(config='config/benchmark_profile', root=fwroot)
    print "FWROOT is %s"%setup_util.get_fwroot()

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = { "client-host":"localhost"}

    ##########################################################
    # Set up default values
    ##########################################################        
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms. 
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those 
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a 
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # SSH options
    parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.')
    parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.')
    parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.')
    parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument('-d', '--database-host', default=databaHost,
                        help='The database server.  If not provided, defaults to the value of --client-host.')
    parser.add_argument('--database-user', default=databaUser,
                        help='The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.')
    parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file',
                        help='The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.')
    parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password')
    
    
    # Install options
    parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default=None,
                        help='Runs installation script(s) before continuing on to execute the tests.')
    parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
    parser.add_argument('--install-strategy', choices=['unified', 'pertest'], default='unified',
        help='''With unified, all server software is installed into a single directory.
        With pertest, each test gets its own install directory, but installation takes longer''')
    parser.add_argument('--install-only', action='store_true', default=False, help='Do not run benchmark or verification, just install and exit')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
    parser.add_argument('--list-test-metadata', action='store_true', default=False, help='writes all the test metadata as a JSON file in the results directory')
    parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
    parser.add_argument('--os', choices=['linux', 'windows'], default='linux', help='The operating system of the application/framework server (the one running this binary).')
    parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')

    # Benchmark options
    parser.add_argument('--concurrency-levels', default=[8, 16, 32, 64, 128, 256], help='Runs wrk benchmarker with different concurrency value (type int-sequence)', action=StoreSeqAction)
    parser.add_argument('--query-levels', default=[1, 5, 10, 15, 20], help='Database queries requested per HTTP connection, used during query test (type int-sequence)', action=StoreSeqAction)
    parser.add_argument('--threads', default=maxThreads, help='Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system', type=int)
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')

    # Misc Options
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    # Verify and massage options
    if args.client_user is None:
        print 'Usernames (e.g. --client-user and --database-user) are required!'
        print 'The system will SSH into the client and the database for the install stage'
        print 'Aborting'
        exit(1)

    if args.database_user is None:
        args.database_user = args.client_user

    if args.database_host is None:
        args.database_host = args.client_host

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks, 
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif args.parse is not None:
        benchmarker.parse_timestamp()
    elif not args.install_only:
        return benchmarker.run()
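
main wraps sys.stdout in an Unbuffered helper that is not defined in these examples. A minimal sketch of the usual recipe it appears to follow:

class Unbuffered(object):
    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        # Flush after every write so our messages interleave correctly
        # with subprocess output
        self.stream.write(data)
        self.stream.flush()

    def writelines(self, lines):
        self.stream.writelines(lines)
        self.stream.flush()

    def __getattr__(self, attr):
        return getattr(self.stream, attr)  # delegate everything else
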
def main(argv=None):
    ''' Runs the program. There are three ways to pass arguments:
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1, so config file trumps environment variables
    but command line flags have the final say
    '''
    # Default argv this way: a default in the function declaration is evaluated only once, at definition time
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update python environment
    # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update environment for shell scripts
    fwroot = setup_util.get_fwroot()
    if not fwroot:
        fwroot = os.getcwd()
    setup_util.replace_environ(config='config/benchmark_profile', root=fwroot)
    print "FWROOT is %s" % setup_util.get_fwroot()

    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file',
        default='benchmark.cfg',
        metavar='FILE',
        help=
        'Optional configuration file to provide argument defaults. All config options can be overridden using the command line.'
    )
    args, remaining_argv = conf_parser.parse_known_args()

    try:
        with open(args.conf_file):
            config = ConfigParser.SafeConfigParser()
            config.read([os.getcwd() + '/' + args.conf_file])
            defaults = dict(config.items("Defaults"))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        if args.conf_file != 'benchmark.cfg':
            print 'Configuration file not found!'
        defaults = {"client-host": "localhost"}

    ##########################################################
    # Set up default values
    ##########################################################
    serverHost = os.environ.get('TFB_SERVER_HOST')
    clientHost = os.environ.get('TFB_CLIENT_HOST')
    clientUser = os.environ.get('TFB_CLIENT_USER')
    clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE')
    databaHost = os.getenv('TFB_DATABASE_HOST', clientHost)
    databaUser = os.getenv('TFB_DATABASE_USER', clientUser)
    dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden)
    maxThreads = 8
    try:
        maxThreads = multiprocessing.cpu_count()
    except Exception:
        pass

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms. 
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those 
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a 
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # SSH options
    parser.add_argument('-s',
                        '--server-host',
                        default=serverHost,
                        help='The application server.')
    parser.add_argument('-c',
                        '--client-host',
                        default=clientHost,
                        help='The client / load generation server.')
    parser.add_argument(
        '-u',
        '--client-user',
        default=clientUser,
        help='The username to use for SSH to the client instance.')
    parser.add_argument('-i',
                        '--client-identity-file',
                        dest='client_identity_file',
                        default=clientIden,
                        help='The key to use for SSH to the client instance.')
    parser.add_argument(
        '-d',
        '--database-host',
        default=databaHost,
        help=
        'The database server.  If not provided, defaults to the value of --client-host.'
    )
    parser.add_argument(
        '--database-user',
        default=databaUser,
        help=
        'The username to use for SSH to the database instance.  If not provided, defaults to the value of --client-user.'
    )
    parser.add_argument(
        '--database-identity-file',
        default=dbIdenFile,
        dest='database_identity_file',
        help=
        'The key to use for SSH to the database instance.  If not provided, defaults to the value of --client-identity-file.'
    )
    parser.add_argument('-p',
                        dest='password_prompt',
                        action='store_true',
                        help='Prompt for password')

    # Install options
    parser.add_argument(
        '--install',
        choices=['client', 'database', 'server', 'all'],
        default=None,
        help=
        'Runs installation script(s) before continuing on to execute the tests.'
    )
    parser.add_argument(
        '--install-error-action',
        choices=['abort', 'continue'],
        default='continue',
        help='action to take in case of error during installation')
    parser.add_argument(
        '--install-strategy',
        choices=['unified', 'pertest'],
        default='unified',
        help=
        '''With unified, all server software is installed into a single directory.
        With pertest, each test gets its own install directory, but installation takes longer'''
    )
    parser.add_argument(
        '--install-only',
        action='store_true',
        default=False,
        help='Do not run benchmark or verification, just install and exit')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--exclude',
                        nargs='+',
                        help='names of tests to exclude')
    parser.add_argument('--type',
                        choices=[
                            'all', 'json', 'db', 'query', 'fortune', 'update',
                            'plaintext'
                        ],
                        default='all',
                        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown')
    parser.add_argument('--list-tests',
                        action='store_true',
                        default=False,
                        help='lists all the known tests that can run')
    parser.add_argument(
        '--list-test-metadata',
        action='store_true',
        default=False,
        help=
        'writes all the test metadata as a JSON file in the results directory')
    parser.add_argument(
        '--name',
        default="ec2",
        help=
        'The name to give this test. Results will be placed in a folder using this name.'
    )
    parser.add_argument(
        '--os',
        choices=['linux', 'windows'],
        default='linux',
        help=
        'The operating system of the application/framework server (the one running this binary).')
    parser.add_argument('--database-os',
                        choices=['linux', 'windows'],
                        default='linux',
                        help='The operating system of the database server.')

    # Benchmark options
    parser.add_argument(
        '--concurrency-levels',
        default=[8, 16, 32, 64, 128, 256],
        help=
        'Runs wrk benchmarker with different concurrency value (type int-sequence)',
        action=StoreSeqAction)
    parser.add_argument(
        '--query-levels',
        default=[1, 5, 10, 15, 20],
        help=
        'Database queries requested per HTTP connection, used during query test (type int-sequence)',
        action=StoreSeqAction)
    parser.add_argument(
        '--threads',
        default=maxThreads,
        help=
        'Run wrk benchmarker with this many threads. This should probably be the number of cores for your client system',
        type=int)
    parser.add_argument('--duration',
                        default=15,
                        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--sleep',
        type=int,
        default=60,
        help=
        'the amount of time to sleep after starting each test to allow the server to start up.'
    )

    # Misc Options
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        default=False,
        help=
        'Causes the configuration to print before any other commands are executed.'
    )
    parser.set_defaults(
        **defaults
    )  # Must do this after add, or each option's default will override the configuration file default
    args = parser.parse_args(remaining_argv)

    # Verify and massage options
    if args.client_user is None:
        print 'Usernames (e.g. --client-user and --database-user) are required!'
        print 'The system will SSH into the client and the database for the install stage'
        print 'Aborting'
        exit(1)

    if args.database_user is None:
        args.database_user = args.client_user

    if args.database_host is None:
        args.database_host = args.client_host

    if args.verbose:
        print 'Configuration options: '
        pprint(vars(args))

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode
    #   Do not use benchmarker variables for these checks,
    #   they are either str or bool based on the python version
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.list_test_metadata:
        benchmarker.run_list_test_metadata()
    elif args.parse is not None:
        benchmarker.parse_timestamp()
    elif not args.install_only:
        return benchmarker.run()
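
The --concurrency-levels and --query-levels flags above use a StoreSeqAction that is not shown. A sketch matching the "(type int-sequence)" forms the epilog describes, offered as an illustration rather than the toolset's actual class:

import argparse

class StoreSeqAction(argparse.Action):
    # "5" -> [5]; "1,3,6" -> [1, 3, 6];
    # "start:step:end" -> range(start, end, step), e.g. "1:3:15" -> [1, 4, 7, 10, 13]
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.parse_seq(values))

    def parse_seq(self, value):
        if ':' in value:
            start, step, end = map(int, value.split(':'))
            return list(range(start, end, step))
        if ',' in value:
            return [int(v) for v in value.split(',')]
        return [int(value)]
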
Example #10
  def start(self, out):

    # Setup environment variables    
    logDir = os.path.join(self.fwroot, self.benchmarker.latest_results_directory, 'logs', self.name.lower())
    bash_functions_path = os.path.join(self.fwroot, 'toolset/setup/linux/bash_functions.sh')
    setup_util.replace_environ(config='$FWROOT/config/benchmark_profile', 
              command='''\
              export TROOT=%s       &&  \
              export IROOT=%s       &&  \
              export DBHOST=%s      &&  \
              export LOGDIR=%s      &&  \
              export MAX_THREADS=%s &&  \
              export MAX_CONCURRENCY=%s \
              ''' % (
                self.directory, 
                self.install_root, 
                self.database_host, 
                logDir,
                self.benchmarker.threads,
                max(self.benchmarker.concurrency_levels)))

    # Always ensure that IROOT belongs to the runner_user
    if not os.path.exists(self.install_root):
      os.mkdir(self.install_root)
    chown = "sudo chown -R %s:%s %s" % (self.benchmarker.runner_user,
      self.benchmarker.runner_user, os.path.join(self.fwroot, self.install_root))
    subprocess.check_call(chown, shell=True, cwd=self.fwroot, executable='/bin/bash')

    # Run the module start inside parent of TROOT
    #  - we use the parent as a historical accident, a number of tests
    #    still refer to their TROOT manually
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module start (cwd=%s)", self.directory)
      
    # Run the start script for the test as the "testrunner" user
    # 
    # `sudo` - Switching user requires superuser privs
    #   -u [username] The username
    #   -E Preserves the current environment variables
    #   -H Forces the home var (~) to be reset to the user specified
    # `stdbuf` - Disable buffering, send output to python ASAP
    #   -o0 zero-sized buffer for stdout
    #   -e0 zero-sized buffer for stderr
    # `bash` - Run the setup.sh script using bash
    #   -e Force bash to exit on first error
    #   -x Turn on bash tracing e.g. print commands before running
    #
    # Most servers do not output to stdout/stderr while serving 
    # requests so there is no performance hit from disabling 
    # output buffering. This disabling is necessary to 
    # a) allow TFB to show output in real time and b) avoid losing 
    # output in the buffer when the testrunner processes are forcibly 
    # killed
    # 
    # See http://www.pixelbeat.org/programming/stdio_buffering/
    # See https://blogs.gnome.org/markmc/2013/06/04/async-io-and-python/
    # See http://eyalarubas.com/python-subproc-nonblock.html
    command = 'sudo -u %s -E -H stdbuf -o0 -e0 bash -exc "source %s && source %s.sh"' % (
      self.benchmarker.runner_user,
      bash_functions_path, 
      os.path.join(self.troot, self.setup_file))
    
    debug_command = '''\
      export FWROOT=%s          &&  \\
      export TROOT=%s           &&  \\
      export IROOT=%s           &&  \\
      export DBHOST=%s          &&  \\
      export LOGDIR=%s          &&  \\
      export MAX_THREADS=%s     &&  \\
      export MAX_CONCURRENCY=%s && \\
      cd %s && \\
      %s''' % (self.fwroot, 
        self.directory, 
        self.install_root, 
        self.database_host,
        logDir,
        self.benchmarker.threads, 
        max(self.benchmarker.concurrency_levels),
        self.directory,
        command)
    logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command)


    def tee_output(prefix, line):
      # Needs to be one atomic write
      # Explicitly use UTF-8 as it's the most common framework output 
      # TODO improve encoding handling 
      line = prefix.encode('utf-8') + line

      # Log to current terminal
      sys.stdout.write(line)
      sys.stdout.flush()
      # logging.error("".join([prefix, line]))

      out.write(line)
      out.flush()

    # Start the setup.sh command
    p = subprocess.Popen(command, cwd=self.directory, 
          shell=True, stdout=subprocess.PIPE, 
          stderr=subprocess.STDOUT)
    nbsr = setup_util.NonBlockingStreamReader(p.stdout, 
      "%s: %s.sh and framework processes have terminated" % (self.name, self.setup_file))

    # Set a limit on total execution time of setup.sh
    timeout = datetime.now() + timedelta(minutes = 105)
    time_remaining = timeout - datetime.now()

    # Travis-CI aborts the job after 10 minutes without output, so print
    # to stdout well within that window
    travis_timeout = datetime.now() + timedelta(minutes = 5)

    # Flush output until setup.sh work is finished. This is 
    # either a) when setup.sh exits b) when the port is bound
    # c) when we run out of time. Note that 'finished' doesn't 
    # guarantee setup.sh process is dead - the OS may choose to make 
    # setup.sh a zombie process if it still has living children
    #
    # Note: child processes forked (using &) will remain alive 
    # after setup.sh has exited. They will have inherited the 
    # stdout/stderr descriptors and will be directing their 
    # output to the pipes. 
    #
    prefix = "Setup %s: " % self.name
    while (p.poll() is None
      and not self.benchmarker.is_port_bound(self.port)
      and time_remaining.total_seconds() >= 0):
      
      # The conditions above are slow to check, so 
      # we will delay output substantially if we only
      # print one line per condition check. 
      # Adding a tight loop here mitigates the effect, 
      # ensuring that most of the output directly from 
      # setup.sh is sent to tee_output before the outer
      # loop exits and prints things like "setup.sh exited"
      # 
      for i in xrange(10):
        try:
          line = nbsr.readline(0.05)
          if line:
            tee_output(prefix, line)

            # Reset Travis-CI timer
            travis_timeout = datetime.now() + timedelta(minutes = 5)
        except setup_util.EndOfStream:
          tee_output(prefix, "Setup has terminated\n")
          break
      time_remaining = timeout - datetime.now()

      if (travis_timeout - datetime.now()).total_seconds() < 0:
        sys.stdout.write(prefix + 'Printing so Travis-CI does not time out\n')
        sys.stdout.write(prefix + "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
          p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
        sys.stdout.flush()
        travis_timeout = datetime.now() + timedelta(minutes = 5)

    # Did we time out?
    if time_remaining.total_seconds() < 0: 
      tee_output(prefix, "%s.sh timed out!! Aborting...\n" % self.setup_file)
      p.kill()
      return 1

    # What's our return code? 
    # If setup.sh has terminated, use that code
    # Otherwise, detect if the port was bound
    tee_output(prefix, "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
      p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
    retcode = (p.poll() if p.poll() is not None else 0 if self.benchmarker.is_port_bound(self.port) else 1)
    if p.poll() is not None:
      tee_output(prefix, "%s.sh process exited naturally with %s\n" % (self.setup_file, p.poll()))
    elif self.benchmarker.is_port_bound(self.port):
      tee_output(prefix, "Bound port detected on %s\n" % self.port)

    # Before we return control to the benchmarker, spin up a 
    # thread to keep an eye on the pipes in case the running 
    # framework uses stdout/stderr. Once all processes accessing
    # the subprocess.PIPEs are dead, this thread will terminate. 
    # Use a different prefix to indicate this is the framework 
    # speaking
    prefix = "Server %s: " % self.name
    def watch_child_pipes(nbsr, prefix):
      while True:
        try:
          line = nbsr.readline(60)
          if line:
            tee_output(prefix, line)
        except setup_util.EndOfStream:
          tee_output(prefix, "Framework processes have terminated\n")
          return

    watch_thread = Thread(target = watch_child_pipes,
      args = (nbsr, prefix))
    watch_thread.daemon = True
    watch_thread.start()

    logging.info("Executed %s.sh, returning %s", self.setup_file, retcode)
    os.chdir(previousDir)

    return retcode
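
The start loops above poll self.benchmarker.is_port_bound, which is not included in these examples. A plausible sketch of the check (try to bind the port; failure to bind suggests a server process already holds it):

import socket

def is_port_bound(port):
    # Sketch only: a bind failure can have other causes, but for this
    # toolset's purposes it is treated as "the server is up".
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('', port))
    except socket.error:
        return True   # bind failed: something is already listening
    finally:
        s.close()
    return False
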
Example #11
  def start(self, out, err):
    # Load profile for this installation
    profile="$FWROOT/config/benchmark_profile"

    # Setup variables for TROOT and IROOT
    setup_util.replace_environ(config=profile, 
              command='export TROOT=%s && export IROOT=%s && export DBHOST=%s && export MAX_THREADS=%s && export OUT=%s && export ERR=%s' %
              (self.directory, self.install_root, self.database_host, self.benchmarker.threads, os.path.join(self.fwroot, out.name), os.path.join(self.fwroot, err.name)))

    # Because start can take so long, we periodically print a status
    # line to let the user know we are working
    class ProgressPrinterThread(Thread):
      def __init__(self, event):
          Thread.__init__(self)
          self.stopped = event

      def run(self):
        while not self.stopped.wait(20):
          sys.stderr.write("Waiting for start to return...\n")
    stopFlag = Event()
    thread = ProgressPrinterThread(stopFlag)
    thread.start()

    # Run the module start (inside parent of TROOT)
    #     - we use the parent as a historical accident - a lot of tests
    #       use subprocess's cwd argument already
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module start (cwd=%s)", self.directory)
      
    # Write the stderr to a temp file to be read and fed back
    # to the user via logging later.
    with open('temp', 'w') as errout:
      # Run the start script for the test as the "testrunner" user.
      # This requires superuser privs, so `sudo` is necessary.
      #   -u [username] The username
      #   -E Preserves the current environment variables
      #   -H Forces the home var (~) to be reset to the user specified
      #   -e Force bash to exit on first error
      # Note: check_call is a blocking call, so any startup scripts
      # run by the framework that need to keep running (read: the
      # server has started and needs to remain up) should be
      # executed in the background.
      command = 'sudo -u %s -E -H bash -e %s.sh' % (self.benchmarker.runner_user, self.setup_file)
      
      debug_command = '''\
        export FWROOT=%s && \\
        export TROOT=%s && \\
        export IROOT=%s && \\
        export DBHOST=%s && \\
        export MAX_THREADS=%s && \\
        export OUT=%s && \\
        export ERR=%s && \\
        cd %s && \\
        %s''' % (self.fwroot, 
          self.directory, 
          self.install_root, 
          self.database_host, 
          self.benchmarker.threads, 
          os.path.join(self.fwroot, out.name), 
          os.path.join(self.fwroot, err.name),
          self.directory,
          command)
      logging.info("To run framework manually, copy/paste this:\n%s", debug_command)

      try:
        subprocess.check_call(command, cwd=self.directory, 
          shell=True, stderr=errout, stdout=out)
        retcode = 0
      except Exception:
        logging.exception("Failure running setup.sh")
        retcode = 1
    with open('temp', 'r') as errout:
      # Read out temp error output in its entirety
      body = errout.read()
      if len(body) > 0:
        # Log it to the user.
        logging.error(body)
        # Log it to our err.txt file
        err.write(body)
    # We are done with our temp file - delete it
    os.remove('temp')
    os.chdir(previousDir)

    # Stop the progress printer
    stopFlag.set()

    logging.info("Executed %s.sh", self.setup_file)

    return retcode