Example #1
  def __init__(self, mode, testdir=None):
    '''
    mode = [cisetup|prereq|install|verify] for what we want to do
    testdir  = framework directory we are running
    '''

    logging.basicConfig(level=logging.INFO)
    self.directory = testdir
    self.mode = mode

    try:
      self.commit_range = os.environ['TRAVIS_COMMIT_RANGE']
      if self.commit_range == "":
          self.commit_range = "-1 %s" % os.environ['TRAVIS_COMMIT']
    except KeyError:
      log.warning("I should only be used for automated integration tests e.g. Travis-CI")
      log.warning("Were you looking for run-tests.py?")
      last_commit = subprocess.check_output("git rev-parse HEAD^", shell=True).rstrip('\n')
      self.commit_range = "%s...HEAD" % last_commit

    #
    # Find the one test from benchmark_config that we are going to run
    #

    tests = gather_tests()
    dirtests = [t for t in tests if t.directory == testdir]
    
    # Travis-CI is linux only
    osvalidtests = [t for t in dirtests if t.os.lower() == "linux"
                  and (t.database_os.lower() == "linux" or t.database_os.lower() == "none")]
    
    # Travis-CI only has some supported databases
    validtests = [t for t in osvalidtests if t.database.lower() == "mysql"
                  or t.database.lower() == "postgres"
                  or t.database.lower() == "mongodb"
                  or t.database.lower() == "none"]
    log.info("Found %s tests (%s for linux, %s for linux and mysql) in directory '%s'", 
      len(dirtests), len(osvalidtests), len(validtests), testdir)
    if len(validtests) == 0:
      log.critical("Found no test that is possible to run in Travis-CI! Aborting!")
      if len(osvalidtests) != 0:
        log.critical("Note: Found these tests that could run in Travis-CI if more databases were supported")
        log.critical("Note: %s", osvalidtests)
        databases_needed = [t.database for t in osvalidtests]
        databases_needed = list(set(databases_needed))
        log.critical("Note: Here are the needed databases:")
        log.critical("Note: %s", databases_needed)
      sys.exit(1)

    self.names = [t.name for t in validtests]
    log.info("Choosing to use test %s to verify directory %s", self.names, testdir)
Example #2
  def __init__(self, mode, testdir=None):
    '''
    mode = [cisetup|prereq|install|verify] for what we want to do
    testdir  = framework directory we are running
    '''

    self.directory = testdir
    self.mode = mode
    if mode == "cisetup":
      logging.basicConfig(level=logging.DEBUG)
    else:
      logging.basicConfig(level=logging.INFO)

    try:
      # NOTE: THIS IS VERY TRICKY TO GET RIGHT!
      #
      # Our goal: Look at the files changed and determine if we need to 
      # run a verification for this folder. For a pull request, we want to 
      # see the list of files changed by any commit in that PR. For a 
      # push to master, we want to see a list of files changed by the pushed
      # commits. If this list of files contains the current directory, or 
      # contains the toolset/ directory, then we need to run a verification
      # 
      # If modifying, please consider: 
      #  - the commit range for a pull request is the first PR commit to 
      #    the github auto-merge commit
      #  - the commits in the commit range may include merge commits
      #    other than the auto-merge commit. A git log with -m
      #    will report that *all* the files in the merge were changed,
      #    but that is not the changeset that we care about
      #  - git diff shows differences, but we care about git log, which
      #    shows information on what was changed during commits
      #  - master can (and will!) move during a build. This is one 
      #    of the biggest problems with using git diff - master will 
      #    be updated, and those updates will include changes to toolset, 
      #    and suddenly every job in the build will start to run instead 
      #    of fast-failing
      #  - commit_range is not set if there was only one commit pushed, 
      #    so be sure to test for that on both master and PR
      #  - commit_range and commit are set very differently for pushes
      #    to an owned branch versus pushes to a pull request, test
      #  - For merge commits, the TRAVIS_COMMIT and TRAVIS_COMMIT_RANGE 
      #    will become invalid if additional commits are pushed while a job is 
      #    building. See https://github.com/travis-ci/travis-ci/issues/2666
      #  - If you're really insane, consider that the last commit in a 
      #    pull request could have been a merge commit. This means that 
      #    the github auto-merge commit could have more than two parents
      #  - Travis cannot really support rebasing onto an owned branch, the
      #    commit_range they provide will include commits that are non-existent
      #    in the repo cloned on the workers. See https://github.com/travis-ci/travis-ci/issues/2668
      #  
      #  - TEST ALL THESE OPTIONS: 
      #      - On a branch you own (e.g. your fork's master)
      #          - single commit
      #          - multiple commits pushed at once
      #          - commit+push, then commit+push again before the first
      #            build has finished. Verify all jobs in the first build 
      #            used the correct commit range
      #          - multiple commits, including a merge commit. Verify that
      #            the unrelated merge commit changes are not counted as 
      #            changes the user made
      #      - On a pull request
      #          - repeat all above variations
      #
      #
      # ==== CURRENT SOLUTION FOR PRs ====
      #
      # For pull requests, we will examine Github's automerge commit to see
      # what files would be touched if we merged this into the current master. 
      # You can't trust the travis variables here, as the automerge commit can
      # be different for jobs on the same build. See https://github.com/travis-ci/travis-ci/issues/2666
      # We instead use the FETCH_HEAD, which will always point to the SHA of 
      # the latest merge commit. However, if we only used FETCH_HEAD then any
      # new commits to a pull request would instantly start affecting currently
      # running jobs and the list of changed files may become incorrect for
      # those affected jobs. The solution is to walk backward from the FETCH_HEAD
      # to the last commit in the pull request. Based on how github currently 
      # does the automerge, this is the second parent of FETCH_HEAD, and 
      # therefore we use FETCH_HEAD^2 below
      #
      # This may not work perfectly in situations where the user had advanced 
      # merging happening in their PR. We correctly handle them merging in 
      # from upstream, but if they do wild stuff then this will likely break
      # on that. However, it will also likely break by seeing a change in 
      # toolset and triggering a full run when a partial run would be 
      # acceptable
      #
      # ==== CURRENT SOLUTION FOR OWNED BRANCHES (e.g. master) ====
      #
      # This one is fairly simple. Find the commit or commit range, and 
      # examine the log of file changes. If you encounter any merges,
      # then fully explode the two parent commits that made the merge
      # and look for the files changed there. This is an aggressive 
      # strategy to ensure that commits to master are always tested 
      # well
      log.debug("TRAVIS_COMMIT_RANGE: %s", os.environ['TRAVIS_COMMIT_RANGE'])
      log.debug("TRAVIS_COMMIT      : %s", os.environ['TRAVIS_COMMIT'])

      is_PR = (os.environ['TRAVIS_PULL_REQUEST'] != "false")
      if is_PR:
        log.debug('I am testing a pull request')
        first_commit = os.environ['TRAVIS_COMMIT_RANGE'].split('...')[0]
        last_commit = subprocess.check_output("git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n')
        log.debug("Guessing that first commit in PR is : %s", first_commit)
        log.debug("Guessing that final commit in PR is : %s", last_commit)

        if first_commit == "":
          # Travis-CI is not yet passing a commit range for pull requests
          # so we must use the automerge's changed file list. This has the 
          # negative effect that new pushes to the PR will immediately 
          # start affecting any new jobs, regardless of the build they are on
          log.debug("No first commit, using Github's automerge commit")
          self.commit_range = "--first-parent -1 -m FETCH_HEAD"
        elif first_commit == last_commit:
          # There is only one commit in the pull request so far, 
          # or Travis-CI is not yet passing the commit range properly 
          # for pull requests. We examine just the one commit using -1
          #
          # On the oddball chance that it's a merge commit, we pray  
          # it's a merge from upstream and also pass --first-parent 
          log.debug("Only one commit in range, examining %s", last_commit)
          self.commit_range = "-m --first-parent -1 %s" % last_commit
        else: 
          # In case they merged in upstream, we only care about the first 
          # parent. For crazier merges, we hope
          self.commit_range = "--first-parent %s...%s" % (first_commit, last_commit)

      if not is_PR:
        log.debug('I am not testing a pull request')
        # If more than one commit was pushed, examine everything including 
        # all details on all merges
        self.commit_range = "-m %s" % os.environ['TRAVIS_COMMIT_RANGE']
        
        # If only one commit was pushed, examine that one. If it was a 
        # merge be sure to show all details
        if self.commit_range == "":
          self.commit_range = "-m -1 %s" % os.environ['TRAVIS_COMMIT']

    except KeyError:
      log.warning("I should only be used for automated integration tests e.g. Travis-CI")
      log.warning("Were you looking for run-tests.py?")
      self.commit_range = "-m HEAD^...HEAD"

    #
    # Find the one test from benchmark_config that we are going to run
    #

    tests = gather_tests()
    self.fwroot = setup_util.get_fwroot()
    target_dir = self.fwroot + '/frameworks/' + testdir
    log.debug("Target directory is %s", target_dir)
    dirtests = [t for t in tests if t.directory == target_dir]
    
    # Travis-CI is linux only
    osvalidtests = [t for t in dirtests if t.os.lower() == "linux"
                  and (t.database_os.lower() == "linux" or t.database_os.lower() == "none")]
    
    # Our Travis-CI only has some databases supported
    validtests = [t for t in osvalidtests if t.database.lower() == "mysql"
                  or t.database.lower() == "postgres"
                  or t.database.lower() == "mongodb"
                  or t.database.lower() == "none"]
    log.info("Found %s usable tests (%s valid for linux, %s valid for linux and {mysql,postgres,mongodb,none}) in directory '%s'", 
      len(dirtests), len(osvalidtests), len(validtests), '$FWROOT/frameworks/' + testdir)
    if len(validtests) == 0:
      log.critical("Found no test that is possible to run in Travis-CI! Aborting!")
      if len(osvalidtests) != 0:
        log.critical("Note: Found these tests that could run in Travis-CI if more databases were supported")
        log.critical("Note: %s", osvalidtests)
        databases_needed = [t.database for t in osvalidtests]
        databases_needed = list(set(databases_needed))
        log.critical("Note: Here are the needed databases:")
        log.critical("Note: %s", databases_needed)
      sys.exit(1)

    self.names = [t.name for t in validtests]
    log.info("Using tests %s to verify directory %s", self.names, '$FWROOT/frameworks/' + testdir)
Example #3
    def __install_server_software(self):
        print("\nINSTALL: Installing server software (strategy=%s)\n" %
              self.strategy)
        # Install global prerequisites (requires sudo)
        bash_functions_path = '$FWROOT/toolset/setup/linux/bash_functions.sh'
        prereq_path = '$FWROOT/toolset/setup/linux/prerequisites.sh'
        self.__run_command(". %s && . %s" % (bash_functions_path, prereq_path))
        self.__run_command(
            "sudo chown -R %s:%s %s" %
            (self.benchmarker.runner_user, self.benchmarker.runner_user,
             os.path.join(self.fwroot, self.install_dir)))

        tests = gather_tests(include=self.benchmarker.test,
                             exclude=self.benchmarker.exclude,
                             benchmarker=self.benchmarker)

        dirs = [t.directory for t in tests]

        # Locate all installation files
        install_files = glob.glob("%s/*/install.sh" % self.fwroot)
        install_files.extend(
            glob.glob("%s/frameworks/*/*/install.sh" % self.fwroot))

        # Run install for selected tests
        for test_install_file in install_files:
            test_dir = os.path.dirname(test_install_file)
            test_rel_dir = os.path.relpath(test_dir, self.fwroot)
            logging.debug("Considering install of %s (%s, %s)",
                          test_install_file, test_rel_dir, test_dir)

            if test_dir not in dirs:
                continue

            logging.info("Running installation for directory %s (cwd=%s)",
                         test_dir, test_dir)

            # Collect the tests in this directory
            # local_tests = [t for t in tests if t.directory == test_dir]

            # Find installation directory
            #   e.g. FWROOT/installs or FWROOT/installs/pertest/<test-name>
            test_install_dir = "%s/%s" % (self.fwroot, self.install_dir)
            if self.strategy == 'pertest':
                test_install_dir = "%s/pertest/%s" % (test_install_dir,
                                                      test_dir)
            if not os.path.exists(test_install_dir):
                os.makedirs(test_install_dir)

            # Move into the proper working directory
            previousDir = os.getcwd()
            os.chdir(test_dir)

            # Load profile for this installation
            profile = "%s/bash_profile.sh" % test_dir
            if not os.path.exists(profile):
                profile = "$FWROOT/config/benchmark_profile"
            else:
                logging.info("Loading environment from %s (cwd=%s)", profile,
                             test_dir)
            setup_util.replace_environ(
                config=profile,
                command='export TROOT=%s && export IROOT=%s' %
                (test_dir, test_install_dir))

            # Run test installation script
            #   FWROOT - Path of the FwBm root
            #   IROOT  - Path of this test's install directory
            #   TROOT  - Path to this test's directory
            # Note: Cannot use ''' for newlines here or the script
            # passed to `bash -c` will fail.
            self.__run_command(
                'sudo -u %s -E -H bash -c "export TROOT=%s && export IROOT=%s && source %s && source %s"'
                % (self.benchmarker.runner_user, test_dir, test_install_dir,
                   bash_functions_path, test_install_file),
                cwd=test_install_dir)

            # Move back to previous directory
            os.chdir(previousDir)

        self.__run_command("sudo apt-get -yq autoremove")

        print("\nINSTALL: Finished installing server software\n")
Example #4
  def __install_server_software(self):
    print("\nINSTALL: Installing server software (strategy=%s)\n"%self.strategy)
    # Install global prerequisites (requires sudo)
    bash_functions_path='$FWROOT/toolset/setup/linux/bash_functions.sh'
    prereq_path='$FWROOT/toolset/setup/linux/prerequisites.sh'
    self.__run_command(". %s && . %s" % (bash_functions_path, prereq_path))
    self.__run_command("sudo chown -R %s:%s %s" % (self.benchmarker.runner_user,
      self.benchmarker.runner_user, os.path.join(self.fwroot, self.install_dir)))

    tests = gather_tests(include=self.benchmarker.test, 
      exclude=self.benchmarker.exclude,
      benchmarker=self.benchmarker)
    
    dirs = [t.directory for t in tests]

    # Locate all installation files
    install_files = glob.glob("%s/*/install.sh" % self.fwroot)
    install_files.extend(glob.glob("%s/frameworks/*/*/install.sh" % self.fwroot))

    # Run install for selected tests
    for test_install_file in install_files:
      test_dir = os.path.dirname(test_install_file)
      test_rel_dir = os.path.relpath(test_dir, self.fwroot)
      logging.debug("Considering install of %s (%s, %s)", test_install_file, test_rel_dir, test_dir)

      if test_dir not in dirs:
        continue

      logging.info("Running installation for directory %s (cwd=%s)", test_dir, test_dir)

      # Collect the tests in this directory
      # local_tests = [t for t in tests if t.directory == test_dir]

      # Find installation directory 
      #   e.g. FWROOT/installs or FWROOT/installs/pertest/<test-name>
      test_install_dir="%s/%s" % (self.fwroot, self.install_dir)
      if self.strategy == 'pertest':
        test_install_dir="%s/pertest/%s" % (test_install_dir, test_dir)
      if not os.path.exists(test_install_dir):
        os.makedirs(test_install_dir)
      
      # Move into the proper working directory
      previousDir = os.getcwd()
      os.chdir(test_dir)

      # Load profile for this installation
      profile="%s/bash_profile.sh" % test_dir
      if not os.path.exists(profile):
        profile="$FWROOT/config/benchmark_profile"
      else:
        logging.info("Loading environment from %s (cwd=%s)", profile, test_dir)
      setup_util.replace_environ(config=profile, 
        command='export TROOT=%s && export IROOT=%s' %
        (test_dir, test_install_dir))

      # Run test installation script
      #   FWROOT - Path of the FwBm root
      #   IROOT  - Path of this test's install directory
      #   TROOT  - Path to this test's directory 
      # Note: Cannot use ''' for newlines here or the script
      # passed to `bash -c` will fail.
      self.__run_command('sudo -u %s -E -H bash -c "export TROOT=%s && export IROOT=%s && source %s && source %s"' % 
        (self.benchmarker.runner_user, test_dir, test_install_dir, 
          bash_functions_path, test_install_file),
          cwd=test_install_dir)

      # Move back to previous directory
      os.chdir(previousDir)

    self.__run_command("sudo apt-get -yq autoremove");    

    print("\nINSTALL: Finished installing server software\n")
Example #5
            log.info(line.rstrip('\n'))
      except IOError:
        log.error("No OUT file found")

    log.error("Running inside Travis-CI, so I will print a copy of the verification summary")

    results = None
    try:
      with open('results/ec2/latest/results.json', 'r') as f:
        results = json.load(f)
    except IOError:
      log.critical("No results.json found, unable to print verification summary") 
      sys.exit(retcode)

    target_dir = setup_util.get_fwroot() + '/frameworks/' + testdir
    dirtests = [t for t in gather_tests() if t.directory == target_dir]

    # Normally you don't have to use Fore.* before each line, but 
    # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
    # or stream flush, so we have to ensure that the color code is printed repeatedly
    prefix = Fore.CYAN
    for line in header("Verification Summary", top='=', bottom='').split('\n'):
      print prefix + line

    for test in dirtests:
      print prefix + "| Test: %s" % test.name
      if test.name not in runner.names:
        print prefix + "|      " + Fore.YELLOW + "Unable to verify in Travis-CI"
      elif test.name in results['verify'].keys():
        for test_type, result in results['verify'][test.name].iteritems():
          if result.upper() == "PASS":
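
Note: `header()` is used above but not defined in this excerpt. A plausible sketch, assuming it frames a title with rule lines built from the `top` and `bottom` characters, an empty string suppressing that rule (matching the bottom='' call above):

def header(text, top='-', bottom='-', width=80):
  # Hypothetical helper: a title line framed by horizontal rules; the
  # caller splits the result on '\n' to re-apply a color code per line.
  lines = []
  if top:
    lines.append(top * width)
  lines.append("| %s" % text)
  if bottom:
    lines.append(bottom * width)
  return '\n'.join(lines)
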
Example #6
    def __init__(self, mode, testdir=None):
        '''
    mode = [cisetup|prereq|install|verify] for what we want to do
    testdir  = framework directory we are running
    '''

        self.directory = testdir
        self.mode = mode
        if mode == "cisetup":
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.basicConfig(level=logging.INFO)

        try:
            # NOTE: THIS IS VERY TRICKY TO GET RIGHT!
            #
            # Our goal: Look at the files changed and determine if we need to
            # run a verification for this folder. For a pull request, we want to
            # see the list of files changed by any commit in that PR. For a
            # push to master, we want to see a list of files changed by the pushed
            # commits. If this list of files contains the current directory, or
            # contains the toolset/ directory, then we need to run a verification
            #
            # If modifying, please consider:
            #  - the commit range for a pull request is the first PR commit to
            #    the github auto-merge commit
            #  - the commits in the commit range may include merge commits
            #    other than the auto-merge commit. A git log with -m
            #    will report that *all* the files in the merge were changed,
            #    but that is not the changeset that we care about
            #  - git diff shows differences, but we care about git log, which
            #    shows information on what was changed during commits
            #  - master can (and will!) move during a build. This is one
            #    of the biggest problems with using git diff - master will
            #    be updated, and those updates will include changes to toolset,
            #    and suddenly every job in the build will start to run instead
            #    of fast-failing
            #  - commit_range is not set if there was only one commit pushed,
            #    so be sure to test for that on both master and PR
            #  - commit_range and commit are set very differently for pushes
            #    to an owned branch versus pushes to a pull request, test
            #  - For merge commits, the TRAVIS_COMMIT and TRAVIS_COMMIT_RANGE
            #    will become invalid if additional commits are pushed while a job is
            #    building. See https://github.com/travis-ci/travis-ci/issues/2666
            #  - If you're really insane, consider that the last commit in a
            #    pull request could have been a merge commit. This means that
            #    the github auto-merge commit could have more than two parents
            #  - Travis cannot really support rebasing onto an owned branch, the
            #    commit_range they provide will include commits that are non-existent
            #    in the repo cloned on the workers. See https://github.com/travis-ci/travis-ci/issues/2668
            #
            #  - TEST ALL THESE OPTIONS:
            #      - On a branch you own (e.g. your fork's master)
            #          - single commit
            #          - multiple commits pushed at once
            #          - commit+push, then commit+push again before the first
            #            build has finished. Verify all jobs in the first build
            #            used the correct commit range
            #          - multiple commits, including a merge commit. Verify that
            #            the unrelated merge commit changes are not counted as
            #            changes the user made
            #      - On a pull request
            #          - repeat all above variations
            #
            #
            # ==== CURRENT SOLUTION FOR PRs ====
            #
            # For pull requests, we will examine Github's automerge commit to see
            # what files would be touched if we merged this into the current master.
            # You can't trust the travis variables here, as the automerge commit can
            # be different for jobs on the same build. See https://github.com/travis-ci/travis-ci/issues/2666
            # We instead use the FETCH_HEAD, which will always point to the SHA of
            # the latest merge commit. However, if we only used FETCH_HEAD then any
            # new commits to a pull request would instantly start affecting currently
            # running jobs and the list of changed files may become incorrect for
            # those affected jobs. The solution is to walk backward from the FETCH_HEAD
            # to the last commit in the pull request. Based on how github currently
            # does the automerge, this is the second parent of FETCH_HEAD, and
            # therefore we use FETCH_HEAD^2 below
            #
            # This may not work perfectly in situations where the user had advanced
            # merging happening in their PR. We correctly handle them merging in
            # from upstream, but if they do wild stuff then this will likely break
            # on that. However, it will also likely break by seeing a change in
            # toolset and triggering a full run when a partial run would be
            # acceptable
            #
            # ==== CURRENT SOLUTION FOR OWNED BRANCHES (e.g. master) ====
            #
            # This one is fairly simple. Find the commit or commit range, and
            # examine the log of file changes. If you encounter any merges,
            # then fully explode the two parent commits that made the merge
            # and look for the files changed there. This is an aggressive
            # strategy to ensure that commits to master are always tested
            # well
            log.debug("TRAVIS_COMMIT_RANGE: %s",
                      os.environ['TRAVIS_COMMIT_RANGE'])
            log.debug("TRAVIS_COMMIT      : %s", os.environ['TRAVIS_COMMIT'])

            is_PR = (os.environ['TRAVIS_PULL_REQUEST'] != "false")
            if is_PR:
                log.debug('I am testing a pull request')
                first_commit = os.environ['TRAVIS_COMMIT_RANGE'].split(
                    '...')[0]
                last_commit = subprocess.check_output(
                    "git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n')
                log.debug("Guessing that first commit in PR is : %s",
                          first_commit)
                log.debug("Guessing that final commit in PR is : %s",
                          last_commit)

                if first_commit == "":
                    # Travis-CI is not yet passing a commit range for pull requests
                    # so we must use the automerge's changed file list. This has the
                    # negative effect that new pushes to the PR will immediately
                    # start affecting any new jobs, regardless of the build they are on
                    log.debug(
                        "No first commit, using Github's automerge commit")
                    self.commit_range = "--first-parent -1 -m FETCH_HEAD"
                elif first_commit == last_commit:
                    # There is only one commit in the pull request so far,
                    # or Travis-CI is not yet passing the commit range properly
                    # for pull requests. We examine just the one commit using -1
                    #
                    # On the oddball chance that it's a merge commit, we pray
                    # it's a merge from upstream and also pass --first-parent
                    log.debug("Only one commit in range, examining %s",
                              last_commit)
                    self.commit_range = "-m --first-parent -1 %s" % last_commit
                else:
                    # In case they merged in upstream, we only care about the first
                    # parent. For crazier merges, we hope
                    self.commit_range = "--first-parent %s...%s" % (
                        first_commit, last_commit)

            if not is_PR:
                log.debug('I am not testing a pull request')
                # Three main scenarios to consider
                #  - 1 One non-merge commit pushed to master
                #  - 2 One merge commit pushed to master (e.g. a PR was merged).
                #      This is an example of merging a topic branch
                #  - 3 Multiple commits pushed to master
                #
                #  1 and 2 are actually handled the same way, by showing the
                #  changes being brought into to master when that one commit
                #  was merged. Fairly simple, `git log -1 COMMIT`. To handle
                #  the potential merge of a topic branch you also include
                #  `--first-parent -m`.
                #
                #  3 needs to be handled by comparing all merge children for
                #  the entire commit range. The best solution here would *not*
                #  use --first-parent because there is no guarantee that it
                #  reflects changes brought into master. Unfortunately we have
                #  no good method inside Travis-CI to easily differentiate
                #  scenario 1/2 from scenario 3, so I cannot handle them all
                #  separately. 1/2 are the most common cases, 3 with a range
                #  of non-merge commits is the next most common, and 3 with
                #  a range including merge commits is the least common, so I
                #  am choosing to make our Travis-CI setup potentially not work
                #  properly on the least common case by always using
                #  --first-parent

                # Handle 3
                # Note: Also handles 2 because Travis-CI sets COMMIT_RANGE for
                # merged PR commits
                self.commit_range = "--first-parent -m %s" % os.environ[
                    'TRAVIS_COMMIT_RANGE']

                # Handle 1
                if self.commit_range == "":
                    self.commit_range = "--first-parent -m -1 %s" % os.environ[
                        'TRAVIS_COMMIT']

        except KeyError:
            log.warning(
                "I should only be used for automated integration tests e.g. Travis-CI"
            )
            log.warning("Were you looking for run-tests.py?")
            self.commit_range = "-m HEAD^...HEAD"

        #
        # Find the one test from benchmark_config.json that we are going to run
        #

        tests = gather_tests()
        self.fwroot = setup_util.get_fwroot()
        target_dir = self.fwroot + '/frameworks/' + testdir
        log.debug("Target directory is %s", target_dir)
        dirtests = [t for t in tests if t.directory == target_dir]

        # Travis-CI is linux only
        osvalidtests = [
            t for t in dirtests
            if t.os.lower() == "linux" and (t.database_os.lower() == "linux"
                                            or t.database_os.lower() == "none")
        ]

        # Our Travis-CI only has some databases supported
        validtests = [
            t for t in osvalidtests
            if t.database.lower() in self.SUPPORTED_DATABASES
        ]
        supported_databases = ','.join(self.SUPPORTED_DATABASES)
        log.info(
            "Found %s usable tests (%s valid for linux, %s valid for linux and {%s}) in directory '%s'",
            len(dirtests), len(osvalidtests), len(validtests),
            supported_databases, '$FWROOT/frameworks/' + testdir)
        if len(validtests) == 0:
            log.critical(
                "Found no test that is possible to run in Travis-CI! Aborting!"
            )
            if len(osvalidtests) != 0:
                log.critical(
                    "Note: Found these tests that could run in Travis-CI if more databases were supported"
                )
                log.critical("Note: %s", osvalidtests)
                databases_needed = [t.database for t in osvalidtests]
                databases_needed = list(set(databases_needed))
                log.critical("Note: Here are the needed databases:")
                log.critical("Note: %s", databases_needed)
            sys.exit(1)

        self.names = [t.name for t in validtests]
        log.info("Using tests %s to verify directory %s", self.names,
                 '$FWROOT/frameworks/' + testdir)
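
Note: the goal stated in the comment block above ("if this list of files contains the current directory, or contains the toolset/ directory, then we need to run a verification") reduces to a small predicate over the changed-file list. A sketch under assumed names; the excerpt does not show where this check actually lives:

def needs_verification(changed_files, testdir):
    # Re-verify when any changed path falls under this framework's
    # directory or under the shared toolset/ directory.
    for path in changed_files:
        if path.startswith('frameworks/' + testdir) or path.startswith('toolset/'):
            return True
    return False
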
Example #7
            log.info(line.rstrip('\n'))
      except IOError:
        log.error("No OUT file found")

    log.error("Running inside Travis-CI, so I will print a copy of the verification summary")

    results = None
    try:
      with open('results/ec2/latest/results.json', 'r') as f:
        results = json.load(f)
    except IOError:
      log.critical("No results.json found, unable to print verification summary") 
      sys.exit(retcode)

    target_dir = setup_util.get_fwroot() + '/frameworks/' + testdir
    dirtests = [t for t in gather_tests() if t.directory == target_dir]

    # Normally you don't have to use Fore.* before each line, but 
    # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
    # or stream flush, so we have to ensure that the color code is printed repeatedly
    prefix = Fore.CYAN
    for line in header("Verification Summary", top='=', bottom='').split('\n'):
      print prefix + line

    for test in dirtests:
      print prefix + "| Test: %s" % test.name
      if test.name not in runner.names:
        print prefix + "|      " + Fore.YELLOW + "Unable to verify in Travis-CI"
      elif test.name in results['verify'].keys():
        for test_type, result in results['verify'][test.name].iteritems():
          if result.upper() == "PASS":
Example #8
    def __init__(self, mode, testdir=None):
        '''
    mode = [cisetup|prereq|install|verify] for what we want to do
    testdir  = framework directory we are running
    '''

        logging.basicConfig(level=logging.INFO)
        self.directory = testdir
        self.mode = mode

        try:
            self.commit_range = os.environ['TRAVIS_COMMIT_RANGE']
            if self.commit_range == "":
                self.commit_range = "-1 %s" % os.environ['TRAVIS_COMMIT']
        except KeyError:
            log.warning(
                "I should only be used for automated integration tests e.g. Travis-CI"
            )
            log.warning("Were you looking for run-tests.py?")
            last_commit = subprocess.check_output("git rev-parse HEAD^",
                                                  shell=True).rstrip('\n')
            self.commit_range = "%s...HEAD" % last_commit

        #
        # Find the one test from benchmark_config that we are going to run
        #

        tests = gather_tests()
        dirtests = [t for t in tests if t.directory == testdir]

        # Travis-CI is linux only
        osvalidtests = [
            t for t in dirtests
            if t.os.lower() == "linux" and (t.database_os.lower() == "linux"
                                            or t.database_os.lower() == "none")
        ]

        # Travis-CI only has some supported databases
        validtests = [
            t for t in osvalidtests if t.database.lower() == "mysql"
            or t.database.lower() == "postgres"
            or t.database.lower() == "mongodb" or t.database.lower() == "none"
        ]
        log.info(
            "Found %s tests (%s for linux, %s for linux and mysql) in directory '%s'",
            len(dirtests), len(osvalidtests), len(validtests), testdir)
        if len(validtests) == 0:
            log.critical(
                "Found no test that is possible to run in Travis-CI! Aborting!"
            )
            if len(osvalidtests) != 0:
                log.critical(
                    "Note: Found these tests that could run in Travis-CI if more databases were supported"
                )
                log.critical("Note: %s", osvalidtests)
                databases_needed = [t.database for t in osvalidtests]
                databases_needed = list(set(databases_needed))
                log.critical("Note: Here are the needed databases:")
                log.critical("Note: %s", databases_needed)
            sys.exit(1)

        self.names = [t.name for t in validtests]
        log.info("Choosing to use test %s to verify directory %s", self.names,
                 testdir)
Example #9
  def __install_server_software(self):
    print("\nINSTALL: Installing server software (strategy=%s)\n"%self.strategy)
    # Install global prerequisites
    bash_functions_path='$FWROOT/toolset/setup/linux/bash_functions.sh'
    prereq_path='$FWROOT/toolset/setup/linux/prerequisites.sh'
    self.__run_command(". %s && . %s" % (bash_functions_path, prereq_path))

    tests = gather_tests(include=self.benchmarker.test, 
      exclude=self.benchmarker.exclude,
      benchmarker=self.benchmarker)
    
    dirs = [t.directory for t in tests]

    # Locate all installation files
    install_files = glob.glob("%s/*/install.sh" % self.fwroot)

    # Run install for selected tests
    for test_install_file in install_files:
      test_dir = os.path.basename(os.path.dirname(test_install_file))
      test_rel_dir = os.path.relpath(os.path.dirname(test_install_file), self.fwroot)

      if test_dir not in dirs:
        continue
              
      logging.info("Running installation for directory %s", test_dir)

      # Find installation directory 
      # e.g. FWROOT/installs or FWROOT/installs/pertest/<test-name>
      test_install_dir="%s/%s" % (self.fwroot, self.install_dir)
      if self.strategy == 'pertest':
        test_install_dir="%s/pertest/%s" % (test_install_dir, test_dir)
      test_rel_install_dir=os.path.relpath(test_install_dir, self.fwroot)
      if not os.path.exists(test_install_dir):
        os.makedirs(test_install_dir)

      # Load profile for this installation
      profile="%s/bash_profile.sh" % test_dir
      if not os.path.exists(profile):
        logging.warning("Directory %s does not have a bash_profile"%test_dir)
        profile="$FWROOT/config/benchmark_profile"
      else:
        logging.info("Loading environment from %s", profile)
      setup_util.replace_environ(config=profile, 
        command='export TROOT=$FWROOT/%s && export IROOT=$FWROOT/%s' %
        (test_rel_dir, test_rel_install_dir))

      # Find relative installation file
      test_rel_install_file = "$FWROOT%s" % setup_util.path_relative_to_root(test_install_file)

      # Then run test installer file
      # Give all installers a number of variables
      # FWROOT - Path of the FwBm root
      # IROOT  - Path of this test's install directory
      # TROOT  - Path to this test's directory 
      self.__run_command('''
        export TROOT=$FWROOT/%s && 
        export IROOT=$FWROOT/%s && 
        . %s && 
        . %s''' % 
        (test_rel_dir, test_rel_install_dir, 
          bash_functions_path, test_rel_install_file),
          cwd=test_install_dir)

    self.__run_command("sudo apt-get -y autoremove");    

    print("\nINSTALL: Finished installing server software\n")
Example #10
    def __install_server_software(self):
        print("\nINSTALL: Installing server software (strategy=%s)\n" %
              self.strategy)
        # Install global prerequisites (requires sudo)
        bash_functions_path = '$FWROOT/toolset/setup/linux/bash_functions.sh'
        prereq_path = '$FWROOT/toolset/setup/linux/prerequisites.sh'
        self.__run_command(". %s && . %s" % (bash_functions_path, prereq_path))
        self.__run_command(
            "sudo chown -R %s:%s %s" %
            (self.benchmarker.runner_user, self.benchmarker.runner_user,
             os.path.join(self.fwroot, self.install_dir)))

        tests = gather_tests(include=self.benchmarker.test,
                             exclude=self.benchmarker.exclude,
                             benchmarker=self.benchmarker)

        dirs = [t.directory for t in tests]

        # Locate all installation files
        install_files = glob.glob("%s/*/install.sh" % self.fwroot)
        install_files.extend(
            glob.glob("%s/frameworks/*/*/install.sh" % self.fwroot))

        # Run install for selected tests
        for test_install_file in install_files:
            test_dir = os.path.dirname(test_install_file)
            test_rel_dir = os.path.relpath(test_dir, self.fwroot)
            logging.debug("Considering install of %s (%s, %s)",
                          test_install_file, test_rel_dir, test_dir)

            if test_dir not in dirs:
                continue

            logging.info("Running installation for directory %s (cwd=%s)",
                         test_dir, test_dir)

            # Collect the tests in this directory
            # local_tests = [t for t in tests if t.directory == test_dir]

            # Find installation directory
            #   e.g. FWROOT/installs or FWROOT/installs/pertest/<test-name>
            test_install_dir = "%s/%s" % (self.fwroot, self.install_dir)
            if self.strategy == 'pertest':
                test_install_dir = "%s/pertest/%s" % (test_install_dir,
                                                      test_dir)
            if not os.path.exists(test_install_dir):
                os.makedirs(test_install_dir)

            # Move into the proper working directory
            previousDir = os.getcwd()
            os.chdir(test_dir)

            # Load environment
            setup_util.replace_environ(
                config='$FWROOT/config/benchmark_profile',
                command='export TROOT=%s && export IROOT=%s' %
                (test_dir, test_install_dir))

            # Run the install.sh script for the test as the "testrunner" user
            #
            # `sudo` - Switching user requires superuser privs
            #   -u [username] The username
            #   -E Preserves the current environment variables
            #   -H Forces the home var (~) to be reset to the user specified
            #   TROOT  - Path to this test's directory
            #   IROOT  - Path of this test's install directory
            # TODO export bash functions and call install.sh directly
            command = 'sudo -u %s -E -H bash -c "source %s && source %s"' % (
                self.benchmarker.runner_user, bash_functions_path,
                test_install_file)

            debug_command = '''\
        export FWROOT=%s && \\
        export TROOT=%s && \\
        export IROOT=%s && \\
        cd $IROOT && \\
        %s''' % (self.fwroot, test_dir, test_install_dir, command)
            logging.info("To run installation manually, copy/paste this:\n%s",
                         debug_command)

            # Run test installation script
            self.__run_command(command, cwd=test_install_dir)

            # Move back to previous directory
            os.chdir(previousDir)

        self.__run_command("sudo apt-get -yq autoremove")

        print("\nINSTALL: Finished installing server software\n")
  def __install_server_software(self):
    print("\nINSTALL: Installing server software (strategy=%s)\n"%self.strategy)
    # Install global prerequisites (requires sudo)
    bash_functions_path='$FWROOT/toolset/setup/linux/bash_functions.sh'
    prereq_path='$FWROOT/toolset/setup/linux/prerequisites.sh'
    self.__run_command(". %s && . %s" % (bash_functions_path, prereq_path))
    self.__run_command("sudo chown -R %s:%s %s" % (self.benchmarker.runner_user,
      self.benchmarker.runner_user, os.path.join(self.fwroot, self.install_dir)))

    tests = gather_tests(include=self.benchmarker.test, 
      exclude=self.benchmarker.exclude,
      benchmarker=self.benchmarker)
    
    dirs = [t.directory for t in tests]

    # Locate all installation files
    install_files = glob.glob("%s/*/install.sh" % self.fwroot)
    install_files.extend(glob.glob("%s/frameworks/*/*/install.sh" % self.fwroot))

    # Run install for selected tests
    for test_install_file in install_files:
      test_dir = os.path.dirname(test_install_file)
      test_rel_dir = os.path.relpath(test_dir, self.fwroot)
      logging.debug("Considering install of %s (%s, %s)", test_install_file, test_rel_dir, test_dir)

      if test_dir not in dirs:
        continue

      logging.info("Running installation for directory %s (cwd=%s)", test_dir, test_dir)

      # Collect the tests in this directory
      # local_tests = [t for t in tests if t.directory == test_dir]

      # Find installation directory 
      #   e.g. FWROOT/installs or FWROOT/installs/pertest/<test-name>
      test_install_dir="%s/%s" % (self.fwroot, self.install_dir)
      if self.strategy == 'pertest':
        test_install_dir="%s/pertest/%s" % (test_install_dir, test_dir)
      if not os.path.exists(test_install_dir):
        os.makedirs(test_install_dir)
      
      # Move into the proper working directory
      previousDir = os.getcwd()
      os.chdir(test_dir)

      # Load environment
      setup_util.replace_environ(config='$FWROOT/config/benchmark_profile', 
        command='export TROOT=%s && export IROOT=%s' %
        (test_dir, test_install_dir))

      # Run the install.sh script for the test as the "testrunner" user
      # 
      # `sudo` - Switching user requires superuser privs
      #   -u [username] The username
      #   -E Preserves the current environment variables
      #   -H Forces the home var (~) to be reset to the user specified
      #   TROOT  - Path to this test's directory 
      #   IROOT  - Path of this test's install directory
      # TODO export bash functions and call install.sh directly
      command = 'sudo -u %s -E -H bash -c "source %s && source %s"' % (
        self.benchmarker.runner_user, 
        bash_functions_path, 
        test_install_file)

      debug_command = '''\
        export FWROOT=%s && \\
        export TROOT=%s && \\
        export IROOT=%s && \\
        cd $IROOT && \\
        %s''' % (self.fwroot, 
          test_dir, 
          test_install_dir,
          command)
      logging.info("To run installation manually, copy/paste this:\n%s", debug_command)

      # Run test installation script
      self.__run_command(command, cwd=test_install_dir)

      # Move back to previous directory
      os.chdir(previousDir)

    self.__run_command("sudo apt-get -yq autoremove");    

    print("\nINSTALL: Finished installing server software\n")