def __gather_tests(self):
    tests = []

    # Assume we are running from FrameworkBenchmarks
    config_files = glob.glob('*/benchmark_config')

    for config_file_name in config_files:
      # Look for the benchmark_config file, this will set up our tests.
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      config = None

      with open(config_file_name, 'r') as config_file:
        # Load json file into config object
        try:
          config = json.load(config_file)
        except ValueError:
          print("Error loading '%s'." % config_file_name)
          raise

      if config is None:
        continue

      test = framework_test.parse_config(config, os.path.dirname(config_file_name), self)
      # If the user specified which tests to run, then 
      # we can skip over tests that are not in that list
      if self.test is None:
        tests = tests + test
      else:
        for atest in test:
          if atest.name in self.test:
            tests.append(atest)

    tests.sort(key=lambda x: x.name)

    # If the tests have been interrupted somehow, then we want to resume them where we left
    # off, rather than starting from the beginning
    if os.path.isfile('current_benchmark.txt'):
        with open('current_benchmark.txt', 'r') as interrupted_benchmark:
            interrupt_bench = interrupted_benchmark.read()
            for index, atest in enumerate(tests):
                if atest.name == interrupt_bench:
                    tests = tests[index:]
                    break
    return tests
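
The resume step at the end of this example trims the sorted test list so that an interrupted run restarts at the test named in current_benchmark.txt. A minimal standalone sketch of that trimming logic, using hypothetical test names instead of the real file:

def resume_from(test_names, interrupted):
    # Drop everything before the interrupted test; if it is not found,
    # the list is returned unchanged and the run starts from the beginning.
    for index, name in enumerate(test_names):
        if name == interrupted:
            return test_names[index:]
    return test_names

names = ['examplefw-default', 'examplefw-mysql', 'otherfw-default']
print(resume_from(names, 'examplefw-mysql'))  # ['examplefw-mysql', 'otherfw-default']
print(resume_from(names, 'missing-test'))     # full list, unchanged
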
Example #2
    def __gather_tests(self):
        tests = []

        # Assume we are running from FrameworkBenchmarks
        config_files = glob.glob('*/benchmark_config')

        for config_file_name in config_files:
            # Look for the benchmark_config file, this will set up our tests.
            # Its format looks like this:
            #
            # {
            #   "framework": "nodejs",
            #   "tests": [{
            #     "default": {
            #       "setup_file": "setup",
            #       "json_url": "/json"
            #     },
            #     "mysql": {
            #       "setup_file": "setup",
            #       "db_url": "/mysql",
            #       "query_url": "/mysql?queries="
            #     },
            #     ...
            #   }]
            # }
            config = None

            with open(config_file_name, 'r') as config_file:
                # Load json file into config object
                try:
                    config = json.load(config_file)
                except ValueError:
                    print("Error loading '%s'." % config_file_name)
                    raise

            if config is None:
                continue

            test = framework_test.parse_config(
                config, os.path.dirname(config_file_name), self)
            # If the user specified which tests to run, then
            # we can skip over tests that are not in that list
            if self.test is None:
                tests = tests + test
            else:
                for atest in test:
                    if atest.name in self.test:
                        tests.append(atest)

        tests.sort(key=lambda x: x.name)
        return tests
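
The comment block in these first examples documents the benchmark_config JSON layout. Below is a small sketch of a config in that layout and of loading it the same way the code above does; the framework name and URLs are hypothetical:

import json

example_config = '''
{
  "framework": "examplefw",
  "tests": [{
    "default": {
      "setup_file": "setup",
      "json_url": "/json"
    },
    "mysql": {
      "setup_file": "setup",
      "db_url": "/mysql",
      "query_url": "/mysql?queries="
    }
  }]
}
'''

config = json.loads(example_config)
# framework_test.parse_config later turns each named entry under "tests"
# into a FrameworkTest (e.g. "examplefw-default", "examplefw-mysql").
for variants in config['tests']:
    for name, urls in sorted(variants.items()):
        print("%s: %s" % (name, urls.get('json_url') or urls.get('db_url')))
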
Example #3
  def __gather_tests(self):
    tests = []

    # Assume we are running from FrameworkBenchmarks
    config_files = glob.glob('*/benchmark_config')

    for config_file_name in config_files:
      # Look for the benchmark_config file, this will set up our tests.
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      config = None

      with open(config_file_name, 'r') as config_file:
        # Load json file into config object
        try:
          config = json.load(config_file)
        except ValueError:
          print("Error loading '%s'." % config_file_name)
          raise

      if config is None:
        continue

      test = framework_test.parse_config(config, os.path.dirname(config_file_name), self)
      # If the user specified which tests to run, then 
      # we can skip over tests that are not in that list
      if self.test is None:
        tests = tests + test
      else:
        for atest in test:
          if atest.name in self.test:
            tests.append(atest)

    tests.sort(key=lambda x: x.name)
    return tests
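
The self.test check above is the only filtering these early versions do: with no test list every parsed test is kept, otherwise only the named ones are. A standalone sketch of that logic with toy objects (the names are hypothetical):

class FakeTest(object):
    def __init__(self, name):
        self.name = name

def filter_tests(parsed, requested):
    if requested is None:
        return list(parsed)                       # no filter, keep everything
    return [t for t in parsed if t.name in requested]

parsed = [FakeTest('examplefw-default'), FakeTest('examplefw-mysql')]
print([t.name for t in filter_tests(parsed, None)])                 # both tests
print([t.name for t in filter_tests(parsed, ['examplefw-mysql'])])  # one test
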
Example #4
    def __gather_tests(self):
        tests = []
        # Loop through each directory (we assume we're being run from the benchmarking root)
        # and look for the files that signify a benchmark test
        for dirname, dirnames, filenames in os.walk('.'):
            # Look for the benchmark_config file, this will set up our tests.
            # Its format looks like this:
            #
            # {
            #   "framework": "nodejs",
            #   "tests": [{
            #     "default": {
            #       "setup_file": "setup",
            #       "json_url": "/json"
            #     },
            #     "mysql": {
            #       "setup_file": "setup",
            #       "db_url": "/mysql",
            #       "query_url": "/mysql?queries="
            #     },
            #     ...
            #   }]
            # }
            if 'benchmark_config' in filenames:
                config = None
                config_file_name = os.path.join(dirname, 'benchmark_config')

                with open(config_file_name, 'r') as config_file:
                    # Load json file into config object
                    try:
                        config = json.load(config_file)
                    except ValueError:
                        print("Error loading '%s'." % config_file_name)
                        raise

                if config is None:
                    continue

                test = framework_test.parse_config(config, dirname[2:], self)
                # If the user specified which tests to run, then
                # we can skip over tests that are not in that list
                if self.test is None:
                    tests = tests + test
                else:
                    for atest in test:
                        if atest.name in self.test:
                            tests.append(atest)

        tests.sort(key=lambda x: x.name)
        return tests
Example #5
  def __gather_tests(self):
    tests = []
    # Loop through each directory (we assume we're being run from the benchmarking root)
    # and look for the files that signify a benchmark test
    for dirname, dirnames, filenames in os.walk('.'):
      # Look for the benchmark_config file, this will set up our tests.
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      if 'benchmark_config' in filenames:
        config = None
        config_file_name = os.path.join(dirname, 'benchmark_config')

        with open(config_file_name, 'r') as config_file:
          # Load json file into config object
          try:
            config = json.load(config_file)
          except ValueError:
            print("Error loading '%s'." % config_file_name)
            raise

        if config is None:
          continue

        test = framework_test.parse_config(config, dirname[2:], self)
        # If the user specified which tests to run, then 
        # we can skip over tests that are not in that list
        if self.test is None:
          tests = tests + test
        else:
          for atest in test:
            if atest.name in self.test:
              tests.append(atest)

    tests.sort(key=lambda x: x.name)
    return tests
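
Examples #1 to #3 find configs with a one-level glob, while examples #4 and #5 walk the whole tree and strip the leading './' with dirname[2:]. A sketch comparing the two discovery strategies; it assumes it is run from a directory that may contain framework subdirectories:

import glob
import os

# Strategy 1: one-level glob, as in examples #1-#3.
glob_dirs = [os.path.dirname(p) for p in glob.glob('*/benchmark_config')]

# Strategy 2: full walk, as in examples #4-#5; dirname[2:] turns './foo' into 'foo'.
walk_dirs = []
for dirname, dirnames, filenames in os.walk('.'):
    if 'benchmark_config' in filenames:
        walk_dirs.append(dirname[2:])

# For a flat layout the two lists match; os.walk also finds configs
# nested more than one directory deep.
print(sorted(glob_dirs))
print(sorted(walk_dirs))
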
Example #6
  def gather_tests(self):
    ''' Returns all available tests as FrameworkTest list '''

    # Fake benchmarker fields that are used
    class bench_shim():
      def __init__(self):
        self.type = 'all'
        self.fwroot = os.getcwd()
        self.install_strategy = 'pertest'

    # Gather all tests
    tests = []
    for config_file_name in glob.glob('*/benchmark_config'):
      with open(config_file_name, 'r') as config_file:
        config = json.load(config_file)
        test = framework_test.parse_config(config, os.path.dirname(config_file_name), bench_shim())
        tests = tests + test
    tests.sort(key=lambda x: x.name)
    return tests
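
The bench_shim class above fakes only the benchmarker attributes that parse_config reads, so tests can be gathered without building a full Benchmarker. A generic sketch of that stand-in pattern; the attribute names mirror the shim above, and the describe helper is hypothetical:

import os

class BenchShim(object):
    # Only the attributes the called code actually reads need to exist.
    def __init__(self):
        self.type = 'all'
        self.fwroot = os.getcwd()
        self.install_strategy = 'pertest'

def describe(benchmarker):
    # Hypothetical stand-in for framework_test.parse_config, which only
    # reads attributes from the object it is given.
    return "type=%s root=%s" % (benchmarker.type, benchmarker.fwroot)

print(describe(BenchShim()))
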
Example #7
    def __gather_tests(self):
        tests = []
        # Loop through each directory (we assume we're being run from the benchmarking root)
        # and look for the files that signify a benchmark test
        for dirname, dirnames, filenames in os.walk('.'):
            # Look for the benchmark_config file, this will set up our tests.
            # Its format looks like this:
            #
            # {
            #   "framework": "nodejs",
            #   "tests": [{
            #     "default": {
            #       "setup_file": "setup",
            #       "json_url": "/json"
            #     },
            #     "mysql": {
            #       "setup_file": "setup",
            #       "db_url": "/mysql",
            #       "query_url": "/mysql?queries="
            #     },
            #     ...
            #   }]
            # }
            if 'benchmark_config' in filenames:
                config = None
                with open(os.path.join(dirname, 'benchmark_config'),
                          'r') as config_file:
                    # Load json file into config object
                    config = json.load(config_file)

                if config is None:
                    continue

                tests = tests + framework_test.parse_config(
                    config, dirname[2:], self)

        tests.sort(key=lambda x: x.sort)
        return tests
Example #8
  def __gather_tests(self):
    tests = []
    # Loop through each directory (we assume we're being run from the benchmarking root)
    # and look for the files that signify a benchmark test
    for dirname, dirnames, filenames in os.walk('.'):
      # Look for the benchmark_config file, this will set up our tests.
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      if 'benchmark_config' in filenames:
        config = None
        with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
          # Load json file into config object
          config = json.load(config_file)

        if config is None:
          continue

        tests = tests + framework_test.parse_config(config, dirname[2:], self)

    tests.sort(key=lambda x: x.sort)
    return tests
Example #9
def gather_tests(include = [], exclude=[], benchmarker=None):
  '''
  Given test names as strings, returns a list of FrameworkTest objects. 
  For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
  variables for checking the test directory, the test database OS, and
  other useful items. 

  With no arguments, every test in this framework will be returned.  
  With include, only tests with this exact name will be returned. 
  With exclude, all tests but those excluded will be returned. 

  A benchmarker is needed to construct full FrameworkTest objects. If
  one is not provided, a default Benchmarker will be created. 
  '''

  # Avoid setting up a circular import
  from benchmark import framework_test
  from benchmark.benchmarker import Benchmarker
  from setup.linux import setup_util

  # Help callers out a bit
  if include is None:
    include = []
  if exclude is None:
    exclude = []
  
  # Setup default Benchmarker using example configuration
  if benchmarker is None:
    print "Creating Benchmarker from benchmark.cfg.example"
    default_config = setup_util.get_fwroot() + "/benchmark.cfg.example"
    config = ConfigParser.SafeConfigParser()
    config.readfp(open(default_config))
    defaults = dict(config.items("Defaults"))
    
    # Convert strings into proper python types
    for k,v in defaults.iteritems():
      try:
        defaults[k] = literal_eval(v)
      except Exception:
        pass

    # Ensure we only run the __init__ method of Benchmarker
    defaults['install'] = None
    
    benchmarker = Benchmarker(defaults)

  
  # Search in both old and new directories
  fwroot = setup_util.get_fwroot() 
  config_files = glob.glob("%s/*/benchmark_config" % fwroot) 
  config_files.extend(glob.glob("%s/frameworks/*/*/benchmark_config" % fwroot))
  
  tests = []
  for config_file_name in config_files:
    config = None
    with open(config_file_name, 'r') as config_file:
      try:
        config = json.load(config_file)
      except ValueError:
        # User-friendly errors
        print("Error loading '%s'." % config_file_name)
        raise

    # Find all tests in the config file
    config_tests = framework_test.parse_config(config, 
      os.path.dirname(config_file_name), benchmarker)
    
    # Filter
    for test in config_tests:
      if test.name in exclude:
        continue
      elif len(include) == 0 or test.name in include:
        tests.append(test)

  tests.sort(key=lambda x: x.name)
  return tests
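
The filter at the end of this version gives exclude priority and treats an empty include list as "keep everything". A standalone sketch of those semantics with hypothetical test names:

def filter_names(names, include=None, exclude=None):
    include = include or []
    exclude = exclude or []
    kept = []
    for name in names:
        if name in exclude:
            continue                                  # exclude always wins
        elif len(include) == 0 or name in include:
            kept.append(name)                         # empty include keeps everything
    return kept

names = ['aspnet-mysql-raw', 'examplefw-default', 'examplefw-mysql']
print(filter_names(names))                                   # all three
print(filter_names(names, include=['aspnet-mysql-raw']))     # only the included one
print(filter_names(names, exclude=['examplefw-mysql']))      # the other two
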
Example #10
def gather_tests(include=[], exclude=[], benchmarker=None):
    '''
    Given test names as strings, returns a list of FrameworkTest objects.
    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
    variables for checking the test directory, the test database OS, and
    other useful items.

    With no arguments, every test in this framework will be returned.
    With include, only tests with this exact name will be returned.
    With exclude, all tests but those excluded will be returned.

    A benchmarker is needed to construct full FrameworkTest objects. If
    one is not provided, a default Benchmarker will be created.
    '''

    # Avoid setting up a circular import
    from benchmark import framework_test
    from benchmark.benchmarker import Benchmarker
    from setup.linux import setup_util

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Old, hacky method to exclude all tests was to
    # request a test known to not exist, such as ''.
    # If test '' was requested, short-circuit and return
    # nothing immediately
    if len(include) == 1 and '' in include:
        return []

    # Setup default Benchmarker using example configuration
    if benchmarker is None:
        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
        config = ConfigParser.SafeConfigParser()
        config.readfp(open(default_config))
        defaults = dict(config.items("Defaults"))

        # Convert strings into proper python types
        for k, v in defaults.iteritems():
            try:
                defaults[k] = literal_eval(v)
            except Exception:
                pass

        # Ensure we only run the __init__ method of Benchmarker
        defaults['install'] = None
        defaults['results_name'] = "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
        defaults['results_environment'] = "My Server Environment"
        defaults['test_dir'] = None
        defaults['test_lang'] = None
        defaults['quiet'] = True

        benchmarker = Benchmarker(defaults)

    # Search for configuration files
    fwroot = setup_util.get_fwroot()
    config_files = []

    if benchmarker.test_lang:
        benchmarker.test_dir = []
        for lang in benchmarker.test_lang:
            if os.path.exists("{!s}/frameworks/{!s}".format(fwroot, lang)):
                for test_dir in os.listdir("{!s}/frameworks/{!s}".format(
                        fwroot, lang)):
                    benchmarker.test_dir.append("{!s}/{!s}".format(
                        lang, test_dir))
            else:
                raise Exception(
                    "Unable to locate language directory: {!s}".format(lang))

    if benchmarker.test_dir:
        for test_dir in benchmarker.test_dir:
            dir_config_files = glob.glob(
                "{!s}/frameworks/{!s}/benchmark_config.json".format(
                    fwroot, test_dir))
            if len(dir_config_files):
                config_files.extend(dir_config_files)
            else:
                raise Exception(
                    "Unable to locate tests in test-dir: {!s}".format(
                        test_dir))
    else:
        config_files.extend(
            glob.glob(
                "{!s}/frameworks/*/*/benchmark_config.json".format(fwroot)))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except ValueError:
                # User-friendly errors
                print("Error loading '{!s}'.".format(config_file_name))
                raise

        # Find all tests in the config file
        config_tests = framework_test.parse_config(
            config, os.path.dirname(config_file_name), benchmarker)

        # Filter
        for test in config_tests:
            if len(include) == 0 and len(exclude) == 0:
                # No filters, we are running everything
                tests.append(test)
            elif test.name in exclude:
                continue
            elif test.name in include:
                tests.append(test)
            else:
                # An include list exists, but this test is
                # not listed there, so we ignore it
                pass

    # Ensure we were able to locate everything that was
    # explicitly included
    if 0 != len(include):
        names = {test.name for test in tests}
        if 0 != len(set(include) - set(names)):
            missing = list(set(include) - set(names))
            raise Exception("Unable to locate tests %s" % missing)

    tests.sort(key=lambda x: x.name)
    return tests
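
This version adds a final check: every name that was explicitly included must correspond to a test that was actually located, otherwise the run aborts. A sketch of that validation step in isolation, with toy data:

def check_includes(include, located_names):
    missing = list(set(include) - set(located_names))
    if len(include) != 0 and len(missing) != 0:
        raise Exception("Unable to locate tests %s" % missing)

# All included tests were found: no error.
check_includes(['examplefw-default'], ['examplefw-default', 'examplefw-mysql'])

# A typo in the requested test name surfaces immediately instead of
# silently running nothing.
try:
    check_includes(['examplefw-defualt'], ['examplefw-default'])
except Exception as e:
    print(e)   # Unable to locate tests ['examplefw-defualt']
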
Example #11
def gather_tests(include = [], exclude=[], benchmarker=None):
    '''
    Given test names as strings, returns a list of FrameworkTest objects.
    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
    variables for checking the test directory, the test database OS, and
    other useful items.

    With no arguments, every test in this framework will be returned.
    With include, only tests with this exact name will be returned.
    With exclude, all tests but those excluded will be returned.

    A benchmarker is needed to construct full FrameworkTest objects. If
    one is not provided, a default Benchmarker will be created.
    '''

    # Avoid setting up a circular import
    from benchmark import framework_test
    from benchmark.benchmarker import Benchmarker
    from setup.linux import setup_util

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Old, hacky method to exclude all tests was to
    # request a test known to not exist, such as ''.
    # If test '' was requested, short-circuit and return
    # nothing immediately
    if len(include) == 1 and '' in include:
        return []

    # Setup default Benchmarker using example configuration
    if benchmarker is None:
        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
        config = ConfigParser.SafeConfigParser()
        config.readfp(open(default_config))
        defaults = dict(config.items("Defaults"))

        # Convert strings into proper python types
        for k,v in defaults.iteritems():
            try:
                defaults[k] = literal_eval(v)
            except Exception:
                pass

        # Ensure we only run the __init__ method of Benchmarker
        defaults['install'] = None
        defaults['results_name'] = "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
        defaults['results_environment'] = "My Server Environment"
        defaults['test_dir'] = None
        defaults['quiet'] = True

        benchmarker = Benchmarker(defaults)


    # Search for configuration files
    fwroot = setup_util.get_fwroot()
    config_files = []
    if benchmarker.test_dir:
        for test_dir in benchmarker.test_dir:
            dir_config_files = glob.glob("{!s}/frameworks/{!s}/benchmark_config.json".format(fwroot, test_dir))
            if len(dir_config_files):
                config_files.extend(dir_config_files)
            else:
                raise Exception("Unable to locate tests in test-dir: {!s}".format(test_dir))
    else:
        config_files.extend(glob.glob("{!s}/frameworks/*/*/benchmark_config.json".format(fwroot)))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except ValueError:
                # User-friendly errors
                print("Error loading '{!s}'.".format(config_file_name))
                raise

        # Find all tests in the config file
        config_tests = framework_test.parse_config(config,
                                                   os.path.dirname(config_file_name), benchmarker)

        # Filter
        for test in config_tests:
            if len(include) == 0 and len(exclude) == 0:
                # No filters, we are running everything
                tests.append(test)
            elif test.name in exclude:
                continue
            elif test.name in include:
                tests.append(test)
            else:
                # An include list exists, but this test is
                # not listed there, so we ignore it
                pass

    # Ensure we were able to locate everything that was
    # explicitly included
    if 0 != len(include):
        names = {test.name for test in tests}
        if 0 != len(set(include) - set(names)):
            missing = list(set(include) - set(names))
            raise Exception("Unable to locate tests %s" % missing)

    tests.sort(key=lambda x: x.name)
    return tests
Example #12
def gather_tests(include=[], exclude=[], benchmarker=None):
    '''
    Given test names as strings, returns a list of FrameworkTest objects.
    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
    variables for checking the test directory, the test database OS, and
    other useful items.

    With no arguments, every test in this framework will be returned.
    With include, only tests with this exact name will be returned.
    With exclude, all tests but those excluded will be returned.

    A benchmarker is needed to construct full FrameworkTest objects. If
    one is not provided, a default Benchmarker will be created.
    '''

    # Avoid setting up a circular import
    from benchmark import framework_test
    from benchmark.benchmarker import Benchmarker
    from setup.linux import setup_util

    # Help callers out a bit
    if include is None:
        include = []
    if exclude is None:
        exclude = []

    # Setup default Benchmarker using example configuration
    if benchmarker is None:
        default_config = setup_util.get_fwroot() + "/benchmark.cfg.example"
        config = ConfigParser.SafeConfigParser()
        config.readfp(open(default_config))
        defaults = dict(config.items("Defaults"))

        # Convert strings into proper python types
        for k, v in defaults.iteritems():
            try:
                defaults[k] = literal_eval(v)
            except Exception:
                pass

        # Ensure we only run the __init__ method of Benchmarker
        defaults['install'] = None

        benchmarker = Benchmarker(defaults)

    # Search in both old and new directories
    fwroot = setup_util.get_fwroot()
    config_files = glob.glob("%s/*/benchmark_config" % fwroot)
    config_files.extend(
        glob.glob("%s/frameworks/*/*/benchmark_config" % fwroot))

    tests = []
    for config_file_name in config_files:
        config = None
        with open(config_file_name, 'r') as config_file:
            try:
                config = json.load(config_file)
            except ValueError:
                # User-friendly errors
                print("Error loading '%s'." % config_file_name)
                raise

        # Find all tests in the config file
        config_tests = framework_test.parse_config(
            config, os.path.dirname(config_file_name), benchmarker)

        # Filter
        for test in config_tests:
            if test.name in exclude:
                continue
            elif len(include) == 0 or test.name in include:
                tests.append(test)

    tests.sort(key=lambda x: x.name)
    return tests
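
The last four versions all read benchmark.cfg defaults through ConfigParser, which yields strings, and then coerce them with literal_eval. A sketch of just that coercion step; the keys and values below are hypothetical, not taken from the real config file:

from ast import literal_eval

defaults = {
    'duration': '60',              # becomes int 60
    'quiet': 'True',               # becomes bool True
    'client_host': "'127.0.0.1'",  # quoted literal becomes the plain string
    'results_environment': 'My Server Environment',  # not a literal, left alone
}

for k, v in list(defaults.items()):
    try:
        defaults[k] = literal_eval(v)
    except (ValueError, SyntaxError):
        pass                       # plain strings stay as strings

print(defaults)
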