Example #1
  def test_get_benchmark_methods_exact_match(self, mock_setup):
    """Tests returning methods on a class based on a filter."""
    config = mock.Mock()
    config.python_paths_str = None
    config.benchmark_methods_str = 'benchmark_method_1,benchmark_method_2'
    config.benchmark_class_str = 'new_foo.BenchmarkClass'
    benchmark_runner = benchmark.BenchmarkRunner(config)

    methods = benchmark_runner._get_benchmark_methods()
    self.assertEqual(['benchmark_method_1', 'benchmark_method_2'], methods)
Example #2
 def test_load_test_class(self):
     """Tests ok_to_run not finding existing processes."""
     sys.modules['foo.fake'] = Mock()
     os.environ['ROGUE_TEST_CLASS'] = 'foo.fake.TestClass'
     os.environ['ROGUE_TEST_METHODS'] = 'TestMethod'
     os.environ['ROGUE_PYTHON_PATH'] = 'models'
     benchmark_runner = benchmark.BenchmarkRunner()
     class_ = benchmark_runner._load_test_class('/dev/null')
     self.assertIsInstance(class_.oss_report_object,
                           type(benchmark_result.BenchmarkResult()))
Example #3
  def test_load_benchmark_class(self, mock_setup):
    """Test loading module and test class."""

    # foo.fake is not found unless foo and fake are mocked.
    sys.modules['foo'] = mock.Mock()
    sys.modules['foo.fake'] = mock.Mock()
    config = mock.Mock()
    config.benchmark_class_str = 'foo.fake.TestClass'
    config.python_paths_str = None
    benchmark_runner = benchmark.BenchmarkRunner(config)
    class_instance = benchmark_runner._instantiate_benchmark_class('/dev/null')
    mock_setup.assert_called()
Example #4
  def test_get_benchmark_methods_exact_match(self):
    """Tests returning methods on a class based on a filter."""
    config = mock.Mock()
    config.workspace = 'workspace'
    config.benchmark_method_patterns = [
        'new_foo.BenchmarkClass.benchmark_method_1',
        'new_foo.BenchmarkClass.benchmark_method_2']
    benchmark_runner = benchmark.BenchmarkRunner(config)

    methods = benchmark_runner._get_benchmark_methods()
    self.assertEqual(['new_foo.BenchmarkClass.benchmark_method_1',
                      'new_foo.BenchmarkClass.benchmark_method_2'], methods)
Example #5
  def test_get_benchmark_methods_filter(self):
    """Tests returning methods on a class based on a filter."""
    config = mock.Mock()
    config.workspace = 'workspace'
    config.benchmark_method_patterns = ['new_foo.BenchmarkClass.filter:bench.*']
    benchmark_runner = benchmark.BenchmarkRunner(config)

    mock_benchmark_class = mock.Mock()
    mock_benchmark_class.benchmark_method_1 = 'foo'

    mock_module = mock.Mock()
    sys.modules['new_foo'] = mock_module
    mock_module.BenchmarkClass.return_value = mock_benchmark_class

    methods = benchmark_runner._get_benchmark_methods()

    self.assertEqual(1, len(methods))
    self.assertEqual('new_foo.BenchmarkClass.benchmark_method_1', methods[0])
Example #6
  def test_get_benchmark_methods_filter(self, mock_setup):
    """Tests returning methods on a class based on a filter."""
    config = mock.Mock()
    config.python_paths_str = None
    config.benchmark_methods_str = 'filter:bench.*'
    config.benchmark_class_str = 'new_foo.BenchmarkClass'
    benchmark_runner = benchmark.BenchmarkRunner(config)

    mock_benchmark_class = mock.Mock()
    mock_benchmark_class.benchmark_method_1 = 'foo'

    mock_module = mock.Mock()
    sys.modules['new_foo'] = mock_module
    mock_module.BenchmarkClass.return_value = mock_benchmark_class

    methods = benchmark_runner._get_benchmark_methods()

    self.assertEqual(1, len(methods))
    self.assertEqual('benchmark_method_1', methods[0])
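
Examples #5 and #6 both exercise expansion of a 'filter:<regex>' pattern against the attributes of the benchmark class. The following is a minimal sketch, not the actual BenchmarkRunner implementation, of how such expansion could work: the class is imported and instantiated, and the regex is matched against its attribute names. The function name expand_filter_pattern and its signature are illustrative assumptions; the output format follows the fully qualified names expected in Example #5 (the older config in Example #6 returns bare method names instead).

import importlib
import re


def expand_filter_pattern(pattern):
  """Expands 'module.Class.filter:<regex>' into qualified method names."""
  class_path, _, regex = pattern.partition('.filter:')
  module_name, class_name = class_path.rsplit('.', 1)
  # The tests above pre-populate sys.modules, so this import resolves to the mock.
  module = importlib.import_module(module_name)
  instance = getattr(module, class_name)()
  # Keep only attributes whose names match the filter regex.
  return ['%s.%s' % (class_path, name)
          for name in dir(instance)
          if re.match(regex, name)]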
Example #7
                job.run()
                job.wait()
        except Exception, e:
            if (benchmark.DEBUG):
                traceback.print_exc()
            else:
                print "EEK: %s" % e

    def close(self):
        benchmark.info("Cleaning up")
        #defaultSession = org.ogf.saga.session.SessionFactory.createSession(True)
        #defaultSession.close()


if __name__ == "__main__":
    if (len(sys.argv) < 5):
        print "usage: jysaga %s <jobservice-url> <#runs> <#times> <executable> [arg]*" \
          % sys.argv[0]
        os._exit(1)

    js_url = sys.argv[1]
    runs = int(sys.argv[2])
    times = int(sys.argv[3])
    ex = sys.argv[4]
    # Any remaining arguments are passed through to the executable.
    args = []
    if (len(sys.argv) > 5):
        args = sys.argv[5:]

    b = SagaJobBenchmark(js_url, times, ex, args)
    runner = benchmark.BenchmarkRunner(b, runs)
    runner.run()