def get_bench_multisource_git(bench):
    benchmarking_api = jba.JubeBenchmarkingAPI(bench, "")
    path = os.path.join(test_env.config['run_path'], bench)
    if not os.path.exists(path):
        os.makedirs(path)
    benchmarking_api.jube_xml_files.add_bench_input()
    benchmarking_api.jube_xml_files.write_bench_xml()
Example #2
def test_benchmark_no_exist(init_env):
    """ docstring """

    # pylint: disable=redefined-outer-name, unused-argument, unused-variable

    with pytest.raises(OSError):
        benchmarking_api = jba.JubeBenchmarkingAPI("bench_name", "")
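
The init_env fixture (and the module-level test_env object used in the first example) is defined elsewhere in the test suite. The sketch below is inferred only from the attributes these examples use (config['run_path'], create_run_dir() and create_empty_bench()); the class name, the use of pytest's tmp_path, and the file locations are assumptions.

import os

import pytest


class FakeTestEnv(object):
    """Hypothetical stand-in for the test environment object."""

    def __init__(self, run_path):
        self.config = {'run_path': str(run_path)}

    def create_run_dir(self, bench_name):
        # Per-benchmark run directory expected by write_bench_xml().
        os.makedirs(os.path.join(self.config['run_path'], bench_name))

    def create_empty_bench(self):
        # Empty benchmark file so JubeBenchmarkingAPI raises ET.ParseError;
        # the file name and location are assumptions.
        open(os.path.join(self.config['run_path'], 'test_bench.xml'), 'w').close()


@pytest.fixture
def init_env(tmp_path):
    # tmp_path is pytest's built-in temporary directory fixture.
    return FakeTestEnv(tmp_path)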
Example #3
def test_add_bench_input():
    """ docstring """

    # pylint: disable=unused-variable

    # check that the _revision prefixes are coherent
    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    bench_input = benchmarking_api.jube_xml_files.add_bench_input()
    multisource = benchmarking_api.jube_xml_files.get_bench_multisource()
    max_files = max([len(source['files']) for source in multisource])
    bench_xml = benchmarking_api.jube_xml_files.bench_xml['simple.xml']
    benchmark = bench_xml.find('benchmark')
    assert benchmark.findall("parameterset[@name='ubench_config']")
    bench_config = benchmark.find("parameterset[@name='ubench_config']")
    assert bench_config.findall("parameter[@name='stretch']")
    assert bench_config.findall("parameter[@name='stretch_id']")
    # assert len(bench_config.findall("parameter[@name='input']")) > 0
    # assert len(bench_config.findall("parameter[@name='input_id']")) > 0
    simple_code_count = 0
    input_count = 0
    for param in bench_config.findall("parameter"):
        if "simple_code_revision" in param.text:
            simple_code_count += 1
        if "input_revision" in param.text:
            input_count += 1
    assert simple_code_count < max_files
    assert input_count < max_files
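
For reference, the assertions above only rely on get_bench_multisource() returning a list of source dictionaries that each carry a 'files' list. A hypothetical return value consistent with that usage (every key other than 'files' is an assumption) would be:

multisource = [
    {'protocol': 'git', 'name': 'simple_code', 'files': ['simple_code.tar.gz']},
    {'protocol': 'git', 'name': 'input', 'files': ['input1.tar.gz', 'input2.tar.gz']},
]
max_files = max(len(source['files']) for source in multisource)  # -> 2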
Example #4
def test_custom_nodes(init_env):
    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    benchmarking_api.set_custom_nodes([1, 2], ['cn050', 'cn[103-107,145]'])
    benchmarking_api.jube_xml_files.write_bench_xml()
    xml_file = ET.parse(
        os.path.join(init_env.config['run_path'], "simple/simple.xml"))
    benchmark = xml_file.find('benchmark')
    assert len(benchmark.findall("parameterset[@name='custom_parameter']")) > 0
Example #5
def test_xml_get_result_file(init_env):
    """ docstring """

    # pylint: disable=redefined-outer-name, unused-argument

    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    assert benchmarking_api.jube_xml_files.get_bench_resultfile(
    ) == "result.dat"
Example #6
def test_result_custom_nodes(init_env):
    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    benchmarking_api.set_custom_nodes([1, 2], ['cn050', 'cn[103-107,145]'])
    benchmarking_api.jube_xml_files.write_bench_xml()
    xml_file = ET.parse(
        os.path.join(init_env.config['run_path'], "simple/simple.xml"))
    benchmark = xml_file.find('benchmark')
    table = benchmark.find('result').find('table')
    result = [column for column in table.findall('column')
              if column.text == 'custom_nodes_id']
    assert len(result) > 0
Example #7
def test_out_xml_path(init_env):
    """ docstring """

    # pylint: disable=superfluous-parens, redefined-outer-name

    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    print(init_env.config['run_path'])
    assert benchmarking_api.jube_xml_files.bench_xml_path_out == os.path.join(
        init_env.config['run_path'], "simple")
Example #8
def test_custom_nodes_not_in_result(init_env):
    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    benchmarking_api.set_custom_nodes([1, 2], None)
    benchmarking_api.jube_xml_files.write_bench_xml()
    xml_file = ET.parse(
        os.path.join(init_env.config['run_path'], "simple/simple.xml"))
    benchmark = xml_file.find('benchmark')
    table = benchmark.find('result').find('table')
    for column in table.findall('column'):
        assert column.text != 'custom_nodes_id'
Example #9
def test_write_bench_xml(init_env):
    """ docstring """

    # pylint: disable=redefined-outer-name, singleton-comparison

    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    init_env.create_run_dir("simple")
    benchmarking_api.jube_xml_files.write_bench_xml()
    assert os.path.exists(os.path.join(init_env.config['run_path'],
                                       "simple")) == True
Example #10
def test_custom_nodes(init_env):
    """ docstring """

    # pylint: disable=redefined-outer-name, no-member,c-extension-no-member

    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    benchmarking_api.set_custom_nodes([1, 2], ['cn050', 'cn[103-107,145]'])
    benchmarking_api.jube_xml_files.write_bench_xml()
    xml_file = ET.parse(
        os.path.join(init_env.config['run_path'], "simple/simple.xml"))
    benchmark = xml_file.find('benchmark')
    assert benchmark.findall("parameterset[@name='custom_parameter']")
Example #11
def test_custom_nodes_not_in_result(init_env):
    """ docstring """

    # pylint: disable=redefined-outer-name, no-member,c-extension-no-member

    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    benchmarking_api.set_custom_nodes([1, 2], None)
    benchmarking_api.jube_xml_files.write_bench_xml()
    xml_file = ET.parse(
        os.path.join(init_env.config['run_path'], "simple/simple.xml"))
    benchmark = xml_file.find('benchmark')
    table = benchmark.find('result').find('table')
    for column in table.findall('column'):
        assert column.text != 'custom_nodes_id'
Example #12
def test_result(mocker, mock_os_methods, jube_info_files):
    """It test the generation of benchmark data.

    It mainly test the method _write_bench_data
    """
    mock_file = MockFile(jube_info_files)
    # Unclebench mocks
    mocker.patch(".".join(MOCK_XML), side_effect=mockxmlparser)
    mocker.patch(".".join(MOCK_JAPI + ["JubeBenchmarkingAPI", "_analyse"]))
    mocker.patch(".".join(MOCK_JAPI +
                          ["JubeBenchmarkingAPI", "_extract_results"]))
    mock_data_write = mocker.patch(".".join(MOCK_DATA +
                                            ["DataStoreYAML", "write"]))

    # STD lib mocks
    mocker.patch("tempfile.TemporaryFile", side_effect=mock_file.jube_file)
    mocker.patch(".".join(MOCK_JAPI + ["Popen"]))
    mocker.patch(".".join(MOCK_JAPI + ["open"]),
                 side_effect=mock_file.results_file)

    jube_api = jba.JubeBenchmarkingAPI('test', 'platform')

    jube_api.result(0)
    metadata, context, r_file = mock_data_write.call_args.args
    print(metadata)
    print("=" * 45)
    print(context)
    print("=" * 45)
    print(r_file)
    print("=" * 45)
    assert '1' in context
    assert 'results_bench' in context['1']
    assert context['1']['results_bench'] == {
        'p_pat_min': '9',
        'p_pat_max': '11',
        'p_pat_avg': '10'
    }
    assert context['5']['results_bench'] == {
        'p_pat_min': '45',
        'p_pat_max': '51',
        'p_pat_avg': '50'
    }
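
The MOCK_XML, MOCK_JAPI and MOCK_DATA constants and the MockFile helper are defined elsewhere in the test module. The sketch below shows the pattern they appear to follow; the module paths and the file handling are purely hypothetical.

# Hypothetical dotted-path components; mocker.patch() receives
# ".".join(MOCK_JAPI + ["Popen"]) and therefore patches the name as it is
# imported inside the target module.
MOCK_JAPI = ["ubench", "benchmarking_api", "jube_benchmarking_api"]          # assumption
MOCK_XML = ["ubench", "benchmarking_api", "jube_xml_input", "XMLParser"]     # assumption
MOCK_DATA = ["ubench", "data_store", "data_store_yaml"]                      # assumption


class MockFile(object):
    """Hypothetical helper serving canned JUBE output files to the test."""

    def __init__(self, jube_info_files):
        # e.g. {'jube_info': '<path to canned jube info>', 'results': '<path>'}
        self._files = jube_info_files

    def jube_file(self, *args, **kwargs):
        # Stand-in for tempfile.TemporaryFile().
        return open(self._files['jube_info'])

    def results_file(self, *args, **kwargs):
        # Stand-in for the patched open().
        return open(self._files['results'])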
Example #13
def test_run(mocker, mock_os_methods):
    """Test run method of JubeBenchmarkingAPI"""
    def mockanalyse(bench_id):
        rand = str(time.time())
        fake_id = int(rand[9])
        return fake_id, '000001'

    mocker.patch(".".join(MOCK_XML), side_effect=mockxmlparser)

    mocker.patch(".".join(MOCK_UTILS + ["Popen"]), side_effect=mockpopen)

    mocker.patch(".".join(MOCK_JAPI + ["JubeBenchmarkingAPI", "get_max_id"]),
                 side_effect=mockanalyse)

    jube_api = jba.JubeBenchmarkingAPI('test', 'platform')
    j_job, updated_params = jube_api.run({
        'w': "",
        'execute': False,
        'custom_params': {}
    })

    assert isinstance(j_job.jubeid, int)
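
mockpopen and mockxmlparser are likewise helpers defined outside this snippet. A minimal mockpopen, assuming run() only needs a successful process object with empty output, could look like this:

from unittest.mock import MagicMock


def mockpopen(*args, **kwargs):
    """Hypothetical Popen replacement returning a canned, successful process."""
    process = MagicMock()
    process.returncode = 0
    process.communicate.return_value = (b"", b"")
    process.wait.return_value = 0
    process.stdout = iter([b""])  # nothing to read
    return process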
Example #14
def test_init():
    """ docstring """

    benchmarking_api = jba.JubeBenchmarkingAPI("", "")
    assert isinstance(benchmarking_api.benchmark_path, str)
    assert benchmarking_api.benchmark_name == ""
Example #15
def test_benchmark_no_exist(init_env):
    with pytest.raises(OSError):
        benchmarking_api = jba.JubeBenchmarkingAPI("bench_name", "")
Example #16
def test_benchmark_empty(init_env):
    init_env.create_empty_bench()
    with pytest.raises(ET.ParseError):
        benchmarking_api = jba.JubeBenchmarkingAPI("test_bench", "")
Example #17
def test_write_bench_xml(init_env):
    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    init_env.create_run_dir("simple")
    benchmarking_api.jube_xml_files.write_bench_xml()
    assert os.path.exists(
        os.path.join(init_env.config['run_path'], "simple")) == True
Example #18
def test_xml_get_result_file(init_env):
    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    assert benchmarking_api.jube_xml_files.get_bench_resultfile() == "result.dat"
Example #19
def test_out_xml_path(init_env):
    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    print(init_env.config['run_path'])
    assert benchmarking_api.jube_xml_files.bench_xml_path_out == os.path.join(
        init_env.config['run_path'], "simple")
Example #20
def test_load_bench_file():
    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
Example #21
def test_load_bench_file():
    """ docstring """

    # pylint: disable=unused-variable

    benchmarking_api = jba.JubeBenchmarkingAPI("simple", "")
    def get_benchmarking_api(self):
        """ Factory method to get a new JubeBenchmarkingAPI """

        return jba.JubeBenchmarkingAPI(self.benchmark_name, self.platform)
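
The get_benchmarking_api method at the end of the last example takes self and references self.benchmark_name and self.platform, so it belongs to a class rather than to the test above it. A hypothetical minimal host class and call site:

class BenchmarkHost(object):
    """Hypothetical owner of the factory method shown above."""

    def __init__(self, benchmark_name, platform):
        self.benchmark_name = benchmark_name
        self.platform = platform

    def get_benchmarking_api(self):
        """Factory method to get a new JubeBenchmarkingAPI."""
        return jba.JubeBenchmarkingAPI(self.benchmark_name, self.platform)


# usage (hypothetical):
#   host = BenchmarkHost("simple", "platform")
#   api = host.get_benchmarking_api()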