Example 1
def abc_harness_config() -> harness_pb2.CldriveHarness:
    """A test fixture which returns an oclgrind harness config."""
    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend(
        [gpu.cldrive.env.OclgrindOpenCLEnvironment().name])
    config.opencl_opt.extend([True])
    return config
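
A minimal usage sketch of how a test might consume the fixture above, assuming it is registered with pytest (e.g. via @pytest.fixture) so that it is injected by parameter name; the test name and the single-testbed assertion are illustrative assumptions, not taken from the source.

def test_harness_from_fixture(abc_harness_config):
    """Illustrative only: one opencl_env entry should yield one testbed."""
    harness = cldrive.CldriveHarness(abc_harness_config)
    assert len(harness.testbeds) == 1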
Example 2
def test_CldriveHarness_RunTestcases_no_testbed():
    """Test that invalid request params returned if no testbed requested."""
    config = harness_pb2.CldriveHarness()
    harness = cldrive.CldriveHarness(config)
    req = harness_pb2.RunTestcasesRequest(testbed=None, testcases=[])
    res = harness.RunTestcases(req, None)
    assert (res.status.returncode ==
            service_pb2.ServiceStatus.INVALID_REQUEST_PARAMETERS)
    assert res.status.error_message == "Requested testbed not found."
Example 3
def test_CldriveHarness_RunTestcases_no_testcases():
    """Test that empty results returned if no testcase requested."""
    config = harness_pb2.CldriveHarness()
    harness = cldrive.CldriveHarness(config)
    assert len(harness.testbeds)
    req = harness_pb2.RunTestcasesRequest(testbed=harness.testbeds[0],
                                          testcases=[])
    res = harness.RunTestcases(req, None)
    assert res.status.returncode == service_pb2.ServiceStatus.SUCCESS
    assert not res.results
Example 4
def test_CldriveHarness_oclgrind_testbed_count_two():
    """Test that correct number of testbeds are instantiated."""
    oclgrind_env_name = env.OclgrindOpenCLEnvironment().name

    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
    config.opencl_opt.extend([True, False])

    harness = cldrive.CldriveHarness(config)
    assert len(harness.testbeds) == 2
Example 5
def test_CldriveHarness_oclgrind_testbed_opts():
    """Test that opencl_opt option set on testbeds."""
    oclgrind_env_name = env.OclgrindOpenCLEnvironment().name

    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
    config.opencl_opt.extend([True, False])

    harness = cldrive.CldriveHarness(config)
    assert harness.testbeds[0].opts["opencl_opt"] == "enabled"
    assert harness.testbeds[1].opts["opencl_opt"] == "disabled"
Example 6
def test_CldriveHarness_oclgrind_testbed_names():
    """Test that correct names set on testbeds."""
    oclgrind_env_name = env.OclgrindOpenCLEnvironment().name

    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
    config.opencl_opt.extend([True, False])

    harness = cldrive.CldriveHarness(config)
    assert harness.testbeds[0].name == oclgrind_env_name
    assert harness.testbeds[1].name == oclgrind_env_name
Example 7
def main(argv):
  if len(argv) > 1:
    unknown_args = ', '.join(argv[1:])
    raise app.UsageError(f"Unknown arguments {unknown_args}")

  logging.info('Preparing OpenCL testbed.')
  config = harness_pb2.CldriveHarness()
  config.opencl_env.extend([env.OclgrindOpenCLEnvironment().name])
  config.opencl_opt.extend([FLAGS.opencl_opt])
  harness = cldrive.CldriveHarness(config)
  assert len(harness.testbeds) >= 1

  input_directories = FLAGS.input_directories
  logging.info('Reading testcases from: %s', ' '.join(input_directories))

  output_directory = pathlib.Path(FLAGS.output_directory)
  logging.info('Writing results to %s', output_directory)
  output_directory.mkdir(parents=True, exist_ok=True)

  # Load testcases.
  testcase_dirs = [
    pathlib.Path(x) for x in input_directories if
    pathlib.Path(x).is_dir()]
  if not testcase_dirs:
    raise app.UsageError('No --input_directories found.')
  testcase_paths = labtypes.flatten(
      [[pathlib.Path(y) for y in fs.ls(x, abspaths=True)]
       for x in testcase_dirs])
  testcases = [
    pbutil.FromFile(path, deepsmith_pb2.Testcase()) for path in testcase_paths]
  logging.info('Read %d testcases.', len(testcases))
  if not testcases:
    raise app.UsageError(
        f"No testcases found: '{' '.join(input_directories)}'")

  # Execute testcases.
  req = harness_pb2.RunTestcasesRequest()
  req.testbed.CopyFrom(harness.testbeds[0])
  req.testcases.extend(testcases)
  res = harness.RunTestcases(req, None)

  # Write results to file.
  for testcase, result in zip(testcases, res.results):
    result_id = crypto.md5_str(str(testcase))
    pbutil.ToFile(result, output_directory / f'{result_id}.pbtxt')

  logging.info('Executed %d testcases and wrote results to %s',
               len(res.results), output_directory)
  execution_times = [
    result.profiling_events[0].duration_ms for result in res.results]
  logging.info('Average time to evaluate testcase: %.2f ms',
               sum(execution_times) / len(execution_times))
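
A hedged sketch of reading the written results back from the output directory. It assumes the elements of res.results are deepsmith_pb2.Result messages (the concrete message type is not named in the snippet above); the helper name is illustrative.

def read_results(output_directory: pathlib.Path):
  """Illustrative only: load every result proto written by main() above."""
  # Assumes the written files are deepsmith_pb2.Result messages.
  return [pbutil.FromFile(path, deepsmith_pb2.Result())
          for path in sorted(output_directory.glob('*.pbtxt'))]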
Example 8
def test_CldriveHarness_oclgrind_testbed_uneven_name_and_opt():
    """Error is raised if number of opt_opt != number of opencl_env."""
    oclgrind_env_name = env.OclgrindOpenCLEnvironment().name

    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
    config.opencl_opt.extend([True])

    with test.Raises(ValueError) as e_ctx:
        cldrive.CldriveHarness(config, default_to_all_environments=False)
    assert (
        "CldriveHarness.opencl_env and CldriveHarness.opencl_opt lists are "
        "not the same length") in str(e_ctx.value)
Example 9
def main(argv):
    if len(argv) > 1:
        raise app.UsageError("Unrecognized arguments")
    if FLAGS.harness_batch_size <= 0:
        raise app.UsageError("--harness_batch_size must be positive")
    datastore_config = services.ServiceConfigFromFlag(
        "datastore_config", datastore_pb2.DataStore())
    harness_config = services.ServiceConfigFromFlag(
        "harness_config", harness_pb2.CldriveHarness())

    datastore_stub = services.GetServiceStub(
        datastore_config, datastore_pb2_grpc.DataStoreServiceStub)
    harness_stub = services.GetServiceStub(harness_config,
                                           harness_pb2_grpc.HarnessServiceStub)

    target_total_results = FLAGS.target_total_results
    harness_batch_size = FLAGS.harness_batch_size
    capabilities = GetHarnessCapabilities(harness_stub)
    testbeds = collections.deque(capabilities.testbeds)
    if testbeds:
        app.Log(
            1,
            "%d testbeds: %s",
            len(capabilities.testbeds),
            ", ".join(x.name for x in capabilities.testbeds),
        )
        while testbeds:
            testbed = testbeds.popleft()
            testcases = GetTestcasesToRun(
                datastore_stub,
                capabilities.harness,
                testbed,
                target_total_results,
                harness_batch_size,
            )
            app.Log(
                1,
                "Received %d testcases to execute on %s",
                len(testcases),
                testbed.name,
            )
            if testcases:
                results = RunTestcases(harness_stub, testbed, testcases)
                SubmitResults(datastore_stub, results)
                # If there were testcases to run, add the testbed back to the
                # queue, as there may be more for it.
                testbeds.append(testbed)
        app.Log(1, "done")
    else:
        app.Warning("No testbeds, nothing to do!")
Example 10
def test_CldriveHarness_oclgrind_testbed():
  """Test that harness can be made from project-local oclgrind."""
  oclgrind_env_name = gpu.cldrive.env.OclgrindOpenCLEnvironment().name
  config = harness_pb2.CldriveHarness()
  config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
  config.opencl_opt.extend([True, False])
  harness = cldrive.CldriveHarness(config)
  assert len(harness.testbeds) == 2
  assert harness.testbeds[0].name == oclgrind_env_name
  assert harness.testbeds[0].opts['opencl_opt'] == 'enabled'
  assert harness.testbeds[1].name == oclgrind_env_name
  assert harness.testbeds[1].opts['opencl_opt'] == 'disabled'
Example 11
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Unrecognized arguments')
    harness_config = services.ServiceConfigFromFlag(
        'harness_config', harness_pb2.CldriveHarness())
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    services.AssertLocalServiceHostname(harness_config.service)
    service = CldriveHarness(harness_config)
    harness_pb2_grpc.add_HarnessServiceServicer_to_server(service, server)
    server.add_insecure_port(f'[::]:{harness_config.service.port}')
    logging.info('%s listening on %s:%s',
                 type(service).__name__, harness_config.service.hostname,
                 harness_config.service.port)
    server.start()
    try:
        while True:
            time.sleep(3600 * 24)
    except KeyboardInterrupt:
        server.stop(0)
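
A minimal client sketch for the service started above, assuming the harness_pb2 and harness_pb2_grpc modules referenced elsewhere in these examples are imported and the server is reachable at the given address; the address format and helper name are illustrative.

def run_testcases_remotely(address, testbed, testcases):
    """Illustrative only: call the RunTestcases RPC on a running harness."""
    channel = grpc.insecure_channel(address)  # e.g. 'localhost:5000'.
    stub = harness_pb2_grpc.HarnessServiceStub(channel)
    req = harness_pb2.RunTestcasesRequest()
    req.testbed.CopyFrom(testbed)
    req.testcases.extend(testcases)
    return stub.RunTestcases(req)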
Example 12
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Unrecognized arguments')
    if FLAGS.harness_batch_size <= 0:
        raise app.UsageError('--harness_batch_size must be positive')
    datastore_config = services.ServiceConfigFromFlag(
        'datastore_config', datastore_pb2.DataStore())
    harness_config = services.ServiceConfigFromFlag(
        'harness_config', harness_pb2.CldriveHarness())

    datastore_stub = services.GetServiceStub(
        datastore_config, datastore_pb2_grpc.DataStoreServiceStub)
    harness_stub = services.GetServiceStub(harness_config,
                                           harness_pb2_grpc.HarnessServiceStub)

    target_total_results = FLAGS.target_total_results
    harness_batch_size = FLAGS.harness_batch_size
    capabilities = GetHarnessCapabilities(harness_stub)
    testbeds = collections.deque(capabilities.testbeds)
    if testbeds:
        logging.info('%d testbeds: %s', len(capabilities.testbeds),
                     ', '.join(x.name for x in capabilities.testbeds))
        while testbeds:
            testbed = testbeds.popleft()
            testcases = GetTestcasesToRun(datastore_stub, capabilities.harness,
                                          testbed, target_total_results,
                                          harness_batch_size)
            logging.info('Received %d testcases to execute on %s',
                         len(testcases), testbed.name)
            if testcases:
                results = RunTestcases(harness_stub, testbed, testcases)
                SubmitResults(datastore_stub, results)
                # If there were testcases to run, add the testbed back to the
                # queue, as there may be more for it.
                testbeds.append(testbed)
        logging.info('done')
    else:
        logging.warning('No testbeds, nothing to do!')
Example 13
def main(argv: typing.List[str]):
    """Main entry point."""
    if len(argv) > 1:
        raise app.UsageError("Unknown arguments: '{}'.".format(" ".join(
            argv[1:])))

    os.environ["CLGEN_CACHE"] = f"{FLAGS.result_cache_dir}/clgen"
    # An OpenCL corpus, configured as described in CGO'17.
    corpus = corpuses.Corpus(
        corpus_pb2.Corpus(
            local_directory=FLAGS.github_kernels_dir,
            ascii_character_atomizer=True,
            contentfile_separator="\n\n",
            preprocessor=[
                "deeplearning.clgen.preprocessors.opencl:ClangPreprocessWithShim",
                "deeplearning.clgen.preprocessors.opencl:Compile",
                "deeplearning.clgen.preprocessors.opencl:NormalizeIdentifiers",
                "deeplearning.clgen.preprocessors.opencl:StripDoubleUnderscorePrefixes",
                "deeplearning.clgen.preprocessors.common:StripDuplicateEmptyLines",
                "deeplearning.clgen.preprocessors.opencl:SanitizeKernelPrototype",
                "deeplearning.clgen.preprocessors.common:StripTrailingWhitespace",
                "deeplearning.clgen.preprocessors.opencl:ClangFormat",
                "deeplearning.clgen.preprocessors.common:MinimumLineCount3",
                "deeplearning.clgen.preprocessors.opencl:Compile",
            ],
        ))
    corpus.Create()

    cache_dir = pathlib.Path(FLAGS.result_cache_dir) / corpus.hash
    cache_dir.mkdir(parents=True, exist_ok=True)

    driver = cldrive.CldriveHarness(
        harness_pb2.CldriveHarness(
            opencl_env=[FLAGS.opencl_env],
            opencl_opt=[FLAGS.opencl_opt],
        ))

    with corpus.preprocessed.Session() as session:
        # Query to return all successfully preprocessed OpenCL kernels in a stable
        # order.
        q = (session.query(preprocessed.PreprocessedContentFile.text).filter(
            preprocessed.PreprocessedContentFile.preprocessing_succeeded ==
            True).order_by(preprocessed.PreprocessedContentFile.id))

        num_good_files = q.count()
        num_files = session.query(preprocessed.PreprocessedContentFile).count()
        app.Log(
            1,
            "Corpus of %s files (%.1f%% of %s)",
            humanize.Commas(num_good_files),
            (num_good_files / num_files) * 100,
            humanize.Commas(num_files),
        )

        srcs = [x[0] for x in q]
        batch_size = 8
        max_batch = math.ceil(len(srcs) / batch_size)

        all_outcomes = []
        for i, start_idx in enumerate(range(0, len(srcs), batch_size)):
            cached_results_path = cache_dir / f"{i}.pkl"

            if cached_results_path.is_file():
                app.Log(1, "batch %d of %d", i + 1, max_batch)
                # Read cached results.
                with open(cached_results_path, "rb") as f:
                    outcomes = pickle.load(f)
            elif FLAGS.summarize_only:
                continue
            else:
                app.Log(1, "batch %d of %d", i + 1, max_batch)
                # Evaluate OpenCL kernels and cache results.
                batch = srcs[start_idx:start_idx + batch_size]
                testcases = labtypes.flatten(
                    [OpenClSourceToTestCases(src) for src in batch])
                results = RunTestCasesOrDie(driver, testcases)

                outcomes = [
                    GetOutcomeWithDynamicChecks(result, driver)
                    for result in results
                ]
                with open(cached_results_path, "wb") as f:
                    pickle.dump(outcomes, f)

            all_outcomes += outcomes
            df = pd.DataFrame(
                list(zip(all_outcomes, np.ones(len(all_outcomes)))) +
                [("Total", len(all_outcomes))],
                columns=["outcome", "count"],
            )
            summary = df.groupby("outcome").sum().reset_index()
            summary["ratio"] = [
                f"{x:.2%}" for x in
                # Double the "ratio" values because the 'count' column contains a
                # grand total row.
                2 * summary["count"].values / summary["count"].sum()
            ]
            summary["count"] = [
                humanize.Commas(int(x)) for x in summary["count"]
            ]
            print(summary)
            del df
            del summary
Example 14
def cldrive_harness_config() -> harness_pb2.CldriveHarness:
  """Test fixture to return an Cldrive test harness config."""
  config = harness_pb2.CldriveHarness()
  config.opencl_env.extend([env.OclgrindOpenCLEnvironment().name])
  config.opencl_opt.extend([True])
  return config