Example #1
def test_RunTestcases_cldrive_syntax_error(
    cldrive_harness_config: harness_pb2.CldriveHarness, opencl_opt: bool):
  """Test execution of a test case with invalid syntax."""
  cldrive_harness_config.opencl_opt[0] = opencl_opt
  harness = cldrive.CldriveHarness(cldrive_harness_config)
  testcases = [
    deepsmith_pb2.Testcase(
        toolchain='opencl',
        harness=deepsmith_pb2.Harness(name='cldrive'),
        inputs={
          'src': 'kernel void A(global int* a) {\n!11@invalid syntax!',
          'gsize': '1,1,1',
          'lsize': '1,1,1',
          'timeout_seconds': '60',
        })
  ]
  results = opencl_fuzz.RunTestcases(harness, testcases)
  assert len(results) == 1
  # Testcase.invariant_opts.driver_type field is set by cldrive harness.
  testcases[0].invariant_opts['driver_type'] = 'compile_only'
  assert testcases[0] == results[0].testcase
  assert results[0].testbed == cldrive.OpenClEnvironmentToTestbed(
      harness.envs[0])
  assert results[0].outcome == deepsmith_pb2.Result.BUILD_FAILURE
  assert results[0].outputs['stdout'] == ''
  print(results[0].outputs['stderr'])
  opt_str = 'on' if opencl_opt else 'off'
  assert results[0].outputs['stderr'] == f"""\
Example #2
def test_CldriveHarness_RunTestcases_no_testbed():
    """Test that invalid request params returned if no testbed requested."""
    config = harness_pb2.CldriveHarness()
    harness = cldrive.CldriveHarness(config)
    req = harness_pb2.RunTestcasesRequest(testbed=None, testcases=[])
    res = harness.RunTestcases(req, None)
    assert (res.status.returncode ==
            service_pb2.ServiceStatus.INVALID_REQUEST_PARAMETERS)
    assert res.status.error_message == "Requested testbed not found."
Example #3
def test_CldriveHarness_oclgrind_testbed_count_two():
    """Test that correct number of testbeds are instantiated."""
    oclgrind_env_name = env.OclgrindOpenCLEnvironment().name

    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
    config.opencl_opt.extend([True, False])

    harness = cldrive.CldriveHarness(config)
    assert len(harness.testbeds) == 2
Example #4
def test_CldriveHarness_RunTestcases_no_testcases():
    """Test that empty results returned if no testcase requested."""
    config = harness_pb2.CldriveHarness()
    harness = cldrive.CldriveHarness(config)
    assert len(harness.testbeds)
    req = harness_pb2.RunTestcasesRequest(testbed=harness.testbeds[0],
                                          testcases=[])
    res = harness.RunTestcases(req, None)
    assert res.status.returncode == service_pb2.ServiceStatus.SUCCESS
    assert not res.results
Example #5
def test_CldriveHarness_oclgrind_testbed_opts():
    """Test that opencl_opt option set on testbeds."""
    oclgrind_env_name = env.OclgrindOpenCLEnvironment().name

    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
    config.opencl_opt.extend([True, False])

    harness = cldrive.CldriveHarness(config)
    assert harness.testbeds[0].opts["opencl_opt"] == "enabled"
    assert harness.testbeds[1].opts["opencl_opt"] == "disabled"
Example #6
def test_CldriveHarness_oclgrind_testbed_names():
    """Test that correct names set on testbeds."""
    oclgrind_env_name = env.OclgrindOpenCLEnvironment().name

    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
    config.opencl_opt.extend([True, False])

    harness = cldrive.CldriveHarness(config)
    assert harness.testbeds[0].name == oclgrind_env_name
    assert harness.testbeds[1].name == oclgrind_env_name
Example #7
def test_CldriveHarness_RunTestcases_driver_cflags(abc_harness_config,
                                                   abc_run_testcases_request):
    """Test that valid driver cflags do not break the build."""
    abc_harness_config.driver_cflag.extend(["-O3", "-g"])
    harness = cldrive.CldriveHarness(abc_harness_config)
    res = harness.RunTestcases(abc_run_testcases_request, None)
    assert res.status.returncode == service_pb2.ServiceStatus.SUCCESS
    assert len(res.results) == 1
    result = res.results[0]
    # Nothing interesting to see here.
    assert result.outcome == deepsmith_pb2.Result.PASS
Example #8
def test_CldriveHarness_RunTestcases_invalid_driver_cflags(
        abc_harness_config, abc_run_testcases_request):
    """Test that invalid driver cflags cause driver to fail to build."""
    abc_harness_config.driver_cflag.extend(["--not_a_real_flag"])
    harness = cldrive.CldriveHarness(abc_harness_config)
    res = harness.RunTestcases(abc_run_testcases_request, None)
    assert res.status.returncode == service_pb2.ServiceStatus.SUCCESS
    assert len(res.results) == 1
    result = res.results[0]
    # A driver compilation error is an unknown outcome.
    assert result.outcome == deepsmith_pb2.Result.UNKNOWN
Example #9
def test_RunTestcases_cldrive_syntax_error(
  cldrive_harness_config: harness_pb2.CldriveHarness, opencl_opt: bool
):
  """Test execution of a test case with invalid syntax."""
  cldrive_harness_config.opencl_opt[0] = opencl_opt
  harness = cldrive.CldriveHarness(cldrive_harness_config)
  testcases = [
    deepsmith_pb2.Testcase(
      toolchain="opencl",
      harness=deepsmith_pb2.Harness(name="cldrive"),
      inputs={
        "src": "kernel void A(global int* a) {\n!11@invalid syntax!",
        "gsize": "1,1,1",
        "lsize": "1,1,1",
        "timeout_seconds": "60",
      },
    )
  ]
  results = opencl_fuzz.RunTestcases(harness, testcases)
  assert len(results) == 1
  # Testcase.invariant_opts.driver_type field is set by cldrive harness.
  testcases[0].invariant_opts["driver_type"] = "compile_only"
  assert testcases[0] == results[0].testcase
  assert results[0].testbed == cldrive.OpenClEnvironmentToTestbed(
    harness.envs[0]
  )
  assert results[0].outcome == deepsmith_pb2.Result.BUILD_FAILURE
  assert results[0].outputs["stdout"] == ""
  print(results[0].outputs["stderr"])
  opt_str = "on" if opencl_opt else "off"
  assert (
    results[0].outputs["stderr"]
    == f"""\
[cldrive] Platform: Oclgrind
[cldrive] Device: Oclgrind Simulator
[cldrive] OpenCL optimizations: {opt_str}
1 warning and 3 errors generated.
input.cl:1:34: error: expected ';' after expression
kernel void A(global int* a) {{!11@invalid syntax!
                                 ^
                                 ;
input.cl:1:34: error: expected expression
input.cl:1:50: error: expected '}}'
kernel void A(global int* a) {{!11@invalid syntax!
                                                 ^
input.cl:1:30: note: to match this '{{'
kernel void A(global int* a) {{!11@invalid syntax!
                             ^
input.cl:1:31: warning: expression result unused
kernel void A(global int* a) {{!11@invalid syntax!
                              ^~~
clBuildProgram CL_BUILD_PROGRAM_FAILURE
"""
  )
Example #10
def test_RunTestcases_cldrive_pass(
  cldrive_harness_config: harness_pb2.CldriveHarness, opencl_opt: bool
):
  """Test execution of a simple test case."""
  cldrive_harness_config.opencl_opt[0] = opencl_opt
  harness = cldrive.CldriveHarness(cldrive_harness_config)
  testcases = [
    deepsmith_pb2.Testcase(
      toolchain="opencl",
      harness=deepsmith_pb2.Harness(name="cldrive"),
      inputs={
        "src": "kernel void A(global int* a) {a[get_global_id(0)] = 10;}",
        "gsize": "1,1,1",
        "lsize": "1,1,1",
        "timeout_seconds": "60",
      },
    )
  ]
  results = opencl_fuzz.RunTestcases(harness, testcases)
  assert len(results) == 1
  # Testcase.invariant_opts.driver_type field is set by cldrive harness.
  testcases[0].invariant_opts["driver_type"] = "compile_and_run"
  assert testcases[0] == results[0].testcase
  assert results[0].testbed == cldrive.OpenClEnvironmentToTestbed(
    harness.envs[0]
  )
  assert results[0].outcome == deepsmith_pb2.Result.PASS
  assert results[0].outputs["stdout"] == (
    "global int * a: 10 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 "
    "22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 "
    "46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 "
    "70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 "
    "94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 "
    "114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 "
    "132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 "
    "150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 "
    "168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 "
    "186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 "
    "204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 "
    "222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 "
    "240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255\n"
  )
  opt_str = "on" if opencl_opt else "off"
  assert (
    results[0].outputs["stderr"]
    == f"""\
[cldrive] Platform: Oclgrind
[cldrive] Device: Oclgrind Simulator
[cldrive] OpenCL optimizations: {opt_str}
[cldrive] Kernel: "A"
done.
"""
  )
Example #11
def main(argv):
  if len(argv) > 1:
    unknown_args = ', '.join(argv[1:])
    raise app.UsageError(f"Unknown arguments {unknown_args}")

  logging.info('Preparing OpenCL testbed.')
  config = harness_pb2.CldriveHarness()
  config.opencl_env.extend([env.OclgrindOpenCLEnvironment().name])
  config.opencl_opt.extend([FLAGS.opencl_opt])
  harness = cldrive.CldriveHarness(config)
  assert len(harness.testbeds) >= 1

  input_directories = FLAGS.input_directories
  logging.info('Reading testcases from: %s', ' '.join(input_directories))

  output_directory = pathlib.Path(FLAGS.output_directory)
  logging.info('Writing results to %s', output_directory)
  output_directory.mkdir(parents=True, exist_ok=True)

  # Load testcases.
  testcase_dirs = [
    pathlib.Path(x) for x in input_directories if
    pathlib.Path(x).is_dir()]
  if not testcase_dirs:
    raise app.UsageError('No --input_directories found.')
  testcase_paths = labtypes.flatten(
      [[pathlib.Path(y) for y in fs.ls(x, abspaths=True)]
       for x in testcase_dirs])
  testcases = [
    pbutil.FromFile(path, deepsmith_pb2.Testcase()) for path in testcase_paths]
  logging.info('Read %d testcases.', len(testcases))
  if not testcases:
    input_dirs_str = ' '.join(input_directories)
    raise app.UsageError(f"No testcases found: '{input_dirs_str}'")

  # Execute testcases.
  req = harness_pb2.RunTestcasesRequest()
  req.testbed.CopyFrom(harness.testbeds[0])
  req.testcases.extend(testcases)
  res = harness.RunTestcases(req, None)

  # Write results to file.
  for testcase, result in zip(testcases, res.results):
    result_id = crypto.md5_str(str(testcase))
    pbutil.ToFile(result, output_directory / f'{result_id}.pbtxt')

  logging.info('Executed %d testcases and wrote results to %s',
               len(res.results), output_directory)
  execution_times = [
    result.profiling_events[0].duration_ms for result in res.results]
  logging.info('Average time to evaluate testcase: %.2f ms',
               sum(execution_times) / len(execution_times))
Example #12
def test_CldriveHarness_oclgrind_testbed_uneven_name_and_opt():
    """Error is raised if number of opt_opt != number of opencl_env."""
    oclgrind_env_name = env.OclgrindOpenCLEnvironment().name

    config = harness_pb2.CldriveHarness()
    config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
    config.opencl_opt.extend([True])

    with test.Raises(ValueError) as e_ctx:
        cldrive.CldriveHarness(config, default_to_all_environments=False)
    assert (
        "CldriveHarness.opencl_env and CldriveHarness.opencl_opt lists are "
        "not the same length") in str(e_ctx.value)
Example #13
def test_CldriveHarness_oclgrind_testbed():
  """Test that harness can be made from project-local oclgrind."""
  oclgrind_env_name = gpu.cldrive.env.OclgrindOpenCLEnvironment().name
  config = harness_pb2.CldriveHarness()
  config.opencl_env.extend([oclgrind_env_name, oclgrind_env_name])
  config.opencl_opt.extend([True, False])
  harness = cldrive.CldriveHarness(config)
  assert len(harness.testbeds) == 2
  assert harness.testbeds[0].name == oclgrind_env_name
  assert harness.testbeds[0].opts['opencl_opt'] == 'enabled'
  assert harness.testbeds[1].name == oclgrind_env_name
  assert harness.testbeds[1].opts['opencl_opt'] == 'disabled'
Example #14
def test_RunTestcases_cldrive_pass(
        cldrive_harness_config: harness_pb2.CldriveHarness, opencl_opt: bool):
    """Test execution of a simple test case."""
    cldrive_harness_config.opencl_opt[0] = opencl_opt
    harness = cldrive.CldriveHarness(cldrive_harness_config)
    testcases = [
        deepsmith_pb2.Testcase(
            toolchain="opencl",
            harness=deepsmith_pb2.Harness(name="cldrive"),
            inputs={
                'src':
                'kernel void A(global int* a) {a[get_global_id(0)] = 10;}',
                'gsize': '1,1,1',
                'lsize': '1,1,1',
                'timeout_seconds': '60',
            })
    ]
    results = opencl_fuzz.RunTestcases(harness, testcases)
    assert len(results) == 1
    # Testcase.invariant_opts.driver_type field is set by cldrive harness.
    testcases[0].invariant_opts['driver_type'] = 'compile_and_run'
    assert testcases[0] == results[0].testcase
    assert results[0].testbed == cldrive.OpenClEnvironmentToTestbed(
        harness.envs[0])
    assert results[0].outcome == deepsmith_pb2.Result.PASS
    assert results[0].outputs['stdout'] == (
        'global int * a: 10 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 '
        '22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 '
        '46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 '
        '70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 '
        '94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 '
        '114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 '
        '132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 '
        '150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 '
        '168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 '
        '186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 '
        '204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 '
        '222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 '
        '240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255\n')
    opt_str = 'on' if opencl_opt else 'off'
    assert results[0].outputs['stderr'] == f"""\
Example #15
def main(argv: typing.List[str]):
    """Main entry point."""
    if len(argv) > 1:
        raise app.UsageError("Unknown arguments: '{}'.".format(" ".join(
            argv[1:])))

    os.environ["CLGEN_CACHE"] = f"{FLAGS.result_cache_dir}/clgen"
    # An OpenCL corpus, configured as described in CGO'17.
    corpus = corpuses.Corpus(
        corpus_pb2.Corpus(
            local_directory=FLAGS.github_kernels_dir,
            ascii_character_atomizer=True,
            contentfile_separator="\n\n",
            preprocessor=[
                "deeplearning.clgen.preprocessors.opencl:ClangPreprocessWithShim",
                "deeplearning.clgen.preprocessors.opencl:Compile",
                "deeplearning.clgen.preprocessors.opencl:NormalizeIdentifiers",
                "deeplearning.clgen.preprocessors.opencl:StripDoubleUnderscorePrefixes",
                "deeplearning.clgen.preprocessors.common:StripDuplicateEmptyLines",
                "deeplearning.clgen.preprocessors.opencl:SanitizeKernelPrototype",
                "deeplearning.clgen.preprocessors.common:StripTrailingWhitespace",
                "deeplearning.clgen.preprocessors.opencl:ClangFormat",
                "deeplearning.clgen.preprocessors.common:MinimumLineCount3",
                "deeplearning.clgen.preprocessors.opencl:Compile",
            ],
        ))
    corpus.Create()

    cache_dir = pathlib.Path(FLAGS.result_cache_dir) / corpus.hash
    cache_dir.mkdir(parents=True, exist_ok=True)

    driver = cldrive.CldriveHarness(
        harness_pb2.CldriveHarness(
            opencl_env=[FLAGS.opencl_env],
            opencl_opt=[FLAGS.opencl_opt],
        ))

    with corpus.preprocessed.Session() as session:
        # Query to return all successfully preprocessed OpenCL kernels in a stable
        # order.
        q = (session.query(preprocessed.PreprocessedContentFile.text).filter(
            preprocessed.PreprocessedContentFile.preprocessing_succeeded ==
            True).order_by(preprocessed.PreprocessedContentFile.id))

        num_good_files = q.count()
        num_files = session.query(preprocessed.PreprocessedContentFile).count()
        app.Log(
            1,
            "Corpus of %s files (%.1f%% of %s)",
            humanize.Commas(num_good_files),
            (num_good_files / num_files) * 100,
            humanize.Commas(num_files),
        )

        srcs = [x[0] for x in q]
        batch_size = 8
        max_batch = math.ceil(len(srcs) / batch_size)

        all_outcomes = []
        for i, start_idx in enumerate(range(0, len(srcs), batch_size)):
            cached_results_path = cache_dir / f"{i}.pkl"

            if cached_results_path.is_file():
                app.Log(1, "batch %d of %d", i + 1, max_batch)
                # Read cached results.
                with open(cached_results_path, "rb") as f:
                    outcomes = pickle.load(f)
            elif FLAGS.summarize_only:
                continue
            else:
                app.Log(1, "batch %d of %d", i + 1, max_batch)
                # Evaluate OpenCL kernels and cache results.
                batch = srcs[start_idx:start_idx + batch_size]
                testcases = labtypes.flatten(
                    [OpenClSourceToTestCases(src) for src in batch])
                results = RunTestCasesOrDie(driver, testcases)

                outcomes = [
                    GetOutcomeWithDynamicChecks(result, driver)
                    for result in results
                ]
                with open(cached_results_path, "wb") as f:
                    pickle.dump(outcomes, f)

            all_outcomes += outcomes
            df = pd.DataFrame(
                list(zip(all_outcomes, np.ones(len(all_outcomes)))) +
                [("Total", len(all_outcomes))],
                columns=["outcome", "count"],
            )
            summary = df.groupby("outcome").sum().reset_index()
            summary["ratio"] = [
                f"{x:.2%}" for x in
                # Double the "ratio" values because the 'count' column contains a
                # grand total row.
                2 * summary["count"].values / summary["count"].sum()
            ]
            summary["count"] = [
                humanize.Commas(int(x)) for x in summary["count"]
            ]
            print(summary)
            del df
            del summary
Example #16
def cl_launcher_harness(
    cl_launcher_harness_config: harness_pb2.ClLauncherHarness
) -> harness_pb2.ClLauncherHarness:
  """Test fixture to return a cl_launcher test harness."""
  return cldrive.CldriveHarness(cl_launcher_harness_config)
Example #17
def cldrive_harness(cldrive_harness_config: harness_pb2.CldriveHarness
                    ) -> cldrive.CldriveHarness:
  """Test fixture to return an Cldrive test harness."""
  return cldrive.CldriveHarness(cldrive_harness_config)
Example #18
def abc_harness(abc_harness_config) -> cldrive.CldriveHarness:
    """A test fixture which returns an oclgrind harness."""
    return cldrive.CldriveHarness(abc_harness_config)