# Note: `perftest`, `sanity_file`, `perf_file`, `dummy_gpu_exec_ctx` and
# `_run_sanity` are fixtures/helpers defined elsewhere in the surrounding
# test module.
import os

import reframe.utility.sanity as sn


def test_perf_vars_without_reference(perftest, sanity_file, perf_file,
                                     dummy_gpu_exec_ctx):
    logfile = 'perf.log'

    @sn.deferrable
    def extract_perf(patt, tag):
        val = sn.evaluate(sn.extractsingle(patt, perf_file, tag, float))
        with open(logfile, 'a') as fp:
            fp.write(f'{tag}={val}')

        return val

    sanity_file.write_text('result = success\n')
    perf_file.write_text('perf1 = 1.0\n'
                         'perf3 = 3.3\n')
    perftest.perf_variables = {
        # Style 1: pass an already-built deferred expression
        'value1': sn.make_performance_function(
            extract_perf(r'perf1 = (?P<v1>\S+)', 'v1'), 'unit'
        ),
        # Style 2: pass the callable plus the arguments it will be called with
        'value3': sn.make_performance_function(
            extract_perf, 'unit', r'perf3 = (?P<v3>\S+)', 'v3'
        )
    }
    _run_sanity(perftest, *dummy_gpu_exec_ctx)

    logfile = os.path.join(perftest.stagedir, logfile)
    with open(logfile) as fp:
        log_output = fp.read()

    assert 'v1' in log_output
    assert 'v3' in log_output
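
# For context, a regular ReFrame check would typically expose the same metrics
# through the `@performance_function` builtin instead of building
# `perf_variables` by hand.  The sketch below is illustrative only: it assumes
# a recent ReFrame where the class-body builtins (`sanity_function`,
# `performance_function`) are available, and the check name, `echo` workload
# and regexes are made up for demonstration.
import reframe as rfm
import reframe.utility.sanity as sn


@rfm.simple_test
class perf_vars_sketch(rfm.RunOnlyRegressionTest):
    valid_systems = ['*']
    valid_prog_environs = ['*']
    executable = 'echo'
    executable_opts = ['perf1 = 1.0 perf3 = 3.3']

    @sanity_function
    def validate(self):
        return sn.assert_found(r'perf1', self.stdout)

    @performance_function('unit')
    def value1(self):
        # Deferred extraction of the first metric from stdout
        return sn.extractsingle(r'perf1 = (\S+)', self.stdout, 1, float)

    @performance_function('unit')
    def value3(self):
        # Deferred extraction of the second metric from stdout
        return sn.extractsingle(r'perf3 = (\S+)', self.stdout, 1, float)
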
Example #2
    def setup_per_benchmark(self):
        # `benchmark_info` is a (benchmark_name, metric) pair,
        # e.g. ('mpi.pt2pt.osu_latency', 'latency').
        bench, bench_metric = self.benchmark_info
        if bench_metric == 'latency':
            self.message_size = 8
            unit = 'us'
        elif bench_metric == 'bandwidth':
            self.message_size = 4194304
            unit = 'MB/s'
        else:
            raise ValueError(f'unknown benchmark metric: {bench_metric}')

        # The executable is the last component of the benchmark name,
        # e.g. 'mpi.pt2pt.osu_latency' -> 'osu_latency'.
        self.executable = bench.split('.')[-1]

        # -m: message size, -x: warm-up iterations, -i: measured iterations
        self.executable_opts = [
            '-m', f'{self.message_size}', '-x', f'{self.num_warmup_iters}',
            '-i', f'{self.num_iters}'
        ]
        if self.device_buffers != 'cpu':
            self.executable_opts += ['-d', self.device_buffers]

        # Point-to-point benchmarks take two extra positional buffer
        # placement arguments and run on exactly two tasks.
        if bench.startswith('mpi.pt2pt'):
            self.executable_opts += ['D', 'D']
            self.num_tasks = 2

        self.perf_variables = {
            bench_metric: sn.make_performance_function(self._extract_metric,
                                                       unit)
        }
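
# `_extract_metric` is referenced above but not defined in this excerpt.  A
# minimal sketch of the kind of deferred extraction it would perform, assuming
# OSU-style output where the measured value appears next to the message size
# ('<size>  <value>' per line), could look like the hypothetical helper below.
import reframe.utility.sanity as sn


def extract_osu_metric(stdout, message_size):
    # Hypothetical helper, not the actual implementation: return a deferred
    # expression that pulls the value reported for `message_size`.
    return sn.extractsingle(
        rf'^{message_size}\s+(?P<value>\S+)', stdout, 'value', float
    )
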
def test_perf_vars_with_reference(perftest, sanity_file, perf_file,
                                  dummy_gpu_exec_ctx):
    # This test also checks that a performance function that raises an
    # exception is simply skipped.

    logfile = 'perf.log'

    @sn.deferrable
    def extract_perf(patt, tag):
        val = sn.evaluate(sn.extractsingle(patt, perf_file, tag, float))
        with open(logfile, 'a') as fp:
            fp.write(f'{tag}={val}')

        return val

    def dummy_perf(x):
        # Dummy function to check that a performance variable is simply
        # skipped when the wrong number of arguments is passed to it.
        with open(logfile, 'a') as fp:
            fp.write('v2')

        return 1

    sanity_file.write_text('result = success\n')
    perf_file.write_text('perf1 = 1.0\n')

    # Make the unit in the reference different from the performance function's
    perftest.reference = {'*': {'value1': (0, None, None, 'unit_')}}
    perftest.perf_variables = {
        'value1': sn.make_performance_function(
            extract_perf(r'perf1 = (?P<v1>\S+)', 'v1'), 'unit'
        ),
        # `dummy_perf` accepts one argument but is given two here, so it
        # raises and 'value2' must be skipped.
        'value2': sn.make_performance_function(dummy_perf, 'other_units',
                                               perftest, 'extra_arg'),
    }
    _run_sanity(perftest, *dummy_gpu_exec_ctx)

    logfile = os.path.join(perftest.stagedir, logfile)
    with open(logfile) as fp:
        log_output = fp.read()

    assert 'v1' in log_output
    assert 'v2' not in log_output
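
# The reference tuples above follow ReFrame's (target, lower_threshold,
# upper_threshold, unit) convention, with thresholds expressed as fractional
# deviations from the target and `None` meaning "no bound".  A hypothetical,
# fuller reference spec (system/partition names and numbers are made up):
reference = {
    'some-system:gpu': {
        # target 1.0 'unit'; accept from 10% below up to 20% above the target
        'value1': (1.0, -0.1, 0.2, 'unit'),
    },
    '*': {
        # fallback for any other system/partition: record only, no bounds
        'value1': (0, None, None, 'unit'),
    }
}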