Example 1
def log_output():
    # Nested helper: log_pos, buffer, and command are captured from the enclosing scope.
    nonlocal log_pos
    buffer.seek(log_pos)
    new_output = buffer.read()
    log_pos = buffer.tell()
    if new_output:
        log.debug(f'Output from {command[0]}', new_output)
Example 2
def run(local, targets_and_opts):
    from pyutils import buildinfo

    binary_dir = buildinfo.binary_dir
    command = []
    if targets_and_opts:
        run_command = os.path.join(binary_dir, targets_and_opts)
        command += run_command.split()

    if local:
        output = runtools.run(command)
    else:
        output = runtools.srun(command)

    data = json.loads(output)

    data[var._project_name] = {'commit': _git_commit(), 'datetime': _git_datetime()}
    data['environment'] = {
        'hostname': env.hostname(),
        'clustername': env.clustername(local),
        'compiler': buildinfo.compiler,
        'datetime': _now(),
        'envfile': buildinfo.envfile
    }
    log.debug('Perftests data', pprint.pformat(data))

    return data
Example 3
def _run_sbatch(rundir, commands, cwd, use_srun, use_mpi_config):
    sbatchstr = _generate_sbatch(rundir, cwd, use_srun, use_mpi_config)
    log.debug('Generated sbatch file', sbatchstr)
    with open(_sbatch_file(rundir), 'w') as sbatch:
        sbatch.write(sbatchstr)

    runscriptstr = _generate_runscript(commands)
    log.debug('Generated runscript file', runscriptstr)
    with open(_runscript_file(rundir), 'w') as runscript:
        runscript.write(runscriptstr)

    command = ['sbatch', '--wait', _sbatch_file(rundir)]
    log.info('Invoking sbatch', ' '.join(command))
    start = time.time()
    result = subprocess.run(command,
                            env=env.env,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    end = time.time()
    log.info(f'sbatch finished in {end - start:.2f}s')
    if result.returncode != 0 and result.stderr:
        log.error(
            f'sbatch finished with exit code '
            f'{result.returncode} and message', result.stderr.decode())
        raise RuntimeError(f'Job submission failed: {result.stderr.decode()}')

    m = re.match(r'Submitted batch job (\d+)', result.stdout.decode())
    if not m:
        log.error('Failed parsing sbatch output', result.stdout.decode())
        raise RuntimeError('Job submission failed; sbatch output: ' +
                           result.stdout.decode())
    return int(m.group(1))
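As a quick check of the job-id parsing at the end of this function, the regular expression can be exercised standalone; the confirmation line below is a made-up sample of sbatch's usual output:

import re

sample = 'Submitted batch job 123456\n'  # hypothetical sbatch confirmation line
m = re.match(r'Submitted batch job (\d+)', sample)
job_id = int(m.group(1)) if m else None
print(job_id)  # 123456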
Example 4
def run(local, scheduling_policy, threads, extra_opts):
    from pyutils import buildinfo

    binary = os.path.join(buildinfo.binary_dir, 'bin',
                          'future_overhead_report_test')
    command = [binary] + [str(scheduling_policy)] + [str(threads)]
    if extra_opts:
        command += extra_opts.split()

    if local:
        output = runtools.run(command)
    else:
        output = runtools.srun(command)

    data = json.loads(output)

    data[var._project_name] = {
        'commit': _git_commit(),
        'datetime': _git_datetime()
    }
    data['environment'] = {
        'hostname': env.hostname(),
        'clustername': env.clustername(local),
        'compiler': buildinfo.compiler,
        'datetime': _now(),
        'envfile': buildinfo.envfile
    }
    log.debug('Perftests data', pprint.pformat(data))

    return data
Example 5
def _history_plot(title, dates, measurements, output):
    fig, ax = plt.subplots(figsize=(10, 5))
    dates = [matplotlib.dates.date2num(d) for d in dates]
    if len(dates) > len(set(dates)):
        log.warning('Non-unique dates in history plot')

    locator = matplotlib.dates.AutoDateLocator()
    formatter = matplotlib.dates.AutoDateFormatter(locator)
    formatter.scaled[1 / 24] = '%y-%m-%d %H:%M'
    formatter.scaled[1 / (24 * 60)] = '%y-%m-%d %H:%M'
    formatter.scaled[1 / (24 * 60 * 60)] = '%y-%m-%d %H:%M:%S'

    ax.set_title(title)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)

    style = next(iter(plt.rcParams['axes.prop_cycle']))
    ax.fill_between(dates,
                    measurements.min,
                    measurements.max,
                    alpha=0.2,
                    **style)
    ax.fill_between(dates,
                    measurements.q1,
                    measurements.q3,
                    alpha=0.5,
                    **style)
    ax.plot(dates, measurements.q2, '|-', **style)
    ax.set_ylim(bottom=0)
    ax.set_ylabel('Time [s]')
    fig.autofmt_xdate()
    fig.tight_layout()
    fig.savefig(output, dpi=300)
    log.debug(f'Successfully written history plot to {output}')
    plt.close(fig)
Example 6
def _bar_plot(title, labels, full_data, output):
    def fmt(seconds, *args):
        return f'{seconds * 1000:.2f} ms'

    fig, ax = plt.subplots(figsize=(10, 5))
    x0 = 0
    xticklabels = []
    for label, data in zip(labels, full_data):
        if data:
            x = x0 + np.arange(len(data))
            x0 += len(data)
            keys, values = zip(*sorted(data.items()))
            bars = ax.bar(x, values, label=label)
            for bar in bars:
                ax.text(bar.get_x() + bar.get_width() / 2,
                        bar.get_height(),
                        fmt(bar.get_height()),
                        ha='center',
                        va='bottom')
            xticklabels += [k.upper() for k in keys]

    ax.legend(loc='upper left')
    ax.set_xticks(np.arange(len(xticklabels)))
    ax.set_xticklabels(xticklabels)
    ax.set_title(title)
    ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(fmt))
    fig.tight_layout()
    fig.savefig(output, dpi=300)
    log.debug(f'Successfully written bar plot to {output}')
    plt.close(fig)
Example 7
def load(envfile):
    if not os.path.exists(envfile):
        raise FileNotFoundError(f'Could not find environment file "{envfile}"')
    env['GTCMAKE_PYUTILS_ENVFILE'] = os.path.abspath(envfile)

    envdir, envfile = os.path.split(envfile)
    output = runtools.run(['bash', '-c', f'source {envfile} && env -0'],
                          cwd=envdir).strip('\0')
    env.update(line.split('=', 1) for line in output.split('\0'))

    log.info(f'Loaded environment from {os.path.join(envdir, envfile)}')
    log.debug('New environment',
              '\n'.join(f'{k}={v}' for k, v in sorted(env.items())))
Example 8
def run(command, **kwargs):
    if not command:
        raise ValueError('No command provided')

    log.info('Invoking', ' '.join(command))
    start = time.time()

    loop = asyncio.get_event_loop()
    output = loop.run_until_complete(_run_async(command, **kwargs))

    end = time.time()
    log.info(f'{command[0]} finished in {end - start:.2f}s')
    log.debug(f'{command[0]} output', output)
    return output
Example 9
def load(filename):
    """Loads result data from the given json file.

    Args:
        filename: The name of the input file.
    """
    with open(filename, 'r') as fp:
        data = json.load(fp)
    log.debug(f'Read json from {filename}', data)

    if data['version'] == version:
        d = data['runinfo']
        runinfo_data = RunInfo(name=d['name'],
                               version=d['version'],
                               datetime=time.from_timestr(d['datetime']),
                               precision=d['precision'],
                               backend=d['backend'],
                               grid=d['grid'],
                               compiler=d['compiler'],
                               hostname=d['hostname'],
                               clustername=d['clustername'])
    elif data['version'] == 0.4:
        runtime_data = data['runtime']
        config_data = data['config']
        runinfo_data = RunInfo(name=runtime_data['name'],
                               version=runtime_data['version'],
                               datetime=time.from_timestr(
                                   runtime_data['datetime']),
                               precision=runtime_data['precision'],
                               backend=runtime_data['backend'],
                               grid=runtime_data['grid'],
                               compiler=runtime_data['compiler'],
                               hostname=config_data['hostname'],
                               clustername=config_data['clustername'])
    else:
        raise ValueError(f'Unknown result file version "{data["version"]}"')

    times_data = [
        Time(stencil=d['stencil'], measurements=d['measurements'])
        for d in data['times']
    ]

    result = Result(runinfo=runinfo_data,
                    times=times_data,
                    domain=data['domain'],
                    datetime=time.from_timestr(data['datetime']),
                    version=data['version'])
    log.info(f'Successfully loaded result from {filename}')
    return result
Example 10
def _histogram_plot(title, before, after, output):
    fig, ax = plt.subplots(figsize=(10, 5))
    bins = np.linspace(0, max(np.amax(before), np.amax(after)), 50)
    ax.hist(before, alpha=0.5, bins=bins, density=True, label='Before')
    ax.hist(after, alpha=0.5, bins=bins, density=True, label='After')
    style = iter(plt.rcParams['axes.prop_cycle'])
    ax.axvline(np.median(before), **next(style))
    ax.axvline(np.median(after), **next(style))
    ax.legend(loc='upper left')
    ax.set_xlabel('Time [s]')
    ax.set_title(title)
    fig.tight_layout()
    fig.savefig(output)
    log.debug(f'Successfully written histogram plot to {output}')
    plt.close(fig)
Example 11
def compare_medians(cls, before, after, n=1000, alpha=0.05):
    scale = np.median(before)
    before = np.asarray(before) / scale
    after = np.asarray(after) / scale
    # bootstrap sampling
    before_samples = np.random.choice(before, (before.size, n))
    after_samples = np.random.choice(after, (after.size, n))
    # bootstrap estimates of difference of medians
    bootstrap_estimates = (np.median(after_samples, axis=0) -
                           np.median(before_samples, axis=0))
    # percentile bootstrap confidence interval
    ci = np.quantile(bootstrap_estimates, [alpha / 2, 1 - alpha / 2])
    log.debug(f'Bootstrap results (n = {n}, alpha = {alpha})',
              f'{ci[0]:8.5f} - {ci[1]:8.5f}')
    return cls(*ci)
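As a side note, the percentile bootstrap used above can be reproduced with plain NumPy; the timing samples below are synthetic and serve only to illustrate the technique:

import numpy as np

rng = np.random.default_rng(42)
before = rng.normal(1.00, 0.05, 100)  # hypothetical timings in seconds
after = rng.normal(0.97, 0.05, 100)   # hypothetical timings in seconds
n, alpha = 1000, 0.05

scale = np.median(before)
before, after = before / scale, after / scale
# resample with replacement and compute the difference of medians per resample
diffs = (np.median(rng.choice(after, (after.size, n)), axis=0) -
         np.median(rng.choice(before, (before.size, n)), axis=0))
lower, upper = np.quantile(diffs, [alpha / 2, 1 - alpha / 2])
print(f'{(1 - alpha) * 100:.0f}% CI for the median difference: [{lower:.4f}, {upper:.4f}]')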
Example 12
def _add_comparison_table(report, cis):
    names = list(sorted(set(k.name for k in cis.keys())))
    executors = list(sorted(set(k.executor for k in cis.keys())))
    exitcode = 0

    def css_class(classification):
        if '-' in classification:
            exitcode = 1
            return 'bad', exitcode
        if '?' in classification:
            exitcode = 1
            return 'unknown', exitcode
        if '+' in classification:
            exitcode = 0
            return 'good', exitcode
        exitcode = 0
        return '', exitcode

    with report.table('Comparison') as table:
        with table.row() as row:
            row.fill('BENCHMARK', *(b.upper() for b in executors))

        for name in names:
            with table.row() as row:
                name_cell = row.cell(name.replace('_', ' ').title())
                row_classification = ''
                for executor in executors:
                    try:
                        classification = [cis[_OutputKey(name=name,
                                           executor=executor)].classify()]
                        if (len(classification) <= 1) or (classification[0] ==
                                classification[1]):
                            classification = classification[0]
                        else:
                            classification = ' '.join(classification)
                    except KeyError:
                        classification = ''
                    row_classification += classification
                    class_qualifier = css_class(classification)
                    row.cell(classification).set('class', class_qualifier[0])
                row_class_qualifier = css_class(row_classification)
                name_cell.set('class', row_class_qualifier[0])
                exitcode = exitcode or row_class_qualifier[1]

    log.debug('Generated performance comparison table')
    return exitcode
Example 13
def run(command, **kwargs):
    if not command:
        raise ValueError('No command provided')

    log.info('Invoking', ' '.join(command))
    start = time.time()
    try:
        output = subprocess.check_output(command,
                                         env=env.env,
                                         stderr=subprocess.STDOUT,
                                         **kwargs)
    except subprocess.CalledProcessError as e:
        log.error(f'{command[0]} failed with output', e.output.decode())
        raise e
    end = time.time()
    log.info(f'{command[0]} finished in {end - start:.2f}s')
    output = output.decode().strip()
    log.debug(f'{command[0]} output', output)
    return output
Example 14
def save(filename, data):
    """Saves the result data to the given a json file.

    Overwrites the file if it already exists.

    Args:
        filename: The name of the output file.
        data: An instance of `Result`.
    """
    log.debug(f'Saving data to {filename}', data)

    def convert(d):
        if isinstance(d, time.datetime):
            return time.timestr(d)
        if isinstance(d, (Result, RunInfo, Time)):
            return dict(d)

    with open(filename, 'w') as fp:
        json.dump(data, fp, indent=4, sort_keys=True, default=convert)
    log.info(f'Successfully saved result to {filename}')
Example 15
def _retreive_outputs(rundir, commands, task_id):
    command = [
        'sacct', '--jobs', f'{task_id}', '--format', 'jobid,exitcode',
        '--parsable2', '--noheader'
    ]
    for i in range(1, 7):
        try:
            output = run(command)
        except subprocess.CalledProcessError:
            time.sleep(1)
            continue
        infos = [o.split('|') for o in output.splitlines() if '.batch' in o]
        exitcodes = [int(code.split(':')[0]) for _, code in sorted(infos)]
        if len(exitcodes) == len(commands):
            break
        time.sleep(i**2)
    else:
        raise RuntimeError('Could not get exit codes of jobs')

    time.sleep(5)

    outputs = []
    for i, (command, exitcode) in enumerate(zip(commands, exitcodes)):
        if exitcode != 0:
            log.debug(f'Exit code of command "{command}"', exitcode)
        with open(_stdout_file(rundir, i), 'r') as outfile:
            stdout = outfile.read()
            if stdout.strip():
                log.debug(f'Stdout of command "{command}"', stdout)
        with open(_stderr_file(rundir, i), 'r') as outfile:
            stderr = outfile.read()
            if stderr.strip():
                log.debug(f'Stderr of command "{command}"', stderr)
        outputs.append((exitcode, stdout, stderr))
    return outputs
Example 16
def run(domain, runs):
    from pyutils import buildinfo

    binary = os.path.join(buildinfo.binary_dir, 'tests', 'regression',
                          'perftests')

    output = runtools.srun([binary] + [str(d)
                                       for d in domain] + [str(runs), '-d'])
    data = json.loads(output)

    data['gridtools'] = {'commit': _git_commit(), 'datetime': _git_datetime()}
    data['environment'] = {
        'hostname': env.hostname(),
        'clustername': env.clustername(),
        'compiler': buildinfo.compiler,
        'datetime': _now(),
        'envfile': buildinfo.envfile
    }
    data['domain'] = list(domain)
    log.debug('Perftests data', pprint.pformat(data))

    return data
Example 17
def _retreive_outputs(rundir, commands, task_id):
    command = [
        'sacct', '--jobs', f'{task_id}', '--format', 'jobid,exitcode',
        '--parsable2', '--noheader'
    ]
    for i in range(1, 7):
        try:
            output = run(command)
        except subprocess.CalledProcessError:
            time.sleep(1)
            continue
        infos = [o.split('|')[1] for o in output.splitlines() if '.batch' in o]
        if len(infos) > 1:
            raise RuntimeError(f'Unexpected sacct output: {output}')
        elif len(infos) == 1:
            exitcode = int(infos[0].split(':')[0])
            if exitcode != 0:
                log.warning('Batch job finished with non-zero exitcode',
                            exitcode)
            break
        time.sleep(i**2)
    else:
        raise RuntimeError('Could not get exit code of job')

    time.sleep(5)

    with open(_stderr_file(rundir), 'r') as outfile:
        stderr = outfile.read()
        log.debug('Raw job stderr', stderr)
        stderr = stderr.split('%RETURNCODE%\n')[:-1]
        stderr, exitcodes = zip(*(o.split('%PYUTILS%') for o in stderr))
        exitcodes = [int(exitcode) for exitcode in exitcodes]

    with open(_stdout_file(rundir), 'r') as outfile:
        stdout = outfile.read()
        log.debug('Raw job stdout', stdout)
        stdout = stdout.split('%PYUTILS%\n')[:-1]

    return list(zip(exitcodes, stdout, stderr))
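To make the marker handling above concrete, here is a tiny standalone sketch on a made-up stderr stream; the runscript is assumed (judging from the parsing code) to append '%PYUTILS%<exitcode>%RETURNCODE%' after each command's output:

raw_stderr = ('warning: foo\n%PYUTILS%0%RETURNCODE%\n'
              'error: bar\n%PYUTILS%1%RETURNCODE%\n')  # hypothetical job stderr

chunks = raw_stderr.split('%RETURNCODE%\n')[:-1]
stderr, exitcodes = zip(*(chunk.split('%PYUTILS%') for chunk in chunks))
print([int(code) for code in exitcodes])  # [0, 1]
print(list(stderr))                       # ['warning: foo\n', 'error: bar\n']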
Example 18
def load(grid):
    """Stencil loading functions.

    Loads all stencils for the given grid from the respective module.

    Args:
        grid: Name of the grid for which the stencils should be loaded.

    Returns:
        A list of all stencils provided for the given type.
    """

    log.debug(f'Trying to import stencils for grid "{grid}"')
    mod = importlib.import_module('perftest.stencils.' + grid)

    stencils = []
    for k, v in mod.__dict__.items():
        if isinstance(v, type) and issubclass(v, Stencil) and v is not Stencil:
            stencils.append(v())

    sstr = ', '.join(f'"{s.name}"' for s in stencils)
    log.info(f'Successfully imported stencils {sstr} for grid "{grid}"')
    return stencils
Example 19
def _add_comparison_table(report, cis):
    names = list(sorted(set(k.name for k in cis.keys())))
    executors = list(sorted(set(k.executor for k in cis.keys())))
    exitcode = 0

    def css_class(classification):
        if '-' in classification:
            exitcode = 1
            return 'bad', exitcode
        if '?' in classification:
            exitcode = 1
            return 'unknown', exitcode
        if '+' in classification:
            exitcode = 0
            return 'good', exitcode
        exitcode = 0
        return '', exitcode

    with report.table('Comparison') as table:
        with table.row() as row:
            row.fill('BENCHMARK', *(b.upper() for b in executors))

        for name in names:
            with table.row() as row:
                name_cell = row.cell(name.replace('_', ' ').title())
                row_classification = ''
                for executor in executors:
                    try:
                        classification = [cis[_OutputKey(name=name,
                                           executor=executor)].classify()]
                        if (len(classification) <= 1) or (classification[0] ==
                                classification[1]):
                            classification = classification[0]
                        else:
                            classification = ' '.join(classification)
                    except KeyError:
                        classification = ''
                    row_classification += classification
                    class_qualifier = css_class(classification)
                    row.cell(classification).set('class', class_qualifier[0])
                row_class_qualifier = css_class(row_classification)
                name_cell.set('class', row_class_qualifier[0])
                exitcode = exitcode or row_class_qualifier[1]

    with report.table('Explanation of Symbols') as table:

        def add_help(string, meaning):
            with table.row() as row:
                row.fill(string, meaning)

        add_help('Symbol', 'MEANING')
        add_help('=', 'No performance change (confidence interval within ±1%)')
        add_help(
            '(=)',
            'Probably no performance change (confidence interval within ±2%)')
        add_help('(+)/(-)',
                 'Very small performance improvement/degradation (≤1%)')
        add_help('+/-', 'Small performance improvement/degradation (≤5%)')
        add_help('++/--', 'Large performance improvement/degradation (≤10%)')
        add_help('+++/---',
                 'Very large performance improvement/degradation (>10%)')
        add_help(
            '?', 'Probably no change, but quite large uncertainty '
            '(confidence interval within ±5%)')
        add_help('??', 'Unclear result, very large uncertainty (±10%)')
        add_help('???', 'Something unexpected…')

    log.debug('Generated performance comparison table')
    return exitcode
Example 20
def _add_comparison_table(report, cis):
    names = list(sorted(set(k.name for k in cis.keys())))
    backends = list(sorted(set(k.backend for k in cis.keys())))

    def css_class(classification):
        if '-' in classification:
            return 'bad'
        if '?' in classification:
            return 'unknown'
        if '+' in classification:
            return 'good'
        return ''

    with report.table('Comparison') as table:
        with table.row() as row:
            row.fill('BENCHMARK', *(b.upper() for b in backends))

        for name in names:
            with table.row() as row:
                name_cell = row.cell(name.replace('_', ' ').title())
                row_classification = ''
                for backend in backends:
                    try:
                        classification = [
                            cis[_OutputKey(name=name,
                                           backend=backend,
                                           float_type=float_type)].classify()
                            for float_type in ('float', 'double')
                        ]
                        if classification[0] == classification[1]:
                            classification = classification[0]
                        else:
                            classification = ' '.join(classification)
                    except KeyError:
                        classification = ''
                    row_classification += classification
                    row.cell(classification).set('class',
                                                 css_class(classification))
                name_cell.set('class', css_class(row_classification))

    with report.table('Explanation of Symbols') as table:

        def add_help(string, meaning):
            with table.row() as row:
                row.fill(string, meaning)

        add_help('Symbol', 'MEANING')
        add_help('=', 'No performance change (confidence interval within ±1%)')
        add_help(
            '(=)',
            'Probably no performance change (confidence interval within ±2%)')
        add_help('(+)/(-)',
                 'Very small performance improvement/degradation (≤1%)')
        add_help('+/-', 'Small performance improvement/degradation (≤5%)')
        add_help('++/--', 'Large performance improvement/degradation (≤10%)')
        add_help('+++/---',
                 'Very large performance improvement/degradation (>10%)')
        add_help(
            '?', 'Probably no change, but quite large uncertainty '
            '(confidence interval within ±5%)')
        add_help('??', 'Unclear result, very large uncertainty (±10%)')
        add_help('???', 'Something unexpected…')

    log.debug('Generated performance comparison table')
Example 21
def _write_css(self):
    path, rel_path = self.get_data_path('.css')
    with path.open('w') as css_file:
        css_file.write(_CSS)
    log.debug(f'Successfully written CSS to {path}')
    return rel_path