Example 1
def run(domain, runs):
    from pyutils import buildinfo
    stencils = stencil_loader.load(buildinfo.grid)

    results = dict()
    for backend in buildinfo.backends:
        if backend == 'naive':
            continue

        commands = [_stencil_command(backend, s, domain) for s in stencils]
        allcommands = [c for c in commands for _ in range(runs)]
        log.info('Running stencils')
        alloutputs = runtools.sbatch_retry(allcommands, 5)
        log.info('Running stencils finished')
        alltimes = [_parse_time(o) for _, o, _ in alloutputs]
        times = [alltimes[i:i + runs] for i in range(0, len(alltimes), runs)]

        info = result.RunInfo(name='gridtools',
                              version=_git_commit(),
                              datetime=_git_datetime(),
                              precision=buildinfo.precision,
                              backend=backend,
                              grid=buildinfo.grid,
                              compiler=buildinfo.compiler,
                              hostname=env.hostname(),
                              clustername=env.clustername())

        results[backend] = result.from_data(info, domain, stencils, times)
    return results
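
The repeat-and-chunk idiom above is easy to miss: each command is queued 'runs' times back to back, so slicing the flat list of measured times in steps of 'runs' recovers one list of measurements per stencil. A minimal self-contained sketch with made-up data:

commands = ['stencil_a', 'stencil_b']  # hypothetical stencil commands
runs = 3
allcommands = [c for c in commands for _ in range(runs)]
# ['stencil_a', 'stencil_a', 'stencil_a', 'stencil_b', 'stencil_b', 'stencil_b']
alltimes = [0.11, 0.12, 0.10, 0.52, 0.49, 0.51]  # made-up per-run timings
times = [alltimes[i:i + runs] for i in range(0, len(alltimes), runs)]
# [[0.11, 0.12, 0.10], [0.52, 0.49, 0.51]]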
Example 2
def _run_sbatch(rundir, commands, cwd, use_srun, use_mpi_config):
    sbatchstr = _generate_sbatch(rundir, cwd, use_srun, use_mpi_config)
    log.debug('Generated sbatch file', sbatchstr)
    with open(_sbatch_file(rundir), 'w') as sbatch:
        sbatch.write(sbatchstr)

    runscriptstr = _generate_runscript(commands)
    log.debug('Generated runscript file', runscriptstr)
    with open(_runscript_file(rundir), 'w') as runscript:
        runscript.write(runscriptstr)

    command = ['sbatch', '--wait', _sbatch_file(rundir)]
    log.info('Invoking sbatch', ' '.join(command))
    start = time.time()
    result = subprocess.run(command,
                            env=env.env,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    end = time.time()
    log.info(f'sbatch finished in {end - start:.2f}s')
    if result.returncode != 0:
        log.error(
            f'sbatch finished with exit code {result.returncode} and message',
            result.stderr.decode())
        raise RuntimeError(f'Job submission failed: {result.stderr.decode()}')

    m = re.match(r'Submitted batch job (\d+)', result.stdout.decode())
    if not m:
        log.error('Failed parsing sbatch output', result.stdout.decode())
        raise RuntimeError('Job submission failed; sbatch output: ' +
                           result.stdout.decode())
    return int(m.group(1))
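
For reference, Slurm's sbatch prints a line of the form 'Submitted batch job <id>' on success, which the regular expression above captures. A standalone sketch of the parsing (the output string here is made up):

import re

stdout = 'Submitted batch job 123456\n'  # hypothetical sbatch output
m = re.match(r'Submitted batch job (\d+)', stdout)
job_id = int(m.group(1)) if m else None  # 123456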
Example 3
def _run_mpi(label, verbose_ctest):
    log.info('Running MPI tests', label)
    output, = runtools.sbatch([_ctest(label, verbose_ctest)],
                              cwd=buildinfo.binary_dir,
                              use_srun=False,
                              use_mpi_config=True)
    log.info('ctest MPI test output', output)
Example 4
def load(filename):
    """Loads result data from the given json file.

    Args:
        filename: The name of the input file.
    """
    with open(filename, 'r') as fp:
        data = json.load(fp)

    if data['version'] == version:
        d = data['runinfo']
        runinfo_data = Data(name=d['name'],
                            version=d['version'],
                            datetime=time.from_timestr(d['datetime']),
                            precision=d['precision'],
                            backend=d['backend'],
                            grid=d['grid'],
                            compiler=d['compiler'],
                            hostname=d['hostname'],
                            clustername=d['clustername'])
    elif data['version'] == 0.4:
        d = data['runtime']
        runtime_data = Data(name=d['name'],
                            version=d['version'],
                            datetime=time.from_timestr(d['datetime']),
                            grid=d['grid'],
                            precision=d['precision'],
                            backend=d['backend'],
                            compiler=d['compiler'])

        d = data['config']
        config_data = Data(configname=d['configname'],
                           hostname=d['hostname'],
                           clustername=d['clustername'])

        runinfo_data = Data(name=runtime_data.name,
                            version=runtime_data.version,
                            datetime=runtime_data.datetime,
                            precision=runtime_data.precision,
                            backend=runtime_data.backend,
                            grid=runtime_data.grid,
                            compiler=runtime_data.compiler,
                            hostname=config_data.hostname,
                            clustername=config_data.clustername)
    else:
        raise ValueError(f'Unknown result file version "{data["version"]}"')

    times_data = [
        Data(stencil=d['stencil'], measurements=d['measurements'])
        for d in data['times']
    ]

    result = Result(runinfo=runinfo_data,
                    times=times_data,
                    domain=data['domain'],
                    datetime=time.from_timestr(data['datetime']),
                    version=data['version'])
    log.info(f'Successfully loaded result from {filename}')
    return result
Example 5
def _slurm_available():
    try:
        run(['srun', '--version'])
        log.info('Using SLURM')
        return True
    except FileNotFoundError:
        log.info('SLURM not found: invoking commands directly')
        return False
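
An alternative availability check, not what Example 5 does, avoids spawning a process altogether by looking srun up on the PATH; a sketch:

import shutil

def slurm_available():
    # True if an srun executable is found on the PATH
    return shutil.which('srun') is not None

The subprocess-based probe above has the advantage of also failing when srun exists but is broken, so either choice is defensible.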
Example 6
    def run(domain_size, runs, output):
        import perftest
        if not output.lower().endswith('.json'):
            output += '.json'

        data = perftest.run(domain_size, runs)
        with open(output, 'w') as outfile:
            json.dump(data, outfile, indent='  ')
            log.info(f'Successfully saved perftests output to {output}')
Example 7
def _run_mpi(verbose_ctest):
    log.info('Running MPI tests')
    output, = runtools.sbatch([_ctest('mpitest_*', verbose_ctest)],
                              cwd=buildinfo.binary_dir,
                              use_srun=False,
                              use_mpi_config=True)
    exitcode, stdout, stderr = output
    log.info('ctest MPI test output', stdout)

    if exitcode != 0:
        raise RuntimeError('ctest failed')
Example 8
def load(envfile):
    if not os.path.exists(envfile):
        raise FileNotFoundError(f'Could not find environment file "{envfile}"')
    env['GTCMAKE_PYUTILS_ENVFILE'] = os.path.abspath(envfile)

    envdir, envfile = os.path.split(envfile)
    output = runtools.run(['bash', '-c', f'source {envfile} && env -0'],
                          cwd=envdir).strip('\0')
    env.update(line.split('=', 1) for line in output.split('\0'))

    log.info(f'Loaded environment from {os.path.join(envdir, envfile)}')
    log.debug('New environment',
              '\n'.join(f'{k}={v}' for k, v in sorted(env.items())))
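
The core trick in Example 8 is capturing the environment of a subshell: source the file, dump the resulting environment NUL-separated with env -0, and parse the pairs back into a mapping. A minimal standalone sketch, using plain subprocess instead of the module's runtools.run and a hypothetical envfile path:

import subprocess

def capture_env(envfile):
    output = subprocess.check_output(
        ['bash', '-c', f'source {envfile} && env -0']).decode()
    # NUL separators stay unambiguous even if variable values contain newlines
    return dict(line.split('=', 1) for line in output.strip('\0').split('\0'))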
Example 9
def run(command, **kwargs):
    if not command:
        raise ValueError('No command provided')

    log.info('Invoking', ' '.join(command))
    start = time.time()

    loop = asyncio.get_event_loop()
    output = loop.run_until_complete(_run_async(command, **kwargs))

    end = time.time()
    log.info(f'{command[0]} finished in {end - start:.2f}s')
    log.debug(f'{command[0]} output', output)
    return output
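
_run_async is not shown here. Assuming it wraps asyncio's subprocess support, a hypothetical stand-in could look roughly like this, using the modern asyncio.run entry point (get_event_loop() is deprecated for this use in recent Python versions):

import asyncio

async def _run_async(command, **kwargs):
    # Run the command, merging stderr into stdout, and return decoded output
    proc = await asyncio.create_subprocess_exec(
        *command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
        **kwargs)
    stdout, _ = await proc.communicate()
    return stdout.decode()

output = asyncio.run(_run_async(['echo', 'hello']))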
Example 10
    def run(local, run_output, targets_and_opts):
        # Join the extra target options into a single string
        targets_and_opts = ' '.join(targets_and_opts).lstrip()

        if not run_output.lower().endswith('.json'):
            run_output += '.json'
        # Create the directory for the output file if it does not exist yet
        mkdirp(run_output)

        import perftest
        data = perftest.run(local, targets_and_opts)
        with open(run_output, 'w') as outfile:
            json.dump(data, outfile, indent='  ')
            log.info(f'Successfully saved perftests output to {run_output}')
Example 11
    def run(local, scheduling_policy, threads, run_output, extra_opts):
        # Build the HPX command-line options
        scheduling_policy = '--hpx:queuing=' + scheduling_policy
        threads = '--hpx:threads=' + str(threads)
        extra_opts = ' '.join(extra_opts).lstrip()

        if not run_output.lower().endswith('.json'):
            run_output += '.json'
        # Create the directory for the output file if it does not exist yet
        mkdirp(run_output)

        import perftest
        data = perftest.run(local, scheduling_policy, threads, extra_opts)
        with open(run_output, 'w') as outfile:
            json.dump(data, outfile, indent='  ')
            log.info(f'Successfully saved perftests output to {run_output}')
Example 12
def save(filename, data):
    """Saves the result data to the given a json file.

    Overwrites the file if it already exists.

    Args:
        filename: The name of the output file.
        data: An instance of `Result`.
    """
    def convert(d):
        if isinstance(d, Data):
            return {k: v for k, v in d.items()}
        elif isinstance(d, time.datetime):
            return time.timestr(d)

    with open(filename, 'w') as fp:
        json.dump(data, fp, indent=4, sort_keys=True, default=convert)
    log.info(f'Successfully saved result to {filename}')
Example 13
def run(command, **kwargs):
    if not command:
        raise ValueError('No command provided')

    log.info('Invoking', ' '.join(command))
    start = time.time()
    try:
        output = subprocess.check_output(command,
                                         env=env.env,
                                         stderr=subprocess.STDOUT,
                                         **kwargs)
    except subprocess.CalledProcessError as e:
        log.error(f'{command[0]} failed with output', e.output.decode())
        raise
    end = time.time()
    log.info(f'{command[0]} finished in {end - start:.2f}s')
    output = output.decode().strip()
    log.debug(f'{command[0]} output', output)
    return output
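
A quick demonstration of the error handling Example 13 relies on: with stderr=subprocess.STDOUT the error message lands in e.output, so it can be logged before re-raising. The failing command below is arbitrary:

import subprocess

try:
    subprocess.check_output(['ls', 'no-such-file'],
                            stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
    print(e.returncode, e.output.decode())  # e.g. 2 and the ls error text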
Example 14
def save(filename, data):
    """Saves the result data to the given a json file.

    Overwrites the file if it already exists.

    Args:
        filename: The name of the output file.
        data: An instance of `Result`.
    """
    log.debug(f'Saving data to {filename}', data)

    def convert(d):
        if isinstance(d, time.datetime):
            return time.timestr(d)
        if isinstance(d, (Result, RunInfo, Time)):
            return dict(d)

    with open(filename, 'w') as fp:
        json.dump(data, fp, indent=4, sort_keys=True, default=convert)
    log.info(f'Successfully saved result to {filename}')
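
Both save functions lean on json.dump's default= hook, which the encoder calls for every object it cannot serialize natively and whose return value is encoded instead. A self-contained illustration with standard datetime objects rather than the module's time wrapper; note that raising TypeError for unhandled types is safer than the implicit None return above, which would silently encode unknown objects as null:

import json
import datetime

def convert(obj):
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    raise TypeError(f'Cannot serialize {type(obj).__name__}')

print(json.dumps({'when': datetime.datetime(2020, 1, 1)}, default=convert))
# {"when": "2020-01-01T00:00:00"}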
Example 15
def load(grid):
    """Stencil loading functions.

    Loads all stencils for the given grid from the respective module.

    Args:
        grid: Name of the grid for which the stencils should be loaded.

    Returns:
        A list of all stencils provided for the given grid.
    """

    log.debug(f'Trying to import stencils for grid "{grid}"')
    mod = importlib.import_module('perftest.stencils.' + grid)

    stencils = []
    for v in mod.__dict__.values():
        if isinstance(v, type) and issubclass(v, Stencil) and v is not Stencil:
            stencils.append(v())

    sstr = ', '.join(f'"{s.name}"' for s in stencils)
    log.info(f'Successfully imported stencils {sstr} for grid "{grid}"')
    return stencils
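
The discovery loop in Example 15 generalizes to any plugin-style registry: import a module by name and instantiate every class in it that subclasses a given base. A generic sketch, where Base and module_name are placeholders:

import importlib

def discover(module_name, Base):
    mod = importlib.import_module(module_name)
    # Instantiate every subclass of Base defined or imported in the module
    return [v() for v in vars(mod).values()
            if isinstance(v, type) and issubclass(v, Base) and v is not Base]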
Example 16
def compile_and_run_examples(build_dir, verbose_ctest):
    import build

    source_dir = os.path.join(buildinfo.install_dir, 'examples')
    build_dir = os.path.abspath(build_dir)
    os.makedirs(build_dir, exist_ok=True)

    env.set_cmake_arg('CMAKE_BUILD_TYPE', buildinfo.build_type.title())

    log.info('Configuring examples')
    build.cmake(source_dir, build_dir)
    log.info('Building examples')
    build.make(build_dir)
    log.info('Successfully built examples')
    runtools.srun(_ctest(verbose=verbose_ctest),
                  log_output=log.info,
                  cwd=build_dir)
    log.info('Successfully executed examples')
Example 17
def _run_nompi(verbose_ctest):
    log.info('Running non-MPI tests')
    outputs = runtools.sbatch(
        [_ctest('unittest_*', verbose_ctest),
         _ctest('regression_*', verbose_ctest)],
        cwd=buildinfo.binary_dir)
    unit_exitcode, stdout, stderr = outputs[0]
    log.info('ctest unit test output', stdout)
    regression_exitcode, stdout, stderr = outputs[1]
    log.info('ctest regression test output', stdout)

    if unit_exitcode != 0 or regression_exitcode != 0:
        raise RuntimeError('ctest failed')
Example 18
def compile_examples(build_dir):
    import build
    from pyutils import buildinfo

    source_dir = os.path.join(buildinfo.install_dir, 'gridtools_examples')
    build_dir = os.path.abspath(build_dir)
    os.makedirs(build_dir, exist_ok=True)

    env.set_cmake_arg('CMAKE_BUILD_TYPE', buildinfo.build_type.title())

    log.info('Configuring examples')
    build.cmake(source_dir, build_dir)
    log.info('Building examples')
    build.make(build_dir)
    log.info('Successfully built examples')
Example 19
def history(output, input, date, limit):
    from perftest import plot, result
    results = [result.load(f) for f in input]
    plot.history(results, date, limit).savefig(output)
    log.info(f'Successfully saved plot to {output}')
Example 20
    def write(self):
        et.ElementTree(self.html).write(
            str(os.path.join(self.data_dir, 'index.html')),
            encoding='utf-8',
            method='html')
        log.info(f'Successfully wrote HTML report to {self.data_dir}')
Example 21
def _run_nompi(label, verbose_ctest):
    log.info('Running non-MPI tests', label)
    output, = runtools.sbatch([_ctest(label, verbose_ctest)],
                              cwd=buildinfo.binary_dir)
    log.info('ctest unit test output', output)
Example 22
def compare(output, input):
    from perftest import plot, result
    results = [result.load(f) for f in input]
    plot.compare(results).savefig(output)
    log.info(f'Successfully saved plot to {output}')