def add_base_stage(name: str, input_args,
                   output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Establish dependencies that are shared by multiple parallel stages."""
    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()
    building_blocks['base_packages'] = hpccm.building_blocks.packages(
        ospackages=_common_packages)

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(
        input_args, compiler_build_stage=output_stages.get('compiler_build'))
    building_blocks['gdrcopy'] = get_gdrcopy(input_args,
                                             building_blocks['compiler'])
    building_blocks['ucx'] = get_ucx(input_args, building_blocks['compiler'],
                                     building_blocks['gdrcopy'])
    building_blocks['mpi'] = get_mpi(input_args, building_blocks['compiler'],
                                     building_blocks['ucx'])

    # Create the stage from which the targeted image will be tagged.
    output_stages[name] = hpccm.Stage()

    output_stages[name] += hpccm.primitives.baseimage(
        image=base_image_tag(input_args),
        _distro=hpccm_distro_name(input_args),
        _as=name)
    for bb in building_blocks.values():
        if bb is not None:
            output_stages[name] += bb
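# A minimal driver sketch (an assumption, not part of the original example):
# `args` stands in for the script's parsed command-line namespace, and
# printing an hpccm.Stage renders its portion of the recipe.
def render_base_recipe(args) -> str:
    stages = collections.OrderedDict()
    add_base_stage(name='build_base', input_args=args, output_stages=stages)
    return '\n'.join(str(stage) for stage in stages.values())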
def add_tsan_compiler_build_stage(input_args, output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Isolate the expensive TSAN preparation stage.

    This is a very expensive stage, but has few and disjoint dependencies, and
    its output is easily compartmentalized (/usr/local) so we can isolate this
    build stage to maximize build cache hits and reduce rebuild time, bookkeeping,
    and final image size.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    tsan_stage = hpccm.Stage()
    tsan_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='tsan')

    tsan_stage += hpccm.building_blocks.packages(ospackages=['git', 'ca-certificates', 'build-essential', 'cmake'])
    # CMake will get duplicated later, but this is an expensive image, and it isn't worth optimizing
    # out that duplication...
    tsan_stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)

    compiler_branch = 'release/' + str(input_args.llvm) + '.x'
    tsan_stage += hpccm.building_blocks.generic_cmake(
        repository='https://github.com/llvm/llvm-project.git',
        directory='/var/tmp/llvm-project/llvm/',
        prefix='/usr/local', recursive=True, branch=compiler_branch,
        cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp;clang-tools-extra;compiler-rt;lld"',
                    '-D LIBOMP_TSAN_SUPPORT=on'],
        postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-' + str(input_args.llvm),
                     'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/share/clang/run-clang-tidy.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py',
                     'ln -s /usr/local/bin/run-clang-tidy-' + str(input_args.llvm) + '.py /usr/local/bin/run-clang-tidy-' + str(input_args.llvm),
                     'ln -s /usr/local/libexec/c++-analyzer /usr/local/bin/c++-analyzer-' + str(input_args.llvm)])
    output_stages['compiler_build'] = tsan_stage
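# A consumption sketch (assumed; it mirrors how get_compiler is handed
# compiler_build_stage in the first example): hpccm.Stage.runtime() aggregates
# the runtime() of the stage's building blocks, so generic_cmake's /usr/local
# install is copied out of the named 'tsan' stage.
def tsan_compiler_runtime(stages: typing.Mapping[str, hpccm.Stage]):
    return stages['compiler_build'].runtime(_from='tsan')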
Example #3
def add_oneapi_compiler_build_stage(input_args, output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Isolate the oneAPI preparation stage.

    This stage is isolated so that its installed components are minimized in the
    final image (chiefly /opt/intel) and its environment setup script can be
    sourced. This also helps with rebuild time and final image size.

    Note that the ICC compiler inside oneAPI on Linux also needs gcc
    to build other components and to provide libstdc++.
    """
    if not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need output_stages container.')
    oneapi_stage = hpccm.Stage()
    oneapi_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='oneapi-build')

    # Add required components for the next stage (both for hpccm and Intel's setvars.sh script)
    oneapi_stage += hpccm.building_blocks.packages(ospackages=['wget', 'gnupg2', 'ca-certificates', 'lsb-release'])
    oneapi_stage += hpccm.building_blocks.packages(
        apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB'],
        apt_repositories=['deb https://apt.repos.intel.com/oneapi all main'],
        # Add minimal packages (not the whole HPC toolkit!)
        ospackages=['intel-oneapi-dpcpp-compiler', 'intel-oneapi-icc', 'intel-oneapi-mkl', 'intel-oneapi-mkl-devel']
    )
    # Ensure that all bash shells in the final container have access to oneAPI
    oneapi_stage += hpccm.primitives.shell(
            commands=['echo "source /opt/intel/oneapi/setvars.sh" >> /etc/bash.bashrc']
            )
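    # Attach a custom runtime generator (oneapi_runtime, defined in a later
    # example) so that callers can use the familiar stage.runtime() interface.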
    setattr(oneapi_stage, 'runtime', oneapi_runtime)

    output_stages['compiler_build'] = oneapi_stage
def build(image="Characterisation-Virtual-Laboratory/CharacterisationVL-Software:2004", _bootstrap='shub', mash_version='v2.2.2', capnp_version='0.8.0', quicktree_version='v2.5'):
    stage0 = hpccm.Stage()
    hpccm.config.set_container_format("singularity")
    stage0 += baseimage(image=image, _bootstrap=_bootstrap, _distro='ubuntu20')
    stage0 += label(metadata={'maintainer': 'Luhan Cheng', 'email': '*****@*****.**'})
    stage0 += packages(ospackages=['cpanminus', 'libexpat1-dev', 'sqlite3', 'libsqlite3-dev', 'autoconf'])
    stage0 += generic_build(
        repository='https://github.com/khowe/quicktree',
        branch=quicktree_version,
        build=['make'],
        install=[
            'mv quicktree /usr/local/bin',
            'mv include/* /usr/local/include/'
        ],
    )
    stage0 += boost()
    stage0 += generic_autotools(
        url=f'https://capnproto.org/capnproto-c++-{capnp_version}.tar.gz'
    )
    stage0 += shell(commands=['cpanm -l /usr/local/perl5 --notest BioPerl Bio::Sketch::Mash DBD::SQLite DBI'])
    stage0 += generic_autotools(
        repository=f'https://github.com/marbl/Mash',
        preconfigure=['./bootstrap.sh'],
        branch=mash_version,
        with_capnp='/usr/local/',
        with_boost='/usr/local/boost/',
    )
    stage0 += environment(variables={'PERL5LIB': '$PERL5LIB:/usr/local', **from_prefix('/usr/local/mashtree')})
    stage0 += shell(commands=['cpanm -f -l /usr/local/mashtree Mashtree'])
    return stage0
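# A usage sketch (assumed; 'mashtree.def' is a hypothetical output name):
# str() on the returned Stage yields the Singularity definition, because
# build() already set the container format.
if __name__ == '__main__':
    with open('mashtree.def', 'w') as f:
        f.write(str(build()))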
def recipe() -> str:
    """Generate the recipe

    :returns: singularity recipe
    :rtype: str

    """
    hpccm.config.set_container_format('singularity')
    hpccm.config.set_singularity_version('3.3')
    stage = hpccm.Stage()
    stage += label(metadata={'CLING ALPAKA VERSION': str(version)})
    stage += environment(variables={'CLING_ALPAKA_VERSION': version})

    # the baseimage of xeus-cling-cuda is Ubuntu 16.04 with CUDA 8
    if not ap_gn.add_alpaka_dep_layer(stage, '16.04', True, []):
        print('adding the alpaka dependencies layer failed', file=sys.stderr)
        exit(1)

    install_alpaka(stage)

    build_jupyter_kernel(stage)

    # The baseimage primitive supports nothing other than Docker Hub,
    # so the bootstrap header is added to the recipe manually.
    recipe = str(stage)
    recipe = 'Bootstrap: library\nFrom: sehrig/default/xeus-cling-cuda-cxx:2.2\n\n' + recipe

    return recipe
Example #6
def CVL_ubuntu_stage(gpu=False, stage_name='stage0'):
    image = "Characterisation-Virtual-Laboratory/CharacterisationVL-Software:2004" if not gpu else 'Characterisation-Virtual-Laboratory/CharacterisationVL-Software:2004-cuda11.0'
    stage0 = hpccm.Stage(name=stage_name)
    hpccm.config.set_container_format("singularity")
    stage0 += baseimage(image=image, _bootstrap='shub', _distro='ubuntu20')
    stage0 += label(metadata={
        'maintainer': 'Luhan Cheng',
        'email': '*****@*****.**'
    })
    return stage0
def oneapi_runtime(_from='0'):
    oneapi_runtime_stage = hpccm.Stage()
    oneapi_runtime_stage += hpccm.primitives.copy(_from='oneapi-build',
                                                  files={
                                                      "/opt/intel":
                                                      "/opt/intel",
                                                      "/etc/bash.bashrc":
                                                      "/etc/bash.bashrc"
                                                  })
    return oneapi_runtime_stage
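# A consumption sketch (assumed): because oneapi_runtime is attached to the
# build stage via setattr, deployment code can call runtime() on the stage
# just like on a building block, receiving a Stage that only copies
# /opt/intel and the bash startup hook from 'oneapi-build'.
def oneapi_compiler_runtime(stages: typing.Mapping[str, hpccm.Stage]):
    return stages['compiler_build'].runtime(_from='oneapi-build')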
def get_deployment_stage(*, args, previous_stages, building_blocks, wrapper):
    '''
    This deploys GROMACS along with its dependencies (fftw, mpi) to the final image.
    '''
    stage = hpccm.Stage()
    stage += hpccm.primitives.baseimage(image=get_base_image(args=args))
    stage += hpccm.building_blocks.python(python3=True,
                                          python2=False,
                                          devel=False)
    stage += hpccm.building_blocks.packages(ospackages=os_packages)

    # adding runtime from compiler
    stage += building_blocks['compiler'].runtime()

    # adding runtime from previous stages
    if previous_stages.get('dev', None) is not None:
        if building_blocks.get('fftw', None) is not None:
            stage += building_blocks['fftw'].runtime(_from='dev')

        if building_blocks.get('mpi', None) is not None:
            stage += building_blocks['mpi'].runtime(_from='dev')

    if previous_stages.get('gromacs', None) is not None:
        stage += hpccm.primitives.copy(_from='gromacs',
                                       _mkdir=True,
                                       src=['/usr/local/gromacs'],
                                       dest='/usr/local/gromacs')
    # wrapper and gmx_chooser scripts
    scripts_directory = os.path.join(config.GMX_INSTALLATION_DIRECTORY,
                                     'scripts')

    stage += hpccm.primitives.shell(
        commands=['mkdir -p {}'.format(scripts_directory)])

    # setting the wrapper script
    wrapper = os.path.join(scripts_directory, wrapper)
    stage += hpccm.primitives.copy(src='/scripts/wrapper.py', dest=wrapper)

    # copying the gmx_chooser script
    stage += hpccm.primitives.copy(src='/scripts/gmx_chooser.py',
                                   dest=os.path.join(scripts_directory,
                                                     'gmx_chooser.py'))
    # chmod for the files in scripts_directory
    stage += hpccm.primitives.shell(
        commands=['chmod +x {}'.format(os.path.join(scripts_directory, '*'))])

    # copying config file
    stage += hpccm.primitives.copy(src='config.py',
                                   dest=os.path.join(scripts_directory,
                                                     'config.py'))
    # setting environment variable so to make wrapper available to PATH
    stage += hpccm.primitives.environment(
        variables={'PATH': '{}:$PATH'.format(scripts_directory)})

    return stage
Example #9
    def __init__(self, *, stage_name, base_image, args, building_blocks,
                 previous_stages):
        self.stage = hpccm.Stage()
        self.base_image = base_image
        self.previous_stages = previous_stages
        # The following two will be required in generic_cmake
        self.preconfigure = []
        self.check = False

        self.__prepare(stage_name=stage_name, building_blocks=building_blocks)
        self.__gromacs(args=args, building_blocks=building_blocks)
        self.__regtest(args=args)
        self.__add__engines(args=args, building_blocks=building_blocks)
Example #10
def main():
    parser = argparse.ArgumentParser(
        description='Script to generate a container recipe for Alpaka',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '--container',
        type=str,
        default='singularity',
        choices=['docker', 'singularity'],
        help='generate a recipe for docker or singularity (default: singularity)'
    )
    parser.add_argument('-i',
                        type=str,
                        default='ubuntu18.04',
                        choices=images.keys(),
                        help='Choose the base image (default: ubuntu18.04).')
    parser.add_argument(
        '-c',
        metavar='',
        nargs='+',
        type=str,
        help='Install extra compilers. GCC and Clang are supported. '
        'E.g. -c gcc:8 clang:7.0 clang:8')
    parser.add_argument('--alpaka',
                        action='store_true',
                        help='Install Alpaka to /usr/local')

    args = parser.parse_args()

    # verify distribution, CUDA support and extra compiler
    ubuntu_version = check_distribution(images[args.i])
    cuda_support = 'cuda' in args.i
    if args.c:
        check_compiler(args.c)

    # create baseimage
    hpccm.config.set_container_format(args.container)
    if args.container == 'singularity':
        hpccm.config.set_singularity_version('3.3')
    stage = hpccm.Stage()
    stage += baseimage(image=images[args.i])

    if not gn.add_alpaka_dep_layer(stage, ubuntu_version, cuda_support, args.c,
                                   args.alpaka):
        print('adding the alpaka dependencies layer failed', file=sys.stderr)
        exit(1)

    install_ninja(stage)

    print(stage)
Example #11
def get_dev_stage(*, stage_name='dev', args, building_blocks):
    '''
    This is the initial/development stage responsible for building images with
    all required dependencies of GROMACS, such as openmpi and fftw
    '''
    stage = hpccm.Stage()
    stage += hpccm.primitives.baseimage(image=get_base_image(args=args, cuda=args.cuda),
                                         _as=stage_name)

    for bb in ('compiler', 'mpi', 'cmake', 'fftw'):
        if building_blocks.get(bb, None) is not None:
            stage += building_blocks[bb]

    return stage
Example #12
def prepare_and_cook_fftw(*, args):
    '''
    This routine generates the specification file for an FFTW-only container
    '''
    stage = hpccm.Stage()
    stage += hpccm.primitives.baseimage(image=get_base_image(args=args))
    building_blocks = collections.OrderedDict()

    get_compiler(args=args, building_blocks=building_blocks)
    get_fftw(args=args,
             building_blocks=building_blocks,
             configure_opts=['--enable-' + simd for simd in args.simd],
             prefix='/usr/local')

    for bb in building_blocks:
        stage += building_blocks[bb]

    print(stage)
Example #13
def main():
    ############################################################################
    # setup basics
    ############################################################################
    hpccm.config.set_container_format('singularity')
    hpccm.config.set_singularity_version('3.3')

    stage0 = hpccm.Stage()
    stage0 += baseimage(image='ubuntu:bionic')

    stage0 += packages(ospackages=[
        'git', 'g++', 'wget', 'pkg-config', 'less', 'uuid-dev', 'gdb',
        'locales', 'gpg-agent', 'gnupg2', 'locales-all', 'unzip'
    ])
    # set language to en_US.UTF-8 to avoid some problems with the cling output system
    stage0 += shell(
        commands=['locale-gen en_US.UTF-8', 'update-locale LANG=en_US.UTF-8'])

    ############################################################################
    # install clang/llvm
    ############################################################################
    stage0 += llvm(version='9', extra_repository=True)

    ############################################################################
    # install ninja build system
    ############################################################################
    stage0 += shell(commands=[
        'cd /opt', 'wget --no-check-certificate ' +
        'https://github.com/ninja-build/ninja/releases/download/v1.9.0/ninja-linux.zip',
        'unzip ninja-linux.zip', 'mv ninja /usr/local/bin/',
        'rm ninja-linux.zip', 'cd -'
    ])

    ############################################################################
    # install cmake
    ############################################################################
    stage0 += cmake(eula=True, version='3.16.1')

    ############################################################################
    # write recipe.def
    ############################################################################
    with open('recipe.def', 'w') as filehandle:
        filehandle.write(str(stage0))
Example #14
    def __initiate_build_stage(self):
        self.stages['build'] = hpccm.Stage()

        self.stages['build'] += hpccm.primitives.baseimage(
            image=self.base_image, _as='build')

        # python TODO: need to think whether to have this in the container
        self.stages['build'] += hpccm.building_blocks.python(python3=True,
                                                             python2=False,
                                                             devel=False)

        # cmake
        self.__add_cmake(stage='build')
        # compiler
        self.__add_compiler(stage='build')
        # mpi
        self.__add_mpi(stage='build')
        # fftw
        self.__add_fftw(stage='build')
Example #15
    def __deployment_stage(self, *, build_stage):
        self.stages['deploy'] = hpccm.Stage()
        self.stages['deploy'] += hpccm.primitives.baseimage(
            image=self.base_image)
        self.stages['deploy'] += hpccm.building_blocks.packages(
            ospackages=self.os_packages)
        self.stages['deploy'] += self.stages[build_stage].runtime()

        # setting wrapper binaries
        # create the wrapper binaries directory
        wrappers_directory = os.path.join(config.GMX_INSTALLATION_DIRECTORY,
                                          'bin')
        self.stages['deploy'] += hpccm.primitives.shell(
            commands=['mkdir -p {}'.format(wrappers_directory)])

        for wrapper in self.wrappers:
            wrapper_path = os.path.join(wrappers_directory, wrapper)
            self.stages['deploy'] += hpccm.primitives.copy(
                src='/scripts/wrapper.py', dest=wrapper_path)

        # setting the gmx_chooser script
        self.stages['deploy'] += hpccm.primitives.copy(
            src='/scripts/gmx_chooser.py',
            dest=os.path.join(wrappers_directory, 'gmx_chooser.py'))
        # chmod
        self.stages['deploy'] += hpccm.primitives.shell(commands=[
            'chmod +x {}'.format(os.path.join(wrappers_directory, '*'))
        ])

        # copying config file
        self.stages['deploy'] += hpccm.primitives.copy(src='config.py',
                                                       dest=os.path.join(
                                                           wrappers_directory,
                                                           'config.py'))
        # environment variable
        self.stages['deploy'] += hpccm.primitives.environment(
            variables={'PATH': '$PATH:{}'.format(wrappers_directory)})

        self.stages['deploy'] += hpccm.primitives.label(
            metadata={'gromacs.version': self.cli.args.gromacs})
def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()

    os_packages = _common_packages
    building_blocks['ospackages'] = hpccm.building_blocks.packages(
        ospackages=os_packages)

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(args)
    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])
    building_blocks['cmake'] = get_cmake(args)
    #    building_blocks['configure_gmx'] = configure_gmx(args, building_blocks['compiler'])
    building_blocks['build_gmx'] = build_gmx(args)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage
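# A driver sketch (assumed entry point; argument parsing and container-format
# selection live elsewhere in the script): rendering each yielded stage in
# order produces the complete multi-stage recipe.
def render(args) -> str:
    return '\n'.join(str(stage) for stage in build_stages(args))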
Example #17
def get_stage(container):
    # generate baseimage
    hpccm.config.set_container_format(container)
    # version >3.2 is necessary for multi-stage build
    if container == 'singularity':
        hpccm.config.set_singularity_version('3.3')
    Stage0 = hpccm.Stage()
    Stage0 += baseimage(image='ubuntu:bionic')

    # copy project from outside in the container
    if container == 'singularity':
        Stage0 += copy(src='../hello_world_tool', dest='/opt/')
    else:
        # docker: cannot copy files from outside the build context,
        # so we need to move the build context one level up
        Stage0 += copy(src='./hello_world_tool', dest='/opt/hello_world_tool')

    # install compiler tools
    Stage0 += cmake(eula=True, version='3.14.5')
    Stage0 += packages(ospackages=['g++', 'make', 'wget', 'build-essential'])

    # build and install project
    cmb = CMakeBuild()
    cm = []
    cm.append(cmb.configure_step(build_directory='/opt/build_hello_world_tool',
                                 directory='/opt/hello_world_tool/'))
    cm.append(cmb.build_step(target='install'))
    Stage0 += shell(commands=cm)

    Stage0 += shell(commands=build_openssl(name='openssl-1.1.1c',
                                           build_dir='/opt/openssl_build'))

    # script that runs when
    # - singularity is invoked with the run parameter, or the image runs directly
    # - docker is invoked with the run parameter and no arguments
    Stage0 += runscript(commands=['hello_world_tool'])

    return Stage0
Example #18
    def _build(self, *, previous_stages):
        '''
        This method performs the preparation for the recipes, then generates
        the recipes, and finally cooks them.
        '''

        self.stage = hpccm.Stage()

        self._prepare()

        for tool in tools_order:
            if tool in self.args:
                try:
                    method = getattr(self, tool)
                except AttributeError as error:
                    pass
                    # print(error)
                else:
                    # print('method', method)
                    method(self.args[tool])

        # Recipe has been prepared. Now, it is time to cook .....
        self._cook()
def main(args) -> hpccm.Stage:
    # Create Stage
    Stage0 = hpccm.Stage()

    # Create string for base image tag
    base_image_tag = str()

    # Check if we use CUDA images or plain linux images
    if (args.cuda is not None):
        cuda_version_tag = 'nvidia/cuda:' + args.cuda + '-devel'
        if (args.centos is not None):
            cuda_version_tag += '-centos' + args.centos
        elif (args.ubuntu is not None):
            if ((args.cuda == '9.0') and (args.ubuntu == '18.04')):
                raise RuntimeError('Can not combine CUDA 9.0 and Ubuntu 18.04')
            cuda_version_tag += '-ubuntu' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')

        base_image_tag = cuda_version_tag
    else:
        if (args.centos is not None):
            base_image_tag = 'centos:centos' + args.centos
        elif (args.ubuntu is not None):
            base_image_tag = 'ubuntu:' + args.ubuntu
        else:
            raise RuntimeError('Logic error: no Linux distribution selected.')

    Stage0 += hpccm.primitives.baseimage(image=base_image_tag)

    # Install the GROMACS packages we always will need for our builds.
    Stage0 += hpccm.building_blocks.packages(ospackages=['build-essential',
                                                         'ccache',
                                                         'git',
                                                         'libfftw3-dev',
                                                         'libhwloc-dev',
                                                         'liblapack-dev',
                                                         'libx11-dev',
                                                         'moreutils',
                                                         'ninja-build',
                                                         'rsync',
                                                         'valgrind',
                                                         'wget',
                                                         'xsltproc'])

    # Add CMake to image
    Stage0 += hpccm.building_blocks.cmake(eula=True, version=args.cmake)

    # We always add Python3 and Pip
    Stage0 += hpccm.building_blocks.python(python3=True, python2=False)
    Stage0 += hpccm.building_blocks.pip(upgrade=True, pip='pip3',
                                        packages=['pytest', 'networkx', 'numpy'])

    # Compiler
    if (args.icc is not None):
        raise RuntimeError('Intel compiler toolchain recipe not implemented yet')

    if (args.llvm is not None):
        # Build the default compiler if we don't need special support
        if (args.tsan is None):
            if (args.llvm == 3):
                if ((args.ubuntu is not None) and (args.ubuntu == '18.04')):
                    raise RuntimeError('LLVM 3 and Ubuntu 18.04 can cause issues when used together')
                args.llvm = 3.6
            compiler = hpccm.building_blocks.llvm(extra_repository=True, version=args.llvm)
        # Build our own version instead to get TSAN + OMP
        else:
            compiler_branch = 'release_'+str(args.llvm)+'0'
            compiler = hpccm.building_blocks.generic_cmake(repository='https://git.llvm.org/git/llvm.git',
                    prefix='/usr/local', recursive=True, branch=compiler_branch,
                    cmake_opts=['-D CMAKE_BUILD_TYPE=Release', '-D LLVM_ENABLE_PROJECTS="clang;openmp"', '-D LIBOMP_TSAN_SUPPORT=on'],
                    preconfigure=['export branch='+compiler_branch,
                                  '(cd projects; git clone https://git.llvm.org/git/libcxx.git; cd libcxx; git checkout $branch)',
                                  '(cd projects; git clone https://git.llvm.org/git/libcxxabi.git; cd libcxxabi; git checkout $branch)',
                                  '(cd projects; git clone https://git.llvm.org/git/compiler-rt.git; cd compiler-rt; git checkout $branch)',
                                  '(cd ..; git clone https://git.llvm.org/git/openmp.git; cd openmp; git checkout $branch)',
                                  '(cd ..; git clone https://git.llvm.org/git/clang.git; cd clang; git checkout $branch)',
                                  '(cd ../clang/tools; git clone https://git.llvm.org/git/clang-tools-extra.git extra; cd extra; git checkout $branch)'],
                    postinstall=['ln -s /usr/local/bin/clang++ /usr/local/bin/clang++-'+str(args.llvm),
                                 'ln -s /usr/local/bin/clang-format /usr/local/bin/clang-format-'+str(args.llvm),
                                 'ln -s /usr/local/bin/clang-tidy /usr/local/bin/clang-tidy-'+str(args.llvm)])


    elif (args.gnu is not None):
        compiler = hpccm.building_blocks.gnu(extra_repository=True,
                                             version=args.gnu,
                                             fortran=False)
    else:
        raise RuntimeError('Logic error: no compiler toolchain selected.')


    Stage0 += compiler
    # If we use the package version of LLVM, we need to install extra packages for it.
    if (args.llvm is not None) and (args.tsan is None):
        Stage0 += hpccm.building_blocks.packages(ospackages=['libomp-dev',
                                                             'clang-format-'+str(args.llvm),
                                                             'clang-tidy-'+str(args.llvm)])

    # If needed, add MPI to the image
    if (args.mpi is not None):
        if args.mpi == 'openmpi':
            use_cuda = False
            if (args.cuda is not None):
                use_cuda = True

            Stage0 += hpccm.building_blocks.openmpi(toolchain=compiler.toolchain, cuda=use_cuda, infiniband=False)
        elif args.mpi == 'impi':
            raise RuntimeError('Intel MPI recipe not implemented yet.')

    # Add OpenCL environment if needed
    if (args.opencl is not None):
        if args.opencl == 'nvidia':
            if (args.cuda is None):
                raise RuntimeError('Need Nvidia environment for Nvidia OpenCL image')

            Stage0 += hpccm.building_blocks.packages(ospackages=['nvidia-opencl-dev'])

        elif args.opencl == 'intel':
            Stage0 += hpccm.building_blocks.packages(ospackages=['ocl-icd-opencl-dev', 'opencl-headers', 'beignet-opencl-icd'])
        elif args.opencl == 'amd':
            # Due to the wisdom of AMD, this needs to be done differently for the OS and version! Hurray!
            # And they don't allow wget, so this branch is not taken for now! AMD, please allow me to use wget.
            raise RuntimeError('AMD recipe can not be generated because they do not allow wget for getting the packages.')
            # if args.ubuntu:
            #     if args.ubuntu is not '16.04':
            #         Stage0 += hpccm.building_blocks.generic_build(url='https://www2.ati.com/drivers/linux/ubuntu/'+args.ubuntu+'/amdgpu-pro-18.30-641594.tar.xz',
            #                                                       install=['./amdgpu-install --opencl=legacy --headless -y'])
            #     elif:
            #         Stage0 += hpccm.building_blocks.generic_build(url='https://www2.ati.com/drivers/linux/ubuntu/amdgpu-pro-18.30-641594.tar.xz',
            #                                                       install=['./amdgpu-install --opencl=legacy --headless -y'])
            # elif args.centos:
            #         Stage0 += hpccm.building_blocks.generic_build(url='https://www2.ati.com/drivers/linux/rhel'+args.centos'/amdgpu-pro-18.30-641594.tar.xz',
            #                                                       install=['./amdgpu-install --opencl=legacy --headless -y'])

        if (args.clfft is not None):
            Stage0 += hpccm.building_blocks.generic_cmake(repository='https://github.com/clMathLibraries/clFFT.git',
                                                          prefix='/usr/local', recursive=True, branch=args.clfft, directory='clFFT/src')


    return Stage0
Example #20
def get_stage(container):
    # generate baseimage
    hpccm.config.set_container_format(container)
    # version >3.2 is necessary for multi-stage build
    if container == 'singularity':
        hpccm.config.set_singularity_version('3.3')
    Stage0 = hpccm.Stage()
    # the stages need "names" so that they can reference each other
    Stage0 += baseimage(image='ubuntu:bionic', _as='Stage0')

    # copy project from outside in the container
    if container == 'singularity':
        Stage0 += copy(src='../hello_world_tool', dest='/opt/')
    else:
        # docker: cannot copy files from outside the build context,
        # so we need to move the build context one level up
        Stage0 += copy(src='./hello_world_tool', dest='/opt/hello_world_tool')

    # install compiler tools
    Stage0 += cmake(eula=True, version='3.14.5')
    Stage0 += packages(ospackages=['g++', 'make', 'wget', 'build-essential'])

    # build and install project
    cmb = CMakeBuild(prefix="/opt/hello_install/")
    cm = []
    cm.append(
        cmb.configure_step(build_directory='/opt/build_hello_world_tool',
                           directory='/opt/hello_world_tool/',
                           opts=['-DCMAKE_INSTALL_RPATH=/usr/local/lib/']))
    cm.append(cmb.build_step(target='install'))
    Stage0 += shell(commands=cm)

    Stage0 += shell(commands=build_openssl(name='openssl-1.1.1c',
                                           build_dir='/opt/openssl_build'))

    # add release stage
    Stage1 = hpccm.Stage()
    Stage1 += baseimage(image='ubuntu:bionic', _as='Stage1')
    Stage1 += copy(_from='Stage0',
                   src='/opt/hello_install/',
                   dest='/usr/local/')

    Stage1 += copy(_from='Stage0',
                   src='/opt/openssl_install/',
                   dest='/usr/local/')

    # the commands merge the bin, lib etc. folders of hello_install and openssl_install
    # into the /usr/local folder
    if container == "singularity":
        Stage1 += shell(commands=[
            'cp -rl /usr/local/hello_install/* /usr/local/',
            'cp -rl /usr/local/openssl_install/* /usr/local/',
            'rm -r /usr/local/hello_install/',
            'rm -r /usr/local/openssl_install/'
        ])

    # script that runs when
    # - singularity is invoked with the run parameter, or the image runs directly
    # - docker is invoked with the run parameter and no arguments
    Stage1 += runscript(commands=['hello_world_tool'])

    return [Stage0, Stage1]
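# A usage sketch (assumed driver): both returned stages must be emitted, in
# order, to form the multi-stage recipe.
if __name__ == '__main__':
    for s in get_stage('singularity'):
        print(s)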
Example #21
def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # If we need TSAN or oneAPI support, the early build is more complex and is
    # isolated so that our compiler images don't have all the cruft needed to
    # get those things installed.
    if args.llvm is not None and args.tsan is not None:
        add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
    if args.oneapi is not None:
        add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(args, compiler_build_stage=stages.get('compiler_build'))
    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])

    # Install additional packages early in the build to optimize Docker build layer cache.
    os_packages = _common_packages + get_llvm_packages(args)
    if args.doxygen is not None:
        os_packages += _docs_extra_packages
    if args.oneapi is not None:
        os_packages += ['lsb-release']
    building_blocks['ospackages'] = hpccm.building_blocks.packages(ospackages=os_packages)

    building_blocks['cmake'] = hpccm.building_blocks.cmake(eula=True, version=args.cmake)
    building_blocks['opencl'] = get_opencl(args)
    building_blocks['clfft'] = get_clfft(args)

    # Add Python environments to MPI images, only, so we don't have to worry
    # about whether to install mpi4py.
    if args.mpi is not None and len(args.venvs) > 0:
        add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # We always add Python3 and Pip
    stages['main'] += hpccm.building_blocks.python(python3=True, python2=False, devel=True)
    stages['main'] += hpccm.building_blocks.pip(upgrade=True, pip='pip3',
                                                packages=['pytest', 'networkx', 'numpy'])

    # Add documentation requirements (doxygen and sphinx + misc).
    if args.doxygen is not None:
        add_documentation_dependencies(args, stages)

    if 'pyenv' in stages and stages['pyenv'] is not None:
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
                                                dest='/root/.pyenv')
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
                                                dest='/root/venv')
        # TODO: Update user home directory.
        # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
        # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
        #                                         dest='/root/')

    # Make sure that `python` resolves to something.
    stages['main'] += hpccm.primitives.shell(commands=['test -x /usr/bin/python || '
                                                       'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
                                                       '/usr/bin/python --version'])

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage
Example #22
def add_python_stages(building_blocks: typing.Mapping[str, bb_base],
                      input_args,
                      output_stages: typing.MutableMapping[str, hpccm.Stage]):
    """Add the stage(s) necessary for the requested venvs.

    One intermediate build stage is created for each venv (see --venv option).

    Each stage partially populates Python installations and venvs in the home
    directory. The home directory is collected by the 'pyenv' stage for use by
    the main build stage.
    """
    if len(input_args.venvs) < 1:
        raise RuntimeError('No venvs to build...')
    if output_stages is None or not isinstance(output_stages, collections.abc.MutableMapping):
        raise RuntimeError('Need a container for output stages.')

    # Main Python stage that collects the environments from individual stages.
    # We collect the stages individually, rather than chaining them, because the
    # copy is a bit slow and wastes local Docker image space for each filesystem
    # layer.
    pyenv_stage = hpccm.Stage()
    pyenv_stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as='pyenv')
    pyenv_stage += building_blocks['compiler']
    pyenv_stage += building_blocks['mpi']
    pyenv_stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

    for version in [StrictVersion(py_ver) for py_ver in sorted(input_args.venvs)]:
        stage_name = 'py' + str(version)
        stage = hpccm.Stage()
        stage += hpccm.primitives.baseimage(image=base_image_tag(input_args), _as=stage_name)
        stage += building_blocks['compiler']
        stage += building_blocks['mpi']
        stage += hpccm.building_blocks.packages(ospackages=_python_extra_packages)

        # TODO: Use a non-root user for testing and Python virtual environments.
        stage += hpccm.primitives.shell(commands=[
            'curl https://pyenv.run | bash',
            """echo 'export PYENV_ROOT="$HOME/.pyenv"' >> $HOME/.bashrc""",
            """echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv init -)"' >> $HOME/.bashrc""",
            """echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/.bashrc"""])
        pyenv = '$HOME/.pyenv/bin/pyenv'
        commands = ['PYTHON_CONFIGURE_OPTS="--enable-shared" {pyenv} install -s {version}'.format(
            pyenv=pyenv,
            version=str(version))]
        stage += hpccm.primitives.shell(commands=commands)

        commands = prepare_venv(version)
        stage += hpccm.primitives.shell(commands=commands)

        # TODO: Update user home directory.
        pyenv_stage += hpccm.primitives.copy(_from=stage_name, _mkdir=True, src=['/root/'],
                                             dest='/root')

        # Add the intermediate build stage to the sequence
        output_stages[stage_name] = stage

    # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
    # # `version` should be a system installation or pyenv environment (or pyenv-virtualenv)
    # # with the dependencies for all of the Python aspects of CMake-driven builds.
    # commands = '{pyenv} global {version}'.format(
    #             pyenv=pyenv,
    #             version=...)
    # pyenv_stage += hpccm.primitives.shell(commands=commands)

    # Add the aggregating build stage to the sequence. This allows the main stage to copy
    # the files in a single stage, potentially reducing the overall output image size.
    output_stages['pyenv'] = pyenv_stage
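# prepare_venv is defined elsewhere in the script; a hypothetical sketch of
# the commands it might return, assuming pyenv-installed interpreters under
# $HOME/.pyenv and venvs collected under $HOME/venv (as the copies above imply):
def prepare_venv_sketch(version) -> typing.List[str]:
    interpreter = '$HOME/.pyenv/versions/{}/bin/python'.format(version)
    venv_path = '$HOME/venv/py{}'.format(version)
    return ['{} -m venv {}'.format(interpreter, venv_path),
            '{}/bin/pip install --upgrade pip setuptools'.format(venv_path)]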
Example #23
parser.add_argument('--compiler', type=str, default='gnu',
                    choices=['gnu', 'llvm', 'nvhpc'],
                    help='Compiler choice (default: gnu)')
parser.add_argument('--format', type=str, default='docker',
                    choices=['docker', 'singularity'],
                    help='Container specification format (default: docker)')
parser.add_argument('--linux', type=str, default='centos',
                    choices=['centos', 'ubuntu'],
                    help='Linux distribution choice (default: centos)')
parser.add_argument('--nvhpc_eula_accept', action='store_true',
                    default=False,
                    help='Accept PGI EULA (default: false)')
args = parser.parse_args()

### Create Stage
Stage0 = hpccm.Stage()

### Linux distribution
if args.linux == 'centos':
    Stage0 += hpccm.primitives.baseimage(image='centos:7')
elif args.linux == 'ubuntu':
    Stage0 += hpccm.primitives.baseimage(image='ubuntu:16.04')

### Compiler
if args.compiler == 'gnu':
    Stage0 += hpccm.building_blocks.gnu()
elif args.compiler == 'llvm':
    Stage0 += hpccm.building_blocks.llvm()
elif args.compiler == 'nvhpc':
    if not args.nvhpc_eula_accept:
        print('EULA not accepted. To accept, use "--nvhpc_eula_accept".\n'
def main():
    parser = argparse.ArgumentParser(
        description=
        'Simple script for generating a singularity recipe for the GOL example.'
    )
    parser.add_argument(
        '--build_prefix',
        type=str,
        default='/tmp/GOL_example',
        help=
        'Define the path in which all projects will be built (default: /tmp/GOL_example).'
    )
    parser.add_argument('-v',
                        '--version',
                        action='store_true',
                        help='print version of the container')
    args = parser.parse_args()

    if args.version:
        print(container_version)
        sys.exit(0)

    hpccm.config.set_container_format('singularity')
    hpccm.config.set_singularity_version('3.3')
    stage = hpccm.Stage()

    stage += label(metadata={'GOL_MAINTAINER': 'Simeon Ehrig'})
    stage += label(metadata={'GOL_EMAIL': '*****@*****.**'})
    stage += label(metadata={'GOL_Version': str(container_version)})

    # copy example inside container
    stage += copy(src='notebook', dest='/')
    stage += copy(src='jupyter_notebook_config.py', dest='/')

    # copy and build the pnwriter library
    stage += packages(ospackages=['libpng-dev'])
    png = []

    png_git = git()
    png.append(
        png_git.clone_step(
            repository='https://github.com/pngwriter/pngwriter.git',
            branch='dev',
            path='/opt/'))

    png_cmake = CMakeBuild(prefix='/notebook/pngwriter')
    png.append(
        png_cmake.configure_step(directory='/opt/pngwriter',
                                 opts=['-DBUILD_SHARED_LIBS=ON']))
    png.append(png_cmake.build_step(target='install'))
    png.append('rm -rf /opt/pngwriter')

    stage += shell(commands=png)

    # Copy notebook examples and pngwriter lib to the host's /tmp file system to obtain a writable file system.
    stage += runscript(commands=[
        'if [ ! -d /tmp/GOL-xeus-cling-cuda ]; then \n'
        ' mkdir /tmp/GOL-xeus-cling-cuda &&'
        ' cp -r /notebook/ /tmp/GOL-xeus-cling-cuda &&'
        ' ln -s /tmp/GOL-xeus-cling-cuda/notebook/pngwriter'
        '  /tmp/GOL-xeus-cling-cuda/notebook/GTC_presentations/simulation/ \n fi',
        'cd /tmp/GOL-xeus-cling-cuda/notebook',
        'jupyter-notebook --config=/jupyter_notebook_config.py'
    ])

    # Add the bootstrap manually because hpccm does not support .sregistry.
    recipe = str(stage)
    recipe = 'Bootstrap: library\nFrom: sehrig/default/xeus-cling-cuda:2.3\n\n' + recipe

    print(recipe)
def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # If we need TSAN or oneAPI support, the early build is more complex and is
    # isolated so that our compiler images don't have all the cruft needed to
    # get those things installed.
    if args.llvm is not None and args.tsan is not None:
        add_tsan_compiler_build_stage(input_args=args, output_stages=stages)
    if args.oneapi is not None:
        add_oneapi_compiler_build_stage(input_args=args, output_stages=stages)

    add_base_stage(name='build_base', input_args=args, output_stages=stages)

    # Add Python environments to MPI images, only, so we don't have to worry
    # about whether to install mpi4py.
    if args.mpi is not None and len(args.venvs) > 0:
        add_python_stages(base='build_base',
                          input_args=args,
                          output_stages=stages)

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()

    for i, cmake in enumerate(args.cmake):
        building_blocks['cmake' + str(i)] = hpccm.building_blocks.cmake(
            eula=True, prefix=f'/usr/local/cmake-{cmake}', version=cmake)

    # Install additional packages early in the build to optimize Docker build layer cache.
    os_packages = list(get_llvm_packages(args)) + get_opencl_packages(
        args) + get_rocm_packages(args) + get_cp2k_packages(args)
    if args.doxygen is not None:
        os_packages += _docs_extra_packages
    if args.oneapi is not None:
        os_packages += ['lsb-release']
    if args.hipsycl is not None:
        os_packages += ['libboost-fiber-dev']
    building_blocks['extra_packages'] = []
    if args.intel_compute_runtime:
        building_blocks['extra_packages'] += hpccm.building_blocks.packages(
            apt_keys=[
                'https://repositories.intel.com/graphics/intel-graphics.key'
            ],
            apt_repositories=[
                'deb [arch=amd64] https://repositories.intel.com/graphics/ubuntu focal main'
            ])
        os_packages += _intel_compute_runtime_extra_packages
    if args.rocm is not None:
        building_blocks['extra_packages'] += hpccm.building_blocks.packages(
            apt_keys=['http://repo.radeon.com/rocm/rocm.gpg.key'],
            apt_repositories=[
                f'deb [arch=amd64] http://repo.radeon.com/rocm/apt/{args.rocm}/ xenial main'
            ])
    building_blocks['extra_packages'] += hpccm.building_blocks.packages(
        ospackages=os_packages, apt_ppas=['ppa:intel-opencl/intel-opencl'])

    building_blocks['CP2K'] = get_cp2k(args)

    if args.cuda is not None and args.llvm is not None:
        # Hack to tell clang what version of CUDA we're using
        # based on https://github.com/llvm/llvm-project/blob/1fdec59bffc11ae37eb51a1b9869f0696bfd5312/clang/lib/Driver/ToolChains/Cuda.cpp#L43
        cuda_version_split = args.cuda.split('.')
        # LLVM requires the version in x.y.z format, while args.cuda may be either x.y or x.y.z
        cuda_version_str = '{}.{}.{}'.format(
            cuda_version_split[0], cuda_version_split[1],
            cuda_version_split[2] if len(cuda_version_split) > 2 else 0)
        building_blocks['cuda-clang-workaround'] = hpccm.primitives.shell(
            commands=[
                f'echo "CUDA Version {cuda_version_str}" > /usr/local/cuda/version.txt'
            ])
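        # Worked examples of the normalization above (assumed inputs):
        #   args.cuda == '11.2'   -> 'CUDA Version 11.2.0'
        #   args.cuda == '11.4.1' -> 'CUDA Version 11.4.1'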

    building_blocks['clfft'] = get_clfft(args)

    building_blocks['heffte'] = get_heffte(args)

    building_blocks['hipSYCL'] = get_hipsycl(args)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(
        image='build_base', _distro=hpccm_distro_name(args), _as='main')
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # We always add Python3 and Pip
    stages['main'] += hpccm.building_blocks.python(python3=True, python2=False)

    # Add documentation requirements (doxygen and sphinx + misc).
    if args.doxygen is not None:
        add_documentation_dependencies(args, stages)

    if 'pyenv' in stages and stages['pyenv'] is not None:
        stages['main'] += hpccm.primitives.copy(_from='pyenv',
                                                _mkdir=True,
                                                src=['/root/.pyenv/'],
                                                dest='/root/.pyenv')
        stages['main'] += hpccm.primitives.copy(_from='pyenv',
                                                _mkdir=True,
                                                src=['/root/venv/'],
                                                dest='/root/venv')
        # TODO: Update user home directory.
        # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
        # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
        #                                         dest='/root/')

    # Make sure that `python` resolves to something.
    stages['main'] += hpccm.primitives.shell(commands=[
        'test -x /usr/bin/python || '
        'update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && '
        '/usr/bin/python --version'
    ])

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage
Example #26
def get_deployment_stage(*, args, previous_stages, building_blocks, wrapper):
    '''
    This deploys GROMACS along with its dependencies (fftw, mpi) to the final image.
    '''
    stage = hpccm.Stage()
    stage += hpccm.primitives.baseimage(image=get_base_image(args=args, cuda=args.cuda))
    stage += hpccm.building_blocks.python(python3=True, python2=False, devel=False)
    stage += hpccm.building_blocks.packages(ospackages=os_packages)

    # adding runtime from compiler
    stage += building_blocks['compiler'].runtime()

    # adding runtime from previous stages/provided container
    # fftw
    if args.fftw_container:
        stage += hpccm.primitives.copy(_from=args.fftw_container,
                                       _mkdir=True,
                                       src=['/usr/local/lib'],
                                       dest='/usr/local/fftw/lib')

        stage += hpccm.primitives.copy(_from=args.fftw_container,
                                       _mkdir=True,
                                       src=['/usr/local/include'],
                                       dest='/usr/local/fftw/include')
        # adding fftw library path
        stage += hpccm.primitives.environment(
            variables={'LD_LIBRARY_PATH': '/usr/local/fftw/lib:$LD_LIBRARY_PATH'}
        )

    elif args.fftw:
        # library path will be added automatically by runtime
        stage += building_blocks['fftw'].runtime(_from='dev')


    # mpi
    if building_blocks.get('mpi', None) is not None:
        # This means, mpi has been installed in the dev stage
        stage += building_blocks['mpi'].runtime(_from='dev')


    if previous_stages.get('gromacs', None) is not None:
        stage += hpccm.primitives.copy(_from='gromacs',
                                       _mkdir=True,
                                       src=['/usr/local/gromacs'],
                                       dest='/usr/local/gromacs')
    # wrapper and gmx_chooser scripts
    scripts_directory = os.path.join(config.GMX_INSTALLATION_DIRECTORY, 'scripts')

    stage += hpccm.primitives.shell(commands=['mkdir -p {}'.format(scripts_directory)])

    # setting the wrapper script
    wrapper = os.path.join(scripts_directory, wrapper)
    stage += hpccm.primitives.copy(src='/scripts/wrapper.py', dest=wrapper)

    # copying the gmx_chooser script
    stage += hpccm.primitives.copy(src='/scripts/gmx_chooser.py',
                                   dest=os.path.join(scripts_directory, 'gmx_chooser.py'))
    # chmod for the files in the scripts directory
    stage += hpccm.primitives.shell(commands=['chmod +x {}'.format(
        os.path.join(scripts_directory, '*')
    )])

    # copying config file
    stage += hpccm.primitives.copy(src='config.py',
                                   dest=os.path.join(scripts_directory, 'config.py'))
    # setting environment variable so to make wrapper available to PATH
    stage += hpccm.primitives.environment(variables={'PATH': '{}:$PATH'.format(scripts_directory)})

    return stage
Example #27
def build_stages(args) -> typing.Iterable[hpccm.Stage]:
    """Define and sequence the stages for the recipe corresponding to *args*."""

    # A Dockerfile or Singularity recipe can have multiple build stages.
    # The main build stage can copy files from previous stages, though only
    # the last stage is included in the tagged output image. This means that
    # large or expensive sets of build instructions can be isolated in
    # local/temporary images, but all of the stages need to be output by this
    # script, and need to occur in the correct order, so we create a sequence
    # object early in this function.
    stages = collections.OrderedDict()

    # If we need the TSAN compilers, the early build is more involved.
    if args.llvm is not None and args.tsan is not None:
        add_tsan_stage(input_args=args, output_stages=stages)

    # Building blocks are chunks of container-builder instructions that can be
    # copied to any build stage with the addition operator.
    building_blocks = collections.OrderedDict()

    # These are the most expensive and most reusable layers, so we put them first.
    building_blocks['compiler'] = get_compiler(args, tsan_stage=stages.get('tsan'))
    building_blocks['mpi'] = get_mpi(args, building_blocks['compiler'])

    # Install additional packages early in the build to optimize Docker build layer cache.
    os_packages = _common_packages + get_llvm_packages(args)
    if args.doxygen is not None:
        os_packages += _docs_extra_packages
    building_blocks['ospackages'] = hpccm.building_blocks.packages(ospackages=os_packages)

    building_blocks['cmake'] = hpccm.building_blocks.cmake(eula=True, version=args.cmake)
    building_blocks['opencl'] = get_opencl(args)
    building_blocks['clfft'] = get_clfft(args)

    # Add Python environments to MPI images, only, so we don't have to worry
    # about whether to install mpi4py.
    if args.mpi is not None and len(args.venvs) > 0:
        add_python_stages(building_blocks=building_blocks, input_args=args, output_stages=stages)

    # Create the stage from which the targeted image will be tagged.
    stages['main'] = hpccm.Stage()

    stages['main'] += hpccm.primitives.baseimage(image=base_image_tag(args))
    for bb in building_blocks.values():
        if bb is not None:
            stages['main'] += bb

    # We always add Python3 and Pip
    stages['main'] += hpccm.building_blocks.python(python3=True, python2=False, devel=True)
    stages['main'] += hpccm.building_blocks.pip(upgrade=True, pip='pip3',
                                                packages=['pytest', 'networkx', 'numpy'])

    # Add documentation requirements (doxygen and sphinx + misc).
    if (args.doxygen is not None):
        if (args.doxygen == '1.8.5'):
            doxygen_commit = 'ed4ed873ab0e7f15116e2052119a6729d4589f7a'
        else:
            doxygen_commit = 'a6d4f4df45febe588c38de37641513fd576b998f'
        stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/westes/flex.git',
            commit='f7788a9a0ecccdc953ed12043ccb59ca25714018',
            prefix='/tmp/install-of-flex',
            configure_opts=['--disable-shared'],
            preconfigure=['./autogen.sh'])
        stages['main'] += hpccm.building_blocks.generic_autotools(
            repository='https://github.com/doxygen/doxygen.git',
            commit=doxygen_commit,
            prefix='',
            configure_opts=[
                '--flex /tmp/install-of-flex/bin/flex',
                '--static'],
            postinstall=[
                'sed -i \'/\"XPS\"/d;/\"PDF\"/d;/\"PS\"/d;/\"EPS\"/d;/disable ghostscript format types/d\' /etc/ImageMagick-6/policy.xml'])
        stages['main'] += hpccm.building_blocks.pip(pip='pip3', packages=['sphinx==1.6.1'])

    if 'pyenv' in stages and stages['pyenv'] is not None:
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/.pyenv/'],
                                                dest='/root/.pyenv')
        stages['main'] += hpccm.primitives.copy(_from='pyenv', _mkdir=True, src=['/root/venv/'],
                                                dest='/root/venv')
        # TODO: Update user home directory.
        # TODO: If we activate pyenv for login shells, the `global` "version" should be full-featured.
        # stages['main'] += hpccm.primitives.copy(_from='pyenv', src=['/root/.bashrc'],
        #                                         dest='/root/')

    # Note that the list of stages should be sorted in dependency order.
    for build_stage in stages.values():
        if build_stage is not None:
            yield build_stage
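
A minimal driver sketch for this generator (assuming the surrounding script
defines the argparse `parser`, and using hpccm's global output-format switch):

if __name__ == '__main__':
    import hpccm
    import hpccm.config

    args = parser.parse_args()
    # Emit Dockerfile syntax; hpccm.container_type.SINGULARITY also works.
    hpccm.config.g_ctype = hpccm.container_type.DOCKER
    for stage in build_stages(args):
        print(stage)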
Example #28
def mpi(tc):
    args, distro = arguments()

    tdoc = doc(args, "toolchain")
    Stage0 = hpccm.Stage()
    Stage0.name = 'mpi'
    Stage0.baseimage(image='toolchain', _distro=distro)
    Stage0 += comment("mpi", reformat=False)

    Stage0 += raw(docker='USER root')

    # MPI libraries: default is Open MPI (version 4.1.1 unless specified)
    if args.mpi == "ompi":
        # Inbox OFED
        Stage0 += ofed()
        if args.mpi_version is None:
            args.mpi_version = "4.1.1"
        vars = {
            "OMPI_MCA_btl_vader_single_copy_mechanism": "none",
            "OMPI_MCA_rmaps_base_mapping_policy": "slot",
            "OMPI_MCA_hwloc_base_binding_policy": "none",
            "OMPI_MCA_btl_openib_cuda_async_recv": "false",
            "OMPI_MCA_mpi_leave_pinned": "true",
            "OMPI_MCA_opal_warn_on_missing_libcuda": "false",
            "OMPI_MCA_rmaps_base_oversubscribe": "true"
        }
        if args.target_arch == "x86_64" or args.binary == "no":
            mpi_lib = openmpi(infiniband=True,
                              pmix='internal',
                              version=args.mpi_version,
                              cuda=(args.cuda != 'no'),
                              prefix="/usr/local/mpi",
                              toolchain=tc)
            vars.update({
                "PATH":
                "/usr/local/mpi/bin/:${PATH}",
                "LD_LIBRARY_PATH":
                "/usr/local/mpi/lib:/usr/local/mpi/lib64:${LD_LIBRARY_PATH}"
            })
        else:
            mpi_lib = packages(apt=['libopenmpi-dev'],
                               yum=['openmpi-devel'],
                               powertools=True,
                               epel=True)
            vars.update({
                "PATH": "/usr/lib64/openmpi/bin:${PATH}",
                "OMPI_CC": tc.CC,
                "OMPI_FC": tc.FC,
                "OMPI_F77": tc.F77,
                "OMPI_F90": tc.F90,
                "OMPI_CXX": tc.CXX
            })
        Stage0 += environment(variables=vars)
    elif args.mpi in ["mvapich2", "mvapich"]:
        # Mellanox OFED
        ofed_version = '5.0'
        Stage0 += mlnx_ofed(version='5.0-2.1.8.0', oslabel='ubuntu18.04')
        if args.cuda != 'no':
            if args.mpi_version is None:
                args.mpi_version = "2.3.6"
            Stage0 += gdrcopy()
            if args.cuda == "8.0":
                gnu_version = "5.4.0"
            elif args.cuda == "11.0" and args.mpi_version is not None and args.mpi_version >= StrictVersion(
                    "2.3.4"):
                gnu_version = "9.3.0"
            elif args.cuda >= StrictVersion(
                    "11.2"
            ) and args.mpi_version is not None and args.mpi_version >= StrictVersion(
                    "2.3.6"):
                gnu_version = "7.3.0"
            else:
                gnu_version = "4.8.5"
            if args.mpi_version >= StrictVersion("2.3.4"):
                release = 1
            else:
                release = 2
            # hpccm cannot locate MVAPICH2-GDR 2.3.6 on the download site, so
            # fetch and install the RPM manually.
            if (args.mpi_version == "2.3.6"):
                Stage0 += packages(apt=[
                    'cpio', 'libnuma1', 'libpciaccess0', 'openssh-client',
                    'rpm2cpio', 'libgfortran4'
                ],
                                   yum=[
                                       'libpciaccess', 'numactl-libs',
                                       'openssh-clients', 'libgfortran'
                                   ],
                                   powertools=True,
                                   epel=True)
                _commands = [
                    'mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp http://mvapich.cse.ohio-state.edu/download/mvapich/gdr/2.3.6/mofed'
                    + ofed_version + '/mvapich2-gdr-cuda11.2.mofed' +
                    ofed_version + '.gnu' + gnu_version + '-2.3.6-1.el7.' +
                    args.target_arch + '.rpm', 'cd / '
                ]
                if args.system == 'centos':
                    # Wrap in a list: `list += str` would append one character
                    # at a time.
                    _commands += ['rpm --install --nodeps /var/tmp/mvapich2-gdr*.rpm']
                else:
                    _commands += ['rpm2cpio /var/tmp/mvapich2-gdr-*.rpm | cpio -id']
                _commands += [
                    '(test -f /usr/bin/bash || ln -s /bin/bash /usr/bin/bash) ',
                    'ln -s /usr/local/cuda/lib64/stubs/nvidia-ml.so /usr/local/cuda/lib64/stubs/nvidia-ml.so.1',
                    'rm -rf /var/tmp/mvapich2-*.rpm'
                ]
                mpi_lib = shell(commands=_commands)
            else:
                mpi_lib = mvapich2_gdr(version=args.mpi_version,
                                       prefix="/usr/local/mpi",
                                       mlnx_ofed_version=ofed_version,
                                       cuda_version=args.cuda,
                                       release=release,
                                       gnu_version=gnu_version)
            Stage0 += packages(apt=['libxnvctrl-dev', 'libibmad5'],
                               yum=['libxnvctrl-devel', 'infiniband-diags'],
                               powertools=True,
                               epel=True)
        else:
            mpi_lib = mvapich2(version=args.mpi_version,
                               prefix="/usr/local/mpi",
                               toolchain=tc)
            Stage0 += packages(apt=['libibmad5'],
                               yum=['infiniband-diags'],
                               powertools=True,
                               epel=True)

        Stage0 += environment(
            variables={
                "PATH": "/usr/local/mpi/bin/:${PATH}",
                "LD_LIBRARY_PATH":
                "/usr/local/lib/:/usr/local/mpi/lib:/usr/local/mpi/lib64:${LD_LIBRARY_PATH}",
                "MV2_USE_GPUDIRECT_GDRCOPY": "0",
                "MV2_SMP_USE_CMA": "0",
                "MV2_ENABLE_AFFINITY": "0",
                "MV2_CPU_BINDING_POLICY": "scatter",
                "MV2_CPU_BINDING_LEVEL": "socket"
            })
    elif args.mpi == 'intel':
        mpi_lib = intel_mpi(eula=True)  #apt_get(ospackages=[intel-mpi])

    Stage0 += mpi_lib

    # Workaround: the MVAPICH2-GDR install lands under /opt; copy it into /usr/local/mpi.
    if args.mpi in ["mvapich2", "mvapich"] and args.cuda != 'no':
        Stage0 += shell(commands=[
            'mkdir /usr/local/mpi/',
            'cp -r /opt/mvapich2/gdr/{}/no-mpittool/no-openacc/cuda**/mofed{}/mpirun/gnu{}/* /usr/local/mpi'
            .format(args.mpi_version, ofed_version, gnu_version)
        ])

    # Update the ldconfig cache, since these directories may not be on the default search path.
    Stage0 += shell(commands=[
        'echo "/usr/local/mpi/lib" > /etc/ld.so.conf.d/mpi.conf',
        'echo "/usr/local/mpi/lib64" >> /etc/ld.so.conf.d/mpi.conf',
        'echo "/usr/local/anaconda/lib" >> /etc/ld.so.conf.d/anaconda.conf',
        'echo "/bigdft/lib" > /etc/ld.so.conf.d/bigdft.conf', 'ldconfig'
    ])
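    # The CUDA "stubs" libraries below only satisfy the linker and loader at
    # build time; at run time the NVIDIA container runtime mounts the real
    # libcuda/libnvidia-ml from the host driver.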
    if args.cuda != 'no':
        Stage0 += shell(commands=[
            'cp /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/lib/libcuda.so.1'
        ])
        Stage0 += shell(commands=[
            'cp /usr/local/cuda/lib64/stubs/libnvidia-ml.so /usr/local/lib/libnvidia-ml.so.1'
        ])

    Stage0 += raw(docker='USER lsim')
    return Stage0
Example #29
    parser.add_argument('--format', type=str, default='docker',
                        choices=['docker', 'singularity'],
                        help='Container specification format (default: docker)')
    parser.add_argument('--image', type=str, default='ubuntu:18.04',
                        help='Base container image (default: ubuntu:18.04)')
    parser.add_argument('--notebook', required=True, type=str,
                        help='Jupyter notebook file')
    parser.add_argument('--packager', type=str, default='pip',
                        choices=['anaconda', 'pip'],
                        help='Python package manager (default: pip)')
    parser.add_argument('--requirements', type=str,
                        help='pip requirements file')
    args = parser.parse_args()

    ### Create Stage
    stage = hpccm.Stage()

    ### Base image
    stage += hpccm.primitives.baseimage(image=args.image, _docker_env=False, bootstrap="docker")
    stage += hpccm.building_blocks.packages(apt=['python3-dev'], yum=['python34-devel'], zypper=['python3-devel'])

    ### Install Python and Jupyter (and requirements / environment)
    if args.packager == 'pip':
        stage += hpccm.building_blocks.python(python2=False)
        stage += hpccm.building_blocks.pip(packages=['ipyslurm','ipython', 'jupyter', 'jupyterhub', 'tqdm', 'deep-learning', 'sklearn', 'helper', 'tensorflow-gpu', 'matplotlib'],
                                           pip='pip3',
                                           requirements=args.requirements)
    elif args.packager == 'anaconda':
        # conda 4.7.12 (the latest at the time) did not work; only the 'latest' tag did.
        stage += hpccm.building_blocks.conda(environment=args.environment,
                                             eula=True,
Example #30
def sdk():
    args, distro = arguments()

    tdoc = doc(args, 'sdk')

    #######
    ## SDK stage
    #######
    # Select the base image according to the requested SDK (CUDA, oneAPI, or
    # a plain distribution image).
    if args.cuda != 'no':
        image = 'nvidia/cuda:{}-devel-{}{}'.format(args.cuda, args.system,
                                                   args.system_version)
        if args.oneapi != 'no':
            args.oneapi = 'no'
            logging.warning(
                'For now we can\'t mix CUDA SDK with OneAPI base image. MKL can still be installed later. Ignoring OneAPI at this step'
            )
    elif args.oneapi != 'no':
        image = 'intel/oneapi-hpckit:devel-{}{}'.format(
            args.system, args.system_version)
        if args.target_arch != 'x86_64':
            logging.error('OneAPI is only valid for amd64 processors')
    else:
        image = '{}:{}'.format(
            args.system, args.system_version
            if args.system_version is not None else 'latest')

    Stage0 = hpccm.Stage()

    Stage0 += comment(tdoc, reformat=False)
    Stage0.name = 'sdk'
    Stage0.baseimage(image, _distro=distro)
    Stage0 += comment('SDK stage', reformat=False)

    Stage0 += label(
        metadata={'maintainer': '*****@*****.**'})
    Stage0 += environment(variables={'DEBIAN_FRONTEND': 'noninteractive'})
    #SHELL ['/bin/bash', '-c']
    Stage0 += raw(docker='SHELL ["/bin/bash", "-c"]')
    Stage0 += shell(commands=['useradd -ms /bin/bash lsim'])

    #BigDFT packages
    #system independent ones
    ospackages = [
        'autoconf', 'automake', 'bison', 'bzip2', 'chrpath', 'cmake', 'cpio',
        'curl', 'doxygen', 'ethtool', 'flex', 'gdb', 'gettext', 'git',
        'gnome-common', 'graphviz', 'intltool', 'kmod', 'libtool', 'lsof',
        'net-tools', 'ninja-build', 'patch', 'pciutils', 'perl', 'pkg-config',
        'rsync', 'strace', 'swig', 'tcl', 'tk', 'valgrind', 'vim', 'wget'
    ]

    apt_packages = ospackages + [
        'autotools-dev', 'libpcre3-dev', 'libltdl-dev', 'lsb-release',
        'libz-dev', 'zlib1g-dev', 'libzmq3-dev', 'libmount-dev', 'iproute2',
        'libnl-route-3-200', 'libnuma1', 'linux-headers-generic',
        'gtk-doc-tools', 'libxml2-dev', 'libglu1-mesa-dev', 'libnetcdf-dev',
        'libgirepository1.0-dev', 'dpatch', 'libgtk-3-dev', 'libmount-dev',
        'locales', 'ssh', 'libyaml-dev'
    ]
    yum_packages = ospackages + [
        'pcre-devel', 'libtool-ltdl-devel', 'redhat-lsb', 'glibc-devel',
        'zlib-devel', 'zeromq-devel', 'libmount-devel', 'iproute',
        'libnl3-devel', 'numactl-libs', 'kernel-headers', 'gtk-doc',
        'libxml2-devel', 'mesa-libGLU-devel', 'netcdf-devel',
        'gobject-introspection-devel', 'gtk3-devel', 'libmount-devel',
        'openssh', 'libarchive', 'libyaml-devel'
    ]
    # Boost from distribution packages, except for oneAPI or Intel Python builds (Boost is built from source for those below).
    if args.target_arch != "x86_64" or not (args.python == 'intel'
                                            or args.oneapi != 'no'):
        apt_packages += ['libboost-dev', 'libboost-python-dev']
        yum_packages += ['boost-devel', 'boost-python3-devel']

    if args.cuda != 'no':
        apt_packages += ['ocl-icd-libopencl1']
        yum_packages += ['ocl-icd']
        Stage0 += environment(
            variables={
                'LD_LIBRARY_PATH':
                '/usr/local/lib:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}',
                'LIBRARY_PATH': '/usr/local/cuda/lib64:${LIBRARY_PATH}',
                'NVIDIA_VISIBLE_DEVICES': 'all',
                'NVIDIA_DRIVER_CAPABILITIES': 'compute,utility'
            })
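        # Register the NVIDIA OpenCL implementation with the ICD loader and
        # stage driver stubs so builds can link against libcuda/libnvidia-ml
        # without a GPU present (the real libraries arrive at run time).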
        Stage0 += shell(commands=[
            'mkdir -p /etc/OpenCL/vendors',
            'echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd',
            'cp /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/lib/libcuda.so.1',
            'cp /usr/local/cuda/lib64/stubs/libnvidia-ml.so /usr/local/lib/libnvidia-ml.so.1'
        ])

    Stage0 += packages(apt=apt_packages,
                       yum=yum_packages,
                       powertools=True,
                       epel=True,
                       release_stream=True)

    if args.target_arch == 'x86_64':

        conda_packages = [
            'openbabel', 'six', 'matplotlib', 'ipython', 'nbval', 'cython',
            'sphinx', 'sphinx_bootstrap_theme', 'watchdog', 'sphinx_rtd_theme',
            'flake8', 'ncurses', 'pygobject'
        ]
        channels = ['conda-forge', 'nvidia']
        if args.blas == 'mkl' and args.oneapi == 'no':
            conda_packages += ['mkl-devel']
            Stage0 += environment(
                variables={'MKLROOT': '/usr/local/anaconda/'})

        if args.python == 'intel' and args.oneapi == 'no':
            conda_packages += ['intelpython3_core']
            channels += ['intel']
        else:
            conda_packages += ['py-boost']

        if args.jupyter == 'yes':
            conda_packages += ['jupyterlab', 'ipykernel']

        #conda install
        if args.oneapi == 'no':
            Stage0 += conda(version='4.10.3',
                            python_subversion='py37',
                            channels=channels,
                            eula=True,
                            packages=conda_packages)
            conda_path = '/usr/local/anaconda/'
            commands = [
                'groupadd conda', 'usermod -a -G conda lsim',
                'chgrp -R conda ' + conda_path, 'chmod -R 770 ' + conda_path
            ]
        else:
            #use already present conda on oneapi images
            conda_path = '/opt/intel/oneapi/intelpython/latest/'
            commands = [
                conda_path + 'bin/conda config --add channels ' +
                ' --add channels '.join(channels), conda_path +
                'bin/conda install -y ' + ' '.join(conda_packages),
                conda_path + 'bin/conda clean -afy'
            ]

        if args.python == 'intel':
            commands += [
                # Note the space: the original concatenated the two paths into
                # a single ln argument.
                'ln -s ' + conda_path + 'bin/python3-config ' + conda_path +
                'bin/python-config'
            ]
            #Intel python forgets to provide ncurses https://community.intel.com/t5/Intel-Distribution-for-Python/curses-missing-on-python-3-7/m-p/1201384#M1509
            #Temporarily steal the files from conda-forge package, and use them instead, as it's used in bigdft-tool.
            commands += [
                'mkdir curses', 'cd curses',
                'wget https://anaconda.org/conda-forge/python/3.7.8/download/linux-64/python-3.7.8-h6f2ec95_1_cpython.tar.bz2',
                'tar xjf python-3.7.8-h6f2ec95_1_cpython.tar.bz2',
                'cp ./lib/python3.7/lib-dynload/_curses* ' + conda_path +
                'lib/python3.7/lib-dynload/', 'cd ..', 'rm -rf curses'
            ]
        Stage0 += shell(commands=commands)

        # Update LIBRARY_PATH as well, to allow building against these libraries:
        Stage0 += environment(
            variables={
                'PATH': conda_path + '/bin:$PATH',
                'LIBRARY_PATH': conda_path + 'lib/:${LIBRARY_PATH}'
            })
        python_path = conda_path

    else:
        # Miniconda is not available on other platforms; use the system Python and libraries.
        ospack = [
            'python3', 'python3-flake8', 'python3-pip', 'python3-matplotlib',
            'python3-six', 'python3-sphinx', 'python3-sphinx-bootstrap-theme',
            'python3-scipy', 'python3-numpy', 'watchdog', 'python3-ipython',
            'python3-flake8'
        ]
        yum = ospack + ['python3-Cython', 'python3-sphinx_rtd_theme']
        apt = ospack + ['cython3', 'python3-sphinx-rtd-theme']
        # Make python3 and pip3 the defaults.
        pycommands = [
            'ln -s /usr/bin/python3 /usr/local/bin/python',
            'ln -s /usr/bin/pip3 /usr/local/bin/pip'
        ]
        if args.jupyter == 'yes':
            apt += ['jupyter-notebook', 'python3-ipykernel']
        Stage0 += packages(apt=apt, yum=yum, powertools=True, epel=True)

        # There is no jupyter package for CentOS 8, so fall back to pip.
        if args.system == 'centos' and args.jupyter == 'yes':
            pycommands += ['pip install jupyter ipykernel jupyterlab']

        Stage0 += shell(commands=pycommands)
        python_path = '/usr/'

    #Install boost with the provided python
    if (args.target_arch == 'x86_64'
            and (args.python == 'intel' or args.oneapi != 'no')):
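        # Write a Boost.Build user-config.jam describing the chosen Python
        # ("using python : : <interpreter> : <includes> : <libdir> ;") so
        # that b2 builds Boost.Python against it.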
        Stage0 += shell(commands=[
            'echo "\\\n\
      using python\\\n\
      : \\\n\
      : `which python`\\\n\
      : `dirname ' + python_path + '/include/python*/..`\\\n\
      : ' + python_path + '/lib\\\n\
      ;\\\n\
  " > /tmp/user-config.jam'
        ])
        Stage0 += boost(
            python=args.python != 'no',
            bootstrap_opts=[
                '--with-libraries=python,serialization',
                '--with-python=`which python`', '--without-icu'
            ],
            b2_opts=[
                '--user-config=/tmp/user-config.jam', 'install',
                'threading=multi', 'variant=release', 'link=shared', 'stage',
                '--with-regex', '--disable-icu', '--with-thread',
                '--with-serialization', '--with-iostreams', '--with-python',
                '--with-system', '--with-test', '-q'
            ])

    if (args.jupyter == 'yes'):
        Stage0 += raw(docker='EXPOSE 8888')
        Stage0 += raw(
            docker=
            'CMD jupyter lab --ip=0.0.0.0 --allow-root --NotebookApp.token=bigdft --no-browser',
            singularity=
            '%runscript\n jupyter lab --ip=0.0.0.0 --allow-root --NotebookApp.token=bigdft --no-browser'
        )

    # Workaround for an issue in Ubuntu 20.04 on Docker (preload libtinfo).
    if args.system == 'ubuntu' and args.system_version >= StrictVersion(
            '20.04') and args.target_arch == "x86_64":
        Stage0 += environment(
            variables={
                'LD_PRELOAD': '/usr/lib/x86_64-linux-gnu/libtinfo.so.6'
            })

    if args.system == 'ubuntu':
        Stage0 += environment(
            variables={
                "LANG": "en_US.UTF-8",
                "LANGUAGE": "en_US.UTF-8",
                "LC_ALL": "en_US.UTF-8"
            })
    else:
        Stage0 += environment(
            variables={
                "LANG": "C.UTF-8",
                "LANGUAGE": "C.UTF-8",
                "LC_ALL": "C.UTF-8",
                "PKG_CONFIG_PATH": "/usr/lib64:/usr/share/lib64"
            })

    if args.oneapi != 'no':
        Stage0 += shell(commands=[
            'if [ -e /root/.oneapi_env_vars ]; then cp /root/.oneapi_env_vars /opt/intel/.oneapi_env_vars; chmod +x /opt/intel/.oneapi_env_vars; fi'
        ])
        Stage0 += raw(
            docker=
            'ENTRYPOINT ["bash", "-c", "source /opt/intel/.oneapi_env_vars && \\\"$@\\\"", "bash"]',
            singularity=
            "%runscript\n bash -c 'source /opt/intel/.oneapi_env_vars && \\\"$@\\\"' bash"
        )

    return Stage0
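
Because the raw() primitives above carry both docker= and singularity=
variants, the same stage renders to either output format; a minimal sketch
(assuming hpccm's global-config API, as in the earlier driver example):

import hpccm
import hpccm.config

hpccm.config.g_ctype = hpccm.container_type.SINGULARITY
print(sdk())  # emits a Singularity definition file instead of a Dockerfile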