Example 1
def match_minimum_tc_specs(source_tc_spec, target_tc_hierarchy):
    """
    Match a source toolchain spec to the minimal corresponding toolchain in a target hierarchy

    :param source_tc_spec: specs of source toolchain
    :param target_tc_hierarchy: hierarchy of specs for target toolchain
    """
    minimal_matching_toolchain = {}
    target_compiler_family = ''

    # break out once we've found the first match since the hierarchy is ordered low to high in terms of capabilities
    for target_tc_spec in target_tc_hierarchy:
        if check_capability_mapping(source_tc_spec, target_tc_spec):
            # GCCcore has compiler capabilities,
            # but should only be used in the target if the original toolchain was also GCCcore
            if target_tc_spec['name'] != GCCcore.NAME or source_tc_spec['name'] == GCCcore.NAME:
                minimal_matching_toolchain = {'name': target_tc_spec['name'], 'version': target_tc_spec['version']}
                target_compiler_family = target_tc_spec['comp_family']
                break

    if not minimal_matching_toolchain:
        raise EasyBuildError("No possible mapping from source toolchain spec %s to target toolchain hierarchy specs %s",
                             source_tc_spec, target_tc_hierarchy)

    # Warn if we are changing compiler families, this is very likely to cause problems
    if target_compiler_family != source_tc_spec['comp_family']:
        print_warning("Your request will result in a compiler family switch (%s to %s). Here be dragons!" %
                      (source_tc_spec['comp_family'], target_compiler_family), silent=build_option('silent'))

    return minimal_matching_toolchain
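
To see the pattern in isolation: below is a minimal, self-contained sketch of the same "first match wins" walk over an ordered hierarchy. The toolchain dicts and the capability check are illustrative stand-ins, not EasyBuild's real data structures or its check_capability_mapping().

# Minimal sketch of the matching loop above; all values are fabricated.
def capabilities_match(source, target):
    # a target is acceptable if it offers at least the source's capabilities
    return source['capabilities'] <= target['capabilities']

source = {'name': 'GCC', 'version': '10.2.0', 'capabilities': {'compiler'}}
hierarchy = [  # ordered low to high in terms of capabilities
    {'name': 'GCCcore', 'version': '10.2.0', 'capabilities': {'compiler'}},
    {'name': 'GCC', 'version': '10.2.0', 'capabilities': {'compiler'}},
    {'name': 'foss', 'version': '2020b', 'capabilities': {'compiler', 'mpi', 'lapack'}},
]

match = None
for target in hierarchy:
    if capabilities_match(source, target):
        # skip GCCcore unless the source toolchain is GCCcore itself
        if target['name'] != 'GCCcore' or source['name'] == 'GCCcore':
            match = {'name': target['name'], 'version': target['version']}
            break

print(match)  # {'name': 'GCC', 'version': '10.2.0'}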
Example 2
    def sanity_check_step(self):
        """Custom sanity check for VEP."""

        custom_paths = {
            'files': ['vep'],
            'dirs': ['modules/Bio/EnsEMBL/VEP'],
        }

        if 'Bio::EnsEMBL::XS' in [ext[0] for ext in self.cfg['exts_list']]:
            # determine Perl version used as dependency;
            # take into account that Perl module may not be loaded, for example when --sanity-check-only is used
            perl_ver = None
            deps = self.cfg.dependencies()
            for dep in deps:
                if dep['name'] == 'Perl':
                    perl_ver = dep['version']
                    break

            if perl_ver is None:
                print_warning(
                    "Failed to determine version of Perl dependency!")
            else:
                perl_majver = perl_ver.split('.')[0]
                perl_libpath = os.path.join('lib', 'perl' + perl_majver,
                                            'site_perl', perl_ver)
                bio_ensembl_xs_ext = os.path.join(perl_libpath,
                                                  'x86_64-linux-thread-multi',
                                                  'Bio', 'EnsEMBL', 'XS.pm')
                custom_paths['files'].extend([bio_ensembl_xs_ext])

        custom_commands = ['vep --help']

        super(EB_VEP, self).sanity_check_step(custom_paths=custom_paths,
                                              custom_commands=custom_commands)
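
As a side note, the dependency lookup above boils down to a search over a list of dicts; a plain-Python sketch with made-up dependency data:

# Stand-alone sketch of the Perl-version lookup; `deps` is fabricated data.
deps = [{'name': 'zlib', 'version': '1.2.11'}, {'name': 'Perl', 'version': '5.34.0'}]
perl_ver = next((dep['version'] for dep in deps if dep['name'] == 'Perl'), None)
print(perl_ver)  # 5.34.0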
Example 3
    def determine_build_and_host_type(self):
        """
        Return the resolved build and host type for use with --build and --host
        Uses the EasyConfig values or queries config.guess if those are not set
        Might return None for either value
        """
        build_type = self.cfg.get('build_type')
        host_type = self.cfg.get('host_type')

        if build_type is None or host_type is None:
            # config.guess script may not be obtained yet despite the call in fetch_step,
            # for example when installing a Bundle component with ConfigureMake
            if not self.config_guess:
                self.config_guess = self.obtain_config_guess()

            if not self.config_guess:
                print_warning("No config.guess available, not setting '--build' option for configure step\n"
                              "EasyBuild attempts to download a recent config.guess but seems to have failed!")
            else:
                self.check_config_guess()
                system_type, _ = run_cmd(self.config_guess, log_all=True)
                system_type = system_type.strip()
                self.log.info("%s returned a system type '%s'", self.config_guess, system_type)

                if build_type is None:
                    build_type = system_type
                    self.log.info("Providing '%s' as value to --build option of configure script", build_type)

                if host_type is None:
                    host_type = system_type
                    self.log.info("Providing '%s' as value to --host option of configure script", host_type)

        return build_type, host_type
Example 4
    def build_step(self, *args, **kwargs):
        """Custom build procedure for Python, ensure stack size limit is set to 'unlimited' (if desired)."""

        if self.cfg['ulimit_unlimited']:
            # determine current stack size limit
            (out, _) = run_cmd("ulimit -s")
            curr_ulimit_s = out.strip()

            # figure out hard limit for stack size limit;
            # this determines whether or not we can use "ulimit -s unlimited"
            (out, _) = run_cmd("ulimit -s -H")
            max_ulimit_s = out.strip()

            if curr_ulimit_s == UNLIMITED:
                self.log.info("Current stack size limit is %s: OK", curr_ulimit_s)
            elif max_ulimit_s == UNLIMITED:
                self.log.info("Current stack size limit is %s, setting it to %s for build...",
                              curr_ulimit_s, UNLIMITED)
                self.cfg.update('prebuildopts', "ulimit -s %s && " % UNLIMITED)
            else:
                msg = "Current stack size limit is %s, and can not be set to %s due to hard limit of %s;"
                msg += " setting stack size limit to %s instead, "
                msg += " this may break part of the compilation (e.g. hashlib)..."
                print_warning(msg % (curr_ulimit_s, UNLIMITED, max_ulimit_s, max_ulimit_s))
                self.cfg.update('prebuildopts', "ulimit -s %s && " % max_ulimit_s)

        super(EB_Python, self).build_step(*args, **kwargs)
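
For reference, the soft/hard stack-limit check that drives this logic can be reproduced without shelling out to `ulimit`; a small standalone sketch using the standard resource module (Unix only):

# Sketch of the same decision tree using resource.getrlimit() instead of `ulimit -s`.
import resource

soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
UNLIMITED = resource.RLIM_INFINITY

if soft == UNLIMITED:
    print("stack size limit already unlimited")
elif hard == UNLIMITED:
    print("soft limit %s can be raised to unlimited for the build" % soft)
else:
    print("hard limit caps the stack size at %s" % hard)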
Example 5
    def build_step(self, *args, **kwargs):
        """Custom build procedure for Python, ensure stack size limit is set to 'unlimited' (if desired)."""

        if self.cfg['ulimit_unlimited']:
            # determine current stack size limit
            (out, _) = run_cmd("ulimit -s")
            curr_ulimit_s = out.strip()

            # figure out hard limit for stack size limit;
            # this determines whether or not we can use "ulimit -s unlimited"
            (out, _) = run_cmd("ulimit -s -H")
            max_ulimit_s = out.strip()

            if curr_ulimit_s == UNLIMITED:
                self.log.info("Current stack size limit is %s: OK",
                              curr_ulimit_s)
            elif max_ulimit_s == UNLIMITED:
                self.log.info(
                    "Current stack size limit is %s, setting it to %s for build...",
                    curr_ulimit_s, UNLIMITED)
                self.cfg.update('prebuildopts', "ulimit -s %s && " % UNLIMITED)
            else:
                msg = "Current stack size limit is %s, and can not be set to %s due to hard limit of %s;"
                msg += " setting stack size limit to %s instead, "
                msg += " this may break part of the compilation (e.g. hashlib)..."
                print_warning(
                    msg %
                    (curr_ulimit_s, UNLIMITED, max_ulimit_s, max_ulimit_s))
                self.cfg.update('prebuildopts',
                                "ulimit -s %s && " % max_ulimit_s)

        super(EB_Python, self).build_step(*args, **kwargs)
Example 6
def check_cuda_compute_capabilities(cfg_cuda_cc, ec_cuda_cc):
    """
    Checks if cuda-compute-capabilities is set and prints a warning if it is declared in multiple places.

    :param cfg_cuda_cc: cuda-compute-capabilities from cli config
    :param ec_cuda_cc: cuda-compute-capabilities from easyconfig
    :return: returns preferred cuda-compute-capabilities
    """

    cuda = get_software_root('CUDA')
    cuda_cc = cfg_cuda_cc or ec_cuda_cc or []

    if cuda:
        if cfg_cuda_cc and ec_cuda_cc:
            warning_msg = "cuda_compute_capabilities specified in easyconfig (%s)" % ec_cuda_cc
            warning_msg += " are overruled by "
            warning_msg += "--cuda-compute-capabilities configuration option (%s)" % cfg_cuda_cc
            print_warning(warning_msg)
        elif not cuda_cc:
            error_msg = "No CUDA compute capabilities specified.\nTo build LAMMPS with Cuda you need to use "
            error_msg += "the --cuda-compute-capabilities configuration option or the cuda_compute_capabilities "
            error_msg += "easyconfig parameter to specify a list of CUDA compute capabilities to compile with."
            raise EasyBuildError(error_msg)

    elif cuda_cc:
        warning_msg = "Missing CUDA package (in dependencies), "
        warning_msg += "but 'cuda_compute_capabilities' option was specified."
        print_warning(warning_msg)

    return cuda_cc
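
The precedence rule above is simply "configuration option overrules easyconfig parameter, and warn when both are set"; a tiny self-contained sketch with fabricated values:

# Toy version of the precedence logic; values and the warning text are illustrative.
def pick_cuda_cc(cfg_cuda_cc, ec_cuda_cc):
    if cfg_cuda_cc and ec_cuda_cc:
        print("WARNING: easyconfig value %s overruled by configuration option %s"
              % (ec_cuda_cc, cfg_cuda_cc))
    return cfg_cuda_cc or ec_cuda_cc or []

print(pick_cuda_cc(['8.0'], ['7.0']))  # ['8.0'] (plus a warning)
print(pick_cuda_cc([], ['7.0']))       # ['7.0']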
Example 7
    def configure_step(self):
        """ set up some options - but no configure command to run"""

        default_opts = {
            'BINARY': '64',
            'CC': os.getenv('CC'),
            'FC': os.getenv('FC'),
            'USE_OPENMP': '1',
            'USE_THREAD': '1',
        }

        if '%s=' % TARGET in self.cfg['buildopts']:
            # Add any TARGET in buildopts to default_opts, so it is passed to testopts and installopts
            for buildopt in self.cfg['buildopts'].split():
                optpair = buildopt.split('=')
                if optpair[0] == TARGET:
                    default_opts[optpair[0]] = optpair[1]
        elif LooseVersion(self.version) < LooseVersion(
                '0.3.6') and get_cpu_architecture() == POWER:
            # There doesn't seem to be a POWER9 option yet, but POWER8 should work.
            print_warning(
                "OpenBLAS 0.3.5 and lower have known issues on POWER systems")
            default_opts[TARGET] = 'POWER8'

        for key in sorted(default_opts.keys()):
            for opts_key in ['buildopts', 'testopts', 'installopts']:
                if '%s=' % key not in self.cfg[opts_key]:
                    self.cfg.update(opts_key,
                                    "%s='%s'" % (key, default_opts[key]))

        self.cfg.update('installopts', 'PREFIX=%s' % self.installdir)
Example 8
    def check_config_guess(self):
        """Check timestamp & SHA256 checksum of config.guess script."""
        # log version, timestamp & SHA256 checksum of config.guess that was found (if any)
        if self.config_guess:
            # config.guess includes a "timestamp='...'" indicating the version
            config_guess_version = None
            version_regex = re.compile("^timestamp='(.*)'", re.M)
            res = version_regex.search(read_file(self.config_guess))
            if res:
                config_guess_version = res.group(1)

            config_guess_checksum = compute_checksum(self.config_guess, checksum_type=CHECKSUM_TYPE_SHA256)
            try:
                config_guess_timestamp = datetime.fromtimestamp(os.stat(self.config_guess).st_mtime).isoformat()
            except OSError as err:
                self.log.warning("Failed to determine timestamp of %s: %s", self.config_guess, err)
                config_guess_timestamp = None

            self.log.info("config.guess version: %s (last updated: %s, SHA256 checksum: %s)",
                          config_guess_version, config_guess_timestamp, config_guess_checksum)

            if config_guess_version != CONFIG_GUESS_VERSION:
                tup = (self.config_guess, config_guess_version, CONFIG_GUESS_VERSION)
                print_warning("config.guess version at %s does not match expected version: %s vs %s" % tup)

            if config_guess_checksum != CONFIG_GUESS_SHA256:
                tup = (self.config_guess, config_guess_checksum, CONFIG_GUESS_SHA256)
                print_warning("SHA256 checksum of config.guess at %s does not match expected checksum: %s vs %s" % tup)
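
The version/checksum bookkeeping above can be reproduced with the standard library alone; a self-contained sketch where the file path and expected checksum are placeholders, not EasyBuild constants:

# Sketch: extract the timestamp='...' version string from a config.guess-style
# script and compare its SHA256 checksum against an expected (placeholder) value.
import hashlib
import re

def describe_config_guess(path, expected_sha256=None):
    with open(path, 'rb') as handle:
        data = handle.read()
    match = re.search(rb"^timestamp='(.*)'", data, re.M)
    version = match.group(1).decode() if match else None
    checksum = hashlib.sha256(data).hexdigest()
    if expected_sha256 and checksum != expected_sha256:
        print("WARNING: unexpected SHA256 checksum for %s" % path)
    return version, checksum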
Example 9
def obtain_path(specs, paths, try_to_generate=False, exit_on_error=True, silent=False):
    """Obtain a path for an easyconfig that matches the given specifications."""

    # if no easyconfig files/paths were provided, but we did get a software name,
    # we can try and find a suitable easyconfig ourselves, or generate one if we can
    (generated, fn) = easyconfig.tools.obtain_ec_for(specs, paths, None)
    if not generated:
        return (fn, generated)
    else:
        # if an easyconfig was generated, make sure we're allowed to use it
        if try_to_generate:
            print_msg("Generated an easyconfig file %s, going to use it now..." % fn, silent=silent)
            return (fn, generated)
        else:
            try:
                os.remove(fn)
            except OSError as err:
                print_warning("Failed to remove generated easyconfig file %s: %s" % (fn, err))
            print_error(
                (
                    "Unable to find an easyconfig for the given specifications: %s; "
                    "to make EasyBuild try to generate a matching easyconfig, "
                    "use the --try-X options "
                )
                % specs,
                log=_log,
                exit_on_error=exit_on_error,
            )
Example 10
    def check_config_guess(self):
        """Check timestamp & SHA256 checksum of config.guess script."""
        # log version, timestamp & SHA256 checksum of config.guess that was found (if any)
        if self.config_guess:
            # config.guess includes a "timestamp='...'" indicating the version
            config_guess_version = None
            version_regex = re.compile("^timestamp='(.*)'", re.M)
            res = version_regex.search(read_file(self.config_guess))
            if res:
                config_guess_version = res.group(1)

            config_guess_checksum = compute_checksum(self.config_guess, checksum_type=CHECKSUM_TYPE_SHA256)
            try:
                config_guess_timestamp = datetime.fromtimestamp(os.stat(self.config_guess).st_mtime).isoformat()
            except OSError as err:
                self.log.warning("Failed to determine timestamp of %s: %s", self.config_guess, err)
                config_guess_timestamp = None

            self.log.info("config.guess version: %s (last updated: %s, SHA256 checksum: %s)",
                          config_guess_version, config_guess_timestamp, config_guess_checksum)

            if config_guess_version != CONFIG_GUESS_VERSION:
                tup = (self.config_guess, config_guess_version, CONFIG_GUESS_VERSION)
                print_warning("config.guess version at %s does not match expected version: %s vs %s" % tup)

            if config_guess_checksum != CONFIG_GUESS_SHA256:
                tup = (self.config_guess, config_guess_checksum, CONFIG_GUESS_SHA256)
                print_warning("SHA256 checksum of config.guess at %s does not match expected checksum: %s vs %s" % tup)
Example 11
def match_minimum_tc_specs(source_tc_spec, target_tc_hierarchy):
    """
    Match a source toolchain spec to the minimal corresponding toolchain in a target hierarchy

    :param source_tc_spec: specs of source toolchain
    :param target_tc_hierarchy: hierarchy of specs for target toolchain
    """
    minimal_matching_toolchain = {}
    target_compiler_family = ''

    # break out once we've found the first match since the hierarchy is ordered low to high in terms of capabilities
    for target_tc_spec in target_tc_hierarchy:
        if check_capability_mapping(source_tc_spec, target_tc_spec):
            # GCCcore has compiler capabilities,
            # but should only be used in the target if the original toolchain was also GCCcore
            if target_tc_spec['name'] != GCCcore.NAME or source_tc_spec['name'] == GCCcore.NAME:
                minimal_matching_toolchain = {'name': target_tc_spec['name'], 'version': target_tc_spec['version']}
                target_compiler_family = target_tc_spec['comp_family']
                break

    if not minimal_matching_toolchain:
        raise EasyBuildError("No possible mapping from source toolchain spec %s to target toolchain hierarchy specs %s",
                             source_tc_spec, target_tc_hierarchy)

    # Warn if we are changing compiler families, this is very likely to cause problems
    if target_compiler_family != source_tc_spec['comp_family']:
        print_warning("Your request will result in a compiler family switch (%s to %s). Here be dragons!" %
                      (source_tc_spec['comp_family'], target_compiler_family), silent=build_option('silent'))

    return minimal_matching_toolchain
Example 12
def obtain_path(specs,
                paths,
                try_to_generate=False,
                exit_on_error=True,
                silent=False):
    """Obtain a path for an easyconfig that matches the given specifications."""

    # if no easyconfig files/paths were provided, but we did get a software name,
    # we can try and find a suitable easyconfig ourselves, or generate one if we can
    (generated, fn) = obtain_ec_for(specs, paths, None)
    if not generated:
        return (fn, generated)
    else:
        # if an easyconfig was generated, make sure we're allowed to use it
        if try_to_generate:
            print_msg(
                "Generated an easyconfig file %s, going to use it now..." % fn,
                silent=silent)
            return (fn, generated)
        else:
            try:
                os.remove(fn)
            except OSError as err:
                print_warning(
                    "Failed to remove generated easyconfig file %s: %s" %
                    (fn, err))
            print_error((
                "Unable to find an easyconfig for the given specifications: %s; "
                "to make EasyBuild try to generate a matching easyconfig, "
                "use the --try-X options ") % specs,
                        log=_log,
                        exit_on_error=exit_on_error)
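
The error handling here follows a common "clean up, but only warn on failure" pattern; a minimal sketch of just that piece:

# Minimal sketch of the remove-and-warn pattern used for the generated file above.
import os

def remove_or_warn(path):
    try:
        os.remove(path)
    except OSError as err:
        print("WARNING: failed to remove generated file %s: %s" % (path, err))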
Example 13
    def get_gromacs_arch(self):
        """Determine value of GMX_SIMD CMake flag based on optarch string.

        Refs:
        [0] http://manual.gromacs.org/documentation/2016.3/install-guide/index.html#typical-installation
        [1] http://manual.gromacs.org/documentation/2016.3/install-guide/index.html#simd-support
        [2] http://www.gromacs.org/Documentation/Acceleration_and_parallelization
        """
        # default: fall back on autodetection
        res = None

        optarch = build_option('optarch') or ''
        # take into account that optarch value is a dictionary if it is specified by compiler family
        if isinstance(optarch, dict):
            comp_fam = self.toolchain.comp_family()
            optarch = optarch.get(comp_fam, '')
        optarch = optarch.upper()

        # The list of GMX_SIMD options can be found
        # http://manual.gromacs.org/documentation/2018/install-guide/index.html#simd-support
        if 'MIC-AVX512' in optarch and LooseVersion(
                self.version) >= LooseVersion('2016'):
            res = 'AVX_512_KNL'
        elif 'AVX512' in optarch and LooseVersion(
                self.version) >= LooseVersion('2016'):
            res = 'AVX_512'
        elif 'AVX2' in optarch and LooseVersion(
                self.version) >= LooseVersion('5.0'):
            res = 'AVX2_256'
        elif 'AVX' in optarch:
            res = 'AVX_256'
        elif 'SSE3' in optarch or 'SSE2' in optarch or 'MARCH=NOCONA' in optarch:
            # Gromacs doesn't have any GMX_SIMD=SSE3 but only SSE2 and SSE4.1 [1].
            # According to [2] the performance difference between SSE2 and SSE4.1 is minor on x86
            # and SSE4.1 is not supported by AMD Magny-Cours[1].
            res = 'SSE2'
        elif optarch == OPTARCH_GENERIC:
            cpu_arch = get_cpu_architecture()
            if cpu_arch == X86_64:
                res = 'SSE2'
            else:
                res = 'None'
        elif optarch:
            warn_msg = "--optarch configuration setting set to %s but not taken into account; " % optarch
            warn_msg += "compiling GROMACS for the current host architecture (i.e. the default behavior)"
            self.log.warning(warn_msg)
            print_warning(warn_msg)

        if res:
            self.log.info(
                "Target architecture based on optarch configuration option ('%s'): %s",
                optarch, res)
        else:
            self.log.info(
                "No target architecture specified based on optarch configuration option ('%s')",
                optarch)

        return res
Example 14
    def sanity_check_step(self):
        """Custom sanity check for AOMP"""
        shlib_ext = get_shared_lib_ext()
        arch = get_cpu_architecture()
        # Check architecture explicitly since Clang uses potentially
        # different names
        arch_map = {
            X86_64: 'x86_64',
            POWER: 'ppc64',
            AARCH64: 'aarch64',
        }

        if arch in arch_map:
            arch = arch_map[arch]
        else:
            print_warning(
                "Unknown CPU architecture (%s) for OpenMP offloading!" % arch)
        custom_paths = {
            'files': [
                "amdgcn/bitcode/hip.bc",
                "amdgcn/bitcode/opencl.bc",
                "bin/aompcc",
                "bin/aompversion",
                "bin/clang",
                "bin/flang",
                "bin/ld.lld",
                "bin/llvm-config",
                "bin/mygpu",
                "bin/opt",
                "bin/rocminfo",
                "include/amd_comgr.h",
                "include/hsa/amd_hsa_common.h",
                "include/hsa/hsa.h",
                "include/omp.h",
                "include/omp_lib.h",
                "lib/libclang.%s" % shlib_ext,
                "lib/libflang.%s" % shlib_ext,
                "lib/libomp.%s" % shlib_ext,
                "lib/libomptarget.rtl.amdgpu.%s" % shlib_ext,
                "lib/libomptarget.rtl.%s.%s" % (arch, shlib_ext),
                "lib/libomptarget.%s" % shlib_ext,
            ],
            'dirs': ["amdgcn", "include/clang", "include/hsa", "include/llvm"],
        }
        # If we are building with CUDA support we need to check if it was built properly
        if get_software_root('CUDA') or get_software_root('CUDAcore'):
            custom_paths['files'].append("lib/libomptarget.rtl.cuda.%s" %
                                         shlib_ext)
        custom_commands = [
            'aompcc --help',
            'clang --help',
            'clang++ --help',
            'flang --help',
            'llvm-config --cxxflags',
        ]
        super(EB_AOMP, self).sanity_check_step(custom_paths=custom_paths,
                                               custom_commands=custom_commands)
Example 15
    def test_step(self):
        """Run unit tests"""
        # Make PyTorch tests not use the user home
        env.setvar('XDG_CACHE_HOME', os.path.join(self.tmpdir, '.cache'))
        # Pretend to be on FB CI which disables some tests, especially those which download stuff
        env.setvar('SANDCASTLE', '1')
        # Skip this test(s) which is very flaky
        env.setvar('SKIP_TEST_BOTTLENECK', '1')
        # Parse excluded_tests and flatten into space separated string
        excluded_tests = []
        for arch, tests in self.cfg['excluded_tests'].items():
            if not arch or arch == get_cpu_architecture():
                excluded_tests.extend(tests)
        # -x should not be used if there are no excluded tests
        if excluded_tests:
            excluded_tests = ['-x'] + excluded_tests
        self.cfg.template_values.update({
            'python': self.python_cmd,
            'excluded_tests': ' '.join(excluded_tests)
        })

        (tests_out, tests_ec) = super(EB_PyTorch, self).test_step(return_output_ec=True)

        ran_tests_hits = re.findall(r"^Ran (?P<test_cnt>[0-9]+) tests in", tests_out, re.M)
        test_cnt = 0
        for hit in ran_tests_hits:
            test_cnt += int(hit)

        failed_tests = nub(re.findall(r"^(?P<failed_test_name>.*) failed!\s*$", tests_out, re.M))
        failed_test_cnt = len(failed_tests)

        if failed_test_cnt:
            max_failed_tests = self.cfg['max_failed_tests']

            test_or_tests = 'tests' if failed_test_cnt > 1 else 'test'
            msg = "%d %s (out of %d) failed:\n" % (failed_test_cnt, test_or_tests, test_cnt)
            msg += '\n'.join('* %s' % t for t in sorted(failed_tests))

            if max_failed_tests == 0:
                raise EasyBuildError(msg)
            else:
                msg += '\n\n' + ' '.join([
                    "The PyTorch test suite is known to include some flaky tests,",
                    "which may fail depending on the specifics of the system or the context in which they are run.",
                    "For this PyTorch installation, EasyBuild allows up to %d tests to fail." % max_failed_tests,
                    "We recommend to double check that the failing tests listed above ",
                    "are known to be flaky, or do not affect your intended usage of PyTorch.",
                    "In case of doubt, reach out to the EasyBuild community (via GitHub, Slack, or mailing list).",
                ])
                print_warning(msg)

                if failed_test_cnt > max_failed_tests:
                    raise EasyBuildError("Too many failed tests (%d), maximum allowed is %d",
                                         failed_test_cnt, max_failed_tests)
        elif tests_ec:
            raise EasyBuildError("Test command had non-zero exit code (%s), but no failed tests found?!", tests_ec)
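
The test-output bookkeeping relies on two regular expressions; here is a standalone sketch against fabricated output showing what they extract:

# Self-contained sketch of the log parsing above: count executed tests and
# collect the names of failed test suites from (made-up) captured output.
import re

output = """\
Ran 120 tests in 3.2s
Ran 35 tests in 1.1s
test_nn failed!
test_jit failed!
"""
test_cnt = sum(int(n) for n in re.findall(r"^Ran (\d+) tests in", output, re.M))
failed_tests = sorted(set(re.findall(r"^(.*) failed!\s*$", output, re.M)))
print(test_cnt, failed_tests)  # 155 ['test_jit', 'test_nn']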
Example 16
 def run_check(args, silent=False, expected_stderr='', **kwargs):
     """Helper function to check stdout/stderr produced via print_warning."""
     self.mock_stderr(True)
     self.mock_stdout(True)
     print_warning(*args, silent=silent, **kwargs)
     stderr = self.get_stderr()
     stdout = self.get_stdout()
     self.mock_stdout(False)
     self.mock_stderr(False)
     self.assertEqual(stdout, '')
     self.assertEqual(stderr, expected_stderr)
Example 17
    def configure_step(self):
        """Custom configuration for R."""

        # define $BLAS_LIBS to build R correctly against BLAS/LAPACK library
        # $LAPACK_LIBS should *not* be specified since that may lead to using generic LAPACK
        # see https://github.com/easybuilders/easybuild-easyconfigs/issues/1435
        env.setvar('BLAS_LIBS', os.getenv('LIBBLAS'))
        self.cfg.update('configopts', "--with-blas --with-lapack")

        # make sure correct config script is used for Tcl/Tk
        for dep in ['Tcl', 'Tk']:
            root = get_software_root(dep)
            if root:
                dep_config = os.path.join(root, 'lib',
                                          '%sConfig.sh' % dep.lower())
                self.cfg.update(
                    'configopts',
                    '--with-%s-config=%s' % (dep.lower(), dep_config))

        if "--with-x=" not in self.cfg['configopts'].lower():
            if get_software_root('X11'):
                self.cfg.update('configopts', '--with-x=yes')
            else:
                self.cfg.update('configopts', '--with-x=no')

        # enable graphic capabilities for plotting, based on available dependencies
        for dep in ['Cairo', 'libjpeg-turbo', 'libpng', 'libtiff']:
            if get_software_root(dep):
                if dep == 'libjpeg-turbo':
                    conf_opt = 'jpeglib'
                else:
                    conf_opt = dep.lower()
                self.cfg.update('configopts', '--with-%s' % conf_opt)

        out = ConfigureMake.configure_step(self)

        # check output of configure command to verify BLAS/LAPACK settings
        ext_libs_regex = re.compile(
            r"External libraries:.*BLAS\((?P<BLAS>.*)\).*LAPACK\((?P<LAPACK>.*)\)"
        )
        res = ext_libs_regex.search(out)
        if res:
            for lib in ['BLAS', 'LAPACK']:
                if res.group(lib) == 'generic':
                    warn_msg = "R will be built with generic %s, which will result in poor performance." % lib
                    self.log.warning(warn_msg)
                    print_warning(warn_msg)
                else:
                    self.log.info("R is configured to use non-generic %s: %s",
                                  lib, res.group(lib))
        else:
            warn_msg = "R is configured to be built without BLAS/LAPACK, which will result in (very) poor performance"
            self.log.warning(warn_msg)
            print_warning(warn_msg)
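
To illustrate the configure-output check in isolation, a standalone sketch that runs the same kind of regex over a fabricated line of configure output and flags the 'generic' case:

# Sketch of the BLAS/LAPACK check above; the sample output line is made up.
import re

out = "External libraries:    readline, BLAS(OpenBLAS), LAPACK(in blas)"
ext_libs_regex = re.compile(r"External libraries:.*BLAS\((?P<BLAS>.*)\).*LAPACK\((?P<LAPACK>.*)\)")
res = ext_libs_regex.search(out)
if res:
    for lib in ('BLAS', 'LAPACK'):
        value = res.group(lib)
        if value == 'generic':
            print("WARNING: %s is generic, expect poor performance" % lib)
        else:
            print("%s: %s" % (lib, value))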
Example 18
    def cleanup(self, *args, **kwargs):
        """Clean up after using OpenMPI in toolchain."""
        super(OpenMPI, self).cleanup(*args, **kwargs)

        tmpdir = os.environ.get('TMPDIR')
        if tmpdir != self.orig_tmpdir:
            try:
                shutil.rmtree(tmpdir)
            except OSError as err:
                print_warning("Failed to clean up temporary directory %s: %s", tmpdir, err)
            env.setvar('TMPDIR', self.orig_tmpdir)
            self.log.info("$TMPDIR restored to %s", self.orig_tmpdir)
Example 19
    def cleanup(self, *args, **kwargs):
        """Clean up after using OpenMPI in toolchain."""
        super(OpenMPI, self).cleanup(*args, **kwargs)

        tmpdir = os.environ.get('TMPDIR')
        if tmpdir != self.orig_tmpdir:
            try:
                shutil.rmtree(tmpdir)
            except OSError as err:
                print_warning("Failed to clean up temporary directory %s: %s", tmpdir, err)
            env.setvar('TMPDIR', self.orig_tmpdir)
            self.log.info("$TMPDIR restored to %s", self.orig_tmpdir)
Example 20
    def test_print_warning(self):
        """Test print_warning"""
        self.mock_stderr(True)
        self.mock_stdout(True)
        print_warning('You have been warned.')
        stderr = self.get_stderr()
        stdout = self.get_stdout()
        self.mock_stdout(False)
        self.mock_stderr(False)

        self.assertEqual(stderr, "\nWARNING: You have been warned.\n\n")
        self.assertEqual(stdout, '')
Example 21
    def test_print_warning(self):
        """Test print_warning"""
        self.mock_stderr(True)
        self.mock_stdout(True)
        print_warning('You have been warned.')
        stderr = self.get_stderr()
        stdout = self.get_stdout()
        self.mock_stdout(False)
        self.mock_stderr(False)

        self.assertEqual(stderr, "\nWARNING: You have been warned.\n\n")
        self.assertEqual(stdout, '')
Example 22
    def install_step(self):
        """Symlink target OpenSSL installation"""
        if all(self.system_ssl[key] for key in ('bin', 'engines', 'include', 'libs')):
            # note: symlink to individual files, not directories,
            # since directory symlinks get resolved easily...

            # link OpenSSL libraries in system
            lib64_dir = os.path.join(self.installdir, 'lib64')
            lib64_engines_dir = os.path.join(lib64_dir, os.path.basename(self.system_ssl['engines']))
            mkdir(lib64_engines_dir, parents=True)

            # link existing known libraries
            for libso in self.system_ssl['libs']:
                symlink(libso, os.path.join(lib64_dir, os.path.basename(libso)))

            # link engines library files
            engine_lib_pattern = [os.path.join(self.system_ssl['engines'], '*')]
            for engine_lib in expand_glob_paths(engine_lib_pattern):
                symlink(engine_lib, os.path.join(lib64_engines_dir, os.path.basename(engine_lib)))

            # relative symlink for unversioned libraries
            cwd = change_dir(lib64_dir)
            for libso in self.system_ssl['libs']:
                libso = os.path.basename(libso)
                unversioned_lib = '%s.%s' % (libso.split('.')[0], get_shared_lib_ext())
                symlink(libso, unversioned_lib, use_abspath_source=False)
            change_dir(cwd)

            # link OpenSSL headers in system
            include_dir = os.path.join(self.installdir, 'include', self.name.lower())
            mkdir(include_dir, parents=True)
            include_pattern = [os.path.join(self.system_ssl['include'], '*')]
            for header_file in expand_glob_paths(include_pattern):
                symlink(header_file, os.path.join(include_dir, os.path.basename(header_file)))

            # link OpenSSL binary in system
            bin_dir = os.path.join(self.installdir, 'bin')
            mkdir(bin_dir)
            symlink(self.system_ssl['bin'], os.path.join(bin_dir, self.name.lower()))

            # install pkg-config files
            self.install_pc_files()

        elif self.cfg.get('wrap_system_openssl'):
            # install OpenSSL component due to lack of OpenSSL in host system
            print_warning("Not all OpenSSL components found in host system, falling back to OpenSSL in EasyBuild!")
            super(EB_OpenSSL_wrapper, self).install_step()
        else:
            # install OpenSSL component by user request
            warn_msg = "Installing OpenSSL from source in EasyBuild by user request ('wrap_system_openssl=%s')"
            print_warning(warn_msg, self.cfg.get('wrap_system_openssl'))
            super(EB_OpenSSL_wrapper, self).install_step()
Example 23
    def get_gromacs_arch(self):
        """Determine value of GMX_SIMD CMake flag based on optarch string.

        Refs:
        [0] http://manual.gromacs.org/documentation/2016.3/install-guide/index.html#typical-installation
        [1] http://manual.gromacs.org/documentation/2016.3/install-guide/index.html#simd-support
        [2] http://www.gromacs.org/Documentation/Acceleration_and_parallelization
        """
        # default: fall back on autodetection
        res = None

        optarch = build_option('optarch') or ''
        # take into account that optarch value is a dictionary if it is specified by compiler family
        if isinstance(optarch, dict):
            comp_fam = self.toolchain.comp_family()
            optarch = optarch.get(comp_fam, '')
        optarch = optarch.upper()

        # The list of GMX_SIMD options can be found
        # http://manual.gromacs.org/documentation/2018/install-guide/index.html#simd-support
        if 'MIC-AVX512' in optarch and LooseVersion(self.version) >= LooseVersion('2016'):
            res = 'AVX_512_KNL'
        elif 'AVX512' in optarch and LooseVersion(self.version) >= LooseVersion('2016'):
            res = 'AVX_512'
        elif 'AVX2' in optarch and LooseVersion(self.version) >= LooseVersion('5.0'):
            res = 'AVX2_256'
        elif 'AVX' in optarch:
            res = 'AVX_256'
        elif 'SSE3' in optarch or 'SSE2' in optarch or 'MARCH=NOCONA' in optarch:
            # Gromacs doesn't have any GMX_SIMD=SSE3 but only SSE2 and SSE4.1 [1].
            # According to [2] the performance difference between SSE2 and SSE4.1 is minor on x86
            # and SSE4.1 is not supported by AMD Magny-Cours[1].
            res = 'SSE2'
        elif optarch == OPTARCH_GENERIC:
            cpu_arch = get_cpu_architecture()
            if cpu_arch == X86_64:
                res = 'SSE2'
            else:
                res = 'None'
        elif optarch:
            warn_msg = "--optarch configuration setting set to %s but not taken into account; " % optarch
            warn_msg += "compiling GROMACS for the current host architecture (i.e. the default behavior)"
            self.log.warning(warn_msg)
            print_warning(warn_msg)

        if res:
            self.log.info("Target architecture based on optarch configuration option ('%s'): %s", optarch, res)
        else:
            self.log.info("No target architecture specified based on optarch configuration option ('%s')", optarch)

        return res
Example 24
    def sanity_check_step(self):
        """Custom sanity check for Clang."""
        shlib_ext = get_shared_lib_ext()
        custom_paths = {
            'files': [
                "bin/clang", "bin/clang++", "bin/llvm-ar", "bin/llvm-nm", "bin/llvm-as", "bin/opt", "bin/llvm-link",
                "bin/llvm-config", "bin/llvm-symbolizer", "include/llvm-c/Core.h", "include/clang-c/Index.h",
                "lib/libclang.%s" % shlib_ext, "lib/clang/%s/include/stddef.h" % self.version,
            ],
            'dirs': ["include/clang", "include/llvm", "lib/clang/%s/lib" % self.version],
        }
        if self.cfg['static_analyzer']:
            custom_paths['files'].extend(["bin/scan-build", "bin/scan-view"])

        if self.cfg['build_extra_clang_tools'] and LooseVersion(self.version) >= LooseVersion('3.4'):
            custom_paths['files'].extend(["bin/clang-tidy"])

        if self.cfg["usepolly"]:
            custom_paths['files'].extend(["lib/LLVMPolly.%s" % shlib_ext])
            custom_paths['dirs'].extend(["include/polly"])

        if self.cfg["build_lld"]:
            custom_paths['files'].extend(["bin/lld"])

        if self.cfg["libcxx"]:
            custom_paths['files'].extend(["lib/libc++.%s" % shlib_ext])
            custom_paths['files'].extend(["lib/libc++abi.%s" % shlib_ext])

        if LooseVersion(self.version) >= LooseVersion('3.8'):
            custom_paths['files'].extend(["lib/libomp.%s" % shlib_ext, "lib/clang/%s/include/omp.h" % self.version])

        if 'NVPTX' in self.cfg['build_targets']:
            arch = get_cpu_architecture()
            # Check architecture explicitly since Clang uses potentially
            # different names
            if arch == X86_64:
                arch = 'x86_64'
            elif arch == POWER:
                arch = 'ppc64'
            elif arch == AARCH64:
                arch = 'aarch64'
            else:
                print_warning("Unknown CPU architecture (%s) for OpenMP offloading!" % arch)
            custom_paths['files'].extend(["lib/libomptarget.%s" % shlib_ext,
                                          "lib/libomptarget-nvptx.a",
                                          "lib/libomptarget.rtl.cuda.%s" % shlib_ext,
                                          "lib/libomptarget.rtl.%s.%s" % (arch, shlib_ext)])

        custom_commands = ['clang --help', 'clang++ --help', 'llvm-config --cxxflags']
        super(EB_Clang, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
Example 25
def _get_arch_constant():
    """
    Get value for ARCH constant.
    """
    arch = platform.uname()[4]

    # macOS on Arm produces 'arm64' rather than 'aarch64'
    if arch == 'arm64':
        arch = 'aarch64'

    if arch not in KNOWN_ARCH_CONSTANTS:
        print_warning("Using unknown value for ARCH constant: %s", arch)

    return arch
Example 26
def check_root_usage(allow_use_as_root=False):
    """
    Check whether we are running as root, and act accordingly

    :param allow_use_as_root: allow use of EasyBuild as root (but do print a warning when doing so)
    """
    if os.getuid() == 0:
        if allow_use_as_root:
            msg = "Using EasyBuild as root is NOT recommended, please proceed with care!\n"
            msg += "(this is only allowed because EasyBuild was configured with "
            msg += "--allow-use-as-root-and-accept-consequences)"
            print_warning(msg)
        else:
            raise EasyBuildError("You seem to be running EasyBuild with root privileges which is not wise, "
                                 "so let's end this here.")
Example 27
def check_root_usage(allow_use_as_root=False):
    """
    Check whether we are running as root, and act accordingly

    :param allow_use_as_root: allow use of EasyBuild as root (but do print a warning when doing so)
    """
    if os.getuid() == 0:
        if allow_use_as_root:
            msg = "Using EasyBuild as root is NOT recommended, please proceed with care!\n"
            msg += "(this is only allowed because EasyBuild was configured with "
            msg += "--allow-use-as-root-and-accept-consequences)"
            print_warning(msg)
        else:
            raise EasyBuildError("You seem to be running EasyBuild with root privileges which is not wise, "
                                 "so let's end this here.")
Example 28
    def configure_step(self):
        """Custom configuration for R."""

        # define $BLAS_LIBS to build R correctly against BLAS/LAPACK library
        # $LAPACK_LIBS should *not* be specified since that may lead to using generic LAPACK
        # see https://github.com/easybuilders/easybuild-easyconfigs/issues/1435
        env.setvar('BLAS_LIBS', os.getenv('LIBBLAS'))
        self.cfg.update('configopts', "--with-blas --with-lapack")

        # make sure correct config script is used for Tcl/Tk
        for dep in ['Tcl', 'Tk']:
            root = get_software_root(dep)
            if root:
                dep_config = os.path.join(root, 'lib', '%sConfig.sh' % dep.lower())
                self.cfg.update('configopts', '--with-%s-config=%s' % (dep.lower(), dep_config))

        if "--with-x=" not in self.cfg['configopts'].lower():
            if get_software_root('X11'):
                self.cfg.update('configopts', '--with-x=yes')
            else:
                self.cfg.update('configopts', '--with-x=no')

        # enable graphic capabilities for plotting, based on available dependencies
        for dep in ['Cairo', 'libjpeg-turbo', 'libpng', 'libtiff']:
            if get_software_root(dep):
                if dep == 'libjpeg-turbo':
                    conf_opt = 'jpeglib'
                else:
                    conf_opt = dep.lower()
                self.cfg.update('configopts', '--with-%s' % conf_opt)

        out = ConfigureMake.configure_step(self)

        # check output of configure command to verify BLAS/LAPACK settings
        ext_libs_regex = re.compile(r"External libraries:.*BLAS\((?P<BLAS>.*)\).*LAPACK\((?P<LAPACK>.*)\)")
        res = ext_libs_regex.search(out)
        if res:
            for lib in ['BLAS', 'LAPACK']:
                if res.group(lib) == 'generic':
                    warn_msg = "R will be built with generic %s, which will result in poor performance." % lib
                    self.log.warning(warn_msg)
                    print_warning(warn_msg)
                else:
                    self.log.info("R is configured to use non-generic %s: %s", lib, res.group(lib))
        else:
            warn_msg = "R is configured to be built without BLAS/LAPACK, which will result in (very) poor performance"
            self.log.warning(warn_msg)
            print_warning(warn_msg)
Example 29
    def install_step(self):
        """Installation of OpenSSL and SSL certificates"""
        super(EB_OpenSSL, self).install_step()

        # SSL certificates
        # OPENSSLDIR is already populated by the installation of OpenSSL
        # try to symlink system certificates in the empty 'certs' directory
        openssl_certs_dir = os.path.join(self.installdir, 'ssl', 'certs')

        if self.ssl_certs_dir:
            remove_dir(openssl_certs_dir)
            symlink(self.ssl_certs_dir, openssl_certs_dir)
        else:
            print_warning(
                "OpenSSL successfully installed without system SSL certificates. "
                "Some packages might experience limited functionality.")
Example 30
    def prepare(self, *args, **kwargs):
        """
        Prepare for using OpenMPI library in toolchain environment
        """
        super(OpenMPI, self).prepare(*args, **kwargs)

        # OpenMPI 2.x trips if path specified in $TMPDIR is too long
        # see https://www.open-mpi.org/faq/?category=osx#startup-errors-with-open-mpi-2.0.x
        self.orig_tmpdir = os.environ.get('TMPDIR')
        ompi_ver = self.get_software_version(self.MPI_MODULE_NAME)[0]
        if LooseVersion(ompi_ver) >= LooseVersion('2.0') and LooseVersion(ompi_ver) < LooseVersion('3.0'):
            if len(self.orig_tmpdir) > 40:
                tmpdir = tempfile.mkdtemp(prefix='/tmp/')
                env.setvar('TMPDIR', tmpdir)
                warn_msg = "Long $TMPDIR path may cause problems with OpenMPI 2.x, using shorter path: %s" % tmpdir
                self.log.warning(warn_msg)
                print_warning(warn_msg, silent=build_option('silent'))
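
The workaround amounts to swapping in a short scratch directory when the current $TMPDIR path is long; a standalone sketch (the 40-character threshold is taken from the snippet, the /tmp prefix is an assumption about the system):

# Sketch of the short-$TMPDIR workaround above.
import os
import tempfile

orig_tmpdir = os.environ.get('TMPDIR', '/tmp')
if len(orig_tmpdir) > 40:
    short_tmpdir = tempfile.mkdtemp(prefix='/tmp/')
    os.environ['TMPDIR'] = short_tmpdir
    print("WARNING: long $TMPDIR may trip OpenMPI 2.x, using %s instead" % short_tmpdir)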
Example 31
    def build_step(self, *args, **kwargs):
        """Custom build procedure for Python, ensure stack size limit is set to 'unlimited' (if desired)."""

        # make sure installation directory doesn't already exist when building with --rpath and
        # configuring with --enable-optimizations, since that leads to errors like:
        #   ./python: symbol lookup error: ./python: undefined symbol: __gcov_indirect_call
        # see also https://bugs.python.org/issue29712
        enable_opts_flag = '--enable-optimizations'
        if build_option(
                'rpath') and enable_opts_flag in self.cfg['configopts']:
            if os.path.exists(self.installdir):
                warning_msg = "Removing existing installation directory '%s', "
                warning_msg += "because EasyBuild is configured to use RPATH linking "
                warning_msg += "and %s configure option is used." % enable_opts_flag
                print_warning(warning_msg % self.installdir)
                remove_dir(self.installdir)

        if self.cfg['ulimit_unlimited']:
            # determine current stack size limit
            (out, _) = run_cmd("ulimit -s")
            curr_ulimit_s = out.strip()

            # figure out hard limit for stack size limit;
            # this determines whether or not we can use "ulimit -s unlimited"
            (out, _) = run_cmd("ulimit -s -H")
            max_ulimit_s = out.strip()

            if curr_ulimit_s == UNLIMITED:
                self.log.info("Current stack size limit is %s: OK",
                              curr_ulimit_s)
            elif max_ulimit_s == UNLIMITED:
                self.log.info(
                    "Current stack size limit is %s, setting it to %s for build...",
                    curr_ulimit_s, UNLIMITED)
                self.cfg.update('prebuildopts', "ulimit -s %s && " % UNLIMITED)
            else:
                msg = "Current stack size limit is %s, and can not be set to %s due to hard limit of %s;"
                msg += " setting stack size limit to %s instead, "
                msg += " this may break part of the compilation (e.g. hashlib)..."
                print_warning(
                    msg %
                    (curr_ulimit_s, UNLIMITED, max_ulimit_s, max_ulimit_s))
                self.cfg.update('prebuildopts',
                                "ulimit -s %s && " % max_ulimit_s)

        super(EB_Python, self).build_step(*args, **kwargs)
Example 32
    def prepare(self, *args, **kwargs):
        """
        Prepare for using OpenMPI library in toolchain environment
        """
        super(OpenMPI, self).prepare(*args, **kwargs)

        # OpenMPI 2.x trips if path specified in $TMPDIR is too long
        # see https://www.open-mpi.org/faq/?category=osx#startup-errors-with-open-mpi-2.0.x
        self.orig_tmpdir = os.environ.get('TMPDIR')
        ompi_ver = self.get_software_version(self.MPI_MODULE_NAME)[0]
        if LooseVersion(ompi_ver) >= LooseVersion('2.0') and LooseVersion(ompi_ver) < LooseVersion('3.0'):
            if len(self.orig_tmpdir) > 40:
                tmpdir = tempfile.mkdtemp(prefix='/tmp/')
                env.setvar('TMPDIR', tmpdir)
                warn_msg = "Long $TMPDIR path may cause problems with OpenMPI 2.x, using shorter path: %s" % tmpdir
                self.log.warning(warn_msg)
                print_warning(warn_msg, silent=build_option('silent'))
Example 33
 def make_module_req_guess(self):
     """
     A dictionary of possible directories to look for.  Return known dict for the system MPI.
     """
     guesses = {}
     if self.cfg['generate_standalone_module']:
         if self.mpi_prefix in ['/usr', '/usr/local']:
             # Force off adding paths to module since unloading such a module would be a potential shell killer
             print_warning("Ignoring option 'generate_standalone_module' since installation prefix is %s",
                           self.mpi_prefix)
         else:
             if self.cfg['name'] in ['OpenMPI', 'SpectrumMPI']:
                 guesses = ConfigureMake.make_module_req_guess(self)
             elif self.cfg['name'] in ['impi']:
                 guesses = EB_impi.make_module_req_guess(self)
             else:
                 raise EasyBuildError("I don't know how to generate module var guesses for %s", self.cfg['name'])
     return guesses
Example 34
 def make_module_req_guess(self):
     """
     A dictionary of possible directories to look for.  Return known dict for the system MPI.
     """
     guesses = {}
     if self.cfg['generate_standalone_module']:
         if self.mpi_prefix in ['/usr', '/usr/local']:
             # Force off adding paths to module since unloading such a module would be a potential shell killer
             print_warning("Ignoring option 'generate_standalone_module' since installation prefix is %s",
                           self.mpi_prefix)
         else:
             if self.cfg['name'] in ['OpenMPI', 'SpectrumMPI']:
                 guesses = ConfigureMake.make_module_req_guess(self)
             elif self.cfg['name'] in ['impi']:
                 guesses = EB_impi.make_module_req_guess(self)
             else:
                 raise EasyBuildError("I don't know how to generate module var guesses for %s", self.cfg['name'])
     return guesses
Example 35
    def _check_version(self):
        """Check whether GC3Pie version complies with required version."""

        try:
            from pkg_resources import get_distribution, DistributionNotFound
            pkg = get_distribution('gc3pie')

            if LooseVersion(pkg.version) < LooseVersion(self.REQ_VERSION):
                raise EasyBuildError(
                    "Found GC3Pie version %s, but version %s or more recent is required",
                    pkg.version, self.REQ_VERSION)

        except ImportError:
            print_warning("Failed to check required GC3Pie version (>= %s)",
                          self.REQ_VERSION)

        except DistributionNotFound as err:
            raise EasyBuildError("Cannot load GC3Pie package: %s", err)
Example 36
 def make_module_req_guess(self):
     """
     A dictionary of possible directories to look for.  Return known dict for the system compiler, or empty dict if
     generate_standalone_module parameter is False
     """
     guesses = {}
     if self.cfg['generate_standalone_module']:
         if self.compiler_prefix in ['/usr', '/usr/local']:
             # Force off adding paths to module since unloading such a module would be a potential shell killer
             print_warning("Ignoring option 'generate_standalone_module' since installation prefix is %s",
                           self.compiler_prefix)
         else:
             if self.cfg['name'] in ['GCC','GCCcore']:
                 guesses = EB_GCC.make_module_req_guess(self)
             elif self.cfg['name'] in ['icc']:
                 guesses = EB_icc.make_module_req_guess(self)
             elif self.cfg['name'] in ['ifort']:
                 guesses = EB_ifort.make_module_req_guess(self)
             else:
                 raise EasyBuildError("I don't know how to generate module var guesses for %s", self.cfg['name'])
     return guesses
Example 37
    def install_step(self):
        """Installation of OpenSSL and SSL certificates"""
        super(EB_OpenSSL, self).install_step()

        # SSL certificates
        # OPENSSLDIR is already populated by the installation of OpenSSL
        # try to symlink system certificates in the empty 'certs' directory
        ssl_dir = os.path.join(self.installdir, 'ssl')
        openssl_certs_dir = os.path.join(ssl_dir, 'certs')

        if self.ssl_certs_dir:
            remove_dir(openssl_certs_dir)
            symlink(self.ssl_certs_dir, openssl_certs_dir)

            # also symlink cert.pem file, if it exists
            # (required on CentOS 7, see https://github.com/easybuilders/easybuild-easyconfigs/issues/14058)
            cert_pem_path = os.path.join(os.path.dirname(self.ssl_certs_dir), 'cert.pem')
            if os.path.isfile(cert_pem_path):
                symlink(cert_pem_path, os.path.join(ssl_dir, os.path.basename(cert_pem_path)))
        else:
            print_warning("OpenSSL successfully installed without system SSL certificates. "
                          "Some packages might experience limited functionality.")
Example 38
 def verify_system_libs_info(self):
     """Verifies that the stored info about $TF_SYSTEM_LIBS is complete"""
     available_libs_src = set(get_system_libs_from_tf(self.start_dir))
     available_libs_eb = set(
         split_tf_libs_txt(
             get_system_libs_for_version(self.version, as_valid_libs=True)))
     # If available_libs_eb is empty it is not an error e.g. it is not worth trying to make all old ECs work
     # So we just log it so it can be verified manually if required
     if not available_libs_eb:
         self.log.warning(
             'TensorFlow EasyBlock does not have any information for $TF_SYSTEM_LIBS stored. '
             +
             'This means most dependencies will be downloaded at build time by TensorFlow.\n'
             +
             'Available $TF_SYSTEM_LIBS according to the TensorFlow sources: %s',
             sorted(available_libs_src))
         return
     # Those 2 sets should be equal. We determine the differences here to report better errors
     missing_libs = available_libs_src - available_libs_eb
     unknown_libs = available_libs_eb - available_libs_src
     if missing_libs or unknown_libs:
         if not available_libs_src:
             msg = 'Failed to determine available $TF_SYSTEM_LIBS from the source'
         else:
             msg = 'Values for $TF_SYSTEM_LIBS in the TensorFlow EasyBlock are incomplete.\n'
             if missing_libs:
                 # Libs available according to TF sources but not listed in this EasyBlock
                 msg += 'Missing entries for $TF_SYSTEM_LIBS: %s\n' % missing_libs
             if unknown_libs:
                 # Libs listed in this EasyBlock but not present in the TF sources -> Removed?
                 msg += 'Unrecognized entries for $TF_SYSTEM_LIBS: %s\n' % unknown_libs
             msg += 'The EasyBlock needs to be updated to fully work with TensorFlow version %s' % self.version
         if build_option('strict') == run.ERROR:
             raise EasyBuildError(msg)
         else:
             print_warning(msg)
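
The comparison above is plain set arithmetic; a tiny sketch with fabricated library names showing which side each difference reports:

# Plain-set sketch of the $TF_SYSTEM_LIBS comparison; names are made up.
available_libs_src = {'curl', 'zlib', 'png'}   # as reported by the TF sources
available_libs_eb = {'curl', 'zlib', 'gif'}    # as curated in the EasyBlock
missing_libs = available_libs_src - available_libs_eb   # in sources, missing from curated list
unknown_libs = available_libs_eb - available_libs_src   # curated, but gone from sources
print(sorted(missing_libs), sorted(unknown_libs))  # ['png'] ['gif']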
Example 39
def obtain_config_guess(download_source_path=None, search_source_paths=None):
    """
    Locate or download an up-to-date config.guess

    :param download_source_path: Path to download config.guess to
    :param search_source_paths: Paths to search for config.guess
    :return: Path to config.guess or None
    """
    log = fancylogger.getLogger('obtain_config_guess')

    eb_source_paths = source_paths()

    if download_source_path is None:
        download_source_path = eb_source_paths[0]
    else:
        log.deprecated("Specifying custom source path to download config.guess via 'download_source_path'", '5.0')

    if search_source_paths is None:
        search_source_paths = eb_source_paths
    else:
        log.deprecated("Specifying custom location to search for updated config.guess via 'search_source_paths'", '5.0')

    config_guess = 'config.guess'
    sourcepath_subdir = os.path.join('generic', 'eb_v%s' % EASYBLOCKS_VERSION, 'ConfigureMake')

    config_guess_path = None

    # check if config.guess has already been downloaded to source path
    for path in search_source_paths:
        cand_config_guess_path = os.path.join(path, sourcepath_subdir, config_guess)
        if os.path.isfile(cand_config_guess_path) and check_config_guess(cand_config_guess_path):
            force_download = build_option('force_download')
            if force_download:
                print_warning("Found file %s at %s, but re-downloading it anyway..."
                              % (config_guess, cand_config_guess_path))
            else:
                config_guess_path = cand_config_guess_path
                log.info("Found %s at %s", config_guess, config_guess_path)
            break

    if not config_guess_path:
        cand_config_guess_path = os.path.join(download_source_path, sourcepath_subdir, config_guess)
        config_guess_url = CONFIG_GUESS_URL_STUB + CONFIG_GUESS_COMMIT_ID
        if not download_file(config_guess, config_guess_url, cand_config_guess_path):
            print_warning("Failed to download recent %s to %s", config_guess, cand_config_guess_path, log=log)
        elif not check_config_guess(cand_config_guess_path):
            print_warning("Verification failed for file %s, not using it!", cand_config_guess_path, log=log)
            remove_file(cand_config_guess_path)
        else:
            config_guess_path = cand_config_guess_path
            adjust_permissions(config_guess_path, stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH, add=True)
            log.info("Verified %s at %s, using it if required", config_guess, config_guess_path)

    return config_guess_path
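For context, a hedged usage sketch that mirrors how the ConfigureMake easyblock consumes the result (it assumes an initialised EasyBuild configuration; run_cmd comes from easybuild.tools.run and print_warning from easybuild.tools.build_log):

# Hypothetical usage: locate (or download) config.guess, then query the GNU system triplet with it
config_guess_path = obtain_config_guess()
if config_guess_path:
    system_type, _ = run_cmd(config_guess_path, log_all=True)
    print("GNU system triplet: %s" % system_type.strip())
else:
    print_warning("config.guess could not be located or downloaded")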
Example no. 40
def get_kokkos_arch(cuda_cc, kokkos_arch):
    """
    Return KOKKOS ARCH in LAMMPS required format, which is either 'CPU_ARCH' or 'CPU_ARCH;GPU_ARCH'.

    see: https://lammps.sandia.gov/doc/Build_extras.html#kokkos
    """
    cuda = get_software_root('CUDA')
    processor_arch = None

    if kokkos_arch:
        if kokkos_arch not in KOKKOS_CPU_ARCH_LIST:
            warning_msg = "Specified CPU ARCH (%s) " % kokkos_arch
            warning_msg += "was not found in listed options [%s]." % KOKKOS_CPU_ARCH_LIST
            warning_msg += "Still might work though."
            print_warning(warning_msg)
        processor_arch = kokkos_arch

    else:
        warning_msg = "kokkos_arch not set. Trying to auto-detect CPU arch."
        print_warning(warning_msg)

        processor_arch = KOKKOS_CPU_MAPPING.get(get_cpu_arch())

        if not processor_arch:
            error_msg = "Couldn't determine CPU architecture, you need to set 'kokkos_arch' manually."
            raise EasyBuildError(error_msg)

        print_msg("Determined cpu arch: %s" % processor_arch)

    if cuda:
        # CUDA is available, so also determine the GPU architecture
        gpu_arch = None
        for cc in sorted(cuda_cc, reverse=True):
            gpu_arch = KOKKOS_GPU_ARCH_TABLE.get(str(cc))
            if gpu_arch:
                break
            else:
                warning_msg = "(%s) GPU ARCH was not found in listed options." % cc
                print_warning(warning_msg)

        if not gpu_arch:
            error_msg = "Specified GPU ARCH (%s) " % cuda_cc
            error_msg += "was not found in listed options [%s]." % KOKKOS_GPU_ARCH_TABLE
            raise EasyBuildError(error_msg)

        kokkos_arch = "%s;%s" % (processor_arch, gpu_arch)

    else:
        kokkos_arch = processor_arch

    return kokkos_arch
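A hedged usage sketch; the argument values are purely illustrative and the KOKKOS_* lookup tables are defined elsewhere in this easyblock:

# Hypothetical call: map a CPU arch (plus CUDA compute capabilities, if a CUDA module is loaded)
# to the 'CPU_ARCH' or 'CPU_ARCH;GPU_ARCH' string expected by LAMMPS/Kokkos
kokkos_arch = get_kokkos_arch(cuda_cc=['7.0', '8.0'], kokkos_arch='HSW')
# with CUDA available, the highest supported compute capability determines GPU_ARCH;
# without CUDA the result is just the CPU_ARCH part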
Example no. 41
            fn = ec_filename_for(tmpfn)

            # get rid of temporary file
            os.remove(tmpfn)
        except OSError as err:
            raise EasyBuildError("Failed to determine suiting filename for tweaked easyconfig file: %s", err)

        if targetdir is None:
            targetdir = tempfile.gettempdir()
        tweaked_ec = os.path.join(targetdir, fn)
        _log.debug("Generated file name for tweaked easyconfig file: %s", tweaked_ec)

    # write out tweaked easyconfig file
    if os.path.exists(tweaked_ec):
        if build_option('force'):
            print_warning("Overwriting existing file at %s with tweaked easyconfig file (due to --force)", tweaked_ec)
        else:
            raise EasyBuildError("A file already exists at %s where tweaked easyconfig file would be written",
                                 tweaked_ec)

    write_file(tweaked_ec, ectxt)
    _log.info("Tweaked easyconfig file written to %s", tweaked_ec)

    return tweaked_ec


def pick_version(req_ver, avail_vers):
    """Pick version based on an optionally desired version and available versions.

    If a desired version is specified, the most recent version that is less recent than or equal to
    the desired version will be picked; else, the most recent version will be picked.
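A minimal re-implementation sketch of the rule the docstring describes, purely for illustration; this is not EasyBuild's actual pick_version implementation, whose body is not shown in this excerpt:

from distutils.version import LooseVersion

def pick_version_sketch(req_ver, avail_vers):
    """Illustrative sketch: pick the most recent available version not newer than the desired one."""
    if req_ver:
        candidates = [v for v in avail_vers if LooseVersion(v) <= LooseVersion(req_ver)]
    else:
        candidates = list(avail_vers)
    return max(candidates, key=LooseVersion) if candidates else None

# pick_version_sketch('1.2', ['1.0', '1.1', '1.3'])  # -> '1.1'
# pick_version_sketch(None, ['1.0', '1.1', '1.3'])   # -> '1.3'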
Example no. 42
    def configure_step(self, cmd_prefix=''):
        """
        Configure step
        - typically ./configure --prefix=/install/path style
        """

        if self.cfg.get('configure_cmd_prefix'):
            if cmd_prefix:
                tup = (cmd_prefix, self.cfg['configure_cmd_prefix'])
                self.log.debug("Specified cmd_prefix '%s' is overruled by configure_cmd_prefix '%s'" % tup)
            cmd_prefix = self.cfg['configure_cmd_prefix']

        if self.cfg.get('tar_config_opts'):
            # setting am_cv_prog_tar_ustar avoids that configure tries to figure out
            # which command should be used for tarring/untarring
            # am__tar and am__untar should be set to something decent (tar should work)
            tar_vars = {
                'am__tar': 'tar chf - "$$tardir"',
                'am__untar': 'tar xf -',
                'am_cv_prog_tar_ustar': 'easybuild_avoid_ustar_testing'
            }
            for (key, val) in tar_vars.items():
                self.cfg.update('preconfigopts', "%s='%s'" % (key, val))

        prefix_opt = self.cfg.get('prefix_opt')
        if prefix_opt is None:
            prefix_opt = '--prefix='

        configure_command = cmd_prefix + (self.cfg.get('configure_cmd') or DEFAULT_CONFIGURE_CMD)

        # avoid using config.guess from an Autoconf generated package as it is frequently out of date;
        # use the version downloaded by EasyBuild instead, and provide the result to the configure command;
        # it is possible that the configure script is generated using preconfigopts...
        # if so, we're at the mercy of the gods
        build_type_option = ''
        host_type_option = ''

        # note: reading contents of 'configure' script in bytes mode,
        # to avoid problems when non-UTF-8 characters are included
        # see https://github.com/easybuilders/easybuild-easyblocks/pull/1817
        if os.path.exists(configure_command) and AUTOCONF_GENERATED_MSG in read_file(configure_command, mode='rb'):

            build_type = self.cfg.get('build_type')
            host_type = self.cfg.get('host_type')

            if build_type is None or host_type is None:

                # config.guess script may not be obtained yet despite the call in fetch_step,
                # for example when installing a Bundle component with ConfigureMake
                if self.config_guess is None:
                    self.config_guess = self.obtain_config_guess()

                if self.config_guess is None:
                    print_warning("No config.guess available, not setting '--build' option for configure step\n"
                                  "EasyBuild attempts to download a recent config.guess but seems to have failed!")
                else:
                    self.check_config_guess()
                    system_type, _ = run_cmd(self.config_guess, log_all=True)
                    system_type = system_type.strip()
                    self.log.info("%s returned a system type '%s'",
                                  self.config_guess, system_type)

                    if build_type is None:
                        build_type = system_type
                        self.log.info("Providing '%s' as value to --build option of configure script", build_type)

                    if host_type is None:
                        host_type = system_type
                        self.log.info("Providing '%s' as value to --host option of configure script", host_type)

            if build_type is not None and build_type:
                build_type_option = '--build=' + build_type

            if host_type is not None and host_type:
                host_type_option = '--host=' + host_type

        cmd = ' '.join([
            self.cfg['preconfigopts'],
            configure_command,
            prefix_opt + self.installdir,
            build_type_option,
            host_type_option,
            self.cfg['configopts'],
        ])

        (out, _) = run_cmd(cmd, log_all=True, simple=False)

        return out
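With hypothetical values filled in, the command assembled by the ' '.join([...]) above can be reconstructed as follows (a sketch, not output captured from a real build):

# Illustrative reconstruction of the assembled configure command (all values are made up)
cmd = ' '.join([
    'CFLAGS="-O2"',                     # preconfigopts
    './configure',                      # configure_command (DEFAULT_CONFIGURE_CMD)
    '--prefix=/path/to/installdir',     # prefix_opt + self.installdir
    '--build=x86_64-pc-linux-gnu',      # build_type_option, from config.guess
    '--host=x86_64-pc-linux-gnu',       # host_type_option
    '--enable-shared',                  # configopts
])
# -> 'CFLAGS="-O2" ./configure --prefix=/path/to/installdir --build=x86_64-pc-linux-gnu ...'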
Example no. 43
def tweak(easyconfigs, build_specs, modtool, targetdirs=None):
    """Tweak list of easyconfigs according to provided build specifications."""
    tweaked_ecs_path, tweaked_ecs_deps_path = None, None
    if targetdirs is not None:
        tweaked_ecs_path, tweaked_ecs_deps_path = targetdirs
    # make sure easyconfigs all feature the same toolchain (otherwise we *will* run into trouble)
    toolchains = nub(['%(name)s/%(version)s' % ec['ec']['toolchain'] for ec in easyconfigs])
    if len(toolchains) > 1:
        raise EasyBuildError("Multiple toolchains featured in easyconfigs, --try-X not supported in that case: %s",
                             toolchains)
    # Toolchain is unique, let's store it
    source_toolchain = easyconfigs[-1]['ec']['toolchain']
    modifying_toolchains = False
    target_toolchain = {}
    src_to_dst_tc_mapping = {}
    revert_to_regex = False

    if 'toolchain_name' in build_specs or 'toolchain_version' in build_specs:
        keys = build_specs.keys()

        # Make sure there are no more build_specs, as combining --try-toolchain* with other options is currently not
        # supported
        if any(key not in ['toolchain_name', 'toolchain_version', 'toolchain'] for key in keys):
            warning_msg = "Combining --try-toolchain* with other build options is not fully supported: using regex"
            print_warning(warning_msg, silent=build_option('silent'))
            revert_to_regex = True

        if not revert_to_regex:
            # we're doing something with the toolchain,
            # so build specifications should be applied to whole dependency graph;
            # obtain full dependency graph for specified easyconfigs;
            # easyconfigs will be ordered 'top-to-bottom' (toolchains and dependencies appearing first)
            modifying_toolchains = True

            if 'toolchain_name' in keys:
                target_toolchain['name'] = build_specs['toolchain_name']
            else:
                target_toolchain['name'] = source_toolchain['name']

            if 'toolchain_version' in keys:
                target_toolchain['version'] = build_specs['toolchain_version']
            else:
                target_toolchain['version'] = source_toolchain['version']

            if build_option('map_toolchains'):
                try:
                    src_to_dst_tc_mapping = map_toolchain_hierarchies(source_toolchain, target_toolchain, modtool)
                except EasyBuildError as err:
                    # make sure exception was raised by match_minimum_tc_specs because toolchain mapping didn't work
                    if "No possible mapping from source toolchain" in err.msg:
                        error_msg = err.msg + '\n'
                        error_msg += "Toolchain %s is not equivalent to toolchain %s in terms of capabilities. "
                        error_msg += "(If you know what you are doing, "
                        error_msg += "you can use --disable-map-toolchains to proceed anyway.)"
                        raise EasyBuildError(error_msg, target_toolchain['name'], source_toolchain['name'])
                    else:
                        # simply re-raise the exception if something else went wrong
                        raise err
            else:
                msg = "Mapping of (sub)toolchains disabled, so falling back to regex mode, "
                msg += "disabling recursion and not changing (sub)toolchains for dependencies"
                _log.info(msg)
                revert_to_regex = True
                modifying_toolchains = False

        if not revert_to_regex:
            _log.debug("Applying build specifications recursively (no software name/version found): %s", build_specs)
            orig_ecs = resolve_dependencies(easyconfigs, modtool, retain_all_deps=True)

            # Filter out the toolchain hierarchy (which would only appear if we are applying build_specs recursively)
            # We can leave any dependencies they may have as they will only be used if required (or originally listed)
            _log.debug("Filtering out toolchain hierarchy for %s", source_toolchain)

            i = 0
            while i < len(orig_ecs):
                tc_names = [tc['name'] for tc in get_toolchain_hierarchy(source_toolchain)]
                if orig_ecs[i]['ec']['name'] in tc_names:
                    # drop elements in toolchain hierarchy
                    del orig_ecs[i]
                else:
                    i += 1
    else:
        revert_to_regex = True

    if revert_to_regex:
        # no recursion if software name/version build specifications are included or we are amending something
        # in that case, do not construct full dependency graph
        orig_ecs = easyconfigs
        _log.debug("Software name/version found, so not applying build specifications recursively: %s" % build_specs)

    # keep track of originally listed easyconfigs (via their path)
    listed_ec_paths = [ec['spec'] for ec in easyconfigs]

    # generate tweaked easyconfigs, and continue with those instead
    tweaked_easyconfigs = []
    for orig_ec in orig_ecs:
        # Only return tweaked easyconfigs for easyconfigs which were listed originally on the command line
        # (and use the prepended path so that they are found first).
        # easyconfig files for dependencies are also generated but not included, they will be resolved via --robot
        # either from existing easyconfigs or, if that fails, from easyconfigs in the appended path

        tc_name = orig_ec['ec']['toolchain']['name']

        new_ec_file = None
        verification_build_specs = copy.copy(build_specs)
        if orig_ec['spec'] in listed_ec_paths:
            if modifying_toolchains:
                if tc_name in src_to_dst_tc_mapping:
                    new_ec_file = map_easyconfig_to_target_tc_hierarchy(orig_ec['spec'], src_to_dst_tc_mapping,
                                                                        tweaked_ecs_path)
                    # Need to update the toolchain in the build_specs to match the toolchain mapping
                    keys = verification_build_specs.keys()
                    if 'toolchain_name' in keys:
                        verification_build_specs['toolchain_name'] = src_to_dst_tc_mapping[tc_name]['name']
                    if 'toolchain_version' in keys:
                        verification_build_specs['toolchain_version'] = src_to_dst_tc_mapping[tc_name]['version']
                    if 'toolchain' in keys:
                        verification_build_specs['toolchain'] = src_to_dst_tc_mapping[tc_name]
            else:
                new_ec_file = tweak_one(orig_ec['spec'], None, build_specs, targetdir=tweaked_ecs_path)

            if new_ec_file:
                new_ecs = process_easyconfig(new_ec_file, build_specs=verification_build_specs)
                tweaked_easyconfigs.extend(new_ecs)
        else:
            # Place all tweaked dependency easyconfigs in the directory appended to the robot path
            if modifying_toolchains:
                if tc_name in src_to_dst_tc_mapping:
                    new_ec_file = map_easyconfig_to_target_tc_hierarchy(orig_ec['spec'], src_to_dst_tc_mapping,
                                                                        targetdir=tweaked_ecs_deps_path)
            else:
                new_ec_file = tweak_one(orig_ec['spec'], None, build_specs, targetdir=tweaked_ecs_deps_path)

    return tweaked_easyconfigs
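A hedged invocation sketch; easyconfigs and modtool would be supplied by the EasyBuild framework, and the build_specs and target directories below are illustrative:

# Hypothetical --try-toolchain style call: retarget the parsed easyconfigs to another toolchain
build_specs = {'toolchain_name': 'foss', 'toolchain_version': '2021a'}
tweaked_ecs = tweak(easyconfigs, build_specs, modtool,
                    targetdirs=('/tmp/tweaked_ecs', '/tmp/tweaked_ecs_deps'))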
Example no. 44
def process_software_build_specs(options):
    """
    Create a dictionary with specified software build options.
    The options arguments should be a parsed option list (as delivered by parse_options(args).options)
    """

    try_to_generate = False
    build_specs = {}

    # regular options: don't try to generate easyconfig, and search
    opts_map = {
        "name": options.software_name,
        "version": options.software_version,
        "toolchain_name": options.toolchain_name,
        "toolchain_version": options.toolchain_version,
    }

    # try options: enable optional generation of easyconfig
    try_opts_map = {
        "name": options.try_software_name,
        "version": options.try_software_version,
        "toolchain_name": options.try_toolchain_name,
        "toolchain_version": options.try_toolchain_version,
    }

    # process easy options
    for (key, opt) in opts_map.items():
        if opt:
            build_specs[key] = opt
            # remove this key from the dict of try-options (overruled)
            try_opts_map.pop(key)

    for (key, opt) in try_opts_map.items():
        if opt:
            build_specs[key] = opt
            # only when a try option is set do we enable generating easyconfigs
            try_to_generate = True

    # process --toolchain --try-toolchain (sanity check done in tools.options)
    tc = options.toolchain or options.try_toolchain
    if tc:
        if options.toolchain and options.try_toolchain:
            print_warning("Ignoring --try-toolchain, only using --toolchain specification.")
        elif options.try_toolchain:
            try_to_generate = True
        build_specs.update({"toolchain_name": tc[0], "toolchain_version": tc[1]})

    # provide both toolchain and toolchain_name/toolchain_version keys
    if "toolchain_name" in build_specs:
        build_specs["toolchain"] = {
            "name": build_specs["toolchain_name"],
            "version": build_specs.get("toolchain_version", None),
        }

    # process --amend and --try-amend
    if options.amend or options.try_amend:

        amends = []
        if options.amend:
            amends += options.amend
            if options.try_amend:
                print_warning("Ignoring options passed via --try-amend, only using those passed via --amend.")
        if options.try_amend:
            amends += options.try_amend
            try_to_generate = True

        for amend_spec in amends:
            # e.g., 'foo=bar=baz' => foo = 'bar=baz'
            param = amend_spec.split("=")[0]
            value = "=".join(amend_spec.split("=")[1:])
            # support list values by splitting on ',' if it's there
            # e.g., 'foo=bar,baz' => foo = ['bar', 'baz']
            if "," in value:
                value = value.split(",")
            build_specs.update({param: value})

    return (try_to_generate, build_specs)
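The --amend parsing convention described in the comments above can be exercised on its own; a standalone sketch with a made-up amend spec:

# Standalone sketch of the amend-spec parsing rules used above (parameter and values are illustrative)
amend_spec = 'configopts=--enable-foo,--enable-bar'
param = amend_spec.split('=')[0]              # 'configopts'
value = '='.join(amend_spec.split('=')[1:])   # '--enable-foo,--enable-bar'
if ',' in value:
    value = value.split(',')                  # ['--enable-foo', '--enable-bar']
print(param, value)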