def validate_compiler(compiler_val, flag):
    if compiler_val != 'llvm':
        import chpl_home_utils
        chpl_home = chpl_home_utils.get_chpl_home()
        comp_makefile = os.path.join(chpl_home, 'make', 'compiler',
                                     'Makefile.{0}'.format(compiler_val))
        if not os.path.isfile(comp_makefile):
            warning('Unknown compiler: "{0}"'.format(compiler_val))

    if chpl_locale_model.get() == 'gpu' and flag == 'target':
        if compiler_val != 'llvm':
            error("The 'gpu' locale model can only be used with "
                  "CHPL_TARGET_COMPILER=llvm.")
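# Illustrative sketch only (not part of the original file): the Makefile
# lookup validate_compiler() relies on, pulled out as a standalone helper so
# it can be exercised without the chplenv modules. The chpl_home path and
# compiler names below are assumptions.
import os

def _has_compiler_makefile(chpl_home, compiler_val):
    # A compiler is "known" when make/compiler/Makefile.<compiler> exists
    # under CHPL_HOME; otherwise validate_compiler() only emits a warning.
    comp_makefile = os.path.join(chpl_home, 'make', 'compiler',
                                 'Makefile.{0}'.format(compiler_val))
    return os.path.isfile(comp_makefile)

# Example: _has_compiler_makefile('/path/to/chapel', 'gnu') should be True in
# a Chapel checkout, while a misspelled compiler name only triggers a warning.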
def get_uniq_cfg_path():
    return '{0}-{1}'.format(third_party_utils.default_uniq_cfg_path(),
                            chpl_locale_model.get())
    Dimension(
        'gmp', 'CHPL_GMP',
        values=['none', 'gmp', 'system'],
        default=chpl_gmp.get(),
        help_text='GMP ({var_name}) values to build.',
    ),
    Dimension(
        'regexp', 'CHPL_REGEXP',
        values=['none', 're2'],
        default=chpl_regexp.get(),
        help_text='Regular expression ({var_name}) values to build.',
    ),
    Dimension(
        'localeModel', 'CHPL_LOCALE_MODEL',
        values=['flat', 'knl', 'numa'],
        default=chpl_locale_model.get(),
        help_text='Locale model ({var_name}) values to build.',
    ),
    Dimension(
        'llvm', 'CHPL_LLVM',
        values=['none', 'llvm', 'system'],
        default=chpl_llvm.get(),
        help_text='LLVM ({var_name}) values to build.',
    ),
]


class Config(object):

    def __init__(self, *args, **kwargs):
        """Initialize new configuration value. Arguments are based on the dimensions
    Dimension(
        'gmp', 'CHPL_GMP',
        values=['none', 'gmp', 'system'],
        default=chpl_gmp.get(),
        help_text='GMP ({var_name}) values to build.',
    ),
    Dimension(
        'regexp', 'CHPL_REGEXP',
        values=['none', 're2'],
        default=chpl_regexp.get(),
        help_text='Regular expression ({var_name}) values to build.',
    ),
    Dimension(
        'localeModel', 'CHPL_LOCALE_MODEL',
        values=['flat', 'numa'],
        default=chpl_locale_model.get(),
        help_text='Locale model ({var_name}) values to build.',
    ),
    Dimension(
        'llvm', 'CHPL_LLVM',
        values=['none', 'llvm'],
        default=chpl_llvm.get(),
        help_text='LLVM ({var_name}) values to build.',
    ),
]


class Config(object):

    def __init__(self, *args, **kwargs):
        """Initialize new configuration value. Arguments are based on the dimensions
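# A minimal sketch (an assumption, not the project's actual definition) of
# the Dimension record the entries above construct: a short name, the CHPL_*
# variable it maps to, the values to sweep over, a default, and help text.
import collections

Dimension = collections.namedtuple(
    'Dimension', ['name', 'var_name', 'values', 'default', 'help_text'])

# With that stand-in, an entry like the 'gmp' one above builds as:
#   Dimension('gmp', 'CHPL_GMP', values=['none', 'gmp', 'system'],
#             default='none', help_text='GMP ({var_name}) values to build.')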
def get(flag='host'):
    if flag == 'host':
        compiler_val = overrides.get('CHPL_HOST_COMPILER', '')
    elif flag == 'target':
        compiler_val = overrides.get('CHPL_TARGET_COMPILER', '')
    else:
        error("Invalid flag: '{0}'".format(flag), ValueError)

    default_llvm = False
    if not compiler_val:
        default_llvm = default_to_llvm(flag)

        # If allowable, look at CC/CXX
        if should_consider_cc_cxx(flag):
            compiler_val = get_compiler_from_cc_cxx()

    if compiler_val:
        validate_compiler(compiler_val, flag)
        return compiler_val

    prgenv_compiler = get_prgenv_compiler()

    if default_llvm:
        compiler_val = 'llvm'

    elif prgenv_compiler != 'none':
        # The cray platforms are a special case in that we want to
        # "cross-compile" by default. (the compiler is different between host
        # and target, but the platform is the same).
        if flag == 'host':
            compiler_val = 'gnu'
        else:
            compiler_val = prgenv_compiler

    else:
        platform_val = chpl_platform.get(flag)
        locale_model_val = chpl_locale_model.get()

        # Normal compilation (not "cross-compiling")
        # inherit the host compiler if the target compiler is not set and
        # the host and target platforms are the same
        if flag == 'target':
            if chpl_platform.get('host') == platform_val:
                compiler_val = get('host')
        elif platform_val.startswith('pwr'):
            compiler_val = 'ibm'
        elif platform_val == 'darwin' or platform_val == 'freebsd':
            if find_executable('clang'):
                compiler_val = 'clang'
            else:
                compiler_val = 'gnu'
        elif locale_model_val == 'gpu':
            if find_executable('clang'):
                compiler_val = 'clang'
            else:
                error("clang not found. The 'gpu' locale model is supported "
                      "with clang only.")
        else:
            compiler_val = 'gnu'

    validate_compiler(compiler_val, flag)

    return compiler_val
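# A simplified, self-contained sketch (names and defaults are hypothetical
# stand-ins, not the chplenv API) of the precedence get() implements above:
# explicit CHPL_HOST/TARGET_COMPILER override first, then CC/CXX when
# allowed, then the LLVM default, then the Cray PrgEnv compiler (gnu on the
# host side), and finally a platform/locale-model based fallback.
def _pick_compiler_sketch(flag='host', override='', cc_cxx='',
                          default_llvm=False, prgenv_compiler='none',
                          platform='linux64', locale_model='flat'):
    if override:                      # CHPL_*_COMPILER wins outright
        return override
    if cc_cxx:                        # then an allowed CC/CXX setting
        return cc_cxx
    if default_llvm:                  # then the LLVM default, if requested
        return 'llvm'
    if prgenv_compiler != 'none':     # Cray PrgEnv: "cross-compile" by default
        return 'gnu' if flag == 'host' else prgenv_compiler
    if platform.startswith('pwr'):    # platform-based fallbacks
        return 'ibm'
    if platform in ('darwin', 'freebsd') or locale_model == 'gpu':
        return 'clang'                # (the real code checks clang is on PATH)
    return 'gnu'

# e.g. _pick_compiler_sketch(flag='target', prgenv_compiler='cray') -> 'cray'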
def get_uniq_cfg_path():
    def_uniq_cfg = third_party_utils.default_uniq_cfg_path()
    lm = chpl_locale_model.get()
    target_mem = chpl_mem.get('target')
    hwloc = chpl_hwloc.get()
    return '{0}-{1}-{2}-{3}'.format(def_uniq_cfg, lm, target_mem, hwloc)
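# Illustrative only: how the pieces combine into a unique configuration
# directory name. The component values below are hypothetical stand-ins for
# what the chplenv modules would report on a given machine.
def _example_uniq_cfg_path():
    def_uniq_cfg = 'linux64-gnu'   # third_party_utils.default_uniq_cfg_path()
    lm = 'flat'                    # chpl_locale_model.get()
    target_mem = 'jemalloc'        # chpl_mem.get('target')
    hwloc = 'hwloc'                # chpl_hwloc.get()
    return '{0}-{1}-{2}-{3}'.format(def_uniq_cfg, lm, target_mem, hwloc)

# _example_uniq_cfg_path() -> 'linux64-gnu-flat-jemalloc-hwloc'
# The shorter get_uniq_cfg_path() variant earlier appends only the locale
# model to the same default third-party path.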
def get():
    if chpl_locale_model.get() == 'gpu':
        return 'cuda'
    else:
        return 'none'
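# Illustrative stand-in (not part of the original file) showing the mapping
# this getter implements, with chpl_locale_model.get() replaced by a
# parameter so it can be run on its own.
def _gpu_runtime_for(locale_model):
    # Only the 'gpu' locale model selects a GPU runtime ('cuda'); every
    # other locale model gets 'none'.
    return 'cuda' if locale_model == 'gpu' else 'none'

# _gpu_runtime_for('gpu') -> 'cuda';  _gpu_runtime_for('flat') -> 'none'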