def execute(self):
    """see Context.execute

    Loads the configuration, posts every task generator and prints the
    sorted list of known target names.
    """
    self.load()
    if not self.all_envs:
        self.load_envs()
    self.recurse([self.run_dir])
    self.pre_build()

    # display the time elapsed in the progress bar
    self.timer = Utils.Timer()

    for g in self.groups:
        for tg in g:
            if isinstance(tg, TaskGen.task_gen):
                tg.post()

    try:
        # force the cache initialization
        self.get_tgen_by_name('')
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the lookup failure itself is expected —
        # we only want the side effect of filling task_gen_cache_names
        pass
    lst = list(self.task_gen_cache_names.keys())
    lst.sort()
    for k in lst:
        Logs.pprint('GREEN', k)
def post_test(ctx, appname, dirs=['src'], remove=['*boost*', 'c++*']):
    """Collect lcov coverage after the test run, write an HTML report under
    'coverage/' and leave the build directory.

    :param dirs: source directories passed to lcov via ``-d``
    :param remove: glob patterns stripped from the raw coverage data
    """
    diropts = ''
    for i in dirs:
        diropts += ' -d ' + i
    # 'with' guarantees the three files are closed even if a call fails
    with open('lcov-coverage.log', 'w') as coverage_log, \
            open('coverage.lcov', 'w') as coverage_lcov, \
            open('coverage-stripped.lcov', 'w') as coverage_stripped_lcov:
        try:
            base = '.'
            if g_is_child:
                base = '..'
            subprocess.call(('lcov -c %s -b %s' % (diropts, base)).split(),
                            stdout=coverage_lcov, stderr=coverage_log)
            subprocess.call(['lcov', '--remove', 'coverage.lcov'] + remove,
                            stdout=coverage_stripped_lcov, stderr=coverage_log)
            if not os.path.isdir('coverage'):
                os.makedirs('coverage')
            subprocess.call('genhtml -o coverage coverage-stripped.lcov'.split(),
                            stdout=coverage_log, stderr=coverage_log)
        except Exception:
            # narrowed from a bare 'except:'; coverage stays best-effort but
            # KeyboardInterrupt/SystemExit are no longer swallowed
            Logs.warn('Failed to run lcov, no coverage report will be generated')
    print('')
    Logs.pprint('GREEN', "Waf: Leaving directory `%s'" % os.path.abspath(os.getcwd()))
    top_level = (len(ctx.stack_path) > 1)
    if top_level:
        cd_to_orig_dir(ctx, top_level)
    print('')
    Logs.pprint('BOLD', 'Coverage:', sep='')
    print('<file://%s>\n\n' % os.path.abspath('coverage/index.html'))
def end_msg(self, *k, **kw):
    """Prints the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`"""
    if kw.get("quiet"):
        return
    self.in_msg -= 1
    if self.in_msg:
        return

    result = kw.get("result") or k[0]
    # map the boolean sentinels to their canonical texts/colors;
    # '== True' is kept deliberately (it also matches 1, unlike 'is True')
    if result == True:
        msg, defcolor = "ok", "GREEN"
    elif result == False:
        msg, defcolor = "not found", "YELLOW"
    else:
        msg, defcolor = str(result), "GREEN"
    self.to_log(msg)

    if "color" in kw:
        color = kw["color"]
    elif len(k) > 1 and k[1] in Logs.colors_lst:
        # compatibility waf 1.7: color may be passed positionally
        color = k[1]
    else:
        color = defcolor
    Logs.pprint(color, msg)
def end_msg(self, *k, **kw):
    """Print the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`"""
    if kw.get('quiet', None):
        return

    self.in_msg -= 1
    if self.in_msg:
        return

    result = kw.get('result', None) or k[0]

    defcolor = 'GREEN'
    if result == True:
        msg = 'ok'
    elif result == False:
        msg = 'not found'
        defcolor = 'YELLOW'
    else:
        msg = str(result)

    self.to_log(msg)

    if 'color' in kw:
        color = kw['color']
    elif len(k) > 1 and k[1] in Logs.colors_lst:
        # compatibility waf 1.7
        color = k[1]
    else:
        color = defcolor
    Logs.pprint(color, msg)
def summary(bld):
    """
    Show the status of Google Test

    Parses the failure messages stored in each result's XML and prints them
    as compiler-style 'file(line): error:' diagnostics; raises WafError when
    any test failed.
    """
    lst = getattr(bld, 'utest_results', [])
    if not lst:
        return

    Logs.pprint('CYAN', '[test summary]')

    # hoisted out of the loop: the location pattern is loop-invariant
    loc_re = re.compile(r'^(.*):([0-9]+)$')

    nfails = 0
    for (f, code, out, err, result) in lst:
        fail = int(result.attrib['failures'])
        if fail > 0:
            nfails += fail
            for failure in result.iter('failure'):
                message = failure.attrib['message']
                message_body = '\n'.join(message.split('\n')[1:])
                message = message.split('\n')[0]
                m = loc_re.match(message)
                if m:
                    Logs.error('{}({}): error: {}'.format(m.group(1), int(m.group(2)), message_body))
                else:
                    # fixed: previously crashed with AttributeError when the
                    # first message line was not of the form 'file:line'
                    Logs.error('error: {}'.format(message_body))
    if nfails > 0:
        raise Errors.WafError('test failed')
def summary(bld):
    """Print a pass/fail tally of the unit tests; dump captured output and
    raise WafError when any test failed."""
    lst = getattr(bld, "utest_results", [])
    if not lst:
        return
    total = len(lst)
    fail = len([x for x in lst if x[1]])
    Logs.pprint("CYAN", "test summary")
    Logs.pprint("CYAN", " tests that pass %d/%d" % (total - fail, total))
    for (f, code, out, err) in lst:
        if not code:
            Logs.pprint("GREEN", " %s" % f)
            if isinstance(Options.options.checkfilter, str):
                # fixed: decode before printing, matching the failure branch
                # below — otherwise Python 3 prints the bytes repr (b'...')
                print(out.decode("utf-8"))
    if fail > 0:
        Logs.pprint("RED", " tests that fail %d/%d" % (fail, total))
        for (f, code, out, err) in lst:
            if code:
                Logs.pprint("RED", " %s" % f)
                print(out.decode("utf-8"))
        raise Errors.WafError("test failed")
def run(self, perf_run, available_params, configurations):
    """Configure, build and profile every module configuration in turn.

    For each entry of *configurations*, every flag in *available_params* is
    switched on/off according to the configuration's 'modules' list, a full
    configure/build/run cycle is executed, and the resulting profile files
    are copied under names unique to that configuration.

    NOTE(review): this mutates the global Options.lockfile / Options.options
    state and assumes it is called once per process — confirm before reuse.

    :param perf_run: when true, run the 'perf' command (and keep the detailed
                     JSON profile); otherwise run 'debug'
    """
    # build the test variants in a separate '<bld>_tests' output directory
    test_build_path = self.path.make_node(self.bldnode.name + '_tests')
    Options.lockfile = Options.lockfile + '_tests'
    Options.options.out = test_build_path.abspath()
    Options.options.profiling = self.env.profiling
    Options.options.input_file = os.path.relpath(self.env.INPUT_FILE, self.path.abspath())
    Options.options.reference_file = os.path.relpath(self.env.REFERENCE_FILE, self.path.abspath())
    for configuration in configurations:
        # toggle each module flag for this configuration
        for configuration_param in available_params:
            setattr(Options.options, configuration_param, configuration_param in configuration['modules'])
        Logs.pprint('PINK', 'Testing %s build...' % configuration['id'])
        Scripting.run_command('configure')
        Scripting.run_command('build')
        Scripting.run_command('perf' if perf_run else 'debug')
        # preserve the profile outputs before distclean wipes them
        self.exec_command('cp %s %s' % (test_build_path.find_node('current_profile.txt').abspath(), self.bldnode.make_node('%s_profile.txt' % configuration['id']).abspath()))
        if perf_run:
            self.exec_command('cp %s %s' % (test_build_path.find_node('current_profile_detailed.json').abspath(), self.bldnode.make_node('%s_profile_detailed.json' % configuration['id']).abspath()))
        Scripting.run_command('distclean')
def installlapack(ctx):
    """Download, build and install the netlib BLAS/LAPACK libraries.

    Writes a make.inc tailored to the configured Fortran toolchain, builds
    the 'blaslib' and 'lapacklib' targets, then copies the shared libraries
    into LIBDIR. Raises Errors.WafError when either build fails.
    """
    filen = version + ".tgz"
    atl.installsmthg_pre(ctx, "http://www.netlib.org/lapack/" + filen, filen)
    from waflib import Utils, Errors
    dii = {"FCC": ctx.env.FC,
           "FCFLAGS": " ".join(ctx.env.FCFLAGS + ctx.env.FCFLAGS_fcshlib),
           "FLINKFLAGS": " ".join(ctx.env.FCFLAGS + ctx.env.LINKFLAGS_fcshlib),
           "SO": ctx.env.shsuffix,
           "MFLAG": " ".join(ctx.env.FCFLAGS)}

    Logs.pprint("PINK", "build blas")
    # fixed: 'print >>f, ...' is Python-2-only syntax (SyntaxError on
    # Python 3); use print(..., file=f) and a with-block to close the file
    with open("build/%s/make.inc" % version, "w") as f:
        print(make_inc_blas % dii, file=f)
    cmdline = "cd build/%s; make blaslib" % version
    if ctx.exec_command(cmdline) != 0:
        raise Errors.WafError("Cannot build %s" % version)

    Logs.pprint("PINK", "build lapack")
    with open("build/%s/make.inc" % version, "w") as f:
        print(make_inc_lapack % dii, file=f)
    cmdline = "cd build/%s; make lapacklib" % version
    if ctx.exec_command(cmdline) != 0:
        raise Errors.WafError("Cannot build %s" % version)

    import shutil
    shutil.copyfile("build/%s/liblapack_clik.%s" % (version, ctx.env.shsuffix),
                    osp.join(ctx.env.LIBDIR, "liblapack_clik.%s" % ctx.env.shsuffix))
    shutil.copyfile("build/%s/libblas_clik.%s" % (version, ctx.env.shsuffix),
                    osp.join(ctx.env.LIBDIR, "libblas_clik.%s" % ctx.env.shsuffix))
    do_include(ctx)
def unpack_archive(src_dir_node, name, filename=None, dest_dir_node=None):
    """Extract an archive into the destination directory (once).

    :param src_dir_node: node of the directory containing the archive
    :param name: archive base name; also the expected extracted folder name
    :param filename: archive file name; defaults to '<name>.tar.gz'
    :param dest_dir_node: extraction target; defaults to *src_dir_node*
    :return: node of the extracted directory
    """
    if not dest_dir_node:
        dest_dir_node = src_dir_node
    src_dir = src_dir_node.abspath()
    dest_dir = dest_dir_node.abspath()
    # path is the destination folder where the file is extracted
    path = os.path.join(dest_dir, name)
    if not os.path.isdir(path):  # if not already extracted
        # extract the sources
        os.makedirs(path)
        if not filename:
            filename = "%s.tar.gz" % name
        Logs.pprint("NORMAL", "Unpacking %s" % filename)
        # fixed: the tarfile was never closed (file-handle leak);
        # tarfile supports the context-manager protocol
        with tarfile.open(os.path.join(src_dir, filename)) as t:
            t.extractall(dest_dir)
    node = dest_dir_node.find_dir(name)
    assert node
    return node
def summary(bld):
    """
    Display an execution summary::

        def build(bld):
            bld(features='cxx cxxprogram test', source='main.c', target='app')
            from waflib.Tools import waf_unit_test
            bld.add_post_fun(waf_unit_test.summary)
    """
    results = getattr(bld, 'runner_results', [])
    if not results:
        return
    Logs.pprint('CYAN', 'Execution Summary:')
    failed = [r for r in results if r[1]]
    Logs.pprint('CYAN', ' successful runs %d/%d' % (len(results) - len(failed), len(results)))
    for (filename, return_code, stdout, stderr) in results:
        if return_code == 0:
            Logs.pprint('CYAN', ' %s' % filename)
    if failed:
        Logs.pprint('CYAN', ' failed runs %d/%d' % (len(failed), len(results)))
        for (filename, return_code, stdout, stderr) in results:
            if return_code != 0:
                Logs.pprint('CYAN', ' %s' % filename)
def summary(bld):
    """
    Display an execution summary::

        def build(bld):
            bld(features='cxx cxxprogram test', source='main.c', target='app')
            from waflib.Tools import waf_unit_test
            bld.add_post_fun(waf_unit_test.summary)
    """
    results = getattr(bld, 'utest_results', [])
    if not results:
        return
    Logs.pprint('CYAN', 'execution summary')
    total = len(results)
    tfail = sum(1 for r in results if r[1])
    Logs.pprint('CYAN', ' tests that pass %d/%d' % (total - tfail, total))
    for f, code, out, err in results:
        if not code:
            Logs.pprint('CYAN', ' %s' % f)
    # the 'fail' header is printed even when tfail == 0, as before
    Logs.pprint('CYAN', ' tests that fail %d/%d' % (tfail, total))
    for f, code, out, err in results:
        if code:
            Logs.pprint('CYAN', ' %s' % f)
def apply_intltool_po(self):
    """Create 'po' compilation tasks for every translation listed in the
    LINGUAS file of *podir* and schedule the .mo files for installation."""
    try:
        self.meths.remove('process_source')
    except ValueError:
        pass
    self.ensure_localedir()
    appname = getattr(self, 'appname', getattr(Context.g_module, Context.APPNAME, 'set_your_app_name'))
    podir = getattr(self, 'podir', '.')
    inst = getattr(self, 'install_path', '${LOCALEDIR}')
    linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
    if linguas:
        # fixed: use a with-block (the handle leaked on error) and 'f'
        # instead of shadowing the 'file' builtin
        langs = []
        with open(linguas.abspath()) as f:
            for line in f.readlines():
                if not line.startswith('#'):
                    langs += line.split()
        re_linguas = re.compile('[-a-zA-Z_@.]+')
        for lang in langs:
            m = re_linguas.match(lang)  # fixed: match computed once, not twice
            if m:
                node = self.path.find_resource(os.path.join(podir, m.group() + '.po'))
                task = self.create_task('po', node, node.change_ext('.mo'))
                if inst:
                    filename = task.outputs[0].name
                    (langname, ext) = os.path.splitext(filename)
                    inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
                    self.bld.install_as(inst_file, task.outputs[0], chmod=getattr(self, 'chmod', Utils.O644), env=task.env)
    else:
        Logs.pprint('RED', "Error no LINGUAS file found in po directory")
def run(self):
    """Execute one unit-test binary, capture its output and record the
    (filename, returncode, stdout, stderr) tuple on the build context."""
    filename = self.inputs[0].abspath()
    cwd = self.inputs[0].parent.abspath()
    try:
        proc = Utils.subprocess.Popen(filename, cwd=cwd,
                                      stderr=Utils.subprocess.PIPE,
                                      stdout=Utils.subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
    except OSError:
        Logs.pprint('RED', 'Failed to run test: %s' % filename)
        return

    tup = (filename, proc.returncode, stdout, stderr)
    self.generator.utest_result = tup

    # the results list is shared between parallel test tasks
    with testlock:
        bld = self.generator.bld
        Logs.debug("ut: %r", tup)
        if not hasattr(bld, 'utest_results'):
            bld.utest_results = []
        bld.utest_results.append(tup)
        if not getattr(bld, 'added_post_fun', False):
            bld.add_post_fun(summary)
            bld.added_post_fun = True
def summary(bld):
    """Print the unit-test tally; dump the output of failing tests and raise
    WafError when any test failed."""
    lst = getattr(bld, 'utest_results', [])
    if not lst:
        return
    total = len(lst)
    fail = len([x for x in lst if x[1]])
    Logs.pprint('CYAN', 'test summary')
    Logs.pprint('CYAN', ' tests that pass %d/%d' % (total - fail, total))
    for (f, code, out, err) in lst:
        if not code:
            Logs.pprint('GREEN', ' %s' % f)
            if isinstance(Options.options.checkfilter, str):
                # fixed: decode like the failure branch below — previously
                # Python 3 printed the bytes repr (b'...')
                print(err.decode('utf-8'))
                print(out.decode('utf-8'))
    if fail > 0:
        Logs.pprint('RED', ' tests that fail %d/%d' % (fail, total))
        for (f, code, out, err) in lst:
            if code:
                Logs.pprint('RED', ' %s' % f)
                print(err.decode('utf-8'))
                print(out.decode('utf-8'))
        raise Errors.WafError('test failed')
def execute(self):
    """
    See :py:func:`waflib.Context.Context.execute`.
    """
    self.restore()
    if not self.all_envs:
        self.load_envs()
    self.recurse([self.run_dir])
    self.pre_build()

    # display the time elapsed in the progress bar
    self.timer = Utils.Timer()

    for group in self.groups:
        for tg in group:
            post = getattr(tg, 'post', None)
            if post is not None:
                post()

    try:
        # force the cache initialization
        self.get_tgen_by_name('')
    except Exception:
        pass

    for name in sorted(self.task_gen_cache_names):
        Logs.pprint('GREEN', name)
def serve(ctx):
    """Start the App Engine development server on the built application and
    terminate it cleanly when interrupted with Ctrl-C."""
    ctx.load('python')
    ctx.load('wurf_tools')

    print('Starting Development Server...')

    app_root = ctx.root.find_dir(ctx.env.APPENGINE_APP_ROOT)
    if app_root is None:
        ctx.fatal('Unable to locate application directory ({0})'.format(ctx.env.APPENGINE_APP_ROOT))

    cmd = ctx.env.PYTHON + [ctx.env.APPENGINE_SDK_DEVAPPSERVER,
                            app_root.get_bld().abspath()]
    server = Popen(cmd)
    try:
        server.wait()
    except KeyboardInterrupt:
        Logs.pprint('RED', 'Development Server Interrupted... Shutting Down')
        server.terminate()
def run(self):
    """Aggregate the per-test profiling counters and write both the averaged
    text profile and the detailed JSON profile into the build directory."""
    Logs.pprint('CYAN', "Process data...")
    keys = self.container[0].keys()
    count = len(self.container)

    total = {key: 0 for key in keys}
    detailed = {key: [] for key in keys}
    for test in self.container:
        for key in keys:
            total[key] += test[key]
            detailed[key].append(test[key])

    with open(self.bldnode.make_node('current_profile.txt').abspath(), 'w') as profile:
        profile.write("// Verbose: 1\n")
        profile.write("{0}\n".format(len(self.container[0])))
        for key in keys:
            # average over all recorded test runs
            profile.write("{0} {1}\n".format(key, total[key] / count))

    with open(self.bldnode.make_node('current_profile_detailed.json').abspath(), 'w') as profile:
        json.dump(detailed, profile)
    return 0
def cd_to_build_dir(ctx, appname):
    """Enter the build output directory ('build/<appname>' when invoked from
    a top-level wscript, plain 'build' otherwise) and announce it."""
    orig_dir = os.path.abspath(os.curdir)
    if len(ctx.stack_path) > 1:
        dest = os.path.join('build', appname)
    else:
        dest = 'build'
    os.chdir(dest)
    Logs.pprint('GREEN', "Waf: Entering directory `%s'" % os.path.abspath(os.getcwd()))
def cd_to_build_dir(ctx, appname):
    """Change into the build directory and print a waf-style banner."""
    orig_dir = os.path.abspath(os.curdir)
    dest = os.path.join("build", appname) if len(ctx.stack_path) > 1 else "build"
    os.chdir(dest)
    Logs.pprint("GREEN",
                "Waf: Entering directory `%s'" % os.path.abspath(os.getcwd()))
def end_tests(ctx, appname, name='*'):
    """Report the final pass/fail tally accumulated during the test run."""
    failures = ctx.autowaf_local_tests_failed
    total = ctx.autowaf_local_tests_total
    label = tests_name(ctx, appname, name)
    if failures == 0:
        Logs.pprint('GREEN', '** Passed all %d %s tests' % (total, label))
    else:
        Logs.pprint('RED', '** Failed %d / %d %s tests' % (failures, total, label))
def install_cfitsio(ctx):
    """Download, configure, build and install cfitsio 3.28 into the waf prefix.

    NOTE(review): the whole configure/make sequence is assembled as one shell
    string; quoting assumes compiler paths without spaces — confirm.
    """
    atl.installsmthg_pre(ctx,"ftp://heasarc.gsfc.nasa.gov/software/fitsio/c/cfitsio3280.tar.gz","cfitsio3280.tar.gz")
    # force CC/CXX to the configured compiler plus machine options
    CCMACRO = "\"%s %s\""%(ctx.env.CC[0],ctx.env.mopt)
    CCMACRO = "CC=%s CXX=%s "%(CCMACRO,CCMACRO)
    CPPMACRO = "CPP=\"%s -E\" CXXCPP=\"g++ -E\" "%(ctx.env.CC[0])
    # configure, clean, parallel build (static then shared), install — all in
    # a single shell invocation so the 'cd' applies to every step
    cmdline = "cd build/%s; ./configure --prefix=%s %s %s %s; make clean;make -j %d ;make -j %d shared;make install"%("cfitsio",ctx.env.mprefix,"",CCMACRO, CPPMACRO,ctx.options.jobs,ctx.options.jobs)
    Logs.pprint("PINK",cmdline)
    if ctx.exec_command(cmdline)!=0:
        raise Errors.WafError("Cannot build %s"%"cfitsio")
def write(self, node):
    """Serialize the XML document to *node*, skipping the write when the
    pretty-printed content is identical to what is already on disk."""
    try:
        current = node.read()
    except IOError:
        # node does not exist yet — treat as empty so we always write
        current = ''
    pretty = self.document.toprettyxml()
    if current != pretty:
        Logs.pprint('NORMAL', 'writing %s' % node.name)
        node.write(pretty)
def write(self, nodes):
    """Write the generated project content to nodes[0] unless it is already
    up to date on disk; the project buffer is always closed afterwards."""
    content = self.vcproj.file.getvalue()
    try:
        if nodes[0].read() != content:
            Logs.pprint('NORMAL', 'writing %s' % nodes[0].abspath())
            nodes[0].write(content)
    except Exception:
        # reading failed (e.g. the file does not exist) — write unconditionally
        nodes[0].write(content)
    self.vcproj.close()
def apidoc(ctx):
    """generate API reference documentation"""
    ctx = BuildContext()  # create our own context to have ctx.top_dir
    basedir = ctx.top_dir
    doxygen = _find_program(ctx, 'doxygen')
    doxyfile = '%s/doc/Doxyfile' % ctx.out_dir
    Logs.pprint('CYAN', 'Generating API documentation')
    if ctx.exec_command('%s %s' % (doxygen, doxyfile)) != 0:
        raise WafError('Generating API documentation failed')
def export(self):
    """Write the generated content to its target node and report it; a
    missing content or node silently skips the export."""
    content = self.get_content()
    node = self.make_node() if content else None
    if not node:
        return
    node.write(content)
    Logs.pprint('YELLOW', 'exported: %s' % node.abspath())
def apply_intltool_po(self):
    """
    Create tasks to process po files::

        def build(bld):
            bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")

    The relevant task generator arguments are:

    :param podir: directory of the .po files
    :type podir: string
    :param appname: name of the application
    :type appname: string
    :param install_path: installation directory
    :type install_path: string

    The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process.
    """
    try:
        self.meths.remove('process_source')
    except ValueError:
        pass

    if not self.env.LOCALEDIR:
        self.env.LOCALEDIR = self.env.PREFIX + '/share/locale'

    appname = getattr(self, 'appname', 'set_your_app_name')
    podir = getattr(self, 'podir', '')
    inst = getattr(self, 'install_path', '${LOCALEDIR}')

    linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
    if linguas:
        # scan LINGUAS file for locales to process
        # fixed: with-block (the handle leaked on error) and 'f' instead of
        # shadowing the 'file' builtin
        langs = []
        with open(linguas.abspath()) as f:
            for line in f.readlines():
                # ignore lines containing comments
                if not line.startswith('#'):
                    langs += line.split()
        re_linguas = re.compile('[-a-zA-Z_@.]+')
        for lang in langs:
            # Make sure that we only process lines which contain locales
            m = re_linguas.match(lang)  # fixed: match computed once, not twice
            if m:
                node = self.path.find_resource(os.path.join(podir, m.group() + '.po'))
                task = self.create_task('po', node, node.change_ext('.mo'))

                if inst:
                    filename = task.outputs[0].name
                    (langname, ext) = os.path.splitext(filename)
                    inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
                    self.bld.install_as(inst_file, task.outputs[0], chmod=getattr(self, 'chmod', Utils.O644), env=task.env)
    else:
        Logs.pprint('RED', "Error no LINGUAS file found in po directory")
def configure(conf):
    """Load the C/C++ compilers and probe for zlib and (optionally) ibverbs;
    fall back to the bundled ibverbs when OFED is not installed."""
    conf.load('g++')
    conf.load('gcc')
    conf.find_program('ldd')
    conf.check_cxx(lib='z',
                   errmsg=missing_pkg_msg(fedora='zlib-devel', ubuntu='zlib1g-dev'))
    if conf.check_ofed(mandatory=False):
        conf.check_cxx(lib='ibverbs',
                       errmsg='Could not find library ibverbs, will use internal version.',
                       mandatory=False)
    else:
        Logs.pprint('YELLOW',
                    'Warning: will use internal version of ibverbs. If you need to use Mellanox NICs, install OFED:\n' +
                    'https://trex-tgn.cisco.com/trex/doc/trex_manual.html#_mellanox_connectx_4_support')
def run(self):
    """Run the configured 'size' tool on the built binary and report the app
    RAM footprint plus the remaining heap space."""
    src_path = self.inputs[0].abspath()
    size_output = self.generator.bld.cmd_and_log(
        [self.env.SIZE, src_path],
        quiet=waflib.Context.BOTH,
        output=waflib.Context.STDOUT)

    # second line of 'size' output holds the text/data/bss columns
    fields = size_output.splitlines()[1].split()[:3]
    text_size, data_size, bss_size = (int(v) for v in fields)

    app_ram_size = data_size + bss_size + text_size
    max_app_ram = inject_metadata.MAX_APP_MEMORY_SIZE
    free_size = max_app_ram - app_ram_size

    Logs.pprint('BLUE',
                "Memory usage:\n=============\n"
                "Total app footprint in RAM: %6u bytes / ~%ukb\n"
                "Free RAM available (heap): %6u bytes\n"
                % (app_ram_size, max_app_ram / 1024, free_size))
def _build_summary(bld):
    """Print a per-target build summary table (binary node, sizes, etc.).

    Targets beyond MAX_TARGETS are truncated unless --summary-all is given;
    a note is printed when truncation happened.
    """
    Logs.info('')
    text('BUILD SUMMARY')
    text('Build directory: ', bld.bldnode.abspath())
    targets_suppressed = False
    # resolve the list of task generators to summarize, capped at MAX_TARGETS
    if bld.targets == '*':
        taskgens = bld.get_all_task_gen()
        if len(taskgens) > MAX_TARGETS and not bld.options.summary_all:
            targets_suppressed = True
            taskgens = taskgens[:MAX_TARGETS]
    else:
        targets = bld.targets.split(',')
        if len(targets) > MAX_TARGETS and not bld.options.summary_all:
            targets_suppressed = True
            targets = targets[:MAX_TARGETS]
        taskgens = [bld.get_tgen_by_name(t) for t in targets]
    # collect each target's binary node; targets without a link task are
    # dropped so nodes[i] stays aligned with filtered_taskgens[i]
    nodes = []
    filtered_taskgens = []
    for tg in taskgens:
        if not hasattr(tg, 'build_summary'):
            tg.init_summary_data()
        n = tg.build_summary.get('binary', None)
        if not n:
            t = getattr(tg, 'link_task', None)
            if not t:
                continue
            n = t.outputs[0]
            tg.build_summary['binary'] = n
        nodes.append(n)
        filtered_taskgens.append(tg)
    taskgens = filtered_taskgens
    if nodes:
        # size_summary returns one data dict per node, in the same order
        l = bld.size_summary(nodes)
        for i, data in enumerate(l):
            taskgens[i].build_summary.update(data)
    summary_data_list = [tg.build_summary for tg in taskgens]
    print_table(summary_data_list, bld.env.BUILD_SUMMARY_HEADER)
    if targets_suppressed:
        Logs.info('')
        Logs.pprint(
            'NORMAL',
            'Note: Some targets were suppressed. Use --summary-all if you want information of all targets.',
        )
    # allow a project hook to append extra information
    if hasattr(bld, 'extra_build_summary'):
        bld.extra_build_summary(bld, sys.modules[__name__])
def hwaf_configure(ctx):
    """Recurse into every hwaf package directory, printing a zero-padded
    [i/n] progress header before configuring each package."""
    dirs = ctx.hwaf_pkg_dirs()
    npkgs = len(dirs)
    width = len("%s" % npkgs)
    # e.g. npkgs=12 -> "[%02d/%02d]"
    hdr = "[%%0%dd/%%0%dd]" % (width, width)
    for i, d in enumerate(dirs):
        pkg = "[%s]:" % ctx.hwaf_pkg_name(d)
        msg.pprint('NORMAL', hdr % (i + 1, npkgs), sep='')
        msg.pprint('GREEN', "%s configuring..." % pkg.ljust(60))
        ctx.recurse(d)
    return
def test_summary(bld):
    """Print per-test results (including captured output when verbose or on
    failure) and abort the build if any test failed."""
    from io import BytesIO
    import sys

    if not hasattr(bld, 'utest_results'):
        Logs.info('check: no test run')
        return

    failed = []
    for filename, exit_code, out, err in bld.utest_results:
        Logs.pprint('GREEN' if exit_code == 0 else 'YELLOW',
                    ' %s' % filename, 'returned %d' % exit_code)

        if exit_code != 0:
            failed.append(filename)
        elif not bld.options.check_verbose:
            # passing test, non-verbose run: skip the output dump
            continue

        if len(out):
            for line in BytesIO(out):
                print(" OUT: %s" % line.decode(), end='', file=sys.stderr)
            print()
        if len(err):
            for line in BytesIO(err):
                print(" ERR: %s" % line.decode(), end='', file=sys.stderr)
            print()

    if not failed:
        Logs.info('check: All %u tests passed!' % len(bld.utest_results))
        return

    Logs.error('check: %u of %u tests failed' % (len(failed), len(bld.utest_results)))
    for filename in failed:
        Logs.error(' %s' % filename)
    bld.fatal('check: some tests failed')
def run_local(conf_file, serial=True):
    """Execute the generated run scripts locally; with serial=False every
    script runs in its own thread, started only after all are created."""
    fnames, arguments = _sub_script_local(conf_file)
    threads = []
    for fname, directory in fnames:
        script = "cd " + '"' + directory + '"' + " && " + "./" + fname + ' ' + arguments
        Logs.pprint('NORMAL', "Executing: %s" % script)
        if serial:
            run_local_one(directory, script)
        else:
            threads.append(threading.Thread(target=run_local_one,
                                            args=(directory, script)))
    if not serial:
        for t in threads:
            t.start()
        for t in threads:
            t.join()
def set_exit_code(bld):
    """
    If any of the tests fails, waf will output the corresponding exit code.
    This is useful if you have an automated build system which need
    to report on errors from the tests.
    You may use it like this:

        def build(bld):
            bld(features='cxx cxxprogram test', source='main.c', target='app')
            from waflib.Tools import waf_unit_test
            bld.add_post_fun(waf_unit_test.set_exit_code)
    """
    # NOTE(review): results are unpacked as 3-tuples here while other
    # summaries in this codebase store 4-tuples (..., stdout, stderr) in
    # runner_results — confirm the producer's tuple shape before relying
    # on this hook.
    lst = getattr(bld, "runner_results", [])
    for (cmd, return_code, stdout) in lst:
        if return_code:
            # If this was a "silent" run, we should print the full output
            if bld.has_tool_option("run_silent"):
                Logs.pprint("RED", stdout)
            # bld.fatal aborts on the first failing command
            bld.fatal('Command "{}" failed with return code: {}'.format(
                cmd, return_code))
def run_local(conf_file, serial=True):
    """Run the generated experiment scripts on the local machine, serially
    by default or in parallel threads with serial=False. Requires the json
    support detected at import time."""
    if not json_ok:
        Logs.pprint('RED', 'ERROR: simplejson is not installed and as such you cannot read the json configuration file for running your experiments.')
        return
    fnames, arguments = _sub_script_local(conf_file)
    workers = []
    for fname, directory in fnames:
        cmd = 'cd "%s" && ./%s %s' % (directory, fname, arguments)
        Logs.pprint('NORMAL', "Executing: %s" % cmd)
        if serial:
            run_local_one(directory, cmd)
        else:
            workers.append(threading.Thread(target=run_local_one,
                                            args=(directory, cmd)))
    # no-op when serial: the list is empty
    for w in workers:
        w.start()
    for w in workers:
        w.join()
def configure(conf):
    """Set up the toolchain and check the zlib / ibverbs dependencies,
    warning when only the bundled ibverbs can be used."""
    for tool in ('g++', 'gcc'):
        conf.load(tool)
    conf.find_program('ldd')
    conf.check_cxx(lib='z',
                   errmsg=missing_pkg_msg(fedora='zlib-devel', ubuntu='zlib1g-dev'))
    ofed_ok = conf.check_ofed(mandatory=False)
    if not ofed_ok:
        Logs.pprint(
            'YELLOW',
            'Warning: will use internal version of ibverbs. If you need to use Mellanox NICs, install OFED:\n' +
            'https://trex-tgn.cisco.com/trex/doc/trex_manual.html#_mellanox_connectx_4_support'
        )
    else:
        conf.check_cxx(
            lib='ibverbs',
            errmsg='Could not find library ibverbs, will use internal version.',
            mandatory=False)
def summary(bld):
    """Print an NUnit execution summary and raise WafError when any test
    suite failed."""
    results = getattr(bld, 'utest_results', [])
    if not results:
        return
    total = len(results)
    passed = [r for r in results if not r[1]]
    failed = [r for r in results if r[1]]
    tfail = len(failed)
    Logs.pprint('CYAN', 'execution summary')
    Logs.pprint('CYAN', ' tests that pass %d/%d' % (total - tfail, total))
    for f, code, xml in passed:
        Logs.pprint('CYAN', ' %s%s' % (f.ljust(30), get_nunit_stats(xml)))
    Logs.pprint('CYAN', ' tests that fail %d/%d' % (tfail, total))
    for f, code, xml in failed:
        Logs.pprint('CYAN', ' %-20s%s' % (f.ljust(30), get_nunit_stats(xml)))
    if tfail:
        raise Errors.WafError(msg='%d out of %d test suites failed' % (tfail, total))
def process_removelibrcx(self):
    """Remove the base 'rcx*' static archives from this target's link step,
    warning about the offending use-component in the wscript."""
    # features are also applied to Objects, so we will receive
    # a call for thumb_objs / arm_objs which don't have link task
    if not getattr(self, 'link_task', None):
        return
    # fixed: raw string — '\d' in a plain string is an invalid escape
    # (DeprecationWarning, SyntaxError in future Pythons); compiled once
    # instead of on every iteration.
    # Matches archives named 'rcx...' but intentionally not 'rcx_module*'
    # or 'rcx_netxNNN_2portswitch'.
    rcx_re = re.compile(
        r'rcx(_(vol|bas|mid|midshm|hif_cifx|netx\d+(_physhif|_hif)?))?$')
    # iterate over a snapshot because the real STLIB list is mutated below
    for lib_to_remove in list(self.link_task.env['STLIB']):
        if rcx_re.match(lib_to_remove):
            # remove and warn, because there is a wrong use-component in
            # a wscript for this target
            Logs.pprint(
                'YELLOW',
                "Warning: use-component '{0}' removed from build of target '{1}'"
                .format(lib_to_remove, self.name))
            self.link_task.env['STLIB'].remove(lib_to_remove)
def get_subproject_env(ctx, path, log=False):
    """Return the environment of the subproject that contains *path*.

    The path is made relative to the top directory, then each folder name
    (deepest first) is looked up in ``ctx.all_envs``.

    :raises IndexError: when no folder matches a known environment
                        (callers treat this as "use the top-level env")
    """
    # remove top dir path
    path = str(path)
    if path.startswith(ctx.top_dir):
        # fixed: the trailing-separator check used os.pathsep (the PATH list
        # separator, ':' on POSIX) instead of os.sep (the directory separator)
        if ctx.top_dir[-1] != os.sep:
            path = path[len(ctx.top_dir) + 1:]
        else:
            path = path[len(ctx.top_dir):]
    # iterate through possible subprojects names, deepest folder first
    folders = os.path.normpath(path).split(os.sep)
    for i in range(1, len(folders) + 1):
        name = folders[-i]
        if name in ctx.all_envs:
            if log:
                Logs.pprint('YELLOW', 'env: changed to %s' % name)
            return ctx.all_envs[name]
    if log:
        Logs.pprint('YELLOW', 'env: changed to default env')
    raise IndexError('top env')
def set_exit_code(bld):
    """
    If any of the tests fails waf will exit with that exit code. This is
    useful if you have an automated build system which need to report on
    errors from the tests.
    You may use it like this:

        def build(bld):
            bld(features='cxx cxxprogram test', source='main.c', target='app')
            from waflib.Tools import waf_unit_test
            bld.add_post_fun(waf_unit_test.set_exit_code)
    """
    for (filename, return_code, stdout, stderr) in getattr(bld, 'runner_results', []):
        if return_code:
            bld.fatal(os.linesep.join(assemble_output(stdout, stderr)))
        elif not bld.has_tool_option('run_silent'):
            Logs.pprint('WHITE', os.linesep.join(assemble_output(stdout, stderr)))
def get_svn_rev(conf):
    """Return the SVN revision of the working copy as an int, or 0.

    Tries git-svn mappings first (for git checkouts of an SVN repo), then a
    plain 'svn info'; failures at any step fall through to returning 0.
    """
    def in_git():
        # exit status 0 means we are inside a git work tree
        cmd = 'git ls-files >/dev/null 2>&1'
        return (conf.exec_command(cmd) == 0)

    def in_svn():
        return os.path.exists('.svn')

    # try GIT
    if in_git():
        # several candidate refs, first one that maps to an SVN rev wins
        cmds = ['git svn find-rev HEAD 2>/dev/null',
                'git svn find-rev origin/trunk 2>/dev/null',
                'git svn find-rev trunk 2>/dev/null',
                'git svn find-rev master 2>/dev/null']
        for cmd in cmds:
            try:
                stdout = conf.cmd_and_log(cmd)
                if stdout:
                    return int(stdout.strip())
            except WafError:
                # command failed — try the next candidate ref
                pass
            except ValueError:
                Logs.pprint('RED', 'Unparseable revision number')
    # try SVN
    elif in_svn():
        try:
            # force the C locale so 'Last Changed Rev' is not translated
            _env = None if target_is_win32(conf) else dict(LANG='C')
            stdout = conf.cmd_and_log(cmd='svn info --non-interactive', env=_env)
            lines = stdout.splitlines(True)
            for line in lines:
                if line.startswith('Last Changed Rev'):
                    value = line.split(': ', 1)[1]
                    return int(value.strip())
        except WafError:
            pass
        except (IndexError, ValueError):
            Logs.pprint('RED', 'Unparseable revision number')
    # not under version control, or detection failed
    return 0
def execute(self):
    """
    Entry point

    Builds an Xcode project file: posts every build group so each target's
    process_xcode hook populates the PBXProject, then serializes it to
    '<appname>.xcodeproj/project.pbxproj' in the build directory.
    """
    self.restore()
    if not self.all_envs:
        self.load_envs()
    self.recurse([self.run_dir])

    appname = getattr(
        Context.g_module,
        Context.APPNAME,
        os.path.basename(self.srcnode.abspath())
    )

    p = PBXProject(appname, ("Xcode 3.2", 46), self.env)

    # If we don't create a Products group, then
    # XCode will create one, which entails that
    # we'll start to see duplicate files in the UI
    # for some reason.
    products_group = PBXGroup("Products")
    p.mainGroup.children.append(products_group)

    self.project = p
    self.products_group = products_group

    # post all task generators
    # the process_xcode method above will be called for each target
    if self.targets and self.targets != "*":
        (self._min_grp, self._exact_tg) = self.get_targets()

    self.current_group = 0
    while self.current_group < len(self.groups):
        self.post_group()
        self.current_group += 1

    node = self.bldnode.make_node("%s.xcodeproj" % appname)
    node.mkdir()
    node = node.make_node("project.pbxproj")
    with open(node.abspath(), "w") as f:
        p.write(f)
    Logs.pprint("GREEN", "Wrote %r" % node.abspath())
def post_test(ctx, appname, dirs=['src'], remove=['*boost*', 'c++*']):
    """Capture lcov coverage data after the tests ran, generate an HTML
    report under 'coverage/' and leave the build directory.

    :param dirs: source directories passed to lcov via ``-d``
    :param remove: glob patterns stripped from the raw coverage data
    """
    diropts = ''
    for i in dirs:
        diropts += ' -d ' + i
    # 'with' guarantees the three files are closed even if a call fails
    with open('lcov-coverage.log', 'w') as coverage_log, \
            open('coverage.lcov', 'w') as coverage_lcov, \
            open('coverage-stripped.lcov', 'w') as coverage_stripped_lcov:
        try:
            base = '.'
            if g_is_child:
                base = '..'

            subprocess.call(('lcov -c %s -b %s' % (diropts, base)).split(),
                            stdout=coverage_lcov,
                            stderr=coverage_log)
            subprocess.call(['lcov', '--remove', 'coverage.lcov'] + remove,
                            stdout=coverage_stripped_lcov,
                            stderr=coverage_log)
            if not os.path.isdir('coverage'):
                os.makedirs('coverage')
            subprocess.call(
                'genhtml -o coverage coverage-stripped.lcov'.split(),
                stdout=coverage_log,
                stderr=coverage_log)
        except Exception:
            # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; coverage remains best-effort
            Logs.warn(
                'Failed to run lcov, no coverage report will be generated')

    print('')
    Logs.pprint(
        'GREEN',
        "Waf: Leaving directory `%s'" % os.path.abspath(os.getcwd()))

    top_level = (len(ctx.stack_path) > 1)
    if top_level:
        cd_to_orig_dir(ctx, top_level)

    print('')
    Logs.pprint('BOLD', 'Coverage:', sep='')
    print('<file://%s>\n\n' % os.path.abspath('coverage/index.html'))
def execute(self):
    """Load the configuration, post every task generator and print the
    sorted list of known target names."""
    self.restore()
    if not self.all_envs:
        self.load_envs()
    self.recurse([self.run_dir])
    self.pre_build()
    self.timer = Utils.Timer()

    for group in self.groups:
        for tg in group:
            post = getattr(tg, 'post', None)
            if post is not None:
                post()

    try:
        # expected to fail; only the cache-filling side effect is wanted
        self.get_tgen_by_name('')
    except Errors.WafError:
        pass

    names = sorted(self.task_gen_cache_names.keys())
    for name in names:
        Logs.pprint('GREEN', name)
def build(bld):
    """Top-level build: register pre/post hooks, pull in the external shared
    libraries (zmq and, unless NO_MLX is set, ibverbs) and build all variants.
    """
    global dpdk_includes_verb_path
    bld.add_pre_fun(pre_build)
    bld.add_post_fun(post_build)
    zmq_lib_path = 'external_libs/zmq/'
    bld.read_shlib(name='zmq', paths=[top + zmq_lib_path])
    # NOTE(review): '== False' may be deliberate — waf env lookups can yield
    # [] for unset keys, and '[] == False' is False while 'not []' is True,
    # so do not "simplify" this to 'not bld.env.NO_MLX' without confirming
    # how NO_MLX is assigned.
    if bld.env.NO_MLX == False:
        if bld.env['LIB_IBVERBS']:
            # system-provided ibverbs was found at configure time
            Logs.pprint('GREEN', 'Info: Using external libverbs.')
            bld.read_shlib(name='ibverbs')
        else:
            # fall back to the bundled copy and expose its headers to dpdk
            Logs.pprint('GREEN', 'Info: Using internal libverbs.')
            ibverbs_lib_path = 'external_libs/ibverbs/'
            dpdk_includes_verb_path = ' \n ../external_libs/ibverbs/include/ \n'
            bld.read_shlib(name='ibverbs', paths=[top + ibverbs_lib_path])
            check_ibverbs_deps(bld)
    for obj in build_types:
        build_type(bld, obj)
def cleanup(self):
    '''Deletes a **Visual Studio** solution or project file including
    associated files (e.g. *.ncb*).
    '''
    cwd = self.get_cwd()
    # consolidated from four copy-pasted glob/delete loops; the original
    # extension order is preserved
    for pattern in ('*.user', '*.ncb', '*.suo', '*.sln'):
        for node in cwd.ant_glob(pattern):
            node.delete()
            Logs.pprint('YELLOW', 'removed: %s' % node.abspath())
    node = self.find_node()
    if node:
        node.delete()
        Logs.pprint('YELLOW', 'removed: %s' % node.abspath())
def qsub(conf_file):
    """Generate PBS submission scripts from *conf_file* and submit each one
    with qsub, reporting the scheduler's return code.

    NOTE(review): '#PBS -b stderr' in the template looks like a typo for
    '#PBS -e stderr' (PBS uses -e for the stderr path) — left unchanged
    because the template is runtime data; confirm before fixing.
    """
    # the @placeholders are substituted per-experiment by _sub_script
    tpl = """#!/bin/sh
#? nom du job affiche
#PBS -N @exp
#PBS -o stdout
#PBS -b stderr
#PBS -M @email
# maximum execution time
#PBS -l walltime=@wall_time
# mail parameters
#PBS -m abe
# number of nodes
#PBS -l nodes=@nb_cores:ppn=@ppn
#PBS -l pmem=5200mb -l mem=5200mb
export LD_LIBRARY_PATH=@ld_lib_path
exec @exec
"""
    if not json_ok:
        Logs.pprint('RED', 'ERROR: simplejson is not installed and as such you cannot read the json configuration file for running your experiments.')
        return
    fnames = _sub_script(tpl, conf_file)
    for (fname, directory) in fnames:
        # -d sets the job's working directory
        s = "qsub -d " + directory + " " + fname
        Logs.pprint('NORMAL', 'executing: %s' % s)
        retcode = subprocess.call(s, shell=True, env=None)
        Logs.pprint('NORMAL', 'qsub returned: %s' % str(retcode))
def summary(bld):
    """Report which unit tests passed and failed, dumping the stdout of the
    failing ones, and raise WafError on any failure."""
    results = getattr(bld, 'utest_results', [])
    if not results:
        return
    total = len(results)
    failed = [r for r in results if r[1]]
    Logs.pprint('CYAN', 'test summary')
    Logs.pprint('CYAN', ' tests that pass %d/%d' % (total - len(failed), total))
    for f, code, out, err in results:
        if not code:
            Logs.pprint('GREEN', ' %s' % f)
    if failed:
        Logs.pprint('RED', ' tests that fail %d/%d' % (len(failed), total))
        for f, code, out, err in failed:
            Logs.pprint('RED', ' %s' % f)
            print(out.decode('utf-8'))
        raise Errors.WafError('test failed')
def put_files_cache(self):
    """
    New method for waf Task classes

    Uploads this task's output files to the shared build cache under a key
    derived from the task's uid and signature, then records the signature.
    Skipped when pushing is disabled, the task came from the cache, or any
    output file is missing on disk.
    """
    if WAFCACHE_NO_PUSH or getattr(self, 'cached', None) or not self.outputs:
        return

    files_from = []
    for node in self.outputs:
        path = node.abspath()
        if not os.path.isfile(path):
            # an output is missing — nothing is uploaded at all
            return
        files_from.append(path)

    bld = self.generator.bld
    sig = self.signature()
    # cache key: hex of task uid + signature
    ssig = Utils.to_hex(self.uid() + sig)

    err = cache_command(ssig, files_from, [])
    if err.startswith(OK):
        if WAFCACHE_VERBOSITY:
            Logs.pprint('CYAN', ' Successfully uploaded %s to cache' % files_from)
        else:
            Logs.debug('wafcache: Successfully uploaded %r to cache', files_from)
        if WAFCACHE_STATS:
            self.generator.bld.cache_puts += 1
    else:
        # upload failure is non-fatal: the build result is still valid
        if WAFCACHE_VERBOSITY:
            Logs.pprint('RED', ' Error caching step results %s: %s' % (files_from, err))
        else:
            Logs.debug('wafcache: Error caching results %s: %s', files_from, err)

    bld.task_sigs[self.uid()] = self.cache_sig
def check_pngwriter(conf):
    """Configuration check for the pngwriter library.

    Sets ``conf.env.INCLUDES_PNGWRITER`` / ``LIBPATH_PNGWRITER`` from the
    ``--pngwriter`` option (or the default system paths), verifies the header
    is present and, on success, sets ``PNGWRITER_FOUND`` and
    ``LIB_PNGWRITER``.

    :return: 1 when the header could not be found, None on success
    """
    conf.env['PNGWRITER_FOUND'] = False
    if conf.options.pngwriter:
        conf.env.INCLUDES_PNGWRITER = [conf.options.pngwriter + '/include']
        conf.env.LIBPATH_PNGWRITER = [conf.options.pngwriter + '/lib']
    else:
        conf.env.INCLUDES_PNGWRITER = ['/usr/include', '/usr/local/include']
        conf.env.LIBPATH_PNGWRITER = ['/usr/lib', '/usr/local/lib']
    try:
        conf.start_msg('Checking for pngwriter include')
        conf.env.INCLUDES_PNGWRITER = __find_file_in_list(
            conf, PNGWRITER_HEADER_FILE, conf.env.INCLUDES_PNGWRITER)
        conf.find_file(PNGWRITER_HEADER_FILE, conf.env.INCLUDES_PNGWRITER)
        conf.end_msg('ok')
        if Logs.verbose:
            Logs.pprint('CYAN', ' Paths: %s' % conf.env.INCLUDES_PNGWRITER)
        conf.env['PNGWRITER_FOUND'] = True
        conf.env.LIB_PNGWRITER = ['pngwriter', 'png']
    # BUG FIX: a bare 'except:' also swallowed KeyboardInterrupt/SystemExit,
    # making configure impossible to abort; catch Exception instead (waf's
    # ConfigurationError derives from Exception).
    except Exception:
        conf.end_msg('Not found', 'RED')
        if Logs.verbose:
            traceback.print_exc()
        return 1
def run_tests(ctx, appname, tests, desired_status=0, dirs=['src'], name='*', headers=False):
    """Run every command in *tests* through :func:`run_test` and print a summary.

    :param ctx: waf context, forwarded to run_test
    :param appname: application name used in the summary banner
    :param tests: iterable of test commands (strings or argument lists)
    :param desired_status: exit status that counts as a pass
    :param dirs: source directories, forwarded to run_test
    :param name: label printed in the summary banner
    :param headers: whether run_test prints a header before each test
    """
    # NOTE: the former local 'diropts' was computed but never used; removed.
    failures = 0
    for test in tests:
        if not run_test(ctx, appname, test, desired_status, dirs, test, headers):
            failures += 1
    print('')
    if failures == 0:
        Logs.pprint('GREEN', '** Pass: All %s.%s tests passed' % (appname, name))
    else:
        Logs.pprint(
            'RED', '** FAIL: %d %s.%s tests failed' % (failures, appname, name))
def export(bld):
    '''Exports all C and C++ task generators as **Visual Studio** projects
    and creates a **Visual Studio** solution containing references to
    those project.

    :param bld: a *waf* build instance from the top level *wscript*.
    :type bld: waflib.Build.BuildContext
    '''
    if not bld.options.msdev and not hasattr(bld, 'msdev'):
        return

    # BUG FIX: the previous warning message contained a slur and a grammar
    # error; the wording below keeps the same intent and color.
    Logs.pprint(
        'RED',
        '''This tool is intended only to ease development on Windows.
Don't use it for release builds, as it doesn't enable WinXP compatibility for now!'''
    )

    solution = MsDevSolution(bld)
    targets = get_targets(bld)
    saveenv = bld.env  # root env
    for tgen in bld.task_gen_cache_names.values():
        if targets and tgen.get_name() not in targets:
            continue
        if getattr(tgen, 'msdev_skipme', False):
            continue
        try:
            # a sub-project may carry its own environment; fall back to root
            bld.env = get_subproject_env(bld, tgen.path, True)
        except IndexError:
            bld.env = saveenv
        if set(('c', 'cxx')) & set(getattr(tgen, 'features', [])):
            project = MsDevProject(bld, tgen)
            project.export()

            (name, fname, deps, pid) = project.get_metadata()
            solution.add_project(name, fname, deps, pid)

    solution.export()
def check_freetype(conf):
    """Configuration check for the Freetype2 library.

    Sets ``conf.env.INCLUDES_FREETYPE`` / ``LIBPATH_FREETYPE`` from the
    ``--freetype`` option (or the default system paths), verifies that
    ``ft2build.h`` is present, merges any extra include paths reported by
    ``freetype-config --cflags`` and, on success, sets ``FREETYPE_FOUND``
    and ``LIB_FREETYPE``.

    :return: 1 when freetype could not be found, None on success
    """
    conf.env['FREETYPE_FOUND'] = False
    if conf.options.freetype:
        conf.env.INCLUDES_FREETYPE = [
            conf.options.freetype + '/include',
            conf.options.freetype + '/include/freetype2'
        ]
        conf.env.LIBPATH_FREETYPE = [conf.options.freetype + '/lib']
    else:
        conf.env.INCLUDES_FREETYPE = [
            '/usr/include', '/usr/local/include', '/usr/include/freetype2',
            '/usr/local/include/freetype2'
        ]
        conf.env.LIBPATH_FREETYPE = ['/usr/lib', '/usr/local/lib']
    try:
        conf.start_msg('Checking for Freetype2 include')
        conf.env.INCLUDES_FREETYPE = __find_file_in_list(
            conf, FREETYPE_HEADER_FILE, conf.env.INCLUDES_FREETYPE)
        conf.find_file('ft2build.h', conf.env.INCLUDES_FREETYPE)
        # Freetype-config may provide additional paths that need to be added for freetype to work properly.
        # For this, we trust freetype-config, we will not check whether certain files are actually in this path.
        # BUG FIX: check_output returns bytes on Python 3, so comparing the
        # tokens against the str '-I' never matched; decode to str first.
        output = subprocess.check_output(["freetype-config",
                                          "--cflags"]).decode()
        additional_includes = [
            path[2:] for path in output.split() if path[0:2] == '-I'
        ]
        for include in additional_includes:
            if include not in conf.env.INCLUDES_FREETYPE:
                conf.env.INCLUDES_FREETYPE.append(include)
        conf.end_msg('ok')
        if Logs.verbose:
            Logs.pprint('CYAN', ' Paths: %s' % conf.env.INCLUDES_FREETYPE)
        conf.env['FREETYPE_FOUND'] = True
        conf.env.LIB_FREETYPE = ['freetype']
    except Exception:
        # treat any failure (missing header, missing freetype-config) as
        # "not found"; Exception (not bare except) keeps Ctrl-C working
        conf.end_msg('Not found', 'RED')
        return 1
def run_test(ctx, appname, test, desired_status=0, dirs=['src'], name='', header=False):
    """Run a single test command and report pass/fail.

    :param test: command to run, either a string or a list of arguments
    :param desired_status: exit status that counts as a pass
    :param name: label printed in the pass/fail line
    :param header: when True, print a '** Test' header before running
    :return: True if the command exited with desired_status, else False
    """
    s = test
    if isinstance(test, list):
        # BUG FIX: the original joined the undefined name 'i' (NameError when
        # a list was passed); join the 'test' argument itself.
        s = ' '.join(test)
    if header:
        Logs.pprint('BOLD', '** Test', sep='')
        Logs.pprint('NORMAL', '%s' % s)
    cmd = test
    if Options.options.grind:
        cmd = 'valgrind ' + test
    if subprocess.call(cmd, shell=True) == desired_status:
        Logs.pprint('GREEN', '** Pass %s' % name)
        return True
    else:
        Logs.pprint('RED', '** FAIL %s' % name)
        return False
def summary(bld):
    """
    Display an execution summary::

        def build(bld):
            bld(features='cxx cxxprogram unites', source='main.c', target='app')
            from waflib.Tools import unites
            bld.add_post_fun(unites.summary)
    """
    results = getattr(bld, 'unites_summary', {})
    if not results:
        return

    total = len(results)
    tfail = sum(1 for key in results if results[key])

    # pick the banner color: pink when clean, yellow/red on failures
    if not tfail:
        color = 'PINK'
    elif getattr(Options.options, 'permissive_tests', False):
        color = 'YELLOW'
    else:
        color = 'RED'
    Logs.pprint(color, 'unites: %d of %d tests passed' % (total - tfail, total))
def start_msg(self, *k, **kw):
    """
    Prints the beginning of a 'Checking for xxx' message. See :py:meth:`waflib.Context.Context.msg`
    """
    if kw.get('quiet'):
        return

    msg = kw.get('msg') or k[0]

    # nested start_msg calls are counted and stay silent until end_msg unwinds
    depth = getattr(self, 'in_msg', 0)
    if depth:
        self.in_msg = depth + 1
        return
    self.in_msg = 1

    # widen the justification column to the longest message seen so far
    self.line_just = max(getattr(self, 'line_just', 40), len(msg))

    for x in (self.line_just * '-', msg):
        self.to_log(x)
    Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
def svn_export(bld, target, svn_url, *k, **kw):
    """
    Function to export files from specified svn-location to local directory.

    <b> Mandatory parameter </b>
    @param target \b string: local destination directory to export files from svn
    @param svn_url \b string: svn url
    """
    # make sure the destination exists before looking it up as a waf node
    if not os.path.exists(target):
        os.makedirs(target)

    node = bld.srcnode.find_node(target)
    if node is None:
        Logs.pprint(
            'YELLOW',
            "warning: can't create or find target directory \'%s\'." % (target))
        return
    bld(features="svnexport",
        target=node,
        svn_url=svn_url,
        hidden_from_list=True,
        **kw)
def qsub(conf_file):
    """Submit the experiments described in *conf_file* to a PBS cluster.

    Expands the PBS shell-script template once per experiment (via
    ``_sub_script``), then calls ``qsub -d <dir> <script>`` for each generated
    script and logs the return code.

    :param conf_file: path to the json configuration file describing the jobs
    """
    # BUG FIX: '#PBS -b' is not a PBS directive; stderr redirection is '-e'.
    tpl = """#!/bin/sh
#? nom du job affiche
#PBS -N @exp
#PBS -o stdout
#PBS -e stderr
#PBS -M @email
# maximum execution time
#PBS -l walltime=@wall_time
# mail parameters
#PBS -m abe
# number of nodes
#PBS -l nodes=@nb_cores:ppn=@ppn
#PBS -l pmem=5200mb -l mem=5200mb
export LD_LIBRARY_PATH=@ld_lib_path
exec @exec
"""
    fnames = _sub_script(tpl, conf_file)
    for (fname, directory) in fnames:
        # shell=True because the command is assembled as a single string
        s = "qsub -d " + directory + " " + fname
        Logs.pprint('NORMAL', 'executing: %s' % s)
        retcode = subprocess.call(s, shell=True, env=None)
        Logs.pprint('NORMAL', 'qsub returned: %s' % str(retcode))
def clar(bld, sources=None, sources_ant_glob=None, test_sources_ant_glob=None):
    """Create clar unit-test tasks for every test source matching the glob.

    :param sources: explicit product source nodes compiled into each test
    :param sources_ant_glob: ant glob resolving additional product sources
    :param test_sources_ant_glob: ant glob resolving the test sources (required)
    :raises Exception: when test_sources_ant_glob is not supplied
    """
    if test_sources_ant_glob is None:
        raise Exception()

    product_sources = []
    if sources is not None:
        product_sources.extend(sources)
    if sources_ant_glob is not None:
        product_sources.extend(bld.srcnode.ant_glob(sources_ant_glob))

    # clar's own bundled/generated files must not be treated as tests
    candidates = bld.path.ant_glob(test_sources_ant_glob)
    test_sources = [
        node for node in candidates
        if not os.path.basename(node.abspath()).startswith('clar')
    ]
    Logs.debug("ut: Test sources %r", test_sources)
    if not test_sources:
        Logs.pprint('RED',
                    'No tests found for glob: %s' % test_sources_ant_glob)
    for node in test_sources:
        add_clar_test(bld, node, product_sources)
def apply_intltool_po(self):
    """Create .po -> .mo compilation tasks for each language listed in the
    LINGUAS file of the po directory, and schedule installation of the
    resulting catalogs under the locale directory.
    """
    try:
        self.meths.remove('process_source')
    except ValueError:
        pass

    self.ensure_localedir()

    appname = getattr(
        self, 'appname',
        getattr(Context.g_module, Context.APPNAME, 'set_your_app_name'))
    podir = getattr(self, 'podir', '.')
    inst = getattr(self, 'install_path', '${LOCALEDIR}')

    linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
    if not linguas:
        Logs.pprint('RED', "Error no LINGUAS file found in po directory")
        return

    # BUG FIX: the former code shadowed the 'file' builtin and leaked the
    # handle if reading raised; 'with' guarantees the file is closed.
    langs = []
    with open(linguas.abspath()) as fp:
        for line in fp.readlines():
            if not line.startswith('#'):
                langs += line.split()

    re_linguas = re.compile('[-a-zA-Z_@.]+')
    for lang in langs:
        # match once and reuse it (the original matched the same regex twice)
        match = re_linguas.match(lang)
        if not match:
            continue
        node = self.path.find_resource(
            os.path.join(podir, match.group() + '.po'))
        task = self.create_task('po', node, node.change_ext('.mo'))

        if inst:
            filename = task.outputs[0].name
            (langname, ext) = os.path.splitext(filename)
            inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
            self.bld.install_as(inst_file,
                                task.outputs[0],
                                chmod=getattr(self, 'chmod', Utils.O644),
                                env=task.env)
def execute(self):
    """
    See :py:func:`waflib.Context.Context.execute`.
    """
    self.restore()
    if not self.all_envs:
        self.load_envs()
    self.recurse([self.run_dir])
    self.pre_build()

    # display the time elapsed in the progress bar
    self.timer = Utils.Timer()

    # post every task generator so the name cache below is complete
    for group in self.groups:
        for tgen in group:
            if hasattr(tgen, 'post'):
                tgen.post()

    try:
        # force the cache initialization
        self.get_tgen_by_name('')
    except Exception:
        pass

    # internal targets carrying these suffixes are not listed for the user
    hidden_suffixes = ('_lib', '_src', '_tst')
    for name in sorted(self.task_gen_cache_names.keys()):
        if len(name) > 4 and name.endswith(hidden_suffixes):
            continue
        Logs.pprint('GREEN', name)