def webidl_test(self, command_context, **kwargs):
    """Run the WebIDL parser tests from inside the objdir.

    The in-tree PLY sources are put on sys.path, the objdir is created if
    it does not exist yet (Taskcluster test runs start without one), and
    the working directory is switched to the objdir so the parser's cached
    grammar files are written there instead of wherever we happen to be
    running from.
    """
    # PLY ships in-tree under other-licenses/ply.
    ply_dir = os.path.join(command_context.topsrcdir, "other-licenses", "ply")
    sys.path.insert(0, ply_dir)

    # Ensure the topobjdir exists. On a Taskcluster test run there won't be
    # an objdir yet.
    mkdir(command_context.topobjdir)

    # Drop the cached grammar bits in the objdir, not the current directory.
    os.chdir(command_context.topobjdir)

    if kwargs["verbose"] is None:
        kwargs["verbose"] = False

    # The cached grammar file is created in the objdir and then loaded as a
    # python module, so the objdir must be on the module search path too.
    sys.path.insert(0, command_context.topobjdir)
    import runtests

    return runtests.run_tests(kwargs["tests"], verbose=kwargs["verbose"])
def webidl_test(self, **kwargs):
    """Run the WebIDL parser tests from inside the objdir.

    Puts the in-tree PLY sources on sys.path and switches the working
    directory to the objdir so the parser's cached grammar files are
    written there rather than wherever we happen to be running from.
    """
    ply_dir = os.path.join(self.topsrcdir, 'other-licenses', 'ply')
    sys.path.insert(0, ply_dir)

    # Drop the cached grammar bits in the objdir, not the current directory.
    os.chdir(self.topobjdir)

    # The cached grammar file is created in the objdir and then loaded as a
    # python module, so the objdir must be on the module search path too.
    sys.path.insert(0, self.topobjdir)
    import runtests

    return runtests.run_tests(kwargs["tests"], verbose=kwargs["verbose"])
def webidl_test(self, verbose=False):
    """Run the full WebIDL parser test suite from inside the objdir.

    Puts the in-tree PLY sources on sys.path and chdirs into the objdir so
    the parser's cached grammar files land there instead of the current
    working directory.
    """
    ply_dir = os.path.join(self.topsrcdir, 'other-licenses', 'ply')
    sys.path.insert(0, ply_dir)

    # Drop the cached grammar bits in the objdir, not the current directory.
    os.chdir(self.topobjdir)

    # The cached grammar file is created in the objdir and then loaded as a
    # python module, so the objdir must be on the module search path too.
    sys.path.insert(0, self.topobjdir)
    from runtests import run_tests

    # None means "run all tests" here — TODO confirm against runtests.
    return run_tests(None, verbose=verbose)
def run_cov():
    """Run the test suite under coverage and report the results.

    Coverage is measured only for the ``b2wars/apps`` and ``b2wars/tests``
    packages. Terminal and HTML reports are produced only when every test
    passed, so a red run never overwrites a previous good report.
    """
    # Restrict measurement to the b2wars app and test packages.
    cov = coverage.Coverage(source=['b2wars/apps', 'b2wars/tests'])
    cov.start()

    # `failures` is presumably the number of failed tests — verify against
    # the run_tests implementation.
    failures = run_tests(verbosity=1, interactive=False, failfast=True)

    cov.stop()
    cov.save()

    # Report only when there were no failures. Idiomatic truthiness check;
    # the original wrapped this in a redundant bool() call.
    if not failures:
        # Show report on terminal.
        cov.report()
        # Save HTML report in the htmlcov folder.
        cov.html_report(directory='htmlcov')
#!/usr/bin/env python
from runtests import run_tests

if __name__ == '__main__':
    # Exercise proxy-custom-header IP detection under its dedicated settings.
    labels = ['axes.tests.GetIPProxyCustomHeaderTest']
    run_tests('axes.test_settings_proxy_custom_header', labels)
def build_version(ver, skip_release=False):
    """Build SumatraPDF at svn revision `ver`, upload artifacts to S3 and
    run the tests.

    Unless `skip_release` is set, a release build is produced first; on
    newer revisions an /analyze build runs as well. Build logs, analyze
    reports, stats, the buildbot index page and size data are uploaded to
    S3. If the release build failed, a failure e-mail is sent and tests
    are skipped; otherwise the test suite runs and any error is uploaded
    and e-mailed.
    """
    print("Building version %s" % ver)
    clean_release()
    # a hack: checkin_comment_for_ver() might call svn log, which doesn't like
    # unversioned directories (like obj-rel or vs-premake), so we call it here,
    # after clean, to cache the result
    checkin_comment_for_ver(ver)
    svn_update_to_ver(ver)
    s3dir = "sumatrapdf/buildbot/%s/" % ver
    stats = Stats()
    # only run /analyze on newer builds since we didn't have the necessary
    # makefile logic before
    run_analyze = int(ver) >= g_first_analyze_build

    if not skip_release:
        start_time = datetime.datetime.now()
        build_release(stats, ver)
        dur = datetime.datetime.now() - start_time
        print("%s for release build" % str(dur))
        if stats.rel_failed:
            # don't bother running analyze if release failed
            run_analyze = False
            s3.upload_data_public_with_content_type(
                stats.rel_build_log, s3dir + "release_build_log.txt",
                silent=True)

    if not stats.rel_failed:
        build_and_upload_efi_out(ver)

    if run_analyze:
        start_time = datetime.datetime.now()
        build_analyze(stats, ver)
        dur = datetime.datetime.now() - start_time
        print("%s for analyze build" % str(dur))
        html = gen_analyze_html(stats, ver)
        p = os.path.join(get_logs_cache_dir(), "%s_analyze.html" % str(ver))
        # Use a context manager so the file handle is closed deterministically
        # (the original left the open handle to the garbage collector).
        with open(p, "w") as f:
            f.write(html)
        s3.upload_data_public_with_content_type(
            html, s3dir + "analyze.html", silent=True)

    if not stats.rel_failed:
        build_and_upload_efi_txt_diff(ver)

    # TODO: it appears we might throw an exception after uploading
    # analyze.html but before/during uploading stats.txt. Would have to
    # implement transactional multi-upload to be robust against that, so will
    # just let it be
    stats_txt = stats.to_s()
    s3.upload_data_public_with_content_type(
        stats_txt, s3dir + "stats.txt", silent=True)
    html = build_index_html(stats_for_ver, checkin_comment_for_ver)
    s3.upload_data_public_with_content_type(
        html, "sumatrapdf/buildbot/index.html", silent=True)
    json_s = build_sizes_json(get_stats_cache_dir, stats_for_ver)
    s3.upload_data_public_with_content_type(
        json_s, "sumatrapdf/buildbot/sizes.js", silent=True)

    if stats.rel_failed:
        email_build_failed(ver)
        return  # don't run tests if build fails

    err = runtests.run_tests()
    # Identity comparison with None per PEP 8 (original used `err != None`).
    if err is not None:
        s3.upload_data_public_with_content_type(
            err, s3dir + "tests_error.txt", silent=True)
        email_tests_failed(ver, err)
        print("Tests failed. Error message:\n" + err)
    else:
        print("Tests passed!")
#!/usr/bin/env python
from runtests import run_tests

if __name__ == '__main__':
    # Exercise num-proxies IP detection under its dedicated settings.
    labels = ['axes.tests.GetIPNumProxiesTest']
    run_tests('axes.test_settings_num_proxies', labels)
def webidl_test(self, verbose=False):
    """Run the full WebIDL parser test suite with PLY on sys.path."""
    # PLY ships in-tree under other-licenses/ply.
    ply_dir = os.path.join(self.topsrcdir, 'other-licenses', 'ply')
    sys.path.insert(0, ply_dir)

    from runtests import run_tests

    # None means "run all tests" here — TODO confirm against runtests.
    return run_tests(None, verbose=verbose)
#!/usr/bin/env python
from runtests import run_tests

if __name__ == '__main__':
    # Exercise proxy IP detection under its dedicated settings.
    labels = ['axes.tests.GetIPProxyTest']
    run_tests('axes.test_settings_proxy', labels)