def __init__(self, ignore=None, flaky=None, max_logs=20, *args, **kwargs):
    """Lit test step: watch stdio with a LitLogObserver.

    @param ignore: accepted for caller compatibility; unused here.
    @param flaky: accepted for caller compatibility; unused here.
    @param max_logs: cap on the number of per-test logs the observer keeps.
    """
    # None defaults replace the shared mutable [] defaults, which would be
    # one list object reused across every instance of this step.
    Test.__init__(self, *args, **kwargs)
    self.maxLogs = int(max_logs)
    self.logObserver = LitLogObserver(self.maxLogs)
    self.addFactoryArguments(max_logs=max_logs)
    self.addLogObserver('stdio', self.logObserver)
def __init__(self, dbpool=None, test_type=None, test_info="",
             description=None, descriptionDone=None, autoCreateTables=False,
             textLimit=5, testNameLimit=16, parallel=4, logfiles=None,
             lazylogfiles=True,
             warningPattern="MTR's internal check of the test case '.*' failed",
             mtr_subdir="mysql-test", **kwargs):
    """Configure an MTR test step.

    Builds default descriptions (optionally suffixed with the test type),
    forwards the step options to Test, records the MTR-specific settings
    on the instance, and adds 'tests' to the progress metrics.
    """
    if logfiles is None:
        logfiles = {}
    # Default descriptions mention the test type when one was given.
    suffix = [test_type] if test_type else []
    if description is None:
        description = ["testing"] + suffix
    if descriptionDone is None:
        descriptionDone = ["test"] + suffix
    Test.__init__(self, logfiles=logfiles, lazylogfiles=lazylogfiles,
                  description=description, descriptionDone=descriptionDone,
                  warningPattern=warningPattern, **kwargs)
    self.dbpool = dbpool
    self.test_type = test_type
    self.test_info = test_info
    self.autoCreateTables = autoCreateTables
    self.textLimit = textLimit
    self.testNameLimit = testNameLimit
    self.parallel = parallel
    self.mtr_subdir = mtr_subdir
    # Track per-test progress in addition to the base metrics.
    self.progressMetrics += ('tests',)
def __init__(self, fileloc=None, config=None, **kwargs):
    """
    Create the Rpmlint object.

    @type fileloc: str
    @param fileloc: Location glob of the specs or rpms.
    @type config: str
    @param config: path to the rpmlint user config.
    @type kwargs: dict
    @param kwargs: all other keyword arguments.
    """
    # Docstring fix: the last @param mislabelled kwargs as "fileloc".
    Test.__init__(self, **kwargs)
    # Only override the class-level defaults when a value was supplied;
    # self.fileloc/self.config are otherwise expected to come from the
    # class definition.
    if fileloc:
        self.fileloc = fileloc
    if config:
        self.config = config
    self.addFactoryArguments(fileloc=fileloc, config=config)
    self.command = ["rpmlint", "-i"]
    if self.config:
        self.command += ['-f', self.config]
    self.command.append(self.fileloc)
    self.obs = pkgutil.WEObserver()
    self.addLogObserver('stdio', self.obs)
def __init__(self, dbpool=None, test_type="mysql-test-run", test_info="",
             autoCreateTables=False, textLimit=5, testNameLimit=16,
             parallel=4, logfiles=None, lazylogfiles=True, **kwargs):
    """mysql-test-run step: register mysqld error logs and MTR settings.

    @param parallel: number of parallel MTR runners; log files are
        registered for each runner plus the shared (non-parallel) run.
    """
    # Bug fix: the original used a mutable {} default for logfiles and
    # mutated it below, so entries accumulated across ALL instances of
    # this step sharing the one default dict.
    if logfiles is None:
        logfiles = {}
    # Add mysql server logfiles: mtr == 0 is the shared run, 1..parallel
    # are the parallel runners; each run has mysqld instances 1..4.
    for mtr in range(0, parallel + 1):
        for mysqld in range(1, 4 + 1):
            if mtr == 0:
                logname = "mysqld.%d.err" % mysqld
                filename = "mysql-test/var/log/mysqld.%d.err" % mysqld
            else:
                logname = "mysqld.%d.err.%d" % (mysqld, mtr)
                filename = "mysql-test/var/%d/log/mysqld.%d.err" % (mtr, mysqld)
            logfiles[logname] = filename
    Test.__init__(self, logfiles=logfiles, lazylogfiles=lazylogfiles, **kwargs)
    self.dbpool = dbpool
    self.test_type = test_type
    self.test_info = test_info
    self.autoCreateTables = autoCreateTables
    self.textLimit = textLimit
    self.testNameLimit = testNameLimit
    self.parallel = parallel
    self.progressMetrics += ('tests',)
    self.addFactoryArguments(dbpool=self.dbpool,
                             test_type=self.test_type,
                             test_info=self.test_info,
                             autoCreateTables=self.autoCreateTables,
                             textLimit=self.textLimit,
                             testNameLimit=self.testNameLimit,
                             parallel=self.parallel)
def __init__(self, dbpool=None, test_type="mysql-test-run", test_info="",
             autoCreateTables=False, textLimit=5, testNameLimit=16,
             parallel=4, logfiles=None, lazylogfiles=True,
             warningPattern="MTR's internal check of the test case '.*' failed",
             mtr_subdir="mysql-test", **kwargs):
    """MTR test step: forward step options to Test and record MTR settings."""
    # None default instead of a shared mutable {} default dict.
    if logfiles is None:
        logfiles = {}
    Test.__init__(self, logfiles=logfiles, lazylogfiles=lazylogfiles,
                  warningPattern=warningPattern, **kwargs)
    self.dbpool = dbpool
    self.test_type = test_type
    self.test_info = test_info
    self.autoCreateTables = autoCreateTables
    self.textLimit = textLimit
    self.testNameLimit = testNameLimit
    self.parallel = parallel
    self.mtr_subdir = mtr_subdir
    # Track per-test progress in addition to the base metrics.
    self.progressMetrics += ('tests',)
    self.addFactoryArguments(dbpool=self.dbpool,
                             test_type=self.test_type,
                             test_info=self.test_info,
                             autoCreateTables=self.autoCreateTables,
                             textLimit=self.textLimit,
                             testNameLimit=self.testNameLimit,
                             parallel=self.parallel,
                             mtr_subdir=self.mtr_subdir)
def __init__(self, ignore=None, flaky=None, max_logs=20,
             parseSummaryOnly=False, *args, **kwargs):
    """Lit test step with optional summary-only parsing.

    @param ignore: accepted for caller compatibility; unused here.
    @param flaky: accepted for caller compatibility; unused here.
    @param max_logs: cap on per-test logs kept by the observer.
    @param parseSummaryOnly: forwarded to LitLogObserver.
    """
    # None defaults replace the shared mutable [] defaults.
    Test.__init__(self, *args, **kwargs)
    self.maxLogs = int(max_logs)
    self.logObserver = LitLogObserver(self.maxLogs, parseSummaryOnly)
    self.addFactoryArguments(max_logs=max_logs)
    self.addFactoryArguments(parseSummaryOnly=parseSummaryOnly)
    self.addLogObserver('stdio', self.logObserver)
def setTestResults(self, total, failed, passed, total_statements,
                   exec_statements):
    """Record pass/fail counts and accumulate statement-coverage statistics.

    The statement counters are added onto whatever the step status already
    holds, so repeated calls accumulate totals.
    """
    Test.setTestResults(self, total=total, failed=failed, passed=passed)
    status = self.step_status
    for key, delta in (("total-statements", total_statements),
                       ("exec-statements", exec_statements)):
        status.setStatistic(key, status.getStatistic(key, 0) + delta)
def __init__(self, fileloc="*rpm", **kwargs):
    """
    Create the Rpmlint object.

    @type fileloc: str
    @param fileloc: Location glob of the specs or rpms.
    @type kwargs: dict
    @param kwargs: all other keyword arguments.
    """
    # Docstring fix: the second @param mislabelled kwargs as "fileloc".
    Test.__init__(self, **kwargs)
    self.command = ["/usr/bin/rpmlint", "-i"]
    self.command.append(fileloc)
def __init__(self, source, configure="./configure", configureEnv=None,
             configureFlags=None, reconf=None, compile=None, test=None,
             distcheck=None):
    """Autoconf-style build factory.

    Runs optional autoreconf, then configure, make, make check and
    make distcheck; any stage set to None is skipped.
    """
    configureEnv = {} if configureEnv is None else configureEnv
    configureFlags = [] if configureFlags is None else configureFlags
    compile = ["make", "all"] if compile is None else compile
    test = ["make", "check"] if test is None else test
    distcheck = ["make", "distcheck"] if distcheck is None else distcheck
    BuildFactory.__init__(self, [source])
    # reconf=True selects the default autoreconf invocation.
    if reconf is True:
        reconf = ["autoreconf", "-si"]
    if reconf is not None:
        self.addStep(
            ShellCommand(name="autoreconf", command=reconf, env=configureEnv))
    if configure is not None:
        # we either need to wind up with a string (which will be
        # space-split), or with a list of strings (which will not). The
        # list of strings is the preferred form.
        if isinstance(configure, str):
            if configureFlags:
                assert " " not in configure  # please use list instead
                command = [configure] + configureFlags
            else:
                command = configure
        else:
            assert isinstance(configure, (list, tuple))
            command = configure + configureFlags
        self.addStep(Configure(command=command, env=configureEnv))
    # Remaining stages share the same pattern: skip when None.
    for step_class, cmd in ((Compile, compile), (Test, test),
                            (Test, distcheck)):
        if cmd is not None:
            self.addStep(step_class(command=cmd, env=configureEnv))
def make_arm():
    """Build factory for the ARM builder: checkout, cmake configure
    (Qt disabled), ninja build and unit tests."""
    f = BuildFactory()
    f.addStep(
        GitNoBranch(repourl="https://github.com/dolphin-emu/dolphin.git",
                    progress=True,
                    mode="incremental"))
    # Out-of-tree build directory.
    f.addStep(
        ShellCommand(command=["mkdir", "-p", "build"],
                     logEnviron=False,
                     description="mkbuilddir",
                     descriptionDone="mkbuilddir"))
    # NOTE(review): command is a single shell string here, unlike the list
    # form used by the other steps in this factory.
    f.addStep(
        ShellCommand(command="cmake -DENABLE_QT=OFF -GNinja ..",
                     workdir="build/build",
                     description="configuring",
                     descriptionDone="configure",
                     haltOnFailure=True))
    f.addStep(
        Compile(command=["ninja"],
                workdir="build/build",
                description="building",
                descriptionDone="build",
                haltOnFailure=True))
    f.addStep(
        Test(command=["ninja", "unittests"],
             workdir="build/build",
             description="testing",
             descriptionDone="test",
             haltOnFailure=True))
    return f
def __init__(self, source, configure="./configure", configureEnv=None,
             configureFlags=None, compile=None, test=None):
    """Autoconf-style build factory: configure, make, make check.

    Stages set to None (configure/compile/test) are skipped.
    """
    # Fixes: mutable {} / [] defaults replaced with None sentinels,
    # type(...) is str replaced with isinstance, and the negation moved
    # into the idiomatic "not in" form.
    if configureEnv is None:
        configureEnv = {}
    if configureFlags is None:
        configureFlags = []
    if compile is None:
        compile = ["make", "all"]
    if test is None:
        test = ["make", "check"]
    BuildFactory.__init__(self, [source])
    if configure is not None:
        # we either need to wind up with a string (which will be
        # space-split), or with a list of strings (which will not). The
        # list of strings is the preferred form.
        if isinstance(configure, str):
            if configureFlags:
                assert " " not in configure  # please use list instead
                command = [configure] + configureFlags
            else:
                command = configure
        else:
            assert isinstance(configure, (list, tuple))
            command = configure + configureFlags
        self.addStep(Configure(command=command, env=configureEnv))
    if compile is not None:
        self.addStep(Compile(command=compile))
    if test is not None:
        self.addStep(Test(command=test))
def make_dolphin_debian_build(mode="normal"):
    """Debian/Linux build factory.

    *mode* is a comma-separated flag list; recognised flags here are
    "debug", "pr" and "fifoci_golden".
    """
    f = BuildFactory()
    mode = mode.split(",")
    debug = "debug" in mode
    pr = "pr" in mode
    fifoci_golden = "fifoci_golden" in mode
    f.addStep(
        GitNoBranch(repourl="https://github.com/dolphin-emu/dolphin.git",
                    progress=True,
                    mode="incremental"))
    f.addStep(
        ShellCommand(command=["mkdir", "-p", "build"],
                     logEnviron=False,
                     description="mkbuilddir",
                     descriptionDone="mkbuilddir"))
    cmake_cmd = ["cmake", "..", "-GNinja"]
    if debug:
        # Debug-flavoured builds get the FASTLOG cmake switch.
        cmake_cmd.append("-DFASTLOG=ON")
    cmake_cmd.append("-DDISTRIBUTOR=dolphin-emu.org")
    f.addStep(
        ShellCommand(command=cmake_cmd,
                     workdir="build/build",
                     description="configuring",
                     descriptionDone="configure",
                     haltOnFailure=True))
    f.addStep(
        Compile(command=["ninja"],
                workdir="build/build",
                description="building",
                descriptionDone="build",
                haltOnFailure=True))
    f.addStep(
        Test(command=["ninja", "unittests"],
             workdir="build/build",
             description="testing",
             descriptionDone="test",
             haltOnFailure=True))
    if fifoci_golden:
        # Chain FifoCI after a green build: PR builds use the PR scheduler,
        # otherwise trigger only for master.
        if pr:
            f.addStep(
                Trigger(schedulerNames=["pr-fifoci-lin"],
                        copy_properties=[
                            "pr_id", "repo", "headrev", "branchname",
                            "shortrev"
                        ]))
        else:
            f.addStep(
                TriggerIfBranch(schedulerNames=["fifoci-lin"],
                                branchList=["master"],
                                copy_properties=["shortrev"]))
    return f
def EmacsTest(*args, **kw):
    """Return a Test step that runs Emacs with *args plus every keyword
    argument rendered as a ``--name value`` flag pair.

    Note that ``timeout`` (default 40) is both passed to the Test step and,
    like any other keyword, appended to the command line as a flag.
    """
    flags = []
    for key, value in kw.items():
        flags.extend(['--' + key, value])
    command = [Emacs(), '--no-splash', '--debug-init'] + list(args) + flags
    return Test(command=command,
                env={'HOME': WithProperties('%(FakeHome)s')},
                timeout=kw.get('timeout', 40),
                logfiles={'testlog': {'filename': 'test.log'}})
def __init__(self, python, db, verbosity=2, **kwargs):
    """Run the Django test suite under the virtualenv matching
    *python* and *db* (database backend name/version)."""
    interpreter = "../venv-python%s-%s%s/bin/python" % (python, db.name,
                                                        db.version)
    kwargs["command"] = [
        interpreter,
        "tests/runtests.py",
        "--settings=testsettings",
        "--verbosity=%s" % verbosity,
    ]
    kwargs["env"] = {"PYTHONPATH": "$PWD:$PWD/tests", "LC_ALL": "en_US.utf8"}
    Test.__init__(self, **kwargs)
    # Suppress anything on lines starting with "test_" so that test-case
    # names containing the word "warning" are not spuriously counted as
    # build warnings.
    self.addSuppression([(None, "^test_", None, None)])
    self.addFactoryArguments(python=python, db=db, verbosity=verbosity)
def describe(self, done=False):
    """Extend the base description with per-result-name counts collected
    by the log observer; unknown result names are reported explicitly."""
    description = Test.describe(self, done)
    # items() instead of the Python 2-only iteritems(); behaves the same
    # on Python 2 and keeps the step working on Python 3.
    for name, count in self.logObserver.resultCounts.items():
        if name in self.resultNames:
            description.append('{0} {1}'.format(count, self.resultNames[name]))
        else:
            description.append('Unexpected test result output ' + name)
    return description
def get_test_step(abi):
    """Return a Test step that runs pytest under the interpreter for *abi*.

    For the legacy ABIs the step only runs on the release/1.10.x branch.
    """
    # abi strings look like "cp27-cp27m": characters 2 and 3 are the
    # major/minor version digits of the interpreter.
    interpreter = "/usr/local/bin/python%s.%s" % (abi[2], abi[3])
    command = [interpreter, "-B", "-m", "pytest", "tests"]
    legacy_abis = ('cp27-cp27m', 'cp34-cp34m', 'cp35-cp35m')
    do_step = is_branch('release/1.10.x') if abi in legacy_abis else True
    return Test(name='test ' + abi,
                command=command,
                env={"PYTHONPATH": outputdir},
                haltOnFailure=True,
                doStepIf=do_step)
def describe(self, done=False):
    """Append a code-coverage summary to the base description once the
    step has finished and coverage statistics were recorded."""
    description = Test.describe(self, done)
    if not done:
        return description
    status = self.step_status
    if status.hasStatistic("total-statements"):
        total = status.getStatistic("total-statements")
        executed = status.getStatistic("exec-statements")
        if total > 0:
            coverage_pct = float(executed) / float(total) * 100
            description.append('%d%% code coverage (%d / %d statements)' %
                               (coverage_pct, executed, total))
    return description
def make_dolphin_freebsd_build(mode="normal"):
    """FreeBSD build factory: cmake/ninja build plus unit tests.

    *mode* is split like the other factories but no flag is currently
    acted upon here.
    """
    f = BuildFactory()
    mode = mode.split(",")
    f.addStep(
        GitNoBranch(repourl="https://github.com/dolphin-emu/dolphin.git",
                    progress=True,
                    mode="incremental"))
    f.addStep(
        ShellCommand(command=["mkdir", "-p", "build"],
                     logEnviron=False,
                     description="mkbuilddir",
                     descriptionDone="mkbuilddir"))
    cmake_cmd = ["cmake", "..", "-GNinja"]
    cmake_cmd.append("-DDISTRIBUTOR=dolphin-emu.org")
    f.addStep(
        ShellCommand(command=cmake_cmd,
                     workdir="build/build",
                     description="configuring",
                     descriptionDone="configure",
                     haltOnFailure=True,
                     # Point the configure run at the wxWidgets GTK2 config.
                     env={"WX_CONFIG": "wxgtk2u-3.0-config"}))
    f.addStep(
        Compile(command=["ninja"],
                workdir="build/build",
                description="building",
                descriptionDone="build",
                haltOnFailure=True))
    f.addStep(
        Test(command=["ninja", "unittests"],
             workdir="build/build",
             description="testing",
             descriptionDone="test",
             haltOnFailure=True))
    return f
def evaluateCommand(self, cmd):
    """Report FAILURE when the failure flag was raised, otherwise defer
    to the base class evaluation of the command."""
    return FAILURE if self.failed__ else Test.evaluateCommand(self, cmd)
def __init__(self, **kwargs): self.failed__ = False # there's a 'failed' method in Test, ouch! Test.__init__(self, **kwargs)
def evaluateCommand(self, cmd):
    # An explicit result recorded on this step wins; otherwise fall back
    # to the base class evaluation of the command's outcome.
    if self.__result is not None:
        return self.__result
    return Test.evaluateCommand(self, cmd)
def __init__(self, **kwargs):
    # Result override consulted by evaluateCommand(); None means
    # "defer to the base class evaluation".
    self.__result = None
    Test.__init__(self, **kwargs)
def __init__(self, ignore=None, flaky=None, max_logs=20, *args, **kwargs):
    """DejaGNU test step: attach a DejaGNULogObserver to gdb.log.

    @param ignore: accepted for caller compatibility; unused here.
    @param flaky: accepted for caller compatibility; unused here.
    @param max_logs: accepted for caller compatibility; unused here.
    """
    # None defaults replace the shared mutable [] defaults.
    Test.__init__(self, *args, **kwargs)
    self.logObserver = DejaGNULogObserver()
    self.addLogObserver("gdb.log", self.logObserver)
def __init__(self, source, python="python", test=None):
    """Distutils build factory: run "setup.py build" with *python*,
    then the optional *test* command."""
    BuildFactory.__init__(self, [source])
    self.addStep(Compile(command=[python, "./setup.py", "build"]))
    if test is not None:
        self.addStep(Test(command=test))
def __init__(self, ignore=None, flaky=None, max_logs=20, *args, **kwargs):
    """DejaGNU test step: attach a DejaGNULogObserver to gdb.log.

    @param ignore: accepted for caller compatibility; unused here.
    @param flaky: accepted for caller compatibility; unused here.
    @param max_logs: accepted for caller compatibility; unused here.
    """
    # None defaults replace the shared mutable [] defaults.
    Test.__init__(self, *args, **kwargs)
    self.logObserver = DejaGNULogObserver()
    self.addLogObserver('gdb.log', self.logObserver)
def describe(self, done=False):
    """Extend the base description with per-result-name counts collected
    by the log observer; unknown result names are reported explicitly."""
    description = Test.describe(self, done)
    # items() instead of the Python 2-only iteritems(), and guard the
    # resultNames lookup so an unexpected result name cannot raise
    # KeyError (matching the sibling describe() implementation in this
    # file).
    for name, count in self.logObserver.resultCounts.items():
        if name in self.resultNames:
            description.append('{0} {1}'.format(count, self.resultNames[name]))
        else:
            description.append('Unexpected test result output ' + name)
    return description
def make_dolphin_win_build(build_type, mode="normal"):
    """Windows build factory: msbuild, unit tests, signing, 7z packaging,
    upload, website/update-manifest notification and cleanup.

    *build_type* is the msbuild configuration; *mode* is a comma-separated
    flag list ("normal", "debug", "wip", "pr", "fifoci_golden").
    """
    f = BuildFactory()
    mode = mode.split(",")
    normal = "normal" in mode
    debug = "debug" in mode
    wip = "wip" in mode
    pr = "pr" in mode
    fifoci_golden = "fifoci_golden" in mode
    f.addStep(
        GitNoBranch(repourl="https://github.com/dolphin-emu/dolphin.git",
                    progress=True,
                    mode="incremental"))
    # Drop output from any previous build.
    f.addStep(RemoveDirectory(dir="build/Binary"))
    branch = WithProperties("%s", "branchname")
    env = {"DOLPHIN_BRANCH": branch, "DOLPHIN_DISTRIBUTOR": "dolphin-emu.org"}
    if normal:
        env["DOLPHIN_DEFAULT_UPDATE_TRACK"] = "beta"
    f.addStep(
        Compile(command=[
            "msbuild.exe", "/v:m", "/p:Platform=x64",
            "/p:Configuration=%s" % build_type, "dolphin-emu.sln"
        ],
                env=env,
                workdir="build/Source",
                description="building",
                descriptionDone="build",
                haltOnFailure=True))
    f.addStep(
        Test(command=[
            "msbuild.exe", "/v:m", "/p:Platform=x64",
            "/p:Configuration=%s" % build_type, "/p:RunUnitTests=true",
            "dolphin-emu.sln"
        ],
             env=env,
             workdir="build/Source",
             description="testing",
             descriptionDone="test",
             haltOnFailure=True))
    # Debug builds produce DolphinD.exe instead of Dolphin.exe.
    dolphin_name = "DolphinD" if debug else "Dolphin"
    f.addStep(
        ShellCommand(command=[
            "C:\\buildbot\\signbin.bat",
            "Binary\\x64\\%s.exe" % dolphin_name
        ],
                     logEnviron=False,
                     description="signing binary",
                     descriptionDone="sign binary"))
    f.addStep(
        ShellCommand(
            command=["xcopy", "Binary\\x64", "Dolphin-x64", "/S", "/I", "/Y"],
            logEnviron=False,
            description="copying output",
            descriptionDone="output copy"))
    out_filename = WithProperties("Dolphin-%s-%s-x64.7z", "branchname",
                                  "shortrev")
    f.addStep(
        ShellCommand(command=["7z", "a", "-r", out_filename, "Dolphin-x64"],
                     logEnviron=False,
                     description="compressing",
                     descriptionDone="compression"))
    if debug:
        fn_arch = "dbg-x64"
    else:
        fn_arch = "x64"
    # Master-side destination and public URL depend on the flavour.  The
    # %% escapes survive the fn_arch interpolation and are later filled
    # from build properties by WithProperties.
    if "normal" in mode:
        master_filename = WithProperties(
            "/srv/http/dl/builds/dolphin-%%s-%%s-%s.7z" % fn_arch,
            "branchname", "shortrev")
        url = WithProperties(
            "https://dl.dolphin-emu.org/builds/dolphin-%%s-%%s-%s.7z" %
            fn_arch, "branchname", "shortrev")
    elif wip:
        master_filename = WithProperties(
            "/srv/http/dl/wips/%%s-dolphin-%%s-%%s-%s.7z" % fn_arch,
            "author", "branchname", "shortrev")
        url = WithProperties(
            "https://dl.dolphin-emu.org/wips/%%s-dolphin-%%s-%%s-%s.7z" %
            fn_arch, "author", "branchname", "shortrev")
    elif pr:
        master_filename = WithProperties(
            "/srv/http/dl/prs/%%s-dolphin-latest-%s.7z" % fn_arch,
            "branchname")
        url = WithProperties(
            "https://dl.dolphin-emu.org/prs/%%s-dolphin-latest-%s.7z" %
            fn_arch, "branchname")
    else:
        master_filename = url = ""
    f.addStep(SetProperty(property="build_url", value=url))
    if master_filename and url:
        f.addStep(
            FileUpload(workersrc=out_filename,
                       masterdest=master_filename,
                       url=url,
                       keepstamp=True,
                       mode=0o644))
    if fifoci_golden:
        # Chain FifoCI: PR builds use the PR scheduler, otherwise trigger
        # only for master.
        if pr:
            f.addStep(
                Trigger(schedulerNames=["pr-fifoci-win"],
                        copy_properties=[
                            "pr_id", "headrev", "branchname", "shortrev",
                            "build_url"
                        ]))
        else:
            f.addStep(
                TriggerIfBranch(schedulerNames=["fifoci-win"],
                                branchList=["master"],
                                copy_properties=["shortrev", "build_url"]))
    if "normal" in mode and "debug" not in mode:
        # Only plain release builds are announced on the website and fed
        # into the auto-update manifest.
        f.addStep(
            MasterShellCommand(
                command=
                "/home/buildbot/venv/bin/python /home/buildbot/bin/send_build.py",
                env={
                    "BRANCH": WithProperties("%s", "branchname"),
                    "SHORTREV": WithProperties("%s", "shortrev"),
                    "HASH": WithProperties("%s", "revision"),
                    "AUTHOR": WithProperties("%s", "author"),
                    "DESCRIPTION": WithProperties("%s", "description"),
                    "TARGET_SYSTEM": "Windows x64",
                    "USER_OS_MATCHER": "win",
                    "BUILD_URL": url,
                },
                description="notifying website",
                descriptionDone="website notice"))
        f.addStep(
            MasterShellCommand(command=[
                "/home/buildbot/venv/bin/python",
                "/home/buildbot/bin/make_manifest.py", "--input",
                master_filename, "--version_hash",
                WithProperties("%s", "revision"), "--output-manifest-store",
                "/data/nas/update/manifest", "--output-content-store",
                "/data/nas/update/content", "--signing-key",
                "/home/buildbot/update.signing.key"
            ],
                               description="writing update manifest",
                               descriptionDone="update manifest write"))
    # Worker-side cleanup runs for every flavour.
    f.addStep(
        ShellCommand(command=["del", "/F", "/S", "/Q", out_filename],
                     logEnviron=False,
                     description="cleaning up files",
                     descriptionDone="cleanup files"))
    f.addStep(
        ShellCommand(command=["rmdir", "/S", "/Q", "Dolphin-x64"],
                     logEnviron=False,
                     description="cleaning up dirs",
                     descriptionDone="cleanup dirs"))
    return f
def getPerPlatformBuilders(self, platform):
    """Return the BuilderConfig list for *platform*, or [] when the
    platform cannot build this target.

    The factory cleans on request, re-runs configure only when the
    configure script is newer than config.mk, compiles, runs (or merely
    builds) the test runner, then strips and/or packages depending on the
    platform's capabilities.
    """
    if not platform.canBuild(self):
        return []
    src_path = "{0}/src/{1}".format(platform.workerdatapath, self.name)
    configure_path = src_path + "/configure"
    build_path = "{0}/builds/{1}/{2}".format(platform.workerdatapath,
                                             platform.name, self.name)
    packages_path = "{0}/packages/snapshots/{1}".format(
        platform.workerdatapath, self.name)
    env = platform.getEnv(self)
    f = factory.BuildFactory()
    f.useProgress = False
    f.addStep(steps.Clean(dir="", doStepIf=Property("clean", False)))
    f.addStep(
        steps.SetPropertyIfOlder(name="check config.mk freshness",
                                 src=configure_path,
                                 generated="config.mk",
                                 property="do_configure"))
    if self.verbose_build:
        platform_build_verbosity = "--enable-verbose-build"
    else:
        platform_build_verbosity = ""
    f.addStep(
        Configure(command=[
            configure_path, "--enable-all-engines",
            "--disable-engine=testbed", platform_build_verbosity
        ] + platform.getConfigureArgs(self),
                  doStepIf=Property("do_configure",
                                    default=True,
                                    defaultWhenFalse=False),
                  env=env))
    f.addStep(Compile(command=["make", "-j5"], env=env))
    if platform.canBuildTests(self):
        if platform.run_tests:
            f.addStep(Test(env=env))
        else:
            # Compile Tests (Runner), but do not execute (as binary is non-native)
            f.addStep(Test(command=["make", "test/runner"], env=env))
    # Hoist the duplicated getPackagingCmd() call; strip only when there
    # is no dedicated packaging command.
    packaging_cmd = platform.getPackagingCmd(self)
    if packaging_cmd is None:
        strip_cmd = platform.getStripCmd(self)
        if strip_cmd is not None:
            # Bug fix: the original called platform.getStripCmd() WITHOUT
            # the target argument here, unlike the guard above it.
            f.addStep(steps.Strip(command=strip_cmd))
    if platform.canPackage(self):
        f.addStep(
            steps.Package(disttarget=packaging_cmd,
                          srcpath=src_path,
                          dstpath=packages_path,
                          data_files=self.data_files,
                          buildname="{0}-{1}".format(platform.name, self.name),
                          platform_built_files=platform.getBuiltFiles(self),
                          platform_data_files=platform.getDataFiles(self),
                          archive_format=platform.archiveext,
                          env=env))
    return [
        BuilderConfig(
            name="{0}-{1}".format(self.name, platform.name),
            workername=platform.workername,
            workerbuilddir=build_path,
            factory=f,
            locks=[
                lock_build.access('counting'),
                self.lock_src.access("counting")
            ],
            tags=[self.name],
            properties={
                "platformname": platform.name,
                "workerimage": platform.getWorkerImage(self),
            },
        )
    ]
step_compile_txt = Compile(command=["make", "clean", "txt"], description="compiling txt", descriptionDone="compile txt", logEnviron=False) step_compile_sql = Compile(command=["make", "clean", "sql"], description="compiling sql", descriptionDone="compile sql", logEnviron=False) step_compile_VS10 = Compile( command=["devenv.com", "eAthena-10.sln", "/REBUILD"], logEnviron=False) step_trigger_tests = Trigger( waitForFinish=True, schedulerNames=["test-Ubuntu-12.04-x64-scheduler"]) step_test_txt = Test(command=[ "gdb", "map-server", "-ex=run --run-once", "-ex=bt full", "-ex=kill", "-ex=quit" ], warningPattern="\[(Error|Warning)\]", description="testing txt", descriptionDone="test txt", logEnviron=False) step_test_sql = Test(command=[ "gdb", "map-server_sql", "-ex=run --run-once", "-ex=bt full", "-ex=kill", "-ex=quit" ], warningPattern="\[(Error|Warning)\]", description="testing sql", descriptionDone="test sql", logEnviron=False) f_unix = BuildFactory(steps=[step_svn_copy, step_configure, step_compile_all]) f_unix_64 = BuildFactory( steps=[step_svn_copy, step_configure_64, step_compile_all]) f_unix_64_trigger = BuildFactory(steps=[
def loadConfig(config):
    """Wire the XDD project into the buildmaster *config* dict.

    Adds the git change source, the build factory and its builders, the
    nightly and force schedulers, and the mail status target.  Commented
    entries are builders/schedulers that are currently disabled.
    """
    ####### CHANGESOURCES

    # the 'change_source' setting tells the buildmaster how it should find out
    # about source code changes. Here we point to the buildbot clone of pyflakes.
    from buildbot.changes.gitpoller import GitPoller
    from buildbot.changes.filter import ChangeFilter
    config['change_source'].append(
        GitPoller(repourl='[email protected]:ORNL/xdd.git',
                  workdir='gitpoller-workdir-xdd-master',
                  pollinterval=120,
                  branch='master',
                  project='xdd'))
    xdd_filter = ChangeFilter(project='xdd', branch='testing')

    ####### BUILDERS

    # The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
    # what steps, and which slaves can execute them. Note that any particular build will
    # only take place on one slave.
    from buildbot.process.factory import BuildFactory, GNUAutoconf
    from buildbot.steps.source import Git
    from buildbot.steps.shell import ShellCommand, Configure, Compile, Test

    xdd_factory = BuildFactory()

    # Check out the source
    xdd_factory.addStep(
        Git(repourl='[email protected]:ORNL/xdd.git', mode='copy',
            branch='master'))

    # Generate the test configuration
    xdd_factory.addStep(
        ShellCommand(command=['./contrib/buildbot_gen_test_config.sh'],
                     name="configuring"))

    # Compile the code
    xdd_factory.addStep(Compile(description=["compiling"]))

    # Install the code
    xdd_factory.addStep(
        ShellCommand(command=['make', 'install'], name="make install"))

    # Perform make check
    xdd_factory.addStep(
        ShellCommand(command=['make', 'check'], name="make check",
                     maxTime=600))

    # Perform make test
    xdd_factory.addStep(Test(description=["make test"], maxTime=600))

    # Perform cleanup
    xdd_factory.addStep(
        ShellCommand(command=['pkill', '-f', 'xdd', '||', 'echo ""'],
                     name='process cleanup',
                     maxTime=60))

    # Add the XDD Build factory to each of the available builders described in the master.cfg
    from buildbot.config import BuilderConfig
    # config['builders'].append(BuilderConfig(name="xdd-rhel5-x86_64", slavenames=["pod7"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"}, category='xdd'))
    # config['builders'].append(BuilderConfig(name="xdd-rhel6-x86_64", slavenames=["pod9"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"},category='xdd'))
    # config['builders'].append(BuilderConfig(name="xdd-sles10-x86_64", slavenames=["pod10"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"}, category='xdd'))
    config['builders'].append(
        BuilderConfig(name="xdd-sles11-x86_64",
                      slavenames=["pod11"],
                      factory=xdd_factory,
                      env={"XDDTEST_TIMEOUT": "900"},
                      category='xdd'))
    config['builders'].append(
        BuilderConfig(name="xdd-osx-10-8",
                      slavenames=["natureboy"],
                      factory=xdd_factory,
                      env={"XDDTEST_TIMEOUT": "900"},
                      category='xdd'))
    # config['builders'].append(BuilderConfig(name="xdd-rhel6-ppc64", slavenames=["spry02"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"}, category='xdd'))

    ####### SCHEDULERS

    # Configure the Schedulers, which decide how to react to incoming changes. In this
    # case, just kick off a 'runtests' build

    # Configure the nightly testing so that every test lives in the same buildset
    from buildbot.schedulers.basic import SingleBranchScheduler
    from buildbot.schedulers.timed import Periodic, Nightly
    build_nightly_xdd = Nightly(name="xdd-nightly1",
                                branch="master",
                                properties={'owner': ['*****@*****.**']},
                                builderNames=["xdd-sles11-x86_64",
                                              "xdd-osx-10-8"],
                                hour=2,
                                minute=3)
    config['schedulers'].append(build_nightly_xdd)

    # Configure each force build separately so that they live in differing buildsets
    from buildbot.schedulers.forcesched import ForceScheduler
    # config['schedulers'].append(ForceScheduler(name="xdd-force1", builderNames=["xdd-rhel5-x86_64"]))
    # config['schedulers'].append(ForceScheduler(name="xdd-force2", builderNames=["xdd-rhel6-x86_64"]))
    # config['schedulers'].append(ForceScheduler(name="xdd-force3", builderNames=["xdd-sles10-x86_64"]))
    config['schedulers'].append(
        ForceScheduler(name="xdd-force4", builderNames=["xdd-sles11-x86_64"]))
    config['schedulers'].append(
        ForceScheduler(name="xdd-force6", builderNames=["xdd-osx-10-8"]))
    # config['schedulers'].append(ForceScheduler(name="xdd-force7", builderNames=["xdd-rhel6-ppc64"]))

    ####### STATUS TARGETS

    # 'status' is a list of Status Targets. The results of each build will be
    # pushed to these targets. buildbot/status/*.py has a variety to choose from,
    # including web pages, email senders, and IRC bots.
    from buildbot.status.mail import MailNotifier
    xddMN = MailNotifier(fromaddr="*****@*****.**",
                         extraRecipients=['*****@*****.**'],
                         categories='xdd',
                         buildSetSummary=True,
                         messageFormatter=xddSummaryMail)
    config['status'].append(xddMN)
def make_dolphin_osx_build(mode="normal"):
    """macOS build factory: cmake/ninja build, unit tests, codesigning,
    DMG packaging, upload, and website notification for release builds.

    *mode* is one of "normal", "wip" or "pr"; anything else skips upload.
    """
    f = BuildFactory()
    f.addStep(
        GitNoBranch(repourl="https://github.com/dolphin-emu/dolphin.git",
                    progress=True,
                    mode="incremental"))
    f.addStep(
        ShellCommand(command=["mkdir", "-p", "build"],
                     logEnviron=False,
                     description="mkbuilddir",
                     descriptionDone="mkbuilddir"))
    f.addStep(
        ShellCommand(command=[
            "cmake", "-GNinja", "-DDISTRIBUTOR=dolphin-emu.org", ".."
        ],
                     workdir="build/build",
                     description="configuring",
                     descriptionDone="configure",
                     haltOnFailure=True))
    f.addStep(
        Compile(command=["ninja"],
                workdir="build/build",
                description="building",
                descriptionDone="build",
                haltOnFailure=True))
    f.addStep(
        Test(command=["ninja", "unittests"],
             workdir="build/build",
             description="testing",
             descriptionDone="test",
             haltOnFailure=True))
    f.addStep(
        ShellCommand(command="/build/codesign.sh --deep Binaries/Dolphin.app",
                     workdir="build/build",
                     description="signing",
                     descriptionDone="sign",
                     haltOnFailure=True))
    # Package the signed .app into a compressed DMG named after branch and
    # short revision.
    f.addStep(
        ShellCommand(command=[
            "hdiutil", "create", "dolphin.dmg", "-format", "UDBZ",
            "-srcfolder", "Binaries/dolphin.app", "-ov", "-volname",
            WithProperties("Dolphin %s-%s", "branchname", "shortrev")
        ],
                     workdir="build/build",
                     logEnviron=False,
                     description="packaging",
                     descriptionDone="package"))
    f.addStep(
        ShellCommand(command="/build/codesign.sh --deep dolphin.dmg",
                     workdir="build/build",
                     description="signing dmg",
                     descriptionDone="sign dmg",
                     haltOnFailure=True))
    # Upload destination and public URL depend on the build flavour.
    if mode == "normal":
        master_filename = WithProperties(
            "/srv/http/dl/builds/dolphin-%s-%s.dmg", "branchname", "shortrev")
        url = WithProperties(
            "https://dl.dolphin-emu.org/builds/dolphin-%s-%s.dmg",
            "branchname", "shortrev")
    elif mode == "wip":
        master_filename = WithProperties(
            "/srv/http/dl/wips/%s-dolphin-%s-%s.dmg", "author", "branchname",
            "shortrev")
        url = WithProperties(
            "https://dl.dolphin-emu.org/wips/%s-dolphin-%s-%s.dmg", "author",
            "branchname", "shortrev")
    elif mode == "pr":
        master_filename = WithProperties(
            "/srv/http/dl/prs/%s-dolphin-latest.dmg", "branchname")
        url = WithProperties(
            "https://dl.dolphin-emu.org/prs/%s-dolphin-latest.dmg",
            "branchname")
    else:
        master_filename = url = ""
    if master_filename and url:
        f.addStep(
            FileUpload(workersrc="build/dolphin.dmg",
                       masterdest=master_filename,
                       url=url,
                       keepstamp=True,
                       mode=0o644))
    if mode == "normal":
        # Announce release builds on the website.
        f.addStep(
            MasterShellCommand(
                command="/home/buildbot/bin/send_build.py",
                env={
                    "BRANCH": WithProperties("%s", "branchname"),
                    "SHORTREV": WithProperties("%s", "shortrev"),
                    "HASH": WithProperties("%s", "revision"),
                    "AUTHOR": WithProperties("%s", "author"),
                    "DESCRIPTION": WithProperties("%s", "description"),
                    "TARGET_SYSTEM": "macOS",
                    "USER_OS_MATCHER": "osx",
                    "BUILD_URL": url,
                },
                description="notifying website",
                descriptionDone="website notice"))
    return f
name = 'Boost' hub_repo = 'ryppl/boost-zero' include_features = ['os', 'cc'] repositories = [GitHub(hub_repo, protocol='https')] build_procedures = [ BuildProcedure('Integration').addSteps( *repositories[0].steps(workdir='boost', haltOnFailure=True)).addSteps( Configure(workdir='boost', command=cmake('%(clean:+clean)sconfigure'), haltOnFailure=True), CMakeBuild('Debug', workdir='boost/build', haltOnFailure=False), CMakeBuild('Release', workdir='boost/build', haltOnFailure=False), Test(workdir='boost', command=cmake('test'), haltOnFailure=False), ShellCommand(workdir='boost', command=cmake('documentation'), name='Docs')) ] transitions = {'successToFailure': 1, 'failureToSuccess': 1, 'exception': 1} status = [ IRC(host="irc.freenode.net", nick="rypbot", notify_events=transitions, channels=["#ryppl"]), MailNotifier(fromaddr="*****@*****.**", extraRecipients=["*****@*****.**"], mode='problem')
build_steps += [ # Run makepanda. Give it enough timeout (6h) since some steps take ages Compile(name="compile " + abi, timeout=6 * 60 * 60, command=get_build_command(abi, copy_python=copy_python), env={ "MAKEPANDA_THIRDPARTY": "C:\\thirdparty", "MAKEPANDA_SDKS": "C:\\sdks" }, haltOnFailure=True, doStepIf=do_step), # Run the test suite, but in a virtualenv. Test(name="test " + abi, command=get_test_command(abi, whl_filename), haltOnFailure=True, doStepIf=do_step), # Upload the wheel. FileUpload(name="upload whl " + abi, workersrc=whl_filename, masterdest=Interpolate("%s/%s", common.upload_dir, whl_filename), mode=0o664, haltOnFailure=True, doStepIf=do_step), # Clean up the created files. ShellCommand(name="del " + abi, command=["del", "/Q", whl_filename], haltOnFailure=False,
def CreateWinChromeFactory(builder):
    """Run chrome tests with the latest drmemory.

    Do *not* build TOT chrome or sync it.  Building chrome takes a lot of
    resources and the tests are flaky, so we only do at known good
    revisions.  We don't want to fall too far behind, or we're not really
    testing Chrome's full test suite.

    *builder* is the name of the chrome builder on this slave, used to
    locate its build directory.
    """
    ret = factory.BuildFactory()
    sfx_name = 'drm-sfx'  # TODO: add .exe when BB supports that, d'oh!
    ret.addStep(
        FileDownload(mastersrc=LATEST_WIN_BUILD,
                     slavedest=(sfx_name + '.exe'),
                     name='Download the latest build'))
    ret.addStep(
        ShellCommand(command=[sfx_name, '-ounpacked', '-y'],
                     haltOnFailure=True,
                     name='Unpack the build',
                     description='unpack the build'))

    # Find out the revision number using -version
    def get_revision(rc, stdout, stderr):
        # Parse "version X.Y.Z" from -version output; the third component
        # is the revision number.
        m = re.search(r'version \d+\.\d+\.(\d+)', stdout)
        if m:
            return {'got_revision': int(m.groups()[0])}
        return {'failed_to_parse': stdout}

    ret.addStep(
        SetProperty(command=['unpacked\\bin\\drmemory', '-version'],
                    extract_fn=get_revision,
                    name='Get the revision number',
                    description='get revision',
                    descriptionDone='get revision'))
    # VP8 tests
    # TODO(rnk): Add back the VP8 test step. We might be able to make this part
    # of the buildbot steps if it doesn't update often and builds incrementally.
    if False:
        ret.addStep(
            ToolStep(DrMemoryTest,
                     'windows',
                     command=[
                         'bash',
                         'E:\\vpx\\vp8-test-vectors\\run_tests.sh',
                         ('--exec=unpacked/bin/drmemory.exe -batch '
                          '-no_check_leaks -no_count_leaks '
                          '-no_check_uninitialized '
                          'e:/vpx/b/Win32/Debug/vpxdec.exe'),
                         'E:\\vpx\\vp8-test-vectors',
                     ],
                     env={'PATH': 'C:\\cygwin\\bin;%PATH%'},
                     name='VP8 tests',
                     descriptionDone='VP8 tests',
                     description='run vp8 tests'))
    # Chromium tests
    for test in [
            'url', 'printing', 'media', 'sql', 'crypto_unittests', 'remoting',
            'ipc_tests', 'base_unittests', 'net', 'unit'
    ]:
        ret.addStep(
            Test(
                command=[
                    # Use the build dir of the chrome builder on this slave.
                    ('..\\..\\' + builder + '\\build\\' +
                     'src\\tools\\valgrind\\chrome_tests.bat'),
                    '-t',
                    test,
                    '--tool',
                    'drmemory_light',
                    '--keep_logs',
                ],
                env={'DRMEMORY_COMMAND': 'unpacked/bin/drmemory.exe'},
                name=('Chromium \'%s\' tests' % test),
                descriptionDone=('\'%s\' tests' % test),
                description=('run \'%s\' tests' % test)))
    return ret
ShellCommand(command=cloudimg_cmd, workdir="context"), # Build the Docker image. ShellCommand(name="setup", command=setup_cmd, workdir="context", haltOnFailure=True), # Invoke makepanda. Compile(name="compile py2", command=get_build_command(2), haltOnFailure=True, doStepIf=is_branch("release/1.10.x")), Compile(name="compile py3", command=get_build_command(3), haltOnFailure=True), # Run the test suite. Test(name="test py2", command=get_test_command(2), haltOnFailure=True, doStepIf=is_branch("release/1.10.x")), Test(name="test py3", command=get_test_command(3), haltOnFailure=True), # Build the installer. ShellCommand(name="package", command=package_cmd, haltOnFailure=True, doStepIf=lambda step:not step.getProperty("optimize", False)), # And the test scripts for deploy-ng. #Test(name="build_samples", command=test_deployng_cmd, doStepIf=is_branch("deploy-ng"), haltOnFailure=True), ] # Define a global lock, since reprepro won't allow simultaneous access to the repo. repo_lock = MasterLock('reprepro') # Steps to publish the runtime and SDK. publish_deb_steps = [
def CreateLinuxChromeFactory():
    """Run chrome tests with the latest dynamorio.

    TODO(rnk): Run drmemory, not dynamorio.

    We use a build of chrome produced weekly from a known good revision on
    the same slave.
    """
    cr_src = '../../linux-cr-builder/build/src'
    ret = factory.BuildFactory()
    ret.addStep(
        SVN(svnurl=dr_svnurl,
            workdir='dynamorio',
            mode='update',
            name='Checkout DynamoRIO'))
    # If we need to execute 32-bit children, we'll need a full exports package.
    ret.addStep(
        Configure(command=['cmake', '..', '-DDEBUG=OFF'],
                  workdir='dynamorio/build',
                  name='Configure release DynamoRIO'))
    ret.addStep(
        Compile(command=['make', '-j5'],
                workdir='dynamorio/build',
                name='Compile release DynamoRIO'))
    # Don't follow python children. This should speed up net_unittests, which
    # spawns a bunch of simple http servers to talk to.
    ret.addStep(
        ShellCommand(
            command=['bin64/drconfig', '-reg', 'python', '-norun', '-v'],
            workdir='dynamorio/build',
            name='don\'t follow python',
            description='don\'t follow python',
            descriptionDone='don\'t follow python'))
    # Chromium tests
    for test in LINUX_CHROME_TESTS:
        cmd = [
            'xvfb-run',
            '-a',
            '../dynamorio/build/bin64/drrun',
            '-stderr_mask',
            '12',  # Show DR crashes
            '--',
            cr_src + '/out/Release/' + test
        ]
        # Per-test gtest filters for flaky or environment-dependent cases.
        if test == 'browser_tests':
            cmd += ['--gtest_filter=AutofillTest.BasicFormFill']
        elif test == 'net_unittests':
            cmd += [
                '--gtest_filter=-CertDatabaseNSSTest.ImportCACertHierarchy*'
            ]
        elif test == 'remoting_unittests':
            cmd += [
                '--gtest_filter='
                '-VideoFrameCapturerTest.Capture:'
                'DesktopProcessTest.DeathTest'
            ]
        elif test == 'base_unittests':
            # crbug.com/308273: this test is flaky
            cmd += [
                '--gtest_filter=-TraceEventTestFixture.TraceContinuousSampling'
            ]
        elif test == 'content_shell':
            cmd += ['-dump-render-tree', 'file:///home/chrome-bot/bb.html']
        # We used to md5 the output, but that's too brittle. Just dump it to
        # stdout so humans can verify it. The return code will tell us if we
        # crash.
        # TODO(rnk): We should run some selection of layout tests if we want
        # to verify output.
        ret.addStep(
            Test(command=cmd,
                 env={'CHROME_DEVEL_SANDBOX': '/opt/chromium/chrome_sandbox'},
                 name=test,
                 descriptionDone=test,
                 description=test))
    return ret
def afterRegisterInDB(self, insert_id):
    # Remember the database row id for this MTR run so later inserts can
    # reference it, reset the warning counter property, then start the
    # underlying Test step.
    self.setProperty("mtr_id", insert_id)
    self.setProperty("mtr_warn_id", 0)
    Test.start(self)
def __init__(self, ignore=None, flaky=None, max_logs=20, *args, **kwargs):
    """Lit test step: attach a LitLogObserver to stdio.

    @param ignore: accepted for caller compatibility; unused here.
    @param flaky: accepted for caller compatibility; unused here.
    @param max_logs: accepted for caller compatibility; unused here.
    """
    # None defaults replace the shared mutable [] defaults.
    Test.__init__(self, *args, **kwargs)
    self.logObserver = LitLogObserver()
    self.addLogObserver("stdio", self.logObserver)