def doGclientUpdate(self):
    """Sync the checkout by running 'gclient sync'.

    Assembles the gclient command line from the step's configuration
    (unversioned-tree deletion, parallel jobs, hooks suppression,
    revision pinning, transitive deps, explicit deps) and runs it in
    the source directory.  Returns the deferred from RunProcess.start().
    """
    dirname = os.path.join(self.builder.basedir, self.srcdir)
    command = [chromium_utils.GetGClientCommand(), 'sync', '--verbose',
               '--reset', '--manually_grab_svn_rev', '--force',
               '--with_branch_heads']
    if self.delete_unversioned_trees_when_updating:
        command.append('--delete_unversioned_trees')
    if self.gclient_jobs:
        command.append('-j%d' % self.gclient_jobs)
    # Don't run hooks if it was patched or there is a patch since runhooks will
    # be run after.
    if self.gclient_nohooks or self.patch or self.was_patched:
        command.append('--nohooks')
    # GClient accepts --revision argument of two types 'module@rev' and 'rev'.
    if self.revision and not self.no_gclient_revision:
        command.append('--revision')
        # Ignore non-svn part of compound revisions.
        # Used for nacl.sdk.mono waterfall.
        if ':' in self.revision:
            command.append(self.revision.split(':')[0])
        elif (not self.branch or
              self.no_gclient_branch or
              '@' in str(self.revision)):
            command.append(str(self.revision))
        else:
            # Make the revision look like branch@revision.
            prefix = self.project if self.project else self.branch
            command.append('%s@%s' % (prefix, self.revision))
        # We only add the transitive flag if we have a revision, otherwise it is
        # meaningless.
        if self.gclient_transitive:
            command.append('--transitive')
    if self.gclient_deps:
        command.append('--deps=' + self.gclient_deps)
    c = runprocess.RunProcess(
        self.builder, command, dirname,
        sendRC=False, timeout=self.timeout,
        keepStdout=True, environ=self.env)
    self.command = c
    return c.start()
def start(self): args = self.args # args['todir'] is relative to Builder directory, and is required. # args['fromdir'] is relative to Builder directory, and is required. assert args['todir'] is not None assert args['fromdir'] is not None fromdir = os.path.join(self.builder.basedir, args['fromdir']) todir = os.path.join(self.builder.basedir, args['todir']) self.timeout = args.get('timeout', 120) self.maxTime = args.get('maxTime', None) if runtime.platformType != "posix": d = threads.deferToThread(shutil.copytree, fromdir, todir) def cb(_): return 0 # rc=0 def eb(f): self.sendStatus({'header': 'exception from copytree\n' + f.getTraceback()}) return -1 # rc=-1 d.addCallbacks(cb, eb) @d.addCallback def send_rc(rc): self.sendStatus({'rc': rc}) else: if not os.path.exists(os.path.dirname(todir)): os.makedirs(os.path.dirname(todir)) if os.path.exists(todir): # I don't think this happens, but just in case.. log.msg("cp target '%s' already exists -- cp will not do what you think!" % todir) command = ['cp', '-R', '-P', '-p', '-v', fromdir, todir] c = runprocess.RunProcess(self.builder, command, self.builder.basedir, sendRC=False, timeout=self.timeout, maxTime=self.maxTime, logEnviron=self.logEnviron, usePTY=False) self.command = c d = c.start() d.addCallback(self._abandonOnFailure) d.addCallbacks(self._sendRC, self._checkAbandoned) return d
def testEnvironExpandVar(self):
    """${VAR} references in environ values are expanded from the real
    environment; unknown variables expand to nothing and malformed
    references are passed through untouched."""
    builder = FakeSlaveBuilder(False, self.basedir)
    env = {"EXPND": "-${PATH}-",
           "DOESNT_EXPAND": "-${---}-",
           "DOESNT_FIND": "-${DOESNT_EXISTS}-"}
    proc = runprocess.RunProcess(
        builder, stdoutCommand('hello'), self.basedir, environ=env)
    d = proc.start()

    def verify(_):
        header_text = "".join(
            list(u.values())[0]
            for u in builder.updates if list(u) == ["header"])
        self.failUnless("EXPND=-$" not in header_text, "got:\n" + header_text)
        self.failUnless("DOESNT_FIND=--" in header_text, "got:\n" + header_text)
        self.failUnless(
            "DOESNT_EXPAND=-${---}-" in header_text, "got:\n" + header_text)
    d.addCallback(verify)
    return d
def testUnsetEnvironVar(self):
    """Setting an environ entry to None must remove that variable
    (PATH here) from the child's environment, so it never appears in
    the logged environment headers."""
    b = FakeSlaveBuilder(False, self.basedir)
    s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir,
                              environ={"PATH": None})
    d = s.start()

    def check(ign):
        # list(...) wrappers so this works on Python 3 as well,
        # matching the sibling tests (e.g. testEnvironExpandVar).
        headers = "".join([
            list(update.values())[0]
            for update in b.updates if list(update) == ["header"]
        ])
        # r'\b' is a regex word boundary; the previous non-raw '\b'
        # was a literal backspace byte, so the pattern could never
        # match and the assertion was vacuous.
        # NOTE(review): re.match only anchors at the start of the
        # headers string; re.search may be the real intent — confirm.
        self.failUnless(not re.match(r'\bPATH=', headers),
                        "got:\n" + headers)
    d.addCallback(check)
    return d
def do_test_double_fork(self, usePTY, useProcGroup=True,
                        expectChildSurvival=False):
    """Spawn a process that forks a grandchild and exits, then kill the
    RunProcess and check which of the two survived.

    If expectChildSurvival is True, the grandchild is asserted to be
    alive after the kill; otherwise both processes must be dead.
    Returns a deferred firing once both checks complete.
    """
    # when a spawned process spawns another process, and then dies itself
    # (either intentionally or accidentally), we should be able to clean up
    # the child.
    parent_pidfile = self.newPidfile()
    self.parent_pid = None
    child_pidfile = self.newPidfile()
    self.child_pid = None
    b = FakeSlaveBuilder(False, self.basedir)
    s = runprocess.RunProcess(b, scriptCommand(
        'double_fork', parent_pidfile, child_pidfile),
        self.basedir, usePTY=usePTY, useProcGroup=useProcGroup)
    runproc_d = s.start()
    # wait for both processes to start up, then call s.kill
    parent_pidfile_d = self.waitForPidfile(parent_pidfile)
    child_pidfile_d = self.waitForPidfile(child_pidfile)
    pidfiles_d = defer.gatherResults([parent_pidfile_d, child_pidfile_d])

    def got_pids(pids):
        self.parent_pid, self.child_pid = pids
    pidfiles_d.addCallback(got_pids)

    def kill(_):
        s.kill("diaf")
    pidfiles_d.addCallback(kill)
    # check that both processes are dead after RunProcess is done
    d = defer.gatherResults([pidfiles_d, runproc_d])

    def check_dead(_):
        self.assertDead(self.parent_pid)
        if expectChildSurvival:
            self.assertAlive(self.child_pid)
        else:
            self.assertDead(self.child_pid)
    d.addCallback(check_dead)
    return d
def doVCClean(self, res=None):
    """Clean the repository after a pull or update.

    Runs 'bzr clean-tree' to remove untracked files (e.g. *.pyc, junk)
    from the source directory.  Returns the deferred from the command.
    """
    bzr = self.getCommand('bzr')
    cmd = [bzr, 'clean-tree', '-q', '--force', '--unknown', '--detritus']
    workdir = os.path.join(self.builder.basedir, self.srcdir)
    proc = runprocess.RunProcess(self.builder, cmd, workdir,
                                 sendRC=False, timeout=self.timeout,
                                 logEnviron=False)
    self.command = proc
    return proc.start()
def testMultiWordCommand(self):
    """A list-form command with an embedded-space argument produces the
    expected echo output on both UNIX and Windows."""
    builder = FakeSlaveBuilder(False, self.basedir)
    # careful! This command must execute the same on windows and UNIX
    proc = runprocess.RunProcess(
        builder, ['echo', 'Happy Days and Jubilation'], self.basedir)
    if runtime.platformType == "win32":
        # Twisted adds quotes to all arguments, and echo doesn't remove
        # them, so they appear in the output.
        expected = nl('"Happy Days and Jubilation"\n')
    else:
        expected = nl('Happy Days and Jubilation\n')
    d = proc.start()

    def verify(_):
        self.failUnless({'stdout': expected} in builder.updates,
                        builder.show())
        self.failUnless({'rc': 0} in builder.updates, builder.show())
    d.addCallback(verify)
    return d
def doClobber(self, dummy, dirname, chmodDone=False):
    """Delete 'dirname' (relative to the builder basedir) entirely.

    On Windows this blocks on utils.rmdirRecursive; on posix it runs
    'rm -rf' as a subprocess.  If the rm fails and chmodDone is False,
    a recursive chmod is attempted and the rm is retried once.
    Returns a deferred firing with the final rc.
    """
    # TODO: remove the old tree in the background
    ## workdir = os.path.join(self.builder.basedir, self.workdir)
    ## deaddir = self.workdir + ".deleting"
    ## if os.path.isdir(workdir):
    ##     try:
    ##         os.rename(workdir, deaddir)
    ##         # might fail if deaddir already exists: previous deletion
    ##         # hasn't finished yet
    ##         # start the deletion in the background
    ##         # TODO: there was a solaris/NetApp/NFS problem where a
    ##         # process that was still running out of the directory we're
    ##         # trying to delete could prevent the rm-rf from working. I
    ##         # think it stalled the rm, but maybe it just died with
    ##         # permission issues. Try to detect this.
    ##         os.commands("rm -rf %s &" % deaddir)
    ##     except:
    ##         # fall back to sequential delete-then-checkout
    ##         pass
    d = os.path.join(self.builder.basedir, dirname)
    if runtime.platformType != "posix":
        # if we're running on w32, use rmtree instead. It will block,
        # but hopefully it won't take too long.
        utils.rmdirRecursive(d)
        return defer.succeed(0)
    command = ["rm", "-rf", d]
    c = runprocess.RunProcess(self.builder, command, self.builder.basedir,
                              sendRC=0, timeout=self.timeout,
                              maxTime=self.maxTime, usePTY=False)
    self.command = c
    # sendRC=0 means the rm command will send stdout/stderr to the
    # master, but not the rc=0 when it finishes. That job is left to
    # _sendRC
    d = c.start()
    # The rm -rf may fail if there is a left-over subdir with chmod 000
    # permissions. So if we get a failure, we attempt to chmod suitable
    # permissions and re-try the rm -rf.
    if chmodDone:
        d.addCallback(self._abandonOnFailure)
    else:
        d.addCallback(lambda rc: self.doClobberTryChmodIfFail(rc, dirname))
    return d
def testMultiWordStringCommandQuotes(self):
    """A shell-string command with explicit quotes echoes as expected
    on both UNIX and Windows."""
    builder = FakeSlaveBuilder(False, self.basedir)
    # careful! This command must execute the same on windows and UNIX
    proc = runprocess.RunProcess(
        builder, 'echo "Happy Days and Jubilation"', self.basedir)
    if runtime.platformType == "win32":
        # echo doesn't parse out the quotes, so they come through in the
        # output
        expected = nl('"Happy Days and Jubilation"\n')
    else:
        expected = nl('Happy Days and Jubilation\n')
    d = proc.start()

    def verify(_):
        self.failUnless({'stdout': expected} in builder.updates,
                        builder.show())
        self.failUnless({'rc': 0} in builder.updates, builder.show())
    d.addCallback(verify)
    return d
def doRevert(self, dummy):
    """Revert any modification done by a previous patch.

    Runs 'gclient revert --nohooks', abandons the chain on failure,
    then removes the patch signal file.  Note that it is assumed that
    .orig and .rej files will be reverted (deleted) by the
    'gclient revert' command; if the try bot is configured with
    'global-ignores=*.orig', patch failure will occur.
    """
    workdir = os.path.join(self.builder.basedir, self.srcdir)
    cmd = [chromium_utils.GetGClientCommand(), 'revert', '--nohooks']
    proc = runprocess.RunProcess(
        self.builder, cmd, workdir,
        sendRC=False, timeout=self.timeout,
        keepStdout=True, environ=self.env)
    self.command = proc
    d = proc.start()
    d.addCallback(self._abandonOnFailure)
    # Remove patch residues.
    d.addCallback(lambda _: self._doRevertRemoveSignalFile())
    return d
def parseGotRevision(self):
    """Return a deferred firing with the current darcs context.

    Runs 'darcs changes --context' in the source directory and yields
    its captured stdout, which identifies what we wound up with.
    """
    darcs = self.getCommand('darcs')
    cmd = [darcs, "changes", "--context"]
    workdir = os.path.join(self.builder.basedir, self.srcdir)
    proc = runprocess.RunProcess(self.builder, cmd, workdir,
                                 environ=self.env, timeout=self.timeout,
                                 sendStdout=False, sendStderr=False,
                                 sendRC=False, keepStdout=True,
                                 usePTY=False)
    d = proc.start()
    d.addCallback(lambda _: proc.stdout)
    return d
def _update(self, res):
    """Run 'mtn update' in the source directory.

    Updates to self.revision when one is given; otherwise to the head
    of self.branch.  Returns the deferred from the command.
    """
    cmd = [self.mtn, 'update', '--db', self.database]
    if self.revision:
        cmd += ['--revision', self.revision]
    else:
        cmd += ["-r", "h:" + self.branch]
    cmd += ["-b", self.branch]
    proc = runprocess.RunProcess(self.builder, cmd, self._fullSrcdir(),
                                 environ=self.env, sendRC=False,
                                 timeout=self.timeout,
                                 maxTime=self.maxTime, keepStdout=True,
                                 usePTY=False,
                                 logEnviron=self.logEnviron)
    return proc.start()
def doVCUpdate(self):
    """Update an existing BitKeeper checkout with 'bk pull'.

    Revision is ignored since the BK free client doesn't support it.
    Returns the deferred from the command.
    """
    bk = self.getCommand('bk')
    # XXX revision is never used!! - bug #1715
    # revision = self.args['revision'] or 'HEAD'
    # update: possible for mode in ('copy', 'update')
    workdir = os.path.join(self.builder.basedir, self.srcdir)
    proc = runprocess.RunProcess(self.builder, [bk, 'pull'], workdir,
                                 sendRC=False, timeout=self.timeout,
                                 keepStdout=True,
                                 logEnviron=self.logEnviron, usePTY=False)
    self.command = proc
    return proc.start()
def testNoLogEnviron(self):
    """With logEnviron=False, environment variables must not be echoed
    into the header updates."""
    b = FakeSlaveBuilder(False, self.basedir)
    s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir,
                              environ={"FOO": "BAR"}, logEnviron=False)
    d = s.start()

    def check(ign):
        # list(...) wrappers so this works on Python 3 as well
        # (dict.values()/keys() return views there), matching the
        # sibling tests such as testEnvironExpandVar.
        headers = "".join([
            list(update.values())[0]
            for update in b.updates if list(update) == ["header"]
        ])
        self.failUnless("FOO=BAR" not in headers, "got:\n" + headers)
    d.addCallback(check)
    return d
def doVCFull(self):
    """Create a fresh BitKeeper clone of self.bkurl in self.srcdir.

    Passes -r<revision> only when a revision was actually requested.
    The previous code always inserted revision_arg, which was an empty
    string when no revision was given, injecting a bogus '' argument
    into the bk command line.
    """
    bk = self.getCommand('bk')
    command = [bk, 'clone']
    if self.args['revision']:
        command.append("-r%s" % self.args['revision'])
    command += self.bk_args + [self.bkurl, self.srcdir]
    d = self.builder.basedir
    c = runprocess.RunProcess(self.builder, command, d,
                              sendRC=False, timeout=self.timeout,
                              usePTY=False)
    self.command = c
    return c.start()
def _checkDb(self):
    """Verify the monotone database exists and is usable.

    Runs 'mtn db info'.  If the db is missing it is created with
    'mtn db init'; if it reports '(migration needed)' it is migrated;
    if it is '(too new, cannot use)' MonotoneError is raised.
    Returns a deferred firing when the database is ready.
    """
    # Don't send stderr. When there is no database, this might confuse
    # users, as they will see a mtn error message. But having no database
    # repo is not an error, just an indication that we need to pull one.
    c = runprocess.RunProcess(self.builder,
                              [self.mtn, 'db', 'info', '--db', self.database],
                              self.builder.basedir,
                              environ=self.env, sendRC=False,
                              keepStdout=True, sendStderr=False,
                              usePTY=False)
    d = c.start()

    def afterCheckRepo(res, cdi):
        if type(res) is int and res != 0:
            log.msg("No database found, creating it")
            # mtn info fails, try to create shared repo.
            # We'll be doing an initial pull, so up the timeout to
            # 3 hours to make sure it will have time to complete.
            self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60)
            c = runprocess.RunProcess(self.builder,
                                      [self.mtn, 'db', 'init',
                                       '--db', self.database],
                                      self.builder.basedir,
                                      environ=self.env, sendRC=False,
                                      usePTY=False)
            self.command = c
            return c.start()
        elif cdi.stdout.find("(migration needed)") > 0:
            log.msg("Older format database found, migrating it")
            # mtn info fails, try to create shared repo.
            c = runprocess.RunProcess(self.builder,
                                      [self.mtn, 'db', 'migrate',
                                       '--db', self.database],
                                      self.builder.basedir,
                                      environ=self.env, sendRC=False,
                                      usePTY=False)
            self.command = c
            return c.start()
        elif cdi.stdout.find("(too new, cannot use)") > 0:
            # Python-3-compatible raise syntax (the old
            # 'raise MonotoneError, "..."' form is a SyntaxError on 3.x).
            raise MonotoneError(
                "The database is of a newer format than mtn can handle... Abort!")
        else:
            return defer.succeed(res)
    d.addCallback(afterCheckRepo, c)
    return d
def doPatch(self, res):
    """Apply the pending patch to the working directory.

    self.patch is (patchlevel, diff[, root]).  The optional root (or
    the GCLIENT_PATCH_ROOT environment override) selects a
    subdirectory to patch in; it is only honored when it stays inside
    the workdir.  A '.buildbot-patched' marker file is written so the
    directory is not updated later.  If the diff touches DEPS, a VC
    update is chained after the patch.  Returns a deferred firing when
    patching (and any chained update) completes.
    """
    patchlevel = self.patch[0]
    diff = FixDiffLineEnding(self.patch[1])
    # Allow overwriting the root with an environment variable.
    root = self.env.get("GCLIENT_PATCH_ROOT", None)
    if len(self.patch) >= 3 and root is None:
        root = self.patch[2]
    command = [
        self.getCommand("patch"),
        '-p%d' % patchlevel,
        '--remove-empty-files',
        '--force',
        '--forward',
    ]
    dirname = os.path.join(self.builder.basedir, self.workdir)
    # Mark the directory so we don't try to update it later.
    # Use a context manager so the handle is closed deterministically;
    # the old open(...).write(...) leaked the file object.
    with open(os.path.join(dirname, ".buildbot-patched"), "w") as marker:
        marker.write("patched\n")
    # Update 'dirname' with the 'root' option. Make sure it is a subdirectory
    # of dirname.
    if (root and
            os.path.abspath(os.path.join(dirname, root)).startswith(
                os.path.abspath(dirname))):
        dirname = os.path.join(dirname, root)
    # Now apply the patch.
    c = runprocess.RunProcess(self.builder, command, dirname,
                              sendRC=False, timeout=self.timeout,
                              initialStdin=diff, environ=self.env)
    self.command = c
    d = c.start()
    d.addCallback(self._abandonOnFailure)
    if diff.find('DEPS') != -1:
        d.addCallback(self.doVCUpdateOnPatch)
        d.addCallback(self._abandonOnFailure)
    return d
def _doRevertRemoveSignalFile(self):
    """Remove the file that signals that the checkout is patched.

    Must be called after a revert has been done and the patch residues
    have been removed.  Returns a deferred firing when the removal
    command finishes (the chain is abandoned on failure).
    """
    marker = os.path.join(self.builder.basedir, self.srcdir,
                          '.buildbot-patched')
    workdir = os.path.join(self.builder.basedir, self.srcdir)
    proc = runprocess.RunProcess(self.builder,
                                 _RemoveFileCommand(marker), workdir,
                                 sendRC=False, timeout=self.timeout,
                                 keepStdout=True, environ=self.env)
    self.command = proc
    d = proc.start()
    d.addCallback(self._abandonOnFailure)
    return d
def testEnvironPythonPath(self):
    """A PYTHONPATH entry supplied via environ is augmented by
    RunProcess rather than used verbatim."""
    b = FakeSlaveBuilder(False, self.basedir)
    s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir,
                              environ={"PYTHONPATH": 'a'})
    d = s.start()

    def check(ign):
        headers = "".join([
            list(update.values())[0]
            for update in b.updates if list(update) == ["header"]
        ])
        # r'\b' is a regex word boundary; the previous non-raw '\b'
        # was a literal backspace byte, so the pattern could never
        # match and the assertion was vacuous.
        # NOTE(review): re.match only anchors at the start of the
        # headers string; re.search may be the real intent — confirm.
        self.failUnless(
            not re.match(r'\bPYTHONPATH=a%s' % (os.pathsep), headers),
            "got:\n" + headers)
    d.addCallback(check)
    return d
def testEnvironArray(self):
    """A list-valued environ entry is joined with os.pathsep before
    being passed to the child process."""
    b = FakeSlaveBuilder(False, self.basedir)
    s = runprocess.RunProcess(b, stdoutCommand('hello'), self.basedir,
                              environ={"FOO": ['a', 'b']})
    d = s.start()

    def check(ign):
        # list(...) wrappers so this works on Python 3 as well,
        # matching the sibling tests (e.g. testEnvironExpandVar).
        headers = "".join([
            list(update.values())[0]
            for update in b.updates if list(update) == ["header"]
        ])
        # r'\b' is a regex word boundary; the previous non-raw '\b'
        # was a literal backspace byte, so the pattern could never
        # match and the assertion was vacuous.
        # NOTE(review): re.match only anchors at the start of the
        # headers string; re.search may be the real intent — confirm.
        self.failUnless(
            not re.match(r'\bFOO=a%sb\b' % (os.pathsep), headers),
            "got:\n" + headers)
    d.addCallback(check)
    return d
def _checkout(self, res):
    """Run 'mtn checkout' into the full source directory.

    Checks out self.revision when given, always pinning the configured
    branch.  Returns the deferred from the command.
    """
    cmd = [self.mtn, 'checkout', self._fullSrcdir(), '--db', self.database]
    if self.revision:
        cmd += ['--revision', self.revision]
    cmd += ['--branch', self.branch]
    proc = runprocess.RunProcess(self.builder, cmd, self.builder.basedir,
                                 environ=self.env, sendRC=False,
                                 timeout=self.timeout,
                                 maxTime=self.maxTime, keepStdout=True,
                                 usePTY=False,
                                 logEnviron=self.logEnviron)
    return proc.start()
def getGclientConfigCommand(self):
    """Build (but do not start) the RunProcess for 'gclient config'.

    Uses --spec when a gclient spec is configured, otherwise the svn
    URL, and always points --cache-dir at a shared git_cache directory
    three levels above the builder basedir.
    """
    workdir = os.path.join(self.builder.basedir, self.srcdir)
    cmd = [chromium_utils.GetGClientCommand(), 'config']
    if self.gclient_spec:
        cmd.append('--spec=%s' % self.gclient_spec)
    else:
        cmd.append(self.svnurl)
    git_cache_dir = os.path.abspath(os.path.join(
        self.builder.basedir, os.pardir, os.pardir, os.pardir, 'git_cache'))
    cmd.append('--cache-dir=' + git_cache_dir)
    return runprocess.RunProcess(
        self.builder, cmd, workdir,
        sendRC=False, timeout=self.timeout,
        keepStdout=True, environ=self.env)
def writeSourcedata(self, res):
    """Write the sourcedata file and remove any dead source directory.

    If '<srcdir>.dead' exists, a removal command is started and the
    sourcedata write happens while it runs; the returned deferred (or
    None when there was nothing to remove) fires when the removal
    completes.
    """
    d = None
    dead_dir = os.path.join(self.builder.basedir, self.srcdir + '.dead')
    if os.path.isdir(dead_dir):
        msg = 'Removing dead source dir'
        self.sendStatus({'header': msg + '\n'})
        log.msg(msg)
        command = self._RemoveDirectoryCommand(dead_dir)
        c = runprocess.RunProcess(self.builder, command,
                                  self.builder.basedir,
                                  sendRC=0, timeout=self.rm_timeout,
                                  environ=self.env)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
    # Use a context manager so the handle is closed (and the data
    # flushed) deterministically; the old open(...).write(...) leaked
    # the file object.
    with open(self.sourcedatafile, 'w') as f:
        f.write(self.sourcedata)
    return d
def do_test_pgroup(self, usePTY, useProcGroup=True,
                   expectChildSurvival=False):
    """Spawn a process that itself spawns a child, kill the RunProcess,
    and check that the whole process group died.

    If expectChildSurvival is True the child is asserted to be alive
    after the kill; otherwise both processes must be dead.  Returns a
    deferred firing once both checks complete.
    """
    # test that a process group gets killed
    parent_pidfile = self.newPidfile()
    self.parent_pid = None
    child_pidfile = self.newPidfile()
    self.child_pid = None
    b = FakeSlaveBuilder(False, self.basedir)
    s = runprocess.RunProcess(b, scriptCommand(
        'spawn_child', parent_pidfile, child_pidfile),
        self.basedir, usePTY=usePTY, useProcGroup=useProcGroup)
    runproc_d = s.start()
    # wait for both processes to start up, then call s.kill
    parent_pidfile_d = self.waitForPidfile(parent_pidfile)
    child_pidfile_d = self.waitForPidfile(child_pidfile)
    pidfiles_d = defer.gatherResults([parent_pidfile_d, child_pidfile_d])

    def got_pids(pids):
        self.parent_pid, self.child_pid = pids
    pidfiles_d.addCallback(got_pids)

    def kill(_):
        s.kill("diaf")
    pidfiles_d.addCallback(kill)
    # check that both processes are dead after RunProcess is done
    d = defer.gatherResults([pidfiles_d, runproc_d])

    def check_dead(_):
        self.assertDead(self.parent_pid)
        if expectChildSurvival:
            self.assertAlive(self.child_pid)
        else:
            self.assertDead(self.child_pid)
    d.addCallback(check_dead)
    return d
def test_sigterm(self, interruptSignal=None):
    """Check that kill() delivers SIGTERM when sigtermTime is set.

    Wraps the spawned process in a Mock that records whether a TERM
    signal was requested before forwarding it, then kills the
    RunProcess and asserts both that TERM was seen and that the
    process died.  Returns a deferred firing when both checks pass.
    """
    # Tests that the process will receive SIGTERM if sigtermTimeout
    # is not None
    pidfile = self.newPidfile()
    self.pid = None
    b = FakeSlaveBuilder(False, self.basedir)
    s = runprocess.RunProcess(b, scriptCommand(
        'write_pidfile_and_sleep', pidfile), self.basedir, sigtermTime=1)
    runproc_d = s.start()
    pidfile_d = self.waitForPidfile(pidfile)
    self.receivedSIGTERM = False

    def check_alive(pid):
        # Create a mock process that will check if we receive SIGTERM
        mock_process = Mock(wraps=s.process)
        mock_process.pgid = None  # Skips over group SIGTERM
        mock_process.pid = pid
        process = s.process

        def _mock_signalProcess(sig):
            if sig == "TERM":
                self.receivedSIGTERM = True
            process.signalProcess(sig)
        mock_process.signalProcess = _mock_signalProcess
        s.process = mock_process
        self.pid = pid  # for use in check_dead
        # test that the process is still alive
        self.assertAlive(pid)
        # and tell the RunProcess object to kill it
        s.kill("diaf")
    pidfile_d.addCallback(check_alive)

    def check_dead(_):
        self.failUnlessEqual(self.receivedSIGTERM, True)
        self.assertDead(self.pid)
    runproc_d.addCallback(check_dead)
    return defer.gatherResults([pidfile_d, runproc_d])
def doVCFull(self):
    """Create a fresh git checkout.

    When no specific revision was requested and 'shallow' is set, a
    depth-1 clone (optionally with a --reference repository) is used;
    otherwise the source directory is created and 'git init' is run,
    with _didInit chained in either case.
    """
    git = self.getCommand("git")
    shallow_ok = (not self.args.get('revision')
                  and self.args.get('shallow'))
    if not shallow_ok:
        os.makedirs(self._fullSrcdir())
        return self._dovccmd(['init'], self._didInit)
    # If they didn't ask for a specific revision, we can get away with a
    # shallow clone.
    cmd = [git, 'clone', '--depth', '1']
    # If we have a reference repository, pass it to the clone command
    if self.reference:
        cmd.extend(['--reference', self.reference])
    cmd.extend([self.repourl, self._fullSrcdir()])
    proc = runprocess.RunProcess(self.builder, cmd, self.builder.basedir,
                                 sendRC=False, timeout=self.timeout,
                                 maxTime=self.maxTime, usePTY=False)
    self.command = proc
    d = proc.start()
    d.addCallback(self._didInit)
    return d
def testBadCommand(self):
    """An exception raised while starting the command must propagate
    back to the master as stderr text and an AbandonChain failure."""
    builder = FakeSlaveBuilder(False, self.basedir)
    proc = runprocess.RunProcess(builder,
                                 ['command_that_doesnt_exist.exe'],
                                 self.basedir)
    proc.workdir = 1  # cause an exception
    d = proc.start()

    def verify(err):
        err.trap(AbandonChain)
        # Here we're checking that the exception starting up the command
        # actually gets propogated back to the master.
        stderr_text = "".join(
            u['stderr'] for u in builder.updates if 'stderr' in u)
        self.failUnless("TypeError" in stderr_text, stderr_text)
    d.addBoth(verify)
    d.addBoth(lambda _: self.flushLoggedErrors())
    return d
def _tryChmod(self, rc):
    """After a failed removal, chmod the tree writable and retry.

    If rc is 0 the removal already succeeded and nothing is done.
    Otherwise a recursive 'chmod u+rwx' runs (via 'find' on FreeBSD to
    work around its broken 'chmod -R') and _clobber is re-invoked with
    chmodDone=True.  Returns a deferred firing with the final rc.
    """
    assert isinstance(rc, int)
    if rc == 0:
        return defer.succeed(0)
    # Attempt a recursive chmod and re-try the rm -rf after.
    target = os.path.join(self.builder.basedir, self.dir)
    if sys.platform.startswith('freebsd'):
        # Work around a broken 'chmod -R' on FreeBSD (it tries to recurse into a
        # directory for which it doesn't have permission, before changing that
        # permission) by running 'find' instead
        cmd = ["find", target, '-exec', 'chmod', 'u+rwx', '{}', ';']
    else:
        cmd = ["chmod", "-Rf", "u+rwx", target]
    proc = runprocess.RunProcess(self.builder, cmd, self.builder.basedir,
                                 sendRC=0, timeout=self.timeout,
                                 maxTime=self.maxTime,
                                 logEnviron=self.logEnviron, usePTY=False)
    self.command = proc
    d = proc.start()
    d.addCallback(lambda dummy: self._clobber(dummy, True))
    return d
def testUserParameter(self):
    """Setting the 'user' parameter must make RunProcess wrap the
    command with 'sudo -u <user> -H'."""
    user = '******'
    cmd = ['whatever']
    builder = FakeSlaveBuilder(False, self.basedir)
    proc = runprocess.RunProcess(builder, cmd, self.basedir, user=user)

    # Override the '_spawnProcess' method so that we can verify
    # that the command is run using 'sudo', as we expect.
    def _spawnProcess(*args, **kwargs):
        executable, spawned_args = args[1], args[2]
        self.assertEqual(executable, 'sudo')
        self.assertEqual(spawned_args, ['sudo', '-u', user, '-H'] + cmd)
    proc._spawnProcess = _spawnProcess
    proc.start()
    return proc.finished(None, 0)
def doVCUpdate(self):
    """Update an existing bzr checkout.

    With a pinned revision, 'bzr pull --overwrite -r <rev>' from the
    branch recorded in sourcedata; otherwise a plain 'bzr update'.
    The tree is cleaned afterwards via doVCClean.
    """
    bzr = self.getCommand('bzr')
    if self.revision:
        cmd = [bzr, 'pull', self.sourcedata.split('\n')[0],
               '-q', '--overwrite', '-r', str(self.revision)]
    else:
        cmd = [bzr, 'update', '-q']
    workdir = os.path.join(self.builder.basedir, self.srcdir)
    proc = runprocess.RunProcess(self.builder, cmd, workdir,
                                 sendRC=False, timeout=self.timeout,
                                 logEnviron=False)
    self.command = proc
    d = proc.start()
    d.addCallback(self.doVCClean)
    return d