def install_prerequisites(self):
  """Wipes any previous Rietveld/App Engine SDK checkouts and bootstraps
  a fresh infra checkout via depot_tools' fetch.py.

  NOTE(review): this chunk is truncated mid-function (the body of the
  final ``else:`` continues outside this view).
  """
  # Start from a clean slate: remove stale checkout directories.
  if os.path.exists(self.rietveld):
    print "Removing old rietveld dir"
    shutil.rmtree(self.rietveld)
  sdk_path = os.path.join(self.base_dir, 'google_appengine')
  if os.path.exists(sdk_path):
    print "Removing old appengine SDK dir"
    shutil.rmtree(sdk_path)
  # Disable depot_tools self-update while fetching; `previous` is saved
  # so the caller's setting can be restored later.
  previous = os.environ.get('DEPOT_TOOLS_UPDATE')
  os.environ['DEPOT_TOOLS_UPDATE'] = '0'
  try:
    if not os.path.isfile(os.path.join(self.infra, '.gclient')):
      # No existing gclient solution: do a full checkout.
      print('Checking out infra...')
      shutil.rmtree(self.infra, ignore_errors=True)
      try:
        os.makedirs(self.infra)
        subprocess2.call([
            sys.executable,
            os.path.join(DEPOT_TOOLS, 'fetch.py'), '--force', 'infra',
            '--managed=true'
        ], cwd=self.infra)
      except (OSError, subprocess2.CalledProcessError), e:
        raise Failure('Failed to clone infra. \n%s' % e)
    else:
      # (truncated here — sync branch continues past this chunk)
def test_creationflags(self):
    """call() accepts the Windows-only creationflags keyword."""
    new_console_flag = 16  # CREATE_NEW_CONSOLE; hardcoded to avoid win32all
    sys.stderr.write(" a DOS box should flash briefly ...\n")
    command = sys.executable + ' -c "import time; time.sleep(0.25)"'
    subprocess.call(command, creationflags=new_console_flag)
def install_prerequisites(self):
  """Removes stale Rietveld and App Engine SDK directories, then checks
  out infra with fetch.py when no .gclient solution exists yet.

  NOTE(review): truncated mid-function; the ``else:`` branch continues
  outside this chunk.
  """
  if os.path.exists(self.rietveld):
    print "Removing old rietveld dir"
    shutil.rmtree(self.rietveld)
  sdk_path = os.path.join(self.base_dir, 'google_appengine')
  if os.path.exists(sdk_path):
    print "Removing old appengine SDK dir"
    shutil.rmtree(sdk_path)
  # Temporarily disable depot_tools auto-update for the child commands.
  previous = os.environ.get('DEPOT_TOOLS_UPDATE')
  os.environ['DEPOT_TOOLS_UPDATE'] = '0'
  try:
    if not os.path.isfile(os.path.join(self.infra, '.gclient')):
      print('Checking out infra...')
      shutil.rmtree(self.infra, ignore_errors=True)
      try:
        os.makedirs(self.infra)
        subprocess2.call(
            [sys.executable, os.path.join(DEPOT_TOOLS, 'fetch.py'),
             '--force', 'infra', '--managed=true'],
            cwd=self.infra)
      except (OSError, subprocess2.CalledProcessError), e:
        raise Failure('Failed to clone infra. \n%s' % e)
    else:
      # (truncated here — sync branch continues past this chunk)
def _start_roll(self, last_roll_revision, new_roll_revision):
  """Creates a fresh roll branch, rolls the dependency to
  new_roll_revision, and uploads the CL via `git cl upload`.

  The working tree is force-reset before and after; the temporary
  roll branch is always deleted in the finally block.
  """
  roll_branch = '%s_roll' % self._project
  cwd_kwargs = {'cwd': self._path_to_chrome}
  # Clean up any leftovers from a previous (possibly aborted) roll.
  subprocess2.check_call(['git', 'clean', '-d', '-f'], **cwd_kwargs)
  subprocess2.call(['git', 'rebase', '--abort'], **cwd_kwargs)
  subprocess2.call(['git', 'branch', '-D', roll_branch], **cwd_kwargs)
  subprocess2.check_call(['git', 'checkout', 'origin/master', '-f'],
                         **cwd_kwargs)
  subprocess2.check_call(['git', 'checkout', '-b', roll_branch, '-t',
                          'origin/master', '-f'], **cwd_kwargs)
  try:
    # roll-dep-svn edits DEPS to point at new_roll_revision.
    subprocess2.check_call(['roll-dep-svn', self._path_to_project,
                            new_roll_revision], **cwd_kwargs)
    subprocess2.check_call(['git', 'add', 'DEPS'], **cwd_kwargs)
    subprocess2.check_call(['git', 'commit', '--no-edit'], **cwd_kwargs)
    commit_msg = subprocess2.check_output(
        ['git', 'log', '-n1', '--format=%B', 'HEAD'],
        **cwd_kwargs).decode('utf-8')
    if self._notry:
      commit_msg += NO_TRY_STR % { 'project': self._project }
    upload_cmd = ['git', 'cl', 'upload', '--bypass-hooks', '-f']
    if self._cq_dry_run:
      upload_cmd.append('--cq-dry-run')
    else:
      upload_cmd.append('--use-commit-queue')
    if self._cq_extra_trybots:
      commit_msg += ('\n' + CQ_INCLUDE_TRYBOTS +
                     ','.join(self._cq_extra_trybots))
    tbr = '\nTBR='
    emails = self._emails_to_cc_on_rolls()
    if emails:
      emails_str = ','.join(emails)
      tbr += emails_str
      upload_cmd.extend(['--cc', emails_str, '--send-mail'])
    commit_msg += tbr
    if self._include_commit_log:
      # Append the rolled range's one-line log to the CL description.
      log_cmd = ['git', 'log', '--format=%h %ae %s',
                 '%s..%s' % (last_roll_revision, new_roll_revision)]
      git_log = subprocess2.check_output(log_cmd, cwd=self._project_git_dir)
      commit_msg += '\n\nCommits in this roll:\n' + git_log.decode('utf-8')
    upload_cmd.extend(['-m', commit_msg])
    subprocess2.check_call(upload_cmd, **cwd_kwargs)
  finally:
    # Always restore master and drop the temporary branch.
    subprocess2.check_call(['git', 'checkout', 'origin/master', '-f'],
                           **cwd_kwargs)
    subprocess2.check_call(
        ['git', 'branch', '-D', roll_branch], **cwd_kwargs)

  # FIXME: It's easier to pull the issue id from rietveld rather than
  # parse it from the safely-roll-deps output. Once we inline
  # safely-roll-deps into this script this can go away.
  search_result = self._search_for_active_roll()
  if search_result:
    self._rietveld.add_comment(search_result['issue'],
                               self.ROLL_BOT_INSTRUCTIONS)
def test_startupinfo(self):
    """call() silently ignores startupinfo for console processes."""
    # Hardcoded constants, because we do not want to depend on win32all.
    use_show_window = 1  # STARTF_USESHOWWINDOW
    maximize = 3         # SW_MAXIMIZE
    info = subprocess.STARTUPINFO()
    info.dwFlags = use_show_window
    info.wShowWindow = maximize
    # Python is a console process, so wShowWindow has no visible effect;
    # the argument must simply be accepted without error.
    subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                    startupinfo=info)
def run(args):
  """Dispatches args[0] to an infra tool; remaining args are forwarded."""
  if not args:
    return usage()
  tool_name = args[0]
  # Prefer a checked-out infra Python tool over a cipd-packaged binary.
  infra_dir = os.path.join(
      TARGET_DIR, 'infra', 'infra', 'tools', *tool_name.split('.'))
  cipd_file = os.path.join(TARGET_DIR, 'infra', 'cipd', tool_name)
  if sys.platform.startswith('win'):
    cipd_file += '.exe'
  has_main = os.path.isfile(os.path.join(infra_dir, '__main__.py'))
  if os.path.isdir(infra_dir) and has_main:
    cmd = [sys.executable,
           os.path.join(TARGET_DIR, 'infra', 'run.py'),
           'infra.tools.%s' % tool_name]
  elif os.path.isfile(cipd_file) and is_exe(cipd_file):
    cmd = [cipd_file]
  else:
    print('Unknown tool "%s"' % tool_name, file=sys.stderr)
    return usage()
  # Forward the remaining command-line arguments to the tool.
  return subprocess.call(cmd + args[1:])
def run(args):
  """Dispatches args[0] to an infra tool; remaining args are forwarded.

  Looks for a checked-out infra Python tool first, then a cipd-packaged
  binary; falls back to usage() for unknown tools.
  """
  if not args:
    return usage()
  tool_name = args[0]
  # Check to see if it is a infra tool first.
  infra_dir = os.path.join(
      TARGET_DIR, 'infra', 'infra', 'tools', *tool_name.split('.'))
  cipd_file = os.path.join(TARGET_DIR, 'infra', 'cipd', tool_name)
  if sys.platform.startswith('win'):
    cipd_file += '.exe'
  if (os.path.isdir(infra_dir)
      and os.path.isfile(os.path.join(infra_dir, '__main__.py'))):
    cmd = [
        sys.executable, os.path.join(TARGET_DIR, 'infra', 'run.py'),
        'infra.tools.%s' % tool_name]
  elif os.path.isfile(cipd_file) and is_exe(cipd_file):
    cmd = [cipd_file]
  else:
    # Fixed: `print >>sys.stderr, ...` is Python-2-only syntax; write to
    # stderr in a version-agnostic way, matching the file's other copy
    # of run() which already uses Python-3-style printing.
    sys.stderr.write('Unknown tool "%s"\n' % tool_name)
    return usage()

  # Add the remaining arguments.
  cmd.extend(args[1:])
  return subprocess.call(cmd)
def CMDrebase(parser, args):
  """rebase current branch on top of svn repo"""
  # Thin wrapper around `git svn rebase`, provided to help avoid an
  # accidental `git svn dcommit`. The parser is deliberately unused:
  # every argument is forwarded verbatim to git-svn.
  cmd = ['git', 'svn', 'rebase']
  cmd.extend(args)
  return subprocess2.call(cmd)
def run(args):
  """Dispatches args[0] to an infra tool; remaining args are forwarded.

  A checked-out infra Python tool (run under vpython) is preferred over
  a cipd-packaged binary; unknown tools fall back to usage().
  """
  if not args:
    return usage()
  # Fixed: copy the environment instead of aliasing os.environ, so the
  # PYTHONPATH mutation below does not leak into the parent process.
  env = os.environ.copy()
  tool_name = args[0]
  # Check to see if it is a infra tool first.
  tool_dir = os.path.join(INFRA_DIR, 'infra', 'tools', *tool_name.split('.'))
  cipd_file = os.path.join(INFRA_DIR, 'cipd', tool_name)
  if sys.platform.startswith('win'):
    cipd_file += '.exe'
  if (os.path.isdir(tool_dir)
      and os.path.isfile(os.path.join(tool_dir, '__main__.py'))):
    cmd = [
        'vpython', '-vpython-spec', os.path.join(INFRA_DIR, '.vpython'),
        '-m', 'infra.tools.%s' % tool_name]
    # Augment PYTHONPATH so that infra.tools.<tool_name> can be found
    # without running from that directory, which would mess up any
    # relative paths passed to the tool.
    # Fixed: PYTHONPATH may be unset; the old code raised KeyError then.
    existing = env.get('PYTHONPATH', '')
    if existing:
      env['PYTHONPATH'] = INFRA_DIR + os.pathsep + existing
    else:
      env['PYTHONPATH'] = INFRA_DIR
  elif os.path.isfile(cipd_file) and is_exe(cipd_file):
    cmd = [cipd_file]
  else:
    print('Unknown tool "%s"' % tool_name, file=sys.stderr)
    return usage()

  # Add the remaining arguments.
  cmd.extend(args[1:])
  return subprocess.call(cmd, env=env)
def call(self, *args):
  """Invokes the wrapped script with *args*, honoring the boto config."""
  sub_env = dict(os.environ)
  if self.boto_path:
    # Point gsutil at the configured boto credentials file.
    sub_env['AWS_CREDENTIAL_FILE'] = self.boto_path
  return subprocess2.call((sys.executable, self.path) + args,
                          env=sub_env,
                          timeout=self.timeout)
def return_code(*args, **kwargs):
  """subprocess.call() passing shell=True on Windows for git and
  subprocess2.VOID for stdout and stderr."""
  defaults = (('shell', NEED_SHELL),
              ('stdout', subprocess2.VOID),
              ('stderr', subprocess2.VOID))
  for key, value in defaults:
    kwargs.setdefault(key, value)
  return subprocess2.call(*args, **kwargs)
def call(self, *args):
  """Runs the wrapped tool, adding --bypass_prodaccess when configured."""
  command = [sys.executable, self.path]
  if self.bypass_prodaccess:
    command += ['--bypass_prodaccess']
  command += list(args)
  return subprocess2.call(command, env=self.get_sub_env(),
                          timeout=self.timeout)
def test_call_kwargs(self):
    """call() forwards the env= keyword to the child process."""
    child_env = os.environ.copy()
    child_env["FRUIT"] = "banana"
    # Child exits 1 iff it sees FRUIT=banana in its environment.
    code = ("import sys, os;"
            'sys.exit(os.getenv("FRUIT")=="banana")')
    rc = subprocess.call([sys.executable, "-c", code], env=child_env)
    self.assertEqual(rc, 1)
def _start_roll(self, new_roll_revision):
  """Creates a roll branch, rolls the dependency to new_roll_revision via
  roll-dep, and uploads the CL to the commit queue with `git cl upload`.

  The temporary roll branch is always deleted in the finally block.
  """
  roll_branch = '%s_roll' % self._project
  cwd_kwargs = {'cwd': self._path_to_chrome}
  # Clean up any leftovers from a previous (possibly aborted) roll.
  subprocess2.check_call(['git', 'clean', '-d', '-f'], **cwd_kwargs)
  subprocess2.call(['git', 'rebase', '--abort'], **cwd_kwargs)
  subprocess2.call(['git', 'branch', '-D', roll_branch], **cwd_kwargs)
  subprocess2.check_call(['git', 'checkout', 'origin/master', '-f'],
                         **cwd_kwargs)
  subprocess2.check_call(['git', 'checkout', '-b', roll_branch, '-t',
                          'origin/master', '-f'], **cwd_kwargs)
  try:
    # roll-dep edits DEPS to point at new_roll_revision.
    subprocess2.check_call(['roll-dep', self._path_to_project,
                            new_roll_revision], **cwd_kwargs)
    subprocess2.check_call(['git', 'add', 'DEPS'], **cwd_kwargs)
    subprocess2.check_call(['git', 'commit', '--no-edit'], **cwd_kwargs)
    commit_msg = subprocess2.check_output(['git', 'log', '-n1',
                                           '--format=%B', 'HEAD'],
                                          **cwd_kwargs)
    upload_cmd = ['git', 'cl', 'upload', '--bypass-hooks',
                  '--use-commit-queue', '-f']
    if self._cq_extra_trybots:
      commit_msg += ('\n\n' + CQ_EXTRA_TRYBOTS +
                     ','.join(self._cq_extra_trybots))
    tbr = '\nTBR='
    emails = self._emails_to_cc_on_rolls()
    if emails:
      emails_str = ','.join(emails)
      tbr += emails_str
      upload_cmd.extend(['--cc', emails_str, '--send-mail'])
    commit_msg += tbr
    upload_cmd.extend(['-m', commit_msg])
    subprocess2.check_call(upload_cmd, **cwd_kwargs)
  finally:
    # Always restore master and drop the temporary branch.
    subprocess2.check_call(['git', 'checkout', 'origin/master', '-f'],
                           **cwd_kwargs)
    subprocess2.check_call(
        ['git', 'branch', '-D', roll_branch], **cwd_kwargs)

  # FIXME: It's easier to pull the issue id from rietveld rather than
  # parse it from the safely-roll-deps output. Once we inline
  # safely-roll-deps into this script this can go away.
  search_result = self._search_for_active_roll()
  if search_result:
    self._rietveld.add_comment(search_result['issue'],
                               self.ROLL_BOT_INSTRUCTIONS)
def test_call_kwargs(self):
    """call() passes a custom environment through to the child."""
    fruity_env = os.environ.copy()
    fruity_env["FRUIT"] = "banana"
    # The child's exit status (1) proves the env var reached it.
    rc = subprocess.call(
        [sys.executable, "-c",
         'import sys, os;'
         'sys.exit(os.getenv("FRUIT")=="banana")'],
        env=fruity_env)
    self.assertEqual(rc, 1)
def test_call_string(self):
    """call() accepts a plain string command on UNIX."""
    # Build a tiny shell script that exits 47 and make it executable.
    fd, script_path = self.mkstemp()
    os.write(fd, "#!/bin/sh\n")
    os.write(fd, "exec %s -c 'import sys; sys.exit(47)'\n" % sys.executable)
    os.close(fd)
    os.chmod(script_path, 0o700)
    exit_status = subprocess.call(script_path)
    os.remove(script_path)
    self.assertEqual(exit_status, 47)
def test_call_string(self):
    """call() accepts a plain string command on UNIX."""
    # Build a tiny shell script that exits 47.
    f, fname = self.mkstemp()
    os.write(f, "#!/bin/sh\n")
    os.write(f, "exec %s -c 'import sys; sys.exit(47)'\n" % sys.executable)
    os.close(f)
    # Fixed: use the modern 0o700 octal literal (same bits as the legacy
    # 0700 form, which is a syntax error under Python 3); this also
    # matches the file's other copy of this test.
    os.chmod(fname, 0o700)
    rc = subprocess.call(fname)
    os.remove(fname)
    self.assertEqual(rc, 47)
def check_call(*popenargs, **kwargs):
    """Run a command and wait; return 0 or raise CalledProcessError.

    A zero exit status returns normally; any other status raises
    subprocess.CalledProcessError with the status in its ``returncode``
    attribute.  Arguments mirror the Popen constructor, e.g.

    check_call(["ls", "-l"])
    """
    status = subprocess.call(*popenargs, **kwargs)
    if status:
        # Prefer an explicit args= keyword for the error; otherwise fall
        # back to the first positional argument.
        failed_cmd = kwargs.get("args")
        if failed_cmd is None:
            failed_cmd = popenargs[0]
        raise subprocess.CalledProcessError(status, failed_cmd)
    return status
def _call_git(self, args, **kwargs):
  """Like check_call but doesn't throw on failure."""
  # Fill in the standard defaults without clobbering caller overrides.
  for key, default in (('cwd', self.project_path),
                       ('stdout', self.VOID),
                       ('timeout', GLOBAL_TIMEOUT)):
    kwargs.setdefault(key, default)
  return subprocess2.call(['git'] + args, **kwargs)
def test_stdout_filedes_of_stdout(self):
    """stdout may be passed as file descriptor 1 (#1531862)."""
    # The child exits with os.write()'s return value: 2 bytes written.
    cmd = r"import sys, os; sys.exit(os.write(sys.stdout.fileno(), '.\n'))"
    rc = subprocess.call([sys.executable, "-c", cmd], stdout=1)
    # Fixed: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(rc, 2)
def call(self, *args):
  """Runs the tool pinned to self.version, with *args* appended."""
  command = [sys.executable, self.path, '--force-version', self.version]
  command += list(args)
  return subprocess2.call(command, env=self.get_sub_env(),
                          timeout=self.timeout)
def call(self, *args):
  """Runs the tool under vpython3 with *args* appended."""
  command = [self.VPYTHON3, self.path] + list(args)
  return subprocess2.call(command, env=self.get_sub_env())
# NOTE(review): mid-class fragment — the try below continues the body of
# install_prerequisites, whose header lies outside this chunk; the final
# `finally` pairs with an enclosing try that is also out of view.
try:
  os.makedirs(self.infra)
  # Fresh checkout of infra via depot_tools' fetch.py.
  subprocess2.call([
      sys.executable,
      os.path.join(DEPOT_TOOLS, 'fetch.py'), '--force', 'infra',
      '--managed=true'
  ], cwd=self.infra)
except (OSError, subprocess2.CalledProcessError), e:
  raise Failure('Failed to clone infra. \n%s' % e)
else:
  # A checkout already exists: just gclient-sync it.
  print('Syncing infra...')
  try:
    subprocess2.call([
        sys.executable,
        os.path.join(DEPOT_TOOLS, 'gclient.py'), 'sync', '--force'
    ], cwd=self.infra)
  except (OSError, subprocess2.CalledProcessError), e:
    raise Failure('Failed to sync infra. \n%s' % e)
finally:
  # Restore the caller's DEPOT_TOOLS_UPDATE setting (saved earlier,
  # outside this chunk, in `previous`).
  if previous is None:
    del os.environ['DEPOT_TOOLS_UPDATE']
  else:
    os.environ['DEPOT_TOOLS_UPDATE'] = previous

def start_server(self, verbose=False):
  # Boots a local Rietveld test instance in a fresh temp dir on a free
  # port; truncated here — the rest of the method is outside this chunk.
  self.install_prerequisites()
  assert not self.tempdir
  self.tempdir = tempfile.mkdtemp(prefix='rietveld_test')
  self.port = find_free_port(10000)
# NOTE(review): mid-class fragment — this if/else is the body of
# install_prerequisites' outer try; the `finally` pairs with that
# enclosing try, whose header is outside this chunk.
if not os.path.isfile(os.path.join(self.infra, '.gclient')):
  # No gclient solution yet: do a full checkout with fetch.py.
  print('Checking out infra...')
  shutil.rmtree(self.infra, ignore_errors=True)
  try:
    os.makedirs(self.infra)
    subprocess2.call(
        [sys.executable, os.path.join(DEPOT_TOOLS, 'fetch.py'), '--force',
         'infra', '--managed=true'],
        cwd=self.infra)
  except (OSError, subprocess2.CalledProcessError), e:
    raise Failure('Failed to clone infra. \n%s' % e)
else:
  # Existing checkout: just gclient-sync it.
  print('Syncing infra...')
  try:
    subprocess2.call(
        [sys.executable, os.path.join(DEPOT_TOOLS, 'gclient.py'), 'sync',
         '--force'],
        cwd=self.infra)
  except (OSError, subprocess2.CalledProcessError), e:
    raise Failure('Failed to sync infra. \n%s' % e)
finally:
  # Restore the caller's DEPOT_TOOLS_UPDATE setting (saved in `previous`
  # before this chunk).
  if previous is None:
    del os.environ['DEPOT_TOOLS_UPDATE']
  else:
    os.environ['DEPOT_TOOLS_UPDATE'] = previous

def start_server(self, verbose=False):
  # Boots a local Rietveld test instance; truncated at the end of this
  # chunk — the remainder of the method is out of view.
  self.install_prerequisites()
  assert not self.tempdir
  self.tempdir = tempfile.mkdtemp(prefix='rietveld_test')
  self.port = find_free_port(10000)
  admin_port = find_free_port(self.port + 1)
def test_call_seq(self):
    """call() accepts a sequence argument and returns the exit status."""
    exit_status = subprocess.call(
        [sys.executable, "-c", "import sys; sys.exit(47)"])
    self.assertEqual(exit_status, 47)
def call(self, *args):
  """Runs the tool under vpython, pinned to self.version."""
  command = [self.VPYTHON, self.path, '--force-version', self.version]
  command += list(args)
  return subprocess2.call(command, env=self.get_sub_env())
def CMDupload(parser, args):
  """upload the current changelist to codereview"""
  # Option setup; parse_args re-binds `args` to the positional leftovers.
  parser.add_option('--bypass-hooks', action='store_true',
                    dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('-f', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('-m', dest='message', help='message for patch')
  parser.add_option('-r', '--reviewers', help='reviewer email addresses')
  parser.add_option('--cc', help='cc email addresses')
  parser.add_option('--send-mail', action='store_true',
                    help='send email to reviewer immediately')
  parser.add_option("--emulate_svn_auto_props", action="store_true",
                    dest="emulate_svn_auto_props",
                    help="Emulate Subversion's auto properties feature.")
  parser.add_option("--desc_from_logs", action="store_true",
                    dest="from_logs",
                    help="""Squashes git commit logs into change description and uses message as subject""")
  parser.add_option('-c', '--use-commit-queue', action='store_true',
                    help='tell the commit queue to commit this patchset')
  (options, args) = parser.parse_args(args)

  # Make sure index is up-to-date before running diff-index.
  RunGit(['update-index', '--refresh', '-q'], error_ok=True)
  if RunGit(['diff-index', 'HEAD']):
    print 'Cannot upload with a dirty tree. You must commit locally first.'
    return 1

  cl = Changelist()
  if args:
    base_branch = args[0]
  else:
    # Default to diffing against the "upstream" branch.
    base_branch = cl.GetUpstreamBranch()
    args = [base_branch + "..."]

  if not options.bypass_hooks and not options.force:
    # Presubmit hook may suggest reviewers; use them unless overridden.
    hook_results = cl.RunHook(committing=False,
                              upstream_branch=base_branch,
                              may_prompt=True,
                              verbose=options.verbose,
                              author=None)
    if not options.reviewers and hook_results.reviewers:
      options.reviewers = hook_results.reviewers

  # --no-ext-diff is broken in some versions of Git, so try to work around
  # this by overriding the environment (but there is still a problem if the
  # git config key "diff.external" is used).
  env = os.environ.copy()
  if 'GIT_EXTERNAL_DIFF' in env:
    del env['GIT_EXTERNAL_DIFF']
  subprocess2.call(
      ['git', 'diff', '--no-ext-diff', '--stat', '-M'] + args, env=env)

  upload_args = ['--assume_yes']  # Don't ask about untracked files.
  upload_args.extend(['--server', cl.GetRietveldServer()])
  if options.emulate_svn_auto_props:
    upload_args.append('--emulate_svn_auto_props')
  if options.from_logs and not options.message:
    print 'Must set message for subject line if using desc_from_logs'
    return 1

  change_desc = None

  if cl.GetIssue():
    # Existing issue: append a new patchset rather than creating one.
    if options.message:
      upload_args.extend(['--message', options.message])
    upload_args.extend(['--issue', cl.GetIssue()])
    print ("This branch is associated with issue %s. "
           "Adding patch to that issue." % cl.GetIssue())
  else:
    # New issue: build the description (from logs and/or the editor).
    log_desc = CreateDescriptionFromLog(args)
    change_desc = ChangeDescription(options.message, log_desc,
                                    options.reviewers)
    if not options.from_logs:
      change_desc.Update()
    if change_desc.IsEmpty():
      print "Description is empty; aborting."
      return 1
    upload_args.extend(['--message', change_desc.subject])
    upload_args.extend(['--description', change_desc.description])
    if change_desc.reviewers:
      upload_args.extend(['--reviewers', change_desc.reviewers])
  if options.send_mail:
    if not change_desc.reviewers:
      DieWithError("Must specify reviewers to send email.")
    upload_args.append('--send_mail')
  cc = ','.join(filter(None, (cl.GetCCList(), options.cc)))
  if cc:
    upload_args.extend(['--cc', cc])

  # Include the upstream repo's URL in the change -- this is useful for
  # projects that have their source spread across multiple repos.
  remote_url = None
  if settings.GetIsGitSvn():
    # URL is dependent on the current directory.
    data = RunGit(['svn', 'info'], cwd=settings.GetRoot())
    if data:
      keys = dict(line.split(': ', 1) for line in data.splitlines()
                  if ': ' in line)
      remote_url = keys.get('URL', None)
  else:
    if cl.GetRemoteUrl() and '/' in cl.GetUpstreamBranch():
      remote_url = (cl.GetRemoteUrl() + '@'
                    + cl.GetUpstreamBranch().split('/')[-1])
  if remote_url:
    upload_args.extend(['--base_url', remote_url])

  try:
    issue, patchset = upload.RealMain(['upload'] + upload_args + args)
  except KeyboardInterrupt:
    sys.exit(1)
  except:
    # If we got an exception after the user typed a description for their
    # change, back up the description before re-raising.
    if change_desc:
      backup_path = os.path.expanduser(DESCRIPTION_BACKUP_FILE)
      print '\nGot exception while uploading -- saving description to %s\n' \
          % backup_path
      backup_file = open(backup_path, 'w')
      backup_file.write(change_desc.description)
      backup_file.close()
    raise

  if not cl.GetIssue():
    cl.SetIssue(issue)
  cl.SetPatchset(patchset)

  if options.use_commit_queue:
    cl.SetFlag('commit', '1')
  return 0
def SendUpstream(parser, args, cmd):
  """Common code for CmdPush and CmdDCommit.

  Squashes the branch into a single commit, updates the changelog with
  metadata (e.g. pointer to review), pushes/dcommits the code upstream,
  then updates the review and closes it.
  """
  parser.add_option('--bypass-hooks', action='store_true',
                    dest='bypass_hooks',
                    help='bypass upload presubmit hook')
  parser.add_option('-m', dest='message', help="override review description")
  parser.add_option('-f', action='store_true', dest='force',
                    help="force yes to questions (don't prompt)")
  parser.add_option('-c', dest='contributor',
                    help="external contributor for patch (appended to " +
                         "description and used as author for git). Should be " +
                         "formatted as 'First Last <*****@*****.**>'")
  (options, args) = parser.parse_args(args)
  cl = Changelist()

  if not args or cmd == 'push':
    # Default to merging against our best guess of the upstream branch.
    args = [cl.GetUpstreamBranch()]

  if options.contributor:
    # Validate the "First Last <email>" shape before doing any work.
    if not re.match('^.*\s<\S+@\S+>$', options.contributor):
      print "Please provide contibutor as 'First Last <*****@*****.**>'"
      return 1

  base_branch = args[0]

  # Make sure index is up-to-date before running diff-index.
  RunGit(['update-index', '--refresh', '-q'], error_ok=True)
  if RunGit(['diff-index', 'HEAD']):
    print 'Cannot %s with a dirty tree. You must commit locally first.' % cmd
    return 1

  # This rev-list syntax means "show all commits not in my branch that
  # are in base_branch".
  upstream_commits = RunGit(['rev-list', '^' + cl.GetBranchRef(),
                             base_branch]).splitlines()
  if upstream_commits:
    print ('Base branch "%s" has %d commits '
           'not in this branch.' % (base_branch, len(upstream_commits)))
    print 'Run "git merge %s" before attempting to %s.' % (base_branch, cmd)
    return 1

  if cmd == 'dcommit':
    # This is the revision `svn dcommit` will commit on top of.
    svn_head = RunGit(['log', '--grep=^git-svn-id:', '-1',
                       '--pretty=format:%H'])
    extra_commits = RunGit(['rev-list', '^' + svn_head, base_branch])
    if extra_commits:
      print ('This branch has %d additional commits not upstreamed yet.'
             % len(extra_commits.splitlines()))
      print ('Upstream "%s" or rebase this branch on top of the upstream trunk '
             'before attempting to %s.' % (base_branch, cmd))
      return 1

  if not options.bypass_hooks and not options.force:
    author = None
    if options.contributor:
      # Extract the bare email address for the presubmit hook.
      author = re.search(r'\<(.*)\>', options.contributor).group(1)
    cl.RunHook(committing=True, upstream_branch=base_branch,
               may_prompt=True, verbose=options.verbose,
               author=author)

    if cmd == 'dcommit':
      # Check the tree status if the tree status URL is set.
      status = GetTreeStatus()
      if 'closed' == status:
        print ('The tree is closed. Please wait for it to reopen. Use '
               '"git cl dcommit -f" to commit on a closed tree.')
        return 1
      elif 'unknown' == status:
        print ('Unable to determine tree status. Please verify manually and '
               'use "git cl dcommit -f" to commit on a closed tree.')

  description = options.message
  if not description and cl.GetIssue():
    description = cl.GetDescription()

  if not description:
    print 'No description set.'
    print 'Visit %s/edit to set it.' % (cl.GetIssueURL())
    return 1

  if cl.GetIssue():
    description += "\n\nReview URL: %s" % cl.GetIssueURL()

  if options.contributor:
    description += "\nPatch from %s." % options.contributor
  print 'Description:', repr(description)

  branches = [base_branch, cl.GetBranchRef()]
  if not options.force:
    # Show the diffstat and give the user a last chance to bail out.
    subprocess2.call(['git', 'diff', '--stat'] + branches)
    ask_for_data('About to commit; enter to confirm.')

  # We want to squash all this branch's commits into one commit with the
  # proper description.
  # We do this by doing a "reset --soft" to the base branch (which keeps
  # the working copy the same), then dcommitting that.
  MERGE_BRANCH = 'git-cl-commit'
  # Delete the merge branch if it already exists.
  if RunGitWithCode(['show-ref', '--quiet', '--verify',
                     'refs/heads/' + MERGE_BRANCH])[0] == 0:
    RunGit(['branch', '-D', MERGE_BRANCH])

  # We might be in a directory that's present in this branch but not in the
  # trunk. Move up to the top of the tree so that git commands that expect a
  # valid CWD won't fail after we check out the merge branch.
  rel_base_path = RunGit(['rev-parse', '--show-cdup']).strip()
  if rel_base_path:
    os.chdir(rel_base_path)

  # Stuff our change into the merge branch.
  # We wrap in a try...finally block so if anything goes wrong,
  # we clean up the branches.
  retcode = -1
  try:
    RunGit(['checkout', '-q', '-b', MERGE_BRANCH])
    RunGit(['reset', '--soft', base_branch])
    if options.contributor:
      RunGit(['commit', '--author', options.contributor, '-m', description])
    else:
      RunGit(['commit', '-m', description])
    if cmd == 'push':
      # push the merge branch.
      remote, branch = cl.FetchUpstreamTuple()
      retcode, output = RunGitWithCode(
          ['push', '--porcelain', remote, 'HEAD:%s' % branch])
      logging.debug(output)
    else:
      # dcommit the merge branch.
      retcode, output = RunGitWithCode(['svn', 'dcommit', '--no-rebase',
                                        '--rmdir'])
  finally:
    # And then swap back to the original branch and clean up.
    RunGit(['checkout', '-q', cl.GetBranch()])
    RunGit(['branch', '-D', MERGE_BRANCH])

  if cl.GetIssue():
    # Recover the committed revision from the command output so the
    # review can be annotated and closed.
    if cmd == 'dcommit' and 'Committed r' in output:
      revision = re.match('.*?\nCommitted r(\\d+)', output,
                          re.DOTALL).group(1)
    elif cmd == 'push' and retcode == 0:
      match = (re.match(r'.*?([a-f0-9]{7})\.\.([a-f0-9]{7})$', l)
               for l in output.splitlines(False))
      match = filter(None, match)
      if len(match) != 1:
        DieWithError("Couldn't parse ouput to extract the committed hash:\n%s"
                     % output)
      revision = match[0].group(2)
    else:
      return 1
    viewvc_url = settings.GetViewVCUrl()
    if viewvc_url and revision:
      cl.description += ('\n\nCommitted: ' + viewvc_url + revision)
    print ('Closing issue '
           '(you may be prompted for your codereview password)...')
    cl.CloseIssue()
    cl.SetIssue(0)

  if retcode == 0:
    # Run the post-upstream hook if the project provides one.
    hook = POSTUPSTREAM_HOOK_PATTERN % cmd
    if os.path.isfile(hook):
      RunCommand([hook, base_branch], error_ok=True)

  return 0
def call(self, *args):
  """Runs the wrapped script with this interpreter, forwarding *args*."""
  command = (sys.executable, self.path) + args
  return subprocess2.call(command,
                          env=self.get_sub_env(),
                          timeout=self.timeout)
def main():
  """Parses sharding options and runs the test binary in parallel shards.

  Returns the supervisor's (or single shard's) exit status.
  """
  parser = optparse.OptionParser(usage=SS_USAGE)
  parser.add_option("-n", "--shards_per_core", type="int",
                    default=SS_DEFAULT_SHARDS_PER_CORE,
                    help="number of shards to generate per CPU")
  parser.add_option("-r", "--runs_per_core", type="int",
                    default=SS_DEFAULT_RUNS_PER_CORE,
                    help="number of shards to run in parallel per CPU")
  parser.add_option(
      "-c", "--color", action="store_true",
      default=sys.platform != "win32" and sys.stdout.isatty(),
      help="force color output, also used by gtest if --gtest_color is not"
           " specified")
  parser.add_option("--no-color", action="store_false", dest="color",
                    help="disable color output")
  parser.add_option("-s", "--runshard", type="int",
                    help="single shard index to run")
  parser.add_option(
      "--reorder", action="store_true",
      help="ensure that all output from an earlier shard is printed before"
           " output from a later shard")
  # TODO(charleslee): for backwards compatibility with master.cfg file
  parser.add_option(
      "--original-order", action="store_true",
      help="print shard output in its orginal jumbled order of execution"
           " (useful for debugging flaky tests)")
  parser.add_option(
      "--prefix", action="store_true",
      help="prefix each line of shard output with 'N>', where N is the shard"
           " index (forced True when --original-order is True)")
  parser.add_option("--random-seed", action="store_true",
                    help="shuffle the tests with a random seed value")
  parser.add_option("--retry-failed", action="store_true",
                    help="retry tests that did not pass serially")
  parser.add_option(
      "--retry-percent", type="int", default=SS_DEFAULT_RETRY_PERCENT,
      help="ignore --retry-failed if more than this percent fail [0, 100]"
           " (default = %i)" % SS_DEFAULT_RETRY_PERCENT)
  parser.add_option(
      "-t", "--timeout", type="int", default=SS_DEFAULT_TIMEOUT,
      help="timeout in seconds to wait for a shard (default=%default s)")
  parser.add_option(
      "--total-slaves", type="int", default=1,
      help="if running a subset, number of slaves sharing the test")
  parser.add_option(
      "--slave-index", type="int", default=0,
      help="if running a subset, index of the slave to run tests for")
  parser.disable_interspersed_args()
  (options, args) = parser.parse_args()

  if not args:
    parser.error("You must specify a path to test!")
  if not os.path.exists(args[0]):
    parser.error("%s does not exist!" % args[0])

  # Shard/run counts are scaled by the number of detected CPU cores.
  num_cores = DetectNumCores()

  if options.shards_per_core < 1:
    parser.error("You must have at least 1 shard per core!")
  num_shards_to_run = num_cores * options.shards_per_core

  if options.runs_per_core < 1:
    parser.error("You must have at least 1 run per core!")
  num_runs = num_cores * options.runs_per_core

  test = args[0]
  gtest_args = [
      "--gtest_color=%s" % {
          True: "yes", False: "no"
      }[options.color]
  ] + args[1:]

  if options.original_order:
    options.prefix = True

  # TODO(charleslee): for backwards compatibility with buildbot's log_parser
  if options.reorder:
    options.original_order = False
    options.prefix = True

  if options.random_seed:
    seed = random.randint(1, 99999)
    gtest_args.extend(["--gtest_shuffle", "--gtest_random_seed=%i" % seed])

  if options.retry_failed:
    if options.retry_percent < 0 or options.retry_percent > 100:
      parser.error("Retry percent must be an integer [0, 100]!")
  else:
    options.retry_percent = -1

  if options.runshard != None:
    # run a single shard and exit
    if (options.runshard < 0 or options.runshard >= num_shards_to_run):
      parser.error("Invalid shard number given parameters!")
    shard = RunShard(test, num_shards_to_run, options.runshard, gtest_args,
                     None, None)
    shard.communicate()
    return shard.poll()

  # When running browser_tests, load the test binary into memory before running
  # any tests. This is needed to prevent loading it from disk causing the first
  # run tests to timeout flakily. See: http://crbug.com/124260
  if "browser_tests" in test:
    args = [test]
    args.extend(gtest_args)
    args.append("--warmup")
    result = subprocess.call(args, bufsize=0, universal_newlines=True)
    # If the test fails, don't run anything else.
    if result != 0:
      return result

  # shard and run the whole test
  ss = ShardingSupervisor(test, num_shards_to_run, num_runs, options.color,
                          options.original_order, options.prefix,
                          options.retry_percent, options.timeout,
                          options.total_slaves, options.slave_index,
                          gtest_args)
  return ss.ShardTest()
def main():
  """Parses sharding options and runs the test binary in parallel shards.

  Returns the supervisor's (or single shard's) exit status.
  """
  parser = optparse.OptionParser(usage=SS_USAGE)
  parser.add_option(
      "-n", "--shards_per_core", type="int",
      default=SS_DEFAULT_SHARDS_PER_CORE,
      help="number of shards to generate per CPU")
  parser.add_option(
      "-r", "--runs_per_core", type="int",
      default=SS_DEFAULT_RUNS_PER_CORE,
      help="number of shards to run in parallel per CPU")
  parser.add_option(
      "-c", "--color", action="store_true",
      default=sys.platform != "win32" and sys.stdout.isatty(),
      help="force color output, also used by gtest if --gtest_color is not"
           " specified")
  parser.add_option(
      "--no-color", action="store_false", dest="color",
      help="disable color output")
  parser.add_option(
      "-s", "--runshard", type="int",
      help="single shard index to run")
  parser.add_option(
      "--reorder", action="store_true",
      help="ensure that all output from an earlier shard is printed before"
           " output from a later shard")
  # TODO(charleslee): for backwards compatibility with master.cfg file
  parser.add_option(
      "--original-order", action="store_true",
      help="print shard output in its orginal jumbled order of execution"
           " (useful for debugging flaky tests)")
  parser.add_option(
      "--prefix", action="store_true",
      help="prefix each line of shard output with 'N>', where N is the shard"
           " index (forced True when --original-order is True)")
  parser.add_option(
      "--random-seed", action="store_true",
      help="shuffle the tests with a random seed value")
  parser.add_option(
      "--retry-failed", action="store_true",
      help="retry tests that did not pass serially")
  parser.add_option(
      "--retry-percent", type="int",
      default=SS_DEFAULT_RETRY_PERCENT,
      help="ignore --retry-failed if more than this percent fail [0, 100]"
           " (default = %i)" % SS_DEFAULT_RETRY_PERCENT)
  parser.add_option(
      "-t", "--timeout", type="int", default=SS_DEFAULT_TIMEOUT,
      help="timeout in seconds to wait for a shard (default=%default s)")
  parser.add_option(
      "--total-slaves", type="int", default=1,
      help="if running a subset, number of slaves sharing the test")
  parser.add_option(
      "--slave-index", type="int", default=0,
      help="if running a subset, index of the slave to run tests for")
  parser.disable_interspersed_args()
  (options, args) = parser.parse_args()

  if not args:
    parser.error("You must specify a path to test!")
  if not os.path.exists(args[0]):
    parser.error("%s does not exist!" % args[0])

  # Shard/run counts are scaled by the number of detected CPU cores.
  num_cores = DetectNumCores()

  if options.shards_per_core < 1:
    parser.error("You must have at least 1 shard per core!")
  num_shards_to_run = num_cores * options.shards_per_core

  if options.runs_per_core < 1:
    parser.error("You must have at least 1 run per core!")
  num_runs = num_cores * options.runs_per_core

  test = args[0]
  gtest_args = ["--gtest_color=%s" % {
      True: "yes", False: "no"}[options.color]] + args[1:]

  if options.original_order:
    options.prefix = True

  # TODO(charleslee): for backwards compatibility with buildbot's log_parser
  if options.reorder:
    options.original_order = False
    options.prefix = True

  if options.random_seed:
    seed = random.randint(1, 99999)
    gtest_args.extend(["--gtest_shuffle", "--gtest_random_seed=%i" % seed])

  if options.retry_failed:
    if options.retry_percent < 0 or options.retry_percent > 100:
      parser.error("Retry percent must be an integer [0, 100]!")
  else:
    options.retry_percent = -1

  if options.runshard != None:
    # run a single shard and exit
    if (options.runshard < 0 or options.runshard >= num_shards_to_run):
      parser.error("Invalid shard number given parameters!")
    shard = RunShard(
        test, num_shards_to_run, options.runshard, gtest_args, None, None)
    shard.communicate()
    return shard.poll()

  # When running browser_tests, load the test binary into memory before running
  # any tests. This is needed to prevent loading it from disk causing the first
  # run tests to timeout flakily. See: http://crbug.com/124260
  if "browser_tests" in test:
    args = [test]
    args.extend(gtest_args)
    args.append("--warmup")
    result = subprocess.call(args, bufsize=0, universal_newlines=True)
    # If the test fails, don't run anything else.
    if result != 0:
      return result

  # shard and run the whole test
  ss = ShardingSupervisor(
      test, num_shards_to_run, num_runs, options.color,
      options.original_order, options.prefix, options.retry_percent,
      options.timeout, options.total_slaves, options.slave_index,
      gtest_args)
  return ss.ShardTest()
def test_call_string(self):
    """call() accepts a single command string on Windows."""
    exit_status = subprocess.call(
        sys.executable + ' -c "import sys; sys.exit(47)"')
    self.assertEqual(exit_status, 47)