def _discard_pending(self, pending, message):
  """Discards a pending commit and attaches an optional message to the review."""
  logging.debug('_discard_pending(%s, %s)', pending.issue, message)
  try:
    try:
      if pending.get_state() != base.IGNORED:
        self.context.rietveld.set_flag(
            pending.issue, pending.patchset, 'commit', 'False')
    except urllib2.HTTPError as e:
      logging.error(
          'Failed to set the flag to False for %s with message %s' % (
            pending.pending_name(), message))
      traceback.print_stack()
      errors.send_stack(e)
    if message:
      try:
        self.context.rietveld.add_comment(pending.issue, message)
      except urllib2.HTTPError as e:
        logging.error(
            'Failed to add comment for %s with message %s' % (
              pending.pending_name(), message))
        traceback.print_stack()
        errors.send_stack(e)
    self.context.status.send(
        pending,
        {
          'verification': 'abort',
          'payload': {
            'output': message,
          },
        })
  finally:
    # Most importantly, remove the PendingCommit from the queue.
    self.queue.remove(pending.issue)
def _worker_thread(self):
  """Sends the packets in a loop through HTTP POST."""
  params = {
      'Content-type': 'application/x-www-form-urlencoded',
      'Accept': 'text/plain',
  }
  done = False
  try:
    while not done:
      items = self._get_items()
      if self._TERMINATE in items:
        done = True
        logging.debug('Worker thread exiting')
        items.remove(self._TERMINATE)
      url = self.url + '/receiver'
      logging.debug('Sending %d items to %s' % (len(items), url))
      try:
        data = [('p', json.dumps(item)) for item in items]
        data.append(('password', self.password))
        # Note: urllib.urlopen() has no headers argument (its third positional
        # parameter is proxies), so pass the headers via urllib2.Request.
        request = urllib2.Request(url, urllib.urlencode(data), params)
        urllib2.urlopen(request).read()
      except IOError as e:
        logging.error(e)
        # Put the items back so they are retried on the next iteration.
        for item in items:
          self.queue.put(item)
        if not done:
          time.sleep(1)  # Don't retry if done.
  except Exception as e:
    traceback.print_exc()
    errors.send_stack(e)
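# The loop above relies on self._get_items() blocking until at least one
# packet is available and then returning the whole backlog as a batch. That
# helper is not shown in this section; below is a minimal hedged sketch of
# how it could work, assuming a standard Queue.Queue backing store. The
# Queue-based implementation is an assumption, not the project's code.
import Queue  # Python 2 stdlib module; renamed `queue` in Python 3.

def _get_items(self):
  """Hypothetical sketch: blocks for the first packet, then drains the rest.

  Batching lets a single HTTP POST carry several 'p' parameters, which is
  what _worker_thread() above expects.
  """
  items = [self.queue.get()]  # Block until at least one item is available.
  while True:
    try:
      items.append(self.queue.get_nowait())
    except Queue.Empty:
      break
  return items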
def update_status(self):
  """Updates the status for each pending commit verifier."""
  why_nots = dict((p.issue, p.why_not()) for p in self.queue.iterate())
  for verifier in self.all_verifiers:
    try:
      verifier.update_status(self.queue.iterate())
    except base.DiscardPending as e:
      # It's not efficient since it takes a full loop for each pending
      # commit to discard.
      self._discard_pending(e.pending, e.status)
    except Exception as e:
      traceback.print_exc()
      # Swallow every exception in that code and move on. Make sure to send
      # a stack trace though.
      errors.send_stack(e)
  for pending in self.queue.iterate():
    why_not = pending.why_not()
    if why_nots[pending.issue] != why_not:
      self.context.status.send(
          pending,
          {
            'verification': 'why not',
            'payload': {
              'message': why_not,
            },
          })
def _discard_pending(self, pending, message):
  """Discards a pending commit and attaches an optional message to the review."""
  try:
    if pending.get_state() != base.IGNORED:
      self.context.rietveld.set_flag(
          pending.issue, pending.patchset, 'commit', 'False')
  except urllib2.HTTPError as e:
    logging.error(
        'Failed to set the flag to False for %s with message %s' % (
          pending.pending_name(), message))
    traceback.print_stack()
    errors.send_stack(e)
def update_status(self):
  """Updates the status for each pending commit verifier."""
  for verifier in self.all_verifiers:
    try:
      verifier.update_status(self.queue.pending_commits)
    except base.DiscardPending as e:
      # It's not efficient since it takes a full loop for each pending
      # commit to discard.
      self._discard_pending(e.pending, e.status)
    except Exception as e:
      # Swallow every exception in that code and move on. Make sure to send
      # a stack trace though.
      errors.send_stack(e)
def look_for_new_pending_commit(self):
  """Looks for new reviews on self.context.rietveld with the commit bit set.

  Calls _new_pending_commit() on each new review found.
  """
  try:
    new_issues = self._fetch_pending_issues()

    # If there is an issue in processed_issues that is not in new_issues,
    # discard it.
    for pending in self.queue.iterate():
      # Note that pending.issue is an int but self.queue.pending_commits
      # keys are str due to json support.
      if pending.issue not in new_issues:
        logging.info('Flushing issue %d' % pending.issue)
        self.context.status.send(
            pending,
            {
              'verification': 'abort',
              'payload': {
                'output': 'CQ bit was unchecked on CL. Ignoring.',
              },
            })
        pending.get_state = lambda: base.IGNORED
        self._discard_pending(pending, None)

    # Find new issues.
    for issue_id in new_issues:
      if str(issue_id) not in self.queue.pending_commits:
        issue_data = self.context.rietveld.get_issue_properties(
            issue_id, True)
        # This assumption needs to hold.
        assert issue_id == issue_data['issue']
        if issue_data['patchsets'] and issue_data['commit']:
          logging.info('Found new issue %d' % issue_id)
          self.queue.add(
              PendingCommit(
                  issue=issue_id,
                  owner=issue_data['owner_email'],
                  reviewers=issue_data['reviewers'],
                  patchset=issue_data['patchsets'][-1],
                  base_url=issue_data['base_url'],
                  description=issue_data['description'].replace('\r', ''),
                  messages=issue_data['messages']))
  except Exception as e:
    traceback.print_exc()
    # Swallow every exception in that code and move on. Make sure to send a
    # stack trace though.
    errors.send_stack(e)
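# Several snippets above and below go through self.queue: iterate(), add(),
# get(), remove(), plus a pending_commits mapping whose keys are str due to
# json support. The wrapper itself is not shown in this section; this is a
# hedged sketch of the assumed interface, not the project's actual class.
class PendingQueue(object):
  """Hypothetical sketch of the pending-commit queue wrapper.

  Entries are keyed by str(issue) so the mapping survives a JSON dump/load
  round trip, which only allows string keys.
  """

  def __init__(self):
    self.pending_commits = {}

  def add(self, pending):
    self.pending_commits[str(pending.issue)] = pending

  def get(self, issue):
    # Raises KeyError for unknown issues, which _commit_patch() relies on.
    return self.pending_commits[str(issue)]

  def iterate(self):
    return self.pending_commits.values()

  def remove(self, issue):
    self.pending_commits.pop(str(issue), None)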
def process(checkout, patch):
  """Enforces current year in Chromium copyright."""
  pattern = (
      r'^(.*)Copyright (?:\(c\) )?\d{4}(|-\d{4}) The Chromium Authors. '
      r'All rights reserved.$')
  replacement = (
      r'\1Copyright %s The Chromium Authors. All rights reserved.' %
      datetime.date.today().year)
  if not patch.is_new or patch.is_binary:
    return
  filepath = os.path.join(checkout.project_path, patch.filename)
  try:
    with open(filepath, 'rb') as f:
      lines = f.read().splitlines(True)
  except IOError as e:
    errors.send_stack(e)
    lines = None
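# The snippet above stops right after reading the file. Below is a hedged
# sketch of how the rewrite step could continue, reusing the pattern,
# replacement, filepath and lines names from above; the write-back logic is
# an assumption, not the project's actual code.
import re

if lines:
  # Rewrite the year in the first line matching the copyright notice.
  for i, line in enumerate(lines[:10]):  # The notice sits near the top.
    new_line = re.sub(pattern, replacement, line)
    if new_line != line:
      lines[i] = new_line
      with open(filepath, 'wb') as f:
        f.write(''.join(lines))
      break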
def look_for_new_pending_commit(self):
  """Looks for new reviews on self.context.rietveld with the commit bit set.

  Calls _new_pending_commit() on each new review found.
  """
  try:
    new_issues = self._fetch_pending_issues()

    # If there is an issue in processed_issues that is not in new_issues,
    # discard it.
    for pending in self.queue.pending_commits:
      if pending.issue not in new_issues:
        logging.info('Flushing issue %d' % pending.issue)
        self.context.status.send(
            pending,
            {
              'verification': 'abort',
              'payload': {
                'output': 'CQ bit was unchecked on CL. Ignoring.',
              },
            })
        pending.get_state = lambda: base.IGNORED
        self._discard_pending(pending, None)

    # Find new issues.
    known_issues = [c.issue for c in self.queue.pending_commits]
    for issue_id in new_issues:
      if issue_id not in known_issues:
        issue_data = self.context.rietveld.get_issue_properties(
            issue_id, True)
        if issue_data['patchsets'] and issue_data['commit']:
          logging.info('Found new issue %d' % issue_data['issue'])
          self.queue.pending_commits.append(
              PendingCommit(
                  issue_data['issue'],
                  issue_data['owner_email'],
                  issue_data['reviewers'],
                  issue_data['patchsets'][-1],
                  issue_data['base_url'],
                  issue_data['description'].replace('\r', ''),
                  issue_data['messages']))
  except Exception as e:
    # Swallow every exception in that code and move on. Make sure to send a
    # stack trace though.
    errors.send_stack(e)
def process_new_pending_commit(self):
  """Starts verification on newly found pending commits."""
  expected = set(i.name for i in self.all_verifiers)
  for pending in self.queue.pending_commits[:]:
    try:
      # Take into account the case where a verifier was removed.
      done = set(pending.verifications.keys())
      missing = expected - done
      if not missing or pending.get_state() != base.PROCESSING:
        continue
      logging.info(
          'Processing issue %s (%s, %d)' % (
            pending.issue, missing, pending.get_state()))
      self._verify_pending(pending)
    except base.DiscardPending as e:
      self._discard_pending(e.pending, e.status)
    except Exception as e:
      # Swallow every exception in that code and move on. Make sure to send a
      # stack trace though.
      errors.send_stack(e)
def scan_results(self):
  """Scans pending commits that can be committed or discarded."""
  for pending in self.queue.pending_commits[:]:
    state = pending.get_state()
    if state == base.FAILED:
      self._discard_pending(
          pending, pending.error_message() or self.FAILED_NO_MESSAGE)
    elif state == base.SUCCEEDED:
      if self._throttle(pending):
        continue
      # The item is removed right away.
      self.queue.pending_commits.remove(pending)
      try:
        # Runs the checks. It'd be nice to run the test before the postpone,
        # especially if the tree is closed for a long time, but at the same
        # time it would keep fetching the rietveld status constantly.
        self._last_minute_checks(pending)
        self._commit_patch(pending)
      except base.DiscardPending as e:
        self._discard_pending(e.pending, e.status)
      except Exception as e:
        errors.send_stack(e)
        self._discard_pending(pending, self.INTERNAL_EXCEPTION)
def scan_results(self):
  """Scans pending commits that can be committed or discarded."""
  for pending in self.queue.iterate():
    state = pending.get_state()
    if state == base.FAILED:
      self._discard_pending(
          pending, pending.error_message() or self.FAILED_NO_MESSAGE)
    elif state == base.SUCCEEDED:
      if self._throttle(pending):
        continue
      try:
        # Runs the checks. It'd be nice to run the test before the postpone,
        # especially if the tree is closed for a long time, but at the same
        # time it would keep fetching the rietveld status constantly.
        self._last_minute_checks(pending)
        self.context.status.send(
            pending,
            {
              'verification': 'why not',
              'payload': {
                'message': '',
              },
            })
        self._commit_patch(pending)
      except base.DiscardPending as e:
        self._discard_pending(e.pending, e.status)
      except Exception as e:
        traceback.print_exc()
        errors.send_stack(e)
        self._discard_pending(pending, self.INTERNAL_EXCEPTION)
    else:
      # When state is IGNORED, we need to keep this issue so it's not fetched
      # another time, but we can't discard it since we don't want to remove
      # the commit bit for another project hosted on the same code review
      # instance.
      assert state in (base.PROCESSING, base.IGNORED)
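# Both scan_results() variants call self._throttle(pending) before
# committing, and _commit_patch() below keeps recent_commit_timestamps
# trimmed to MAX_COMMIT_BURST entries. The throttle itself is not shown in
# this section; this is a hedged sketch under those assumptions, and
# COMMIT_BURST_DELAY is a made-up name for the burst window.
def _throttle(self, pending):
  """Hypothetical sketch: returns True when committing now would exceed
  MAX_COMMIT_BURST commits within the last COMMIT_BURST_DELAY seconds."""
  now = time.time()
  recent = [t for t in self.recent_commit_timestamps
            if now - t < self.COMMIT_BURST_DELAY]
  return len(recent) >= self.MAX_COMMIT_BURST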
def main():
  parser = optparse.OptionParser(
      description=sys.modules['__main__'].__doc__)
  project_choices = projects.supported_projects()
  parser.add_option('-v', '--verbose', action='store_true')
  parser.add_option(
      '--no-dry-run',
      action='store_false',
      dest='dry_run',
      default=True,
      help='Run for real instead of dry-run mode which is the default. '
           'WARNING: while the CQ won\'t touch rietveld in dry-run mode, the '
           'Try Server will. So it is recommended to use --only-issue')
  parser.add_option(
      '--only-issue',
      type='int',
      help='Limits to a single issue. Useful for live testing; WARNING: it '
           'will fake that the issue has the CQ bit set, so only try with an '
           'issue you don\'t mind about.')
  parser.add_option(
      '--fake',
      action='store_true',
      help='Run with a fake checkout to speed up testing')
  parser.add_option(
      '--no-try',
      action='store_true',
      help='Don\'t send try jobs.')
  parser.add_option(
      '-p', '--poll-interval',
      type='int',
      default=10,
      help='Minimum delay between each polling loop, default: %default')
  parser.add_option(
      '--query-only',
      action='store_true',
      help='Return internal state')
  parser.add_option(
      '--project',
      choices=project_choices,
      help='Project to run the commit queue against: %s' %
           ', '.join(project_choices))
  parser.add_option(
      '-u', '--user',
      default='*****@*****.**',
      help='User to use instead of %default')
  options, args = parser.parse_args()

  if args:
    parser.error('Unsupported args: %s' % args)
  if not options.project:
    parser.error(
        'Need to pass a valid project to --project.\nOptions are: %s' %
        ', '.join(project_choices))

  logging.getLogger().setLevel(logging.DEBUG)
  if options.verbose:
    level = logging.DEBUG
  else:
    level = logging.INFO
  console_logging = logging.StreamHandler()
  console_logging.setFormatter(logging.Formatter(
      '%(asctime)s %(levelname)7s %(message)s'))
  console_logging.setLevel(level)
  logging.getLogger().addHandler(console_logging)

  log_directory = 'logs-' + options.project
  if not os.path.exists(log_directory):
    os.mkdir(log_directory)
  logging_rotating_file = logging.handlers.RotatingFileHandler(
      filename=os.path.join(log_directory, 'commit_queue.log'),
      maxBytes=10 * 1024 * 1024,
      backupCount=50)
  logging_rotating_file.setLevel(logging.DEBUG)
  logging_rotating_file.setFormatter(logging.Formatter(
      '%(asctime)s %(levelname)-8s %(module)15s(%(lineno)4d): %(message)s'))
  logging.getLogger().addHandler(logging_rotating_file)

  try:
    work_dir = os.path.join(ROOT_DIR, 'workdir')
    # Use our specific subversion config.
    checkout.SvnMixIn.svn_config = checkout.SvnConfig(
        os.path.join(ROOT_DIR, 'subversion_config'))
    url = 'https://chromiumcodereview.appspot.com'
    gaia_creds = creds.Credentials(os.path.join(work_dir, '.gaia_pwd'))
    if options.dry_run:
      logging.debug('Dry run - skipping SCM check.')
      if options.only_issue:
        print(
            'Using read-only Rietveld; using only issue %d' %
            options.only_issue)
      else:
        print('Using read-only Rietveld')
      # Make sure rietveld is not modified.
      rietveld_obj = ReadOnlyRietveld(
          url,
          options.user,
          gaia_creds.get(options.user),
          None,
          options.only_issue)
    else:
      AlertOnUncleanCheckout()
      print('WARNING: The Commit Queue is going to commit stuff')
      if options.only_issue:
        print('Using only issue %d' % options.only_issue)
        rietveld_obj = OnlyIssueRietveld(
            url,
            options.user,
            gaia_creds.get(options.user),
            None,
            options.only_issue)
      else:
        rietveld_obj = rietveld.Rietveld(
            url, options.user, gaia_creds.get(options.user), None)
    pc = projects.load_project(
        options.project,
        options.user,
        work_dir,
        rietveld_obj,
        options.no_try)

    if options.dry_run:
      if options.fake:
        # Disable the checkout.
        print('Using no checkout')
        pc.context.checkout = FakeCheckout()
      else:
        print('Using read-only checkout')
        pc.context.checkout = checkout.ReadOnlyCheckout(pc.context.checkout)
      # Save pushed events on disk.
      print('Using read-only chromium-status interface')
      pc.context.status = async_push.AsyncPushStore()

    db_path = os.path.join(
        work_dir, pc.context.checkout.project_name + '.json')
    if os.path.isfile(db_path):
      try:
        pc.load(db_path)
      except ValueError:
        os.remove(db_path)

    sig_handler.installHandlers(signal.SIGINT, signal.SIGHUP)

    # Sync every 5 minutes.
    SYNC_DELAY = 5 * 60
    try:
      if options.query_only:
        pc.look_for_new_pending_commit()
        pc.update_status()
        print(str(pc.queue))
        return 0

      now = time.time()
      next_loop = now + options.poll_interval
      # First sync is on the second loop.
      next_sync = now + options.poll_interval * 2
      while True:
        # In theory, we would gain in performance by parallelizing these
        # tasks. In practice I'm not sure it matters.
        pc.look_for_new_pending_commit()
        pc.process_new_pending_commit()
        pc.update_status()
        pc.scan_results()
        if sig_handler.getTriggeredSignals():
          raise KeyboardInterrupt()
        # Save the db at each loop. The db can easily be in the 1mb range so
        # it's slowing down the CQ a tad, but it is in the 100ms range even
        # for that size.
        pc.save(db_path)

        # More than a second to wait, and a sync is due.
        now = time.time()
        if (next_loop - now) >= 1 and (next_sync - now) <= 0:
          if sys.stdout.isatty():
            sys.stdout.write('Syncing while waiting               \r')
            sys.stdout.flush()
          try:
            pc.context.checkout.prepare(None)
          except subprocess2.CalledProcessError as e:
            # Don't crash, most of the time it's the svn server that is dead.
            # How fun. Send a stack trace to annoy the maintainer.
            errors.send_stack(e)
          next_sync = time.time() + SYNC_DELAY

        now = time.time()
        next_loop = max(now, next_loop)
        while True:
          # Abort if any signals are set.
          if sig_handler.getTriggeredSignals():
            raise KeyboardInterrupt()
          delay = next_loop - now
          if delay <= 0:
            break
          if sys.stdout.isatty():
            sys.stdout.write('Sleeping for %1.1f seconds          \r' % delay)
            sys.stdout.flush()
          time.sleep(min(delay, 0.1))
          now = time.time()
        if sys.stdout.isatty():
          sys.stdout.write('Running (please do not interrupt)   \r')
          sys.stdout.flush()
        next_loop = time.time() + options.poll_interval
    finally:
      print >> sys.stderr, 'Saving db...     '
      pc.save(db_path)
      pc.close()
      print >> sys.stderr, 'Done!            '
  except KeyboardInterrupt:
    print('Bye bye')
    # 23 is an arbitrary value to signal loop.sh that it must stop looping.
    return 23
  except SystemExit as e:
    traceback.print_exc()
    print >> sys.stderr, 'Tried to exit: %s' % e
    return e.code
  except errors.ConfigurationError as e:
    parser.error(str(e))
    return 1
  return 0
def _discard_pending(self, pending, message):
  """Discards a pending commit and attaches an optional message to the review."""
  try:
    if pending.get_state() != base.IGNORED:
      self.context.rietveld.set_flag(
          pending.issue, pending.patchset, 'commit', 'False')
  except urllib2.HTTPError as e:
    logging.error(
        'Failed to set the flag to False for %s with message %s' % (
          pending.pending_name(), message))
    traceback.print_stack()
    errors.send_stack(e)
  if message:
    try:
      self.context.rietveld.add_comment(pending.issue, message)
    except urllib2.HTTPError as e:
      logging.error(
          'Failed to add comment for %s with message %s' % (
            pending.pending_name(), message))
      traceback.print_stack()
      errors.send_stack(e)
  self.context.status.send(
      pending,
      {
        'verification': 'abort',
        'payload': {
          'output': message,
        },
      })
  try:
    self.queue.pending_commits.remove(pending)
  except ValueError:
    pass
def _commit_patch(self, pending):
  """Commits the pending patch to the repository.

  Does the checkout and applies the patch.
  """
  try:
    try:
      # Make sure to apply on HEAD.
      pending.revision = None
      pending.apply_patch(self.context, True)
      # Commit it.
      commit_desc = git_cl.ChangeDescription(pending.description)
      if (self.context.server_hooks_missing and
          self.context.rietveld.email != pending.owner):
        commit_desc.update_reviewers(pending.reviewers)
        commit_desc.append_footer('Author: ' + pending.owner)
      commit_desc.append_footer(
          'Review URL: %s/%s' % (self.context.rietveld.url, pending.issue))
      pending.revision = self.context.checkout.commit(
          commit_desc.description, pending.owner)
      if not pending.revision:
        raise base.DiscardPending(pending, 'Failed to commit patch.')

      # Note that the commit succeeded for commit throttling.
      self.recent_commit_timestamps.append(time.time())
      self.recent_commit_timestamps = (
          self.recent_commit_timestamps[-(self.MAX_COMMIT_BURST + 1):])

      viewvc_url = self.context.checkout.get_settings('VIEW_VC')
      issue_desc = git_cl.ChangeDescription(pending.description)
      msg = 'Committed: %s' % pending.revision
      if viewvc_url:
        viewvc_url = '%s%s' % (viewvc_url.rstrip('/'), pending.revision)
        msg = 'Committed: %s' % viewvc_url
      issue_desc.append_footer(msg)

      # Update the CQ dashboard.
      self.context.status.send(
          pending,
          {
            'verification': 'commit',
            'payload': {
              'revision': pending.revision,
              'output': msg,
              'url': viewvc_url,
            },
          })

      # Closes the issue on Rietveld.
      # TODO(csharp): Retry if exceptions are encountered.
      try:
        self.context.rietveld.close_issue(pending.issue)
        self.context.rietveld.update_description(
            pending.issue, issue_desc.description)
        self.context.rietveld.add_comment(
            pending.issue, 'Change committed as %s' % pending.revision)
      except (urllib2.HTTPError, urllib2.URLError):
        # Ignore AppEngine flakiness.
        logging.warning('Unable to fully close the issue')
      # And finally remove the issue. If the close_issue() call above failed,
      # it is possible the dashboard will be confused, but it is harmless.
      try:
        self.queue.get(pending.issue)
      except KeyError:
        logging.error('Internal inconsistency for %d', pending.issue)
      self.queue.remove(pending.issue)
    except (checkout.PatchApplicationFailed,
            patch.UnsupportedPatchFormat) as e:
      raise base.DiscardPending(pending, str(e))
    except subprocess2.CalledProcessError as e:
      stdout = getattr(e, 'stdout', None)
      out = 'Failed to apply the patch.'
      if stdout:
        out += '\n%s' % stdout
      raise base.DiscardPending(pending, out)
  except base.DiscardPending as e:
    self._discard_pending(e.pending, e.status)
  except Exception as e:
    traceback.print_exc()
    # Swallow every exception in that code and move on. Make sure to send a
    # stack trace though.
    errors.send_stack(e)
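# For reference, the footer handling in _commit_patch() composes change
# descriptions roughly as illustrated below. This is a hedged example with
# made-up values, assuming depot_tools' git_cl.ChangeDescription appends
# footer lines after a blank-line separator.
desc = git_cl.ChangeDescription('Fix the widget redraw glitch.')
desc.append_footer('Author: someone@example.com')
desc.append_footer('Review URL: https://chromiumcodereview.appspot.com/10001')
print(desc.description)
# Expected output, under the assumption above:
#   Fix the widget redraw glitch.
#
#   Author: someone@example.com
#   Review URL: https://chromiumcodereview.appspot.com/10001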