def save(self, **pins):
  for k, v in pins.iteritems():
    assert isinstance(k, basestring)
    assert isinstance(v, basestring)
  LOGGER.debug('Writing pin file [%s]: %s', self._path, pins)
  with open(self._path, 'w') as fd:
    json.dump(pins, fd, indent=2, sort_keys=True)

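# A minimal usage sketch for save() above (the pin names and versions are
# hypothetical; assumes an instance whose self._path points at a pin file):
#
#   editor.save(chromite='deadbeef0123', chromite_internal='cafef00d4567')
#
# Because the JSON is written with sorted keys and a fixed indent, saving
# the same pins twice yields byte-identical files.
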
def _destroy_directory(d):
  LOGGER.debug('Destroying directory: %s', d)

  def log_failure(_function, path, excinfo):
    LOGGER.warning('Failed when destroying [%s]: %s', path,
                   excinfo[1].message)
  shutil.rmtree(d, onerror=log_failure)

def fetch(cls, path):
  LOGGER.info("Fetching => %s (This can take a while.)", path)
  gclient_path = os.path.join(path, '.gclient')
  with open(gclient_path, 'w') as fd:
    fd.write(cls.GCLIENT_TEMPLATE)
  execute.check_call(
      ['gclient', 'sync', '--nohooks', '--noprehooks'],
      cwd=path)

def use(cls, *args, **kwargs):
  c = None
  try:
    c = cls.create(*args, **kwargs)
    LOGGER.debug('Using checkout at: %s', c.path)
    yield c
  finally:
    if c:
      c.teardown()

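# A minimal usage sketch for the use() contextmanager above (assumes it is
# decorated with @contextlib.contextmanager on a Checkout class exposing
# create(); the path argument and the helper call are hypothetical):
#
#   with Checkout.use(path='/tmp/cros_pin_checkout') as c:
#     run_tools_in(c.path)
#   # teardown() has run by this point, even if run_tools_in() raised.
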
def fetch(cls, build_revision, path):
  LOGGER.info("Fetching => %s (This can take a while.)", path)
  gclient_path = os.path.join(path, '.gclient')
  with open(gclient_path, 'w') as fd:
    fd.write(cls.GCLIENT_TEMPLATE)
  cmd = ['gclient', 'sync', '--nohooks', '--noprehooks']
  if build_revision:
    LOGGER.info("Using build.git revision %s", build_revision)
    cmd.extend(['--revision', build_revision])
  execute.check_call(cmd, cwd=path)

def checkout_for_args(args):
  """A contextmanager that supplies the Checkout configured in args.

  The Checkout's teardown() method will be invoked on cleanup.

  Args:
    args (argparse.Namespace): Parsed option list.
  """
  LOGGER.warning('Checking out temporary repositories. This may take a few '
                 'minutes.')
  return checkout.Checkout.use(
      build_revision=args.build_revision,
      path=args.checkout_path)

def checkout_for_args(args):
  """A contextmanager that supplies the Checkout configured in args.

  The Checkout's teardown() method will be invoked on cleanup.

  Args:
    args (argparse.Namespace): Parsed option list.
  """
  LOGGER.warning('Checking out temporary repositories. This may take a few '
                 'minutes.')
  return checkout.Checkout.use(
      path=args.checkout_path)

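# A minimal sketch of how the subcommands below consume checkout_for_args()
# (the subcommand body is hypothetical; pinfile_editor_from_args and
# pinfile.INTERNAL are the names used later in this file):
#
#   def subcommand_example(args):
#     with checkout_for_args(args) as c:
#       pf = pinfile_editor_from_args(args, c).load(pinfile.INTERNAL)
#       ...
#     return 0
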
def _regenerate_slave_pool(self, master):
  LOGGER.debug('Regenerating slave pool for: %s', master)
  cmd = [
      os.path.join(*self.RUNIT_PY),
      os.path.join(*self.SLAVE_ALLOC_UPDATE),
  ]
  cmd += logging_verbosity()
  cmd.append(master)

  rv, stdout = execute.call(cmd, cwd=self._c.path)
  if rv != 0:
    # Use error() rather than exception(): there is no active exception
    # here, only a nonzero return code.
    LOGGER.error('Failed to update slaves for master [%s] (%d):\n%s',
                 master, rv, stdout)
    raise SlavePoolUpdateError()

def subcommand_add_release(args):
  """Add a new release branch to the list of pins."""
  with checkout_for_args(args) as c:
    pfe = pinfile_editor_from_args(args, c)
    tracker = UpdateTracker.from_args(args, c)

    add_release = (get_release_version(args.branch), args.branch)
    if add_release[0] is None:
      raise ValueError("Invalid release branch: [%s]" % (args.branch,))

    # Build a list of releases and their versions.
    pf = pfe.load(pinfile.INTERNAL)
    releases = [add_release]
    for name, _ in pf.iterpins():
      v = get_release_version(name)
      if v == add_release[0]:
        LOGGER.error('Release [%s] (%d) is already pinned.',
                     add_release[1], add_release[0])
        return 1
      if v is not None:
        releases.append((v, name))
    releases.sort(reverse=True)

    # Shave off the top [stable_count+1] releases.
    count = args.stable_count + 1
    releases, deleted = releases[:count], releases[count:]

    # Check whether an LTS release landed among the deleted releases.
    for v, name in deleted:
      if ((v - BASE_LTS_MILESTONE) % NEXT_LTS_MILESTONE_MODULO) == 0:
        releases.append((v, name))
        deleted.remove((v, name))
        # We allow one milestone to exist in the LTS window, which starts
        # after the stable window.
        break

    if add_release not in releases:
      raise ValueError("Updated releases do not include added (%s):\n%s" % (
          add_release[1], '\n'.join(r[1] for r in releases)))

    # Set the new releases.
    tracker.add(pinfile.INTERNAL, pf.update(add_release[1], create=True))
    for _, r in deleted:
      tracker.add(pinfile.INTERNAL, pf.remove(r))
    if not tracker:
      LOGGER.error('No pins were updated.')
      return 1

    # Regenerate slave pools for affected masters.
    try:
      tracker.update()
    except SlavePoolUpdateError as e:
      LOGGER.error('Failed to generate pin bump CLs: %s', e)
      return 1

    LOGGER.info('Created issues:\n%s', '\n'.join(tracker.issues))
    return 0

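# A worked example of the LTS retention check above, with hypothetical
# values BASE_LTS_MILESTONE = 54 and NEXT_LTS_MILESTONE_MODULO = 8 (the
# real constants are defined elsewhere in this file):
#
#   >>> [(v - 54) % 8 == 0 for v in (69, 70, 71)]
#   [False, True, False]
#
# So if milestone 70 is shaved off the stable window, it is moved back into
# `releases` as the single permitted LTS pin, while 69 and 71 stay deleted.
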
def subcommand_update(args):
  """Update a single Chromite pin."""
  require = (args.target != 'existing')
  target_pins = []
  if args.target in ('external', 'both', 'existing'):
    target_pins.append(pinfile.EXTERNAL)
  if args.target in ('internal', 'both', 'existing'):
    target_pins.append(pinfile.INTERNAL)

  with checkout_for_args(args) as c:
    pfe = pinfile_editor_from_args(args, c)
    tracker = UpdateTracker.from_args(args, c)
    for pin in target_pins:
      LOGGER.debug('Updating target pin [%s]', pin)

      # Update the pin.
      pf = pfe.load(pin)
      if not (require or pf.has_pin(args.name)):
        LOGGER.debug('Pin not found in [%s]. Only updating existing pins.',
                     pin)
        continue

      update = pf.update(args.name, version=args.version, create=require)
      if not update:
        LOGGER.debug('Did not update pins for [%s]', pin)
        continue
      tracker.add(pin, update)
      LOGGER.debug('Updated pin set: %s', update)

    if not tracker:
      LOGGER.error('No pins were updated.')
      return 1

    # Regenerate slave pools for affected masters.
    tracker.update()

    for i in tracker.issues:
      LOGGER.warning('Created Issue: %s', i)
    return 0

def update(self):
  LOGGER.info('Updating repositories: %s', self._updated)
  affected_masters = set()
  for pin in self._updated.iterkeys():
    affected_masters.update(pin.masters)

  failed_slave_pool_masters = []
  for m in sorted(affected_masters):
    try:
      self._regenerate_slave_pool(m)
    except SlavePoolUpdateError:
      failed_slave_pool_masters.append(m)
  if failed_slave_pool_masters:
    LOGGER.error('Failed to update slave pools for %s. You may need to '
                 'add additional slaves to the pool(s).',
                 failed_slave_pool_masters)
    raise SlavePoolUpdateError("Failed to update slave pools.")

  # Upload CLs for the affected repositories.
  for pin, updates in self._updated.iteritems():
    self._upload_patch(
        self._c.subpath(*pin.base),
        self._generate_commit_message(updates))

def update(self, pin_name, create=False, version=None):
  """Updates a single pin value."""
  if not version:
    LOGGER.debug('Resolving version for pin [%s]', pin_name)
    version = self._editor.get_commit(pin_name)
  elif self._editor._validate:
    LOGGER.debug('Validating pin [%s]', pin_name)
    self._editor.validate_pin(version)

  with self.edit() as pins:
    current = pins.get(pin_name)
    if current == version:
      LOGGER.warning('Pin [%s.%s] is already at version [%s]',
                     self._pin.name, pin_name, current)
      return None

    LOGGER.info('Updating pin [%s.%s]: [%s] => [%s]',
                self._pin.name, pin_name, current, version)
    if not (current or create):
      raise ReadOnlyError("Pin does not exist [%s]" % (pin_name,))
    pins[pin_name] = version
    return PinUpdate(pin_name, current, version)

def update(self, pin_name, create=False, version=None):
  """Updates a single pin value."""
  if not version:
    LOGGER.debug('Resolving version for pin [%s]', pin_name)
    version = self._editor.get_commit(pin_name)
  elif self._editor._validate:
    LOGGER.debug('Validating pin [%s]', pin_name)
    self._editor.validate_pin(version)

  with self.edit() as pins:
    current = pins.get(pin_name)
    if current == version:
      LOGGER.warning('Pin [%s.%s] is already at version [%s]',
                     self._pin.name, pin_name, current)
      return None

    LOGGER.info('Updating pin [%s.%s]: [%s] => [%s]',
                self._pin.name, pin_name, current, version)
    if not (current or create):
      raise ReadOnlyError("Pin does not exist [%s]" % (pin_name,))
    pins[pin_name] = version
    return PinUpdate(pin_name, current, version)

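# A minimal usage sketch for update() above (the pin name and version are
# hypothetical; pf is a loaded pin file as in the subcommands, and tracker
# is an UpdateTracker):
#
#   update = pf.update('chromite', version='deadbeef', create=False)
#   if update:
#     tracker.add(pinfile.INTERNAL, update)
#   # update is None when the pin was already at the requested version.
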
def subcommand_add_release(args):
  """Add a new release branch to the list of pins."""
  with checkout_for_args(args) as c:
    pfe = pinfile_editor_from_args(args, c)
    tracker = UpdateTracker.from_args(args, c)

    add_release = (get_release_version(args.branch), args.branch)
    if add_release[0] is None:
      raise ValueError("Invalid release branch: [%s]" % (args.branch,))

    # Build a list of releases and their versions.
    pf = pfe.load(pinfile.INTERNAL)
    releases = [add_release]
    for name, _ in pf.iterpins():
      v = get_release_version(name)
      if v == add_release[0]:
        LOGGER.error('Release [%s] (%d) is already pinned.',
                     add_release[1], add_release[0])
        return 1
      if v is not None:
        releases.append((v, name))
    releases.sort(reverse=True)

    # Shave off the top [stable_count+1] releases.
    count = args.stable_count + 1
    releases, deleted = releases[:count], releases[count:]
    if add_release not in releases:
      raise ValueError("Updated releases do not include added (%s):\n%s" % (
          add_release[1], '\n'.join(r[1] for r in releases)))

    # Set the new releases.
    tracker.add(pinfile.INTERNAL, pf.update(add_release[1], create=True))
    for _, r in deleted:
      tracker.add(pinfile.INTERNAL, pf.remove(r))
    if not tracker:
      LOGGER.error('No pins were updated.')
      return 1

    # Regenerate slave pools for affected masters.
    tracker.update()

    LOGGER.info('Created issues:\n%s', '\n'.join(tracker.issues))
    return 0

def call(cmd, cwd=None, dry_run=False):
  LOGGER.info("Executing command %s (cwd=%s)", cmd, (cwd or os.getcwd()))
  if dry_run:
    LOGGER.info('Dry Run: Not actually executing.')
    return (0, "")

  output = []
  proc = subprocess.Popen(
      cmd,
      stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT,
      cwd=cwd)
  for line in iter(proc.stdout.readline, b''):
    LOGGER.debug('[%s]: %s', cmd[0], line.rstrip())
    output.append(line)
  proc.wait()
  return proc.returncode, ''.join(output)

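# A minimal usage sketch for call() above (the command and path are
# hypothetical):
#
#   rv, output = call(['git', 'status', '--porcelain'], cwd='/some/repo')
#   if rv != 0:
#     LOGGER.error('git status failed (%d):\n%s', rv, output)
#
# Note that stderr is merged into stdout, so `output` interleaves both
# streams in the order the child produced them.
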
def update(self):
  LOGGER.info('Updating repositories: %s', self._updated)
  affected_masters = set()
  for pin in self._updated.iterkeys():
    affected_masters.update(pin.masters)

  failed_slave_pool_masters = []
  for m in sorted(affected_masters):
    try:
      self._regenerate_slave_pool(m)
    except SlavePoolUpdateError:
      LOGGER.exception("Failed to update slave pools for [%s].", m)
      failed_slave_pool_masters.append(m)
  if failed_slave_pool_masters:
    LOGGER.error('Failed to update slave pools for: %s. You may need to '
                 'add additional slaves to the pool(s).',
                 failed_slave_pool_masters)
    raise SlavePoolUpdateError("Failed to update slave pools.")

  # Upload CLs for the affected repositories.
  for pin, updates in self._updated.iteritems():
    self._upload_patch(
        self._c.subpath(*pin.base),
        self._generate_commit_message(updates))

def _upload_patch(self, repo_path, commit_msg):
  # Check if the Git repository actually has changes.
  diff_args = ['git', 'diff', '--no-ext-diff', '--exit-code']
  if not LOGGER.isEnabledFor(logging.DEBUG):
    diff_args.append('--quiet')
  rv, diff = execute.call(diff_args, cwd=repo_path)
  LOGGER.debug('Diff for [%s]:\n%s', repo_path, diff)
  if rv == 0:
    LOGGER.warning('No changes in repository; refusing to commit.')
    return

  commit_msg_file = self._c.mktempfile(commit_msg)

  LOGGER.warning('Creating commit in [%s] with message:\n%s',
                 repo_path, commit_msg)
  execute.check_call(
      ['git', 'checkout', '-b', '_cros_pin'],
      cwd=repo_path)
  execute.check_call(
      ['git', 'commit', '--all', '-F', commit_msg_file],
      cwd=repo_path)

  LOGGER.debug('Uploading CL!')
  args = [
      'git', 'cl', 'upload',
      '--bypass-hooks',  # The CQ will take care of them!
      '--send-mail',
      '--message-file', commit_msg_file,
      '-f',
  ]
  if self._cq:
    print 'Commit? [Y/n]:',
    input_string = raw_input()
    if input_string != '' and not distutils.util.strtobool(input_string):
      LOGGER.warning('User opted not to commit; aborting.')
      return
    args.append('--use-commit-queue')
  if not self._reviewers:
    args.append('--tbr-owners')
  output = execute.check_call(args, cwd=repo_path, dry_run=self._dry_run)

  issue = None
  for line in output.splitlines():
    for rx in self.RE_ISSUE_CREATED:
      match = rx.match(line)
      if match:
        issue = match.group(1)
        LOGGER.debug('Extracted issue from output: %s', issue)
        self._issues.add(issue)
        break
    if issue:
      # Break out of the outer loop too, so the for/else warning below
      # fires only when no pattern matched any line.
      break
  else:
    LOGGER.warning("Unable to extract issue from patch submission from:\n%s",
                   output)

def log_failure(_function, path, excinfo):
  LOGGER.warning('Failed when destroying [%s]: %s', path,
                 excinfo[1].message)

def _upload_patch(self, repo_path, commit_msg):
  # Check if the Git repository actually has changes.
  diff_args = ['git', 'diff', '--no-ext-diff', '--exit-code']
  if not LOGGER.isEnabledFor(logging.DEBUG):
    diff_args.append('--quiet')
  rv, diff = execute.call(diff_args, cwd=repo_path)
  LOGGER.debug('Diff for [%s]:\n%s', repo_path, diff)
  if rv == 0:
    LOGGER.warning('No changes in repository; refusing to commit.')
    return

  LOGGER.warning('Creating commit in [%s] with message:\n%s',
                 repo_path, commit_msg)
  execute.check_call(
      ['git', 'checkout', '-b', '_cros_pin'],
      cwd=repo_path)
  execute.check_call(
      ['git', 'commit', '--all', '--message', commit_msg],
      cwd=repo_path)

  LOGGER.debug('Uploading CL!')
  args = [
      'git', 'cl', 'upload',
      '--bypass-hooks',  # The CQ will take care of them!
      '-t', commit_msg,
      '-m', 'Auto-generated by `%s`' % (__name__,),
      '-f',
  ]
  if self._cq:
    print 'Commit? [Y/n]:',
    input_string = raw_input()
    if input_string != '' and not distutils.util.strtobool(input_string):
      LOGGER.warning('User opted not to commit; aborting.')
      return
    args.append('--use-commit-queue')
  if not self._reviewers:
    args.append('--tbr-owners')
  output = execute.check_call(args, cwd=repo_path, dry_run=self._dry_run)

  issue = None
  for line in output.splitlines():
    match = self.RE_ISSUE_CREATED.match(line)
    if match:
      issue = match.group(1)
      LOGGER.debug('Extracted issue from output: %s', issue)
      self._issues.add(issue)
      break
  else:
    LOGGER.warning("Unable to extract issue from patch submission.")