def subcommand_add_release(args):
  """Add a new release branch to the list of pins."""
  with checkout_for_args(args) as c:
    pfe = pinfile_editor_from_args(args, c)
    tracker = UpdateTracker.from_args(args, c)

    add_release = (get_release_version(args.branch), args.branch)
    if add_release[0] is None:
      raise ValueError("Invalid release branch: [%s]" % (args.branch,))

    # Build a list of releases and their versions.
    pf = pfe.load(pinfile.INTERNAL)
    releases = [add_release]
    for name, _ in pf.iterpins():
      v = get_release_version(name)
      if v == add_release[0]:
        LOGGER.error('Release [%s] (%d) is already pinned.',
                     add_release[1], add_release[0])
        return 1
      if v is not None:
        releases.append((v, name))
    releases.sort(reverse=True)

    # Keep the top [stable_count+1] releases (including the new one); the
    # remainder will be unpinned.
    count = args.stable_count + 1
    releases, deleted = releases[:count], releases[count:]
    if add_release not in releases:
      raise ValueError("Updated releases do not include added (%s):\n%s" % (
          add_release[1], '\n'.join(r[1] for r in releases)))

    # Set the new releases.
    tracker.add(pinfile.INTERNAL, pf.update(add_release[1], create=True))
    for _, r in deleted:
      tracker.add(pinfile.INTERNAL, pf.remove(r))
    if not tracker:
      LOGGER.error('No pins were updated.')
      return 1

    # Regenerate slave pools for affected masters.
    tracker.update()
    LOGGER.info('Created issues:\n%s', '\n'.join(tracker.issues))
    return 0
def subcommand_add_release(args):
  """Add a new release branch to the list of pins."""
  with checkout_for_args(args) as c:
    pfe = pinfile_editor_from_args(args, c)
    tracker = UpdateTracker.from_args(args, c)

    add_release = (get_release_version(args.branch), args.branch)
    if add_release[0] is None:
      raise ValueError("Invalid release branch: [%s]" % (args.branch,))

    # Build a list of releases and their versions.
    pf = pfe.load(pinfile.INTERNAL)
    releases = [add_release]
    for name, _ in pf.iterpins():
      v = get_release_version(name)
      if v == add_release[0]:
        LOGGER.error('Release [%s] (%d) is already pinned.',
                     add_release[1], add_release[0])
        return 1
      if v is not None:
        releases.append((v, name))
    releases.sort(reverse=True)

    # Keep the top [stable_count+1] releases (including the new one); the
    # remainder is slated for removal.
    count = args.stable_count + 1
    releases, deleted = releases[:count], releases[count:]

    # Check whether an LTS release is among the deleted releases.
    for v, name in deleted:
      if ((v - BASE_LTS_MILESTONE) % NEXT_LTS_MILESTONE_MODULO) == 0:
        releases.append((v, name))
        deleted.remove((v, name))
        # We allow one milestone to exist in the LTS window, which starts
        # after the stable window.
        break

    if add_release not in releases:
      raise ValueError("Updated releases do not include added (%s):\n%s" % (
          add_release[1], '\n'.join(r[1] for r in releases)))

    # Set the new releases.
    tracker.add(pinfile.INTERNAL, pf.update(add_release[1], create=True))
    for _, r in deleted:
      tracker.add(pinfile.INTERNAL, pf.remove(r))
    if not tracker:
      LOGGER.error('No pins were updated.')
      return 1

    # Regenerate slave pools for affected masters.
    try:
      tracker.update()
    except SlavePoolUpdateError as e:
      LOGGER.error('Failed to generate pin bump CLs: %s', e)
      return 1

    LOGGER.info('Created issues:\n%s', '\n'.join(tracker.issues))
    return 0
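The LTS retention above keeps a milestone when `(v - BASE_LTS_MILESTONE) % NEXT_LTS_MILESTONE_MODULO == 0`. A minimal sketch of that arithmetic, using placeholder values for the two constants (the real `BASE_LTS_MILESTONE` and `NEXT_LTS_MILESTONE_MODULO` are defined elsewhere and may differ):

# Placeholder values for illustration only; not the module's real constants.
EXAMPLE_BASE_LTS_MILESTONE = 96
EXAMPLE_NEXT_LTS_MILESTONE_MODULO = 6

def example_is_lts_milestone(version):
  """Mirrors the check used above to decide whether a release that fell out
  of the stable window should be retained as the LTS pin."""
  return ((version - EXAMPLE_BASE_LTS_MILESTONE) %
          EXAMPLE_NEXT_LTS_MILESTONE_MODULO) == 0

# With these placeholder values, milestones 96, 102, 108, ... qualify, and at
# most one of them is kept because the loop above breaks after the first match.
assert example_is_lts_milestone(102)
assert not example_is_lts_milestone(100)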
def subcommand_update(args):
  """Update a single Chromite pin."""
  require = (args.target != 'existing')
  target_pins = []
  if args.target in ('external', 'both', 'existing'):
    target_pins.append(pinfile.EXTERNAL)
  if args.target in ('internal', 'both', 'existing'):
    target_pins.append(pinfile.INTERNAL)

  with checkout_for_args(args) as c:
    pfe = pinfile_editor_from_args(args, c)
    tracker = UpdateTracker.from_args(args, c)

    for pin in target_pins:
      LOGGER.debug('Updating target pin [%s]', pin)

      # Update the pin.
      pf = pfe.load(pin)
      if not (require or pf.has_pin(args.name)):
        LOGGER.debug('Pin not found in [%s]. Only updating existing pins.',
                     pin)
        continue

      update = pf.update(args.name, version=args.version, create=require)
      if not update:
        LOGGER.debug('Did not update pins for [%s]', pin)
        continue
      tracker.add(pin, update)
      LOGGER.debug('Updated pin set: %s', update)

    if not tracker:
      LOGGER.error('No pins were updated.')
      return 1

    # Regenerate slave pools for affected masters.
    tracker.update()
    for i in tracker.issues:
      LOGGER.warning('Created Issue: %s', i)
    return 0
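The `require` flag above does double duty: when the target is `existing`, pins are never created and missing pins are skipped; for any other target, the pin is created if absent. A rough sketch of that decision, using a stand-in pin object rather than the real pinfile API (names and values below are illustrative):

# Stand-in for illustration only; the real pinfile editor has more state.
class ExamplePinFile(object):
  def __init__(self, pins):
    self._pins = dict(pins)

  def has_pin(self, name):
    return name in self._pins

  def update(self, name, version=None, create=False):
    # Mirrors the calls made above: returns a truthy update on change and a
    # falsy value when there is nothing to do.
    if name not in self._pins and not create:
      return None
    if self._pins.get(name) == version:
      return None
    self._pins[name] = version
    return {name: version}

pf = ExamplePinFile({'master': '1234'})
# target == 'existing' -> require is False: unknown pins are not created.
assert pf.update('release-R99', version='5678', create=False) is None
# Any other target -> require is True: the pin is created.
assert pf.update('release-R99', version='5678', create=True)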
def update(self):
  LOGGER.info('Updating repositories: %s', self._updated)
  affected_masters = set()
  for pin in self._updated.iterkeys():
    affected_masters.update(pin.masters)

  failed_slave_pool_masters = []
  for m in sorted(affected_masters):
    try:
      self._regenerate_slave_pool(m)
    except SlavePoolUpdateError:
      failed_slave_pool_masters.append(m)
  if failed_slave_pool_masters:
    LOGGER.error('Failed to update slave pools for %s. You may need to '
                 'add additional slaves to the pool(s).',
                 failed_slave_pool_masters)
    raise SlavePoolUpdateError("Failed to update slave pools.")

  # Upload CLs for the affected repositories.
  for pin, updates in self._updated.iteritems():
    self._upload_patch(
        self._c.subpath(*pin.base),
        self._generate_commit_message(updates))
def update(self):
  LOGGER.info('Updating repositories: %s', self._updated)
  affected_masters = set()
  for pin in self._updated.iterkeys():
    affected_masters.update(pin.masters)

  failed_slave_pool_masters = []
  for m in sorted(affected_masters):
    try:
      self._regenerate_slave_pool(m)
    except SlavePoolUpdateError:
      LOGGER.exception("Failed to update slave pools for [%s].", m)
      failed_slave_pool_masters.append(m)
  if failed_slave_pool_masters:
    LOGGER.error('Failed to update slave pools for: %s. You may need to '
                 'add additional slaves to the pool(s).',
                 failed_slave_pool_masters)
    raise SlavePoolUpdateError("Failed to update slave pools.")

  # Upload CLs for the affected repositories.
  for pin, updates in self._updated.iteritems():
    self._upload_patch(
        self._c.subpath(*pin.base),
        self._generate_commit_message(updates))
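Both versions assume a `SlavePoolUpdateError` raised by `_regenerate_slave_pool`; the revision above additionally calls `LOGGER.exception`, which logs at ERROR level and appends the active traceback for each failed master before the summary error is raised. A minimal sketch of that pattern with a stubbed regenerate step (everything except `SlavePoolUpdateError` and the log text is illustrative, and the real exception class may carry more context):

import logging

LOGGER = logging.getLogger('example')

class SlavePoolUpdateError(Exception):
  """Assumed shape of the error type used above."""

def example_update(masters, regenerate):
  failed = []
  for master in sorted(masters):
    try:
      regenerate(master)
    except SlavePoolUpdateError:
      # exception() records the traceback, so each failing master is
      # diagnosable from the log alone.
      LOGGER.exception('Failed to update slave pools for [%s].', master)
      failed.append(master)
  if failed:
    raise SlavePoolUpdateError('Failed to update slave pools: %s' % (failed,))

def _fake_regenerate(master):
  # Stub standing in for _regenerate_slave_pool.
  if master == 'chromium.broken':
    raise SlavePoolUpdateError('no free slaves')

try:
  example_update(['chromium.ok', 'chromium.broken'], _fake_regenerate)
except SlavePoolUpdateError as e:
  print('caller sees the summary error: %s' % e)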