def CreateTransaction(self, change, limit_to=None):
    """Resolve a single change into the transaction required to land it.

    A transaction is the group of commits that must land together for the
    given change to be merged: the change itself, its parent (gerrit) deps,
    and its CQ-DEPEND deps.

    Args:
        change: The cros_patch.GitRepoPatch to build a transaction for.
        limit_to: If not None, a container/mapping restricting which
            uncommitted patches may be pulled into the transaction.

    Returns:
        A sequence of the cros_patch.GitRepoPatch objects comprising the
        transaction.

    Raises:
        DependencyError: If a dependency could not be resolved.
        GerritException or GOBError: If a gerrit query fails.
    """
    transaction = []
    seen_gerrit_deps = cros_patch.PatchCache()
    seen_cq_deps = cros_patch.PatchCache()
    self._AddChangeToPlanWithDeps(change, transaction, seen_gerrit_deps,
                                  seen_cq_deps, limit_to=limit_to)
    return transaction
def __init__(self, path, helper_pool=None, forced_manifest=None,
             deps_filter_fn=None, is_submitting=False):
    """Constructor.

    Args:
        path: Path to the buildroot.
        helper_pool: Pool of allowed GerritHelpers used for fetching
            patches. Defaults to a pool allowing both internal and
            external fetches.
        forced_manifest: A manifest object used to map projects to
            repositories. Defaults to the buildroot.
        deps_filter_fn: Predicate invoked with a patch; returns True if
            the patch is acceptable. Defaults to accepting everything.
        is_submitting: Whether we are currently submitting patchsets.
            Used to print better error messages.
    """
    self.manifest = forced_manifest
    self._path = path
    self._is_submitting = is_submitting

    if helper_pool is None:
        helper_pool = HelperPool.SimpleCreate(cros_internal=True, cros=True)
    self._helper_pool = helper_pool

    if deps_filter_fn is None:
        # By default, every patch passes the filter.
        def deps_filter_fn(_patch):
            return True
    self.deps_filter_fn = deps_filter_fn

    # Maps ChangeId -> exception for patches that failed against ToT.
    # Primarily used to keep resolution/applying from going down known
    # bad paths.
    self.failed_tot = {}

    self._committed_cache = cros_patch.PatchCache()
    self._lookup_cache = cros_patch.PatchCache()
    self._change_deps_cache = {}
def Apply(self, changes, frozen=True, honor_ordering=False,
          changes_filter=None):
    """Applies changes from pool into the build root specified by the manifest.

    This method resolves each given change down into a set of transactions —
    the change and its dependencies — that must go in, then tries to apply
    the largest transaction first, working its way down.

    If a transaction cannot be applied, then it is rolled back in full —
    note that if a change is involved in multiple transactions, if an
    earlier attempt fails, that change can be retried in a new transaction
    if the failure wasn't caused by the patch being incompatible to ToT.

    Args:
        changes: A sequence of cros_patch.GitRepoPatch instances to resolve
            and apply.
        frozen: If True, then resolving of the given changes is explicitly
            limited to just the passed in changes, or known committed changes.
            This is basically CQ/Paladin mode, used to limit the changes being
            pulled in/committed to just what we allow.
        honor_ordering: Apply normally will reorder the transactions it
            computes, trying the largest first, then degrading through smaller
            transactions if the larger of the two fails. If honor_ordering is
            False, then the ordering given via changes is preserved — this is
            mainly of use for cbuildbot induced patching, and shouldn't be
            used for CQ patching.
        changes_filter: If not None, must be a functor taking two arguments:
            series, changes; it must return the changes to work on. This is
            invoked after the initial changes have been fetched, thus this is
            a way for consumers to do last minute checking of the changes
            being inspected, and expand the changes if necessary. Primarily
            this is of use for cbuildbot patching when dealing w/
            uploaded/remote patches.

    Returns:
        A tuple of changes-applied, Exceptions for the changes that failed
        against ToT, and Exceptions that failed inflight; these exceptions
        are cros_patch.PatchException instances.
    """
    resolved, applied, failed = [], [], []

    # Prefetch the changes; we need accurate change_id/id's, which is
    # guaranteed via Fetch.
    changes, not_in_manifest = self.FetchChanges(changes)
    failed.extend(not_in_manifest)

    if changes_filter:
        changes = changes_filter(self, changes)

    # Make the fetched changes resolvable by id before planning transactions.
    self.InjectLookupCache(changes)
    # In frozen (CQ) mode, dependency resolution may only draw on the
    # changes handed to us; otherwise it is unconstrained.
    limit_to = cros_patch.PatchCache(changes) if frozen else None
    planned = set()
    for change, plan, ex in self.CreateTransactions(changes, limit_to=limit_to):
        if ex is not None:
            logging.info('Failed creating transaction for %s: %s',
                         change, ex)
            failed.append(ex)
        else:
            resolved.append((change, plan))
            logging.info('Transaction for %s is %s.',
                         change, ', '.join(str(x) for x in resolved[-1][-1]))
            planned.update(plan)

    if not resolved:
        # No work to do; either no changes were given to us, or all failed
        # to be resolved.
        return [], failed, []

    if not honor_ordering:
        # Sort by length, falling back to the order the changes were given
        # to us. This is done to prefer longer transactions (more painful to
        # rebase) over shorter transactions.
        position = dict(
            (change, idx) for idx, change in enumerate(changes))

        def mk_key(data):
            # Key: (-transaction size, original position) so larger plans
            # sort first and ties keep the caller's ordering.
            change, plan = data
            ids = [x.id for x in plan]
            return -len(ids), position[change]

        resolved.sort(key=mk_key)

    for inducing_change, transaction_changes in resolved:
        try:
            # _Transaction rolls the checkout back in full if applying any
            # member of the transaction raises.
            with self._Transaction(transaction_changes):
                logging.debug(
                    'Attempting transaction for %s: changes: %s',
                    inducing_change,
                    ', '.join(str(x) for x in transaction_changes))
                self._ApplyChanges(inducing_change, transaction_changes)
        except cros_patch.PatchException as e:
            logging.info('Failed applying transaction for %s: %s',
                         inducing_change, e)
            failed.append(e)
        else:
            applied.extend(transaction_changes)
            # Record these as committed so later transactions can depend on
            # them without re-resolving.
            self.InjectCommittedPatches(transaction_changes)

    # Uniquify while maintaining order.
    def _uniq(l):
        # Generator yielding each element of l once, first-seen order.
        s = set()
        for x in l:
            if x not in s:
                yield x
                s.add(x)

    applied = list(_uniq(applied))
    # NOTE(review): flips the instance into submitting mode after applying;
    # per the constructor this only affects error-message wording — confirm
    # no other code keys off this flag mid-Apply.
    self._is_submitting = True

    # A change that ultimately applied in some transaction is not a failure,
    # even if an earlier transaction containing it failed.
    failed = [x for x in failed if x.patch not in applied]
    failed_tot = [x for x in failed if not x.inflight]
    failed_inflight = [x for x in failed if x.inflight]
    return applied, failed_tot, failed_inflight