Example #1
 def is_rate_limit_pass(self, sandesh):
     # Check if the buffer needs to be resized
     if (self.__class__.rate_limit_buffer.maxlen != \
             SandeshSystem.get_sandesh_send_rate_limit()):
         temp_buffer = copy.deepcopy(self.__class__.rate_limit_buffer)
         self.__class__.rate_limit_buffer = util.deque(temp_buffer, \
             maxlen=SandeshSystem.get_sandesh_send_rate_limit())
         del temp_buffer
     # If the buffer size is 0, return False
     if self.__class__.rate_limit_buffer.maxlen == 0:
         return False
     cur_time = int(time.time())
     #Check if circular buffer is full
     if len(self.__class__.rate_limit_buffer) == \
         self.__class__.rate_limit_buffer.maxlen:
         # Read the element in buffer and compare with cur_time
         if (self.__class__.rate_limit_buffer[0] == cur_time):
             # Sender is generating more messages/sec than the
             # buffer_threshold size
             if self.__class__.do_rate_limit_drop_log:
                 sandesh._logger.error('SANDESH: Ratelimit Drop ' \
                     '(%d messages/sec): for %s' % \
                     (self.__class__.rate_limit_buffer.maxlen, \
                      self.__class__.__name__))
                 #Disable logging
                 self.__class__.do_rate_limit_drop_log = False
             return False
     # If logging is disabled, enable it
     self.__class__.do_rate_limit_drop_log = True
     self.__class__.rate_limit_buffer.append(cur_time)
     return True
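
The core idea above is a one-second sliding window built on a bounded deque: a timestamp is appended for every send, and when the buffer is full and its oldest entry still falls in the current second, the limit has been hit. Below is a minimal standalone sketch of that idea, using the standard collections.deque directly; the RateLimiter class and its names are invented for illustration and are not part of the original code.

import collections
import time

class RateLimiter(object):
    def __init__(self, max_per_sec):
        # Circular buffer holding the timestamps of the last max_per_sec sends.
        self.buffer = collections.deque(maxlen=max_per_sec)

    def allow(self):
        if self.buffer.maxlen == 0:
            return False
        now = int(time.time())
        # Buffer full and its oldest entry is from the current second:
        # max_per_sec messages have already been sent during this second.
        if len(self.buffer) == self.buffer.maxlen and self.buffer[0] == now:
            return False
        self.buffer.append(now)
        return True

limiter = RateLimiter(max_per_sec=5)
print([limiter.allow() for _ in range(7)])  # typically 5 True, then False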
Example #2
    def __iter__(self):
        """Generate the ancestors of _initrevs in reverse topological order.

        If inclusive is False, yield a sequence of revision numbers starting
        with the parents of each revision in revs, i.e., each revision is *not*
        considered an ancestor of itself.  Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc.

        If inclusive is True, yield all the revs first (ignoring stoprev),
        then yield all the ancestors of revs as when inclusive is False.
        If an element in revs is an ancestor of a different rev it is not
        yielded again."""
        seen = set()
        revs = self._initrevs
        if self._inclusive:
            for rev in revs:
                yield rev
            seen.update(revs)

        parentrevs = self._parentrevs
        stoprev = self._stoprev
        visit = util.deque(revs)

        while visit:
            for parent in parentrevs(visit.popleft()):
                if parent >= stoprev and parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent
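
As a quick illustration of the breadth-first walk the docstring describes, here is a self-contained sketch with a hard-coded parent table standing in for self._parentrevs; the revision numbers and the PARENTS table are invented for the example.

import collections

# rev -> (parent1, parent2); -1 means "no parent", as in Mercurial revlogs.
PARENTS = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (2, 3)}

def ancestors(initrevs, stoprev=0, inclusive=False):
    seen = set()
    if inclusive:
        for rev in initrevs:
            yield rev
        seen.update(initrevs)
    visit = collections.deque(initrevs)
    while visit:
        for parent in PARENTS[visit.popleft()]:
            if parent >= stoprev and parent not in seen:
                visit.append(parent)
                seen.add(parent)
                yield parent

print(list(ancestors([4])))                  # [2, 3, 1, 0]
print(list(ancestors([4], inclusive=True)))  # [4, 2, 3, 1, 0]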
Example #3
def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
    # if nodes is empty we scan the entire graph
    if nodes:
        heads = dag.headsetofconnecteds(nodes)
    else:
        heads = dag.heads()
    dist = {}
    visit = util.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            if curr not in always: # need this check for the early exit below
                sample.add(curr)
                if quicksamplesize and (len(sample) >= quicksamplesize):
                    return
        seen.add(curr)
        for p in dag.parents(curr):
            if not nodes or p in nodes:
                dist.setdefault(p, d + 1)
                visit.append(p)
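
The loop above samples nodes at exponentially growing distances from the heads (1, 2, 4, 8, ...). A stripped-down sketch on an invented linear history makes the pattern visible; the parents table and node numbers are made up for the demo, and the always/quicksamplesize handling is omitted.

import collections

parents = {n: [n - 1] for n in range(1, 10)}  # 0 <- 1 <- ... <- 9
parents[0] = []

def exponential_sample(heads):
    dist, seen, sample = {}, set(), set()
    factor = 1
    visit = collections.deque(heads)
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
        seen.add(curr)
        for p in parents[curr]:
            dist.setdefault(p, d + 1)
            visit.append(p)
    return sample

print(sorted(exponential_sample([9])))  # [2, 6, 8, 9]: distances 8, 4, 2, 1 from the head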
Example #4
def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
    # if nodes is empty we scan the entire graph
    if nodes:
        heads = dag.headsetofconnecteds(nodes)
    else:
        heads = dag.heads()
    dist = {}
    visit = util.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            if curr not in always:  # need this check for the early exit below
                sample.add(curr)
                if quicksamplesize and (len(sample) >= quicksamplesize):
                    return
        seen.add(curr)
        for p in dag.parents(curr):
            if not nodes or p in nodes:
                dist.setdefault(p, d + 1)
                visit.append(p)
Example #5
 def is_rate_limit_pass(self, sandesh):
     # If the buffer size is 0, return False
     if self.__class__.rate_limit_buffer.maxlen == 0:
         return False
     # Check if the buffer needs to be resized
     if (self.__class__.rate_limit_buffer.maxlen != \
             SandeshSystem.get_sandesh_send_rate_limit()):
         temp_buffer = copy.deepcopy(self.__class__.rate_limit_buffer)
         self.__class__.rate_limit_buffer = util.deque(temp_buffer, \
             maxlen=SandeshSystem.get_sandesh_send_rate_limit())
         del temp_buffer
     cur_time = int(time.time())
     #Check if circular buffer is full
     if len(self.__class__.rate_limit_buffer) == \
         self.__class__.rate_limit_buffer.maxlen:
         # Read the element in buffer and compare with cur_time
         if (self.__class__.rate_limit_buffer[0] == cur_time):
             # Sender is generating more messages/sec than the
             # buffer_threshold size
             if self.__class__.do_rate_limit_drop_log:
                 sandesh._logger.error('SANDESH: Ratelimit Drop ' \
                     '(%d messages/sec): for %s' % \
                     (self.__class__.rate_limit_buffer.maxlen, \
                      self.__class__.__name__))
                 #Disable logging
                 self.__class__.do_rate_limit_drop_log = False
             return False
     # If logging is disabled, enable it
     self.__class__.do_rate_limit_drop_log = True
     self.__class__.rate_limit_buffer.append(cur_time)
     return True
Example #6
    def __iter__(self):
        """Generate the ancestors of _initrevs in reverse topological order.

        If inclusive is False, yield a sequence of revision numbers starting
        with the parents of each revision in revs, i.e., each revision is *not*
        considered an ancestor of itself.  Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc.

        If inclusive is True, yield all the revs first (ignoring stoprev),
        then yield all the ancestors of revs as when inclusive is False.
        If an element in revs is an ancestor of a different rev it is not
        yielded again."""
        seen = set()
        revs = self._initrevs
        if self._inclusive:
            for rev in revs:
                yield rev
            seen.update(revs)

        parentrevs = self._parentrevs
        stoprev = self._stoprev
        visit = util.deque(revs)

        while visit:
            for parent in parentrevs(visit.popleft()):
                if parent >= stoprev and parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent
Example #7
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs.  If heads is
        not supplied, uses all of the revlog's heads.  If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = util.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]
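
To see the two walks from the docstring on a concrete graph, here is a small sketch that computes ::common (inclusive) and then (::heads) - (::common) over an invented parentrevs table; the revision numbers are made up and nullrev handling is reduced to skipping -1.

import collections

PARENTREVS = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (2, 3)}

def missingrevs(common, heads):
    has = set()                                  # ::common, inclusive
    stack = list(common)
    while stack:
        r = stack.pop()
        if r != -1 and r not in has:
            has.add(r)
            stack.extend(PARENTREVS[r])
    missing = set()                              # (::heads) - (::common)
    visit = collections.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        missing.add(r)
        for p in PARENTREVS[r]:
            if p != -1 and p not in has:
                visit.append(p)
    return sorted(missing)

print(missingrevs(common=[2], heads=[4]))  # [3, 4]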
Example #8
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs.  If heads is
        not supplied, uses all of the revlog's heads.  If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = util.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]
Example #9
def _updatesample(dag, nodes, sample, quicksamplesize=0):
    """update an existing sample to match the expected size

    The sample is updated with nodes exponentially distant from each head of the
    <nodes> set. (H~1, H~2, H~4, H~8, etc).

    If a target size is specified, the sampling will stop once this size is
    reached. Otherwise sampling will happen until roots of the <nodes> set are
    reached.

    :dag: a dag object from dagutil
    :nodes:  set of nodes we want to discover (if None, assume the whole dag)
    :sample: a sample to update
    :quicksamplesize: optional target size of the sample"""
    # if nodes is empty we scan the entire graph
    if nodes:
        heads = dag.headsetofconnecteds(nodes)
    else:
        heads = dag.heads()
    dist = {}
    visit = util.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
            if quicksamplesize and (len(sample) >= quicksamplesize):
                return
        seen.add(curr)
        for p in dag.parents(curr):
            if not nodes or p in nodes:
                dist.setdefault(p, d + 1)
                visit.append(p)
Example #10
def _updatesample(dag, nodes, sample, quicksamplesize=0):
    """update an existing sample to match the expected size

    The sample is updated with nodes exponentially distant from each head of the
    <nodes> set. (H~1, H~2, H~4, H~8, etc).

    If a target size is specified, the sampling will stop once this size is
    reached. Otherwise sampling will happen until roots of the <nodes> set are
    reached.

    :dag: a dag object from dagutil
    :nodes:  set of nodes we want to discover (if None, assume the whole dag)
    :sample: a sample to update
    :quicksamplesize: optional target size of the sample"""
    # if nodes is empty we scan the entire graph
    if nodes:
        heads = dag.headsetofconnecteds(nodes)
    else:
        heads = dag.heads()
    dist = {}
    visit = util.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
            if quicksamplesize and (len(sample) >= quicksamplesize):
                return
        seen.add(curr)
        for p in dag.parents(curr):
            if not nodes or p in nodes:
                dist.setdefault(p, d + 1)
                visit.append(p)
Example #11
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, fetch, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "fetch" is a list of roots of the nodes that would be incoming, to be
      supplied to changegroupsubset.
    "heads" is either the supplied heads, or else the remote's heads.
    """

    knownnode = repo.changelog.hasnode
    search = []
    fetch = set()
    seen = set()
    seenbranch = set()
    base = set()

    if not heads:
        heads = remote.heads()

    if repo.changelog.tip() == nullid:
        base.add(nullid)
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], heads

    # assume we're closer to the tip than the root
    # and start by examining the heads
    repo.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if not knownnode(h):
            unknown.append(h)
        else:
            base.add(h)

    if not unknown:
        return list(base), [], list(heads)

    req = set(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = util.deque(remote.branches(unknown))
    while unknown:
        r = []
        while unknown:
            n = unknown.popleft()
            if n[0] in seen:
                continue

            repo.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
            if n[0] == nullid:  # found the end of the branch
                pass
            elif n in seenbranch:
                repo.ui.debug("branch already found\n")
                continue
            elif n[1] and knownnode(n[1]):  # do we know the base?
                repo.ui.debug("found incomplete branch %s:%s\n" %
                              (short(n[0]), short(n[1])))
                search.append(n[0:2])  # schedule branch range for scanning
                seenbranch.add(n)
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if knownnode(n[2]) and knownnode(n[3]):
                        repo.ui.debug("found new changeset %s\n" % short(n[1]))
                        fetch.add(n[1])  # earliest unknown
                    for p in n[2:4]:
                        if knownnode(p):
                            base.add(p)  # latest known

                for p in n[2:4]:
                    if p not in req and not knownnode(p):
                        r.append(p)
                        req.add(p)
            seen.add(n[0])

        if r:
            reqcnt += 1
            repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            repo.ui.debug("request %d: %s\n" %
                          (reqcnt, " ".join(map(short, r))))
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p + 10]):
                    repo.ui.debug("received %s:%s\n" %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
        for n, l in zip(search, remote.between(search)):
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if knownnode(i):
                    if f <= 2:
                        repo.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch.add(p)
                        base.add(i)
                    else:
                        repo.ui.debug("narrowed branch search to %s:%s\n" %
                                      (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
            search = newsearch

    # sanity check our fetch list
    for f in fetch:
        if knownnode(f):
            raise error.RepoError(_("already have changeset ") + short(f[:4]))

    base = list(base)
    if base == [nullid]:
        if force:
            repo.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    repo.ui.debug("found new changesets starting at " +
                  " ".join([short(f) for f in fetch]) + "\n")

    repo.ui.progress(_('searching'), None)
    repo.ui.debug("%d total queries\n" % reqcnt)

    return base, list(fetch), heads
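
One detail worth noting above is how unresolved parents are sent back to the remote: the request list r is queried in chunks of ten via remote.branches(r[p:p + 10]). A tiny sketch of that batching pattern, with an invented item list standing in for the real nodes:

def batched(items, size=10):
    # Yield consecutive slices of at most `size` items.
    for start in range(0, len(items), size):
        yield items[start:start + size]

pending = ['node%02d' % i for i in range(23)]      # invented placeholder nodes
print([len(chunk) for chunk in batched(pending)])  # [10, 10, 3]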
Example #12
def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, fetch, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "fetch" is a list of roots of the nodes that would be incoming, to be
      supplied to changegroupsubset.
    "heads" is either the supplied heads, or else the remote's heads.
    """

    m = repo.changelog.nodemap
    search = []
    fetch = set()
    seen = set()
    seenbranch = set()
    base = set()

    if not heads:
        heads = remote.heads()

    if repo.changelog.tip() == nullid:
        base.add(nullid)
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], heads

    # assume we're closer to the tip than the root
    # and start by examining the heads
    repo.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base.add(h)

    if not unknown:
        return list(base), [], list(heads)

    req = set(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = util.deque(remote.branches(unknown))
    while unknown:
        r = []
        while unknown:
            n = unknown.popleft()
            if n[0] in seen:
                continue

            repo.ui.debug("examining %s:%s\n"
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                repo.ui.debug("branch already found\n")
                continue
            elif n[1] and n[1] in m: # do we know the base?
                repo.ui.debug("found incomplete branch %s:%s\n"
                              % (short(n[0]), short(n[1])))
                search.append(n[0:2]) # schedule branch range for scanning
                seenbranch.add(n)
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        repo.ui.debug("found new changeset %s\n" %
                                      short(n[1]))
                        fetch.add(n[1]) # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base.add(p) # latest known

                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req.add(p)
            seen.add(n[0])

        if r:
            reqcnt += 1
            repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            repo.ui.debug("request %d: %s\n" %
                        (reqcnt, " ".join(map(short, r))))
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p + 10]):
                    repo.ui.debug("received %s:%s\n" %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
        for n, l in zip(search, remote.between(search)):
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        repo.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                        fetch.add(p)
                        base.add(i)
                    else:
                        repo.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
            search = newsearch

    # sanity check our fetch list
    for f in fetch:
        if f in m:
            raise error.RepoError(_("already have changeset ")
                                  + short(f[:4]))

    base = list(base)
    if base == [nullid]:
        if force:
            repo.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    repo.ui.debug("found new changesets starting at " +
                 " ".join([short(f) for f in fetch]) + "\n")

    repo.ui.progress(_('searching'), None)
    repo.ui.debug("%d total queries\n" % reqcnt)

    return base, list(fetch), heads
Example #13
File: hbisect.py Project: RayFerr000/PLTL
def bisect(changelog, state):
    """find the next node (if any) for testing during a bisect search.
    returns a (nodes, number, good) tuple.

    'nodes' is the final result of the bisect if 'number' is 0.
    Otherwise 'number' indicates the remaining possible candidates for
    the search and 'nodes' contains the next bisect target.
    'good' is True if bisect is searching for a first good changeset, False
    if searching for a first bad one.
    """

    clparents = changelog.parentrevs
    skip = set([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        goodrev = min(goodrevs)
        # build visit array
        ancestors = [None] * (len(changelog) + 1) # an extra for [-1]

        # set nodes descended from goodrevs
        for rev in goodrevs:
            ancestors[rev] = []
        for rev in changelog.revs(goodrev + 1):
            for prev in clparents(rev):
                if ancestors[prev] == []:
                    ancestors[rev] = []

        # clear good revs from array
        for rev in goodrevs:
            ancestors[rev] = None
        for rev in changelog.revs(len(changelog), goodrev):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors

    good = False
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors: # looking for bad to good transition?
        good = True
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors: # now we're confused
        if (len(state['bad']) == 1 and len(state['good']) == 1 and
            state['bad'] != state['good']):
            raise util.Abort(_("starting revisions are not directly related"))
        raise util.Abort(_("inconsistent state, %s:%s is good and bad")
                         % (badrev, short(bad)))

    # build children dict
    children = {}
    visit = util.deque([badrev])
    candidates = []
    while visit:
        rev = visit.popleft()
        if ancestors[rev] == []:
            candidates.append(rev)
            for prev in clparents(rev):
                if prev != -1:
                    if prev in children:
                        children[prev].append(rev)
                    else:
                        children[prev] = [rev]
                        visit.append(prev)

    candidates.sort()
    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    perfect = tot // 2

    # find the best node to test
    best_rev = None
    best_len = -1
    poison = set()
    for rev in candidates:
        if rev in poison:
            # poison children
            poison.update(children.get(rev, []))
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a) # number of ancestors
        y = tot - x # number of non-ancestors
        value = min(x, y) # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect: # found a perfect candidate? quit early
                break

        if y < perfect and rev not in skip: # all downhill from here?
            # poison children
            poison.update(children.get(rev, []))
            continue

        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = list(set(ancestors[c] + a))
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return ([best_node], tot, good)
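
The candidate scoring in the loop above boils down to min(x, y), where x is the number of ancestors a candidate would confirm and y = tot - x the number it would rule out; tot // 2 is a perfect split. A toy calculation with invented counts:

def split_value(num_ancestors, total):
    # How evenly does testing this candidate divide the remaining range?
    x = num_ancestors
    y = total - num_ancestors
    return min(x, y)

tot = 9
perfect = tot // 2
for x in (1, 3, 4, 5, 8):
    print(x, split_value(x, tot), split_value(x, tot) == perfect)
# candidates with 4 or 5 ancestors out of 9 give the ideal split of 4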
Example #14
def bisect(changelog, state):
    """find the next node (if any) for testing during a bisect search.
    returns a (nodes, number, good) tuple.

    'nodes' is the final result of the bisect if 'number' is 0.
    Otherwise 'number' indicates the remaining possible candidates for
    the search and 'nodes' contains the next bisect target.
    'good' is True if bisect is searching for a first good changeset, False
    if searching for a first bad one.
    """

    clparents = changelog.parentrevs
    skip = set([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        goodrev = min(goodrevs)
        # build visit array
        ancestors = [None] * (len(changelog) + 1)  # an extra for [-1]

        # set nodes descended from goodrevs
        for rev in goodrevs:
            ancestors[rev] = []
        for rev in changelog.revs(goodrev + 1):
            for prev in clparents(rev):
                if ancestors[prev] == []:
                    ancestors[rev] = []

        # clear good revs from array
        for rev in goodrevs:
            ancestors[rev] = None
        for rev in changelog.revs(len(changelog), goodrev):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors

    good = False
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors:  # looking for bad to good transition?
        good = True
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors:  # now we're confused
        if (len(state['bad']) == 1 and len(state['good']) == 1
                and state['bad'] != state['good']):
            raise util.Abort(_("starting revisions are not directly related"))
        raise util.Abort(
            _("inconsistent state, %s:%s is good and bad") %
            (badrev, short(bad)))

    # build children dict
    children = {}
    visit = util.deque([badrev])
    candidates = []
    while visit:
        rev = visit.popleft()
        if ancestors[rev] == []:
            candidates.append(rev)
            for prev in clparents(rev):
                if prev != -1:
                    if prev in children:
                        children[prev].append(rev)
                    else:
                        children[prev] = [rev]
                        visit.append(prev)

    candidates.sort()
    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    perfect = tot // 2

    # find the best node to test
    best_rev = None
    best_len = -1
    poison = set()
    for rev in candidates:
        if rev in poison:
            # poison children
            poison.update(children.get(rev, []))
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a)  # number of ancestors
        y = tot - x  # number of non-ancestors
        value = min(x, y)  # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect:  # found a perfect candidate? quit early
                break

        if y < perfect and rev not in skip:  # all downhill from here?
            # poison children
            poison.update(children.get(rev, []))
            continue

        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = list(set(ancestors[c] + a))
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return ([best_node], tot, good)