Code example #1
File: sched.py Project: mefyl/drake
class RoundRobin(SchedulingPolicy):

  def __init__(self):
    self.__coroutines = OrderedSet()

  @property
  def busy(self):
    return bool(self.__coroutines)

  def add(self, coroutine):
    self.__coroutines.add(coroutine)

  def remove(self, coroutine):
    self.__coroutines.remove(coroutine)

  def freeze(self, coroutine):
    self.__coroutines.remove(coroutine)

  def unfreeze(self, coroutine):
    self.__coroutines.add(coroutine)

  def round(self):
    for coro in list(self.__coroutines):
      assert coro is not None
      yield coro

  def dump(self):
    def dump(c, idt = 0):
      print('{}{}{}'.format('  ' * idt, c, ' (frozen)' if c.frozen else ''))
      for child in self.__hierarchy.get(c, []):
        dump(child, idt + 1)
    for root in self.__hierarchy.get(None, []):
      dump(root)
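A minimal driver for the policy above, written as a hedged sketch: the SchedulingPolicy base class and the coroutine type are stand-ins defined here (the real drake scheduler supplies its own), and OrderedSet is assumed to come from the ordered_set package. The stubs would have to be defined before the RoundRobin class above.

from ordered_set import OrderedSet  # assumed OrderedSet implementation


class SchedulingPolicy:
    """Stub base class; drake defines the real interface."""


class StubCoroutine:
    """Stand-in for drake's coroutine objects (only .frozen is needed here)."""

    def __init__(self, name):
        self.name = name
        self.frozen = False

    def __repr__(self):
        return self.name


policy = RoundRobin()
a, b = StubCoroutine('a'), StubCoroutine('b')
policy.add(a)
policy.add(b)
print(policy.busy)           # True
print(list(policy.round()))  # [a, b] -- every runnable coroutine, in insertion order
policy.freeze(a)
print(list(policy.round()))  # [b] -- frozen coroutines are skipped until unfreeze()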
Code example #2
class RoundRobin(SchedulingPolicy):
    def __init__(self):
        self.__coroutines = OrderedSet()

    @property
    def busy(self):
        return bool(self.__coroutines)

    def add(self, coroutine):
        self.__coroutines.add(coroutine)

    def remove(self, coroutine):
        self.__coroutines.remove(coroutine)

    def freeze(self, coroutine):
        self.__coroutines.remove(coroutine)

    def unfreeze(self, coroutine):
        self.__coroutines.add(coroutine)

    def round(self):
        for coro in list(self.__coroutines):
            assert coro is not None
            yield coro

    def dump(self):
        def dump(c, idt=0):
            print('{}{}{}'.format('  ' * idt, c,
                                  ' (frozen)' if c.frozen else ''))
            for child in self.__hierarchy.get(c, []):
                dump(child, idt + 1)

        for root in self.__hierarchy.get(None, []):
            dump(root)
Code example #3
class GlutSpyInputs(Thread):
    def __init__(self, app, *args, **kwargs):
        super().__init__(name="SpyInputs", daemon=True)
        self._app = app
        self._key_pressed = OrderedSet()

        glutInit(*args, **kwargs)
        glutInitDisplayMode(GLUT_SINGLE)

        glutInitWindowSize(800, 400)
        glutInitWindowPosition(100, 100)
        glutCreateWindow("SpyInputs")

        glutKeyboardFunc(self.on_key_press)
        glutKeyboardUpFunc(self.on_key_up)

    def on_key_press(self, key, x, y):
        self._key_pressed.add(key)
        self.on_key_update()

    def on_key_up(self, key, x, y):
        self._key_pressed.remove(key)
        self.on_key_update()

    def on_key_update(self):
        print(" | ".join([v.decode() for v in self._key_pressed]))

    def run(self):
        glutMainLoop()
Code example #4
class Taggable(ABC):
    def __init__(self):
        self._tags = OrderedSet()

    @abstractmethod
    def get_required_tags(self):
        raise NotImplementedError

    def get_tags(self):
        """Gets the comma-separated list of tags.

        Returns: A comma separated list of tags, or an empty string if
            there are no tags.
        """
        return ','.join(self.get_required_tags() | self._tags)

    def set_tags(self, tags):
        """Sets from a comma-separated list of tags.
        """
        if tags is None:  # Weird, but for compatibility with the Java version
            return

        self._tags.clear()
        self._tags.update(tags.split(','))

    def add_tags(self, *tags):
        self._tags.update(tags)

    def remove_tag(self, tag):
        if tag:
            self._tags.remove(tag)

    def has_tag(self, tag):
        return tag in self._tags
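A short usage sketch, assuming the Taggable class above, the abc imports it relies on, and an OrderedSet implementation such as the ordered_set package; the ReportItem subclass is made up for illustration.

from abc import ABC, abstractmethod
from ordered_set import OrderedSet  # assumed OrderedSet implementation


class ReportItem(Taggable):
    """Hypothetical concrete subclass."""

    def get_required_tags(self):
        # Tags that are always present, independent of set_tags()
        return OrderedSet(['report'])


item = ReportItem()
item.set_tags('daily,finance')
item.add_tags('draft')
print(item.get_tags())        # report,daily,finance,draft
print(item.has_tag('draft'))  # True
item.remove_tag('draft')
print(item.get_tags())        # report,daily,finance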
Code example #5
class SetQueue(queue.Queue):
    def _init(self, maxsize):
        self.queue = OrderedSet()

    def _put(self, item):
        self.queue.add(item)

    def _get(self):
        head = self.queue.__getitem__(0)
        self.queue.remove(head)
        return head
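Roughly how the class above behaves, assuming the standard queue module and an indexable OrderedSet such as the one from the ordered_set package: duplicate puts are absorbed, while first-insertion (FIFO) order is preserved on get.

import queue
from ordered_set import OrderedSet  # assumed OrderedSet implementation

q = SetQueue()
for item in (1, 2, 1, 3, 2):
    q.put(item)

print(q.qsize())                    # 3 -- the duplicates collapsed into the set
print([q.get() for _ in range(3)])  # [1, 2, 3] -- first-insertion order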
Code example #6
class DepthFirst(SchedulingPolicy):
    def __init__(self):
        self.__coroutines = OrderedSet()
        self.__hierarchy = {}

    @property
    def busy(self):
        return bool(self.__coroutines)

    def add(self, coroutine):
        parent = coroutine.parent
        self.__coroutines.add(coroutine)
        self.__hierarchy.setdefault(parent, OrderedSet()).add(coroutine)

    def remove(self, coroutine):
        self.__coroutines.remove(coroutine)
        children = self.__hierarchy.pop(coroutine, None)
        if children is not None:
            assert len(children) == 0
        self.__hierarchy.get(coroutine.parent).remove(coroutine)

    def freeze(self, coroutine):
        self.__coroutines.remove(coroutine)

    def unfreeze(self, coroutine):
        self.__coroutines.add(coroutine)

    def round(self):
        c = self.__round(self.__hierarchy.get(None, ()))
        assert c is not None
        return (c, )

    def __round(self, coroutines):
        for coroutine in coroutines:
            assert coroutine is not None
            active = coroutine in self.__coroutines
            if active and coroutine.exception:
                return coroutine
            sub = self.__round(self.__hierarchy.get(coroutine, ()))
            if sub is not None:
                return sub
            if active:
                return coroutine

    def dump(self):
        def dump(c, idt=0):
            print('{}{}{}'.format('  ' * idt, c,
                                  ' (frozen)' if c.frozen else ''))
            for child in self.__hierarchy.get(c, []):
                dump(child, idt + 1)

        for root in self.__hierarchy.get(None, []):
            dump(root)
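A sketch of the depth-first selection, again with hypothetical stubs for the SchedulingPolicy base and the coroutine objects (only parent and exception are consulted here); the stubs would have to exist before the DepthFirst class above. The policy always returns the deepest active coroutine under the first root.

from ordered_set import OrderedSet  # assumed OrderedSet implementation


class SchedulingPolicy:
    """Stub base class; drake defines the real interface."""


class StubCoroutine:
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent
        self.exception = None
        self.frozen = False

    def __repr__(self):
        return self.name


policy = DepthFirst()
root = StubCoroutine('root')
child = StubCoroutine('child', parent=root)
policy.add(root)
policy.add(child)
print(policy.round())  # (child,) -- the deepest active coroutine wins
policy.remove(child)
print(policy.round())  # (root,)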
Code example #7
File: sched.py Project: mefyl/drake
class DepthFirst(SchedulingPolicy):

  def __init__(self):
    self.__coroutines = OrderedSet()
    self.__hierarchy = {}

  @property
  def busy(self):
    return bool(self.__coroutines)

  def add(self, coroutine):
    parent = coroutine.parent
    self.__coroutines.add(coroutine)
    self.__hierarchy.setdefault(parent, OrderedSet()).add(coroutine)

  def remove(self, coroutine):
    self.__coroutines.remove(coroutine)
    children = self.__hierarchy.pop(coroutine, None)
    if children is not None:
      assert len(children) == 0
    self.__hierarchy.get(coroutine.parent).remove(coroutine)

  def freeze(self, coroutine):
    self.__coroutines.remove(coroutine)

  def unfreeze(self, coroutine):
    self.__coroutines.add(coroutine)

  def round(self):
    c = self.__round(self.__hierarchy.get(None, ()))
    assert c is not None
    return (c,)

  def __round(self, coroutines):
    for coroutine in coroutines:
      assert coroutine is not None
      active = coroutine in self.__coroutines
      if active and coroutine.exception:
        return coroutine
      sub = self.__round(self.__hierarchy.get(coroutine, ()))
      if sub is not None:
        return sub
      if active:
        return coroutine

  def dump(self):
    def dump(c, idt = 0):
      print('{}{}{}'.format('  ' * idt, c, ' (frozen)' if c.frozen else ''))
      for child in self.__hierarchy.get(c, []):
        dump(child, idt + 1)
    for root in self.__hierarchy.get(None, []):
      dump(root)
Code example #8
File: turn_game.py Project: konahart/turn-taker
class TurnGame(object):
    def __init__(self, advance_func=None):
        self._player_queue = OrderedSet()
        self.advance_func = advance_func or self._default_advance_turn

    def reset(self):
        self._player_queue.clear()

    def advance_turn(self):
        self.advance_func(self._player_queue)

    @staticmethod
    def _default_advance_turn(player_queue):
        # Move current player to end of queue
        current_player = player_queue.pop(last=False)
        player_queue.add(current_player)

    def get_current_player(self):
        return self._player_queue[0]

    def get_next_player(self):
        # Peek at the next player by advancing a copy of the queue;
        # fall back to the copy if advance_func mutates in place and returns None
        players = self.get_players()
        return (self.advance_func(players) or players)[0]

    def fast_forward(self, player):
        # Ensure player is in the queue
        self.add_player(player)

        # Rotate player to the front
        for _ in range(len(self._player_queue)):
            if self._player_queue[0] == player:
                break
            TurnGame._default_advance_turn(self._player_queue)

    def get_players(self):
        return OrderedSet(self._player_queue)

    def get_player_count(self):
        return len(self._player_queue)

    def add_player(self, player) -> bool:
        if player not in self._player_queue:
            self._player_queue.add(player)
            return True
        else:
            return False

    def remove_player(self, player) -> bool:
        if player in self._player_queue:
            self._player_queue.remove(player)
            return True
        else:
            return False
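Typical use, assuming the class above and an OrderedSet implementation that supports indexing and pop(last=False) as _default_advance_turn requires; plain player names stand in for player objects here.

game = TurnGame()
for name in ('alice', 'bob', 'carol'):
    game.add_player(name)

print(game.get_current_player())  # alice
game.advance_turn()
print(game.get_current_player())  # bob
game.fast_forward('alice')        # rotate until alice is at the front again
print(game.get_current_player())  # alice
print(game.get_player_count())    # 3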
Code example #9
File: coins.py Project: mattdinhnguyen/algos
def sumCoins(l: List[int],
             k: int) -> List[int]:  # from smallest sum to target
    l.sort()
    s = OrderedSet([0])  # start with no coin
    inc = sys.maxsize  # min increment based on the coin values
    for i in range(1, len(l)):
        inc = min(inc, l[i] - l[i - 1])
    for i in range(l[0], k + 1, inc):  # potential sums i, stepped by the min increment
        for c in l:
            if i - c in s:  # sum i minus one coin value is already reachable, so i is reachable too
                s.add(i)
                break
    s.remove(0)
    return list(s)
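A small driver, assuming the imports the snippet needs (sys, typing.List, and an OrderedSet implementation): with coins 2 and 3 and a target of 7, every reachable sum up to the target comes back in increasing order.

import sys
from typing import List
from ordered_set import OrderedSet  # assumed OrderedSet implementation

print(sumCoins([2, 3], 7))  # [2, 3, 4, 5, 6, 7]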
Code example #10
    def _X_feature_polynomial_columns(self, dataframe: DataFrame) -> list:
        excluded_prefixes = list( self.params['X_feature_exclude'] ) \
                          + list( self.params['X_feature_onehot']  )

        columns = OrderedSet(dataframe.columns.values)
        for column_prefix in excluded_prefixes:
            column_prefix = re.sub(r'\d+(st|nd|rd)?$', '',
                                   column_prefix)  # remove 1st, 2nd, 3rd
            column_prefix = column_prefix + '_'  # BUGFIX HeatingQC_Numeric startswith Heating
            for column in dataframe.columns.values:
                if column in columns:
                    if str(column).startswith(column_prefix):
                        columns.remove(column)  # .discard() is .remove() without raising on missing items
                        continue

        columns = list(columns)
        return columns
Code example #11
File: molecule.py Project: evanfeinberg/InterMol
class Molecule(object):
    """An abstract molecule object.
    """
    __slots__ = ['name', '_atoms']

    def __init__(self, name=None):
        """Initialize the molecule

        Args:
            name (str): name of the molecule
        """
        if name != None:
            self.name = name
        else:
            # TODO Fix the naming resolution
            self.name = "Untitled"
        self._atoms = OrderedSet()

    def addAtom(self, atom):
        """Add and atom

        Args:
            atom (atom): the atom to add into the molecule
        """
        self._atoms.add(atom)

    def removeAtom(self, atom):
        """Remove Atom

        Args:
            atom (atom): the atom to remove from the molecule
        """
        self._atoms.remove(atom)

    def getAtoms(self):
        """Return an orderedset of atoms
        """
        return self._atoms

    def __repr__(self):
        return self.name
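Typical use of the class above, assuming an OrderedSet implementation; plain strings stand in for whatever hashable atom objects the caller supplies.

from ordered_set import OrderedSet  # assumed OrderedSet implementation

water = Molecule('water')
for atom in ('H1', 'H2', 'O'):  # hashable stand-ins for atom objects
    water.addAtom(atom)
water.addAtom('H1')             # duplicate adds are ignored by the set

print(water)                    # water
print(list(water.getAtoms()))   # ['H1', 'H2', 'O'] -- insertion order preserved
water.removeAtom('H2')
print(len(water.getAtoms()))    # 2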
Code example #12
File: molecule.py Project: evanfeinberg/InterMol
class Molecule(object):
    """An abstract molecule object.
    """
    __slots__ = ['name', '_atoms']
    def __init__(self, name = None):
        """Initialize the molecule

        Args:
            name (str): name of the molecule
        """
        if name != None:
            self.name = name
        else:
            # TODO Fix the naming resolution
            self.name = "Untitled"
        self._atoms = OrderedSet()

    def addAtom(self, atom):
        """Add and atom

        Args:
            atom (atom): the atom to add into the molecule
        """
        self._atoms.add(atom)

    def removeAtom(self, atom):
        """Remove Atom

        Args:
            atom (atom): the atom to remove from the molecule
        """
        self._atoms.remove(atom)

    def getAtoms(self):
        """Return an orderedset of atoms
        """
        return self._atoms

    def __repr__(self):
        return self.name
Code example #13
class RoundRobin(SchedulingPolicy):
    def __init__(self):
        self.__coroutines = OrderedSet()

    @property
    def busy(self):
        return bool(self.__coroutines)

    def add(self, coroutine):
        self.__coroutines.add(coroutine)

    def remove(self, coroutine):
        self.__coroutines.remove(coroutine)

    def freeze(self, coroutine):
        self.__coroutines.remove(coroutine)

    def unfreeze(self, coroutine):
        self.__coroutines.add(coroutine)

    def round(self):
        for coro in list(self.__coroutines):
            yield coro
Code example #14
class DepthFirst(SchedulingPolicy):
    def __init__(self):
        self.__coroutines = OrderedSet()
        self.__hierarchy = {}

    @property
    def busy(self):
        return bool(self.__coroutines)

    def add(self, coroutine):
        parent = coroutine.parent
        self.__coroutines.add(coroutine)
        self.__hierarchy.setdefault(parent, OrderedSet()).add(coroutine)

    def remove(self, coroutine):
        self.__coroutines.remove(coroutine)
        self.__hierarchy.get(coroutine.parent).remove(coroutine)

    def freeze(self, coroutine):
        self.__coroutines.remove(coroutine)

    def unfreeze(self, coroutine):
        self.__coroutines.add(coroutine)

    def round(self):
        return (self.__round(self.__hierarchy.get(None, ())), )

    def __round(self, coroutines):
        for coroutine in coroutines:
            active = coroutine in self.__coroutines
            if active and coroutine.exception:
                return coroutine
            sub = self.__round(self.__hierarchy.get(coroutine, ()))
            if sub is not None:
                return sub
            if active:
                return coroutine
Code example #15
class ExOpBlock(ExecutionGraphElt):
    """
    A list of exops to be executed sequentially.

    Attributes:
        computation_decl: The associated computation graph.
        prev_exop: The last exop.
        next_exop: The first exop.
        root_set: Set of exops whose values are needed.

    """

    def __init__(self, computation_decl=None, **kwargs):
        if computation_decl is None:
            raise ValueError("computation_decl must be specified.")
        super(ExOpBlock, self).__init__(execution_graph=computation_decl.execution_graph,
                                        **kwargs)
        self.computation_decl = computation_decl
        # Doubly linked loop, with self as termination
        self.prev_exop = self
        self.next_exop = self
        # All ops handled by the block.
        self.all_ops = set()

        self.root_set = OrderedSet()

    @property
    def is_exop_end_of_list(self):
        """

        Returns:
            True if this represents the guard past the exop list. See ExecuteOp.

        """
        return True

    class ExOpForwardIterator(object):

        def __init__(self, exop_term):
            self.exop_term = exop_term
            self.exop = self.exop_term.next_exop

        def next(self):
            if self.exop.is_exop_end_of_list:
                raise StopIteration
            result = self.exop
            self.exop = result.next_exop
            return result

        __next__ = next  # Python 3.X compatibility

    class ExOpReversedIterator(object):

        def __init__(self, exop_term):
            self.exop_term = exop_term
            self.exop = self.exop_term.prev_exop

        def __iter__(self):
            return self

        def next(self):
            if self.exop.is_exop_end_of_list:
                raise StopIteration
            result = self.exop
            self.exop = result.prev_exop
            return result

        __next__ = next  # Python 3.X compatibility

    def __iter__(self):
        return ExOpBlock.ExOpForwardIterator(self)

    def __reversed__(self):
        return ExOpBlock.ExOpReversedIterator(self)

    def add_ops(self, roots, after_exop=None):
        """
        Add exops needed to compute ops in roots.

        Args:
            roots: A collection of ops whose values are needed.
            after_exop: Where in the list to add the ops. Defaults to the end.

        """
        if after_exop is None:
            after_exop = self.prev_exop

        # Get computation graph ops that are already inserted.
        available = OrderedSet()
        counts = dict()
        parents = defaultdict(OrderedSet)
        ready = OrderedSet()

        # Some ops in roots may have been replaced by other ops; if so, they
        # are in the graph already, although maybe not in this block. Get the
        # op from the exop so we have the current version.
        for op in roots:
            exop = self.computation_decl.get_exop(op, None)
            if exop is not None:
                op = exop.op
            available.add(op)

        while available:
            op = available.pop()
            if op in counts or op in self.all_ops:
                continue

            nchildren = 0
            for child in op.all_deps:
                exop = self.computation_decl.get_exop(child, None)
                if exop is not None:
                    child = exop.op
                if child not in self.all_ops:
                    parents[child].add(op)
                    available.add(child)
                    nchildren += 1
            if nchildren > 0:
                counts[op] = nchildren
            else:
                ready.add(op)

        while ready:
            op = ready.pop()
            after_exop = self.add_op(op, after_exop=after_exop)
            for p in parents.get(op, []):
                count = counts[p] - 1
                if count == 0:
                    ready.add(p)
                    del counts[p]
                else:
                    counts[p] = count
        if len(counts) > 0:
            raise ValueError("Graph not a DAG")

    def add_op(self, op, after_exop):
        """
        Add an exop for op to be executed after after_exop.

        Args:
            op: The op.
            after_exop: The exop to precede op.

        Returns:
            The new last op. If the op is executable, it will be the added exop,
            otherwise the previous after_exop.

        """
        if after_exop is None:
            after_exop = self
        if op.is_sequencing_op:
            return after_exop

        exec_op = ExOp(computation_decl=self.computation_decl, op=op)
        return self.add_exop(exec_op, after_exop)

    def add_exop(self, exop, after_exop=None):
        """
        Add exop to the list of exops, after after_exop.

        Args:
            exop:
                The exop to add.

            after_exop:
                If specified, the exop after which the new exop is added. Defaults to the
                last exop added.

        Returns:
            The exop.

        """
        if after_exop is None:
            after_exop = self.prev_exop

        # Insert between after_exop and the op after after_exop
        before_exop = after_exop.next_exop

        # Add after after_exop
        after_exop.next_exop = exop
        exop.prev_exop = after_exop

        # Add before before_exop
        before_exop.prev_exop = exop
        exop.next_exop = before_exop

        self.all_ops.add(exop.op)

        return exop

    def move_exop_to_after_exop(self, exop, after_exop):
        exop.prev_exop.next_exop = exop.next_exop
        exop.next_exop.prev_exop = exop.prev_exop
        exop.prev_exop = after_exop
        exop.next_exop = after_exop.next_exop
        after_exop.next_exop = exop
        exop.next_exop.prev_exop = exop

    def remove_exop(self, exop):
        exop.prev_exop.next_exop = exop.next_exop
        exop.next_exop.prev_exop = exop.prev_exop
        for input_decl in exop.input_decls:
            input_decl.source_output_decl.user_input_decls.remove(input_decl)
        self.all_ops.remove(exop.op)

    def replace_op(self, old_op, new_op):
        # TODO Replacing an op can remove ops. For example, (x + 2) * 1 -> x + 2
        # replaces the * with +, so * and 1 drop out
        # 1 dropping out means one less constant tensor, if it's not used
        # anywhere else
        # * dropping out means a change to sequencing.
        new_op = as_op(new_op)
        old_exop = self.computation_decl.get_exop(old_op)
        after_exop = old_exop.prev_exop
        self.remove_exop(old_exop)

        # FIXME: find better way to update dependencies
        next_op = old_exop.next_exop.op
        if old_op in next_op.control_deps:
            next_op.remove_control_dep(old_op)
            next_op.add_control_dep(new_op)

        # FIXME: find better way to preserve metadata
        if hasattr(old_op, 'metadata') and hasattr(new_op, 'metadata') and \
           len(old_op.metadata) > len(new_op.metadata):
            new_op.metadata = old_op.metadata

        if old_op is new_op:
            # Hetr bashes some ops. See MutateInsteadOfCopyWithNewArgsMixin, issue #1410
            self.add_ops([new_op], after_exop=after_exop)
            return
        new_exop = self.computation_decl.get_exop(new_op, None)
        if new_exop is None:
            self.add_ops([new_op], after_exop=after_exop)
            new_exop = self.computation_decl.get_exop(new_op, None)
        self.replace_users(old_exop, new_exop)
        if old_exop in self.root_set:
            self.root_set.remove(old_exop)
            self.root_set.add(new_exop)

    def replace_users(self, old_exop, new_exop):
        """
        Replace all users of old_exop with new_exop.

        Args:
            old_exop: The original exop.
            new_exop: The replacement exop.

        """
        for old_output_decl, new_output_decl in zip(old_exop.output_decls, new_exop.output_decls):
            self.replace_output_decl(old_output_decl, new_output_decl)
        for op in old_exop.ref_ops:
            new_exop.add_ref_op(op)
        self.computation_decl.ops[old_exop.op] = new_exop

    def replace_output_decl(self, old_output_decl, new_output_decl):
        for input_decl in set(old_output_decl.user_input_decls):
            input_decl.source_output_decl = new_output_decl
        new_output_decl.tensor_decl.merge_flags(old_output_decl.tensor_decl)
        old_output_decl.exop.output_decls[old_output_decl.pos] = new_output_decl

    def replace_exop(self, old_exop, new_exop):
        prev_exop = old_exop.prev_exop
        self.remove_exop(old_exop)
        self.add_exop(new_exop, prev_exop)
        self.replace_users(old_exop, new_exop)

    def merge_exop(self, old_exop, new_exop):
        """
        new_exop, which should already exist, takes over for old_exop.

        Args:
            old_exop:
            new_exop:

        """
        self.replace_users(old_exop, new_exop)
        self.remove_exop(old_exop)

    def memory_footprint(self):
        max_mem = 0
        for node in self:
            max_mem = max([node.memory_footprint(), max_mem])
        return max_mem

    def worst_case_footprint(self):
        mem = 0
        for var in self.get_temp_vars():
            mem += var.tensor_view_decl.tensor_decl.size
        return mem

    def memory_efficiency(self):
        footprint = self.memory_footprint()
        usage = 0
        for node in self.ops:
            usage = max(usage, node.memory_usage())
        result = 100
        if footprint > 0:
            result = int(round((float(usage) / float(footprint)) * 100))
        return result

    def persistent_size(self):
        mem = 0
        for var in self.get_persistent_vars():
            mem += var.tensor_view_decl.tensor_decl.size
        return mem

    def get_vars(self):
        vars = set()
        for exop in self:
            vars |= set(input_decl.source_output_decl for input_decl in exop.input_decls)
            vars |= set(exop.output_decls)
        return vars

    def get_temp_vars(self):
        result = list()
        for var in self.get_vars():
            if not var.tensor_view_decl.tensor_decl.is_persistent:
                result.append(var)
        return result

    def get_persistent_vars(self):
        result = list()
        for var in self.get_vars():
            if var.tensor_view_decl.tensor_decl.is_persistent:
                result.append(var)
        return result
Code example #16
class DownloadManager(object):
    def __init__(self):
        # type: () -> None

        self.loop = asyncio.get_event_loop()
        self.timeout = aiohttp.ClientTimeout(total=None, sock_read=60)
        self.session = aiohttp.ClientSession(loop=self.loop,
                                             timeout=self.timeout,
                                             auto_decompress=False)
        self.concurrent_downloads = 3
        #self.sem = asyncio.Semaphore(1000)
        self.chunksize = 1024 * 1024  # file write buffer

        self.queue = OrderedSet()
        self.active = OrderedSet()
        self.done = OrderedSet()
        self.error = OrderedSet()

    def status(self):
        # type: () -> str

        total_active = sum(t.downloaded for t in self.active)
        total_done = sum(t.downloaded for t in self.done)
        total_error = sum(t.downloaded for t in self.error)

        return "Queued: {}, active: {}, done: {}, error: {}\nDownload active: {}, done: {}, error: {}".format(
            len(self.queue), len(self.active), len(self.done), len(self.error),
            total_active, total_done, total_error)

    def _enqueue(self, task, priority):
        # type: (DownloadTask, Any) -> None

        self.queue.add(task)

    def _start(self, task):
        # type: (DownloadTask, ) -> asyncio.Task

        self.active.add(task)
        atask = asyncio.ensure_future(self._download(task))
        return atask

    def _trystart(self):
        # type: () -> Optional[asyncio.Task]

        if len(self.active) < self.concurrent_downloads:
            try:
                task = self.queue.pop()
                return self._start(task)
            except KeyError:
                if not self.active:
                    logger.info("all done")
                    #self.loop.stop()
                    #task = asyncio.ensure_future(self._close())

        return None

    async def _download(self, task):
        # type: (DownloadTask, ) -> None

        task.start()
        #await asyncio.sleep(10)

        # send http head request first to check for range support

        try:

            #async with self.session.get(task.url, headers={"Range": "bytes=0-10"}) as response:
            async with self.session.get(task.url, headers={}) as response:
                stream = response.content
                try:
                    size = int(response.headers.get("content-length",
                                                    ""))  # type: Optional[int]
                except (ValueError, TypeError):
                    size = None

                accept_range = response.headers.get('Accept-Ranges',
                                                    'none').lower()

                if response.status == 200:  # range not supported
                    pass
                elif response.status == 206:  # range supported
                    if accept_range != "bytes":
                        raise RuntimeError(
                            "Only bytes content ranges are supported")
                    bytes_range = response.headers.get(
                        'Content-Range')  # 'bytes 0-10/46239'
                    raise RuntimeError(
                        "Range requests are not supported yet: {}".format(
                            bytes_range))

                with open(task.path, "wb", buffering=self.chunksize) as fw:
                    async for data in stream.iter_any():
                        task.downloaded += len(data)
                        fw.write(data)

                if size and size != task.downloaded:
                    print("incomplete", task.downloaded, "of", size)

        except asyncio.TimeoutError:
            self.error.add(task)
        else:
            self.done.add(task)

        task.done()
        self.active.remove(task)
        self._trystart()

    def download(self, url, path="tmp.txt", priority=0, force=False):
        # type: (str, str, int, bool) -> Optional[asyncio.Task]

        logger.info("starting download")
        task = DownloadTask(url, path)
        if force:
            return self._start(task)
        else:
            self._enqueue(task, priority)
            return self._trystart()

    async def _close(self):
        await self.session.close()
Code example #17
class Location(simpy.Resource):
    def __init__(self,
                 env,
                 rng,
                 capacity=simpy.core.Infinity,
                 name='Safeway',
                 location_type='stores',
                 lat=None,
                 lon=None,
                 area=None,
                 cont_prob=None,
                 surface_prob=[0.2, 0.2, 0.2, 0.2, 0.2]):
        super().__init__(env, capacity)
        self.humans = OrderedSet()  # OrderedSet instead of set for determinism when iterating
        self.name = name
        self.rng = rng
        self.lat = lat
        self.lon = lon
        self.area = area
        self.location_type = location_type
        self.social_contact_factor = cont_prob
        self.env = env
        self.contamination_timestamp = datetime.datetime.min
        self.contaminated_surface_probability = surface_prob
        self.max_day_contamination = 0

    def infectious_human(self):
        return any([h.is_infectious for h in self.humans])

    def __repr__(self):
        return f"{self.name} - occ:{len(self.humans)}/{self.capacity} - I:{self.infectious_human()}"

    def add_human(self, human):
        self.humans.add(human)
        if human.is_infectious:
            self.contamination_timestamp = self.env.timestamp
            rnd_surface = float(
                self.rng.choice(a=MAX_DAYS_CONTAMINATION,
                                size=1,
                                p=self.contaminated_surface_probability))
            self.max_day_contamination = max(self.max_day_contamination,
                                             rnd_surface)

    def remove_human(self, human):
        self.humans.remove(human)

    @property
    def is_contaminated(self):
        return self.env.timestamp - self.contamination_timestamp <= datetime.timedelta(
            days=self.max_day_contamination)

    @property
    def contamination_probability(self):
        if self.is_contaminated:
            lag = (self.env.timestamp - self.contamination_timestamp)
            lag /= datetime.timedelta(days=1)
            p_infection = 1 - lag / self.max_day_contamination  # linear decay; &environmental_contamination
            return self.social_contact_factor * p_infection
        return 0.0

    def __hash__(self):
        return hash(self.name)
Code example #18
def _improvement_callback(route_data, callback_datastructures, sweep_rhos,
                          sweep_phis, sweep_J_pos, step_inc, routed):
    """ This callback implements the Gillett and Miller (1974) improvement
    heuristic. It involves trying to remove a node and to insert one of several
    candidates. Therefore, the algorithm corresponds to Steps 8-15 in the appendix
    of Gillett and Miller (1974). """

    # unpack callback data structures (packed in pack_datastructures)
    N, D, NN_D, d, C, L, node_to_pos, pos_to_node, avr = callback_datastructures

    # do not try to improve, if the J node is already routed (already Swept
    #  full circle)
    node_J = pos_to_node[sweep_J_pos]
    if routed[node_J]:
        # nothing to do, no nodes added, no nodes removed, route is complete
        return route_data, [], [], True

    # unpack route information, (route, route_cost, route_demand)
    D1_route, D1, D1_demand, D1_nodes = route_data

    # G&M Step 8. This messy looking line vectorizes the minimization of
    #  R(K(I))+An(K(I))*AVR
    #
    # What makes it a little messier is that the angle is pointing
    #  "the right way" depending on the cw/ccw direction (encoded in step_inc)
    # +1 is there to convert indexing as there is no depot in node_rho_phis
    route_K_nodes = list(D1_nodes[1:])
    route_K_positions = [node_to_pos[n] for n in route_K_nodes]
    route_rhos = sweep_rhos[route_K_positions]
    route_phis = sweep_phis[route_K_positions]

    rem_choose_function = route_rhos + route_phis * avr
    to_remove_node_KII = route_K_nodes[np.argmin(rem_choose_function)]

    if __debug__:
        log(
            DEBUG - 2,
            "G&M improvement phase for route %s (%.2f). Trying to replace KII=n%d."
            % (str(D1_route), D1, to_remove_node_KII))
        log(
            DEBUG - 3, "This is due to R(K(I))+An(K(I)*AVR = %s" %
            str(zip(route_K_nodes, route_phis, list(rem_choose_function))))
    # take the node J-1 (almost always the node last added on the route)
    sweep_prev_of_J_pos = _step(sweep_J_pos, -step_inc, N - 2)
    prev_node_J = pos_to_node[sweep_prev_of_J_pos]

    # Get the insertion candidates
    try:
        candidate_node_JJX = next((node_idx
                                   for node_idx, _ in NN_D[prev_node_J]
                                   if not routed[node_idx]))
    except StopIteration:
        if __debug__:
            log(DEBUG - 2,
                "G&M Step 9, not enough unrouted nodes left for JJX.")
            log(DEBUG - 2, "-> EXIT with no changes")
        return route_data, [], [], False
    try:
        candidate_node_JII = next(
            (node_idx for node_idx, _ in NN_D[candidate_node_JJX]
             if (not routed[node_idx] and node_idx != candidate_node_JJX)))
    except StopIteration:
        candidate_node_JII = None

    # construct and route to get the modified route cost D2
    D2_route_nodes = OrderedSet(D1_nodes)
    D2_route_nodes.remove(to_remove_node_KII)
    D2_route_nodes.add(candidate_node_JJX)
    D2_route, D2 = solve_tsp(D, list(D2_route_nodes))
    D2_demand = D1_demand - d[to_remove_node_KII] + d[
        candidate_node_JJX] if C else 0

    ## G&M Step 9

    if not ((L and D2 - S_EPS < L) and (C and D2_demand - C_EPS <= C)):
        if __debug__:
            log(
                DEBUG - 2,
                "G&M Step 9, rejecting replacement of KII=n%d with JJX=n%d" %
                (to_remove_node_KII, candidate_node_JJX))
            log(
                DEBUG - 3, " which would have formed a route %s (%.2f)" %
                (str(D2_route), D2))
            if (C and D2_demand - C_EPS > C):
                log(DEBUG - 3, " violating the capacity constraint")
            else:
                log(DEBUG - 3, " violating the maximum route cost constraint")
            log(DEBUG - 2, " -> EXIT with no changes")

        # go to G&M Step 10 ->
        # no changes, no skipping, route complete
        return route_data, [], [], True

    ## G&M Step 11
    D3_nodes = OrderedSet()  # the min. dist. from 0 through J,J+1...J+4 to J+5
    D4_nodes = OrderedSet()  # the min. dist. /w JJX excluded, KII included
    D6_nodes = OrderedSet()  # the min. dist. /w JJX and JII excl., KII incl.
    JJX_in_chain = False
    JII_in_chain = False

    # step back so that the first node to lookahead is J
    lookahead_pos = sweep_prev_of_J_pos
    for i in range(5):
        lookahead_pos = _step(lookahead_pos, step_inc, N - 2)
        lookahead_node = pos_to_node[lookahead_pos]
        if routed[lookahead_node]:
            continue

        D3_nodes.add(lookahead_node)
        if lookahead_node == candidate_node_JJX:
            # inject KII instead of JJX
            D4_nodes.add(to_remove_node_KII)
            D6_nodes.add(to_remove_node_KII)
            JJX_in_chain = True
        elif lookahead_node == candidate_node_JII:
            D4_nodes.add(lookahead_node)
            JII_in_chain = True
        else:
            D4_nodes.add(lookahead_node)
            D6_nodes.add(lookahead_node)

    # if JJX was not in the sequence J, J+1, ... J+5
    if not JJX_in_chain:
        if __debug__:
            log(
                DEBUG - 2, "G&M Step 11, JJX=n%d not in K(J)..K(J+4)" %
                candidate_node_JJX)
            log(DEBUG - 3, " which consists of nodes %s" % str(list(D3_nodes)))
            log(DEBUG - 2, "-> EXIT with no changes")
        # go to G&M Step 10 ->
        # no changes, no skipping, route complete
        return route_data, [], [], True

    # The chain *end point* J+5
    last_chain_pos = _step(lookahead_pos, step_inc, N - 2)
    last_chain_node = pos_to_node[last_chain_pos]

    if routed[last_chain_node]:
        last_chain_node = 0

    # D3 -> EVALUATE the MINIMUM distance from 0 through J,J+1...J+4 to J+5
    _, D3 = _shortest_path_through_nodes(D, 0, last_chain_node, D3_nodes)
    # D4 -> DETERMINE the MINIMUM distance with JJX excluded, KII included
    _, D4 = _shortest_path_through_nodes(D, 0, last_chain_node, D4_nodes)

    if not (D1 + D3 < D2 + D4):
        ## G&M Step 12
        if __debug__:
            log(
                DEBUG - 2, "G&M Step 12, accept an improving move where " +
                "KII=n%d is removed and JJX=n%d is added" %
                (to_remove_node_KII, candidate_node_JJX))
            log(DEBUG - 3,
                " which forms a route %s (%.2f)" % (str(D2_route), D2))
            log(DEBUG - 2, " -> EXIT and continue adding nodes")

        ignored_nodes = [to_remove_node_KII]
        if candidate_node_JJX != node_J:
            ignored_nodes += [node_J]

        # go to G&M Step 4 ->
        # route changed, KII removed and skip current node J, not complete
        return RouteData(D2_route, D2, D2_demand, D2_route_nodes),\
               [candidate_node_JJX], ignored_nodes, False

    else:
        ## G&M Step 13

        # JII and JJX (checked earlier) should be in K(J)...K(J+4) to continue
        if not JII_in_chain:
            if __debug__:
                if candidate_node_JII is None:
                    log(DEBUG - 2,
                        "G&M Step 13, no unrouted nodes left for JII.")
                else:
                    log(
                        DEBUG - 2, "G&M Step 13, JII=n%d not in K(J)..K(J+4)" %
                        candidate_node_JII)
                    log(DEBUG - 3,
                        " which consists of nodes %s" % str(list(D3_nodes)))
                log(DEBUG - 2, "-> EXIT with no changes")
            # go to G&M Step 10 -> no changes, no skipping, route complete
            return route_data, [], [], True

        # construct and route to get the modified route cost D2
        D5_route_nodes = D2_route_nodes
        D5_route_nodes.add(candidate_node_JII)
        D5_route, D5 = solve_tsp(D, list(D5_route_nodes))
        D5_demand = D2_demand + d[candidate_node_JII] if C else 0
        if not ((L and D5 - S_EPS < L) and (C and D5_demand - C_EPS <= C)):
            if __debug__:
                log(
                    DEBUG - 2,
                    "G&M Step 13, rejecting replacement of KII=n%d with JJX=n%d and JII=n%d"
                    % (to_remove_node_KII, candidate_node_JJX,
                       candidate_node_JII))
                log(
                    DEBUG - 3, "  which would have formed a route %s (%.2f)" %
                    (str(D5_route), D5))
                if D5_demand - C_EPS > C:
                    log(DEBUG - 3, " violating the capacity constraint")
                else:
                    log(DEBUG - 3,
                        " violating the maximum route cost constraint")
                log(DEBUG - 2, "-> EXIT with no changes")
            # go to G&M Step 10 -> no changes, no skipping, route complete
            return route_data, [], [], True

        ## G&M Step 14
        # D6 -> DETERMINE the MINIMUM distance with JJX and JII excluded and
        #  KII included
        _, D6 = _shortest_path_through_nodes(D, 0, last_chain_node, D6_nodes)

        if D1 + D3 < D5 + D6:
            if __debug__:
                log(
                    DEBUG - 2,
                    "G&M Step 14, rejecting replacement of KII=n%d with JJX=n%d and JII=n%d"
                    % (to_remove_node_KII, candidate_node_JJX,
                       candidate_node_JII))
                log(
                    DEBUG - 3, " which would have formed a route %s (%.2f)" %
                    (str(D5_route), D5))
                log(DEBUG - 2, "-> EXIT with no changes")
            # go to G&M Step 10 -> no changes, no skipping, route complete
            return route_data, [], [], True

        ## G&M Step 15
        if __debug__:
            log(
                DEBUG - 2, "G&M Step 15, accept improving move where " +
                "KII=n%d is removed and JJX=n%d and JII=n%d are added" %
                (to_remove_node_KII, candidate_node_JJX, candidate_node_JII))
            log(DEBUG - 3,
                " which forms a route %s (%.2f)" % (str(D2_route), D2))
            log(DEBUG - 2, " -> EXIT and continue adding nodes")

        ignored_nodes = [to_remove_node_KII]
        if candidate_node_JJX != node_J and candidate_node_JII != node_J:
            ignored_nodes += [node_J]

        # go to G&M Step 4 ->
        # route changed, KII removed and skip current node J, not complete
        return RouteData(D5_route, D5, D5_demand, D5_route_nodes),\
               [candidate_node_JJX, candidate_node_JII],\
               ignored_nodes, False
Code example #19
class Matrix:

    def __init__(self, rows=None, col_ids=None):
        self.rows = OrderedSet()
        self.cols = []
        if rows:
            for row in rows:
                self.rows.add(Row(bitstring=row))
            self.create_cols(col_ids)
        else:
            for col_id in col_ids:
                self.cols.append(Column(_id=col_id))

    def __str__(self):
        if self.is_empty():
            return 'Empty matrix'
        s = ' '.join(['%5s' % col.get_id() for col in self.cols]) + '\n'
        for row in self.rows:
            s += ' '.join(['%5s' % bit for bit in str(row)]) + '\n'
        return s

    def get_cols(self):
        return self.cols

    def get_col_by_id(self, col_id):
        for col in self.cols:
            if col.get_id() == col_id:
                return col
        return None

    def add_row(self, row):
        self.rows.add(row)

    def create_cols(self, col_ids):
        if not col_ids:
            col_ids = [str(i + 1) for i in range(len(self.rows[0]))]
        for i in range(len(self.rows[0])):
            col = Column(_id=col_ids[i])
            for row in self.rows:
                col.append(row[i])
            self.cols.append(col)

    def is_empty(self):
        return len(self.rows) == 0

    def update_rows(self):
        if not self.cols:
            self.rows = OrderedSet()
            return
        rows = OrderedSet()
        for i in range(len(self.rows)):
            row = Row()
            for col in self.cols:
                row.append(col[i])
            rows.add(row)
        self.rows = rows

    def update_cols(self):
        if not self.rows:
            self.cols = []
            return
        cols = []
        for i in range(len(self.cols)):
            col_id = self.cols[i].get_id()
            col = Column(_id=col_id)
            for row in self.rows:
                col.append(row[i])
            cols.append(col)
        self.cols = cols

    def submatrix(self, removed_rows=None, removed_cols=None):
        rows = OrderedSet()
        cols = []
        if removed_rows:
            for i, row in enumerate(self.rows):
                if i not in removed_rows:
                    rows.add(row)
            self.rows = rows
            self.update_cols()
        if self.cols:
            if removed_cols:
                for i, col in enumerate(self.cols):
                    if i not in removed_cols:
                        cols.append(col)
                self.cols = cols
                self.update_rows()
                self.update_cols()

    def max_grey_level(self):
        result = self.rows[0].get_grey_level()
        for i in range(1, len(self.rows)):
            row = self.rows[i]
            if row.get_grey_level() > result:
                result = row.get_grey_level()
        return result

    def get_grey_scale_count(self):
        grey_scale_count = {}
        for row in self.rows:
            grey_level = row.get_grey_level()
            if grey_level in grey_scale_count:
                grey_scale_count[grey_level] += 1
            else:
                grey_scale_count[grey_level] = 1
        return grey_scale_count

    def check_for_hit_grey_rows(self, col_id):
        grey_scale_count = self.get_grey_scale_count()
        removed_rows = []
        col = self.get_col_by_id(col_id)
        for i, bit in enumerate(col):
            if bit:
                if self.rows[i].get_grey_level() > 0:
                    removed_rows.append(i)
                grey_level = self.rows[i].get_grey_level()
                grey_scale_count[grey_level] -= 1
                if grey_scale_count[grey_level] == 0:
                    if grey_level != 0:
                        return True
        self.submatrix(removed_rows=removed_rows)
        return False

    def set_grey_rows(self, col_id):
        new_grey_level = self.max_grey_level() + 1
        col = self.get_col_by_id(col_id)
        for i, bit in enumerate(col):
            if bit:
                self.rows[i].set_grey_level(new_grey_level)

    def hit_rows(self, col_index):
        hit_rows = []
        col = self.cols[col_index]
        for i, bit in enumerate(col):
            if bit:
                hit_rows.append(i)
        return hit_rows

    def remove_redundant_rows(self):
        i = 0
        while i < len(self.rows) - 1:
            j = i + 1
            while j < len(self.rows):
                if (self.rows[i] & self.rows[j]) == self.rows[i]:
                    self.rows.remove(self.rows[j])
                elif (self.rows[i] & self.rows[j]) == self.rows[j]:
                    self.rows.remove(self.rows[i])
                    j = i + 1
                else:
                    j += 1
            i += 1
        self.update_cols()

    def remove_equals_cols(self):
        substitutions_map = {}
        i = 0
        while i < len(self.cols) - 1:
            col_i = self.cols[i]
            j = i + 1
            while j < len(self.cols):
                col_j = self.cols[j]
                if col_i == col_j:
                    if col_i.get_id() in substitutions_map:
                        substitutions_map[col_i.get_id()].append(col_j.get_id())
                    else:
                        substitutions_map[col_i.get_id()] = [col_j.get_id()]
                    self.cols.pop(j)
                else:
                    j += 1
            i += 1
        self.update_rows()
        return substitutions_map

    def process_cols(self, remove_singletons=True):
        singletons = []
        i = 0
        while i < len(self.cols):
            if not self.cols[i].any():
                self.cols.pop(i)
            elif remove_singletons and self.cols[i].all():
                singletons.append(self.cols[i].get_id())
                self.cols.pop(i)
            else:
                i += 1
        self.update_rows()
        return singletons

    def process_rows_with_unique_1(self):
        removed_rows = []
        removed_cols = []
        everywhere_ids = []
        for i, row in enumerate(self.rows):
            if i not in removed_rows:
                if row.count() == 1:
                    col_index = int(row.index(True))
                    removed_cols.append(col_index)
                    everywhere_ids.append(self.cols[col_index].get_id())
                    removed_rows.append(i)
        self.submatrix(removed_rows, removed_cols)
        return everywhere_ids

    def get_super_cols(self, col_index):
        input_col = self.cols[col_index]
        super_cols = []
        for i, col in enumerate(self.cols):
            if i != col_index:
                if (col & input_col) == input_col:
                    super_cols.append(i)
        return super_cols

    def check_for_rows_without_1(self):
        for row in self.rows:
            if not row.any():
                return True
        return False

    def partition(self):
        partitions = [deepcopy(self.rows[0])]
        for i in range(1, len(self.rows)):
            matching_partitions = []
            for j in range(len(partitions)):
                if (self.rows[i] & partitions[j]).any():
                    partitions[j] |= self.rows[i]
                    matching_partitions.append(j)
            if not matching_partitions:
                partitions.append(deepcopy(self.rows[i]))
            else:
                new_partition = deepcopy(partitions[matching_partitions[0]])
                for k in matching_partitions[1:]:
                    new_partition |= partitions[k]
                partitions = list(filter(lambda p: partitions.index(p) not in matching_partitions, partitions))  # list() so it stays appendable under Python 3
                partitions.append(new_partition)
        submatrices = None
        if len(partitions) > 1:
            col_ids = [col.get_id() for col in self.cols]  # materialise; a map iterator would be exhausted after the first Matrix
            submatrices = [Matrix(col_ids=col_ids) for _ in range(len(partitions))]
            for row in self.rows:
                for i, partition in enumerate(partitions):
                    if (row & partition).any():
                        submatrices[i].add_row(deepcopy(row))
                        break
            for submatrix in submatrices:
                submatrix.update_cols()
                submatrix.process_cols(remove_singletons=False)
        return submatrices

    def preprocessing(self, enable_map):
        old_size = (-1, -1)
        new_size = (len(self.rows), len(self.cols))
        singletons = []
        everywhere_ids = []
        self.remove_redundant_rows()
        substitution_map = None
        if enable_map:
            substitution_map = self.remove_equals_cols()
        while new_size != (0, 0) and new_size != old_size:
            old_size = new_size
            singletons.append(self.process_cols())
            if not self.check_for_rows_without_1():
                everywhere_ids.append(self.process_rows_with_unique_1())
            else:
                everywhere_ids.append([])
            new_size = (len(self.rows), len(self.cols))
        return singletons, everywhere_ids, substitution_map

    def find_next_col(self):
        for row in self.rows:
            if row.count() == 1:
                return row.index()
        # return 0
        zero_counters = [self.count_0_hit_by_col(col_index) for col_index in range(len(self.cols))]
        return zero_counters.index(max(zero_counters))
        # one_counters = [self.cols[i].count() for i in range(len(self.cols))]
        # return one_counters.index(max(one_counters))

    def count_0_hit_by_col(self, col_index):
        # col = self.cols[col_index]
        # count = 0
        # for i in range(len(col)):
        #     if col[i]:
        #         count += self.rows[i].count(False)
        # return count
        col = self.cols[col_index]
        first_one_index = col.index(True)
        max_count = self.rows[first_one_index].count(False)
        for i in range(first_one_index + 1, len(col)):
            if col[i]:
                count = self.rows[i].count(False)
                if count > max_count:
                    max_count = count
        return max_count

    def to_sets(self):
        sets = []
        for row in self.rows:
            _set = set()
            for j, bit in enumerate(row):
                if bit:
                    _set.add(self.cols[j].get_id())
            sets.append(_set)
        return sets
Code example #20
File: replica.py Project: evernym/plenum
class Replica(HasActionQueue, MessageProcessor):
    def __init__(self, node: 'plenum.server.node.Node', instId: int,
                 isMaster: bool = False):
        """
        Create a new replica.

        :param node: Node on which this replica is located
        :param instId: the id of the protocol instance the replica belongs to
        :param isMaster: is this a replica of the master protocol instance
        """
        super().__init__()
        self.stats = Stats(TPCStat)

        self.config = getConfig()

        routerArgs = [(ReqDigest, self._preProcessReqDigest)]

        for r in [PrePrepare, Prepare, Commit]:
            routerArgs.append((r, self.processThreePhaseMsg))

        routerArgs.append((Checkpoint, self.processCheckpoint))
        routerArgs.append((ThreePCState, self.process3PhaseState))

        self.inBoxRouter = Router(*routerArgs)

        self.threePhaseRouter = Router(
                (PrePrepare, self.processPrePrepare),
                (Prepare, self.processPrepare),
                (Commit, self.processCommit)
        )

        self.node = node
        self.instId = instId

        self.name = self.generateName(node.name, self.instId)

        self.outBox = deque()
        """
        This queue is used by the replica to send messages to its node. Replica
        puts messages that are consumed by its node
        """

        self.inBox = deque()
        """
        This queue is used by the replica to receive messages from its node.
        Node puts messages that are consumed by the replica
        """

        self.inBoxStash = deque()
        """
        If messages need to go back on the queue, they go here temporarily and
        are put back on the queue on a state change
        """

        self.isMaster = isMaster

        # Indicates name of the primary replica of this protocol instance.
        # None in case the replica does not know who the primary of the
        # instance is
        self._primaryName = None    # type: Optional[str]

        # Requests waiting to be processed once the replica is able to decide
        # whether it is primary or not
        self.postElectionMsgs = deque()

        # PRE-PREPAREs that are waiting to be processed but do not have the
        # corresponding request digest. Happens when replica has not been
        # forwarded the request by the node but is getting 3 phase messages.
        # The value is a list since a malicious entity might send a PRE-PREPARE
        # with a different digest and, since we don't have the request finalised,
        # we store all PRE-PREPAREs
        self.prePreparesPendingReqDigest = {}   # type: Dict[Tuple[str, int], List]

        # PREPAREs that are stored by non primary replica for which it has not
        #  got any PRE-PREPARE. Dictionary that stores a tuple of view no and
        #  prepare sequence number as key and a deque of PREPAREs as value.
        # This deque is attempted to be flushed on receiving every
        # PRE-PREPARE request.
        self.preparesWaitingForPrePrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # COMMITs that are stored for which there are no PRE-PREPARE or PREPARE
        # received
        self.commitsWaitingForPrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # Dictionary of sent PRE-PREPARE that are stored by primary replica
        # which it has broadcasted to all other non primary replicas
        # Key of dictionary is a 2 element tuple with elements viewNo,
        # pre-prepare seqNo and value is a tuple of Request Digest and time
        self.sentPrePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received PRE-PREPAREs. Key of dictionary is a 2
        # element tuple with elements viewNo, pre-prepare seqNo and value is
        # a tuple of Request Digest and time
        self.prePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received Prepare requests. Key of dictionary is a 2
        # element tuple with elements viewNo, seqNo and value is a 2 element
        # tuple containing request digest and set of sender node names(sender
        # replica names in case of multiple protocol instances)
        # (viewNo, seqNo) -> ((identifier, reqId), {senders})
        self.prepares = Prepares()
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], Set[str]]]

        self.commits = Commits()    # type: Dict[Tuple[int, int],
        # Tuple[Tuple[str, int], Set[str]]]

        # Set of tuples to keep track of ordered requests. Each tuple is
        # (viewNo, ppSeqNo)
        self.ordered = OrderedSet()        # type: OrderedSet[Tuple[int, int]]

        # Dictionary to keep track of which replica was primary during each
        # view. Key is the view no and value is the name of the primary
        # replica during that view
        self.primaryNames = {}  # type: Dict[int, str]

        # Holds msgs that are for later views
        self.threePhaseMsgsForLaterView = deque()
        # type: deque[(ThreePhaseMsg, str)]

        # Holds tuple of view no and prepare seq no of 3-phase messages it
        # received while it was not participating
        self.stashingWhileCatchingUp = set()       # type: Set[Tuple]

        # Commits which are not being ordered since commits with lower view
        # numbers and sequence numbers have not been ordered yet. Key is the
        # viewNo and value a map of pre-prepare sequence number to commit
        self.stashedCommitsForOrdering = {}         # type: Dict[int,
        # Dict[int, Commit]]

        self.checkpoints = SortedDict(lambda k: k[0])

        self.stashingWhileOutsideWaterMarks = deque()

        # Low water mark
        self._h = 0              # type: int

        # High water mark
        self.H = self._h + self.config.LOG_SIZE   # type: int
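        # Together the low and high water marks bound the window of
        # pre-prepare sequence numbers this replica currently accepts;
        # see isPpSeqNoAcceptable: h < ppSeqNo <= H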

        self.lastPrePrepareSeqNo = self.h  # type: int

    @property
    def h(self) -> int:
        return self._h

    @h.setter
    def h(self, n):
        self._h = n
        self.H = self._h + self.config.LOG_SIZE

    @property
    def requests(self):
        return self.node.requests

    def shouldParticipate(self, viewNo: int, ppSeqNo: int):
        # The replica participates only if its node is participating in the
        # consensus process and the replica did not stash any of this
        # request's 3-phase messages
        return self.node.isParticipating and (viewNo, ppSeqNo) \
                                             not in self.stashingWhileCatchingUp

    @staticmethod
    def generateName(nodeName: str, instId: int):
        """
        Create and return the name for a replica using its nodeName and
        instanceId.
         Ex: Alpha:1
        """
        return "{}:{}".format(nodeName, instId)

    @staticmethod
    def getNodeName(replicaName: str):
        return replicaName.split(":")[0]

    @property
    def isPrimary(self):
        """
        Is this node primary?

        :return: True if this replica is primary, False if it is not, and
            None if its primary status is not yet known
        """
        return self._primaryName == self.name if self._primaryName is not None \
            else None

    @property
    def primaryName(self):
        """
        Name of the primary replica of this replica's instance

        :return: Returns name if primary is known, None otherwise
        """
        return self._primaryName

    @primaryName.setter
    def primaryName(self, value: Optional[str]) -> None:
        """
        Set the value of isPrimary.

        :param value: the value to set isPrimary to
        """
        if not value == self._primaryName:
            self._primaryName = value
            self.primaryNames[self.viewNo] = value
            logger.debug("{} setting primaryName for view no {} to: {}".
                         format(self, self.viewNo, value))
            logger.debug("{}'s primaryNames for views are: {}".
                         format(self, self.primaryNames))
            self._stateChanged()

    def _stateChanged(self):
        """
        A series of actions to be performed when the state of this replica
        changes.

        - UnstashInBox (see _unstashInBox)
        """
        self._unstashInBox()
        if self.isPrimary is not None:
            # TODO handle suspicion exceptions here
            self.process3PhaseReqsQueue()
            # TODO handle suspicion exceptions here
            try:
                self.processPostElectionMsgs()
            except SuspiciousNode as ex:
                self.outBox.append(ex)
                self.discard(ex.msg, ex.reason, logger.warning)

    def _stashInBox(self, msg):
        """
        Stash the specified message into the inBoxStash of this replica.

        :param msg: the message to stash
        """
        self.inBoxStash.append(msg)

    def _unstashInBox(self):
        """
        Append the inBoxStash to the right of the inBox.
        """
        self.inBox.extend(self.inBoxStash)
        self.inBoxStash.clear()

    def __repr__(self):
        return self.name

    @property
    def f(self) -> int:
        """
        Return the number of Byzantine Failures that can be tolerated by this
        system. Equal to (N - 1)/3, where N is the number of nodes in the
        system.
        """
        return self.node.f

    @property
    def viewNo(self):
        """
        Return the current view number of this replica.
        """
        return self.node.viewNo

    def isPrimaryInView(self, viewNo: int) -> Optional[bool]:
        """
        Return whether this replica was the primary of the given view.
        """
        return self.primaryNames[viewNo] == self.name

    def isMsgForLaterView(self, msg):
        """
        Return whether this request's view number is greater than the current
        view number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo > self.viewNo

    def isMsgForCurrentView(self, msg):
        """
        Return whether this request's view number is equal to the current view
        number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo == self.viewNo

    def isMsgForPrevView(self, msg):
        """
        Return whether this request's view number is less than the current view
        number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo < self.viewNo

    def isPrimaryForMsg(self, msg) -> Optional[bool]:
        """
        Return whether this replica is primary if the request's view number is
        equal this replica's view number and primary has been selected for
        the current view.
        Return None otherwise.

        :param msg: message
        """
        if self.isMsgForLaterView(msg):
            self.discard(msg,
                         "Cannot get primary status for a request for a later "
                         "view {}. Request is {}".format(self.viewNo, msg),
                         logger.error)
        else:
            return self.isPrimary if self.isMsgForCurrentView(msg) \
                else self.isPrimaryInView(msg.viewNo)

    def isMsgFromPrimary(self, msg, sender: str) -> bool:
        """
        Return whether this message was from primary replica
        :param msg:
        :param sender:
        :return:
        """
        if self.isMsgForLaterView(msg):
            logger.error("{} cannot get primary for a request for a later "
                         "view. Request is {}".format(self, msg))
        else:
            return self.primaryName == sender if self.isMsgForCurrentView(
                msg) else self.primaryNames[msg.viewNo] == sender

    def _preProcessReqDigest(self, rd: ReqDigest) -> None:
        """
        Process request digest if this replica is not a primary, otherwise stash
        the message into the inBox.

        :param rd: the client Request Digest
        """
        if self.isPrimary is not None:
            self.processReqDigest(rd)
        else:
            logger.debug("{} stashing request digest {} since it does not know "
                         "its primary status".
                         format(self, (rd.identifier, rd.reqId)))
            self._stashInBox(rd)

    def serviceQueues(self, limit=None):
        """
        Process `limit` number of messages in the inBox.

        :param limit: the maximum number of messages to process
        :return: the number of messages successfully processed
        """
        # TODO should handle SuspiciousNode here
        r = self.inBoxRouter.handleAllSync(self.inBox, limit)
        r += self._serviceActions()
        return r
        # Messages that cannot be processed right now need to be added back to
        # the queue; they might be processable later

    def processPostElectionMsgs(self):
        """
        Process messages waiting for the election of a primary replica to
        complete.
        """
        while self.postElectionMsgs:
            msg = self.postElectionMsgs.popleft()
            logger.debug("{} processing pended msg {}".format(self, msg))
            self.dispatchThreePhaseMsg(*msg)

    def process3PhaseReqsQueue(self):
        """
        Process the 3 phase requests from the queue whose view number is equal
        to the current view number of this replica.
        """
        unprocessed = deque()
        while self.threePhaseMsgsForLaterView:
            request, sender = self.threePhaseMsgsForLaterView.popleft()
            logger.debug("{} processing pended 3 phase request: {}"
                         .format(self, request))
            # If the request is for a later view, don't try to process it but
            # add it back to the queue.
            if self.isMsgForLaterView(request):
                unprocessed.append((request, sender))
            else:
                self.processThreePhaseMsg(request, sender)
        self.threePhaseMsgsForLaterView = unprocessed

    @property
    def quorum(self) -> int:
        r"""
        Return the quorum of this RBFT system. Equal to :math:`2f + 1`.
        Return None if `f` is not yet determined.
        """
        return self.node.quorum

    def dispatchThreePhaseMsg(self, msg: ThreePhaseMsg, sender: str) -> Any:
        """
        Create a three phase request to be handled by the threePhaseRouter.

        :param msg: the ThreePhaseMsg to dispatch
        :param sender: the name of the node that sent this request
        """
        senderRep = self.generateName(sender, self.instId)
        if self.isPpSeqNoAcceptable(msg.ppSeqNo):
            try:
                self.threePhaseRouter.handleSync((msg, senderRep))
            except SuspiciousNode as ex:
                self.node.reportSuspiciousNodeEx(ex)
        else:
            logger.debug("{} stashing 3 phase message {} since ppSeqNo {} is "
                         "not between {} and {}".
                         format(self, msg, msg.ppSeqNo, self.h, self.H))
            self.stashingWhileOutsideWaterMarks.append((msg, sender))

    def processReqDigest(self, rd: ReqDigest):
        """
        Process a request digest. Works only if this replica has decided its
        primary status.

        :param rd: the client request digest to process
        """
        self.stats.inc(TPCStat.ReqDigestRcvd)
        if self.isPrimary is False:
            self.dequeuePrePrepare(rd.identifier, rd.reqId)
        else:
            self.doPrePrepare(rd)

    def processThreePhaseMsg(self, msg: ThreePhaseMsg, sender: str):
        """
        Process a 3-phase (pre-prepare, prepare and commit) request.
        Dispatch the request only if primary has already been decided, otherwise
        stash it.

        :param msg: the Three Phase message, one of PRE-PREPARE, PREPARE,
            COMMIT
        :param sender: name of the node that sent this message
        """
        # Can only proceed further if it knows whether it is primary or not
        if self.isMsgForLaterView(msg):
            self.threePhaseMsgsForLaterView.append((msg, sender))
            logger.debug("{} pended received 3 phase request for a later view: "
                         "{}".format(self, msg))
        else:
            if self.isPrimary is None:
                self.postElectionMsgs.append((msg, sender))
                logger.debug("Replica {} pended request {} from {}".
                             format(self, msg, sender))
            else:
                self.dispatchThreePhaseMsg(msg, sender)

    def processPrePrepare(self, pp: PrePrepare, sender: str):
        """
        Validate and process the PRE-PREPARE specified.
        If validation is successful, create a PREPARE and broadcast it.

        :param pp: a prePrepareRequest
        :param sender: name of the node that sent this message
        """
        key = (pp.viewNo, pp.ppSeqNo)
        logger.debug("{} Receiving PRE-PREPARE{} at {} from {}".
                     format(self, key, time.perf_counter(), sender))
        if self.canProcessPrePrepare(pp, sender):
            if not self.node.isParticipating:
                self.stashingWhileCatchingUp.add(key)
            self.addToPrePrepares(pp)
            logger.info("{} processed incoming PRE-PREPARE{}".
                        format(self, key))

    def tryPrepare(self, pp: PrePrepare):
        """
        Try to send the Prepare message if the PrePrepare message is ready to
        be passed into the Prepare phase.
        """
        if self.canSendPrepare(pp):
            self.doPrepare(pp)
        else:
            logger.debug("{} cannot send PREPARE".format(self))

    def processPrepare(self, prepare: Prepare, sender: str) -> None:
        """
        Validate and process the PREPARE specified.
        If validation is successful, create a COMMIT and broadcast it.

        :param prepare: a PREPARE msg
        :param sender: name of the node that sent the PREPARE
        """
        # TODO move this try/except up higher
        logger.debug("{} received PREPARE{} from {}".
                     format(self, (prepare.viewNo, prepare.ppSeqNo), sender))
        try:
            if self.isValidPrepare(prepare, sender):
                self.addToPrepares(prepare, sender)
                self.stats.inc(TPCStat.PrepareRcvd)
                logger.debug("{} processed incoming PREPARE {}".
                             format(self, (prepare.viewNo, prepare.ppSeqNo)))
            else:
                # TODO let's have isValidPrepare throw an exception that gets
                # handled and possibly logged higher
                logger.warning("{} cannot process incoming PREPARE".
                               format(self))
        except SuspiciousNode as ex:
            self.node.reportSuspiciousNodeEx(ex)

    def processCommit(self, commit: Commit, sender: str) -> None:
        """
        Validate and process the COMMIT specified.
        If validation is successful, return the message to the node.

        :param commit: an incoming COMMIT message
        :param sender: name of the node that sent the COMMIT
        """
        logger.debug("{} received COMMIT {} from {}".
                     format(self, commit, sender))
        if self.isValidCommit(commit, sender):
            self.stats.inc(TPCStat.CommitRcvd)
            self.addToCommits(commit, sender)
            logger.debug("{} processed incoming COMMIT{}".
                         format(self, (commit.viewNo, commit.ppSeqNo)))

    def tryCommit(self, prepare: Prepare):
        """
        Try to commit if the Prepare message is ready to be passed into the
        commit phase.
        """
        if self.canCommit(prepare):
            self.doCommit(prepare)
        else:
            logger.debug("{} not yet able to send COMMIT".format(self))

    def tryOrder(self, commit: Commit):
        """
        Try to order if the Commit message is ready to be ordered.
        """
        canOrder, reason = self.canOrder(commit)
        if canOrder:
            logger.debug("{} returning request to node".format(self))
            self.tryOrdering(commit)
        else:
            logger.trace("{} cannot return request to node: {}".
                         format(self, reason))

    def doPrePrepare(self, reqDigest: ReqDigest) -> None:
        """
        Broadcast a PRE-PREPARE to all the replicas.

        :param reqDigest: a tuple with elements identifier, reqId, and digest
        """
        if not self.node.isParticipating:
            logger.error("Non participating node is attempting PRE-PREPARE. "
                         "This should not happen.")
            return

        if self.lastPrePrepareSeqNo == self.H:
            logger.debug("{} stashing PRE-PREPARE {} since outside greater "
                         "than high water mark {}".
                         format(self, (self.viewNo, self.lastPrePrepareSeqNo+1),
                                self.H))
            self.stashingWhileOutsideWaterMarks.append(reqDigest)
            return
        self.lastPrePrepareSeqNo += 1
        tm = time.time()*1000
        logger.debug("{} Sending PRE-PREPARE {} at {}".
                     format(self, (self.viewNo, self.lastPrePrepareSeqNo),
                            time.perf_counter()))
        prePrepareReq = PrePrepare(self.instId,
                                   self.viewNo,
                                   self.lastPrePrepareSeqNo,
                                   *reqDigest,
                                   tm)
        self.sentPrePrepares[self.viewNo, self.lastPrePrepareSeqNo] = (reqDigest.key,
                                                                       tm)
        self.send(prePrepareReq, TPCStat.PrePrepareSent)

    def doPrepare(self, pp: PrePrepare):
        logger.debug("{} Sending PREPARE {} at {}".
                     format(self, (pp.viewNo, pp.ppSeqNo), time.perf_counter()))
        prepare = Prepare(self.instId,
                          pp.viewNo,
                          pp.ppSeqNo,
                          pp.digest,
                          pp.ppTime)
        self.send(prepare, TPCStat.PrepareSent)
        self.addToPrepares(prepare, self.name)

    def doCommit(self, p: Prepare):
        """
        Create a commit message from the given Prepare message and trigger the
        commit phase
        :param p: the prepare message
        """
        logger.debug("{} Sending COMMIT{} at {}".
                     format(self, (p.viewNo, p.ppSeqNo), time.perf_counter()))
        commit = Commit(self.instId,
                        p.viewNo,
                        p.ppSeqNo,
                        p.digest,
                        p.ppTime)
        self.send(commit, TPCStat.CommitSent)
        self.addToCommits(commit, self.name)

    def canProcessPrePrepare(self, pp: PrePrepare, sender: str) -> bool:
        """
        Decide whether this replica is eligible to process a PRE-PREPARE,
        based on the following criteria:

        - this replica is a non-primary replica
        - the request isn't in its list of received PRE-PREPAREs
        - the request is waiting for its PRE-PREPARE and the digest value matches

        :param pp: a PRE-PREPARE msg to process
        :param sender: the name of the node that sent the PRE-PREPARE msg
        :return: True if processing is allowed, False otherwise
        """
        # TODO: Check whether it is rejecting PRE-PREPARE from previous view
        # PRE-PREPARE should not be sent from non primary
        if not self.isMsgFromPrimary(pp, sender):
            raise SuspiciousNode(sender, Suspicions.PPR_FRM_NON_PRIMARY, pp)

        # A PRE-PREPARE is being sent to primary
        if self.isPrimaryForMsg(pp) is True:
            raise SuspiciousNode(sender, Suspicions.PPR_TO_PRIMARY, pp)

        # A PRE-PREPARE is sent that has already been received
        if (pp.viewNo, pp.ppSeqNo) in self.prePrepares:
            raise SuspiciousNode(sender, Suspicions.DUPLICATE_PPR_SENT, pp)

        key = (pp.identifier, pp.reqId)
        if not self.requests.isFinalised(key):
            self.enqueuePrePrepare(pp, sender)
            return False

        # A PRE-PREPARE is sent that does not match request digest
        if self.requests.digest(key) != pp.digest:
            raise SuspiciousNode(sender, Suspicions.PPR_DIGEST_WRONG, pp)

        return True

    def addToPrePrepares(self, pp: PrePrepare) -> None:
        """
        Add the specified PRE-PREPARE to this replica's list of received
        PRE-PREPAREs.

        :param pp: the PRE-PREPARE to add to the list
        """
        key = (pp.viewNo, pp.ppSeqNo)
        self.prePrepares[key] = \
            ((pp.identifier, pp.reqId), pp.ppTime)
        self.dequeuePrepares(*key)
        self.dequeueCommits(*key)
        self.stats.inc(TPCStat.PrePrepareRcvd)
        self.tryPrepare(pp)

    def hasPrepared(self, request) -> bool:
        return self.prepares.hasPrepareFrom(request, self.name)

    def canSendPrepare(self, request) -> bool:
        """
        Return whether the request identified by (identifier, requestId) can
        proceed to the Prepare step.

        :param request: any object with identifier and requestId attributes
        """
        return self.shouldParticipate(request.viewNo, request.ppSeqNo) \
            and not self.hasPrepared(request) \
            and self.requests.isFinalised((request.identifier,
                                           request.reqId))

    def isValidPrepare(self, prepare: Prepare, sender: str) -> bool:
        """
        Return whether the PREPARE specified is valid.

        :param prepare: the PREPARE to validate
        :param sender: the name of the node that sent the PREPARE
        :return: True if PREPARE is valid, False otherwise
        """
        key = (prepare.viewNo, prepare.ppSeqNo)
        primaryStatus = self.isPrimaryForMsg(prepare)

        ppReqs = self.sentPrePrepares if primaryStatus else self.prePrepares
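        # A primary replica validates an incoming PREPARE against the
        # PRE-PREPAREs it has sent; a non primary validates against the
        # PRE-PREPAREs it has received.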

        # If a non primary replica and receiving a PREPARE request before a
        # PRE-PREPARE request, then proceed

        # PREPARE should not be sent from primary
        if self.isMsgFromPrimary(prepare, sender):
            raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)

        # If non primary replica
        if primaryStatus is False:
            if self.prepares.hasPrepareFrom(prepare, sender):
                raise SuspiciousNode(sender, Suspicions.DUPLICATE_PR_SENT, prepare)
            # If PRE-PREPARE not received for the PREPARE, might be slow network
            if key not in ppReqs:
                self.enqueuePrepare(prepare, sender)
                return False
            elif prepare.digest != self.requests.digest(ppReqs[key][0]):
                raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
            elif prepare.ppTime != ppReqs[key][1]:
                raise SuspiciousNode(sender, Suspicions.PR_TIME_WRONG,
                                     prepare)
            else:
                return True
        # If primary replica
        else:
            if self.prepares.hasPrepareFrom(prepare, sender):
                raise SuspiciousNode(sender, Suspicions.DUPLICATE_PR_SENT, prepare)
            # If PRE-PREPARE was not sent for this PREPARE, certainly
            # malicious behavior
            elif key not in ppReqs:
                raise SuspiciousNode(sender, Suspicions.UNKNOWN_PR_SENT, prepare)
            elif prepare.digest != self.requests.digest(ppReqs[key][0]):
                raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
            elif prepare.ppTime != ppReqs[key][1]:
                raise SuspiciousNode(sender, Suspicions.PR_TIME_WRONG,
                                     prepare)
            else:
                return True

    def addToPrepares(self, prepare: Prepare, sender: str):
        self.prepares.addVote(prepare, sender)
        self.tryCommit(prepare)

    def hasCommitted(self, request) -> bool:
        return self.commits.hasCommitFrom(ThreePhaseKey(
            request.viewNo, request.ppSeqNo), self.name)

    def canCommit(self, prepare: Prepare) -> bool:
        """
        Return whether the specified PREPARE can proceed to the Commit
        step.

        Decision criteria:

        - If this replica has got just 2f PREPARE requests then commit request.
        - If less than 2f PREPARE requests then probably there's no consensus on
            the request; don't commit
        - If more than 2f then already sent COMMIT; don't commit

        :param prepare: the PREPARE
        """
        return self.shouldParticipate(prepare.viewNo, prepare.ppSeqNo) and \
            self.prepares.hasQuorum(prepare, self.f) and \
            not self.hasCommitted(prepare)

    def isValidCommit(self, commit: Commit, sender: str) -> bool:
        """
        Return whether the COMMIT specified is valid.

        :param commit: the COMMIT to validate
        :return: True if `request` is valid, False otherwise
        """
        primaryStatus = self.isPrimaryForMsg(commit)
        ppReqs = self.sentPrePrepares if primaryStatus else self.prePrepares
        key = (commit.viewNo, commit.ppSeqNo)
        if key not in ppReqs:
            self.enqueueCommit(commit, sender)
            return False

        if (key not in self.prepares and
                key not in self.preparesWaitingForPrePrepare):
            logger.debug("{} rejecting COMMIT{} due to lack of prepares".
                         format(self, key))
            # raise SuspiciousNode(sender, Suspicions.UNKNOWN_CM_SENT, commit)
            return False
        elif self.commits.hasCommitFrom(commit, sender):
            raise SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit)
        elif commit.digest != self.getDigestFor3PhaseKey(ThreePhaseKey(*key)):
            raise SuspiciousNode(sender, Suspicions.CM_DIGEST_WRONG, commit)
        elif key in ppReqs and commit.ppTime != ppReqs[key][1]:
            raise SuspiciousNode(sender, Suspicions.CM_TIME_WRONG,
                                 commit)
        else:
            return True

    def addToCommits(self, commit: Commit, sender: str):
        """
        Add the specified COMMIT to this replica's list of received
        commit requests.

        :param commit: the COMMIT to add to the list
        :param sender: the name of the node that sent the COMMIT
        """
        self.commits.addVote(commit, sender)
        self.tryOrder(commit)

    def hasOrdered(self, viewNo, ppSeqNo) -> bool:
        return (viewNo, ppSeqNo) in self.ordered

    def canOrder(self, commit: Commit) -> Tuple[bool, Optional[str]]:
        """
        Return whether the specified commitRequest can be returned to the node.

        Decision criteria:

        - If have got just 2f+1 Commit requests then return request to node
        - If less than 2f+1 of commit requests then probably don't have
            consensus on the request; don't return request to node
        - If more than 2f+1 then already returned to node; don't return request
            to node

        :param commit: the COMMIT
        """
        if not self.commits.hasQuorum(commit, self.f):
            return False, "no quorum: {} commits where f is {}".\
                          format(commit, self.f)

        if self.hasOrdered(commit.viewNo, commit.ppSeqNo):
            return False, "already ordered"

        if not self.isNextInOrdering(commit):
            viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
            if viewNo not in self.stashedCommitsForOrdering:
                self.stashedCommitsForOrdering[viewNo] = {}
            self.stashedCommitsForOrdering[viewNo][ppSeqNo] = commit
            # self._schedule(self.orderStashedCommits, 2)
            self.startRepeating(self.orderStashedCommits, 2)
            return False, "stashing {} since out of order".\
                format(commit)

        return True, None

    def isNextInOrdering(self, commit: Commit):
        viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
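        # Fast path: if the last ordered key is the immediate predecessor of
        # this commit in the same view, it is next in line.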
        if self.ordered and self.ordered[-1] == (viewNo, ppSeqNo-1):
            return True
        for (v, p) in self.commits:
            if v < viewNo:
                # Have commits from previous view that are unordered.
                # TODO: Question: would commits be always ordered, what if
                # some are never ordered and its fine, go to PBFT.
                return False
            if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered:
                # If unordered commits are found with lower ppSeqNo then this
                # cannot be ordered.
                return False

        # TODO: Revisit PBFT paper, how to make sure that last request of the
        # last view has been ordered? Need change in `VIEW CHANGE` mechanism.
        # Somehow view change needs to communicate what the last request was.
        # Also what if some COMMITs were completely missed in the same view
        return True

    def orderStashedCommits(self):
        # TODO: What if the first few commits were out of order and stashed?
        # `self.ordered` would be empty
        if self.ordered:
            lastOrdered = self.ordered[-1]
            vToRemove = set()
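            # Walk the stashed commits view by view, ordering any commit that
            # has become next in line after the latest ordered key.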
            for v in self.stashedCommitsForOrdering:
                if v < lastOrdered[0] and self.stashedCommitsForOrdering[v]:
                    raise RuntimeError("{} found commits from previous view {}"
                                       " that were not ordered but last ordered"
                                       " is {}".format(self, v, lastOrdered))
                pToRemove = set()
                for p, commit in self.stashedCommitsForOrdering[v].items():
                    if (v == lastOrdered[0] and lastOrdered == (v, p - 1)) or \
                            (v > lastOrdered[0] and
                                self.isLowestCommitInView(commit)):
                        logger.debug("{} ordering stashed commit {}".
                                     format(self, commit))
                        if self.tryOrdering(commit):
                            lastOrdered = (v, p)
                            pToRemove.add(p)

                for p in pToRemove:
                    del self.stashedCommitsForOrdering[v][p]
                if not self.stashedCommitsForOrdering[v]:
                    vToRemove.add(v)

            for v in vToRemove:
                del self.stashedCommitsForOrdering[v]

            # if self.stashedCommitsForOrdering:
            #     self._schedule(self.orderStashedCommits, 2)
            if not self.stashedCommitsForOrdering:
                self.stopRepeating(self.orderStashedCommits)

    def isLowestCommitInView(self, commit):
        # TODO: Assumption: This assumes that at least one commit that was sent
        #  for any request by any node has been received in the view of this
        # commit
        ppSeqNos = []
        for v, p in self.commits:
            if v == commit.viewNo:
                ppSeqNos.append(p)
        return min(ppSeqNos) == commit.ppSeqNo if ppSeqNos else True

    def tryOrdering(self, commit: Commit) -> bool:
        """
        Attempt to send an ORDERED request for the specified COMMIT to the
        node.

        :param commit: the COMMIT message
        """
        key = (commit.viewNo, commit.ppSeqNo)
        logger.debug("{} trying to order COMMIT{}".format(self, key))
        reqKey = self.getReqKeyFrom3PhaseKey(key)   # type: Tuple
        digest = self.getDigestFor3PhaseKey(key)
        if not digest:
            logger.error("{} did not find digest for {}, request key {}".
                         format(self, key, reqKey))
            return False
        self.doOrder(*key, *reqKey, digest, commit.ppTime)
        return True

    def doOrder(self, viewNo, ppSeqNo, identifier, reqId, digest, ppTime):
        key = (viewNo, ppSeqNo)
        self.addToOrdered(*key)
        ordered = Ordered(self.instId,
                          viewNo,
                          identifier,
                          reqId,
                          ppTime)
        # TODO: Should not order or add to checkpoint while syncing
        # 3 phase state.
        self.send(ordered, TPCStat.OrderSent)
        if key in self.stashingWhileCatchingUp:
            self.stashingWhileCatchingUp.remove(key)
        logger.debug("{} ordered request {}".format(self, (viewNo, ppSeqNo)))
        self.addToCheckpoint(ppSeqNo, digest)

    def processCheckpoint(self, msg: Checkpoint, sender: str):
        if self.checkpoints:
            seqNo = msg.seqNo
            _, firstChk = self.firstCheckPoint
            if firstChk.isStable:
                if firstChk.seqNo == seqNo:
                    self.discard(msg, reason="Checkpoint already stable",
                                 logMethod=logger.debug)
                    return
                if firstChk.seqNo > seqNo:
                    self.discard(msg, reason="Higher stable checkpoint present",
                                 logMethod=logger.debug)
                    return
            for state in self.checkpoints.values():
                if state.seqNo == seqNo:
                    if state.digest == msg.digest:
                        state.receivedDigests[sender] = msg.digest
                        break
                    else:
                        logger.error("{} received an incorrect digest {} for "
                                     "checkpoint {} from {}".format(self,
                                                                    msg.digest,
                                                                    seqNo,
                                                                    sender))
                        return
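            # A checkpoint becomes stable once 2f matching digests have been
            # received from other replicas.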
            if len(state.receivedDigests) == 2*self.f:
                self.markCheckPointStable(msg.seqNo)
        else:
            self.discard(msg, reason="No checkpoints present to tally",
                         logMethod=logger.warn)

    def _newCheckpointState(self, ppSeqNo, digest) -> CheckpointState:
        s, e = ppSeqNo, ppSeqNo + self.config.CHK_FREQ - 1
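        # Each checkpoint state covers a window of CHK_FREQ consecutive
        # pre-prepare sequence numbers, keyed by the (start, end) of that window.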
        logger.debug("{} adding new checkpoint state for {}".
                     format(self, (s, e)))
        state = CheckpointState(ppSeqNo, [digest, ], None, {}, False)
        self.checkpoints[s, e] = state
        return state

    def addToCheckpoint(self, ppSeqNo, digest):
        for (s, e) in self.checkpoints.keys():
            if s <= ppSeqNo <= e:
                state = self.checkpoints[s, e]  # type: CheckpointState
                state.digests.append(digest)
                state = updateNamedTuple(state, seqNo=ppSeqNo)
                self.checkpoints[s, e] = state
                break
        else:
            state = self._newCheckpointState(ppSeqNo, digest)
            s, e = ppSeqNo, ppSeqNo + self.config.CHK_FREQ - 1

        if len(state.digests) == self.config.CHK_FREQ:
            state = updateNamedTuple(state, digest=serialize(state.digests),
                                     digests=[])
            self.checkpoints[s, e] = state
            self.send(Checkpoint(self.instId, self.viewNo, ppSeqNo,
                                 state.digest))

    def markCheckPointStable(self, seqNo):
        previousCheckpoints = []
        for (s, e), state in self.checkpoints.items():
            if e == seqNo:
                state = updateNamedTuple(state, isStable=True)
                self.checkpoints[s, e] = state
                break
            else:
                previousCheckpoints.append((s, e))
        else:
            logger.error("{} could not find {} in checkpoints".
                         format(self, seqNo))
            return
        self.h = seqNo
        for k in previousCheckpoints:
            logger.debug("{} removing previous checkpoint {}".format(self, k))
            self.checkpoints.pop(k)
        self.gc(seqNo)
        logger.debug("{} marked stable checkpoint {}".format(self, (s, e)))
        self.processStashedMsgsForNewWaterMarks()

    def gc(self, tillSeqNo):
        logger.debug("{} cleaning up till {}".format(self, tillSeqNo))
        tpcKeys = set()
        reqKeys = set()
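        # Collect the 3-phase keys and request keys at or below the new stable
        # checkpoint so the corresponding bookkeeping can be released below.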
        for (v, p), (reqKey, _) in self.sentPrePrepares.items():
            if p <= tillSeqNo:
                tpcKeys.add((v, p))
                reqKeys.add(reqKey)
        for (v, p), (reqKey, _) in self.prePrepares.items():
            if p <= tillSeqNo:
                tpcKeys.add((v, p))
                reqKeys.add(reqKey)

        logger.debug("{} found {} 3 phase keys to clean".
                     format(self, len(tpcKeys)))
        logger.debug("{} found {} request keys to clean".
                     format(self, len(reqKeys)))

        for k in tpcKeys:
            self.sentPrePrepares.pop(k, None)
            self.prePrepares.pop(k, None)
            self.prepares.pop(k, None)
            self.commits.pop(k, None)
            if k in self.ordered:
                self.ordered.remove(k)

        for k in reqKeys:
            self.requests.pop(k, None)

    def processStashedMsgsForNewWaterMarks(self):
        while self.stashingWhileOutsideWaterMarks:
            item = self.stashingWhileOutsideWaterMarks.pop()
            logger.debug("{} processing stashed item {} after new stable "
                         "checkpoint".format(self, item))

            if isinstance(item, ReqDigest):
                self.doPrePrepare(item)
            elif isinstance(item, tuple) and len(item) == 2:
                self.dispatchThreePhaseMsg(*item)
            else:
                logger.error("{} cannot process {} "
                             "from stashingWhileOutsideWaterMarks".
                             format(self, item))

    @property
    def firstCheckPoint(self) -> Tuple[Tuple[int, int], CheckpointState]:
        if not self.checkpoints:
            return None
        else:
            return self.checkpoints.peekitem(0)

    @property
    def lastCheckPoint(self) -> Tuple[Tuple[int, int], CheckpointState]:
        if not self.checkpoints:
            return None
        else:
            return self.checkpoints.peekitem(-1)

    def isPpSeqNoAcceptable(self, ppSeqNo: int):
        return self.h < ppSeqNo <= self.H

    def addToOrdered(self, viewNo: int, ppSeqNo: int):
        self.ordered.add((viewNo, ppSeqNo))

    def enqueuePrePrepare(self, request: PrePrepare, sender: str):
        logger.debug("Queueing pre-prepares due to unavailability of finalised "
                     "Request. Request {} from {}".format(request, sender))
        key = (request.identifier, request.reqId)
        if key not in self.prePreparesPendingReqDigest:
            self.prePreparesPendingReqDigest[key] = []
        self.prePreparesPendingReqDigest[key].append((request, sender))

    def dequeuePrePrepare(self, identifier: int, reqId: int):
        key = (identifier, reqId)
        if key in self.prePreparesPendingReqDigest:
            pps = self.prePreparesPendingReqDigest[key]
            for (pp, sender) in pps:
                logger.debug("{} popping stashed PRE-PREPARE{}".
                             format(self, key))
                if pp.digest == self.requests.digest(key):
                    self.prePreparesPendingReqDigest.pop(key)
                    self.processPrePrepare(pp, sender)
                    logger.debug(
                        "{} processed {} PRE-PREPAREs waiting for finalised "
                        "request for identifier {} and reqId {}".
                        format(self, pp, identifier, reqId))
                    break

    def enqueuePrepare(self, request: Prepare, sender: str):
        logger.debug("Queueing prepares due to unavailability of PRE-PREPARE. "
                     "Request {} from {}".format(request, sender))
        key = (request.viewNo, request.ppSeqNo)
        if key not in self.preparesWaitingForPrePrepare:
            self.preparesWaitingForPrePrepare[key] = deque()
        self.preparesWaitingForPrePrepare[key].append((request, sender))

    def dequeuePrepares(self, viewNo: int, ppSeqNo: int):
        key = (viewNo, ppSeqNo)
        if key in self.preparesWaitingForPrePrepare:
            i = 0
            # Keys of pending prepares that will be processed below
            while self.preparesWaitingForPrePrepare[key]:
                prepare, sender = self.preparesWaitingForPrePrepare[
                    key].popleft()
                logger.debug("{} popping stashed PREPARE{}".format(self, key))
                self.processPrepare(prepare, sender)
                i += 1
            self.preparesWaitingForPrePrepare.pop(key)
            logger.debug("{} processed {} PREPAREs waiting for PRE-PREPARE for"
                         " view no {} and seq no {}".
                         format(self, i, viewNo, ppSeqNo))

    def enqueueCommit(self, request: Commit, sender: str):
        logger.debug("Queueing commit due to unavailability of PREPARE. "
                     "Request {} from {}".format(request, sender))
        key = (request.viewNo, request.ppSeqNo)
        if key not in self.commitsWaitingForPrepare:
            self.commitsWaitingForPrepare[key] = deque()
        self.commitsWaitingForPrepare[key].append((request, sender))

    def dequeueCommits(self, viewNo: int, ppSeqNo: int):
        key = (viewNo, ppSeqNo)
        if key in self.commitsWaitingForPrepare:
            i = 0
            # Keys of pending commits that will be processed below
            while self.commitsWaitingForPrepare[key]:
                commit, sender = self.commitsWaitingForPrepare[
                    key].popleft()
                logger.debug("{} popping stashed COMMIT{}".format(self, key))
                self.processCommit(commit, sender)
                i += 1
            self.commitsWaitingForPrepare.pop(key)
            logger.debug("{} processed {} COMMITs waiting for PREPARE for"
                         " view no {} and seq no {}".
                         format(self, i, viewNo, ppSeqNo))

    def getDigestFor3PhaseKey(self, key: ThreePhaseKey) -> Optional[str]:
        reqKey = self.getReqKeyFrom3PhaseKey(key)
        digest = self.requests.digest(reqKey)
        if not digest:
            logger.debug("{} could not find digest in sent or received "
                         "PRE-PREPAREs or PREPAREs for 3 phase key {} and req "
                         "key {}".format(self, key, reqKey))
            return None
        else:
            return digest

    def getReqKeyFrom3PhaseKey(self, key: ThreePhaseKey):
        reqKey = None
        if key in self.sentPrePrepares:
            reqKey = self.sentPrePrepares[key][0]
        elif key in self.prePrepares:
            reqKey = self.prePrepares[key][0]
        elif key in self.prepares:
            reqKey = self.prepares[key][0]
        else:
            logger.debug("Could not find request key for 3 phase key {}".
                         format(key))
        return reqKey

    @property
    def threePhaseState(self):
        # TODO: This method is incomplete
        # Gets the current stable and unstable checkpoints and creates digest
        # of unstable checkpoints
        if self.checkpoints:
            pass
        else:
            state = []
        return ThreePCState(self.instId, state)

    def process3PhaseState(self, msg: ThreePCState, sender: str):
        # TODO: This is not complete
        pass

    def send(self, msg, stat=None) -> None:
        """
        Send a message to the node on which this replica resides.

        :param msg: the message to send
        """
        logger.display("{} sending {}".format(self, msg.__class__.__name__),
                       extra={"cli": True})
        logger.trace("{} sending {}".format(self, msg))
        if stat:
            self.stats.inc(stat)
        self.outBox.append(msg)
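
Note: the ordered bookkeeping above relies on OrderedSet combining set-style
membership tests (hasOrdered) with insertion-ordered indexing (isNextInOrdering
and orderStashedCommits read ordered[-1]). A minimal standalone sketch of that
pattern, using only the operations shown in the examples above; the import path
is an assumption and differs between the projects listed here:

from orderedset import OrderedSet  # assumed import; adjust to the project's OrderedSet

ordered = OrderedSet()        # (viewNo, ppSeqNo) keys in the order they were ordered
ordered.add((0, 1))
ordered.add((0, 2))
print((0, 1) in ordered)      # fast membership test, as in hasOrdered -> True
print(ordered[-1])            # most recently added key, as in isNextInOrdering -> (0, 2)
ordered.remove((0, 1))        # entries are removed in gc() once a checkpoint is stable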
Code example #21
File: cmt_2phase.py Project: mschmidt87/VeRyPy
def _phase_one(lambda_multiplier, D,d,C,L, seed_f, rr):
    """ This imlements the fist phase of the algorithm. Sequentally
     add nodes to an emerging node. Different seed node selection 
     functions (above) can be used. """
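    # In this excerpt D is the distance matrix with the depot at index 0,
    # d holds the customer demands, C is the vehicle capacity constraint and
    # L the maximum route cost constraint (C and L may be None when unused).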
    
    route_seeds = []
    N = len(D)
    sol = []
    total_cost = 0.0
    
    customer_nodes = list(range(1, N))  # list so it can be shuffled in place
    if rr is not None:
        shuffle(customer_nodes)
        rr-=1
    unrouted = OrderedSet(customer_nodes)
    
    if __debug__:
        log(DEBUG, "## Sequential route bulding phase ##")
    
    route_idx = 0
    
    try:
        while unrouted:
            ## Step 1: choose unrouted i_k to act as a route seed point
            
            route_seed_k = seed_f(D, d, unrouted)
            unrouted.remove(route_seed_k)
            
            route_seeds.append(route_seed_k)
            route_demand = d[route_seed_k] if C else 0
            route = [0, route_seed_k]
            route_cost = D[0,route_seed_k]+D[route_seed_k,0]
            route_l_updated = True
            route_idx += 1
            
            if __debug__:
                log(DEBUG, "Initialize route #%d with n%d"%
                           (route_idx, route_seed_k))
            
            ## Step 2: Compute savings
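            # Insertion criterion for each unrouted node j:
            #   D[0, j] + lambda_multiplier * D[j, route_seed_k]
            # candidates are then tried in ascending order of this value.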
            
            s_vals = (D[[0],unrouted]+lambda_multiplier*
                      D[unrouted,[route_seed_k]]).tolist()
            savings = sorted(zip(s_vals, unrouted))
            for best_saving, i in savings:
                ## Step 3: insert until feasibility is broken
            
                if __debug__:
                    log(DEBUG, "Check feasibility of inserting n%d with savings=%.2f"%
                               (i,best_saving))
        
                #TODO: To improve performance keep track of minimal unrouted d and
                # break the savings loop if we see that there are no feasible
                # insertions to be made.
                
                if C and route_demand+d[i]-C_EPS>C:
                    # capacity constraint violated, route complete
                    if __debug__:
                        log(DEBUG,"Insertion would break C constraint, skip.")
                    continue
    
                #TODO: To improve performance, keep track of minimal possible 
                # cost increase to the route. This would involve updating e.g.
                # Held-Karp (1970) lower bound for the route.
                
                # Use upper bound estimate to save some computations. We know 
                #  that the maximum route cost constraint cannot be violated.
                UB_route_cost = route_cost-D[route[-1],0]+D[route[-1],i]+D[i,0]            
                if L and UB_route_cost-S_EPS>L:
                    new_route, new_route_cost = solve_tsp(D, route+[i])
                    if __debug__:
                        log(DEBUG-1,"Got TSP solution %s (%.2f)"%
                            (list(new_route),new_route_cost))
             
                    if new_route_cost-S_EPS>L:
                        if __debug__:
                            log(DEBUG, "Insertion would break L constraint, skip.")
                        continue
                    route_cost = new_route_cost
                    route_l_updated = True
                else:
                    route_l_updated = False
                    route_cost = UB_route_cost
                    new_route = route+[i,0]
                    
                # accept including node i
                route=new_route[:-1]
                if C: route_demand+=d[i]
                unrouted.remove(i)
    
                if __debug__:
                    log(DEBUG, "Inserted n%d to create a route %s (%.2f)."%
                        (i, route, route_cost))
        
            # if L is not set, optimize TSP after the route is full
            if not route_l_updated:
                new_route, route_cost = solve_tsp(D, route)
                route = new_route[:-1]
                if __debug__:
                    if not L:
                        log(DEBUG-1,"Got TSP solution %s (%.2f)" %
                            (str(route+[0]), route_cost))
            if __debug__:
                log(DEBUG, "Route %s (%.2f) complete.\n"%
                          (str(route+[0]),route_cost))
     
            total_cost += route_cost
            sol+=route
    except KeyboardInterrupt:  #or SIGINT
        interrupted_sol = sol+routes2sol([n] for n in unrouted if n not in sol)
        raise KeyboardInterrupt(interrupted_sol)
        
    sol+=[0]
    
    if __debug__:
        log(DEBUG, "Phase 1 solution %s (%.2f) complete.\n"%(str(sol),total_cost))
        log(DEBUG-1, "Pass on route seeds %s to Phase 2.\n"%str(route_seeds))            
    
    return route_seeds, sol, total_cost, rr
Code example #22
File: replica.py Project: loxadim/plenum
class Replica(HasActionQueue, MessageProcessor):
    def __init__(self,
                 node: 'plenum.server.node.Node',
                 instId: int,
                 isMaster: bool = False):
        """
        Create a new replica.

        :param node: Node on which this replica is located
        :param instId: the id of the protocol instance the replica belongs to
        :param isMaster: is this a replica of the master protocol instance
        """
        HasActionQueue.__init__(self)
        self.stats = Stats(TPCStat)

        self.config = getConfig()

        routerArgs = [(ReqDigest, self._preProcessReqDigest)]

        for r in [PrePrepare, Prepare, Commit]:
            routerArgs.append((r, self.processThreePhaseMsg))

        routerArgs.append((Checkpoint, self.processCheckpoint))
        routerArgs.append((ThreePCState, self.process3PhaseState))

        self.inBoxRouter = Router(*routerArgs)

        self.threePhaseRouter = Router((PrePrepare, self.processPrePrepare),
                                       (Prepare, self.processPrepare),
                                       (Commit, self.processCommit))

        self.node = node
        self.instId = instId

        self.name = self.generateName(node.name, self.instId)

        self.outBox = deque()
        """
        This queue is used by the replica to send messages to its node. Replica
        puts messages that are consumed by its node
        """

        self.inBox = deque()
        """
        This queue is used by the replica to receive messages from its node.
        Node puts messages that are consumed by the replica
        """

        self.inBoxStash = deque()
        """
        If messages need to go back on the queue, they go here temporarily and
        are put back on the queue on a state change
        """

        self.isMaster = isMaster

        # Indicates name of the primary replica of this protocol instance.
        # None in case the replica does not know who the primary of the
        # instance is
        self._primaryName = None  # type: Optional[str]

        # Requests waiting to be processed once the replica is able to decide
        # whether it is primary or not
        self.postElectionMsgs = deque()

        # PRE-PREPAREs that are waiting to be processed but do not have the
        # corresponding request digest. Happens when replica has not been
        # forwarded the request by the node but is getting 3 phase messages.
        # The value is a list since a malicious entity might send a
        # PRE-PREPARE with a different digest, and since we don't have the
        # request finalised, we store all PRE-PREPAREs
        self.prePreparesPendingReqDigest = {
        }  # type: Dict[Tuple[str, int], List]

        # PREPAREs that are stored by non primary replica for which it has not
        #  got any PRE-PREPARE. Dictionary that stores a tuple of view no and
        #  prepare sequence number as key and a deque of PREPAREs as value.
        # This deque is attempted to be flushed on receiving every
        # PRE-PREPARE request.
        self.preparesWaitingForPrePrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # COMMITs that are stored for which there are no PRE-PREPARE or PREPARE
        # received
        self.commitsWaitingForPrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # Dictionary of sent PRE-PREPARE that are stored by primary replica
        # which it has broadcasted to all other non primary replicas
        # Key of dictionary is a 2 element tuple with elements viewNo,
        # pre-prepare seqNo and value is a tuple of Request Digest and time
        self.sentPrePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received PRE-PREPAREs. Key of dictionary is a 2
        # element tuple with elements viewNo, pre-prepare seqNo and value is
        # a tuple of Request Digest and time
        self.prePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received Prepare requests. Key of dictionary is a 2
        # element tuple with elements viewNo, seqNo and value is a 2 element
        # tuple containing request digest and set of sender node names(sender
        # replica names in case of multiple protocol instances)
        # (viewNo, seqNo) -> ((identifier, reqId), {senders})
        self.prepares = Prepares()
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], Set[str]]]

        self.commits = Commits()  # type: Dict[Tuple[int, int],
        # Tuple[Tuple[str, int], Set[str]]]

        # Set of tuples to keep track of ordered requests. Each tuple is
        # (viewNo, ppSeqNo)
        self.ordered = OrderedSet()  # type: OrderedSet[Tuple[int, int]]

        # Dictionary to keep track of which replica was primary during each
        # view. Key is the view no and value is the name of the primary
        # replica during that view
        self.primaryNames = {}  # type: Dict[int, str]

        # Holds msgs that are for later views
        self.threePhaseMsgsForLaterView = deque()
        # type: deque[(ThreePhaseMsg, str)]

        # Holds tuple of view no and prepare seq no of 3-phase messages it
        # received while it was not participating
        self.stashingWhileCatchingUp = set()  # type: Set[Tuple]

        # Commits which are not being ordered since commits with lower view
        # numbers and sequence numbers have not been ordered yet. Key is the
        # viewNo and value a map of pre-prepare sequence number to commit
        self.stashedCommitsForOrdering = {}  # type: Dict[int,
        # Dict[int, Commit]]

        self.checkpoints = SortedDict(lambda k: k[0])

        self.stashingWhileOutsideWaterMarks = deque()

        # Low water mark
        self._h = 0  # type: int

        # High water mark
        self.H = self._h + self.config.LOG_SIZE  # type: int

        self.lastPrePrepareSeqNo = self.h  # type: int

    @property
    def h(self) -> int:
        return self._h

    @h.setter
    def h(self, n):
        self._h = n
        self.H = self._h + self.config.LOG_SIZE

    @property
    def requests(self):
        return self.node.requests

    def shouldParticipate(self, viewNo: int, ppSeqNo: int):
        # The replica participates only if its node is participating in the
        # consensus process and the replica did not stash any of this
        # request's 3-phase messages
        return self.node.isParticipating and (viewNo, ppSeqNo) \
                                             not in self.stashingWhileCatchingUp

    @staticmethod
    def generateName(nodeName: str, instId: int):
        """
        Create and return the name for a replica using its nodeName and
        instanceId.
         Ex: Alpha:1
        """
        return "{}:{}".format(nodeName, instId)

    @staticmethod
    def getNodeName(replicaName: str):
        return replicaName.split(":")[0]

    @property
    def isPrimary(self):
        """
        Is this node primary?

        :return: True if this replica is primary, False if it is not, and
            None if its primary status is not yet known
        """
        return self._primaryName == self.name if self._primaryName is not None \
            else None

    @property
    def primaryName(self):
        """
        Name of the primary replica of this replica's instance

        :return: Returns name if primary is known, None otherwise
        """
        return self._primaryName

    @primaryName.setter
    def primaryName(self, value: Optional[str]) -> None:
        """
        Set the value of isPrimary.

        :param value: the value to set isPrimary to
        """
        if not value == self._primaryName:
            self._primaryName = value
            self.primaryNames[self.viewNo] = value
            logger.debug("{} setting primaryName for view no {} to: {}".format(
                self, self.viewNo, value))
            logger.debug("{}'s primaryNames for views are: {}".format(
                self, self.primaryNames))
            self._stateChanged()

    def _stateChanged(self):
        """
        A series of actions to be performed when the state of this replica
        changes.

        - UnstashInBox (see _unstashInBox)
        """
        self._unstashInBox()
        if self.isPrimary is not None:
            # TODO handle suspicion exceptions here
            self.process3PhaseReqsQueue()
            # TODO handle suspicion exceptions here
            try:
                self.processPostElectionMsgs()
            except SuspiciousNode as ex:
                self.outBox.append(ex)
                self.discard(ex.msg, ex.reason, logger.warning)

    def _stashInBox(self, msg):
        """
        Stash the specified message into the inBoxStash of this replica.

        :param msg: the message to stash
        """
        self.inBoxStash.append(msg)

    def _unstashInBox(self):
        """
        Append the inBoxStash to the right of the inBox.
        """
        self.inBox.extend(self.inBoxStash)
        self.inBoxStash.clear()

    def __repr__(self):
        return self.name

    @property
    def f(self) -> int:
        """
        Return the number of Byzantine Failures that can be tolerated by this
        system. Equal to (N - 1)/3, where N is the number of nodes in the
        system.
        """
        return self.node.f

    @property
    def viewNo(self):
        """
        Return the current view number of this replica.
        """
        return self.node.viewNo

    def isPrimaryInView(self, viewNo: int) -> Optional[bool]:
        """
        Return whether this replica was the primary in the given view number.
        """
        return self.primaryNames[viewNo] == self.name

    def isMsgForLaterView(self, msg):
        """
        Return whether this request's view number is greater than the current
        view number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo > self.viewNo

    def isMsgForCurrentView(self, msg):
        """
        Return whether this request's view number is equal to the current view
        number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo == self.viewNo

    def isMsgForPrevView(self, msg):
        """
        Return whether this request's view number is less than the current view
        number of this replica.
        """
        viewNo = getattr(msg, "viewNo", None)
        return viewNo < self.viewNo

    def isPrimaryForMsg(self, msg) -> Optional[bool]:
        """
        Return whether this replica is the primary for the view that the
        message belongs to: the current primary status if the message is for
        the current view, otherwise whether this replica was primary in the
        message's view. Return None if the message is for a later view.

        :param msg: message
        """
        if self.isMsgForLaterView(msg):
            self.discard(
                msg, "Cannot get primary status for a request for a later "
                "view; current view is {}. Request is {}".format(
                    self.viewNo, msg),
                logger.error)
        else:
            return self.isPrimary if self.isMsgForCurrentView(msg) \
                else self.isPrimaryInView(msg.viewNo)

    def isMsgFromPrimary(self, msg, sender: str) -> bool:
        """
        Return whether this message was sent by the primary of the view that
        the message belongs to.

        :param msg: the message to check
        :param sender: name of the node that sent the message
        :return: True if the sender is the primary for the message's view
        """
        if self.isMsgForLaterView(msg):
            logger.error("{} cannot get primary for a request for a later "
                         "view. Request is {}".format(self, msg))
        else:
            return self.primaryName == sender if self.isMsgForCurrentView(
                msg) else self.primaryNames[msg.viewNo] == sender

    def _preProcessReqDigest(self, rd: ReqDigest) -> None:
        """
        Process request digest if this replica is not a primary, otherwise stash
        the message into the inBox.

        :param rd: the client Request Digest
        """
        if self.isPrimary is not None:
            self.processReqDigest(rd)
        else:
            logger.debug(
                "{} stashing request digest {} since it does not know "
                "its primary status".format(self, (rd.identifier, rd.reqId)))
            self._stashInBox(rd)

    def serviceQueues(self, limit=None):
        """
        Process `limit` number of messages in the inBox.

        :param limit: the maximum number of messages to process
        :return: the number of messages successfully processed
        """
        # TODO should handle SuspiciousNode here
        r = self.inBoxRouter.handleAllSync(self.inBox, limit)
        r += self._serviceActions()
        return r
        # Messages that cannot be processed right now need to be added back to
        # the queue; they might be processable later

    def processPostElectionMsgs(self):
        """
        Process messages waiting for the election of a primary replica to
        complete.
        """
        while self.postElectionMsgs:
            msg = self.postElectionMsgs.popleft()
            logger.debug("{} processing pended msg {}".format(self, msg))
            self.dispatchThreePhaseMsg(*msg)

    def process3PhaseReqsQueue(self):
        """
        Process the 3 phase requests from the queue whose view number is equal
        to the current view number of this replica.
        """
        unprocessed = deque()
        while self.threePhaseMsgsForLaterView:
            request, sender = self.threePhaseMsgsForLaterView.popleft()
            logger.debug("{} processing pended 3 phase request: {}".format(
                self, request))
            # If the request is for a later view, don't try to process it but
            # add it back to the queue.
            if self.isMsgForLaterView(request):
                unprocessed.append((request, sender))
            else:
                self.processThreePhaseMsg(request, sender)
        self.threePhaseMsgsForLaterView = unprocessed

    @property
    def quorum(self) -> int:
        r"""
        Return the quorum of this RBFT system. Equal to :math:`2f + 1`.
        Return None if `f` is not yet determined.
        """
        return self.node.quorum
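
    # Illustrative note (not part of the original source): with N = 4 nodes the
    # system tolerates f = (4 - 1) // 3 = 1 faulty node, so the quorum returned
    # above would be 2 * 1 + 1 = 3 matching messages.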

    def dispatchThreePhaseMsg(self, msg: ThreePhaseMsg, sender: str) -> Any:
        """
        Create a three phase request to be handled by the threePhaseRouter.

        :param msg: the ThreePhaseMsg to dispatch
        :param sender: the name of the node that sent this request
        """
        senderRep = self.generateName(sender, self.instId)
        if self.isPpSeqNoAcceptable(msg.ppSeqNo):
            try:
                self.threePhaseRouter.handleSync((msg, senderRep))
            except SuspiciousNode as ex:
                self.node.reportSuspiciousNodeEx(ex)
        else:
            logger.debug("{} stashing 3 phase message {} since ppSeqNo {} is "
                         "not between {} and {}".format(
                             self, msg, msg.ppSeqNo, self.h, self.H))
            self.stashingWhileOutsideWaterMarks.append((msg, sender))

    def processReqDigest(self, rd: ReqDigest):
        """
        Process a request digest. Works only if this replica has decided its
        primary status.

        :param rd: the client request digest to process
        """
        self.stats.inc(TPCStat.ReqDigestRcvd)
        if self.isPrimary is False:
            self.dequeuePrePrepare(rd.identifier, rd.reqId)
        else:
            self.doPrePrepare(rd)

    def processThreePhaseMsg(self, msg: ThreePhaseMsg, sender: str):
        """
        Process a 3-phase (pre-prepare, prepare and commit) request.
        Dispatch the request only if primary has already been decided, otherwise
        stash it.

        :param msg: the Three Phase message, one of PRE-PREPARE, PREPARE,
            COMMIT
        :param sender: name of the node that sent this message
        """
        # Can only proceed further if it knows whether it is primary or not
        if self.isMsgForLaterView(msg):
            self.threePhaseMsgsForLaterView.append((msg, sender))
            logger.debug(
                "{} pended received 3 phase request for a later view: "
                "{}".format(self, msg))
        else:
            if self.isPrimary is None:
                self.postElectionMsgs.append((msg, sender))
                logger.debug("Replica {} pended request {} from {}".format(
                    self, msg, sender))
            else:
                self.dispatchThreePhaseMsg(msg, sender)

    def processPrePrepare(self, pp: PrePrepare, sender: str):
        """
        Validate and process the PRE-PREPARE specified.
        If validation is successful, create a PREPARE and broadcast it.

        :param pp: a prePrepareRequest
        :param sender: name of the node that sent this message
        """
        key = (pp.viewNo, pp.ppSeqNo)
        logger.debug("{} Receiving PRE-PREPARE{} at {} from {}".format(
            self, key, time.perf_counter(), sender))
        if self.canProcessPrePrepare(pp, sender):
            if not self.node.isParticipating:
                self.stashingWhileCatchingUp.add(key)
            self.addToPrePrepares(pp)
            logger.info("{} processed incoming PRE-PREPARE{}".format(
                self, key))

    def tryPrepare(self, pp: PrePrepare):
        """
        Try to send the Prepare message if the PrePrepare message is ready to
        be passed into the Prepare phase.
        """
        if self.canSendPrepare(pp):
            self.doPrepare(pp)
        else:
            logger.debug("{} cannot send PREPARE".format(self))

    def processPrepare(self, prepare: Prepare, sender: str) -> None:
        """
        Validate and process the PREPARE specified.
        If validation is successful, create a COMMIT and broadcast it.

        :param prepare: a PREPARE msg
        :param sender: name of the node that sent the PREPARE
        """
        # TODO move this try/except up higher
        logger.debug("{} received PREPARE{} from {}".format(
            self, (prepare.viewNo, prepare.ppSeqNo), sender))
        try:
            if self.isValidPrepare(prepare, sender):
                self.addToPrepares(prepare, sender)
                self.stats.inc(TPCStat.PrepareRcvd)
                logger.debug("{} processed incoming PREPARE {}".format(
                    self, (prepare.viewNo, prepare.ppSeqNo)))
            else:
                # TODO let's have isValidPrepare throw an exception that gets
                # handled and possibly logged higher
                logger.warning(
                    "{} cannot process incoming PREPARE".format(self))
        except SuspiciousNode as ex:
            self.node.reportSuspiciousNodeEx(ex)

    def processCommit(self, commit: Commit, sender: str) -> None:
        """
        Validate and process the COMMIT specified.
        If validation is successful, return the message to the node.

        :param commit: an incoming COMMIT message
        :param sender: name of the node that sent the COMMIT
        """
        logger.debug("{} received COMMIT {} from {}".format(
            self, commit, sender))
        if self.isValidCommit(commit, sender):
            self.stats.inc(TPCStat.CommitRcvd)
            self.addToCommits(commit, sender)
            logger.debug("{} processed incoming COMMIT{}".format(
                self, (commit.viewNo, commit.ppSeqNo)))

    def tryCommit(self, prepare: Prepare):
        """
        Try to commit if the Prepare message is ready to be passed into the
        commit phase.
        """
        if self.canCommit(prepare):
            self.doCommit(prepare)
        else:
            logger.debug("{} not yet able to send COMMIT".format(self))

    def tryOrder(self, commit: Commit):
        """
        Try to order if the Commit message is ready to be ordered.
        """
        canOrder, reason = self.canOrder(commit)
        if canOrder:
            logger.debug("{} returning request to node".format(self))
            self.tryOrdering(commit)
        else:
            logger.trace("{} cannot return request to node: {}".format(
                self, reason))

    def doPrePrepare(self, reqDigest: ReqDigest) -> None:
        """
        Broadcast a PRE-PREPARE to all the replicas.

        :param reqDigest: a tuple with elements identifier, reqId, and digest
        """
        if not self.node.isParticipating:
            logger.error("Non participating node is attempting PRE-PREPARE. "
                         "This should not happen.")
            return

        if self.lastPrePrepareSeqNo == self.H:
            logger.debug("{} stashing PRE-PREPARE {} since outside greater "
                         "than high water mark {}".format(
                             self, (self.viewNo, self.lastPrePrepareSeqNo + 1),
                             self.H))
            self.stashingWhileOutsideWaterMarks.append(reqDigest)
            return
        self.lastPrePrepareSeqNo += 1
        tm = time.time() * 1000
        logger.debug("{} Sending PRE-PREPARE {} at {}".format(
            self, (self.viewNo, self.lastPrePrepareSeqNo),
            time.perf_counter()))
        prePrepareReq = PrePrepare(self.instId, self.viewNo,
                                   self.lastPrePrepareSeqNo, *reqDigest, tm)
        self.sentPrePrepares[self.viewNo,
                             self.lastPrePrepareSeqNo] = (reqDigest.key, tm)
        self.send(prePrepareReq, TPCStat.PrePrepareSent)

    def doPrepare(self, pp: PrePrepare):
        logger.debug("{} Sending PREPARE {} at {}".format(
            self, (pp.viewNo, pp.ppSeqNo), time.perf_counter()))
        prepare = Prepare(self.instId, pp.viewNo, pp.ppSeqNo, pp.digest,
                          pp.ppTime)
        self.send(prepare, TPCStat.PrepareSent)
        self.addToPrepares(prepare, self.name)

    def doCommit(self, p: Prepare):
        """
        Create a commit message from the given Prepare message and trigger the
        commit phase
        :param p: the prepare message
        """
        logger.debug("{} Sending COMMIT{} at {}".format(
            self, (p.viewNo, p.ppSeqNo), time.perf_counter()))
        commit = Commit(self.instId, p.viewNo, p.ppSeqNo, p.digest, p.ppTime)
        self.send(commit, TPCStat.CommitSent)
        self.addToCommits(commit, self.name)

    def canProcessPrePrepare(self, pp: PrePrepare, sender: str) -> bool:
        """
        Decide whether this replica is eligible to process a PRE-PREPARE,
        based on the following criteria:

        - this replica is a non-primary replica
        - the request isn't in its list of received PRE-PREPAREs
        - the corresponding client request is finalised and its digest matches
          the PRE-PREPARE's digest

        :param pp: a PRE-PREPARE msg to process
        :param sender: the name of the node that sent the PRE-PREPARE msg
        :return: True if processing is allowed, False otherwise
        """
        # TODO: Check whether it is rejecting PRE-PREPARE from previous view
        # PRE-PREPARE should not be sent from non primary
        if not self.isMsgFromPrimary(pp, sender):
            raise SuspiciousNode(sender, Suspicions.PPR_FRM_NON_PRIMARY, pp)

        # A PRE-PREPARE is being sent to primary
        if self.isPrimaryForMsg(pp) is True:
            raise SuspiciousNode(sender, Suspicions.PPR_TO_PRIMARY, pp)

        # A PRE-PREPARE is sent that has already been received
        if (pp.viewNo, pp.ppSeqNo) in self.prePrepares:
            raise SuspiciousNode(sender, Suspicions.DUPLICATE_PPR_SENT, pp)

        key = (pp.identifier, pp.reqId)
        if not self.requests.isFinalised(key):
            self.enqueuePrePrepare(pp, sender)
            return False

        # A PRE-PREPARE is sent that does not match request digest
        if self.requests.digest(key) != pp.digest:
            raise SuspiciousNode(sender, Suspicions.PPR_DIGEST_WRONG, pp)

        return True

    def addToPrePrepares(self, pp: PrePrepare) -> None:
        """
        Add the specified PRE-PREPARE to this replica's list of received
        PRE-PREPAREs.

        :param pp: the PRE-PREPARE to add to the list
        """
        key = (pp.viewNo, pp.ppSeqNo)
        self.prePrepares[key] = \
            ((pp.identifier, pp.reqId), pp.ppTime)
        self.dequeuePrepares(*key)
        self.dequeueCommits(*key)
        self.stats.inc(TPCStat.PrePrepareRcvd)
        self.tryPrepare(pp)

    def hasPrepared(self, request) -> bool:
        return self.prepares.hasPrepareFrom(request, self.name)

    def canSendPrepare(self, request) -> bool:
        """
        Return whether the request identified by (identifier, reqId) can
        proceed to the Prepare step.

        :param request: any object with viewNo, ppSeqNo, identifier and reqId
            attributes
        """
        return self.shouldParticipate(request.viewNo, request.ppSeqNo) \
            and not self.hasPrepared(request) \
            and self.requests.isFinalised((request.identifier,
                                           request.reqId))

    def isValidPrepare(self, prepare: Prepare, sender: str) -> bool:
        """
        Return whether the PREPARE specified is valid.

        :param prepare: the PREPARE to validate
        :param sender: the name of the node that sent the PREPARE
        :return: True if PREPARE is valid, False otherwise
        """
        key = (prepare.viewNo, prepare.ppSeqNo)
        primaryStatus = self.isPrimaryForMsg(prepare)

        ppReqs = self.sentPrePrepares if primaryStatus else self.prePrepares

        # If a non primary replica and receiving a PREPARE request before a
        # PRE-PREPARE request, then proceed

        # PREPARE should not be sent from primary
        if self.isMsgFromPrimary(prepare, sender):
            raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)

        # If non primary replica
        if primaryStatus is False:
            if self.prepares.hasPrepareFrom(prepare, sender):
                raise SuspiciousNode(sender, Suspicions.DUPLICATE_PR_SENT,
                                     prepare)
            # If PRE-PREPARE not received for the PREPARE, might be slow network
            if key not in ppReqs:
                self.enqueuePrepare(prepare, sender)
                return False
            elif prepare.digest != self.requests.digest(ppReqs[key][0]):
                raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG,
                                     prepare)
            elif prepare.ppTime != ppReqs[key][1]:
                raise SuspiciousNode(sender, Suspicions.PR_TIME_WRONG, prepare)
            else:
                return True
        # If primary replica
        else:
            if self.prepares.hasPrepareFrom(prepare, sender):
                raise SuspiciousNode(sender, Suspicions.DUPLICATE_PR_SENT,
                                     prepare)
            # If PRE-PREPARE was not sent for this PREPARE, certainly
            # malicious behavior
            elif key not in ppReqs:
                raise SuspiciousNode(sender, Suspicions.UNKNOWN_PR_SENT,
                                     prepare)
            elif prepare.digest != self.requests.digest(ppReqs[key][0]):
                raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG,
                                     prepare)
            elif prepare.ppTime != ppReqs[key][1]:
                raise SuspiciousNode(sender, Suspicions.PR_TIME_WRONG, prepare)
            else:
                return True

    def addToPrepares(self, prepare: Prepare, sender: str):
        self.prepares.addVote(prepare, sender)
        self.tryCommit(prepare)

    def hasCommitted(self, request) -> bool:
        return self.commits.hasCommitFrom(
            ThreePhaseKey(request.viewNo, request.ppSeqNo), self.name)

    def canCommit(self, prepare: Prepare) -> bool:
        """
        Return whether the specified PREPARE can proceed to the Commit
        step.

        Decision criteria:

        - If this replica has received exactly 2f PREPARE requests then commit
        - If it has received fewer than 2f PREPARE requests then there is
            probably no consensus on the request yet; don't commit
        - If it has received more than 2f then a COMMIT has already been sent;
            don't commit

        :param prepare: the PREPARE
        """
        return self.shouldParticipate(prepare.viewNo, prepare.ppSeqNo) and \
            self.prepares.hasQuorum(prepare, self.f) and \
            not self.hasCommitted(prepare)
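
    # Worked example (illustrative, assuming f = 1): once this replica holds
    # 2 * f = 2 PREPARE votes for a (viewNo, ppSeqNo) and has not yet sent its
    # own COMMIT, canCommit() returns True and tryCommit() calls doCommit().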

    def isValidCommit(self, commit: Commit, sender: str) -> bool:
        """
        Return whether the COMMIT specified is valid.

        :param commit: the COMMIT to validate
        :return: True if `request` is valid, False otherwise
        """
        primaryStatus = self.isPrimaryForMsg(commit)
        ppReqs = self.sentPrePrepares if primaryStatus else self.prePrepares
        key = (commit.viewNo, commit.ppSeqNo)
        if key not in ppReqs:
            self.enqueueCommit(commit, sender)
            return False

        if (key not in self.prepares
                and key not in self.preparesWaitingForPrePrepare):
            logger.debug(
                "{} rejecting COMMIT{} due to lack of prepares".format(
                    self, key))
            # raise SuspiciousNode(sender, Suspicions.UNKNOWN_CM_SENT, commit)
            return False
        elif self.commits.hasCommitFrom(commit, sender):
            raise SuspiciousNode(sender, Suspicions.DUPLICATE_CM_SENT, commit)
        elif commit.digest != self.getDigestFor3PhaseKey(ThreePhaseKey(*key)):
            raise SuspiciousNode(sender, Suspicions.CM_DIGEST_WRONG, commit)
        elif key in ppReqs and commit.ppTime != ppReqs[key][1]:
            raise SuspiciousNode(sender, Suspicions.CM_TIME_WRONG, commit)
        else:
            return True

    def addToCommits(self, commit: Commit, sender: str):
        """
        Add the specified COMMIT to this replica's list of received
        commit requests.

        :param commit: the COMMIT to add to the list
        :param sender: the name of the node that sent the COMMIT
        """
        self.commits.addVote(commit, sender)
        self.tryOrder(commit)

    def hasOrdered(self, viewNo, ppSeqNo) -> bool:
        return (viewNo, ppSeqNo) in self.ordered

    def canOrder(self, commit: Commit) -> Tuple[bool, Optional[str]]:
        """
        Return whether the specified commitRequest can be returned to the node.

        Decision criteria:

        - If exactly 2f + 1 COMMIT requests have been received then return the
            request to the node
        - If fewer than 2f + 1 COMMIT requests then there is probably no
            consensus on the request yet; don't return it to the node
        - If more than 2f + 1 then the request has already been returned to the
            node; don't return it again

        :param commit: the COMMIT
        """
        if not self.commits.hasQuorum(commit, self.f):
            return False, "no quorum: {} commits where f is {}".\
                          format(commit, self.f)

        if self.hasOrdered(commit.viewNo, commit.ppSeqNo):
            return False, "already ordered"

        if not self.isNextInOrdering(commit):
            viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
            if viewNo not in self.stashedCommitsForOrdering:
                self.stashedCommitsForOrdering[viewNo] = {}
            self.stashedCommitsForOrdering[viewNo][ppSeqNo] = commit
            # self._schedule(self.orderStashedCommits, 2)
            self.startRepeating(self.orderStashedCommits, 2)
            return False, "stashing {} since out of order".\
                format(commit)

        return True, None
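
    # Worked example (illustrative, assuming f = 1 and that (0, 5) was the last
    # ordered key): a COMMIT for (0, 6) backed by 2 * f + 1 = 3 commit votes is
    # ordered immediately, while a COMMIT for (0, 8) is stashed in
    # stashedCommitsForOrdering until (0, 6) and (0, 7) have been ordered.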

    def isNextInOrdering(self, commit: Commit):
        viewNo, ppSeqNo = commit.viewNo, commit.ppSeqNo
        if self.ordered and self.ordered[-1] == (viewNo, ppSeqNo - 1):
            return True
        for (v, p) in self.commits:
            if v < viewNo:
                # Have commits from previous view that are unordered.
                # TODO: Question: would commits be always ordered, what if
                # some are never ordered and its fine, go to PBFT.
                return False
            if v == viewNo and p < ppSeqNo and (v, p) not in self.ordered:
                # If unordered commits are found with lower ppSeqNo then this
                # cannot be ordered.
                return False

        # TODO: Revisit PBFT paper, how to make sure that last request of the
        # last view has been ordered? Need change in `VIEW CHANGE` mechanism.
        # Somehow view change needs to communicate what the last request was.
        # Also what if some COMMITs were completely missed in the same view
        return True

    def orderStashedCommits(self):
        # TODO: What if the first few commits were out of order and stashed?
        # `self.ordered` would be empty
        if self.ordered:
            lastOrdered = self.ordered[-1]
            vToRemove = set()
            for v in self.stashedCommitsForOrdering:
                if v < lastOrdered[0] and self.stashedCommitsForOrdering[v]:
                    raise RuntimeError(
                        "{} found commits from previous view {}"
                        " that were not ordered but last ordered"
                        " is {}".format(self, v, lastOrdered))
                pToRemove = set()
                for p, commit in self.stashedCommitsForOrdering[v].items():
                    if (v == lastOrdered[0] and lastOrdered == (v, p - 1)) or \
                            (v > lastOrdered[0] and
                                self.isLowestCommitInView(commit)):
                        logger.debug("{} ordering stashed commit {}".format(
                            self, commit))
                        if self.tryOrdering(commit):
                            lastOrdered = (v, p)
                            pToRemove.add(p)

                for p in pToRemove:
                    del self.stashedCommitsForOrdering[v][p]
                if not self.stashedCommitsForOrdering[v]:
                    vToRemove.add(v)

            for v in vToRemove:
                del self.stashedCommitsForOrdering[v]

            # if self.stashedCommitsForOrdering:
            #     self._schedule(self.orderStashedCommits, 2)
            if not self.stashedCommitsForOrdering:
                self.stopRepeating(self.orderStashedCommits)

    def isLowestCommitInView(self, commit):
        # TODO: Assumption: This assumes that at least one commit that was sent
        #  for any request by any node has been received in the view of this
        # commit
        ppSeqNos = []
        for v, p in self.commits:
            if v == commit.viewNo:
                ppSeqNos.append(p)
        return min(ppSeqNos) == commit.ppSeqNo if ppSeqNos else True

    def tryOrdering(self, commit: Commit) -> bool:
        """
        Attempt to send an ORDERED request for the specified COMMIT to the
        node.

        :param commit: the COMMIT message
        """
        key = (commit.viewNo, commit.ppSeqNo)
        logger.debug("{} trying to order COMMIT{}".format(self, key))
        reqKey = self.getReqKeyFrom3PhaseKey(key)  # type: Tuple
        digest = self.getDigestFor3PhaseKey(key)
        if not digest:
            logger.error(
                "{} did not find digest for {}, request key {}".format(
                    self, key, reqKey))
            return False
        self.doOrder(*key, *reqKey, digest, commit.ppTime)
        return True

    def doOrder(self, viewNo, ppSeqNo, identifier, reqId, digest, ppTime):
        key = (viewNo, ppSeqNo)
        self.addToOrdered(*key)
        ordered = Ordered(self.instId, viewNo, identifier, reqId, ppTime)
        # TODO: Should not order or add to checkpoint while syncing
        # 3 phase state.
        self.send(ordered, TPCStat.OrderSent)
        if key in self.stashingWhileCatchingUp:
            self.stashingWhileCatchingUp.remove(key)
        logger.debug("{} ordered request {}".format(self, (viewNo, ppSeqNo)))
        self.addToCheckpoint(ppSeqNo, digest)

    def processCheckpoint(self, msg: Checkpoint, sender: str):
        if self.checkpoints:
            seqNo = msg.seqNo
            _, firstChk = self.firstCheckPoint
            if firstChk.isStable:
                if firstChk.seqNo == seqNo:
                    self.discard(msg,
                                 reason="Checkpoint already stable",
                                 logMethod=logger.debug)
                    return
                if firstChk.seqNo > seqNo:
                    self.discard(msg,
                                 reason="Higher stable checkpoint present",
                                 logMethod=logger.debug)
                    return
            matchedState = None
            for state in self.checkpoints.values():
                if state.seqNo == seqNo:
                    if state.digest == msg.digest:
                        state.receivedDigests[sender] = msg.digest
                        matchedState = state
                        break
                    else:
                        logger.error("{} received an incorrect digest {} for "
                                     "checkpoint {} from {}".format(
                                         self, msg.digest, seqNo, sender))
                        return
            # Only tally digests if a checkpoint state matching this seqNo was
            # found; otherwise `state` could refer to an unrelated checkpoint
            if matchedState is not None and \
                    len(matchedState.receivedDigests) == 2 * self.f:
                self.markCheckPointStable(msg.seqNo)
        else:
            self.discard(msg,
                         reason="No checkpoints present to tally",
                         logMethod=logger.warning)

    def _newCheckpointState(self, ppSeqNo, digest) -> CheckpointState:
        s, e = ppSeqNo, ppSeqNo + self.config.CHK_FREQ - 1
        logger.debug("{} adding new checkpoint state for {}".format(
            self, (s, e)))
        state = CheckpointState(ppSeqNo, [
            digest,
        ], None, {}, False)
        self.checkpoints[s, e] = state
        return state

    def addToCheckpoint(self, ppSeqNo, digest):
        for (s, e) in self.checkpoints.keys():
            if s <= ppSeqNo <= e:
                state = self.checkpoints[s, e]  # type: CheckpointState
                state.digests.append(digest)
                state = updateNamedTuple(state, seqNo=ppSeqNo)
                self.checkpoints[s, e] = state
                break
        else:
            state = self._newCheckpointState(ppSeqNo, digest)
            # keep (s, e) consistent with the window key created above
            s, e = ppSeqNo, ppSeqNo + self.config.CHK_FREQ - 1

        if len(state.digests) == self.config.CHK_FREQ:
            state = updateNamedTuple(state,
                                     digest=serialize(state.digests),
                                     digests=[])
            self.checkpoints[s, e] = state
            self.send(
                Checkpoint(self.instId, self.viewNo, ppSeqNo, state.digest))
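
    # Illustrative sketch (not from the original source), assuming
    # CHK_FREQ = 100: ordered sequence numbers fall into windows such as
    # (1, 100), (101, 200), ...; once a window has collected CHK_FREQ digests,
    # they are serialized into a single checkpoint digest and a CHECKPOINT
    # message is broadcast for that window.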

    def markCheckPointStable(self, seqNo):
        previousCheckpoints = []
        for (s, e), state in self.checkpoints.items():
            if e == seqNo:
                state = updateNamedTuple(state, isStable=True)
                self.checkpoints[s, e] = state
                break
            else:
                previousCheckpoints.append((s, e))
        else:
            logger.error("{} could not find {} in checkpoints".format(
                self, seqNo))
            return
        self.h = seqNo
        for k in previousCheckpoints:
            logger.debug("{} removing previous checkpoint {}".format(self, k))
            self.checkpoints.pop(k)
        self.gc(seqNo)
        logger.debug("{} marked stable checkpoint {}".format(self, (s, e)))
        self.processStashedMsgsForNewWaterMarks()

    def gc(self, tillSeqNo):
        logger.debug("{} cleaning up till {}".format(self, tillSeqNo))
        tpcKeys = set()
        reqKeys = set()
        for (v, p), (reqKey, _) in self.sentPrePrepares.items():
            if p <= tillSeqNo:
                tpcKeys.add((v, p))
                reqKeys.add(reqKey)
        for (v, p), (reqKey, _) in self.prePrepares.items():
            if p <= tillSeqNo:
                tpcKeys.add((v, p))
                reqKeys.add(reqKey)

        logger.debug("{} found {} 3 phase keys to clean".format(
            self, len(tpcKeys)))
        logger.debug("{} found {} request keys to clean".format(
            self, len(reqKeys)))

        for k in tpcKeys:
            self.sentPrePrepares.pop(k, None)
            self.prePrepares.pop(k, None)
            self.prepares.pop(k, None)
            self.commits.pop(k, None)
            if k in self.ordered:
                self.ordered.remove(k)

        for k in reqKeys:
            self.requests.pop(k, None)

    def processStashedMsgsForNewWaterMarks(self):
        while self.stashingWhileOutsideWaterMarks:
            item = self.stashingWhileOutsideWaterMarks.pop()
            logger.debug("{} processing stashed item {} after new stable "
                         "checkpoint".format(self, item))

            if isinstance(item, ReqDigest):
                self.doPrePrepare(item)
            elif isinstance(item, tuple) and len(item) == 2:
                self.dispatchThreePhaseMsg(*item)
            else:
                logger.error("{} cannot process {} "
                             "from stashingWhileOutsideWaterMarks".format(
                                 self, item))

    @property
    def firstCheckPoint(self) -> Tuple[Tuple[int, int], CheckpointState]:
        if not self.checkpoints:
            return None
        else:
            return self.checkpoints.peekitem(0)

    @property
    def lastCheckPoint(self) -> Tuple[Tuple[int, int], CheckpointState]:
        if not self.checkpoints:
            return None
        else:
            return self.checkpoints.peekitem(-1)

    def isPpSeqNoAcceptable(self, ppSeqNo: int):
        return self.h < ppSeqNo <= self.H
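
    # Illustrative example (not part of the original source): with h = 0 and
    # LOG_SIZE = 300 the high water mark H is 300, so ppSeqNo 1..300 is
    # acceptable, while a 3-phase message with ppSeqNo 301 is stashed in
    # stashingWhileOutsideWaterMarks until the watermarks advance.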

    def addToOrdered(self, viewNo: int, ppSeqNo: int):
        self.ordered.add((viewNo, ppSeqNo))

    def enqueuePrePrepare(self, request: PrePrepare, sender: str):
        logger.debug(
            "Queueing pre-prepares due to unavailability of finalised "
            "Request. Request {} from {}".format(request, sender))
        key = (request.identifier, request.reqId)
        if key not in self.prePreparesPendingReqDigest:
            self.prePreparesPendingReqDigest[key] = []
        self.prePreparesPendingReqDigest[key].append((request, sender))

    def dequeuePrePrepare(self, identifier: int, reqId: int):
        key = (identifier, reqId)
        if key in self.prePreparesPendingReqDigest:
            pps = self.prePreparesPendingReqDigest[key]
            for (pp, sender) in pps:
                logger.debug("{} popping stashed PRE-PREPARE{}".format(
                    self, key))
                if pp.digest == self.requests.digest(key):
                    self.prePreparesPendingReqDigest.pop(key)
                    self.processPrePrepare(pp, sender)
                    logger.debug(
                        "{} processed {} PRE-PREPAREs waiting for finalised "
                        "request for identifier {} and reqId {}".format(
                            self, pp, identifier, reqId))
                    break

    def enqueuePrepare(self, request: Prepare, sender: str):
        logger.debug("Queueing prepares due to unavailability of PRE-PREPARE. "
                     "Request {} from {}".format(request, sender))
        key = (request.viewNo, request.ppSeqNo)
        if key not in self.preparesWaitingForPrePrepare:
            self.preparesWaitingForPrePrepare[key] = deque()
        self.preparesWaitingForPrePrepare[key].append((request, sender))

    def dequeuePrepares(self, viewNo: int, ppSeqNo: int):
        key = (viewNo, ppSeqNo)
        if key in self.preparesWaitingForPrePrepare:
            i = 0
            # Count of pending PREPAREs processed below
            while self.preparesWaitingForPrePrepare[key]:
                prepare, sender = self.preparesWaitingForPrePrepare[
                    key].popleft()
                logger.debug("{} popping stashed PREPARE{}".format(self, key))
                self.processPrepare(prepare, sender)
                i += 1
            self.preparesWaitingForPrePrepare.pop(key)
            logger.debug("{} processed {} PREPAREs waiting for PRE-PREPARE for"
                         " view no {} and seq no {}".format(
                             self, i, viewNo, ppSeqNo))

    def enqueueCommit(self, request: Commit, sender: str):
        logger.debug("Queueing commit due to unavailability of PREPARE. "
                     "Request {} from {}".format(request, sender))
        key = (request.viewNo, request.ppSeqNo)
        if key not in self.commitsWaitingForPrepare:
            self.commitsWaitingForPrepare[key] = deque()
        self.commitsWaitingForPrepare[key].append((request, sender))

    def dequeueCommits(self, viewNo: int, ppSeqNo: int):
        key = (viewNo, ppSeqNo)
        if key in self.commitsWaitingForPrepare:
            i = 0
            # Count of pending COMMITs processed below
            while self.commitsWaitingForPrepare[key]:
                commit, sender = self.commitsWaitingForPrepare[key].popleft()
                logger.debug("{} popping stashed COMMIT{}".format(self, key))
                self.processCommit(commit, sender)
                i += 1
            self.commitsWaitingForPrepare.pop(key)
            logger.debug("{} processed {} COMMITs waiting for PREPARE for"
                         " view no {} and seq no {}".format(
                             self, i, viewNo, ppSeqNo))

    def getDigestFor3PhaseKey(self, key: ThreePhaseKey) -> Optional[str]:
        reqKey = self.getReqKeyFrom3PhaseKey(key)
        digest = self.requests.digest(reqKey)
        if not digest:
            logger.debug("{} could not find digest in sent or received "
                         "PRE-PREPAREs or PREPAREs for 3 phase key {} and req "
                         "key {}".format(self, key, reqKey))
            return None
        else:
            return digest

    def getReqKeyFrom3PhaseKey(self, key: ThreePhaseKey):
        reqKey = None
        if key in self.sentPrePrepares:
            reqKey = self.sentPrePrepares[key][0]
        elif key in self.prePrepares:
            reqKey = self.prePrepares[key][0]
        elif key in self.prepares:
            reqKey = self.prepares[key][0]
        else:
            logger.debug(
                "Could not find request key for 3 phase key {}".format(key))
        return reqKey

    @property
    def threePhaseState(self):
        # TODO: This method is incomplete
        # Gets the current stable and unstable checkpoints and creates digest
        # of unstable checkpoints
        state = []
        if self.checkpoints:
            # TODO: build `state` from the stable and unstable checkpoints
            pass
        return ThreePCState(self.instId, state)

    def process3PhaseState(self, msg: ThreePCState, sender: str):
        # TODO: This is not complete
        pass

    def send(self, msg, stat=None) -> None:
        """
        Send a message to the node on which this replica resides.

        :param msg: the message to send
        """
        logger.display("{} sending {}".format(self, msg.__class__.__name__),
                       extra={"cli": True})
        logger.trace("{} sending {}".format(self, msg))
        if stat:
            self.stats.inc(stat)
        self.outBox.append(msg)
コード例 #23
0
class Location(simpy.Resource):
    """
    Class representing generic locations used in the simulator
    """
    def __init__(self, env, rng, conf, area, name, location_type, lat, lon,
                 capacity):
        """
        Locations are created with city.create_location(), not instantiated directly

        Args:
            env (covid19sim.Env): Shared environment
            rng (np.random.RandomState): Random number generator
            conf (dict): yaml configuration of the experiment
            area (float): Area of the location
            name (str): The location's name
            location_type (str): Location's type (one of ALL_LOCATIONS)
            lat (float): Location's latitude
            lon (float): Location's longitude
            social_contact_factor (float): how much people are close to each other
                see contamination_probability() (this scales the contamination pbty)
            capacity (int): Daily intake capacity for the location (infinity if None).
            surface_prob (float): distribution of surface types in the Location. As
                different surface types have different contamination probabilities
                and virus "survival" durations, this will influence the contamination
                of humans at this location.
                Surfaces: aerosol, copper, cardboard, steel, plastic
        """

        assert location_type in ALL_LOCATIONS, "not a valid location"
        if capacity is None:
            capacity = simpy.core.Infinity

        super().__init__(env, capacity)
        # OrderedSet instead of set for determinism when iterating
        self.humans = OrderedSet()
        self.conf = conf
        self.name = name
        self.rng = np.random.RandomState(rng.randint(2**16))
        self.lat = lat
        self.lon = lon
        self.area = area
        self.location_type = location_type
        self.env = env
        self.contamination_timestamp = datetime.datetime.min
        self.max_day_contamination = 0
        self.is_open_for_business = True
        self.binned_humans = {bin: OrderedSet() for bin in AGE_BIN_WIDTH_5}
        self.social_contact_factor = conf[f'{location_type}_CONTACT_FACTOR']
        self.contaminated_surface_probability = conf[
            f'{location_type}_SURFACE_PROB']

        # occupation related constants
        OPEN_CLOSE_TIMES = conf[f'{location_type}_OPEN_CLOSE_HOUR_MINUTE']
        OPEN_DAYS = conf[f'{location_type}_OPEN_DAYS']
        # /!\ opening and closing time are in seconds relative to midnight;
        # convert the minute component to seconds as well (assumes
        # SECONDS_PER_MINUTE is available alongside SECONDS_PER_HOUR)
        self.opening_time = OPEN_CLOSE_TIMES[0][0] * SECONDS_PER_HOUR + \
            OPEN_CLOSE_TIMES[0][1] * SECONDS_PER_MINUTE
        self.closing_time = OPEN_CLOSE_TIMES[1][0] * SECONDS_PER_HOUR + \
            OPEN_CLOSE_TIMES[1][1] * SECONDS_PER_MINUTE
        self.open_days = OPEN_DAYS
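
        # Worked example (illustrative, not from the original source): with
        # OPEN_CLOSE_TIMES = [[9, 0], [17, 30]] the location opens at
        # 9 * 3600 = 32400 seconds and closes at 17 * 3600 + 30 * 60 = 63000
        # seconds after midnight.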

        # parameters related to sampling contacts
        if location_type in ["SENIOR_RESIDENCE", "HOUSEHOLD"]:
            key = "HOUSEHOLD"
        elif location_type == "SCHOOL":
            key = "SCHOOL"
        elif location_type in ["WORKPLACE", "HOSPITAL"]:
            key = "WORKPLACE"
        else:
            key = "OTHER"

        # contact related constants
        self.MEAN_DAILY_KNOWN_CONTACTS = conf.get(
            f'{key}_MEAN_DAILY_INTERACTIONS', None)
        self.P_CONTACT = np.array(conf[f'P_CONTACT_MATRIX_{key}'])
        self.ADJUSTED_CONTACT_MATRIX = np.array(
            conf[f'ADJUSTED_CONTACT_MATRIX_{key}'])
        self.MEAN_DAILY_KNOWN_CONTACTS_FOR_AGEGROUP = self.ADJUSTED_CONTACT_MATRIX.sum(
            axis=0)
        self.MEAN_DAILY_KNOWN_CONTACTS_FOR_AGEGROUP = _adjust_surveyed_contacts_to_regional_contacts(
            self.MEAN_DAILY_KNOWN_CONTACTS_FOR_AGEGROUP, conf,
            self.MEAN_DAILY_KNOWN_CONTACTS)

        # duration matrices
        # self.MEAN_DAILY_CONTACT_DURATION_SECONDS = np.array(conf[f'{key}_CONTACT_DURATION_NORMAL_MEAN_SECONDS_MATRIX'])
        # self.STDDEV_DAILY_CONTACT_DURATION_SECONDS = np.array(conf[f'{key}_CONTACT_DURATION_NORMAL_SIGMA_SECONDS_MATRIX'])
        self.MEAN_DAILY_CONTACT_DURATION_SECONDS = np.array(
            conf['CONTACT_DURATION_NORMAL_MEAN_SECONDS_MATRIX'])
        self.STDDEV_DAILY_CONTACT_DURATION_SECONDS = np.array(
            conf['CONTACT_DURATION_NORMAL_SIGMA_SECONDS_MATRIX'])

        for matrix in [self.P_CONTACT, self.ADJUSTED_CONTACT_MATRIX]:
            assert matrix.shape[0] == matrix.shape[
                1], "contact matrix is not square"

    def infectious_human(self):
        """
        Returns:
            bool: Is there an infectious human currently at that location
        """
        for h in self.humans:
            if h.is_infectious:
                return True
        return False

    def __repr__(self):
        """
        Returns:
            str: Representation of the Location
        """
        return f"{self.name} - occ:{len(self.humans)}/{self.capacity} - I:{self.is_contaminated}"

    def add_human(self, human):
        """
        Adds a human instance to the OrderedSet of humans at the location.

        Args:
            human (covid19sim.human.Human): The human to add.
        """
        self.humans.add(human)
        self.binned_humans[human.age_bin_width_5.bin].add(human)

    def remove_human(self, human):
        """
        Remove a given human from location.human
        If they are infectious, then location.contamination_timestamp is set to the
        env's timestamp and the duration of this contamination is set
        (location.max_day_contamination) according to the distribution of surfaces
        (location.contaminated_surface_probability) and the survival of the virus
        per surface type (MAX_DAYS_CONTAMINATION)
        /!\ Human is not returned

        Args:
            human (covid19sim.human.Human): The human to remove
        """
        if human in self.humans:
            if human.is_infectious:
                self.contamination_timestamp = self.env.timestamp
                rnd_surface = float(
                    self.rng.choice(a=human.conf.get("MAX_DAYS_CONTAMINATION"),
                                    size=1,
                                    p=self.contaminated_surface_probability))
                self.max_day_contamination = max(self.max_day_contamination,
                                                 rnd_surface)
            self.humans.remove(human)
            self.binned_humans[human.age_bin_width_5.bin].remove(human)

    @property
    def is_contaminated(self):
        """
        Is the location currently contaminated? It is if one of these two
        conditions is true :
        - an infectious human is present at the location (location is
          considered constantly reinfected by the human)
        - there are no infectious humans but the location was contaminated
          recently (see remove_human()). This depends on the time the last
          infectious human left, the current time and the duration of the
          contamination (location.max_day_contamination).

        Returns:
            bool: Is the place currently contaminating?
        """
        if self.infectious_human():
            return True
        else:
            return (self.env.timestamp - self.contamination_timestamp <=
                    datetime.timedelta(days=self.max_day_contamination))

    @property
    def contamination_probability(self):
        """
        Contamination depends on the time the virus has been sitting on a given surface
        (location.max_day_contamination) and is linearly decayed over time.
        Then it is scaled by location.social_contact_factor

        If not location.is_contaminated, return 0.0

        Returns:
            float: probability that a human is contaminated when going to this location.
        """
        if self.is_contaminated:
            if self.infectious_human():
                # Location constantly reinfected by the infectious human
                p_infection = 1.0
            else:
                # Linear decay of p_infection depending on time since last infection
                lag = (self.env.timestamp - self.contamination_timestamp)
                lag /= datetime.timedelta(days=1)
                p_infection = 1 - lag / self.max_day_contamination
        else:
            p_infection = 0.0

        return p_infection
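
    # Worked example (illustrative): if the last infectious human left 1 day
    # ago and max_day_contamination is 2 days, the linear decay above yields
    # p_infection = 1 - 1 / 2 = 0.5 (the social_contact_factor scaling
    # mentioned in the docstring is applied outside this property).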

    def _sample_interactee(self, type, human, n=1):
        """
        Samples encounter partner of `type` for `human`

        Args:
            type (string): type of interaction to sample. expects "known", "unknown"
            human (covid19sim.human.Human): human who will interact with the sampled human
            n (int): number of `other_human`s to sample

        Returns:
            (list): the sampled `other_human`s (covid19sim.human.Human, or None
                when no partner could be sampled) with whom `human` will interact
        """
        if len(self.humans) == 1:
            return [None]

        PREFERENTIAL_ATTACHMENT_FACTOR = self.conf[
            '_CURRENT_PREFERENTIAL_ATTACHMENT_FACTOR']

        if type == "known":
            if len(self.humans) - 1 == n:
                return [
                    h for h in self.humans
                    if h != human and h in human.known_connections
                ]

            human_bin = human.age_bin_width_5.index
            candidate_humans = [h for h in self.humans if human != h]
            other_humans, h_vector, known_vector, reduction_factor = list(
                zip(*
                    [_extract_attrs(human, h, self)
                     for h in candidate_humans]))

            p_contact = self.P_CONTACT[h_vector, human_bin] * (
                1 - PREFERENTIAL_ATTACHMENT_FACTOR)
            p_contact += np.array(known_vector) * self.P_CONTACT[
                h_vector, human_bin] * PREFERENTIAL_ATTACHMENT_FACTOR
            # reduction factor is due to mutual interaction sampling where other_human's reduction factor is taken into account
            p_contact *= (1 - np.array(reduction_factor))
            if p_contact.sum() == 0:
                return [None]

            p_contact /= p_contact.sum()

        elif type == "unknown":
            if len(self.humans) - 1 == n:
                return [h for h in self.humans if h != human]

            other_humans = [x for x in self.humans if x != human]
            p_contact = np.ones_like(other_humans,
                                     dtype=float) / len(other_humans)

        else:
            raise ValueError(f"Unknown interaction type: {type}")

        return self.rng.choice(other_humans, size=n, p=p_contact,
                               replace=True).tolist()
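
    # Sketch of the sampling rule above (illustrative): with preferential
    # attachment factor a, the weight of picking candidate h is
    # P_CONTACT[h, me] * (1 - a) + known(h) * P_CONTACT[h, me] * a, further
    # scaled by (1 - reduction_factor(h)) and then normalised, so known
    # connections are favoured as a grows towards 1.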

    def _sample_interaction_with_type(self, type, human):
        """
        Samples interactions of type `type` for `human`.

        Args:
            type (string): type of interaction to sample. expects "known", "unknown"
            human (covid19sim.human.Human): human who will interact with the sampled human

        Returns:
            interactions (list): each element is as follows -
                human (covid19sim.human.Human): other human with whom to have `type` of interaction
                distance_profile (covid19sim.locations.location.DistanceProfile): distance from which these two humans met (cms)
                duration (float): duration for which this encounter took place (seconds)
        """

        if type == "known":
            mean_daily_interactions = self.MEAN_DAILY_KNOWN_CONTACTS_FOR_AGEGROUP[
                human.age_bin_width_5.index]
            mean_daily_interactions *= (
                1 -
                human.intervened_behavior.daily_interaction_reduction_factor(
                    self))
            min_dist_encounter = self.conf['MIN_DIST_KNOWN_CONTACT']
            max_dist_encounter = self.conf['MAX_DIST_KNOWN_CONTACT']
            mean_interaction_time = None
        elif type == "unknown":
            mean_daily_interactions = self.conf['_MEAN_DAILY_UNKNOWN_CONTACTS']
            min_dist_encounter = self.conf['MIN_DIST_UNKNOWN_CONTACT']
            max_dist_encounter = self.conf['MAX_DIST_UNKNOWN_CONTACT']
            mean_interaction_time = self.conf["GAMMA_UNKNOWN_CONTACT_DURATION"]
        else:
            raise ValueError(f"Unknown interaction type: {type}")

        mean_daily_interactions += 1e-6  # to avoid error in sampling with 0 mean from negative binomial
        scale_factor_interaction_time = self.conf[
            'SCALE_FACTOR_CONTACT_DURATION']
        # (assumption) maximum allowable distance is when humans are uniformly spaced
        packing_term = 100 * np.sqrt(self.area / len(self.humans))

        interactions = []
        n_interactions = min(
            len(self.humans) - 1,
            self.rng.negative_binomial(mean_daily_interactions, 0.5))
        interactees = self._sample_interactee(type, human, n=n_interactions)
        for other_human in interactees:
            if other_human is None:
                continue

            assert other_human != human, "sampling with self is not allowed"
            # sample duration of encounter (seconds)
            t_overlap = (min(human.location_leaving_time,
                             other_human.location_leaving_time) -
                         max(human.location_start_time,
                             other_human.location_start_time))

            # if the overlap duration is less than a relevant duration for infection, it is of no use.
            if t_overlap < min(self.conf['MIN_MESSAGE_PASSING_DURATION'],
                               self.conf['INFECTION_DURATION']):
                continue

            # sample distance of encounter
            encounter_term = self.rng.uniform(min_dist_encounter,
                                              max_dist_encounter)
            distance = np.clip(encounter_term, a_min=0, a_max=packing_term)
            distance_profile = DistanceProfile(encounter_term=encounter_term,
                                               packing_term=packing_term,
                                               social_distancing_term=None,
                                               distance=distance)

            if type == "known":
                age_bin = human.age_bin_width_5.index
                other_bin = other_human.age_bin_width_5.index
                mean_duration = self.MEAN_DAILY_CONTACT_DURATION_SECONDS[
                    other_bin, age_bin]
                sigma_duration = self.STDDEV_DAILY_CONTACT_DURATION_SECONDS[
                    other_bin, age_bin]
                # surveyed data gives us minutes per day. Here we use it to sample rate of minutes spend per second of overlap in an encounter.
                # duration = (_sample_positive_normal(mean_duration, sigma_duration, self.rng) / SECONDS_PER_DAY) * t_overlap * SECONDS_PER_MINUTE
                duration = _sample_positive_normal(mean_duration,
                                                   sigma_duration,
                                                   self.rng,
                                                   upper_limit=t_overlap)

            elif type == "unknown":
                duration = self.rng.gamma(
                    mean_interaction_time / scale_factor_interaction_time,
                    scale_factor_interaction_time)

            else:
                raise ValueError(f"Unknown interaction type: {type}")

            # (debug)
            # if self.location_type ==  "HOUSEHOLD" and type == "known" and human.mobility_planner.current_activity.name != "socialize":# and human.workplace != self:
            #     print(human, "-->", other_human, "for", duration / SECONDS_PER_MINUTE, "tota humans", len(self.humans), "t_overlap", t_overlap / SECONDS_PER_MINUTE, human.mobility_planner.current_activity)

            # add to the list
            interactions.append((other_human, distance_profile, duration))

        return interactions

    def sample_interactions(self, human, unknown_only=False):
        """
        samples how `human` interacts with other `human`s at this location (`self`) at this time.

        Args:
            human (covid19sim.human.Human): human for whom interactions need to be sampled
            unknown_only (bool): whether to sample interactions of type `unknown` only

        Returns:
            known_interactions (list): each element is as follows -
                human (covid19sim.human.Human): other human with whom to interact
                distance_profile (covid19sim.locations.location.DistanceProfile): distance from which this encounter took place (cms)
                duration (float): duration for which this encounter took place (minutes)
            unknown_interactions (list): each element is as follows -
                human (covid19sim.human.Human): other human who was nearby and unknown to `human`
                distance_profile (covid19sim.locations.location.DistanceProfile): distance from which this encounter took place (cms)
                duration (float): duration for which this encounter took place (minutes)
        """
        # only `human` is at this location. There will be no interactions.
        if len(self.humans) == 1:
            assert human == self.humans[0]
            return [], []

        known_interactions = []
        unknown_interactions = self._sample_interaction_with_type(
            "unknown", human)
        if not unknown_only:
            known_interactions = self._sample_interaction_with_type(
                "known", human)

        return known_interactions, unknown_interactions
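
    # Minimal usage sketch (assumed call site, for illustration only):
    #   known, unknown = location.sample_interactions(human)
    #   for other_human, distance_profile, duration in known + unknown:
    #       ...  # e.g. apply the encounter / infection logic per interaction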

    def check_environmental_infection(self, human):
        """
        Determines whether `human` gets infected due to the virus in environment.

        Environmental infection is modeled via surfaces. We consider the following study -
        https://www.nejm.org/doi/pdf/10.1056/NEJMc2004973?articleTools=true
        It shows the duration for which virus remains on a surface. Following surfaces are considered -
        aerosol    copper      cardboard       steel       plastic

        We sample a surface using surface_prob and infect the surface for MAX_DAYS_CONTAMINATION[surface_index] days.
        NOTE: self.surface_prob is experimental, and there is no data on which surfaces are dominant at a location.
        NOTE: our final objective is to make sure that environmental infection is around 10-15% of all transmissions to comply with the data.
        We do that via ENVIRONMENTAL_INFECTION_KNOB.

        Args:
            human (covid19sim.human.Human): `human` for whom to check environmental infection.

        Returns:
            None. If the environmental transmission succeeds, `human` is infected in place via `human._get_infected`.
        """
        p_contamination = self.contamination_probability
        if not (human.is_susceptible and p_contamination > 0):
            return

        # probability that the contaminated environment transmits the virus to `human`
        p_transmission = get_environment_human_p_transmission(
            p_contamination,
            human,
            self.conf.get("_ENVIRONMENTAL_INFECTION_KNOB"),
        )

        x_environment = self.rng.random() < p_transmission

        # track infection related stats
        human.city.tracker.track_infection(source="environment",
                                           from_human=None,
                                           to_human=human,
                                           location=self,
                                           timestamp=self.env.timestamp,
                                           p_infection=p_transmission,
                                           success=x_environment)
        if x_environment:
            human._get_infected(initial_viral_load=self.rng.random())

        return

    def is_open(self, date):
        """
        Checks if `self` is open on `date`.

        Args:
            date (datetime.date): date for which this is to be checked.

        Returns:
            (bool): True if it's open on `date`.
        """
        return date.weekday() in self.open_days

    def __hash__(self):
        """
        Hash of the location is the hash of its name

        Returns:
            int: hash
        """
        return hash(self.name)

    def serialize(self):
        """
        This function serializes the location object by deleting
        non-serializable keys

        Returns:
            dict: serialized location
        """
        s = self.__dict__
        if s.get('env'):
            del s['env']
        if s.get('rng'):
            del s['rng']
        if s.get('_env'):
            del s['_env']
        if s.get('contamination_timestamp'):
            del s['contamination_timestamp']
        if s.get('residents'):
            del s['residents']
        if s.get('humans'):
            del s['humans']
        return s
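The duration sampling above relies on a `_sample_positive_normal` helper that is not shown in this excerpt. Below is a minimal sketch of what such a helper could look like, assuming a rejection loop for positivity and a simple clip to `upper_limit`; the real covid19sim implementation may differ.

import numpy as np

def _sample_positive_normal(mean, sigma, rng, upper_limit=None):
    # Hypothetical sketch: draw from N(mean, sigma), reject non-positive draws,
    # and clip to upper_limit when one is given.
    sample = rng.normal(mean, sigma)
    while sample <= 0:
        sample = rng.normal(mean, sigma)
    if upper_limit is not None:
        sample = min(sample, upper_limit)
    return sample

rng = np.random.RandomState(0)
print(_sample_positive_normal(15.0, 5.0, rng, upper_limit=30.0))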
コード例 #24
0
ファイル: __main__.py プロジェクト: Zeturic/pluck
        if srcfile not in static_objects:
            relocatable_objects.add(objfile)

relocatable = os.path.join(obj, "relocatable.o")

# ensures the file exists
with open(relocatable, "w"): pass

# grabs all the requested object files from libgcc.a
# and marks them for inclusion in relocatable.o
if libgcc:
    with open(os.path.join(DEVKITARM, "lib", "gcc", "arm-none-eabi", "5.3.0", "thumb", "libgcc.a"), "rb") as library:
        data = kewensis.parse(library)
    for datum in data:
        if datum.filename in libgcc:
            libgcc.remove(datum.filename)
            with open(os.path.join(obj, datum.filename), "wb") as objfile:
                objfile.write(datum.contents)
            relocatable_objects.add(datum.filename)
        if not libgcc:
            break

if relocatable_objects:
    exit_code = subprocess.run([
        LD,
        *LDFLAGS,
        *(os.path.join(obj, objfile) for objfile in relocatable_objects),
        "-o",
        relocatable
    ]).returncode
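The `subprocess.run` call above performs a partial (relocatable) link. A standalone sketch of the same idea, assuming `arm-none-eabi-ld` is on PATH and that `-r` (relocatable output) is the relevant flag carried in `LDFLAGS`; paths are illustrative only.

import subprocess

objects = ["obj/a.o", "obj/b.o"]  # hypothetical input objects
cmd = ["arm-none-eabi-ld", "-r", *objects, "-o", "obj/relocatable.o"]
exit_code = subprocess.run(cmd).returncode
if exit_code != 0:
    raise SystemExit(f"partial link failed with exit code {exit_code}")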
コード例 #25
0
ファイル: manager.py プロジェクト: gooddata/sgmanager
    def update_remote_groups(self, dry_run=True, threshold=None, remove=True):
        '''Update remote configuration with the local one.'''
        # Copy those so that we can modify them even with dry-run
        local = OrderedSet(self.local)
        remote = OrderedSet(self.remote)

        validate_groups(local)

        def parse_groups(groups, remote):
            if remote:
                self._process_remote_groups(groups)
            groups = {group.name: group for group in groups if group.name != 'default'}
            keys = OrderedSet(groups.keys())
            return groups, keys

        lgroups, lkeys = parse_groups(local, False)
        rgroups, rkeys = parse_groups(remote, True)

        changes = 0
        unchanged = 0
        groups_added = OrderedSet()
        groups_updated = OrderedSet()
        groups_removed = OrderedSet()
        rules_added = OrderedSet()
        rules_removed = OrderedSet()

        # Added groups
        for group in (lgroups[name] for name in lkeys - rkeys):
            grp = Group(group.name, group.description)
            groups_added.add(grp)
            rgroups[group.name] = grp
            rkeys.add(grp.name)
            changes += 1

        # Changed groups
        for rgroup, lgroup in ((rgroups[name], lgroups[name])
                               for name in rkeys & lkeys):
            if rgroup not in groups_added:
                unchanged += 1

            if rgroup.description != lgroup.description:
                # XXX: https://review.openstack.org/596609
                # groups_updated.add((rgroup, lgroup))
                pass

            # FIXME: when comparing using OrderedSet, added rules part contains
            #        all elements rather than different ones.
            lrules, rrules = set(lgroup.rules), set(rgroup.rules)

            if rrules != lrules:
                # Added rules
                for rule in lrules - rrules:
                    rules_added.add((rgroup.name, rule))
                    changes += 1

                # Removed rules
                for rule in rrules - lrules:
                    if remove:
                        rules_removed.add((rgroup.name, rule))
                        changes += 1
                    else:
                        unchanged += 1
            unchanged += len(rrules & lrules)

        # Removed groups
        for group in (rgroups[name] for name in rkeys - lkeys):
            if remove:
                if group._project is None:
                    continue
                groups_removed.add(group)
                changes += len(group.rules) + 1
            else:
                unchanged += len(group.rules) + 1

        if changes == 0 and not groups_updated:
            return

        # Report result
        logger.info(f'{changes:d} changes to be made:')
        for group in groups_added:
            logger.info(f'  - Create group {group.name!r}')
        for rgroup, lgroup in groups_updated:
            logger.info(f'  - Update description for {rgroup.name!r}:'
                        f' {rgroup.description!r} → {lgroup.description!r}')
        for group_name, rule in rules_added:
            logger.info(f'  - Create {rule!r} in group {group_name!r}')
        for group_name, rule in rules_removed:
            logger.info(f'  - Remove {rule!r} from group {group_name!r}')
        for group in groups_removed:
            logger.info(f'  - Remove group {group.name!r} with {len(group.rules)} rules')

        if threshold is not None:
            changes_percentage = changes / (unchanged + changes) * 100
            if changes_percentage > threshold:
                raise ThresholdException(f'Amount of changes is {changes_percentage:f}%'
                                         f' which is more than allowed ({threshold:f}%)')

        if dry_run:
            return

        # We've modified 'remote', so copy it again
        remote = OrderedSet(self.remote)
        rgroups, rkeys = parse_groups(remote, True)

        # Added groups
        for group in groups_added:
            ginfo = self.connection.create_security_group(
                name=group.name,
                description=group.description)
            remote.add(Group.from_remote(**ginfo))
        rgroups, rkeys = parse_groups(remote, True)

        # Updated groups
        for rgroup, lgroup in groups_updated:
            self.connection.update_security_group(
                name_or_id=rgroup._id,
                description=lgroup.description)
            # Updating group should not change its ID
            rgroup.description = lgroup.description

        # Added rules
        for group_name, rule in rules_added:
            rgroup = rgroups[group_name]
            cidr = str(rule.cidr) if rule.cidr is not None else None
            group_id = rgroups[rule.group]._id if rule.group is not None else None
            rinfo = self.connection.create_security_group_rule(
                secgroup_name_or_id=rgroup._id,
                port_range_min=rule.port_min,
                port_range_max=rule.port_max,
                protocol=rule.protocol.value,
                remote_ip_prefix=cidr,
                remote_group_id=group_id,
                direction=rule.direction.value,
                ethertype=rule.ethertype.value)
            rgroup.rules.add(Rule.from_remote(**rinfo))

        if remove:
            # Removed rules
            for group_name, rule in rules_removed:
                rgroup = rgroups[group_name]
                self.connection.delete_security_group_rule(
                    rule_id=rule._id)
                rgroup.rules.remove(rule)

            # Removed groups
            for group in groups_removed:
                self.connection.delete_security_group(
                    name_or_id=group._id)
                remote.remove(group)

        self.remote = remote
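The threshold guard in `update_remote_groups` refuses to apply a change set that is too large relative to everything it inspected. A tiny worked example of that computation on made-up numbers:

changes, unchanged, threshold = 7, 13, 20.0
changes_percentage = changes / (unchanged + changes) * 100  # 35.0
if changes_percentage > threshold:
    print(f"{changes_percentage:.1f}% > {threshold:.1f}%: refusing to apply changes")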
コード例 #26
0
class State(object):
    def __init__(self, game, size, move=None, expr=""):
        """
        Initialises a game state
        :param game: the game being played (provides `moves`, `initial_move` and the terminal test)
        :param size: state size, used to look up the remaining moves in `game.moves`
        :param move: the move that produced this state, if any
        :param expr: the current expression; defaults to the game's initial move
        """
        # game to play
        self.game = game
        # state size
        self.size = size

        # current expression
        if not expr:
            expr = self.game.initial_move
        self.expr = expr

        # all moves
        self.moves = game.moves
        # moves that can be taken
        if size:
            self.remaining_moves = game.moves["u{}".format(size)].copy()
        else:
            self.remaining_moves = OrderedSet()

        # top-most-right-most index
        self.tprm_index = -1

        # current move
        self.current_move = move

    def is_terminal(self):
        return bool(self.game.is_terminal(self.expr))

    def is_non_terminal(self):
        return not self.is_terminal()

    def next_state(self, random=True):
        """
        Randomly determines the next state
        :param random: bool, flag how to choose next state
        :return: State
        """
        # lazily compute the top-most-right-most index
        if self.tprm_index < 0:
            self.tprm_index = top_most_right_most(self.expr)

        non_terminal = self.expr.split(" ")[self.tprm_index]

        # chose next move randomly
        if random:
            move = choice(list(self.remaining_moves))
        else:
            move = list(self.remaining_moves[non_terminal]).pop(0)
        # remove move from list
        self.remaining_moves.remove(move)

        # replace non-terminal
        expr = replace_nth_occurrence(self.expr, move, self.tprm_index)

        # new state size
        tprm = top_most_right_most(expr)
        non_terminal = expr.split(" ")[tprm]

        if not self.game.is_terminal(expr):
            size = int(non_terminal.strip("u"))
        else:
            size = 0
        # new state
        state = State(self.game, size, move=move, expr=expr)

        return state

    def __eq__(self, other):
        return self.expr == other.expr

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return str(self.expr)

    def __repr__(self):
        return self.__str__()

    def __hash__(self):
        return hash(self.expr)
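`State.next_state` leans on two module-level helpers, `top_most_right_most` and `replace_nth_occurrence`, that are not part of this excerpt. The sketch below shows one plausible reading of them (right-most non-terminal token of the form `u<n>`, replaced in place); the project's real definitions may well differ.

def top_most_right_most(expr):
    # Hypothetical sketch: index of the right-most token that looks like a non-terminal 'u<n>'.
    tokens = expr.split(" ")
    for i in range(len(tokens) - 1, -1, -1):
        if tokens[i].startswith("u"):
            return i
    return -1

def replace_nth_occurrence(expr, move, n):
    # Hypothetical sketch: replace the token at position n with the chosen move.
    tokens = expr.split(" ")
    tokens[n] = move
    return " ".join(tokens)

expr = "add u1 u2"
print(replace_nth_occurrence(expr, "mul u1 u1", top_most_right_most(expr)))  # add u1 mul u1 u1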
コード例 #27
0
class Simulation:
    """Agent-based simulation."""

    time: Timestamp
    agents: Set[TrafficDynamicAgent]
    active: Set[TrafficDynamicAgent]
    ready_buffer: int
    _queue: Deque[Tuple[Callable, Tuple]]
    _listeners: List[Listener]

    def __init__(self):
        self.time = 0
        self.agents = OrderedSet()
        self.active = OrderedSet()
        self.ready_buffer = 0
        self._queue = deque()
        self._listeners = []

    @property
    def target_buffer(self) -> int:
        """Get index of current target buffer."""
        return (self.ready_buffer + 1) % 2

    def add(self, agent: TrafficDynamicAgent):
        """Add agent to the simulation."""
        self.agents.add(agent)
        self.update_active_set(agent)
        self.raise_event('new_agent')

    def update(self, dt: Duration):
        """Update the simulation with time step of `dt`."""
        ready = self.ready_buffer
        target = (ready + 1) % 2

        for agent in self.active:
            agent.update(dt, ready, target)

        while self._queue:
            callable_, args = self._queue.popleft()
            callable_(*args)

        old_time = self.time
        self.time += dt
        self.flip_buffer()
        self.raise_event('simulation_step', dt)
        self._check_time_events(old_time)

    def _check_time_events(self, old_time: Timestamp):
        """Raise time events when appropriate."""
        old_time, time = floor(old_time), floor(self.time)
        if old_time == time:
            return
        self.raise_event('passed_second')

        for timespan, event in TIME_EVENTS:
            if old_time // timespan == time // timespan:
                return
            self.raise_event(event)

    def flip_buffer(self):
        """Flip ready buffer index."""
        self.ready_buffer = self.target_buffer

    def update_active_set(self, agent: TrafficDynamicAgent):
        """Activate or deactivate agent according to its `active` attribute.

        When an agent is activated or deactivated during update, this should be
        called instead of adding or removing directly from the active set,
        because the set is being iterated and cannot change during iteration.
        """
        def _update_active_set():
            if agent.active:
                self.active.add(agent)
            else:
                self.active.discard(agent)

        self.enqueue(_update_active_set)
        self.raise_event('active_set_updated')

    def remove(self, agent: TrafficDynamicAgent):
        """Remove agent from simulation."""
        if agent in self.agents:
            self.agents.remove(agent)
            self.raise_event('removed_agent', agent)

    def enqueue(self, callable_: Callable, args: Tuple = ()):
        """Enqueue function to be called after update loop ends."""
        self._queue.append((callable_, args))

    def register_listener(self, listener: Listener):
        """Register listener to receive raised events."""
        if listener not in self._listeners:
            self._listeners.append(listener)

    def raise_event(self, name: str, *args):
        """Raise event to all registered listeners."""
        for listener in self._listeners:
            listener(name, *args)
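A minimal driving loop for `Simulation`, assuming the module-level names it depends on (`OrderedSet`, `TIME_EVENTS`, `floor`, the type aliases) are available, and using a toy stand-in for `TrafficDynamicAgent` that only implements the two members the update loop above touches (`active` and `update(dt, ready, target)`):

class _ToyAgent:
    def __init__(self):
        self.active = True
        self.position = [0.0, 0.0]  # double-buffered scalar state

    def update(self, dt, ready, target):
        # read from the ready buffer, write into the target buffer
        self.position[target] = self.position[ready] + 1.0 * dt

sim = Simulation()
sim.register_listener(lambda name, *args: print("event:", name, args))
sim.add(_ToyAgent())
for _ in range(3):
    sim.update(0.5)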
コード例 #28
0
class TestFacility(object):
    """
    Implements queue behavior for tests.
    It keeps a queue of `Human`s who need a test.
    Depending on the daily budget of testing, tests are administered to `Human` according to a scoring function.
    """

    def __init__(self, test_type_preference, max_capacity_per_test_type, env, conf):
        self.test_type_preference = test_type_preference
        self.max_capacity_per_test_type = max_capacity_per_test_type

        self.test_count_today = defaultdict(int)
        self.env = env
        self.conf = conf
        self.test_queue = OrderedSet()
        self.last_date_to_check_tests = self.env.timestamp.date()

    def reset_tests_capacity(self):
        """
        Resets the testing capacity back to the allowed daily budget.
        """
        if self.last_date_to_check_tests != self.env.timestamp.date():
            self.last_date_to_check_tests = self.env.timestamp.date()
            for k in self.test_count_today.keys():
                self.test_count_today[k] = 0

            # clear queue
            # TODO : check more scenarios about when the person can be removed from a queue
            to_remove = []
            for human in self.test_queue:
                if not any(human.symptoms) and not human._test_recommended:
                    to_remove.append(human)

            _ = [self.test_queue.remove(human) for human in to_remove]

    def get_available_test(self):
        """
        Returns the first test type that is available according to the preference hierarchy

        See TEST_TYPES in core.yaml

        Returns:
            str: available test_type
        """
        for test_type in self.test_type_preference:
            if self.test_count_today[test_type] < self.max_capacity_per_test_type[test_type]:
                self.test_count_today[test_type] += 1
                return test_type

    def add_to_test_queue(self, human):
        """
        Adds `Human` to the test queue.

        Args:
            human (Human): `Human` object.
        """
        if human in self.test_queue:
            return
        self.test_queue.add(human)

    def clear_test_queue(self):
        """
        Called at the same frequency as the `while` loop in `City.run`.
        Triages `Human`s in the queue to administer tests.
        For an infected `Human`, the result is negative with the test's false-negative rate, otherwise positive;
        for an uninfected `Human`, the result is positive with probability P_FALSE_POSITIVE, otherwise negative.

        See TEST_TYPES in core.yaml
        """
        # reset here. if reset at end, it results in carry-over of remaining test at the 0th hour.
        self.reset_tests_capacity()
        test_triage = sorted(list(self.test_queue), key=lambda human: -self.score_test_need(human))
        for human in test_triage:
            test_type = self.get_available_test()
            if test_type:
                if human.infection_timestamp is not None:
                    if human.rng.rand() < get_test_false_negative_rate(test_type, human.days_since_covid, human.conf):
                        unobserved_result = NEGATIVE_TEST_RESULT
                    else:
                        unobserved_result = POSITIVE_TEST_RESULT
                else:
                    if human.rng.rand() < self.conf['TEST_TYPES'][test_type]["P_FALSE_POSITIVE"]:
                        unobserved_result = POSITIVE_TEST_RESULT
                    else:
                        unobserved_result = NEGATIVE_TEST_RESULT

                human.set_test_info(test_type, unobserved_result)  # /!\ sets other attributes related to tests
                self.test_queue.remove(human)

            else:
                # no more tests available
                break

        logging.debug(f"Cleared the test queue for {len(test_triage)} humans. "
                      f"Out of those, {len(test_triage) - len(self.test_queue)} "
                      f"were tested")

    def score_test_need(self, human):
        """
        Score `Human`s according to some criterion. Highest score gets the test first.
        Note: this can be replaced by a better heuristic.

        Args:
            human (Human): `Human` object.

        Returns:
            (float): score value indicating chances of `Human` getting a test.
        """
        score = 0

        if SEVERE in human.symptoms or EXTREMELY_SEVERE in human.symptoms:
            score += self.conf['P_TEST_SEVERE']
        elif MODERATE in human.symptoms:
            score += self.conf['P_TEST_MODERATE']
        elif MILD in human.symptoms:
            score += self.conf['P_TEST_MILD']

        if human._test_recommended:
            score += self.conf['P_TEST_RECOMMENDED']

        return score
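The per-type daily budget in `get_available_test` can be illustrated in isolation: walk the preference list and hand out a test type only while its counter is below capacity. Test type names and capacities below are made up.

from collections import defaultdict

test_type_preference = ["lab", "rapid"]            # hypothetical names
max_capacity_per_test_type = {"lab": 2, "rapid": 1}
test_count_today = defaultdict(int)

def get_available_test():
    for test_type in test_type_preference:
        if test_count_today[test_type] < max_capacity_per_test_type[test_type]:
            test_count_today[test_type] += 1
            return test_type
    return None  # daily budget exhausted

print([get_available_test() for _ in range(4)])    # ['lab', 'lab', 'rapid', None]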
コード例 #29
0
ファイル: base.py プロジェクト: walexi/covid_p2p_simulation
class Location(simpy.Resource):

    def __init__(self, env, rng, area, name, location_type, lat, lon,
            social_contact_factor, capacity, surface_prob):

        if capacity is None:
            capacity = simpy.core.Infinity

        super().__init__(env, capacity)
        self.humans = OrderedSet() #OrderedSet instead of set for determinism when iterating
        self.name = name
        self.rng = rng
        self.lat = lat
        self.lon = lon
        self.area = area
        self.location_type = location_type
        self.social_contact_factor = social_contact_factor
        self.env = env
        self.contamination_timestamp = datetime.datetime.min
        self.contaminated_surface_probability = surface_prob
        self.max_day_contamination = 0

    def infectious_human(self):
        return any([h.is_infectious for h in self.humans])

    def __repr__(self):
        return f"{self.name} - occ:{len(self.humans)}/{self.capacity} - I:{self.infectious_human()}"

    def add_human(self, human):
        self.humans.add(human)
        if human.is_infectious:
            self.contamination_timestamp = self.env.timestamp
            rnd_surface = float(self.rng.choice(a=MAX_DAYS_CONTAMINATION, size=1, p=self.contaminated_surface_probability))
            self.max_day_contamination = max(self.max_day_contamination, rnd_surface)

    def remove_human(self, human):
        self.humans.remove(human)

    @property
    def is_contaminated(self):
        return self.env.timestamp - self.contamination_timestamp <= datetime.timedelta(days=self.max_day_contamination)

    @property
    def contamination_probability(self):
        if self.is_contaminated:
            lag = (self.env.timestamp - self.contamination_timestamp)
            lag /= datetime.timedelta(days=1)
            p_infection = 1 - lag / self.max_day_contamination # linear decay; &environmental_contamination
            return self.social_contact_factor * p_infection
        return 0.0

    def __hash__(self):
        return hash(self.name)

    def serialize(self):
        """ This function serializes the location object"""
        s = self.__dict__
        if s.get('env'):
            del s['env']
        if s.get('rng'):
            del s['rng']
        if s.get('_env'):
            del s['_env']
        if s.get('contamination_timestamp'):
            del s['contamination_timestamp']
        if s.get('residents'):
            del s['residents']
        if s.get('humans'):
            del s['humans']
        return s
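The `contamination_probability` property decays linearly from the moment of contamination to `max_day_contamination`. A worked example on toy numbers (a surface contaminated 1.5 days ago, contaminated for 3 days, at a location with social_contact_factor 0.4):

lag_days = 1.5
max_day_contamination = 3.0
social_contact_factor = 0.4
p_infection = 1 - lag_days / max_day_contamination   # 0.5, linear decay
print(social_contact_factor * p_infection)           # 0.2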
コード例 #30
0
ファイル: cmt_2phase.py プロジェクト: mschmidt87/VeRyPy
def _phase_two(mu_multiplier,route_seeds, D,d,C,L, rr,
               choose_most_associated_route = True,
               repeated_association_with_n_routes=1):
    ## Step 0: reuse seed nodes from phase 1 to act as route seed points
    N = len(D)
    K = len(route_seeds)

    customer_nodes = list(range(1, N))
    if rr is not None: 
        #->stochastic version, resolve the ties randomly
        shuffle(customer_nodes)
    unrouted_nodes = OrderedSet(customer_nodes)
    unrouted_nodes.difference_update( route_seeds )
    
    # routes are stored in dict with a key of route seed,
    #  the route, demand, cost, and if it is updated are all stored
    routes = [ RouteState( [0, rs],            #initial route
                            d[rs] if C else 0, #initial demand 
                            D[0,rs]+D[rs,0],   #initial cost
                            True) for rs in route_seeds ]
    
    insertion_infeasible = [[False]*N for rs in route_seeds ] 
            
    if __debug__:
        log(DEBUG, "## Parallel route building phase with seeds %s ##" %
                  str(list(route_seeds)))
    
    ## Step 1.1: vectorized calculation of eps
    # TODO: this also calculates eps for depot and seeds. Omitting those would 
    #  be possible and save few CPU cycles, but it would make indexing more
    #  complex and because accuracy>simplicity>speed, it is the way it is.
        
    eps = ( np.tile(D[[0],:],(K, 1)) 
            +mu_multiplier*D[:,route_seeds].transpose()
            -np.tile(D[0,route_seeds],(N,1)).transpose() )
    
    associate_to_nth_best_route = 1
    insertions_made = False
    route_seed_idxs = []
    first_try = True
    
    try:
        while unrouted_nodes:                
            ## Main while loop bookkeeping
            if not route_seed_idxs:
                idxs = list(range(K))
                if rr is not None: #->stochastic version, construct routes in random order
                    shuffle(idxs)
                route_seed_idxs = deque(idxs)
                
                if not first_try:
                    # The CMT1979 exits when all routes have been tried
                    if repeated_association_with_n_routes is None:
                        break            
                
                    if not insertions_made:
                        associate_to_nth_best_route+=1 # for the next round
                    if associate_to_nth_best_route>\
                       repeated_association_with_n_routes:
                        break
                
                first_try = False
                insertions_made = False
                                
            ## Step 2.1: Choose a (any) route to add customers to.        
        
            # some nodes may have been routed, update these
            eps_unrouted = eps[:,unrouted_nodes]
            
            ## Step 1.2: Associate each node to a route
            # note: the assignments cannot be calculated beforehand, as we do 
            #  not know which customers will be (were?) "left over" in the 
            #  previous route building steps 3. 
            
            if associate_to_nth_best_route==1 or len(route_seed_idxs)==1:
                r_stars_unrouted = np.argmin(eps_unrouted[route_seed_idxs,:], axis=0)    
            else:
                ## note: an extension for the deterministic variant,
                # get smallest AND 2. smallest at the same time using argpartition
                #top = np.argsort(eps, axis=0)[:associate_with_n_routes, :]
                #r_stars = [ top[i,:] for i in range(associate_with_n_routes) ]
                if len(route_seed_idxs)<associate_to_nth_best_route:
                    route_seed_idxs = []
                    continue
                    
                #take_nth = min(len(route_seed_idxs), associate_to_nth_best_route)-1
                take_nth = associate_to_nth_best_route-1
                reorder_rows_per_col_idxs = np.argpartition(eps_unrouted[route_seed_idxs,:],
                                              take_nth, axis=0)
                nth_best, unrouted_node_order = np.where(
                        reorder_rows_per_col_idxs==take_nth)
                r_stars_unrouted = nth_best[ np.argsort(unrouted_node_order) ]
            
            if choose_most_associated_route:
                unique, counts = np.unique(r_stars_unrouted, return_counts=True)
                seed_idx_idx = unique[np.argmax(counts)]
                route_seed_idx = route_seed_idxs[seed_idx_idx]
                route_seed_idxs.remove(route_seed_idx)
                associated_cols = list(np.where(r_stars_unrouted==seed_idx_idx)[0])
            else:
                route_seed_idx = route_seed_idxs.popleft()
                associated_cols = list(np.where(r_stars_unrouted==0)[0])
                
            route,route_demand,route_cost,route_l_updated = routes[route_seed_idx]
                
            ## Step 2.2: Vectorized calculation of sigma score for the customers
            #             associated to the chosen route.
            eps_bar = eps_unrouted[route_seed_idx,associated_cols]
            
            # NOTE: CMT 1979 does not specify what happens if S is empty, we assume
            #  we need (and can) omit the calculation of eps_prime in this case.
            
            brdcast_rs_idxs = [[rsi] for rsi in route_seed_idxs]
            if route_seed_idxs:
                eps_prime = np.min(eps_unrouted[brdcast_rs_idxs, associated_cols],
                                   axis=0)
                sigmas = eps_prime-eps_bar
            else:
                # last route, try to add rest of the nodes
                eps_prime = None
                sigmas = -eps_bar
                
            col_to_node = [unrouted_nodes[c] for c in associated_cols]
            sigma_ls = sorted(zip(sigmas.tolist(), col_to_node), reverse=True)
            
            if __debug__:
                log(DEBUG, "Assigning associated nodes %s to a route %s (seed n%d)"%
                          (str(col_to_node), str(route+[0]),route_seeds[route_seed_idx]))
            
            ## Step 3: insert feasible customers from the biggest sigma first
            for sigma, l_star in sigma_ls:
                if __debug__:
                    log(DEBUG-1, "Check feasibility of inserting "+\
                        "n%d with sigma=%.2f"%(l_star,sigma))
                
                if C and route_demand+d[l_star]-C_EPS>C:
                    if __debug__:
                        log(DEBUG-1, "Insertion would break C constraint.")
                    continue
                
                # use cached L feasibility check
                if L and insertion_infeasible[route_seed_idx][l_star]:
                    continue
                
                # Do not run TSP algorithm after every insertion, instead calculate
            #  a simple upper bound for the route_cost and use that.
                UB_route_cost = (route_cost-
                                   D[route[-1],0]+
                                   D[route[-1],l_star]+D[l_star,0])
                if L and UB_route_cost-S_EPS>L:
                       
                    # check the real TSP cost
                    new_route, new_route_cost = solve_tsp(D, route+[l_star])
    
                    if __debug__:
                        log(DEBUG-1, "Got TSP solution %s (%.2f)" %
                            (str(new_route), new_route_cost, ))
                        
                    if new_route_cost-S_EPS>L:
                        if __debug__:
                            log(DEBUG-1,"DEBUG: Insertion would break L constraint.")
                        insertion_infeasible[route_seed_idx][l_star] = True
                        continue
                    
                    route_cost = new_route_cost
                    route=new_route[:-1]
                    route_l_updated = True
                else:
                    route_l_updated = False
                    route_cost = UB_route_cost
                    route = route+[l_star]
                
                if C: route_demand+=d[l_star]
                unrouted_nodes.remove(l_star)
                insertions_made = True
                
                if __debug__:
                    log(DEBUG, "Inserted n%d to create a route %s."%(l_star, route))
        
        
            # All feasible insertions of the associated customers is done, record
            #  the modified route.
            if insertions_made:
                routes[route_seed_idx] =  RouteState( route,       #updated route
                                                  route_demand,    #updated demand
                                                  route_cost,      #updated cost 
                                                  route_l_updated) #cost state
    except KeyboardInterrupt: #or SIGINT
        rs_sol, _ = _routestates2solution(routes, D)
        interrupted_sol = rs_sol[:-1]+routes2sol([n] for n in unrouted_nodes
                                                if n not in rs_sol)
        raise KeyboardInterrupt(interrupted_sol)
    
    ## Step 4: Redo step 1 or construct the solution and exit 
    if len(unrouted_nodes)>0:
        if __debug__:
            log(DEBUG, "Phase 2 failed to create feasbile solution with %d routes."%K)
            log(DEBUG-1, "Nodes %s remain unrouted."%str(list(unrouted_nodes)))
        return 0, None, None, rr
    else:
        sol, total_cost = _routestates2solution(routes, D)
        if __debug__:
            log(DEBUG, "Phase 2 solution %s (%.2f) complete."%(str(sol),total_cost))        
        return K, sol, total_cost, rr
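The vectorized `eps` computation in Step 1.1 amounts to eps[k, j] = D[0, j] + mu * D[j, seed_k] - D[0, seed_k]. A small self-contained check of that identity on a toy symmetric distance matrix:

import numpy as np

D = np.array([[0., 4., 5., 7.],
              [4., 0., 3., 6.],
              [5., 3., 0., 2.],
              [7., 6., 2., 0.]])
route_seeds = [1, 3]
mu, N, K = 1.0, len(D), len(route_seeds)

eps = (np.tile(D[[0], :], (K, 1))
       + mu * D[:, route_seeds].transpose()
       - np.tile(D[0, route_seeds], (N, 1)).transpose())

k, j = 0, 2
assert np.isclose(eps[k, j],
                  D[0, j] + mu * D[j, route_seeds[k]] - D[0, route_seeds[k]])
print(eps)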
コード例 #31
0
class SortedIOMemory(IOMemory):
    """\
            A sorted triples flavor of the IOMemory
    """
    context_aware = True
    formula_aware = True
    graph_aware = True

    # The following variable name conventions are used in this class:
    #
    # subject, predicate, object             unencoded triple parts
    # triple = (subject, predicate, object)  unencoded triple
    # context:                               unencoded context
    #
    # sid, pid, oid                          integer-encoded triple parts
    # enctriple = (sid, pid, oid)            integer-encoded triple
    # cid                                    integer-encoded context

    def __init__(self, configuration=None, identifier=None):
        IOMemory.__init__(self, configuration, identifier)
        self.__namespace = OrderedDict() #{}
        self.__prefix = OrderedDict() #{}

        # Mappings for encoding RDF nodes using integer keys, to save memory
        # in the indexes Note that None is always mapped to itself, to make
        # it easy to test for it in either encoded or unencoded form.
        #self.__int2obj = {None: None}  # maps integer keys to objects
        #self.__obj2int = {None: None}  # maps objects to integer keys
        self.__int2obj = OrderedDict()
        self.__int2obj[None] = None
        self.__obj2int = OrderedDict()
        self.__obj2int[None] = None
 

        # Indexes for each triple part, and a list of contexts for each triple
        self.__subjectIndex = OrderedDict() #{}    # key: sid    val: set(enctriples)
        self.__predicateIndex = OrderedDict() #{}  # key: pid    val: set(enctriples)
        self.__objectIndex = OrderedDict() #{}     # key: oid    val: set(enctriples)
        self.__tripleContexts = OrderedDict() #{}  # key: enctriple    val: {cid1: quoted, cid2: quoted ...}
        #self.__contextTriples = {None: set()}  # key: cid    val: set(enctriples)
        self.__contextTriples = OrderedDict()
        self.__contextTriples[None] = OrderedSet()

        # all contexts used in store (unencoded)
        self.__all_contexts = OrderedSet() #set()
        # default context information for triples
        self.__defaultContexts = None

    def add(self, triple, context, quoted=False):
        Store.add(self, triple, context, quoted)

        if context is not None:
            self.__all_contexts.add(context)

        enctriple = self.__encodeTriple(triple)
        sid, pid, oid = enctriple

        self.__addTripleContext(enctriple, context, quoted)

        if sid in self.__subjectIndex:
            self.__subjectIndex[sid].add(enctriple)
        else:
            self.__subjectIndex[sid] = OrderedSet([enctriple])

        if pid in self.__predicateIndex:
            self.__predicateIndex[pid].add(enctriple)
        else:
            self.__predicateIndex[pid] = OrderedSet([enctriple])

        if oid in self.__objectIndex:
            self.__objectIndex[oid].add(enctriple)
        else:
            self.__objectIndex[oid] = OrderedSet([enctriple])

    def remove(self, triplepat, context=None):
        req_cid = self.__obj2id(context)
        for triple, contexts in self.triples(triplepat, context):
            enctriple = self.__encodeTriple(triple)
            for cid in self.__getTripleContexts(enctriple):
                if context is not None and req_cid != cid:
                    continue
                self.__removeTripleContext(enctriple, cid)
            ctxs = self.__getTripleContexts(enctriple, skipQuoted=True)
            if None in ctxs and (context is None or len(ctxs) == 1):
                self.__removeTripleContext(enctriple, None)
            if len(self.__getTripleContexts(enctriple)) == 0:
                # triple has been removed from all contexts
                sid, pid, oid = enctriple
                self.__subjectIndex[sid].remove(enctriple)
                self.__predicateIndex[pid].remove(enctriple)
                self.__objectIndex[oid].remove(enctriple)

                del self.__tripleContexts[enctriple]

        if req_cid is not None and \
                req_cid in self.__contextTriples and \
                len(self.__contextTriples[req_cid]) == 0:
            # all triples are removed out of this context
            # and it's not the default context so delete it
            del self.__contextTriples[req_cid]

        if triplepat == (None, None, None) and \
                context in self.__all_contexts and \
                not self.graph_aware:
            # remove the whole context
            self.__all_contexts.remove(context)



    def triples(self, triplein, context=None):
        if context is not None:
            if context == self:  # hmm...does this really ever happen?
                context = None

        cid = self.__obj2id(context)
        enctriple = self.__encodeTriple(triplein)
        sid, pid, oid = enctriple

        # all triples case (no triple parts given as pattern)
        if sid is None and pid is None and oid is None:
            return self.__all_triples(cid)

        # optimize "triple in graph" case (all parts given)
        if sid is not None and pid is not None and oid is not None:
            if sid in self.__subjectIndex and \
               enctriple in self.__subjectIndex[sid] and \
               self.__tripleHasContext(enctriple, cid):
                return ((triplein, self.__contexts(enctriple)) for i in [0])
            else:
                return self.__emptygen()

        # remaining cases: one or two out of three given
        sets = []
        if sid is not None:
            if sid in self.__subjectIndex:
                sets.append(self.__subjectIndex[sid])
            else:
                return self.__emptygen()
        if pid is not None:
            if pid in self.__predicateIndex:
                sets.append(self.__predicateIndex[pid])
            else:
                return self.__emptygen()
        if oid is not None:
            if oid in self.__objectIndex:
                sets.append(self.__objectIndex[oid])
            else:
                return self.__emptygen()

        # to get the result, do an intersection of the sets (if necessary)
        if len(sets) > 1:
            enctriples = sets[0].intersection(*sets[1:])
        else:
            enctriples = sets[0].copy()

        print("context:")
        print(context)
        print("triplein:")
        print(triplein)
        for enctriple in enctriples:
            print("  decodeTriple:")
            print(self.__decodeTriple(enctriple))
            print("---")

        return ((self.__decodeTriple(enctriple), self.__contexts(enctriple))
                for enctriple in enctriples
                if self.__tripleHasContext(enctriple, cid))

    def contexts(self, triple=None):
        if triple is None or triple == (None, None, None):
            return (context for context in self.__all_contexts)

        enctriple = self.__encodeTriple(triple)
        sid, pid, oid = enctriple
        if sid in self.__subjectIndex and enctriple in self.__subjectIndex[sid]:
            return self.__contexts(enctriple)
        else:
            return self.__emptygen()

    def __len__(self, context=None):
        cid = self.__obj2id(context)
        if cid not in self.__contextTriples:
            return 0
        return len(self.__contextTriples[cid])

    def add_graph(self, graph):
        if not self.graph_aware:
            Store.add_graph(self, graph)
        else:
            self.__all_contexts.add(graph)

    def remove_graph(self, graph):
        if not self.graph_aware:
            Store.remove_graph(self, graph)
        else:
            self.remove((None, None, None), graph)
            try:
                self.__all_contexts.remove(graph)
            except KeyError:
                pass  # we didn't know this graph, no problem

    # internal utility methods below

    def __addTripleContext(self, enctriple, context, quoted):
        """add the given context to the set of contexts for the triple"""
        cid = self.__obj2id(context)

        sid, pid, oid = enctriple
        if sid in self.__subjectIndex and enctriple in self.__subjectIndex[sid]:
            # we know the triple exists somewhere in the store
            if enctriple not in self.__tripleContexts:
                # triple exists with default ctx info
                # start with a copy of the default ctx info
                self.__tripleContexts[
                    enctriple] = self.__defaultContexts.copy()

            self.__tripleContexts[enctriple][cid] = quoted
            if not quoted:
                self.__tripleContexts[enctriple][None] = quoted
        else:
            # the triple didn't exist before in the store
            if quoted:  # this context only
                self.__tripleContexts[enctriple] = {cid: quoted}
            else:  # default context as well
                self.__tripleContexts[enctriple] = {cid: quoted, None: quoted}

        # if the triple is not quoted add it to the default context
        if not quoted:
            self.__contextTriples[None].add(enctriple)

        # always add the triple to given context, making sure it's initialized
        if cid not in self.__contextTriples:
            self.__contextTriples[cid] = OrderedSet() #set()
        self.__contextTriples[cid].add(enctriple)

        # if this is the first ever triple in the store, set default ctx info
        if self.__defaultContexts is None:
            self.__defaultContexts = self.__tripleContexts[enctriple]

        # if the context info is the same as default, no need to store it
        if self.__tripleContexts[enctriple] == self.__defaultContexts:
            del self.__tripleContexts[enctriple]

    def __getTripleContexts(self, enctriple, skipQuoted=False):
        """return a list of (encoded) contexts for the triple, skipping
           quoted contexts if skipQuoted==True"""

        ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts)

        if not skipQuoted:
            return ctxs.keys()

        return [cid for cid, quoted in ctxs.items() if not quoted]

    def __tripleHasContext(self, enctriple, cid):
        """return True iff the triple exists in the given context"""
        ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts)
        return (cid in ctxs)

    def __removeTripleContext(self, enctriple, cid):
        """remove the context from the triple"""
        ctxs = self.__tripleContexts.get(
            enctriple, self.__defaultContexts).copy()
        del ctxs[cid]
        if ctxs == self.__defaultContexts:
            del self.__tripleContexts[enctriple]
        else:
            self.__tripleContexts[enctriple] = ctxs
        self.__contextTriples[cid].remove(enctriple)

    def __obj2id(self, obj):
        """encode object, storing it in the encoding map if necessary,
           and return the integer key"""
        if obj not in self.__obj2int:
            id = randid()
            while id in self.__int2obj:
                id = randid()
            self.__obj2int[obj] = id
            self.__int2obj[id] = obj
            return id
        return self.__obj2int[obj]

    def __encodeTriple(self, triple):
        """encode a whole triple, returning the encoded triple"""
        return tuple(map(self.__obj2id, triple))

    def __decodeTriple(self, enctriple):
        """decode a whole encoded triple, returning the original
        triple"""
        return tuple(map(self.__int2obj.get, enctriple))

    def __all_triples(self, cid):
        """return a generator which yields all the triples (unencoded)
           of the given context"""
        if cid not in self.__contextTriples:
            return
        for enctriple in self.__contextTriples[cid].copy():
            yield self.__decodeTriple(enctriple), self.__contexts(enctriple)

    def __contexts(self, enctriple):
        """return a generator for all the non-quoted contexts
           (unencoded) the encoded triple appears in"""
        return (self.__int2obj.get(cid) for cid in self.__getTripleContexts(enctriple, skipQuoted=True) if cid is not None)

    def __emptygen(self):
        """return an empty generator"""
        if False:
            yield
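The `triples()` lookup above keeps one index per triple position and intersects the index sets for the bound parts of the pattern. A stripped-down sketch of that idea with plain dicts and sets (no integer encoding, no contexts):

from collections import defaultdict

data = [("alice", "knows", "bob"), ("alice", "likes", "pizza"),
        ("bob", "knows", "carol")]
s_idx, p_idx, o_idx = defaultdict(set), defaultdict(set), defaultdict(set)
for t in data:
    s, p, o = t
    s_idx[s].add(t); p_idx[p].add(t); o_idx[o].add(t)

def match(s=None, p=None, o=None):
    sets = [idx[key] for idx, key in ((s_idx, s), (p_idx, p), (o_idx, o))
            if key is not None]
    if not sets:                       # fully unbound pattern: everything
        return set(data)
    return set.intersection(*sets)     # intersect indexes of the bound parts

print(match(s="alice", p="knows"))     # {('alice', 'knows', 'bob')}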
コード例 #32
0
ファイル: manager.py プロジェクト: mstefany/sgmanager
    def update_remote_groups(self, dry_run=True, threshold=None, remove=True):
        '''Update remote configuration with the local one.'''
        # Copy those so that we can modify them even with dry-run
        local = OrderedSet(self.local)
        remote = OrderedSet(self.remote)

        validate_groups(local)

        def parse_groups(groups, remote):
            if remote:
                self._process_remote_groups(groups)
            groups = {
                group.name: group
                for group in groups if group.name != 'default'
            }
            keys = OrderedSet(groups.keys())
            return groups, keys

        lgroups, lkeys = parse_groups(local, False)
        rgroups, rkeys = parse_groups(remote, True)

        changes = 0
        unchanged = 0
        groups_added = OrderedSet()
        groups_updated = OrderedSet()
        groups_removed = OrderedSet()
        rules_added = OrderedSet()
        rules_removed = OrderedSet()

        # Added groups
        for group in (lgroups[name] for name in lkeys - rkeys):
            grp = Group(group.name, group.description)
            groups_added.add(grp)
            rgroups[group.name] = grp
            rkeys.add(grp.name)
            changes += 1

        # Changed groups
        for rgroup, lgroup in ((rgroups[name], lgroups[name])
                               for name in rkeys & lkeys):
            if rgroup not in groups_added:
                unchanged += 1

            if rgroup.description != lgroup.description:
                # XXX: https://review.openstack.org/596609
                # groups_updated.add((rgroup, lgroup))
                pass

            # FIXME: when comparing using OrderedSet, added rules part contains
            #        all elements rather than different ones.
            lrules, rrules = set(lgroup.rules), set(rgroup.rules)

            if rrules != lrules:
                # Added rules
                for rule in lrules - rrules:
                    rules_added.add((rgroup.name, rule))
                    changes += 1

                # Removed rules
                for rule in rrules - lrules:
                    if remove:
                        rules_removed.add((rgroup.name, rule))
                        changes += 1
                    else:
                        unchanged += 1
            unchanged += len(rrules & lrules)

        # Removed groups
        for group in (rgroups[name] for name in rkeys - lkeys):
            if remove:
                if group._project is None:
                    continue
                groups_removed.add(group)
                changes += len(group.rules) + 1
            else:
                unchanged += len(group.rules) + 1

        if changes == 0 and not groups_updated:
            return

        # Report result
        logger.info(f'{changes:d} changes to be made:')
        for group in groups_added:
            logger.info(f'  - Create group {group.name!r}')
        for rgroup, lgroup in groups_updated:
            logger.info(f'  - Update description for {rgroup.name!r}:'
                        f' {rgroup.description!r} → {lgroup.description!r}')
        for group_name, rule in rules_added:
            logger.info(f'  - Create {rule!r} in group {group_name!r}')
        for group_name, rule in rules_removed:
            logger.info(f'  - Remove {rule!r} from group {group_name!r}')
        for group in groups_removed:
            logger.info(
                f'  - Remove group {group.name!r} with {len(group.rules)} rules'
            )

        if threshold is not None:
            changes_percentage = changes / (unchanged + changes) * 100
            if changes_percentage > threshold:
                raise ThresholdException(
                    f'Amount of changes is {changes_percentage:f}%'
                    f' which is more than allowed ({threshold:f}%)')

        if dry_run:
            return

        # We've modified 'remote', so copy it again
        remote = OrderedSet(self.remote)
        rgroups, rkeys = parse_groups(remote, True)

        # Added groups
        for group in groups_added:
            ginfo = self.connection.create_security_group(
                name=group.name, description=group.description)
            remote.add(Group.from_remote(**ginfo))
        rgroups, rkeys = parse_groups(remote, True)

        # Updated groups
        for rgroup, lgroup in groups_updated:
            self.connection.update_security_group(
                name_or_id=rgroup._id, description=lgroup.description)
            # Updating group should not change its ID
            rgroup.description = lgroup.description

        # Added rules
        for group_name, rule in rules_added:
            rgroup = rgroups[group_name]
            cidr = str(rule.cidr) if rule.cidr is not None else None
            group_id = rgroups[
                rule.group]._id if rule.group is not None else None
            protocol = rule.protocol.value if rule.protocol is not None else None
            rinfo = self.connection.create_security_group_rule(
                secgroup_name_or_id=rgroup._id,
                port_range_min=rule.port_min,
                port_range_max=rule.port_max,
                protocol=protocol,
                remote_ip_prefix=cidr,
                remote_group_id=group_id,
                direction=rule.direction.value,
                ethertype=rule.ethertype.value)
            rgroup.rules.add(Rule.from_remote(**rinfo))

        if remove:
            # Removed rules
            for group_name, rule in rules_removed:
                rgroup = rgroups[group_name]
                self.connection.delete_security_group_rule(rule_id=rule._id)
                rgroup.rules.remove(rule)

            # Removed groups
            for group in groups_removed:
                self.connection.delete_security_group(name_or_id=group._id)
                remote.remove(group)

        self.remote = remote
コード例 #33
0
ファイル: moleculetype.py プロジェクト: evanfeinberg/InterMol
class MoleculeType(object):
    """An abstract container for molecules of one type
    """
    def __init__(self, name):
        """Initialize the MoleculeType container

        Args:
            name (str): the name of the moleculetype to add
        """
        self.name = name
        self.moleculeSet = OrderedSet()
        self.bondForceSet = HashMap()
        self.pairForceSet = HashMap()
        self.angleForceSet = HashMap()
        self.dihedralForceSet = HashMap()
        self.torsiontorsionForceSet = HashMap()
        self.constraints = HashMap()
        self.exclusions = HashMap()
        self.settles = None
        self.nrexcl = None

    def add_molecule(self, molecule):
        """Add a molecule into the moleculetype container

        Args:
            molecule (Molecule): the molecule to append
        """
        self.moleculeSet.add(molecule)

    def remove_molecule(self, molecule):
        """Remove a molecule from the system.

        Args:
            molecule (Molecule): remove a molecule from the moleculeType
        """
        self.moleculeSet.remove(molecule)

    def getMolecule(self, molecule):
        """Get a molecule from the system

        Args:
            molecule (Molecule): retrieve an equivalent molecule from the moleculetype
        """
        return get_equivalent(self.moleculeSet, molecule, False)

    def addForce(self, force):
        """Add a forces to the moleculeType

        Args:
            forces (AbstractForce): Add a forces or contraint to the moleculeType
        """
        self.forceSet.add(force)

    def removeForce(self, force):
        """Remove a forces from the moleculeType

        Args:
            forces (AbstractForce): Remove a forces from the moleculeType
        """
        self.forceSet.remove(force)

    def getForce(self, force):
        """Get a forces from the moleculeType

        Args:
            forces (AbstractForce): Retrieve a forces from the moleculeType
        """
        return get_equivalent(self.forceSet, force, False)

    def setNrexcl(self, nrexcl):
        """Set the nrexcl

        Args:
            nrexcl (int): the value for nrexcl
        """
        self.nrexcl = nrexcl

    def getNrexcl(self):
        """Gets the nrexcl
        """
        return self.nrexcl
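A small usage sketch for `MoleculeType`, assuming the module-level `HashMap` and `OrderedSet` helpers it constructs are importable; the "molecules" below are plain strings purely for illustration (the real container holds `Molecule` objects).

mt = MoleculeType("SOL")
for m in ("water-1", "water-2", "water-1"):
    mt.add_molecule(m)                 # duplicates collapse, insertion order is kept
print(list(mt.moleculeSet))            # ['water-1', 'water-2']
mt.remove_molecule("water-2")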
コード例 #34
0
class QueryModel(object):
    """
       The QueryModel class represents the intermediate object between the DAG graph and the ultimate SPARQL query.
       """
    def __init__(self):
        """
        Initializing the QueryModel
        QueryModel is a representation of a sparql query. It has a place holder for every
        possible componne of a sparql query
        """

        self.prefixes = {}  # a dictionary of prefix_name: prefix_URI

        self.variables = set()  # a set of all variables in the query
        self.from_clause = set()  # a list of graph URIs
        self.filter_clause = {}  # a dictionary of column name as key and associated conditions as a value
        self.groupBy_columns = OrderedSet()  # a set of columns for the GROUP BY modifier; a subset of self.variables
        self.aggregate_clause = {}  # a dictionary of new_aggregation_col_name: (aggregate function, src_column_name)
        self.having_clause = {}  # a dictionary of new_aggregation_col_name: condition
        self.order_clause = OrderedDict()  # a dictionary of columns and the order specifier (ASC, DESC)

        self.limit = 0  # the number of rows to be returned by the query
        self.offset = 0  # the offset in terms of the number of rows

        self.triples = []  # list of basic graph patterns as (subject, predicate, object) tuples
        self.optionals = []  # list of optional query models
        self.subqueries = []  # list of subqueries; each subquery is a query model
        self.optional_subqueries = []  # list of optional subqueries; each subquery is a query model
        self.unions = []  # list of subqueries to union with the current query model
        self.graph_triples = {}  # dict of graph: list of triples, when there is more than one triple in the graph

        self.select_columns = OrderedSet()  # list of columns to be selected
        self.auto_generated_select_columns = OrderedSet()
        self.select_all = False

        self.querybuilder = None  # a SPARQLbuilder that converts the query model to a string
        self.parent_query_model = None  # a pointer to the parent query if this is a subquery
        self.is_optional = False

    def add_prefixes(self, prefixes):
        """
        Add a dictionary of prefixes to the SPARQL query
        :param prefixes: a dictionary of prefixes where the key is the prefix name and the value is the prefix URI
        """
        if not self.is_subquery():
            self.prefixes.update(prefixes)

    def add_graphs(self, graphs):
        """
        Add a list of graphs to the from clause
        :param graphs: a list of graphs' URIs
        """
        if not self.is_subquery():
            self.from_clause = self.from_clause.union(graphs)  #extend

    def add_optional_triples(self, triples, graph=None):
        """
         add a list of triples as an OPTIONAL block in the query model.
         :param triples: list of (subject, predicate, object) tuples
         :param graph: optional named graph the triples belong to
         """
        if len(triples) > 0:
            optional_query_model = OptionalQueryModel()
            for (subject, predicate, object) in triples:
                optional_query_model.add_triple(subject, predicate, object)
            self.optionals.append(optional_query_model)
            return optional_query_model

    def add_optional_block(self, optional_query_model):
        """
         add an existing optional block to the query model.
         :param optional_query_model: an OptionalQueryModel to append to the list of optionals
         """
        self.optionals.append(optional_query_model)

    def add_triple(self, subject, predicate, object):
        """
         add a triple to the list of the triples in the query model.
         :param subject: subject of the triple
         :param object: object of the triple
         :param predicate: predicate of the triple
         """
        if (subject, predicate, object) not in self.triples:
            self.triples.append((subject, predicate, object))
            self.add_variable(subject)
            self.add_variable(object)
            self.add_variable(predicate)

    def add_graph_triple(self, graph, triples):
        self.graph_triples[graph] = triples

    def add_unions(self, unionquery):  # subquery type is query_builder
        """
        adds a query model to be unioned with the current query model
        :param unionquery: the query model to union with this one
        :return:
        """
        if len(unionquery.select_columns) <= 0 and len(
                unionquery.auto_generated_select_columns) <= 0:
            #if len(unionquery.select_columns) <= 0:
            unionquery.select_all = True
        self.unions.append(unionquery)
        unionquery.parent_query_model = weakref.ref(self)
        #unionquery.from_clause.clear()

    def add_subquery(self, subquery):  # subquery type is query_builder
        """
        adds a subquery to the query model
        :param subquery:
        :return:
        """
        self.subqueries.append(subquery)
        subquery.parent_query_model = weakref.ref(self)
        subquery.from_clause.clear()

    def add_optional_subquery(self,
                              subquery):  # subquery type is query_builder
        """
        adds an optional subquery to the query model
        :param subquery:
        :return:
        """
        self.optional_subqueries.append(subquery)
        subquery.parent_query_model = weakref.ref(self)
        subquery.from_clause.clear()

    def add_variable(self, col_name):
        """
        add a variable (column name) to the set of variables of a single SPARQL query (mainly to represent
        SELECT variables).
        :param col_name: the column name after being parsed from the corresponding DAG node.

        """
        if not is_uri(col_name):
            if col_name.find(":") < 0:
                self.variables.add(col_name)
            elif col_name[:col_name.find(":")] not in self.prefixes:
                self.variables.add(col_name)

    def add_group_columns(self, col_names):
        """
         add columns to the set of GROUP BY columns.
        :param col_names: the column names used to group the records.
        """
        self.groupBy_columns = self.groupBy_columns.union(col_names)

    def add_aggregate_pair(self,
                           src_col_name,
                           func_name,
                           new_col_name,
                           agg_param=None):
        """
         add a pair of column, function name to the list that forms the aggregation clause
         :param src_col_name: the source column name to be aggregated
         :param new_col_name: the new column name
         :param func_name: represents aggregation function on the corresponding column
         :param agg_param: aggregation parameter like distinct with count
         """
        if new_col_name not in self.aggregate_clause:
            self.aggregate_clause[new_col_name] = []
        self.aggregate_clause[new_col_name].append(
            (func_name, agg_param, src_col_name))
        self.variables.add(new_col_name)

    def add_filter_condition(self, col_name, condition):
        """
        add a pair of (column, condition) to the list of conditions of the filter clause
        :param col_name: represents the column name at which the condition will be applied.
        :param condition: represents the filtering criterion ( Operator, Value)
         """
        if col_name not in self.variables:
            self.add_variable(col_name)
        if col_name in self.filter_clause:
            self.filter_clause[col_name].append(condition)
        else:
            self.filter_clause[col_name] = [condition]

    def add_having_condition(self, agg_col_name, condition):
        """
        add a pair of (column, condition) to the list of conditions of the having clause
        :param agg_col_name: represents the column name where the filtering will occur.
        :param condition: represents the having criterion ( Operator Value)
        """
        if agg_col_name not in self.having_clause:
            self.having_clause[agg_col_name] = []
        func_name, agg_param, src_col_name = self.aggregate_clause[
            agg_col_name][0]
        self.having_clause[agg_col_name].append(
            [func_name, agg_param, src_col_name, condition])

    def add_order_columns(self, sorting_cols):
        """
        add a pair of (column, specifier) to the list of sorting options.
        :param sorting_cols: list of pairs of (column name, sort order) that will be used for sorting
        """
        for col, order in sorting_cols.items():
            self.order_clause[col] = order

    def set_limit(self, limit):
        """
        :param limit: the value that represents the number of results to be returned
        :return: none
        """
        self.limit = limit

    def set_offset(self, offset):
        """

        :param offset: the number of rows to skip before results are returned
        :return: none
        """
        self.offset = offset

    def add_select_column(self, col_name):
        """
        add a column to the SELECT clause.
        :param col_name: the column name to select
        """
        self.select_columns.add(col_name)

    def auto_add_select_column(self, col_name):
        #print("Auto adding {}".format(col_name))
        self.auto_generated_select_columns.add(col_name)

    def rem_select_column(self, col_name):
        self.select_columns.remove(col_name)

    def rem_all_triples(self):
        self.triples = []

    def rem_graph_triples(self):
        self.graph_triples = {}

    def rem_from_clause(self):
        self.from_clause = set()

    def rem_prefixes(self):
        self.prefixes = {}

    def rem_optional_triples(self):
        self.optionals = []

    def rem_filters(self):
        self.filter_clause = {}

    def rem_subqueries(self):
        self.subqueries = []

    def rem_optional_subqueries(self):
        self.optional_subqueries = []

    def rem_unions(self):
        self.unions = []

    def transfer_grouping_to_subquery(self, subquery):
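        # moves the grouped columns (and the triples that bind them), the HAVING
        # clause and the aggregate clause into the given subquery, then registers
        # it as a subquery of this model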
        grouping_cols = self.groupBy_columns

        for g_col in grouping_cols:
            involved_triples = [
                triple for triple in self.triples
                if g_col == triple[0] or g_col == triple[2]
            ]
            for t in involved_triples:
                subquery.add_triple(*t)

        subquery.groupBy_columns = OrderedSet(grouping_cols)
        subquery.select_columns = OrderedSet(grouping_cols)
        subquery.having_clause = dict(self.having_clause)
        subquery.aggregate_clause = dict(self.aggregate_clause)

        self.groupBy_columns.clear()
        self.having_clause.clear()
        self.aggregate_clause.clear()
        self.add_subquery(subquery)

    @staticmethod
    def clean_inner_qm(qm):
        # clean the inner query (self)
        qm.rem_prefixes()
        qm.rem_from_clause()
        qm.limit = 0
        qm.offset = 0
        qm.order_clause = OrderedDict()

    def wrap_in_a_parent_query(self):
        """
        wraps the current query in a subquery and returns a new query model that contains one graph pattern which is
        the current query as a subquery
        :return: a new QueryModel that wraps the current query model
        """
        # initialize the parent query with the graph uri, the prefixes and the variables in the inner query
        parent_query = QueryModel()
        parent_query.add_prefixes(self.prefixes)
        parent_query.add_graphs(self.from_clause)
        to_add_to_select = []
        to_rem_from_select = []
        for var in self.select_columns:
            # if select column in groupby or aggregation: add it to selected columns by the user in inner query
            #  and the outer query.
            #  else: find the relevant graph patterns and move them to the outer query and
            #  remove the select column from select clause in inner query
            if (var in self.groupBy_columns) or (var in self.aggregate_clause):
                to_add_to_select.append(var)
            else:
                # add basic graph patterns
                involved_triples = [
                    triple for triple in self.triples
                    if var == triple[0] or var == triple[2]
                ]
                for t in involved_triples:
                    parent_query.add_triple(*t)
                # add filter patterns
                if var in self.filter_clause:
                    for condition in self.filter_clause[var]:
                        parent_query.add_filter_condition(var, condition)
                # add subqueries
                # Is it query.select or query.variables
                for subquery in self.subqueries:
                    if var in subquery.select_columns:
                        parent_query.add_subquery(subquery)
                to_rem_from_select.append(var)
            parent_query.auto_add_select_column(var)
            parent_query.add_variable(var)
        for var in to_add_to_select:
            self.auto_add_select_column(var)
        for var in to_rem_from_select:
            self.rem_select_column(var)

        # set the limit and offset of the outer query. don't allow limit and offset in the inner query
        parent_query.set_limit(self.limit)
        parent_query.set_offset(self.offset)
        parent_query.add_order_columns(self.order_clause)
        # add self to the subqueries in the parent subquery
        parent_query.add_subquery(self)
        self.parent_query_model = parent_query

        # clean the inner query (self)
        QueryModel.clean_inner_qm(self)
        #self.prefixes = {}
        #self.rem_from_clause()
        #self.limit = 0
        #self.offset = 0
        #self.order_clause = OrderedDict()
        #self.rem_from_clause()

        return parent_query
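    # In short: columns that are grouped or aggregated stay in this (inner) query,
    # while the remaining select columns, their triples, filters and related
    # subqueries are lifted into the new outer QueryModel together with
    # LIMIT/OFFSET/ORDER BY, so the builder can render the result as a nested
    # sub-SELECT.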

    def transfer_select_triples_to_parent_query(self, parent_ds_cols):
        # pull the triples that bind the given parent columns out of the subqueries into this query model
        for col in parent_ds_cols:
            triples_list = [subquery.triples for subquery in self.subqueries]
            involved_triples = [
                triple for triples in triples_list for triple in triples
                if col == triple[0] or col == triple[2]
            ]
            for t in involved_triples:
                if t not in self.triples:
                    self.add_triple(*t)

    def is_defined_variable(self, var):
        return var in self.variables or any([
            subquery.is_defined_variable(var) for subquery in self.subqueries
        ])

    def is_grouped(self):
        return len(self.groupBy_columns) > 0

    def is_sorted(self):
        return len(self.order_clause) > 0

    def to_sparql(self):
        #self.validate()
        self.querybuilder = SPARQLBuilder()
        return self.querybuilder.to_sparql(self)

    def is_aggregate_col(self, src_col_name):
        if src_col_name in self.aggregate_clause:
            return True
        return False

    def is_subquery(self):
        return self.parent_query_model is not None

    def all_variables(self):
        if len(self.subqueries) == 0:
            return self.variables

        all_vars = set().union(self.variables)

        for subq in self.subqueries:
            all_vars = all_vars.union(subq.all_variables())
        return all_vars

    def rename_variable(self, old_name, new_name):
        self.triples = [tuple(
            new_name if element == old_name else element for element in triple
        ) for triple in self.triples]
        for query in self.optionals:
            query.rename_variable(old_name, new_name)
        self.select_columns = OrderedSet([
            new_name if var == old_name else var for var in self.select_columns
        ])
        self.auto_generated_select_columns = OrderedSet([
            new_name if var == old_name else var
            for var in self.auto_generated_select_columns
        ])
        self.groupBy_columns = OrderedSet([
            new_name if var == old_name else var
            for var in self.groupBy_columns
        ])
        self.variables = {
            new_name if var == old_name else var
            for var in self.variables
        }
        if old_name in self.order_clause:
            self.order_clause[new_name] = self.order_clause[old_name]
            del self.order_clause[old_name]
        if old_name in self.filter_clause:
            self.filter_clause[new_name] = self.filter_clause[old_name]
            del self.filter_clause[old_name]
        if old_name in self.having_clause:
            self.having_clause[new_name] = self.having_clause[old_name]
            del self.having_clause[old_name]
        for var in list(self.aggregate_clause):
            self.aggregate_clause[var] = [[
                new_name if element == old_name else element
                for element in triple
            ] for triple in self.aggregate_clause[var]]
            if var == old_name:
                self.aggregate_clause[new_name] = self.aggregate_clause[
                    old_name]
                del self.aggregate_clause[old_name]
        for query in self.subqueries:
            query.rename_variable(old_name, new_name)
        for query in self.unions:
            query.rename_variable(old_name, new_name)
        for query in self.optional_subqueries:
            query.rename_variable(old_name, new_name)

    def is_valid_prefix(self, prefix):
        if prefix in self.prefixes.keys():
            return True
        else:
            return False

    """
    def get_triples(self):
        triple_string = ""
        for triple in self.triples:
            triple1 = triple[1]
            triple2 = triple[2]
            if not is_uri(triple[1]) and triple[1].find(":") < 0:
                triple1 = "?" + triple[1]
            if not is_uri(triple[2]) and triple[2].find(":") < 0:
                triple2 = "?" + triple[2]
            triple = (triple[0], triple1, triple2)
            triple_string += '\t?%s %s %s' % (triple[0], triple[1], triple[2]) + " .\n"
        optional_string = self.get_optional_triples()
        triple_string += '\t'.join(('\n' + optional_string.lstrip()).splitlines(True))
        return triple_string

    def get_optional_triples(self):
        optional_string = ""
        if len(self.optionals) > 0:
            optional_string = "OPTIONAL {  \n"
            for triple in self.optionals:
                triple1 = triple[1]
                triple2 = triple[2]
                if not is_uri(triple[1]) and triple[1].find(":") < 0:
                    triple1 = "?" + triple[1]
                if not is_uri(triple[2]) and triple[2].find(":") < 0:
                    triple2 = "?" + triple[2]
                triple = (triple[0], triple1, triple2)
                optional_string += '\t?%s %s %s' % (triple[0], triple[1], triple[2]) + " .\n"
            optional_string += "}"

        return optional_string
        """

    def union(self, qm2):
        """
        union this query model with query model (qm2)
        :param qm2: the query model to union with
        :return: a query model that unions the current query model and qm2
        """
        final_qm = QueryModel()

        if self.from_clause == qm2.from_clause:  # same graph
            # add the graphs to the outer qm and remove them  from the inner qms
            final_qm.add_graphs(self.from_clause)
            final_qm.add_graphs(qm2.from_clause)

            # union the prefixes and remove them  from the inner qms
            # TODO: check that all namespaces that have the same prefix have the same uri
            final_qm.add_prefixes(self.prefixes)
            final_qm.add_prefixes(qm2.prefixes)

            final_qm.variables = final_qm.variables.union(self.variables)
            final_qm.variables = final_qm.variables.union(qm2.variables)

            final_qm.set_offset(min(self.offset, qm2.offset))
            final_qm.set_limit(max(self.limit, qm2.limit))
            final_qm.add_order_columns(self.order_clause)
            final_qm.add_order_columns(qm2.order_clause)

            QueryModel.clean_inner_qm(self)
            QueryModel.clean_inner_qm(qm2)

            final_qm.add_unions(self)
            final_qm.add_unions(qm2)

            return final_qm

    def validate(self):
        """
        validate the columns and parameters in the query model and report any inconsistencies.
        1) validate that the namespace used in each predicate matches one of the graphs' declared prefixes
        2)
        :return: True if valid and False if not
        """
        ## add the aggregation to expandable group

        ## group by in expandable dataset, raise exception if the select cols not in the group by
        ### validate the prefix in the triple
        if self.parent_query_model is None:
            for triple in self.triples:
                if not is_uri(triple[1]):
                    if triple[1].find(":") >= 0:
                        prefix = triple[1].split(":")
                        if (len(prefix) >= 1):
                            if not self.is_valid_prefix(prefix[0]):
                                raise Exception(
                                    "Not a valid Prefix in triple {}".format(
                                        triple))
                    else:
                        # predicate is a variable
                        pass

        if self.parent_query_model is None:
            for col_name in self.filter_clause:
                if col_name.find(':') != -1:
                    prefix = col_name.split(":")
                    if (len(prefix) >= 1):
                        if not self.is_valid_prefix(prefix[0]):
                            raise Exception(
                                "Not a valid Prefix in filter {}".format(
                                    col_name))

        for subquery in self.subqueries:
            subquery_variables_set = set(subquery.variables)
            my_variables_set = set(self.variables)
            intersection_variables = my_variables_set.intersection(
                subquery_variables_set)
            if len(intersection_variables) == 0:
                raise Exception(
                    "No common variables between the main query and the subquery"
                )

        all_vars = self.all_variables()
        missing_vars = set()

        for sel_col in self.select_columns:
            if sel_col not in all_vars:
                missing_vars.add(sel_col)

        if len(missing_vars) > 0:
            raise Exception(
                'Variables {} are not defined in the query\'s body'.format(
                    ', '.join(missing_vars)))

        # filter_clause validation
        for col_name in self.filter_clause:
            if col_name not in all_vars:
                raise Warning(
                    'Cannot add filter on {}, is not part of the query variables'
                    .format(col_name))

        for col in self.order_clause:
            if col not in self.variables:
                raise Warning(
                    '{} cannot be a sorting column, it should be part of variables'
                    .format(col))

        for col_name in self.having_clause:
            if not self.is_aggregate_col(col_name):
                raise Warning(
                    '{} is not an aggregate column, cannot be added to having clause'
                    .format(col_name))

    def copy(self):
        return copy.deepcopy(self)

    def __repr__(self):
        return self.to_sparql()

    def __str__(self):
        return self.to_sparql()
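
A minimal usage sketch of the QueryModel above (not part of the original source): the prefix, graph URI, variable names and filter condition are illustrative, and the exact output depends on the project's SPARQLBuilder, which is not shown here.

qm = QueryModel()
qm.add_prefixes({'foaf': 'http://xmlns.com/foaf/0.1/'})
qm.add_graphs(['http://example.org/graph'])       # illustrative graph URI
qm.add_triple('person', 'foaf:name', 'name')      # becomes ?person foaf:name ?name
qm.add_select_column('name')
qm.add_filter_condition('name', '!= ""')          # condition format is builder-specific
qm.set_limit(10)
print(qm.to_sparql())                             # rendered by SPARQLBuilder
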
Code example #35
0
File: __init__.py Project: jgillmanjr/pyTalendSchema
class TalendSchema:
    def __init__(self):
        self.header = '<?xml version="1.0" encoding="UTF-8"?>'
        self.schema_key = 'schema'  # ET.Element('schema')
        self.column_data = {}
        self.columns = OrderedSet()

        self.type_map = {
            'boolean': 'id_Boolean',
            'byte': 'id_Byte',
            'byte[]': 'id_byte[]',
            'character': 'id_Character',
            'date': 'id_Date',
            'double': 'id_Double',
            'float': 'id_Float',
            'bigdecimal': 'id_BigDecimal',
            'integer': 'id_Integer',
            'long': 'id_Long',
            'object': 'id_Object',
            'short': 'id_Short',
            'string': 'id_String',
            'list': 'id_List',
            'document': 'id_Document'
        }

        self.attrs = {
            'comment': '',
            'default': '',
            'key': False,
            'length': -1,
            'nullable': True,
            'pattern': '',
            'precision': -1,
            'type': '',
        }

        self.attr_bool = ['key', 'nullable']

    def _attr_prep(self, key, value):
        if not isinstance(value, str):
            value = str(value)

        if key in self.attr_bool:
            return {key: str(value).lower()}

        if key == 'label':
            return {'label': hte(value), 'originalDbColumnName': hte(value)}

        if key == 'length':
            return {'length': hte(value), 'originalLength': hte(value)}

        return {key: hte(value)}

    def add_column(self, column_type, label, **properties):
        local_attrs = {
            'talendType': self.type_map[column_type],
            'label': label,
            'originalDbColumnName': label,
        }

        for key in self.attrs.keys():
            if key not in properties:
                raw_value = self.attrs[key]
            else:
                raw_value = properties[key]

            d = self._attr_prep(key, raw_value)

            local_attrs = {**local_attrs, **d}

        self.column_data[label] = local_attrs
        self.columns.add(label)

    def dump_schema(self):
        print(self.header)
        ET.dump(self.generate_schema())

    def generate_schema(self):
        schema = ET.Element(self.schema_key)

        for column in self.columns:
            ET.SubElement(schema, 'column', attrib=self.column_data[column])

        return schema

    def remove_column(self, label):
        try:
            self.columns.remove(label)
            self.column_data.pop(label)
        except KeyError:
            pass  # If it doesn't exist, so what?

    def write_schema(self, filename):
        with open(filename, 'w') as sf:
            sf.write(self.header)
            sf.write(ET.tostring(self.generate_schema(), encoding='unicode'))
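
A small usage sketch of TalendSchema (not from the original repository): the column names and output filename are made up, and the class relies on the ElementTree module (ET) and the HTML-escape helper (hte) imported in the original module.

schema = TalendSchema()
schema.add_column('string', 'first_name', nullable=False, length=64)
schema.add_column('integer', 'age', comment='age in years')
schema.remove_column('age')                # unknown labels are silently ignored
schema.dump_schema()                       # prints the XML header plus the <schema> element
schema.write_schema('person_schema.xml')   # illustrative output filename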