Example #1
0
    def __init__(self, signing_key, witness_list, url, sbb_index):
        """
        Spin up a sub-block builder process bound to its parent BlockManager.

        Args:
            signing_key: hex-encoded ed25519 signing key for this builder
            witness_list: comma separated list of "ip:vk" witness entries
            url: zmq PAIR url of the parent BlockManager process
            sbb_index: global index of this sub-block builder
        """
        self.log = get_logger("SubBlockBuilder_{}".format(sbb_index))  # fix: was undefined name `sb_index`
        # Comment out below for more granularity in debugging
        # self.log.setLevel(logging.INFO)

        # Register signal handler to teardown
        signal.signal(signal.SIGTERM, self._signal_teardown)

        # need to revisit this when threading strategy is clear
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        self.signing_key = signing_key
        # witness_list should be comma separated list of ip:vk
        # fix: initialize the table first; _parse_witness_list populates it
        # (the old code assigned its None return value to self.witness_table)
        self.witness_table = {}
        self._parse_witness_list(witness_list)
        self.url = url
        self.sbb_index = sbb_index
        # fix: `(int) x` C-style casts are not Python; use integer division/modulo
        self.block_num = sbb_index // 16        # hard code divisor for now
        self.sub_block_num = sbb_index % 16     # fix: was undefined name `sb_index`
        self.num_txs = 0
        self.num_sub_blocks = 0
        self.tasks = []

        # SenecaInterpreter connect with BlockManager (parent process that spawned this one)
        self.context = zmq.asyncio.Context()
        self.socket = self.context.socket(zmq.PAIR)  # For communication with main process
        self.socket.connect(self.url)

        # do we need this still? or do we move it to a util methods
        self.verifying_key = wallet.get_vk(self.signing_key)
        skg = SigningKey(seed=bytes.fromhex(self.signing_key))  # fix: `sk` was undefined
        self.vk = skg.verify_key.encode().hex()
        self.public_key = self.vk2pk(self.vk)
        self.private_key = crypto_sign_ed25519_sk_to_curve25519(skg._signing_key).hex()
        priv = PrivateKey(bytes.fromhex(self.private_key))
        publ = priv.public_key
        self.public_key = encode(publ._public_key)  # fix: dropped unused local aliases
        self.secret = encode(priv._private_key)

        self.pending_txs = LinkedHashTable()
        self.interpreter = SenecaInterpreter()
        self._recently_seen = CappedSet(max_size=DUPE_TABLE_SIZE)

        try:
            self._subscribe_to_witnesses()
            # start event loop and start listening witness sockets as well as mgr
            self.run_loop_forever()  # fix: `run_loop_second_time` is not defined anywhere in this file
        except Exception as e:
            err_msg = '\n' + '!' * 64 + '\nSBB terminating with exception:\n' + str(traceback.format_exc())
            err_msg += '\n' + '!' * 64 + '\n'
            self.log.error(err_msg)
        finally:
            self._teardown()
Example #2
0
    def test_max_size(self):
        """Verify the set never grows past its configured cap."""
        limit = 4
        capped = CappedSet(max_size=limit)

        for value in range(10):
            capped.add(value)

        self.assertTrue(len(capped) <= limit)
Example #3
0
    def test_init(self):
        """A freshly constructed CappedSet exposes its max size and an internal queue."""
        limit = 10
        capped = CappedSet(max_size=limit)

        self.assertEqual(capped.max_size, limit)
        self.assertTrue(capped.fifo_queue is not None)
Example #4
0
    def test_queue_synced(self):
        """
        Every element tracked by the internal fifo_queue must also be a member
        of the set itself (the two views stay bijective).
        """
        capped = CappedSet(max_size=4)

        for value in (1, 2, 3, 4, 5):
            capped.add(value)

        for element in capped.fifo_queue:
            self.assertTrue(element in capped,
                            msg='Element {} not in set {}'.format(element, capped))
Example #5
0
    def test_overflow(self):
        """
        Once the cap is hit, the oldest entries are evicted first (FIFO order).
        """
        capped = CappedSet(max_size=2)

        for value in (1, 2, 3):
            capped.add(value)

        expected = [2, 3]

        for want, got in zip(expected, capped.fifo_queue):
            self.assertEqual(want, got)
Example #6
0
class Executor(metaclass=ExecutorMeta):
    """
    Base class for reactor executors: runs zmq receive loops on an asyncio
    event loop, validates incoming envelopes, and forwards callback commands
    to the parent ReactorDaemon over an in-process socket.
    """

    # De-dupe table shared across ALL Executor instances (class attribute);
    # envelopes whose meta.uuid is already here are dropped by _validate_envelope().
    _recently_seen = CappedSet(max_size=dupe_table_size)
    _parent_name = 'ReactorDaemon'  # used for log names

    def __init__(self, loop, context, inproc_socket, ironhouse):
        """
        Args:
            loop: asyncio event loop this executor schedules its work on
            context: shared zmq context used for socket creation
            inproc_socket: socket back to the parent ReactorDaemon process
            ironhouse: auth/crypto helper (stored here, not used directly)
        """
        self.loop = loop
        asyncio.set_event_loop(self.loop)
        self.context = context
        self.inproc_socket = inproc_socket
        self.ironhouse = ironhouse
        self.log = get_logger("{}.{}".format(Executor._parent_name, type(self).__name__))

    def add_listener(self, listener_fn, *args, **kwargs):
        """Schedule listener_fn (a coroutine function) on the loop; return its future."""
        # listener_fn must be a coro
        self.log.info("add_listener scheduling future {} with args {} and kwargs {}".format(listener_fn, args, kwargs))
        return asyncio.ensure_future(self._listen(listener_fn, *args, **kwargs))

    async def _listen(self, listener_fn, *args, **kwargs):
        """Await listener_fn, logging (and swallowing) any exception it raises."""
        self.log.info("_listen called with fn {}, and args={}, kwargs={}".format(listener_fn, args, kwargs))

        try:
            await listener_fn(*args, **kwargs)
        except Exception as e:
            delim_line = '!' * 64
            err_msg = '\n\n' + delim_line + '\n' + delim_line
            # NOTE(review): '\w' in this log text is likely a typo for 'w/' — confirm before changing
            err_msg += '\n ERROR CAUGHT IN LISTENER FUNCTION {}\ncalled \w args={}\nand kwargs={}\n'\
                        .format(listener_fn, args, kwargs)
            err_msg += '\nError Message: '
            err_msg += '\n\n{}'.format(traceback.format_exc())
            err_msg += '\n' + delim_line + '\n' + delim_line
            self.log.error(err_msg)

    async def recv_multipart(self, socket, callback_fn: types.MethodType, ignore_first_frame=False):
        """
        Receive-loop: read multipart messages off `socket` until the task is
        cancelled; validate each envelope, record its uuid in the de-dupe set,
        then invoke callback_fn(header=..., envelope=...).

        Args:
            socket: zmq socket to receive on
            callback_fn: bound method called once per valid envelope
            ignore_first_frame: if True, no header/ID frame is expected
        """
        self.log.warning("--- Starting recv on socket {} with callback_fn {} ---".format(socket, callback_fn))
        while True:
            self.log.debug("waiting for multipart msg...")

            try:
                msg = await socket.recv_multipart()
            except asyncio.CancelledError:
                # Cancellation is the normal shutdown path: close socket, exit loop.
                self.log.info("Socket cancelled: {}".format(socket))
                socket.close()
                break

            self.log.debug("Got multipart msg: {}".format(msg))

            if ignore_first_frame:
                header = None
            else:
                assert len(msg) == 2, "Expected 2 frames (header, envelope) but got {}".format(msg)
                header = msg[0].decode()

            # Envelope payload is always the last frame, with or without a header frame.
            env_binary = msg[-1]
            env = self._validate_envelope(envelope_binary=env_binary, header=header)

            if not env:
                continue

            Executor._recently_seen.add(env.meta.uuid)

            callback_fn(header=header, envelope=env)

    def call_on_mp(self, callback: str, header: str=None, envelope_binary: bytes=None, **kwargs):
        """Build a ReactorCommand callback and send it, serialized, to the parent process."""
        if header:
            kwargs['header'] = header

        cmd = ReactorCommand.create_callback(callback=callback, envelope_binary=envelope_binary, **kwargs)

        # self.log.critical("\ncalling callback cmd to reactor interface: {}".format(cmd))  # DEBUG line remove this

        self.inproc_socket.send(cmd.serialize())

        # self.log.critical("command sent: {}".format(cmd))  # DEBUG line, remove this later

    def _validate_envelope(self, envelope_binary: bytes, header: str) -> Union[None, Envelope]:
        """
        Deserialize and vet an incoming envelope.

        Returns the Envelope on success, or None if any check fails:
        deserialization error, bad seal, header/vk mismatch, or duplicate uuid.
        """
        # TODO return/raise custom exceptions in this instead of just logging stuff and returning none

        # Deserialize envelope
        env = None
        try:
            env = Envelope.from_bytes(envelope_binary)
        except Exception as e:
            self.log.error("Error deserializing envelope: {}".format(e))
            return None

        # Check seal
        if not env.verify_seal():
            self.log.error("Seal could not be verified for envelope {}".format(env))
            return None

        # If header is not none (meaning this is a ROUTE msg with an ID frame), then verify that the ID frame is
        # the same as the vk on the seal
        if header and (header != env.seal.verifying_key):
            self.log.error("Header frame {} does not match seal's vk {}\nfor envelope {}"
                           .format(header, env.seal.verifying_key, env))
            return None

        # Make sure we haven't seen this message before
        if env.meta.uuid in Executor._recently_seen:
            self.log.debug("Duplicate envelope detect with UUID {}. Ignoring.".format(env.meta.uuid))
            return None

        # TODO -- checks timestamp to ensure this envelope is recv'd in a somewhat reasonable time (within N seconds)

        # If none of the above checks above return None, this envelope should be good
        return env

    def teardown(self):
        """Subclasses must implement their own socket/task cleanup."""
        raise NotImplementedError
Example #7
0
class SubBlockBuilder:
    def __init__(self, signing_key, witness_list, url, sbb_index):
        """
        Spin up a sub-block builder process bound to its parent BlockManager.

        Args:
            signing_key: hex-encoded ed25519 signing key for this builder
            witness_list: comma separated list of "ip:vk" witness entries
            url: zmq PAIR url of the parent BlockManager process
            sbb_index: global index of this sub-block builder
        """
        self.log = get_logger("SubBlockBuilder_{}".format(sbb_index))  # fix: was undefined name `sb_index`
        # Comment out below for more granularity in debugging
        # self.log.setLevel(logging.INFO)

        # Register signal handler to teardown
        signal.signal(signal.SIGTERM, self._signal_teardown)

        # need to revisit this when threading strategy is clear
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        self.signing_key = signing_key
        # witness_list should be comma separated list of ip:vk
        # fix: initialize the table first; _parse_witness_list populates it
        # (the old code assigned its None return value to self.witness_table)
        self.witness_table = {}
        self._parse_witness_list(witness_list)
        self.url = url
        self.sbb_index = sbb_index
        # fix: `(int) x` C-style casts are not Python; use integer division/modulo
        self.block_num = sbb_index // 16        # hard code divisor for now
        self.sub_block_num = sbb_index % 16     # fix: was undefined name `sb_index`
        self.num_txs = 0
        self.num_sub_blocks = 0
        self.tasks = []

        # SenecaInterpreter connect with BlockManager (parent process that spawned this one)
        self.context = zmq.asyncio.Context()
        self.socket = self.context.socket(zmq.PAIR)  # For communication with main process
        self.socket.connect(self.url)

        # do we need this still? or do we move it to a util methods
        self.verifying_key = wallet.get_vk(self.signing_key)
        skg = SigningKey(seed=bytes.fromhex(self.signing_key))  # fix: `sk` was undefined
        self.vk = skg.verify_key.encode().hex()
        self.public_key = self.vk2pk(self.vk)
        self.private_key = crypto_sign_ed25519_sk_to_curve25519(skg._signing_key).hex()
        priv = PrivateKey(bytes.fromhex(self.private_key))
        publ = priv.public_key
        self.public_key = encode(publ._public_key)  # fix: dropped unused local aliases
        self.secret = encode(priv._private_key)

        self.pending_txs = LinkedHashTable()
        self.interpreter = SenecaInterpreter()
        self._recently_seen = CappedSet(max_size=DUPE_TABLE_SIZE)

        try:
            self._subscribe_to_witnesses()
            # start event loop and start listening witness sockets as well as mgr
            self.run_loop_forever()  # fix: `run_loop_second_time` is not defined; run_loop_forever is
        except Exception as e:
            err_msg = '\n' + '!' * 64 + '\nSBB terminating with exception:\n' + str(traceback.format_exc())
            err_msg += '\n' + '!' * 64 + '\n'
            self.log.error(err_msg)
        finally:
            self._teardown()

    def _parse_witness_list(self, witness_list):
        """
        Parse "ip:vk,ip:vk,..." into self.witness_table mapping ip -> [vk].

        Fixes two defects in the original: it mutated self.witness_table
        before that attribute existed (AttributeError), and it returned None
        even though the constructor assigned its return value. The table is
        now both stored on self and returned.

        Args:
            witness_list: comma separated "ip:vk" entries

        Returns:
            dict mapping ip (str) -> list whose first element is the vk (str)
        """
        table = getattr(self, 'witness_table', {})
        for witness in witness_list.split(","):
            ip, vk = witness.split(":", 1)  # vk may itself contain ':'; split once
            table[ip] = [vk]
        self.witness_table = table
        return table
          
    def _subscribe_to_witnesses(self):
        """
        Open one sub socket per witness in self.witness_table and schedule a
        listener task for each; the socket is stored alongside the witness vk.
        """
        # fix: iterating a dict yields keys only; unpacking (ip, value) requires .items()
        for ip, value in self.witness_table.items():
            witness_vk = value[0]
            url = "{}:{}".format(ip, PUB_SUB_PORT)
            socket = self._add_sub(
                                   url=url,
                                   filter=str(WITNESS_DELEGATE_FILTER),
                                   vk=witness_vk)
            self.witness_table[ip].append(socket)
            self.tasks.append(self._listen_to_witness(socket, url))
            # fix: stray '.' before the "filter" string was a SyntaxError; merged into one format call
            self.log.debug("Added sub connection to witness at ip:{} socket:{} filter {}"
                           .format(ip, witness_vk, WITNESS_DELEGATE_FILTER))

    def run_loop_forever(self):
        """Schedule the block-manager listener, then run all pending tasks to completion."""
        self.tasks.append(self._listen_to_block_manager())
        # fix: `*tasks` referenced an undefined local; the task list lives on self
        self.loop.run_until_complete(asyncio.gather(*self.tasks))

    async def _listen_to_block_manager(self):
        """
        Process commands from the parent BlockManager over the PAIR socket
        until a kill signal arrives or the task is cancelled.
        """
        try:
            self.log.debug(
               "Sub-block builder {} listening to Block-manager process at {}"
               .format(self.sbb_index, self.url))
            while True:
                cmd_bin = await self.socket.recv()
                self.log.debug("Got cmd from BM: {}".format(cmd_bin))

                # need to change logic here based on our communication protocols
                if cmd_bin == KILL_SIG:
                    self.log.debug("Sub-block builder {} got kill signal"
                                   .format(self.sbb_index))
                    self._teardown()
                    return

                if cmd_bin == MAKE_SUBTREE:
                    # self._interpret = 1  ?
                    return

                if cmd_bin == SEND_NUM_TXS:
                    # send back number of txs pending so it can skip this block if too low
                    # fix: the bare `if` with only a comment was a SyntaxError;
                    # TODO(review): reply with len(self.pending_txs) once the protocol is settled
                    pass

                if cmd_bin == CANCEL_SUBTREE:
                    if self._interpret:
                        self._interpret = 0

        except asyncio.CancelledError:
            self.log.warning("Builder _recv_messages task canceled externally")


    # async def recv_multipart(self, socket, callback_fn: types.MethodType, ignore_first_frame=False):
    async def _listen_to_witness(self, socket, url, ignore_first_frame=True):
        """
        Receive transactions from one witness socket forever: validate each
        envelope, de-dupe it by uuid, and queue its transaction for interpretation.

        Args:
            socket: sub socket connected to the witness
            url: witness url (used only for log messages)
            ignore_first_frame: if True, no header/ID frame is expected
        """
        self.log.debug("Sub-block builder {} listening to witness at {}"
                       .format(self.sbb_index, url))
        while True:
            try:
                msg = await socket.recv_multipart()
            except asyncio.CancelledError:
                # Cancellation is the normal shutdown path: close socket, exit loop.
                self.log.debug("Socket at witness {} cancelled".format(url))
                socket.close()
                break

            if ignore_first_frame:
                header = None
            else:
                # fix: the multi-line assert lacked a continuation (SyntaxError)
                assert len(msg) == 2, \
                    "Expected 2 frames (header, envelope) but got {}".format(msg)
                header = msg[0].decode()

            # Envelope payload is always the last frame.
            env_binary = msg[-1]
            env = self._validate_envelope(envelope_binary=env_binary,
                                          header=header)

            if not env:
                continue

            self._recently_seen.add(env.meta.uuid)
            tx = env.message  # fix: `envelope` was an undefined name; validated object is `env`
            self.pending_txs.append(Hasher.hash(tx.transaction), tx)