Example #1
0
    def __init__(self, url, sk=None, name='Node'):
        """
        Start a ReactorDaemon connected to the main process over a ZMQ PAIR socket.

        :param url: ZMQ endpoint of the PAIR socket shared with the main process
        :param sk: signing key forwarded to the DHT (None lets the DHT generate one)
        :param name: label used for the logger and for Executor._parent_name

        NOTE: this constructor blocks -- it runs the event loop until
        _recv_messages() returns or raises, then tears everything down.
        """
        self.log = get_logger("{}.ReactorDaemon".format(name))
        self.log.info("ReactorDaemon started with url {}".format(url))
        self.url = url

        # Comment out below for more granularity in debugging
        # self.log.setLevel(logging.INFO)

        # TODO optimize cache
        self.ip_cache = CappedDict(max_size=64)

        # This daemon owns a dedicated event loop (it lives in a child process)
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        # Register signal handler to teardown
        signal.signal(signal.SIGTERM, self._signal_teardown)

        self.discovery_mode = 'test' if os.getenv(
            'TEST_NAME') else 'neighborhood'
        self.dht = DHT(sk=sk,
                       mode=self.discovery_mode,
                       loop=self.loop,
                       alpha=ALPHA,
                       ksize=KSIZE,
                       daemon=self,
                       max_peers=MAX_PEERS,
                       block=False,
                       cmd_cli=False,
                       wipe_certs=True)

        self.context = zmq.asyncio.Context()
        self.socket = self.context.socket(
            zmq.PAIR)  # For communication with main process
        self.socket.connect(self.url)

        # Set Executor _parent_name to differentiate between nodes in log files
        Executor._parent_name = name

        # One executor instance per registered Executor subclass
        self.executors = {
            name: executor(self.loop, self.socket, self.dht.network.ironhouse)
            for name, executor in Executor.registry.items()
        }

        try:
            self.loop.run_until_complete(self._recv_messages())
        except Exception:
            delim = '!' * 64
            err_msg = '\n' + delim + '\nDaemon Loop terminating with exception:\n' + str(
                traceback.format_exc())
            err_msg += '\n' + delim + '\n'
            self.log.error(err_msg)
        finally:
            self._teardown()
def run_node():
    """Spin up a test-mode DHT node whose signing key is selected by HOSTNAME."""
    import asyncio, os, zmq.auth
    from cilantro.protocol.overlay.dht import DHT
    from cilantro.logger import get_logger
    log = get_logger(__name__)

    # Hard-coded signing keys for the six test nodes; the container's
    # HOSTNAME environment variable picks the one this process uses.
    signing_keys = {
        'node_1': '06391888e37a48cef1ded85a375490df4f9b2c74f7723e88c954a055f3d2685a',
        'node_2': '91f7021a9e8c65ca873747ae24de08e0a7acf58159a8aa6548910fe152dab3d8',
        'node_3': 'f9489f880ef1a8b2ccdecfcad073e630ede1dd190c3b436421f665f767704c55',
        'node_4': '8ddaf072b9108444e189773e2ddcb4cbd2a76bbf3db448e55d0bfc131409a197',
        'node_5': '5664ec7306cc22e56820ae988b983bdc8ebec8246cdd771cfee9671299e98e3c',
        'node_6': '20b577e71e0c3bddd3ae78c0df8f7bb42b29b0c0ce9ca42a44e6afea2912d17b',
    }

    node_sk = signing_keys.get(os.getenv('HOSTNAME'))
    dht = DHT(mode='test', sk=node_sk, wipe_certs=True, block=False)

    # Block forever servicing the DHT's event loop
    dht.loop.run_forever()
Example #3
0
    def test_join_network_as_sole_non_master_node(self):
        # Stop the event loop shortly after the assertion below has run.
        def stop_loop(self):
            self.loop.call_soon_threadsafe(self.loop.stop)

        # A lone non-master node has nobody to discover, so construction
        # must fail once its single discovery retry is exhausted.
        with self.assertRaises(ErrorWithArgs):
            self.node = DHT(
                sk=self.witness['sk'], mode='test', port=3321,
                keyname='node', wipe_certs=True, loop=self.loop,
                max_wait=0.1, block=False, retry_discovery=1)

        t = Timer(0.01, stop_loop, [self])
        t.start()
        self.loop.run_forever()
Example #4
0
    def test_join_network_as_sole_master(self):
        # Clean the node up and stop the loop once the assertion has run.
        def stop_loop(self):
            self.node.cleanup()
            self.loop.call_soon_threadsafe(self.loop.stop)

        # A sole master node may join immediately without discovering peers.
        self.node = DHT(
            sk=self.master['sk'], mode='test', port=3321,
            keyname='master', wipe_certs=True, loop=self.loop,
            max_wait=0.1, block=False)

        # Its verifying key must match the master's configured VK.
        self.assertEqual(self.node.network.ironhouse.vk, self.master['vk'])
        t = Timer(0.01, stop_loop, [self])
        t.start()
        self.loop.run_forever()
Example #5
0
 def _start_service(cls, sk):
     """
     Bring the overlay service up on this class: bind the PUB event socket,
     start the DHT, schedule the command listener, then run the loop forever.

     :param sk: signing key handed to the DHT
     NOTE: blocks the calling thread in cls.loop.run_forever().
     """
     ctx = zmq.asyncio.Context()
     # PUB socket on which overlay events are broadcast to subscribers
     cls.event_sock = ctx.socket(zmq.PUB)
     cls.event_sock.bind(cls.event_url)
     # 'test' discovery inside test harnesses (TEST_NAME set), else 'neighborhood'
     cls.discovery_mode = 'test' if os.getenv(
         'TEST_NAME') else 'neighborhood'
     cls.dht = DHT(sk=sk,
                   mode=cls.discovery_mode,
                   loop=cls.loop,
                   alpha=ALPHA,
                   ksize=KSIZE,
                   event_sock=cls.event_sock,
                   max_peers=MAX_PEERS,
                   block=False,
                   cmd_cli=False,
                   wipe_certs=True)
     cls._started = True
     # Schedule the command listener on the loop before it starts running
     cls.listener_fut = asyncio.ensure_future(cls._listen_for_cmds())
     # Announce readiness to any subscribers, then enter the loop
     cls.event_sock.send_json({'event': 'service_started'})
     cls.loop.run_forever()
Example #6
0
class ReactorDaemon:
    """
    Child-process daemon that bridges the main process and the overlay network.

    It owns its own asyncio event loop, a DHT instance, and one instance of
    every registered Executor. Commands arrive from the main process over a
    ZMQ PAIR socket as serialized ReactorCommand objects and are dispatched to
    the appropriate executor, resolving VK-based URLs to IP addresses first.
    """

    def __init__(self, url, sk=None, name='Node'):
        """
        Construct the daemon and run it until shutdown.

        :param url: ZMQ endpoint of the PAIR socket shared with the main process
        :param sk: signing key forwarded to the DHT (None lets the DHT generate one)
        :param name: label used for logging and Executor._parent_name

        NOTE: this constructor blocks; it runs the event loop until
        _recv_messages() returns or raises, then tears everything down.
        """
        self.log = get_logger("{}.ReactorDaemon".format(name))
        self.log.info("ReactorDaemon started with url {}".format(url))
        self.url = url

        # Comment out below for more granularity in debugging
        # self.log.setLevel(logging.INFO)

        # TODO optimize cache
        self.ip_cache = CappedDict(max_size=64)

        # This daemon owns a dedicated event loop (it lives in a child process)
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        # Register signal handler to teardown
        signal.signal(signal.SIGTERM, self._signal_teardown)

        self.discovery_mode = 'test' if os.getenv(
            'TEST_NAME') else 'neighborhood'
        self.dht = DHT(sk=sk,
                       mode=self.discovery_mode,
                       loop=self.loop,
                       alpha=ALPHA,
                       ksize=KSIZE,
                       daemon=self,
                       max_peers=MAX_PEERS,
                       block=False,
                       cmd_cli=False,
                       wipe_certs=True)

        self.context = zmq.asyncio.Context()
        self.socket = self.context.socket(
            zmq.PAIR)  # For communication with main process
        self.socket.connect(self.url)

        # Set Executor _parent_name to differentiate between nodes in log files
        Executor._parent_name = name

        # One executor instance per registered Executor subclass
        self.executors = {
            name: executor(self.loop, self.socket, self.dht.network.ironhouse)
            for name, executor in Executor.registry.items()
        }

        try:
            self.loop.run_until_complete(self._recv_messages())
        except Exception:
            delim = '!' * 64
            err_msg = '\n' + delim + '\nDaemon Loop terminating with exception:\n' + str(
                traceback.format_exc())
            err_msg += '\n' + delim + '\n'
            self.log.error(err_msg)
        finally:
            self._teardown()

    async def _recv_messages(self):
        """
        Main receive loop: notify the parent process that this child is ready,
        then keep reading serialized ReactorCommands from the PAIR socket and
        executing them until KILL_SIG arrives or the task is cancelled.
        """
        try:
            # Notify parent proc that this proc is ready
            self.log.notice("Daemon notifying main proc of ready")
            self.socket.send(CHILD_RDY_SIG)

            self.log.info(
                "-- Daemon proc listening to main proc on PAIR Socket at {} --"
                .format(self.url))
            while True:
                self.log.spam(
                    "ReactorDaemon awaiting for command from main thread...")
                cmd_bin = await self.socket.recv()
                self.log.spam("Got cmd from queue: {}".format(cmd_bin))

                if cmd_bin == KILL_SIG:
                    self.log.important(
                        "Daemon Process got kill signal from main proc")
                    self._teardown()
                    return

                # Should from_bytes be in a try/catch? I suppose if we get a bad command from the main proc we might as well
                # blow up because this is very likely because of a development error, so no try/catch for now
                cmd = ReactorCommand.from_bytes(cmd_bin)
                assert cmd.class_name and cmd.func_name, "Received invalid command with no class/func name!"

                self._execute_cmd(cmd)

        except asyncio.CancelledError:
            self.log.warning("Daemon _recv_messages task canceled externally")

    def _signal_teardown(self, signal, frame):
        """
        SIGTERM handler: log and tear the daemon down.

        NOTE(review): the 'signal' parameter shadows the signal module, but the
        module is not referenced inside this handler, so it is harmless; the
        name is kept so positional/keyword callers are unaffected.
        """
        self.log.important("Daemon process got kill signal!")
        self._teardown()

    def _teardown(self):
        """
        Close sockets. Teardown executors. Close Event Loop.
        """
        self.log.info("[DAEMON PROC] Tearing down Reactor Daemon process")

        self.log.warning("Closing pair socket")
        self.socket.close()

        self.log.warning("Tearing down executors")
        for e in self.executors.values():
            e.teardown()

        self.dht.cleanup()

        self.log.warning("Closing event loop")
        # call_soon_threadsafe because SIGTERM may arrive on another thread
        self.loop.call_soon_threadsafe(self.loop.stop)

    def _execute_cmd(self, cmd: ReactorCommand):
        """
        Propagates a command to the appropriate executor
        :param cmd: an instance of ReactorCommand
        """
        assert isinstance(
            cmd, ReactorCommand
        ), "Cannot execute cmd {} that is not a ReactorCommand object".format(
            cmd)

        cmd_args = self._parse_cmd(cmd)
        if cmd_args:
            executor_name, executor_func, kwargs = cmd_args
        else:
            # _parse_cmd returned None: the URL held a VK and an async lookup
            # was scheduled; the command will be re-executed from _lookup_ip.
            self.log.debugv(
                'Command requires VK lookup. Short circuiting from _execute_cmd.'
            )
            return

        # Sanity checks (for catching bugs mostly)
        assert executor_name in self.executors, "Executor name {} not found in executors {}"\
            .format(executor_name, self.executors)
        assert hasattr(self.executors[executor_name], executor_func), "Function {} not found on executor class {}"\
            .format(executor_func, self.executors[executor_name])

        # Execute command
        try:
            getattr(self.executors[executor_name], executor_func)(**kwargs)
        except Exception as e:
            self.log.fatal("Error executing command {}\n....error={}".format(
                cmd, e))

    def _parse_cmd(self, cmd: ReactorCommand):
        """
        Parses a cmd for execution, by extracting/preparing the necessary kwargs for execution.
        :param cmd: an instance of ReactorCommand
        :return: A tuple of 3 elements (executor_name, executor_func, kwargs). Returns None if the command specifies
        a URL with a VK instead of a IP address.
        """
        executor_name = cmd.class_name
        executor_func = cmd.func_name
        kwargs = cmd.kwargs

        # Remove class_name and func_name from kwargs. We just need these to lookup the function to call
        # NOTE(review): if cmd.kwargs returns the live dict rather than a copy,
        # these dels mutate the command object itself -- confirm in ReactorCommand.
        del kwargs['class_name']
        del kwargs['func_name']

        # Add envelope to kwargs if its in the reactor command
        if cmd.envelope_binary:
            kwargs['envelope'] = cmd.envelope_binary

        # Replace VK with IP address if necessary
        if 'url' in kwargs:
            self.log.spam("Processing command with url {}".format(
                kwargs['url']))
            url = kwargs['url']

            # Check if URL has a VK inside
            vk = IPUtils.get_vk(url)
            if vk:
                if vk == self.dht.network.ironhouse.vk:
                    # The VK is our own, so use our own IP
                    ip = self.dht.ip
                else:
                    ip = self.dht.network.lookup_ip_in_cache(vk)
                if not ip:
                    self.log.debug(
                        "Could not find ip for vk {} in cache. Performing lookup in DHT."
                        .format(vk))

                    # Defer: resolve the VK asynchronously, then re-run the cmd
                    asyncio.ensure_future(self._lookup_ip(cmd, url, vk))
                    return

                new_url = IPUtils.interpolate_url(url, ip)
                kwargs['url'] = new_url

        return executor_name, executor_func, kwargs

    async def _lookup_ip(self, cmd, url, vk, *args, **kwargs):
        """
        Resolve a VK to an IP via the DHT, authenticate the found node, and
        either re-dispatch the command with the interpolated URL or send a
        LOOKUP_FAILED callback to the main thread.
        """
        node = None
        try:
            node, cached = await self.dht.network.lookup_ip(vk)
            # NOTE while secure, this is a more loose connection policy
            self.log.debugv('IP {} resolves {} into {}'.format(
                os.getenv('HOST_IP', '127.0.0.1'), vk, node))
            self.log.debugv(
                '... but is {} authorized? Until next episode!'.format(node))
            if node:
                # Authenticate nodes we have not already authorized
                if not self.dht.network.ironhouse.authorized_nodes.get(
                        node.id):
                    authorization = await self.dht.network.authenticate(node)
                    if not authorization:
                        node = None
            else:
                node = None

        except Exception:
            delim_line = '!' * 64
            err_msg = '\n\n' + delim_line + '\n' + delim_line
            err_msg += '\n ERROR CAUGHT IN LOOKUP FOR VK {}\ncalled with args={}\nand kwargs={}\n'\
                       .format(vk, args, kwargs)
            err_msg += '\nError Message: '
            err_msg += '\n\n{}'.format(traceback.format_exc())
            err_msg += '\n' + delim_line + '\n' + delim_line
            self.log.fatal(err_msg)

        if node is None:
            kwargs = cmd.kwargs
            callback = ReactorCommand.create_callback(
                callback=StateInput.LOOKUP_FAILED, **kwargs)
            self.log.debug(
                "Sending callback failure to mainthread {}".format(callback))
            self.socket.send(callback.serialize())
            # TODO -- send callback to SM saying hey i couldnt lookup this vk

            return

        # Send interpolated command back through pipeline
        # (lookup_ip may return a Node object or a raw IP string)
        ip = node.ip if isinstance(node, Node) else node
        new_url = IPUtils.interpolate_url(url, ip)
        kwargs = cmd.kwargs
        kwargs['url'] = new_url
        new_cmd = ReactorCommand.create_cmd(envelope=cmd.envelope, **kwargs)

        self._execute_cmd(new_cmd)