Example No. 1
    class RpcContext(object):
        def __init__(self, pool, config):
            self.pool = weakref.proxy(pool)
            self.proxy = ClusterRpcProxy(config, context_data=pool.context_data, timeout=pool.timeout)
            self.rpc = self.proxy.start()

        def stop(self):
            self.proxy.stop()
            self.proxy = None
            self.rpc = None

        def __enter__(self):
            return self.rpc

        def __exit__(self, exc_type, exc_value, traceback, **kwargs):
            try:
                if exc_type == RuntimeError and (
                        str(exc_value) == "This consumer has been stopped, and can no longer be used"
                        or str(exc_value) == "This consumer has been disconnected, and can no longer be used"):
                    self.pool._clear()
                    self.pool._reload()  # reload all workers
                    self.stop()
                elif exc_type == ConnectionError:  # maybe check for RpcTimeout as well
                    # self.pool._clear()
                    self.pool._reload(1)  # reload at most 1 worker
                    self.stop()
                else:
                    self.pool._put_back(self)
            except ReferenceError:  # pragma: no cover
                # We're detached from the parent, so this context
                # is going to silently die.
                self.stop()
Example No. 2
class NfvoRpcClient(Nfvo):
        
    def __init__(self, config, nfvo_agent_name):
        self.config = config
        self.nfvo_agent_name = nfvo_agent_name

        self.cluster_rpc_c = ClusterRpcProxy(config)
        self.cluster_rpc = self.cluster_rpc_c.start()

    def deploy_instance(self, ctx, args):
        agent = getattr(self.cluster_rpc, self.nfvo_agent_name)
        return agent.deploy_instance.call_async(ctx, args)

    def exec_action(self, ctx, args):
        agent = getattr(self.cluster_rpc, self.nfvo_agent_name)
        return agent.exec_action.call_async(ctx, args)

    def exec_custom_action(self, ctx, args):
        agent = getattr(self.cluster_rpc, self.nfvo_agent_name)
        return agent.exec_custom_action.call_async(ctx, args)

    def delete_instance(self, ctx, args):
        agent = getattr(self.cluster_rpc, self.nfvo_agent_name)
        return agent.delete_instance.call_async(ctx, args)

    def stop(self):
        self.cluster_rpc_c.stop()
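Each method above returns nameko's async reply handle rather than a result. A minimal usage sketch, assuming a hypothetical agent name and payload shape (the Nfvo base class and the real argument formats live elsewhere in the project):

# Hypothetical usage: call_async() returns immediately with a reply handle;
# calling .result() on it blocks until the remote worker responds.
client = NfvoRpcClient({'AMQP_URI': 'amqp://guest:guest@localhost'},
                       nfvo_agent_name='tacker_agent')  # agent name is an assumption
reply = client.deploy_instance(ctx={}, args={'vnfd_id': 'example-vnfd'})
print(reply.result())
client.stop()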
Example No. 3
class AuthProxy:
    def __init__(self, token, config):
        self.token = token
        self.config = config
        self.proxy = ClusterRpcProxy(
            config, context_data={'miso_auth_token': self.token})
        self.rpc = self.proxy.start()

    def stop(self):
        self.proxy.stop()
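A minimal usage sketch, assuming a hypothetical auth_service running in the cluster; the miso_auth_token entry in context_data is forwarded as call context with every RPC made through the proxy:

auth = AuthProxy(token='secret-token',
                 config={'AMQP_URI': 'amqp://guest:guest@localhost'})
try:
    # auth_service and whoami() are placeholders for whatever service
    # reads miso_auth_token from the worker context.
    user = auth.rpc.auth_service.whoami()
finally:
    auth.stop()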
Example No. 4
class AsyncEvalServiceInvoker:
    def __init__(self, conf):
        self.rpc_proxy = ClusterRpcProxy(conf)
        self.proxy = self.rpc_proxy.start()

    def eval(self, method: EvalMethod,
             inputs: List[DatahubTarget]) -> List[DatahubTarget]:
        result = self.proxy.evaluation_service.eval(method, inputs)
        print(result)
        return result  # the annotation promises List[DatahubTarget], so return it

    def stop(self):
        self.rpc_proxy.stop()
Example No. 5
    class RpcContext(object):
        def __init__(self, pool, config):
            self.pool = weakref.proxy(pool)
            self.proxy = ClusterRpcProxy(config)
            self.rpc = self.proxy.start()

        def stop(self):
            self.proxy.stop()
            self.proxy = None
            self.rpc = None

        def __enter__(self):
            return self.rpc

        def __exit__(self, *args, **kwargs):
            try:
                self.pool._put_back(self)
            except ReferenceError:  # pragma: no cover
                # We're detached from the parent, so this context
                # is going to silently die.
                self.stop()
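This RpcContext (like the variants in the other examples) is meant to be handed out by a connection pool that implements _put_back(), _clear(), and _reload(); the pool itself is not part of the snippet. A minimal sketch of the intended usage, where the ClusterRpcProxyPool class and its next() method are assumptions rather than code taken from the example:

pool = ClusterRpcProxyPool({'AMQP_URI': 'amqp://guest:guest@localhost'})  # assumed pool class
pool.start()
# The with-block yields the started rpc proxy; on exit, __exit__ returns
# the context to the pool via _put_back().
with pool.next() as rpc:
    rpc.some_service.some_method()  # service/method names are placeholders
pool.stop()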
Example No. 6
class PooledBrokerConnection(DataBroker):
    def __init__(self, net_location: str):
        logger.info("creating remote client at {}".format(net_location))
        conf = remote_config(net_location)
        self.rpc_proxy = ClusterRpcProxy(conf)
        self.proxy = self.rpc_proxy.start()
        self.net_location = net_location

    def releaseAll(self) -> None:
        logger.info("release all for  remote client at {}".format(
            self.net_location))
        self.proxy.data_broker_service.releaseAll()

    def stop(self):
        logger.info("closing remote client at {}".format(self.net_location))
        self.rpc_proxy.stop()

    def commit(self, matrix: Matrix, revisionInfo: RevisionInfo) -> Revision:
        return self.proxy.data_broker_service.commit(matrix, revisionInfo)

    def history(self, url: str) -> List[Revision]:
        return self.proxy.data_broker_service.history(url)

    def list(self) -> List[MatrixHeader]:
        return self.proxy.data_broker_service.list()

    def checkout(self, url: str, version_id=None) -> Matrix:
        return self.proxy.data_broker_service.checkout(url)

    def view(self, url: str, version_id=None) -> Matrix:
        return self.proxy.data_broker_service.view(url)

    def release(self, matrix) -> None:
        self.proxy.data_broker_service.release(matrix)
        return None

    def peek(self, url) -> MatrixPreview:
        return self.proxy.data_broker_service.peek(url)
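A minimal usage sketch, assuming remote_config() only needs the broker's network location and that the remote data_broker_service exposes the same operations; the Matrix/Revision types come from the surrounding project and are not shown here:

# Hypothetical usage; the net_location format is an assumption.
broker = PooledBrokerConnection("localhost")
try:
    for header in broker.list():   # MatrixHeader entries known to the broker
        print(header)
finally:
    broker.stop()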
Example No. 7
    class RpcContext(object):
        def __init__(self, pool, config):
            self._pool = weakref.proxy(pool)
            self._proxy = ClusterRpcProxy(config,
                                          context_data=copy.deepcopy(
                                              pool.context_data),
                                          timeout=pool.timeout)
            self._rpc = None
            self._enable_rpc_call = False

        def __del__(self):
            if self._proxy:
                try:
                    self._proxy.stop()
                except:  # ignore any error since the object is being garbage collected
                    pass
            self._proxy = None
            self._rpc = None

        def __getattr__(self, item):
            """ This will return the service proxy instance

            :param item: name of the service
            :return: Service Proxy
            """
            if not self._enable_rpc_call:
                raise AttributeError(item)
            return getattr(self._rpc, item)

        def __enter__(self):
            if self._proxy is None:
                self._pool._reload(1)  # reload 1 worker and raise error
                self.__del__()
                raise RuntimeError("This RpcContext has been stopped already")
            elif self._rpc is None:
                # try to start the RPC proxy if it hasn't been started yet (first RPC call of this connection)
                try:
                    self._rpc = self._proxy.start()
                except (IOError, ConnectionError):
                    # if the start fails, reload 1 worker and re-raise
                    self._pool._reload(1)  # reload 1 worker
                    self.__del__()
                    raise
            self._enable_rpc_call = True
            return weakref.proxy(self)

        def __exit__(self, exc_type, exc_value, traceback, **kwargs):
            self._enable_rpc_call = False
            try:
                if exc_type == RuntimeError and str(exc_value) in (
                        "This consumer has been stopped, and can no longer be used",
                        "This consumer has been disconnected, and can no longer be used",
                        "This RpcContext has been stopped already"):
                    self._pool._reload(1)  # reload 1 worker
                    self.__del__()
                elif exc_type == ConnectionError:
                    self._pool._reload(1)  # reload at most 1 worker
                    self.__del__()
                else:
                    if self._rpc._worker_ctx.data is not None:
                        if self._pool.context_data is None:
                            # clear all keys since there is no pool context_data
                            self._rpc._worker_ctx.data.clear()
                        elif self._rpc._worker_ctx.data != self._pool.context_data:
                            # revert worker_ctx.data back to the original
                            # pool.context_data when the block exits
                            for key in list(self._rpc._worker_ctx.data.keys()):
                                if key not in self._pool.context_data:
                                    del self._rpc._worker_ctx.data[key]
                                else:
                                    self._rpc._worker_ctx.data[key] = self._pool.context_data[key]
                    self._pool._put_back(self)
            except ReferenceError:  # pragma: no cover
                # We're detached from the parent, so this context
                # is going to silently die.
                self.__del__()
Example No. 8
class TelegramRunner(Loadable, StartStopable, SignalStopWrapper):

    def __init__(self, settings=None):
        if settings is None:
            settings = {}
        super(TelegramRunner, self).__init__(settings)

        nameko_settings = settings['nameko']
        """ :type : dict """
        telegram_settings = settings['telegram']
        """ :type : dict """

        if self._prePath is not None:
            telegram_settings.setdefault('path_prefix', self._prePath)
        if self._prePath is not None:
            nameko_settings.setdefault('path_prefix', self._prePath)
        self.dispatcher = event_dispatcher(nameko_settings)
        self.telegram = TelegramClient(telegram_settings)
        self.telegram.get_user_external = self._rpc_service_user_get_authorized
        self.service = StandaloneTelegramService()
        self.service.dispatch_intent = self._dispatch_intent
        self.service.telegram = self.telegram
        nameko_settings['service_name'] = self.service.name
        nameko_settings['service'] = self.service
        nameko_settings['allowed_functions'] = self.service.allowed
        self.listener = RPCListener(nameko_settings)
        self._cluster_proxy = ClusterRpcProxy(
            nameko_settings, timeout=nameko_settings.get('rpc_timeout', None)
        )
        self._proxy = None
        self._done = threading.Event()
        self._polling_timeout = settings.get('polling_interval', 2.0)

    def _thread_wrapper(self, function, *args, **kwargs):
        """
        Wrap function for exception handling with threaded calls

        :param function: Function to call
        :type function: callable
        :rtype: None
        """
        try:
            function(*args, **kwargs)
        except:
            self.exception("Threaded execution failed")

    def _run_message_watcher(self):
        timeout = self._polling_timeout
        while not self._done.wait(timeout):
            if self.telegram.new_command.is_set():
                cmds = self.service.pop_commands()
                for cmd in cmds:
                    try:
                        im = self.service.to_input_message(cmd)
                        self.service.communicate(im)
                    except:
                        self.exception(
                            "Failed to communicate message\n{}".format(im)
                        )
            if self.telegram.new_text.is_set():
                txts = self.service.pop_texts()
                for txt in txts:
                    try:
                        im = self.service.to_input_message(txt)
                        self.service.communicate(im)
                    except:
                        self.exception(
                            "Failed to communicate message\n{}".format(im)
                        )
            if self.telegram.new_text.is_set() \
                    or self.telegram.new_command.is_set():
                # Got more messages -> don't sleep
                timeout = 0.0
            else:
                timeout = self._polling_timeout

    def _dispatch_intent(self, event_type, event_data):
        self.dispatcher("manager_intent", event_type, event_data)

    def _rpc_service_user_get_authorized(self, user_id):
        self.debug("({})".format(user_id))
        if not self._proxy:
            self.warning("No proxy available")
            return None
        resp = self._proxy.service_user.get_authorized(
            self.service.name, user_id
        )
        if not resp:
            return None
        user, permission = resp
        # TODO: check permission
        if not permission or not user:
            return None
        self.debug("Matched user: {}".format(user))
        return user.get('uuid')

    def start(self, blocking=False):
        self.debug("()")
        super(TelegramRunner, self).start(False)
        self.debug("Starting rpc proxy..")
        tries = 3
        sleep_time = 1.4
        while tries > 0:
            self.debug("Trying to establish nameko proxy..")
            try:
                self._proxy = self._cluster_proxy.start()
            except:
                if tries <= 1:
                    raise
                self.exception("Failed to connect proxy")
                self.info("Sleeping {}s".format(round(sleep_time, 2)))
                time.sleep(sleep_time)
                sleep_time **= 2
            else:
                break
            tries -= 1
        self.service.proxy = self._proxy
        self.debug("Starting telegram client..")
        try:
            self.telegram.start(False)
        except:
            self.exception("Failed to start telegram client")
            self.stop()
            return
        self.info("Telegram client running")
        self.debug("Starting rpc listener")
        try:
            self.listener.start(False)
        except:
            self.exception("Failed to start rpc listener")
            self.stop()
            return
        self.info("RPC listener running")
        self._done.clear()
        if blocking:
            try:
                self._run_message_watcher()
            except IOError as e:
                if e.errno == errno.EINTR:
                    self.warning("Interrupted function in message loop")
                else:
                    self.exception("Failed to run message loop")
            except:
                self.exception("Failed to run message loop")
                self.stop()
                return
        else:
            try:
                a_thread = threading.Thread(
                    target=self._thread_wrapper,
                    args=(self._run_message_watcher,)
                )
                a_thread.daemon = True
                a_thread.start()
            except:
                self.exception("Failed to run message loop")
                self.stop()
                return

    def stop(self):
        self.debug("()")
        self._done.set()
        super(TelegramRunner, self).stop()
        self.debug("Stopping rpc listener")
        try:
            self.listener.stop()
        except:
            self.exception("Failed to stop rpc listener")
        else:
            self.info("RPC listener stopped")
        self.debug("Stopping telegram client")
        try:
            self.telegram.stop()
        except:
            self.exception("Failed to stop telegram client")
        else:
            self.info("Telegram client stopped stopped")
        self.debug("Stopping cluster proxy..")
        try:
            self._cluster_proxy.stop()
        except:
            self.exception("Failed to stop cluster proxy")
        else:
            self.info("RPC proxy stopped")
        finally:
            self._proxy = None
Example No. 9
    class RpcContext(object):
        def __init__(self, pool, config):
            self._pool = weakref.proxy(pool)
            self._proxy = ClusterRpcProxy(config,
                                          context_data=copy.deepcopy(
                                              pool.context_data),
                                          timeout=pool.timeout)
            self._rpc = self._proxy.start()
            self._enable_rpc_call = False

        def __del__(self):
            try:
                self._proxy.stop()
            except AttributeError:
                pass
            self._proxy = None
            self._rpc = None

        def __getattr__(self, item):
            """ This will return the service proxy instance

            :param item: name of the service
            :return: Service Proxy
            """
            if not self._enable_rpc_call:
                raise AttributeError(item)
            return getattr(self._rpc, item)

        def __enter__(self):
            self._enable_rpc_call = True
            return weakref.proxy(self)

        def __exit__(self, exc_type, exc_value, traceback, **kwargs):
            self._enable_rpc_call = False
            try:
                if exc_type == RuntimeError and str(exc_value) in (
                        "This consumer has been stopped, and can no longer be used",
                        "This consumer has been disconnected, and can no longer be used"):
                    self._pool._clear()
                    self._pool._reload()  # reload all workers
                    self.__del__()
                elif exc_type == ConnectionError:  # maybe check for RpcTimeout as well
                    # self._pool._clear()
                    self._pool._reload(1)  # reload at most 1 worker
                    self.__del__()
                else:
                    if self._rpc._worker_ctx.data is not None:
                        if self._pool.context_data is None:
                            # clear all keys since there is no pool context_data
                            self._rpc._worker_ctx.data.clear()
                        elif self._rpc._worker_ctx.data != self._pool.context_data:
                            # revert worker_ctx.data back to the original
                            # pool.context_data when the block exits
                            for key in list(self._rpc._worker_ctx.data.keys()):
                                if key not in self._pool.context_data:
                                    del self._rpc._worker_ctx.data[key]
                                else:
                                    self._rpc._worker_ctx.data[key] = self._pool.context_data[key]
                    self._pool._put_back(self)
            except ReferenceError:  # pragma: no cover
                # We're detached from the parent, so this context
                # is going to silently die.
                self.__del__()