Example #1
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Required IPC directory does not exist at"
                            " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy, consume_in, zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Permission denied to IPC directory at"
                            " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Could not create ZeroMQ receiver daemon. "
                        "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #2
File: impl_zmq.py Project: lihuiba/volt
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Required IPC directory does not exist at"
                                  " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Permission denied to IPC directory at"
                                  " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
                              "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #3
    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
        """

        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
        except Exception:
            if self.ack_on_error:
                LOG.exception(
                    _LE("Failed to process message"
                        " ... skipping it."))
                message.ack()
            else:
                LOG.exception(
                    _LE("Failed to process message"
                        " ... will requeue."))
                message.requeue()
        else:
            message.ack()
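
A minimal, self-contained sketch of the same control flow as _callback_handler above: ack on success, ack or requeue on failure depending on ack_on_error. FakeMessage, handle and the payloads below are illustrative stand-ins, not part of the oslo code.

import json


class FakeMessage(object):
    """Stand-in for a kombu message exposing payload/ack/requeue."""
    def __init__(self, payload):
        self.payload = payload

    def ack(self):
        print("acked")

    def requeue(self):
        print("requeued")


def handle(message, callback, ack_on_error=True):
    # Same branching as _callback_handler: ack on success,
    # ack or requeue on failure depending on ack_on_error.
    try:
        callback(json.loads(message.payload))
    except Exception:
        if ack_on_error:
            message.ack()
        else:
            message.requeue()
    else:
        message.ack()


handle(FakeMessage('{"method": "ping"}'), lambda msg: None)            # acked
handle(FakeMessage('not json'), lambda msg: None, ack_on_error=False)  # requeued
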
Example #4
File: service.py Project: lihuiba/volt
    def _wait_for_exit_or_signal(self, ready_callback=None):
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_LE('Exception during rpc cleanup.'))

        return status, signo
Example #5
    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug("CONSUMER RECEIVED DATA: %s", data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
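
For orientation, a hedged sketch of the two multipart layouts the branch above distinguishes. The frame ordering is an assumption read off this consumer, and the field values are purely illustrative.

# Legacy frame:  data[2] == 'cast', data[3] is one packed (ctx, msg) envelope.
# v2 frame:      data[2] == 'impl_zmq_v2', data[3] is the marshalled context and
#                data[4:] is a flattened key/value envelope that
#                unflatten_envelope() rebuilds into a dict.
legacy_style = ['<msg_id>', '<topic>', 'cast', '<packed (ctx, msg) envelope>']
v2_style = ['<msg_id>', '<topic>', 'impl_zmq_v2', '<marshalled ctx>',
            '<key1>', '<value1>', '<key2>', '<value2>']
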
Example #6
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(
                        'Dynamic looping call sleeping for %.02f '
                        'seconds', idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
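
A plain-Python sketch of the dynamic-interval loop above, with time.sleep standing in for greenthread.sleep and a trimmed-down LoopingCallDone; the task function and its return values are illustrative.

import time


class LoopingCallDone(Exception):
    def __init__(self, retvalue=True):
        self.retvalue = retvalue


def run_dynamic(task, periodic_interval_max=None):
    """Run task repeatedly; task returns the idle time until its next run."""
    while True:
        try:
            idle = task()
        except LoopingCallDone as e:
            return e.retvalue
        if periodic_interval_max is not None:
            idle = min(idle, periodic_interval_max)
        time.sleep(idle)


calls = []


def task():
    calls.append(1)
    if len(calls) >= 3:
        raise LoopingCallDone(retvalue='finished')
    return 0.01              # ask to run again in 10 ms


print(run_dynamic(task, periodic_interval_max=1.0))   # -> finished
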
Example #7
    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        LOG.debug("Running func with context: %s", ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(ctx, data['version'], data['method'],
                                    data.get('namespace'), **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException as e:
            LOG.debug("Expected exception during message handling (%s)" %
                      e._exc_info[1])
            return {
                'exc':
                rpc_common.serialize_remote_exception(e._exc_info,
                                                      log_failure=False)
            }
        except Exception:
            LOG.error(_LE("Exception during message handling"))
            return {
                'exc': rpc_common.serialize_remote_exception(sys.exc_info())
            }
Example #8
 def inner_func(*args, **kwargs):
     last_log_time = 0
     last_exc_message = None
     exc_count = 0
     while True:
         try:
             return infunc(*args, **kwargs)
         except Exception as exc:
             this_exc_message = six.u(str(exc))
             if this_exc_message == last_exc_message:
                 exc_count += 1
             else:
                 exc_count = 1
             # Do not log any more frequently than once a minute unless
             # the exception message changes
             cur_time = int(time.time())
             if (cur_time - last_log_time > 60 or
                     this_exc_message != last_exc_message):
                 logging.exception(
                     _LE('Unexpected exception occurred %d time(s)... '
                         'retrying.') % exc_count)
                 last_log_time = cur_time
                 last_exc_message = this_exc_message
                 exc_count = 0
             # This should be a very rare event. In case it isn't, do
             # a sleep.
             time.sleep(1)
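
The code above is the inner function of a retry decorator (forever_retry_uncaught_exceptions in oslo excutils); a self-contained, simplified sketch of the same wrapping pattern, without the once-a-minute log throttling:

import functools
import time


def forever_retry(infunc):
    @functools.wraps(infunc)
    def inner_func(*args, **kwargs):
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                print('Unexpected exception, retrying: %s' % exc)
                time.sleep(0.01)
    return inner_func


attempts = {'n': 0}


@forever_retry
def flaky():
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise RuntimeError('transient failure')
    return 'ok after %d attempts' % attempts['n']


print(flaky())   # -> ok after 3 attempts
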
Example #9
File: amqp.py Project: vmthunder/volt
    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug('Expected exception during message handling (%s)' %
                      e._exc_info[1])
            ctxt.reply(None,
                       e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_LE('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
Example #10
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_LW('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
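
The fixed-interval variant above sleeps for whatever is left of the interval after the task ran; a small sketch of that arithmetic with illustrative numbers:

interval = 5.0   # seconds between task starts
elapsed = 6.5    # how long the last run took, i.e. timeutils.delta_seconds(start, end)
delay = interval - elapsed
if delay <= 0:
    print('task run outlasted interval by %s sec' % -delay)   # -> 1.5
print(delay if delay > 0 else 0)                              # sleep 0, run again at once
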
Example #11
File: impl_zmq.py Project: lihuiba/volt
    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug("CONSUMER RECEIVED DATA: %s", data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
Example #12
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(
                            _LW('task run outlasted interval by %s sec') %
                            -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #13
File: service.py Project: vmthunder/volt
    def _wait_for_exit_or_signal(self, ready_callback=None):
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_LE('Exception during rpc cleanup.'))

        return status, signo
Example #14
 def _error_callback(exc):
     if isinstance(exc, qpid_exceptions.Empty):
         LOG.debug('Timed out waiting for RPC response: %s' % str(exc))
         raise rpc_common.Timeout()
     else:
         LOG.exception(
             _LE('Failed to consume message from queue: %s') % str(exc))
Example #15
File: lockutils.py Project: lihuiba/volt
 def release(self):
     try:
         self.unlock()
         self.lockfile.close()
         LOG.debug('Released file lock "%s"', self.fname)
     except IOError:
         LOG.exception(_LE("Could not release the acquired lock `%s`"),
                       self.fname)
Example #16
 def _error_callback(exc):
     if isinstance(exc, socket.timeout):
         LOG.debug('Timed out waiting for RPC response: %s' % str(exc))
         raise rpc_common.Timeout()
     else:
         LOG.exception(
             _LE('Failed to consume message from queue: %s') % str(exc))
         info['do_consume'] = True
Example #17
File: impl_qpid.py Project: lihuiba/volt
 def _error_callback(exc):
     if isinstance(exc, qpid_exceptions.Empty):
         LOG.debug('Timed out waiting for RPC response: %s' %
                   str(exc))
         raise rpc_common.Timeout()
     else:
         LOG.exception(_LE('Failed to consume message from queue: %s') %
                       str(exc))
Example #18
 def release(self):
     try:
         self.unlock()
         self.lockfile.close()
         LOG.debug('Released file lock "%s"', self.fname)
     except IOError:
         LOG.exception(_LE("Could not release the acquired lock `%s`"),
                       self.fname)
Example #19
 def _error_callback(exc):
     if isinstance(exc, socket.timeout):
         LOG.debug('Timed out waiting for RPC response: %s' %
                   str(exc))
         raise rpc_common.Timeout()
     else:
         LOG.exception(_LE('Failed to consume message from queue: %s') %
                       str(exc))
         info['do_consume'] = True
Example #20
 def __exit__(self, exc_type, exc_val, exc_tb):
     if exc_type is not None:
         if self.reraise:
             logging.error(_LE('Original exception being dropped: %s'),
                           traceback.format_exception(self.type_,
                                                      self.value,
                                                      self.tb))
         return False
     if self.reraise:
         six.reraise(self.type_, self.value, self.tb)
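
The __exit__ above backs the save_and_reraise_exception context manager used throughout these examples; a minimal usage sketch, assuming the oslo.utils packaging of the helper (the volume name and the failure are illustrative):

from oslo_utils import excutils   # assumption: oslo.utils provides this helper


def do_attach(volume):
    raise ValueError('device busy')            # simulated failure


def attach_volume(volume):
    try:
        do_attach(volume)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Cleanup runs first; the original ValueError is re-raised
            # when the with-block exits (unless reraise is set to False).
            print('attach failed, rolling back %s' % volume)


try:
    attach_volume('vol-1')
except ValueError as e:
    print('caller still sees: %s' % e)         # -> caller still sees: device busy
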
Example #21
File: impl_zmq.py Project: lihuiba/volt
def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        return jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("JSON serialization failed."))
Example #22
def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        return jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("JSON serialization failed."))
Example #23
File: impl_qpid.py Project: lihuiba/volt
 def consume(self):
     """Fetch the message and pass it to the callback object."""
     message = self.receiver.fetch()
     try:
         self._unpack_json_msg(message)
         msg = rpc_common.deserialize_msg(message.content)
         self.callback(msg)
     except Exception:
         LOG.exception(_LE("Failed to process message... skipping it."))
     finally:
         # TODO(sandy): Need support for optional ack_on_error.
         self.session.acknowledge(message)
Example #24
    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                msg = _('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(max_retries)d '
                        'tries: %(err_str)s') % log_info
                LOG.error(msg)
                raise rpc_common.RPCException(msg)

            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(
                _LE('AMQP server on %(hostname)s:%(port)d is '
                    'unreachable: %(err_str)s. Trying again in '
                    '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
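
A small sketch of the back-off arithmetic in reconnect() above: the sleep grows by interval_stepping on each failed attempt and is capped at interval_max. The option values here are illustrative, not defaults taken from the code.

interval_start, interval_stepping, interval_max = 1, 2, 30

sleep_time = 0
schedule = []
for attempt in range(1, 8):
    if attempt == 1:
        sleep_time = interval_start or 1
    else:
        sleep_time += interval_stepping
    if interval_max:
        sleep_time = min(sleep_time, interval_max)
    schedule.append(sleep_time)

print(schedule)   # -> [1, 3, 5, 7, 9, 11, 13]
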
Example #25
 def consume(self):
     """Fetch the message and pass it to the callback object."""
     message = self.receiver.fetch()
     try:
         self._unpack_json_msg(message)
         msg = rpc_common.deserialize_msg(message.content)
         self.callback(msg)
     except Exception:
         LOG.exception(_LE("Failed to process message... skipping it."))
     finally:
         # TODO(sandy): Need support for optional ack_on_error.
         self.session.acknowledge(message)
Example #26
    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                msg = _('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(max_retries)d '
                        'tries: %(err_str)s') % log_info
                LOG.error(msg)
                raise rpc_common.RPCException(msg)

            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
                          'unreachable: %(err_str)s. Trying again in '
                          '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
Example #27
    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
        """

        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
        except Exception:
            if self.ack_on_error:
                LOG.exception(_LE("Failed to process message"
                                  " ... skipping it."))
                message.ack()
            else:
                LOG.exception(_LE("Failed to process message"
                                  " ... will requeue."))
                message.requeue()
        else:
            message.ack()
Example #28
File: policy.py Project: lihuiba/volt
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_LE("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_LE("No handler for matches of kind %s") % kind)
        return FalseCheck()
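
_parse_check above dispatches on a _checks registry keyed by the text before the first ':'; a hedged, self-contained sketch of that registry and lookup. The register decorator and RoleCheck class are illustrative stand-ins, not the real oslo policy classes.

_checks = {}


def register(name):
    def decorator(cls):
        _checks[name] = cls
        return cls
    return decorator


@register('role')
class RoleCheck(object):
    def __init__(self, kind, match):
        self.kind, self.match = kind, match

    def __call__(self, creds):
        return self.match in creds.get('roles', [])


def parse(rule):
    kind, match = rule.split(':', 1)
    return _checks[kind](kind, match)


check = parse('role:admin')
print(check({'roles': ['admin']}))    # -> True
print(check({'roles': ['member']}))   # -> False
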
Example #29
File: policy.py Project: vmthunder/volt
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_LE("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_LE("No handler for matches of kind %s") % kind)
        return FalseCheck()
Example #30
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if not message:
            try:
                message = self.msg_fmt % kwargs

            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation'))
                for name, value in six.iteritems(kwargs):
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.msg_fmt

        super(RPCException, self).__init__(message)
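
A sketch of how msg_fmt-based exception classes like the one above are defined and raised; the base class is trimmed down (no logging) and the Timeout format string is a simplified stand-in for the rpc_common original.

class RPCException(Exception):
    msg_fmt = "An unknown RPC related exception occurred."

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if not message:
            try:
                message = self.msg_fmt % kwargs
            except Exception:
                # kwargs did not match the format string; fall back
                message = self.msg_fmt
        super(RPCException, self).__init__(message)


class Timeout(RPCException):
    msg_fmt = "Timeout while waiting on RPC response - topic: %(topic)s"


print(Timeout(topic='volume'))    # -> Timeout while waiting on RPC response - topic: volume
print(Timeout(wrong_kwarg='x'))   # falls back to the raw msg_fmt
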
Example #31
File: common.py Project: lihuiba/volt
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if not message:
            try:
                message = self.msg_fmt % kwargs

            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation'))
                for name, value in six.iteritems(kwargs):
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.msg_fmt

        super(RPCException, self).__init__(message)
Example #32
File: impl_zmq.py Project: lihuiba/volt
    def process(self, proxy, ctx, data):
        data.setdefault('version', None)
        data.setdefault('args', {})

        # Methods starting with '-' are processed
        # internally (not a valid method name).
        method = data.get('method')
        if not method:
            LOG.error(_LE("RPC message did not include method."))
            return

        # Internal method
        # uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], data.get('namespace'), **data['args'])
Example #33
    def process(self, proxy, ctx, data):
        data.setdefault('version', None)
        data.setdefault('args', {})

        # Methods starting with '-' are processed
        # internally (not a valid method name).
        method = data.get('method')
        if not method:
            LOG.error(_LE("RPC message did not include method."))
            return

        # Internal method
        # uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'], data['method'],
                       data.get('namespace'), **data['args'])
Example #34
    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        attempt = 0
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.ConnectionError:
                    pass

            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _LE("Unable to connect to AMQP server: %(e)s. "
                          "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                delay = min(delay + 1, 5)
            else:
                LOG.info(_LI('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug("Re-established AMQP queues")
Example #35
File: impl_qpid.py Project: lihuiba/volt
    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        attempt = 0
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.ConnectionError:
                    pass

            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _LE("Unable to connect to AMQP server: %(e)s. "
                          "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                delay = min(delay + 1, 5)
            else:
                LOG.info(_LI('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug("Re-established AMQP queues")
Example #36
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_LE("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    if (cls_name.endswith(_REMOTE_POSTFIX)
            and mod_name.endswith(_REMOTE_POSTFIX)):
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]

    data = {
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
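
serialize_remote_exception is normally fed sys.exc_info() from inside an except block, as in the amqp and zmq handlers earlier; a minimal call sketch that relies on the function and module imports above:

import sys

try:
    raise ValueError("boom")
except ValueError:
    # log_failure=False suppresses the error log, as in the ClientException path.
    payload = serialize_remote_exception(sys.exc_info(), log_failure=False)

# payload is a JSON string carrying class, module, message, args, kwargs and the
# formatted traceback; the receiving side rebuilds the exception with the matching
# rpc_common deserialization helper.
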
Example #37
File: common.py Project: lihuiba/volt
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_LE("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    if (cls_name.endswith(_REMOTE_POSTFIX) and
            mod_name.endswith(_REMOTE_POSTFIX)):
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]

    data = {
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
Example #38
File: service.py Project: lihuiba/volt
    def _child_wait_for_exit_or_signal(self, launcher):
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo
Example #39
    def close(self):
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if self.subscriptions:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        try:
            # Default is to linger
            self.sock.close()
        except Exception:
            # While this is a bad thing to happen,
            # it would be much worse if some of the code calling this
            # were to fail. For now, let's log, and later evaluate
            # if we can safely raise here.
            LOG.error(_LE("ZeroMQ socket could not be closed."))
        self.sock = None
Example #40
File: service.py Project: vmthunder/volt
    def _child_wait_for_exit_or_signal(self, launcher):
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo
Example #41
File: impl_zmq.py Project: lihuiba/volt
    def close(self):
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if self.subscriptions:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        try:
            # Default is to linger
            self.sock.close()
        except Exception:
            # While this is a bad thing to happen,
            # it would be much worse if some of the code calling this
            # were to fail. For now, let's log, and later evaluate
            # if we can safely raise here.
            LOG.error(_LE("ZeroMQ socket could not be closed."))
        self.sock = None
Example #42
File: policy.py Project: lihuiba/volt
def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_LE("Failed to understand rule %r") % rule)

        # Fail closed
        return FalseCheck()
Example #43
File: policy.py Project: vmthunder/volt
def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_LE("Failed to understand rule %r") % rule)

        # Fail closed
        return FalseCheck()
Example #44
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call sleeping for %.02f '
                              'seconds', idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #45
File: impl_zmq.py Project: lihuiba/volt
    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        LOG.debug("Running func with context: %s", ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'],
                data.get('namespace'), **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException as e:
            LOG.debug("Expected exception during message handling (%s)" %
                      e._exc_info[1])
            return {'exc':
                    rpc_common.serialize_remote_exception(e._exc_info,
                                                          log_failure=False)}
        except Exception:
            LOG.error(_LE("Exception during message handling"))
            return {'exc':
                    rpc_common.serialize_remote_exception(sys.exc_info())}
Example #46
 def _error_callback(exc):
     log_info = {'topic': topic, 'err_str': str(exc)}
     LOG.exception(
         _LE("Failed to publish message to topic "
             "'%(topic)s': %(err_str)s") % log_info)
Example #47
 def _error_callback(exc):
     log_info = {'topic': topic, 'err_str': str(exc)}
     LOG.exception(_LE("Failed to publish message to topic "
                   "'%(topic)s': %(err_str)s") % log_info)
Example #48
 def _connect_error(exc):
     log_info = {'topic': topic, 'err_str': str(exc)}
     LOG.error(
         _LE("Failed to declare consumer for topic '%(topic)s': "
             "%(err_str)s") % log_info)
Example #49
File: impl_qpid.py Project: lihuiba/volt
 def _consume():
     nxt_receiver = self.session.next_receiver(timeout=timeout)
     try:
         self._lookup_consumer(nxt_receiver).consume()
     except Exception:
         LOG.exception(_LE("Error processing message.  Skipping it."))
Example #50
    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        data = sock.recv(copy=False)
        topic = data[1].bytes

        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
            topic = topic.split('.', 1)[0]
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:

            def publisher(waiter):
                LOG.info(_LI("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type,
                                         bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while (True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_LE("Topic socket file creation failed."))
                return

        try:
            self.topic_proxy[topic].put_nowait(data)
        except eventlet.queue.Full:
            LOG.error(
                _LE("Local per-topic backlog buffer full for topic "
                    "%(topic)s. Dropping message.") % {'topic': topic})
Example #51
 def _consume():
     nxt_receiver = self.session.next_receiver(timeout=timeout)
     try:
         self._lookup_consumer(nxt_receiver).consume()
     except Exception:
         LOG.exception(_LE("Error processing message.  Skipping it."))
Example #52
 def _connect_error(exc):
     log_info = {'topic': topic, 'err_str': str(exc)}
     LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
               "%(err_str)s") % log_info)
Example #53
File: impl_zmq.py Project: lihuiba/volt
    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        data = sock.recv(copy=False)
        topic = data[1].bytes

        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
            topic = topic.split('.', 1)[0]
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            def publisher(waiter):
                LOG.info(_LI("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_LE("Topic socket file creation failed."))
                return

        try:
            self.topic_proxy[topic].put_nowait(data)
        except eventlet.queue.Full:
            LOG.error(_LE("Local per-topic backlog buffer full for topic "
                          "%(topic)s. Dropping message.") % {'topic': topic})