Example #1
    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
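
The generator branch above is what lets a dispatched method stream results: each yielded value becomes its own reply, and the final ending=True reply tells multicall the stream is done. A minimal sketch, assuming a hypothetical Proxy whose dispatch() returns a generator (not the real RpcDispatcher):

    import inspect

    class Proxy(object):
        def dispatch(self, ctxt, version, method, namespace, **args):
            def stream():
                for i in range(3):
                    yield {'step': i}  # each value is sent as one reply
            return stream()

    rval = Proxy().dispatch(None, '1.0', 'stream', None)
    assert inspect.isgenerator(rval)
    print(list(rval))  # [{'step': 0}, {'step': 1}, {'step': 2}]
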
Example #2
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
Example #3
 def _connect(self, params):
     """Connect to rabbit.  Re-establish any queues that may have
     been declared before if we are reconnecting.  Exceptions should
     be handled by the caller.
     """
     if self.connection:
         LOG.info(_("Reconnecting to AMQP server on "
                  "%(hostname)s:%(port)d") % params)
         try:
             self.connection.release()
         except self.connection_errors:
             pass
         # Setting this in case the next statement fails, though
         # it shouldn't be doing any network operations, yet.
         self.connection = None
     self.connection = kombu.connection.BrokerConnection(**params)
     self.connection_errors = self.connection.connection_errors
     if self.memory_transport:
         # Kludge to speed up tests.
         self.connection.transport.polling_interval = 0.0
     self.consumer_num = itertools.count(1)
     self.connection.connect()
     self.channel = self.connection.channel()
     # work around 'memory' transport bug in 1.1.3
     if self.memory_transport:
         self.channel._new_queue('ae.undeliver')
     for consumer in self.consumers:
         consumer.reconnect(self.channel)
     LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
              params)
Example #4
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
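
The scheduling rule here (sleep for the interval minus the elapsed run time, floored at zero) can be sketched without eventlet; datetime and time stand in for timeutils and greenthread, and the function name is illustrative:

    import datetime
    import time

    def fixed_interval_loop(f, interval, iterations):
        for _ in range(iterations):
            start = datetime.datetime.utcnow()
            f()
            elapsed = (datetime.datetime.utcnow() - start).total_seconds()
            delay = interval - elapsed
            if delay <= 0:
                print('task run outlasted interval by %s sec' % -delay)
            time.sleep(max(delay, 0))

    fixed_interval_loop(lambda: time.sleep(0.1), 1.0, 2)
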
Example #5
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while True:
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)
Example #6
    def _wait_for_exit_or_signal(self, ready_callback=None):
        status = None
        signo = 0

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_('Exception during rpc cleanup.'))

        return status, signo
Example #7
    def create_consumer(self, topic, proxy, fanout=False):
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)
Example #8
    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))
Example #9
    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = ".".join([self.__class__.__name__, task_name])

            now = timeutils.utcnow()
            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # If a periodic task is _nearly_ due, then we'll run it early
            if spacing is not None and last_run is not None:
                due = last_run + datetime.timedelta(seconds=spacing)
                if not timeutils.is_soon(due, 0.2):
                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                    continue

            if spacing is not None:
                idle_for = min(idle_for, spacing)

            LOG.debug(_("Running periodic task %(full_task_name)s"), {"full_task_name": full_task_name})
            self._periodic_last_run[task_name] = timeutils.utcnow()

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_("Error during %(full_task_name)s: %(e)s"), {"full_task_name": full_task_name, "e": e})
            time.sleep(0)

        return idle_for
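
The is_soon(due, 0.2) check lets a task that is nearly due run up to 0.2 seconds early rather than waiting out another full idle cycle. A worked sketch of the arithmetic, with datetime standing in for timeutils:

    import datetime

    spacing = 60
    now = datetime.datetime.utcnow()
    last_run = now - datetime.timedelta(seconds=59.9)
    due = last_run + datetime.timedelta(seconds=spacing)
    # due is only 0.1s away -- inside the 0.2s window, so the task runs now
    print((due - now).total_seconds() <= 0.2)  # True
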
Example #10
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(
                            _('task run outlasted interval by %s sec') %
                            -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #11
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug(_('Got file lock "%s"'), self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(
                        _("Unable to acquire lock on"
                          " `%(filename)s` due to"
                          " %(exception)s") % {
                              'filename': self.fname,
                              'exception': e,
                          })
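
On POSIX, the trylock()/unlock() pair used above is typically a non-blocking fcntl lock; a minimal sketch under that assumption:

    import fcntl

    def trylock(lockfile):
        # Raises IOError with EACCES/EAGAIN while another process holds it.
        fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(lockfile):
        fcntl.lockf(lockfile, fcntl.LOCK_UN)
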
Example #12
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(
                        _('Dynamic looping call sleeping for %.02f '
                          'seconds'), idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #13
File: worker.py Project: jamesyli/solum
def main():
    cfg.CONF(sys.argv[1:], project='solum')
    logging.setup('solum')
    solum.TLS.trace = trace_data.TraceData()

    LOG.info(_('Starting server in PID %s') % os.getpid())
    LOG.debug(_("Configuration:"))
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'solum.worker.config', group='worker')
    cfg.CONF.import_opt('host', 'solum.worker.config', group='worker')
    cfg.CONF.import_opt('handler', 'solum.worker.config', group='worker')

    handlers = {
        'noop': noop_handler.Handler,
        'shell': shell_handler.Handler,
    }

    endpoints = [
        handlers[cfg.CONF.worker.handler](),
    ]

    server = service.Service(cfg.CONF.worker.topic,
                             cfg.CONF.worker.host, endpoints)
    server.serve()
Example #14
    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(ctx, data['version'], data['method'],
                                    data.get('namespace'), **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException as e:
            LOG.debug(
                _("Expected exception during message handling (%s)") %
                e._exc_info[1])
            return {
                'exc':
                rpc_common.serialize_remote_exception(e._exc_info,
                                                      log_failure=False)
            }
        except Exception:
            LOG.error(_("Exception during message handling"))
            return {
                'exc': rpc_common.serialize_remote_exception(sys.exc_info())
            }
Example #15
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug(_('Got file lock "%s"'), self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {
                                                    'filename': self.fname,
                                                    'exception': e,
                                                })
Example #16
    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)d exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
Example #17
    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
Example #18
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #19
    def create_consumer(self, topic, proxy, fanout=False):
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"), ['PULL',
                                              'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy,
                              inaddr,
                              sock_type,
                              subscribe=subscribe,
                              in_bind=False)
        self.topics.append(topic)
Example #20
def _multi_send(method,
                context,
                topic,
                msg,
                timeout=None,
                envelope=False,
                _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context, _topic, msg, timeout,
                             envelope, _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout, envelope)
Example #21
    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
Example #22
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _("Required IPC directory does not exist at"
                          " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy, consume_in, zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _("Permission denied to IPC directory at"
                          " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _("Could not create ZeroMQ receiver daemon. "
                      "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #23
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type,
                                         bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while True:
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)
Example #24
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        while True:
            self.handle_signal()
            self._respawn_children()
            if self.sigcaught:
                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_('Caught %s, stopping children'), signame)
            if not _is_sighup(self.sigcaught):
                break

            for pid in self.children:
                os.kill(pid, signal.SIGHUP)
            self.running = True
            self.sigcaught = None

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
Example #25
    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid
Example #26
    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)
Example #27
 def release(self):
     try:
         self.unlock()
         self.lockfile.close()
     except IOError:
         LOG.exception(_("Could not release the acquired lock `%s`"),
                       self.fname)
     LOG.debug(_('Released file lock "%s"'), self.fname)
Example #28
 def _error_callback(exc):
     if isinstance(exc, qpid_exceptions.Empty):
         LOG.debug(_('Timed out waiting for RPC response: %s') %
                   str(exc))
         raise rpc_common.Timeout()
     else:
         LOG.exception(_('Failed to consume message from queue: %s') %
                       str(exc))
Example #29
 def release(self):
     try:
         self.unlock()
         self.lockfile.close()
     except IOError:
         LOG.exception(_("Could not release the acquired lock `%s`"),
                       self.fname)
     LOG.debug(_('Released file lock "%s"'), self.fname)
Example #30
 def set_name(self, value):
     if len(value) > 100:
         raise ValueError(_('Names must not be longer than 100 '
                            'characters'))
     allowed_chars = string.ascii_lowercase + string.digits + '-_'
     for ch in value:
         if ch not in allowed_chars:
             raise ValueError(_('Names must only contain a-z,0-9,-,_'))
     self.__name = value
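
Usage sketch (obj is any hypothetical instance exposing set_name):

    obj.set_name('my-worker_01')  # accepted: lowercase, digits, '-', '_'
    obj.set_name('Bad Name!')     # raises ValueError
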
Example #31
 def wrapper(*args, **kwargs):
     try:
         path = lock_path or os.environ.get("SOLUM_LOCK_PATH")
         lock = lockfile.FileLock(os.path.join(path, lock_prefix))
         with lock:
             LOG.debug(_('Got lock "%s"') % f.__name__)
             return f(*args, **kwargs)
     finally:
         LOG.debug(_('Lock released "%s"') % f.__name__)
Example #32
 def _error_callback(exc):
     if isinstance(exc, socket.timeout):
         LOG.debug(_('Timed out waiting for RPC response: %s') %
                   str(exc))
         raise rpc_common.Timeout()
     else:
         LOG.exception(_('Failed to consume message from queue: %s') %
                       str(exc))
         info['do_consume'] = True
Example #33
 def inner(*args, **kwargs):
     try:
         with lock(name, lock_file_prefix, external):
             LOG.debug(_('Got semaphore / lock "%(function)s"'),
                       {'function': f.__name__})
             return f(*args, **kwargs)
     finally:
         LOG.debug(_('Semaphore / lock released "%(function)s"'),
                   {'function': f.__name__})
Example #34
 def inner(*args, **kwargs):
     try:
         with lock(name, lock_file_prefix, external):
             LOG.debug(_('Got semaphore / lock "%(function)s"'),
                       {'function': f.__name__})
             return f(*args, **kwargs)
     finally:
         LOG.debug(_('Semaphore / lock released "%(function)s"'),
                   {'function': f.__name__})
Example #35
 def wrapper(*args, **kwargs):
     try:
         path = lock_path or os.environ.get("SOLUM_LOCK_PATH")
         lock = lockfile.FileLock(os.path.join(path, lock_prefix))
         with lock:
             LOG.debug(_('Got lock "%s"') % f.__name__)
             return f(*args, **kwargs)
     finally:
         LOG.debug(_('Lock released "%s"') % f.__name__)
Example #36
 def _process_data(self, message_data):
     msg_id = message_data.pop('_msg_id', None)
     waiter = self._call_waiters.get(msg_id)
     if not waiter:
         LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                    ', message : %(data)s'), {'msg_id': msg_id,
                                              'data': message_data})
         LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
     else:
         waiter.put(message_data)
Example #37
 def set_name(self, value):
     if len(value) > 100:
         raise ValueError(
             _('Names must not be longer than 100 '
               'characters'))
     allowed_chars = string.ascii_lowercase + string.digits + '-_'
     for ch in value:
         if ch not in allowed_chars:
             raise ValueError(_('Names must only contain a-z,0-9,-,_'))
     self.__name = value
Example #38
def get(url, max_size, chunk_size=None, allowed_schemes=('http', 'https')):
    """Get the data at the specified URL.

    The URL must use the http: or https: schemes.
    The file: scheme is also supported if you override
    the allowed_schemes argument.
    max_size is the maximum number of bytes to fetch.
    chunk_size defaults to max_size and sets the size of each chunk read.
    Raises an IOError if fetching the data fails or max_size is exceeded.
    """

    LOG.info(_('Fetching data from %s') % url)

    components = urlutils.urlparse(url)

    if components.scheme not in allowed_schemes:
        raise IOError(_('Invalid URL scheme %s') % components.scheme)

    if chunk_size is None:
        chunk_size = max_size
    if max_size < 1:
        raise IOError("max_size should be greater than 0")
    if chunk_size < 1:
        raise IOError("chunk_size should be greater than 0")

    if components.scheme == 'file':
        try:
            return urlutils.urlopen(url).read()
        except urlutils.URLError as uex:
            raise IOError(_('Failed to read file: %s') % str(uex))

    try:
        resp = requests.get(url, stream=True)
        resp.raise_for_status()

        # We cannot use resp.text here because it would download the
        # entire file, and a large enough file would bring down the
        # engine.  The 'Content-Length' header could be faked, so it's
        # necessary to download the content in chunks until
        # max_size is reached.  The chunk_size we use needs
        # to balance CPU-intensive string concatenation with accuracy
        # (e.g. it's possible to fetch 1000 bytes greater than
        # max_size with a chunk_size of 1000).
        reader = resp.iter_content(chunk_size=chunk_size)
        result = ""
        for chunk in reader:
            result += chunk
            if len(result) > max_size:
                raise IOError("File exceeds maximum allowed size (%s "
                              "bytes)" % max_size)
        return result

    except exceptions.RequestException as ex:
        raise IOError(_('Failed to retrieve file: %s') % str(ex))
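
A hedged usage sketch (the URL and size are illustrative):

    data = get('https://example.com/template.yaml', max_size=1024 * 1024)
    # raises IOError as soon as more than max_size bytes have been read
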
Example #39
def get(url, max_size, chunk_size=None, allowed_schemes=('http', 'https')):
    """Get the data at the specified URL.

    The URL must use the http: or https: schemes.
    The file: scheme is also supported if you override
    the allowed_schemes argument.
    max_size is the maximum number of bytes to fetch.
    chunk_size defaults to max_size and sets the size of each chunk read.
    Raises an IOError if fetching the data fails or max_size is exceeded.
    """

    LOG.info(_('Fetching data from %s') % url)

    components = moves.urllib.parse.urlparse(url)

    if components.scheme not in allowed_schemes:
        raise IOError(_('Invalid URL scheme %s') % components.scheme)

    if chunk_size is None:
        chunk_size = max_size
    if max_size < 1:
        raise IOError("max_size should be greater than 0")
    if chunk_size < 1:
        raise IOError("chunk_size should be greater than 0")

    if components.scheme == 'file':
        try:
            return moves.urllib.request.urlopen(url).read()
        except moves.urllib.error.URLError as uex:
            raise IOError(_('Failed to read file: %s') % str(uex))

    try:
        resp = requests.get(url, stream=True)
        resp.raise_for_status()

        # We cannot use resp.text here because it would download the
        # entire file, and a large enough file would bring down the
        # engine.  The 'Content-Length' header could be faked, so it's
        # necessary to download the content in chunks until
        # max_size is reached.  The chunk_size we use needs
        # to balance CPU-intensive string concatenation with accuracy
        # (e.g. it's possible to fetch 1000 bytes greater than
        # max_size with a chunk_size of 1000).
        reader = resp.iter_content(chunk_size=chunk_size)
        result = ""
        for chunk in reader:
            result += chunk
            if len(result) > max_size:
                raise IOError("File exceeds maximum allowed size (%s "
                              "bytes)" % max_size)
        return result

    except exceptions.RequestException as ex:
        raise IOError(_('Failed to retrieve file: %s') % str(ex))
Example #40
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload:      A Python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
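
A usage sketch matching the docstring's message example (context is assumed to be an existing request context):

    notify(context, 'compute.host1', 'compute.create_instance', 'WARN',
           {'instance_id': 12})
    # a priority outside log_levels raises BadPriorityException, e.g.:
    # notify(context, 'compute.host1', 'compute.create_instance', 'LOUD', {})
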
Example #41
    def _v3_client_init(self):
        kwargs = {
            'auth_url': self.endpoint,
            'endpoint': self.endpoint
        }
        # Note try trust_id first, as we can't reuse auth_token in that case
        if self.context.trust_id is not None:
            # We got a trust_id, so we use the admin credentials
            # to authenticate with the trust_id so we can use the
            # trust impersonating the trustor user.
            kwargs.update(self._service_admin_creds())
            kwargs['trust_id'] = self.context.trust_id
            kwargs.pop('project_name')
        elif self.context.auth_token_info is not None:
            # The auth_ref version must be set according to the token version
            if 'access' in self.context.auth_token_info:
                kwargs['auth_ref'] = copy.deepcopy(
                    self.context.auth_token_info['access'])
                kwargs['auth_ref']['version'] = 'v2.0'
                kwargs['auth_ref']['token']['id'] = self.context.auth_token
            elif 'token' in self.context.auth_token_info:
                kwargs['auth_ref'] = copy.deepcopy(
                    self.context.auth_token_info['token'])
                kwargs['auth_ref']['version'] = 'v3'
                kwargs['auth_ref']['auth_token'] = self.context.auth_token
            else:
                LOG.error("Unknown version in auth_token_info")
                raise exception.AuthorizationFailure()
        elif self.context.auth_token is not None:
            kwargs['token'] = self.context.auth_token
            kwargs['project_id'] = self.context.tenant
        else:
            LOG.error(_("Keystone v3 API connection failed, no password "
                        "trust or auth_token!"))
            raise exception.AuthorizationFailure()
        client = kc_v3.Client(**kwargs)
        if 'auth_ref' not in kwargs:
            client.authenticate()
        # If we are authenticating with a trust set the context auth_token
        # with the trust scoped token
        if 'trust_id' in kwargs:
            # Sanity check
            if not client.auth_ref.trust_scoped:
                LOG.error(_("trust token re-scoping failed!"))
                raise exception.AuthorizationFailure()
            # All OK so update the context with the token
            self.context.auth_token = client.auth_ref.auth_token
            self.context.auth_url = self.endpoint
            self.context.user = client.auth_ref.user_id
            self.context.tenant = client.auth_ref.project_id
            self.context.user_name = client.auth_ref.username

        return client
Example #42
    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                msg = _('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(max_retries)d '
                        'tries: %(err_str)s') % log_info
                LOG.error(msg)
                raise rpc_common.RPCException(msg)

            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                        'unreachable: %(err_str)s. Trying again in '
                        '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
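
The retry delay grows linearly: interval_start on the first attempt, plus interval_stepping on each subsequent attempt, capped at interval_max. A standalone sketch of that schedule:

    def backoff_schedule(interval_start, interval_stepping, interval_max, n):
        sleep_time = interval_start or 1
        for attempt in range(1, n + 1):
            if attempt > 1:
                sleep_time += interval_stepping
            if interval_max:
                sleep_time = min(sleep_time, interval_max)
            yield sleep_time

    print(list(backoff_schedule(1, 2, 5, 5)))  # [1, 3, 5, 5, 5]
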
Example #43
    def _v3_client_init(self):
        kwargs = {'auth_url': self.v3_endpoint, 'endpoint': self.v3_endpoint}
        # Note try trust_id first, as we can't reuse auth_token in that case
        if self.context.trust_id is not None:
            # We got a trust_id, so we use the admin credentials
            # to authenticate with the trust_id so we can use the
            # trust impersonating the trustor user.
            kwargs.update(self._service_admin_creds())
            kwargs['trust_id'] = self.context.trust_id
            kwargs.pop('project_name')
        elif self.context.auth_token_info is not None:
            # The auth_ref version must be set according to the token version
            if 'access' in self.context.auth_token_info:
                kwargs['auth_ref'] = copy.deepcopy(
                    self.context.auth_token_info['access'])
                kwargs['auth_ref']['version'] = 'v2.0'
                kwargs['auth_ref']['token']['id'] = self.context.auth_token
            elif 'token' in self.context.auth_token_info:
                kwargs['auth_ref'] = copy.deepcopy(
                    self.context.auth_token_info['token'])
                kwargs['auth_ref']['version'] = 'v3'
                kwargs['auth_ref']['auth_token'] = self.context.auth_token
            else:
                LOG.error("Unknown version in auth_token_info")
                raise exception.AuthorizationFailure()
        elif self.context.auth_token is not None:
            kwargs['token'] = self.context.auth_token
            kwargs['project_id'] = self.context.tenant
        else:
            LOG.error(
                _("Keystone v3 API connection failed, no password "
                  "trust or auth_token!"))
            raise exception.AuthorizationFailure()
        client = kc_v3.Client(**kwargs)
        if 'auth_ref' not in kwargs:
            client.authenticate()
        # If we are authenticating with a trust set the context auth_token
        # with the trust scoped token
        if 'trust_id' in kwargs:
            # Sanity check
            if not client.auth_ref.trust_scoped:
                LOG.error(_("trust token re-scoping failed!"))
                raise exception.AuthorizationFailure()
            # All OK so update the context with the token
            self.context.auth_token = client.auth_ref.auth_token
            self.context.auth_url = self.v3_endpoint
            self.context.user = client.auth_ref.user_id
            self.context.tenant = client.auth_ref.project_id
            self.context.user_name = client.auth_ref.username

        return client
Example #44
    def __init__(self, info=None, topic=None, method=None):
        """Initiates Timeout object.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(None,
                                      info=info or _('<unknown>'),
                                      topic=topic or _('<unknown>'),
                                      method=method or _('<unknown>'))
Example #45
File: utils.py Project: paulczar/solum
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite).")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s It should be instance "
                "of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
Example #46
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return default
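
Usage sketch:

    bool_from_string('YES')                  # True (case-insensitive)
    bool_from_string('maybe')                # False (the default)
    bool_from_string('maybe', default=True)  # True
    bool_from_string('maybe', strict=True)   # raises ValueError
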
Example #47
 def deprecated(self, msg, *args, **kwargs):
     stdmsg = _("Deprecated: %s") % msg
     if CONF.fatal_deprecations:
         self.critical(stdmsg, *args, **kwargs)
         raise DeprecatedConfig(msg=stdmsg)
     else:
         self.warn(stdmsg, *args, **kwargs)
Example #48
    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.

        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)
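
Usage sketch (LOG assumed to be a logger adapter exposing this method):

    LOG.deprecated('the legacy driver is going away: %s', 'foo')
    # logged once per unique args; raises DeprecatedConfig when
    # CONF.fatal_deprecations is set
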
Example #49
    def register(self, proxy, in_addr, zmq_type_in,
                 in_bind=True, subscribe=None):

        LOG.info(_("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))
Example #50
class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    msg_fmt = _('Timeout while waiting on RPC response - '
                'topic: "%(topic)s", RPC method: "%(method)s" '
                'info: "%(info)s"')

    def __init__(self, info=None, topic=None, method=None):
        """Initiates Timeout object.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(None,
                                      info=info or _('<unknown>'),
                                      topic=topic or _('<unknown>'),
                                      method=method or _('<unknown>'))
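
Usage sketch; fields left unset fall back to '<unknown>' in the message that the RPCException base builds from msg_fmt:

    raise Timeout(topic='compute', method='run_instance')
    # Timeout while waiting on RPC response - topic: "compute",
    # RPC method: "run_instance" info: "<unknown>"
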
Example #51
 def inner_func(*args, **kwargs):
     last_log_time = 0
     last_exc_message = None
     exc_count = 0
     while True:
         try:
             return infunc(*args, **kwargs)
         except Exception as exc:
             this_exc_message = six.u(str(exc))
             if this_exc_message == last_exc_message:
                 exc_count += 1
             else:
                 exc_count = 1
             # Do not log any more frequently than once a minute unless
             # the exception message changes
             cur_time = int(time.time())
             if (cur_time - last_log_time > 60
                     or this_exc_message != last_exc_message):
                 logging.exception(
                     _('Unexpected exception occurred %d time(s)... '
                       'retrying.') % exc_count)
                 last_log_time = cur_time
                 last_exc_message = this_exc_message
                 exc_count = 0
             # This should be a very rare event. In case it isn't, do
             # a sleep.
             time.sleep(1)
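
This retry loop is the body of a decorator (in oslo-incubator it is exposed as excutils.forever_retry_uncaught_exceptions); a sketch of the enclosing shape, with serve() as a hypothetical thread main loop:

    def forever_retry_uncaught_exceptions(infunc):
        def inner_func(*args, **kwargs):
            ...  # retry loop as in the example above
        return inner_func

    @forever_retry_uncaught_exceptions
    def serve():
        ...
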
Example #52
def db_sync(abs_path, version=None, init_version=0):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository,
                                        version)
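
Usage sketch (the repository path is illustrative):

    db_sync('/opt/app/db/migrate_repo')              # upgrade to latest
    db_sync('/opt/app/db/migrate_repo', version=42)  # up- or downgrade to 42
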
Example #53
 def __exit__(self, exc_type, exc_val, exc_tb):
     try:
         self.unlock()
         self.lockfile.close()
     except IOError:
         LOG.exception(_("Could not release the acquired lock `%s`"),
                       self.fname)
Example #54
File: worker.py Project: paulczar/solum
def main():
    cfg.CONF(sys.argv[1:], project='solum')
    logging.setup('solum')
    solum.TLS.trace = trace_data.TraceData()

    LOG.info(_('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'solum.worker.config', group='worker')
    cfg.CONF.import_opt('host', 'solum.worker.config', group='worker')
    cfg.CONF.import_opt('handler', 'solum.worker.config', group='worker')

    handlers = {
        'noop': noop_handler.Handler,
        'shell': shell_handler.Handler,
        'shell_nobuild': shell_nobuild_handler.Handler,
    }

    endpoints = [
        handlers[cfg.CONF.worker.handler](),
    ]

    server = service.Service(cfg.CONF.worker.topic,
                             cfg.CONF.worker.host, endpoints)
    server.serve()