def publisher(waiter):
    # Nested helper: create the per-topic proxy socket and relay queued
    # messages onto it forever.  `topic`, `sock_type`, `ipc_dir` and
    # `self` come from the enclosing scope; `waiter` is signalled once
    # setup succeeds (or receives the exception on failure).
    LOG.info(_("Creating proxy for topic: %s"), topic)

    try:
        # The topic is received over the network,
        # don't trust this input.
        if self.badchars.search(topic) is not None:
            emsg = _("Topic contained dangerous characters.")
            LOG.warn(emsg)
            raise RPCException(emsg)

        out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
                             sock_type, bind=True)
    except RPCException:
        # Hand the failure back to the green thread waiting on setup.
        waiter.send_exception(*sys.exc_info())
        return

    self.topic_proxy[topic] = eventlet.queue.LightQueue(
        CONF.rpc_zmq_topic_backlog)
    self.sockets.append(out_sock)

    # It takes some time for a pub socket to open,
    # before we can have any faith in doing a send() to it.
    if sock_type == zmq.PUB:
        eventlet.sleep(0.5)

    waiter.send(True)

    # Relay loop: forward every queued payload to the outbound socket.
    while True:
        data = self.topic_proxy[topic].get()
        out_sock.send(data)
        LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                  {"data": data})
def _check_children(self):
    """Reap finished forked children and drop them from self.child_pids.

    Returns the number of children still running.  Assumes
    self.child_pids is a set of (pid, job_id) tuples -- TODO confirm.
    """
    done = set()
    for child in self.child_pids:
        pid = child[0]
        job_id = child[1]
        # Non-blocking wait: p == 0 means the child is still running.
        p, child_info = os.waitpid(pid, os.WNOHANG)
        if p != 0:
            if child_info == 0:
                # Clean exit (status 0, no signal).
                LOG.debug(_("[%(worker_tag)s] Normal end of processing "
                            "of job [%(job_id)s] by forked child "
                            "[%(child_pid)s]")
                          % {'worker_tag': self.get_worker_tag(),
                             'job_id': job_id,
                             'child_pid': p})
            else:
                child_status, child_sig = self._parse_status(child_info)
                LOG.debug(_("[%(worker_tag)s] Abnormal end of processing "
                            "of job [%(job_id)s] by forked child "
                            "[%(child_pid)s] due to signal %(signal)d"
                            " and exit status %(status)d")
                          % {'worker_tag': self.get_worker_tag(),
                             'job_id': job_id,
                             'child_pid': p,
                             'signal': child_sig,
                             'status': child_status})
            done.add((pid, job_id))
    if len(done) > 0:
        self.child_pids.difference_update(done)
    return len(self.child_pids)
def _connect(self, params):
    """Connect to rabbit.  Re-establish any queues that may have
    been declared before if we are reconnecting.  Exceptions should
    be handled by the caller.
    """
    if self.connection:
        LOG.info(_("Reconnecting to AMQP server on "
                   "%(hostname)s:%(port)d") % params)
        try:
            # Best-effort release of the stale connection.
            self.connection.release()
        except self.connection_errors:
            pass
        # Setting this in case the next statement fails, though
        # it shouldn't be doing any network operations, yet.
        self.connection = None
    self.connection = kombu.connection.BrokerConnection(**params)
    self.connection_errors = self.connection.connection_errors
    if self.memory_transport:
        # Kludge to speed up tests.
        self.connection.transport.polling_interval = 0.0
    self.consumer_num = itertools.count(1)
    self.connection.connect()
    self.channel = self.connection.channel()
    # work around 'memory' transport bug in 1.1.3
    if self.memory_transport:
        self.channel._new_queue('ae.undeliver')
    # Re-attach every consumer that was declared before the reconnect.
    for consumer in self.consumers:
        consumer.reconnect(self.channel)
    LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
             params)
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """
    Wraps the sending of messages,
    dispatches to the matchmaker and sends
    message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {"msg": " ".join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == "_cast":
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope, _msg_id)
            # NOTE(review): this return sits inside the loop, so only the
            # first matchmaker queue is ever used despite the fanout
            # comment above -- confirm whether that is intentional.
            return
        return method(_addr, context, _topic, msg, timeout, envelope)
def process_request(self, req):
    """Try to find a version first in the accept header, then the URL."""
    msg = _("Determining version of request: %(method)s %(path)s"
            " Accept: %(accept)s")
    args = {'method': req.method, 'path': req.path, 'accept': req.accept}
    LOG.debug(msg % args)

    accept = str(req.accept)
    if accept.startswith('application/vnd.openstack.qonos-'):
        LOG.debug(_("Using media-type versioning"))
        # Version string is whatever follows the vendor media-type prefix.
        token_loc = len('application/vnd.openstack.qonos-')
        req_version = accept[token_loc:]
    else:
        LOG.debug(_("Using url versioning"))
        # Remove version in url so it doesn't conflict later
        req_version = req.path_info_pop()

    try:
        version = self._match_version_string(req_version)
    except ValueError:
        # Unknown version: delegate to the app that lists valid choices.
        LOG.debug(_("Unknown version. Returning version choices."))
        return self.versions_app

    req.environ['api.version'] = version
    # Re-insert the normalized version prefix into the request path.
    req.path_info = ''.join(('/v', str(version), req.path_info or '/'))
    LOG.debug(_("Matched version: v%d"), version)
    LOG.debug('new uri %s' % req.path_info)
    return None
def process_request(self, req):
    """Try to find a version first in the accept header, then the URL."""
    msg = _("Determining version of request: %(method)s %(path)s"
            " Accept: %(accept)s")
    args = {"method": req.method, "path": req.path, "accept": req.accept}
    LOG.debug(msg % args)

    accept = str(req.accept)
    media_prefix = "application/vnd.openstack.qonos-"
    if accept.startswith(media_prefix):
        # Version is carried in the vendor media type.
        LOG.debug(_("Using media-type versioning"))
        req_version = accept[len(media_prefix):]
    else:
        LOG.debug(_("Using url versioning"))
        # Remove version in url so it doesn't conflict later
        req_version = req.path_info_pop()

    try:
        version = self._match_version_string(req_version)
    except ValueError:
        LOG.debug(_("Unknown version. Returning version choices."))
        return self.versions_app

    req.environ["api.version"] = version
    req.path_info = "".join(("/v", str(version), req.path_info or "/"))
    LOG.debug(_("Matched version: v%d"), version)
    LOG.debug("new uri %s" % req.path_info)
    return None
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run *cmd* over an existing SSH connection.

    :param ssh: a connected client exposing exec_command() (paramiko-style
        interface -- TODO confirm).
    :returns: (stdout, stderr) tuple.
    :raises InvalidArgumentError: for the unsupported addl_env /
        process_input options.
    :raises ProcessExecutionError: on nonzero exit when check_exit_code.
    """
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
             zmq_type_out=None, in_bind=True, out_bind=True,
             subscribe=None):
    """Register an input socket (and optional output socket) for *proxy*.

    Input sockets must be PULL or SUB; output sockets PUSH or PUB.
    When an output socket is created, the two are cross-mapped so either
    can find its relay peer.
    """
    LOG.info(_("Registering reactor"))

    if zmq_type_in not in (zmq.PULL, zmq.SUB):
        raise RPCException("Bad input socktype")

    # Items push in.
    inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                    subscribe=subscribe)

    self.proxies[inq] = proxy
    self.sockets.append(inq)

    LOG.info(_("In reactor registered"))

    if not out_addr:
        # Input-only registration.
        return

    if zmq_type_out not in (zmq.PUSH, zmq.PUB):
        raise RPCException("Bad output socktype")

    # Items push out.
    outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

    self.mapping[inq] = outq
    self.mapping[outq] = inq
    self.sockets.append(outq)

    LOG.info(_("Out reactor registered"))
def create_consumer(self, topic, proxy, fanout=False):
    """Create a consumer for *topic*, registering it with the matchmaker.

    :param fanout: False for a point-to-point PULL consumer; True (or a
        subscription-filter string) for a SUB fanout consumer.
    """
    # Register with matchmaker.
    _get_matchmaker().register(topic, CONF.rpc_zmq_host)

    # Subscription scenarios
    if fanout:
        sock_type = zmq.SUB
        # A string fanout value is used as the SUB filter; any other
        # truthy value subscribes to everything ("").
        subscribe = ("", fanout)[type(fanout) == str]
        topic = "fanout~" + topic.split(".", 1)[0]
    else:
        sock_type = zmq.PULL
        subscribe = None
        topic = ".".join((topic.split(".", 1)[0], CONF.rpc_zmq_host))

    if topic in self.topics:
        LOG.info(_("Skipping topic registration. Already registered."))
        return

    # Receive messages from (local) proxy
    inaddr = "ipc://%s/zmq_topic_%s" % (CONF.rpc_zmq_ipc_dir, topic)

    LOG.debug(_("Consumer is a zmq.%s"),
              ["PULL", "SUB"][sock_type == zmq.SUB])

    self.reactor.register(proxy, inaddr, sock_type,
                          subscribe=subscribe, in_bind=False)
    self.topics.append(topic)
def _process_retention(self, instance_id, schedule_id):
    """Delete scheduled images beyond the server's retention count.

    A retention of 0 (or unknown) means nothing is deleted; only the
    oldest images past the retention limit are removed.
    """
    LOG.debug(_("Processing retention."))
    retention = self._get_retention(instance_id)

    if retention > 0:
        scheduled_images = self._find_scheduled_images_for_server(
            instance_id)

        if len(scheduled_images) > retention:
            # Everything after the first `retention` entries is stale.
            # Assumes scheduled_images is ordered newest-first -- TODO
            # confirm against _find_scheduled_images_for_server.
            to_delete = scheduled_images[retention:]
            LOG.info(_('[%(worker_tag)s] Removing %(remove)d '
                       'images for a retention of %(retention)d')
                     % {'worker_tag': self.get_worker_tag(),
                        'remove': len(to_delete),
                        'retention': retention})
            for image in to_delete:
                image_id = image.id
                self._get_nova_client().images.delete(image_id)
                LOG.info(_('[%(worker_tag)s] Removed image '
                           '%(image_id)s')
                         % {'worker_tag': self.get_worker_tag(),
                            'image_id': image_id})
    else:
        msg = ("[%(worker_tag)s] Retention %(retention)s found for "
               "schedule %(schedule)s for %(instance)s"
               % {'worker_tag': self.get_worker_tag(),
                  'retention': retention,
                  'schedule': schedule_id,
                  'instance': instance_id})
        LOG.info(msg)
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Resolve *topic* through the matchmaker and dispatch *msg*.

    Casts are spawned on a green thread; calls run inline and return the
    method's result.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    if not queues:
        # No target hosts: raise Timeout, which callers already handle.
        LOG.warn(_("No matchmaker results. Not casting."))
        raise rpc_common.Timeout(_("No match from matchmaker."))

    is_cast = method.__name__ == '_cast'
    for _topic, ip_addr in queues:
        _addr = 'tcp://%s:%s' % (ip_addr, conf.rpc_zmq_port)
        if is_cast:
            eventlet.spawn_n(method, _addr, context, _topic, msg,
                             timeout, envelope, _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout, envelope)
def _run_loop(self, run_once=False):
    """Main scheduler loop: enqueue due jobs once per configured interval.

    :param run_once: when True, exit after a single iteration.
    """
    next_run = None
    current_run = None

    while self.running:
        current_run = timeutils.isotime()
        # Target wall-clock time for the next iteration.
        next_run = time.time() + CONF.scheduler.job_schedule_interval

        # do work
        with utils.log_warning_and_dismiss_exception(LOG):
            self.enqueue_jobs(end_time=current_run)

        # if shutdown hasn't been requested, do nothing until next run
        if self.running:
            seconds = next_run - time.time()
            if seconds > 0:
                time.sleep(seconds)
            else:
                # Work overran the interval; start the next pass at once.
                msg = _('Scheduling of jobs took longer than expected.')
                LOG.warn(msg)

        if run_once:
            break

    LOG.info(_('Scheduler is shutting down'))
def consume(self, sock):
    """Receive one message from *sock* and dispatch it.

    Sockets with a relay peer in self.mapping forward the payload
    verbatim; otherwise the envelope version (data[2]) selects the
    decoding scheme before handing the request to the proxy.
    """
    # TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
    if sock in self.mapping:
        LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {'data': data})
        self.mapping[sock].send(data)
        return

    proxy = self.proxies[sock]

    if data[2] == 'cast':  # Legacy protocol
        packenv = data[3]

        ctx, msg = _deserialize(packenv)
        request = rpc_common.deserialize_msg(msg)
        ctx = RpcContext.unmarshal(ctx)
    elif data[2] == 'impl_zmq_v2':
        packenv = data[4:]

        msg = unflatten_envelope(packenv)
        request = rpc_common.deserialize_msg(msg)

        # Unmarshal only after verifying the message.
        ctx = RpcContext.unmarshal(data[3])
    else:
        LOG.error(_("ZMQ Envelope version unsupported or unknown."))
        return

    self.pool.spawn_n(self.process, proxy, ctx, request)
def create_consumer(self, topic, proxy, fanout=False):
    """Register *topic* with the matchmaker and hook up a local consumer.

    Fanout consumers use SUB sockets (a string fanout value becomes the
    subscription filter); everything else uses a host-scoped PULL socket.
    """
    # Advertise ourselves to the matchmaker first.
    _get_matchmaker().register(topic, CONF.rpc_zmq_host)

    if fanout:
        sock_type = zmq.SUB
        subscribe = fanout if type(fanout) == str else ''
        topic = 'fanout~' + topic.split('.', 1)[0]
    else:
        sock_type = zmq.PULL
        subscribe = None
        topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

    if topic in self.topics:
        LOG.info(_("Skipping topic registration. Already registered."))
        return

    # Messages arrive via the local IPC proxy socket.
    inaddr = "ipc://%s/zmq_topic_%s" % (CONF.rpc_zmq_ipc_dir, topic)

    kind = 'SUB' if sock_type == zmq.SUB else 'PULL'
    LOG.debug(_("Consumer is a zmq.%s"), kind)

    self.reactor.register(proxy, inaddr, sock_type,
                          subscribe=subscribe, in_bind=False)
    self.topics.append(topic)
def multicall(conf, context, topic, msg, timeout, connection_pool): """Make a call that returns multiple times.""" # TODO(pekowski): Remove all these comments in Havana. # For amqp_rpc_single_reply_queue = False, # Can't use 'with' for multicall, as it returns an iterator # that will continue to use the connection. When it's done, # connection.close() will get called which will put it back into # the pool # For amqp_rpc_single_reply_queue = True, # The 'with' statement is mandatory for closing the connection LOG.debug(_('Making synchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) _add_unique_id(msg) pack_context(msg, context) # TODO(pekowski): Remove this flag and the code under the if clause # in Havana. if not conf.amqp_rpc_single_reply_queue: conn = ConnectionContext(conf, connection_pool) wait_msg = MulticallWaiter(conf, conn, timeout) conn.declare_direct_consumer(msg_id, wait_msg) conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) else: with _reply_proxy_create_sem: if not connection_pool.reply_proxy: connection_pool.reply_proxy = ReplyProxy(conf, connection_pool) msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()}) wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool) with ConnectionContext(conf, connection_pool) as conn: conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) return wait_msg
def consume_in_thread(self):
    """Runs the ZmqProxy service"""
    ipc_dir = CONF.rpc_zmq_ipc_dir
    consume_in = "tcp://%s:%s" % \
        (CONF.rpc_zmq_bind_address, CONF.rpc_zmq_port)
    consumption_proxy = InternalContext(None)

    if not os.path.isdir(ipc_dir):
        try:
            # Create the IPC directory owned by the current user with
            # restrictive (750) permissions.
            utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
            utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
                          ipc_dir, run_as_root=True)
            utils.execute('chmod', '750', ipc_dir, run_as_root=True)
        except utils.ProcessExecutionError:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create IPC directory %s") %
                          (ipc_dir, ))

    try:
        # Bind the inbound PULL socket all remote senders connect to.
        self.register(consumption_proxy,
                      consume_in,
                      zmq.PULL,
                      out_bind=True)
    except zmq.ZMQError:
        with excutils.save_and_reraise_exception():
            LOG.error(_("Could not create ZeroMQ receiver daemon. "
                        "Socket may already be in use."))

    super(ZmqProxy, self).consume_in_thread()
def _process_data(self, ctxt, version, method, args):
    """Process a message in a new thread.

    If the proxy object we have has a dispatch method
    (see rpc.dispatcher.RpcDispatcher), pass it the version,
    method, and args and let it dispatch as appropriate.  If not, use
    the old behavior of magically calling the specified method on the
    proxy we have here.
    """
    ctxt.update_store()
    try:
        rval = self.proxy.dispatch(ctxt, version, method, **args)
        # Check if the result was a generator
        if inspect.isgenerator(rval):
            # Stream each yielded value back as a separate reply.
            for x in rval:
                ctxt.reply(x, None, connection_pool=self.connection_pool)
        else:
            ctxt.reply(rval, None, connection_pool=self.connection_pool)
        # This final None tells multicall that it is done.
        ctxt.reply(ending=True, connection_pool=self.connection_pool)
    except rpc_common.ClientException as e:
        # Expected, caller-induced failure: don't log a traceback.
        LOG.debug(_('Expected exception during message handling (%s)') %
                  e._exc_info[1])
        ctxt.reply(None, e._exc_info,
                   connection_pool=self.connection_pool,
                   log_failure=False)
    except Exception:
        # sys.exc_info() is deleted by LOG.exception().
        exc_info = sys.exc_info()
        LOG.error(_('Exception during message handling'),
                  exc_info=exc_info)
        ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
def __call__(self, message_data): """Consumer callback to call a method on a proxy object. Parses the message for validity and fires off a thread to call the proxy object method. Message data should be a dictionary with two keys: method: string representing the method to call args: dictionary of arg: value Example: {'method': 'echo', 'args': {'value': 42}} """ # It is important to clear the context here, because at this point # the previous context is stored in local.store.context if hasattr(local.store, 'context'): del local.store.context rpc_common._safe_log(LOG.debug, _('received %s'), message_data) self.msg_id_cache.check_duplicate_message(message_data) ctxt = unpack_context(self.conf, message_data) method = message_data.get('method') args = message_data.get('args', {}) version = message_data.get('version', None) if not method: LOG.warn(_('no method for message: %s') % message_data) ctxt.reply(_('No method for message: %s') % message_data, connection_pool=self.connection_pool) return self.pool.spawn_n(self._process_data, ctxt, version, method, args)
def consume(self, sock):
    """Read one message from *sock* and dispatch it.

    Relay sockets forward the raw payload to their mapped peer; other
    sockets have their envelope decoded according to its version marker
    before the request is handed to the registered proxy.
    """
    # TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)

    relay = self.mapping.get(sock)
    if relay is not None:
        LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {"data": data})
        relay.send(data)
        return

    proxy = self.proxies[sock]
    version = data[2]

    if version == "cast":
        # Legacy protocol: context and message are packed together.
        packenv = data[3]
        ctx, msg = _deserialize(packenv)
        request = rpc_common.deserialize_msg(msg)
        ctx = RpcContext.unmarshal(ctx)
    elif version == "impl_zmq_v2":
        packenv = data[4:]
        msg = unflatten_envelope(packenv)
        request = rpc_common.deserialize_msg(msg)
        # Unmarshal only after verifying the message.
        ctx = RpcContext.unmarshal(data[3])
    else:
        LOG.error(_("ZMQ Envelope version unsupported or unknown."))
        return

    self.pool.spawn_n(self.process, proxy, ctx, request)
def reconnect(self):
    """Handles reconnecting and re-establishing sessions and queues.

    Loops forever over the configured brokers (round-robin) with
    exponential backoff capped at 60 seconds until a connection opens.
    """
    if self.connection.opened():
        try:
            self.connection.close()
        except qpid_exceptions.ConnectionError:
            pass

    attempt = 0
    delay = 1
    while True:
        broker = self.brokers[attempt % len(self.brokers)]
        attempt += 1

        try:
            self.connection_create(broker)
            self.connection.open()
        except qpid_exceptions.ConnectionError as e:
            # FIX: use 'except ... as e' -- the 'except X, e' comma form
            # is Python-2-only and inconsistent with the rest of the file.
            msg_dict = dict(e=e, delay=delay)
            msg = _("Unable to connect to AMQP server: %(e)s. "
                    "Sleeping %(delay)s seconds") % msg_dict
            LOG.error(msg)
            time.sleep(delay)
            delay = min(2 * delay, 60)
        else:
            LOG.info(_('Connected to AMQP server on %s'), broker)
            break
def is_enabled():
    """Return whether SSL is enabled, validating the configured files.

    SSL is considered enabled when either cert_file or key_file is set.

    :raises RuntimeError: if a configured file does not exist, or if SSL
        is enabled but the cert/key pair is incomplete.
    """
    cert_file = CONF.ssl.cert_file
    key_file = CONF.ssl.key_file
    ca_file = CONF.ssl.ca_file
    use_ssl = cert_file or key_file

    if cert_file and not os.path.exists(cert_file):
        raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

    if ca_file and not os.path.exists(ca_file):
        raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

    if key_file and not os.path.exists(key_file):
        raise RuntimeError(_("Unable to find key_file : %s") % key_file)

    if use_ssl and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    return use_ssl
def _poll_image_status(self, job, image_id):
    """Look up the status of *image_id*, failing *job* on any problem.

    :returns: the current image status.
    :raises exc.PollingException: when the lookup raises or the image is
        in a failed (or missing) state; the error is also recorded on
        the job via _job_error_occurred.
    """
    try:
        image_status = self._get_image_status(image_id)
    except Exception:
        # Any lookup failure is fatal for this job.
        exc_type, exc_value, exc_tb = sys.exc_info()
        org_err_msg = tb.format_exception_only(exc_type, exc_value)
        err_val = {'job_id': job['id'],
                   'image_id': image_id,
                   'org_err_msg': org_err_msg}
        err_msg = (
            _("PollingExc: image: %(image_id)s, err: %(org_err_msg)s")
            % err_val)
        LOG.exception('[%(worker_tag)s] %(msg)s'
                      % {'worker_tag': self.get_worker_tag(),
                         'msg': err_msg})
        self._job_error_occurred(job, error_message=err_msg)
        raise exc.PollingException(err_msg)

    if image_status is None or image_status in _FAILED_IMAGE_STATUSES:
        err_val = {'image_id': image_id,
                   "image_status": image_status,
                   "job_id": job['id']}
        err_msg = (
            _("PollingErr: Got failed image status. Details:"
              " image_id: %(image_id)s, 'image_status': %(image_status)s"
              " job_id: %(job_id)s") % err_val)
        self._job_error_occurred(job, error_message=err_msg)
        raise exc.PollingException(err_msg)

    return image_status
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
    """Wrap a ZeroMQ socket of *zmq_type* bound or connected to *addr*.

    :param subscribe: None, a filter string, or a list of filter
        strings (meaningful for SUB sockets only).
    """
    self.sock = _get_ctxt().socket(zmq_type)
    self.addr = addr
    self.type = zmq_type
    self.subscriptions = []

    # Support failures on sending/receiving on wrong socket type.
    self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
    self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
    self.can_sub = zmq_type in (zmq.SUB,)

    # Support list, str, & None for subscribe arg (cast to list)
    do_sub = {
        list: subscribe,
        str: [subscribe],
        type(None): []
    }[type(subscribe)]

    for f in do_sub:
        self.subscribe(f)

    str_data = {"addr": addr, "type": self.socket_s(),
                "subscribe": subscribe, "bind": bind}

    LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
    LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
    LOG.debug(_("-> bind: %(bind)s"), str_data)

    try:
        if bind:
            self.sock.bind(addr)
        else:
            self.sock.connect(addr)
    except Exception:
        # NOTE(review): the original exception detail is discarded here;
        # consider logging it before re-raising as RPCException.
        raise RPCException(_("Could not open socket."))
def _get_retention(self, instance_id):
    """Return the scheduled-images retention count for *instance_id*.

    Returns 0 when the retention cannot be determined: the server was
    deleted, scheduled images were disabled, or any unexpected error
    occurred (errors are logged, never raised).
    """
    ret_str = None
    retention = 0
    try:
        result = self._get_nova_client().\
            rax_scheduled_images_python_novaclient_ext.get(instance_id)
        ret_str = result.retention
        # An empty/None retention string counts as 0.
        retention = int(ret_str or 0)
    except exceptions.NotFound:
        msg = (_('[%(worker_tag)s] Could not retrieve retention for '
                 'server %(instance)s: either the server was deleted or '
                 'scheduled images for the server was disabled.')
               % {'worker_tag': self.get_worker_tag(),
                  'instance': instance_id})
        LOG.warn(msg)
    except Exception:
        msg = (_('[%(worker_tag)s] Error getting retention for server '
                 '%(instance)s')
               % {'worker_tag': self.get_worker_tag(),
                  'instance': instance_id})
        LOG.exception(msg)

    return retention
def _process_retention(self, instance_id, schedule_id):
    """Enforce the server's image retention: delete any images past it."""
    LOG.debug(_("Processing retention."))
    retention = self._get_retention(instance_id)

    if retention <= 0:
        msg = ("[%(worker_tag)s] Retention %(retention)s found for "
               "schedule %(schedule)s for %(instance)s"
               % {'worker_tag': self.get_worker_tag(),
                  'retention': retention,
                  'schedule': schedule_id,
                  'instance': instance_id})
        LOG.info(msg)
        return

    scheduled_images = self._find_scheduled_images_for_server(instance_id)
    if len(scheduled_images) <= retention:
        return

    stale_images = scheduled_images[retention:]
    LOG.info(_('[%(worker_tag)s] Removing %(remove)d '
               'images for a retention of %(retention)d')
             % {'worker_tag': self.get_worker_tag(),
                'remove': len(stale_images),
                'retention': retention})
    for stale in stale_images:
        image_id = stale.id
        self._get_nova_client().images.delete(image_id)
        LOG.info(_('[%(worker_tag)s] Removed image '
                   '%(image_id)s')
                 % {'worker_tag': self.get_worker_tag(),
                    'image_id': image_id})
def _poll_image_status(self, job, image_id):
    """Fetch the image status for *job*, recording and raising on failure.

    :returns: the current image status.
    :raises exc.PollingException: on lookup errors or failed statuses.
    """
    try:
        image_status = self._get_image_status(image_id)
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        org_err_msg = tb.format_exception_only(exc_type, exc_value)
        err_val = {'job_id': job['id'],
                   'image_id': image_id,
                   'org_err_msg': org_err_msg}
        err_msg = (
            _("PollingExc: image: %(image_id)s, err: %(org_err_msg)s")
            % err_val)
        LOG.exception('[%(worker_tag)s] %(msg)s'
                      % {'worker_tag': self.get_worker_tag(),
                         'msg': err_msg})
        self._job_error_occurred(job, error_message=err_msg)
        raise exc.PollingException(err_msg)

    failed = image_status is None or image_status in _FAILED_IMAGE_STATUSES
    if failed:
        err_val = {'image_id': image_id,
                   "image_status": image_status,
                   "job_id": job['id']}
        err_msg = (
            _("PollingErr: Got failed image status. Details:"
              " image_id: %(image_id)s, 'image_status': %(image_status)s"
              " job_id: %(job_id)s") % err_val)
        self._job_error_occurred(job, error_message=err_msg)
        raise exc.PollingException(err_msg)

    return image_status
def process_job(self, job):
    """Run a single job, recording any failure on the job itself."""
    LOG.info(_("[%(worker_tag)s] Processing job: %(job)s")
             % {'worker_tag': self.get_worker_tag(),
                'job': job['id']})
    LOG.debug(_("[%(worker_tag)s] Processing job: %(job)s")
              % {'worker_tag': self.get_worker_tag(),
                 'job': str(job)})
    self.current_job = job

    try:
        self._process_job(job)
    except exc.PollingException as e:
        # Polling failures were already recorded on the job by
        # _poll_image_status; just log them here.
        LOG.exception(e)
    except Exception:
        msg = _("[%(worker_tag)s] Error processing job: %(job)s")
        LOG.exception(msg % {'worker_tag': self.get_worker_tag(),
                             'job': job['id']})
        exc_type, exc_value, exc_tb = sys.exc_info()
        err_msg = (_('Job process failed: %s')
                   % tb.format_exception_only(exc_type, exc_value))
        self._job_error_occurred(job, error_message=err_msg)
def _error_callback(exc):
    """Qpid consumer error hook: timeouts re-raise, others are logged."""
    if isinstance(exc, qpid_exceptions.Empty):
        LOG.debug(_('Timed out waiting for RPC response: %s') % str(exc))
        raise rpc_common.Timeout()
    # Non-timeout failures are logged and swallowed so consuming can
    # continue.
    LOG.exception(_('Failed to consume message from queue: %s') %
                  str(exc))
def update(self, request, schedule_id, body):
    """Update a schedule, recalculating next_run when timing fields change.

    :raises webob.exc.HTTPBadRequest: malformed body or bad next_run.
    :raises webob.exc.HTTPForbidden: read-only properties in the update.
    :raises webob.exc.HTTPNotFound: unknown schedule_id.
    """
    if not body:
        msg = _('The request body must not be empty')
        raise webob.exc.HTTPBadRequest(explanation=msg)
    elif 'schedule' not in body:
        msg = _('The request body must contain a "schedule" entity')
        raise webob.exc.HTTPBadRequest(explanation=msg)
    # NOTE(jculp): only raise if a blank tenant is passed
    # passing no tenant at all is perfectly fine.
    elif('tenant' in body['schedule'] and
         not body['schedule']['tenant'].strip()):
        msg = _('The request body has not specified a "tenant" entity')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    api_utils.deserialize_schedule_metadata(body['schedule'])
    values = {}
    values.update(body['schedule'])

    try:
        values = api_utils.check_read_only_properties(values)
    except exception.Forbidden as e:
        raise webob.exc.HTTPForbidden(explanation=unicode(e))

    request_next_run = body['schedule'].get('next_run')
    times = {
        'minute': None,
        'hour': None,
        'month': None,
        'day_of_week': None,
        'day_of_month': None,
    }
    update_schedule_times = False
    for key in times:
        if key in values:
            times[key] = values[key]
            update_schedule_times = True

    if update_schedule_times:
        # NOTE(ameade): We must recalculate the schedules next_run time
        # since the schedule has changed
        values.update(times)
        values['next_run'] = api_utils.schedule_to_next_run(times)
    elif request_next_run:
        # Validate only; the caller-supplied next_run is stored as-is.
        # FIX: dropped the unused 'as e' binding from this handler.
        try:
            timeutils.parse_isotime(request_next_run)
        except ValueError:
            msg = _('Invalid "next_run" value. Must be ISO 8601 format')
            raise webob.exc.HTTPBadRequest(explanation=msg)

    try:
        schedule = self.db_api.schedule_update(schedule_id, values)
    except exception.NotFound:
        msg = _('Schedule %s could not be found.') % schedule_id
        raise webob.exc.HTTPNotFound(explanation=msg)

    utils.serialize_datetimes(schedule)
    api_utils.serialize_schedule_metadata(schedule)
    return {'schedule': schedule}
def _error_callback(exc):
    """Kombu consumer error hook: timeouts raise; other errors are
    logged and consuming is re-armed."""
    if isinstance(exc, socket.timeout):
        LOG.debug(_('Timed out waiting for RPC response: %s') % str(exc))
        raise rpc_common.Timeout()
    LOG.exception(_('Failed to consume message from queue: %s') %
                  str(exc))
    # Force queue re-declaration on the next consume pass.
    info['do_consume'] = True
def _validate_limit(limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise exc.Invalid(message=msg) if limit <= 0: msg = _("limit param must be positive") raise exc.Invalid(message=msg) return limit
def _error(self, inner, req):
    """Log an unexpected exception and return a generic 500 response."""
    LOG.exception(_("Caught error: %s"), unicode(inner))

    # Some exceptions carry an HTTP code; default (or None) becomes 500.
    status = getattr(inner, 'code', 500)
    if status is None:
        status = 500

    msg_dict = dict(url=req.url, status=status)
    LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)

    return webob.exc.HTTPInternalServerError()
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification
    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}
    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    # Best-effort delivery: a failing driver never blocks the others.
    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
def reconnect(self):
    """Handles reconnecting and re-establishing queues.
    Will retry up to self.max_retries number of times.
    self.max_retries = 0 means to retry forever.
    Sleep between tries, starting at self.interval_start
    seconds, backing off self.interval_stepping number of seconds
    each attempt.
    """
    attempt = 0
    while True:
        params = self.params_list[attempt % len(self.params_list)]
        attempt += 1
        try:
            self._connect(params)
            return
        except (IOError, self.connection_errors) as e:
            pass
        except Exception as e:
            # FIX: use 'except ... as e' -- the Python-2-only
            # 'except Exception, e' comma form was inconsistent with the
            # rest of the file.  (This module is Python 2; under py3 'e'
            # would not survive past the except block.)
            # NOTE(comstud): Unfortunately it's possible for amqplib
            # to return an error not covered by its transport
            # connection_errors in the case of a timeout waiting for
            # a protocol response.  (See paste link in LP888621)
            # So, we check all exceptions for 'timeout' in them
            # and try to reconnect in this case.
            if 'timeout' not in str(e):
                raise

        log_info = {}
        log_info['err_str'] = str(e)
        log_info['max_retries'] = self.max_retries
        log_info.update(params)

        if self.max_retries and attempt == self.max_retries:
            LOG.error(_('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(max_retries)d '
                        'tries: %(err_str)s') % log_info)
            # NOTE(comstud): Copied from original code.  There's
            # really no better recourse because if this was a queue we
            # need to consume on, we have no way to consume anymore.
            sys.exit(1)

        if attempt == 1:
            sleep_time = self.interval_start or 1
        elif attempt > 1:
            sleep_time += self.interval_stepping
        if self.interval_max:
            sleep_time = min(sleep_time, self.interval_max)

        log_info['sleep_time'] = sleep_time
        LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                    'unreachable: %(err_str)s. Trying again in '
                    '%(sleep_time)d seconds.') % log_info)
        time.sleep(sleep_time)
def enqueue_jobs(self, start_time=None, end_time=None):
    """Create a job for every schedule due within the given window."""
    LOG.debug(_('Fetching schedules to process'))
    schedules = self.get_schedules(start_time, end_time)

    if not schedules:
        return

    LOG.info(_('Creating %d jobs') % len(schedules))
    for schedule in schedules:
        try:
            self.client.create_job(schedule['id'],
                                   schedule.get('next_run'))
        except client_exc.Duplicate:
            # Another scheduler already created this job; not an error.
            msg = _("Job for schedule %s has already been created")
            LOG.info(msg % schedule['id'])
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends
    a generated ``message_id`` (UUID) and GMT ``timestamp``.  The
    composite message is sent via every configured driver.
    """
    if priority not in log_levels:
        raise BadPriorityException(_('%s not in valid priorities') %
                                   priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {'message_id': str(uuid.uuid4()),
           'publisher_id': publisher_id,
           'event_type': event_type,
           'priority': priority,
           'payload': payload,
           'timestamp': str(timeutils.utcnow())}

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            # One broken driver must not block the others.
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
def job_get_and_assign_next_by_action(action, worker_id, new_timeout):
    """Get the next available job for the given action and assign it
    to the worker for worker_id.

    Returns the freshly assigned job, or None when no job is available
    or another worker/transaction won the assignment race.
    """
    session = get_session()
    job_ref = _job_get_next_by_action(session, timeutils.utcnow(), action)
    if not job_ref:
        return None

    job_id = job_ref['id']
    try:
        job_ref.update({'worker_id': worker_id,
                        'timeout': new_timeout,
                        'retry_count': job_ref['retry_count'] + 1})
        job_ref.save(session=session)
    except sa_orm.exc.NoResultFound:
        # The job was deleted while we were assigning it.
        LOG.warn(
            _('[JOB2WORKER] NoResultFound:'
              ' Could not assign the job to worker_id: %(worker_id)s'
              ' NoResultFound for job_id: %(job_id)s.')
            % {'worker_id': worker_id, 'job_id': job_id})
        return None
    except sa_orm.exc.StaleDataError:
        # Another transaction picked the job up first.
        LOG.warn(
            _('[JOB2WORKER] StaleDataError:'
              ' Could not assign the job to worker_id: %(worker_id)s'
              ' Job already assigned to another worker,'
              ' job_id: %(job_id)s.')
            % {'worker_id': worker_id, 'job_id': job_id})
        return None

    LOG.info(
        _('[JOB2WORKER] Assigned Job: %(job_id)s'
          ' To Worker: %(worker_id)s')
        % {'job_id': job_id, 'worker_id': worker_id})
    return _job_get_by_id(job_id)
def __init__(self, info=None, topic=None, method=None):
    """
    :param info: Extra info to convey to the user
    :param topic: The topic that the rpc call was sent to
    :param rpc_method_name: The name of the rpc method being called
    """
    self.info = info
    self.topic = topic
    self.method = method
    # Fall back to a translated placeholder for anything the caller omitted.
    placeholder = _('<unknown>')
    super(Timeout, self).__init__(None,
                                  info=info or placeholder,
                                  topic=topic or placeholder,
                                  method=method or placeholder)
def _get_response(self, ctx, proxy, topic, data):
    """Process a curried message and cast the result to topic.

    :param ctx: request context (provides ``to_dict`` and ``replies``)
    :param proxy: dispatcher the deserialized call is handed to
    :param topic: reply topic (unused here; kept for interface parity)
    :param data: message dict; ``version``/``args`` are defaulted if absent
    :returns: a normalized reply dict, a serialized-exception dict for
              expected client errors, or None on greenlet shutdown
    """
    LOG.debug(_("Running func with context: %s"), ctx.to_dict())
    data.setdefault('version', None)
    data.setdefault('args', {})

    try:
        result = proxy.dispatch(ctx, data['version'],
                                data['method'], **data['args'])
        return ConsumerBase.normalize_reply(result, ctx.replies)
    except greenlet.GreenletExit:
        # ignore these since they are just from shutdowns
        pass
    except rpc_common.ClientException as e:
        # Fixed: deprecated Python 2-only "except X, e" syntax; the rest
        # of this file already uses "except ... as e" (PEP 3110).
        LOG.debug(_("Expected exception during message handling (%s)") %
                  e._exc_info[1])
        return {'exc':
                rpc_common.serialize_remote_exception(e._exc_info,
                                                      log_failure=False)}
def test_get_and_assign_next_job_fails_on_job_update_with_stale_data(self):
    """Two workers race for one job; the loser gets None plus a warning."""
    log_stream = StringIO.StringIO()
    self._setup_db_api_log_fixture(log_stream)
    self._create_jobs(10, self.job_fixture_1)
    with mock.patch.object(base.db_api,
                           '_job_get_next_by_action') as mocked_next_job:
        same_job_ref_1, same_job_ref_2 = (
            self._prepare_same_job_for_workers(self.jobs[0]['id']))
        mocked_next_job.side_effect = [same_job_ref_1, same_job_ref_2]

        workers = ['CONCURRENT_WORKER-1', 'CONCURRENT_WORKER-2']
        worker1_job, worker2_job = (
            self._assign_jobs_for_concurrent_workers(workers))

        # The first worker wins the race and receives the job.
        self.assertIsNotNone(worker1_job)
        self.assertEqual(worker1_job['id'], same_job_ref_1['id'])
        self.assertEqual(workers[0], worker1_job['worker_id'])

        # The second worker hits StaleDataError and gets nothing.
        self.assertIsNone(worker2_job)
        expected_warning = _(
            '[JOB2WORKER] StaleDataError:'
            ' Could not assign the job to worker_id: %(worker_id)s'
            ' Job already assigned to another worker,'
            ' job_id: %(job_id)s.') % {'worker_id': workers[1],
                                       'job_id': same_job_ref_2['id']}
        self.assertEqual(expected_warning,
                         log_stream.getvalue().rstrip('\n'))
def run(self, run_once=False, poll_once=False):
    """Install the worker's signal handlers and enter the main loop.

    :param run_once: process at most one loop iteration
    :param poll_once: poll for a job only once per iteration
    """
    LOG.info(_('[%s] Starting qonos worker service') %
             self.get_worker_tag())
    # Wire up each signal to its handler before any work begins.
    sig_map = self._signal_map()
    for sig in sig_map:
        signal.signal(sig, sig_map[sig])
    self._run_loop(run_once, poll_once)
def _terminate(self, signum, frame):
    """Signal handler: log the signal, stop the run loop, notify subclass."""
    log_args = {'worker_tag': self.get_worker_tag(),
                'signum': str(signum)}
    LOG.debug(_('[%(worker_tag)s] Received signal %(signum)s - will exit')
              % log_args)
    # The run loop checks this flag each iteration and exits cleanly.
    self.running = False
    self._on_terminate(signum)
class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """

    # Format template consumed by the RPCException base class.
    message = _('Timeout while waiting on RPC response - '
                'topic: "%(topic)s", RPC method: "%(method)s" '
                'info: "%(info)s"')

    def __init__(self, info=None, topic=None, method=None):
        """
        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param rpc_method_name: The name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        # Substitute a translated placeholder for any missing field so the
        # message template above always formats cleanly.
        unknown = _('<unknown>')
        super(Timeout, self).__init__(None,
                                      info=info or unknown,
                                      topic=topic or unknown,
                                      method=method or unknown)
def _run_loop(self, run_once=False, poll_once=False):
    """Main worker loop: poll for jobs, process them, then shut down.

    :param run_once: exit after a single iteration
    :param poll_once: poll for a job only once per iteration
    """
    self.init_worker()
    while self.running:
        iteration_start = time.time()
        if self._can_accept_job():
            job = self._poll_for_next_job(poll_once)
            if job:
                try:
                    self.process_job(job)
                except Exception as e:
                    # A failed job must not kill the worker loop.
                    LOG.exception(e)

        # Ensure that we wait at least job_poll_interval between jobs
        remaining = (CONF.worker.job_poll_interval -
                     (time.time() - iteration_start))
        if remaining > 0:
            time.sleep(remaining)

        if run_once:
            self.running = False

    LOG.info(_('[%s] Worker is shutting down') % self.get_worker_tag())
    self._on_shutdown()
    self._unregister_worker()
    self.processor.cleanup_processor()
def _safe_log(log_func, msg, msg_data): """Sanitizes the msg_data field before logging.""" SANITIZE = {'set_admin_password': [('args', 'new_pass')], 'run_instance': [('args', 'admin_password')], 'route_message': [('args', 'message', 'args', 'method_info', 'method_kwargs', 'password'), ('args', 'message', 'args', 'method_info', 'method_kwargs', 'admin_password')]} has_method = 'method' in msg_data and msg_data['method'] in SANITIZE has_context_token = '_context_auth_token' in msg_data has_token = 'auth_token' in msg_data if not any([has_method, has_context_token, has_token]): return log_func(msg, msg_data) msg_data = copy.deepcopy(msg_data) if has_method: for arg in SANITIZE.get(msg_data['method'], []): try: d = msg_data for elem in arg[:-1]: d = d[elem] d[arg[-1]] = '<SANITIZED>' except KeyError, e: LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'), {'item': arg, 'err': e})
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.
    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]

    if log_failure:
        LOG.error(_("Returning exception %s to caller"), unicode(failure))
        LOG.error(tb)

    # Exceptions raised via the project's exception base carry extra kwargs.
    kwargs = getattr(failure, 'kwargs', {})

    data = {
        'class': str(failure.__class__.__name__),
        'module': str(failure.__class__.__module__),
        'message': unicode(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }
    return jsonutils.dumps(data)
def notify(context, message):
    """Deprecated in Grizzly. Please use rpc_notifier instead."""
    deprecation_msg = _("The rabbit_notifier is now deprecated."
                        " Please use rpc_notifier instead.")
    LOG.deprecated(deprecation_msg)
    # Delegate straight to the replacement driver.
    rpc_notifier.notify(context, message)
def create(self, request, body=None):
    """Create a new schedule from the request body.

    :param request: the incoming webob request
    :param body: deserialized request body; must contain a 'schedule'
        dict with truthy 'tenant' and 'action' entries
    :returns: dict of the form {'schedule': <created schedule>}
    :raises webob.exc.HTTPBadRequest: when the body is missing or invalid

    Fix: corrected the typo 'occured' -> 'occurred' in the user-facing
    error message.
    """
    invalid_params = []
    if not body:
        invalid_params.append('request body is empty')
    elif 'schedule' not in body:
        invalid_params.append('request body needs "schedule" entity')
    else:
        if not body['schedule'].get('tenant'):
            invalid_params.append('request body needs "tenant" entity')
        if not body['schedule'].get('action'):
            invalid_params.append('request body needs "action" entity')

    if invalid_params:
        msg = _('The following errors occurred with your request: %s') \
            % ', '.join(invalid_params)
        raise webob.exc.HTTPBadRequest(explanation=msg)

    api_utils.deserialize_schedule_metadata(body['schedule'])
    values = {}
    values.update(body['schedule'])
    # Compute the first run time from the schedule's recurrence settings.
    values['next_run'] = api_utils.schedule_to_next_run(body['schedule'])
    schedule = self.db_api.schedule_create(values)
    utils.serialize_datetimes(schedule)
    api_utils.serialize_schedule_metadata(schedule)
    return {'schedule': schedule}