def pre_populate_hook():
    """Return the existing load balancers known to neutron as Resources.

    Blocks until neutron answers, retrying with exponential backoff
    (capped at the ``astara_appliance.max_sleep`` option) on transient
    neutron client errors.  Authorization failures abort the
    pre-populate thread instead of retrying.
    """
    delay = 1
    client = neutron.Neutron(cfg.CONF)
    while True:
        try:
            # Map each neutron LB onto the event.Resource the workers
            # expect to receive.
            return [
                event.Resource(driver=LoadBalancer.RESOURCE_NAME,
                               id=lb.id,
                               tenant_id=lb.tenant_id)
                for lb in client.get_loadbalancers()
            ]
        except (q_exceptions.Unauthorized, q_exceptions.Forbidden) as err:
            LOG.warning(_LW('PrePopulateWorkers thread failed: %s'), err)
            return
        except Exception as err:
            LOG.warning(
                _LW('Could not fetch loadbalancers from neutron: %s'), err)
            LOG.warning(_LW('sleeping %s seconds before retrying'), delay)
            time.sleep(delay)
            delay = min(delay * 2, cfg.CONF.astara_appliance.max_sleep)
def pre_populate_hook():
    """Fetch the existing LBs from neutron for distribution to workers.

    Retries forever while neutron is unreachable, doubling the pause
    between attempts up to ``astara_appliance.max_sleep`` seconds.
    Auth errors (Unauthorized/Forbidden) end the thread.
    """
    nap_time = 1
    neutron_client = neutron.Neutron(cfg.CONF)
    while True:
        try:
            resources = [
                event.Resource(driver=LoadBalancer.RESOURCE_NAME,
                               id=lb.id,
                               tenant_id=lb.tenant_id)
                for lb in neutron_client.get_loadbalancers()
            ]
        except (q_exceptions.Unauthorized, q_exceptions.Forbidden) as err:
            # Credentials are wrong; retrying will not help.
            LOG.warning(_LW('PrePopulateWorkers thread failed: %s'), err)
            return
        except Exception as err:
            LOG.warning(
                _LW('Could not fetch loadbalancers from neutron: %s'), err)
            LOG.warning(_LW('sleeping %s seconds before retrying'), nap_time)
            time.sleep(nap_time)
            nap_time = min(nap_time * 2,
                           cfg.CONF.astara_appliance.max_sleep)
            continue
        return resources
def run(self, ip_address, port=None):
    """Run the MetadataProxy.

    :param ip_address: the ip address to bind to for incoming requests
    :param port: the port to bind to for incoming requests; when None,
        the ``astara_metadata_port`` config option is used
    :returns: returns nothing
    """
    if port is None:
        # Resolve the default at call time: a keyword default of
        # cfg.CONF.astara_metadata_port is evaluated at import time,
        # before the config files/CLI have been parsed, and would
        # freeze a stale value.
        port = cfg.CONF.astara_metadata_port
    app = MetadataProxyHandler()
    # Retry socket creation a few times; the bind address may not be
    # configured yet when the service starts.
    for i in six.moves.range(5):
        LOG.info(_LI('Starting the metadata proxy on %s:%s'),
                 ip_address, port)
        try:
            sock = eventlet.listen((ip_address, port),
                                   family=socket.AF_INET6,
                                   backlog=128)
        except socket.error as err:
            if err.errno != 99:  # EADDRNOTAVAIL
                raise
            LOG.warning(_LW('Could not create metadata proxy socket: %s'),
                        err)
            LOG.warning(_LW('Sleeping %s before trying again'), i + 1)
            eventlet.sleep(i + 1)
        else:
            break
    else:
        # All five attempts failed.
        raise RuntimeError(
            _('Could not establish metadata proxy socket on %s:%s') %
            (ip_address, port))
    eventlet.wsgi.server(sock, app, custom_pool=self.pool, log=LOG)
def send_message(self, message):
    """Queue a worker message onto this state machine.

    :returns: True when the message was queued, False when it was
        dropped (machine deleted, or POLL against an errored resource).
    """
    if self.deleted:
        # A deleted machine accepts no further work.
        self.driver.log.debug(
            "deleted state machine, ignoring incoming message %s",
            message)
        return False
    # NOTE(dhellmann): This check is largely redundant with the
    # one in CalcAction.transition() but it may allow us to avoid
    # adding poll events to the queue at all, and therefore cut
    # down on the number of times a worker thread wakes up to
    # process something on a router that isn't going to actually
    # do any work.
    if message.crud == POLL and self.instance.state == states.ERROR:
        self.driver.log.info(
            _LI("Resource status is ERROR, ignoring POLL message: %s"),
            message)
        return False
    if message.crud == REBUILD:
        custom_image = message.body.get("image_uuid")
        if custom_image:
            self.driver.log.info(
                _LI("Resource is being REBUILT with custom image %s"),
                custom_image)
            self.image_uuid = custom_image
        else:
            self.image_uuid = self.driver.image_uuid
    self._queue.append(message.crud)
    depth = len(self._queue)
    # Escalate to a warning once the backlog passes the threshold.
    emit = (self.driver.log.warning
            if depth > self._queue_warning_threshold
            else self.driver.log.debug)
    emit(_LW("incoming message brings queue length to %s"), depth)
    return True
def plug(self, network_id, port_id, device_name, mac_address,
         bridge=None, namespace=None, prefix=None):
    """Plugin the interface.

    Creates a veth pair for ``device_name`` (unless a device with that
    name already exists), sets its MAC and optional MTU, and moves the
    namespace end into ``namespace`` before bringing both ends up.

    :param network_id: unused here; accepted for the common driver
        interface
    :param port_id: unused here; accepted for the common driver
        interface
    :param device_name: name of the namespace-side veth device
    :param mac_address: MAC to assign to the namespace-side device
    :param bridge: unused by this driver
    :param namespace: optional network namespace to place the device in
    :param prefix: optional device-name prefix to swap for 'tap' when
        naming the root-side end
    """
    if not ip_lib.device_exists(device_name,
                                self.root_helper,
                                namespace=namespace):
        ip = ip_lib.IPWrapper(self.root_helper)
        # Enable agent to define the prefix
        if prefix:
            tap_name = device_name.replace(prefix, 'tap')
        else:
            tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap')
        # root_veth stays in the root namespace; ns_veth is the end
        # that gets the MAC and (optionally) moves into the namespace.
        root_veth, ns_veth = ip.add_veth(tap_name, device_name)
        ns_veth.link.set_address(mac_address)
        if self.conf.network_device_mtu:
            # Apply the configured MTU to both ends of the pair.
            root_veth.link.set_mtu(self.conf.network_device_mtu)
            ns_veth.link.set_mtu(self.conf.network_device_mtu)
        if namespace:
            namespace_obj = ip.ensure_namespace(namespace)
            namespace_obj.add_device_to_namespace(ns_veth)
        root_veth.link.set_up()
        ns_veth.link.set_up()
    else:
        LOG.warning(_LW("Device %s already exists"), device_name)
def _deprecated_amqp_url():
    """Translate a deprecated ``amqp_url`` setting for oslo_messaging.

    Emits a deprecation warning and rewrites the configured URL into a
    transport URL that oslo_messaging can use to load a driver.
    Returns None when no ``amqp_url`` is configured.
    """
    raw = cfg.CONF.amqp_url
    if not raw:
        return
    LOG.warning(_LW(
        'Use of amqp_url is deprecated. Please instead use options defined in '
        'oslo_messaging_rabbit to declare your AMQP connection.'))
    parsed = urlparse.urlsplit(raw)
    # oslo_messaging expects the 'rabbit' scheme, not 'amqp'.
    scheme = 'rabbit' if parsed.scheme == 'amqp' else parsed.scheme
    port = str(parsed.port or 5672)
    host = parsed.netloc
    if host.endswith(':'):
        # URL had a trailing colon with no port; drop it before we
        # append the resolved port.
        host = host[:-1]
    return urlparse.urlunsplit(
        (scheme, '%s:%s' % (host, port), parsed.path, '', ''))
def _proxy_request(self, instance_id, req):
    """Proxy a signed HTTP request to an instance.

    :param instance_id: ID of the Instance being proxied to
    :param req: The webob.Request to handle
    :returns: returns a valid HTTP Response or Error
    """
    forwarded_headers = {
        'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
        'X-Instance-ID': instance_id,
        'X-Instance-ID-Signature': self._sign_instance_id(instance_id),
        'X-Tenant-ID': req.headers.get('X-Tenant-ID')
    }
    nova_netloc = '%s:%s' % (cfg.CONF.nova_metadata_ip,
                             cfg.CONF.nova_metadata_port)
    url = urlparse.urlunsplit(
        ('http', nova_netloc, req.path_info, req.query_string, ''))
    resp, content = httplib2.Http().request(url, headers=forwarded_headers)
    status = resp.status
    if status == 200:
        LOG.debug(str(resp))
        return content
    if status == 403:
        # Shared-secret mismatch between us and the metadata service.
        LOG.warning(_LW(
            'The remote metadata server responded with Forbidden. This '
            'response usually occurs when shared secrets do not match.'
        ))
        return webob.exc.HTTPForbidden()
    if status == 404:
        return webob.exc.HTTPNotFound()
    if status == 500:
        msg = _LW('Remote metadata server experienced an'
                  ' internal server error.')
        LOG.warning(msg)
        return webob.exc.HTTPInternalServerError(
            explanation=six.text_type(msg))
    raise Exception(_('Unexpected response code: %s') % status)
def _ssh_key():
    """Return the configured router SSH public key contents.

    Returns '' when no key path is configured or the file cannot be
    read.
    """
    key_path = config.get_best_config_path(cfg.CONF.ssh_public_key)
    if not key_path:
        return ''
    try:
        with open(key_path) as key_file:
            return key_file.read()
    except IOError:
        # Best-effort: a missing/unreadable key is logged, not fatal.
        LOG.warning(_LW('Could not load router ssh public key from %s'),
                    key_path)
        return ''
def _proxy_request(self, instance_id, req):
    """Proxy a signed HTTP request to an instance.

    :param instance_id: ID of the Instance being proxied to
    :param req: The webob.Request to handle
    :returns: returns a valid HTTP Response or Error
    """
    signature = self._sign_instance_id(instance_id)
    headers = {
        'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
        'X-Instance-ID': instance_id,
        'X-Instance-ID-Signature': signature,
        'X-Tenant-ID': req.headers.get('X-Tenant-ID')
    }
    target = urlparse.urlunsplit(
        ('http',
         '%s:%s' % (cfg.CONF.nova_metadata_ip,
                    cfg.CONF.nova_metadata_port),
         req.path_info,
         req.query_string,
         ''))
    client = httplib2.Http()
    resp, content = client.request(target, headers=headers)
    status = resp.status
    if status == 200:
        LOG.debug(str(resp))
        return content
    if status == 404:
        return webob.exc.HTTPNotFound()
    if status == 403:
        # Usually a shared-secret mismatch with the metadata service.
        LOG.warning(_LW(
            'The remote metadata server responded with Forbidden. This '
            'response usually occurs when shared secrets do not match.'))
        return webob.exc.HTTPForbidden()
    if status == 500:
        error_msg = _LW('Remote metadata server experienced an'
                        ' internal server error.')
        LOG.warning(error_msg)
        return webob.exc.HTTPInternalServerError(
            explanation=six.text_type(error_msg))
    raise Exception(_('Unexpected response code: %s') % status)
def run(self, ip_address, port):
    """Start the rug-api WSGI server.

    Binds IPv6 when *ip_address* parses as an IPv6 address, IPv4
    otherwise.  Retries socket creation up to five times with an
    increasing pause when the address is not yet available
    (EADDRNOTAVAIL), then gives up with RuntimeError.
    """
    app = RugAPI()
    try:
        socket.inet_pton(socket.AF_INET6, ip_address)
        family = socket.AF_INET6
    except Exception:
        family = socket.AF_INET
    sock = None
    for attempt in six.moves.range(5):
        LOG.info(_LI(
            'Starting the rug-api on %s:%s'),
            ip_address, port,
        )
        try:
            sock = eventlet.listen(
                (ip_address, port),
                family=family,
                backlog=128
            )
            break
        except socket.error as err:
            if err.errno != 99:  # EADDRNOTAVAIL
                raise
            LOG.warning(_LW('Could not create rug-api socket: %s'), err)
            LOG.warning(_LW('Sleeping %s before trying again'), attempt + 1)
            eventlet.sleep(attempt + 1)
    if sock is None:
        raise RuntimeError(_(
            'Could not establish rug-api socket on %s:%s') %
            (ip_address, port)
        )
    eventlet.wsgi.server(
        sock,
        app,
        custom_pool=self.pool,
        log=LOG)
def run(self, ip_address, port=None):
    """Run the MetadataProxy.

    :param ip_address: the ip address to bind to for incoming requests
    :param port: the port to bind to for incoming requests; when None,
        the ``astara_metadata_port`` config option is used
    :returns: returns nothing
    """
    if port is None:
        # Resolve the default lazily: cfg.CONF is not populated until
        # the config files/CLI are parsed, which happens after import,
        # so a keyword default of cfg.CONF.astara_metadata_port would
        # capture a stale value.
        port = cfg.CONF.astara_metadata_port
    app = MetadataProxyHandler()
    # The bind address may not exist yet at service start; retry a few
    # times before giving up.
    for i in six.moves.range(5):
        LOG.info(_LI(
            'Starting the metadata proxy on %s:%s'),
            ip_address, port
        )
        try:
            sock = eventlet.listen(
                (ip_address, port),
                family=socket.AF_INET6,
                backlog=128
            )
        except socket.error as err:
            if err.errno != 99:  # EADDRNOTAVAIL
                raise
            LOG.warning(
                _LW('Could not create metadata proxy socket: %s'), err)
            LOG.warning(_LW('Sleeping %s before trying again'), i + 1)
            eventlet.sleep(i + 1)
        else:
            break
    else:
        raise RuntimeError(
            _('Could not establish metadata proxy socket on %s:%s') %
            (ip_address, port)
        )
    eventlet.wsgi.server(
        sock,
        app,
        custom_pool=self.pool,
        log=loggers.WritableLogger(LOG))
def pre_populate_hook():
    """Fetch the existing routers from neutron for worker distribution.

    Waits for neutron to return the list of existing routers, pausing
    with exponential backoff (capped at max_sleep seconds) between
    attempts and ignoring transient neutron client errors.  Auth
    failures end the thread.
    """
    nap_time = 1
    max_sleep = 15
    neutron_client = neutron.Neutron(cfg.CONF)
    while True:
        try:
            return [
                event.Resource(driver=DRIVER_NAME,
                               id=router.id,
                               tenant_id=router.tenant_id)
                for router in neutron_client.get_routers(detailed=False)
            ]
        except (q_exceptions.Unauthorized, q_exceptions.Forbidden) as err:
            # Bad credentials; retrying cannot succeed.
            LOG.warning(_LW('PrePopulateWorkers thread failed: %s'), err)
            return
        except Exception as err:
            LOG.warning(
                _LW('Could not fetch routers from neutron: %s'), err)
            LOG.warning(_LW('sleeping %s seconds before retrying'),
                        nap_time)
            time.sleep(nap_time)
            # FIXME(rods): should we get max_sleep from the config file?
            nap_time = min(nap_time * 2, max_sleep)
def get_parser(self, prog_name):
    """Build the argument parser for a deprecated router command.

    Warns that the 'router' spelling is deprecated in favor of
    'resource', then returns the parser with the router id placed
    before the optional --reason argument.
    """
    new_cmd = str(prog_name).replace('router', 'resource')
    # Pass the arguments to the logger instead of %-interpolating
    # eagerly: this defers formatting until the record is actually
    # emitted and matches the lazy style used everywhere else in
    # this file.
    LOG.warning(_LW(
        "WARNING: '%s' is deprecated in favor of '%s' and will be removed "
        "in the Mitaka release."), prog_name, new_cmd)
    # Bypass the direct base class to let us put the tenant id
    # argument first
    p = super(_TenantRouterCmd, self).get_parser(prog_name)
    p.add_argument(
        'router_id',
    )
    p.add_argument(
        '--reason',
    )
    return p
def delete_vrrp_port(self, object_id, label='VRRP'):
    """Delete the neutron port(s) created for a resource's VRRP address.

    Looks the port up by its 'ASTARA:<label>:<object_id>' name, falling
    back to the legacy 'AKANDA' prefix when legacy_fallback_mode is on.

    :param object_id: id of the resource the port belongs to
    :param label: port-name label component, defaults to 'VRRP'
    """
    name = 'ASTARA:%s:%s' % (label, object_id)
    response = self.api_client.list_ports(name=name)
    port_data = response.get('ports')
    if not port_data and self.conf.legacy_fallback_mode:
        name = name.replace('ASTARA', 'AKANDA')
        LOG.info(_LI('Attempting legacy query for %s.'), name)
        response = self.api_client.list_ports(name=name)
        port_data = response.get('ports')
    if not port_data:
        LOG.warning(_LW(
            'Unable to find VRRP port to delete with name %s.'), name)
        # Nothing to delete.  Returning also guards against iterating
        # None when the response carries no 'ports' key at all, which
        # previously raised TypeError.
        return
    for port in port_data:
        self.api_client.delete_port(port['id'])
def delete_vrrp_port(self, object_id, label='VRRP'):
    """Delete the neutron VRRP port(s) associated with *object_id*.

    Queries by the 'ASTARA:<label>:<object_id>' port name, retrying
    with the legacy 'AKANDA' prefix when legacy_fallback_mode is
    enabled.
    """
    name = 'ASTARA:%s:%s' % (label, object_id)
    response = self.api_client.list_ports(name=name)
    port_data = response.get('ports')
    if not port_data and self.conf.legacy_fallback_mode:
        name = name.replace('ASTARA', 'AKANDA')
        LOG.info(_LI('Attempting legacy query for %s.'), name)
        response = self.api_client.list_ports(name=name)
        port_data = response.get('ports')
    if not port_data:
        LOG.warning(
            _LW('Unable to find VRRP port to delete with name %s.'), name)
        # Bail out: there is nothing to delete, and ``port_data`` may
        # be None (missing 'ports' key), which would make the loop
        # below raise TypeError.
        return
    for port in port_data:
        self.api_client.delete_port(port['id'])
def get_vif_port_by_id(self, port_id):
    """Look up the OVS VifPort for a neutron port id.

    Runs ``ovs-vsctl find Interface`` filtered on the iface-id
    external id and parses the output with ``self.re_id``.

    :returns: a VifPort, or None when the port is not found or the
        output cannot be parsed.
    """
    args = ['--', '--columns=external_ids,name,ofport',
            'find', 'Interface',
            'external_ids:iface-id="%s"' % port_id]
    result = self.run_vsctl(args)
    if not result:
        return
    match = self.re_id.search(result)
    try:
        vif_mac = match.group('vif_mac')
        vif_id = match.group('vif_id')
        port_name = match.group('port_name')
        ofport = int(match.group('ofport'))
        return VifPort(port_name, ofport, vif_id, vif_mac, self)
    except Exception as e:
        # ``except Exception, e`` was Python-2-only syntax and a
        # SyntaxError under Python 3; use the portable ``as`` form.
        # A non-matching regex surfaces here as AttributeError on
        # ``match.group`` (match is None).
        LOG.warning(_LW("Unable to parse regex results. Exception: %s"), e)
        return
def get_vif_port_by_id(self, port_id):
    """Return the VifPort whose iface-id external id matches *port_id*.

    :returns: a VifPort, or None when ovs-vsctl returns nothing or the
        output cannot be parsed by ``self.re_id``.
    """
    args = [
        '--', '--columns=external_ids,name,ofport',
        'find', 'Interface', 'external_ids:iface-id="%s"' % port_id
    ]
    result = self.run_vsctl(args)
    if not result:
        return
    match = self.re_id.search(result)
    try:
        vif_mac = match.group('vif_mac')
        vif_id = match.group('vif_id')
        port_name = match.group('port_name')
        ofport = int(match.group('ofport'))
        return VifPort(port_name, ofport, vif_id, vif_mac, self)
    except Exception as e:
        # Fixed Python-2-only ``except Exception, e`` syntax, which is
        # a SyntaxError on Python 3.  ``match`` being None lands here
        # as an AttributeError.
        LOG.warning(_LW("Unable to parse regex results. Exception: %s"), e)
        return
def pick_workers(self, target):
    """Return the workers responsible for *target*.

    A wildcard target addresses every worker; any other target is
    hashed (via its UUID) onto exactly one worker.  An unparseable
    target selects no workers.
    """
    target = target.strip() if target else None
    if target in commands.WILDCARDS:
        # Broadcast to every worker.
        return self.workers[:]
    try:
        idx = uuid.UUID(target).int % len(self.workers)
    except (TypeError, ValueError) as e:
        LOG.warning(_LW(
            'Could not determine UUID from %r: %s, ignoring message'),
            target, e,
        )
        return []
    LOG.debug('target %s maps to worker %s', target, idx)
    return [self.workers[idx]]
def pick_workers(self, target):
    """Map *target* to the worker(s) that should receive its message.

    Wildcards fan out to all workers; a UUID target is consistently
    hashed onto one worker; anything unparseable yields an empty list.
    """
    cleaned = target.strip() if target else None
    # If we get any wildcard target, send the message to all of
    # the workers.
    if cleaned in commands.WILDCARDS:
        return self.workers[:]
    try:
        worker_index = uuid.UUID(cleaned).int % len(self.workers)
    except (TypeError, ValueError) as e:
        LOG.warning(
            _LW('Could not determine UUID from %r: %s, ignoring message'),
            cleaned, e,
        )
        return []
    else:
        LOG.debug('target %s maps to worker %s', cleaned, worker_index)
        return [self.workers[worker_index]]
def send_message(self, message):
    """Accept a worker message into this state machine's queue.

    :returns: True if the message was queued, False if it was dropped
        (machine deleted, or POLL while the resource is in ERROR).
    """
    if self.deleted:
        # A deleted machine takes no further work.
        self.resource.log.debug(
            'deleted state machine, ignoring incoming message %s',
            message)
        return False
    # NOTE(dhellmann): This check is largely redundant with the
    # one in CalcAction.transition() but it may allow us to avoid
    # adding poll events to the queue at all, and therefore cut
    # down on the number of times a worker thread wakes up to
    # process something on a router that isn't going to actually
    # do any work.
    if message.crud == POLL and self.instance.state == states.ERROR:
        self.resource.log.info(_LI(
            'Resource status is ERROR, ignoring POLL message: %s'),
            message,
        )
        return False
    if message.crud == REBUILD:
        image = message.body.get('image_uuid')
        if image:
            self.resource.log.info(_LI(
                'Resource is being REBUILT with custom image %s'), image)
            self.image_uuid = image
        else:
            self.image_uuid = self.resource.image_uuid
    self._queue.append(message.crud)
    pending = len(self._queue)
    # Warn (rather than debug) once the backlog passes the threshold.
    log_fn = (self.resource.log.warning
              if pending > self._queue_warning_threshold
              else self.resource.log.debug)
    log_fn(_LW('incoming message brings queue length to %s'), pending)
    return True
def plug(self, network_id, port_id, device_name, mac_address,
         bridge=None, namespace=None, prefix=None):
    """Plug in the interface.

    Adds a port for ``device_name`` to the OVS integration bridge
    (either an internal port or a veth pair depending on
    ``ovs_use_veth``), assigns the MAC and optional MTU, and moves the
    device into ``namespace`` before bringing it up.

    :param network_id: unused here; accepted for the common driver
        interface
    :param port_id: port id recorded on the OVS port
    :param device_name: name of the device visible in the namespace
    :param mac_address: MAC to assign to the device
    :param bridge: OVS bridge to plug into; defaults to the configured
        ovs_integration_bridge
    :param namespace: optional network namespace for the device
    :param prefix: optional device-name prefix used when deriving the
        tap name
    """
    if not bridge:
        bridge = self.conf.ovs_integration_bridge
    self.check_bridge_exists(bridge)
    if not ip_lib.device_exists(device_name,
                                self.root_helper,
                                namespace=namespace):
        ip = ip_lib.IPWrapper(self.root_helper)
        tap_name = self._get_tap_name(device_name, prefix)
        if self.conf.ovs_use_veth:
            # veth mode: root_dev stays in the root namespace and is
            # what actually attaches to the bridge.
            root_dev, ns_dev = ip.add_veth(tap_name, device_name)
        internal = not self.conf.ovs_use_veth
        self._ovs_add_port(bridge, tap_name, port_id, mac_address,
                           internal=internal)
        ns_dev = ip.device(device_name)
        ns_dev.link.set_address(mac_address)
        if self.conf.network_device_mtu:
            ns_dev.link.set_mtu(self.conf.network_device_mtu)
            if self.conf.ovs_use_veth:
                root_dev.link.set_mtu(self.conf.network_device_mtu)
        if namespace:
            namespace_obj = ip.ensure_namespace(namespace)
            namespace_obj.add_device_to_namespace(ns_dev)
        ns_dev.link.set_up()
        if self.conf.ovs_use_veth:
            root_dev.link.set_up()
    else:
        LOG.warning(_LW("Device %s already exists"), device_name)
def _deprecated_amqp_url():
    """Allow for deprecating amqp_url setting over time.

    This warns and attempts to translate an amqp_url to something
    oslo_messaging can use to load a driver.  Returns None when no
    amqp_url is configured.
    """
    amqp_url = cfg.CONF.amqp_url
    if not amqp_url:
        return None
    LOG.warning(_LW(
        'Use of amqp_url is deprecated. Please instead use options defined in '
        'oslo_messaging_rabbit to declare your AMQP connection.'))
    split = urlparse.urlsplit(amqp_url)
    if split.scheme == 'amqp':
        # oslo_messaging loads the rabbit driver by the 'rabbit' scheme.
        scheme = 'rabbit'
    else:
        scheme = split.scheme
    netloc = split.netloc[:-1] if split.netloc.endswith(':') else split.netloc
    location = '%s:%s' % (netloc, str(split.port or 5672))
    return urlparse.urlunsplit((scheme, location, split.path, '', ''))
def _populate_resource_id(self, message):
    """Ensure message's resource is populated with a resource id if it
    does not contain one. If not, attempt to lookup by tenant using the
    driver supplied functionality.

    :param message: event.Event object
    :returns: a new event.Event object with a populated Event.resource.id
              if found, otherwise the original Event is returned.
    """
    if message.resource.id:
        return message
    LOG.debug("Looking for %s resource for for tenant %s",
              message.resource.driver, message.resource.tenant_id)
    resource_id = self.resource_cache.get_by_tenant(
        message.resource, self._context, message)
    if not resource_id:
        LOG.warning(_LW(
            'Resource of type %s not found for tenant %s.'),
            message.resource.driver, message.resource.tenant_id)
        # Nothing found: hand back the original, id-less event.
        return message
    found = event.Resource(
        id=resource_id,
        driver=message.resource.driver,
        tenant_id=message.resource.tenant_id,
    )
    LOG.debug("Using resource %s.", found)
    return event.Event(
        resource=found,
        crud=message.crud,
        body=message.body,
    )
def _populate_resource_id(self, message):
    """Fill in the resource id on *message* when it is missing.

    Attempts a by-tenant lookup through the resource cache; when a
    match is found a fresh event.Event carrying the resolved id is
    returned, otherwise the original message is handed back untouched.

    :param message: event.Event object
    :returns: an event.Event with a populated resource id, or *message*
    """
    if message.resource.id:
        return message
    LOG.debug("Looking for %s resource for for tenant %s",
              message.resource.driver, message.resource.tenant_id)
    resource_id = self.resource_cache.get_by_tenant(
        message.resource, self._context, message)
    if not resource_id:
        LOG.warning(_LW('Resource of type %s not found for tenant %s.'),
                    message.resource.driver, message.resource.tenant_id)
    else:
        resolved = event.Resource(
            id=resource_id,
            driver=message.resource.driver,
            tenant_id=message.resource.tenant_id,
        )
        message = event.Event(
            resource=resolved,
            crud=message.crud,
            body=message.body,
        )
        LOG.debug("Using resource %s.", resolved)
    return message
def load_provider_rules(path):
    """Load the JSON provider-rules file at *path*.

    :param path: filesystem path of the rules file
    :returns: the parsed rules, or {} when the file cannot be read or
        parsed.
    """
    try:
        # Context manager closes the handle; the original
        # ``jsonutils.load(open(path))`` leaked it.
        with open(path) as rules_file:
            return jsonutils.load(rules_file)
    except Exception:  # pragma nocover
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.  Missing/unreadable/invalid
        # rules remain non-fatal.
        LOG.warning(_LW('unable to open provider rules: %s'), path)
        return {}
def _dispatch_command(self, target, message):
    """Execute a control-plane command carried in *message*.

    Dispatches on ``message.body['command']``: worker status reports,
    per-resource and per-tenant debug/manage toggles, forced resource
    events, global debug mode, and config reload.  Unknown commands are
    logged and ignored.
    """
    if not self._should_process_command(message):
        return
    instructions = message.body
    if instructions['command'] == commands.WORKERS_DEBUG:
        self.report_status()
    # NOTE(adam_g): Drop 'router-debug' compat in M.
    elif (instructions['command'] == commands.RESOURCE_DEBUG or
          instructions['command'] == commands.ROUTER_DEBUG):
        # Accept either the new or the deprecated id key.
        resource_id = (instructions.get('resource_id') or
                       instructions.get('router_id'))
        if not resource_id:
            LOG.warning(_LW(
                'Ignoring instruction to debug resource with no id'))
            return
        reason = instructions.get('reason')
        if resource_id in commands.WILDCARDS:
            # Refuse wildcard debug; it would capture every resource.
            LOG.warning(_LW(
                'Ignoring instruction to debug all resources with %r'),
                resource_id)
        else:
            LOG.info(_LI('Placing resource %s in debug mode (reason: %s)'),
                     resource_id, reason)
            self.db_api.enable_resource_debug(resource_id, reason)
    elif (instructions['command'] == commands.RESOURCE_MANAGE or
          instructions['command'] == commands.ROUTER_MANAGE):
        resource_id = (instructions.get('resource_id') or
                       instructions.get('router_id'))
        if not resource_id:
            LOG.warning(_LW(
                'Ignoring instruction to manage resource with no id'))
            return
        try:
            self.db_api.disable_resource_debug(resource_id)
            LOG.info(_LI('Resuming management of resource %s'),
                     resource_id)
        except KeyError:
            # Resource was not in debug mode; nothing to disable.
            pass
        try:
            self._resource_locks[resource_id].release()
            LOG.info(_LI('Unlocked resource %s'), resource_id)
        except KeyError:
            # No lock exists for this resource.
            pass
        except threading.ThreadError:
            # Already unlocked, that's OK.
            pass
    elif instructions['command'] in EVENT_COMMANDS:
        resource_id = instructions.get('resource_id')
        sm = self._find_state_machine_by_resource_id(resource_id)
        if not sm:
            LOG.debug(
                'Will not process command, no managed state machine '
                'found for resource %s', resource_id)
            return
        new_res = event.Resource(
            id=resource_id,
            driver=sm.driver.RESOURCE_NAME,
            tenant_id=sm.tenant_id)
        new_msg = event.Event(
            resource=new_res,
            crud=EVENT_COMMANDS[instructions['command']],
            body=instructions,
        )
        # Use handle_message() to ensure we acquire the lock
        LOG.info(_LI('sending %s instruction to %s'),
                 instructions['command'], new_res)
        self.handle_message(new_msg.resource.tenant_id, new_msg)
        LOG.info(_LI('forced %s for %s complete'),
                 instructions['command'], new_res)
    # NOTE(adam_g): This is here to support the deprecated old format of
    #               sending commands to specific routers and can be
    #               removed once the CLI component is dropped in M.
    elif instructions['command'] in DEPRECATED_ROUTER_COMMANDS:
        new_rsc = event.Resource(
            driver=drivers.router.Router.RESOURCE_NAME,
            id=message.body.get('router_id'),
            tenant_id=message.body.get('tenant_id'),
        )
        new_msg = event.Event(
            resource=new_rsc,
            crud=DEPRECATED_ROUTER_COMMANDS[instructions['command']],
            body=instructions,
        )
        # Use handle_message() to ensure we acquire the lock
        LOG.info(_LI('sending %s instruction to %s'),
                 instructions['command'], new_rsc)
        self.handle_message(new_msg.resource.tenant_id, new_msg)
        LOG.info(_LI('forced %s for %s complete'),
                 instructions['command'], new_rsc)
    elif instructions['command'] == commands.TENANT_DEBUG:
        tenant_id = instructions['tenant_id']
        reason = instructions.get('reason')
        if tenant_id in commands.WILDCARDS:
            # Refuse wildcard tenant debug as well.
            LOG.warning(_LW(
                'Ignoring instruction to debug all tenants with %r'),
                tenant_id)
        else:
            LOG.info(_LI('Placing tenant %s in debug mode (reason: %s)'),
                     tenant_id, reason)
            self.db_api.enable_tenant_debug(tenant_id, reason)
    elif instructions['command'] == commands.TENANT_MANAGE:
        tenant_id = instructions['tenant_id']
        try:
            self.db_api.disable_tenant_debug(tenant_id)
            LOG.info(_LI('Resuming management of tenant %s'), tenant_id)
        except KeyError:
            # Tenant was not in debug mode.
            pass
    elif instructions['command'] == commands.GLOBAL_DEBUG:
        enable = instructions.get('enabled')
        reason = instructions.get('reason')
        if enable == 1:
            LOG.info('Enabling global debug mode (reason: %s)', reason)
            self.db_api.enable_global_debug(reason)
        elif enable == 0:
            LOG.info('Disabling global debug mode')
            self.db_api.disable_global_debug()
        else:
            LOG.warning('Unrecognized global debug command: %s',
                        instructions)
    elif instructions['command'] == commands.CONFIG_RELOAD:
        try:
            # Re-invoking cfg.CONF() re-reads the config sources.
            cfg.CONF()
        except Exception:
            LOG.exception(_LE('Could not reload configuration'))
        else:
            cfg.CONF.log_opt_values(LOG, INFO)
    else:
        LOG.warning(_LW('Unrecognized command: %s'), instructions)
def _dispatch_command(self, target, message):
    """Execute a control-plane command carried in *message*.

    Dispatches on ``message.body['command']``: worker status reports,
    per-resource and per-tenant debug/manage toggles, forced resource
    events, global debug mode, and config reload.  Unknown commands are
    logged and ignored.
    """
    if not self._should_process_command(message):
        return
    instructions = message.body
    if instructions['command'] == commands.WORKERS_DEBUG:
        self.report_status()
    # NOTE(adam_g): Drop 'router-debug' compat in M.
    elif (instructions['command'] == commands.RESOURCE_DEBUG or
          instructions['command'] == commands.ROUTER_DEBUG):
        # Accept either the new or the deprecated id key.
        resource_id = (instructions.get('resource_id') or
                       instructions.get('router_id'))
        if not resource_id:
            LOG.warning(
                _LW('Ignoring instruction to debug resource with no id'))
            return
        reason = instructions.get('reason')
        if resource_id in commands.WILDCARDS:
            # Refuse wildcard debug; it would capture every resource.
            LOG.warning(
                _LW('Ignoring instruction to debug all resources with %r'),
                resource_id)
        else:
            LOG.info(_LI('Placing resource %s in debug mode (reason: %s)'),
                     resource_id, reason)
            self.db_api.enable_resource_debug(resource_id, reason)
    elif (instructions['command'] == commands.RESOURCE_MANAGE or
          instructions['command'] == commands.ROUTER_MANAGE):
        resource_id = (instructions.get('resource_id') or
                       instructions.get('router_id'))
        if not resource_id:
            LOG.warning(
                _LW('Ignoring instruction to manage resource with no id'))
            return
        try:
            self.db_api.disable_resource_debug(resource_id)
            LOG.info(_LI('Resuming management of resource %s'),
                     resource_id)
        except KeyError:
            # Resource was not in debug mode; nothing to disable.
            pass
        try:
            self._resource_locks[resource_id].release()
            LOG.info(_LI('Unlocked resource %s'), resource_id)
        except KeyError:
            # No lock exists for this resource.
            pass
        except threading.ThreadError:
            # Already unlocked, that's OK.
            pass
    elif instructions['command'] in EVENT_COMMANDS:
        resource_id = instructions.get('resource_id')
        sm = self._find_state_machine_by_resource_id(resource_id)
        if not sm:
            LOG.debug(
                'Will not process command, no managed state machine '
                'found for resource %s', resource_id)
            return
        new_res = event.Resource(id=resource_id,
                                 driver=sm.resource.RESOURCE_NAME,
                                 tenant_id=sm.tenant_id)
        new_msg = event.Event(
            resource=new_res,
            crud=EVENT_COMMANDS[instructions['command']],
            body=instructions,
        )
        # Use handle_message() to ensure we acquire the lock
        LOG.info(_LI('sending %s instruction to %s'),
                 instructions['command'], new_res)
        self.handle_message(new_msg.resource.tenant_id, new_msg)
        LOG.info(_LI('forced %s for %s complete'),
                 instructions['command'], new_res)
    # NOTE(adam_g): This is here to support the deprecated old format of
    #               sending commands to specific routers and can be
    #               removed once the CLI component is dropped in M.
    elif instructions['command'] in DEPRECATED_ROUTER_COMMANDS:
        new_rsc = event.Resource(
            driver=drivers.router.Router.RESOURCE_NAME,
            id=message.body.get('router_id'),
            tenant_id=message.body.get('tenant_id'),
        )
        new_msg = event.Event(
            resource=new_rsc,
            crud=DEPRECATED_ROUTER_COMMANDS[instructions['command']],
            body=instructions,
        )
        # Use handle_message() to ensure we acquire the lock
        LOG.info(_LI('sending %s instruction to %s'),
                 instructions['command'], new_rsc)
        self.handle_message(new_msg.resource.tenant_id, new_msg)
        LOG.info(_LI('forced %s for %s complete'),
                 instructions['command'], new_rsc)
    elif instructions['command'] == commands.TENANT_DEBUG:
        tenant_id = instructions['tenant_id']
        reason = instructions.get('reason')
        if tenant_id in commands.WILDCARDS:
            # Refuse wildcard tenant debug as well.
            LOG.warning(
                _LW('Ignoring instruction to debug all tenants with %r'),
                tenant_id)
        else:
            LOG.info(_LI('Placing tenant %s in debug mode (reason: %s)'),
                     tenant_id, reason)
            self.db_api.enable_tenant_debug(tenant_id, reason)
    elif instructions['command'] == commands.TENANT_MANAGE:
        tenant_id = instructions['tenant_id']
        try:
            self.db_api.disable_tenant_debug(tenant_id)
            LOG.info(_LI('Resuming management of tenant %s'), tenant_id)
        except KeyError:
            # Tenant was not in debug mode.
            pass
    elif instructions['command'] == commands.GLOBAL_DEBUG:
        enable = instructions.get('enabled')
        reason = instructions.get('reason')
        if enable == 1:
            LOG.info('Enabling global debug mode (reason: %s)', reason)
            self.db_api.enable_global_debug(reason)
        elif enable == 0:
            LOG.info('Disabling global debug mode')
            self.db_api.disable_global_debug()
        else:
            LOG.warning('Unrecognized global debug command: %s',
                        instructions)
    elif instructions['command'] == commands.CONFIG_RELOAD:
        try:
            # Re-invoking cfg.CONF() re-reads the config sources.
            cfg.CONF()
        except Exception:
            LOG.exception(_LE('Could not reload configuration'))
        else:
            cfg.CONF.log_opt_values(LOG, INFO)
    else:
        LOG.warning(_LW('Unrecognized command: %s'), instructions)