def _abort(node_info, ironic):
    """Abort a running introspection: block PXE, power off and mark finished.

    Runs in a background thread; assumes the caller holds the node lock
    (released here only on the early-exit path).

    :param node_info: NodeInfo being introspected
    :param ironic: Ironic client
    """
    # runs in background
    if node_info.finished_at is not None:
        # introspection already finished; nothing to do
        LOG.info(_LI('Cannot abort introspection as it is already '
                     'finished'), node_info=node_info)
        node_info.release_lock()
        return

    # block this node from PXE Booting the introspection image
    try:
        firewall.update_filters(ironic)
    except Exception as exc:
        # Note(mkovacik): this will be retried in firewall update
        # periodic task; we continue aborting
        LOG.warning(_LW('Failed to update firewall filters: %s'), exc,
                    node_info=node_info)

    # finish the introspection
    LOG.debug('Forcing power-off', node_info=node_info)
    try:
        ironic.node.set_power_state(node_info.uuid, 'off')
    except Exception as exc:
        # best-effort: record the failure but still mark the node finished
        LOG.warning(_LW('Failed to power off node: %s'), exc,
                    node_info=node_info)

    node_info.finished(error=_('Canceled by operator'))
    LOG.info(_LI('Introspection aborted'), node_info=node_info)
def create_ssl_context():
    """Create an SSL context for the API server, if SSL is enabled.

    :returns: an ``ssl.SSLContext`` configured with the certificate and key
        from configuration, or None when SSL is disabled or the running
        Python is too old to support ``ssl.create_default_context``.
    """
    if not CONF.use_ssl:
        return

    # ssl.create_default_context appeared in Python 2.7.9
    MIN_VERSION = (2, 7, 9)
    if sys.version_info < MIN_VERSION:
        # FIX: the original used '%{current}' / '%{min}', which is not valid
        # %-style named interpolation and breaks message formatting;
        # the correct form is '%(current)s' / '%(min)s'.
        LOG.warning(_LW('Unable to use SSL in this version of Python: '
                        '%(current)s, please ensure your version of Python is '
                        'greater than %(min)s to enable this feature.'),
                    {'current': '.'.join(map(str, sys.version_info[:3])),
                     'min': '.'.join(map(str, MIN_VERSION))})
        return

    context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    if CONF.ssl_cert_path and CONF.ssl_key_path:
        try:
            context.load_cert_chain(CONF.ssl_cert_path, CONF.ssl_key_path)
        except IOError as exc:
            # FIX: '%{cert}' etc. replaced with valid '%(cert)s' placeholders
            LOG.warning(_LW('Failed to load certificate or key from defined '
                            'locations: %(cert)s and %(key)s, will continue '
                            'to run with the default settings: %(exc)s'),
                        {'cert': CONF.ssl_cert_path,
                         'key': CONF.ssl_key_path,
                         'exc': exc})
        except ssl.SSLError as exc:
            LOG.warning(_LW('There was a problem with the loaded certificate '
                            'and key, will continue to run with the default '
                            'settings: %s'), exc)
    return context
def create_ssl_context():
    """Create an SSL context for the API server, if SSL is enabled.

    :returns: an ``ssl.SSLContext`` configured with the certificate and key
        from configuration, or None when SSL is disabled or the running
        Python is too old to support ``ssl.create_default_context``.
    """
    if not CONF.use_ssl:
        return

    # ssl.create_default_context appeared in Python 2.7.9
    MIN_VERSION = (2, 7, 9)
    if sys.version_info < MIN_VERSION:
        # FIX: '%{current}' / '%{min}' is not valid %-style named
        # interpolation; use '%(current)s' / '%(min)s' instead.
        LOG.warning(
            _LW('Unable to use SSL in this version of Python: '
                '%(current)s, please ensure your version of Python is '
                'greater than %(min)s to enable this feature.'),
            {
                'current': '.'.join(map(str, sys.version_info[:3])),
                'min': '.'.join(map(str, MIN_VERSION))
            })
        return

    context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    if CONF.ssl_cert_path and CONF.ssl_key_path:
        try:
            context.load_cert_chain(CONF.ssl_cert_path, CONF.ssl_key_path)
        except IOError as exc:
            # FIX: '%{cert}' etc. replaced with valid '%(cert)s' placeholders
            LOG.warning(
                _LW('Failed to load certificate or key from defined '
                    'locations: %(cert)s and %(key)s, will continue to '
                    'run with the default settings: %(exc)s'),
                {
                    'cert': CONF.ssl_cert_path,
                    'key': CONF.ssl_key_path,
                    'exc': exc
                })
        except ssl.SSLError as exc:
            LOG.warning(
                _LW('There was a problem with the loaded certificate '
                    'and key, will continue to run with the default '
                    'settings: %s'), exc)
    return context
def before_update(self, introspection_data, node_info, **kwargs):
    """Detect a newly added disk and set it as the node's root device.

    On the first run, records the currently visible disk serials in
    ``node.extra``. On a later run, any single serial not seen before is
    assumed to be the newly installed root disk and is written into
    ``properties/root_device``; zero or multiple new serials abort the
    detection with a warning.
    """
    current_devices = self._get_serials(introspection_data)
    if not current_devices:
        LOG.warning(_LW('No block device was received from ramdisk'),
                    node_info=node_info, data=introspection_data)
        return

    node = node_info.node()
    if 'root_device' in node.properties:
        # nothing to do: a root device was already chosen
        LOG.info(_LI('Root device is already known for the node'),
                 node_info=node_info, data=introspection_data)
        return

    if 'block_devices' in node.extra:
        # Compare previously discovered devices with the current ones
        previous_devices = node.extra['block_devices']['serials']
        new_devices = [
            device for device in current_devices
            if device not in previous_devices
        ]
        if len(new_devices) > 1:
            LOG.warning(_LW('Root device cannot be identified because '
                            'multiple new devices were found'),
                        node_info=node_info, data=introspection_data)
            return
        elif len(new_devices) == 0:
            LOG.warning(_LW('No new devices were found'),
                        node_info=node_info, data=introspection_data)
            return

        # exactly one new device: record it and drop the bookkeeping entry
        node_info.patch([{
            'op': 'remove',
            'path': '/extra/block_devices'
        }, {
            'op': 'add',
            'path': '/properties/root_device',
            'value': {
                'serial': new_devices[0]
            }
        }])
    else:
        # No previously discovered devices - save the inspector block
        # devices in node.extra
        node_info.patch([{
            'op': 'add',
            'path': '/extra/block_devices',
            'value': {
                'serials': current_devices
            }
        }])
def _validate_interfaces(self, interfaces, data=None):
    """Validate interfaces on correctness and suitability.

    Filters out interfaces without link information, with invalid MACs,
    loopback interfaces, and — depending on the
    ``[processing]add_ports`` option — interfaces that were not PXE
    booting or have no IP address.

    :param interfaces: dict interface name -> dict with 'mac'/'ip' keys
    :param data: raw introspection data (for logging context)
    :raises: utils.Error if no interfaces were supplied or none survive
        filtering
    :return: dict interface name -> dict with keys 'mac' and 'ip'
    """
    if not interfaces:
        raise utils.Error(_('No interfaces supplied by the ramdisk'),
                          data=data)

    pxe_mac = utils.get_pxe_mac(data)
    if not pxe_mac and CONF.processing.add_ports == 'pxe':
        LOG.warning(_LW('No boot interface provided in the introspection '
                        'data, will add all ports with IP addresses'))

    result = {}

    for name, iface in interfaces.items():
        mac = iface.get('mac')
        ip = iface.get('ip')
        if not mac:
            LOG.debug('Skipping interface %s without link information',
                      name, data=data)
            continue

        if not utils.is_valid_mac(mac):
            LOG.warning(_LW('MAC %(mac)s for interface %(name)s is not '
                            'valid, skipping'),
                        {'mac': mac, 'name': name}, data=data)
            continue

        mac = mac.lower()

        if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()):
            LOG.debug('Skipping local interface %s', name, data=data)
            continue

        if (CONF.processing.add_ports == 'pxe' and pxe_mac
                and mac != pxe_mac):
            LOG.debug('Skipping interface %s as it was not PXE booting',
                      name, data=data)
            continue
        elif CONF.processing.add_ports != 'all' and not ip:
            LOG.debug('Skipping interface %s as it did not have '
                      'an IP address assigned during the ramdisk run',
                      name, data=data)
            continue

        # NOTE: mac is already lower-cased above; the second .lower() is
        # redundant but harmless
        result[name] = {'ip': ip, 'mac': mac.lower()}

    if not result:
        raise utils.Error(_('No suitable interfaces found in %s') %
                          interfaces, data=data)
    return result
def before_processing(self, introspection_data, **kwargs):
    """Validate information about network interfaces.

    Keeps only interfaces with a valid MAC; depending on
    ``[processing]add_ports`` further restricts to the PXE-booting
    interface or to interfaces with an IP address. Stores the full set
    under ``all_interfaces``, the filtered set under ``interfaces`` and
    the surviving MACs under ``macs``.

    :raises: utils.Error if no interfaces were supplied or none are valid
    """
    bmc_address = introspection_data.get('ipmi_address')

    if not introspection_data.get('interfaces'):
        raise utils.Error(_('No interfaces supplied by the ramdisk'))

    valid_interfaces = {
        n: iface for n, iface in introspection_data['interfaces'].items()
        if utils.is_valid_mac(iface.get('mac'))
    }

    pxe_mac = introspection_data.get('boot_interface')
    if CONF.processing.add_ports == 'pxe' and not pxe_mac:
        LOG.warning(_LW('No boot interface provided in the introspection '
                        'data, will add all ports with IP addresses'))

    if CONF.processing.add_ports == 'pxe' and pxe_mac:
        LOG.info(_LI('PXE boot interface was %s'), pxe_mac)
        if '-' in pxe_mac:
            # pxelinux format: 01-aa-bb-cc-dd-ee-ff
            # strip the hardware-type prefix, then normalize separators
            pxe_mac = pxe_mac.split('-', 1)[1]
            pxe_mac = pxe_mac.replace('-', ':').lower()

        valid_interfaces = {
            n: iface for n, iface in valid_interfaces.items()
            if iface['mac'].lower() == pxe_mac
        }
    elif CONF.processing.add_ports != 'all':
        # 'active' mode: only interfaces that had an IP during the ramdisk run
        valid_interfaces = {
            n: iface for n, iface in valid_interfaces.items()
            if iface.get('ip')
        }

    if not valid_interfaces:
        raise utils.Error(_('No valid interfaces found for node with '
                            'BMC %(ipmi_address)s, got %(interfaces)s') %
                          {'ipmi_address': bmc_address,
                           'interfaces': introspection_data['interfaces']})
    elif valid_interfaces != introspection_data['interfaces']:
        invalid = {n: iface
                   for n, iface in introspection_data['interfaces'].items()
                   if n not in valid_interfaces}
        LOG.warning(_LW(
            'The following interfaces were invalid or not eligible in '
            'introspection data for node with BMC %(ipmi_address)s and '
            'were excluded: %(invalid)s'),
            {'invalid': invalid, 'ipmi_address': bmc_address})
        LOG.info(_LI('Eligible interfaces are %s'), valid_interfaces)

    introspection_data['all_interfaces'] = introspection_data['interfaces']
    introspection_data['interfaces'] = valid_interfaces
    valid_macs = [iface['mac'] for iface in valid_interfaces.values()]
    introspection_data['macs'] = valid_macs
def _get_interfaces(self, data=None):
    """Convert inventory to a dict with interfaces.

    Prefers the ironic-python-agent ``inventory`` section; falls back to
    the deprecated bash-ramdisk ``interfaces`` key when no inventory is
    present. Interfaces without a name are logged as errors; interfaces
    without a MAC or with an invalid MAC are skipped.

    :return: dict interface name -> dict with keys 'mac' and 'ip'
    """
    result = {}
    inventory = data.get('inventory', {})

    if inventory:
        for iface in inventory.get('interfaces', ()):
            name = iface.get('name')
            mac = iface.get('mac_address')
            ip = iface.get('ipv4_address')

            if not name:
                LOG.error(_LE('Malformed interface record: %s'),
                          iface, data=data)
                continue

            if not mac:
                LOG.debug('Skipping interface %s without link information',
                          name, data=data)
                continue

            if not utils.is_valid_mac(mac):
                LOG.warning(_LW('MAC %(mac)s for interface %(name)s is '
                                'not valid, skipping'), {
                                    'mac': mac,
                                    'name': name
                                },
                            data=data)
                continue

            mac = mac.lower()

            LOG.debug(
                'Found interface %(name)s with MAC "%(mac)s" and '
                'IP address "%(ip)s"', {
                    'name': name,
                    'mac': mac,
                    'ip': ip
                }, data=data)
            result[name] = {'ip': ip, 'mac': mac}
    else:
        LOG.warning(_LW('No inventory provided: using old bash ramdisk '
                        'is deprecated, please switch to '
                        'ironic-python-agent'), data=data)
        result = data.get('interfaces')

    return result
def before_update(self, introspection_data, node_info, **kwargs):
    """Stores the 'data' key from introspection_data in Swift.

    If the 'data' key exists, updates Ironic extra column
    'hardware_swift_object' key to the name of the Swift object, and
    stores the data in the 'inspector' container in Swift.

    Otherwise, it does nothing.
    """
    if 'data' not in introspection_data:
        LOG.warning(_LW('No extra hardware information was received from '
                        'the ramdisk'),
                    node_info=node_info, data=introspection_data)
        return
    data = introspection_data['data']

    # object name is derived from the node UUID, so repeated runs overwrite
    name = 'extra_hardware-%s' % node_info.uuid
    self._store_extra_hardware(name, json.dumps(data))

    # NOTE(sambetts) If data is edeploy format, convert to dicts for rules
    # processing, store converted data in introspection_data['extra'].
    # Delete introspection_data['data'], it is assumed unusable
    # by rules.
    if self._is_edeploy_data(data):
        LOG.debug(
            'Extra hardware data is in eDeploy format, '
            'converting to usable format',
            node_info=node_info, data=introspection_data)
        introspection_data['extra'] = self._convert_edeploy_data(data)
    else:
        LOG.warning(_LW('Extra hardware data was not in a recognised '
                        'format (eDeploy), and will not be forwarded to '
                        'introspection rules'),
                    node_info=node_info, data=introspection_data)

    LOG.debug(
        'Deleting \"data\" key from introspection data as it is '
        'assumed unusable by introspection rules. Raw data is '
        'stored in swift',
        node_info=node_info, data=introspection_data)
    del introspection_data['data']

    node_info.patch([{
        'op': 'add',
        'path': '/extra/hardware_swift_object',
        'value': name
    }])
def _parse_lldp_tlvs(self, tlvs, node_info): """Parse LLDP TLVs into dictionary of name/value pairs :param tlvs: list of raw TLVs :param node_info: node being introspected :returns nv: dictionary of name/value pairs. The LLDP user-friendly names, e.g. "switch_port_id" are the keys """ # Generate name/value pairs for each TLV supported by this plugin. parser = lldp_parsers.LLDPBasicMgmtParser(node_info) for tlv_type, tlv_value in tlvs: try: data = bytearray(binascii.a2b_hex(tlv_value)) except TypeError as e: LOG.warning(_LW( "TLV value for TLV type %(tlv_type)d not in correct " "format, value must be in hexadecimal: %(msg)s"), {'tlv_type': tlv_type, 'msg': e}, node_info=node_info) continue if parser.parse_tlv(tlv_type, data): LOG.debug("Handled TLV type %d", tlv_type, node_info=node_info) else: LOG.debug("LLDP TLV type %d not handled", tlv_type, node_info=node_info) return parser.nv_dict
def init(): """Initialize firewall management. Must be called one on start-up. """ if not CONF.firewall.manage_firewall: return global INTERFACE, CHAIN, NEW_CHAIN, BASE_COMMAND, BLACKLIST_CACHE BLACKLIST_CACHE = None INTERFACE = CONF.firewall.dnsmasq_interface CHAIN = CONF.firewall.firewall_chain NEW_CHAIN = CHAIN + '_temp' BASE_COMMAND = ('sudo', 'ironic-inspector-rootwrap', CONF.rootwrap_config, 'iptables',) # -w flag makes iptables wait for xtables lock, but it's not supported # everywhere yet try: with open(os.devnull, 'wb') as null: subprocess.check_call(BASE_COMMAND + ('-w', '-h'), stderr=null, stdout=null) except subprocess.CalledProcessError: LOG.warning(_LW('iptables does not support -w flag, please update ' 'it to at least version 1.4.21')) else: BASE_COMMAND += ('-w',) _clean_up(CHAIN) # Not really needed, but helps to validate that we have access to iptables _iptables('-N', CHAIN)
def init(): """Initialize firewall management. Must be called one on start-up. """ if not CONF.firewall.manage_firewall: return global INTERFACE, CHAIN, NEW_CHAIN, BASE_COMMAND INTERFACE = CONF.firewall.dnsmasq_interface CHAIN = CONF.firewall.firewall_chain NEW_CHAIN = CHAIN + '_temp' BASE_COMMAND = ('sudo', 'ironic-inspector-rootwrap', CONF.rootwrap_config, 'iptables',) # -w flag makes iptables wait for xtables lock, but it's not supported # everywhere yet try: with open(os.devnull, 'wb') as null: subprocess.check_call(BASE_COMMAND + ('-w', '-h'), stderr=null, stdout=null) except subprocess.CalledProcessError: LOG.warning(_LW('iptables does not support -w flag, please update ' 'it to at least version 1.4.21')) else: BASE_COMMAND += ('-w',) _clean_up(CHAIN) # Not really needed, but helps to validate that we have access to iptables _iptables('-N', CHAIN)
def create_ports(self, ports, ironic=None):
    """Create one or several ports for this node.

    :param ports: List of ports with all their attributes
                  e.g [{'mac': xx, 'ip': xx, 'client_id': None},
                       {'mac': xx, 'ip': None, 'client_id': None}]
                  It also support the old style of list of macs.
                  A warning is issued if port already exists on a node.
    :param ironic: Ironic client to use instead of self.ironic
    """
    existing_macs = []
    for port in ports:
        # old style: port may be a bare MAC string instead of a dict
        mac = port
        extra = {}
        if isinstance(port, dict):
            mac = port['mac']
            client_id = port.get('client_id')
            if client_id:
                extra = {'client-id': client_id}

        # NOTE(review): self.ports() is queried per iteration — presumably
        # it is cached and/or updated by _create_port; confirm before
        # hoisting it out of the loop.
        if mac not in self.ports():
            self._create_port(mac, ironic=ironic, extra=extra)
        else:
            existing_macs.append(mac)
    if existing_macs:
        LOG.warning(_LW('Did not create ports %s as they already exist'),
                    existing_macs, node_info=self)
def _check_existing_nodes(introspection_data, node_driver_info, ironic):
    """Refuse enrolling a discovered node that already exists in Ironic.

    :param introspection_data: introspection data for the discovered node
    :param node_driver_info: driver_info collected for the new node
    :param ironic: Ironic client
    :raises: utils.Error if a port with one of the node's MACs or a node
        with the same BMC (IPMI) address already exists
    """
    macs = utils.get_valid_macs(introspection_data)
    if macs:
        # verify existing ports
        for mac in macs:
            ports = ironic.port.list(address=mac)
            if not ports:
                continue
            raise utils.Error(
                _('Port %(mac)s already exists, uuid: %(uuid)s') %
                {'mac': mac, 'uuid': ports[0].uuid},
                data=introspection_data)
    else:
        LOG.warning(_LW('No suitable interfaces found for discovered node. '
                        'Check that validate_interfaces hook is listed in '
                        '[processing]default_processing_hooks config option'))

    # verify existing node with discovered ipmi address
    ipmi_address = node_driver_info.get('ipmi_address')
    if ipmi_address:
        # FIXME(aarefiev): it's not effective to fetch all nodes, and may
        # impact on performance on big clusters
        nodes = ironic.node.list(fields=('uuid', 'driver_info'), limit=0)
        for node in nodes:
            if ipmi_address == ir_utils.get_ipmi_address(node):
                raise utils.Error(
                    _('Node %(uuid)s already has BMC address '
                      '%(ipmi_address)s, not enrolling') %
                    {'ipmi_address': ipmi_address, 'uuid': node.uuid},
                    data=introspection_data)
def handle_org_specific_tlv(self, struct, name, data):
    """Handle Organizationally Unique ID TLVs

    This class supports 802.1Q and 802.3 OUI TLVs.

    See http://www.ieee802.org/1/pages/802.1Q-2014.html, Annex D
    and http://standards.ieee.org/about/get/802/802.3.html

    :param struct: parsed TLV header containing ``oui`` and ``subtype``
    :param name: TLV name (unused here; kept for the parser interface)
    :param data: raw TLV payload; bytes after the 4-byte OUI header are
        the subtype-specific value
    """
    oui = binascii.hexlify(struct.oui).decode()
    subtype = struct.subtype
    # skip the OUI + subtype header to get the subtype payload
    oui_data = data[4:]

    if oui == tlv.LLDP_802dot1_OUI:
        parser = LLDPdot1Parser(self.node_info, self.nv_dict)
        if parser.parse_tlv(subtype, oui_data):
            LOG.debug("Handled 802.1 subtype %d", subtype)
        else:
            LOG.debug("Subtype %d not found for 802.1", subtype)
    elif oui == tlv.LLDP_802dot3_OUI:
        parser = LLDPdot3Parser(self.node_info, self.nv_dict)
        if parser.parse_tlv(subtype, oui_data):
            LOG.debug("Handled 802.3 subtype %d", subtype)
        else:
            LOG.debug("Subtype %d not found for 802.3", subtype)
    else:
        LOG.warning(_LW("Organizationally Unique ID %s not "
                        "recognized"), oui, node_info=self.node_info)
def _check_existing_nodes(introspection_data, node_driver_info, ironic):
    """Refuse enrolling a discovered node that already exists in Ironic.

    :param introspection_data: introspection data for the discovered node
    :param node_driver_info: driver_info collected for the new node
    :param ironic: Ironic client
    :raises: utils.Error if a port with one of the node's MACs or a node
        with the same BMC (IPMI) address already exists
    """
    macs = utils.get_valid_macs(introspection_data)
    if macs:
        # verify existing ports
        for mac in macs:
            ports = ironic.port.list(address=mac)
            if not ports:
                continue
            raise utils.Error(
                _('Port %(mac)s already exists, uuid: %(uuid)s') % {
                    'mac': mac,
                    'uuid': ports[0].uuid
                }, data=introspection_data)
    else:
        LOG.warning(
            _LW('No suitable interfaces found for discovered node. '
                'Check that validate_interfaces hook is listed in '
                '[processing]default_processing_hooks config option'))

    # verify existing node with discovered ipmi address
    ipmi_address = node_driver_info.get('ipmi_address')
    if ipmi_address:
        # FIXME(aarefiev): it's not effective to fetch all nodes, and may
        # impact on performance on big clusters
        nodes = ironic.node.list(fields=('uuid', 'driver_info'), limit=0)
        for node in nodes:
            if ipmi_address == ir_utils.get_ipmi_address(node):
                raise utils.Error(_('Node %(uuid)s already has BMC address '
                                    '%(ipmi_address)s, not enrolling') % {
                                        'ipmi_address': ipmi_address,
                                        'uuid': node.uuid
                                    }, data=introspection_data)
def before_update(self, introspection_data, node_info, **kwargs):
    """Update node with scheduler properties.

    Derives local_gb from the selected root disk, and cpus/cpu_arch/
    memory_mb from the IPA inventory (falling back to the deprecated
    bash-ramdisk top-level keys when no inventory is present), then
    writes them into node properties. Existing properties are kept
    unless ``[processing]overwrite_existing`` is set.

    :raises: utils.Error on missing required keys or malformed inventory
    """
    inventory = introspection_data.get('inventory')
    errors = []

    root_disk = introspection_data.get('root_disk')
    if root_disk:
        introspection_data['local_gb'] = root_disk['size'] // units.Gi
        # leave 1 GiB of slack for the partitioning layout if configured
        if CONF.processing.disk_partitioning_spacing:
            introspection_data['local_gb'] -= 1
    elif inventory:
        errors.append(_('root disk is not supplied by the ramdisk and '
                        'root_disk_selection hook is not enabled'))

    if inventory:
        try:
            introspection_data['cpus'] = int(inventory['cpu']['count'])
            introspection_data['cpu_arch'] = six.text_type(
                inventory['cpu']['architecture'])
        except (KeyError, ValueError, TypeError):
            errors.append(_('malformed or missing CPU information: %s') %
                          inventory.get('cpu'))

        try:
            introspection_data['memory_mb'] = int(
                inventory['memory']['physical_mb'])
        except (KeyError, ValueError, TypeError):
            errors.append(_('malformed or missing memory information: %s; '
                            'introspection requires physical memory size '
                            'from dmidecode') % inventory.get('memory'))
    else:
        LOG.warning(_LW('No inventory provided: using old bash ramdisk '
                        'is deprecated, please switch to '
                        'ironic-python-agent'),
                    node_info=node_info, data=introspection_data)

        # bash-ramdisk path: required keys must be present at top level
        missing = [key for key in self.KEYS
                   if not introspection_data.get(key)]
        if missing:
            raise utils.Error(
                _('The following required parameters are missing: %s') %
                missing, node_info=node_info, data=introspection_data)

    if errors:
        raise utils.Error(_('The following problems encountered: %s') %
                          '; '.join(errors),
                          node_info=node_info, data=introspection_data)

    LOG.info(_LI('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, '
                 'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'),
             {key: introspection_data.get(key) for key in self.KEYS},
             node_info=node_info, data=introspection_data)

    overwrite = CONF.processing.overwrite_existing
    properties = {key: str(introspection_data[key])
                  for key in self.KEYS
                  if overwrite or
                  not node_info.node().properties.get(key)}
    node_info.update_properties(**properties)
def get_ipmi_address(node): ipmi_fields = ['ipmi_address'] + CONF.ipmi_address_fields # NOTE(sambetts): IPMI Address is useless to us if bridging is enabled so # just ignore it and return None if node.driver_info.get("ipmi_bridging", "no") != "no": return for name in ipmi_fields: value = node.driver_info.get(name) if not value: continue try: ip = socket.gethostbyname(value) except socket.gaierror: msg = _('Failed to resolve the hostname (%(value)s)' ' for node %(uuid)s') raise utils.Error(msg % {'value': value, 'uuid': node.uuid}, node_info=node) if netaddr.IPAddress(ip).is_loopback(): LOG.warning(_LW('Ignoring loopback BMC address %s'), ip, node_info=node) ip = None return ip
def _get_local_link_patch(self, tlv_type, tlv_value, port):
    """Build a JSON patch for the port's local_link_connection from a TLV.

    :param tlv_type: LLDP TLV type (port ID or chassis ID are handled)
    :param tlv_value: hex-encoded TLV payload
    :param port: Ironic port being updated
    :returns: a JSON patch dict, or None when the TLV is malformed,
        of an unsupported subtype, or the field is already set and
        overwriting is disabled
    """
    try:
        data = bytearray(binascii.unhexlify(tlv_value))
    except TypeError:
        # FIX: the original concatenated string literals produced
        # "correctformat" — a space was missing between the fragments.
        LOG.warning(_LW("TLV value for TLV type %d not in correct "
                        "format, ensure TLV value is in "
                        "hexidecimal format when sent to "
                        "inspector"), tlv_type)
        return

    item = value = None
    if tlv_type == LLDP_TLV_TYPE_PORT_ID:
        # Check to ensure the port id is an allowed type
        item = "port_id"
        if data[0] in STRING_PORT_SUBTYPES:
            value = data[1:].decode()
        if data[0] == PORT_ID_SUBTYPE_MAC:
            value = str(netaddr.EUI(
                binascii.hexlify(data[1:]).decode(),
                dialect=netaddr.mac_unix_expanded))
    elif tlv_type == LLDP_TLV_TYPE_CHASSIS_ID:
        # Check to ensure the chassis id is the allowed type
        if data[0] == CHASSIS_ID_SUBTYPE_MAC:
            item = "switch_id"
            value = str(netaddr.EUI(
                binascii.hexlify(data[1:]).decode(),
                dialect=netaddr.mac_unix_expanded))

    if item and value:
        if (not CONF.processing.overwrite_existing and
                item in port.local_link_connection):
            return
        return {'op': 'add',
                'path': '/local_link_connection/%s' % item,
                'value': value}
def _get_interfaces(self, data=None):
    """Convert inventory to a dict with interfaces.

    Prefers the ironic-python-agent ``inventory`` section; falls back to
    the deprecated bash-ramdisk ``interfaces`` key when no inventory is
    present.

    NOTE(review): unlike the validating variant of this helper, MAC
    addresses are not checked or lower-cased here — confirm downstream
    consumers validate them.

    :return: dict interface name -> dict with keys 'mac' and 'ip'
    """
    result = {}
    inventory = data.get('inventory', {})

    if inventory:
        for iface in inventory.get('interfaces', ()):
            name = iface.get('name')
            mac = iface.get('mac_address')
            ip = iface.get('ipv4_address')

            if not name:
                LOG.error(_LE('Malformed interface record: %s'),
                          iface, data=data)
                continue

            LOG.debug('Found interface %(name)s with MAC "%(mac)s" and '
                      'IP address "%(ip)s"',
                      {'name': name, 'mac': mac, 'ip': ip}, data=data)
            result[name] = {'ip': ip, 'mac': mac}
    else:
        LOG.warning(_LW('No inventory provided: using old bash ramdisk '
                        'is deprecated, please switch to '
                        'ironic-python-agent'), data=data)
        result = data.get('interfaces')

    return result
def _get_local_link_patch(self, tlv_type, tlv_value, port):
    """Build a JSON patch for the port's local_link_connection from a TLV.

    :param tlv_type: LLDP TLV type (port ID or chassis ID are handled)
    :param tlv_value: hex-encoded TLV payload
    :param port: Ironic port being updated
    :returns: a JSON patch dict, or None when the TLV is malformed,
        of an unsupported subtype, or the field is already set and
        overwriting is disabled
    """
    try:
        data = bytearray(binascii.unhexlify(tlv_value))
    except TypeError:
        # FIX: the original concatenated string literals produced
        # "correctformat" — a space was missing between the fragments.
        LOG.warning(
            _LW("TLV value for TLV type %d not in correct "
                "format, ensure TLV value is in "
                "hexidecimal format when sent to "
                "inspector"), tlv_type)
        return

    item = value = None
    if tlv_type == LLDP_TLV_TYPE_PORT_ID:
        # Check to ensure the port id is an allowed type
        item = "port_id"
        if data[0] in STRING_PORT_SUBTYPES:
            value = data[1:].decode()
        if data[0] == PORT_ID_SUBTYPE_MAC:
            # FIX: use the colon-separated unix-expanded dialect; netaddr's
            # default EUI rendering is dash-separated, which is inconsistent
            # with the sibling implementation of this helper.
            value = str(netaddr.EUI(
                binascii.hexlify(data[1:]).decode(),
                dialect=netaddr.mac_unix_expanded))
    elif tlv_type == LLDP_TLV_TYPE_CHASSIS_ID:
        # Check to ensure the chassis id is the allowed type
        if data[0] == CHASSIS_ID_SUBTYPE_MAC:
            item = "switch_id"
            value = str(netaddr.EUI(
                binascii.hexlify(data[1:]).decode(),
                dialect=netaddr.mac_unix_expanded))

    if item and value:
        if (not CONF.processing.overwrite_existing and
                item in port.local_link_connection):
            return
        return {
            'op': 'add',
            'path': '/local_link_connection/%s' % item,
            'value': value
        }
def init(self):
    """Initialize the service: middleware, DB, hooks, firewall, periodics.

    Exits the process when a configured processing hook fails to load.
    """
    if utils.get_auth_strategy() != 'noauth':
        utils.add_auth_middleware(app)
    else:
        LOG.warning(_LW('Starting unauthenticated, please check'
                        ' configuration'))

    if CONF.processing.store_data == 'none':
        LOG.warning(_LW('Introspection data will not be stored. Change '
                        '"[processing] store_data" option if this is not '
                        'the desired behavior'))
    elif CONF.processing.store_data == 'swift':
        LOG.info(_LI('Introspection data will be stored in Swift in the '
                     'container %s'), CONF.swift.container)

    utils.add_cors_middleware(app)

    db.init()

    try:
        hooks = [ext.name for ext in
                 plugins_base.processing_hooks_manager()]
    except KeyError as exc:
        # callback function raises MissingHookError derived from KeyError
        # on missing hook
        LOG.critical(_LC('Hook(s) %s failed to load or was not found'),
                     str(exc))
        sys.exit(1)

    LOG.info(_LI('Enabled processing hooks: %s'), hooks)

    if CONF.firewall.manage_firewall:
        firewall.init()

    # wrap the periodic tasks with their spacing/enabled settings before
    # handing them to the worker
    periodic_update_ = periodics.periodic(
        spacing=CONF.firewall.firewall_update_period,
        enabled=CONF.firewall.manage_firewall
    )(periodic_update)
    periodic_clean_up_ = periodics.periodic(
        spacing=CONF.clean_up_period
    )(periodic_clean_up)

    self._periodics_worker = periodics.PeriodicWorker(
        callables=[(periodic_update_, None, None),
                   (periodic_clean_up_, None, None)],
        executor_factory=periodics.ExistingExecutor(utils.executor()))
    utils.executor().submit(self._periodics_worker.start)
def _background_introspect_locked(ironic, node_info):
    """Start introspection for a node whose lock is already held.

    Whitelists the node's MACs on the firewall, validates that lookup
    attributes exist, then sets PXE boot and reboots the node (unless new
    IPMI credentials were requested, in which case manual power-on is
    expected).

    :raises: utils.Error when no lookup attributes exist or power-on fails
    """
    # TODO(dtantsur): pagination
    macs = list(node_info.ports())
    if macs:
        node_info.add_attribute(node_cache.MACS_ATTRIBUTE, macs)
        LOG.info(
            _LI('Whitelisting MAC\'s %(macs)s for node %(node)s on the'
                ' firewall') % {
                    'macs': macs,
                    'node': node_info.uuid
                })
        firewall.update_filters(ironic)

    attrs = node_info.attributes
    if CONF.processing.node_not_found_hook is None and not attrs:
        raise utils.Error(
            _('No lookup attributes were found for node %s, inspector won\'t '
              'be able to find it after introspection. Consider creating '
              'ironic ports or providing an IPMI address.') % node_info.uuid)

    LOG.info(
        _LI('The following attributes will be used for looking up '
            'node %(uuid)s: %(attrs)s'), {
                'attrs': attrs,
                'uuid': node_info.uuid
            })

    if not node_info.options.get('new_ipmi_credentials'):
        try:
            ironic.node.set_boot_device(node_info.uuid, 'pxe',
                                        persistent=False)
        except Exception as exc:
            # non-fatal: some drivers cannot set the boot device
            LOG.warning(
                _LW('Failed to set boot device to PXE for'
                    ' node %(node)s: %(exc)s') % {
                        'node': node_info.uuid,
                        'exc': exc
                    })

        try:
            ironic.node.set_power_state(node_info.uuid, 'reboot')
        except Exception as exc:
            raise utils.Error(
                _('Failed to power on node %(node)s,'
                  ' check it\'s power '
                  'management configuration:\n%(exc)s') % {
                      'node': node_info.uuid,
                      'exc': exc
                  })
        LOG.info(_LI('Introspection started successfully for node %s'),
                 node_info.uuid)
    else:
        LOG.info(
            _LI('Introspection environment is ready for node %(node)s, '
                'manual power on is required within %(timeout)d seconds') % {
                    'node': node_info.uuid,
                    'timeout': CONF.timeout
                })
def _run_post_hooks(node_info, introspection_data):
    """Run every processing hook's before_update and apply its patches."""
    for extension in plugins_base.processing_hooks_manager():
        patches_for_node = []
        patches_for_ports = {}
        extension.obj.before_update(introspection_data, node_info,
                                    node_patches=patches_for_node,
                                    ports_patches=patches_for_ports)

        # hooks are expected to patch directly; these channels are legacy
        if patches_for_node:
            LOG.warning(_LW('Using node_patches is deprecated'))
            node_info.patch(patches_for_node)

        if patches_for_ports:
            LOG.warning(_LW('Using ports_patches is deprecated'))
            for port_mac, port_patch in patches_for_ports.items():
                node_info.patch_port(port_mac, port_patch)
def _background_introspect(ironic, node_info): global _LAST_INTROSPECTION_TIME # TODO(dtantsur): pagination macs = list(node_info.ports()) if macs: node_info.add_attribute(node_cache.MACS_ATTRIBUTE, macs) LOG.info(_LI('Whitelisting MAC\'s %(macs)s for node %(node)s on the' ' firewall') % {'macs': macs, 'node': node_info.uuid}) firewall.update_filters(ironic) attrs = node_info.attributes if CONF.processing.node_not_found_hook is None and not attrs: raise utils.Error( _('No lookup attributes were found for node %s, inspector won\'t ' 'be able to find it after introspection. Consider creating ' 'ironic ports or providing an IPMI address.') % node_info.uuid) LOG.info(_LI('The following attributes will be used for looking up ' 'node %(uuid)s: %(attrs)s'), {'attrs': attrs, 'uuid': node_info.uuid}) if not node_info.options.get('new_ipmi_credentials'): try: ironic.node.set_boot_device(node_info.uuid, 'pxe', persistent=False) except Exception as exc: LOG.warning(_LW('Failed to set boot device to PXE for' ' node %(node)s: %(exc)s') % {'node': node_info.uuid, 'exc': exc}) if re.match(CONF.introspection_delay_drivers, node_info.node().driver): LOG.debug('Attempting to acquire lock on last introspection time') with _LAST_INTROSPECTION_LOCK: delay = (_LAST_INTROSPECTION_TIME - time.time() + CONF.introspection_delay) if delay > 0: LOG.debug('Waiting %d seconds before sending the next ' 'node on introspection', delay) time.sleep(delay) _LAST_INTROSPECTION_TIME = time.time() try: ironic.node.set_power_state(node_info.uuid, 'reboot') except Exception as exc: raise utils.Error(_('Failed to power on node %(node)s,' ' check it\'s power ' 'management configuration:\n%(exc)s') % {'node': node_info.uuid, 'exc': exc}) LOG.info(_LI('Introspection started successfully for node %s'), node_info.uuid) else: LOG.info(_LI('Introspection environment is ready for node %(node)s, ' 'manual power on is required within %(timeout)d seconds') % {'node': node_info.uuid, 'timeout': CONF.timeout})
def _extract_node_driver_info(introspection_data):
    """Build the driver_info dict for a discovered node.

    :param introspection_data: introspection data for the discovered node
    :returns: dict with an 'ipmi_address' key when a BMC address was
        reported, otherwise an empty dict (with a warning logged)
    """
    bmc_address = utils.get_ipmi_address_from_data(introspection_data)
    if not bmc_address:
        LOG.warning(_LW('No BMC address provided, discovered node will be '
                        'created without ipmi address'))
        return {}
    return {'ipmi_address': bmc_address}
def before_update(self, introspection_data, node_info, **kwargs):
    """Detect a newly added disk and set it as the node's root device.

    On the first run, records the currently visible disk serials in
    ``node.extra``. On a later run, any single serial not seen before is
    assumed to be the newly installed root disk and is written into
    ``properties/root_device``; zero or multiple new serials abort the
    detection with a warning.
    """
    current_devices = self._get_serials(introspection_data)
    if not current_devices:
        LOG.warning(_LW('No block device was received from ramdisk'),
                    node_info=node_info, data=introspection_data)
        return

    node = node_info.node()
    if 'root_device' in node.properties:
        # nothing to do: a root device was already chosen
        LOG.info(_LI('Root device is already known for the node'),
                 node_info=node_info, data=introspection_data)
        return

    if 'block_devices' in node.extra:
        # Compare previously discovered devices with the current ones
        previous_devices = node.extra['block_devices']['serials']
        new_devices = [device for device in current_devices
                       if device not in previous_devices]

        if len(new_devices) > 1:
            LOG.warning(_LW('Root device cannot be identified because '
                            'multiple new devices were found'),
                        node_info=node_info, data=introspection_data)
            return
        elif len(new_devices) == 0:
            LOG.warning(_LW('No new devices were found'),
                        node_info=node_info, data=introspection_data)
            return

        # exactly one new device: record it and drop the bookkeeping entry
        node_info.patch([
            {'op': 'remove', 'path': '/extra/block_devices'},
            {'op': 'add', 'path': '/properties/root_device',
             'value': {'serial': new_devices[0]}}
        ])
    else:
        # No previously discovered devices - save the inspector block
        # devices in node.extra
        node_info.patch([{'op': 'add', 'path': '/extra/block_devices',
                          'value': {'serials': current_devices}}])
def _store_logs(introspection_data, node_info):
    """Store base64-encoded ramdisk logs from introspection data on disk.

    File name is rendered from ``ramdisk_logs_filename_format`` with the
    node UUID, PXE MAC, current UTC time and BMC address. A missing
    ``ramdisk_logs_dir`` option or a write failure is logged but not
    raised.
    """
    logs = introspection_data.get('logs')
    if not logs:
        LOG.warning(_LW('No logs were passed by the ramdisk'),
                    data=introspection_data, node_info=node_info)
        return

    if not CONF.processing.ramdisk_logs_dir:
        LOG.warning(_LW('Failed to store logs received from the ramdisk '
                        'because ramdisk_logs_dir configuration option '
                        'is not set'),
                    data=introspection_data, node_info=node_info)
        return

    # node_info may be None when logs arrive for an unidentified node
    fmt_args = {
        'uuid': node_info.uuid if node_info is not None else 'unknown',
        'mac': (utils.get_pxe_mac(introspection_data) or
                'unknown').replace(':', ''),
        'dt': datetime.datetime.utcnow(),
        'bmc': (utils.get_ipmi_address_from_data(introspection_data) or
                'unknown')
    }

    file_name = CONF.processing.ramdisk_logs_filename_format.format(**fmt_args)

    try:
        if not os.path.exists(CONF.processing.ramdisk_logs_dir):
            os.makedirs(CONF.processing.ramdisk_logs_dir)
        with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name),
                  'wb') as fp:
            fp.write(base64.b64decode(logs))
    except EnvironmentError:
        LOG.exception(_LE('Could not store the ramdisk logs'),
                      data=introspection_data, node_info=node_info)
    else:
        LOG.info(_LI('Ramdisk logs were stored in file %s'), file_name,
                 data=introspection_data, node_info=node_info)
def _detect_boot_mode(self, inventory, node_info, data=None):
    """Extract the current boot mode from the inventory, if reported.

    :param inventory: hardware inventory from the ramdisk
    :param node_info: node being introspected (for logging context)
    :param data: raw introspection data (for logging context)
    :returns: {'boot_mode': <mode>} when available, otherwise {}
    """
    mode = inventory.get('boot', {}).get('current_boot_mode')
    if mode is None:
        LOG.warning(_LW('No boot mode information available'),
                    data=data, node_info=node_info)
        return {}

    LOG.info(_LI('Boot mode was %s'), mode,
             data=data, node_info=node_info)
    return {'boot_mode': mode}
def init(self):
    """Initialize the service: middleware, DB, hooks, firewall, periodics.

    Exits the process when a configured processing hook fails to load.
    """
    if utils.get_auth_strategy() != 'noauth':
        utils.add_auth_middleware(app)
    else:
        LOG.warning(
            _LW('Starting unauthenticated, please check'
                ' configuration'))

    if CONF.processing.store_data == 'none':
        LOG.warning(
            _LW('Introspection data will not be stored. Change '
                '"[processing] store_data" option if this is not '
                'the desired behavior'))
    elif CONF.processing.store_data == 'swift':
        LOG.info(
            _LI('Introspection data will be stored in Swift in the '
                'container %s'), CONF.swift.container)

    utils.add_cors_middleware(app)

    db.init()

    try:
        hooks = [
            ext.name for ext in plugins_base.processing_hooks_manager()
        ]
    except KeyError as exc:
        # callback function raises MissingHookError derived from KeyError
        # on missing hook
        LOG.critical(_LC('Hook(s) %s failed to load or was not found'),
                     str(exc))
        sys.exit(1)

    LOG.info(_LI('Enabled processing hooks: %s'), hooks)

    if CONF.firewall.manage_firewall:
        firewall.init()

    self._periodics_worker = periodics.PeriodicWorker(
        callables=[(periodic_update, None, None),
                   (periodic_clean_up, None, None)],
        executor_factory=periodics.ExistingExecutor(utils.executor()))
    utils.executor().submit(self._periodics_worker.start)
def before_update(self, introspection_data, node_info, node_patches,
                  ports_patches, **kwargs):
    """Detect root disk from root device hints and IPA inventory.

    Stores the matching disk record in
    ``introspection_data['root_disk']``.

    :raises: utils.Error if the node has no disks or none of them
        satisfies the configured root device hints.
    """
    hints = node_info.node().properties.get('root_device')
    if not hints:
        LOG.debug('Root device hints are not provided for node %s',
                  node_info.uuid)
        return

    inventory = introspection_data.get('inventory')
    if not inventory:
        # Fixed: use the _LE marker to match LOG.error (the original used
        # the _LW warning marker with an error-level log call).
        LOG.error(
            _LE('Root device selection require ironic-python-agent '
                'as an inspection ramdisk'))
        # TODO(dtantsur): make it a real error in Mitaka cycle
        return

    disks = inventory.get('disks', [])
    if not disks:
        raise utils.Error(
            _('No disks found on a node %s') % node_info.uuid)

    for disk in disks:
        properties = disk.copy()
        # Root device hints are in GiB, data from IPA is in bytes
        properties['size'] //= units.Gi

        for name, value in hints.items():
            actual = properties.get(name)
            if actual != value:
                LOG.debug(
                    'Disk %(disk)s does not satisfy hint '
                    '%(name)s=%(value)s for node %(node)s, '
                    'actual value is %(actual)s',
                    {'disk': disk.get('name'), 'name': name,
                     'value': value, 'node': node_info.uuid,
                     'actual': actual})
                break
        else:
            # All hints matched this disk.
            LOG.debug(
                'Disk %(disk)s of size %(size)s satisfies '
                'root device hints for node %(node)s',
                {'disk': disk.get('name'), 'node': node_info.uuid,
                 'size': disk['size']})
            introspection_data['root_disk'] = disk
            return

    raise utils.Error(
        _('No disks satisfied root device hints for node %s')
        % node_info.uuid)
def before_update(self, introspection_data, node_info, **kwargs):
    """Stores the 'data' key from introspection_data in Swift.

    If the 'data' key exists, updates Ironic extra column
    'hardware_swift_object' key to the name of the Swift object, and
    stores the data in the 'inspector' container in Swift.  Otherwise, it
    does nothing.
    """
    try:
        data = introspection_data['data']
    except KeyError:
        LOG.warning(_LW('No extra hardware information was received from '
                        'the ramdisk'),
                    node_info=node_info, data=introspection_data)
        return

    name = 'extra_hardware-%s' % node_info.uuid
    self._store_extra_hardware(name, json.dumps(data))

    # NOTE(sambetts) If data is edeploy format, convert to dicts for rules
    # processing, store converted data in introspection_data['extra'].
    # Delete introspection_data['data'], it is assumed unusable
    # by rules.
    if self._is_edeploy_data(data):
        LOG.debug('Extra hardware data is in eDeploy format, '
                  'converting to usable format',
                  node_info=node_info, data=introspection_data)
        introspection_data['extra'] = self._convert_edeploy_data(data)
    else:
        LOG.warning(_LW('Extra hardware data was not in a recognised '
                        'format (eDeploy), and will not be forwarded to '
                        'introspection rules'),
                    node_info=node_info, data=introspection_data)

    LOG.debug('Deleting \"data\" key from introspection data as it is '
              'assumed unusable by introspection rules. Raw data is '
              'stored in swift',
              node_info=node_info, data=introspection_data)
    del introspection_data['data']

    node_info.patch([{'op': 'add',
                      'path': '/extra/hardware_swift_object',
                      'value': name}])
def create_ports(self, macs):
    """Create one or several ports for this node.

    A warning is issued if port already exists on a node.
    """
    for mac in macs:
        if mac in self.ports():
            LOG.warning(_LW('Port %s already exists, skipping'),
                        mac, node_info=self)
        else:
            self._create_port(mac)
def _create_port(self, mac):
    """Create an Ironic port for *mac*, tolerating a pre-existing one."""
    try:
        created = self.ironic.port.create(node_uuid=self.uuid, address=mac)
    except exceptions.Conflict:
        LOG.warning(_LW('Port %s already exists, skipping'), mac,
                    node_info=self)
        # NOTE(dtantsur): we didn't get port object back, so we have to
        # reload ports on next access
        self._ports = None
    else:
        self._ports[mac] = created
def delete_nodes_not_in_list(uuids):
    """Delete nodes which don't exist in Ironic node UUIDs.

    :param uuids: Ironic node UUIDs
    """
    stale_uuids = _list_node_uuids() - uuids
    for uuid in stale_uuids:
        LOG.warning(
            _LW('Node %s was deleted from Ironic, dropping from Ironic '
                'Inspector database'), uuid)
        _delete_node(uuid)
def _create_port(self, mac):
    """Create an Ironic port for *mac* on this node.

    Logs a warning and invalidates the cached port list when the port
    already exists.
    """
    try:
        created = self.ironic.port.create(node_uuid=self.uuid, address=mac)
    except exceptions.Conflict:
        LOG.warning(
            _LW('Port %(mac)s already exists for node %(uuid)s, '
                'skipping'), {'mac': mac, 'uuid': self.uuid})
        # NOTE(dtantsur): we didn't get port object back, so we have to
        # reload ports on next access
        self._ports = None
    else:
        self._ports[mac] = created
def create_ports(self, macs):
    """Create one or several ports for this node.

    A warning is issued if port already exists on a node.
    """
    for mac in macs:
        if mac in self.ports():
            LOG.warning(
                _LW('Port %(mac)s already exists for node %(uuid)s, '
                    'skipping'), {'mac': mac, 'uuid': self.uuid})
        else:
            self._create_port(mac)
def _create_port(self, mac, ironic=None):
    """Create an Ironic port for *mac*.

    :param ironic: optional Ironic client to use instead of the cached
        one (e.g. one pinned to a newer API version).
    """
    client = ironic or self.ironic
    try:
        created = client.port.create(node_uuid=self.uuid, address=mac)
    except exceptions.Conflict:
        LOG.warning(_LW('Port %s already exists, skipping'), mac,
                    node_info=self)
        # NOTE(dtantsur): we didn't get port object back, so we have to
        # reload ports on next access
        self._ports = None
    else:
        self._ports[mac] = created
def init():
    """Initialize the inspector service before it starts serving requests."""
    # Authentication middleware is only skipped in explicit noauth mode.
    if utils.get_auth_strategy() != 'noauth':
        utils.add_auth_middleware(app)
    else:
        LOG.warning(
            _LW('Starting unauthenticated, please check'
                ' configuration'))

    if CONF.processing.store_data == 'none':
        LOG.warning(
            _LW('Introspection data will not be stored. Change '
                '"[processing] store_data" option if this is not the '
                'desired behavior'))
    elif CONF.processing.store_data == 'swift':
        LOG.info(
            _LI('Introspection data will be stored in Swift in the '
                'container %s'), CONF.swift.container)

    db.init()

    try:
        # Force-load every processing hook so a missing or broken plugin
        # aborts start-up instead of failing during request processing.
        hooks = [ext.name for ext in plugins_base.processing_hooks_manager()]
    except KeyError as exc:
        # stevedore raises KeyError on missing hook
        LOG.critical(_LC('Hook %s failed to load or was not found'),
                     str(exc))
        sys.exit(1)
    LOG.info(_LI('Enabled processing hooks: %s'), hooks)

    if CONF.firewall.manage_firewall:
        firewall.init()
        period = CONF.firewall.firewall_update_period
        utils.spawn_n(periodic_update, period)

    # Nodes are only cleaned up when a positive introspection timeout is
    # configured.
    if CONF.timeout > 0:
        period = CONF.clean_up_period
        utils.spawn_n(periodic_clean_up, period)
    else:
        LOG.warning(_LW('Timeout is disabled in configuration'))
def before_update(self, introspection_data, node_info, **kwargs):
    """Turn PCI devices found by the ramdisk into node capabilities."""
    if 'pci_devices' not in introspection_data:
        # Only warn when the operator actually configured aliases.
        if CONF.pci_devices.alias:
            LOG.warning(_LW('No PCI devices information was received from '
                            'the ramdisk.'))
        return

    counts = self._found_pci_devices_count(
        introspection_data['pci_devices'])
    alias_count = {}
    for id_pair, count in counts.items():
        alias_count[self.aliases[id_pair]] = count

    if alias_count:
        node_info.update_capabilities(**alias_count)
        LOG.info(_LI('Found the following PCI devices: %s'), alias_count)
def rule_actions_manager():
    """Create a Stevedore extension manager for actions in rules."""
    global _ACTIONS_MGR
    if _ACTIONS_MGR is not None:
        return _ACTIONS_MGR

    _ACTIONS_MGR = stevedore.ExtensionManager(
        'ironic_inspector.rules.actions',
        invoke_on_load=True)
    for act in _ACTIONS_MGR:
        # a trick to detect if function was overriden
        if "rollback" in act.obj.__class__.__dict__:
            LOG.warning(
                _LW('Defining "rollback" for introspection rules '
                    'actions is deprecated (action "%s")'),
                act.name)
    return _ACTIONS_MGR
def before_update(self, introspection_data, node_info, **kwargs):
    """Process LLDP data and patch Ironic port local link connection"""
    inventory = utils.get_inventory(introspection_data)

    ironic_ports = node_info.ports()

    for iface in inventory['interfaces']:
        # Only handle interfaces the ramdisk actually reported on.
        if iface['name'] not in introspection_data['all_interfaces']:
            continue

        mac_address = iface['mac_address']
        port = ironic_ports.get(mac_address)
        if not port:
            LOG.debug(
                "Skipping LLC processing for interface %s, matching "
                "port not found in Ironic.", mac_address,
                node_info=node_info, data=introspection_data)
            continue

        lldp_data = iface.get('lldp')
        if lldp_data is None:
            LOG.warning(_LW("No LLDP Data found for interface %s"),
                        mac_address, node_info=node_info,
                        data=introspection_data)
            continue

        # Translate each LLDP TLV into a JSON patch on the port's
        # local_link_connection; unhandled TLVs yield None and are skipped.
        patches = []
        for tlv_type, tlv_value in lldp_data:
            patch = self._get_local_link_patch(tlv_type, tlv_value, port)
            if patch is not None:
                patches.append(patch)

        try:
            # NOTE(sambetts) We need a newer version of Ironic API for this
            # transaction, so create a new ironic client and explicitly
            # pass it into the function.
            cli = ironic.get_client(api_version=REQUIRED_IRONIC_VERSION)
            node_info.patch_port(port, patches, ironic=cli)
        except client_exc.NotAcceptable:
            LOG.error(_LE("Unable to set Ironic port local link "
                          "connection information because Ironic does not "
                          "support the required version"),
                      node_info=node_info, data=introspection_data)
            # NOTE(sambetts) May as well break out out of the loop here
            # because Ironic version is not going to change for the other
            # interfaces.
            break
def _validate_interfaces(self, interfaces, data=None):
    """Validate interfaces on correctness and suitability.

    :return: dict interface name -> dict with keys 'mac' and 'ip'
    """
    if not interfaces:
        raise utils.Error(_('No interfaces supplied by the ramdisk'),
                          data=data)

    pxe_mac = utils.get_pxe_mac(data)
    if not pxe_mac and CONF.processing.add_ports == 'pxe':
        LOG.warning(
            _LW('No boot interface provided in the introspection '
                'data, will add all ports with IP addresses'))

    suitable = {}
    for name, iface in interfaces.items():
        mac = iface.get('mac')
        ip = iface.get('ip')
        client_id = iface.get('client_id')

        # Loopback interfaces never become Ironic ports.
        is_loopback = (name == 'lo'
                       or bool(ip and netaddr.IPAddress(ip).is_loopback()))
        if is_loopback:
            LOG.debug('Skipping local interface %s', name, data=data)
            continue

        if (CONF.processing.add_ports == 'pxe' and pxe_mac
                and mac != pxe_mac):
            LOG.debug('Skipping interface %s as it was not PXE booting',
                      name, data=data)
            continue
        elif CONF.processing.add_ports != 'all' and not ip:
            LOG.debug('Skipping interface %s as it did not have '
                      'an IP address assigned during the ramdisk run',
                      name, data=data)
            continue

        suitable[name] = {'ip': ip, 'mac': mac.lower(),
                          'client_id': client_id}

    if not suitable:
        raise utils.Error(_('No suitable interfaces found in %s')
                          % interfaces, data=data)
    return suitable
def rule_actions_manager():
    """Create a Stevedore extension manager for actions in rules."""
    global _ACTIONS_MGR
    if _ACTIONS_MGR is None:
        mgr = stevedore.ExtensionManager(
            'ironic_inspector.rules.actions',
            invoke_on_load=True)
        for ext in mgr:
            # a trick to detect if function was overriden
            if "rollback" in vars(ext.obj.__class__):
                LOG.warning(_LW('Defining "rollback" for introspection rules '
                                'actions is deprecated (action "%s")'),
                            ext.name)
        _ACTIONS_MGR = mgr
    return _ACTIONS_MGR
def _get_interfaces(self, data=None):
    """Convert inventory to a dict with interfaces.

    :return: dict interface name -> dict with keys 'mac' and 'ip'
    """
    interfaces = {}
    inventory = utils.get_inventory(data)

    for record in inventory['interfaces']:
        name = record.get('name')
        mac = record.get('mac_address')
        ip = record.get('ipv4_address')
        client_id = record.get('client_id')

        if not name:
            LOG.error(_LE('Malformed interface record: %s'), record,
                      data=data)
            continue
        if not mac:
            LOG.debug('Skipping interface %s without link information',
                      name, data=data)
            continue
        if not netutils.is_valid_mac(mac):
            LOG.warning(_LW('MAC %(mac)s for interface %(name)s is '
                            'not valid, skipping'),
                        {'mac': mac, 'name': name}, data=data)
            continue

        mac = mac.lower()
        LOG.debug('Found interface %(name)s with MAC "%(mac)s", '
                  'IP address "%(ip)s" and client_id "%(client_id)s"',
                  {'name': name, 'mac': mac, 'ip': ip,
                   'client_id': client_id}, data=data)
        interfaces[name] = {'ip': ip, 'mac': mac, 'client_id': client_id}

    return interfaces
def init():
    """Set up middleware, DB, plugins and periodic tasks at start-up."""
    # Skip authentication middleware only when explicitly running noauth.
    if utils.get_auth_strategy() != 'noauth':
        utils.add_auth_middleware(app)
    else:
        LOG.warning(_LW('Starting unauthenticated, please check'
                        ' configuration'))

    if CONF.processing.store_data == 'none':
        LOG.warning(_LW('Introspection data will not be stored. Change '
                        '"[processing] store_data" option if this is not the '
                        'desired behavior'))
    elif CONF.processing.store_data == 'swift':
        LOG.info(_LI('Introspection data will be stored in Swift in the '
                     'container %s'), CONF.swift.container)

    db.init()

    try:
        # Loading all hooks eagerly makes plugin problems fatal at
        # start-up rather than surfacing during request processing.
        hooks = [ext.name for ext in plugins_base.processing_hooks_manager()]
    except KeyError as exc:
        # stevedore raises KeyError on missing hook
        LOG.critical(_LC('Hook %s failed to load or was not found'),
                     str(exc))
        sys.exit(1)
    LOG.info(_LI('Enabled processing hooks: %s'), hooks)

    if CONF.firewall.manage_firewall:
        firewall.init()
        period = CONF.firewall.firewall_update_period
        utils.spawn_n(periodic_update, period)

    # Clean-up only runs when a positive introspection timeout is set.
    if CONF.timeout > 0:
        period = CONF.clean_up_period
        utils.spawn_n(periodic_clean_up, period)
    else:
        LOG.warning(_LW('Timeout is disabled in configuration'))
def before_update(self, introspection_data, node_info, node_patches,
                  ports_patches, **kwargs):
    """Detect root disk from root device hints and IPA inventory.

    Stores the matching disk record in
    ``introspection_data['root_disk']``.

    :raises: utils.Error if the node has no disks or none of them
        satisfies the configured root device hints.
    """
    hints = node_info.node().properties.get('root_device')
    if not hints:
        LOG.debug('Root device hints are not provided for node %s',
                  node_info.uuid)
        return

    inventory = introspection_data.get('inventory')
    if not inventory:
        # Fixed: use the _LE marker to match LOG.error (the original used
        # the _LW warning marker with an error-level log call).
        LOG.error(_LE('Root device selection require ironic-python-agent '
                      'as an inspection ramdisk'))
        # TODO(dtantsur): make it a real error in Mitaka cycle
        return

    disks = inventory.get('disks', [])
    if not disks:
        raise utils.Error(_('No disks found on a node %s') %
                          node_info.uuid)

    for disk in disks:
        properties = disk.copy()
        # Root device hints are in GiB, data from IPA is in bytes
        properties['size'] //= units.Gi

        for name, value in hints.items():
            actual = properties.get(name)
            if actual != value:
                LOG.debug('Disk %(disk)s does not satisfy hint '
                          '%(name)s=%(value)s for node %(node)s, '
                          'actual value is %(actual)s',
                          {'disk': disk.get('name'), 'name': name,
                           'value': value, 'node': node_info.uuid,
                           'actual': actual})
                break
        else:
            # All hints matched this disk.
            LOG.debug('Disk %(disk)s of size %(size)s satisfies '
                      'root device hints for node %(node)s',
                      {'disk': disk.get('name'), 'node': node_info.uuid,
                       'size': disk['size']})
            introspection_data['root_disk'] = disk
            return

    raise utils.Error(_('No disks satisfied root device hints for node %s')
                      % node_info.uuid)
def _store_logs(self, logs, introspection_data):
    """Write base64-encoded ramdisk logs into ``ramdisk_logs_dir``.

    Does nothing (with a warning) when the option is unset.
    """
    log_dir = CONF.processing.ramdisk_logs_dir
    if not log_dir:
        LOG.warning(
            _LW('Failed to store logs received from the ramdisk '
                'because ramdisk_logs_dir configuration option '
                'is not set'))
        return

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    timestamp = datetime.datetime.utcnow().strftime(self.DATETIME_FORMAT)
    bmc_address = introspection_data.get('ipmi_address', 'unknown')
    file_name = 'bmc_%s_%s' % (bmc_address, timestamp)
    with open(os.path.join(log_dir, file_name), 'wb') as fp:
        fp.write(base64.b64decode(logs))
def before_update(self, introspection_data, node_info, **kwargs):
    """Process LLDP data and patch Ironic port local link connection"""
    inventory = utils.get_inventory(introspection_data)

    ironic_ports = node_info.ports()

    for iface in inventory['interfaces']:
        # Only handle interfaces the ramdisk actually reported on.
        if iface['name'] not in introspection_data['all_interfaces']:
            continue

        mac_address = iface['mac_address']
        port = ironic_ports.get(mac_address)
        if not port:
            LOG.debug("Skipping LLC processing for interface %s, matching "
                      "port not found in Ironic.", mac_address,
                      node_info=node_info, data=introspection_data)
            continue

        lldp_data = iface.get('lldp')
        if lldp_data is None:
            LOG.warning(_LW("No LLDP Data found for interface %s"),
                        mac_address, node_info=node_info,
                        data=introspection_data)
            continue

        # Build JSON patches for the port's local_link_connection from the
        # LLDP TLVs; unhandled TLVs yield None and are skipped.
        patches = []
        for tlv_type, tlv_value in lldp_data:
            patch = self._get_local_link_patch(tlv_type, tlv_value, port)
            if patch is not None:
                patches.append(patch)

        try:
            # NOTE(sambetts) We need a newer version of Ironic API for this
            # transaction, so create a new ironic client and explicitly
            # pass it into the function.
            cli = ironic.get_client(api_version=REQUIRED_IRONIC_VERSION)
            node_info.patch_port(port, patches, ironic=cli)
        except client_exc.NotAcceptable:
            LOG.error(_LE("Unable to set Ironic port local link "
                          "connection information because Ironic does not "
                          "support the required version"),
                      node_info=node_info, data=introspection_data)
            # NOTE(sambetts) May as well break out out of the loop here
            # because Ironic version is not going to change for the other
            # interfaces.
            break
def _detect_cpu_flags(self, inventory, node_info, data=None):
    """Map reported CPU flags onto configured capability names."""
    flags = inventory['cpu'].get('flags')
    if not flags:
        LOG.warning(_LW('No CPU flags available, please update your '
                        'introspection ramdisk'),
                    data=data, node_info=node_info)
        return {}

    flag_set = set(flags)
    caps = {name: 'true'
            for flag, name in CONF.capabilities.cpu_flags.items()
            if flag in flag_set}

    LOG.info(_LI('CPU capabilities: %s'), list(caps),
             data=data, node_info=node_info)
    return caps
def check_provision_state(node, with_credentials=False):
    """Ensure the node's provision state allows the requested operation.

    :raises: Error when the state is invalid for introspection or for
        setting IPMI credentials.
    """
    # Maintenance mode bypasses the state check entirely (deprecated).
    if node.maintenance:
        LOG.warning(
            _LW('Introspecting nodes in maintenance mode is deprecated, '
                'accepted states: %s'), VALID_STATES)
        return

    state = node.provision_state.lower()
    if with_credentials:
        if state not in SET_CREDENTIALS_VALID_STATES:
            raise Error(
                _('Invalid provision state "%(state)s" for setting IPMI '
                  'credentials on node %(node)s, valid states '
                  'are %(valid)s')
                % {'node': node.uuid, 'state': state,
                   'valid': list(SET_CREDENTIALS_VALID_STATES)})
    elif state not in VALID_STATES:
        raise Error(
            _('Invalid provision state "%(state)s" for introspection of '
              'node %(node)s, valid states are "%(valid)s"')
            % {'node': node.uuid, 'state': state,
               'valid': list(VALID_STATES)})
def _background_introspect_locked(ironic, node_info):
    """Prepare a locked node for introspection and optionally power it on.

    Whitelists the node's MACs on the firewall, validates that lookup
    attributes exist, then sets PXE boot and reboots the node unless new
    IPMI credentials were requested (in which case the operator must power
    it on manually).

    :raises: utils.Error if no lookup attributes are available or the
        node cannot be powered on.
    """
    # TODO(dtantsur): pagination
    macs = list(node_info.ports())
    if macs:
        node_info.add_attribute(node_cache.MACS_ATTRIBUTE, macs)
        LOG.info(_LI('Whitelisting MAC\'s %s on the firewall'), macs,
                 node_info=node_info)
        firewall.update_filters(ironic)

    attrs = node_info.attributes
    if CONF.processing.node_not_found_hook is None and not attrs:
        raise utils.Error(
            _('No lookup attributes were found, inspector won\'t '
              'be able to find it after introspection, consider creating '
              'ironic ports or providing an IPMI address'),
            node_info=node_info)

    LOG.info(_LI('The following attributes will be used for look up: %s'),
             attrs, node_info=node_info)

    if not node_info.options.get('new_ipmi_credentials'):
        try:
            ironic.node.set_boot_device(node_info.uuid, 'pxe',
                                        persistent=False)
        except Exception as exc:
            LOG.warning(_LW('Failed to set boot device to PXE: %s'),
                        exc, node_info=node_info)

        try:
            ironic.node.set_power_state(node_info.uuid, 'reboot')
        except Exception as exc:
            # Fixed: interpolate the exception into the message with
            # '% exc'.  The original passed exc as a second positional
            # argument, leaving '%s' unformatted -- every other
            # utils.Error call site pre-formats its message.
            raise utils.Error(
                _('Failed to power on the node, check it\'s '
                  'power management configuration: %s') % exc,
                node_info=node_info)
        LOG.info(_LI('Introspection started successfully'),
                 node_info=node_info)
    else:
        LOG.info(_LI('Introspection environment is ready, manual power on '
                     'is required within %d seconds'), CONF.timeout,
                 node_info=node_info)