def test_get_client_without_auth_token(self, mock_client, mock_load, mock_opts):
    """Client is created from a loaded session when no token is given."""
    session = mock.Mock()
    mock_load.return_value = session

    ir_utils.get_client(None)

    expected_kwargs = {
        'session': session,
        'region_name': 'somewhere',
        'os_ironic_api_version': ir_utils.DEFAULT_IRONIC_API_VERSION,
        'max_retries': CONF.ironic.max_retries,
        'retry_interval': CONF.ironic.retry_interval,
    }
    mock_client.assert_called_once_with(1, **expected_kwargs)
def test_get_client_without_auth_token(self, mock_client, mock_load, mock_opts, mock_adapter):
    """The adapter-resolved endpoint is passed when no token is given."""
    endpoint = 'http://127.0.0.1:6385'
    mock_adapter.return_value.get_endpoint.return_value = endpoint
    session = mock.Mock()
    mock_load.return_value = session

    ir_utils.get_client(None)

    expected_kwargs = {
        'session': session,
        'os_ironic_api_version': ir_utils.DEFAULT_IRONIC_API_VERSION,
        'max_retries': CONF.ironic.max_retries,
        'retry_interval': CONF.ironic.retry_interval,
    }
    mock_client.assert_called_once_with(1, endpoint, **expected_kwargs)
def test_get_client_without_auth_token(self, mock_client, mock_load, mock_opts, mock_adapter):
    """Without a token the adapter endpoint and session are forwarded."""
    sess = mock.Mock()
    mock_load.return_value = sess
    mock_adapter.return_value.get_endpoint.return_value = (
        'http://127.0.0.1:6385')

    ir_utils.get_client(None)

    mock_client.assert_called_once_with(
        1, 'http://127.0.0.1:6385',
        session=sess,
        os_ironic_api_version=ir_utils.DEFAULT_IRONIC_API_VERSION,
        max_retries=CONF.ironic.max_retries,
        retry_interval=CONF.ironic.retry_interval)
def test_get_client_without_auth_token(self, mock_client):
    """Without a token the client is built from configured credentials."""
    ir_utils.get_client(None)

    expected_kwargs = {
        'os_password': CONF.ironic.os_password,
        'os_username': CONF.ironic.os_username,
        'os_auth_url': CONF.ironic.os_auth_url,
        'os_tenant_name': CONF.ironic.os_tenant_name,
        'os_endpoint_type': CONF.ironic.os_endpoint_type,
        'os_service_type': CONF.ironic.os_service_type,
        'os_ironic_api_version': '1.11',
        'max_retries': CONF.ironic.max_retries,
        'retry_interval': CONF.ironic.retry_interval,
    }
    mock_client.assert_called_once_with(1, **expected_kwargs)
def test_get_client_with_auth_token(self, mock_keystone_client, mock_client):
    """With a token the ironic URL comes from the keystone catalog."""
    token = 'token'
    endpoint = 'http://127.0.0.1:6385'
    mock_keystone_client().service_catalog.url_for.return_value = endpoint

    ir_utils.get_client(token)

    expected_kwargs = {
        'os_auth_token': token,
        'ironic_url': endpoint,
        'os_ironic_api_version': '1.11',
        'max_retries': CONF.ironic.max_retries,
        'retry_interval': CONF.ironic.retry_interval,
    }
    mock_client.assert_called_once_with(1, **expected_kwargs)
def test_get_client_without_auth_token(self, mock_client, mock_load, mock_opts):
    """A loaded session plus config values are passed to the client."""
    sess = mock.Mock()
    mock_load.return_value = sess

    ir_utils.get_client(None)

    mock_client.assert_called_once_with(
        1,
        session=sess,
        region_name='somewhere',
        os_ironic_api_version=ir_utils.DEFAULT_IRONIC_API_VERSION,
        max_retries=CONF.ironic.max_retries,
        retry_interval=CONF.ironic.retry_interval)
def test_get_client_without_auth_token(self, mock_client):
    """All configured credential options are forwarded to the client."""
    ir_utils.get_client(None)

    mock_client.assert_called_once_with(
        1,
        os_password=CONF.ironic.os_password,
        os_username=CONF.ironic.os_username,
        os_auth_url=CONF.ironic.os_auth_url,
        os_tenant_name=CONF.ironic.os_tenant_name,
        os_endpoint_type=CONF.ironic.os_endpoint_type,
        os_service_type=CONF.ironic.os_service_type,
        os_ironic_api_version='1.11',
        max_retries=CONF.ironic.max_retries,
        retry_interval=CONF.ironic.retry_interval)
def test_get_client_with_auth_token(self, mock_client, mock_load, mock_opts, mock_adapter):
    """With a token, the token, session and endpoint reach the client."""
    token = 'token'
    sess = mock.Mock()
    mock_load.return_value = sess

    ir_utils.get_client(token)

    endpoint = mock_adapter.return_value.get_endpoint.return_value
    mock_client.assert_called_once_with(
        1,
        endpoint=endpoint,
        token=token,
        session=sess,
        os_ironic_api_version=ir_utils.DEFAULT_IRONIC_API_VERSION,
        max_retries=CONF.ironic.max_retries,
        retry_interval=CONF.ironic.retry_interval)
def test_get_client_with_auth_token(self, mock_keystone_client, mock_client):
    """The ironic URL is resolved from the keystone service catalog."""
    fake_token = 'token'
    fake_ironic_url = 'http://127.0.0.1:6385'
    catalog = mock_keystone_client().service_catalog
    catalog.url_for.return_value = fake_ironic_url

    ir_utils.get_client(fake_token)

    mock_client.assert_called_once_with(
        1,
        os_auth_token=fake_token,
        ironic_url=fake_ironic_url,
        os_ironic_api_version='1.11',
        max_retries=CONF.ironic.max_retries,
        retry_interval=CONF.ironic.retry_interval)
def _reapply(node_info):
    """Re-run introspection processing on stored data for a node.

    Runs in a background thread.  Every failure path records the error
    on the node via ``node_info.finished(error=...)`` instead of raising.
    """
    # runs in background
    try:
        introspection_data = _get_unprocessed_data(node_info.uuid)
    except Exception as exc:
        LOG.exception(_LE('Encountered exception while fetching '
                          'stored introspection data'),
                      node_info=node_info)
        msg = (_('Unexpected exception %(exc_class)s while fetching '
                 'unprocessed introspection data from Swift: %(error)s') %
               {'exc_class': exc.__class__.__name__, 'error': exc})
        node_info.finished(error=msg)
        return

    try:
        ironic = ir_utils.get_client()
    except Exception as exc:
        msg = _('Encountered an exception while getting the Ironic client: '
                '%s') % exc
        LOG.error(msg, node_info=node_info, data=introspection_data)
        # move the state machine to the error state before finishing
        node_info.fsm_event(istate.Events.error)
        node_info.finished(error=msg)
        return

    try:
        _reapply_with_data(node_info, introspection_data)
    except Exception as exc:
        node_info.finished(error=str(exc))
        return

    # power_off=False: presumably reapply must leave the node's power
    # state untouched — confirm against _finish
    _finish(node_info, ironic, introspection_data,
            power_off=False)

    LOG.info(_LI('Successfully reapplied introspection on stored '
                 'data'), node_info=node_info, data=introspection_data)
def introspect(node_id, manage_boot=True, token=None):
    """Initiate hardware properties introspection for a given node.

    :param node_id: node UUID or name
    :param manage_boot: whether to manage boot for this node
    :param token: authentication token
    :raises: Error
    """
    ironic = ir_utils.get_client(token)
    node = ir_utils.get_node(node_id, ironic=ironic)

    ir_utils.check_provision_state(node)
    if manage_boot:
        # fail early if the power interface does not validate
        validation = ironic.node.validate(node.uuid)
        if not validation.power['result']:
            msg = _('Failed validation of power interface, reason: %s')
            raise utils.Error(msg % validation.power['reason'],
                              node_info=node)

    bmc_address, bmc_ipv4, bmc_ipv6 = ir_utils.get_ipmi_address(node)
    # only non-empty addresses are used as lookup attributes
    lookup_attrs = list(filter(None, [bmc_ipv4, bmc_ipv6]))
    node_info = node_cache.start_introspection(node.uuid,
                                               bmc_address=lookup_attrs,
                                               manage_boot=manage_boot,
                                               ironic=ironic)

    # the actual introspection work happens in a background thread
    utils.executor().submit(_background_introspect, node_info, ironic)
def introspect(node_id, token=None):
    """Initiate hardware properties introspection for a given node.

    :param node_id: node UUID or name
    :param token: authentication token
    :raises: Error
    """
    ironic = ir_utils.get_client(token)
    node = ir_utils.get_node(node_id, ironic=ironic)

    ir_utils.check_provision_state(node)

    # fail early if the power interface does not validate
    validation = ironic.node.validate(node.uuid)
    if not validation.power['result']:
        msg = _('Failed validation of power interface, reason: %s')
        raise utils.Error(msg % validation.power['reason'],
                          node_info=node)

    bmc_address = ir_utils.get_ipmi_address(node)
    node_info = node_cache.start_introspection(node.uuid,
                                               bmc_address=bmc_address,
                                               ironic=ironic)

    def _handle_exceptions(fut):
        # done-callback: record any background failure on the node
        try:
            fut.result()
        except utils.Error as exc:
            # Logging has already happened in Error.__init__
            node_info.finished(error=str(exc))
        except Exception as exc:
            msg = _('Unexpected exception in background introspection thread')
            LOG.exception(msg, node_info=node_info)
            node_info.finished(error=msg)

    future = utils.executor().submit(_background_introspect, ironic,
                                     node_info)
    future.add_done_callback(_handle_exceptions)
def _process_node(node_info, node, introspection_data):
    """Run post-processing hooks and rules for an introspected node.

    Stores the data, re-syncs the PXE filter, applies introspection
    rules and schedules the finishing step in the background.

    :returns: a dict with the node uuid.
    """
    # NOTE(dtantsur): repeat the check in case something changed
    keep_power_on = ir_utils.check_provision_state(node)

    _run_post_hooks(node_info, introspection_data)
    store_introspection_data(node_info.uuid, introspection_data)

    ironic = ir_utils.get_client()
    pxe_filter.driver().sync(ironic)

    node_info.invalidate_cache()
    rules.apply(node_info, introspection_data)

    resp = {'uuid': node.id}

    # determine how to handle power
    if keep_power_on or not node_info.manage_boot:
        # power state must be preserved, or boot is not managed by us
        power_action = False
    else:
        power_action = CONF.processing.power_off
    utils.executor().submit(_finish, node_info, ironic, introspection_data,
                            power_off=power_action)

    return resp
def setUp(self):
    """Prepare a mocked Ironic client and the expected patch payloads."""
    super(Base, self).setUp()

    rules.delete_all()

    self.cli = ir_utils.get_client()
    self.cli.reset_mock()
    self.cli.node.get.return_value = self.node
    self.cli.node.update.return_value = self.node
    self.cli.node.list.return_value = [self.node]

    self.patch = [
        {'op': 'add', 'path': '/properties/cpus', 'value': '4'},
        {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
        {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
        {'path': '/properties/local_gb', 'value': '999', 'op': 'add'},
    ]
    self.patch_root_hints = [
        {'op': 'add', 'path': '/properties/cpus', 'value': '4'},
        {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
        {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
        {'path': '/properties/local_gb', 'value': '19', 'op': 'add'},
    ]

    self.node.power_state = 'power off'

    self.cfg = self.useFixture(config_fixture.Config())
    self.cfg.set_config_files([get_test_conf_file()])
def _process_node(node, introspection_data, node_info):
    """Run post-processing for an introspected node.

    :returns: a dict with the node uuid and, when new IPMI credentials
        were requested, the credentials included.
    """
    # NOTE(dtantsur): repeat the check in case something changed
    ir_utils.check_provision_state(node)

    node_info.create_ports(introspection_data.get('macs') or ())

    _run_post_hooks(node_info, introspection_data)
    _store_data(node_info, introspection_data)

    ironic = ir_utils.get_client()
    firewall.update_filters(ironic)

    node_info.invalidate_cache()
    rules.apply(node_info, introspection_data)

    resp = {'uuid': node.uuid}

    if node_info.options.get('new_ipmi_credentials'):
        # credential update requested: finish via a dedicated path
        new_username, new_password = (
            node_info.options.get('new_ipmi_credentials'))
        utils.executor().submit(_finish_set_ipmi_credentials,
                                ironic, node, node_info, introspection_data,
                                new_username, new_password)
        resp['ipmi_setup_credentials'] = True
        resp['ipmi_username'] = new_username
        resp['ipmi_password'] = new_password
    else:
        utils.executor().submit(_finish, ironic, node_info,
                                introspection_data)

    return resp
def _reapply(node_info, introspection_data=None):
    """Reapply introspection processing using stored data.

    Runs in a background thread; failures are recorded on the node
    instead of being raised.
    """
    # runs in background
    node_info.started_at = timeutils.utcnow()
    node_info.commit()

    try:
        ironic = ir_utils.get_client()
    except Exception as exc:
        msg = _('Encountered an exception while getting the Ironic client: '
                '%s') % exc
        LOG.error(msg, node_info=node_info, data=introspection_data)
        node_info.finished(istate.Events.error, error=msg)
        return

    try:
        _reapply_with_data(node_info, introspection_data)
    except Exception as exc:
        msg = (_('Failed reapply for node %(node)s, Error: '
                 '%(exc)s') % {'node': node_info.uuid, 'exc': exc})
        LOG.error(msg, node_info=node_info, data=introspection_data)
        return

    # power_off=False: presumably reapply must leave the node's power
    # state untouched — confirm against _finish
    _finish(node_info, ironic, introspection_data,
            power_off=False)

    LOG.info('Successfully reapplied introspection on stored '
             'data', node_info=node_info, data=introspection_data)
def periodic_clean_up():  # pragma: no cover
    """Periodic task: clean up expired cache entries and re-sync state.

    Each step is guarded independently so that a failure in the PXE
    filter sync does not prevent the ironic sync from running, and the
    log message identifies which step actually failed.
    """
    try:
        if node_cache.clean_up():
            # some nodes were cleaned up: the PXE filter must be updated
            pxe_filter.driver().sync(ir_utils.get_client())
    except Exception:
        LOG.exception('Periodic clean up of node cache failed')

    try:
        sync_with_ironic()
    except Exception:
        LOG.exception('Periodic sync of node list with ironic failed')
def introspect(node_id, manage_boot=True, token=None):
    """Initiate hardware properties introspection for a given node.

    :param node_id: node UUID or name
    :param manage_boot: whether to manage boot for this node
    :param token: authentication token
    :raises: Error
    """
    ironic = ir_utils.get_client(token)
    node = ir_utils.get_node(node_id, ironic=ironic)

    ir_utils.check_provision_state(node)
    if manage_boot:
        # fail early if the power interface does not validate
        try:
            ironic.validate_node(node.id, required='power')
        except os_exc.ValidationException as exc:
            msg = _('Failed validation of power interface: %s')
            raise utils.Error(msg % exc, node_info=node)

    bmc_address, bmc_ipv4, bmc_ipv6 = ir_utils.get_ipmi_address(node)
    # only non-empty addresses are used as lookup attributes
    lookup_attrs = list(filter(None, [bmc_ipv4, bmc_ipv6]))
    node_info = node_cache.start_introspection(node.id,
                                               bmc_address=lookup_attrs,
                                               manage_boot=manage_boot,
                                               ironic=ironic)

    if manage_boot:
        # boot managed by us: run the rest in the background
        try:
            utils.executor().submit(_do_introspect, node_info, ironic)
        except Exception as exc:
            msg = _('Failed to submit introspection job: %s')
            raise utils.Error(msg % exc, node_info=node)
    else:
        # boot managed externally: run synchronously
        _do_introspect(node_info, ironic)
def enroll_node_not_found_hook(introspection_data, **kwargs):
    """Hook: enroll a brand new node discovered during introspection."""
    ironic = ir_utils.get_client()
    driver_info = _extract_node_driver_info(introspection_data)

    attrs = CONF.discovery.enroll_node_fields.copy()
    attrs['driver_info'] = driver_info
    # NOTE(rpittau) by default, if the provision_state is None, it will
    # be set to 'available' by openstacksdk, blocking the inspection of the
    # node in this phase, as it's not a valid state for inspection.
    attrs['provision_state'] = 'enroll'
    driver = CONF.discovery.enroll_node_driver

    _check_existing_nodes(introspection_data, driver_info, ironic)

    LOG.debug('Creating discovered node with driver %(driver)s and '
              'attributes: %(attr)s',
              {'driver': driver, 'attr': attrs},
              data=introspection_data)

    # NOTE(aarefiev): This flag allows to distinguish enrolled manually
    # and auto-discovered nodes in the introspection rules.
    introspection_data['auto_discovered'] = True
    return node_cache.create_node(driver, ironic=ironic, **attrs)
def _process_node(node_info, node, introspection_data):
    """Run post-processing for an introspected node.

    :returns: a dict with the node uuid and, when new IPMI credentials
        were requested, the credentials included.
    """
    # NOTE(dtantsur): repeat the check in case something changed
    ir_utils.check_provision_state(node)

    interfaces = introspection_data.get('interfaces')
    node_info.create_ports(list(interfaces.values()))

    _run_post_hooks(node_info, introspection_data)
    _store_data(node_info, introspection_data)

    ironic = ir_utils.get_client()
    firewall.update_filters(ironic)

    node_info.invalidate_cache()
    rules.apply(node_info, introspection_data)

    resp = {'uuid': node.uuid}

    if node_info.options.get('new_ipmi_credentials'):
        # credential update requested: finish via a dedicated path
        new_username, new_password = (
            node_info.options.get('new_ipmi_credentials'))
        utils.executor().submit(_finish_set_ipmi_credentials,
                                node_info, ironic, node, introspection_data,
                                new_username, new_password)
        resp['ipmi_setup_credentials'] = True
        resp['ipmi_username'] = new_username
        resp['ipmi_password'] = new_password
    else:
        utils.executor().submit(_finish, node_info, ironic,
                                introspection_data,
                                power_off=CONF.processing.power_off)

    return resp
def get_periodic_sync_task(self):
    """Get periodic sync task for the filter.

    The periodic task returned is casting the InvalidFilterDriverState
    to the periodics.NeverAgain exception to quit looping.

    :raises: periodics.NeverAgain
    :returns: a periodic task to be run in the background.
    """
    client = ir_utils.get_client()

    def periodic_sync_task():
        try:
            self.sync(client)
        except InvalidFilterDriverState as exc:
            LOG.warning('Filter driver %s disabling periodic sync '
                        'task because of an invalid state.', self)
            raise periodics.NeverAgain(exc)

    # NOTE(milan): the periodic decorator doesn't support 0 as
    # a spacing value of (a switched off) periodic
    interval = CONF.pxe_filter.sync_period or float('inf')
    decorator = periodics.periodic(
        spacing=interval,
        enabled=bool(CONF.pxe_filter.sync_period))
    return decorator(periodic_sync_task)
def introspect(node_id, manage_boot=True, token=None):
    """Initiate hardware properties introspection for a given node.

    :param node_id: node UUID or name
    :param manage_boot: whether to manage boot for this node
    :param token: authentication token
    :raises: Error
    """
    ironic = ir_utils.get_client(token)
    node = ir_utils.get_node(node_id, ironic=ironic)

    ir_utils.check_provision_state(node)
    if manage_boot:
        # fail early if the power interface does not validate
        validation = ironic.node.validate(node.uuid)
        if not validation.power['result']:
            msg = _('Failed validation of power interface, reason: %s')
            raise utils.Error(msg % validation.power['reason'],
                              node_info=node)

    bmc_address = ir_utils.get_ipmi_address(node)
    node_info = node_cache.start_introspection(node.uuid,
                                               bmc_address=bmc_address,
                                               manage_boot=manage_boot,
                                               ironic=ironic)

    # the remaining work happens in a background thread
    utils.executor().submit(_background_introspect, node_info, ironic)
def test_get_client_with_auth_token(self, mock_client, mock_load, mock_opts):
    """The session endpoint and token are forwarded to the client."""
    token = 'token'
    endpoint = 'http://127.0.0.1:6385'
    session = mock.Mock()
    session.get_endpoint.return_value = endpoint
    mock_load.return_value = session

    ir_utils.get_client(token)

    session.get_endpoint.assert_called_once_with(
        endpoint_type=CONF.ironic.os_endpoint_type,
        service_type=CONF.ironic.os_service_type,
        region_name=CONF.ironic.os_region)
    mock_client.assert_called_once_with(
        1,
        token=token,
        endpoint=endpoint,
        os_ironic_api_version=ir_utils.DEFAULT_IRONIC_API_VERSION,
        max_retries=CONF.ironic.max_retries,
        retry_interval=CONF.ironic.retry_interval)
def update_filters(ironic=None):
    """Update firewall filter rules for introspection.

    Gives access to PXE boot port for any machine, except for those,
    whose MAC is registered in Ironic and is not on introspection right
    now.

    This function is called from both introspection initialization code and
    from periodic task. This function is supposed to be resistant to
    unexpected iptables state.

    ``init()`` function must be called once before any call to this
    function. This function is using ``eventlet`` semaphore to serialize
    access from different green threads.

    Does nothing, if firewall management is disabled in configuration.

    :param ironic: Ironic client instance, optional.
    """
    global BLACKLIST_CACHE, ENABLED

    if not CONF.firewall.manage_firewall:
        return

    assert INTERFACE is not None
    ironic = ir_utils.get_client() if ironic is None else ironic

    with LOCK:
        if not _should_enable_dhcp():
            _disable_dhcp()
            return

        ports_active = ironic.port.list(limit=0, fields=['address', 'extra'])
        macs_active = set(p.address for p in ports_active)
        # blacklist every known MAC that is not currently on introspection
        to_blacklist = macs_active - node_cache.active_macs()
        # presumably maps InfiniBand MACs to their remapped (client) MACs;
        # confirm against _ib_mac_to_rmac_mapping
        ib_mac_mapping = (
            _ib_mac_to_rmac_mapping(to_blacklist, ports_active))

        if (BLACKLIST_CACHE is not None and
                to_blacklist == BLACKLIST_CACHE and not ib_mac_mapping):
            LOG.debug('Not updating iptables - no changes in MAC list %s',
                      to_blacklist)
            return

        LOG.debug('Blacklisting active MAC\'s %s', to_blacklist)
        # Force update on the next iteration if this attempt fails
        BLACKLIST_CACHE = None

        with _temporary_chain(NEW_CHAIN, CHAIN):
            # - Blacklist active macs, so that nova can boot them
            for mac in to_blacklist:
                mac = ib_mac_mapping.get(mac) or mac
                _iptables('-A', NEW_CHAIN, '-m', 'mac',
                          '--mac-source', mac, '-j', 'DROP')
            # - Whitelist everything else
            _iptables('-A', NEW_CHAIN, '-j', 'ACCEPT')

        # Cache result of successful iptables update
        ENABLED = True
        BLACKLIST_CACHE = to_blacklist
def periodic_clean_up():  # pragma: no cover
    """Periodic task: drop timed-out cache entries and re-sync with ironic."""
    try:
        cleaned = node_cache.clean_up()
        if cleaned:
            pxe_filter.driver().sync(ir_utils.get_client())
    except Exception:
        LOG.exception('Periodic clean up of node cache failed')

    try:
        sync_with_ironic()
    except Exception:
        LOG.exception('Periodic sync of node list with ironic failed')
def test_get_client_with_auth_token(self, mock_client, mock_load, mock_opts):
    """Token-based client creation resolves the endpoint via the session."""
    fake_token = 'token'
    fake_ironic_url = 'http://127.0.0.1:6385'
    sess = mock.Mock()
    sess.get_endpoint.return_value = fake_ironic_url
    mock_load.return_value = sess

    ir_utils.get_client(fake_token)

    sess.get_endpoint.assert_called_once_with(
        endpoint_type=CONF.ironic.os_endpoint_type,
        service_type=CONF.ironic.os_service_type,
        region_name=CONF.ironic.os_region)
    expected = dict(
        token=fake_token,
        endpoint=fake_ironic_url,
        os_ironic_api_version=ir_utils.DEFAULT_IRONIC_API_VERSION,
        max_retries=CONF.ironic.max_retries,
        retry_interval=CONF.ironic.retry_interval)
    mock_client.assert_called_once_with(1, **expected)
def periodic_sync_task():
    """Periodic body: sync the filter, creating the client lazily.

    NOTE(review): relies on a ``_cached_client`` variable in the
    enclosing function's scope (``nonlocal``) — this definition is not
    standalone.
    """
    nonlocal _cached_client
    if _cached_client is None:
        # create the ironic client only on the first invocation
        _cached_client = ir_utils.get_client()

    try:
        self.sync(_cached_client)
    except InvalidFilterDriverState as e:
        LOG.warning('Filter driver %s disabling periodic sync '
                    'task because of an invalid state.', self)
        # NeverAgain stops the periodic task from being scheduled again
        raise periodics.NeverAgain(e)
def update_filters(ironic=None):
    """Update firewall filter rules for introspection.

    Gives access to PXE boot port for any machine, except for those,
    whose MAC is registered in Ironic and is not on introspection right
    now.

    This function is called from both introspection initialization code and
    from periodic task. This function is supposed to be resistant to
    unexpected iptables state.

    ``init()`` function must be called once before any call to this
    function. This function is using ``eventlet`` semaphore to serialize
    access from different green threads.

    Does nothing, if firewall management is disabled in configuration.

    :param ironic: Ironic client instance, optional.
    """
    global BLACKLIST_CACHE, ENABLED

    if not CONF.firewall.manage_firewall:
        return

    assert INTERFACE is not None
    ironic = ir_utils.get_client() if ironic is None else ironic

    with LOCK:
        if not _should_enable_dhcp():
            _disable_dhcp()
            return

        macs_active = set(p.address for p in ironic.port.list(limit=0))
        # blacklist every known MAC that is not currently on introspection
        to_blacklist = macs_active - node_cache.active_macs()

        if BLACKLIST_CACHE is not None and to_blacklist == BLACKLIST_CACHE:
            LOG.debug('Not updating iptables - no changes in MAC list %s',
                      to_blacklist)
            return

        LOG.debug('Blacklisting active MAC\'s %s', to_blacklist)
        # Force update on the next iteration if this attempt fails
        BLACKLIST_CACHE = None

        with _temporary_chain(NEW_CHAIN, CHAIN):
            # - Blacklist active macs, so that nova can boot them
            for mac in to_blacklist:
                _iptables('-A', NEW_CHAIN, '-m', 'mac',
                          '--mac-source', mac, '-j', 'DROP')
            # - Whitelist everything else
            _iptables('-A', NEW_CHAIN, '-j', 'ACCEPT')

        # Cache result of successful iptables update
        ENABLED = True
        BLACKLIST_CACHE = to_blacklist
def get_periodic_sync_task(self):
    """Get periodic sync task for the filter.

    :returns: a periodic task to be run in the background.
    """
    client = ir_utils.get_client()

    def _sync_once():
        self.sync(client)

    # NOTE(milan): the periodic decorator doesn't support 0 as
    # a spacing value of (a switched off) periodic
    return periodics.periodic(
        spacing=CONF.pxe_filter.sync_period or float('inf'),
        enabled=bool(CONF.pxe_filter.sync_period))(_sync_once)
def sync_with_ironic(conductor):
    """Drop cached nodes that no longer exist in ironic.

    Skipped when this conductor is not the coordination leader.
    """
    coordinator = conductor.coordinator
    if coordinator is not None and not coordinator.is_leader:
        LOG.debug('The conductor is not a leader, skipping syncing '
                  'with ironic')
        return

    LOG.debug('Syncing with ironic')
    ironic = ir_utils.get_client()
    # TODO(yuikotakada): pagination
    ironic_node_uuids = set()
    for node in ironic.nodes(fields=["uuid"], limit=None):
        ironic_node_uuids.add(node.id)
    node_cache.delete_nodes_not_in_list(ironic_node_uuids)
def before_update(self, introspection_data, node_info, **kwargs):
    """Process LLDP data and patch Ironic port local link connection"""
    inventory = utils.get_inventory(introspection_data)

    ironic_ports = node_info.ports()

    for iface in inventory['interfaces']:
        # only interfaces reported in 'all_interfaces' are considered
        if iface['name'] not in introspection_data['all_interfaces']:
            continue

        mac_address = iface['mac_address']
        port = ironic_ports.get(mac_address)
        if not port:
            LOG.debug("Skipping LLC processing for interface %s, matching "
                      "port not found in Ironic.", mac_address,
                      node_info=node_info, data=introspection_data)
            continue

        lldp_data = iface.get('lldp')
        if lldp_data is None:
            LOG.warning(_LW("No LLDP Data found for interface %s"),
                        mac_address, node_info=node_info,
                        data=introspection_data)
            continue

        # build one patch per recognised LLDP TLV
        patches = []
        for tlv_type, tlv_value in lldp_data:
            patch = self._get_local_link_patch(tlv_type, tlv_value, port)
            if patch is not None:
                patches.append(patch)

        try:
            # NOTE(sambetts) We need a newer version of Ironic API for this
            # transaction, so create a new ironic client and explicitly
            # pass it into the function.
            cli = ironic.get_client(api_version=REQUIRED_IRONIC_VERSION)
            node_info.patch_port(port, patches, ironic=cli)
        except client_exc.NotAcceptable:
            LOG.error(_LE("Unable to set Ironic port local link "
                          "connection information because Ironic does not "
                          "support the required version"),
                      node_info=node_info, data=introspection_data)
            # NOTE(sambetts) May as well break out out of the loop here
            # because Ironic version is not going to change for the other
            # interfaces.
            break
def introspect(uuid, new_ipmi_credentials=None, token=None):
    """Initiate hardware properties introspection for a given node.

    :param uuid: node uuid
    :param new_ipmi_credentials: tuple (new username, new password) or None
    :param token: authentication token
    :raises: Error
    """
    ironic = ir_utils.get_client(token)

    try:
        node = ironic.node.get(uuid)
    except exceptions.NotFound:
        raise utils.Error(_("Cannot find node %s") % uuid, code=404)
    except exceptions.HttpError as exc:
        raise utils.Error(_("Cannot get node %(node)s: %(exc)s") %
                          {'node': uuid, 'exc': exc})

    ir_utils.check_provision_state(node,
                                   with_credentials=new_ipmi_credentials)

    if new_ipmi_credentials:
        new_ipmi_credentials = (
            _validate_ipmi_credentials(node, new_ipmi_credentials))
    else:
        # no credential update requested: the power interface must validate
        validation = ironic.node.validate(node.uuid)
        if not validation.power['result']:
            msg = _('Failed validation of power interface, reason: %s')
            raise utils.Error(msg % validation.power['reason'],
                              node_info=node)

    bmc_address = ir_utils.get_ipmi_address(node)
    node_info = node_cache.add_node(node.uuid,
                                    bmc_address=bmc_address,
                                    ironic=ironic)
    node_info.set_option('new_ipmi_credentials', new_ipmi_credentials)

    def _handle_exceptions(fut):
        # done-callback: record any background failure on the node
        try:
            fut.result()
        except utils.Error as exc:
            # Logging has already happened in Error.__init__
            node_info.finished(error=str(exc))
        except Exception as exc:
            msg = _('Unexpected exception in background introspection thread')
            LOG.exception(msg, node_info=node_info)
            node_info.finished(error=msg)

    future = utils.executor().submit(_background_introspect, ironic,
                                     node_info)
    future.add_done_callback(_handle_exceptions)
def introspect(uuid, new_ipmi_credentials=None, token=None):
    """Initiate hardware properties introspection for a given node.

    :param uuid: node uuid
    :param new_ipmi_credentials: tuple (new username, new password) or None
    :param token: authentication token
    :raises: Error
    """
    ironic = ir_utils.get_client(token)

    try:
        node = ironic.node.get(uuid)
    except exceptions.NotFound:
        raise utils.Error(_("Cannot find node %s") % uuid, code=404)
    except exceptions.HttpError as exc:
        raise utils.Error(_("Cannot get node %(node)s: %(exc)s") %
                          {'node': uuid, 'exc': exc})

    ir_utils.check_provision_state(node,
                                   with_credentials=new_ipmi_credentials)

    if new_ipmi_credentials:
        new_ipmi_credentials = (
            _validate_ipmi_credentials(node, new_ipmi_credentials))
    else:
        # no credential update requested: the power interface must validate
        validation = ironic.node.validate(node.uuid)
        if not validation.power['result']:
            msg = _('Failed validation of power interface, reason: %s')
            raise utils.Error(msg % validation.power['reason'],
                              node_info=node)

    bmc_address = ir_utils.get_ipmi_address(node)
    node_info = node_cache.add_node(node.uuid,
                                    bmc_address=bmc_address,
                                    ironic=ironic)
    node_info.set_option('new_ipmi_credentials', new_ipmi_credentials)

    def _handle_exceptions(fut):
        # done-callback: record any background failure on the node
        try:
            fut.result()
        except utils.Error as exc:
            # Logging has already happened in Error.__init__
            node_info.finished(error=str(exc))
        except Exception as exc:
            msg = _('Unexpected exception in background introspection thread')
            LOG.exception(msg, node_info=node_info)
            node_info.finished(error=msg)

    future = utils.executor().submit(_background_introspect, ironic,
                                     node_info)
    future.add_done_callback(_handle_exceptions)
def init_filter(self):
    """Performs an initial sync with ironic and starts dnsmasq.

    The initial _sync() call reduces the chances dnsmasq might lose some
    inotify blacklist events by prefetching the blacklist before the
    dnsmasq is started.

    :raises: OSError, IOError.
    :returns: None.
    """
    _purge_dhcp_hostsdir()
    # prefetch the blacklist before dnsmasq starts watching the dir
    self._sync(ir_utils.get_client())
    _execute(CONF.dnsmasq_pxe_filter.dnsmasq_start_command)
    LOG.info('The dnsmasq PXE filter was initialized')
def init_filter(self):
    """Performs an initial sync with ironic and starts dnsmasq.

    The initial _sync() call reduces the chances dnsmasq might lose some
    inotify blacklist events by prefetching the blacklist before the
    dnsmasq is started.

    :raises: OSError, IOError.
    :returns: None.
    """
    _purge_dhcp_hostsdir()
    client = ir_utils.get_client()
    # prefetch the blacklist before dnsmasq starts watching the dir
    self._sync(client)
    _execute(CONF.dnsmasq_pxe_filter.dnsmasq_start_command)
    LOG.info('The dnsmasq PXE filter was initialized')
def _process_node(node, introspection_data, node_info):
    """Run post-processing for an introspected node.

    Optionally stores the data in Swift, updates the firewall, applies
    introspection rules and schedules the finishing step in the
    background.

    :returns: a dict with the node uuid and, when new IPMI credentials
        were requested, the credentials included.
    """
    # NOTE(dtantsur): repeat the check in case something changed
    ir_utils.check_provision_state(node)

    node_info.create_ports(introspection_data.get('macs') or ())

    _run_post_hooks(node_info, introspection_data)

    if CONF.processing.store_data == 'swift':
        # keep everything except the excluded keys
        stored_data = {k: v for k, v in introspection_data.items()
                       if k not in _STORAGE_EXCLUDED_KEYS}
        swift_object_name = swift.store_introspection_data(stored_data,
                                                           node_info.uuid)
        LOG.info(_LI('Introspection data was stored in Swift in object %s'),
                 swift_object_name, node_info=node_info,
                 data=introspection_data)
        if CONF.processing.store_data_location:
            # record the Swift object name on the node itself
            node_info.patch([{'op': 'add',
                              'path': '/extra/%s' %
                              CONF.processing.store_data_location,
                              'value': swift_object_name}])
    else:
        LOG.debug('Swift support is disabled, introspection data '
                  'won\'t be stored',
                  node_info=node_info, data=introspection_data)

    ironic = ir_utils.get_client()
    firewall.update_filters(ironic)

    node_info.invalidate_cache()
    rules.apply(node_info, introspection_data)

    resp = {'uuid': node.uuid}

    if node_info.options.get('new_ipmi_credentials'):
        # credential update requested: finish via a dedicated path
        new_username, new_password = (
            node_info.options.get('new_ipmi_credentials'))
        utils.executor().submit(_finish_set_ipmi_credentials,
                                ironic, node, node_info, introspection_data,
                                new_username, new_password)
        resp['ipmi_setup_credentials'] = True
        resp['ipmi_username'] = new_username
        resp['ipmi_password'] = new_password
    else:
        utils.executor().submit(_finish, ironic, node_info,
                                introspection_data)

    return resp
def before_update(self, introspection_data, node_info, **kwargs):
    """Process LLDP data and patch Ironic port local link connection"""
    inventory = utils.get_inventory(introspection_data)

    ironic_ports = node_info.ports()

    for iface in inventory['interfaces']:
        # only interfaces reported in 'all_interfaces' are considered
        if iface['name'] not in introspection_data['all_interfaces']:
            continue

        mac_address = iface['mac_address']
        port = ironic_ports.get(mac_address)
        if not port:
            LOG.debug("Skipping LLC processing for interface %s, matching "
                      "port not found in Ironic.", mac_address,
                      node_info=node_info, data=introspection_data)
            continue

        lldp_data = iface.get('lldp')
        if lldp_data is None:
            LOG.warning(_LW("No LLDP Data found for interface %s"),
                        mac_address, node_info=node_info,
                        data=introspection_data)
            continue

        # build one patch per recognised LLDP TLV
        patches = []
        for tlv_type, tlv_value in lldp_data:
            patch = self._get_local_link_patch(tlv_type, tlv_value, port)
            if patch is not None:
                patches.append(patch)

        try:
            # NOTE(sambetts) We need a newer version of Ironic API for this
            # transaction, so create a new ironic client and explicitly
            # pass it into the function.
            cli = ironic.get_client(api_version=REQUIRED_IRONIC_VERSION)
            node_info.patch_port(port, patches, ironic=cli)
        except client_exc.NotAcceptable:
            LOG.error(_LE("Unable to set Ironic port local link "
                          "connection information because Ironic does not "
                          "support the required version"),
                      node_info=node_info, data=introspection_data)
            # NOTE(sambetts) May as well break out out of the loop here
            # because Ironic version is not going to change for the other
            # interfaces.
            break
def _reapply(node_info):
    """Re-run introspection processing on stored data for a node.

    Runs in a background thread; failures are recorded on the node
    rather than raised.
    """
    # runs in background
    try:
        introspection_data = _get_unprocessed_data(node_info.uuid)
    except Exception as exc:
        LOG.exception(_LE('Encountered exception while fetching '
                          'stored introspection data'),
                      node_info=node_info)
        msg = (_('Unexpected exception %(exc_class)s while fetching '
                 'unprocessed introspection data from Swift: %(error)s') %
               {'exc_class': exc.__class__.__name__, 'error': exc})
        node_info.finished(error=msg)
        return

    failures = []
    _run_pre_hooks(introspection_data, failures)
    if failures:
        LOG.error(_LE('Pre-processing failures detected reapplying '
                      'introspection on stored data:\n%s'),
                  '\n'.join(failures), node_info=node_info)
        node_info.finished(error='\n'.join(failures))
        return

    try:
        ironic = ir_utils.get_client()
        node_info.create_ports(introspection_data.get('macs') or ())
        _run_post_hooks(node_info, introspection_data)
        _store_data(node_info, introspection_data)
        node_info.invalidate_cache()
        rules.apply(node_info, introspection_data)
        # power_off=False: presumably reapply must leave the node's
        # power state untouched — confirm against _finish
        _finish(ironic, node_info, introspection_data,
                power_off=False)
    except Exception as exc:
        LOG.exception(_LE('Encountered exception reapplying '
                          'introspection on stored data'),
                      node_info=node_info,
                      data=introspection_data)
        node_info.finished(error=str(exc))
    else:
        LOG.info(_LI('Successfully reapplied introspection on stored '
                     'data'), node_info=node_info, data=introspection_data)
def enroll_node_not_found_hook(introspection_data, **kwargs):
    """Hook: enroll a brand-new node discovered during introspection."""
    ironic = ir_utils.get_client()
    driver_info = _extract_node_driver_info(introspection_data)
    node_driver = CONF.discovery.enroll_node_driver

    _check_existing_nodes(introspection_data, driver_info, ironic)

    node_attr = {'driver_info': driver_info}
    LOG.debug('Creating discovered node with driver %(driver)s and '
              'attributes: %(attr)s',
              {'driver': node_driver, 'attr': node_attr},
              data=introspection_data)
    # NOTE(aarefiev): This flag allows to distinguish enrolled manually
    # and auto-discovered nodes in the introspection rules.
    introspection_data['auto_discovered'] = True

    return node_cache.create_node(node_driver, ironic=ironic, **node_attr)
def enroll_node_not_found_hook(introspection_data, **kwargs):
    """Create a node in Ironic for hardware found during introspection."""
    client = ir_utils.get_client()
    node_driver_info = _extract_node_driver_info(introspection_data)
    node_attr = {'driver_info': node_driver_info}
    node_driver = CONF.discovery.enroll_node_driver
    _check_existing_nodes(introspection_data, node_driver_info, client)
    LOG.debug('Creating discovered node with driver %(driver)s and '
              'attributes: %(attr)s',
              {'driver': node_driver, 'attr': node_attr},
              data=introspection_data)
    # NOTE(aarefiev): This flag allows to distinguish enrolled manually
    # and auto-discovered nodes in the introspection rules.
    introspection_data['auto_discovered'] = True
    return node_cache.create_node(node_driver, ironic=client, **node_attr)
def record_node(ironic=None, bmc_addresses=None, macs=None):
    """Create a cache record for a known active node.

    :param ironic: ironic client instance.
    :param bmc_addresses: list of BMC addresses.
    :param macs: list of MAC addresses.
    :return: NodeInfo
    """
    if not (bmc_addresses or macs):
        raise utils.NotFoundInCacheError(
            _("Existing node cannot be found since neither MAC addresses "
              "nor BMC addresses are present in the inventory"))

    if ironic is None:
        ironic = ir_utils.get_client()

    found = ir_utils.lookup_node(macs=macs, bmc_addresses=bmc_addresses,
                                 ironic=ironic)
    if not found:
        # Collapse the lists into printable strings for the error message.
        bmc_addresses = ', '.join(bmc_addresses) if bmc_addresses else None
        macs = ', '.join(macs) if macs else None
        raise utils.NotFoundInCacheError(
            _("Existing node was not found by MAC address(es) %(macs)s "
              "and BMC address(es) %(addr)s") %
            {'macs': macs, 'addr': bmc_addresses})

    node = ironic.node.get(found, fields=['uuid', 'provision_state'])
    # TODO(dtantsur): do we want to allow updates in all states?
    if node.provision_state not in ir_utils.VALID_ACTIVE_STATES:
        raise utils.Error(
            _("Node %(node)s is not active, its provision "
              "state is %(state)s") %
            {'node': node.uuid, 'state': node.provision_state})

    return add_node(node.uuid, istate.States.waiting,
                    manage_boot=False, mac=macs, bmc_address=bmc_addresses)
def abort(uuid, token=None):
    """Abort running introspection.

    :param uuid: node uuid
    :param token: authentication token
    :raises: Error
    """
    LOG.debug('Aborting introspection for node %s', uuid)
    ironic = ir_utils.get_client(token)
    node_info = node_cache.get_node(uuid, ironic=ironic, locked=False)

    # check pending operations
    if not node_info.acquire_lock(blocking=False):
        # Node busy --- cannot abort atm
        raise utils.Error(_('Node is locked, please, retry later'),
                          node_info=node_info, code=409)

    utils.executor().submit(_abort, node_info, ironic)
def abort(node_id, token=None):
    """Abort running introspection.

    :param node_id: node UUID or name
    :param token: authentication token
    :raises: Error
    """
    LOG.debug('Aborting introspection for node %s', node_id)
    client = ir_utils.get_client(token)
    node_info = node_cache.get_node(node_id, ironic=client)

    # check pending operations
    got_lock = node_info.acquire_lock(blocking=False)
    if not got_lock:
        # Node busy --- cannot abort atm
        raise utils.Error(_('Node is locked, please, retry later'),
                          node_info=node_info, code=409)

    utils.executor().submit(_abort, node_info, client)
def create_node(driver, ironic=None, **attributes):
    """Create ironic node and cache it.

    * Create new node in ironic.
    * Cache it in inspector.

    :param driver: driver for Ironic node.
    :param ironic: ironic client instance.
    :param attributes: dict, additional keyword arguments to pass
                       to the ironic client on node creation.
    :return: NodeInfo, or None in case error happened.
    """
    if ironic is None:
        ironic = ir_utils.get_client()

    try:
        node = ironic.node.create(driver=driver, **attributes)
    except exceptions.InvalidAttribute as e:
        LOG.error(_LE('Failed to create new node: %s'), e)
        return None

    LOG.info(_LI('Node %s was created successfully'), node.uuid)
    return add_node(node.uuid, ironic=ironic)
def _process_node(node_info, node, introspection_data):
    """Run post-processing hooks and finish introspection for a node."""
    # NOTE(dtantsur): repeat the check in case something changed
    ir_utils.check_provision_state(node)

    _run_post_hooks(node_info, introspection_data)
    _store_data(node_info.uuid, introspection_data)

    ironic = ir_utils.get_client()
    pxe_filter.driver().sync(ironic)
    node_info.invalidate_cache()
    rules.apply(node_info, introspection_data)

    result = {'uuid': node.uuid}
    utils.executor().submit(_finish, node_info, ironic, introspection_data,
                            power_off=CONF.processing.power_off)
    return result
def _reapply(node_info):
    """Re-run introspection hooks on previously stored data (background)."""
    try:
        introspection_data = _get_unprocessed_data(node_info.uuid)
    except Exception:
        LOG.exception(_LE('Encountered exception while fetching '
                          'stored introspection data'),
                      node_info=node_info)
        node_info.release_lock()
        return

    failures = []
    _run_pre_hooks(introspection_data, failures)
    if failures:
        error = '\n'.join(failures)
        LOG.error(_LE('Pre-processing failures detected reapplying '
                      'introspection on stored data:\n%s'),
                  error, node_info=node_info)
        node_info.finished(error=error)
        return

    try:
        ironic = ir_utils.get_client()
        node_info.create_ports(introspection_data.get('macs') or ())
        _run_post_hooks(node_info, introspection_data)
        _store_data(node_info, introspection_data)
        node_info.invalidate_cache()
        rules.apply(node_info, introspection_data)
        _finish(ironic, node_info, introspection_data, power_off=False)
    except Exception as exc:
        LOG.exception(_LE('Encountered exception reapplying '
                          'introspection on stored data'),
                      node_info=node_info, data=introspection_data)
        node_info.finished(error=str(exc))
    else:
        LOG.info(_LI('Successfully reapplied introspection on stored '
                     'data'),
                 node_info=node_info, data=introspection_data)
def get_periodic_sync_task(self):
    """Get periodic sync task for the filter.

    The periodic task returned is casting the InvalidFilterDriverState
    to the periodics.NeverAgain exception to quit looping.

    :raises: periodics.NeverAgain
    :returns: a periodic task to be run in the background.
    """
    client = ir_utils.get_client()

    def periodic_sync_task():
        try:
            self.sync(client)
        except InvalidFilterDriverState as e:
            LOG.warning('Filter driver %s disabling periodic sync '
                        'task because of an invalid state.', self)
            raise periodics.NeverAgain(e)

    # NOTE(milan): the periodic decorator doesn't support 0 as
    # a spacing value of (a switched off) periodic
    decorate = periodics.periodic(
        spacing=CONF.pxe_filter.sync_period or float('inf'),
        enabled=bool(CONF.pxe_filter.sync_period))
    return decorate(periodic_sync_task)
def ironic(self):
    """Ironic client instance (created lazily on first access)."""
    client = self._ironic
    if client is None:
        client = self._ironic = ir_utils.get_client()
    return client
def setUp(self):
    """Prepare the mocked ironic client and shared introspection fixtures."""
    super(Base, self).setUp()
    rules.delete_all()

    # The same node object backs get/update and is the only listed node.
    self.cli = ir_utils.get_client()
    self.cli.reset_mock()
    self.cli.node.get.return_value = self.node
    self.cli.node.update.return_value = self.node
    self.cli.node.list.return_value = [self.node]

    # Ramdisk data in the current, inventory-based format:
    # https://github.com/openstack/ironic-inspector/blob/master/HTTP-API.rst # noqa
    self.data = {
        'boot_interface': '01-' + self.macs[0].replace(':', '-'),
        'inventory': {
            'interfaces': [
                {'name': 'eth1', 'mac_address': self.macs[0],
                 'ipv4_address': '1.2.1.2'},
                {'name': 'eth2', 'mac_address': '12:12:21:12:21:12'},
                {'name': 'eth3', 'mac_address': self.macs[1],
                 'ipv4_address': '1.2.1.1'},
            ],
            'disks': [
                {'name': '/dev/sda', 'model': 'Big Data Disk',
                 'size': 1000 * units.Gi},
                {'name': '/dev/sdb', 'model': 'Small OS Disk',
                 'size': 20 * units.Gi},
            ],
            'cpu': {
                'count': 4,
                'architecture': 'x86_64'
            },
            'memory': {
                'physical_mb': 12288
            },
            'bmc_address': self.bmc_address
        },
        'root_disk': {'name': '/dev/sda', 'model': 'Big Data Disk',
                      'size': 1000 * units.Gi, 'wwn': None},
    }
    # Same hardware described in the legacy (pre-inventory) ramdisk format.
    self.data_old_ramdisk = {
        'cpus': 4,
        'cpu_arch': 'x86_64',
        'memory_mb': 12288,
        'local_gb': 464,
        'interfaces': {
            'eth1': {'mac': self.macs[0], 'ip': '1.2.1.2'},
            'eth2': {'mac': '12:12:21:12:21:12'},
            'eth3': {'mac': self.macs[1], 'ip': '1.2.1.1'},
        },
        'boot_interface': '01-' + self.macs[0].replace(':', '-'),
        'ipmi_address': self.bmc_address,
    }
    # Expected JSON-patch sequences applied to the node's properties;
    # they differ only in the /properties/local_gb value.
    self.patch = [
        {'op': 'add', 'path': '/properties/cpus', 'value': '4'},
        {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
        {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
        {'path': '/properties/local_gb', 'value': '999', 'op': 'add'}
    ]
    self.patch_old_ramdisk = [
        {'op': 'add', 'path': '/properties/cpus', 'value': '4'},
        {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
        {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
        {'path': '/properties/local_gb', 'value': '464', 'op': 'add'}
    ]
    self.patch_root_hints = [
        {'op': 'add', 'path': '/properties/cpus', 'value': '4'},
        {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
        {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
        {'path': '/properties/local_gb', 'value': '19', 'op': 'add'}
    ]
    self.node.power_state = 'power off'
def sync_with_ironic():
    """Drop cache records for nodes that no longer exist in Ironic."""
    client = ir_utils.get_client()
    # TODO(yuikotakada): pagination
    known_uuids = {node.uuid for node in client.node.list(limit=0)}
    node_cache.delete_nodes_not_in_list(known_uuids)