def release_node(self, tag, node_id):
    """Release the reservation held on a node.

    :param tag: the reservation holder expected to currently own the lock.
    :param node_id: the id or uuid of the node.
    :raises: NodeNotLocked if the node holds no reservation.
    :raises: NodeLocked if the node is reserved by a different holder.
    :raises: NodeNotFound if the node does not exist.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Node, session=session)
        query = add_identity_filter(query, node_id)
        # be optimistic and assume we usually release a reservation
        count = query.filter_by(reservation=tag).update(
            {'reservation': None}, synchronize_session=False)
        try:
            if count != 1:
                node = query.one()
                if node['reservation'] is None:
                    raise exception.NodeNotLocked(node=node_id)
                else:
                    raise exception.NodeLocked(node=node_id,
                                               host=node['reservation'])
        except NoResultFound:
            # NOTE: pass the id via the 'node' kwarg (not positionally)
            # so the exception's message template is formatted correctly,
            # matching every other NodeNotFound raise in this module.
            raise exception.NodeNotFound(node=node_id)
def reserve_node(self, tag, node_id):
    """Reserve (lock) a node for the given holder.

    :param tag: the reservation holder.
    :param node_id: the id or uuid of the node.
    :returns: the reserved node.
    :raises: NodeLocked if the node is already reserved.
    :raises: NodeNotFound if the node does not exist.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Node, session=session)
        query = add_identity_filter(query, node_id)
        # be optimistic and assume we usually create a reservation
        count = query.filter_by(reservation=None).update(
            {'reservation': tag}, synchronize_session=False)
        try:
            node = query.one()
            if count != 1:
                # Nothing updated and node exists. Must already be
                # locked.
                raise exception.NodeLocked(node=node_id,
                                           host=node['reservation'])
            return node
        except NoResultFound:
            # NOTE: pass the id via the 'node' kwarg (not positionally)
            # so the exception's message template is formatted correctly,
            # matching every other NodeNotFound raise in this module.
            raise exception.NodeNotFound(node=node_id)
def test_shared_lock_node_get_exception(self, get_portgroups_mock,
                                        get_ports_mock,
                                        build_driver_mock,
                                        reserve_mock, release_mock,
                                        node_get_mock):
    # If fetching the node fails while building a shared (lock-less)
    # task, the NodeNotFound error must propagate unchanged and no
    # further task setup may take place.
    node_get_mock.side_effect = exception.NodeNotFound(node='foo')
    self.assertRaises(exception.NodeNotFound,
                      task_manager.TaskManager,
                      self.context,
                      'fake-node-id',
                      shared=True)
    # Shared tasks never touch the reservation machinery, and a failed
    # node lookup must not leave a stray release call behind.
    self.assertFalse(reserve_mock.called)
    self.assertFalse(release_mock.called)
    node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
    # Setup aborted before port, portgroup and driver loading.
    self.assertFalse(get_ports_mock.called)
    self.assertFalse(get_portgroups_mock.called)
    self.assertFalse(build_driver_mock.called)
def _get_power_status(ssh_obj, driver_info):
    """Returns a node's current power state.

    :param ssh_obj: paramiko.SSHClient, an active ssh connection.
    :param driver_info: information for accessing the node.
    :returns: one of ironic.common.states POWER_OFF, POWER_ON.
    :raises: NodeNotFound
    """
    node_name = _get_hosts_name_for_node(ssh_obj, driver_info)
    if not node_name:
        # The host has no VM matching the node's MACs.
        err_msg = _LE('Node "%(host)s" with MAC address %(mac)s not found.')
        LOG.error(err_msg, {'host': driver_info['host'],
                            'mac': driver_info['macs']})

        raise exception.NodeNotFound(node=driver_info['host'])

    # Ask the host for its running vms. If the command set supports it,
    # target the desired node explicitly.
    cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
                             driver_info['cmd_set']['list_running'])
    cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', node_name)
    running_list = _ssh_execute(ssh_obj, cmd_to_exec)

    # Each entry is formatted output from the virt cli. The node name is
    # always quoted but may carry extra data, e.g. vbox emits
    # '"NodeName" {b43c4982-110c-4c29-9325-d5f41b053513}', so membership
    # ('in') is used rather than equality. A node absent from the list
    # is assumed to be powered off.
    quoted_node_name = '"%s"' % node_name
    if any(quoted_node_name in entry for entry in running_list if entry):
        return states.POWER_ON
    return states.POWER_OFF
def destroy_node(self, node_id):
    """Delete a node and all ports attached to it.

    :param node_id: the id or uuid of the node.
    :raises: NodeNotFound if the node does not exist.
    """
    with _session_for_write():
        node_query = model_query(models.Node)
        node_query = add_identity_filter(node_query, node_id)

        try:
            node_ref = node_query.one()
        except NoResultFound:
            raise exception.NodeNotFound(node=node_id)

        # Ports are keyed on the numeric node id, so resolve it when
        # the caller supplied a UUID.
        if uuidutils.is_uuid_like(node_id):
            node_id = node_ref['id']

        port_query = add_port_filter_by_node(model_query(models.Port),
                                             node_id)
        port_query.delete()

        node_query.delete()
def _do_update_node(self, node_id, values):
    """Apply *values* to a node under a row lock.

    :param node_id: the id or uuid of the node.
    :param values: dict of fields to update.
    :returns: the updated node reference.
    :raises: NodeNotFound if the node does not exist.
    :raises: NodeAssociated if values would overwrite an existing
        instance association.
    """
    session = get_session()
    with session.begin():
        query = add_identity_filter(
            model_query(models.Node, session=session), node_id)
        try:
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.NodeNotFound(node=node_id)

        # An already-associated node must not be silently re-associated.
        if values.get("instance_uuid") and ref.instance_uuid:
            raise exception.NodeAssociated(node=node_id,
                                           instance=ref.instance_uuid)

        # Any provision state change bumps the update timestamp.
        if 'provision_state' in values:
            values['provision_updated_at'] = timeutils.utcnow()

        ref.update(values)
    return ref
def _get_domain_by_macs(task):
    """Get the domain the host uses to reference the node.

    :param task: a TaskManager instance containing the node to act on
    :returns: the libvirt domain object.
    :raises: NodeNotFound if could not find a VM corresponding to any
        of the provided MACs.
    :raises: InvalidParameterValue if any connection parameters are
        incorrect or if failed to connect to the Libvirt uri.
    :raises: LibvirtError if failed to connect to the Libvirt uri.
    """
    driver_info = _parse_driver_info(task.node)
    conn = _get_libvirt_connection(driver_info)
    driver_info['macs'] = driver_utils.get_node_mac_addresses(task)
    node_macs = {_normalize_mac(mac) for mac in driver_info['macs']}

    full_node_list = conn.listAllDomains()
    for domain in full_node_list:
        # Use lazy %-style log arguments so the message is only rendered
        # when debug logging is actually enabled.
        LOG.debug("Checking Domain: %s's Mac address.", domain.name())
        parsed = ET.fromstring(domain.XMLDesc())
        domain_macs = {_normalize_mac(el.attrib['address'])
                       for el in parsed.iter('mac')}

        found_macs = domain_macs & node_macs  # this is intersection of sets
        if found_macs:
            LOG.debug("Found MAC addresses: %s for node: %s",
                      found_macs, driver_info['uuid'])
            return domain

    raise exception.NodeNotFound(
        _("Can't find domain with specified MACs: %(macs)s "
          "for node %(node)s.") % {'macs': driver_info['macs'],
                                   'node': driver_info['uuid']})
def get_rpc_node(node_ident):
    """Get the RPC node from the node uuid or logical name.

    :param node_ident: the UUID or logical name of a node.
    :returns: The RPC Node.
    :raises: InvalidUuidOrName if the name or uuid provided is not valid.
    :raises: NodeNotFound if the node is not found.
    """
    # A UUID-shaped identifier is always treated as a UUID.
    if uuidutils.is_uuid_like(node_ident):
        return objects.Node.get_by_uuid(pecan.request.context, node_ident)

    # Fall back to logical names when the client supports them; reject
    # anything that is neither a UUID nor a valid name.
    if allow_node_logical_names():
        if not is_valid_logical_name(node_ident):
            raise exception.InvalidUuidOrName(name=node_ident)
        return objects.Node.get_by_name(pecan.request.context, node_ident)

    # Ensure we raise the same exception as we did for the Juno release
    raise exception.NodeNotFound(node=node_ident)
def _find_node_by_macs(self, context, mac_addresses):
    """Given a list of MAC addresses, find the ports that match the MACs
    and return the node they are all connected to.

    :raises: NodeNotFound if the ports point to multiple nodes or no
        nodes.
    """
    ports = self._find_ports_by_macs(context, mac_addresses)
    if not ports:
        raise exception.NodeNotFound(
            _('No ports matching the given MAC addresses %sexist in the '
              'database.') % mac_addresses)

    node_id = self._get_node_id(ports)
    try:
        return objects.Node.get_by_id(context, node_id)
    except exception.NodeNotFound:
        # Re-raise after logging so the caller still sees the original
        # exception.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Could not find matching node for the '
                              'provided MACs %s.'), mac_addresses)
def release_nodes(self, tag, nodes):
    """Release reservations held by *tag* on each of *nodes*.

    :param tag: the reservation holder.
    :param nodes: iterable of node ids or uuids.
    :raises: NodeNotFound if any node does not exist.
    :raises: NodeLocked if any node is reserved by a different holder.
    """
    session = get_session()
    with session.begin():
        # TODO(deva): Optimize this by trying to release all the nodes
        #             at once, and fall back to releasing one at a time
        #             only if needed to determine the cause of an error.
        for node in nodes:
            query = add_identity_filter(
                model_query(models.Node, session=session), node)
            # be optimistic and assume we usually release a reservation
            count = query.filter_by(reservation=tag).update(
                {'reservation': None})
            if count == 1:
                continue
            # Nothing released: figure out why.
            try:
                ref = query.one()
            except NoResultFound:
                raise exception.NodeNotFound(node=node)
            if ref['reservation'] is not None:
                raise exception.NodeLocked(node=node)
def _get_node_id(self, ports): """Given a list of ports, either return the node_id they all share or raise a NotFound if there are multiple node_ids, which indicates some ports are connected to one node and the remaining port(s) are connected to one or more other nodes. :raises: NodeNotFound if the MACs match multiple nodes. This could happen if you swapped a NIC from one server to another and don't notify Ironic about it or there is a MAC collision (since they're not guaranteed to be unique). """ # See if all the ports point to the same node node_ids = set(port_ob.node_id for port_ob in ports) if len(node_ids) > 1: raise exception.NodeNotFound( _('Ports matching mac addresses match multiple nodes. MACs: ' '%(macs)s. Port ids: %(port_ids)s') % { 'macs': [port_ob.address for port_ob in ports], 'port_ids': [port_ob.uuid for port_ob in ports] }) # Only have one node_id left, return it. return node_ids.pop()
def destroy_node(self, node_id):
    """Delete a node along with everything attached to it.

    Removes the node's ports, portgroups, tags, volume connectors and
    volume targets before deleting the node row itself.

    :param node_id: the id or uuid of the node.
    :raises: NodeNotFound if the node does not exist.
    """
    with _session_for_write():
        node_query = add_identity_filter(model_query(models.Node),
                                         node_id)
        try:
            node_ref = node_query.one()
        except NoResultFound:
            raise exception.NodeNotFound(node=node_id)

        # Child rows are keyed on the numeric node id, so resolve it
        # when the caller supplied a UUID.
        if uuidutils.is_uuid_like(node_id):
            node_id = node_ref['id']

        port_query = add_port_filter_by_node(model_query(models.Port),
                                             node_id)
        port_query.delete()

        portgroup_query = add_portgroup_filter_by_node(
            model_query(models.Portgroup), node_id)
        portgroup_query.delete()

        # Delete all tags attached to the node
        model_query(models.NodeTag).filter_by(node_id=node_id).delete()

        model_query(models.VolumeConnector).filter_by(
            node_id=node_id).delete()

        model_query(models.VolumeTarget).filter_by(
            node_id=node_id).delete()

        node_query.delete()
def _do_update_node(self, node_id, values):
    """Update a node under a row lock, maintaining inspection timestamps.

    :param node_id: the id or uuid of the node to update.
    :param values: dict of fields to apply; may be augmented in place
        with provision/inspection timestamps.
    :returns: the updated node reference.
    :raises: NodeNotFound if the node does not exist.
    """
    with _session_for_write():
        query = model_query(models.Node)
        query = add_identity_filter(query, node_id)
        try:
            # Lock the row for the duration of the update.
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.NodeNotFound(node=node_id)

        if 'provision_state' in values:
            # Any provision state change bumps the update timestamp.
            values['provision_updated_at'] = timeutils.utcnow()
            if values['provision_state'] == states.INSPECTING:
                # Inspection is (re)starting: stamp the start time and
                # clear any previous finish time.
                values['inspection_started_at'] = timeutils.utcnow()
                values['inspection_finished_at'] = None
            elif (ref.provision_state == states.INSPECTING and
                  values['provision_state'] == states.MANAGEABLE):
                # Inspection finished successfully.
                values['inspection_finished_at'] = timeutils.utcnow()
                values['inspection_started_at'] = None
            elif (ref.provision_state == states.INSPECTING and
                  values['provision_state'] == states.INSPECTFAIL):
                # Inspection failed: only the start time is cleared.
                values['inspection_started_at'] = None

        ref.update(values)
    return ref
def _get_power_status(ssh_obj, driver_info):
    """Returns a node's current power state.

    :param ssh_obj: paramiko.SSHClient, an active ssh connection.
    :param driver_info: information for accessing the node.
    :returns: one of ironic.common.states POWER_OFF, POWER_ON.
    :raises: NodeNotFound
    """
    power_state = None
    node_name = _get_hosts_name_for_node(ssh_obj, driver_info)
    if node_name:
        # Get a list of vms running on the host. If the command supports
        # it, explicitly specify the desired node."
        cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
                                 driver_info['cmd_set']['list_running'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', node_name)
        running_list = _ssh_execute(ssh_obj, cmd_to_exec)

        # Command should return a list of running vms. If the current
        # node is not listed then we can assume it is not powered on.
        # Match on the quoted node name rather than a bare substring:
        # 'node1' would otherwise also match 'node10'. The virt clis
        # always quote the name but may append other data, e.g. vbox
        # returns '"NodeName" {b43c4982-110c-4c29-9325-d5f41b053513}',
        # so we must use the 'in' comparison here and not '=='.
        quoted_node_name = '"%s"' % node_name
        for node in running_list:
            if not node:
                continue
            if quoted_node_name in node:
                power_state = states.POWER_ON
                break
        if not power_state:
            power_state = states.POWER_OFF
    else:
        err_msg = _LE('Node "%(host)s" with MAC address %(mac)s not found.')
        LOG.error(err_msg, {'host': driver_info['host'],
                            'mac': driver_info['macs']})

        raise exception.NodeNotFound(node=driver_info['host'])

    return power_state
def _check_node_exists(self, node_id):
    """Raise NodeNotFound unless a node row with *node_id* exists."""
    node_exists = model_query(models.Node).filter_by(id=node_id).scalar()
    if not node_exists:
        raise exception.NodeNotFound(node=node_id)
def post(self, port):
    """Create a new port.

    :param port: a port within the request body.
    :raises: NotAcceptable, HTTPNotFound, Conflict
    """
    if self.parent_node_ident or self.parent_portgroup_ident:
        raise exception.OperationNotPermitted()

    # NOTE(lucasagomes): Create the node_id attribute on-the-fly
    #                    to satisfy the api -> rpc object
    #                    conversion.
    # NOTE(TheJulia): The get of the node *does* check if the node
    # can be accessed. We need to be able to get the node regardless
    # in order to perform the actual policy check.
    raise_node_not_found = False
    node = None
    owner = None
    lessee = None
    node_uuid = port.get('node_uuid')
    try:
        node = api_utils.replace_node_uuid_with_id(port)
        owner = node.owner
        lessee = node.lessee
    except exception.NotFound:
        raise_node_not_found = True

    # While the rule is for the port, the base object that controls access
    # is the node.
    api_utils.check_owner_policy('node', 'baremetal:port:create',
                                 owner, lessee=lessee, conceal_node=False)
    if raise_node_not_found:
        # Delayed raise of NodeNotFound because we want to check
        # the access policy first.
        raise exception.NodeNotFound(node=node_uuid,
                                     code=http_client.BAD_REQUEST)

    context = api.request.context
    self._check_allowed_port_fields(port)

    portgroup = None
    if port.get('portgroup_uuid'):
        try:
            portgroup = objects.Portgroup.get(api.request.context,
                                              port.pop('portgroup_uuid'))
            # A port may only join a portgroup on its own node.
            if portgroup.node_id != node.id:
                raise exception.BadRequest(_('Port can not be added to a '
                                             'portgroup belonging to a '
                                             'different node.'))
            # NOTE(lucasagomes): Create the portgroup_id attribute
            #                    on-the-fly to satisfy the api ->
            #                    rpc object conversion.
            port['portgroup_id'] = portgroup.id
        except exception.PortgroupNotFound as e:
            # Change error code because 404 (NotFound) is inappropriate
            # response for a POST request to create a Port
            e.code = http_client.BAD_REQUEST  # BadRequest
            raise e

    if port.get('is_smartnic'):
        # Smart NIC ports require port_id and hostname in their
        # local_link_connection.
        try:
            api_utils.LOCAL_LINK_SMART_NIC_VALIDATOR(
                'local_link_connection',
                port.get('local_link_connection'))
        except exception.Invalid:
            raise exception.Invalid(
                "Smart NIC port must have port_id "
                "and hostname in local_link_connection")

    # physical_network may be absent or None, but not an empty string.
    physical_network = port.get('physical_network')
    if physical_network is not None and not physical_network:
        raise exception.Invalid('A non-empty value is required when '
                                'setting physical_network')

    vif = api_utils.handle_post_port_like_extra_vif(port)

    if (portgroup and (port.get('pxe_enabled') or vif)):
        if not portgroup.standalone_ports_supported:
            msg = _("Port group %s doesn't support standalone ports. "
                    "This port cannot be created as a member of that "
                    "port group because either 'extra/vif_port_id' "
                    "was specified or 'pxe_enabled' was set to True.")
            raise exception.Conflict(msg % portgroup.uuid)

    # NOTE(yuriyz): UUID is mandatory for notifications payload
    if not port.get('uuid'):
        port['uuid'] = uuidutils.generate_uuid()

    rpc_port = objects.Port(context, **port)

    notify_extra = {'node_uuid': node.uuid,
                    'portgroup_uuid':
                        portgroup and portgroup.uuid or None}
    notify.emit_start_notification(context, rpc_port, 'create',
                                   **notify_extra)
    # Bracket the RPC call so an error notification is emitted on
    # failure.
    with notify.handle_error_notification(context, rpc_port, 'create',
                                          **notify_extra):
        topic = api.request.rpcapi.get_topic_for(node)
        new_port = api.request.rpcapi.create_port(context, rpc_port,
                                                  topic)
    notify.emit_end_notification(context, new_port, 'create',
                                 **notify_extra)

    # Set the HTTP Location Header
    api.response.location = link.build_url('ports', new_port.uuid)
    return convert_with_links(new_port)
def post(self, portgroup):
    """Create a new portgroup.

    :param portgroup: a portgroup within the request body.
    :raises: NotFound if portgroups are not supported by the
        requested API version.
    :raises: NodeNotFound if the referenced node does not exist.
    """
    if not api_utils.allow_portgroups():
        raise exception.NotFound()

    raise_node_not_found = False
    node = None
    owner = None
    lessee = None
    node_uuid = portgroup.get('node_uuid')
    try:
        # The replace_node_uuid_with_id also checks access to the node
        # and will raise an exception if access is not permitted.
        node = api_utils.replace_node_uuid_with_id(portgroup)
        owner = node.owner
        lessee = node.lessee
    except exception.NotFound:
        raise_node_not_found = True

    # While the rule is for the port, the base object that controls access
    # is the node.
    api_utils.check_owner_policy('node', 'baremetal:portgroup:create',
                                 owner, lessee=lessee, conceal_node=False)
    if raise_node_not_found:
        # Delayed raise of NodeNotFound because we want to check
        # the access policy first.
        raise exception.NodeNotFound(node=node_uuid,
                                     code=http_client.BAD_REQUEST)

    context = api.request.context

    if self.parent_node_ident:
        raise exception.OperationNotPermitted()

    # mode/properties are only available from a certain API version on.
    if (not api_utils.allow_portgroup_mode_properties() and
            (portgroup.get('mode') or portgroup.get('properties'))):
        raise exception.NotAcceptable()

    if (portgroup.get('name') and
            not api_utils.is_valid_logical_name(portgroup['name'])):
        error_msg = _("Cannot create portgroup with invalid name "
                      "'%(name)s'") % {'name': portgroup['name']}
        raise exception.ClientSideError(
            error_msg, status_code=http_client.BAD_REQUEST)

    # NOTE(yuriyz): UUID is mandatory for notifications payload
    if not portgroup.get('uuid'):
        portgroup['uuid'] = uuidutils.generate_uuid()

    new_portgroup = objects.Portgroup(context, **portgroup)

    notify.emit_start_notification(context, new_portgroup, 'create',
                                   node_uuid=node.uuid)
    # Bracket the create so an error notification is emitted on failure.
    with notify.handle_error_notification(context, new_portgroup,
                                          'create',
                                          node_uuid=node.uuid):
        new_portgroup.create()
    notify.emit_end_notification(context, new_portgroup, 'create',
                                 node_uuid=node.uuid)

    # Set the HTTP Location Header
    api.response.location = link.build_url('portgroups',
                                           new_portgroup.uuid)
    return convert_with_links(new_portgroup)
def _handle_node_lock_not_found(nodes, query, query_by):
    """Raise NodeNotFound for a requested node absent from *query*.

    :param nodes: the node identifiers that were requested.
    :param query: a query whose results are the nodes actually found.
    :param query_by: the field on each result to compare against *nodes*.
    :raises: NodeNotFound naming one of the missing identifiers.
    """
    found = {ref[query_by] for ref in query.all()}
    missing = set(nodes) - found
    raise exception.NodeNotFound(node=missing.pop())
def get_node_by_uuid(self, node_uuid):
    """Return the node with the given UUID.

    :param node_uuid: the uuid of the node.
    :raises: NodeNotFound if no such node exists.
    """
    try:
        return model_query(models.Node).filter_by(uuid=node_uuid).one()
    except NoResultFound:
        raise exception.NodeNotFound(node=node_uuid)