def add_volume_type_access(self, name_or_id, project_id):
    """Grant access on a volume_type to a project.

    :param name_or_id: ID or name of a volume_type
    :param project_id: A project id

    NOTE: the call works even if the project does not exist.

    :raises: OpenStackCloudException on operation error.
    """
    vtype = self.get_volume_type(name_or_id)
    if not vtype:
        raise exc.OpenStackCloudException(
            "VolumeType not found: %s" % name_or_id)

    # The grant is expressed as a volume-type "action" in the block
    # storage API.
    url = '/types/{id}/action'.format(id=vtype.id)
    body = {'addProjectAccess': {'project': project_id}}
    error = ("Unable to authorize {project} "
             "to use volume type {name}".format(
                 name=name_or_id, project=project_id))
    proxy._json_response(
        self.block_storage.post(url, json=body),
        error_message=error)
def delete_stack(self, name_or_id, wait=False):
    """Delete a stack

    :param string name_or_id: Stack name or ID.
    :param boolean wait: Whether to wait for the delete to finish

    :returns: True if delete succeeded, False if the stack was not found.

    :raises: ``OpenStackCloudException`` if something goes wrong during
        the OpenStack API call
    """
    stack = self.get_stack(name_or_id, resolve_outputs=False)
    if stack is None:
        self.log.debug("Stack %s not found for deleting", name_or_id)
        return False

    marker = None
    if wait:
        # Remember the newest existing event so polling below can start
        # strictly after it.
        events = event_utils.get_events(
            self, name_or_id,
            event_args={'sort_dir': 'desc', 'limit': 1})
        if events:
            marker = events[0].id

    self.orchestration.delete_stack(stack)

    if wait:
        try:
            event_utils.poll_for_events(
                self, stack_name=name_or_id, action='DELETE',
                marker=marker)
        except exc.OpenStackCloudHTTPError:
            # The stack may vanish mid-poll; that still counts as deleted.
            pass
        stack = self.get_stack(name_or_id, resolve_outputs=False)
        if stack and stack['stack_status'] == 'DELETE_FAILED':
            raise exc.OpenStackCloudException(
                "Failed to delete stack {id}: {reason}".format(
                    id=name_or_id,
                    reason=stack['stack_status_reason']))
    return True
def shade_exceptions(error_message=None):
    """Context manager for dealing with shade exceptions.

    :param string error_message: String to use for the exception message
        content on non-OpenStackCloudExceptions.

    Useful for avoiding wrapping shade OpenStackCloudException exceptions
    within themselves. Code called from within the context may throw such
    exceptions without having to catch and reraise them.

    Non-OpenStackCloudException exceptions thrown within the context will
    be wrapped and the exception message will be appended to the given
    error message.
    """
    try:
        yield
    except exc.OpenStackCloudException:
        # Already the right type; let it propagate untouched.
        raise
    except Exception as e:
        # Wrap anything else, falling back to the original message when
        # no override was supplied.
        raise exc.OpenStackCloudException(
            error_message if error_message is not None else str(e))
def detach_volume(self, server, volume, wait=True, timeout=None):
    """Detach a volume from a server.

    :param server: The server dict to detach from.
    :param volume: The volume dict to detach.
    :param wait: If true, waits for volume to be detached.
    :param timeout: Seconds to wait for volume detachment. None is forever.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    url = ('/servers/{server_id}/os-volume_attachments/{volume_id}'
           .format(server_id=server['id'], volume_id=volume['id']))
    msg = ("Error detaching volume {volume} from server {server}".format(
        volume=volume['id'], server=server['id']))
    proxy._json_response(self.compute.delete(url), error_message=msg)

    if not wait:
        return
    for _ in utils.iterate_timeout(
            timeout,
            "Timeout waiting for volume %s to detach." % volume['id']):
        try:
            vol = self.get_volume(volume['id'])
        except Exception:
            # Transient lookup failures just mean we poll again.
            self.log.debug(
                "Error getting volume info %s", volume['id'],
                exc_info=True)
            continue

        if vol['status'] == 'available':
            return
        if vol['status'] == 'error':
            raise exc.OpenStackCloudException(
                "Error in detaching volume %s" % volume['id'])
def update_security_group(self, name_or_id, **kwargs):
    """Update a security group

    :param string name_or_id: Name or ID of the security group to update.
    :param string name: New name for the security group.
    :param string description: New description for the security group.

    :returns: A ``munch.Munch`` describing the updated security group.

    :raises: OpenStackCloudException on operation error.
    """
    if not self._has_secgroups():
        # Neither neutron nor nova security groups are available here.
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups")

    group = self.get_security_group(name_or_id)
    if group is None:
        raise exc.OpenStackCloudException(
            "Security group %s not found." % name_or_id)

    if self._use_neutron_secgroups():
        resp = self.network.put(
            '/security-groups/{sg_id}.json'.format(sg_id=group['id']),
            json={'security_group': kwargs})
        data = proxy._json_response(
            resp,
            error_message="Error updating security group {0}".format(
                name_or_id))
    else:
        # Nova requires name and description to be present, so backfill
        # them from the existing group when the caller omitted them.
        for key in ('name', 'description'):
            kwargs.setdefault(key, group[key])
        resp = self.compute.put(
            '/os-security-groups/{id}'.format(id=group['id']),
            json={'security_group': kwargs})
        data = proxy._json_response(resp)

    return self._normalize_secgroup(
        self._get_and_munchify('security_group', data))
def get_recordset(self, zone, name_or_id):
    """Get a recordset by name or ID.

    :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance
        of the zone managing the recordset.
    :param name_or_id: Name or ID of the recordset

    :returns: A recordset dict or None if no matching recordset is found.
    """
    # Accept either a resolved zone resource or something to look up.
    zone_obj = (zone if isinstance(zone, resource.Resource)
                else self.get_zone(zone))
    if not zone_obj:
        raise exc.OpenStackCloudException(
            "Zone %s not found." % zone)

    try:
        return self.dns.find_recordset(
            zone=zone_obj, name_or_id=name_or_id, ignore_missing=False)
    except Exception:
        # Any lookup failure is treated as "not found".
        return None
def update_recordset(self, zone, name_or_id, **kwargs):
    """Update a recordset.

    :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance
        of the zone managing the recordset.
    :param name_or_id: Name or ID of the recordset being updated.
    :param records: List of the recordset definitions
    :param description: Description of the recordset
    :param ttl: TTL (Time to live) value in seconds of the recordset

    :returns: a dict representing the updated recordset.

    :raises: OpenStackCloudException on operation error.
    """
    recordset = self.get_recordset(zone, name_or_id)
    if not recordset:
        raise exc.OpenStackCloudException(
            "Recordset %s not found." % name_or_id)
    return self.dns.update_recordset(recordset=recordset, **kwargs)
def update_cluster_receiver(self, name_or_id, new_name=None, action=None,
                            params=None):
    """Update a clustering receiver.

    :param name_or_id: Name or ID of the receiver to update.
    :param new_name: (optional) New name for the receiver.
    :param action: (optional) New action for the receiver.
    :param params: (optional) New params for the receiver.

    :returns: a ``munch.Munch`` with the update response.

    :raises: OpenStackCloudException if the receiver does not exist.
    """
    old_receiver = self.get_cluster_receiver(name_or_id)
    if old_receiver is None:
        raise exc.OpenStackCloudException(
            'Invalid receiver {receiver}'.format(receiver=name_or_id))

    # Only send the fields the caller actually asked to change.
    updates = {
        field: value
        for field, value in (('name', new_name),
                             ('action', action),
                             ('params', params))
        if value is not None
    }

    data = self._clustering_client.patch(
        "/receivers/{receiver_id}".format(receiver_id=old_receiver.id),
        json={'receiver': updates},
        error_message="Error updating receiver {name}".format(
            name=name_or_id))
    return self._get_and_munchify(key=None, data=data)
def update_zone(self, name_or_id, **kwargs):
    """Update a zone.

    :param name_or_id: Name or ID of the zone being updated.
    :param email: Email of the zone owner (only applies if zone_type is
        primary)
    :param description: Description of the zone
    :param ttl: TTL (Time to live) value in seconds
    :param masters: Master nameservers (only applies if zone_type is
        secondary)

    :returns: a dict representing the updated zone.

    :raises: OpenStackCloudException on operation error.
    """
    zone = self.get_zone(name_or_id)
    if not zone:
        raise exc.OpenStackCloudException(
            "Zone %s not found." % name_or_id)

    response = self._dns_client.patch(
        "/zones/{zone_id}".format(zone_id=zone['id']),
        json=kwargs,
        error_message="Error updating zone {0}".format(name_or_id))
    return self._get_and_munchify(key=None, data=response)
def range_filter(data, key, range_exp):
    """Filter a list by a single range expression.

    :param list data: List of dictionaries to be searched.
    :param string key: Key name to search within the data set.
    :param string range_exp: The expression describing the range of values.
        One of ``MIN``, ``MAX``, an exact number, or ``<``/``>``/``<=``/
        ``>=`` followed by a number (as understood by ``parse_range``).

    :returns: A list subset of the original data set.
    :raises: OpenStackCloudException on invalid range expressions.
    """
    range_exp = str(range_exp).upper()

    # MIN/MAX select every entry whose value equals the extreme value of
    # the data set; both cases share the same equality filter.
    if range_exp in ("MIN", "MAX"):
        if range_exp == "MIN":
            extreme = safe_dict_min(key, data)
        else:
            extreme = safe_dict_max(key, data)
        if extreme is None:
            return []
        return [d for d in data if int(d[key]) == extreme]

    # Not looking for a min or max, so a range or exact value must
    # have been supplied.
    val_range = parse_range(range_exp)
    if val_range is None:
        # If parsing the range fails, it must be a bad value.
        raise exc.OpenStackCloudException(
            "Invalid range value: {value}".format(value=range_exp))

    op, bound = val_range
    if op:
        # Range matching via a comparator dispatch table instead of a
        # repeated elif chain.
        comparators = {
            '<': lambda a, b: a < b,
            '>': lambda a, b: a > b,
            '<=': lambda a, b: a <= b,
            '>=': lambda a, b: a >= b,
        }
        compare = comparators.get(op)
        if compare is None:
            # Unknown operator matches nothing (mirrors the original
            # elif chain falling through without appending).
            return []
        return [d for d in data if compare(int(d[key]), bound)]

    # Exact number match
    return [d for d in data if int(d[key]) == bound]
def attach_volume(self, server, volume, device=None,
                  wait=True, timeout=None):
    """Attach a volume to a server.

    This will attach a volume, described by the passed in volume
    dict (as returned by get_volume()), to the server described by
    the passed in server dict (as returned by get_server()) on the
    named device on the server.

    If the volume is already attached to the server, or generally not
    available, then an exception is raised. To re-attach to a server,
    but under a different device, the user must detach it first.

    :param server: The server dict to attach to.
    :param volume: The volume dict to attach.
    :param device: The device name where the volume will attach.
    :param wait: If true, waits for volume to be attached.
    :param timeout: Seconds to wait for volume attachment. None is forever.

    :returns: a volume attachment object.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    existing_dev = self.get_volume_attach_device(volume, server['id'])
    if existing_dev:
        raise exc.OpenStackCloudException(
            "Volume %s already attached to server %s on device %s"
            % (volume['id'], server['id'], existing_dev))

    if volume['status'] != 'available':
        raise exc.OpenStackCloudException(
            "Volume %s is not available. Status is '%s'"
            % (volume['id'], volume['status']))

    attachment = {'volumeId': volume['id']}
    if device:
        attachment['device'] = device

    data = proxy._json_response(
        self.compute.post(
            '/servers/{server_id}/os-volume_attachments'.format(
                server_id=server['id']),
            json=dict(volumeAttachment=attachment)),
        error_message="Error attaching volume {volume_id} to server "
                      "{server_id}".format(volume_id=volume['id'],
                                           server_id=server['id']))

    if wait:
        for _ in utils.iterate_timeout(
                timeout,
                "Timeout waiting for volume %s to attach."
                % volume['id']):
            try:
                self.list_volumes.invalidate(self)
                vol = self.get_volume(volume['id'])
            except Exception:
                # Transient lookup failures just mean we poll again.
                self.log.debug(
                    "Error getting volume info %s", volume['id'],
                    exc_info=True)
                continue

            if self.get_volume_attach_device(vol, server['id']):
                break

            # TODO(Shrews) check to see if a volume can be in error status
            #              and also attached. If so, we should move this
            #              above the get_volume_attach_device call
            if vol['status'] == 'error':
                raise exc.OpenStackCloudException(
                    "Error in attaching volume %s" % volume['id'])

    return self._normalize_volume_attachment(
        self._get_and_munchify('volumeAttachment', data))
def create_volume(self, size, wait=True, timeout=None, image=None,
                  bootable=None, **kwargs):
    """Create a volume.

    :param size: Size, in GB of the volume to create.
    :param name: (optional) Name for the volume.
    :param description: (optional) Name for the volume.
    :param wait: If true, waits for volume to be created.
    :param timeout: Seconds to wait for volume creation. None is forever.
    :param image: (optional) Image name, ID or object from which to create
        the volume
    :param bootable: (optional) Make this volume bootable. If set, wait
        will also be set to true.
    :param kwargs: Keyword arguments as expected for cinder client.

    :returns: The created volume object.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    if bootable is not None:
        # Setting the bootable flag requires waiting for the volume.
        wait = True

    if image:
        image_obj = self.get_image(image)
        if not image_obj:
            raise exc.OpenStackCloudException(
                "Image {image} was requested as the basis for a new"
                " volume, but was not found on the cloud".format(
                    image=image))
        kwargs['imageRef'] = image_obj['id']

    kwargs = self._get_volume_kwargs(kwargs)
    kwargs['size'] = size
    payload = dict(volume=kwargs)
    if 'scheduler_hints' in kwargs:
        # Scheduler hints live at the top level of the request body, not
        # inside the 'volume' document; pop also removes them from the
        # aliased kwargs dict referenced by payload['volume'].
        payload['OS-SCH-HNT:scheduler_hints'] = kwargs.pop(
            'scheduler_hints', None)

    data = self._volume_client.post(
        '/volumes',
        json=dict(payload),
        error_message='Error in creating volume')
    volume = self._get_and_munchify('volume', data)
    self.list_volumes.invalidate(self)

    if volume['status'] == 'error':
        raise exc.OpenStackCloudException("Error in creating volume")

    if wait:
        vol_id = volume['id']
        for _ in utils.iterate_timeout(
                timeout,
                "Timeout waiting for the volume to be available."):
            volume = self.get_volume(vol_id)
            if not volume:
                continue

            if volume['status'] == 'available':
                if bootable is not None:
                    self.set_volume_bootable(volume, bootable=bootable)
                    # no need to re-fetch to update the flag, just set it.
                    volume['bootable'] = bootable
                return volume

            if volume['status'] == 'error':
                raise exc.OpenStackCloudException("Error creating volume")

    return self._normalize_volume(volume)
def _get_versioned_client(self, service_type, min_version=None,
                          max_version=None):
    # Build a _ShadeAdapter for service_type, negotiating between the
    # version the user configured (clouds.yaml / config) and the
    # min/max bounds the caller supports. Raises OpenStackCloudException
    # when the configured version falls outside [min_version, max_version].
    config_version = self.config.get_api_version(service_type)
    config_major = self._get_major_version_id(config_version)
    max_major = self._get_major_version_id(max_version)
    min_major = self._get_major_version_id(min_version)
    # TODO(shade) This should be replaced with use of Connection. However,
    #             we need to find a sane way to deal with this additional
    #             logic - or we need to give up on it. If we give up on it,
    #             we need to make sure we can still support it in the shade
    #             compat layer.
    # NOTE(mordred) This logic for versions is slightly different
    # than the ksa Adapter constructor logic. openstack.cloud knows the
    # versions it knows, and uses them when it detects them. However, if
    # a user requests a version, and it's not found, and a different one
    # openstack.cloud does know about is found, that's a warning in
    # openstack.cloud.
    if config_version:
        # Reject configured versions outside the supported major range.
        if min_major and config_major < min_major:
            raise exc.OpenStackCloudException(
                "Version {config_version} requested for {service_type}"
                " but shade understands a minimum of {min_version}".format(
                    config_version=config_version,
                    service_type=service_type,
                    min_version=min_version))
        elif max_major and config_major > max_major:
            raise exc.OpenStackCloudException(
                "Version {config_version} requested for {service_type}"
                " but openstack.cloud understands a maximum of"
                " {max_version}".format(config_version=config_version,
                                        service_type=service_type,
                                        max_version=max_version))
        # First try an adapter pinned to the configured version (allowing
        # anything up to <major>.latest).
        request_min_version = config_version
        request_max_version = '{version}.latest'.format(
            version=config_major)
        adapter = proxy._ShadeAdapter(
            session=self.session,
            service_type=self.config.get_service_type(service_type),
            service_name=self.config.get_service_name(service_type),
            interface=self.config.get_interface(service_type),
            endpoint_override=self.config.get_endpoint(service_type),
            region_name=self.config.get_region_name(service_type),
            statsd_prefix=self.config.get_statsd_prefix(),
            statsd_client=self.config.get_statsd_client(),
            prometheus_counter=self.config.get_prometheus_counter(),
            prometheus_histogram=self.config.get_prometheus_histogram(),
            min_version=request_min_version,
            max_version=request_max_version)
        if adapter.get_endpoint():
            return adapter

    # Fall back to an adapter bounded only by what the caller supports.
    adapter = proxy._ShadeAdapter(
        session=self.session,
        service_type=self.config.get_service_type(service_type),
        service_name=self.config.get_service_name(service_type),
        interface=self.config.get_interface(service_type),
        endpoint_override=self.config.get_endpoint(service_type),
        region_name=self.config.get_region_name(service_type),
        min_version=min_version,
        max_version=max_version)

    # data.api_version can be None if no version was detected, such
    # as with neutron
    api_version = adapter.get_api_major_version(
        endpoint_override=self.config.get_endpoint(service_type))
    api_major = self._get_major_version_id(api_version)

    # If we detect a different version that was configured, warn the user.
    # shade still knows what to do - but if the user gave us an explicit
    # version and we couldn't find it, they may want to investigate.
    if api_version and config_version and (api_major != config_major):
        warning_msg = (
            '{service_type} is configured for {config_version}'
            ' but only {api_version} is available. shade is happy'
            ' with this version, but if you were trying to force an'
            ' override, that did not happen. You may want to check'
            ' your cloud, or remove the version specification from'
            ' your config.'.format(
                service_type=service_type,
                config_version=config_version,
                api_version='.'.join([str(f) for f in api_version])))
        self.log.debug(warning_msg)
        warnings.warn(warning_msg)
    return adapter
def create_volume_backup(self, volume_id, name=None, description=None,
                         force=False, wait=True, timeout=None,
                         incremental=False, snapshot_id=None):
    """Create a volume backup.

    :param volume_id: the ID of the volume to backup.
    :param name: name of the backup, one will be generated if one is
        not provided
    :param description: description of the backup, one will be generated
        if one is not provided
    :param force: If set to True the backup will be created even if the
        volume is attached to an instance, if False it will not
    :param wait: If true, waits for volume backup to be created.
    :param timeout: Seconds to wait for volume backup creation. None is
        forever.
    :param incremental: If set to true, the backup will be incremental.
    :param snapshot_id: The UUID of the source snapshot to back up.

    :returns: The created volume backup object.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    body = {
        'backup': {
            'name': name,
            'volume_id': volume_id,
            'description': description,
            'force': force,
            'incremental': incremental,
            'snapshot_id': snapshot_id,
        }
    }
    data = proxy._json_response(
        self.block_storage.post('/backups', json=body),
        error_message="Error creating backup of volume "
                      "{volume_id}".format(volume_id=volume_id))
    backup = self._get_and_munchify('backup', data)

    if wait:
        backup_id = backup['id']
        msg = ("Timeout waiting for the volume backup {} to be "
               "available".format(backup_id))
        for _ in utils.iterate_timeout(timeout, msg):
            backup = self.get_volume_backup(backup_id)
            if backup['status'] == 'available':
                break
            if backup['status'] == 'error':
                raise exc.OpenStackCloudException(
                    "Error in creating volume backup {id}".format(
                        id=backup_id))

    return backup
def _set_interesting_networks(self):
    # Classify every network on the cloud into the buckets this cloud
    # object cares about (external/internal IPv4/IPv6, floating-IP
    # source, NAT destination, default network), validate the result
    # against the user's configuration, and cache it all on self.
    external_ipv4_networks = []
    external_ipv4_floating_networks = []
    internal_ipv4_networks = []
    external_ipv6_networks = []
    internal_ipv6_networks = []
    nat_destination = None
    nat_source = None
    default_network = None
    all_subnets = None  # lazily fetched only if NAT-dest inference is needed

    # Filter locally because we have an or condition
    try:
        # TODO(mordred): Rackspace exposes neutron but it does not
        # work. I think that overriding what the service catalog
        # reports should be a thing os-client-config should handle
        # in a vendor profile - but for now it does not. That means
        # this search_networks can just totally fail. If it does
        # though, that's fine, clearly the neutron introspection is
        # not going to work.
        all_networks = self.list_networks()
    except exc.OpenStackCloudException:
        self._network_list_stamp = True
        return

    for network in all_networks:

        # External IPv4 networks: explicit config match wins; otherwise
        # infer from router:external / provider:physical_network unless
        # the network is explicitly configured as internal.
        if (network['name'] in self._external_ipv4_names
                or network['id'] in self._external_ipv4_names):
            external_ipv4_networks.append(network)
        elif ((('router:external' in network
                and network['router:external'])
               or network.get('provider:physical_network'))
              and network['name'] not in self._internal_ipv4_names
              and network['id'] not in self._internal_ipv4_names):
            external_ipv4_networks.append(network)

        # Internal networks
        if (network['name'] in self._internal_ipv4_names
                or network['id'] in self._internal_ipv4_names):
            internal_ipv4_networks.append(network)
        elif (not network.get('router:external', False)
              and not network.get('provider:physical_network')
              and network['name'] not in self._external_ipv4_names
              and network['id'] not in self._external_ipv4_names):
            internal_ipv4_networks.append(network)

        # External IPv6 networks
        if (network['name'] in self._external_ipv6_names
                or network['id'] in self._external_ipv6_names):
            external_ipv6_networks.append(network)
        elif (network.get('router:external')
              and network['name'] not in self._internal_ipv6_names
              and network['id'] not in self._internal_ipv6_names):
            external_ipv6_networks.append(network)

        # Internal IPv6 networks
        if (network['name'] in self._internal_ipv6_names
                or network['id'] in self._internal_ipv6_names):
            internal_ipv6_networks.append(network)
        elif (not network.get('router:external', False)
              and network['name'] not in self._external_ipv6_names
              and network['id'] not in self._external_ipv6_names):
            internal_ipv6_networks.append(network)

        # External Floating IPv4 networks: a configured NAT source must
        # match exactly one network; with no config, any router:external
        # network is a candidate and the first one seen wins.
        if self._nat_source in (network['name'], network['id']):
            if nat_source:
                raise exc.OpenStackCloudException(
                    'Multiple networks were found matching'
                    ' {nat_net} which is the network configured'
                    ' to be the NAT source. Please check your'
                    ' cloud resources. It is probably a good idea'
                    ' to configure this network by ID rather than'
                    ' by name.'.format(nat_net=self._nat_source))
            external_ipv4_floating_networks.append(network)
            nat_source = network
        elif self._nat_source is None:
            if network.get('router:external'):
                external_ipv4_floating_networks.append(network)
                nat_source = nat_source or network

        # NAT Destination: same exactly-one rule when configured.
        if self._nat_destination in (network['name'], network['id']):
            if nat_destination:
                raise exc.OpenStackCloudException(
                    'Multiple networks were found matching'
                    ' {nat_net} which is the network configured'
                    ' to be the NAT destination. Please check your'
                    ' cloud resources. It is probably a good idea'
                    ' to configure this network by ID rather than'
                    ' by name.'.format(nat_net=self._nat_destination))
            nat_destination = network
        elif self._nat_destination is None:
            # TODO(mordred) need a config value for floating
            # ips for this cloud so that we can skip this
            # No configured nat destination, we have to figure
            # it out.
            if all_subnets is None:
                try:
                    all_subnets = self.list_subnets()
                except exc.OpenStackCloudException:
                    # Thanks Rackspace broken neutron
                    all_subnets = []

            for subnet in all_subnets:
                # TODO(mordred) trap for detecting more than
                # one network with a gateway_ip without a config
                if ('gateway_ip' in subnet and subnet['gateway_ip']
                        and network['id'] == subnet['network_id']):
                    # A network with a gatewayed subnet can route out,
                    # so it can serve as the NAT destination.
                    nat_destination = network
                    break

        # Default network: exactly-one rule when configured.
        if self._default_network in (network['name'], network['id']):
            if default_network:
                raise exc.OpenStackCloudException(
                    'Multiple networks were found matching'
                    ' {default_net} which is the network'
                    ' configured to be the default interface'
                    ' network. Please check your cloud resources.'
                    ' It is probably a good idea'
                    ' to configure this network by ID rather than'
                    ' by name.'.format(
                        default_net=self._default_network))
            default_network = network

    # Validate config vs. reality: every configured name must have
    # matched at least one discovered network.
    for net_name in self._external_ipv4_names:
        if net_name not in [net['name']
                            for net in external_ipv4_networks]:
            raise exc.OpenStackCloudException(
                "Networks: {network} was provided for external IPv4"
                " access and those networks could not be found".format(
                    network=net_name))

    for net_name in self._internal_ipv4_names:
        if net_name not in [net['name']
                            for net in internal_ipv4_networks]:
            raise exc.OpenStackCloudException(
                "Networks: {network} was provided for internal IPv4"
                " access and those networks could not be found".format(
                    network=net_name))

    for net_name in self._external_ipv6_names:
        if net_name not in [net['name']
                            for net in external_ipv6_networks]:
            raise exc.OpenStackCloudException(
                "Networks: {network} was provided for external IPv6"
                " access and those networks could not be found".format(
                    network=net_name))

    for net_name in self._internal_ipv6_names:
        if net_name not in [net['name']
                            for net in internal_ipv6_networks]:
            raise exc.OpenStackCloudException(
                "Networks: {network} was provided for internal IPv6"
                " access and those networks could not be found".format(
                    network=net_name))

    if self._nat_destination and not nat_destination:
        raise exc.OpenStackCloudException(
            'Network {network} was configured to be the'
            ' destination for inbound NAT but it could not be'
            ' found'.format(network=self._nat_destination))

    if self._nat_source and not nat_source:
        raise exc.OpenStackCloudException(
            'Network {network} was configured to be the'
            ' source for inbound NAT but it could not be'
            ' found'.format(network=self._nat_source))

    if self._default_network and not default_network:
        raise exc.OpenStackCloudException(
            'Network {network} was configured to be the'
            ' default network interface but it could not be'
            ' found'.format(network=self._default_network))

    # Cache the computed classification on the instance.
    self._external_ipv4_networks = external_ipv4_networks
    self._external_ipv4_floating_networks = external_ipv4_floating_networks
    self._internal_ipv4_networks = internal_ipv4_networks
    self._external_ipv6_networks = external_ipv6_networks
    self._internal_ipv6_networks = internal_ipv6_networks
    self._nat_destination_network = nat_destination
    self._nat_source_network = nat_source
    self._default_network_network = default_network
def unregister_machine(self, nics, uuid, wait=False, timeout=600):
    """Unregister Baremetal from Ironic

    Removes entries for Network Interfaces and baremetal nodes
    from an Ironic API

    :param nics: An array of strings that consist of MAC addresses
        to be removed.
    :param string uuid: The UUID of the node to be deleted.
    :param wait: Boolean value, defaults to false, if to block the method
        upon the final step of unregistering the machine.
    :param timeout: Integer value, representing seconds with a default
        value of 600, which controls the maximum amount of time to block
        the method's completion on.

    :raises: OpenStackCloudException on operation failure.
    """
    machine = self.get_machine(uuid)
    invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed']
    if machine['provision_state'] in invalid_states:
        raise exc.OpenStackCloudException(
            "Error unregistering node '%s' due to current provision "
            "state '%s'" % (uuid, machine['provision_state']))

    # NOTE(TheJulia): There is a high possibility of a lock being present
    # if the machine was just moved through the state machine. This was
    # previously concealed by exception retry logic that detected the
    # failure, and resubmitted the request in python-ironicclient.
    try:
        self.wait_for_baremetal_node_lock(machine, timeout=timeout)
    except exc.OpenStackCloudException as e:
        # Fixed typo in the user-facing message: "occured" -> "occurred".
        raise exc.OpenStackCloudException(
            "Error unregistering node '%s': Exception occurred while"
            " waiting to be able to proceed: %s" % (machine['uuid'], e))

    # Remove each port (NIC) first: look it up by MAC, then delete it,
    # retrying on conflict/unavailable since the conductor may hold locks.
    for nic in nics:
        port_msg = ("Error removing NIC {nic} from baremetal API for "
                    "node {uuid}").format(nic=nic, uuid=uuid)
        port_url = '/ports/detail?address={mac}'.format(mac=nic['mac'])
        port = self._baremetal_client.get(port_url, microversion=1.6,
                                          error_message=port_msg)
        port_url = '/ports/{uuid}'.format(
            uuid=port['ports'][0]['uuid'])
        _utils._call_client_and_retry(self._baremetal_client.delete,
                                      port_url, retry_on=[409, 503],
                                      error_message=port_msg)

    with _utils.shade_exceptions(
            "Error unregistering machine {node_id} from the baremetal "
            "API".format(node_id=uuid)):
        # NOTE(TheJulia): While this should not matter microversion wise,
        # ironic assumes all calls without an explicit microversion to be
        # version 1.0. Ironic expects to deprecate support for older
        # microversions in future releases, as such, we explicitly set
        # the version to what we have been using with the client library..
        version = "1.6"
        msg = "Baremetal machine failed to be deleted"
        url = '/nodes/{node_id}'.format(node_id=uuid)
        _utils._call_client_and_retry(self._baremetal_client.delete,
                                      url, retry_on=[409, 503],
                                      error_message=msg,
                                      microversion=version)

        if wait:
            for count in utils.iterate_timeout(
                    timeout,
                    "Timeout waiting for machine to be deleted"):
                if not self.get_machine(uuid):
                    break
def register_machine(self, nics, wait=False, timeout=3600,
                     lock_timeout=600, **kwargs):
    """Register Baremetal with Ironic

    Allows for the registration of Baremetal nodes with Ironic
    and population of pertinent node information or configuration
    to be passed to the Ironic API for the node.

    This method also creates ports for a list of MAC addresses passed
    in to be utilized for boot and potentially network configuration.

    If a failure is detected creating the network ports, any ports
    created are deleted, and the node is removed from Ironic.

    :param nics: An array of MAC addresses that represent the
        network interfaces for the node to be created.

        Example::

            [
                {'mac': 'aa:bb:cc:dd:ee:01'},
                {'mac': 'aa:bb:cc:dd:ee:02'}
            ]

    :param wait: Boolean value, defaulting to false, to wait for the node
        to reach the available state where the node can be provisioned. It
        must be noted, when set to false, the method will still wait for
        locks to clear before sending the next required command.
    :param timeout: Integer value, defaulting to 3600 seconds, for the
        wait state to reach completion.
    :param lock_timeout: Integer value, defaulting to 600 seconds, for
        locks to clear.
    :param kwargs: Key value pairs to be passed to the Ironic API,
        including uuid, name, chassis_uuid, driver_info, parameters.

    :raises: OpenStackCloudException on operation error.

    :returns: Returns a ``munch.Munch`` representing the new
        baremetal node.
    """
    msg = ("Baremetal machine node failed to be created.")
    port_msg = ("Baremetal machine port failed to be created.")
    url = '/nodes'
    # TODO(TheJulia): At some point we need to figure out how to
    # handle data across when the requestor is defining newer items
    # with the older api.
    machine = self._baremetal_client.post(url,
                                          json=kwargs,
                                          error_message=msg,
                                          microversion="1.6")
    created_nics = []
    try:
        # Create one ironic port per requested MAC address.
        for row in nics:
            payload = {'address': row['mac'],
                       'node_uuid': machine['uuid']}
            nic = self._baremetal_client.post('/ports',
                                              json=payload,
                                              error_message=port_msg)
            created_nics.append(nic['uuid'])
    except Exception as e:
        self.log.debug("ironic NIC registration failed", exc_info=True)
        # TODO(mordred) Handle failures here
        # Roll back: best-effort delete of any ports we created, then
        # always delete the node itself before re-raising.
        try:
            for uuid in created_nics:
                try:
                    port_url = '/ports/{uuid}'.format(uuid=uuid)
                    # NOTE(TheJulia): Added in hope that it is logged.
                    port_msg = ('Failed to delete port {port} for node '
                                '{node}').format(port=uuid,
                                                 node=machine['uuid'])
                    self._baremetal_client.delete(
                        port_url, error_message=port_msg)
                except Exception:
                    pass
        finally:
            version = "1.6"
            msg = "Baremetal machine failed to be deleted."
            url = '/nodes/{node_id}'.format(node_id=machine['uuid'])
            self._baremetal_client.delete(url,
                                          error_message=msg,
                                          microversion=version)
        raise exc.OpenStackCloudException(
            "Error registering NICs with the baremetal service: %s"
            % str(e))

    with _utils.shade_exceptions(
            "Error transitioning node to available state"):
        if wait:
            # Drive the node through the provision state machine until it
            # reaches 'available' (or fails).
            for count in utils.iterate_timeout(
                    timeout,
                    "Timeout waiting for node transition to "
                    "available state"):
                machine = self.get_machine(machine['uuid'])
                # Note(TheJulia): Per the Ironic state code, a node
                # that fails returns to enroll state, which means a failed
                # node cannot be determined at this point in time.
                if machine['provision_state'] in ['enroll']:
                    self.node_set_provision_state(machine['uuid'],
                                                  'manage')
                elif machine['provision_state'] in ['manageable']:
                    self.node_set_provision_state(machine['uuid'],
                                                  'provide')
                elif machine['last_error'] is not None:
                    raise exc.OpenStackCloudException(
                        "Machine encountered a failure: %s"
                        % machine['last_error'])
                # Note(TheJulia): Earlier versions of Ironic default to
                # None and later versions default to available up until
                # the introduction of enroll state.
                # Note(TheJulia): The node will transition through
                # cleaning if it is enabled, and we will wait for
                # completion.
                elif machine['provision_state'] in ['available', None]:
                    break
        else:
            if machine['provision_state'] in ['enroll']:
                self.node_set_provision_state(machine['uuid'], 'manage')
                # Note(TheJulia): We need to wait for the lock to clear
                # before we attempt to set the machine into provide state
                # which allows for the transition to available.
                for count in utils.iterate_timeout(
                        lock_timeout,
                        "Timeout waiting for reservation to clear "
                        "before setting provide state"):
                    machine = self.get_machine(machine['uuid'])
                    if (machine['reservation'] is None
                            and machine['provision_state'] != 'enroll'):
                        # NOTE(TheJulia): In this case, the node has
                        # has moved on from the previous state and is
                        # likely not being verified, as no lock is
                        # present on the node.
                        self.node_set_provision_state(
                            machine['uuid'], 'provide')
                        machine = self.get_machine(machine['uuid'])
                        break
                    elif machine['provision_state'] in [
                            'cleaning',
                            'available']:
                        break
                    elif machine['last_error'] is not None:
                        raise exc.OpenStackCloudException(
                            "Machine encountered a failure: %s"
                            % machine['last_error'])
    # NOTE(review): a string here presumably means an error payload was
    # returned instead of a node document — returned as-is.
    if not isinstance(machine, str):
        return self._normalize_machine(machine)
    else:
        return machine
def inspect_machine(self, name_or_id, wait=False, timeout=3600):
    """Run hardware inspection on a baremetal machine.

    Triggers the Ironic node inspection process in order to collect
    hardware metadata for the node. A node that is currently in the
    'available' state is temporarily moved to 'manageable' for the
    inspection and restored to 'available' afterwards.

    :param name_or_id: Name or UUID identifying the machine.
    :param wait: Whether to wait for the desired state to be reached
        or a failure to occur. (Accepted for interface compatibility;
        the underlying state-change calls always wait.)
    :param timeout: Seconds allowed for each state transition to
        complete, defaulting to 3600.
    :returns: ``munch.Munch`` representing the machine after the
        inspection workflow finishes.
    :raises: OpenStackCloudException if the node is in a state from
        which inspection cannot be started.
    """
    node = self.baremetal.get_node(name_or_id)

    # NOTE(TheJulia): 'available' nodes cannot be inspected directly;
    # they must first be returned to 'manageable', and moved back once
    # inspection completes.
    started_from_available = node.provision_state == 'available'
    if started_from_available:
        # Refuse to disturb a node that is backing an instance.
        if node.instance_id:
            raise exc.OpenStackCloudException(
                "Refusing to inspect available machine %(node)s "
                "which is associated with an instance "
                "(instance_uuid %(inst)s)" % {
                    'node': node.id,
                    'inst': node.instance_id
                })
        # State transitions take time; wait for this one to finish
        # before proceeding.
        node = self.baremetal.set_node_provision_state(
            node, 'manage', wait=True, timeout=timeout)

    if node.provision_state not in ('manageable', 'inspect failed'):
        raise exc.OpenStackCloudException(
            "Machine %(node)s must be in 'manageable', 'inspect failed' "
            "or 'available' provision state to start inspection, the "
            "current state is %(state)s" % {
                'node': node.id,
                'state': node.provision_state
            })

    node = self.baremetal.set_node_provision_state(
        node, 'inspect', wait=True, timeout=timeout)

    if started_from_available:
        # Put the node back into the state we found it in.
        node = self.baremetal.set_node_provision_state(
            node, 'provide', wait=True, timeout=timeout)

    return node._to_munch()
def create_security_group_rule(self,
                               secgroup_name_or_id,
                               port_range_min=None,
                               port_range_max=None,
                               protocol=None,
                               remote_ip_prefix=None,
                               remote_group_id=None,
                               direction='ingress',
                               ethertype='IPv4',
                               project_id=None):
    """Create a new security group rule.

    Works against either neutron or nova security groups, hiding the
    differences between the two backends (port wildcard encoding,
    supported protocols/directions) from the caller.

    :param string secgroup_name_or_id: The security group name or ID
        to associate with this security group rule. If a non-unique
        group name is given, an exception is raised.
    :param int port_range_min: The minimum port number in the range
        that is matched by the rule. If the protocol is TCP or UDP,
        this value must be less than or equal to port_range_max. With
        nova-backed security groups a value of None is transformed
        to -1.
    :param int port_range_max: The maximum port number in the range
        that is matched by the rule; constrained by port_range_min.
        With nova-backed security groups a value of None is
        transformed to -1.
    :param string protocol: The protocol matched by the rule. Valid
        values are None, tcp, udp, and icmp.
    :param string remote_ip_prefix: The remote IP prefix matched as
        the source address of the IP packet.
    :param string remote_group_id: The remote group ID to associate
        with this rule.
    :param string direction: 'ingress' or 'egress': the direction in
        which the rule is applied. Nova only supports ingress.
    :param string ethertype: Must be IPv4 or IPv6; CIDR addresses
        must match the ingress or egress rules.
    :param string project_id: Specify the project ID this security
        group will be created on (admin-only).
    :returns: A ``munch.Munch`` representing the new security group
        rule.
    :raises: OpenStackCloudException on operation error.
    """
    # Guard: the cloud may not offer security groups at all.
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups")

    secgroup = self.get_security_group(secgroup_name_or_id)
    if not secgroup:
        raise exc.OpenStackCloudException("Security group %s not found."
                                          % secgroup_name_or_id)

    if self._use_neutron_secgroups():
        # Neutron expresses "all ports" as None where nova uses -1;
        # normalize -1 values coming from nova-style callers.
        rule_def = dict(
            security_group_id=secgroup['id'],
            port_range_min=(
                None if port_range_min == -1 else port_range_min),
            port_range_max=(
                None if port_range_max == -1 else port_range_max),
            protocol=protocol,
            remote_ip_prefix=remote_ip_prefix,
            remote_group_id=remote_group_id,
            direction=direction,
            ethertype=ethertype,
        )
        if project_id is not None:
            rule_def['tenant_id'] = project_id
        return self.network.create_security_group_rule(**rule_def)

    # nova path below. Neutron accepts None for protocol; nova does not.
    if protocol is None:
        raise exc.OpenStackCloudException('Protocol must be specified')
    if direction == 'egress':
        self.log.debug(
            'Rule creation failed: Nova does not support egress rules')
        raise exc.OpenStackCloudException(
            'No support for egress rules')

    # Neutron accepts None for ports, but nova requires -1 as the
    # equivalent value for ICMP.
    #
    # For TCP/UDP, if both are None, neutron allows this and nova
    # represents this as all ports (1-65535). Nova does not accept
    # None values, so to hide this difference, automatically convert
    # to the full port range. If only a single port value is
    # specified, it will error as normal.
    if protocol == 'icmp':
        if port_range_min is None:
            port_range_min = -1
        if port_range_max is None:
            port_range_max = -1
    elif protocol in ('tcp', 'udp'):
        if port_range_min is None and port_range_max is None:
            port_range_min, port_range_max = 1, 65535

    rule = dict(
        parent_group_id=secgroup['id'],
        ip_protocol=protocol,
        from_port=port_range_min,
        to_port=port_range_max,
        cidr=remote_ip_prefix,
        group_id=remote_group_id,
    )
    if project_id is not None:
        rule['tenant_id'] = project_id

    data = proxy._json_response(
        self.compute.post(
            '/os-security-group-rules',
            json={'security_group_rule': rule}))
    return self._normalize_secgroup_rule(
        self._get_and_munchify('security_group_rule', data))
def _upload_image_task(self, name, filename, wait, timeout, meta,
                       **image_kwargs):
    """Upload an image using the glance task (import) workflow.

    Uploads the image file to object storage, then submits a glance
    'import' task referencing the uploaded object. Requires the cloud
    to offer an object-store service.

    :param name: Name for the image; also used as the object name.
    :param filename: Path of the local file to upload.
    :param wait: If True, poll the task until the import completes and
        return the resulting image; otherwise return the task record.
    :param timeout: Seconds to wait for the import before giving up.
    :param meta: Metadata to set on the resulting image.
    :param image_kwargs: Additional image properties. The 'properties'
        entry must contain the md5/sha256/object-location keys that
        the connection's checksum bookkeeping uses.
    :returns: The created image (``wait=True``) or the glance task.
    :raises: OpenStackCloudException if no object-store service is
        available or the import fails permanently.
    """
    if not self._connection.has_service('object-store'):
        raise exc.OpenStackCloudException(
            "The cloud {cloud} is configured to use tasks for image"
            " upload, but no object-store service is available."
            " Aborting.".format(cloud=self._connection.config.name))
    properties = image_kwargs.pop('properties', {})
    md5 = properties[self._connection._IMAGE_MD5_KEY]
    sha256 = properties[self._connection._IMAGE_SHA256_KEY]
    container = properties[self._connection._IMAGE_OBJECT_KEY].split(
        '/', 1)[0]
    image_kwargs.update(properties)
    # The import task flow determines these itself; sending them along
    # is not supported.
    image_kwargs.pop('disk_format', None)
    image_kwargs.pop('container_format', None)

    self._connection.create_container(container)
    self._connection.create_object(
        container, name, filename,
        md5=md5, sha256=sha256,
        metadata={self._connection._OBJECT_AUTOCREATE_KEY: 'true'},
        **{'content-type': 'application/octet-stream'})
    # TODO(mordred): Can we do something similar to what nodepool does
    # using glance properties to not delete then upload but instead make a
    # new "good" image and then mark the old one as "bad"
    task_args = dict(
        type='import',
        input=dict(
            import_from='{container}/{name}'.format(
                container=container, name=name),
            image_properties=dict(name=name)))
    data = self.post('/tasks', json=task_args)
    glance_task = self._connection._get_and_munchify(key=None, data=data)
    self._connection.list_images.invalidate(self)
    if wait:
        start = time.time()
        image_id = None
        for count in utils.iterate_timeout(
                timeout,
                "Timeout waiting for the image to import."):
            if image_id is None:
                response = self.get(
                    '/tasks/{id}'.format(id=glance_task.id))
                status = self._connection._get_and_munchify(
                    key=None, data=response)

            if status['status'] == 'success':
                image_id = status['result']['image_id']
                image = self._connection.get_image(image_id)
                # The task may report success slightly before the image
                # record is visible; keep polling until it appears.
                if image is None:
                    continue
                self.update_image_properties(
                    image=image, meta=meta, **image_kwargs)
                self._connection.log.debug(
                    "Image Task %s imported %s in %s",
                    glance_task.id, image_id, (time.time() - start))
                # Clean up after ourselves. The object we created is not
                # needed after the import is done.
                self._connection.delete_object(container, name)
                return self._connection.get_image(image_id)
            elif status['status'] == 'failure':
                if status['message'] == _IMAGE_ERROR_396:
                    # Known transient glance failure: resubmit the task.
                    # BUG FIX: the retry previously posted with
                    # ``data=task_args`` (unencoded payload) and stored
                    # the raw response in glance_task, so the next
                    # ``glance_task.id`` access would break. Post with
                    # ``json=`` and munchify, matching the initial
                    # submission above.
                    data = self.post('/tasks', json=task_args)
                    glance_task = self._connection._get_and_munchify(
                        key=None, data=data)
                    self._connection.list_images.invalidate(self)
                else:
                    # Clean up after ourselves. The image did not import
                    # and this isn't a 'just retry' error - glance didn't
                    # like the content. So we don't want to keep it for
                    # next time.
                    self._connection.delete_object(container, name)
                    raise exc.OpenStackCloudException(
                        "Image creation failed: {message}".format(
                            message=status['message']),
                        extra_data=status)
    else:
        return glance_task