def set_volume_bootable(self, name_or_id, bootable=True):
    """Set a volume's bootable flag.

    :param name_or_id: Name, unique ID of the volume or a volume dict.
    :param bool bootable: Whether the volume should be bootable.
        (Defaults to True)

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    volume = self.get_volume(name_or_id)
    if not volume:
        raise exc.OpenStackCloudException(
            "Volume {name_or_id} does not exist".format(
                name_or_id=name_or_id))

    # The bootable flag is toggled through a volume "action" rather
    # than a plain volume update.
    response = self.block_storage.post(
        'volumes/{id}/action'.format(id=volume['id']),
        json={'os-set_bootable': {'bootable': bootable}})
    proxy._json_response(
        response,
        error_message="Error setting bootable on volume {volume}".format(
            volume=volume['id']))
def delete_volume_snapshot(self, name_or_id=None, wait=False,
                           timeout=None):
    """Delete a volume snapshot.

    :param name_or_id: Name or unique ID of the volume snapshot.
    :param wait: If true, waits for volume snapshot to be deleted.
    :param timeout: Seconds to wait for volume snapshot deletion. None
        is forever.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    snapshot = self.get_volume_snapshot(name_or_id)
    if not snapshot:
        # Nothing to delete; signal via the boolean return.
        return False

    proxy._json_response(
        self.block_storage.delete(
            '/snapshots/{snapshot_id}'.format(
                snapshot_id=snapshot['id'])),
        error_message="Error in deleting volume snapshot")

    if wait:
        timeout_msg = (
            "Timeout waiting for the volume snapshot to be deleted.")
        for _ in utils.iterate_timeout(timeout, timeout_msg):
            # Poll until the snapshot can no longer be looked up.
            if not self.get_volume_snapshot(snapshot['id']):
                break

    return True
def delete_security_group(self, name_or_id):
    """Delete a security group

    :param string name_or_id: The name or unique ID of the security group.

    :returns: True if delete succeeded, False otherwise.

    :raises: OpenStackCloudException on operation error.
    :raises: OpenStackCloudUnavailableFeature if security groups are not
             supported on this cloud.
    """
    # Security groups not supported
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups")

    # TODO(mordred): Let's come back and stop doing a GET before we do
    # the delete.
    secgroup = self.get_security_group(name_or_id)
    if secgroup is None:
        self.log.debug('Security group %s not found for deleting',
                       name_or_id)
        return False

    if not self._use_neutron_secgroups():
        # Fall back to the legacy nova-network security group API.
        proxy._json_response(
            self.compute.delete(
                '/os-security-groups/{id}'.format(id=secgroup['id'])))
        return True

    self.network.delete_security_group(
        secgroup['id'], ignore_missing=False)
    return True
def delete_volume(self, name_or_id=None, wait=True, timeout=None,
                  force=False):
    """Delete a volume.

    :param name_or_id: Name or unique ID of the volume.
    :param wait: If true, waits for volume to be deleted.
    :param timeout: Seconds to wait for volume deletion. None is forever.
    :param force: Force delete volume even if the volume is in deleting
        or error_deleting state.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    # Drop the cached listing first so get_volume sees fresh data.
    self.list_volumes.invalidate(self)
    volume = self.get_volume(name_or_id)

    if not volume:
        self.log.debug(
            "Volume %(name_or_id)s does not exist",
            {'name_or_id': name_or_id},
            exc_info=True)
        return False

    with _utils.shade_exceptions("Error in deleting volume"):
        try:
            if force:
                # Force-delete is exposed as an action POST, not a
                # plain DELETE on the volume resource.
                proxy._json_response(
                    self.block_storage.post(
                        'volumes/{id}/action'.format(id=volume['id']),
                        json={'os-force_delete': None}))
            else:
                proxy._json_response(
                    self.block_storage.delete(
                        'volumes/{id}'.format(id=volume['id'])))
        except exc.OpenStackCloudURINotFound:
            # The volume vanished between the GET above and the delete;
            # treat it the same as "did not exist".
            self.log.debug(
                "Volume {id} not found when deleting. Ignoring.".format(
                    id=volume['id']))
            return False

    # Invalidate again: the listing changed as a result of the delete.
    self.list_volumes.invalidate(self)
    if wait:
        for count in utils.iterate_timeout(
                timeout,
                "Timeout waiting for the volume to be deleted."):
            # Poll until the volume can no longer be found.
            if not self.get_volume(volume['id']):
                break

    return True
def update_security_group(self, name_or_id, **kwargs):
    """Update a security group

    :param string name_or_id: Name or ID of the security group to update.
    :param string name: New name for the security group.
    :param string description: New description for the security group.

    :returns: A ``munch.Munch`` describing the updated security group.

    :raises: OpenStackCloudException on operation error.
    """
    # Security groups not supported
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups")

    group = self.get_security_group(name_or_id)
    if group is None:
        raise exc.OpenStackCloudException(
            "Security group %s not found." % name_or_id)

    if self._use_neutron_secgroups():
        return self.network.update_security_group(group['id'], **kwargs)

    # Backfill name/description from the existing group; presumably the
    # nova API requires the full resource on update -- TODO confirm.
    for attr in ('name', 'description'):
        kwargs.setdefault(attr, group[attr])
    data = proxy._json_response(
        self.compute.put(
            '/os-security-groups/{id}'.format(id=group['id']),
            json={'security_group': kwargs}))
    return self._normalize_secgroup(
        self._get_and_munchify('security_group', data))
def list_security_groups(self, filters=None):
    """List all available security groups.

    :param filters: (optional) dict of filter conditions to push down
    :returns: A list of security group ``munch.Munch``.
    """
    # Security groups not supported
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups")

    filters = filters or {}

    if self._use_neutron_secgroups():
        # Hand the filters to neutron so as much filtering as possible
        # happens server-side.
        return list(
            self.network.security_groups(
                allow_unknown_params=True, **filters))

    # Legacy nova-network security groups.
    data = proxy._json_response(
        self.compute.get('/os-security-groups', params=filters))
    return self._normalize_secgroups(
        self._get_and_munchify('security_groups', data))
def create_security_group(self, name, description, project_id=None):
    """Create a new security group

    :param string name: A name for the security group.
    :param string description: Describes the security group.
    :param string project_id:
        Specify the project ID this security group will be created
        on (admin-only).

    :returns: A ``munch.Munch`` representing the new security group.

    :raises: OpenStackCloudException on operation error.
    :raises: OpenStackCloudUnavailableFeature if security groups are
             not supported on this cloud.
    """
    # Security groups not supported
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups")

    secgroup_body = {'name': name, 'description': description}
    if project_id is not None:
        # The underlying APIs still refer to the project as a tenant.
        secgroup_body['tenant_id'] = project_id

    if self._use_neutron_secgroups():
        return self.network.create_security_group(**secgroup_body)

    data = proxy._json_response(
        self.compute.post(
            '/os-security-groups',
            json={'security_group': secgroup_body}))
    return self._normalize_secgroup(
        self._get_and_munchify('security_group', data))
def get_volume_limits(self, name_or_id=None):
    """ Get volume limits for a project

    :param name_or_id: (optional) project name or ID to get limits for
                       if different from the current project
    :raises: OpenStackCloudException if it's not a valid project

    :returns: Munch object with the limits
    """
    params = {}
    project_id = None
    error_msg = "Failed to get limits"
    if name_or_id:
        proj = self.get_project(name_or_id)
        if not proj:
            raise exc.OpenStackCloudException("project does not exist")
        project_id = proj.id
        params['tenant_id'] = project_id
        error_msg = "{msg} for the project: {project} ".format(
            msg=error_msg, project=name_or_id)

    data = proxy._json_response(
        self.block_storage.get('/limits', params=params),
        # Bug fix: error_msg was carefully constructed above but never
        # passed to _json_response, so failures reported a generic
        # message instead of the project-specific one.
        error_message=error_msg)
    limits = self._get_and_munchify('limits', data)
    return limits
def list_volumes(self, cache=True):
    """List all available volumes.

    :param cache: Deprecated. When False a deprecation warning is
        emitted; the value otherwise has no effect.
    :returns: A list of volume ``munch.Munch``.
    """

    def _list(data):
        # Accumulate this page into the enclosing `volumes` list, then
        # recurse into the pagination "next" link, if there is one.
        volumes.extend(data.get('volumes', []))
        endpoint = None
        for link in data.get('volumes_links', []):
            if 'rel' in link and 'next' == link['rel']:
                endpoint = link['href']
                break
        if endpoint:
            try:
                _list(
                    proxy._json_response(
                        self.block_storage.get(endpoint)))
            except exc.OpenStackCloudURINotFound:
                # Catch and re-raise here because we are making recursive
                # calls and we just have context for the log here
                self.log.debug(
                    "While listing volumes, could not find next link"
                    " {link}.".format(link=data))
                raise

    if not cache:
        warnings.warn('cache argument to list_volumes is deprecated. Use '
                      'invalidate instead.')

    # Fetching paginated volumes can fail for several reasons, if
    # something goes wrong we'll have to start fetching volumes from
    # scratch
    attempts = 5
    for _ in range(attempts):
        # Restart with an empty accumulator on every attempt.
        volumes = []
        data = proxy._json_response(
            self.block_storage.get('/volumes/detail'))
        if 'volumes_links' not in data:
            # no pagination needed
            volumes.extend(data.get('volumes', []))
            break

        try:
            _list(data)
            break
        except exc.OpenStackCloudURINotFound:
            # A pagination link went stale mid-walk; retry the whole
            # listing from the beginning.
            pass
    else:
        self.log.debug(
            "List volumes failed to retrieve all volumes after"
            " {attempts} attempts. Returning what we found.".format(
                attempts=attempts))
        # list volumes didn't complete successfully so just return what
        # we found
    return self._normalize_volumes(
        self._get_and_munchify(key=None, data=volumes))
def set_volume_quotas(self, name_or_id, **kwargs):
    """ Set a volume quota in a project

    :param name_or_id: project name or id
    :param kwargs: key/value pairs of quota name and quota value

    :raises: OpenStackCloudException if the resource to set the quota
             does not exist.
    """
    proj = self.get_project(name_or_id)
    if not proj:
        raise exc.OpenStackCloudException("project does not exist")

    # The project id goes both in the URL and in the quota-set body.
    kwargs['tenant_id'] = proj.id
    proxy._json_response(
        self.block_storage.put(
            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
            json={'quota_set': kwargs}),
        error_message="No valid quota or resource")
def create_volume_backup(self, volume_id, name=None, description=None,
                         force=False, wait=True, timeout=None):
    """Create a volume backup.

    :param volume_id: the ID of the volume to backup.
    :param name: name of the backup, one will be generated if one is
        not provided
    :param description: description of the backup, one will be generated
        if one is not provided
    :param force: If set to True the backup will be created even if the
        volume is attached to an instance, if False it will not
    :param wait: If true, waits for volume backup to be created.
    :param timeout: Seconds to wait for volume backup creation. None is
        forever.

    :returns: The created volume backup object.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    backup_body = {
        'name': name,
        'volume_id': volume_id,
        'description': description,
        'force': force,
    }

    data = proxy._json_response(
        self.block_storage.post('/backups', json={'backup': backup_body}),
        error_message="Error creating backup of volume "
                      "{volume_id}".format(volume_id=volume_id))
    backup = self._get_and_munchify('backup', data)

    if wait:
        backup_id = backup['id']
        msg = ("Timeout waiting for the volume backup {} to be "
               "available".format(backup_id))
        for _ in utils.iterate_timeout(timeout, msg):
            # Re-fetch until the backend reports a terminal status.
            backup = self.get_volume_backup(backup_id)

            if backup['status'] == 'available':
                break

            if backup['status'] == 'error':
                raise exc.OpenStackCloudException(
                    "Error in creating volume backup {id}".format(
                        id=backup_id))

    return backup
def list_volume_types(self, get_extra=True):
    """List all available volume types.

    :param get_extra: Accepted for backward compatibility; not used by
        this implementation.
    :returns: A list of volume ``munch.Munch``.
    """
    # Note: the literal string 'None' (not Python None) is passed for
    # is_public, deliberately -- preserved as-is.
    response = self.block_storage.get(
        '/types', params=dict(is_public='None'))
    data = proxy._json_response(
        response, error_message='Error fetching volume_type list')
    return self._normalize_volume_types(
        self._get_and_munchify('volume_types', data))
def list_volume_snapshots(self, detailed=True, search_opts=None):
    """List all volume snapshots.

    :param detailed: When True, query the detailed listing endpoint.
    :param search_opts: (optional) dict of query parameters to pass.
    :returns: A list of volume snapshots ``munch.Munch``.
    """
    if detailed:
        endpoint = '/snapshots/detail'
    else:
        endpoint = '/snapshots'
    data = proxy._json_response(
        self.block_storage.get(endpoint, params=search_opts),
        error_message="Error getting a list of snapshots")
    return self._get_and_munchify('snapshots', data)
def _get_and_munchify(self, key, data):
    """Wrapper around meta.get_and_munchify.

    Some of the methods expect a `meta` attribute to be passed in as
    part of the method signature. In those methods the meta param is
    overriding the meta module making the call to meta.get_and_munchify
    to fail.
    """
    payload = data
    # Unwrap raw requests Responses into their parsed JSON first.
    if isinstance(payload, requests.models.Response):
        payload = proxy._json_response(payload)
    return meta.get_and_munchify(key, payload)
def create_volume_snapshot(self, volume_id, force=False,
                           wait=True, timeout=None, **kwargs):
    """Create a snapshot of a volume.

    :param volume_id: the ID of the volume to snapshot.
    :param force: If set to True the snapshot will be created even if the
        volume is attached to an instance, if False it will not
    :param name: name of the snapshot, one will be generated if one is
        not provided
    :param description: description of the snapshot, one will be generated
        if one is not provided
    :param wait: If true, waits for volume snapshot to be created.
    :param timeout: Seconds to wait for volume snapshot creation. None is
        forever.

    :returns: The created volume object.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    # Normalize/filter the passthrough keyword arguments.
    kwargs = self._get_volume_kwargs(kwargs)
    payload = {'volume_id': volume_id, 'force': force}
    payload.update(kwargs)
    resp = self.block_storage.post(
        '/snapshots', json=dict(snapshot=payload))
    data = proxy._json_response(
        resp,
        error_message="Error creating snapshot of volume "
                      "{volume_id}".format(volume_id=volume_id))
    snapshot = self._get_and_munchify('snapshot', data)
    if wait:
        snapshot_id = snapshot['id']
        for count in utils.iterate_timeout(
                timeout,
                "Timeout waiting for the volume snapshot to be available."
                ):
            # Re-fetch until the backend reports a terminal status.
            snapshot = self.get_volume_snapshot_by_id(snapshot_id)

            if snapshot['status'] == 'available':
                break

            if snapshot['status'] == 'error':
                raise exc.OpenStackCloudException(
                    "Error in creating volume snapshot")

    # TODO(mordred) need to normalize snapshots. We were normalizing them
    # as volumes, which is an error. They need to be normalized as
    # volume snapshots, which are completely different objects
    return snapshot
def remove_volume_type_access(self, name_or_id, project_id):
    """Revoke access on a volume_type to a project.

    :param name_or_id: ID or name of a volume_type
    :param project_id: A project id

    :raises: OpenStackCloudException on operation error.
    """
    volume_type = self.get_volume_type(name_or_id)
    if not volume_type:
        raise exc.OpenStackCloudException(
            "VolumeType not found: %s" % name_or_id)

    # Access revocation is a volume-type "action" POST.
    proxy._json_response(
        self.block_storage.post(
            '/types/{id}/action'.format(id=volume_type.id),
            json={'removeProjectAccess': {'project': project_id}}),
        error_message="Unable to revoke {project} "
                      "to use volume type {name}".format(
                          name=name_or_id, project=project_id))
def get_security_group_by_id(self, id):
    """ Get a security group by ID

    :param id: ID of the security group.
    :returns: A security group ``munch.Munch``.
    """
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups")

    error_message = ("Error getting security group with"
                     " ID {id}".format(id=id))
    # Only the service and URL differ between the two backends; the
    # response handling is identical.
    if self._use_neutron_secgroups():
        response = self.network.get(
            '/security-groups/{id}'.format(id=id))
    else:
        response = self.compute.get(
            '/os-security-groups/{id}'.format(id=id))
    data = proxy._json_response(response, error_message=error_message)
    return self._normalize_secgroup(
        self._get_and_munchify('security_group', data))
def list_containers(self, full_listing=True, prefix=None):
    """List containers.

    :param full_listing: Ignored. Present for backwards compat
    :param prefix: (optional) Only list containers starting with this
        prefix.
    :returns: list of Munch of the container objects
    :raises: OpenStackCloudException on operation error.
    """
    response = self.object_store.get(
        '/', params=dict(format='json', prefix=prefix))
    return self._get_and_munchify(None, proxy._json_response(response))
def create_security_group(self, name, description, project_id=None):
    """Create a new security group

    :param string name: A name for the security group.
    :param string description: Describes the security group.
    :param string project_id:
        Specify the project ID this security group will be created
        on (admin-only).

    :returns: A ``munch.Munch`` representing the new security group.

    :raises: OpenStackCloudException on operation error.
    :raises: OpenStackCloudUnavailableFeature if security groups are
             not supported on this cloud.
    """
    # Security groups not supported
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups"
        )

    secgroup = {'name': name, 'description': description}
    if project_id is not None:
        # The underlying APIs still refer to the project as a tenant.
        secgroup['tenant_id'] = project_id
    request_body = {'security_group': secgroup}

    if self._use_neutron_secgroups():
        data = proxy._json_response(
            self.network.post(
                '/security-groups.json', json=request_body),
            error_message="Error creating security group {0}".format(name))
    else:
        data = proxy._json_response(
            self.compute.post(
                '/os-security-groups', json=request_body))
    return self._normalize_secgroup(
        self._get_and_munchify('security_group', data))
def delete_security_group(self, name_or_id): """Delete a security group :param string name_or_id: The name or unique ID of the security group. :returns: True if delete succeeded, False otherwise. :raises: OpenStackCloudException on operation error. :raises: OpenStackCloudUnavailableFeature if security groups are not supported on this cloud. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) # TODO(mordred): Let's come back and stop doing a GET before we do # the delete. secgroup = self.get_security_group(name_or_id) if secgroup is None: self.log.debug('Security group %s not found for deleting', name_or_id) return False if self._use_neutron_secgroups(): exceptions.raise_from_response( self.network.delete( '/security-groups/{sg_id}.json'.format( sg_id=secgroup['id'])), error_message="Error deleting security group {0}".format( name_or_id) ) return True else: proxy._json_response(self.compute.delete( '/os-security-groups/{id}'.format(id=secgroup['id']))) return True
def update_security_group(self, name_or_id, **kwargs): """Update a security group :param string name_or_id: Name or ID of the security group to update. :param string name: New name for the security group. :param string description: New description for the security group. :returns: A ``munch.Munch`` describing the updated security group. :raises: OpenStackCloudException on operation error. """ # Security groups not supported if not self._has_secgroups(): raise exc.OpenStackCloudUnavailableFeature( "Unavailable feature: security groups" ) group = self.get_security_group(name_or_id) if group is None: raise exc.OpenStackCloudException( "Security group %s not found." % name_or_id) if self._use_neutron_secgroups(): data = proxy._json_response( self.network.put( '/security-groups/{sg_id}.json'.format(sg_id=group['id']), json={'security_group': kwargs}), error_message="Error updating security group {0}".format( name_or_id)) else: for key in ('name', 'description'): kwargs.setdefault(key, group[key]) data = proxy._json_response( self.compute.put( '/os-security-groups/{id}'.format(id=group['id']), json={'security_group': kwargs})) return self._normalize_secgroup( self._get_and_munchify('security_group', data))
def get_volume_by_id(self, id):
    """ Get a volume by ID

    :param id: ID of the volume.
    :returns: A volume ``munch.Munch``.
    """
    data = proxy._json_response(
        self.block_storage.get('/volumes/{id}'.format(id=id)),
        error_message="Error getting volume with ID {id}".format(id=id))
    return self._normalize_volume(
        self._get_and_munchify('volume', data))
def get_security_group_by_id(self, id):
    """ Get a security group by ID

    :param id: ID of the security group.
    :returns: A security group ``munch.Munch``.
    """
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups"
        )
    error_message = ("Error getting security group with"
                     " ID {id}".format(id=id))
    if self._use_neutron_secgroups():
        resp = self.network.get('/security-groups/{id}'.format(id=id))
        data = proxy._json_response(resp, error_message=error_message)
    else:
        # Legacy nova-network security group API.
        data = proxy._json_response(
            self.compute.get(
                '/os-security-groups/{id}'.format(id=id)),
            error_message=error_message)
    return self._normalize_secgroup(
        self._get_and_munchify('security_group', data))
def add_volume_type_access(self, name_or_id, project_id):
    """Grant access on a volume_type to a project.

    :param name_or_id: ID or name of a volume_type
    :param project_id: A project id

    NOTE: the call works even if the project does not exist.

    :raises: OpenStackCloudException on operation error.
    """
    volume_type = self.get_volume_type(name_or_id)
    if not volume_type:
        raise exc.OpenStackCloudException(
            "VolumeType not found: %s" % name_or_id)

    # Access grants are a volume-type "action" POST.
    proxy._json_response(
        self.block_storage.post(
            '/types/{id}/action'.format(id=volume_type.id),
            json={'addProjectAccess': {'project': project_id}}),
        error_message="Unable to authorize {project} "
                      "to use volume type {name}".format(
                          name=name_or_id, project=project_id))
def delete_volume_backup(self, name_or_id=None, force=False, wait=False,
                         timeout=None):
    """Delete a volume backup.

    :param name_or_id: Name or unique ID of the volume backup.
    :param force: Allow delete in state other than error or available.
    :param wait: If true, waits for volume backup to be deleted.
    :param timeout: Seconds to wait for volume backup deletion. None is
        forever.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    volume_backup = self.get_volume_backup(name_or_id)
    if not volume_backup:
        return False

    if force:
        # Force-delete is an action POST, not a plain DELETE.
        response = self.block_storage.post(
            '/backups/{backup_id}/action'.format(
                backup_id=volume_backup['id']),
            json={'os-force_delete': None})
    else:
        response = self.block_storage.delete(
            '/backups/{backup_id}'.format(
                backup_id=volume_backup['id']))
    proxy._json_response(
        response, error_message="Error in deleting volume backup")

    if wait:
        msg = "Timeout waiting for the volume backup to be deleted."
        for count in utils.iterate_timeout(timeout, msg):
            # Poll until the backup can no longer be looked up.
            if not self.get_volume_backup(volume_backup['id']):
                break

    return True
def detach_volume(self, server, volume, wait=True, timeout=None):
    """Detach a volume from a server.

    :param server: The server dict to detach from.
    :param volume: The volume dict to detach.
    :param wait: If true, waits for volume to be detached.
    :param timeout: Seconds to wait for volume detachment. None is
        forever.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    proxy._json_response(
        self.compute.delete(
            '/servers/{server_id}/os-volume_attachments/{volume_id}'.
            format(server_id=server['id'], volume_id=volume['id'])),
        error_message=(
            "Error detaching volume {volume} from server {server}".format(
                volume=volume['id'], server=server['id'])))
    if wait:
        for count in utils.iterate_timeout(
                timeout,
                "Timeout waiting for volume %s to detach." % volume['id']):
            try:
                vol = self.get_volume(volume['id'])
            except Exception:
                # Transient poll failures are logged and retried rather
                # than aborting the wait loop.
                self.log.debug("Error getting volume info %s",
                               volume['id'], exc_info=True)
                continue

            if vol['status'] == 'available':
                # Detach complete once the volume reports 'available'.
                return

            if vol['status'] == 'error':
                raise exc.OpenStackCloudException(
                    "Error in detaching volume %s" % volume['id'])
def detach_volume(self, server, volume, wait=True, timeout=None):
    """Detach a volume from a server.

    :param server: The server dict to detach from.
    :param volume: The volume dict to detach.
    :param wait: If true, waits for volume to be detached.
    :param timeout: Seconds to wait for volume detachment. None is
        forever.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    proxy._json_response(
        self.compute.delete(
            '/servers/{server_id}/os-volume_attachments/{volume_id}'.format(
                server_id=server['id'], volume_id=volume['id'])),
        error_message=(
            "Error detaching volume {volume} from server {server}".format(
                volume=volume['id'], server=server['id'])))
    if wait:
        for count in utils.iterate_timeout(
                timeout,
                "Timeout waiting for volume %s to detach." % volume['id']):
            try:
                vol = self.get_volume(volume['id'])
            except Exception:
                # Transient poll failures are logged and retried rather
                # than aborting the wait loop.
                self.log.debug(
                    "Error getting volume info %s", volume['id'],
                    exc_info=True)
                continue

            if vol['status'] == 'available':
                # Detach complete once the volume reports 'available'.
                return

            if vol['status'] == 'error':
                raise exc.OpenStackCloudException(
                    "Error in detaching volume %s" % volume['id']
                )
def get_object_capabilities(self): """Get infomation about the object-storage service The object-storage service publishes a set of capabilities that include metadata about maximum values and thresholds. """ # The endpoint in the catalog has version and project-id in it # To get capabilities, we have to disassemble and reassemble the URL # This logic is taken from swiftclient endpoint = urllib_parse.urlparse(self.object_store.get_endpoint()) url = "{scheme}://{netloc}/info".format(scheme=endpoint.scheme, netloc=endpoint.netloc) return proxy._json_response(self.object_store.get(url))
def get_object_capabilities(self): """Get infomation about the object-storage service The object-storage service publishes a set of capabilities that include metadata about maximum values and thresholds. """ # The endpoint in the catalog has version and project-id in it # To get capabilities, we have to disassemble and reassemble the URL # This logic is taken from swiftclient endpoint = urllib.parse.urlparse(self.object_store.get_endpoint()) url = "{scheme}://{netloc}/info".format( scheme=endpoint.scheme, netloc=endpoint.netloc) return proxy._json_response(self.object_store.get(url))
def get_volume_snapshot_by_id(self, snapshot_id):
    """Takes a snapshot_id and gets a dict of the snapshot that matches
    that ID.

    Note: This is more efficient than get_volume_snapshot.

    param: snapshot_id: ID of the volume snapshot.
    """
    resp = self.block_storage.get(
        '/snapshots/{snapshot_id}'.format(snapshot_id=snapshot_id))
    data = proxy._json_response(
        resp,
        error_message="Error getting snapshot "
                      "{snapshot_id}".format(snapshot_id=snapshot_id))
    # NOTE(review): the snapshot is run through _normalize_volume even
    # though it is not a volume; the original code does the same and is
    # kept as-is to preserve the returned shape.
    return self._normalize_volume(
        self._get_and_munchify('snapshot', data))
def get_volume_quotas(self, name_or_id):
    """ Get volume quotas for a project

    :param name_or_id: project name or id
    :raises: OpenStackCloudException if it's not a valid project

    :returns: Munch object with the quotas
    """
    proj = self.get_project(name_or_id)
    if not proj:
        raise exc.OpenStackCloudException("project does not exist")

    data = proxy._json_response(
        self.block_storage.get(
            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id)),
        error_message="cinder client call failed")
    return self._get_and_munchify('quota_set', data)
def update_volume(self, name_or_id, **kwargs):
    """Update a volume.

    :param name_or_id: Name or unique ID of the volume.
    :param kwargs: Volume attributes to update; filtered through
        _get_volume_kwargs before being sent.

    :returns: The updated volume ``munch.Munch``.
    :raises: OpenStackCloudException if the volume does not exist.
    """
    kwargs = self._get_volume_kwargs(kwargs)

    volume = self.get_volume(name_or_id)
    if not volume:
        raise exc.OpenStackCloudException(
            "Volume %s not found." % name_or_id)

    data = proxy._json_response(
        self.block_storage.put(
            '/volumes/{volume_id}'.format(volume_id=volume.id),
            json={'volume': kwargs}),
        error_message='Error updating volume')

    # The cached volume listing is stale after an update.
    self.list_volumes.invalidate(self)

    return self._normalize_volume(self._get_and_munchify('volume', data))
def delete_volume_quotas(self, name_or_id):
    """ Delete volume quotas for a project

    :param name_or_id: project name or id
    :raises: OpenStackCloudException if it's not a valid project or the
             cinder client call failed

    :returns: dict with the quotas
    """
    proj = self.get_project(name_or_id)
    if not proj:
        raise exc.OpenStackCloudException("project does not exist")

    response = self.block_storage.delete(
        '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id))
    return proxy._json_response(
        response, error_message="cinder client call failed")
def test_get(self):
    """Capabilities reported for a storage host expose the expected
    set of fields.
    """
    response = proxy._json_response(
        self.conn.block_storage.get('/os-hosts'))
    host = response['hosts'][0]['host_name']
    sot = self.conn.block_storage.get_capabilities(host)
    expected_keys = (
        'description',
        'display_name',
        'driver_version',
        'namespace',
        'pool_name',
        'properties',
        'replication_targets',
        'storage_protocol',
        'vendor_name',
        'visibility',
        'volume_backend_name',
    )
    for key in expected_keys:
        self.assertIn(key, sot)
def _list(data):
    # NOTE(review): this relies on `volumes` and `self` from an
    # enclosing scope that is not visible here -- it appears to be the
    # pagination helper used by list_volumes.
    # Accumulate this page of results.
    volumes.extend(data.get('volumes', []))
    # Locate the pagination "next" link, if the service provided one.
    endpoint = None
    for link in data.get('volumes_links', []):
        if 'rel' in link and 'next' == link['rel']:
            endpoint = link['href']
            break
    if endpoint:
        try:
            # Recurse into the next page.
            _list(
                proxy._json_response(self.block_storage.get(endpoint)))
        except exc.OpenStackCloudURINotFound:
            # Catch and re-raise here because we are making recursive
            # calls and we just have context for the log here
            self.log.debug(
                "While listing volumes, could not find next link"
                " {link}.".format(link=data))
            raise
def _get_volume_quotas(self, project_id, usage=True):
    """Get volume quotas for a project

    :param project_id: project id
    :param usage: when True, ask cinder to include current usage in
        the returned quota set.
    :raises: OpenStackCloudException if it's not a valid project

    :returns: Munch object with the quotas
    """
    if usage:
        resp = self.conn.block_storage.get(
            "/os-quota-sets/{project_id}?usage=True".format(
                project_id=project_id))
    else:
        resp = self.conn.block_storage.get(
            "/os-quota-sets/{project_id}".format(project_id=project_id))
    data = proxy._json_response(
        resp, error_message="cinder client call failed")
    return self.conn._get_and_munchify("quota_set", data)
def _setup_networks(self):
    """Create network fixtures for the test: a network, subnet and
    router when neutron is available, otherwise discover nova-net
    networks. Sets self.nic to the network new servers should use.
    """
    if self.user_cloud.has_service('network'):
        # Create a network
        self.test_net = self.user_cloud.create_network(
            name=self.new_item_name + '_net')
        # Create a subnet on it
        self.test_subnet = self.user_cloud.create_subnet(
            subnet_name=self.new_item_name + '_subnet',
            network_name_or_id=self.test_net['id'],
            cidr='10.24.4.0/24',
            enable_dhcp=True
        )
        # Create a router
        self.test_router = self.user_cloud.create_router(
            name=self.new_item_name + '_router')
        # Attach the router to an external network
        ext_nets = self.user_cloud.search_networks(
            filters={'router:external': True})
        self.user_cloud.update_router(
            name_or_id=self.test_router['id'],
            ext_gateway_net_id=ext_nets[0]['id'])
        # Attach the router to the internal subnet
        self.user_cloud.add_router_interface(
            self.test_router, subnet_id=self.test_subnet['id'])

        # Select the network for creating new servers
        self.nic = {'net-id': self.test_net['id']}
        self.addDetail(
            'networks-neutron',
            content.text_content(pprint.pformat(
                self.user_cloud.list_networks())))
    else:
        # Find network names for nova-net
        data = proxy._json_response(
            self.user_cloud._conn.compute.get('/os-tenant-networks'))
        nets = meta.get_and_munchify('networks', data)
        self.addDetail(
            'networks-nova',
            content.text_content(pprint.pformat(
                nets)))
        # NOTE(review): assumes at least one nova network exists; an
        # empty listing would raise IndexError here.
        self.nic = {'net-id': nets[0].id}
def _upload_object(self, endpoint, filename, headers):
    """Stream a local file to the given object endpoint via PUT.

    :param endpoint: object URL to PUT to.
    :param filename: path of the local file to upload.
    :param headers: headers to send with the request.
    :returns: the parsed JSON response.
    """
    with open(filename, 'rb') as source:
        response = self.put(endpoint, headers=headers, data=source)
        return proxy._json_response(response)
def create_security_group_rule(self, secgroup_name_or_id,
                               port_range_min=None, port_range_max=None,
                               protocol=None, remote_ip_prefix=None,
                               remote_group_id=None, direction='ingress',
                               ethertype='IPv4', project_id=None):
    """Create a new security group rule

    :param string secgroup_name_or_id: The security group name or ID to
        associate with this security group rule. If a non-unique group name
        is given, an exception is raised.
    :param int port_range_min: The minimum port number in the range that is
        matched by the security group rule. If the protocol is TCP or UDP,
        this value must be less than or equal to the port_range_max
        attribute value. If nova is used by the cloud provider for security
        groups, then a value of None will be transformed to -1.
    :param int port_range_max: The maximum port number in the range that is
        matched by the security group rule. The port_range_min attribute
        constrains the port_range_max attribute. If nova is used by the
        cloud provider for security groups, then a value of None will be
        transformed to -1.
    :param string protocol: The protocol that is matched by the security
        group rule. Valid values are None, tcp, udp, and icmp.
    :param string remote_ip_prefix: The remote IP prefix to be associated
        with this security group rule. This attribute matches the specified
        IP prefix as the source IP address of the IP packet.
    :param string remote_group_id: The remote group ID to be associated
        with this security group rule.
    :param string direction: Ingress or egress: The direction in which the
        security group rule is applied. For a compute instance, an ingress
        security group rule is applied to incoming (ingress) traffic for
        that instance. An egress rule is applied to traffic leaving the
        instance.
    :param string ethertype: Must be IPv4 or IPv6, and addresses
        represented in CIDR must match the ingress or egress rules.
    :param string project_id: Specify the project ID this security group
        will be created on (admin-only).

    :returns: A ``munch.Munch`` representing the new security group rule.

    :raises: OpenStackCloudException on operation error.
    """
    # Security groups not supported
    if not self._has_secgroups():
        raise exc.OpenStackCloudUnavailableFeature(
            "Unavailable feature: security groups"
        )

    secgroup = self.get_security_group(secgroup_name_or_id)
    if not secgroup:
        raise exc.OpenStackCloudException(
            "Security group %s not found." % secgroup_name_or_id)

    # The neutron and nova paths validate and shape the rule quite
    # differently, so each lives in its own helper.
    if self._use_neutron_secgroups():
        data = self._create_neutron_secgroup_rule(
            secgroup['id'], port_range_min, port_range_max, protocol,
            remote_ip_prefix, remote_group_id, direction, ethertype,
            project_id)
    else:
        data = self._create_nova_secgroup_rule(
            secgroup['id'], port_range_min, port_range_max, protocol,
            remote_ip_prefix, remote_group_id, direction, project_id)
    return self._normalize_secgroup_rule(
        self._get_and_munchify('security_group_rule', data))

def _create_neutron_secgroup_rule(self, secgroup_id, port_range_min,
                                  port_range_max, protocol,
                                  remote_ip_prefix, remote_group_id,
                                  direction, ethertype, project_id):
    """POST a security group rule to neutron; returns the raw response."""
    # NOTE: Nova accepts -1 port numbers, but Neutron accepts None
    # as the equivalent value.
    rule_def = {
        'security_group_id': secgroup_id,
        'port_range_min':
            None if port_range_min == -1 else port_range_min,
        'port_range_max':
            None if port_range_max == -1 else port_range_max,
        'protocol': protocol,
        'remote_ip_prefix': remote_ip_prefix,
        'remote_group_id': remote_group_id,
        'direction': direction,
        'ethertype': ethertype,
    }
    if project_id is not None:
        rule_def['tenant_id'] = project_id
    return proxy._json_response(
        self.network.post(
            '/security-group-rules.json',
            json={'security_group_rule': rule_def}),
        error_message="Error creating security group rule")

def _create_nova_secgroup_rule(self, secgroup_id, port_range_min,
                               port_range_max, protocol, remote_ip_prefix,
                               remote_group_id, direction, project_id):
    """POST a security group rule to nova; returns the raw response."""
    # NOTE: Neutron accepts None for protocol. Nova does not.
    if protocol is None:
        raise exc.OpenStackCloudException('Protocol must be specified')

    if direction == 'egress':
        self.log.debug(
            'Rule creation failed: Nova does not support egress rules'
        )
        raise exc.OpenStackCloudException(
            'No support for egress rules')

    # NOTE: Neutron accepts None for ports, but Nova requires -1
    # as the equivalent value for ICMP.
    #
    # For TCP/UDP, if both are None, Neutron allows this and Nova
    # represents this as all ports (1-65535). Nova does not accept
    # None values, so to hide this difference, we will automatically
    # convert to the full port range. If only a single port value is
    # specified, it will error as normal.
    if protocol == 'icmp':
        if port_range_min is None:
            port_range_min = -1
        if port_range_max is None:
            port_range_max = -1
    elif protocol in ['tcp', 'udp']:
        if port_range_min is None and port_range_max is None:
            port_range_min = 1
            port_range_max = 65535

    security_group_rule_dict = dict(security_group_rule=dict(
        parent_group_id=secgroup_id,
        ip_protocol=protocol,
        from_port=port_range_min,
        to_port=port_range_max,
        cidr=remote_ip_prefix,
        group_id=remote_group_id
    ))
    if project_id is not None:
        security_group_rule_dict[
            'security_group_rule']['tenant_id'] = project_id
    return proxy._json_response(
        self.compute.post(
            '/os-security-group-rules',
            json=security_group_rule_dict
        ))
def _upload_object_data(self, endpoint, data, headers):
    """PUT in-memory *data* to *endpoint* on the object store."""
    response = self.object_store.put(endpoint, headers=headers, data=data)
    return proxy._json_response(response)
def _upload_object(self, endpoint, filename, headers):
    """Upload the contents of a local file to the object store.

    :param endpoint: Object-store path to PUT to.
    :param filename: Path of the local file to upload.
    :param headers: HTTP headers to send with the request.
    :returns: The parsed JSON response.
    """
    # Use a context manager so the file descriptor is always closed;
    # the previous bare open() leaked the handle until garbage
    # collection (and matches the sibling _upload_object helper).
    with open(filename, 'rb') as data:
        return proxy._json_response(self.object_store.put(
            endpoint, headers=headers, data=data))
def attach_volume(self, server, volume, device=None,
                  wait=True, timeout=None):
    """Attach a volume to a server.

    Attaches the volume described by the given volume dict (as returned
    by get_volume()) to the server described by the given server dict
    (as returned by get_server()), optionally at a named device. The
    volume must not already be attached and must be in the 'available'
    state; to re-attach under a different device, detach first.

    :param server: The server dict to attach to.
    :param volume: The volume dict to attach.
    :param device: The device name where the volume will attach.
    :param wait: If true, waits for volume to be attached.
    :param timeout: Seconds to wait for volume attachment. None is forever.

    :returns: a volume attachment object.

    :raises: OpenStackCloudTimeout if wait time exceeded.
    :raises: OpenStackCloudException on operation error.
    """
    # Refuse a double-attach up front.
    existing_device = self.get_volume_attach_device(volume, server['id'])
    if existing_device:
        raise exc.OpenStackCloudException(
            "Volume %s already attached to server %s on device %s"
            % (volume['id'], server['id'], existing_device)
        )

    if volume['status'] != 'available':
        raise exc.OpenStackCloudException(
            "Volume %s is not available. Status is '%s'"
            % (volume['id'], volume['status'])
        )

    body = {'volumeId': volume['id']}
    if device:
        body['device'] = device
    data = proxy._json_response(
        self.compute.post(
            '/servers/{server_id}/os-volume_attachments'.format(
                server_id=server['id']),
            json=dict(volumeAttachment=body)),
        error_message="Error attaching volume {volume_id} to server "
                      "{server_id}".format(volume_id=volume['id'],
                                           server_id=server['id']))

    if wait:
        for _ in utils.iterate_timeout(
                timeout,
                "Timeout waiting for volume %s to attach."
                % volume['id']):
            try:
                self.list_volumes.invalidate(self)
                vol = self.get_volume(volume['id'])
            except Exception:
                # Transient polling failure: log and keep waiting.
                self.log.debug(
                    "Error getting volume info %s", volume['id'],
                    exc_info=True)
                continue

            if self.get_volume_attach_device(vol, server['id']):
                break

            # TODO(Shrews) check to see if a volume can be in error status
            #              and also attached. If so, we should move this
            #              above the get_volume_attach_device call
            if vol['status'] == 'error':
                raise exc.OpenStackCloudException(
                    "Error in attaching volume %s" % volume['id']
                )
    return self._normalize_volume_attachment(
        self._get_and_munchify('volumeAttachment', data))