class VolumeAction(resource2.Resource):
    """Helper resource for posting action requests against a volume.

    Every operation is a POST to ``/volumes/<volume_id>/action`` whose
    JSON body selects the action to perform.
    """

    base_path = '/volumes'
    service = block_store_service.BlockStoreService()

    def _action(self, session, volume_id, json):
        """POST *json* to the action endpoint of *volume_id*."""
        request = self._prepare_request(requires_id=False)
        request.uri = utils.urljoin(self.base_path, volume_id, 'action')
        override = self.service.get_endpoint_override()
        session.post(request.uri,
                     endpoint_filter=self.service,
                     endpoint_override=override,
                     json=json,
                     headers={})

    def set_new_size(self, session, volume_id, new_size):
        """Extend the volume to *new_size*."""
        payload = {'os-extend': {'new_size': new_size}}
        self._action(session, volume_id, payload)

    def set_bootable(self, session, volume_id, bootable):
        """Set the bootable attribute of the volume."""
        payload = {'os-set_bootable': {'bootable': bootable}}
        self._action(session, volume_id, payload)

    def set_readonly(self, session, volume_id, readonly):
        """Set the read-only flag of the volume."""
        payload = {'os-update_readonly_flag': {'readonly': readonly}}
        self._action(session, volume_id, payload)
class VolumeTransfer(resource.Resource):
    """A volume (disk) transfer resource (``/os-volume-transfer``)."""
    resource_key = 'transfer'
    resources_key = 'transfers'
    base_path = '/os-volume-transfer'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_create = True
    allow_delete = True
    allow_get = True
    allow_list = True

    _query_mapping = resource.QueryParameters('offset')

    # Properties
    #: The disk ID.
    volume_id = resource.Body('volume_id')
    #: The name of the disk transfer.
    name = resource.Body('name')
    #: The authentication key of the disk transfer.
    auth_key = resource.Body('auth_key')
    #: The links of the disk transfer.
    links = resource.Body('links', type=list)
    #: The time when the disk transfer was created.
    created_at = resource.Body('created_at')
    #: The disk transfer ID.
    id = resource.Body('id')
def __init__(self, plugins=None):
    """User preference for each service.

    :param list plugins: List of entry point namespaces to load.

    Create a new :class:`~openstack.profile.Profile` object with no
    preferences defined, but knowledge of the services.
    Services are identified by their service type, e.g.: 'identity',
    'compute', etc.
    """
    self._preferences = {}
    self._services = {}

    # Register every known service; each is keyed by its service type.
    for svc in (cluster_service.ClusterService(),
                compute_service.ComputeService(),
                database_service.DatabaseService(),
                identity_service.IdentityService(),
                image_service.ImageService(),
                metric_service.MetricService(),
                network_service.NetworkService(),
                object_store_service.ObjectStoreService(),
                orchestration_service.OrchestrationService(),
                key_management_service.KeyManagementService(),
                telemetry_service.TelemetryService(),
                block_store_service.BlockStoreService(),
                message_service.MessageService()):
        self._add_service(svc)

    # Optional entry-point plugins extend the service catalog.
    for plugin in plugins or ():
        self._load_plugin(plugin)

    self.service_names = sorted(self._services.keys())
class Snapshot(resource.Resource):
    """A point-in-time snapshot of a volume (``/snapshots``)."""
    resource_key = "snapshot"
    resources_key = "snapshots"
    base_path = "/snapshots"
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_retrieve = True
    allow_create = True
    allow_delete = True
    allow_update = True

    # Properties
    #: A UUID representing this snapshot.
    id = resource.prop("id")
    #: Name of the snapshot. Default is None.
    name = resource.prop("name")
    #: The current status of this snapshot. Potential values are creating,
    #: available, deleting, error, and error_deleting.
    status = resource.prop("status")
    #: Description of snapshot. Default is None.
    description = resource.prop("description")
    #: The timestamp of this snapshot creation.
    created = resource.prop("created_at")
    #: Metadata associated with this snapshot.
    metadata = resource.prop("metadata", type=dict)
    #: The ID of the volume this snapshot was taken of.
    volume = resource.prop("volume_id")
    #: The size of the volume, in GBs.
    size = resource.prop("size", type=int)
    #: Indicate whether to snapshot, even if the volume is attached.
    #: Default is False.
    force = resource.prop("force", type=bool)
def __init__(self, plugins=None):
    """User preference for each service.

    :param plugins: List of entry point namespaces to load.

    Create a new :class:`~openstack.profile.Profile` object with no
    preferences defined, but knowledge of the services.
    Services are identified by their service type, e.g.: 'identity',
    'compute', etc.
    """
    self._services = {}

    # Register every supported service with its default API version.
    for svc in (anti_ddos_service.AntiDDosService(version="v1"),
                block_store_service.BlockStoreService(version="v2"),
                compute_service.ComputeService(version="v2"),
                cts_service.CTSService(version="v1"),
                dms_service.DMSService(version="v1"),
                identity_service.IdentityService(version="v3"),
                image_service.ImageService(version="v2"),
                kms_service.KMSService(version="v1"),
                maas_service.MaaSService(version="v1"),
                network_service.NetworkService(version="v2.0"),
                orchestration_service.OrchestrationService(version="v1"),
                smn_service.SMNService(version="v2"),
                # QianBiao.NG HuaWei Services
                dns_service.DNSService(version="v2"),
                cloud_eye_service.CloudEyeService(version="v1"),
                auto_scaling_service.AutoScalingService(version="v1"),
                volume_backup_service.VolumeBackupService(version="v2"),
                map_reduce_service.MapReduceService(version="v1"),
                evs_service.EvsServiceV2_1(version='v2.1'),
                evs_service.EvsService(version='v2'),
                ecs_service.EcsService(version='v1'),
                ecs_service.EcsServiceV1_1(version='v1.1'),
                vpc_service.VpcService(version='v2.0'),
                bms_service.BmsService(version='v1'),
                lb_service.LoadBalancerService(version='v1'),
                rds_service.RDSService(version="v1"),
                cdn_service.CDNService(version='v1')):
        self._add_service(svc)

    # NOTE: the following services are not supported at the moment:
    #   message (v1), cluster (v1), database (v1), alarm (v2),
    #   bare_metal (v1), key_manager (v1), object_store (v1),
    #   rds_os (v1), telemetry (v2), workflow (v2).

    if plugins:
        for plugin in plugins:
            self._load_plugin(plugin)

    self.service_keys = sorted(self._services.keys())
class Type(resource2.Resource):
    """A volume type (``/types``)."""
    resource_key = "volume_type"
    resources_key = "volume_types"
    base_path = "/types"
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_get = True
    allow_create = True
    allow_delete = True
    allow_list = True

    # Properties
    #: A ID representing this type.
    id = resource2.Body("id")
    #: Name of the type.
    name = resource2.Body("name")
    #: A dict of extra specifications. "capabilities" is a usual key.
    extra_specs = resource2.Body("extra_specs", type=dict)
    #: Description of the type.
    description = resource2.Body('description')
    #: The quality of service ID of the block_store disk type.
    qos_specs_id = resource2.Body('qos_specs_id')
    #: Whether the block_store disk type is public.
    is_public = resource2.Body('is_public', type=bool)
class Volume(resource2.Resource):
    """A block storage volume (``/volumes``)."""
    resource_key = "volume"
    resources_key = "volumes"
    base_path = "/volumes"
    service = block_store_service.BlockStoreService()

    _query_mapping = resource2.QueryParameters('all_tenants', 'name',
                                               'status', 'project_id')

    # capabilities
    allow_get = True
    allow_create = True
    allow_delete = True
    allow_update = True
    allow_list = True

    # Properties
    #: A ID representing this volume.
    id = resource2.Body("id")
    #: The name of this volume.
    name = resource2.Body("name")
    #: A list of links associated with this volume. *Type: list*
    links = resource2.Body("links", type=list)
    #: The availability zone.
    availability_zone = resource2.Body("availability_zone")
    #: To create a volume from an existing volume, specify the ID of
    #: the existing volume. If specified, the volume is created with
    #: same size of the source volume.
    source_volume_id = resource2.Body("source_volid")
    #: The volume description.
    description = resource2.Body("description")
    #: To create a volume from an existing snapshot, specify the ID of
    #: the existing volume snapshot. If specified, the volume is created
    #: in same availability zone and with same size of the snapshot.
    snapshot_id = resource2.Body("snapshot_id")
    #: The size of the volume, in GBs. *Type: int*
    size = resource2.Body("size", type=int)
    #: The ID of the image from which you want to create the volume.
    #: Required to create a bootable volume.
    image_id = resource2.Body("imageRef")
    #: The name of the associated volume type.
    volume_type = resource2.Body("volume_type")
    #: Enables or disables the bootable attribute. You can boot an
    #: instance from a bootable volume. *Type: bool*
    is_bootable = resource2.Body("bootable", type=format.BoolStr)
    #: One or more metadata key and value pairs to associate with the volume.
    metadata = resource2.Body("metadata")
    #: One of the following values: creating, available, attaching, in-use
    #: deleting, error, error_deleting, backing-up, restoring-backup,
    #: error_restoring. For details on these statuses, see the
    #: Block Storage API documentation.
    status = resource2.Body("status")
    #: TODO(briancurtin): This is currently undocumented in the API.
    attachments = resource2.Body("attachments")
    #: The timestamp of this volume creation.
    created_at = resource2.Body("created_at")
def test_service(self):
    """The block store service exposes cinder v2 defaults."""
    sot = block_store_service.BlockStoreService()
    self.assertEqual("volume", sot.service_type)
    self.assertEqual("public", sot.visibility)
    self.assertIsNone(sot.region)
    self.assertIsNone(sot.service_name)
    versions = sot.valid_versions
    self.assertEqual(1, len(versions))
    only_version = versions[0]
    self.assertEqual("v2", only_version.module)
    self.assertEqual("v2", only_version.path)
def __init__(self):
    """Preferences for each service.

    Create a new :class:`~openstack.profile.Profile` object with no
    preferences defined, but knowledge of the services.
    Services are identified by their service type, e.g.: 'identity',
    'compute', etc.
    """
    self._preferences = {}
    self._services = {}
    # NOTE(thowe): We should probably do something more clever here
    # rather than brute force create all the services. Maybe use entry
    # points or something, but I'd like to leave that work for another
    # commit. In the meantime the repetition is factored into a single
    # loop so each service is registered identically.
    services = (
        cluster_service.ClusterService(),
        compute_service.ComputeService(),
        database_service.DatabaseService(),
        identity_service.IdentityService(),
        image_service.ImageService(),
        metric_service.MetricService(),
        network_service.NetworkService(),
        object_store_service.ObjectStoreService(),
        orchestration_service.OrchestrationService(),
        keystore_service.KeystoreService(),
        telemetry_service.TelemetryService(),
        block_store_service.BlockStoreService(),
        message_service.MessageService(),
    )
    for serv in services:
        # Visibility is cleared for every service, exactly as the
        # original per-service blocks did.
        serv.set_visibility(None)
        self._services[serv.service_type] = serv
    self.service_names = sorted(self._services.keys())
class Type(resource2.Resource):
    """A cloud disk (volume) type (``/types``), Huawei-extended variant."""
    resource_key = "volume_type"
    resources_key = "volume_types"
    base_path = "/types"
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_get = True
    allow_create = True
    allow_delete = True
    allow_list = True

    # Properties
    #: A ID representing this type.
    id = resource2.Body("id")
    #: Name of the type.
    name = resource2.Body("name")
    #: A dict of extra specifications. "capabilities" is a usual key.
    extra_specs = resource2.Body("extra_specs", type=dict)
    #: List of cloud disk types returned by the query request
    # volume_types = resource2.Body("volume_types", type=list)
    #: Cinder backend cloud drive type name
    volume_backend_name = resource2.Body("volume_backend_name")
    #: Available partition
    #: NOTE(review): the wire name uses a hyphen ("availability-zone"),
    #: unlike the underscore used elsewhere — confirm against the API.
    availability_zone = resource2.Body("availability-zone")
    #: Error message returned when an error occurs
    message = resource2.Body("message")
    #: HTTP-style error code; the specific meaning refers to the list of
    #: return values below:
    # 400 Bad Request
    # 401 Unauthorized
    # 403 Forbidden
    # 404 Not Found
    # 405 Method Not Allowed
    # 406 Not Acceptable
    # 407 Proxy Authentication Required
    # 408 Request Timeout
    # 409 Conflict
    # 500 Internal Server Error
    # 501 Not Implemented
    # 502 Bad Gateway
    # 503 Service Unavailable
    # 504 Gateway Timeout
    code = resource2.Body("code")
    #: Description of the cloud disk type
    description = resource2.Body("description")
    #: The id of the qos corresponding to the cloud drive type
    qos_specs_id = resource2.Body("qos_specs_id")
    #: Whether it is a public type
    is_public = resource2.Body("is_public", type=bool)
    #: Support AZ list of current cloud drive type
    RESKEY_availability_zone = resource2.Body("RESKEY:availability_zone")
    #: AZ list of current cloud drive types sold out
    sold_out_availability_zones = resource2.Body(
        "os-vendor-extended:sold_out_availability_zones")
class AvailabilityZone(resource.Resource):
    """An availability zone listing (``/os-availability-zone``)."""
    resource_key = None
    resources_key = 'availabilityZoneInfo'
    base_path = '/os-availability-zone'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_list = True

    # Properties
    #: The AZ status.
    zoneState = resource.Body('zoneState', type=dict)
    #: The AZ name.
    zoneName = resource.Body('zoneName')
class Snapshot(resource2.Resource):
    """A volume snapshot (``/snapshots``), resource2-based variant."""
    resource_key = "snapshot"
    resources_key = "snapshots"
    base_path = "/snapshots"
    service = block_store_service.BlockStoreService()

    _query_mapping = resource2.QueryParameters(
        'all_tenants', 'name', 'status', 'volume_id', "offset")

    # capabilities
    allow_get = True
    allow_create = True
    allow_delete = True
    allow_update = True
    allow_list = True

    # Properties
    #: A ID representing this snapshot.
    id = resource2.Body("id")
    #: Name of the snapshot. Default is None.
    name = resource2.Body("name")
    #: The current status of this snapshot. Potential values are creating,
    #: available, deleting, error, and error_deleting.
    status = resource2.Body("status")
    #: Description of snapshot. Default is None.
    description = resource2.Body("description")
    #: The timestamp of this snapshot creation.
    created_at = resource2.Body("created_at")
    #: Metadata associated with this snapshot.
    metadata = resource2.Body("metadata", type=dict)
    #: The ID of the volume this snapshot was taken of.
    volume_id = resource2.Body("volume_id")
    #: The size of the volume, in GBs.
    size = resource2.Body("size", type=int)
    #: Indicate whether to create snapshot, even if the volume is attached.
    #: Default is ``False``. *Type: bool*
    is_forced = resource2.Body("force", type=format.BoolStr)
    #: Update time.
    updated_at = resource2.Body("updated_at")
    #: Same as name.
    display_name = resource2.Body("display_name")
    #: Same as description.
    display_description = resource2.Body("display_description")
    #: The percentage of completeness the snapshot is currently at.
    progress = resource2.Body("os-extended-snapshot-attributes:progress")
    #: The project ID this snapshot is associated with.
    project_id = resource2.Body("os-extended-snapshot-attributes:project_id")
class Type(resource.Resource):
    """A volume type (``/types``), legacy resource.prop-based variant."""
    resource_key = "volume_type"
    resources_key = "volume_types"
    base_path = "/types"
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_retrieve = True
    allow_create = True
    allow_delete = True

    # Properties
    #: A UUID representing this volume.
    id = resource.prop("id")
    #: Name of the type.
    name = resource.prop("name")
    #: A dict of extra specifications. "capabilities" is a usual key.
    extra_specs = resource.prop("extra_specs", type=dict)
class Extension(resource2.Resource):
    """An API extension advertised by the service (``/extensions``)."""
    base_path = '/extensions'
    resources_key = 'extensions'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_list = True

    # Properties
    #: The last update time
    updated = resource2.Body('updated')
    #: Description
    description = resource2.Body('description')
    #: The links of the extension
    links = resource2.Body('links', type=list)
    #: The namespace of the extension
    namespace = resource2.Body('namespace')
    #: The alias of the extension
    alias = resource2.Body('alias')
    #: The name of the extension
    name = resource2.Body('name')
class SnapshotRollback(resource2.Resource):
    """Vendor action that rolls a volume back to a snapshot."""

    base_path = '/os-vendor-snapshots'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_update = True

    # Properties
    #: The snapshot rollback information
    rollback = resource2.Body('rollback', type=dict)

    def rollback_snapshot(self, session, snapshot_id, **kwargs):
        """POST a rollback request for *snapshot_id* and return self."""
        request = self._prepare_request(requires_id=False)
        request.uri = utils.urljoin(self.base_path, snapshot_id, 'rollback')
        override = self.service.get_endpoint_override()
        reply = session.post(request.uri,
                             endpoint_filter=self.service,
                             endpoint_override=override,
                             json=kwargs,
                             headers={})
        self._translate_response(reply)
        return self
class Version(resource2.Resource):
    """An API version advertised at the service root (``/``)."""
    resource_key = 'version'
    resources_key = 'versions'
    base_path = '/'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_list = True

    # Properties
    #: Links for this API version.
    links = resource2.Body('links', type=list)
    #: The status of this API version.
    status = resource2.Body('status')
    #: The minimum API version.
    min_version = resource2.Body('min_version')
    #: The request messages type of the API version.
    media_types = resource2.Body('media-types', type=list)
    #: The ID of the API version.
    id = resource2.Body('id')
    #: The last time when the API version is updated.
    updated = resource2.Body('updated')
    #: The sub-version of the API version.
    version = resource2.Body('version')
class ExportImageByVolume(resource2.Resource):
    """Action that exports a volume's data to an image.

    Issued as a POST against ``/volumes/<volume_id>/action``.
    """
    resource_key = 'os-volume_upload_image'
    resources_key = None
    base_path = '/volumes/%(volume_id)s/action'
    # URI parameter: the ID of the volume being exported.
    volume_id = resource2.URI('volume_id')
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_create = True

    # Properties
    #: The format of the exported image. The default value is vhd.
    disk_format = resource2.Body('disk_format')
    #: The name of the exported image.
    image_name = resource2.Body('image_name')
    #: Whether to forcibly export the image. The default value is false.
    force = resource2.Body('force', type=bool)
    #: The container type of the exported image. The default value is bare.
    container_format = resource2.Body('container_format')
    #: The OS type of the exported image.
    #: Currently, only windows and linux are supported.
    #: The default value is linux.
    os_type = resource2.Body('__os_type')
    #: The status after the image is exported. The normal value is uploading.
    status = resource2.Body('status')
    #: The ID of the exported image.
    image_id = resource2.Body('image_id')
    #: The information of the disk type.
    volume_type = resource2.Body('volume_type', type=dict)
    #: The disk capacity.
    size = resource2.Body('size', type=int)
    #: The disk ID.
    id = resource2.Body('id')
    #: The disk description.
    display_description = resource2.Body('display_description')
    #: The time when the disk information was updated.
    updated_at = resource2.Body('updated_at')
class QuotaSet(resource2.Resource):
    """Per-tenant quota usage (``/os-quota-sets/<tenant_id>?usage=True``)."""
    resource_key = 'quota_set'
    # NOTE: the query string is baked into base_path so that the usage
    # details are always requested along with the limits.
    base_path = '/os-quota-sets/%(tenant_id)s?usage=True'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_get = True

    # URI parameters
    tenant_id = resource2.URI('tenant_id')

    #: Properties
    #: The number of block_store disks
    volumes = resource2.Body('volumes', type=dict)
    #: The number of snapshots
    snapshots = resource2.Body('snapshots', type=dict)
    #: The total size (GB) of block_store disks and snapshots allowed
    #: by the quota
    gigabytes = resource2.Body('gigabytes', type=dict)
    #: The number of block_store disks reserved for a specified volume type
    volumes_sata = resource2.Body('volumes_SATA', type=dict)
    volumes_ssd = resource2.Body('volumes_SSD', type=dict)
    volumes_sas = resource2.Body('volumes_SAS', type=dict)
    #: The number of snapshots reserved for a specified volume type
    snapshots_sata = resource2.Body('snapshots_SATA', type=dict)
    snapshots_ssd = resource2.Body('snapshots_SSD', type=dict)
    snapshots_sas = resource2.Body('snapshots_SAS', type=dict)
    #: The size (GB) reserved for a specified volume type
    gigabytes_sata = resource2.Body('gigabytes_SATA', type=dict)
    gigabytes_ssd = resource2.Body('gigabytes_SSD', type=dict)
    gigabytes_sas = resource2.Body('gigabytes_SAS', type=dict)
    #: Tenant ID
    id = resource2.Body('id')
    #: The number of backups
    backups = resource2.Body('backups', type=dict)
    #: The backup size (GB)
    backup_gigabytes = resource2.Body('backup_gigabytes', type=dict)
class Volume(resource2.Resource):
    """A block storage volume (``/volumes``) with extended attributes."""
    resource_key = "volume"
    resources_key = "volumes"
    base_path = "/volumes"
    service = block_store_service.BlockStoreService()

    _query_mapping = resource2.QueryParameters('all_tenants', 'name',
                                               'status', 'project_id')

    # capabilities
    allow_get = True
    allow_create = True
    allow_delete = True
    allow_update = True
    allow_list = True

    # Properties
    #: A ID representing this volume.
    id = resource2.Body("id")
    #: The name of this volume.
    name = resource2.Body("name")
    #: A list of links associated with this volume. *Type: list*
    links = resource2.Body("links", type=list)
    #: The availability zone.
    availability_zone = resource2.Body("availability_zone")
    #: To create a volume from an existing volume, specify the ID of
    #: the existing volume. If specified, the volume is created with
    #: same size of the source volume.
    source_volume_id = resource2.Body("source_volid")
    #: The volume description.
    description = resource2.Body("description")
    #: To create a volume from an existing snapshot, specify the ID of
    #: the existing volume snapshot. If specified, the volume is created
    #: in same availability zone and with same size of the snapshot.
    snapshot_id = resource2.Body("snapshot_id")
    #: The size of the volume, in GBs. *Type: int*
    size = resource2.Body("size", type=int)
    #: The ID of the image from which you want to create the volume.
    #: Required to create a bootable volume.
    image_id = resource2.Body("imageRef")
    #: The name of the associated volume type.
    volume_type = resource2.Body("volume_type")
    #: Enables or disables the bootable attribute. You can boot an
    #: instance from a bootable volume. *Type: bool*
    is_bootable = resource2.Body("bootable", type=format.BoolStr)
    #: One or more metadata key and value pairs to associate with the volume.
    metadata = resource2.Body("metadata")
    #: One of the following values: creating, available, attaching, in-use
    #: deleting, error, error_deleting, backing-up, restoring-backup,
    #: error_restoring. For details on these statuses, see the
    #: Block Storage API documentation.
    status = resource2.Body("status")
    #: TODO(briancurtin): This is currently undocumented in the API.
    attachments = resource2.Body("attachments")
    #: The timestamp of this volume creation.
    created_at = resource2.Body("created_at")
    #: The project ID associated with current back-end.
    project_id = resource2.Body("os-vol-tenant-attr:tenant_id")
    #: The metadata of the block_store disk image.
    volume_image_metadata = resource2.Body('volume_image_metadata', type=dict)
    #: The volume's current back-end.
    host = resource2.Body("os-vol-host-attr:host")
    #: ``True`` if this volume is encrypted, ``False`` if not.
    #: *Type: bool*
    is_encrypted = resource2.Body("encrypted", type=format.BoolStr)
    #: The time when the block_store disk information was updated.
    updated_at = resource2.Body('updated_at')
    #: Extended replication status on this volume.
    extended_replication_status = resource2.Body(
        "os-volume-replication:extended_status")
    #: Status of replication on this volume.
    replication_status = resource2.Body("replication_status")
    #: The status of this volume's migration (None means that a migration
    #: is not currently in progress).
    migration_status = resource2.Body("os-vol-mig-status-attr:migstat")
    #: ID of the consistency group.
    consistency_group_id = resource2.Body("consistencygroup_id")
    #: The volume ID that this volume's name on the back-end is based on.
    migration_id = resource2.Body("os-vol-mig-status-attr:name_id")
    #: Whether the block_store disk is shareable.
    shareable = resource2.Body('shareable', type=bool)
    #: Reserved.
    user_id = resource2.Body('user_id')
    #: The shared block_store disk information.
    multi_attach = resource2.Body('multiattach', type=bool)
class QuotaSet(resource2.Resource):
    """Per-tenant quota limits (``/os-quota-sets/<tenant_id>``)."""
    resource_key = 'quota_set'
    base_path = '/os-quota-sets/%(tenant_id)s'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_get = True

    # URI parameters
    tenant_id = resource2.URI('tenant_id')

    #: Properties
    #: The number of block_store disks
    volumes = resource2.Body('volumes', type=dict)
    #: The number of snapshots
    snapshots = resource2.Body('snapshots', type=dict)
    #: The total size (GB) of block_store disks and snapshots allowed
    #: by the quota
    gigabytes = resource2.Body('gigabytes', type=dict)
    #: The number of block_store disks reserved for a specified volume type
    volumes_sata = resource2.Body('volumes_SATA', type=dict)
    volumes_ssd = resource2.Body('volumes_SSD', type=dict)
    volumes_sas = resource2.Body('volumes_SAS', type=dict)
    #: The number of snapshots reserved for a specified volume type
    snapshots_sata = resource2.Body('snapshots_SATA', type=dict)
    snapshots_ssd = resource2.Body('snapshots_SSD', type=dict)
    snapshots_sas = resource2.Body('snapshots_SAS', type=dict)
    #: The size (GB) reserved for a specified volume type
    gigabytes_sata = resource2.Body('gigabytes_SATA', type=dict)
    gigabytes_ssd = resource2.Body('gigabytes_SSD', type=dict)
    gigabytes_sas = resource2.Body('gigabytes_SAS', type=dict)
    #: Tenant ID
    id = resource2.Body('id')
    #: The number of backups
    backups = resource2.Body('backups', type=dict)
    #: The backup size (GB)
    backup_gigabytes = resource2.Body('backup_gigabytes', type=dict)

    def get_quotas(self, session, requires_id=True, params=None):
        """Fetch this quota set, mapping a 404 to ResourceNotFound.

        :param session: the session used to issue the GET request.
        :param requires_id: passed through to request preparation.
        :param params: optional query parameters for the GET.
        :returns: this object updated from the response body.
        :raises: ``exceptions.MethodNotSupported`` if GET is disabled,
            ``exceptions.ResourceNotFound`` when the tenant has no quota.
        """
        if not self.allow_get:
            raise exceptions.MethodNotSupported(self, "get")
        try:
            request = self._prepare_request(requires_id=requires_id)
            endpoint_override = self.service.get_endpoint_override()
            service = self.get_service_filter(self, session)
            response = session.get(request.uri,
                                   endpoint_filter=self.service,
                                   microversion=service.microversion,
                                   endpoint_override=endpoint_override,
                                   params=params)
            self._translate_response(response)
            return self
        except exceptions.NotFoundException as e:
            # Re-raise with a resource-specific message while keeping all
            # diagnostic details from the original exception.
            raise exceptions.ResourceNotFound(message="No %s found for %s" %
                                              ("Quota", self.tenant_id),
                                              details=e.details,
                                              response=e.response,
                                              request_id=e.request_id,
                                              url=e.url,
                                              method=e.method,
                                              http_status=e.http_status,
                                              cause=e.cause,
                                              code=e.code)
class Volume(resource2.Resource):
    """A cloud disk (volume) resource (``/volumes``), Huawei variant."""
    resource_key = "volume"
    resources_key = "volumes"
    base_path = "/volumes"
    service = block_store_service.BlockStoreService()

    _query_mapping = resource2.QueryParameters(
        'all_tenants', 'name', 'status', 'project_id', "sort_key",
        "offset", "availability_zone", "sort_dir",
        changes_since="changes-since",
        metadata_alias="metadata")

    # capabilities
    allow_get = True
    allow_create = True
    allow_delete = True
    allow_update = True
    allow_list = True

    # Properties
    #: A ID representing this volume.
    id = resource2.Body("id")
    #: The name of this volume.
    name = resource2.Body("name")
    #: A list of links associated with this volume. *Type: list*
    links = resource2.Body("links", type=list)
    #: The availability zone.
    availability_zone = resource2.Body("availability_zone")
    #: To create a volume from an existing volume, specify the ID of
    #: the existing volume. If specified, the volume is created with
    #: same size of the source volume.
    source_volume_id = resource2.Body("source_volid")
    #: The volume description.
    description = resource2.Body("description")
    #: To create a volume from an existing snapshot, specify the ID of
    #: the existing volume snapshot. If specified, the volume is created
    #: in same availability zone and with same size of the snapshot.
    snapshot_id = resource2.Body("snapshot_id")
    #: The size of the volume, in GBs. *Type: int*
    size = resource2.Body("size", type=int)
    #: The ID of the image from which you want to create the volume.
    #: Required to create a bootable volume.
    image_id = resource2.Body("imageRef")
    #: The name of the associated volume type.
    volume_type = resource2.Body("volume_type")
    #: Enables or disables the bootable attribute. You can boot an
    #: instance from a bootable volume. *Type: bool*
    is_bootable = resource2.Body("bootable", type=format.BoolStr)
    #: One or more metadata key and value pairs to associate with the volume.
    metadata = resource2.Body("metadata", type=dict)
    #: One of the following values: creating, available, attaching, in-use
    #: deleting, error, error_deleting, backing-up, restoring-backup,
    #: error_restoring. For details on these statuses, see the
    #: Block Storage API documentation.
    status = resource2.Body("status")
    #: TODO(briancurtin): This is currently undocumented in the API.
    attachments = resource2.Body("attachments", type=list)
    #: The timestamp of this volume creation.
    created_at = resource2.Body("created_at")
    # This parameter indicates that the cloud disk is created from the
    # clone of the disk. The current cloud disk service does not support
    # this function.
    source_replica = resource2.Body("source_replica")
    # Consistency group ID. This parameter indicates that the cloud disk
    # belongs to the consistency group. The current cloud disk service
    # does not support this function.
    consistencygroup_id = resource2.Body("consistencygroup_id")
    # Is it a shareable cloud drive
    shareable = resource2.Body("shareable", type=bool)
    # Share the cloud drive flag. The default is false.
    multiattach = resource2.Body("multiattach", type=bool)
    #: ``True`` if this volume is encrypted, ``False`` if not.
    #: *Type: bool*
    encrypted = resource2.Body("encrypted", type=bool)
    #: Status of replication on this volume.
    replication_status = resource2.Body("replication_status")
    # updated date
    updated_at = resource2.Body("updated_at")
    # user id
    user_id = resource2.Body("user_id")
    # If the cloud drive is created from the image, this field will be
    # available, otherwise the field is empty.
    volume_image_metadata = resource2.Body("volume_image_metadata", type=dict)
    #: Extended replication status on this volume.
    extended_status = resource2.Body("os-volume-replication:extended_status")
    #: The project ID associated with current back-end.
    tenant_id = resource2.Body("os-vol-tenant-attr:tenant_id")
    #: The status of this volume's migration (None means that a migration
    #: is not currently in progress).
    migstat = resource2.Body("os-vol-mig-status-attr:migstat")
    #: The volume ID that this volume's name on the back-end is based on.
    name_id = resource2.Body("os-vol-mig-status-attr:name_id")
    # Error message returned on failure.
    message = resource2.Body("message")
    # Error code returned on failure.
    code = resource2.Body("code")
    #: The volume's current back-end.
    host = resource2.Body("os-vol-host-attr:host")
    # The scheduling parameter currently supports the
    # dedicated_storage_id field, indicating that the cloud disk is
    # created in the DSS storage pool.
    scheduler_hints = resource2.Body("OS-SCH-HNT:scheduler_hints", type=dict)
    # Mirror ID. Specifying the parameter to create a cloud disk is
    # created from the image.
    # Description:
    # Creating a BMS system disk through BMS mirroring is not supported.
    # NOTE(review): this maps the same body key ("imageRef") as image_id
    # above — confirm whether both attributes are intentional.
    imageRef = resource2.Body("imageRef")
class SnapshotMetadata(resource2.Resource):
    """Manages metadata key/value pairs of a volume snapshot.

    Wraps the ``/snapshots/{snapshot_id}/metadata`` family of endpoints.
    """
    base_path = '/snapshots'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True

    # Properties
    #: A single metadata item, in the form ``{key: value}``.
    meta = resource2.Body('meta')
    #: The full metadata mapping of the snapshot.
    metadata = resource2.Body('metadata')

    def _operate_metadata(self, method, url, has_body=True, **kwargs):
        """Issue one metadata request and translate the response.

        :param method: A bound session HTTP method such as ``session.post``.
        :param url: Request URL relative to the service endpoint.
        :param bool has_body: Whether a response body is expected.
        :returns: This resource, updated from the response.
        """
        request = self._prepare_request(requires_id=False)
        request.uri = url
        endpoint_override = self.service.get_endpoint_override()
        response = method(request.uri,
                          endpoint_filter=self.service,
                          endpoint_override=endpoint_override,
                          **kwargs)
        self._translate_response(response, has_body)
        return self

    def create_metadata(self, session, snapshot_id, metadata):
        """Create metadata for the given snapshot.

        :param session: The session to use for making this request.
        :param snapshot_id: ID of the snapshot to attach metadata to.
        :param dict metadata: Request body, e.g. ``{'metadata': {...}}``.
        :returns: This resource, updated from the response.
        """
        url = utils.urljoin(self.base_path, snapshot_id, 'metadata')
        return self._operate_metadata(session.post, url,
                                      json=metadata, headers={})

    def get_metadata(self, session, snapshot_id, key=None):
        """Fetch all metadata of a snapshot, or one item when *key* is given.

        :param session: The session to use for making this request.
        :param snapshot_id: ID of the snapshot to query.
        :param key: Optional metadata key; when given only that item is
            fetched.
        :returns: This resource, updated from the response.
        """
        if key:
            url = utils.urljoin(self.base_path, snapshot_id, 'metadata', key)
        else:
            url = utils.urljoin(self.base_path, snapshot_id, 'metadata')
        return self._operate_metadata(session.get, url)

    def update_metadata(self, session, snapshot_id, metadata, key=None):
        """Update the snapshot's metadata, or one item when *key* is given.

        :param session: The session to use for making this request.
        :param snapshot_id: ID of the snapshot to update.
        :param dict metadata: Request body; for a single-key update it must
            contain a ``'meta'`` mapping.
        :param key: Optional metadata key; when given, only that key is kept
            in the request body.
        :returns: This resource, updated from the response.
        """
        if key:
            url = utils.urljoin(self.base_path, snapshot_id, 'metadata', key)
        else:
            url = utils.urljoin(self.base_path, snapshot_id, 'metadata')
        if key and metadata.get('meta'):
            # A single-key update may only carry that key in the body.
            # BUG FIX: the original deleted non-matching keys from the
            # OUTER ``metadata`` dict while iterating the nested
            # ``metadata['meta']`` dict; filter the nested dict instead.
            metadata['meta'] = {k: v
                                for k, v in metadata['meta'].items()
                                if k == key}
        return self._operate_metadata(session.put, url,
                                      json=metadata, headers={})

    def delete_metadata(self, session, snapshot_id, key, ignore_missing):
        """Delete one metadata item of a snapshot.

        :param session: The session to use for making this request.
        :param snapshot_id: ID of the snapshot to update.
        :param key: The metadata key to delete.
        :param bool ignore_missing: When ``True`` a missing key is ignored
            and ``None`` is returned; otherwise
            :class:`~openstack.exceptions.ResourceNotFound` is raised.
        :returns: ``None``
        """
        url = utils.urljoin(self.base_path, snapshot_id, 'metadata', key)
        d = {'headers': {'Accept': ''}, 'params': None}
        try:
            self._operate_metadata(session.delete, url, has_body=False, **d)
        except exceptions.NotFoundException as e:
            if ignore_missing:
                return None
            # Reraise with a more specific type and message.
            # BUG FIX: instances have no ``__name__`` attribute; use the
            # class name instead of ``self.__name__``.
            raise exceptions.ResourceNotFound(
                message="No %s found for %s" % (
                    self.__class__.__name__, key),
                details=e.details, response=e.response,
                request_id=e.request_id, url=e.url, method=e.method,
                http_status=e.http_status, cause=e.cause, code=e.code)
class Version(resource2.Resource):
    """An API version entry of the Block Storage service."""
    resource_key = 'version'
    resources_key = 'versions'
    base_path = '/'
    service = block_store_service.BlockStoreService()

    # capabilities
    allow_list = True

    # Properties
    #: Links of the API version.
    links = resource2.Body('links', type=list)
    #: Status of the API version.
    status = resource2.Body('status')
    #: The minimum API version.
    min_version = resource2.Body('min_version')
    #: The request messages type of the API version.
    media_types = resource2.Body('media-types', type=list)
    #: The ID of the API version.
    id = resource2.Body('id')
    #: The last time when the API version is updated.
    updated = resource2.Body('updated')
    #: The sub-version of the API version.
    version = resource2.Body('version')

    @classmethod
    def list(cls, session, paginated=False, **params):
        """This method is a generator which yields resource objects.

        This resource object list generator handles pagination and takes
        query params for response filtering.

        :param session: The session to use for making this request.
        :type session: :class:`~openstack.session.Session`
        :param bool paginated: ``True`` if a GET to this resource returns
            a paginated series of responses, or ``False`` if a GET returns
            only one page of data. **When paginated is False only one page
            of data will be returned regardless of the API's support of
            pagination.**
        :param dict params: These keyword arguments are passed through the
            :meth:`~openstack.resource2.QueryParamter._transpose` method
            to find if any of them match expected query parameters to be
            sent in the *params* argument to
            :meth:`~openstack.session.Session.get`. They are additionally
            checked against the
            :data:`~openstack.resource2.Resource.base_path` format string
            to see if any path fragments need to be filled in by the
            contents of this argument.

        :return: A generator of :class:`Resource` objects.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
            :data:`Resource.allow_list` is not set to ``True``.
        """
        if not cls.allow_list:
            raise exceptions.MethodNotSupported(cls, "list")

        more_data = True
        query_params = cls._query_mapping._transpose(params)
        uri = cls.get_list_uri(params)
        service = cls.get_service_filter(cls, session)
        while more_data:
            endpoint_override = cls.get_endpoint_override(session)
            resp = session.get(uri,
                               endpoint_filter=cls.service,
                               microversion=service.microversion,
                               endpoint_override=endpoint_override,
                               headers={"Accept": "application/json"},
                               params=query_params)
            response_json = resp.json()
            if cls.resources_key:
                resources = cls.find_value_by_accessor(response_json,
                                                       cls.resources_key)
            else:
                resources = response_json

            if not resources:
                more_data = False

            # Keep track of how many items we've yielded. If we yielded
            # less than our limit, we don't need to do an extra request
            # to get back an empty data set, which acts as a sentinel.
            yielded = 0
            new_marker = None
            for data in resources:
                # Do not allow keys called "self" through. Glance chose
                # to name a key "self", so we need to pop it out because
                # we can't send it through cls.existing and into the
                # Resource initializer. "self" is already the first
                # argument and is practically a reserved word.
                data.pop("self", None)

                value = cls.existing(**data)
                new_marker = value.id
                yielded += 1
                yield value

            query_params = dict(query_params)
            # If a `next marker path` is explicitly specified, use it as
            # the marker. A value of -1 signals "no further pages".
            next_marker = cls.get_next_marker(response_json,
                                              yielded,
                                              query_params)
            if next_marker:
                new_marker = next_marker if next_marker != -1 else None
            if not new_marker:
                return
            if not paginated:
                return
            if cls.query_limit_key in query_params:
                # BUG FIX: read the limit through ``cls.query_limit_key``
                # (the same key checked above) instead of the hard-coded
                # "limit", which breaks when the two differ.
                if yielded < query_params[cls.query_limit_key]:
                    return
            query_params[cls.query_limit_key] = yielded
            query_params[cls.query_marker_key] = new_marker

    @classmethod
    def get_endpoint_override(cls, session):
        """Return the service endpoint with the version suffix stripped.

        The catalog endpoint typically ends in a version segment such as
        ``/v2``; splitting on ``'/v'`` keeps only the unversioned base URL.
        """
        endpoint = session.get_endpoint(interface=cls.service.interface,
                                        service_type=cls.service.service_type)
        endpoint_override = endpoint.split('/v')
        return endpoint_override[0]