class StackFiles(resource.Resource):
    """The set of files referenced by a stack."""

    service = orchestration_service.OrchestrationService()
    base_path = "/stacks/%(stack_name)s/%(stack_id)s/files"

    # capabilities: this resource is read-only and only supports fetch.
    allow_create = False
    allow_list = False
    allow_fetch = True
    allow_delete = False
    allow_commit = False

    # Properties
    #: Name of the stack where the template is referenced.
    name = resource.URI('stack_name')
    # Backwards compat
    stack_name = name
    #: ID of the stack where the template is referenced.
    id = resource.URI('stack_id')
    # Backwards compat
    stack_id = id

    def fetch(self, session):
        """Fetch the stack's files.

        The service responds with a map of file names to file contents,
        not a resource representation, so the raw JSON body is returned
        directly instead of being folded into this object.
        """
        req = self._prepare_request(requires_id=False)
        return session.get(req.url).json()
class Datastore(sdk_resource.Resource):
    # Lists the versions available for a given datastore.
    base_path = '/datastores/%(datastore_name)s/versions'
    resource_key = ''
    resources_key = 'dataStores'

    # capabilities
    allow_get = False
    allow_list = True

    # NOTE(review): project_id is declared as a URI property but does not
    # appear in base_path — confirm whether it is still required.
    project_id = resource.URI('project_id')
    #: Name of the datastore whose versions are listed (URI parameter).
    datastore_name = resource.URI('datastore_name')

    #: Indicates the database version ID. Its value is unique.
    #: *Type:string*
    id = resource.Body('id')
    #: Indicates the database version.
    #: *Type:string*
    name = resource.Body('name')
    #: Indicates the database ID.
    #: *Type:string*
    datastore = resource.Body('datastore')
    #: Indicates the database image ID.
    #: *Type:string*
    image = resource.Body('image')
    #: Indicates the database package version information.
    #: *Type:string*
    packages = resource.Body('packages')
    #: Indicates the current database version status. 0 indicates
    #: Non-activated, and 1 indicates Activated.
    #: The interface can only query information of versions that are
    #: activated.
    #: *Type:int*
    active = resource.Body('active', type=int)
class StackTemplate(resource.Resource):
    # The template of an existing stack (read-only).
    service = orchestration_service.OrchestrationService()
    base_path = "/stacks/%(stack_name)s/%(stack_id)s/template"

    # capabilities
    allow_create = False
    allow_list = False
    allow_fetch = True
    allow_delete = False
    allow_commit = False

    # Properties
    #: Name of the stack where the template is referenced.
    name = resource.URI('stack_name')
    # Backwards compat. _stack_name will never match, but the alias will
    # point it to the value pulled for name.
    stack_name = resource.URI('_stack_name', alias='name')
    #: ID of the stack where the template is referenced.
    stack_id = resource.URI('stack_id', alternate_id=True)
    #: The description specified in the template
    description = resource.Body('Description')
    #: The version of the orchestration HOT template.
    heat_template_version = resource.Body('heat_template_version')
    #: Key and value that contain output data.
    outputs = resource.Body('outputs', type=dict)
    #: Key and value pairs that contain template parameters
    parameters = resource.Body('parameters', type=dict)
    #: Key and value pairs that contain definition of resources in the
    #: template
    resources = resource.Body('resources', type=dict)
    #: List of grouped parameters defined in the template.
    parameter_groups = resource.Body('parameter_groups', type=list)
    #: Restrict conditions, supported since template version '2016-10-14'.
    conditions = resource.Body('conditions', type=dict)
class AgencyRole(_base.BadBaseResource):
    # A role granted to an agency on a domain or project.
    resources_key = 'roles'
    base_path = ('/v3.0/OS-AGENCY/%(role_ref_type)ss/%(role_ref_id)s'
                 '/agencies/%(agency_id)s/roles')

    # capabilities
    allow_commit = True
    allow_head = True
    allow_delete = True
    allow_list = True

    # Properties
    #: Type of the entity the role is bound to; pluralized into the URL
    #: (e.g. 'project' -> '/projects/').
    role_ref_type = resource.URI('role_ref_type')
    #: ID of the domain or project the role is bound to.
    role_ref_id = resource.URI('role_ref_id')
    #: ID of the agency the role belongs to (URI parameter).
    agency_id = resource.URI('agency_id')
    #: Directory where a role locates.
    catalog = resource.Body('catalog')
    #: Description of the role.
    description = resource.Body('description')
    #: ID of the domain to which a role belongs.
    domain_id = resource.Body('domain_id')
    #: Name of a role (served as 'display_name' in the response body).
    name = resource.Body('display_name')
    #: Policy of a role.
    policy = resource.Body('policy', type=dict)
    #: Display mode of a role.
    type = resource.Body('type')
class StackEnvironment(resource.Resource):
    # The environment of an existing stack (read-only).
    service = orchestration_service.OrchestrationService()
    base_path = "/stacks/%(stack_name)s/%(stack_id)s/environment"

    # capabilities
    allow_create = False
    allow_list = False
    allow_fetch = True
    allow_delete = False
    allow_commit = False

    # Properties
    #: Name of the stack where the template is referenced.
    name = resource.URI('stack_name')
    # Backwards compat
    stack_name = name
    #: ID of the stack where the template is referenced.
    id = resource.URI('stack_id')
    # Backwards compat
    stack_id = id
    #: A list of parameter names whose values are encrypted
    encrypted_param_names = resource.Body('encrypted_param_names')
    #: A list of event sinks
    event_sinks = resource.Body('event_sinks')
    #: A map of parameters and their default values defined for the stack.
    parameter_defaults = resource.Body('parameter_defaults')
    #: A map of parameters defined in the stack template.
    parameters = resource.Body('parameters', type=dict)
    #: A map containing customized resource definitions.
    resource_registry = resource.Body('resource_registry', type=dict)
class Host(resource.Resource):
    # A compute host registered under a failover segment (masakari).
    resource_key = "host"
    resources_key = "hosts"
    base_path = "/segments/%(segment_id)s/hosts"
    service = instance_ha_service.InstanceHaService()

    # capabilities
    # 1] GET /v1/segments/<segment_uuid>/hosts
    # 2] GET /v1/segments/<segment_uuid>/hosts/<host_uuid>
    # 3] POST /v1/segments/<segment_uuid>/hosts
    # 4] PUT /v1/segments/<segment_uuid>/hosts
    # 5] DELETE /v1/segments/<segment_uuid>/hosts
    allow_list = True
    allow_get = True
    allow_create = True
    allow_update = True
    allow_delete = True

    # Properties
    # Refer "https://github.com/openstack/masakari/blob/
    # master/masakari/api/openstack/ha/schemas/hosts.py"
    # for properties of host API

    #: The ID of this host.
    # NOTE(review): declared as a URI property although base_path contains
    # no %(id)s substitution — confirm this is intentional.
    id = resource.URI("id")
    #: The UUID of this host.
    uuid = resource.Body("uuid")
    #: The failover segment ID of this host (URI parameter).
    segment_id = resource.URI("segment_id")
    #: The creation time of this host.
    created_at = resource.Body("created_at")
    #: The latest update time of this host.
    updated_at = resource.Body("updated_at")
    #: The name of this host.
    name = resource.Body("name")
    #: The type of this host.
    type = resource.Body("type")
    #: The control attributes of this host.
    control_attributes = resource.Body("control_attributes")
    #: The maintenance status of this host.
    on_maintenance = resource.Body("on_maintenance")
    #: The reservation status of this host.
    reserved = resource.Body("reserved")
    #: The failover segment ID of this host (in the request/response body).
    failover_segment_id = resource.Body("failover_segment_id")

    _query_mapping = resource.QueryParameters(
        "sort_key", "sort_dir", failover_segment_id="failover_segment_id",
        type="type", on_maintenance="on_maintenance", reserved="reserved")
class ServerInterface(resource.Resource):
    # A network interface attached to a server.
    resource_key = 'interfaceAttachment'
    resources_key = 'interfaceAttachments'
    base_path = '/servers/%(server_id)s/os-interface'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = False
    allow_delete = True
    allow_list = True

    #: Fixed IP addresses with subnet IDs.
    fixed_ips = resource.Body('fixed_ips')
    #: The MAC address.
    mac_addr = resource.Body('mac_addr')
    #: The network ID.
    net_id = resource.Body('net_id')
    #: The ID of the port for which you want to create an interface.
    port_id = resource.Body('port_id', alternate_id=True)
    #: The port state.
    port_state = resource.Body('port_state')
    #: The ID for the server.
    server_id = resource.URI('server_id')
    #: Tags for the virtual interfaces.
    tag = resource.Body('tag')

    # The 'tag' attribute was introduced in compute microversion 2.70.
    _max_microversion = '2.70'
class TypeEncryption(resource.Resource):
    # Encryption settings associated with a volume type.
    resource_key = "encryption"
    resources_key = "encryption"
    base_path = "/types/%(volume_type_id)s/encryption"

    # capabilities
    allow_fetch = True
    allow_create = True
    allow_delete = True
    allow_list = False
    allow_commit = True

    # Properties
    #: The ID identifying this encryption record.
    encryption_id = resource.Body("encryption_id", alternate_id=True)
    #: The ID of the Volume Type (URI parameter).
    volume_type_id = resource.URI("volume_type_id")
    #: The size of the encryption key.
    key_size = resource.Body("key_size")
    #: The class that provides encryption support.
    provider = resource.Body("provider")
    #: Notional service where encryption is performed.
    control_location = resource.Body("control_location")
    #: The encryption algorithm or mode.
    cipher = resource.Body("cipher")
    #: Whether the resource is deleted or not.
    deleted = resource.Body("deleted")
    #: The date and time when the resource was created.
    created_at = resource.Body("created_at")
    #: The date and time when the resource was updated.
    updated_at = resource.Body("updated_at")
    #: The date and time when the resource was deleted.
    deleted_at = resource.Body("deleted_at")
class VolumeAttachment(resource.Resource):
    # A volume attached to a server.
    resource_key = 'volumeAttachment'
    resources_key = 'volumeAttachments'
    base_path = '/servers/%(server_id)s/os-volume_attachments'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = False
    allow_delete = True
    allow_list = True

    _query_mapping = resource.QueryParameters("limit", "offset")

    #: Name of the device such as, /dev/vdb.
    device = resource.Body('device')
    #: The ID of the attachment.
    id = resource.Body('id')
    #: The ID for the server (URI parameter).
    server_id = resource.URI('server_id')
    #: The ID of the attached volume.
    volume_id = resource.Body('volumeId')
    #: The ID of the attachment you want to delete or update.
    attachment_id = resource.Body('attachment_id', alternate_id=True)
    #: Virtual device tags for the attachment.
    tag = resource.Body('tag')

    # The 'tag' attribute was introduced in compute microversion 2.70.
    _max_microversion = '2.70'
class SnapshotPolicy(resource.Resource):
    # Automatic index snapshot policy of a cluster.
    base_path = '/clusters/%(cluster_id)s/index_snapshot/policy'

    # capabilities
    allow_create = True
    allow_fetch = True

    #: ID of the cluster where automatic snapshot creation is enabled.
    cluster_id = resource.URI('cluster_id')
    #: Retention days for a snapshot.
    backup_keep_days = resource.Body('keepday', type=int)
    #: Time when a snapshot is created every day.
    backup_period = resource.Body('period')
    #: Snapshot name prefix.
    backup_prefix = resource.Body('prefix')
    #: OBS bucket for storing snapshots.
    bucket_name = resource.Body('bucket')
    #: Storage path of the snapshot in the OBS bucket.
    backup_path = resource.Body('basePath')
    #: Agency used to access OBS buckets.
    agency = resource.Body('agency')
    #: Whether to enable the automatic snapshot creation policy
    #: (serialized through the BoolStr_1 formatter).
    enable = resource.Body('enable', type=otc_format.BoolStr_1)
    #: Name of the index to be backed up.
    indices = resource.Body('indices')
    #: Snapshot encryption ID.
    cmk_id = resource.Body('snapshotCmkId')
    #: Whether to delete all automatically created snapshots when the
    #: automatic snapshot creation policy is disabled.
    delete_auto = resource.Body('deleteAuto')
class Vpc(resource.Resource):
    # A Virtual Private Cloud.
    resource_key = 'vpc'
    resources_key = 'vpcs'
    base_path = '/v1/%(project_id)s/vpcs'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True

    # Properties
    #: ID of the project that owns the VPC (URI parameter).
    project_id = resource.URI('project_id')
    #: Specifies the available IP address ranges for subnets in the VPC.
    cidr = resource.Body('cidr')
    #: Provides supplementary information about the VPC.
    description = resource.Body('description')
    #: Specifies whether the shared SNAT function is enabled.
    enable_shared_snat = resource.Body('enable_shared_snat', type=bool)
    #: Specifies the VPC name.
    name = resource.Body('name')
    #: Specifies the route list.
    routes = resource.Body('routes', type=list, list_type=dict)
    #: Specifies the VPC status.
    status = resource.Body('status')
class VolumeAttachment(resource.Resource):
    # A volume attached to a server.
    resource_key = 'volumeAttachment'
    resources_key = 'volumeAttachments'
    base_path = '/servers/%(server_id)s/os-volume_attachments'
    service = compute_service.ComputeService()

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = False
    allow_delete = True
    allow_list = True

    _query_mapping = resource.QueryParameters("limit", "offset")

    #: Name of the device such as, /dev/vdb.
    device = resource.Body('device')
    #: The ID of the attachment.
    id = resource.Body('id')
    #: The ID for the server (URI parameter).
    server_id = resource.URI('server_id')
    #: The ID of the attached volume.
    volume_id = resource.Body('volumeId')
    #: The ID of the attachment you want to delete or update.
    attachment_id = resource.Body('attachment_id', alternate_id=True)
class ClusterNode(_base.Resource):
    """A worker node that belongs to a cluster."""

    base_path = '/clusters/%(cluster_id)s/nodes'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True

    # Properties
    #: Cluster id.
    cluster_id = resource.URI('cluster_id')
    #: Spec
    spec = resource.Body('spec', type=NodeSpec)
    #: Status
    status = resource.Body('status', type=StatusSpec)

    @classmethod
    def new(cls, **kwargs):
        """Build a local node object, filling in the API envelope.

        Missing 'kind'/'apiVersion' keys are defaulted to the values the
        node API expects; explicitly supplied values are left untouched.
        """
        kwargs.setdefault('kind', 'Node')
        kwargs.setdefault('apiVersion', 'v3')
        return cls(_synchronized=False, **kwargs)
class Group(_base.Resource):
    """A consumer group belonging to a DMS queue."""

    # NOTE: we are not interested in the also returned short queue info
    resources_key = 'groups'
    base_path = 'queues/%(queue_id)s/groups'

    # capabilities
    allow_create = True
    allow_list = True
    allow_delete = True

    _query_mapping = resource.QueryParameters(
        'include_deadletter'
    )

    # Properties
    #: ID of the queue the group belongs to (URI parameter).
    queue_id = resource.URI('queue_id')
    #: Consumer group ID.
    id = resource.Body('id')
    #: Name
    name = resource.Body('name')
    #: Total message number, not including deleted message
    #: *Type: int*
    produced_messages = resource.Body('produced_messages', type=int)
    #: Consumed message number
    #: *Type: int*
    consumed_messages = resource.Body('consumed_messages', type=int)
    #: Available message number
    #: *Type: int*
    available_messages = resource.Body('available_messages', type=int)
    #: Total deadletters number
    #: *Type: int*
    produced_deadletters = resource.Body('produced_deadletters', type=int)
    #: Available deadletters number
    #: *Type: int*
    available_deadletters = resource.Body('available_deadletters', type=int)

    def create(self, session, group):
        """Create a consumer group named *group* in this queue.

        The service expects a 'groups' list in the request body and
        returns the created groups under the same key; the first (and
        only) entry is folded back into this resource instance.
        """
        body = {"groups": [{'name': group}]}
        request = self._prepare_request(requires_id=False,
                                        prepend_key=False)
        response = session.post(
            request.url,
            json=body,
            # NOTE(review): Content-Length is derived from str(body),
            # which may not match the JSON encoding the session actually
            # sends (e.g. with non-ASCII group names) — confirm the
            # service really requires this header to be set manually.
            headers={'Content-Length': str(len(str(body)))})
        # Squeeze the returned groups list into this single entity.
        resp = response.json()
        if self.resources_key in resp:
            res = resp[self.resources_key][0]
            res = self._consume_body_attrs(res)
            self._body.attributes.update(res)
            self._body.clean()
        return self
class ServerRemoteConsole(resource.Resource):
    """A remote console endpoint for a server."""

    resource_key = 'remote_console'
    base_path = '/servers/%(server_id)s/remote-consoles'

    # capabilities: consoles can only be created.
    allow_create = True
    allow_fetch = False
    allow_commit = False
    allow_delete = False
    allow_list = False

    _max_microversion = '2.8'

    #: Protocol of the remote console.
    protocol = resource.Body('protocol')
    #: Type of the remote console.
    type = resource.Body('type')
    #: URL used to connect to the console.
    url = resource.Body('url')
    #: The ID for the server.
    server_id = resource.URI('server_id')

    def create(self, session, prepend_key=True, base_path=None, **params):
        """Create the console, defaulting the protocol from the type.

        Raises ValueError for a 'webmks' console when the service does
        not support compute microversion 2.8.
        """
        if not self.protocol:
            self.protocol = CONSOLE_TYPE_PROTOCOL_MAPPING.get(self.type)
        lacks_mv_2_8 = not utils.supports_microversion(session, '2.8')
        if lacks_mv_2_8 and self.type == 'webmks':
            raise ValueError('Console type webmks is not supported on '
                             'server side')
        return super(ServerRemoteConsole, self).create(
            session,
            prepend_key=prepend_key,
            base_path=base_path,
            **params)
class FloatingIPEvent(resource.Resource):
    # Anti-DDoS protection event log of a floating IP.
    resources_key = 'logs'
    base_path = '/antiddos/%(floating_ip_id)s/logs'

    _query_mapping = resource.QueryParameters('limit', 'offset', 'sort_dir')

    # capabilities
    allow_list = True

    # Properties
    #: ID of the floating IP the logs belong to (URI parameter).
    floating_ip_id = resource.URI('floating_ip_id')
    #: Start time, converted through the TimeTMsStr formatter
    #: (millisecond timestamp on the wire).
    start_time = resource.Body('start_time', type=format.TimeTMsStr)
    #: End time, converted through the TimeTMsStr formatter
    #: (millisecond timestamp on the wire).
    end_time = resource.Body('end_time', type=format.TimeTMsStr)
    #: Anti-ddos status
    #: Defense status, the possible value of which is one of the following:
    #: * 1: indicates that traffic cleaning is underway.
    #: * 2: indicates that traffic is discarded.
    #: *Type: int*
    status = resource.Body('status', type=int)
    #: Trigger bps (bit/s)
    #: *Type: int*
    trigger_bps = resource.Body('trigger_bps', type=int)
    #: Trigger packets per second
    #: *Type: int*
    trigger_pps = resource.Body('trigger_pps', type=int)
    #: Trigger HTTP requests per second
    #: *Type: int*
    trigger_http_pps = resource.Body('trigger_http_pps', type=int)
class Flavor(resource.Resource):
    # A database flavor, listed per datastore.
    base_path = '/flavors/%(datastore_name)s'
    resources_key = 'flavors'

    # capabilities
    allow_list = True

    _query_mapping = resource.QueryParameters('version_name')

    #: Name of the datastore the flavors belong to (URI parameter).
    datastore_name = resource.URI('datastore_name')

    #: Instance Mode (single/ha/replica)
    # NOTE(review): documented as *Type: int* previously, but no int
    # conversion is applied — the raw body value is used.
    instance_mode = resource.Body('instance_mode')
    #: Ram size in MB.
    #: *Type:int*
    ram = resource.Body('ram', type=int)
    #: Flavor name (aliased to spec_code when absent).
    #: *Type:str*
    name = resource.Body('name', alias='spec_code')
    #: Specification code
    #: *Type: str*
    spec_code = resource.Body('spec_code')
    #: Amount of VCPU's
    #: *Type: str*
    vcpus = resource.Body('vcpus')
class Flavor(resource.Resource):
    # A DDS database flavor.
    base_path = '/flavors'
    resources_key = 'flavors'

    # capabilities
    allow_list = True

    _query_mapping = resource.QueryParameters(
        'region', 'engine_name')

    # NOTE(review): region is declared as a URI property but base_path
    # contains no %(region)s substitution; it is also a query parameter
    # above — confirm whether the URI declaration is needed.
    region = resource.URI('region')

    # Properties
    #: Indicates the engine name.
    engine_name = resource.Body('engine_name')
    #: Indicates the node type. DDS contains the following types of nodes:
    #: * mongos
    #: * shard
    #: * config
    #: * replica
    type = resource.Body('type')
    #: Number of vCPUs.
    vcpus = resource.Body('vcpus')
    #: Indicates the memory size in gigabyte (GB).
    ram = resource.Body('ram')
    #: Indicates the resource specifications code.
    spec_code = resource.Body('spec_code')
    #: Indicates the status of specifications in an AZ.
    az_status = resource.Body('az_status')
class Restore(resource.Resource):
    """CBR Backup Restore Resource"""
    resource_key = 'restore'
    resources_key = ''
    base_path = '/backups/%(backup_id)s/restore'

    # capabilities: restore is a create-only action.
    allow_create = True
    allow_list = False
    allow_fetch = False
    allow_delete = False
    allow_commit = False

    _query_mapping = resource.QueryParameters()

    # Properties
    #: URI backup reference
    backup = resource.URI('backup_id')
    #: Restore mapping relationship.
    #: Mandatory for VM restoration and optional for disk restoration.
    mappings = resource.Body('mappings', type=Mappings)
    #: Whether the server is powered on after restoration.
    #: Default: True
    power_on = resource.Body('power_on', type=bool)
    #: ID of the resource to be restored
    resource_id = resource.Body('resource_id')
    #: ID of the target VM to be restored.
    #: Mandatory for VM restoration.
    server_id = resource.Body('server_id')
    #: ID of the target disk to be restored.
    #: This parameter is mandatory for disk restoration.
    volume_id = resource.Body('volume_id')
class ApplicationCredential(resource.Resource):
    # A keystone application credential owned by a user.
    resource_key = 'application_credential'
    resources_key = 'application_credentials'
    base_path = '/users/%(user_id)s/application_credentials'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True

    # Properties
    #: User ID using application credential. *Type: string*
    user_id = resource.URI('user_id')
    #: User object using application credential. *Type: string*
    user = resource.Body('user')
    #: The links for the application credential resource.
    links = resource.Body('links')
    #: Name of the application credential. *Type: string*
    name = resource.Body('name')
    #: Secret that application credential will be created with, if any.
    #: *Type: string*
    secret = resource.Body('secret')
    #: Description of application credential's purpose. *Type: string*
    description = resource.Body('description')
    #: Expiry time of application credential. *Type: string*
    expires_at = resource.Body('expires_at')
    #: Roles of the user. *Type: list*
    roles = resource.Body('roles')
    #: Whether the application credential is unrestricted. *Type: boolean*
    unrestricted = resource.Body('unrestricted', type=bool)
    #: ID of project. *Type: string*
    project_id = resource.Body('project_id')
class PortForwarding(resource.Resource):
    # A port-forwarding rule attached to a floating IP.
    name_attribute = "floating_ip_port_forwarding"
    resource_name = "port forwarding"
    resource_key = 'port_forwarding'
    resources_key = 'port_forwardings'
    base_path = '/floatingips/%(floatingip_id)s/port_forwardings'
    # Accept fields the SDK does not know about in the response body.
    _allow_unknown_attrs_in_body = True

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True

    _query_mapping = resource.QueryParameters('internal_port_id',
                                              'external_port',
                                              'protocol')

    # Properties
    #: The ID of Floating IP address
    floatingip_id = resource.URI('floatingip_id')
    #: The ID of internal port
    internal_port_id = resource.Body('internal_port_id')
    #: The internal IP address
    internal_ip_address = resource.Body('internal_ip_address')
    #: The internal TCP/UDP/other port number
    internal_port = resource.Body('internal_port', type=int)
    #: The external TCP/UDP/other port number
    external_port = resource.Body('external_port', type=int)
    #: The protocol
    protocol = resource.Body('protocol')
    #: The description
    description = resource.Body('description')
class Message(resource.Resource): base_path = '/notifications/topics/%(topic_urn)s/publish' # capabilities allow_create = True #: Message ID, which is unique message_id = resource.Body('message_id') #: Request ID, which is unique. request_id = resource.Body('request_id') #: Message subject, which is used as the email #: subject when you publish email messages. subject = resource.Body('subject') #: Message content. #: The message content is a UTF-8-coded character string of #: no more than 256 KB. For SMS subscribers, if the content #: exceeds 256 bytes, the system will divide it into multiple #: messages and send only the first two. message = resource.Body('message') #: Message structure, which contains JSON character strings. #: Specify protocols in the structure, which can be http, #: https, email, dms, and sms. message_structure = resource.Body('message_structure') #: Message template name, which can be obtained according #: to Querying Message Templates message_template_name = resource.Body('message_template_name') #: Time-to-live (TTL) of a message, specifically, the maximum time #: period for retaining the message in the system time_to_live = resource.Body('time_to_live') #: Resource identifier of a topic, which is unique topic_urn = resource.URI('topic_urn')
class Topic(_base.Resource):
    """DMS Topic resource"""
    resources_key = 'topics'
    base_path = '/instances/%(instance_id)s/topics'

    # capabilities
    allow_list = True
    allow_create = True
    allow_delete = True

    # _query_mapping = resource.QueryParameters('instance_id')

    #: ID of the DMS instance the topic belongs to (URI parameter).
    instance_id = resource.URI('instance_id')

    # Properties
    #: Synchronous flushing. Default=false
    is_sync_flush = resource.Body('sync_message_flush', type=bool)
    #: Synchronous replication. Default=false. With replication=1 can be
    #: only false.
    is_sync_replication = resource.Body('sync_replication', type=bool)
    #: Number of partitions. Default=3
    partition = resource.Body('partition', type=int)
    #: Replication factor. Default=3
    replication = resource.Body('replication', type=int)
    #: Retention time in hours. Default=72
    retention_time = resource.Body('retention_time', type=int)
class ClusterPolicy(resource.Resource):
    # The binding between a cluster and a policy (read-only).
    resource_key = 'cluster_policy'
    resources_key = 'cluster_policies'
    base_path = '/clusters/%(cluster_id)s/policies'
    service = clustering_service.ClusteringService()

    # Capabilities
    allow_list = True
    allow_get = True

    _query_mapping = resource.QueryParameters(
        'sort', 'policy_name', 'policy_type', is_enabled='enabled')

    # Properties
    #: ID of the policy object.
    policy_id = resource.Body('policy_id', alternate_id=True)
    #: Name of the policy object.
    policy_name = resource.Body('policy_name')
    #: ID of the cluster object (URI parameter).
    cluster_id = resource.URI('cluster_id')
    #: Name of the cluster object.
    cluster_name = resource.Body('cluster_name')
    #: Type string of the policy.
    policy_type = resource.Body('policy_type')
    #: Whether the policy is enabled on the cluster. *Type: bool*
    is_enabled = resource.Body('enabled', type=bool)
    #: Data associated with the cluster-policy binding.
    data = resource.Body('data', type=dict)
class ServerInterface(resource.Resource):
    # A network interface attached to a server.
    resource_key = 'interfaceAttachment'
    resources_key = 'interfaceAttachments'
    base_path = '/servers/%(server_id)s/os-interface'
    service = compute_service.ComputeService()

    # capabilities
    allow_create = True
    allow_get = True
    allow_update = False
    allow_delete = True
    allow_list = True

    #: Fixed IP addresses with subnet IDs.
    fixed_ips = resource.Body('fixed_ips')
    #: The MAC address.
    mac_addr = resource.Body('mac_addr')
    #: The network ID.
    net_id = resource.Body('net_id')
    #: The ID of the port for which you want to create an interface.
    port_id = resource.Body('port_id', alternate_id=True)
    #: The port state.
    port_state = resource.Body('port_state')
    #: The ID for the server (URI parameter).
    server_id = resource.URI('server_id')
class ClusterAttr(resource.Resource):
    # An attribute value collected from the nodes of a cluster.
    resources_key = 'cluster_attributes'
    base_path = '/clusters/%(cluster_id)s/attrs/%(path)s'

    # capabilities
    allow_list = True

    # Properties
    #: The identity of the cluster
    cluster_id = resource.URI('cluster_id')
    #: The json path string for attribute retrieval
    path = resource.URI('path')
    #: The id of the node that carries the attribute value.
    node_id = resource.Body('id')
    #: The value of the attribute requested.
    attr_value = resource.Body('value')
class User(resource.Resource):
    # A database user of a trove instance.
    resource_key = 'user'
    resources_key = 'users'
    base_path = '/instances/%(instance_id)s/users'
    service = database_service.DatabaseService()

    # capabilities
    allow_create = True
    allow_delete = True
    allow_list = True

    #: ID of the database instance the user belongs to (URI parameter).
    instance_id = resource.URI('instance_id')

    # Properties
    #: Databases the user has access to
    databases = resource.Body('databases')
    #: The name of the user (also serves as the resource identifier).
    name = resource.Body('name', alternate_id=True)
    #: The password of the user
    password = resource.Body('password')

    def _prepare_request(self, requires_id=True, prepend_key=True):
        """Prepare a request for the database service's create call.

        User.create calls require the resources_key (plural 'users');
        the base _prepare_request would insert the resource_key
        (singular 'user') instead, which the service rejects.
        """
        # Wrap only the dirty (locally modified) attributes under the
        # plural key.
        body = {self.resources_key: self._body.dirty}

        uri = self.base_path % self._uri.attributes
        # self.id resolves to the user name via alternate_id above.
        uri = utils.urljoin(uri, self.id)

        return resource._Request(uri, body, None)
class FederationProtocol(resource.Resource):
    # A federation protocol attached to an identity provider.
    resource_key = 'protocol'
    resources_key = 'protocols'
    base_path = '/OS-FEDERATION/identity_providers/%(idp_id)s/protocols'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True
    # The protocol name is carried in the URL, not the request body.
    create_exclude_id_from_body = True
    create_method = 'PUT'
    commit_method = 'PATCH'

    _query_mapping = resource.QueryParameters('id', )

    # Properties
    #: name of the protocol (read only), served from the 'id' field
    #: *Type: string*
    name = resource.Body('id')
    #: The ID of the identity provider the protocol is attached to.
    #: *Type: string*
    idp_id = resource.URI('idp_id')
    #: The ID of the mapping used by this protocol.
    #: *Type: string*
    mapping_id = resource.Body('mapping_id')
class Member(resource.Resource):
    # A tenant an image is shared with.
    resources_key = 'members'
    base_path = '/images/%(image_id)s/members'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True

    # See https://bugs.launchpad.net/glance/+bug/1526991 for member/member_id
    # 'member' is documented incorrectly as being deprecated but it's the
    # only thing that works. 'member_id' is not accepted.

    #: The ID of the image member. An image member is a tenant
    #: with whom the image is shared.
    member_id = resource.Body('member', alternate_id=True)
    #: The date and time when the member was created.
    created_at = resource.Body('created_at')
    #: Image ID stored through the image API. Typically a UUID.
    image_id = resource.URI('image_id')
    #: The status of the image.
    status = resource.Body('status')
    #: The URL for schema of the member.
    schema = resource.Body('schema')
    #: The date and time when the member was updated.
    updated_at = resource.Body('updated_at')
class Host(resource.Resource):
    # A compute host registered under a failover segment (masakari).
    resource_key = "host"
    resources_key = "hosts"
    base_path = "/segments/%(segment_id)s/hosts"

    # capabilities
    # 1] GET /v1/segments/<segment_uuid>/hosts
    # 2] GET /v1/segments/<segment_uuid>/hosts/<host_uuid>
    # 3] POST /v1/segments/<segment_uuid>/hosts
    # 4] PUT /v1/segments/<segment_uuid>/hosts
    # 5] DELETE /v1/segments/<segment_uuid>/hosts
    allow_list = True
    allow_fetch = True
    allow_create = True
    allow_commit = True
    allow_delete = True

    #: The ID of this host.
    # NOTE(review): declared as a URI property although base_path contains
    # no %(id)s substitution — confirm this is intentional.
    id = resource.URI("id")
    #: The UUID of this host.
    uuid = resource.Body("uuid")
    #: The failover segment ID of this host (URI parameter).
    segment_id = resource.URI("segment_id")
    #: The creation time of this host.
    created_at = resource.Body("created_at")
    #: The latest update time of this host.
    updated_at = resource.Body("updated_at")
    #: The name of this host.
    name = resource.Body("name")
    #: The type of this host.
    type = resource.Body("type")
    #: The control attributes of this host.
    control_attributes = resource.Body("control_attributes")
    #: The maintenance status of this host.
    on_maintenance = resource.Body("on_maintenance")
    #: The reservation status of this host.
    reserved = resource.Body("reserved")
    #: The failover segment ID of this host (in the request/response body).
    failover_segment_id = resource.Body("failover_segment_id")

    _query_mapping = resource.QueryParameters(
        "sort_key", "sort_dir", failover_segment_id="failover_segment_id",
        type="type", on_maintenance="on_maintenance", reserved="reserved")