def _create_ports(self, payload):
    """Bulk-create ports via a raw POST through the openstacksdk session."""
    # TODO(gryf): this function should be removed while we update openstacksdk
    # version to 0.42.
    # The bulk-create API expects the colon-separated "binding:*" attribute
    # names, while callers hand us the underscore form openstacksdk uses.
    renames = (('binding_host_id', 'binding:host_id'),
               ('binding_profile', 'binding:profile'),
               ('binding_vif_details', 'binding:vif_details'),
               ('binding_vif_type', 'binding:vif_type'),
               ('binding_vnic_type', 'binding:vnic_type'))
    for port in payload['ports']:
        for old_key, new_key in renames:
            if old_key in port:
                port[new_key] = port.pop(old_key)

    response = self.post(os_port.Port.base_path, json=payload)
    if not response.ok:
        raise os_exc.SDKException('Error when bulk creating ports: %s'
                                  % response.text)
    # Lazily wrap each returned port dict in a Port resource object.
    return (os_port.Port(**raw) for raw in response.json()['ports'])
def map_exceptions_wrapper(*args, **kwargs):
    """Call ``func`` and translate low-level HTTP/client errors to SDK ones."""
    try:
        return func(*args, **kwargs)
    except _exceptions.HttpError as e:
        # A 404 maps to the dedicated NotFoundException; every other HTTP
        # error maps to the generic HttpException. Both carry the full
        # original request/response context and chain the cause.
        exc_class = (exceptions.NotFoundException if e.http_status == 404
                     else exceptions.HttpException)
        raise exc_class(message=e.message, details=e.details,
                        response=e.response, request_id=e.request_id,
                        url=e.url, method=e.method,
                        http_status=e.http_status, cause=e)
    except _exceptions.ClientException as e:
        # Non-HTTP client-side failures become plain SDKExceptions.
        raise exceptions.SDKException(message=e.message, cause=e)
def __init__(self, session=None, authenticator=None, profile=None,
             verify=True, cert=None, user_agent=None,
             auth_plugin="password", **auth_args):
    """Create a context for a connection to a cloud provider.

    A connection needs a transport and an authenticator. The user may
    pass in a transport and authenticator they want to use or they may
    pass in the parameters to create a transport and authenticator. The
    connection creates a :class:`~openstack.session.Session` which uses
    the profile and authenticator to perform HTTP requests.

    :param session: A session object compatible with
        :class:`~openstack.session.Session`.
    :type session: :class:`~openstack.session.Session`
    :param authenticator: An authenticator derived from the base
        authenticator plugin that was previously created. Two common
        authentication identity plugins are
        :class:`identity_v2 <openstack.auth.identity.v2.Auth>` and
        :class:`identity_v3 <openstack.auth.identity.v3.Auth>`. If this
        parameter is not passed in, the connection will create an
        authenticator.
    :type authenticator: :class:`~openstack.auth.base.BaseAuthPlugin`
    :param profile: If the user has any special profiles such as the
        service name, region, version or interface, they may be provided
        in the profile object. If no profiles are provided, the services
        that appear first in the service catalog will be used.
    :type profile: :class:`~openstack.profile.Profile`
    :param bool verify: If a transport is not provided to the connection,
        this parameter will be used to create a transport. If ``verify``
        is set to true, which is the default, the SSL cert will be
        verified. It can also be set to a CA_BUNDLE path.
    :param cert: If a transport is not provided to the connection then
        this parameter will be used to create a transport. `cert` allows
        to provide a client certificate file path or a tuple with client
        certificate and key paths.
    :type cert: str or tuple
    :param str user_agent: If a transport is not provided to the
        connection, this parameter will be used when creating a
        transport. The value given here will be prepended to the default,
        which is specified in :attr:`~openstack.transport.USER_AGENT`.
        The resulting ``user_agent`` value is used for the ``User-Agent``
        HTTP header.
    :param str auth_plugin: The name of authentication plugin to use.
        The default value is ``password``.
    :param auth_args: The rest of the parameters provided are assumed to
        be authentication arguments that are used by the authentication
        plugin.
    """
    # Fall back to a default Profile when the caller supplies none.
    self.profile = profile if profile else _profile.Profile()
    if session:
        # Make sure it is the right kind of session. A keystoneauth1
        # session would work in some ways but show strange errors in
        # others. E.g. a Resource.find would work with an id but fail when
        # given a name because it attempts to catch
        # openstack.exceptions.NotFoundException to signal that a search by
        # ID failed before trying a search by name, but with a
        # keystoneauth1 session the lookup by ID raises
        # keystoneauth1.exceptions.NotFound instead. We need to ensure our
        # Session class gets used so that our implementation of various
        # methods always works as we expect.
        if not isinstance(session, _session.Session):
            raise exceptions.SDKException(
                'Session instance is from %s but must be from %s' %
                (session.__module__, _session.__name__))
        self.session = session
    else:
        # No session given: build an authenticator (possibly from the
        # supplied plugin name and kwargs) and construct our own Session.
        self.authenticator = self._create_authenticator(
            authenticator, auth_plugin, **auth_args)
        self.session = _session.Session(
            self.profile, auth=self.authenticator, verify=verify,
            cert=cert, user_agent=user_agent)
    # Finalize connection setup (defined elsewhere on this class).
    self._open()
def _gen_openstack_sdk_error(self, message='SomeThingIsGoingWrong'):
    """Return a MagicMock that raises ``SDKException(message)`` when called."""
    failure = exceptions.SDKException(message=message)
    return mock.MagicMock(side_effect=failure)
def create_image(self, name, filename=None,
                 container=None,
                 md5=None, sha256=None,
                 disk_format=None, container_format=None,
                 disable_vendor_agent=True,
                 allow_duplicates=False, meta=None, wait=False,
                 timeout=3600, data=None, validate_checksum=True,
                 **kwargs):
    """Upload an image.

    :param str name: Name of the image to create. If it is a pathname
        of an image, the name will be constructed from the extensionless
        basename of the path.
    :param str filename: The path to the file to upload, if needed.
        (optional, defaults to None)
    :param data: Image data (string or file-like object). It is mutually
        exclusive with filename
    :param str container: Name of the container in swift where images
        should be uploaded for import if the cloud requires such a thing.
        (optional, defaults to 'images')
    :param str md5: md5 sum of the image file. If not given, an md5 will
        be calculated.
    :param str sha256: sha256 sum of the image file. If not given, an md5
        will be calculated.
    :param str disk_format: The disk format the image is in. (optional,
        defaults to the os-client-config config value for this cloud)
    :param str container_format: The container format the image is in.
        (optional, defaults to the os-client-config config value for this
        cloud)
    :param bool disable_vendor_agent: Whether or not to append metadata
        flags to the image to inform the cloud in question to not expect a
        vendor agent to be runing. (optional, defaults to True)
    :param allow_duplicates: If true, skips checks that enforce unique
        image name. (optional, defaults to False)
    :param meta: A dict of key/value pairs to use for metadata that
        bypasses automatic type conversion.
    :param bool wait: If true, waits for image to be created. Defaults to
        true - however, be aware that one of the upload methods is always
        synchronous.
    :param timeout: Seconds to wait for image creation. None is forever.
    :param bool validate_checksum: If true and cloud returns checksum,
        compares return value with the one calculated or passed into this
        call. If value does not match - raises exception. Default is
        'false'

    Additional kwargs will be passed to the image creation as additional
    metadata for the image and will have all values converted to string
    except for min_disk, min_ram, size and virtual_size which will be
    converted to int.

    If you are sure you have all of your data types correct or have an
    advanced need to be explicit, use meta. If you are just a normal
    consumer, using kwargs is likely the right choice.

    If a value is in meta and kwargs, meta wins.

    :returns: A ``munch.Munch`` of the Image object
    :raises: SDKException if there are problems uploading
    """
    if container is None:
        container = self._connection._OBJECT_AUTOCREATE_CONTAINER
    if not meta:
        meta = {}
    if not disk_format:
        disk_format = self._connection.config.config['image_format']
    if not container_format:
        # https://docs.openstack.org/image-guide/image-formats.html
        container_format = 'bare'
    # filename and data are mutually exclusive input channels.
    if data and filename:
        raise exceptions.SDKException(
            'Passing filename and data simultaneously is not supported')
    # If there is no filename, see if name is actually the filename
    if not filename and not data:
        name, filename = self._get_name_and_filename(
            name, self._connection.config.config['image_format'])
    # Checksums can only be computed locally for raw bytes or a file.
    if validate_checksum and data and not isinstance(data, bytes):
        raise exceptions.SDKException(
            'Validating checksum is not possible when data is not a '
            'direct binary object')
    if not (md5 or sha256) and validate_checksum:
        if filename:
            (md5, sha256) = self._connection._get_file_hashes(filename)
        elif data and isinstance(data, bytes):
            (md5, sha256) = self._connection._calculate_data_hashes(data)
    if allow_duplicates:
        current_image = None
    else:
        # Deduplicate by name: reuse an existing image when its stored
        # hashes match the local ones.
        current_image = self.find_image(name)
        if current_image:
            props = current_image.get('properties', {})
            md5_key = props.get(self._IMAGE_MD5_KEY,
                                props.get(self._SHADE_IMAGE_MD5_KEY, ''))
            sha256_key = props.get(
                self._IMAGE_SHA256_KEY,
                props.get(self._SHADE_IMAGE_SHA256_KEY, ''))
            up_to_date = self._connection._hashes_up_to_date(
                md5=md5, sha256=sha256,
                md5_key=md5_key, sha256_key=sha256_key)
            if up_to_date:
                self.log.debug(
                    "image %(name)s exists and is up to date",
                    {'name': name})
                return current_image
            else:
                self.log.debug(
                    "image %(name)s exists, but contains different "
                    "checksums. Updating.", {'name': name})
    if disable_vendor_agent:
        kwargs.update(
            self._connection.config.config['disable_vendor_agent'])
    # If a user used the v1 calling format, they will have
    # passed a dict called properties along
    properties = kwargs.pop('properties', {})
    # Record local hashes and the swift object path in the properties.
    properties[self._IMAGE_MD5_KEY] = md5 or ''
    properties[self._IMAGE_SHA256_KEY] = sha256 or ''
    properties[self._IMAGE_OBJECT_KEY] = '/'.join([container, name])
    kwargs.update(properties)
    image_kwargs = dict(properties=kwargs)
    if disk_format:
        image_kwargs['disk_format'] = disk_format
    if container_format:
        image_kwargs['container_format'] = container_format

    if filename or data:
        # Upload path: push the bits as part of image creation.
        image = self._upload_image(name, filename=filename, data=data,
                                   meta=meta,
                                   wait=wait, timeout=timeout,
                                   validate_checksum=validate_checksum,
                                   **image_kwargs)
    else:
        # Metadata-only registration, no payload.
        image = self._create_image(**image_kwargs)
    # Image list changed; drop the cached listing.
    self._connection._get_cache(None).invalidate()
    return image
def generate_form_signature(self, container, object_prefix, redirect_url,
                            max_file_size, max_upload_count, timeout,
                            temp_url_key=None):
    """Generate a signature for a FormPost upload.

    :param container: The value can be the name of a container or a
        :class:`~openstack.object_store.v1.container.Container` instance.
    :param object_prefix: Prefix to apply to limit all object names
        created using this signature.
    :param redirect_url: The URL to redirect the browser to after the
        uploads have completed.
    :param max_file_size: The maximum file size per file uploaded.
    :param max_upload_count: The maximum number of uploaded files allowed.
    :param timeout: The number of seconds from now to allow the form post
        to begin.
    :param temp_url_key: The X-Account-Meta-Temp-URL-Key for the account.
        Optional, if omitted, the key will be fetched from the container
        or the account.
    """
    # Validate numeric form limits up front.
    max_file_size = int(max_file_size)
    if max_file_size < 1:
        raise exceptions.SDKException(
            'Please use a positive max_file_size value.')
    max_upload_count = int(max_upload_count)
    if max_upload_count < 1:
        raise exceptions.SDKException(
            'Please use a positive max_upload_count value.')
    if timeout < 1:
        raise exceptions.SDKException(
            'Please use a positive <timeout> value.')
    # Absolute expiry timestamp for the signature.
    expires = int(time.time() + int(timeout))

    if temp_url_key:
        # HMAC needs a bytes key.
        if not isinstance(temp_url_key, six.binary_type):
            temp_url_key = temp_url_key.encode('utf8')
    else:
        temp_url_key = self.get_temp_url_key(container)
    if not temp_url_key:
        raise exceptions.SDKException(
            'temp_url_key was not given, nor was a temporary url key'
            ' found for the account or the container.')

    container_res = self._get_resource(_container.Container, container)
    endpoint = parse.urlparse(self.get_endpoint())
    signed_path = '/'.join([endpoint.path, container_res.name,
                            object_prefix])
    # FormPost signature body: path, redirect, limits, expiry —
    # newline-separated, in exactly this order.
    hmac_body = '%s\n%s\n%s\n%s\n%s' % (signed_path, redirect_url,
                                        max_file_size, max_upload_count,
                                        expires)
    if six.PY3:
        hmac_body = hmac_body.encode('utf8')
    signature = hmac.new(temp_url_key, hmac_body, sha1).hexdigest()
    return (expires, signature)
def _upload_image_task(self, name, filename, wait, timeout, meta,
                       **image_kwargs):
    """Upload an image via the task (import) workflow.

    The image bits are first uploaded as a swift object, then a glance
    'import' task is created pointing at that object. Requires an
    object-store service in the cloud.

    :param str name: Image (and swift object) name.
    :param str filename: Path of the local file to upload.
    :param bool wait: If true, wait for the task to succeed and return
        the resulting image; otherwise return the task itself.
    :param timeout: Seconds to wait for task success.
    :param meta: Unused here; kept for signature parity with the other
        upload strategies.
    :param image_kwargs: Image attributes; must contain ``properties``
        with the md5/sha256/object keys set by the caller.
    :raises: SDKException when no object-store service is available or
        (when waiting) when the import task fails.
    """
    if not self._connection.has_service('object-store'):
        raise exceptions.SDKException(
            "The cloud {cloud} is configured to use tasks for image"
            " upload, but no object-store service is available."
            " Aborting.".format(cloud=self._connection.config.name))
    properties = image_kwargs.get('properties', {})
    # These keys are set by the caller (create_image) before dispatching
    # to this strategy.
    md5 = properties[self._IMAGE_MD5_KEY]
    sha256 = properties[self._IMAGE_SHA256_KEY]
    container = properties[self._IMAGE_OBJECT_KEY].split('/', 1)[0]
    # The task import path ignores these; glance decides formats itself.
    image_kwargs.pop('disk_format', None)
    image_kwargs.pop('container_format', None)

    self._connection.create_container(container)
    self._connection.create_object(
        container, name, filename,
        md5=md5, sha256=sha256,
        metadata={self._connection._OBJECT_AUTOCREATE_KEY: 'true'},
        **{'content-type': 'application/octet-stream'})
    # TODO(mordred): Can we do something similar to what nodepool does
    # using glance properties to not delete then upload but instead make a
    # new "good" image and then mark the old one as "bad"
    task_args = dict(
        type='import', input=dict(
            import_from='{container}/{name}'.format(
                container=container, name=name),
            image_properties=dict(name=name)))
    glance_task = self.create_task(**task_args)
    # The image list has changed; drop the cached listing.
    self._connection.list_images.invalidate(self)
    if wait:
        start = time.time()
        try:
            glance_task = self.wait_for_task(
                task=glance_task, status='success', wait=timeout)
            image_id = glance_task.result['image_id']
            image = self.get_image(image_id)
            # NOTE(gtema): Since we might move unknown attributes of
            # the image under properties - merge current with update
            # properties not to end up removing "existing" properties
            props = image.properties.copy()
            props.update(image_kwargs.pop('properties', {}))
            image_kwargs['properties'] = props
            image = self.update_image(image, **image_kwargs)
            self.log.debug(
                "Image Task %s imported %s in %s",
                glance_task.id, image_id, (time.time() - start))
        except exceptions.ResourceFailure as e:
            # Re-fetch the task so the raised exception carries the
            # final task state as extra data.
            glance_task = self.get_task(glance_task)
            raise exceptions.SDKException(
                "Image creation failed: {message}".format(
                    message=e.message),
                extra_data=glance_task)
        finally:
            # Clean up after ourselves. The object we created is not
            # needed after the import is done.
            self._connection.delete_object(container, name)
            self._connection.list_images.invalidate(self)
        return image
    else:
        return glance_task
def create_rds_instance(self, name, wait=True, timeout=180, wait_interval=5,
                        **kwargs):
    """Create RDS instance with all the checks

    :param str availability_zone:
    :param str backup: Name or ID of the backup to create instance from
        (when from_instance is passed).
    :param int backup_keepdays:
    :param str backup_timeframe:
    :param str charge_mode: Charge mode.
    :param str configuration: dict(type=str),
    :param str datastore_type: dict(type=str, default='postgresql'),
    :param str datastore_version: dict(type=str),
    :param str disk_encryption_id: dict(type=str),
    :param str flavor: dict(required=True, type=str),
    :param str from_instance: Name or ID of the instance to create
        instance from (requires from_backup of restore_time).
    :param str ha_mode: HA mode. choices=['async', 'semisync', 'sync']
    :param str name: dict(required=True, type=str),
    :param str network: dict(type=str),
    :param str password: dict(type=str, no_log=True),
    :param int port: dict(type=int),
    :param str region: dict(type=str, choices=['eu-de'], default='eu-de'),
    :param str replica_of: dict(type=str),
    :param str restore_time: Restoration time.
    :param str router: dict(type=str),
    :param str security_group: dict(type=str),
    :param str volume_type: dict(required=True, type=str),
    :param int volume_size: dict(required=True, type=int),
    :param bool wait: dict(type=bool, default=True),
    :param int timeout: dict(type=int, default=180)
    :param int wait_interval: Check interval.

    :returns: The results of server creation
    :rtype: :class:`~otcextensions.sdk.rds.v3.instance.Instance`
    """
    # Pull every optional attribute out of kwargs.
    availability_zone = kwargs.get('availability_zone')
    backup = kwargs.get('backup')
    backup_keepdays = kwargs.get('backup_keepdays')
    backup_timeframe = kwargs.get('backup_timeframe')
    charge_mode = kwargs.get('charge_mode')
    configuration = kwargs.get('configuration')
    datastore_type = kwargs.get('datastore_type')
    datastore_version = kwargs.get('datastore_version')
    disk_encryption_id = kwargs.get('disk_encryption_id')
    flavor = kwargs.get('flavor')
    from_instance = kwargs.get('from_instance')
    ha_mode = kwargs.get('ha_mode')
    network = kwargs.get('network')
    password = kwargs.get('password')
    port = kwargs.get('port')
    region = kwargs.get('region')
    replica_of = kwargs.get('replica_of')
    restore_time = kwargs.get('restore_time')
    router = kwargs.get('router')
    security_group = kwargs.get('security_group')
    volume_type = kwargs.get('volume_type')
    volume_size = kwargs.get('volume_size')

    attrs = {}
    attrs['name'] = name
    if availability_zone:
        attrs['availability_zone'] = availability_zone
    # Backup strategy needs both the retention and the window.
    if backup_keepdays and backup_timeframe:
        backup_attrs = {}
        backup_attrs['keep_days'] = backup_keepdays
        backup_attrs['start_time'] = backup_timeframe
        attrs['backup_strategy'] = backup_attrs
    elif backup_keepdays or backup_timeframe:
        # FIX: the two literals previously concatenated to
        # "passedtogether" (missing space).
        raise exceptions.SDKException(
            '`backup_keepdays` and `backup_timeframe` must be passed '
            'together')
    if charge_mode:
        attrs['charge_info'] = {'charge_mode': charge_mode}
    if configuration:
        # TODO(not_gtema): find configuration
        attrs['configuration_id'] = configuration
    if datastore_type:
        datastore = {'type': datastore_type, 'version': datastore_version}
        attrs['datastore'] = datastore
    if disk_encryption_id:
        attrs['disk_encryption_id'] = disk_encryption_id
    if flavor:
        attrs['flavor_ref'] = flavor
    if ha_mode:
        ha = {'mode': 'ha', 'replication_mode': ha_mode}
        attrs['ha'] = ha
    if port:
        attrs['port'] = port
    if password:
        attrs['password'] = password
    if region:
        attrs['region'] = region

    volume = {}
    if volume_size:
        volume = {"size": volume_size}
        if volume_type:
            volume['type'] = volume_type
        attrs['volume'] = volume

    # Parameters that a brand-new primary instance must supply.
    new_instance_required = [router, network, security_group, password]

    # A replica inherits its datastore from the source; everything else
    # needs an explicit datastore.
    if (not replica_of
            and not (datastore_type and datastore_version)):
        raise exceptions.SDKException(
            '`--datastore-type` and `--datastore-version` are '
            'required')

    if replica_of:
        # Create replica
        if (password or port or router or security_group or network):
            raise exceptions.SDKException(
                'Setting password/port/router/network/sg is not '
                'supported when creating replica')
        src = self.rds.find_instance(replica_of, ignore_missing=False)
        datastore_type = src['datastore']['type']
        datastore_version = src['datastore']['version']
        attrs['replica_of_id'] = src.id
        attrs.pop('datastore', None)
    elif from_instance:
        # Restore from a backup or a point in time of another instance.
        source = self.rds.find_instance(from_instance,
                                        ignore_missing=False)
        if backup:
            # Create from backup
            backup_obj = self.rds.find_backup(name_or_id=backup,
                                              instance=source,
                                              ignore_missing=False)
            attrs['restore_point'] = {
                'type': 'backup',
                'backup_id': backup_obj.id,
                'instance_id': backup_obj.instance_id
            }
        elif restore_time:
            attrs['restore_point'] = {
                'type': 'timestamp',
                'restore_time': restore_time,
                'instance_id': source.id
            }
    elif backup or restore_time:
        raise exceptions.SDKException(
            '`from-instance` is required when restoring from '
            'backup or using PITR.')
    elif not all(new_instance_required):
        raise exceptions.SDKException(
            '`router`, `subnet`, `security-group`, '
            '`password` parameters are required when creating '
            'new primary instance.')

    # Resolve the flavor by name within the chosen datastore.
    flavors = list(
        self.rds.flavors(datastore_name=datastore_type,
                         version_name=datastore_version))
    flavor_obj = None
    for f in flavors:
        if f.name == flavor:
            flavor_obj = f
    if not flavor_obj:
        raise exceptions.SDKException(
            'Flavor {flavor} can not be found'.format(flavor=flavor))
    # Flavor mode must match the requested topology.
    if flavor_obj.instance_mode == 'ha' and not ha_mode:
        raise exceptions.SDKException(
            '`ha_mode` is required when using HA enabled flavor')
    if flavor_obj.instance_mode != 'ha' and ha_mode:
        raise exceptions.SDKException('`ha` enabled flavor must be '
                                      'chosen when setting ha_mode')
    if flavor_obj.instance_mode != 'replica' and replica_of:
        raise exceptions.SDKException('`replica` enabled flavor must be '
                                      'chosen when creating replica')
    if ha_mode:
        # HA requires a comma-separated list of availability zones.
        if ',' not in availability_zone:
            raise exceptions.SDKException(
                'List of availability zones must be used when '
                'creating ha instance')
    if ha_mode:
        # Each datastore supports a different replication-mode subset.
        # FIX: "isntance" typo corrected to "instance" in the messages.
        mode = ha_mode
        if (datastore_type.lower() == 'postgresql'
                and mode not in ['async', 'sync']):
            raise exceptions.SDKException(
                '`async` or `sync` ha_mode can be used for '
                'PostgreSQL instance')
        elif (datastore_type.lower() == 'mysql'
                and mode not in ['async', 'semisync']):
            raise exceptions.SDKException(
                '`async` or `semisync` ha_mode can be used for '
                'MySQL instance')
        elif (datastore_type.lower() == 'sqlserver'
                and mode not in ['sync']):
            raise exceptions.SDKException(
                'Only `sync` ha_mode can be used for '
                'SQLServer instance')
    if wait_interval and not wait:
        raise exceptions.SDKException(
            '`wait-interval` is only valid with `wait`')

    # Resolve networking references to IDs.
    if network:
        network_obj = self.network.find_network(network,
                                                ignore_missing=False)
        attrs['network_id'] = network_obj.id
    if security_group:
        security_group_obj = self.network.find_security_group(
            security_group, ignore_missing=False)
        attrs['security_group_id'] = security_group_obj.id
    if router:
        router_obj = self.network.find_router(router, ignore_missing=False)
        attrs['router_id'] = router_obj.id

    obj = self.rds.create_instance(**attrs)

    if obj.job_id and wait:
        wait_args = {}
        if wait_interval:
            wait_args['interval'] = wait_interval
        self.rds.wait_for_job(obj.job_id, **wait_args)
        # Refresh to get the final instance state after the job finished.
        obj = self.rds.get_instance(obj.id)

    return obj
def create_image(
    self, name, filename=None,
    container=None,
    md5=None, sha256=None,
    disk_format=None,
    container_format=None,
    disable_vendor_agent=True,
    allow_duplicates=False,
    meta=None, wait=False, timeout=3600, data=None,
    validate_checksum=False,
    use_import=False,
    stores=None,
    tags=None,
    all_stores=None,
    all_stores_must_succeed=None,
    **kwargs,
):
    """Upload an image.

    :param str name: Name of the image to create. If it is a pathname
        of an image, the name will be constructed from the extensionless
        basename of the path.
    :param str filename: The path to the file to upload, if needed.
        (optional, defaults to None)
    :param data: Image data (string or file-like object). It is mutually
        exclusive with filename
    :param str container: Name of the container in swift where images
        should be uploaded for import if the cloud requires such a thing.
        (optional, defaults to 'images')
    :param str md5: md5 sum of the image file. If not given, an md5 will
        be calculated.
    :param str sha256: sha256 sum of the image file. If not given, an md5
        will be calculated.
    :param str disk_format: The disk format the image is in. (optional,
        defaults to the os-client-config config value for this cloud)
    :param str container_format: The container format the image is in.
        (optional, defaults to the os-client-config config value for this
        cloud)
    :param list tags: List of tags for this image. Each tag is a string
        of at most 255 chars.
    :param bool disable_vendor_agent: Whether or not to append metadata
        flags to the image to inform the cloud in question to not expect a
        vendor agent to be runing. (optional, defaults to True)
    :param allow_duplicates: If true, skips checks that enforce unique
        image name. (optional, defaults to False)
    :param meta: A dict of key/value pairs to use for metadata that
        bypasses automatic type conversion.
    :param bool wait: If true, waits for image to be created. Defaults to
        true - however, be aware that one of the upload methods is always
        synchronous.
    :param timeout: Seconds to wait for image creation. None is forever.
    :param bool validate_checksum: If true and cloud returns checksum,
        compares return value with the one calculated or passed into this
        call. If value does not match - raises exception. Default is
        'false'
    :param bool use_import: Use the interoperable image import mechanism
        to import the image. This defaults to false because it is harder
        on the target cloud so should only be used when needed, such as
        when the user needs the cloud to transform image format. If the
        cloud has disabled direct uploads, this will default to true.
    :param stores: List of stores to be used when enabled_backends is
        activated in glance. List values can be the id of a store or a
        :class:`~openstack.image.v2.service_info.Store` instance.
        Implies ``use_import`` equals ``True``.
    :param all_stores: Upload to all available stores. Mutually exclusive
        with ``store`` and ``stores``.
        Implies ``use_import`` equals ``True``.
    :param all_stores_must_succeed: When set to True, if an error occurs
        during the upload in at least one store, the worfklow fails, the
        data is deleted from stores where copying is done (not staging),
        and the state of the image is unchanged. When set to False, the
        workflow will fail (data deleted from stores, …) only if the
        import fails on all stores specified by the user. In case of a
        partial success, the locations added to the image will be the
        stores where the data has been correctly uploaded.
        Default is True.
        Implies ``use_import`` equals ``True``.

    Additional kwargs will be passed to the image creation as additional
    metadata for the image and will have all values converted to string
    except for min_disk, min_ram, size and virtual_size which will be
    converted to int.

    If you are sure you have all of your data types correct or have an
    advanced need to be explicit, use meta. If you are just a normal
    consumer, using kwargs is likely the right choice.

    If a value is in meta and kwargs, meta wins.

    :returns: A ``munch.Munch`` of the Image object
    :raises: SDKException if there are problems uploading
    """
    if container is None:
        container = self._connection._OBJECT_AUTOCREATE_CONTAINER
    if not meta:
        meta = {}
    if not disk_format:
        disk_format = self._connection.config.config['image_format']
    if not container_format:
        # https://docs.openstack.org/image-guide/image-formats.html
        container_format = 'bare'
    # filename and data are mutually exclusive input channels.
    if data and filename:
        raise exceptions.SDKException(
            'Passing filename and data simultaneously is not supported')
    # If there is no filename, see if name is actually the filename
    if not filename and not data:
        name, filename = self._get_name_and_filename(
            name, self._connection.config.config['image_format'])
    # Checksums can only be computed locally for raw bytes or a file.
    if validate_checksum and data and not isinstance(data, bytes):
        raise exceptions.SDKException(
            'Validating checksum is not possible when data is not a '
            'direct binary object')
    if not (md5 or sha256) and validate_checksum:
        if filename:
            (md5, sha256) = self._connection._get_file_hashes(filename)
        elif data and isinstance(data, bytes):
            (md5, sha256) = self._connection._calculate_data_hashes(data)
    if allow_duplicates:
        current_image = None
    else:
        # Deduplicate by name: reuse an existing image when its stored
        # hashes match the local ones.
        current_image = self.find_image(name)
        if current_image:
            # NOTE(pas-ha) 'properties' may be absent or be None
            props = current_image.get('properties') or {}
            md5_key = props.get(self._IMAGE_MD5_KEY,
                                props.get(self._SHADE_IMAGE_MD5_KEY, ''))
            sha256_key = props.get(
                self._IMAGE_SHA256_KEY,
                props.get(self._SHADE_IMAGE_SHA256_KEY, ''))
            up_to_date = self._connection._hashes_up_to_date(
                md5=md5, sha256=sha256,
                md5_key=md5_key, sha256_key=sha256_key)
            if up_to_date:
                self.log.debug(
                    "image %(name)s exists and is up to date",
                    {'name': name})
                return current_image
            else:
                self.log.debug(
                    "image %(name)s exists, but contains different "
                    "checksums. Updating.", {'name': name})
    if disable_vendor_agent:
        kwargs.update(
            self._connection.config.config['disable_vendor_agent'])
    # If a user used the v1 calling format, they will have
    # passed a dict called properties along
    properties = kwargs.pop('properties', {})
    # Record local hashes and the swift object path in the properties.
    properties[self._IMAGE_MD5_KEY] = md5 or ''
    properties[self._IMAGE_SHA256_KEY] = sha256 or ''
    properties[self._IMAGE_OBJECT_KEY] = '/'.join([container, name])
    kwargs.update(properties)
    image_kwargs = dict(properties=kwargs)
    if disk_format:
        image_kwargs['disk_format'] = disk_format
    if container_format:
        image_kwargs['container_format'] = container_format
    if tags:
        image_kwargs['tags'] = tags

    if filename or data:
        # FIX: all_stores and all_stores_must_succeed were previously
        # passed the `stores` variable, silently discarding the caller's
        # all_stores / all_stores_must_succeed arguments.
        image = self._upload_image(
            name, filename=filename, data=data, meta=meta,
            wait=wait, timeout=timeout,
            validate_checksum=validate_checksum,
            use_import=use_import,
            stores=stores,
            all_stores=all_stores,
            all_stores_must_succeed=all_stores_must_succeed,
            **image_kwargs)
    else:
        # Metadata-only registration, no payload.
        image_kwargs['name'] = name
        image = self._create_image(**image_kwargs)
    # Image list changed; drop the cached listing.
    self._connection._get_cache(None).invalidate()
    return image
def _upload_image_put(
    self, name, filename, data, meta, validate_checksum,
    use_import=False,
    stores=None,
    all_stores=None,
    all_stores_must_succeed=None,
    **image_kwargs,
):
    """Upload an image with a direct PUT (or the interoperable import).

    :param str name: Image name.
    :param str filename: Path of a local file to upload (used when
        ``data`` is not given).
    :param data: Image data (bytes or file-like object).
    :param meta: Metadata dict merged into the image params.
    :param bool validate_checksum: Verify the remote checksum against
        the locally-known md5/sha256 after upload.
    :param bool use_import: Use the image import flow instead of a
        direct upload. Forced on when any of stores/all_stores/
        all_stores_must_succeed is given.
    :param image_kwargs: Remaining image attributes; ``properties`` is
        popped and folded into the v2 image params.
    :raises: SDKException when import is requested but unsupported by
        the cloud; re-raises any upload failure after deleting the
        partially-created image.
    """
    if filename and not data:
        # NOTE(review): this handle is never explicitly closed; it is
        # presumably consumed by image.upload/stage — confirm upstream.
        image_data = open(filename, 'rb')
    else:
        image_data = data
    properties = image_kwargs.pop('properties', {})
    image_kwargs.update(self._make_v2_image_params(meta, properties))
    image_kwargs['name'] = name

    # Register the image record first; bits are pushed afterwards.
    image = self._create(_image.Image, **image_kwargs)
    image.data = image_data

    # 'glance-direct' must be advertised for stage+import to work.
    supports_import = (
        image.image_import_methods
        and 'glance-direct' in image.image_import_methods
    )
    # Any multi-store option implies the import workflow.
    if stores or all_stores or all_stores_must_succeed:
        use_import = True
    if use_import and not supports_import:
        raise exceptions.SDKException(
            "Importing image was requested but the cloud does not"
            " support the image import method.")

    try:
        if not use_import:
            response = image.upload(self)
            exceptions.raise_from_response(response)
        if use_import:
            image.stage(self)
            image.import_image(self)

        # image_kwargs are flat here
        md5 = image_kwargs.get(self._IMAGE_MD5_KEY)
        sha256 = image_kwargs.get(self._IMAGE_SHA256_KEY)
        if validate_checksum and (md5 or sha256):
            # Verify that the hash computed remotely matches the local
            # value
            data = image.fetch(self)
            checksum = data.get('checksum')
            if checksum:
                valid = (checksum == md5 or checksum == sha256)
                if not valid:
                    # NOTE(review): raises a bare Exception rather than
                    # SDKException — callers catching SDKException will
                    # miss this; confirm whether intentional.
                    raise Exception('Image checksum verification failed')
    except Exception:
        # Don't leave a half-uploaded image record behind.
        self.log.debug("Deleting failed upload of image %s", name)
        self.delete_image(image.id)
        raise

    return image
def create_dds_instance(self, name: str, router, network, security_group,
                        flavors: List[FlavorSpec], password: str,
                        region='eu-de', availability_zone='eu-de-01',
                        datastore_type='DDS-Community',
                        datastore_storage_engine='wiredTiger',
                        datastore_version='3.2', mode='ReplicaSet',
                        disk_encryption_id: str = None,
                        backup_timeframe: str = None,
                        backup_keepdays: str = None,
                        ssl_option: str = None):
    """Create DDS instance

    :param name: instance name, dict(required=True, type=str)
    :param router: router name or id, dict(type=str)
    :param network: network name or id, dict(type=str)
    :param security_group: sg name or id, dict(type=str)
    :param flavors: list of flavors, dict(type=list, elements=dict)
    :param password: password, dict(type=str, no_log=True)
    :param region: dict(type=str, default='eu-de')
    :param availability_zone: dict(type=str, default='eu-de-01'),
    :param datastore_type: dict(type=str, default='DDS-Community')
    :param datastore_storage_engine: dict(type=str,
        default='wiredTiger')
    :param datastore_version: dict(type=str, choices=['3.2', '3.4']),
    :param mode: dict(choices=['Sharding', 'ReplicaSet'],
        default='ReplicaSet')
    :param disk_encryption_id: dict(type=str)
    :param backup_timeframe: dict(type=str)
    :param backup_keepdays: dict(type=str)
    :param ssl_option: dict(type=str, choices=['0', '1'])

    :returns: The results of instance creation
    :rtype: :class:`~otcextensions.sdk.dds.v3.instance.Instance`
    """
    attrs = {}
    attrs['name'] = name
    attrs['region'] = region
    attrs['availability_zone'] = availability_zone
    attrs['password'] = password
    datastore = {
        'type': datastore_type,
        'version': datastore_version,
        'storage_engine': datastore_storage_engine
    }
    attrs['datastore'] = datastore

    # Resolve networking names/IDs to concrete IDs.
    router_obj = self.network.find_router(router, ignore_missing=False)
    attrs['vpc_id'] = router_obj.id
    network_obj = self.network.find_network(network, ignore_missing=False)
    attrs['subnet_id'] = network_obj.id
    security_group_obj = self.network.find_security_group(
        security_group, ignore_missing=False)
    attrs['security_group_id'] = security_group_obj.id

    if disk_encryption_id:
        attrs['disk_encryption_id'] = disk_encryption_id
    if mode not in ['Sharding', 'ReplicaSet']:
        raise exceptions.SDKException(
            '`Sharding` or `ReplicaSet` are supported values')
    attrs['mode'] = mode

    # Valid spec codes for this region/engine, for flavor validation.
    flavors_ref = list(
        self.dds.flavors(region=region, engine_name=datastore_type))
    flavors_specs = [flavor.spec_code for flavor in flavors_ref]

    for flavor in flavors:
        # Node-count constraints per node type.
        if flavor['type'] in ['mongos', 'shard'] \
                and flavor['num'] not in range(2, 16):
            raise exceptions.SDKException(
                '`num` value must be in ranges from 2 to 16 '
                'for mongos and shard')
        if flavor['type'] in ['config', 'replica'] \
                and flavor['num'] != 1:
            raise exceptions.SDKException('`num` value must be 1 '
                                          'for config and replica')
        if flavor['type'] == 'mongos':
            # NOTE(review): `all` only rejects mongos specs carrying BOTH
            # keys; one of the two alone slips through — confirm whether
            # `any` was intended.
            if all(k in flavor for k in ('storage', 'size')):
                raise exceptions.SDKException(
                    '`storage` and `size` parameters'
                    ' is invalid for the mongos nodes')
        # Disk size constraints per node type.
        if 'size' in flavor:
            if flavor['type'] == 'replica' \
                    and not (10 <= flavor['size'] <= 2000):
                raise exceptions.SDKException(
                    '`size` value for `replica` must be'
                    ' between 10 and 2000 GB.')
            elif flavor['type'] == 'config' \
                    and flavor['size'] != 20:
                raise exceptions.SDKException(
                    '`size` value for `config` must be 20 GB.')
            elif not (10 <= flavor['size'] <= 1000):
                raise exceptions.SDKException(
                    '`size` value for `shard` must be'
                    ' between 10 and 1000 GB.')
        if flavor['spec_code'] not in flavors_specs:
            raise exceptions.SDKException('`spec_code` not valid')
    attrs['flavor'] = flavors

    # Backup strategy needs both the retention and the window.
    if backup_keepdays and backup_timeframe:
        attrs['backup_strategy'] = {
            'keep_days': backup_keepdays,
            'start_time': backup_timeframe
        }
    elif backup_keepdays or backup_timeframe:
        # FIX: the two literals previously concatenated to
        # "passedtogether" (missing space).
        raise exceptions.SDKException(
            '`backup_keepdays` and `backup_timeframe` must be passed '
            'together')

    if ssl_option:
        attrs['ssl_option'] = ssl_option

    obj = self.dds.create_instance(**attrs)
    # Refresh to return the instance as the service reports it.
    obj = self.dds.get_instance(obj.id)

    return obj