def configure_registry_client():
    """
    Sets up a registry client for use in registry lookups.

    Caches the connection settings in the module-level _CLIENT_HOST,
    _CLIENT_PORT and _CLIENT_KWARGS globals.

    :raises exception.BadRegistryConnectionConfiguration: if the registry
        host/port configuration options are invalid or missing.
    """
    global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
    try:
        host, port = CONF.registry_host, CONF.registry_port
    except cfg.ConfigFileValueError:
        msg = _("Configuration option was not valid")
        LOG.error(msg)
        # Pass the message via the 'reason' kwarg so the exception message
        # formats correctly (consistent with the other
        # configure_registry_client variant in this codebase).
        raise exception.BadRegistryConnectionConfiguration(reason=msg)
    except IndexError:
        msg = _("Could not find required configuration option")
        LOG.error(msg)
        raise exception.BadRegistryConnectionConfiguration(reason=msg)

    _CLIENT_HOST = host
    _CLIENT_PORT = port
    _CLIENT_KWARGS = {
        'use_ssl': CONF.registry_client_protocol.lower() == 'https',
        'key_file': CONF.registry_client_key_file,
        'cert_file': CONF.registry_client_cert_file,
        'ca_file': CONF.registry_client_ca_file,
        'insecure': CONF.registry_client_insecure,
        'timeout': CONF.registry_client_timeout,
    }

    if not CONF.use_user_token:
        configure_registry_admin_creds()
def status(self, status):
    """Transition the image to *status*, validating the state change."""
    if hasattr(self, '_status'):
        # Only transitions whitelisted for the current state are allowed.
        if status not in self.valid_state_targets[self._status]:
            err = exception.InvalidImageStatusTransition(
                cur_status=self._status, new_status=status)
            LOG.debug(err)
            raise err
        moving_to_data_state = (
            self._status in ('queued', 'uploading')
            and status in ('saving', 'active', 'importing'))
        if moving_to_data_state:
            # Both format properties must be present before data is saved.
            required = ('disk_format', 'container_format')
            missing = [name for name in required if not getattr(self, name)]
            if missing:
                if len(missing) == 1:
                    msg = _('Property %s must be set prior to '
                            'saving data.')
                else:
                    msg = _('Properties %s must be set prior to '
                            'saving data.')
                raise ValueError(msg % ', '.join(missing))
    # NOTE(flwang): Image size should be cleared as long as the image
    # status is updated to 'queued'
    if status == 'queued':
        self.size = None
        self.virtual_size = None
    self._status = status
def _do_remove_locations(self, image, path_pos):
    """Remove the location at *path_pos* from the image's location list.

    :param image: the image whose locations are being modified
    :param path_pos: the JSON-patch path position of the location
    :raises HTTPForbidden: if multiple locations are not exposed
    :raises HTTPConflict: if the image is not in 'active' status
    :raises exception.Forbidden: if this would remove the last location
    :raises HTTPBadRequest: if the position is invalid
    """
    if not CONF.show_multiple_locations:
        msg = _("It's not allowed to remove locations if locations are "
                "invisible.")
        raise webob.exc.HTTPForbidden(explanation=msg)

    # BUG FIX: the original used `not in ("active")`, which performs a
    # substring test against the string "active" rather than membership
    # in a one-element tuple. Compare for equality instead.
    if image.status != 'active':
        msg = _("It's not allowed to remove locations if image status is "
                "%s.") % image.status
        raise webob.exc.HTTPConflict(explanation=msg)

    if len(image.locations) == 1:
        LOG.debug("User forbidden to remove last location of image %s",
                  image.image_id)
        msg = _("Cannot remove last location in the image.")
        raise exception.Forbidden(msg)

    pos = self._get_locations_op_pos(path_pos,
                                     len(image.locations), False)
    if pos is None:
        msg = _("Invalid position for removing a location.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    try:
        # NOTE(zhiyan): this actually deletes the location
        # from the backend store.
        image.locations.pop(pos)
    # TODO(jokke): Fix this, we should catch what store throws and
    # provide definitely something else than IternalServerError to user.
    except Exception as e:
        raise webob.exc.HTTPInternalServerError(
            explanation=encodeutils.exception_to_unicode(e))
def update(self, req, image_id, member_id, status):
    """
    Adds a membership to the image.
    :param req: the Request object coming from the wsgi layer
    :param image_id: the image identifier
    :param member_id: the member identifier
    :retval The response body is a mapping of the following form::

        {'member_id': <MEMBER>,
         'image_id': <IMAGE>,
         'status': <MEMBER_STATUS>
         'created_at': ..,
         'updated_at': ..}

    """
    image = self._lookup_image(req, image_id)
    member_repo = self._get_member_repo(image)
    member = self._lookup_member(image, member_id)
    try:
        member.status = status
        member_repo.save(member)
    except exception.Forbidden:
        msg = _("Not allowed to update members for image %s.") % image_id
        LOG.warning(msg)
        raise webob.exc.HTTPForbidden(explanation=msg)
    except ValueError as e:
        msg = (_("Incorrect request: %s")
               % encodeutils.exception_to_unicode(e))
        LOG.warning(msg)
        raise webob.exc.HTTPBadRequest(explanation=msg)
    else:
        # Success path: hand the saved member back to the serializer.
        return member
def set_attr(self, value):
    # Replace the proxied image-locations attribute with *value* after
    # validating every location entry.
    # NOTE(review): `target` and `attr` are free variables here --
    # presumably bound by an enclosing helper/factory scope; confirm
    # against the surrounding module.
    if not isinstance(value, (list, StoreLocations)):
        reason = _('Invalid locations')
        raise exception.BadStoreUri(message=reason)
    ori_value = getattr(getattr(self, target), attr)
    if ori_value != value:
        # NOTE(flwang): If all the URL of passed-in locations are same as
        # current image locations, that means user would like to only
        # update the metadata, not the URL.
        ordered_value = sorted([loc['url'] for loc in value])
        ordered_ori = sorted([loc['url'] for loc in ori_value])
        if len(ori_value) > 0 and ordered_value != ordered_ori:
            # Replacing existing locations with different URLs is refused.
            raise exception.Invalid(_('Original locations is not empty: '
                                      '%s') % ori_value)
        # NOTE(zhiyan): Check locations are all valid
        # NOTE(flwang): If all the URL of passed-in locations are same as
        # current image locations, then it's not necessary to verify those
        # locations again. Otherwise, if there is any restricted scheme in
        # existing locations. _check_image_location will fail.
        if ordered_value != ordered_ori:
            for loc in value:
                _check_image_location(self.context,
                                      self.store_api,
                                      self.store_utils,
                                      loc)
                loc['status'] = 'active'
                if _count_duplicated_locations(value, loc) > 1:
                    raise exception.DuplicateLocation(location=loc['url'])
            # Recompute image size from the (new) location set.
            _set_image_size(self.context, getattr(self, target), value)
        else:
            # Same URLs: just mark every entry active, keep metadata edits.
            for loc in value:
                loc['status'] = 'active'
    return setattr(getattr(self, target), attr, list(value))
def _validate_change(self, change):
    """Validate one JSON-patch operation before it is applied."""
    path_root = change["path"][0]
    if path_root in self._readonly_properties:
        msg = _("Attribute '%s' is read-only.") % path_root
        raise webob.exc.HTTPForbidden(explanation=six.text_type(msg))
    if path_root in self._reserved_properties:
        msg = _("Attribute '%s' is reserved.") % path_root
        raise webob.exc.HTTPForbidden(explanation=six.text_type(msg))
    if change["op"] == "remove":
        # Removals carry no value, so there is nothing to schema-check.
        return

    partial_image = None
    if len(change["path"]) == 1:
        partial_image = {path_root: change["value"]}
    elif (path_root in get_base_properties().keys()) and (
            get_base_properties()[path_root].get("type", "") == "array"):
        # NOTE(zhiyan): client can use the PATCH API to add an element
        # directly to an existing property
        # Such as: 1. using '/locations/N' path to add a location
        # to the image's 'locations' list at position N.
        # (implemented)
        # 2. using '/tags/-' path to append a tag to the
        # image's 'tags' list at the end. (Not implemented)
        partial_image = {path_root: [change["value"]]}

    if partial_image is None:
        return
    try:
        self.schema.validate(partial_image)
    except exception.InvalidObject as e:
        raise webob.exc.HTTPBadRequest(explanation=e.msg)
def _unstage(self, image_repo, image, staging_store):
    """
    Restore the image to queued status and remove data from staging.

    :param image_repo: The instance of ImageRepo
    :param image: The image will be restored
    :param staging_store: The store used for staging
    """
    # NOTE(abhishek): staging_store not being used in this function
    # because of bug #1803498
    # TODO(abhishek): refactor to use the staging_store when the
    # "Rethinking Filesystem Access" spec is implemented in Train
    # The [7:] slice strips the leading 'file://' scheme prefix from the
    # staging URI to get a filesystem path.
    file_path = str(CONF.node_staging_uri + '/' + image.image_id)[7:]
    if not os.path.exists(file_path):
        LOG.warning(_("Staged image data not found "
                      "at %(fn)s"), {'fn': file_path})
    else:
        try:
            os.unlink(file_path)
        except OSError as e:
            LOG.error(_("Cannot delete staged image data %(fn)s "
                        "[Errno %(en)d]"), {'fn': file_path,
                                            'en': e.errno})
    self._restore(image_repo, image)
def __init__(self, type_name=None, type_version=None, **kwargs):
    """Defines an artifact reference

    :param type_name: type name of the target artifact
    :param type_version: type version of the target artifact
    """
    super(ArtifactReference, self).__init__(**kwargs)
    if type_name is None:
        # A version constraint without a type constraint is meaningless.
        if type_version is not None:
            raise exc.InvalidArtifactTypePropertyDefinition(
                _('Unable to specify version '
                  'if type is not specified'))
    else:
        if isinstance(type_name, list):
            # Multiple candidate types: a single version cannot apply.
            if type_version is not None:
                raise exc.InvalidArtifactTypePropertyDefinition(
                    _('Unable to specify version '
                      'if multiple types are possible'))
            type_names = list(type_name)
        else:
            type_names = [type_name]

        def validate_reference(artifact):
            # Referenced artifact must match one of the allowed type
            # names, and the exact version when one was requested.
            if artifact.type_name not in type_names:
                return False
            return not (type_version is not None
                        and artifact.type_version != type_version)

        self._add_validator('referenced_type',
                            validate_reference,
                            _("Invalid referenced type"))
    self._check_definition()
def _get_base_properties():
    """Return the JSON-schema fragment describing the base properties."""
    schema = {
        "name": {"type": "string"},
        "description": {"type": "string"},
        "required": {"$ref": "#/definitions/stringArray"},
        "properties": {"$ref": "#/definitions/property"},
        "schema": {"type": "string"},
        "self": {"type": "string"},
    }
    # Timestamps share the same read-only, date-time shape.
    schema["created_at"] = {
        "type": "string",
        "readOnly": True,
        "description": _("Date and time of object creation"),
        "format": "date-time",
    }
    schema["updated_at"] = {
        "type": "string",
        "readOnly": True,
        "description": _("Date and time of the last object modification"),
        "format": "date-time",
    }
    return schema
def delete(self, req, image_id):
    """Delete the image identified by *image_id* and its repo record.

    :param req: the wsgi Request object
    :param image_id: the image identifier
    :raises HTTPForbidden/HTTPNotFound/HTTPConflict/HTTPBadRequest/
        HTTPUnauthorized: mapped from the corresponding domain errors.
    """
    image_repo = self.gateway.get_repo(req.context)
    try:
        image = image_repo.get(image_id)
        image.delete()
        image_repo.remove(image)
    except (glance_store.Forbidden, exception.Forbidden) as e:
        LOG.debug("User not permitted to delete image '%s'", image_id)
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    # Fix: the exception binding was unused here, and LOG.warn is the
    # deprecated alias of LOG.warning.
    except (glance_store.NotFound, exception.NotFound):
        msg = (_("Failed to find image %(image_id)s to delete")
               % {'image_id': image_id})
        LOG.warning(msg)
        raise webob.exc.HTTPNotFound(explanation=msg)
    except glance_store.exceptions.InUseByStore as e:
        msg = (_("Image %(id)s could not be deleted "
                 "because it is in use: %(exc)s")
               % {"id": image_id, "exc": e.msg})
        LOG.warning(msg)
        raise webob.exc.HTTPConflict(explanation=msg)
    except exception.InvalidImageStatusTransition as e:
        raise webob.exc.HTTPBadRequest(explanation=e.msg)
    except exception.NotAuthenticated as e:
        raise webob.exc.HTTPUnauthorized(explanation=e.msg)
def _validate_change(self, change):
    # Validate one JSON-patch operation before it is applied to an image.
    path_root = change['path'][0]
    if path_root in self._readonly_properties:
        msg = _("Attribute '%s' is read-only.") % path_root
        raise webob.exc.HTTPForbidden(explanation=six.text_type(msg))
    if path_root in self._reserved_properties:
        msg = _("Attribute '%s' is reserved.") % path_root
        raise webob.exc.HTTPForbidden(explanation=six.text_type(msg))

    if change['op'] == 'remove':
        # Removals carry no value, so nothing to schema-check.
        return

    partial_image = None
    if len(change['path']) == 1:
        partial_image = {path_root: change['value']}
    elif ((path_root in get_base_properties().keys()) and
          (get_base_properties()[path_root].get('type', '') == 'array')):
        # NOTE(zhiyan): client can use the PATCH API to add an element
        # directly to an existing array property.
        # Such as: 1. using '/locations/N' path to add a location
        #             to the image's 'locations' list at position N.
        #             (implemented)
        #          2. using '/tags/-' path to append a tag to the
        #             image's 'tags' list at the end. (Not implemented)
        partial_image = {path_root: [change['value']]}

    if partial_image:
        try:
            self.schema.validate(partial_image)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
def replication_size(options, args):
    """%(prog)s size <server:port>

    Determine the size of a glance instance if dumped to disk.

    server:port: the location of the glance instance.
    """
    # Make sure server info is provided
    if args is None or len(args) < 1:
        raise TypeError(_("Too few arguments."))

    server, port = utils.parse_valid_host_port(args.pop())

    byte_total = 0
    active_count = 0

    imageservice = get_image_service()
    client = imageservice(http.HTTPConnection(server, port),
                          options.slavetoken)
    for image in client.get_images():
        LOG.debug('Considering image: %(image)s', {'image': image})
        # Only active images contribute data worth dumping.
        if image['status'] == 'active':
            byte_total += int(image['size'])
            active_count += 1

    print(_('Total size is %(size)d bytes (%(human_size)s) across '
            '%(img_count)d images') %
          {'size': byte_total,
           'human_size': _human_readable_size(byte_total),
           'img_count': active_count})
def purge(self, age_in_days=30, max_rows=100):
    """Purge deleted rows older than a given age from glance tables."""
    def _die(message):
        # Abort the command with the given (already formatted) message.
        sys.exit(message)

    try:
        age_in_days = int(age_in_days)
    except ValueError:
        _die(_("Invalid int value for age_in_days: "
               "%(age_in_days)s") % {'age_in_days': age_in_days})
    try:
        max_rows = int(max_rows)
    except ValueError:
        _die(_("Invalid int value for max_rows: "
               "%(max_rows)s") % {'max_rows': max_rows})

    if age_in_days <= 0:
        _die(_("Must supply a positive, non-zero value for age."))
    if age_in_days >= (int(time.time()) / 86400):
        # An age larger than the epoch would purge everything ever deleted.
        _die(_("Maximal age is count of days since epoch."))
    if max_rows < 1:
        _die(_("Minimal rows limit is 1."))

    ctx = context.get_admin_context(show_deleted=True)
    try:
        db_api.purge_deleted_rows(ctx, age_in_days, max_rows)
    except exception.Invalid as exc:
        _die(exc.msg)
def place_database_under_alembic_control():
    """Stamp a sqlalchemy-migrate managed database for Alembic.

    Maps the legacy migrate version number to the corresponding Alembic
    revision and stamps the database there, so Alembic takes over
    migration control. Exits the process if the legacy version is too old
    or unrecognized.
    """
    a_config = get_alembic_config()

    if not is_database_under_migrate_control():
        # Nothing to convert: the database was never managed by
        # sqlalchemy-migrate.
        return

    if not is_database_under_alembic_control():
        print(_("Database is currently not under Alembic's migration "
                "control."))
        head = get_current_legacy_head()
        # Legacy head -> Alembic revision mapping.
        if head == 42:
            alembic_version = 'liberty'
        elif head == 43:
            alembic_version = 'mitaka01'
        elif head == 44:
            alembic_version = 'mitaka02'
        elif head == 45:
            alembic_version = 'ocata01'
        elif head in range(1, 42):
            # Pre-Liberty schemas are no longer supported.
            print("Legacy head: ", head)
            sys.exit(_("The current database version is not supported any "
                       "more. Please upgrade to Liberty release first."))
        else:
            sys.exit(_("Unable to place database under Alembic's migration "
                       "control. Unknown database state, can't proceed "
                       "further."))

        print(_("Placing database under Alembic's migration control at "
                "revision:"), alembic_version)
        alembic_command.stamp(a_config, alembic_version)
def _check_item_type(self, item):
    """Validate a list-item type specification.

    :param item: the attribute definition to be used for list elements
    :raises exc.InvalidArtifactTypePropertyDefinition: if *item* is not
        one of ALLOWED_ITEM_TYPES or declares a default value.
    """
    if not isinstance(item, self.ALLOWED_ITEM_TYPES):
        raise exc.InvalidArtifactTypePropertyDefinition(
            _('Invalid item type specification'))
    if item.default is not None:
        # Fix: error message previously read "may hot have defaults".
        raise exc.InvalidArtifactTypePropertyDefinition(
            _('List definitions may not have defaults'))
def __init__(self, item_type, min_size=0, max_size=None, unique=False,
             **kwargs):
    """Define a list attribute whose elements are of *item_type*."""
    super(ListAttributeDefinition, self).__init__(**kwargs)
    if not isinstance(item_type, list):
        self._check_item_type(item_type)
        self.item_type = item_type
    else:
        for element_type in item_type:
            self._check_item_type(element_type)
        # we need to copy the item_type collection
        self.item_type = list(item_type)
        # Tuple semantics: a list of types fixes the exact length, so
        # explicit sizes are rejected.
        if min_size != 0:
            raise exc.InvalidArtifactTypePropertyDefinition(
                _("Cannot specify 'min_size' explicitly"))
        if max_size is not None:
            raise exc.InvalidArtifactTypePropertyDefinition(
                _("Cannot specify 'max_size' explicitly"))
        # setting max_size and min_size to the length of item_type,
        # as tuple-semantic assumes that the number of elements is set
        # by the type spec
        min_size = max_size = len(item_type)

    if min_size:
        self.min_size(min_size)
    if max_size:
        self.max_size(max_size)
    if unique:
        self.unique()
def update(self, req, image_id, tag_value):
    """Add *tag_value* to the image's tag set and persist the image."""
    image_repo = self.gateway.get_repo(req.context)
    try:
        target = image_repo.get(image_id)
        target.tags.add(tag_value)
        image_repo.save(target)
    except exception.NotFound:
        msg = _("Image %s not found.") % image_id
        LOG.warning(msg)
        raise webob.exc.HTTPNotFound(explanation=msg)
    except exception.Forbidden:
        msg = _("Not allowed to update tags for image %s.") % image_id
        LOG.warning(msg)
        raise webob.exc.HTTPForbidden(explanation=msg)
    except exception.Invalid as e:
        msg = (_("Could not update image: %s")
               % encodeutils.exception_to_unicode(e))
        LOG.warning(msg)
        raise webob.exc.HTTPBadRequest(explanation=msg)
    except exception.ImageTagLimitExceeded as e:
        msg = (_("Image tag limit exceeded for image %(id)s: %(e)s:")
               % {"id": image_id,
                  "e": encodeutils.exception_to_unicode(e)})
        LOG.warning(msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
def configure_registry_client():
    """
    Sets up a registry client for use in registry lookups
    """
    global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, \
        _METADATA_ENCRYPTION_KEY
    try:
        host, port = CONF.registry_host, CONF.registry_port
    except cfg.ConfigFileValueError:
        msg = _("Configuration option was not valid")
        LOG.error(msg)
        raise exception.BadRegistryConnectionConfiguration(reason=msg)
    except IndexError:
        msg = _("Could not find required configuration option")
        LOG.error(msg)
        raise exception.BadRegistryConnectionConfiguration(reason=msg)

    _CLIENT_HOST = host
    _CLIENT_PORT = port
    _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key
    # Cache the connection keyword arguments used by every registry call.
    _CLIENT_KWARGS = dict(
        use_ssl=CONF.registry_client_protocol.lower() == "https",
        key_file=CONF.registry_client_key_file,
        cert_file=CONF.registry_client_cert_file,
        ca_file=CONF.registry_client_ca_file,
        insecure=CONF.registry_client_insecure,
        timeout=CONF.registry_client_timeout,
    )

    if not CONF.use_user_token:
        configure_registry_admin_creds()
def update_store_acls(self, req, image_id, location_uri, public=False):
    """Push per-member read/write ACLs for an image location to the store."""
    if not location_uri:
        return
    read_tenants = []
    write_tenants = []
    try:
        members = registry.get_image_members(req.context, image_id)
        for member in (members or []):
            # Members that can share get write access; others read-only.
            bucket = write_tenants if member['can_share'] else read_tenants
            bucket.append(member['member_id'])
        store.set_acls(location_uri, public=public,
                       read_tenants=read_tenants,
                       write_tenants=write_tenants,
                       context=req.context)
    except store.UnknownScheme:
        msg = _("Store for image_id not found: %s") % image_id
        raise webob.exc.HTTPBadRequest(explanation=msg,
                                       request=req,
                                       content_type='text/plain')
    except store.NotFound:
        msg = _("Data for image_id not found: %s") % image_id
        raise webob.exc.HTTPNotFound(explanation=msg,
                                     request=req,
                                     content_type='text/plain')
def validate_location_uri(location):
    """Validate location uri into acceptable format.

    :param location: Location uri to be validated
    """
    if not location:
        raise exception.BadStoreUri(_('Invalid location: %s') % location)

    if location.startswith(('http://', 'https://')):
        return location

    # NOTE: file type uri is being avoided for security reasons,
    # see LP bug #942118 #1400966.
    if location.startswith(("file:///", "filesystem:///")):
        msg = _("File based imports are not allowed. Please use a non-local "
                "source of image data.")
        # NOTE: raise BadStoreUri and let the encompassing block save the error
        # msg in the task.message.
        raise exception.BadStoreUri(msg)

    # TODO(nikhil): add other supported uris
    supported = ['http', ]
    msg = _("The given uri is not valid. Please specify a "
            "valid uri from the following list of supported uri "
            "%(supported)s") % {'supported': supported}
    raise urllib.error.URLError(msg)
def index(self, req, image_id):
    """
    Return a list of dictionaries indicating the members of the
    image, i.e., those tenants the image is shared with.

    :param req: the Request object coming from the wsgi layer
    :param image_id: The opaque image identifier
    :returns: The response body is a mapping of the following form

    .. code-block:: json

        {'members': [
            {'member_id': <MEMBER>,
             'can_share': <SHARE_PERMISSION>, ...}, ...
        ]}
    """
    self._enforce(req, 'get_members')
    self._raise_404_if_image_deleted(req, image_id)

    try:
        members = registry.get_image_members(req.context, image_id)
    except exception.NotFound:
        msg = _("Image with identifier %s not found") % image_id
        # Fix: LOG.warn is the deprecated alias of LOG.warning.
        LOG.warning(msg)
        raise webob.exc.HTTPNotFound(msg)
    except exception.Forbidden:
        msg = _("Unauthorized image access")
        LOG.warning(msg)
        raise webob.exc.HTTPForbidden(msg)
    return dict(members=members)
def download(self, req, id, type_name, type_version, attr, index,
             **kwargs):
    """Return the blob (or blob-list element) stored at *attr*.

    :param attr: name of the artifact blob attribute to download
    :param index: list position when the blob attribute is a list,
        otherwise must be None
    :raises HTTPBadRequest: on bad index / non-downloadable attribute
    :raises HTTPForbidden / HTTPNotFound: mapped from domain errors
    """
    artifact_repo = self.gateway.get_artifact_repo(req.context)
    try:
        artifact = artifact_repo.get(id, type_name, type_version)
        if attr in artifact.metadata.attributes.blobs:
            if isinstance(artifact.metadata.attributes.blobs[attr], list):
                if index is None:
                    raise webob.exc.HTTPBadRequest(
                        explanation=_("Index is required"))
                blob_list = getattr(artifact, attr)
                try:
                    return blob_list[index]
                except IndexError as e:
                    # Fix: exceptions have no `.message` attribute on
                    # Python 3; stringify the exception instead.
                    raise webob.exc.HTTPBadRequest(
                        explanation=encodeutils.exception_to_unicode(e))
            else:
                if index is not None:
                    raise webob.exc.HTTPBadRequest(_("Not a list "
                                                     "property"))
                return getattr(artifact, attr)
        else:
            message = _("Not a downloadable entity")
            raise webob.exc.HTTPBadRequest(explanation=message)
    except exception.Forbidden as e:
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    except (glance_store.NotFound, exception.NotFound) as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except exception.Invalid as e:
        raise webob.exc.HTTPBadRequest(explanation=e.msg)
def update(self, req, id, type_name, type_version, changes, **kwargs):
    """Performs an update via json patch request"""
    artifact_repo = self.gateway.get_artifact_repo(req.context)
    try:
        artifact = self._get_artifact_with_dependencies(artifact_repo,
                                                        id,
                                                        type_name,
                                                        type_version)
        self._ensure_write_access(artifact, req.context)
        updated = artifact
        for change in changes:
            # Blob attributes cannot be modified through the generic
            # JSON-patch path; they have dedicated upload endpoints.
            if artifact.metadata.attributes.blobs.get(change['path']):
                msg = _('Invalid request PATCH for work with blob')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            else:
                updated = self._do_update_op(updated, change)
        artifact_repo.save(updated)
        # Re-fetch so dependencies reflect the saved state.
        return self._get_artifact_with_dependencies(artifact_repo, id)
    except (exception.InvalidJsonPatchPath,
            exception.Invalid) as e:
        raise webob.exc.HTTPBadRequest(explanation=e.msg)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except exception.Forbidden as e:
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    except exception.StorageQuotaFull as e:
        msg = (_("Denying attempt to upload artifact because it exceeds "
                 "the quota: %s") % encodeutils.exception_to_unicode(e))
        raise webob.exc.HTTPRequestEntityTooLarge(
            explanation=msg, request=req, content_type='text/plain')
    except exception.LimitExceeded as e:
        raise webob.exc.HTTPRequestEntityTooLarge(
            explanation=e.msg, request=req, content_type='text/plain')
    except exception.NotAuthenticated as e:
        raise webob.exc.HTTPUnauthorized(explanation=e.msg)
def launch(pid_file, conf_file=None, capture_output=False, await_time=0):
    # Fork and exec the server binary, recording its pid.
    # NOTE(review): `server` and `verb` are free variables here --
    # presumably bound by an enclosing scope (this looks like a nested
    # helper); confirm against the surrounding module.
    args = [server]
    if conf_file:
        args += ['--config-file', conf_file]
        msg = (_('%(verb)sing %(serv)s with %(conf)s') %
               {'verb': verb, 'serv': server, 'conf': conf_file})
    else:
        msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 'serv': server})
    print(msg)

    # Avoid leaking parent file descriptors into the exec'd child.
    close_stdio_on_exec()

    pid = os.fork()
    if pid == 0:
        # Child: detach from the controlling terminal, redirect stdio,
        # then replace this process image with the server binary.
        os.setsid()
        redirect_stdio(server, capture_output)
        try:
            os.execlp('%s' % server, *args)
        except OSError as e:
            msg = (_('unable to launch %(serv)s. Got error: %(e)s') %
                   {'serv': server, 'e': e})
            sys.exit(msg)
        # Unreachable after a successful exec; exits if execlp raised.
        sys.exit(0)
    else:
        # Parent: persist the child's pid and optionally wait on it.
        write_pid_file(pid_file, pid)
        await_child(pid, await_time)
        return pid
def do_stop(server, args, graceful=False):
    """Signal every recorded pid of *server* and wait for it to exit."""
    if graceful and server in GRACEFUL_SHUTDOWN_SERVERS:
        sig = signal.SIGHUP
    else:
        sig = signal.SIGTERM

    pid_entries = pid_files(server, CONF.pid_file)
    did_anything = False

    for pfile, pid in pid_entries:
        did_anything = True
        # The pid file is stale once we decide to stop; drop it first.
        try:
            os.unlink(pfile)
        except OSError:
            pass
        try:
            print(_('Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)')
                  % {'serv': server, 'pid': pid, 'sig': sig})
            os.kill(pid, sig)
        except OSError:
            print(_("Process %d not running") % pid)

    for pfile, pid in pid_entries:
        # Poll /proc for up to 15 seconds (150 * 0.1s) per process.
        for _junk in range(150):
            if not os.path.exists('/proc/%s' % pid):
                break
            time.sleep(0.1)
        else:
            print(_('Waited 15 seconds for pid %(pid)s (%(file)s) to die;'
                    ' giving up') % {'pid': pid, 'file': pfile})

    if not did_anything:
        print(_('%s is already stopped') % server)
def get_connect_kwargs(self):
    """Build the keyword arguments used to open a client connection.

    Fills in key/cert/CA files from GLANCE_CLIENT_* environment
    variables when unset, validates that a key/cert pair is complete and
    that all referenced files exist, and falls back to the first existing
    path in DEFAULT_CA_FILE_PATH for the CA file.

    :returns: dict of connection kwargs (timeout, and when use_ssl is
        set: key_file, cert_file, ca_file, insecure)
    :raises exception.ClientConnectionError: on incomplete or missing
        SSL material
    """
    # Both secure and insecure connections have a timeout option
    connect_kwargs = {'timeout': self.timeout}

    if self.use_ssl:
        # Environment variables act as fallbacks for unset attributes.
        if self.key_file is None:
            self.key_file = os.environ.get('GLANCE_CLIENT_KEY_FILE')
        if self.cert_file is None:
            self.cert_file = os.environ.get('GLANCE_CLIENT_CERT_FILE')
        if self.ca_file is None:
            self.ca_file = os.environ.get('GLANCE_CLIENT_CA_FILE')

        # Check that key_file/cert_file are either both set or both unset
        if self.cert_file is not None and self.key_file is None:
            msg = _("You have selected to use SSL in connecting, "
                    "and you have supplied a cert, "
                    "however you have failed to supply either a "
                    "key_file parameter or set the "
                    "GLANCE_CLIENT_KEY_FILE environ variable")
            raise exception.ClientConnectionError(msg)

        if self.key_file is not None and self.cert_file is None:
            msg = _("You have selected to use SSL in connecting, "
                    "and you have supplied a key, "
                    "however you have failed to supply either a "
                    "cert_file parameter or set the "
                    "GLANCE_CLIENT_CERT_FILE environ variable")
            raise exception.ClientConnectionError(msg)

        if (self.key_file is not None and
                not os.path.exists(self.key_file)):
            msg = _("The key file you specified %s does not "
                    "exist") % self.key_file
            raise exception.ClientConnectionError(msg)
        connect_kwargs['key_file'] = self.key_file

        if (self.cert_file is not None and
                not os.path.exists(self.cert_file)):
            msg = _("The cert file you specified %s does not "
                    "exist") % self.cert_file
            raise exception.ClientConnectionError(msg)
        connect_kwargs['cert_file'] = self.cert_file

        if (self.ca_file is not None and
                not os.path.exists(self.ca_file)):
            msg = _("The CA file you specified %s does not "
                    "exist") % self.ca_file
            raise exception.ClientConnectionError(msg)

        if self.ca_file is None:
            # No CA supplied anywhere: use the first well-known CA
            # bundle path that exists on this system.
            for ca in self.DEFAULT_CA_FILE_PATH.split(":"):
                if os.path.exists(ca):
                    self.ca_file = ca
                    break

        connect_kwargs['ca_file'] = self.ca_file
        connect_kwargs['insecure'] = self.insecure

    return connect_kwargs
def do_check_status(pid_file, server):
    """Print whether *server* is running, based on its pid file."""
    if not os.path.exists(pid_file):
        print(_("%s is stopped") % server)
        return
    with open(pid_file, 'r') as pidfile:
        pid = pidfile.read().strip()
    print(_("%(serv)s (pid %(pid)s) is running...")
          % {'serv': server, 'pid': pid})
def validate_key_cert(key_file, cert_file):
    """Verify that a private key and certificate load and belong together.

    Loads both PEM files, then signs and verifies a random payload with
    the configured digest algorithm to prove the pair matches.

    :param key_file: path to the PEM private key
    :param cert_file: path to the PEM certificate
    :raises RuntimeError: if either file is unreadable/invalid or the
        key and certificate do not match
    """
    try:
        # Track which file we are processing so error messages can name it.
        error_key_name = "private key"
        error_filename = key_file
        with open(key_file, "r") as keyfile:
            key_str = keyfile.read()
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)

        error_key_name = "certificate"
        error_filename = cert_file
        with open(cert_file, "r") as certfile:
            cert_str = certfile.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except IOError as ioe:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it."
                             " Error: %(ioe)s") %
                           {"error_key_name": error_key_name,
                            "error_filename": error_filename,
                            "ioe": ioe})
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it. OpenSSL"
                             " error: %(ce)s") %
                           {"error_key_name": error_key_name,
                            "error_filename": error_filename,
                            "ce": ce})

    try:
        # Sign/verify a random payload to prove the key matches the cert.
        data = str(uuid.uuid4())
        digest = CONF.digest_algorithm
        if digest == "sha1":
            LOG.warn(
                "The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)"
                " state that the SHA-1 is not suitable for"
                " general-purpose digital signature applications (as"
                " specified in FIPS 186-3) that require 112 bits of"
                " security. The default value is sha1 in Kilo for a"
                " smooth upgrade process, and it will be updated"
                " with sha256 in next release(L).")
        out = crypto.sign(key, data, digest)
        crypto.verify(cert, out, data, digest)
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your key pair. "
                             "Please verify that cert %(cert_file)s and "
                             "key %(key_file)s belong together. OpenSSL "
                             "error %(ce)s") %
                           {"cert_file": cert_file,
                            "key_file": key_file,
                            "ce": ce})
def _check_for_path_errors(self, pointer):
    """Reject JSON-pointer strings that are syntactically invalid."""
    if not re.match(self.PATH_REGEX_COMPILED, pointer):
        msg = _("Json path should start with a '/', "
                "end with no '/', no 2 subsequent '/' are allowed.")
        raise exc.InvalidJsonPatchPath(path=pointer, explanation=msg)
    # '~' is only legal as part of the ~0 / ~1 escape sequences.
    has_bad_escape = (re.search('~[^01]', pointer) is not None
                      or pointer.endswith('~'))
    if has_bad_escape:
        msg = _("Pointer contains '~' which is not part of"
                " a recognized escape sequence [~0, ~1].")
        raise exc.InvalidJsonPatchPath(path=pointer, explanation=msg)
def _get_sorting_params(self, params):
    """
    Process sorting params.
    Currently glance supports two sorting syntax: classic and new one,
    that is uniform for all OpenStack projects.
    Classic syntax: sort_key=name&sort_dir=asc&sort_key=size&sort_dir=desc
    New syntax: sort=name:asc,size:desc

    :param params: mutable mapping of query parameters (consumed keys
        are popped)
    :returns: tuple (sort_keys, sort_dirs) of validated lists
    :raises HTTPBadRequest: if the syntaxes are mixed or key/dir counts
        do not match
    """
    sort_keys = []
    sort_dirs = []

    if 'sort' in params:
        # use new sorting syntax here
        if 'sort_key' in params or 'sort_dir' in params:
            msg = _('Old and new sorting syntax cannot be combined')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        for sort_param in params.pop('sort').strip().split(','):
            # Fix: local variable renamed so it no longer shadows the
            # builtin `dir`.
            key, _sep, sort_dir = sort_param.partition(':')
            if not sort_dir:
                sort_dir = self._default_sort_dir
            sort_keys.append(self._validate_sort_key(key.strip()))
            sort_dirs.append(self._validate_sort_dir(sort_dir.strip()))
    else:
        # continue with classic syntax
        # NOTE(mfedosin): we have 3 options here:
        # 1. sort_dir wasn't passed: we use default one - 'desc'.
        # 2. Only one sort_dir was passed: use it for every sort_key
        # in the list.
        # 3. Multiple sort_dirs were passed: consistently apply each one to
        # the corresponding sort_key.
        # If number of sort_dirs and sort_keys doesn't match then raise an
        # exception.
        while 'sort_key' in params:
            sort_keys.append(self._validate_sort_key(
                params.pop('sort_key').strip()))
        while 'sort_dir' in params:
            sort_dirs.append(self._validate_sort_dir(
                params.pop('sort_dir').strip()))
        if sort_dirs:
            dir_len = len(sort_dirs)
            key_len = len(sort_keys)
            if dir_len > 1 and dir_len != key_len:
                msg = _('Number of sort dirs does not match the number '
                        'of sort keys')
                raise webob.exc.HTTPBadRequest(explanation=msg)

    if not sort_keys:
        sort_keys = [self._default_sort_key]
    if not sort_dirs:
        sort_dirs = [self._default_sort_dir]

    return sort_keys, sort_dirs
def upload(self, req, image_id, data, size):
    """Accept image data for *image_id* and move it to 'active'.

    Saves the image in 'saving' state, streams the data into the store
    via image.set_data, then persists the final state. Each failure mode
    is mapped to the appropriate HTTP error, restoring or deleting the
    image as needed.
    """
    image_repo = self.gateway.get_repo(req.context)
    image = None
    refresher = None
    cxt = req.context
    try:
        image = image_repo.get(image_id)
        image.status = 'saving'
        try:
            if CONF.data_api == 'glance.db.registry.api':
                # create a trust if backend is registry
                try:
                    # request user plugin for current token
                    user_plugin = req.environ.get('keystone.token_auth')
                    roles = []
                    # use roles from request environment because they
                    # are not transformed to lower-case unlike cxt.roles
                    for role_info in req.environ.get(
                            'keystone.token_info')['token']['roles']:
                        roles.append(role_info['name'])
                    refresher = trust_auth.TokenRefresher(user_plugin,
                                                          cxt.tenant,
                                                          roles)
                except Exception as e:
                    LOG.info(_LI("Unable to create trust: %s "
                                 "Use the existing user token."),
                             encodeutils.exception_to_unicode(e))

            image_repo.save(image, from_state='queued')
            image.set_data(data, size)

            try:
                image_repo.save(image, from_state='saving')
            except exception.NotAuthenticated:
                if refresher is not None:
                    # request a new token to update an image in database
                    cxt.auth_token = refresher.refresh_token()
                    image_repo = self.gateway.get_repo(req.context)
                    image_repo.save(image, from_state='saving')
                else:
                    raise

            try:
                # release resources required for re-auth
                if refresher is not None:
                    refresher.release_resources()
            except Exception as e:
                LOG.info(_LI("Unable to delete trust %(trust)s: %(msg)s"),
                         {"trust": refresher.trust_id,
                          "msg": encodeutils.exception_to_unicode(e)})

        except (glance_store.NotFound,
                exception.ImageNotFound,
                exception.Conflict):
            msg = (_("Image %s could not be found after upload. "
                     "The image may have been deleted during the "
                     "upload, cleaning up the chunks uploaded.") %
                   image_id)
            LOG.warn(msg)
            # NOTE(sridevi): Cleaning up the uploaded chunks.
            try:
                image.delete()
            except exception.ImageNotFound:
                # NOTE(sridevi): Ignore this exception
                pass
            raise webob.exc.HTTPGone(explanation=msg,
                                     request=req,
                                     content_type='text/plain')
        except exception.NotAuthenticated:
            msg = (_("Authentication error - the token may have "
                     "expired during file upload. Deleting image data for "
                     "%s.") % image_id)
            LOG.debug(msg)
            try:
                image.delete()
            except exception.NotAuthenticated:
                # NOTE: Ignore this exception
                pass
            raise webob.exc.HTTPUnauthorized(explanation=msg,
                                             request=req,
                                             content_type='text/plain')
    except ValueError as e:
        LOG.debug("Cannot save data for image %(id)s: %(e)s",
                  {'id': image_id,
                   'e': encodeutils.exception_to_unicode(e)})
        self._restore(image_repo, image)
        raise webob.exc.HTTPBadRequest(
            explanation=encodeutils.exception_to_unicode(e))
    except glance_store.StoreAddDisabled:
        msg = _("Error in store configuration. Adding images to store "
                "is disabled.")
        LOG.exception(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPGone(explanation=msg, request=req,
                                 content_type='text/plain')
    except exception.InvalidImageStatusTransition as e:
        msg = encodeutils.exception_to_unicode(e)
        LOG.exception(msg)
        raise webob.exc.HTTPConflict(explanation=e.msg, request=req)
    except exception.Forbidden as e:
        msg = ("Not allowed to upload image data for image %s" %
               image_id)
        LOG.debug(msg)
        raise webob.exc.HTTPForbidden(explanation=msg, request=req)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except glance_store.StorageFull as e:
        msg = _("Image storage media "
                "is full: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.StorageQuotaFull as e:
        msg = _("Image exceeds the storage "
                "quota: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.ImageSizeLimitExceeded as e:
        msg = _("The incoming image is "
                "too large: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except glance_store.StorageWriteDenied as e:
        msg = _("Insufficient permissions on image "
                "storage media: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req)
    except cursive_exception.SignatureVerificationError as e:
        msg = (_LE("Signature verification failed for image %(id)s: %(e)s")
               % {'id': image_id,
                  'e': encodeutils.exception_to_unicode(e)})
        LOG.error(msg)
        # A bad signature means the stored data must not be kept.
        self._delete(image_repo, image)
        raise webob.exc.HTTPBadRequest(explanation=msg)
    except webob.exc.HTTPGone as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to upload image data due to HTTP error"))
    except webob.exc.HTTPError as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to upload image data due to HTTP error"))
            self._restore(image_repo, image)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to upload image data due to "
                              "internal error"))
            self._restore(image_repo, image)
class Conflict(GlanceException):
    """Raised when a request conflicts with an operation already in progress
    on the same object (HTTP 409 semantics at the API layer).
    """
    message = _("An object with the same identifier is currently being "
                "operated on.")
class BadStoreUri(GlanceException):
    """Raised when a store location URI cannot be parsed or is malformed."""
    message = _("The Store URI was malformed.")
def stage(self, req, image_id, data, size):
    """Write incoming image data into the node-local staging store.

    Moves the image from 'queued' to 'uploading' and copies ``data`` into
    a filesystem store rooted at ``CONF.node_staging_uri`` (the 'file://'
    prefix is stripped via the ``[7:]`` slice below).

    :param req: the incoming webob request (carries the auth context)
    :param image_id: identifier of the image being staged
    :param data: file-like object with the raw image bytes
    :param size: expected byte size of the data (passed through; the store
                 call below uses 0, i.e. unknown size)
    :raises webob.exc.HTTPConflict: image already has staged data
    :raises webob.exc.HTTPNotFound: image does not exist
    :raises webob.exc.HTTPRequestEntityTooLarge: storage/quota/size limits
    :raises webob.exc.HTTPServiceUnavailable: store write permission denied
    """
    image_repo = self.gateway.get_repo(req.context)
    image = None

    # NOTE(jokke): this is horrible way to do it but as long as
    # glance_store is in a shape it is, the only way. Don't hold me
    # accountable for it.
    def _build_staging_store():
        # Builds a private, throwaway glance_store config pointing the
        # 'file' store at the staging directory, so staging writes do not
        # go through the regular image store.
        conf = cfg.ConfigOpts()
        backend.register_opts(conf)
        conf.set_override('filesystem_store_datadir',
                          CONF.node_staging_uri[7:],
                          group='glance_store')
        staging_store = backend._load_store(conf, 'file')
        try:
            staging_store.configure()
        except AttributeError:
            msg = _("'node_staging_uri' is not set correctly. Could not "
                    "load staging store.")
            raise exception.BadStoreUri(message=msg)
        return staging_store

    staging_store = _build_staging_store()

    try:
        image = image_repo.get(image_id)
        # Only a 'queued' image may be staged; save() enforces the
        # from_state so concurrent staging attempts lose the race.
        image.status = 'uploading'
        image_repo.save(image, from_state='queued')
        try:
            # Size 0 means "unknown"; the store reads until EOF.
            staging_store.add(image_id, data, 0)
        except glance_store.Duplicate as e:
            msg = _("The image %s has data on staging") % image_id
            raise webob.exc.HTTPConflict(explanation=msg)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except glance_store.StorageFull as e:
        msg = _("Image storage media "
                "is full: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.StorageQuotaFull as e:
        msg = _("Image exceeds the storage "
                "quota: %s") % encodeutils.exception_to_unicode(e)
        LOG.debug(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.ImageSizeLimitExceeded as e:
        msg = _("The incoming image is "
                "too large: %s") % encodeutils.exception_to_unicode(e)
        LOG.debug(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except glance_store.StorageWriteDenied as e:
        msg = _("Insufficient permissions on image "
                "storage media: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.exception(
                _LE("Failed to stage image data due to "
                    "internal error"))
            # NOTE(review): every other handler above calls _unstage();
            # this catch-all calls _restore() instead, which presumably
            # leaves staged data behind on unexpected errors — confirm
            # whether this asymmetry is intentional.
            self._restore(image_repo, image)
class AuthBadRequest(GlanceException):
    """Raised when the auth service rejects a request or cannot be reached
    cleanly at the configured URL.
    """
    message = _("Connect error/bad request to Auth service at URL %(url)s.")
def replication_compare(options, args): """%(prog)s compare <fromserver:port> <toserver:port> Compare the contents of fromserver with those of toserver. fromserver:port: the location of the source glance instance. toserver:port: the location of the target glance instance. """ # Make sure from-server and to-server are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) imageservice = get_image_service() target_server, target_port = utils.parse_valid_host_port(args.pop()) target_conn = http.HTTPConnection(target_server, target_port) target_client = imageservice(target_conn, options.targettoken) source_server, source_port = utils.parse_valid_host_port(args.pop()) source_conn = http.HTTPConnection(source_server, source_port) source_client = imageservice(source_conn, options.sourcetoken) differences = {} for image in source_client.get_images(): if _image_present(target_client, image['id']): headers = target_client.get_image_meta(image['id']) for key in options.dontreplicate.split(' '): if key in image: LOG.debug('Stripping %(header)s from source metadata', {'header': key}) del image[key] if key in headers: LOG.debug('Stripping %(header)s from target metadata', {'header': key}) del headers[key] for key in image: if image[key] != headers.get(key): LOG.warn(_LW('%(image_id)s: field %(key)s differs ' '(source is %(source_value)s, destination ' 'is %(target_value)s)') % {'image_id': image['id'], 'key': key, 'source_value': image[key], 'target_value': headers.get(key, 'undefined')}) differences[image['id']] = 'diff' else: LOG.debug('%(image_id)s is identical', {'image_id': image['id']}) elif image['status'] == 'active': LOG.warn(_LW('Image %(image_id)s ("%(image_name)s") ' 'entirely missing from the destination') % {'image_id': image['id'], 'image_name': image.get('name', '--unnamed')}) differences[image['id']] = 'missing' return differences
def replication_load(options, args):
    """%(prog)s load <server:port> <path>

    Load the contents of a local directory into glance.

    server:port: the location of the glance instance.
    path:        a directory on disk containing the data.
    """
    # Make sure server and path are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))

    path = args.pop()
    server, port = utils.parse_valid_host_port(args.pop())

    imageservice = get_image_service()
    client = imageservice(http.HTTPConnection(server, port),
                          options.targettoken)

    # Ids of images whose metadata or data was pushed to the server.
    updated = []

    for ent in os.listdir(path):
        # Dump directories hold one metadata file named by image UUID and
        # an optional '<uuid>.img' data file next to it; anything else in
        # the directory is ignored.
        if uuidutils.is_uuid_like(ent):
            image_uuid = ent
            LOG.info(_LI('Considering: %s'), image_uuid)

            meta_file_name = os.path.join(path, image_uuid)
            with open(meta_file_name) as meta_file:
                meta = jsonutils.loads(meta_file.read())

            # Remove keys which don't make sense for replication
            for key in options.dontreplicate.split(' '):
                if key in meta:
                    LOG.debug('Stripping %(header)s from saved '
                              'metadata', {'header': key})
                    del meta[key]

            if _image_present(client, image_uuid):
                # NOTE(mikal): Perhaps we just need to update the metadata?
                # Note that we don't attempt to change an image file once it
                # has been uploaded.
                LOG.debug('Image %s already present', image_uuid)
                headers = client.get_image_meta(image_uuid)
                for key in options.dontreplicate.split(' '):
                    if key in headers:
                        LOG.debug('Stripping %(header)s from target '
                                  'metadata', {'header': key})
                        del headers[key]

                if _dict_diff(meta, headers):
                    LOG.info(_LI('Image %s metadata has changed'),
                             image_uuid)
                    headers, body = client.add_image_meta(meta)
                    _check_upload_response_headers(headers, body)
                    updated.append(meta['id'])

            else:
                if not os.path.exists(os.path.join(path,
                                                   image_uuid + '.img')):
                    LOG.debug('%s dump is missing image data, skipping',
                              image_uuid)
                    continue

                # Upload the image itself
                with open(os.path.join(path, image_uuid + '.img')) as img_file:
                    try:
                        headers, body = client.add_image(meta, img_file)
                        _check_upload_response_headers(headers, body)
                        updated.append(meta['id'])
                    except exc.HTTPConflict:
                        LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image_uuid)  # noqa

    return updated
def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=six.text_type(msg))
def _get_request_body(self, request):
    """Deserialize the request and return its 'body' payload.

    Raises HTTPBadRequest when the deserialized output carries no body.
    """
    deserialized = super(RequestDeserializer, self).default(request)
    if 'body' in deserialized:
        return deserialized['body']
    msg = _('Body expected in request.')
    raise webob.exc.HTTPBadRequest(explanation=msg)
def _get_change_value(self, raw_change, op): if 'value' not in raw_change: msg = _('Operation "%s" requires a member named "value".') raise webob.exc.HTTPBadRequest(explanation=msg % op) return raw_change['value']
def _get_change_path_d10(self, raw_change): try: return raw_change['path'] except KeyError: msg = _("Unable to find '%s' in JSON Schema change") % 'path' raise webob.exc.HTTPBadRequest(explanation=msg)
def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir
def _validate_member_status(self, member_status): if member_status not in ['pending', 'accepted', 'rejected', 'all']: msg = _('Invalid status: %s') % member_status raise webob.exc.HTTPBadRequest(explanation=msg) return member_status
def import_image(self, req, image_id, body):
    """Kick off an asynchronous image-import task.

    Validates the image state against the requested import method,
    resolves the destination store, then creates and spawns an
    'api_image_import' task in a background thread pool.

    :param req: the incoming webob request (carries context and headers)
    :param image_id: identifier of the image to import into
    :param body: the import request document; must contain a 'method'
                 dict with 'name' and optionally 'uri'
    :returns: the image_id that was accepted for import
    :raises webob.exc.HTTPConflict: image state incompatible with the
        requested method, or unknown store
    :raises webob.exc.HTTPNotFound: image does not exist
    :raises webob.exc.HTTPBadRequest: bad web-download URI or task input
    :raises webob.exc.HTTPForbidden: caller may not create import tasks
    """
    image_repo = self.gateway.get_repo(req.context)
    task_factory = self.gateway.get_task_factory(req.context)
    executor_factory = self.gateway.get_task_executor_factory(req.context)
    task_repo = self.gateway.get_task_repo(req.context)
    # NOTE(review): assumes body always contains a 'method' dict —
    # presumably guaranteed by upstream request-schema validation;
    # otherwise .get('method') returns None and .get('name') raises
    # AttributeError. Confirm.
    import_method = body.get('method').get('name')
    uri = body.get('method').get('uri')

    try:
        image = image_repo.get(image_id)
        # State checks: each import method has a required source state.
        if image.status == 'active':
            msg = _("Image with status active cannot be target for import")
            raise exception.Conflict(msg)
        if image.status != 'queued' and import_method == 'web-download':
            msg = _("Image needs to be in 'queued' state to use "
                    "'web-download' method")
            raise exception.Conflict(msg)
        if (image.status != 'uploading' and
                import_method == 'glance-direct'):
            msg = _("Image needs to be staged before 'glance-direct' "
                    "method can be used")
            raise exception.Conflict(msg)
        # Both format fields must be set before any import can proceed.
        if not getattr(image, 'container_format', None):
            msg = _("'container_format' needs to be set before import")
            raise exception.Conflict(msg)
        if not getattr(image, 'disk_format', None):
            msg = _("'disk_format' needs to be set before import")
            raise exception.Conflict(msg)

        # Multi-store: honor an explicit store header, falling back to the
        # configured default, and verify the store actually exists.
        backend = None
        if CONF.enabled_backends:
            backend = req.headers.get('x-image-meta-store',
                                      CONF.glance_store.default_backend)
            try:
                glance_store.get_store_from_store_identifier(backend)
            except glance_store.UnknownScheme:
                msg = _("Store for scheme %s not found") % backend
                LOG.warn(msg)
                raise exception.Conflict(msg)
    except exception.Conflict as e:
        raise webob.exc.HTTPConflict(explanation=e.msg)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)

    task_input = {'image_id': image_id,
                  'import_req': body,
                  'backend': backend}

    if (import_method == 'web-download' and
            not utils.validate_import_uri(uri)):
        LOG.debug("URI for web-download does not pass filtering: %s",
                  uri)
        msg = (_("URI for web-download does not pass filtering: %s")
               % uri)
        raise webob.exc.HTTPBadRequest(explanation=msg)

    try:
        import_task = task_factory.new_task(task_type='api_image_import',
                                            owner=req.context.owner,
                                            task_input=task_input)
        task_repo.add(import_task)
        task_executor = executor_factory.new_task_executor(req.context)
        # Run the import asynchronously; the HTTP response returns
        # immediately with the image id while the task runs in the pool.
        pool = common.get_thread_pool("tasks_eventlet_pool")
        pool.spawn_n(import_task.run, task_executor)
    except exception.Forbidden as e:
        LOG.debug("User not permitted to create image import task.")
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    except exception.Conflict as e:
        raise webob.exc.HTTPConflict(explanation=e.msg)
    except exception.InvalidImageStatusTransition as e:
        raise webob.exc.HTTPConflict(explanation=e.msg)
    except ValueError as e:
        LOG.debug("Cannot import data for image %(id)s: %(e)s",
                  {'id': image_id,
                   'e': encodeutils.exception_to_unicode(e)})
        raise webob.exc.HTTPBadRequest(
            explanation=encodeutils.exception_to_unicode(e))

    return image_id
def _parse_OVF(self, ovf):
    """Parses the OVF file

    Parses the OVF file for specified metadata properties. Interested
    properties must be specified in ovf-metadata.json conf file.

    The OVF file's qualified namespaces are removed from the included
    properties.

    :param ovf: a file object containing the OVF file
    :returns: a tuple of disk filename and a properties dictionary
    :raises RuntimeError: an error for malformed OVF file
    """

    def _get_namespace_and_tag(tag):
        """Separate and return the namespace and tag elements.

        There is no native support for this operation in elementtree
        package. See http://bugs.python.org/issue18304 for details.
        """
        m = re.match(r'\{(.+)\}(.+)', tag)
        if m:
            return m.group(1), m.group(2)
        else:
            return '', tag

    disk_filename, file_elements, file_ref = None, None, None
    properties = {}
    # Stream-parse the OVF; 'end' events fire once an element (and all of
    # its children) has been fully read.
    for event, elem in ET.iterparse(ovf):
        if event == 'end':
            ns, tag = _get_namespace_and_tag(elem.tag)
            # Collect only properties listed in the configured
            # interested_properties, keyed as '<prefix>_<tag>'.
            if ns in CIM_NS and tag in self.interested_properties:
                properties[CIM_NS[ns] + '_' + tag] = (elem.text.strip()
                                                      if elem.text else '')

            if tag == 'DiskSection':
                disks = [child for child in list(elem)
                         if _get_namespace_and_tag(child.tag)[1] == 'Disk']
                if len(disks) > 1:
                    """
                    Currently only single disk image extraction is supported.
                    FIXME(dramakri): Support multiple images in OVA package
                    """
                    raise RuntimeError(_('Currently, OVA packages '
                                         'containing multiple disk are '
                                         'not supported.'))
                # Remember which File element the single disk points at.
                disk = next(iter(disks))
                file_ref = next(value for key, value in disk.items()
                                if _get_namespace_and_tag(key)[1] ==
                                'fileRef')

            if tag == 'References':
                file_elements = list(elem)

            # Clears elements to save memory except for 'File' and 'Disk'
            # references, which we will need to later access
            if tag != 'File' and tag != 'Disk':
                elem.clear()

    # Resolve the disk's fileRef to the actual file name ('href') from
    # the References section collected above.
    for file_element in file_elements:
        file_id = next(value for key, value in file_element.items()
                       if _get_namespace_and_tag(key)[1] == 'id')
        if file_id != file_ref:
            continue
        disk_filename = next(value for key, value in file_element.items()
                             if _get_namespace_and_tag(key)[1] == 'href')

    return (disk_filename, properties)
def get_base_properties():
    """Return the base JSON-schema 'properties' mapping for an image.

    Each key is an image attribute; values follow JSON-schema conventions
    ('type', 'description', optional 'enum'/'pattern'/'maxLength', and
    'readOnly' for server-managed fields). Format enums are taken from
    the image_format configuration group at call time.
    """
    return {
        'id': {
            'type': 'string',
            'description': _('An identifier for the image'),
            'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                        '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
        },
        'name': {
            'type': ['null', 'string'],
            'description': _('Descriptive name for the image'),
            'maxLength': 255,
        },
        'status': {
            'type': 'string',
            'readOnly': True,
            'description': _('Status of the image'),
            'enum': ['queued', 'saving', 'active', 'killed',
                     'deleted', 'uploading', 'importing',
                     'pending_delete', 'deactivated'],
        },
        'visibility': {
            'type': 'string',
            'description': _('Scope of image accessibility'),
            'enum': ['community', 'public', 'private', 'shared'],
        },
        'protected': {
            'type': 'boolean',
            'description': _('If true, image will not be deletable.'),
        },
        'os_hidden': {
            'type': 'boolean',
            'description': _('If true, image will not appear in default '
                             'image list response.'),
        },
        'checksum': {
            'type': ['null', 'string'],
            'readOnly': True,
            'description': _('md5 hash of image contents.'),
            'maxLength': 32,
        },
        'os_hash_algo': {
            'type': ['null', 'string'],
            'readOnly': True,
            'description': _('Algorithm to calculate the os_hash_value'),
            'maxLength': 64,
        },
        'os_hash_value': {
            'type': ['null', 'string'],
            'readOnly': True,
            'description': _('Hexdigest of the image contents using the '
                             'algorithm specified by the os_hash_algo'),
            'maxLength': 128,
        },
        'owner': {
            'type': ['null', 'string'],
            'description': _('Owner of the image'),
            'maxLength': 255,
        },
        'size': {
            'type': ['null', 'integer'],
            'readOnly': True,
            'description': _('Size of image file in bytes'),
        },
        'virtual_size': {
            'type': ['null', 'integer'],
            'readOnly': True,
            'description': _('Virtual size of image in bytes'),
        },
        'container_format': {
            'type': ['null', 'string'],
            'description': _('Format of the container'),
            'enum': [None] + CONF.image_format.container_formats,
        },
        'disk_format': {
            'type': ['null', 'string'],
            'description': _('Format of the disk'),
            'enum': [None] + CONF.image_format.disk_formats,
        },
        'created_at': {
            'type': 'string',
            'readOnly': True,
            'description': _('Date and time of image registration'),
            # TODO(bcwaldon): our jsonschema library doesn't seem to like the
            # format attribute, figure out why!
            # 'format': 'date-time',
        },
        'updated_at': {
            'type': 'string',
            'readOnly': True,
            'description': _('Date and time of the last image modification'),
            # 'format': 'date-time',
        },
        'tags': {
            'type': 'array',
            'description': _('List of strings related to the image'),
            'items': {
                'type': 'string',
                'maxLength': 255,
            },
        },
        'direct_url': {
            'type': 'string',
            'readOnly': True,
            'description': _('URL to access the image file kept in external '
                             'store'),
        },
        'min_ram': {
            'type': 'integer',
            'description': _('Amount of ram (in MB) required to boot image.'),
        },
        'min_disk': {
            'type': 'integer',
            'description': _('Amount of disk space (in GB) required to boot '
                             'image.'),
        },
        'self': {
            'type': 'string',
            'readOnly': True,
            'description': _('An image self url'),
        },
        'file': {
            'type': 'string',
            'readOnly': True,
            'description': _('An image file url'),
        },
        'backend': {
            'type': 'string',
            'readOnly': True,
            'description': _('Backend store to upload image to'),
        },
        'schema': {
            'type': 'string',
            'readOnly': True,
            'description': _('An image schema url'),
        },
        'locations': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'url': {
                        'type': 'string',
                        'maxLength': 255,
                    },
                    'metadata': {
                        'type': 'object',
                    },
                },
                'required': ['url', 'metadata'],
            },
            'description': _('A set of URLs to access the image file kept in '
                             'external store'),
        },
    }
from oslo_config import cfg import semantic_version from stevedore import enabled from glance.common.artifacts import definitions from glance.common import exception from glance.i18n import _, _LE, _LI, _LW from oslo_log import log as logging LOG = logging.getLogger(__name__) plugins_opts = [ cfg.BoolOpt('load_enabled', default=True, help=_('When false, no artifacts can be loaded regardless of' ' available_plugins. When true, artifacts can be' ' loaded.')), cfg.ListOpt('available_plugins', default=[], help=_('A list of artifacts that are allowed in the' ' format name or name-version. Empty list means that' ' any artifact can be loaded.')) ] CONF = cfg.CONF CONF.register_opts(plugins_opts) class ArtifactsPluginLoader(object): def __init__(self, namespace): self.mgr = enabled.EnabledExtensionManager(
def _check_unexpected(self, kwargs): if kwargs: msg = _('new_image() got unexpected keywords %s') raise TypeError(msg % kwargs.keys())
def replication_livecopy(options, args):
    """%(prog)s livecopy <fromserver:port> <toserver:port>

    Load the contents of one glance instance into another.

    fromserver:port: the location of the source glance instance.
    toserver:port:   the location of the target glance instance.
    """
    # Make sure from-server and to-server are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))

    imageservice = get_image_service()

    # NOTE: args.pop() consumes from the END of the list, so the target
    # endpoint is parsed first, then the source.
    target_server, target_port = utils.parse_valid_host_port(args.pop())
    target_conn = http.HTTPConnection(target_server, target_port)
    target_client = imageservice(target_conn, options.targettoken)

    source_server, source_port = utils.parse_valid_host_port(args.pop())
    source_conn = http.HTTPConnection(source_server, source_port)
    source_client = imageservice(source_conn, options.sourcetoken)

    # Ids of images whose metadata or data was pushed to the target.
    updated = []

    for image in source_client.get_images():
        LOG.debug('Considering %(id)s', {'id': image['id']})
        # Strip operator-excluded headers from the source metadata first.
        for key in options.dontreplicate.split(' '):
            if key in image:
                LOG.debug('Stripping %(header)s from source metadata',
                          {'header': key})
                del image[key]

        if _image_present(target_client, image['id']):
            # NOTE(mikal): Perhaps we just need to update the metadata?
            # Note that we don't attempt to change an image file once it
            # has been uploaded.
            headers = target_client.get_image_meta(image['id'])
            if headers['status'] == 'active':
                # Strip excluded headers on both sides before diffing so
                # they cannot trigger a spurious metadata update.
                for key in options.dontreplicate.split(' '):
                    if key in image:
                        LOG.debug('Stripping %(header)s from source '
                                  'metadata', {'header': key})
                        del image[key]
                    if key in headers:
                        LOG.debug('Stripping %(header)s from target '
                                  'metadata', {'header': key})
                        del headers[key]

                if _dict_diff(image, headers):
                    LOG.info(_LI('Image %(image_id)s (%(image_name)s) '
                                 'metadata has changed'),
                             {'image_id': image['id'],
                              'image_name': image.get('name',
                                                      '--unnamed--')})
                    headers, body = target_client.add_image_meta(image)
                    _check_upload_response_headers(headers, body)
                    updated.append(image['id'])

        elif image['status'] == 'active':
            # Image is absent from the target: copy data too, unless the
            # operator asked for metadata-only replication.
            LOG.info(_LI('Image %(image_id)s (%(image_name)s) '
                         '(%(image_size)d bytes) '
                         'is being synced'),
                     {'image_id': image['id'],
                      'image_name': image.get('name', '--unnamed--'),
                      'image_size': image['size']})
            if not options.metaonly:
                image_response = source_client.get_image(image['id'])
                try:
                    headers, body = target_client.add_image(image,
                                                            image_response)
                    _check_upload_response_headers(headers, body)
                    updated.append(image['id'])
                except exc.HTTPConflict:
                    LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image['id'])  # noqa

    return updated
def status(self, status):
    """Set the member status, allowing only the three valid values."""
    if status in ('pending', 'accepted', 'rejected'):
        self._status = status
        return
    msg = _('Status must be "pending", "accepted" or "rejected".')
    raise ValueError(msg)
COMMANDS = """Commands: help <command> Output help for one of the commands below compare What is missing from the target glance? dump Dump the contents of a glance instance to local disk. livecopy Load the contents of one glance instance into another. load Load the contents of a local directory into glance. size Determine the size of a glance instance if dumped to disk. """ IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on ' 'the target, but our check for it did ' 'not find it. This indicates that we ' 'do not have permissions to see all ' 'the images on the target server.') class ImageService(object): def __init__(self, conn, auth_token): """Initialize the ImageService. conn: a http_client.HTTPConnection to the glance server auth_token: authentication token to pass in the x-auth-token header """ self.auth_token = auth_token self.conn = conn def _http_request(self, method, url, headers, body,
def visibility(self, visibility):
    """Set the image visibility, restricted to the four valid scopes."""
    allowed = ('community', 'public', 'private', 'shared')
    if visibility not in allowed:
        raise ValueError(
            _('Visibility must be one of "community", '
              '"public", "private", or "shared"'))
    self._visibility = visibility
class AuthUrlNotFound(GlanceException):
    """Raised when no auth service responds at the configured URL."""
    message = _("Auth service at URL %(url)s not found.")
from oslo_policy import policy from six.moves import configparser import glance.api.policy from glance.common import exception from glance.i18n import _, _LE, _LW CONFIG = configparser.SafeConfigParser() LOG = logging.getLogger(__name__) property_opts = [ cfg.StrOpt('property_protection_file', help=_('The location of the property protection file.' 'This file contains the rules for property protections ' 'and the roles/policies associated with it. If this ' 'config value is not specified, by default, property ' 'protections won\'t be enforced. If a value is ' 'specified and the file is not found, then the ' 'glance-api service will not start.')), cfg.StrOpt('property_protection_rule_format', default='roles', choices=('roles', 'policies'), help=_('This config value indicates whether "roles" or ' '"policies" are used in the property protection file.')), ] CONF = cfg.CONF CONF.register_opts(property_opts) # NOTE (spredzy): Due to the particularly lengthy name of the exception # and the number of occurrence it is raise in this file, a variable is
class StorageQuotaFull(GlanceException):
    """Raised when storing image data would exceed the storage quota.

    Formatting expects 'image_size' and 'remaining' keyword values.
    """
    message = _("The size of the data %(image_size)s will exceed the limit. "
                "%(remaining)s bytes remaining.")
from glance.i18n import _ from glance.version import version_info as version paste_deploy_opts = [ cfg.StrOpt('flavor', sample_default='keystone', help=_(""" Deployment flavor to use in the server application pipeline. Provide a string value representing the appropriate deployment flavor used in the server application pipleline. This is typically the partial name of a pipeline in the paste configuration file with the service name removed. For example, if your paste section name in the paste configuration file is [pipeline:glance-api-keystone], set ``flavor`` to ``keystone``. Possible values: * String value representing a partial pipeline name. Related Options: * config_file """)), cfg.StrOpt('config_file', sample_default='glance-api-paste.ini', help=_(""" Name of the paste configuration file. Provide a string value representing the name of the paste
class Duplicate(GlanceException):
    """Raised when creating an object whose identifier already exists."""
    message = _("An object with the same identifier already exists.")
totalview['schema'] = '/v2/schemas/members' body = jsonutils.dumps(totalview, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def show(self, response, image_member): image_member_view = self._format_image_member(image_member) body = jsonutils.dumps(image_member_view, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' _MEMBER_SCHEMA = { 'member_id': { 'type': 'string', 'description': _('An identifier for the image member (tenantId)') }, 'image_id': { 'type': 'string', 'description': _('An identifier for the image'), 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), }, 'created_at': { 'type': 'string', 'description': _('Date and time of image member creation'), # TODO(brian-rosmaita): our jsonschema library doesn't seem to like the # format attribute, figure out why (and also fix in images.py) # 'format': 'date-time', }, 'updated_at': {
class BadAuthStrategy(GlanceException):
    """Raised when the configured auth strategy does not match the one the
    service responded with. Formatting expects 'expected' and 'received'.
    """
    message = _("Incorrect auth strategy, expected \"%(expected)s\" but "
                "received \"%(received)s\"")
class NotFound(GlanceException):
    """Raised when a lookup by identifier finds no matching object."""
    message = _("An object with the specified identifier was not found.")