def _get_images(self, context, filters, **params):
    """Get images, wrapping in exception if necessary.

    :param context: request context (admin status affects visibility)
    :param filters: attribute filters forwarded to the db layer
    :param params: extra query params (marker, limit, is_public, ...)
    :raises exc.HTTPBadRequest: if the marker image is missing or
        not accessible to the caller
    """
    # NOTE(markwash): for backwards compatibility, is_public=True for
    # admins actually means "treat me as if I'm not an admin and show me
    # all my images"
    if context.is_admin and params.get('is_public') is True:
        params['admin_as_user'] = True
        del params['is_public']
    try:
        return self.db_api.image_get_all(context, filters=filters,
                                         **params)
    except exception.ImageNotFound:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Invalid marker. Image %(id)s could not be "
                        "found.") % {'id': params.get('marker')})
        msg = _("Invalid marker. Image could not be found.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.Forbidden:
        LOG.warning(_LW("Access denied to image %(id)s but returning "
                        "'not found'") % {'id': params.get('marker')})
        msg = _("Invalid marker. Image could not be found.")
        raise exc.HTTPBadRequest(explanation=msg)
    except Exception:
        LOG.exception(_LE("Unable to get images"))
        raise
def _get(context, artifact_id, session, type_name=None, type_version=None,
         show_level=ga.Showlevel.BASIC):
    """Fetch a single artifact by id, honoring visibility rules.

    :param context: request context used for the visibility check
    :param artifact_id: id of the artifact to load
    :param session: active database session
    :param type_name: optional artifact type name filter
    :param type_version: optional artifact type version filter
    :param show_level: how much related data to eager-load
    :raises exception.ArtifactNotFound: if no matching row exists
    :raises exception.ArtifactForbidden: if the caller may not see it
    """
    values = dict(id=artifact_id)
    if type_name is not None:
        values['type_name'] = type_name
    if type_version is not None:
        values['type_version'] = type_version
    _set_version_fields(values)
    try:
        if show_level == ga.Showlevel.NONE:
            # Tags only -- the cheapest query.
            query = (
                session.query(models.Artifact).
                options(joinedload(models.Artifact.tags)).
                filter_by(**values))
        else:
            # Eager-load properties, tags and blobs (with locations).
            query = (
                session.query(models.Artifact).
                options(joinedload(models.Artifact.properties)).
                options(joinedload(models.Artifact.tags)).
                options(joinedload(models.Artifact.blobs).
                        joinedload(models.ArtifactBlob.locations)).
                filter_by(**values))
        artifact = query.one()
    except orm.exc.NoResultFound:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Artifact with id=%s not found") % artifact_id)
        raise exception.ArtifactNotFound(id=artifact_id)
    if not _check_visibility(context, artifact):
        LOG.warning(_LW("Artifact with id=%s is not accessible") %
                    artifact_id)
        raise exception.ArtifactForbidden(id=artifact_id)
    return artifact
def _get(context, artifact_id, session, type_name=None, type_version=None,
         show_level=ga.Showlevel.BASIC):
    """Fetch a single artifact by id, honoring visibility rules.

    :param context: request context used for the visibility check
    :param artifact_id: id of the artifact to load
    :param session: active database session
    :param type_name: optional artifact type name filter
    :param type_version: optional artifact type version filter
    :param show_level: how much related data to eager-load
    :raises exception.ArtifactNotFound: if no matching row exists
    :raises exception.ArtifactForbidden: if the caller may not see it
    """
    values = dict(id=artifact_id)
    if type_name is not None:
        values['type_name'] = type_name
    if type_version is not None:
        values['type_version'] = type_version
    _set_version_fields(values)
    try:
        if show_level == ga.Showlevel.NONE:
            # Tags only -- the cheapest query.
            query = (
                session.query(models.Artifact).
                options(joinedload(models.Artifact.tags)).
                filter_by(**values))
        else:
            # Eager-load properties, tags and blobs (with locations).
            query = (
                session.query(models.Artifact).
                options(joinedload(models.Artifact.properties)).
                options(joinedload(models.Artifact.tags)).
                options(joinedload(models.Artifact.blobs).
                        joinedload(models.ArtifactBlob.locations)).
                filter_by(**values))
        artifact = query.one()
    except orm.exc.NoResultFound:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Artifact with id=%s not found") % artifact_id)
        raise exception.ArtifactNotFound(id=artifact_id)
    if not _check_visibility(context, artifact):
        LOG.warning(_LW("Artifact with id=%s is not accessible") %
                    artifact_id)
        raise exception.ArtifactForbidden(id=artifact_id)
    return artifact
def replication_compare(options, args):
    """%(prog)s compare <fromserver:port> <toserver:port>

    Compare the contents of fromserver with those of toserver.

    fromserver:port: the location of the master xmonitor instance.
    toserver:port: the location of the slave xmonitor instance.
    """
    # Make sure from-server and to-server are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))

    imageservice = get_image_service()

    slave_server, slave_port = utils.parse_valid_host_port(args.pop())
    slave_conn = http_client.HTTPConnection(slave_server, slave_port)
    slave_client = imageservice(slave_conn, options.slavetoken)

    master_server, master_port = utils.parse_valid_host_port(args.pop())
    master_conn = http_client.HTTPConnection(master_server, master_port)
    master_client = imageservice(master_conn, options.mastertoken)

    differences = {}
    for image in master_client.get_images():
        if _image_present(slave_client, image['id']):
            headers = slave_client.get_image_meta(image['id'])
            # Strip headers the operator asked us not to compare.
            for key in options.dontreplicate.split(' '):
                if key in image:
                    LOG.debug('Stripping %(header)s from master metadata',
                              {'header': key})
                    del image[key]
                if key in headers:
                    LOG.debug('Stripping %(header)s from slave metadata',
                              {'header': key})
                    del headers[key]

            for key in image:
                if image[key] != headers.get(key, None):
                    # LOG.warn is a deprecated alias of LOG.warning.
                    LOG.warning(_LW('%(image_id)s: field %(key)s differs '
                                    '(source is %(master_value)s, destination '
                                    'is %(slave_value)s)')
                                % {'image_id': image['id'],
                                   'key': key,
                                   'master_value': image[key],
                                   'slave_value': headers.get(key,
                                                              'undefined')})
                    differences[image['id']] = 'diff'
                else:
                    LOG.debug('%(image_id)s is identical',
                              {'image_id': image['id']})

        elif image['status'] == 'active':
            LOG.warning(_LW('Image %s entirely missing from the destination')
                        % image['id'])
            differences[image['id']] = 'missing'

    return differences
def new_task_executor(self, context):
    """Instantiate the configured task executor.

    Falls back from the deprecated ``eventlet`` executor to ``taskflow``,
    emitting the deprecation warning at most once per process.

    :param context: request context handed to the executor
    :raises ImportError: (re-raised) if the executor class cannot load
    """
    try:
        # NOTE(flaper87): Backwards compatibility layer.
        # It'll allow us to provide a deprecation path to
        # users that are currently consuming the `eventlet`
        # executor.
        task_executor = CONF.task.task_executor
        if task_executor == 'eventlet':
            # NOTE(jokke): Making sure we do not log the deprecation
            # warning 1000 times or anything crazy like that.
            if not TaskExecutorFactory.eventlet_deprecation_warned:
                msg = _LW("The `eventlet` executor has been deprecated. "
                          "Use `taskflow` instead.")
                # LOG.warn is a deprecated alias of LOG.warning.
                LOG.warning(msg)
                TaskExecutorFactory.eventlet_deprecation_warned = True
            task_executor = 'taskflow'

        executor_cls = ('xmonitor.async.%s_executor.'
                        'TaskExecutor' % task_executor)
        LOG.debug("Loading %s executor", task_executor)
        executor = importutils.import_class(executor_cls)
        return executor(context,
                        self.task_repo,
                        self.image_repo,
                        self.image_factory)
    except ImportError:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to load the %s executor provided "
                              "in the config.") % CONF.task.task_executor)
def _update_rt_association(table, values, rt_id, namespace_id):
    """Update one resource-type association row; log duplicates and move on."""
    match = and_(table.c.resource_type_id == rt_id,
                 table.c.namespace_id == namespace_id)
    try:
        table.update(values=values).where(match).execute()
    except sqlalchemy.exc.IntegrityError:
        LOG.warning(_LW("Duplicate entry for values: %s"), values)
def new_task_executor(self, context):
    """Instantiate the configured task executor.

    Falls back from the deprecated ``eventlet`` executor to ``taskflow``,
    emitting the deprecation warning at most once per process.

    :param context: request context handed to the executor
    :raises ImportError: (re-raised) if the executor class cannot load
    """
    try:
        # NOTE(flaper87): Backwards compatibility layer.
        # It'll allow us to provide a deprecation path to
        # users that are currently consuming the `eventlet`
        # executor.
        task_executor = CONF.task.task_executor
        if task_executor == 'eventlet':
            # NOTE(jokke): Making sure we do not log the deprecation
            # warning 1000 times or anything crazy like that.
            if not TaskExecutorFactory.eventlet_deprecation_warned:
                msg = _LW("The `eventlet` executor has been deprecated. "
                          "Use `taskflow` instead.")
                # LOG.warn is a deprecated alias of LOG.warning.
                LOG.warning(msg)
                TaskExecutorFactory.eventlet_deprecation_warned = True
            task_executor = 'taskflow'

        executor_cls = ('xmonitor.async.%s_executor.'
                        'TaskExecutor' % task_executor)
        LOG.debug("Loading %s executor", task_executor)
        executor = importutils.import_class(executor_cls)
        return executor(context,
                        self.task_repo,
                        self.image_repo,
                        self.image_factory)
    except ImportError:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to load the %s executor provided "
                              "in the config.") % CONF.task.task_executor)
def get_data(self, offset=0, chunk_size=None):
    """Return image data from the first reachable location.

    :param offset: byte offset to start reading from
    :param chunk_size: optional read size hint for the backend
    :raises store.NotFound: if the image has no locations at all
    :raises Exception: the last backend error if every location fails
    """
    if not self.image.locations:
        # NOTE(mclaren): This is the only set of arguments
        # which work with this exception currently, see:
        # https://bugs.launchpad.net/glance-store/+bug/1501443
        # When the above glance_store bug is fixed we can
        # add a msg as usual.
        raise store.NotFound(image=None)
    err = None
    for loc in self.image.locations:
        try:
            data, size = self.store_api.get_from_backend(
                loc['url'],
                offset=offset,
                chunk_size=chunk_size,
                context=self.context)

            return data
        except Exception as e:
            # Try the next location; remember the error for re-raise.
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW('Get image %(id)s data failed: '
                            '%(err)s.')
                        % {'id': self.image.image_id,
                           'err': encodeutils.exception_to_unicode(e)})
            err = e
    # tried all locations
    LOG.error(_LE('Glance tried all active locations to get data for '
                  'image %s but all have failed.') % self.image.image_id)
    raise err
def delete_cached_file(path):
    """Remove a cached image file if it exists, logging either way.

    :param path: filesystem path of the cached image file
    """
    if os.path.exists(path):
        LOG.debug("Deleting image cache file '%s'", path)
        os.unlink(path)
    else:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Cached image file '%s' doesn't exist, unable to"
                        " delete") % path)
def _do_dependencies(artifact, new_dependencies, session):
    """Replace an artifact's direct dependencies with *new_dependencies*.

    :param artifact: artifact model whose dependency rows are rewritten
    :param new_dependencies: mapping of dependency name -> list of dest ids
    :param session: active database session (old rows are deleted on it)
    """
    deps_to_update = []
    # small check that all dependencies are new
    if artifact.dependencies is not None:
        for db_dep in artifact.dependencies:
            for dep in new_dependencies.keys():
                if db_dep.name == dep:
                    msg = _LW("Artifact with the specified type, name "
                              "and versions already has the direct "
                              "dependency=%s") % dep
                    # LOG.warn is a deprecated alias of LOG.warning.
                    LOG.warning(msg)
    # change values of former dependency
    for dep in artifact.dependencies:
        session.delete(dep)
    artifact.dependencies = []
    for depname, depvalues in new_dependencies.items():
        for pos, depvalue in enumerate(depvalues):
            db_dep = models.ArtifactDependency()
            db_dep.name = depname
            db_dep.artifact_source = artifact.id
            db_dep.artifact_dest = depvalue
            db_dep.artifact_origin = artifact.id
            db_dep.is_direct = True
            db_dep.position = pos
            deps_to_update.append(db_dep)
    artifact.dependencies = deps_to_update
def safe_delete_from_backend(context, image_id, location):
    """
    Given a location, delete an image from the store and update location
    status to db.

    This function try to handle all known exceptions which might be raised
    by those calls on store and DB modules in its implementation.

    :param context: The request context
    :param image_id: The image identifier
    :param location: The image location entry
    """
    try:
        ret = store_api.delete_from_backend(location['url'], context=context)
        location['status'] = 'deleted'
        if 'id' in location:
            db_api.get_api().image_location_delete(context, image_id,
                                                   location['id'], 'deleted')
        return ret
    except store_api.NotFound:
        msg = _LW('Failed to delete image %s in store from URI') % image_id
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
    except store_api.StoreDeleteNotSupported as e:
        LOG.warning(encodeutils.exception_to_unicode(e))
    except store_api.UnsupportedBackend:
        exc_type = sys.exc_info()[0].__name__
        msg = (_LE('Failed to delete image %(image_id)s from store: %(exc)s')
               % dict(image_id=image_id, exc=exc_type))
        LOG.error(msg)
def _do_transitive_dependencies(artifact, session):
    """Collect indirect dependency rows implied by the direct dependencies.

    :param artifact: artifact model being published
    :param session: active database session
    :raises exception.ArtifactDuplicateTransitiveDependency: if a
        transitive dependency row already exists
    :returns: list of new (unpersisted) ArtifactDependency rows
    """
    deps_to_update = []
    for dependency in artifact.dependencies:
        depvalue = dependency.artifact_dest
        transitdeps = session.query(models.ArtifactDependency).filter_by(
            artifact_source=depvalue).all()
        for transitdep in transitdeps:
            if not transitdep.is_direct:
                # transitive dependencies are already created
                msg = _LW("Artifact with the specified type, "
                          "name and version already has the "
                          "direct dependency=%d") % transitdep.id
                # LOG.warn is a deprecated alias of LOG.warning.
                LOG.warning(msg)
                raise exception.ArtifactDuplicateTransitiveDependency(
                    dep=transitdep.id)

            db_dep = models.ArtifactDependency()
            db_dep.name = transitdep['name']
            db_dep.artifact_source = artifact.id
            db_dep.artifact_dest = transitdep.artifact_dest
            db_dep.artifact_origin = transitdep.artifact_source
            db_dep.is_direct = False
            db_dep.position = transitdep.position
            deps_to_update.append(db_dep)
    return deps_to_update
def get_data(self, offset=0, chunk_size=None):
    """Return image data from the first reachable location.

    :param offset: byte offset to start reading from
    :param chunk_size: optional read size hint for the backend
    :raises store.NotFound: if the image has no locations at all
    :raises Exception: the last backend error if every location fails
    """
    if not self.image.locations:
        # NOTE(mclaren): This is the only set of arguments
        # which work with this exception currently, see:
        # https://bugs.launchpad.net/glance-store/+bug/1501443
        # When the above glance_store bug is fixed we can
        # add a msg as usual.
        raise store.NotFound(image=None)
    err = None
    for loc in self.image.locations:
        try:
            data, size = self.store_api.get_from_backend(
                loc['url'],
                offset=offset,
                chunk_size=chunk_size,
                context=self.context)

            return data
        except Exception as e:
            # Try the next location; remember the error for re-raise.
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW('Get image %(id)s data failed: '
                            '%(err)s.')
                        % {'id': self.image.image_id,
                           'err': encodeutils.exception_to_unicode(e)})
            err = e
    # tried all locations
    LOG.error(_LE('Glance tried all active locations to get data for '
                  'image %s but all have failed.') % self.image.image_id)
    raise err
def execute(self, image_id, file_path):
    """Convert the downloaded image into the configured disk format.

    :param image_id: id of the image being imported
    :param file_path: 'file://...' URI of the downloaded data
    :returns: file_path on success, or None when no conversion format
        is configured (task becomes a no-op)
    :raises RuntimeError: if qemu-img reports anything on stderr
    """
    # NOTE(flaper87): A format must be explicitly
    # specified. There's no "sane" default for this
    # because the dest format may work differently depending
    # on the environment OpenStack is running in.
    conversion_format = CONF.taskflow_executor.conversion_format
    if conversion_format is None:
        if not _Convert.conversion_missing_warned:
            msg = (_LW('The conversion format is None, please add a value '
                       'for it in the config file for this task to '
                       'work: %s') % self.task_id)
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(msg)
            _Convert.conversion_missing_warned = True
        return

    # TODO(flaper87): Check whether the image is in the desired
    # format already. Probably using `qemu-img` just like the
    # `Introspection` task.
    dest_path = os.path.join(CONF.task.work_dir, "%s.converted" % image_id)
    stdout, stderr = putils.trycmd('qemu-img', 'convert', '-O',
                                   conversion_format, file_path, dest_path,
                                   log_errors=putils.LOG_ALL_ERRORS)

    if stderr:
        raise RuntimeError(stderr)

    # Replace the original file with the converted one in place.
    os.rename(dest_path, file_path.split("file://")[-1])

    return file_path
def set_image_data(image, uri, task_id):
    """Download image data from *uri* and attach it to *image*.

    :param image: image object exposing a ``set_data`` method
    :param uri: location to fetch the data from
    :param task_id: id of the import task (for log correlation)
    """
    data_iter = None
    try:
        LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be "
                     "imported"), {"data_uri": uri, "task_id": task_id})
        data_iter = script_utils.get_image_data_iter(uri)
        image.set_data(data_iter)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW("Task %(task_id)s failed with exception %(error)s")
                        % {"error": encodeutils.exception_to_unicode(e),
                           "task_id": task_id})
            LOG.info(_LI("Task %(task_id)s: Could not import image file"
                         " %(image_data)s"), {"image_data": uri,
                                              "task_id": task_id})
    finally:
        # Always release the data iterator, even on failure.
        if hasattr(data_iter, 'close'):
            data_iter.close()
def _create_or_update(context, values, artifact_id, session, type_name,
                      type_version=None):
    """Create a new artifact or update an existing one in one transaction.

    :param context: request context
    :param values: artifact field values (deep-copied; caller's dict is
        left untouched)
    :param artifact_id: id of the artifact to update, or None to create
    :param session: active database session
    :param type_name: artifact type name
    :param type_version: optional artifact type version
    :raises exception.ArtifactDuplicateNameTypeVersion: on a unique-key
        clash when saving
    :returns: the persisted artifact model
    """
    values = copy.deepcopy(values)
    with session.begin():
        _set_version_fields(values)
        _validate_values(values)
        _drop_protected_attrs(models.Artifact, values)
        if artifact_id:
            # update existing artifact
            state = values.get('state')
            show_level = ga.Showlevel.BASIC
            if state is not None:
                if state == 'active':
                    show_level = ga.Showlevel.DIRECT
                    values['published_at'] = timeutils.utcnow()
                if state == 'deleted':
                    values['deleted_at'] = timeutils.utcnow()

            artifact = _get(context, artifact_id, session, type_name,
                            type_version, show_level=show_level)
            _validate_transition(artifact.state,
                                 values.get('state') or artifact.state)
        else:
            # create new artifact
            artifact = models.Artifact()
            if 'id' not in values:
                artifact.id = str(uuid.uuid4())
            else:
                artifact.id = values['id']

        if 'tags' in values:
            tags = values.pop('tags')
            artifact.tags = _do_tags(artifact, tags)

        if 'properties' in values:
            properties = values.pop('properties', {})
            artifact.properties = _do_properties(artifact, properties)

        if 'blobs' in values:
            blobs = values.pop('blobs')
            artifact.blobs = _do_blobs(artifact, blobs)

        if 'dependencies' in values:
            dependencies = values.pop('dependencies')
            _do_dependencies(artifact, dependencies, session)

        if values.get('state', None) == 'publish':
            artifact.dependencies.extend(
                _do_transitive_dependencies(artifact, session))

        artifact.update(values)
        try:
            artifact.save(session=session)
        except db_exc.DBDuplicateEntry:
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW("Artifact with the specified type, name and "
                            "version already exists"))
            raise exception.ArtifactDuplicateNameTypeVersion()

    return artifact
def wrapper(*args, **kwargs):
    """Run the wrapped optional task, logging (never raising) failures."""
    try:
        return func(*args, **kwargs)
    except Exception as exc:
        msg = (_LW("An optional task has failed, "
                   "the failure was: %s") %
               encodeutils.exception_to_unicode(exc))
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
def delete(self, req, image_id, id):
    """
    Removes a membership from the image.

    :param req: incoming request (carries the auth context)
    :param image_id: id of the image whose membership is removed
    :param id: member id to remove
    :raises webob.exc.HTTPNotFound: image or membership not found (also
        used to mask Forbidden, so existence is not revealed)
    :raises webob.exc.HTTPForbidden: caller may not share the image
    """
    self._check_can_access_image_members(req.context)

    # Make sure the image exists
    try:
        image = self.db_api.image_get(req.context, image_id)
    except exception.NotFound:
        msg = _("Image %(id)s not found") % {'id': image_id}
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
        raise webob.exc.HTTPNotFound(msg)
    except exception.Forbidden:
        # If it's private and doesn't belong to them, don't let on
        # that it exists
        msg = _LW("Access denied to image %(id)s but returning"
                  " 'not found'") % {'id': image_id}
        LOG.warning(msg)
        raise webob.exc.HTTPNotFound()

    # Can they manipulate the membership?
    if not self.is_image_sharable(req.context, image):
        msg = (_LW("User lacks permission to share image %(id)s") %
               {'id': image_id})
        LOG.warning(msg)
        msg = _("No permission to share that image")
        raise webob.exc.HTTPForbidden(msg)

    # Look up an existing membership
    members = self.db_api.image_member_find(req.context,
                                            image_id=image_id,
                                            member=id)
    if members:
        self.db_api.image_member_delete(req.context, members[0]['id'])
    else:
        LOG.debug("%(id)s is not a member of image %(image_id)s",
                  {'id': id, 'image_id': image_id})
        msg = _("Membership could not be found.")
        raise webob.exc.HTTPNotFound(explanation=msg)

    # Make an appropriate result
    LOG.info(_LI("Successfully deleted a membership from image %(id)s"),
             {'id': image_id})
    return webob.exc.HTTPNoContent()
def _get(context, id, session):
    """Look up a metadef tag by id.

    :param context: request context (unused here; kept for API symmetry)
    :param id: primary key of the tag
    :param session: active database session
    :raises exc.MetadefTagNotFound: if no tag with *id* exists
    :returns: the MetadefTag model
    """
    try:
        query = (session.query(models.MetadefTag).filter_by(id=id))
        metadef_tag = query.one()
    except sa_orm.exc.NoResultFound:
        msg = (_LW("Metadata tag not found for id %s") % id)
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
        raise exc.MetadefTagNotFound(message=msg)
    return metadef_tag
def _remove_children(self, pid):
    """Forget a reaped child *pid*, whether it was live, stale or unknown."""
    if pid in self.children:
        self.children.remove(pid)
        LOG.info(_LI('Removed dead child %s'), pid)
    elif pid in self.stale_children:
        self.stale_children.remove(pid)
        LOG.info(_LI('Removed stale child %s'), pid)
    else:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW('Unrecognised child %s') % pid)
def get(self, req, task_id):
    """Return the task identified by *task_id*.

    :param req: incoming request (carries the auth context)
    :param task_id: id of the task to fetch
    :raises webob.exc.HTTPNotFound: if the task does not exist
    :raises webob.exc.HTTPForbidden: if the caller may not view it
    """
    try:
        task_repo = self.gateway.get_task_repo(req.context)
        task = task_repo.get(task_id)
    except exception.NotFound as e:
        msg = (_LW("Failed to find task %(task_id)s. Reason: %(reason)s")
               % {'task_id': task_id,
                  'reason': encodeutils.exception_to_unicode(e)})
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except exception.Forbidden as e:
        msg = (_LW("Forbidden to get task %(task_id)s. Reason:"
                   " %(reason)s")
               % {'task_id': task_id,
                  'reason': encodeutils.exception_to_unicode(e)})
        LOG.warning(msg)
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    return task
def check_quota(context, image_size, db_api, image_id=None):
    """Method called to see if the user is allowed to store an image.

    Checks if it is allowed based on the given size in xmonitor based on
    their quota and current usage.

    :param context: request context carrying the user identity
    :param image_size: The size of the image we hope to store
    :param db_api: The db_api in use for this configuration
    :param image_id: The image that will be replaced with this new data size
    :raises exception.StorageQuotaFull: when the upload would exceed quota
    :returns: remaining quota in bytes, or None when quota is unlimited
    """
    remaining = get_remaining_quota(context, db_api, image_id=image_id)

    if remaining is None:
        return

    user = getattr(context, 'user', '<unknown>')

    if image_size is None:
        # NOTE(jbresnah) When the image size is None it means that it is
        # not known.  In this case the only time we will raise an
        # exception is when there is no room left at all, thus we know
        # it will not fit
        if remaining <= 0:
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW("User %(user)s attempted to upload an image of"
                            " unknown size that will exceed the quota."
                            " %(remaining)d bytes remaining.")
                        % {'user': user, 'remaining': remaining})
            raise exception.StorageQuotaFull(image_size=image_size,
                                             remaining=remaining)
        return

    if image_size > remaining:
        LOG.warning(_LW("User %(user)s attempted to upload an image of size"
                        " %(size)d that will exceed the quota. %(remaining)d"
                        " bytes remaining.")
                    % {'user': user, 'size': image_size,
                       'remaining': remaining})
        raise exception.StorageQuotaFull(image_size=image_size,
                                         remaining=remaining)

    return remaining
def validate_key_cert(key_file, cert_file):
    """Verify that the key/cert pair loads and that they belong together.

    :param key_file: path to a PEM-encoded private key
    :param cert_file: path to a PEM-encoded certificate
    :raises RuntimeError: if either file is unreadable/invalid, or the
        sign/verify round-trip between key and cert fails
    """
    try:
        error_key_name = "private key"
        error_filename = key_file
        with open(key_file, 'r') as keyfile:
            key_str = keyfile.read()
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)

        error_key_name = "certificate"
        error_filename = cert_file
        with open(cert_file, 'r') as certfile:
            cert_str = certfile.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except IOError as ioe:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it."
                             " Error: %(ioe)s")
                           % {'error_key_name': error_key_name,
                              'error_filename': error_filename,
                              'ioe': ioe})
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it. OpenSSL"
                             " error: %(ce)s")
                           % {'error_key_name': error_key_name,
                              'error_filename': error_filename,
                              'ce': ce})

    try:
        data = str(uuid.uuid4())
        # On Python 3, explicitly encode to UTF-8 to call crypto.sign() which
        # requires bytes. Otherwise, it raises a deprecation warning (and
        # will raise an error later).
        data = encodeutils.to_utf8(data)
        digest = CONF.digest_algorithm
        if digest == 'sha1':
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(
                _LW('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
                    ' state that the SHA-1 is not suitable for'
                    ' general-purpose digital signature applications (as'
                    ' specified in FIPS 186-3) that require 112 bits of'
                    ' security. The default value is sha1 in Kilo for a'
                    ' smooth upgrade process, and it will be updated'
                    ' with sha256 in next release(L).'))
        out = crypto.sign(key, data, digest)
        crypto.verify(cert, out, data, digest)
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your key pair. "
                             "Please verify that cert %(cert_file)s and "
                             "key %(key_file)s belong together. OpenSSL "
                             "error %(ce)s")
                           % {'cert_file': cert_file,
                              'key_file': key_file,
                              'ce': ce})
def _check_ext(ext):
    """Accept a plugin extension only if it is in the available list.

    :param ext: stevedore extension wrapping an artifact plugin
    :raises exception.ArtifactLoadError: if the plugin is not allowed
    :returns: True when the plugin is accepted
    """
    try:
        # Matching name with either a wildcard (None) or exact version.
        next(n for n, v in available
             if n == ext.plugin.metadata.type_name and
             (v is None or v == ext.plugin.metadata.type_version))
    except StopIteration:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Can't load artifact %s: not in"
                        " available_plugins list") % ext.name)
        raise exception.ArtifactLoadError(name=ext.name)
    LOG.info(_LI("Artifact %s has been successfully loaded"), ext.name)
    return True
def _check_ext(ext):
    """Accept a plugin extension only if it is in the available list.

    :param ext: stevedore extension wrapping an artifact plugin
    :raises exception.ArtifactLoadError: if the plugin is not allowed
    :returns: True when the plugin is accepted
    """
    try:
        # Matching name with either a wildcard (None) or exact version.
        next(n for n, v in available
             if n == ext.plugin.metadata.type_name and
             (v is None or v == ext.plugin.metadata.type_version))
    except StopIteration:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Can't load artifact %s: not in"
                        " available_plugins list") % ext.name)
        raise exception.ArtifactLoadError(name=ext.name)
    LOG.info(_LI("Artifact %s has been successfully loaded"), ext.name)
    return True
def load_custom_properties():
    """Find the schema properties files and load them into a dict.

    :returns: parsed custom properties, or {} when the file is absent
    """
    filename = 'schema-image.json'
    match = CONF.find_file(filename)
    if match:
        with open(match, 'r') as schema_file:
            schema_data = schema_file.read()
        return json.loads(schema_data)
    else:
        msg = (_LW('Could not find schema properties file %s. Continuing '
                   'without custom properties') % filename)
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
        return {}
def __init__(self, mapper):
    """Deprecated /v3 router: warn once, then route everything to redirect."""
    versionutils.report_deprecated_feature(
        LOG,
        _LW('/v3 controller is deprecated and will be removed from '
            'xmonitor-api soon. Remove the reference to it from '
            'xmonitor-api-paste.ini configuration file and use Glance '
            'Artifact Service API instead'))
    redirector = self._get_redirector()
    # Both the bare collection and any sub-path go to the redirector.
    for route in ("/artifacts", "/artifacts/{path:.*}"):
        mapper.connect(None, route,
                       controller=redirector,
                       action='redirect')
    super(API, self).__init__(mapper)
def process_response(self, resp):
    """Ensure the response carries an 'x-openstack-request-id' header.

    :param resp: outgoing response; its request context supplies the id
    :returns: resp, possibly with the request-id header added
    """
    try:
        request_id = resp.request.context.request_id
    except AttributeError:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW('Unable to retrieve request id from context'))
    else:
        # For python 3 compatibility need to use bytes type
        prefix = b'req-' if isinstance(request_id, bytes) else 'req-'

        if not request_id.startswith(prefix):
            request_id = prefix + request_id

        resp.headers['x-openstack-request-id'] = request_id

    return resp
def fetch_image_into_cache(self, image_id):
    """Fetch an active image's data and tee it into the local cache.

    :param image_id: id of the image to cache
    :returns: True when the image was cached, False otherwise
    """
    ctx = context.RequestContext(is_admin=True, show_deleted=True)
    try:
        image_meta = registry.get_image_metadata(ctx, image_id)
        if image_meta['status'] != 'active':
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW("Image '%s' is not active. Not caching.") %
                        image_id)
            return False

    except exception.NotFound:
        LOG.warning(_LW("No metadata found for image '%s'") % image_id)
        return False

    location = image_meta['location']
    image_data, image_size = glance_store.get_from_backend(location,
                                                           context=ctx)
    LOG.debug("Caching image '%s'", image_id)
    cache_tee_iter = self.cache.cache_tee_iter(image_id, image_data,
                                               image_meta['checksum'])
    # Image is tee'd into cache and checksum verified
    # as we iterate
    list(cache_tee_iter)
    return True
def _get_images(self, context, filters, **params):
    """Get images, wrapping in exception if necessary.

    :param context: request context (admin status affects visibility)
    :param filters: attribute filters forwarded to the db layer
    :param params: extra query params (marker, limit, is_public, ...)
    :raises exc.HTTPBadRequest: if the marker image is missing or
        not accessible to the caller
    """
    # NOTE(markwash): for backwards compatibility, is_public=True for
    # admins actually means "treat me as if I'm not an admin and show me
    # all my images"
    if context.is_admin and params.get('is_public') is True:
        params['admin_as_user'] = True
        del params['is_public']
    try:
        return self.db_api.image_get_all(context, filters=filters,
                                         **params)
    except exception.ImageNotFound:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Invalid marker. Image %(id)s could not be "
                        "found.") % {'id': params.get('marker')})
        msg = _("Invalid marker. Image could not be found.")
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.Forbidden:
        LOG.warning(_LW("Access denied to image %(id)s but returning "
                        "'not found'") % {'id': params.get('marker')})
        msg = _("Invalid marker. Image could not be found.")
        raise exc.HTTPBadRequest(explanation=msg)
    except Exception:
        LOG.exception(_LE("Unable to get images"))
        raise
def rollback(e):
    """Move the partial cache file aside and purge its DB row.

    :param e: the exception that aborted the fetch (logged only)
    """
    with self.get_db() as db:
        if os.path.exists(incomplete_path):
            invalid_path = self.get_image_filepath(image_id, 'invalid')

            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW("Fetch of cache file failed (%(e)s), rolling "
                            "back by moving '%(incomplete_path)s' to "
                            "'%(invalid_path)s'")
                        % {'e': e,
                           'incomplete_path': incomplete_path,
                           'invalid_path': invalid_path})
            os.rename(incomplete_path, invalid_path)

        db.execute("""DELETE FROM cached_images
                   WHERE image_id = ?""", (image_id, ))
        db.commit()
def index_shared_images(self, req, id):
    """
    Retrieves images shared with the given member.

    :param req: incoming request (carries the auth context)
    :param id: member whose shared images are listed
    :raises webob.exc.HTTPBadRequest: if the member cannot be found
    :returns: dict with a 'shared_images' member list
    """
    try:
        members = self.db_api.image_member_find(req.context, member=id)
    except exception.NotFound:
        msg = _LW("Member %(id)s not found") % {'id': id}
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
        msg = _("Membership could not be found.")
        raise webob.exc.HTTPBadRequest(explanation=msg)

    LOG.debug("Returning list of images shared with member %(id)s",
              {'id': id})
    return dict(shared_images=make_member_list(members,
                                               image_id='image_id',
                                               can_share='can_share'))
def delete_stalled_files(self, older_than):
    """
    Removes any incomplete cache entries older than a supplied
    modified time.

    :param older_than: Files written to on or before this timestamp
                       will be deleted.
    """
    for path in self.get_cache_files(self.incomplete_dir):
        if os.path.getmtime(path) < older_than:
            try:
                os.unlink(path)
                LOG.info(_LI("Removed stalled cache file %s"), path)
            except Exception as e:
                # BUG FIX: the original built a (template, dict) tuple
                # because the '%' operator was missing, so the tuple --
                # not the formatted message -- was logged. Interpolate
                # explicitly before logging.
                msg = (_LW("Failed to delete file %(path)s. "
                           "Got error: %(e)s") % dict(path=path, e=e))
                LOG.warning(msg)
def set_image_data(image, uri, task_id):
    """Download image data from *uri* and attach it to *image*.

    :param image: image object exposing a ``set_data`` method
    :param uri: location to fetch the data from
    :param task_id: id of the import task (for log correlation)
    """
    data_iter = None
    try:
        LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be "
                     "imported"), {"data_uri": uri, "task_id": task_id})
        data_iter = script_utils.get_image_data_iter(uri)
        image.set_data(data_iter)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW("Task %(task_id)s failed with exception %(error)s")
                        % {"error": encodeutils.exception_to_unicode(e),
                           "task_id": task_id})
            LOG.info(_LI("Task %(task_id)s: Could not import image file"
                         " %(image_data)s"), {"image_data": uri,
                                              "task_id": task_id})
    finally:
        # Always release the data iterator, even on failure.
        if hasattr(data_iter, 'close'):
            data_iter.close()
def delete_from_store(self, location):
    """Delete the blob's data at *location*, swallowing known store errors.

    :param location: location entry; its 'status' is set to 'deleted'
        on success
    :returns: the backend's delete result, or None when a known store
        error was swallowed
    """
    try:
        ret = self.store_api.delete_from_backend(location['value'],
                                                 context=self.context)
        location['status'] = 'deleted'
        return ret
    except self.store_api.NotFound:
        msg = _LW('Failed to delete blob'
                  ' %s in store from URI') % self.blob.id
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
    except self.store_api.StoreDeleteNotSupported as e:
        LOG.warning(encodeutils.exception_to_unicode(e))
    except self.store_api.UnsupportedBackend:
        exc_type = sys.exc_info()[0].__name__
        msg = (_LE('Failed to delete blob'
                   ' %(blob_id)s from store: %(exc)s')
               % dict(blob_id=self.blob.id, exc=exc_type))
        LOG.error(msg)
def create(self, req, task):
    """Create a new task and start its execution asynchronously.

    :param req: incoming request (carries the auth context)
    :param task: dict with 'type' and 'input' for the new task
    :raises webob.exc.HTTPForbidden: if task creation is not allowed
    :returns: the newly created task
    """
    task_factory = self.gateway.get_task_factory(req.context)
    executor_factory = self.gateway.get_task_executor_factory(req.context)
    task_repo = self.gateway.get_task_repo(req.context)
    try:
        new_task = task_factory.new_task(task_type=task['type'],
                                         owner=req.context.owner,
                                         task_input=task['input'])
        task_repo.add(new_task)
        task_executor = executor_factory.new_task_executor(req.context)
        pool = common.get_thread_pool("tasks_eventlet_pool")
        # Run the task in the background; this call returns immediately.
        pool.spawn_n(new_task.run, task_executor)
    except exception.Forbidden as e:
        msg = (_LW("Forbidden to create task. Reason: %(reason)s")
               % {'reason': encodeutils.exception_to_unicode(e)})
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    return new_task
def rollback(e):
    """Move the partial cache file aside and purge its DB row.

    :param e: the exception that aborted the fetch (logged only)
    """
    with self.get_db() as db:
        if os.path.exists(incomplete_path):
            invalid_path = self.get_image_filepath(image_id, 'invalid')

            # LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(_LW("Fetch of cache file failed (%(e)s), rolling "
                            "back by moving '%(incomplete_path)s' to "
                            "'%(invalid_path)s'")
                        % {'e': e,
                           'incomplete_path': incomplete_path,
                           'invalid_path': invalid_path})
            os.rename(incomplete_path, invalid_path)

        db.execute("""DELETE FROM cached_images
                   WHERE image_id = ?""", (image_id, ))
        db.commit()
def configure_driver(self):
    """
    Configure the driver for the cache and, if it fails to configure,
    fall back to using the SQLite driver which has no odd dependencies
    """
    try:
        self.driver = self.driver_class()
        self.driver.configure()
    except exception.BadDriverConfiguration as config_err:
        driver_module = self.driver_class.__module__
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Image cache driver "
                        "'%(driver_module)s' failed to configure. "
                        "Got error: '%(config_err)s"),
                    {'driver_module': driver_module,
                     'config_err': config_err})
        LOG.info(_LI("Defaulting to SQLite driver."))
        default_module = __name__ + '.drivers.sqlite.Driver'
        self.driver_class = importutils.import_class(default_module)
        self.driver = self.driver_class()
        self.driver.configure()
def run(self):
    """Prefetch every queued image into the cache.

    :returns: True when all queued images were cached (or none were
        queued), False if any prefetch failed
    """
    images = self.cache.get_queued_images()
    if not images:
        LOG.debug("Nothing to prefetch.")
        return True

    num_images = len(images)
    LOG.debug("Found %d images to prefetch", num_images)

    pool = eventlet.GreenPool(num_images)
    results = pool.imap(self.fetch_image_into_cache, images)
    successes = sum([1 for r in results if r is True])
    if successes != num_images:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Failed to successfully cache all "
                        "images in queue."))
        return False

    LOG.info(_LI("Successfully cached all %d images"), num_images)
    return True
def init_driver(self):
    """
    Create the driver for the cache
    """
    driver_name = CONF.image_cache_driver
    driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
    try:
        self.driver_class = importutils.import_class(driver_module)
        LOG.info(_LI("Image cache loaded driver '%s'."), driver_name)
    except ImportError as import_err:
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_LW("Image cache driver "
                        "'%(driver_name)s' failed to load. "
                        "Got error: '%(import_err)s."),
                    {'driver_name': driver_name,
                     'import_err': import_err})
        driver_module = __name__ + '.drivers.sqlite.Driver'
        LOG.info(_LI("Defaulting to SQLite driver."))
        self.driver_class = importutils.import_class(driver_module)
    self.configure_driver()
def _do_artifacts_query(context, session, show_level=ga.Showlevel.NONE):
    """Build the query to get all artifacts based on the context

    :param context: request context; admins see everything, regular
        users see public artifacts plus their own
    :param session: active database session
    :param show_level: NONE or BASIC; controls eager-loading
    :raises exception.ArtifactUnsupportedShowLevel: for other levels
    :returns: a SQLAlchemy query, not yet executed
    """
    LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s",
              {'is_admin': context.is_admin, 'owner': context.owner})

    if show_level == ga.Showlevel.NONE:
        query = session.query(models.Artifact).options(
            joinedload(models.Artifact.tags))
    elif show_level == ga.Showlevel.BASIC:
        # Eager-load everything except the large property text values.
        query = (
            session.query(models.Artifact).
            options(joinedload(
                models.Artifact.properties).
                defer(models.ArtifactProperty.text_value)).
            options(joinedload(models.Artifact.tags)).
            options(joinedload(models.Artifact.blobs).
                    joinedload(models.ArtifactBlob.locations)))
    else:
        # other show_levels aren't supported
        msg = _LW("Show level %s is not supported in this "
                  "operation") % ga.Showlevel.to_str(show_level)
        # LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(msg)
        raise exception.ArtifactUnsupportedShowLevel(shl=show_level)

    # If admin, return everything.
    if context.is_admin:
        return query
    else:
        # If regular user, return only public artifacts.
        # However, if context.owner has a value, return both
        # public and private artifacts of the context.owner.
        if context.owner is not None:
            query = query.filter(
                or_(models.Artifact.owner == context.owner,
                    models.Artifact.visibility == 'public'))
        else:
            query = query.filter(
                models.Artifact.visibility == 'public')
        return query