def _delete_image_location_from_backend(self, image_id, loc_id, uri,
                                        backend=None):
    try:
        LOG.debug("Scrubbing image %s from a location.", image_id)
        try:
            if CONF.enabled_backends:
                self.store_api.delete(uri, backend, self.admin_context)
            else:
                self.store_api.delete_from_backend(uri, self.admin_context)
        except store_exceptions.NotFound:
            LOG.info(_LI("Image location for image '%s' not found in "
                         "backend; Marking image location deleted in "
                         "db."), image_id)

        if loc_id != '-':
            db_api.get_api().image_location_delete(self.admin_context,
                                                   image_id,
                                                   int(loc_id),
                                                   'deleted')
        LOG.info(_LI("Image %s is scrubbed from a location."), image_id)
    except Exception as e:
        LOG.error(_LE("Unable to scrub image %(id)s from a location. "
                      "Reason: %(exc)s ") %
                  {'id': image_id,
                   'exc': encodeutils.exception_to_unicode(e)})
        raise
def delete(self, req, id):
    """Deletes an existing image with the registry.

    :param req: wsgi Request object
    :param id: The opaque internal identifier for the image

    :retval Returns 200 if delete was successful, a fault if not. On
            success, the body contains the deleted image information as
            a mapping.
    """
    try:
        deleted_image = self.db_api.image_destroy(req.context, id)
        LOG.info(_LI("Successfully deleted image %(id)s"), {'id': id})
        return dict(image=make_image_dict(deleted_image))
    except exception.ForbiddenPublicImage:
        LOG.info(_LI("Delete denied for public image %(id)s"), {'id': id})
        raise exc.HTTPForbidden()
    except exception.Forbidden:
        # If it's private and doesn't belong to them, don't let on
        # that it exists
        LOG.info(_LI("Access denied to image %(id)s but returning"
                     " 'not found'"), {'id': id})
        return exc.HTTPNotFound()
    except exception.ImageNotFound:
        LOG.info(_LI("Image %(id)s not found"), {'id': id})
        return exc.HTTPNotFound()
    except Exception:
        LOG.exception(_LE("Unable to delete image %s") % id)
        raise
def _scrub_image(self, image_id, delete_jobs):
    if len(delete_jobs) == 0:
        return

    LOG.info(_LI("Scrubbing image %(id)s from %(count)d locations."),
             {'id': image_id, 'count': len(delete_jobs)})

    success = True
    if CONF.enabled_backends:
        for img_id, loc_id, uri, backend in delete_jobs:
            try:
                self._delete_image_location_from_backend(img_id, loc_id,
                                                         uri,
                                                         backend=backend)
            except Exception:
                success = False
    else:
        for img_id, loc_id, uri in delete_jobs:
            try:
                self._delete_image_location_from_backend(img_id, loc_id,
                                                         uri)
            except Exception:
                success = False

    if success:
        image = db_api.get_api().image_get(self.admin_context, image_id)
        if image['status'] == 'pending_delete':
            db_api.get_api().image_update(self.admin_context, image_id,
                                          {'status': 'deleted'})
        LOG.info(_LI("Image %s has been scrubbed successfully"), image_id)
    else:
        LOG.warn(_LW("One or more image locations couldn't be scrubbed "
                     "from backend. Leaving image '%s' in 'pending_delete'"
                     " status") % image_id)
def run_child(self):
    def child_hup(*args):
        """Shuts down child processes; existing requests are handled."""
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        eventlet.wsgi.is_accepting = False
        self.sock.close()

    pid = os.fork()
    if pid == 0:
        signal.signal(signal.SIGHUP, child_hup)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        # ignore the interrupt signal to avoid a race whereby
        # a child worker receives the signal before the parent
        # and is respawned unnecessarily as a result
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # The child has no need to stash the unwrapped
        # socket, and the reference prevents a clean
        # exit on sighup
        self._sock = None
        self.run_server()
        LOG.info(_LI('Child %d exiting normally'), os.getpid())
        # self.pool.waitall() is now called in wsgi's server so
        # it's safe to exit here
        sys.exit(0)
    else:
        LOG.info(_LI('Started child %s'), pid)
        self.children.add(pid)
def build_image_owner_map(owner_map, db, context):
    image_owner_map = {}
    for image in db.image_get_all(context):
        image_id = image['id']
        owner_name = image['owner']

        if not owner_name:
            LOG.info(_LI('Image %s has no owner. Skipping.') % image_id)
            continue

        try:
            owner_id = owner_map[owner_name]
        except KeyError:
            # Pass the substitution mapping as a log argument; building a
            # (format, mapping) tuple and logging it would print the tuple
            # itself rather than the interpolated message.
            LOG.error(_LE('Image "%(image)s" owner "%(owner)s" was not '
                          'found. Skipping.'),
                      {'image': image_id, 'owner': owner_name})
            continue

        image_owner_map[image_id] = owner_id

        LOG.info(_LI('Image "%(image)s" owner "%(owner)s" -> '
                     '"%(owner_id)s"'),
                 {'image': image_id, 'owner': owner_name,
                  'owner_id': owner_id})

    return image_owner_map
def queue_image(self, image_id):
    """
    Adds an image to the queue for caching.

    If the image is already queued, already being cached, or already
    cached, returns False; otherwise returns True.

    :param image_id: Image ID
    """
    if self.is_cached(image_id):
        LOG.info(_LI("Not queueing image '%s'. Already cached."),
                 image_id)
        return False

    if self.is_being_cached(image_id):
        LOG.info(_LI("Not queueing image '%s'. Already being "
                     "written to cache"), image_id)
        return False

    if self.is_queued(image_id):
        LOG.info(_LI("Not queueing image '%s'. Already queued."),
                 image_id)
        return False

    path = self.get_image_filepath(image_id, 'queue')

    # Touch the file to add it to the queue
    with open(path, "w"):
        pass

    return True
def _remove_children(self, pid):
    if pid in self.children:
        self.children.remove(pid)
        LOG.info(_LI('Removed dead child %s'), pid)
    elif pid in self.stale_children:
        self.stale_children.remove(pid)
        LOG.info(_LI('Removed stale child %s'), pid)
    else:
        LOG.warn(_LW('Unrecognised child %s') % pid)
def create(self, req, body):
    """Registers a new image with the registry.

    :param req: wsgi Request object
    :param body: Dictionary of information about the image

    :returns: The newly-created image information as a mapping,
              which will include the newly-created image's internal id
              in the 'id' field
    """
    image_data = body['image']

    # Ensure the image has a status set
    image_data.setdefault('status', 'active')

    # Set up the image owner
    if not req.context.is_admin or 'owner' not in image_data:
        image_data['owner'] = req.context.owner

    image_id = image_data.get('id')
    if image_id and not uuidutils.is_uuid_like(image_id):
        LOG.info(_LI("Rejecting image creation request for invalid image "
                     "id '%(bad_id)s'"), {'bad_id': image_id})
        msg = _("Invalid image id format")
        return exc.HTTPBadRequest(explanation=msg)

    if 'location' in image_data:
        image_data['locations'] = [image_data.pop('location')]

    try:
        image_data = _normalize_image_location_for_db(image_data)
        image_data = self.db_api.image_create(req.context, image_data,
                                              v1_mode=True)
        image_data = dict(image=make_image_dict(image_data))
        LOG.info(_LI("Successfully created image %(id)s"),
                 {'id': image_data['image']['id']})
        return image_data
    except exception.Duplicate:
        msg = _("Image with identifier %s already exists!") % image_id
        LOG.warn(msg)
        return exc.HTTPConflict(msg)
    except exception.Invalid as e:
        msg = (_("Failed to add image metadata. "
                 "Got error: %s") % encodeutils.exception_to_unicode(e))
        LOG.error(msg)
        return exc.HTTPBadRequest(msg)
    except Exception:
        LOG.exception(_LE("Unable to create image %s"), image_id)
        raise
def delete_invalid_files(self):
    """
    Removes any invalid cache entries
    """
    for path in self.get_cache_files(self.invalid_dir):
        os.unlink(path)
        LOG.info(_LI("Removed invalid cache file %s"), path)
def run_task(task_id, task_type, context,
             task_repo=None, image_repo=None, image_factory=None):
    # TODO(nikhil): if task_repo is None get new task repo
    # TODO(nikhil): if image_repo is None get new image repo
    # TODO(nikhil): if image_factory is None get new image factory
    LOG.info(_LI("Loading known task scripts for task_id %(task_id)s "
                 "of type %(task_type)s"), {'task_id': task_id,
                                            'task_type': task_type})
    if task_type == 'import':
        image_import.run(task_id, context, task_repo,
                         image_repo, image_factory)
    elif task_type == 'api_image_import':
        api_image_import.run(task_id, context, task_repo,
                             image_repo, image_factory)
    else:
        msg = _LE("This task type %(task_type)s is not supported by the "
                  "current deployment of Glance. Please refer to the "
                  "documentation provided by OpenStack or your operator "
                  "for more information.") % {'task_type': task_type}
        LOG.error(msg)
        task = task_repo.get(task_id)
        task.fail(msg)
        if task_repo:
            task_repo.save(task)
        else:
            LOG.error(_LE("Failed to save task %(task_id)s in DB as "
                          "task_repo is %(task_repo)s"),
                      {"task_id": task_id, "task_repo": task_repo})
def __init__(self, store_api):
    LOG.info(_LI("Initializing scrubber with configuration: %s"),
             six.text_type({'registry_host': CONF.registry_host,
                            'registry_port': CONF.registry_port}))

    self.store_api = store_api

    registry.configure_registry_client()
    registry.configure_registry_admin_creds()

    # Here we create a request context with credentials to support
    # delayed delete when using multi-tenant backend storage
    admin_user = CONF.admin_user
    admin_tenant = CONF.admin_tenant_name

    if CONF.send_identity_headers:
        # When registry is operating in trusted-auth mode
        roles = [CONF.admin_role]
        self.admin_context = context.RequestContext(user=admin_user,
                                                    tenant=admin_tenant,
                                                    auth_token=None,
                                                    roles=roles)
        self.registry = registry.get_registry_client(self.admin_context)
    else:
        ctxt = context.RequestContext()
        self.registry = registry.get_registry_client(ctxt)
        auth_token = self.registry.auth_token
        self.admin_context = context.RequestContext(user=admin_user,
                                                    tenant=admin_tenant,
                                                    auth_token=auth_token)

    self.db_queue = get_scrub_queue()
    self.pool = eventlet.greenpool.GreenPool(CONF.scrub_pool_size)
def execute(self, image_id):
    """Finishing the task flow

    :param image_id: Glance Image ID
    """
    task = script_utils.get_task(self.task_repo, self.task_id)
    if task is None:
        return

    try:
        task.succeed({'image_id': image_id})
    except Exception as e:
        # Note: The message string contains Error in it to indicate
        # in the task.message that it's an error message for the user.
        # TODO(nikhil): need to bring back save_and_reraise_exception
        # when necessary
        log_msg = _LE("Task ID %(task_id)s failed. Error: %(exc_type)s: "
                      "%(e)s")
        LOG.exception(log_msg, {'exc_type': six.text_type(type(e)),
                                'e': encodeutils.exception_to_unicode(e),
                                'task_id': task.task_id})

        err_msg = _("Error: %(exc_type)s: %(e)s")
        task.fail(err_msg % {'exc_type': six.text_type(type(e)),
                             'e': encodeutils.exception_to_unicode(e)})
    finally:
        self.task_repo.save(task)

    LOG.info(_LI("%(task_id)s of %(task_type)s completed"),
             {'task_id': self.task_id, 'task_type': self.task_type})
def set_data(self, data, size=None):
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size
    location, size, checksum, loc_meta = self.store_api.add_to_backend(
        CONF,
        self.image.image_id,
        utils.LimitingReader(utils.CooperativeReader(data),
                             CONF.image_size_cap),
        size,
        context=self.context)

    # Verify the signature (if correct properties are present)
    if (signature_utils.should_verify_signature(
            self.image.extra_properties)):
        # NOTE(bpoulos): if verification fails, exception will be raised
        result = signature_utils.verify_signature(
            self.context, checksum, self.image.extra_properties)
        if result:
            LOG.info(_LI("Successfully verified signature for image %s"),
                     self.image.image_id)

    self.image.locations = [{'url': location, 'metadata': loc_meta,
                             'status': 'active'}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.status = 'active'
def __init__(self, wakeup_time=300, threads=100):
    LOG.info(_LI("Starting Daemon: wakeup_time=%(wakeup_time)s "
                 "threads=%(threads)s"),
             {'wakeup_time': wakeup_time, 'threads': threads})
    self.wakeup_time = wakeup_time
    self.event = eventlet.event.Event()
    # This pool is used for periodic instantiation of scrubber
    self.daemon_pool = eventlet.greenpool.GreenPool(threads)
def _single_run(self, application, sock):
    """Start a WSGI server in a new green thread."""
    LOG.info(_LI("Starting single process server"))
    eventlet.wsgi.server(sock, application,
                         custom_pool=self.pool,
                         log=self._logger,
                         debug=False,
                         keepalive=CONF.http_keepalive,
                         socket_timeout=self.client_socket_timeout)
def show(self, req, id):
    """Return data about the given image id."""
    try:
        image = self.db_api.image_get(req.context, id)
        LOG.debug("Successfully retrieved image %(id)s", {"id": id})
    except exception.ImageNotFound:
        LOG.info(_LI("Image %(id)s not found"), {"id": id})
        raise exc.HTTPNotFound()
    except exception.Forbidden:
        # If it's private and doesn't belong to them, don't let on
        # that it exists
        LOG.info(_LI("Access denied to image %(id)s but returning"
                     " 'not found'"), {"id": id})
        raise exc.HTTPNotFound()
    except Exception:
        LOG.exception(_LE("Unable to show image %s") % id)
        raise

    return dict(image=make_image_dict(image))
def replication_dump(options, args):
    """%(prog)s dump <server:port> <path>

    Dump the contents of a glance instance to local disk.

    server:port: the location of the glance instance.
    path: a directory on disk to contain the data.
    """
    # Make sure server and path are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))

    path = args.pop()
    server, port = utils.parse_valid_host_port(args.pop())

    imageservice = get_image_service()
    client = imageservice(http.HTTPConnection(server, port),
                          options.mastertoken)

    for image in client.get_images():
        LOG.debug('Considering: %(image_id)s (%(image_name)s) '
                  '(%(image_size)d bytes)',
                  {'image_id': image['id'],
                   'image_name': image.get('name', '--unnamed--'),
                   'image_size': image['size']})

        data_path = os.path.join(path, image['id'])
        data_filename = data_path + '.img'
        if not os.path.exists(data_path):
            LOG.info(_LI('Storing: %(image_id)s (%(image_name)s)'
                         ' (%(image_size)d bytes) in %(data_filename)s'),
                     {'image_id': image['id'],
                      'image_name': image.get('name', '--unnamed--'),
                      'image_size': image['size'],
                      'data_filename': data_filename})

            # Dump glance information
            if six.PY3:
                f = open(data_path, 'w', encoding='utf-8')
            else:
                f = open(data_path, 'w')
            with f:
                f.write(jsonutils.dumps(image))

            if image['status'] == 'active' and not options.metaonly:
                # Now fetch the image. The metadata returned in headers
                # here is the same as that which we got from the detailed
                # images request earlier, so we can ignore it here. Note
                # that we also only dump active images.
                LOG.debug('Image %s is active', image['id'])
                image_response = client.get_image(image['id'])
                with open(data_filename, 'wb') as f:
                    while True:
                        chunk = image_response.read(options.chunksize)
                        if not chunk:
                            break
                        f.write(chunk)
def set_image_data(image, uri, task_id, backend=None):
    data_iter = None
    try:
        LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s "
                     "to be imported"), {"data_uri": uri,
                                         "task_id": task_id})
        data_iter = script_utils.get_image_data_iter(uri)
        image.set_data(data_iter, backend=backend)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.warn(_LW("Task %(task_id)s failed with exception "
                         "%(error)s") %
                     {"error": encodeutils.exception_to_unicode(e),
                      "task_id": task_id})
            LOG.info(_LI("Task %(task_id)s: Could not import image file"
                         " %(image_data)s"), {"image_data": uri,
                                              "task_id": task_id})
    finally:
        if hasattr(data_iter, 'close'):
            data_iter.close()
def _verify_signature_if_needed(self, checksum):
    # Verify the signature (if correct properties are present)
    if (signature_utils.should_verify_signature(
            self.image.extra_properties)):
        # NOTE(bpoulos): if verification fails, exception will be raised
        result = signature_utils.verify_signature(
            self.context, checksum, self.image.extra_properties)
        if result:
            LOG.info(_LI("Successfully verified signature for image %s"),
                     self.image.image_id)
def init_driver(self):
    """
    Create the driver for the cache
    """
    driver_name = CONF.image_cache_driver
    driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
    try:
        self.driver_class = importutils.import_class(driver_module)
        LOG.info(_LI("Image cache loaded driver '%s'."), driver_name)
    except ImportError as import_err:
        LOG.warn(_LW("Image cache driver "
                     "'%(driver_name)s' failed to load. "
                     "Got error: '%(import_err)s'."),
                 {'driver_name': driver_name,
                  'import_err': import_err})
        driver_module = __name__ + '.drivers.sqlite.Driver'
        LOG.info(_LI("Defaulting to SQLite driver."))
        self.driver_class = importutils.import_class(driver_module)
    self.configure_driver()
def _verify_and_respawn_children(self, pid, status):
    if len(self.stale_children) == 0:
        LOG.debug("No stale children")

    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
        LOG.error(_LE("Not respawning child %d, cannot "
                      "recover from termination") % pid)
        if not self.children and not self.stale_children:
            LOG.info(_LI("All workers have terminated. Exiting"))
            self.running = False
    else:
        if len(self.children) < CONF.workers:
            self.run_child()
def _clear_metadata(meta):
    metadef_tables = [get_metadef_properties_table(meta),
                      get_metadef_objects_table(meta),
                      get_metadef_tags_table(meta),
                      get_metadef_namespace_resource_types_table(meta),
                      get_metadef_namespaces_table(meta),
                      get_metadef_resource_types_table(meta)]

    for table in metadef_tables:
        table.delete().execute()
        LOG.info(_LI("Table %s has been cleared"), table)
def _check_ext(ext):
    try:
        next(n for n, v in available
             if n == ext.plugin.metadata.type_name and
             (v is None or v == ext.plugin.metadata.type_version))
    except StopIteration:
        LOG.warn(_LW("Can't load artifact %s: not in"
                     " available_plugins list") % ext.name)
        raise exception.ArtifactLoadError(name=ext.name)
    LOG.info(_LI("Artifact %s has been successfully loaded"), ext.name)
    return True
def start_wsgi(self):
    if CONF.workers == 0:
        # Useful for profiling, test, debug etc.
        self.pool = self.create_pool()
        self.pool.spawn_n(self._single_run, self.application, self.sock)
        return
    else:
        LOG.info(_LI("Starting %d workers"), CONF.workers)
        signal.signal(signal.SIGTERM, self.kill_children)
        signal.signal(signal.SIGINT, self.kill_children)
        signal.signal(signal.SIGHUP, self.hup)
        while len(self.children) < CONF.workers:
            self.run_child()
def _verify_and_respawn_children(self, pid, status):
    if len(self.stale_children) == 0:
        LOG.debug('No stale children')

    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
        LOG.error(_LE('Not respawning child %d, cannot '
                      'recover from termination') % pid)
        if not self.children and not self.stale_children:
            LOG.info(_LI('All workers have terminated. Exiting'))
            self.running = False
    else:
        if len(self.children) < get_num_workers():
            self.run_child()
def reactivate(self, req, image_id):
    image_repo = self.gateway.get_repo(req.context)
    try:
        image = image_repo.get(image_id)
        image.reactivate()
        image_repo.save(image)
        LOG.info(_LI("Image %s is reactivated"), image_id)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except exception.Forbidden as e:
        LOG.debug("User not permitted to reactivate image '%s'", image_id)
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    except exception.InvalidImageStatusTransition as e:
        raise webob.exc.HTTPBadRequest(explanation=e.msg)
def set_data(self, data, size=None):
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size

    # Create the verifier for signature verification (if correct
    # properties are present)
    extra_props = self.image.extra_properties
    if (signature_utils.should_create_verifier(extra_props)):
        # NOTE(bpoulos): if creating verifier fails, exception will be
        # raised
        img_signature = extra_props[signature_utils.SIGNATURE]
        hash_method = extra_props[signature_utils.HASH_METHOD]
        key_type = extra_props[signature_utils.KEY_TYPE]
        cert_uuid = extra_props[signature_utils.CERT_UUID]
        verifier = signature_utils.get_verifier(
            context=self.context,
            img_signature_certificate_uuid=cert_uuid,
            img_signature_hash_method=hash_method,
            img_signature=img_signature,
            img_signature_key_type=key_type
        )
    else:
        verifier = None

    location, size, checksum, loc_meta = self.store_api.add_to_backend(
        CONF,
        self.image.image_id,
        utils.LimitingReader(utils.CooperativeReader(data),
                             CONF.image_size_cap),
        size,
        context=self.context,
        verifier=verifier)

    # NOTE(bpoulos): if verification fails, exception will be raised
    if verifier:
        try:
            verifier.verify()
            LOG.info(_LI("Successfully verified signature for image %s"),
                     self.image.image_id)
        except crypto_exception.InvalidSignature:
            raise cursive_exception.SignatureVerificationError(
                _('Signature verification failed')
            )

    self.image.locations = [{'url': location, 'metadata': loc_meta,
                             'status': 'active'}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.status = 'active'
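# For illustration only: a minimal sketch of the extra_properties an image
# would need before set_data() above creates a verifier. The keys mirror the
# signature_utils constants used there (SIGNATURE, HASH_METHOD, KEY_TYPE,
# CERT_UUID); the values shown are hypothetical placeholders, not real
# signature material.
example_signature_properties = {
    'img_signature': '<base64-encoded signature blob>',
    'img_signature_hash_method': 'SHA-256',
    'img_signature_key_type': 'RSA-PSS',
    'img_signature_certificate_uuid': '<certificate UUID in key manager>',
}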
def _delete_image_location_from_backend(self, image_id, loc_id, uri):
    if CONF.metadata_encryption_key:
        uri = crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri)

    try:
        LOG.debug("Scrubbing image %s from a location.", image_id)
        try:
            self.store_api.delete_from_backend(uri, self.admin_context)
        except store_exceptions.NotFound:
            LOG.info(_LI("Image location for image '%s' not found in "
                         "backend; Marking image location deleted in "
                         "db."), image_id)

        if loc_id != '-':
            db_api.get_api().image_location_delete(self.admin_context,
                                                   image_id,
                                                   int(loc_id),
                                                   'deleted')
        LOG.info(_LI("Image %s is scrubbed from a location."), image_id)
    except Exception as e:
        LOG.error(_LE("Unable to scrub image %(id)s from a location. "
                      "Reason: %(exc)s ") %
                  {'id': image_id,
                   'exc': encodeutils.exception_to_unicode(e)})
        raise
def set_data(self, data, size=None, backend=None):
    remaining = glance.api.common.check_quota(
        self.context, size, self.db_api, image_id=self.image.image_id)
    if remaining is not None:
        # NOTE(jbresnah) we are trying to enforce a quota, put a limit
        # reader on the data
        data = utils.LimitingReader(
            data, remaining, exception_class=exception.StorageQuotaFull)

    self.image.set_data(data, size=size, backend=backend)

    # NOTE(jbresnah) If two uploads happen at the same time and neither
    # properly sets the size attribute[1] then there is a race condition
    # that will allow for the quota to be broken[2]. Thus we must recheck
    # the quota after the upload and thus after we know the size.
    #
    # Also, when an upload doesn't set the size properly then the call to
    # check_quota above returns None and so utils.LimitingReader is not
    # used above. Hence the store (e.g. filesystem store) may have to
    # download the entire file before knowing the actual file size. Here
    # also we need to check for the quota again after the image has been
    # downloaded to the store.
    #
    # [1] For e.g. when using chunked transfers the 'Content-Length'
    #     header is not set.
    # [2] For e.g.:
    #     - Upload 1 does not exceed quota but upload 2 exceeds quota.
    #       Both uploads are to different locations
    #     - Upload 2 completes before upload 1 and writes image.size.
    #     - Immediately, upload 1 completes and (over)writes image.size
    #       with the smaller size.
    #     - Now, to glance, image has not exceeded quota but, in
    #       reality, the quota has been exceeded.
    try:
        glance.api.common.check_quota(
            self.context, self.image.size, self.db_api,
            image_id=self.image.image_id)
    except exception.StorageQuotaFull:
        with excutils.save_and_reraise_exception():
            LOG.info(_LI('Cleaning up %s after exceeding the quota.'),
                     self.image.image_id)
            self.store_utils.safe_delete_from_backend(
                self.context, self.image.image_id,
                self.image.locations[0])
def replication_dump(options, args):
    """%(prog)s dump <server:port> <path>

    Dump the contents of a glance instance to local disk.

    server:port: the location of the glance instance.
    path: a directory on disk to contain the data.
    """
    # Make sure server and path are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))

    path = args.pop()
    server, port = utils.parse_valid_host_port(args.pop())

    imageservice = get_image_service()
    client = imageservice(http_client.HTTPConnection(server, port),
                          options.mastertoken)

    for image in client.get_images():
        LOG.debug('Considering: %s', image['id'])

        data_path = os.path.join(path, image['id'])
        if not os.path.exists(data_path):
            LOG.info(_LI('Storing: %s'), image['id'])

            # Dump glance information
            if six.PY3:
                f = open(data_path, 'w', encoding='utf-8')
            else:
                f = open(data_path, 'w')
            with f:
                f.write(jsonutils.dumps(image))

            if image['status'] == 'active' and not options.metaonly:
                # Now fetch the image. The metadata returned in headers
                # here is the same as that which we got from the detailed
                # images request earlier, so we can ignore it here. Note
                # that we also only dump active images.
                LOG.debug('Image %s is active', image['id'])
                image_response = client.get_image(image['id'])
                with open(data_path + '.img', 'wb') as f:
                    while True:
                        chunk = image_response.read(options.chunksize)
                        if not chunk:
                            break
                        f.write(chunk)
def _set_task_status(self, new_status):
    if self._validate_task_status_transition(self.status, new_status):
        old_status = self.status
        self._status = new_status
        LOG.info(_LI("Task [%(task_id)s] status changing from "
                     "%(cur_status)s to %(new_status)s"),
                 {'task_id': self.task_id, 'cur_status': old_status,
                  'new_status': new_status})
    else:
        LOG.error(_LE("Task [%(task_id)s] status failed to change from "
                      "%(cur_status)s to %(new_status)s"),
                  {'task_id': self.task_id, 'cur_status': self.status,
                   'new_status': new_status})
        raise exception.InvalidTaskStatusTransition(
            cur_status=self.status,
            new_status=new_status
        )
def reactivate(self, req, image_id):
    image_repo = self.gateway.get_repo(req.context)
    try:
        image = image_repo.get(image_id)
        status = image.status
        image.reactivate()
        # not necessary to change the status if it's already 'active'
        if status == 'deactivated':
            image_repo.save(image, from_state='deactivated')
        LOG.info(_LI("Image %s is reactivated"), image_id)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except exception.Forbidden as e:
        LOG.debug("User not permitted to reactivate image '%s'", image_id)
        raise webob.exc.HTTPForbidden(explanation=e.msg)
    except exception.InvalidImageStatusTransition as e:
        raise webob.exc.HTTPBadRequest(explanation=e.msg)
def _verify_and_respawn_children(self, pid, status):
    if len(self.stale_children) == 0:
        LOG.debug('No stale children')

    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
        LOG.error(_LE('Not respawning child %d, cannot '
                      'recover from termination') % pid)
        if not self.children and not self.stale_children:
            LOG.info(_LI('All workers have terminated. Exiting'))
            self.running = False
    else:
        if len(self.children) < get_num_workers():
            self.run_child()

    if self.raw_caching_pid is not None and self.raw_caching_pid < 0:
        LOG.error(_LE('Not respawning raw_caching child %d, cannot '
                      'recover from termination') % self.raw_caching_pid)
        self.running = False
def delete_stalled_files(self, older_than):
    """
    Removes any incomplete cache entries older than a supplied
    modified time.

    :param older_than: Files written to on or before this timestamp
                       will be deleted.
    """
    for path in self.get_cache_files(self.incomplete_dir):
        if os.path.getmtime(path) < older_than:
            try:
                fileutils.delete_if_exists(path)
                LOG.info(_LI("Removed stalled cache file %s"), path)
            except Exception as e:
                # Pass the substitution mapping as a log argument; the
                # original built a (format, mapping) tuple, which would
                # log the tuple itself instead of the message.
                LOG.warn(_LW("Failed to delete file %(path)s. "
                             "Got error: %(e)s"),
                         dict(path=path, e=e))
def set_data(self, data, size=None):
    remaining = glance.api.common.check_quota(
        self.context, size, self.db_api, image_id=self.image.image_id)
    if remaining is not None:
        # NOTE(jbresnah) we are trying to enforce a quota, put a limit
        # reader on the data
        data = utils.LimitingReader(data, remaining)
    try:
        self.image.set_data(data, size=size)
    except exception.ImageSizeLimitExceeded:
        raise exception.StorageQuotaFull(image_size=size,
                                         remaining=remaining)

    # NOTE(jbresnah) If two uploads happen at the same time and neither
    # properly sets the size attribute[1] then there is a race condition
    # that will allow for the quota to be broken[2]. Thus we must recheck
    # the quota after the upload and thus after we know the size.
    #
    # Also, when an upload doesn't set the size properly then the call to
    # check_quota above returns None and so utils.LimitingReader is not
    # used above. Hence the store (e.g. filesystem store) may have to
    # download the entire file before knowing the actual file size. Here
    # also we need to check for the quota again after the image has been
    # downloaded to the store.
    #
    # [1] For e.g. when using chunked transfers the 'Content-Length'
    #     header is not set.
    # [2] For e.g.:
    #     - Upload 1 does not exceed quota but upload 2 exceeds quota.
    #       Both uploads are to different locations
    #     - Upload 2 completes before upload 1 and writes image.size.
    #     - Immediately, upload 1 completes and (over)writes image.size
    #       with the smaller size.
    #     - Now, to glance, image has not exceeded quota but, in
    #       reality, the quota has been exceeded.
    try:
        glance.api.common.check_quota(
            self.context, self.image.size, self.db_api,
            image_id=self.image.image_id)
    except exception.StorageQuotaFull:
        with excutils.save_and_reraise_exception():
            LOG.info(_LI('Cleaning up %s after exceeding the quota.'),
                     self.image.image_id)
            self.store_utils.safe_delete_from_backend(
                self.context, self.image.image_id,
                self.image.locations[0])
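# For illustration only: a rough sketch of how the LimitingReader used in
# set_data() above caps an upload once the remaining quota is known. The
# quota value and in-memory reader are hypothetical, and the exact
# LimitingReader behaviour may differ between glance versions.
import io

from glance.common import exception
from glance.common import utils

quota_remaining = 10  # pretend only 10 bytes of quota remain
reader = utils.LimitingReader(io.BytesIO(b'x' * 32), quota_remaining,
                              exception_class=exception.StorageQuotaFull)
try:
    reader.read(32)  # reading past the limit raises the exception class
except exception.StorageQuotaFull:
    print('upload rejected once the quota limit was crossed')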
def delete(self, req, image_id, id):
    """
    Removes a membership from the image.
    """
    self._check_can_access_image_members(req.context)

    # Make sure the image exists
    try:
        image = self.db_api.image_get(req.context, image_id, v1_mode=True)
    except exception.NotFound:
        msg = _("Image %(id)s not found") % {'id': image_id}
        LOG.warn(msg)
        raise webob.exc.HTTPNotFound(msg)
    except exception.Forbidden:
        # If it's private and doesn't belong to them, don't let on
        # that it exists
        msg = _LW("Access denied to image %(id)s but returning"
                  " 'not found'") % {'id': image_id}
        LOG.warn(msg)
        raise webob.exc.HTTPNotFound()

    # Can they manipulate the membership?
    if not self.is_image_sharable(req.context, image):
        msg = (_LW("User lacks permission to share image %(id)s") %
               {'id': image_id})
        LOG.warn(msg)
        msg = _("No permission to share that image")
        raise webob.exc.HTTPForbidden(msg)

    # Look up an existing membership
    members = self.db_api.image_member_find(req.context,
                                            image_id=image_id,
                                            member=id)
    if members:
        self.db_api.image_member_delete(req.context, members[0]['id'])
    else:
        LOG.debug("%(id)s is not a member of image %(image_id)s",
                  {'id': id, 'image_id': image_id})
        msg = _("Membership could not be found.")
        raise webob.exc.HTTPNotFound(explanation=msg)

    # Make an appropriate result
    LOG.info(_LI("Successfully deleted a membership from image %(id)s"),
             {'id': image_id})
    return webob.exc.HTTPNoContent()
def migrate_location_credentials(migrate_engine, to_quoted):
    """
    Migrate location credentials for encrypted swift uri's between the
    quoted and unquoted forms.

    :param migrate_engine: The configured db engine
    :param to_quoted: If True, migrate location credentials from
                      unquoted to quoted form. If False, do the reverse.
    """
    if not CONF.metadata_encryption_key:
        msg = _LI("'metadata_encryption_key' was not specified in the "
                  "config file or a config file was not specified. This "
                  "means that this migration is a NOOP.")
        LOG.info(msg)
        return

    meta = sqlalchemy.schema.MetaData()
    meta.bind = migrate_engine

    images_table = sqlalchemy.Table('images', meta, autoload=True)

    images = list(images_table.select().execute())

    for image in images:
        try:
            fixed_uri = fix_uri_credentials(image['location'], to_quoted)
            images_table.update().where(
                images_table.c.id == image['id']).values(
                    location=fixed_uri).execute()
        except exception.Invalid:
            msg = _LW("Failed to decrypt location value for image"
                      " %(image_id)s") % {'image_id': image['id']}
            LOG.warn(msg)
        except exception.BadStoreUri as e:
            reason = encodeutils.exception_to_unicode(e)
            msg = _LE("Invalid store uri for image: %(image_id)s. "
                      "Details: %(reason)s") % {'image_id': image.id,
                                                'reason': reason}
            LOG.exception(msg)
            raise
def __init__(self, app):
    mapper = routes.Mapper()
    resource = cached_images.create_resource()

    mapper.connect("/v2/cached_images",
                   controller=resource,
                   action="get_cached_images",
                   conditions=dict(method=["GET"]))

    mapper.connect("/v2/cached_images/{image_id}",
                   controller=resource,
                   action="delete_cached_image",
                   conditions=dict(method=["DELETE"]))

    mapper.connect("/v2/cached_images",
                   controller=resource,
                   action="delete_cached_images",
                   conditions=dict(method=["DELETE"]))

    mapper.connect("/v2/queued_images/{image_id}",
                   controller=resource,
                   action="queue_image",
                   conditions=dict(method=["PUT"]))

    mapper.connect("/v2/queued_images",
                   controller=resource,
                   action="get_queued_images",
                   conditions=dict(method=["GET"]))

    mapper.connect("/v2/queued_images/{image_id}",
                   controller=resource,
                   action="delete_queued_image",
                   conditions=dict(method=["DELETE"]))

    mapper.connect("/v2/queued_images",
                   controller=resource,
                   action="delete_queued_images",
                   conditions=dict(method=["DELETE"]))

    self._mapper = mapper
    self._resource = resource

    LOG.info(_LI("Initialized image cache management middleware"))
    super(CacheManageFilter, self).__init__(app)
def set_data(self, data, size=None):
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size

    # Create the verifier for signature verification (if correct
    # properties are present)
    if (signature_utils.should_create_verifier(
            self.image.extra_properties)):
        # NOTE(bpoulos): if creating verifier fails, exception will be
        # raised
        verifier = signature_utils.get_verifier(
            self.context, self.image.extra_properties)
    else:
        verifier = None

    location, size, checksum, loc_meta = self.store_api.add_to_backend(
        CONF,
        self.image.image_id,
        utils.LimitingReader(utils.CooperativeReader(data),
                             CONF.image_size_cap),
        size,
        context=self.context,
        verifier=verifier)

    self._verify_signature_if_needed(checksum)

    # NOTE(bpoulos): if verification fails, exception will be raised
    if verifier:
        try:
            verifier.verify()
            LOG.info(_LI("Successfully verified signature for image %s"),
                     self.image.image_id)
        except crypto_exception.InvalidSignature:
            raise exception.SignatureVerificationError(
                _('Signature verification failed'))

    self.image.locations = [{'url': location, 'metadata': loc_meta,
                             'status': 'active'}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.status = 'active'
def run(self):
    images = self.cache.get_queued_images()
    if not images:
        LOG.debug("Nothing to prefetch.")
        return True

    num_images = len(images)
    LOG.debug("Found %d images to prefetch", num_images)

    pool = eventlet.GreenPool(num_images)
    results = pool.imap(self.fetch_image_into_cache, images)
    successes = sum([1 for r in results if r is True])
    if successes != num_images:
        LOG.warn(_LW("Failed to successfully cache all "
                     "images in queue."))
        return False

    LOG.info(_LI("Successfully cached all %d images"), num_images)
    return True
def _pipe_watcher(self):
    def _on_timeout_exit(*args):
        LOG.info(_LI('Graceful shutdown timeout exceeded, '
                     'instantaneous exiting'))
        os._exit(1)

    # This will block until the write end is closed when the parent
    # dies unexpectedly
    self.readpipe.read(1)

    LOG.info(_LI('Parent process has died unexpectedly, exiting'))

    # allow up to 1 second for sys.exit to gracefully shutdown
    signal.signal(signal.SIGALRM, _on_timeout_exit)
    signal.alarm(1)
    # do the same cleanup as child_hup
    eventlet.wsgi.is_accepting = False
    self.sock.close()

    sys.exit(1)
def wait_on_children(self):
    while self.running:
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                self._remove_children(pid)
                self._verify_and_respawn_children(pid, status)
        except OSError as err:
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
            break
        except exception.SIGHUPInterrupt:
            self.reload()
            continue

    eventlet.greenio.shutdown_safe(self.sock)
    self.sock.close()
    LOG.debug('Exited')
def configure_driver(self):
    """
    Configure the driver for the cache and, if it fails to configure,
    fall back to using the SQLite driver which has no odd dependencies
    """
    try:
        self.driver = self.driver_class()
        self.driver.configure()
    except exception.BadDriverConfiguration as config_err:
        driver_module = self.driver_class.__module__
        LOG.warn(_LW("Image cache driver "
                     "'%(driver_module)s' failed to configure. "
                     "Got error: '%(config_err)s'"),
                 {'driver_module': driver_module,
                  'config_err': config_err})
        LOG.info(_LI("Defaulting to SQLite driver."))
        default_module = __name__ + '.drivers.sqlite.Driver'
        self.driver_class = importutils.import_class(default_module)
        self.driver = self.driver_class()
        self.driver.configure()
def start_wsgi(self):
    workers = get_num_workers()
    if workers == 0:
        # Useful for profiling, test, debug etc.
        self.pool = self.create_pool()
        self.pool.spawn_n(self._single_run, self.application, self.sock)
        return
    else:
        LOG.info(_LI("Starting %d workers"), workers)
        signal.signal(signal.SIGTERM, self.kill_children)
        signal.signal(signal.SIGINT, self.kill_children)
        signal.signal(signal.SIGHUP, self.hup)

        # Adding this to detect if a parent dies abruptly
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')

        if CONF.graceful_shutdown:
            signal.signal(signal.SIGUSR1, self.disable_children)

        while len(self.children) < workers:
            self.run_child()
def _verify_signature(self, verifier, location, loc_meta):
    """
    Verify signature of uploaded data.

    :param verifier: for signature verification
    """
    # NOTE(bpoulos): if verification fails, exception will be raised
    if verifier is not None:
        try:
            verifier.verify()
            msg = _LI("Successfully verified signature for image %s")
            LOG.info(msg, self.image.image_id)
        except crypto_exception.InvalidSignature:
            if CONF.enabled_backends:
                self.store_api.delete(location, loc_meta.get('store'),
                                      context=self.context)
            else:
                self.store_api.delete_from_backend(location,
                                                   context=self.context)
            raise cursive_exception.SignatureVerificationError(
                _('Signature verification failed'))
def get_orphaned_cached_images(self):
    """
    WRS specific
    When glance caching is used, finds cached images whose original
    source image has since been deleted, and removes their cached files.
    """
    admin_context = self._get_context()
    registry.configure_registry_client()
    active_images = registry.get_images_list(admin_context)
    cached_images = self.get_cached_images()
    for c_image in cached_images:
        if not any(image['id'] == c_image['image_id']
                   for image in active_images):
            LOG.info(_LI("Image %s no longer present in the "
                         "primary region. Deleting cached file.") %
                     str(c_image['image_id']))
            self.delete_cached_image(c_image['image_id'])
        else:
            LOG.debug("Image %s still present in the "
                      "primary region." % str(c_image['image_id']))
def __init__(self, store_api):
    LOG.info(_LI("Initializing scrubber"))
    self.store_api = store_api
    self.admin_context = context.get_admin_context(show_deleted=True)
    self.db_queue = get_scrub_queue()
    self.pool = eventlet.greenpool.GreenPool(CONF.scrub_pool_size)
def set_data(self, data, size=None, backend=None):
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size

    # Create the verifier for signature verification (if correct
    # properties are present)
    extra_props = self.image.extra_properties
    if (signature_utils.should_create_verifier(extra_props)):
        # NOTE(bpoulos): if creating verifier fails, exception will be
        # raised
        img_signature = extra_props[signature_utils.SIGNATURE]
        hash_method = extra_props[signature_utils.HASH_METHOD]
        key_type = extra_props[signature_utils.KEY_TYPE]
        cert_uuid = extra_props[signature_utils.CERT_UUID]
        verifier = signature_utils.get_verifier(
            context=self.context,
            img_signature_certificate_uuid=cert_uuid,
            img_signature_hash_method=hash_method,
            img_signature=img_signature,
            img_signature_key_type=key_type)
    else:
        verifier = None

    hashing_algo = CONF['hashing_algorithm']

    if CONF.enabled_backends:
        (location, size, checksum, multihash,
         loc_meta) = self.store_api.add_with_multihash(
            CONF,
            self.image.image_id,
            utils.LimitingReader(utils.CooperativeReader(data),
                                 CONF.image_size_cap),
            size,
            backend,
            hashing_algo,
            context=self.context,
            verifier=verifier)
    else:
        (location, size, checksum, multihash,
         loc_meta) = self.store_api.add_to_backend_with_multihash(
            CONF,
            self.image.image_id,
            utils.LimitingReader(utils.CooperativeReader(data),
                                 CONF.image_size_cap),
            size,
            hashing_algo,
            context=self.context,
            verifier=verifier)

    # NOTE(bpoulos): if verification fails, exception will be raised
    if verifier:
        try:
            verifier.verify()
            LOG.info(_LI("Successfully verified signature for image %s"),
                     self.image.image_id)
        except crypto_exception.InvalidSignature:
            if CONF.enabled_backends:
                self.store_api.delete(location, loc_meta.get('backend'),
                                      context=self.context)
            else:
                self.store_api.delete_from_backend(location,
                                                   context=self.context)
            raise cursive_exception.SignatureVerificationError(
                _('Signature verification failed'))

    self.image.locations = [{'url': location, 'metadata': loc_meta,
                             'status': 'active'}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.os_hash_value = multihash
    self.image.os_hash_algo = hashing_algo
    self.image.status = 'active'
def replication_load(options, args):
    """%(prog)s load <server:port> <path>

    Load the contents of a local directory into glance.

    server:port: the location of the glance instance.
    path: a directory on disk containing the data.
    """
    # Make sure server and path are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))

    path = args.pop()
    server, port = utils.parse_valid_host_port(args.pop())

    imageservice = get_image_service()
    client = imageservice(http_client.HTTPConnection(server, port),
                          options.slavetoken)

    updated = []

    for ent in os.listdir(path):
        if uuidutils.is_uuid_like(ent):
            image_uuid = ent
            LOG.info(_LI('Considering: %s'), image_uuid)

            meta_file_name = os.path.join(path, image_uuid)
            with open(meta_file_name) as meta_file:
                meta = jsonutils.loads(meta_file.read())

            # Remove keys which don't make sense for replication
            for key in options.dontreplicate.split(' '):
                if key in meta:
                    LOG.debug('Stripping %(header)s from saved '
                              'metadata', {'header': key})
                    del meta[key]

            if _image_present(client, image_uuid):
                # NOTE(mikal): Perhaps we just need to update the
                # metadata? Note that we don't attempt to change an
                # image file once it has been uploaded.
                LOG.debug('Image %s already present', image_uuid)
                headers = client.get_image_meta(image_uuid)
                for key in options.dontreplicate.split(' '):
                    if key in headers:
                        LOG.debug('Stripping %(header)s from slave '
                                  'metadata', {'header': key})
                        del headers[key]

                if _dict_diff(meta, headers):
                    LOG.info(_LI('Image %s metadata has changed'),
                             image_uuid)
                    headers, body = client.add_image_meta(meta)
                    _check_upload_response_headers(headers, body)
                    updated.append(meta['id'])
            else:
                if not os.path.exists(os.path.join(path,
                                                   image_uuid + '.img')):
                    LOG.debug('%s dump is missing image data, skipping',
                              image_uuid)
                    continue

                # Upload the image itself; open in binary mode since the
                # dump was written as raw bytes
                with open(os.path.join(path, image_uuid + '.img'),
                          'rb') as img_file:
                    try:
                        headers, body = client.add_image(meta, img_file)
                        _check_upload_response_headers(headers, body)
                        updated.append(meta['id'])
                    except exc.HTTPConflict:
                        LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE)
                                  % image_uuid)  # noqa

    return updated
def replication_livecopy(options, args):
    """%(prog)s livecopy <fromserver:port> <toserver:port>

    Load the contents of one glance instance into another.

    fromserver:port: the location of the master glance instance.
    toserver:port: the location of the slave glance instance.
    """
    # Make sure from-server and to-server are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))

    imageservice = get_image_service()

    slave_server, slave_port = utils.parse_valid_host_port(args.pop())
    slave_conn = http_client.HTTPConnection(slave_server, slave_port)
    slave_client = imageservice(slave_conn, options.slavetoken)

    master_server, master_port = utils.parse_valid_host_port(args.pop())
    master_conn = http_client.HTTPConnection(master_server, master_port)
    master_client = imageservice(master_conn, options.mastertoken)

    updated = []

    for image in master_client.get_images():
        LOG.debug('Considering %(id)s', {'id': image['id']})
        for key in options.dontreplicate.split(' '):
            if key in image:
                LOG.debug('Stripping %(header)s from master metadata',
                          {'header': key})
                del image[key]

        if _image_present(slave_client, image['id']):
            # NOTE(mikal): Perhaps we just need to update the metadata?
            # Note that we don't attempt to change an image file once it
            # has been uploaded.
            headers = slave_client.get_image_meta(image['id'])
            if headers['status'] == 'active':
                for key in options.dontreplicate.split(' '):
                    if key in image:
                        LOG.debug('Stripping %(header)s from master '
                                  'metadata', {'header': key})
                        del image[key]
                    if key in headers:
                        LOG.debug('Stripping %(header)s from slave '
                                  'metadata', {'header': key})
                        del headers[key]

                if _dict_diff(image, headers):
                    LOG.info(_LI('Image %(image_id)s (%(image_name)s) '
                                 'metadata has changed'),
                             {'image_id': image['id'],
                              'image_name': image.get('name',
                                                      '--unnamed--')})
                    headers, body = slave_client.add_image_meta(image)
                    _check_upload_response_headers(headers, body)
                    updated.append(image['id'])
        elif image['status'] == 'active':
            LOG.info(_LI('Image %(image_id)s (%(image_name)s) '
                         '(%(image_size)d bytes) '
                         'is being synced'),
                     {'image_id': image['id'],
                      'image_name': image.get('name', '--unnamed--'),
                      'image_size': image['size']})
            if not options.metaonly:
                image_response = master_client.get_image(image['id'])
                try:
                    headers, body = slave_client.add_image(image,
                                                           image_response)
                    _check_upload_response_headers(headers, body)
                    updated.append(image['id'])
                except exc.HTTPConflict:
                    LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE)
                              % image['id'])  # noqa

    return updated
def update(self, req, image_id, id, body=None):
    """
    Adds a membership to the image, or updates an existing one.
    If a body is present, it is a dict with the following format::

        {'member': {
            'can_share': [True|False]
        }}

    If `can_share` is provided, the member's ability to share is set
    accordingly. If it is not provided, existing memberships remain
    unchanged and new memberships default to False.
    """
    self._check_can_access_image_members(req.context)

    # Make sure the image exists
    try:
        image = self.db_api.image_get(req.context, image_id)
    except exception.NotFound:
        msg = _("Image %(id)s not found") % {'id': image_id}
        LOG.warn(msg)
        raise webob.exc.HTTPNotFound(msg)
    except exception.Forbidden:
        # If it's private and doesn't belong to them, don't let on
        # that it exists
        msg = _LW("Access denied to image %(id)s but returning"
                  " 'not found'") % {'id': image_id}
        LOG.warn(msg)
        raise webob.exc.HTTPNotFound()

    # Can they manipulate the membership?
    if not self.is_image_sharable(req.context, image):
        msg = (_LW("User lacks permission to share image %(id)s") %
               {'id': image_id})
        LOG.warn(msg)
        msg = _("No permission to share that image")
        raise webob.exc.HTTPForbidden(msg)

    # Determine the applicable can_share value
    can_share = None
    if body:
        try:
            can_share = bool(body['member']['can_share'])
        except Exception as e:
            # Malformed entity...
            msg = _LW("Invalid membership association specified for "
                      "image %(id)s") % {'id': image_id}
            LOG.warn(msg)
            msg = (_("Invalid membership association: %s") %
                   encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPBadRequest(explanation=msg)

    # Look up an existing membership...
    members = self.db_api.image_member_find(req.context,
                                            image_id=image_id,
                                            member=id,
                                            include_deleted=True)
    if members:
        if can_share is not None:
            values = dict(can_share=can_share)
            self.db_api.image_member_update(req.context,
                                            members[0]['id'],
                                            values)
    else:
        values = dict(image_id=image['id'], member=id,
                      can_share=bool(can_share))
        self.db_api.image_member_create(req.context, values)

    LOG.info(_LI("Successfully updated a membership for image %(id)s"),
             {'id': image_id})
    return webob.exc.HTTPNoContent()
def update_all(self, req, image_id, body):
    """
    Replaces the members of the image with those specified in the
    body. The body is a dict with the following format::

        {'memberships': [
            {'member_id': <MEMBER_ID>,
             ['can_share': [True|False]]}, ...
        ]}
    """
    self._check_can_access_image_members(req.context)

    # Make sure the image exists
    try:
        image = self.db_api.image_get(req.context, image_id)
    except exception.NotFound:
        msg = _("Image %(id)s not found") % {'id': image_id}
        LOG.warn(msg)
        raise webob.exc.HTTPNotFound(msg)
    except exception.Forbidden:
        # If it's private and doesn't belong to them, don't let on
        # that it exists
        msg = _LW("Access denied to image %(id)s but returning"
                  " 'not found'") % {'id': image_id}
        LOG.warn(msg)
        raise webob.exc.HTTPNotFound()

    # Can they manipulate the membership?
    if not self.is_image_sharable(req.context, image):
        msg = (_LW("User lacks permission to share image %(id)s") %
               {'id': image_id})
        LOG.warn(msg)
        msg = _("No permission to share that image")
        raise webob.exc.HTTPForbidden(msg)

    # Get the membership list
    try:
        memb_list = body['memberships']
    except Exception as e:
        # Malformed entity...
        msg = _LW("Invalid membership association specified for "
                  "image %(id)s") % {'id': image_id}
        LOG.warn(msg)
        msg = (_("Invalid membership association: %s") %
               encodeutils.exception_to_unicode(e))
        raise webob.exc.HTTPBadRequest(explanation=msg)

    add = []
    existing = {}
    # Walk through the incoming memberships
    for memb in memb_list:
        try:
            datum = dict(image_id=image['id'],
                         member=memb['member_id'],
                         can_share=None)
        except Exception as e:
            # Malformed entity...
            msg = _LW("Invalid membership association specified for "
                      "image %(id)s") % {'id': image_id}
            LOG.warn(msg)
            msg = (_("Invalid membership association: %s") %
                   encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Figure out what can_share should be
        if 'can_share' in memb:
            datum['can_share'] = bool(memb['can_share'])

        # Try to find the corresponding membership
        members = self.db_api.image_member_find(
            req.context,
            image_id=datum['image_id'],
            member=datum['member'],
            include_deleted=True)
        try:
            member = members[0]
        except IndexError:
            # Default can_share
            datum['can_share'] = bool(datum['can_share'])
            add.append(datum)
        else:
            # Are we overriding can_share?
            if datum['can_share'] is None:
                datum['can_share'] = members[0]['can_share']

            existing[member['id']] = {
                'values': datum,
                'membership': member,
            }

    # We now have a filtered list of memberships to add and
    # memberships to modify. Let's start by walking through all
    # the existing image memberships...
    existing_members = self.db_api.image_member_find(
        req.context, image_id=image['id'], include_deleted=True)

    for member in existing_members:
        if member['id'] in existing:
            # Just update the membership in place
            update = existing[member['id']]['values']
            self.db_api.image_member_update(req.context,
                                            member['id'],
                                            update)
        else:
            if not member['deleted']:
                # Outdated one; needs to be deleted
                self.db_api.image_member_delete(req.context,
                                                member['id'])

    # Now add the non-existent ones
    for memb in add:
        self.db_api.image_member_create(req.context, memb)

    # Make an appropriate result
    LOG.info(_LI("Successfully updated memberships for image %(id)s"),
             {'id': image_id})
    return webob.exc.HTTPNoContent()
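# For illustration only: a request body accepted by update_all() above,
# following the format documented in its docstring. The member IDs are
# hypothetical placeholders.
example_memberships_body = {
    'memberships': [
        {'member_id': 'tenant-a'},                     # defaults to can_share=False
        {'member_id': 'tenant-b', 'can_share': True},  # may re-share the image
    ]
}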
def run(t_id, context, task_repo, image_repo, image_factory):
    LOG.info(_LI('Task %(task_id)s beginning import execution.'),
             {'task_id': t_id})
    _execute(t_id, task_repo, image_repo, image_factory)
def wait(self):
    try:
        self.event.wait()
    except KeyboardInterrupt:
        msg = _LI("Daemon Shutdown on KeyboardInterrupt")
        LOG.info(msg)
def upload(self, req, image_id, data, size):
    image_repo = self.gateway.get_repo(req.context)
    image = None
    refresher = None
    cxt = req.context
    try:
        image = image_repo.get(image_id)
        image.status = 'saving'
        try:
            if CONF.data_api == 'glance.db.registry.api':
                # create a trust if backend is registry
                try:
                    # request user plugin for current token
                    user_plugin = req.environ.get('keystone.token_auth')
                    roles = []
                    # use roles from request environment because they
                    # are not transformed to lower-case unlike cxt.roles
                    for role_info in req.environ.get(
                            'keystone.token_info')['token']['roles']:
                        roles.append(role_info['name'])
                    refresher = trust_auth.TokenRefresher(user_plugin,
                                                          cxt.tenant,
                                                          roles)
                except Exception as e:
                    LOG.info(_LI("Unable to create trust: %s "
                                 "Use the existing user token."),
                             encodeutils.exception_to_unicode(e))

            image_repo.save(image, from_state='queued')
            image.set_data(data, size)

            try:
                image_repo.save(image, from_state='saving')
            except exception.NotAuthenticated:
                if refresher is not None:
                    # request a new token to update an image in database
                    cxt.auth_token = refresher.refresh_token()
                    image_repo = self.gateway.get_repo(req.context)
                    image_repo.save(image, from_state='saving')
                else:
                    raise

            try:
                # release resources required for re-auth
                if refresher is not None:
                    refresher.release_resources()
            except Exception as e:
                LOG.info(_LI("Unable to delete trust %(trust)s: "
                             "%(msg)s"),
                         {"trust": refresher.trust_id,
                          "msg": encodeutils.exception_to_unicode(e)})

        except (glance_store.NotFound,
                exception.ImageNotFound,
                exception.Conflict):
            msg = (_("Image %s could not be found after upload. "
                     "The image may have been deleted during the "
                     "upload, cleaning up the chunks uploaded.") %
                   image_id)
            LOG.warn(msg)
            # NOTE(sridevi): Cleaning up the uploaded chunks.
            try:
                image.delete()
            except exception.ImageNotFound:
                # NOTE(sridevi): Ignore this exception
                pass
            raise webob.exc.HTTPGone(explanation=msg,
                                     request=req,
                                     content_type='text/plain')
        except exception.NotAuthenticated:
            msg = (_("Authentication error - the token may have "
                     "expired during file upload. Deleting image data "
                     "for %s.") % image_id)
            LOG.debug(msg)
            try:
                image.delete()
            except exception.NotAuthenticated:
                # NOTE: Ignore this exception
                pass
            raise webob.exc.HTTPUnauthorized(explanation=msg,
                                             request=req,
                                             content_type='text/plain')
    except ValueError as e:
        LOG.debug("Cannot save data for image %(id)s: %(e)s",
                  {'id': image_id,
                   'e': encodeutils.exception_to_unicode(e)})
        self._restore(image_repo, image)
        raise webob.exc.HTTPBadRequest(
            explanation=encodeutils.exception_to_unicode(e))

    except glance_store.StoreAddDisabled:
        msg = _("Error in store configuration. Adding images to store "
                "is disabled.")
        LOG.exception(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPGone(explanation=msg, request=req,
                                 content_type='text/plain')

    except exception.InvalidImageStatusTransition as e:
        msg = encodeutils.exception_to_unicode(e)
        LOG.exception(msg)
        raise webob.exc.HTTPConflict(explanation=e.msg, request=req)

    except exception.Forbidden as e:
        msg = ("Not allowed to upload image data for image %s" %
               image_id)
        LOG.debug(msg)
        raise webob.exc.HTTPForbidden(explanation=msg, request=req)

    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)

    except glance_store.StorageFull as e:
        msg = _("Image storage media "
                "is full: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)

    except exception.StorageQuotaFull as e:
        msg = _("Image exceeds the storage "
                "quota: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)

    except exception.ImageSizeLimitExceeded as e:
        msg = _("The incoming image is "
                "too large: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)

    except glance_store.StorageWriteDenied as e:
        msg = _("Insufficient permissions on image "
                "storage media: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._restore(image_repo, image)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req)

    except cursive_exception.SignatureVerificationError as e:
        msg = (_LE("Signature verification failed for image %(id)s: "
                   "%(e)s") % {'id': image_id,
                               'e': encodeutils.exception_to_unicode(e)})
        LOG.error(msg)
        self._delete(image_repo, image)
        raise webob.exc.HTTPBadRequest(explanation=msg)

    except webob.exc.HTTPGone as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to upload image data due to "
                          "HTTP error"))

    except webob.exc.HTTPError as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to upload image data due to "
                          "HTTP error"))
            self._restore(image_repo, image)

    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to upload image data due to "
                              "internal error"))
            self._restore(image_repo, image)