class Store(tank.store.base.Store):
    """An implementation of the chase backend adapter."""

    EXAMPLE_URL = "chase://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    CHUNKSIZE = 65536

    opts = [
        cfg.BoolOpt('chase_enable_snet', default=False),
        cfg.StrOpt('chase_store_auth_address'),
        cfg.StrOpt('chase_store_user'),
        cfg.StrOpt('chase_store_key'),
        cfg.StrOpt('chase_store_container',
                   default=DEFAULT_CONTAINER),
        cfg.IntOpt('chase_store_large_object_size',
                   default=DEFAULT_LARGE_OBJECT_SIZE),
        cfg.IntOpt('chase_store_large_object_chunk_size',
                   default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
        cfg.BoolOpt('chase_store_create_container_on_put', default=False),
        ]

    def configure(self):
        self.conf.register_opts(self.opts)
        self.snet = self.conf.chase_enable_snet

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options.

        Any store that needs special configuration should implement this
        method. If the store was not able to successfully configure itself,
        it should raise `exception.BadStoreConfiguration`.
        """
        self.auth_address = self._option_get('chase_store_auth_address')
        self.user = self._option_get('chase_store_user')
        self.key = self._option_get('chase_store_key')
        self.container = self.conf.chase_store_container
        try:
            # The config file has chase_store_large_object_*size in MB, but
            # internally we store it in bytes, since the image_size parameter
            # passed to add() is also in bytes.
            self.large_object_size = \
                self.conf.chase_store_large_object_size * ONE_MB
            self.large_object_chunk_size = \
                self.conf.chase_store_large_object_chunk_size * ONE_MB
        except cfg.ConfigFileValueError as e:
            reason = _("Error in configuration conf: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="chase",
                                                  reason=reason)

        self.scheme = 'chase+https'
        if self.auth_address.startswith('http://'):
            self.scheme = 'chase+http'
            self.full_auth_address = self.auth_address
        elif self.auth_address.startswith('https://'):
            self.full_auth_address = self.auth_address
        else:  # Default to https
            self.full_auth_address = 'https://' + self.auth_address
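
# A minimal configuration sketch for the chase backend above; the file name
# and every value are illustrative assumptions, not defaults shipped with
# tank. Sizes are given in MB and converted to bytes by configure_add().
#
#   # tank-api.conf (hypothetical excerpt)
#   chase_store_auth_address = auth.example.com/v1.0/
#   chase_store_user = jdoe
#   chase_store_key = a86850deb2742ec3cb41518e26aa2d89
#   chase_store_container = tank
#   chase_store_large_object_size = 5120
#   chase_store_large_object_chunk_size = 200
#
# With these values configure_add() sets scheme='chase+https' (no http://
# prefix on the auth address), full_auth_address='https://auth.example.com/v1.0/',
# and stores both large-object sizes multiplied by ONE_MB.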
class RabbitStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    opts = [
        cfg.StrOpt('rabbit_host', default='localhost'),
        cfg.IntOpt('rabbit_port', default=5672),
        cfg.BoolOpt('rabbit_use_ssl', default=False),
        cfg.StrOpt('rabbit_userid', default='guest'),
        cfg.StrOpt('rabbit_password', default='guest'),
        cfg.StrOpt('rabbit_virtual_host', default='/'),
        cfg.StrOpt('rabbit_notification_exchange', default='tank'),
        cfg.StrOpt('rabbit_notification_topic',
                   default='tank_notifications')
        ]

    def __init__(self, conf):
        """Initialize the rabbit notification strategy."""
        self._conf = conf
        self._conf.register_opts(self.opts)

        self.topic = self._conf.rabbit_notification_topic
        self.connect()

    def connect(self):
        self.connection = kombu.connection.BrokerConnection(
            hostname=self._conf.rabbit_host,
            port=self._conf.rabbit_port,
            userid=self._conf.rabbit_userid,
            password=self._conf.rabbit_password,
            virtual_host=self._conf.rabbit_virtual_host,
            ssl=self._conf.rabbit_use_ssl)
        self.channel = self.connection.channel()
        self.exchange = kombu.entity.Exchange(
            channel=self.channel,
            type="topic",
            name=self._conf.rabbit_notification_exchange)
        self.exchange.declare()

    def _send_message(self, message, priority):
        routing_key = "%s.%s" % (self.topic, priority.lower())

        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(channel=self.channel,
                                   exchange=self.exchange,
                                   durable=True,
                                   name=routing_key,
                                   routing_key=routing_key)
        queue.declare()

        msg = self.exchange.Message(json.dumps(message))
        self.exchange.publish(msg, routing_key=routing_key)

    def warn(self, msg):
        self._send_message(msg, "WARN")

    def info(self, msg):
        self._send_message(msg, "INFO")

    def error(self, msg):
        self._send_message(msg, "ERROR")
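
# A rough usage sketch for the notifier above. The `conf` object and the
# message payload are illustrative assumptions; only the routing behaviour
# follows from the code:
#
#   strategy = RabbitStrategy(conf)
#   strategy.error({'event_type': 'image.delete', 'payload': {...}})
#
# _send_message() JSON-encodes the dict and publishes it to the "tank"
# topic exchange with routing key "tank_notifications.error", declaring a
# durable queue of the same name so the message is not dropped if no
# consumer has started yet.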
""" loc = location.get_location_from_uri(uri) return loc.store_name scrubber_datadir_opt = cfg.StrOpt('scrubber_datadir', default='/var/lib/tank/scrubber') def get_scrubber_datadir(conf): conf.register_opt(scrubber_datadir_opt) return conf.scrubber_datadir delete_opts = [ cfg.BoolOpt('delayed_delete', default=False), cfg.IntOpt('scrub_time', default=0) ] def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs): """ Given a uri and a time, schedule the deletion of an image. """ conf.register_opts(delete_opts) if not conf.delayed_delete: registry.update_image_metadata(context, image_id, {'status': 'deleted'}) try: return delete_from_backend(uri, **kwargs) except (UnsupportedBackend, exception.NotFound):
class ContextMiddleware(wsgi.Middleware):

    opts = [
        cfg.BoolOpt('owner_is_tenant', default=True),
        ]

    def __init__(self, app, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)

        # Determine the context class to use
        self.ctxcls = RequestContext
        if 'context_class' in local_conf:
            self.ctxcls = utils.import_class(local_conf['context_class'])

        super(ContextMiddleware, self).__init__(app)

    def make_context(self, *args, **kwargs):
        """
        Create a context with the given arguments.
        """
        kwargs.setdefault('owner_is_tenant', self.conf.owner_is_tenant)
        return self.ctxcls(*args, **kwargs)

    def process_request(self, req):
        """
        Extract any authentication information in the request and
        construct an appropriate context from it.

        A few scenarios exist:

        1. If X-Auth-Token is passed in, then consult the TENANT and ROLE
           headers to determine permissions.

        2. An X-Auth-Token was passed in, but the Identity-Status is not
           confirmed. For now, just raise a NotAuthorized exception.

        3. X-Auth-Token is omitted. If we were using Keystone, then the
           tokenauth middleware would have rejected the request, so we must
           be using NoAuth. In that case, assume that is_admin=True.
        """
        # TODO(sirp): should we be using the tank_tokeauth shim from
        # Keystone here? If we do, we need to make sure it handles the
        # NoAuth case
        auth_tok = req.headers.get('X-Auth-Token',
                                   req.headers.get('X-Storage-Token'))
        if auth_tok:
            if req.headers.get('X-Identity-Status') == 'Confirmed':
                # 1. Auth token is passed, check the other headers
                user = req.headers.get('X-User')
                tenant = req.headers.get('X-Tenant')
                roles = [r.strip()
                         for r in req.headers.get('X-Role', '').split(',')]
                is_admin = 'Admin' in roles
            else:
                # 2. Identity-Status is not confirmed
                # FIXME(sirp): not sure what the correct behavior in this
                # case is; just raise NotAuthorized for now
                raise exception.NotAuthorized()
        else:
            # 3. Auth token is omitted, so assume NoAuth
            user = None
            tenant = None
            roles = []
            is_admin = True

        req.context = self.make_context(auth_tok=auth_tok,
                                        user=user,
                                        tenant=tenant,
                                        roles=roles,
                                        is_admin=is_admin)
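
# A minimal sketch of scenario 1 above, i.e. a request that already passed
# through an identity-confirming proxy. The header values are illustrative:
#
#   X-Auth-Token:      887665443383838
#   X-Identity-Status: Confirmed
#   X-User:            jdoe
#   X-Tenant:          acme
#   X-Role:            Admin, Member
#
# process_request() builds a context with user='jdoe', tenant='acme',
# roles=['Admin', 'Member'] and is_admin=True; owner_is_tenant (default
# True) is forwarded to the context class via make_context().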
class Scrubber(object):
    CLEANUP_FILE = ".cleanup"

    opts = [
        cfg.BoolOpt('cleanup_scrubber', default=False),
        cfg.IntOpt('cleanup_scrubber_time', default=86400)
        ]

    def __init__(self, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)

        self.datadir = store.get_scrubber_datadir(conf)
        self.cleanup = self.conf.cleanup_scrubber
        self.cleanup_time = self.conf.cleanup_scrubber_time

        host, port = registry.get_registry_addr(conf)

        logger.info(_("Initializing scrubber with conf: %s") %
                    {'datadir': self.datadir, 'cleanup': self.cleanup,
                     'cleanup_time': self.cleanup_time,
                     'registry_host': host, 'registry_port': port})

        self.registry = client.RegistryClient(host, port)

        utils.safe_mkdirs(self.datadir)

        store.create_stores(conf)

    def run(self, pool, event=None):
        now = time.time()

        if not os.path.exists(self.datadir):
            logger.info(_("%s does not exist") % self.datadir)
            return

        delete_work = []
        for root, dirs, files in os.walk(self.datadir):
            for id in files:
                if id == self.CLEANUP_FILE:
                    continue

                file_name = os.path.join(root, id)
                delete_time = os.stat(file_name).st_mtime

                if delete_time > now:
                    continue

                uri, delete_time = read_queue_file(file_name)

                if delete_time > now:
                    continue

                delete_work.append((id, uri, now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)

        if self.cleanup:
            self._cleanup(pool)

    def _delete(self, id, uri, now):
        file_path = os.path.join(self.datadir, str(id))
        try:
            logger.debug(_("Deleting %(uri)s") % {'uri': uri})
            store.delete_from_backend(uri)
        except store.UnsupportedBackend:
            msg = _("Failed to delete image from store (%(uri)s).")
            logger.error(msg % {'uri': uri})
            write_queue_file(file_path, uri, now)

        self.registry.update_image(id, {'status': 'deleted'})
        utils.safe_remove(file_path)

    def _cleanup(self, pool):
        now = time.time()
        cleanup_file = os.path.join(self.datadir, self.CLEANUP_FILE)
        if not os.path.exists(cleanup_file):
            write_queue_file(cleanup_file, 'cleanup', now)
            return

        _uri, last_run_time = read_queue_file(cleanup_file)
        cleanup_time = last_run_time + self.cleanup_time
        if cleanup_time > now:
            return

        logger.info(_("Getting images deleted before %s") % self.cleanup_time)
        write_queue_file(cleanup_file, 'cleanup', now)

        filters = {'deleted': True, 'is_public': 'none',
                   'status': 'pending_delete'}
        pending_deletes = self.registry.get_images_detailed(filters=filters)

        delete_work = []
        for pending_delete in pending_deletes:
            deleted_at = pending_delete.get('deleted_at')
            if not deleted_at:
                continue

            time_fmt = "%Y-%m-%dT%H:%M:%S"
            delete_time = calendar.timegm(time.strptime(deleted_at,
                                                        time_fmt))

            if delete_time + self.cleanup_time > now:
                continue

            delete_work.append((pending_delete['id'],
                                pending_delete['location'], now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)
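
# A rough sketch of the queue directory this scrubber walks; the layout is
# inferred from the read_queue_file()/write_queue_file() calls above and from
# scrubber_datadir's default, so treat it as an assumption:
#
#   /var/lib/tank/scrubber/
#       <image_id>    queue entry mapping back to (uri, delete_time)
#       .cleanup      marker recording the time of the last cleanup run
#
# run() skips any entry whose recorded delete_time is still in the future and
# hands the rest to _delete() via the worker pool.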
class Store(tank.store.base.Store):
    """An implementation of the s3 adapter."""

    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"

    opts = [
        cfg.StrOpt('s3_store_host'),
        cfg.StrOpt('s3_store_access_key'),
        cfg.StrOpt('s3_store_secret_key'),
        cfg.StrOpt('s3_store_bucket'),
        cfg.StrOpt('s3_store_object_buffer_dir'),
        cfg.BoolOpt('s3_store_create_bucket_on_put', default=False),
        ]

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options.

        Any store that needs special configuration should implement this
        method. If the store was not able to successfully configure itself,
        it should raise `exception.BadStoreConfiguration`.
        """
        self.conf.register_opts(self.opts)
        self.s3_host = self._option_get('s3_store_host')
        access_key = self._option_get('s3_store_access_key')
        secret_key = self._option_get('s3_store_secret_key')
        # NOTE(jaypipes): Need to encode to UTF-8 here because of a
        # bug in the HMAC library that boto uses.
        # See: http://bugs.python.org/issue5285
        # See: http://trac.edgewall.org/ticket/8083
        self.access_key = access_key.encode('utf-8')
        self.secret_key = secret_key.encode('utf-8')
        self.bucket = self._option_get('s3_store_bucket')

        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Default to http
            self.full_s3_host = 'http://' + self.s3_host

        self.s3_store_object_buffer_dir = \
            self.conf.s3_store_object_buffer_dir

    def _option_get(self, param):
        result = getattr(self.conf, param)
        if not result:
            reason = _("Could not find %(param)s in configuration "
                       "options.") % locals()
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        return result

    def get(self, location):
        """
        Takes a `tank.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location: `tank.store.location.Location` object, supplied
                         from tank.store.location.get_location_from_uri()
        :raises `tank.exception.NotFound` if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        key = get_key(bucket_obj, loc.key)

        msg = _("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        #if expected_size and (key.size != expected_size):
        #    msg = "Expected %s bytes, got %s" % (expected_size, key.size)
        #    logger.error(msg)
        #    raise tank.store.BackendException(msg)
        key.BufferSize = self.CHUNKSIZE
        return (ChunkedFile(key), key.size)

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns an `tank.store.ImageAddResult` object
        containing information about the stored image.
        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval `tank.store.ImageAddResult` object
        :raises `tank.common.exception.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <ACCESS_KEY> = ``s3_store_access_key``
            <SECRET_KEY> = ``s3_store_secret_key``
            <S3_URL> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <OBJ> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))

        create_bucket_if_missing(self.bucket, s3_conn, self.conf)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") % loc.get_uri())

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                         'access_key': self.access_key,
                                         'bucket': self.bucket,
                                         'obj_name': obj_name})
        logger.debug(msg)

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object,
        # otherwise the call to set_contents_from_file() will die with
        # an error about the Input object having no 'seek' method. We
        # might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here we
        # write the temporary file in as memory-efficient a manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()
        msg = _("Writing request body file to temporary file "
                "for %s") % loc.get_uri()
        logger.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        chunk = image_file.read(self.CHUNKSIZE)
        while chunk:
            checksum.update(chunk)
            temp_file.write(chunk)
            chunk = image_file.read(self.CHUNKSIZE)
        temp_file.flush()

        msg = _("Uploading temporary file to S3 for %s") % loc.get_uri()
        logger.debug(msg)

        # OK, now upload the data into the key
        key.set_contents_from_file(open(temp_file.name, 'r+b'),
                                   replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        logger.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                       "with checksum %(checksum_hex)s") % locals())

        return (loc.get_uri(), size, checksum_hex)

    def delete(self, location):
        """
        Takes a `tank.store.location.Location` object that indicates
        where to find the image file to delete

        :param location: `tank.store.location.Location` object, supplied
                         from tank.store.location.get_location_from_uri()
        :raises NotFound if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'))
        bucket_obj = get_bucket(s3_conn, loc.bucket)

        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)

        msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
                "access_key=%(accesskey)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
                                         'accesskey': loc.accesskey,
                                         'bucket': loc.bucket,
                                         'obj_name': loc.key})
        logger.debug(msg)

        return key.delete()
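
# A minimal sketch of the store location URI that get(), add() and delete()
# above operate on, following EXAMPLE_URL; the host, bucket and credentials
# are illustrative:
#
#   s3+https://AKIAEXAMPLE:secret@s3.example.com/tank-images/<image_id>
#
# add() returns this URI (via loc.get_uri()) together with the byte count and
# MD5 checksum of the stored object.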