def put(self, data, mime, family=None, comp=compression.COMPRESS_NONE, compress_on_server=False, deletion_policy=None, compression_policy=None, meta=None, uid=None, keys=None, timeout=10.0 ): """ Stores data on a server, returning a dictionary containing the keys 'uid', which points to the UID of the stored data, and 'keys', which is another dictionary containing 'read' and 'write', the keys needed to perform either type of action on the stored data. `data` is a string or file-like object containing the payload to be stored and `mime` is the MIME-type of the data. `timeout` defaults to 10.0s, but should be adjusted depending on your needs. All other arguments are the same as in ``media_storage.interfaces.ControlConstruct.put``. """ description = { 'uid': uid, 'keys': keys, 'physical': { 'family': family, 'format': { 'mime': mime, 'comp': comp, }, }, 'policy': { 'delete': deletion_policy, 'compress': compression_policy, }, 'meta': meta, } headers = {} if comp: if not compress_on_server: try: if type(data) in types.StringTypes: #The compressors expect file-like objects data = StringIO.StringIO(data) data = compression.get_compressor(comp)(data) except ValueError: headers[common.HEADER_COMPRESS_ON_SERVER] = common.HEADER_COMPRESS_ON_SERVER_TRUE else: headers[common.HEADER_COMPRESS_ON_SERVER] = common.HEADER_COMPRESS_ON_SERVER_TRUE request = common.assemble_request(self._server.get_host() + common.SERVER_PUT, description, headers=headers, data=data) (properties, response) = common.send_request(request, timeout=timeout) return json.loads(response)
def _post(self):
    """
    Services a storage (PUT) request.

    Reads the request payload via ``self._get_payload()``, assembles a
    database record from the supplied description, optionally compresses the
    payload when the client deferred compression to the server, then writes
    the record to the database and the payload to the appropriate filesystem.

    Returns a dictionary with the record's 'uid' and 'keys' on success;
    responds with HTTP 409 and returns None when the description is
    malformed.
    """
    (description, content) = self._get_payload()
    now = time.time()

    try:
        _logger.debug("Assembling database record...")
        #Field-evaluation order matches the structure of the incoming
        #description; any structural problem surfaces as one of the
        #exceptions handled below.
        record = {
            '_id': description.get('uid') or uuid.uuid1().hex, #Honour a client-supplied UID, else mint one
            'keys': self._build_keys(description),
            'physical': {
                'family': description['physical'].get('family'),
                'ctime': now,
                'minRes': CONFIG.storage_minute_resolution,
                'atime': int(now),
                'format': self._build_format(description),
            },
            'policy': self._build_policy(description),
            'stats': {
                'accesses': 0,
            },
            'meta': description.get('meta') or {},
        }
    except (KeyError, TypeError, AttributeError) as e:
        _logger.error("Request received did not adhere to expected structure: %(error)s" % {
            'error': str(e),
        })
        self.send_error(409)
        return
    _logger.info("Proceeding with storage request for '%(uid)s'..." % {
        'uid': record['_id'],
    })

    _logger.debug("Evaluating compression requirements...")
    target_compression = record['physical']['format'].get('comp')
    if target_compression and self.request.headers.get('Media-Storage-Compress-On-Server') == 'yes':
        _logger.info("Compressing file...")
        content = compression.get_compressor(target_compression)(content)

    _logger.debug("Storing entity...")
    database.add_record(record)
    filesystem = state.get_filesystem(record['physical']['family'])
    filesystem.put(record, content)
    return {
        'uid': record['_id'],
        'keys': record['keys'],
    }
def _process_record(self, record):
    """
    Determines whether the given `record` is a candidate for compression,
    compressing the associated file and updating the record if it is.

    Returns True when the record no longer needs compression work (either
    it was already in the target format or the recompression fully
    succeeded); returns False when any step failed and a later pass should
    retry or manual intervention is required (see the per-failure log
    messages).
    """
    _logger.info("Compressing record '%(uid)s'..." % {
        'uid': record['_id'],
    })
    current_compression = record['physical']['format'].get('comp')
    target_compression = record['policy']['compress'].get('comp')
    if current_compression == target_compression:
        #Nothing to do on disk; just retire the policy so this record is
        #not re-examined on subsequent passes.
        _logger.debug("File already compressed in target format")
        record['policy']['compress'].clear() #Drop the compression policy
        try:
            database.update_record(record)
        except Exception as e:
            _logger.error("Unable to update record to reflect already-applied compression; compression routine will retry later: %(error)s" % {
                'error': str(e),
            })
            return False
        else:
            return True

    filesystem = state.get_filesystem(record['physical']['family'])
    data = filesystem.get(record)
    if current_compression: #Must be decompressed first
        _logger.info("Decompressing file...")
        data = compression.get_decompressor(current_compression)(data)
    data = compression.get_compressor(target_compression)(data)

    _logger.info("Updating entity...")
    #Snapshot the format before mutating it so every failure path below can
    #roll the in-memory record back to its pre-compression state.
    old_format = record['physical']['format'].copy()
    record['physical']['format']['comp'] = target_compression
    try:
        #Write the recompressed data as a temporary file first so failure
        #here leaves the original file untouched.
        filesystem.put(record, data, tempfile=True)
    except Exception as e: #Harmless backout point
        _logger.warn("Unable to write compressed file to disk; backing out with no consequences")
        return False
    else:
        old_compression_policy = record['policy']['compress'].copy()
        record['policy']['compress'].clear() #Drop the compression policy
        try:
            #Commit the new format/policy to the database before promoting
            #the temporary file.
            database.update_record(record)
        except Exception as e: #Results in wasted space until the next attempt
            _logger.error("Unable to update record; old file will be served, and new file will be replaced on a subsequent compression attempt: %(error)s" % {
                'error': str(e),
            })
            return False
        else:
            try:
                #Promote the temporary file so the database and disk agree.
                filesystem.make_permanent(record)
            except Exception as e:
                _logger.error("Unable to update on-disk file; rolling back database update: %(error)s" % {
                    'error': str(e),
                })
                #Disk promotion failed after the database commit: restore
                #the old policy and format and try to undo the database
                #update so the still-present old file is served correctly.
                record['policy']['compress'] = old_compression_policy
                record['physical']['format'] = old_format
                try:
                    database.update_record(record)
                except Exception as e:
                    #Database now points at a compressed file that was never
                    #promoted; the entity is unreachable until fixed by hand.
                    _logger.error("Unable to roll back database update; '%(uid)s' is inaccessible and must be manually decompressed from '%(comp)s' format: %(error)s" % {
                        'error': str(e),
                        'uid': record['_id'],
                        'comp': target_compression,
                    })
                return False
            #Success path: temporarily restore the old format on the
            #in-memory record so the filesystem resolves the OLD file's
            #path for unlinking — presumably resolve_path derives the path
            #from the format/compression; TODO confirm. NOTE(review): the
            #record is left holding old_format after this point, so callers
            #should not rely on its in-memory 'format' being current (the
            #database already holds the new format).
            record['physical']['format'] = old_format
            try:
                filesystem.unlink(record)
            except Exception as e: #Results in wasted space, but non-fatal
                _logger.error("Unable to unlink old file; space occupied by '%(uid)s' non-recoverable unless unlinked manually: %(family)r | %(file)s : %(error)s" % {
                    'family': record['physical']['family'],
                    'file': filesystem.resolve_path(record),
                    'uid': record['_id'],
                    'error': str(e),
                })
            return True