Example 1
 def _post(self):
     """
     Serves a stored entity in response to a JSON retrieval request.
     
     Looks up the record named by the request's 'uid', enforces read trust,
     refreshes access-tracking metadata, and streams the entity's bytes back
     to the client, decompressing server-side only when the client did not
     advertise support for the applied compression format.
     
     Responds 404 if the record or its filesystem entry is missing and 403
     if read access is denied.
     """
     request = _get_json(self.request.body)
     uid = request['uid']
     _logger.info("Proceeding with retrieval request for '%(uid)s'..." % {
      'uid': uid,
     })
     
     record = database.get_record(uid)
     if not record:
         self.send_error(404)
         return
         
     trust = _get_trust(record, request.get('keys'), self.request.remote_ip)
     if not trust.read:
         self.send_error(403)
         return
         
     #Refresh access-tracking metadata before serving the content
     current_time = int(time.time())
     record['physical']['atime'] = current_time
     record['stats']['accesses'] += 1
     for policy in ('delete', 'compress'):
         #Push the policy's stale-time forward relative to this access
         if 'stale' in record['policy'][policy]:
             record['policy'][policy]['staleTime'] = current_time + record['policy'][policy]['stale']
     database.update_record(record)
     
     fs = state.get_filesystem(record['physical']['family'])
     try:
         data = fs.get(record)
     except filesystem.FileNotFoundError:
         _logger.error("Database record exists for '%(uid)s', but filesystem entry does not" % {
          'uid': uid,
         })
         self.send_error(404)
         return
     else:
         _logger.debug("Evaluating decompression requirements...")
         applied_compression = record['physical']['format'].get('comp')
         #Materialize a set for the membership test; the previous generator
         #form would be exhausted after a single containment check
         supported_compressions = {
          c.strip() for c in (self.request.headers.get('Media-Storage-Supported-Compression') or '').split(';')
         }
         if applied_compression and applied_compression not in supported_compressions: #Must be decompressed first
             data = compression.get_decompressor(applied_compression)(data)
             applied_compression = None
             
         _logger.debug("Returning entity...")
         self.set_header('Content-Type', record['physical']['format']['mime'])
         if applied_compression:
             self.set_header('Media-Storage-Applied-Compression', applied_compression)
         while True:
             chunk = data.read(_CHUNK_SIZE)
             if chunk:
                 self.write(chunk)
             else:
                 break
Example 2
 def get(self, uid, read_key, output_file=None, decompress_on_server=False, timeout=5.0):
     """
     Fetches the identified entity from the server.
     
     Returns a ``(mime, content)`` tuple, where ``content`` is a file-like
     object (the supplied `output_file`, if given, otherwise a spooled
     tempfile) carrying a ``length`` attribute with its size in bytes.
     
     `output_file` is an optional file-like object to which data should be
     written.
     
     `timeout` defaults to 5.0s.
     
     All other arguments are the same as in
     ``media_storage.interfaces.ControlConstruct.get``.
     """
     headers = {}
     if not decompress_on_server:
         #Advertise the client's codecs so the server can hand back
         #compressed content as-is
         headers[common.HEADER_SUPPORTED_COMPRESSION] = common.HEADER_SUPPORTED_COMPRESSION_DELIMITER.join(compression.SUPPORTED_FORMATS)
         
     request = common.assemble_request(self._server.get_host() + common.SERVER_GET, {
      'uid': uid,
      'keys': {
       'read': read_key,
      },
     }, headers=headers)
     
     destination = output_file if output_file else tempfile.SpooledTemporaryFile(_TEMPFILE_SIZE)
     properties = common.send_request(request, output=destination, timeout=timeout)
     
     length = properties.get(common.PROPERTY_CONTENT_LENGTH)
     applied_compression = properties.get(common.PROPERTY_APPLIED_COMPRESSION)
     if applied_compression:
         destination = compression.get_decompressor(applied_compression)(destination)
         if output_file:
             #Decompression produced a fresh tempfile; copy its contents
             #back into the caller-supplied file
             output_file.seek(0)
             output_file.truncate()
             length = common.transfer_data(destination, output_file)
             output_file.seek(0)
             destination = output_file
     
     destination.length = length
     return (properties.get(common.PROPERTY_CONTENT_TYPE), destination)
Example 3
 def _process_record(self, record):
     """
     Determines whether the given `record` is a candidate for compression, compressing the
     associated file and updating the record if it is.
     
     Returns ``True`` when no further compression work is needed (already in
     the target format, or successfully converted) and ``False`` when any
     step failed and the routine should retry on a later pass.
     """
     _logger.info("Compressing record '%(uid)s'..." % {
      'uid': record['_id'],
     })
     current_compression = record['physical']['format'].get('comp')
     target_compression = record['policy']['compress'].get('comp')
     if current_compression == target_compression:
         _logger.debug("File already compressed in target format")
         record['policy']['compress'].clear() #Drop the compression policy
         try:
             database.update_record(record)
         except Exception as e:
             _logger.error("Unable to update record to reflect already-applied compression; compression routine will retry later: %(error)s" % {
              'error': str(e),
             })
             return False
         else:
             return True
             
     filesystem = state.get_filesystem(record['physical']['family'])
     data = filesystem.get(record)
     if current_compression: #Must be decompressed first
         _logger.info("Decompressing file...")
         data = compression.get_decompressor(current_compression)(data)
     data = compression.get_compressor(target_compression)(data)
     
     _logger.info("Updating entity...")
     #Keep a copy of the pre-compression format so failures below can roll back
     old_format = record['physical']['format'].copy()
     record['physical']['format']['comp'] = target_compression
     try:
         filesystem.put(record, data, tempfile=True)
     except Exception: #Harmless backout point
         #`warning` is the supported spelling; `warn` is deprecated
         _logger.warning("Unable to write compressed file to disk; backing out with no consequences")
         return False
     else:
         old_compression_policy = record['policy']['compress'].copy()
         record['policy']['compress'].clear() #Drop the compression policy
         try:
             database.update_record(record)
         except Exception as e: #Results in wasted space until the next attempt
             _logger.error("Unable to update record; old file will be served, and new file will be replaced on a subsequent compression attempt: %(error)s" % {
              'error': str(e),
             })
             return False
         else:
             try:
                 filesystem.make_permanent(record)
             except Exception as e:
                 _logger.error("Unable to update on-disk file; rolling back database update: %(error)s" % {
                  'error': str(e),
                 })
                 #Restore the policy and format so the database again matches
                 #the on-disk state
                 record['policy']['compress'] = old_compression_policy
                 record['physical']['format'] = old_format
                 try:
                     database.update_record(record)
                 except Exception as e:
                     _logger.error("Unable to roll back database update; '%(uid)s' is inaccessible and must be manually decompressed from '%(comp)s' format: %(error)s" % {
                      'error': str(e),
                      'uid': record['_id'],
                      'comp': target_compression,
                     })
                 return False
                 
             #Restore the old format before unlinking — presumably the
             #filesystem path incorporates the format, so this targets the
             #pre-compression file; TODO confirm against resolve_path
             record['physical']['format'] = old_format
             try:
                 filesystem.unlink(record)
             except Exception as e: #Results in wasted space, but non-fatal
                 _logger.error("Unable to unlink old file; space occupied by '%(uid)s' non-recoverable unless unlinked manually: %(family)r | %(file)s : %(error)s" % {
                  'family': record['physical']['family'],
                  'file': filesystem.resolve_path(record),
                  'uid': record['_id'],
                  'error': str(e),
                 })
             return True