def _post(self):
    request = _get_json(self.request.body)
    uid = request['uid']
    _logger.info("Proceeding with update request for '%(uid)s'..." % {
        'uid': uid,
    })

    record = database.get_record(uid)
    if not record:
        self.send_error(404)
        return

    trust = _get_trust(record, request.get('keys'), self.request.remote_ip)
    if not trust.write: # The caller needs write trust to modify the record
        self.send_error(403)
        return

    self._update_policy(record, request)

    # Apply meta changes: drop removed keys, then merge in new or updated values
    for removed in request['meta']['removed']:
        if removed in record['meta']:
            del record['meta'][removed]
    record['meta'].update(request['meta']['new'])

    database.update_record(record)
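# A minimal sketch of the JSON body this update handler appears to expect, inferred only
# from the fields it reads above (uid, keys, meta.removed, meta.new). The key layout checked
# by _get_trust and anything consumed by self._update_policy are not shown in this handler,
# so those parts are assumptions; all values are illustrative.
_EXAMPLE_UPDATE_REQUEST = {
    'uid': 'example-uid',                    # identifier of the record to update
    'keys': {'write': 'example-write-key'},  # assumed key structure consulted by _get_trust
    'meta': {
        'removed': ['obsolete-tag'],         # meta keys to delete from the record
        'new': {'title': 'New title'},       # meta keys to add or overwrite
    },
}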
def _post(self):
    request = _get_json(self.request.body)
    uid = request['uid']
    _logger.info("Proceeding with retrieval request for '%(uid)s'..." % {
        'uid': uid,
    })

    record = database.get_record(uid)
    if not record:
        self.send_error(404)
        return

    trust = _get_trust(record, request.get('keys'), self.request.remote_ip)
    if not trust.read: # The caller needs read trust to retrieve the entity
        self.send_error(403)
        return

    # Touch the record: refresh access time, access count, and any stale-timers
    current_time = int(time.time())
    record['physical']['atime'] = current_time
    record['stats']['accesses'] += 1
    for policy in ('delete', 'compress'):
        if 'stale' in record['policy'][policy]:
            record['policy'][policy]['staleTime'] = current_time + record['policy'][policy]['stale']
    database.update_record(record)

    fs = state.get_filesystem(record['physical']['family'])
    try:
        data = fs.get(record)
    except filesystem.FileNotFoundError:
        _logger.error("Database record exists for '%(uid)s', but filesystem entry does not" % {
            'uid': uid,
        })
        self.send_error(404)
        return
    else:
        _logger.debug("Evaluating decompression requirements...")
        applied_compression = record['physical']['format'].get('comp')
        supported_compressions = [
            c.strip() for c in
            (self.request.headers.get('Media-Storage-Supported-Compression') or '').split(';')
        ]
        if applied_compression and applied_compression not in supported_compressions:
            # The client cannot handle the stored compression, so decompress before sending
            data = compression.get_decompressor(applied_compression)(data)
            applied_compression = None

        _logger.debug("Returning entity...")
        self.set_header('Content-Type', record['physical']['format']['mime'])
        if applied_compression:
            self.set_header('Media-Storage-Applied-Compression', applied_compression)
        while True:
            chunk = data.read(_CHUNK_SIZE)
            if chunk:
                self.write(chunk)
            else:
                break
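# A hedged illustration of a retrieval call against this handler. Only the 'uid'/'keys'
# request fields and the 'Media-Storage-Supported-Compression' header (semicolon-separated,
# per the parser above) are taken from the handler itself; the '/get' path, the key layout,
# and the compression format names ('gz', 'bz2') are assumptions.
import json
import urllib.request

def fetch_entity(base_url, uid, read_key=None):
    """Request an entity, advertising which compression formats the caller can decode."""
    req = urllib.request.Request(
        base_url + '/get',  # hypothetical endpoint; the real routing is not shown here
        data=json.dumps({'uid': uid, 'keys': {'read': read_key}}).encode(),
        headers={'Media-Storage-Supported-Compression': 'gz;bz2'},
    )
    with urllib.request.urlopen(req) as response:
        # If the server kept the stored compression, it reports it back in this header.
        applied = response.headers.get('Media-Storage-Applied-Compression')
        return response.read(), applied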
def do_PUT(self):
    parameters = self.path.split('/')
    parameters = list(filter(lambda a: a != "", parameters))

    try:
        arguments = json.loads(
            self.rfile.read(int(self.headers['content-length'])).decode())
    except Exception as e:
        print(e)
        self.send_response(400)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(b'Bad request')
        return  # Without this, execution would continue with 'arguments' undefined

    query, table = helper_put.build_put_select(parameters)
    if query == 0:
        self.send_response(404)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()  # Headers must be finished before the body is written
        self.wfile.write(b'Not found')
    else:
        query = helper_put.build_update_query(arguments, table, parameters[1])
        print(query)
        status_code = database.update_record(query)
        if status_code == 200:
            message = 'Updated'
        else:
            message = 'Bad request'
        self.send_response(status_code)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(message.encode())
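# A rough usage sketch for this handler, assuming the path has the form /<table>/<id>
# (parameters[1] is passed to build_update_query as the row identifier) and the body is a
# JSON object of column/value pairs. The host, port, table, and column names below are
# purely illustrative.
import json
import http.client

payload = json.dumps({'name': 'Alice', 'age': 30}).encode()  # fields forwarded to build_update_query
conn = http.client.HTTPConnection('localhost', 8000)         # assumed host/port
conn.request('PUT', '/users/42', body=payload)               # hypothetical /<table>/<id> path
response = conn.getresponse()
print(response.status, response.read().decode())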
def _process_record(self, record):
    """
    Determines whether the given `record` is a candidate for compression, compressing the
    associated file and updating the record if it is.
    """
    _logger.info("Compressing record '%(uid)s'..." % {
        'uid': record['_id'],
    })
    current_compression = record['physical']['format'].get('comp')
    target_compression = record['policy']['compress'].get('comp')
    if current_compression == target_compression:
        _logger.debug("File already compressed in target format")
        record['policy']['compress'].clear() # Drop the compression policy
        try:
            database.update_record(record)
        except Exception as e:
            _logger.error("Unable to update record to reflect already-applied compression; compression routine will retry later: %(error)s" % {
                'error': str(e),
            })
            return False
        else:
            return True

    filesystem = state.get_filesystem(record['physical']['family'])
    data = filesystem.get(record)
    if current_compression: # Must be decompressed first
        _logger.info("Decompressing file...")
        data = compression.get_decompressor(current_compression)(data)
    data = compression.get_compressor(target_compression)(data)

    _logger.info("Updating entity...")
    old_format = record['physical']['format'].copy()
    record['physical']['format']['comp'] = target_compression
    try:
        filesystem.put(record, data, tempfile=True)
    except Exception as e: # Harmless backout point
        _logger.warn("Unable to write compressed file to disk; backing out with no consequences")
        return False
    else:
        old_compression_policy = record['policy']['compress'].copy()
        record['policy']['compress'].clear() # Drop the compression policy
        try:
            database.update_record(record)
        except Exception as e: # Results in wasted space until the next attempt
            _logger.error("Unable to update record; old file will be served, and new file will be replaced on a subsequent compression attempt: %(error)s" % {
                'error': str(e),
            })
            return False
        else:
            try:
                filesystem.make_permanent(record)
            except Exception as e:
                _logger.error("Unable to update on-disk file; rolling back database update: %(error)s" % {
                    'error': str(e),
                })
                record['policy']['compress'] = old_compression_policy
                record['physical']['format'] = old_format
                try:
                    database.update_record(record)
                except Exception as e:
                    _logger.error("Unable to roll back database update; '%(uid)s' is inaccessible and must be manually decompressed from '%(comp)s' format: %(error)s" % {
                        'error': str(e),
                        'uid': record['_id'],
                        'comp': target_compression,
                    })
                return False

            # Point the in-memory record back at the old format so the filesystem can
            # resolve and unlink the superseded file; the database already holds the new format.
            record['physical']['format'] = old_format
            try:
                filesystem.unlink(record)
            except Exception as e: # Results in wasted space, but non-fatal
                _logger.error("Unable to unlink old file; space occupied by '%(uid)s' non-recoverable unless unlinked manually: %(family)r | %(file)s : %(error)s" % {
                    'family': record['physical']['family'],
                    'file': filesystem.resolve_path(record),
                    'uid': record['_id'],
                    'error': str(e),
                })
            return True
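# A minimal sketch of what the compressor/decompressor lookup used above could look like,
# assuming each factory returns a callable that accepts a file-like object and returns a new
# file-like object. This is not the project's actual compression module; its format names
# and internals may differ.
import bz2
import gzip
import io

def get_compressor(format_name):
    """Return a callable that compresses a file-like object into an in-memory buffer."""
    def _compress(data):
        if format_name == 'gz':
            return io.BytesIO(gzip.compress(data.read()))
        elif format_name == 'bz2':
            return io.BytesIO(bz2.compress(data.read()))
        raise ValueError("Unsupported compression format: %r" % format_name)
    return _compress

def get_decompressor(format_name):
    """Return a callable that decompresses a file-like object into an in-memory buffer."""
    def _decompress(data):
        if format_name == 'gz':
            return io.BytesIO(gzip.decompress(data.read()))
        elif format_name == 'bz2':
            return io.BytesIO(bz2.decompress(data.read()))
        raise ValueError("Unsupported compression format: %r" % format_name)
    return _decompress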