async def put_crypto(self, path):
    """Persist the server security key alongside the stored image at *path*.

    Only active when STORES_CRYPTO_KEY_FOR_EACH_IMAGE is enabled.

    :param path: storage path of the image the key belongs to
    :return: absolute filesystem path of the image, or None when disabled
    :raises RuntimeError: when the feature is on but no SECURITY_KEY is set
    """
    if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
        return

    file_abspath = self.path_on_filesystem(path)
    file_dir_abspath = dirname(file_abspath)
    self.ensure_dir(file_dir_abspath)

    if not self.context.server.security_key:
        raise RuntimeError("STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be " "True if no SECURITY_KEY specified")

    crypto_path = "%s.txt" % splitext(file_abspath)[0]
    # Write to a unique temp name first so readers never observe a partially
    # written key file; move() is atomic within the same filesystem.
    temp_abspath = "%s.%s" % (crypto_path, str(uuid4()).replace("-", ""))
    with open(temp_abspath, "wb") as _file:
        _file.write(self.context.server.security_key.encode())

    move(temp_abspath, crypto_path)
    logger.debug(
        "Stored crypto at %s (security key: %s)",
        crypto_path,
        self.context.server.security_key,
    )

    return file_abspath
def execute_image_operations(self):
    """Serve an image request, consulting the result storage first.

    On a result-storage hit the cached bytes are written out directly
    (recording hit/timing metrics); otherwise requested dimensions are
    clamped to the configured maxima and PRE_LOAD filters run before the
    source image is fetched via get_image().
    """
    self.context.request.quality = None

    req = self.context.request
    conf = self.context.config

    should_store = self.context.config.RESULT_STORAGE_STORES_UNSAFE or not self.context.request.unsafe
    if self.context.modules.result_storage and should_store:
        start = datetime.datetime.now()
        result = yield gen.maybe_future(self.context.modules.result_storage.get())
        finish = datetime.datetime.now()
        # Incoming time is reported in milliseconds.
        self.context.metrics.timing('result_storage.incoming_time', (finish - start).total_seconds() * 1000)
        if result is None:
            self.context.metrics.incr('result_storage.miss')
        else:
            self.context.metrics.incr('result_storage.hit')
            self.context.metrics.incr('result_storage.bytes_read', len(result))
            logger.debug('[RESULT_STORAGE] IMAGE FOUND: %s' % req.url)
            self.finish_request(self.context, result)
            return

    # Width/height may be non-numeric strings (e.g. "orig"); only clamp numbers.
    if conf.MAX_WIDTH and (not isinstance(req.width, basestring)) and req.width > conf.MAX_WIDTH:
        req.width = conf.MAX_WIDTH
    if conf.MAX_HEIGHT and (not isinstance(req.height, basestring)) and req.height > conf.MAX_HEIGHT:
        req.height = conf.MAX_HEIGHT

    req.meta_callback = conf.META_CALLBACK_NAME or self.request.arguments.get('callback', [None])[0]

    self.filters_runner = self.context.filters_factory.create_instances(self.context, self.context.request.filters)
    # Apply all the filters from the PRE_LOAD phase and call get_image() afterwards.
    self.filters_runner.apply_filters(thumbor.filters.PHASE_PRE_LOAD, self.get_image)
def post(self, **kwargs):
    """Validate an image URL and store a shortened key for it.

    The URL arrives either as the ``url`` keyword (request body) or the
    ``key`` keyword (URL path).  The image is validated with check_image();
    only a 200 status leads to generating and storing the short key.

    :raises tornado.web.HTTPError: 404 when no URL was supplied.
    """
    self.should_return_image = False

    # URL can be passed as a URL argument or in the body
    url = kwargs['url'] if 'url' in kwargs else kwargs['key']
    if not url:
        logger.error("Couldn't find url param in body or key in URL...")
        raise tornado.web.HTTPError(404)

    options = RequestParser.path_to_parameters(url)
    yield self.check_image(options)

    # We check the status code, if != 200 the image is incorrect, and we
    # shouldn't store the key
    if self.get_status() == 200:
        logger.debug("Image is checked, clearing the response before trying to store...")
        self.clear()
        try:
            shortener = Shortener(self.context)
            key = shortener.generate(url)
            shortener.put(key, url)
            self.write(json.dumps({'key': key}))
            self.set_header("Content-Type", "application/json")
        except Exception as e:
            # BUG FIX: Exception.message was removed in Python 3; str(e) is
            # the portable way to obtain the error text.
            logger.error("An error occurred while trying to store shortened URL: {error}.".format(error=str(e)))
            self.set_status(500)
            self.write(json.dumps({'error': str(e)}))
def execute_image_operations(self):
    """Serve an image request (synchronous variant, no metrics).

    Checks the result storage for a cached rendition first; on a hit the
    bytes are written out immediately.  Otherwise dimensions are clamped
    and the pipeline continues with get_image().
    """
    self.context.request.quality = None

    req = self.context.request
    conf = self.context.config

    # Remember the original file extension of the requested image URL.
    req.extension = splitext(req.image_url)[-1].lower()

    should_store = self.context.config.RESULT_STORAGE_STORES_UNSAFE or not self.context.request.unsafe
    if self.context.modules.result_storage and should_store:
        result = self.context.modules.result_storage.get()
        if result is not None:
            logger.debug('[RESULT_STORAGE] IMAGE FOUND: %s' % req.url)
            self.finish_request(self.context, result)
            return

    # Width/height may be non-numeric strings (e.g. "orig"); only clamp numbers.
    if conf.MAX_WIDTH and (not isinstance(
            req.width, basestring)) and req.width > conf.MAX_WIDTH:
        req.width = conf.MAX_WIDTH
    if conf.MAX_HEIGHT and (not isinstance(
            req.height, basestring)) and req.height > conf.MAX_HEIGHT:
        req.height = conf.MAX_HEIGHT

    req.meta_callback = conf.META_CALLBACK_NAME or self.request.arguments.get(
        'callback', [None])[0]

    self.get_image()
def __init__(self, context):
    '''
    :param context: `Context` instance
    '''
    # Load configured community extensions first so their modules are
    # importable when the importer runs below.
    if context.config.get('COMMUNITY_EXTENSIONS', None):
        for extension in context.config.get('COMMUNITY_EXTENSIONS'):
            Extensions.load(extension)

    Importer.import_community_modules(context.modules.importer)
    self.context = Context.from_context(context)

    if self.context.config.get('COMMUNITY_MONKEYPATCH', True):
        logger.debug("Monkey patching ContextHandler.initialize")

        # Monkey patch the ContextHandler.initialize method to generate a
        # community context instead of the one from vanilla thumbor.
        def initialize(self, context):
            '''Initialize a new Context object

            :param context: thumbor.context.Context
            '''
            self.context = Context.from_context(
                context, request_handler=self
            )

        ContextHandler.initialize = initialize

    super(App, self).__init__(self.get_handlers())
async def put(self, image_bytes):
    """Store *image_bytes* in the filesystem result storage.

    Skips storage when the request explicitly asked for max_age == 0 and
    refuses to write outside the configured root.  Data is written to a
    unique temp file and atomically moved into place; when a max_age is
    set, a sidecar expiry file is moved into place first.

    :param image_bytes: raw image payload to persist
    """
    if self.context.request.max_age is not None and self.context.request.max_age == 0:
        return

    file_abspath = self.normalize_path(self.context.request.url)
    if not self.validate_path(file_abspath):
        logger.warning(
            "[RESULT_STORAGE] unable to write outside root path: %s",
            file_abspath)
        return

    temp_abspath = "%s.%s" % (file_abspath, str(uuid4()).replace("-", ""))
    file_dir_abspath = dirname(file_abspath)
    logger.debug("[RESULT_STORAGE] putting at %s (%s)", file_abspath, file_dir_abspath)
    self.ensure_dir(file_dir_abspath)
    with open(temp_abspath, "wb") as _file:
        _file.write(image_bytes)

    if self.context.request.max_age is not None:
        # Sidecar file holding the expiry; moved before the image so the
        # image never exists without its expiry marker.
        with open(temp_abspath + Storage.EXPIRE_EXT, "wb") as _file:
            _file.write(str.encode(str(self.context.request.max_age)))
        move(temp_abspath + Storage.EXPIRE_EXT, file_abspath + Storage.EXPIRE_EXT)

    move(temp_abspath, file_abspath)
def execute_image_operations(self):
    """Serve an image request, picking the GIF engine for cached GIF hits.

    On a result-storage hit the engine is selected from the cached bytes'
    mimetype (gifsicle for GIFs when enabled) before finishing the request.
    Otherwise dimensions are clamped and PRE_LOAD filters run.
    """
    self.context.request.quality = None

    req = self.context.request
    conf = self.context.config

    # Remember the original file extension of the requested image URL.
    req.extension = splitext(req.image_url)[-1].lower()

    should_store = self.context.config.RESULT_STORAGE_STORES_UNSAFE or not self.context.request.unsafe
    if self.context.modules.result_storage and should_store:
        result = self.context.modules.result_storage.get()
        if result is not None:
            mime = BaseEngine.get_mimetype(result)
            # Cached GIFs are handled by gifsicle when configured.
            if mime == '.gif' and self.context.config.USE_GIFSICLE_ENGINE:
                self.context.request.engine = GifEngine(self.context)
            else:
                self.context.request.engine = self.context.modules.engine
            logger.debug('[RESULT_STORAGE] IMAGE FOUND: %s' % req.url)
            self.finish_request(self.context, result)
            return

    # Width/height may be non-numeric strings (e.g. "orig"); only clamp numbers.
    if conf.MAX_WIDTH and (not isinstance(req.width, basestring)) and req.width > conf.MAX_WIDTH:
        req.width = conf.MAX_WIDTH
    if conf.MAX_HEIGHT and (not isinstance(req.height, basestring)) and req.height > conf.MAX_HEIGHT:
        req.height = conf.MAX_HEIGHT

    req.meta_callback = conf.META_CALLBACK_NAME or self.request.arguments.get('callback', [None])[0]

    self.filters_runner = self.context.filters_factory.create_instances(self.context, self.context.request.filters)
    self.filters_runner.apply_filters(thumbor.filters.PHASE_PRE_LOAD, self.get_image)
def srgb(self):
    """Convert the loaded image from its embedded ICC profile to sRGB.

    No-op (with a warning) when the engine is not PIL-based or ImageCms is
    unavailable; no-op (debug log) when the image carries no embedded
    profile, which is assumed to mean it is already sRGB.  Any conversion
    error is logged and swallowed so the pipeline continues.
    """
    try:
        if not isinstance(self.engine, PILEngine):
            # FIX: logger.warn is deprecated since Python 3.3; use warning.
            logger.warning('Could not perform profileToProfile conversion: engine is not PIL engine')
            return
        if (ImageCms is None):
            logger.warning('ImageCms is not installed. Could not perform profileToProfile conversion')
            return

        image = self.engine.image

        embedded_profile = image.info.get('icc_profile')

        if not embedded_profile:
            logger.debug('Image does not have embedded profile. Assuming already in sRGB')
            return

        embedded_profile = BytesIO(embedded_profile)
        srgb_profile = BytesIO(tiny_srgb)

        # Preserve the alpha channel through the color conversion.
        output_mode = 'RGBA' if 'A' in image.mode else 'RGB'
        image = ImageCms.profileToProfile(image, embedded_profile, srgb_profile, renderingIntent=0, outputMode=output_mode)

        self.engine.image = image
        self.engine.icc_profile = image.info.get('icc_profile')
    except Exception as err:
        logger.exception(err)
def optimize(self, buffer, input_file, output_file):
    """Run zopflipng over *input_file*, writing the optimized PNG to *output_file*."""
    config = self.context.config
    # Translate config switches into their zopflipng command-line flags.
    optional_flags = [
        flag
        for enabled, flag in (
            (config.ZOPFLIPNG_COMPRESS_MORE, '-m'),
            (config.ZOPFLIPNG_LOSSY_TRANSPARENT, '--lossy_transparent'),
            (config.ZOPFLIPNG_LOSSY_8BIT, '--lossy_8bit'),
        )
        if enabled
    ]
    command = [self.zopflipng_path, '-y'] + optional_flags + [input_file, output_file]
    with open(os.devnull) as null:
        logger.debug("[ZOPFLIPNG] running: %s" % " ".join(command))
        subprocess.call(command, stdin=null, stdout=null)
def define_image_type(self, context, result):
    """Resolve the output image extension and HTTP content type.

    Precedence when no cached result is available: explicit request format,
    then AUTO_WEBP (when the client accepts webp and the image is not
    animated), then the engine's own extension.  Metadata requests override
    the content type with JSON/JSONP.

    :return: tuple of (image_extension, content_type)
    """
    if result is not None:
        # Sniff the type from the cached result bytes.
        image_extension = BaseEngine.get_mimetype(result)
    else:
        image_extension = context.request.format
        if image_extension is not None:
            image_extension = '.%s' % image_extension
            logger.debug('Image format specified as %s.' % image_extension)
        elif context.config.AUTO_WEBP and context.request.accepts_webp and not context.request.engine.is_multiple():
            image_extension = '.webp'
            logger.debug('Image format set by AUTO_WEBP as %s.' % image_extension)
        else:
            image_extension = context.request.engine.extension
            logger.debug('No image format specified. Retrieving from the image extension: %s.' % image_extension)

    content_type = CONTENT_TYPE.get(image_extension, CONTENT_TYPE['.jpg'])

    if context.request.meta:
        # JSONP when a callback name is configured or supplied, plain JSON otherwise.
        context.request.meta_callback = context.config.META_CALLBACK_NAME or self.request.arguments.get('callback', [None])[0]
        content_type = 'text/javascript' if context.request.meta_callback else 'application/json'
        logger.debug('Metadata requested. Serving content type of %s.' % content_type)

    logger.debug('Content Type of %s detected.' % content_type)

    return image_extension, content_type
def put(self, bytes):
    '''Save to redis

    :param bytes: Bytes to write to the storage.
    :return: Redis key for the current url
    :rettype: string
    '''
    key = self.get_key_from_request()
    result_ttl = self.get_max_age()

    logger.debug(
        "[REDIS_RESULT_STORAGE] putting `{key}` with ttl `{ttl}`".format(
            key=key,
            ttl=result_ttl
        )
    )

    # SETEX stores the value and its ttl atomically; plain SET is used when
    # no expiry applies.
    with (yield Storage.pool.connected_client()) as client:
        if result_ttl > 0:
            yield client.call('SETEX', key, result_ttl, bytes)
        else:
            yield client.call('SET', key, bytes)

    raise tornado.gen.Return(key)
def distributed_collage(self, callback, orientation, alignment, urls):
    """Filter entry point: join up to MAX_IMAGES images into one collage.

    :param callback: continuation invoked when filtering is done (or on error)
    :param orientation: collage orientation
    :param alignment: collage alignment
    :param urls: pipe-separated list of image URLs
    """
    logger.debug('filters.distributed_collage: distributed_collage invoked')
    self.storage = self.context.modules.storage
    self.callback = callback
    self.orientation = orientation
    self.alignment = alignment
    self.urls = urls.split('|')
    self.images = {}

    total = len(self.urls)
    if total > self.MAX_IMAGES:
        logger.error('filters.distributed_collage: Too many images to join')
        callback()
    elif total == 0:
        logger.error('filters.distributed_collage: No images to join')
        callback()
    else:
        self.urls = self.urls[:self.MAX_IMAGES]
        for url in self.urls:
            self.images[url] = Picture(url, self)

        # second loop needed to ensure that all images are in self.images
        # otherwise, self.on_image_fetch can call the self.assembly()
        # without that all images had being loaded
        for url in self.urls:
            buffer = yield tornado.gen.maybe_future(self.storage.get(url))
            pic = self.images[url]
            if buffer is not None:
                pic.fill_buffer(buffer)
                self.on_image_fetch()
            else:
                pic.request()
def __init__(self, context):
    '''
    :param context: `Context` instance
    '''
    # Load configured community extensions first so their modules are
    # importable when the importer runs below.
    if context.config.get('COMMUNITY_EXTENSIONS', None):
        for extension in context.config.get('COMMUNITY_EXTENSIONS'):
            Extensions.load(extension)

    Importer.import_community_modules(context.modules.importer)
    self.context = Context.from_context(context)

    if self.context.config.get('COMMUNITY_MONKEYPATCH', True):
        logger.debug("Monkey patching ContextHandler.initialize")

        # Monkey patch the ContextHandler.initialize method to generate a
        # community context instead of the one from vanilla thumbor.
        def initialize(self, context):
            '''Initialize a new Context object

            :param context: thumbor.context.Context
            '''
            self.context = Context.from_context(context, request_handler=self)

        ContextHandler.initialize = initialize

    # NOTE: this variant passes the original (non-community) context to the
    # parent App, unlike the sibling that passes self.get_handlers().
    super(App, self).__init__(context)
def define_image_type(self, context, result):
    """Resolve the output image extension and HTTP content type.

    When a cached result is available its bytes are sniffed (unwrapping
    ResultStorageResult).  Otherwise precedence is: explicit request
    format, AUTO_WEBP, then the engine's own extension.  Metadata requests
    override the content type with JSON/JSONP.

    :return: tuple of (image_extension, content_type)
    """
    if result is not None:
        # The result storage may hand back a wrapper object or raw bytes.
        if isinstance(result, ResultStorageResult):
            buffer = result.buffer
        else:
            buffer = result
        image_extension = EXTENSION.get(BaseEngine.get_mimetype(buffer), ".jpg")
    else:
        image_extension = context.request.format
        if image_extension is not None:
            image_extension = ".%s" % image_extension
            logger.debug("Image format specified as %s." % image_extension)
        elif self.is_webp(context):
            image_extension = ".webp"
            logger.debug("Image format set by AUTO_WEBP as %s." % image_extension)
        else:
            image_extension = context.request.engine.extension
            logger.debug("No image format specified. Retrieving from the image extension: %s." % image_extension)

    content_type = CONTENT_TYPE.get(image_extension, CONTENT_TYPE[".jpg"])

    if context.request.meta:
        # JSONP when a callback name is configured or supplied, plain JSON otherwise.
        context.request.meta_callback = (
            context.config.META_CALLBACK_NAME
            or self.request.arguments.get("callback", [None])[0]
        )
        content_type = "text/javascript" if context.request.meta_callback else "application/json"
        logger.debug("Metadata requested. Serving content type of %s." % content_type)

    logger.debug("Content Type of %s detected." % content_type)

    return (image_extension, content_type)
def format(self, format):
    """Filter: record the requested output format, rejecting unknown formats."""
    requested = format.lower()
    if requested in ALLOWED_FORMATS:
        logger.debug('Format specified: %s' % requested)
        self.context.request.format = requested
    else:
        logger.debug('Format not allowed: %s' % requested)
        self.context.request.format = None
def exists(self, path):
    """Return truthy when *path* exists in the bucket and is not expired."""
    file_abspath = self.normalize_path(path)
    logger.debug("[STORAGE] getting from %s" % file_abspath)
    blob = self.get_bucket().get_blob(file_abspath)
    # Falsy (None) when the blob is missing; False when present but expired.
    return blob and not self.is_expired(blob)
def load(context, path, callback):
    """Load *path* from a Rackspace CloudFiles container.

    Caches the container connection per unique pyrax configuration tuple.
    Invokes ``callback`` with the object's bytes on success or ``None`` on
    failure.

    :param context: thumbor context carrying the Rackspace configuration
    :param path: image path to fetch
    :param callback: continuation receiving the bytes or None
    """
    key = (
        context.config.RACKSPACE_PYRAX_REGION,
        context.config.get('RACKSPACE_PYRAX_IDENTITY_TYPE', 'rackspace'),
        context.config.RACKSPACE_PYRAX_CFG,
        context.config.RACKSPACE_PYRAX_PUBLIC,
        context.config.RACKSPACE_LOADER_CONTAINER
    )
    if key not in CONNECTIONS:
        if context.config.RACKSPACE_PYRAX_REGION:
            pyrax.set_default_region(context.config.RACKSPACE_PYRAX_REGION)
        pyrax.set_setting('identity_type', context.config.get('RACKSPACE_PYRAX_IDENTITY_TYPE', 'rackspace'))
        pyrax.set_credential_file(expanduser(context.config.RACKSPACE_PYRAX_CFG))
        cf = pyrax.connect_to_cloudfiles(public=context.config.RACKSPACE_PYRAX_PUBLIC)
        CONNECTIONS[key] = cf.get_container(context.config.RACKSPACE_LOADER_CONTAINER)

    cont = CONNECTIONS[key]
    file_abspath = normalize_path(context, path)
    logger.debug("[LOADER] getting from %s/%s" % (context.config.RACKSPACE_LOADER_CONTAINER, file_abspath))
    try:
        obj = cont.get_object(file_abspath)
        if obj:
            logger.debug("[LOADER] Found object at %s/%s" % (context.config.RACKSPACE_LOADER_CONTAINER, file_abspath))
        else:
            logger.warning("[LOADER] Unable to find object %s/%s" % (context.config.RACKSPACE_LOADER_CONTAINER, file_abspath))
    except Exception as error:
        # FIX: a bare ``except:`` also swallowed SystemExit/KeyboardInterrupt
        # and hid the real failure; catch Exception and log it instead.
        logger.error("[LOADER] error fetching %s: %s" % (file_abspath, error))
        callback(None)
    else:
        callback(obj.get())
def get(self, path):
    """Read *path* from the rados-backed storage; None when it does not exist."""
    file_abspath = self.normalize_path(path)
    logger.debug('GET ' + path + ' (' + file_abspath + ')')
    try:
        object_size = self.storage.stat(file_abspath)[0]
        return self.storage.read(file_abspath, object_size)
    except rados.ObjectNotFound:
        return None
def get(self, callback):
    """Fetch the cached result for the current request URL.

    Invokes ``callback`` with a ResultStorageResult on a hit or ``None``
    when the file is missing/expired.  Returns ``None`` early (without
    calling back) when the path escapes the storage root.
    """
    path = self.context.request.url
    file_abspath = self.normalize_path(path)
    if not self.validate_path(file_abspath):
        # FIX: logger.warn is deprecated since Python 3.3; use warning.
        logger.warning(
            "[RESULT_STORAGE] unable to read from outside root path: %s" % file_abspath)
        return None
    logger.debug("[RESULT_STORAGE] getting from %s" % file_abspath)

    if not exists(file_abspath) or self.is_expired(file_abspath):
        logger.debug("[RESULT_STORAGE] image not found at %s" % file_abspath)
        callback(None)
    else:
        # FIX: images are binary — open in 'rb'; text mode corrupts the
        # buffer on Python 3 (and on Windows on Python 2).
        with open(file_abspath, 'rb') as f:
            buffer = f.read()

        result = ResultStorageResult(
            buffer=buffer,
            metadata={
                'LastModified': datetime.fromtimestamp(
                    getmtime(file_abspath)).replace(tzinfo=pytz.utc),
                'ContentLength': len(buffer),
                'ContentType': BaseEngine.get_mimetype(buffer)
            })

        callback(result)
async def format(self, file_format):
    """Filter: record the requested output format, rejecting unknown formats."""
    requested = file_format.lower()
    if requested in ALLOWED_FORMATS:
        logger.debug("Format specified: %s", requested)
        self.context.request.format = requested
    else:
        logger.debug("Format not allowed: %s", requested)
        self.context.request.format = None
def select_engine(self):
    """Pick (and memoize) the engine responsible for the current buffer.

    Matches the buffer's extension against each engine's declared
    extensions; engines exposing ``should_run`` get a veto over the buffer.

    :return: the selected engine's name
    :raises Exception: when no registered engine accepts the buffer
    """
    if self.lcl['selected_engine'] is not None:
        return self.lcl['selected_engine']

    if self.lcl['extension'] is None:
        ext = None
    else:
        ext = self.lcl['extension'].lstrip('.')
    logger.debug('[Proxy] Looking for a %s engine' % ext)

    # FIX: dict.iteritems() only exists on Python 2; items() behaves the
    # same for this read-only iteration and works on both versions.
    for enginename, extensions in self.lcl['engines'].items():
        engine = self.lcl[enginename]
        if ext in extensions:
            if hasattr(engine, 'should_run'):
                if engine.should_run(self.lcl['buffer']):
                    self.lcl['selected_engine'] = enginename
                    return enginename
            else:
                self.lcl['selected_engine'] = enginename
                return enginename

    raise Exception(
        'Unable to find a suitable engine, tried %r' % self.lcl['engines']
    )  # pragma: no cover
def load(self, buffer, extension):
    """Load *buffer* into the appropriate engine (image, video, or still frame).

    Selects an engine for the buffer, optionally extracts a single still
    frame from video input via ffmpeg, and normalizes the request format
    and extension before delegating to the chosen engine's load().
    """
    self.engine = self.get_engine(buffer, extension)
    if self.context.request.format and not self.context.request.filters:
        # RequestParameters.filters is an empty list when none are in the url,
        # and ImagingHandler._write_results_to_client assumes that if
        # context.request.format is set then it came from the format filter.
        # Since we set the format in the engine this causes a TypeError,
        # so we need to ensure that it is a string here.
        self.context.request.filters = ""
    logger.debug("Set engine to %s (extension %s)" % (type(self.engine).__module__, extension))

    still_frame_pos = getattr(self.context.request, 'still_position', None)
    # Are we requesting a still frame?
    if self.engine is self.ffmpeg_engine and still_frame_pos:
        # Extract a single PNG frame at the requested position, then hand
        # the result over to the image engine.
        with named_tmp_file(data=buffer, suffix=extension) as src_file:
            buffer = self.ffmpeg_engine.run_ffmpeg(
                src_file, 'png', ['-ss', still_frame_pos, '-frames:v', '1'])
        self.engine = self.image_engine
        extension = '.png'
        if not self.context.request.format:
            self.context.request.format = 'jpg'

    # Change the default extension if we're transcoding video
    if self.engine is self.ffmpeg_engine and extension == ".jpg":
        extension = ".mp4"

    self.extension = extension
    self.engine.load(buffer, extension)
def get(self, callback):
    """Fetch the cached result for the current request URL.

    Invokes ``callback`` with a ResultStorageResult on a hit or ``None``
    when the file is missing/expired.  Returns ``None`` early (without
    calling back) when the path escapes the storage root.
    """
    path = self.context.request.url
    file_abspath = self.normalize_path(path)
    if not self.validate_path(file_abspath):
        # FIX: logger.warn is deprecated since Python 3.3; use warning.
        logger.warning("[RESULT_STORAGE] unable to read from outside root path: %s" % file_abspath)
        return None
    logger.debug("[RESULT_STORAGE] getting from %s" % file_abspath)

    if not exists(file_abspath) or self.is_expired(file_abspath):
        logger.debug("[RESULT_STORAGE] image not found at %s" % file_abspath)
        callback(None)
    else:
        # FIX: images are binary — open in 'rb'; text mode corrupts the
        # buffer on Python 3 (and on Windows on Python 2).
        with open(file_abspath, 'rb') as f:
            buffer = f.read()
        result = ResultStorageResult(
            buffer=buffer,
            metadata={
                'LastModified': datetime.fromtimestamp(getmtime(file_abspath)).replace(tzinfo=pytz.utc),
                'ContentLength': len(buffer),
                'ContentType': BaseEngine.get_mimetype(buffer)
            }
        )
        callback(result)
def optimize(self, buffer, input_file, output_file):
    """Run imgmin over *input_file*, writing the result to *output_file*.

    Opaque images are first flattened to a quality-100 intermediate JPEG,
    since imgmin's quality search targets JPEG input.
    """
    input_image = Image.open(input_file)
    # Per-band (min, max) extrema; a 4th band whose minimum is below 255
    # means the alpha channel actually carries transparency.
    stats = ImageStat.Stat(input_image).extrema
    has_alpha = len(stats) > 3 and stats[3][0] < 255

    # FIX: `has_alpha == False` is an anti-idiom; use `not has_alpha`.
    if not has_alpha:
        intermediary = output_file + "-intermediate"
        input_image.save(intermediary, "JPEG", quality=100)
        input_file = intermediary

    # NOTE(review): shell=True with interpolated paths is injection-prone;
    # acceptable only while both paths come from trusted temp files — confirm.
    command = (
        "%s --error-threshold %s --color-density-ratio %s --min-unique-colors %s --quality-out-max %s --quality-out-min %s --quality-in-min %s --max-steps %s %s %s > /dev/null 2>&1" % (
            self.imgmin_path,
            self.error_threshold,
            self.color_density_ratio,
            self.min_unique_colors,
            self.quality_out_max,
            self.quality_out_min,
            self.quality_in_min,
            self.max_steps,
            input_file,
            output_file,
        )
    )
    with open(os.devnull) as null:
        logger.debug("[AUTO IMGMIN] running: " + command)
        subprocess.call(command, shell=True, stdin=null)
def distributed_collage(self, callback, orientation, alignment, urls):
    """Filter entry point: join up to MAX_IMAGES images into one collage.

    :param callback: continuation invoked when filtering is done (or on error)
    :param orientation: collage orientation
    :param alignment: collage alignment
    :param urls: pipe-separated list of image URLs
    """
    logger.debug(
        'filters.distributed_collage: distributed_collage invoked')
    self.storage = self.context.modules.storage
    self.callback = callback
    self.orientation = orientation
    self.alignment = alignment
    self.urls = urls.split('|')
    self.images = {}

    total = len(self.urls)
    if total > self.MAX_IMAGES:
        logger.error(
            'filters.distributed_collage: Too many images to join')
        callback()
    elif total == 0:
        logger.error('filters.distributed_collage: No images to join')
        callback()
    else:
        self.urls = self.urls[:self.MAX_IMAGES]
        for url in self.urls:
            self.images[url] = Picture(url, self)

        # second loop needed to ensure that all images are in self.images
        # otherwise, self.on_image_fetch can call the self.assembly()
        # without that all images had being loaded
        for url in self.urls:
            buffer = yield tornado.gen.maybe_future(self.storage.get(url))
            pic = self.images[url]
            if buffer is not None:
                pic.fill_buffer(buffer)
                self.on_image_fetch()
            else:
                pic.request()
def load_sync(context, url, callback):
    """Probe a remote video's duration with ffprobe, asynchronously.

    Spawns ffprobe as a Tornado Subprocess; the exit callback forwards the
    parsed duration (and the context/url) to ``_parse_time_status``.
    """
    # Disable storage of original. These lines are useful if
    # you want your Thumbor instance to store all originals persistently
    # except video frames.
    #
    # from thumbor.storages.no_storage import Storage as NoStorage
    # context.modules.storage = NoStorage(context)

    unquoted_url = unquote(url)
    # Print only the container duration as a bare number
    # (noprint_wrappers/nokey strip the section header and key name).
    command = BaseWikimediaEngine.wrap_command([
        context.config.FFPROBE_PATH,
        '-v', 'error',
        '-show_entries', 'format=duration',
        '-of', 'default=noprint_wrappers=1:nokey=1',
        '%s%s' % (uri_scheme, unquoted_url)
    ], context)
    logger.debug('Command: %r' % command)
    process = Subprocess(command, stdout=Subprocess.STREAM)
    process.set_exit_callback(
        partial(
            _parse_time_status,
            context,
            unquoted_url,
            callback,
            process
        )
    )
def _write_results_to_client(self, context, results, content_type):
    """Write the final image (or metadata) response with cache/CORS headers.

    :param context: request context (also used to snapshot response headers)
    :param results: raw bytes or a ResultStorageResult wrapper
    :param content_type: value for the Content-Type header
    """
    max_age = context.config.MAX_AGE
    if context.request.max_age is not None:
        max_age = context.request.max_age
    if context.request.prevent_result_storage or context.request.detection_error:
        # Temporary/error results get the short-lived cache policy.
        max_age = context.config.MAX_AGE_TEMP_IMAGE
    if max_age:
        self.set_header('Cache-Control', 'max-age=' + str(max_age) + ',public')
        self.set_header('Expires', datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age))

    if hasattr(self.context.config, 'ACCESS_CONTROL_ALLOW_ORIGIN_HEADER'):
        ac_header = self.context.config.ACCESS_CONTROL_ALLOW_ORIGIN_HEADER
        self.set_header('Access-Control-Allow-Origin', ac_header)
        logger.debug('CORS header found. Set to: %s' % ac_header)

    self.set_header('Server', 'Thumbor/%s' % __version__)
    self.set_header('Content-Type', content_type)
    if self.is_webp(self.context):
        # The response depends on the Accept header when auto-webp applies.
        self.set_header('Vary', 'Accept')

    # Snapshot the outgoing headers for downstream consumers.
    context.headers = self._headers.copy()

    if isinstance(results, ResultStorageResult):
        buffer = results.buffer
    else:
        buffer = results
    self.write(buffer)
    self.finish()
async def put(self, path, file_bytes):
    """Store *file_bytes* at *path* in the filesystem storage.

    Skips storage when the request explicitly asked for max_age == 0.
    Writes go to a unique temp file and are atomically moved into place;
    when a max_age is set, a sidecar expiry file is moved in first.

    :param path: storage path for the image
    :param file_bytes: raw bytes to persist
    :return: the storage path, or None when storage was skipped
    """
    if self.context.request.max_age is not None and self.context.request.max_age == 0:
        return

    file_abspath = self.path_on_filesystem(path)
    temp_abspath = "%s.%s" % (file_abspath, str(uuid4()).replace("-", ""))
    file_dir_abspath = dirname(file_abspath)
    logger.debug("creating tempfile for %s in %s...", path, temp_abspath)

    self.ensure_dir(file_dir_abspath)
    with open(temp_abspath, "wb") as _file:
        _file.write(file_bytes)

    if self.context.request.max_age is not None:
        # Sidecar file holding the expiry; moved before the image so the
        # image never exists without its expiry marker.
        with open(temp_abspath + Storage.EXPIRE_EXT, "wb") as _file:
            _file.write(str.encode(str(self.context.request.max_age)))
        move(temp_abspath + Storage.EXPIRE_EXT, file_abspath + Storage.EXPIRE_EXT)

    logger.debug("moving tempfile %s to %s...", temp_abspath, file_abspath)
    move(temp_abspath, file_abspath)

    return path
def define_image_type(self, context, result):
    """Resolve the output image extension and HTTP content type.

    Precedence when no cached result is available: explicit request format,
    then AUTO_WEBP, then the engine's own extension.  Metadata requests
    override the content type with JSON/JSONP.

    :return: tuple of (image_extension, content_type)
    """
    if result is not None:
        # Sniff the type from the cached result bytes, defaulting to jpg.
        image_extension = EXTENSION.get(BaseEngine.get_mimetype(result), '.jpg')
    else:
        image_extension = context.request.format
        if image_extension is not None:
            image_extension = '.%s' % image_extension
            logger.debug('Image format specified as %s.' % image_extension)
        elif self.is_webp(context):
            image_extension = '.webp'
            logger.debug('Image format set by AUTO_WEBP as %s.' % image_extension)
        else:
            image_extension = context.request.engine.extension
            logger.debug('No image format specified. Retrieving from the image extension: %s.' % image_extension)

    content_type = CONTENT_TYPE.get(image_extension, CONTENT_TYPE['.jpg'])

    if context.request.meta:
        # JSONP when a callback name is configured or supplied, plain JSON otherwise.
        context.request.meta_callback = context.config.META_CALLBACK_NAME or self.request.arguments.get('callback', [None])[0]
        content_type = 'text/javascript' if context.request.meta_callback else 'application/json'
        logger.debug('Metadata requested. Serving content type of %s.' % content_type)

    logger.debug('Content Type of %s detected.' % content_type)

    return (image_extension, content_type)
def get(self, callback):
    """Fetch the cached result from a DigitalOcean Spaces bucket.

    Invokes ``callback`` exactly once: with a ResultStorageResult on
    success or ``None`` on any failure.
    """
    logger.debug("INSIDE SPACES GET")
    key = self.get_key_name(self.context.request.url)
    buff = BytesIO()
    session = boto3.session.Session()
    client = session.resource(
        's3',
        region_name=self.context.config.SPACES_REGION,
        endpoint_url='https://' + self.context.config.SPACES_ENDPOINT + '.digitaloceanspaces.com',
        aws_access_key_id=self.context.config.SPACES_KEY,
        aws_secret_access_key=self.context.config.SPACES_SECRET)
    try:
        objkey = client.Bucket(
            self.context.config.SPACES_BUCKET).Object(key)
        objkey.download_fileobj(buff)
        result = ResultStorageResult()
        result.buffer = buff
        result.metadata = objkey.get_metadata("Metadata")
        result.metadata.pop('Body')
        logger.debug(result.metadata)
        callback(result)
    except Exception as error:
        # FIX: the original used a bare ``except:`` and — worse — had an
        # unconditional trailing ``callback(None)`` that fired a SECOND time
        # after every success and every failure.  The callback must run
        # exactly once per call.
        logger.debug("[SPACES] get failed: %s", error)
        callback(None)
def command(
    cls,
    context,
    pre=None,
    post=None,
    buffer='',
    input_temp_file=None
):
    """Run exiftool over *buffer* (or an existing temp file) and return stdout.

    :param context: thumbor context providing EXIFTOOL_PATH and log extras
    :param pre: exiftool arguments placed before the file name
    :param post: exiftool arguments placed after the file name
    :param buffer: data written to a fresh temp file when none is supplied
    :param input_temp_file: optional pre-populated temp file to reuse
    :return: exiftool's stdout
    """
    # FIX: the defaults were mutable lists (pre=[], post=[]) shared across
    # calls; use None sentinels instead.  Behavior for callers is unchanged.
    if pre is None:
        pre = []
    if post is None:
        post = []

    if not input_temp_file:
        input_temp_file = NamedTemporaryFile()
        input_temp_file.write(buffer)
        input_temp_file.flush()

    command = [context.config.EXIFTOOL_PATH]
    command += pre
    command.append(input_temp_file.name)
    command += post

    logger.debug('[ExiftoolRunner] command: %r' % command, extra=log_extra(context))

    code, stderr, stdout = ShellRunner.command(command, context)
    # Closing the NamedTemporaryFile also deletes it.
    input_temp_file.close()

    if stderr:
        logger.error('[ExiftoolRunner] error: %r' % stderr, extra=log_extra(context))

    return stdout
def put(self, bytes):
    '''Save to redis

    :param bytes: Bytes to write to the storage.
    :return: Redis key for the current url
    :rettype: string
    '''
    key = self.get_key_from_request()
    result_ttl = self.get_max_age()

    logger.debug(
        "[REDIS_RESULT_STORAGE] putting `{key}` with ttl `{ttl}`".format(key=key, ttl=result_ttl)
    )

    storage = self.get_storage()
    storage.set(key, bytes)
    if result_ttl > 0:
        # Expire at an absolute timestamp rather than a relative ttl.
        expire_when = datetime.now() + timedelta(seconds=result_ttl)
        storage.expireat(key, expire_when)

    return key
def get(self):
    """Read the cached result for the current URL from rados; None on miss."""
    path = self.context.request.url
    file_abspath = self.normalize_path(path)
    logger.debug('GET (result) ' + path + ' (' + file_abspath + ')')
    try:
        object_size = self.storage.stat(file_abspath)[0]
        return self.storage.read(file_abspath, object_size)
    except rados.ObjectNotFound:
        return None
def put(self, bytes):
    """Store the rendered result in the configured CloudFiles container.

    Optionally schedules deletion of the object when expiry is enabled.
    """
    cf = pyrax.cloudfiles
    cont = cf.get_container(self.context.config.RACKSPACE_RESULT_STORAGES_CONTAINER)
    file_abspath = self.normalize_path(self.context.request.url)
    obj = cont.store_object(file_abspath, bytes)
    logger.debug("[RESULT_STORAGE] putting in %s/%s" % (self.context.config.RACKSPACE_RESULT_STORAGES_CONTAINER, file_abspath))
    # NOTE(review): the toggle is RACKSPACE_RESULT_STORAGE_EXPIRES but the
    # duration comes from RESULT_STORAGE_EXPIRATION_SECONDS, and it is passed
    # as a string — confirm both are intended and that pyrax accepts a str.
    if(self.context.config.RACKSPACE_RESULT_STORAGE_EXPIRES):
        cont.delete_object_in_seconds(obj=obj,seconds=str(self.context.config.RESULT_STORAGE_EXPIRATION_SECONDS))
def resize(self, width, height):
    """Resize the image, promoting palette mode to RGBA first.

    Palette ('P') images are converted to RGBA before resizing so the
    resampling filter can interpolate real colors instead of palette
    indices.
    """
    if self.image.mode == 'P':
        logger.debug(
            'converting image from 8-bit palette to 32-bit RGBA for resize'
        )
        self.image = self.image.convert('RGBA')

    # FIX: Image.ANTIALIAS has been deprecated for years and was removed in
    # Pillow 10; Image.LANCZOS is the same filter under its canonical name
    # (alias available since Pillow 2.7).
    self.image = self.image.resize((int(width), int(height)), Image.LANCZOS)
def stop_loop(now, deadline):
    # Graceful-shutdown helper: reschedules itself once per second until
    # either the deadline passes or the IOLoop has no pending work, then
    # stops the loop.
    # NOTE(review): _callbacks/_timeouts are private IOLoop attributes that
    # changed across Tornado versions — confirm against the pinned version.
    if now < deadline and (io_loop._callbacks or io_loop._timeouts):
        logger.debug('Waiting for next tick')
        now += 1
        io_loop.add_timeout(now, stop_loop, now, deadline)
    else:
        io_loop.stop()
        logger.debug('Shutdown finally')
def format(self, format):
    """Filter: record the requested output format, rejecting unknown formats."""
    # FIX: logger.warn is deprecated since Python 3.3; use warning.
    logger.warning('Setting format to %s' % format)
    if format.lower() not in ALLOWED_FORMATS:
        logger.debug('Format not allowed: %s' % format.lower())
        self.context.request.format = None
    else:
        logger.debug('Format specified: %s' % format.lower())
        self.context.request.format = format.lower()
def _normalize_path(self, path):
    """Shard *path* under a sha1 digest directory to spread request-rate load."""
    # https://cloud.google.com/storage/docs/request-rate
    stripped = self.no_http.sub('', path)
    digest = hashlib.sha1(stripped.encode('utf-8')).hexdigest()
    parts = stripped.split('/')
    # Insert the digest between the first segment and the remainder.
    normalized = os.path.join(parts[0], digest, *parts[1:])
    logger.debug("[GoogleCloudStorage] for path: %s normalized %s" % (path, normalized))
    return normalized
async def put(self):
    """Append the request's query string as a new whitelisted-dimensions entry."""
    entry = self.request.query
    contents = await self.get_whitelist_dimensions_contents()
    logger.debug("Adding to whitelist dimensions: %s", entry)
    contents += entry + "\n"
    await self.context.modules.storage.put("whitelist_dimensions.txt", contents.encode())
    self.set_status(200)
async def _write_results_to_client(self, results, content_type):
    """Write the final image response with cache, CORS, and Vary headers.

    :param results: raw bytes or a ResultStorageResult wrapper
    :param content_type: value for the Content-Type header
    """
    max_age = self.context.config.MAX_AGE
    if self.context.request.max_age is not None:
        max_age = self.context.request.max_age
    if (self.context.request.prevent_result_storage
            or self.context.request.detection_error):
        # Temporary/error results get the short-lived cache policy.
        max_age = self.context.config.MAX_AGE_TEMP_IMAGE
    if max_age:
        self.set_header("Cache-Control", "max-age=" + str(max_age) + ",public")
        self.set_header(
            "Expires",
            datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age),
        )

    if hasattr(self.context.config, "ACCESS_CONTROL_ALLOW_ORIGIN_HEADER"):
        ac_header = self.context.config.ACCESS_CONTROL_ALLOW_ORIGIN_HEADER
        if ac_header is not False:
            self.set_header("Access-Control-Allow-Origin", ac_header)
            logger.debug("CORS header found. Set to: %s", ac_header)

    self.set_header("Server", f"Thumbor/{__version__}")
    self.set_header("Content-Type", content_type)

    if isinstance(results, ResultStorageResult):
        buffer = results.buffer
    else:
        buffer = results

    # Vary: Accept only applies when auto-webp could change the response:
    # auto-convert configured?
    should_vary = self.context.config.AUTO_WEBP
    # we have image (not video)
    should_vary = should_vary and content_type.startswith("image/")
    # output format is not requested via format filter
    should_vary = should_vary and not (
        self.context.request.format
        and bool(
            # format is supported by filter
            re.search(
                r"format\([^)]+\)",
                self.context.request.filters))  # filter is in request
    )
    # our image is not animated gif
    should_vary = should_vary and not self.is_animated_gif(buffer)
    if should_vary:
        self.set_header("Vary", "Accept")

    # Snapshot the outgoing headers for downstream consumers.
    self.context.headers = self._headers.copy()

    self._response_ext = EXTENSION.get(content_type)
    self._response_length = len(buffer)
    self.write(buffer)
    self.finish()
def get(self):
    """Return the cached result bytes for the current request URL, or None."""
    file_abspath = self.normalize_path(self.context.request.url)
    file_key = self.storage.get_key(file_abspath)
    # Treat an expired key the same as a missing one.
    if file_key and not self.is_expired(file_key):
        return file_key.read()
    logger.debug("[RESULT_STORAGE] s3 key not found at %s" % file_abspath)
    return None
def get(self, path):
    """Fetch *path* from the GCS bucket; None when absent or expired."""
    file_abspath = self.normalize_path(path)
    logger.debug("[STORAGE] getting from %s" % file_abspath)
    blob = self.get_bucket().get_blob(file_abspath)
    if blob and not self.is_expired(blob):
        return blob.download_as_string()
    return None
def exists(self, path):
    """Return True when *path* is present in the rados result storage."""
    file_abspath = self.normalize_path(path)
    try:
        self.storage.stat(file_abspath)
    except rados.ObjectNotFound:
        logger.debug('EXIST (result) ' + path + ' (' + file_abspath + ') : NO')
        return False
    logger.debug('EXIST (result) ' + path + ' (' + file_abspath + ') : YES')
    return True
def optimize(self, buffer, input_file, output_file):
    """Run jpeg-recompress over *input_file*, writing to *output_file*."""
    # NOTE(review): shell=True with interpolated paths — acceptable only
    # while both paths come from trusted temp files.
    command = '{} --strip --accurate --loops 10 {} {} '.format(
        self.jpegrecompress_path, input_file, output_file)
    with open(os.devnull) as null:
        logger.debug("[JPEG-RECOMPRESS] running: " + command)
        subprocess.call(command, shell=True, stdin=null)
def get(self, path):
    """Return the stored bytes for *path* from S3, or None on miss/expiry."""
    file_abspath = self.normalize_path(path)
    file_key = self.storage.get_key(file_abspath)
    # Treat an expired key the same as a missing one.
    if file_key and not self.is_expired(file_key):
        return file_key.read()
    logger.debug("[STORAGE] s3 key not found at %s" % file_abspath)
    return None
def put(self, bytes):
    """Store the rendered result bytes on the filesystem.

    :param bytes: raw image payload to persist
    """
    file_abspath = self.__normalize_path(self.context.request.url)
    file_dir_abspath = dirname(file_abspath)
    logger.debug("[RESULT_STORAGE] putting at %s (%s)" % (file_abspath, file_dir_abspath))
    self.__ensure_dir(file_dir_abspath)
    # FIX: image data is binary — write in 'wb'; text mode fails outright on
    # Python 3 and corrupts data on Windows on Python 2.
    # NOTE(review): unlike the temp-file+move storages, this write is not
    # atomic; concurrent readers may observe a partial file.
    with open(file_abspath, 'wb') as _file:
        _file.write(bytes)
def get(self):
    """Return the cached result bytes for the current request URL.

    :return: the stored bytes, or None when missing or expired
    """
    path = self.context.request.url
    file_abspath = self.__normalize_path(path)
    logger.debug("[RESULT_STORAGE] getting from %s" % file_abspath)
    if not exists(file_abspath) or self.__is_expired(file_abspath):
        logger.debug("[RESULT_STORAGE] image not found at %s" % file_abspath)
        return None
    # FIX: the original leaked the file handle (open without close) and read
    # binary image data in text mode; use a context manager with 'rb'.
    with open(file_abspath, 'rb') as image_file:
        return image_file.read()
async def execute_image_operations(self):
    """Serve an image request (async pipeline).

    Consults the result storage first (with hit/miss/timing metrics and a
    500 on storage failure); otherwise clamps requested dimensions and
    runs the PRE_LOAD filters before fetching the source image.
    """
    self.context.request.quality = None

    req = self.context.request
    conf = self.context.config

    should_store = (self.context.config.RESULT_STORAGE_STORES_UNSAFE
                    or not self.context.request.unsafe)
    if self.context.modules.result_storage and should_store:
        start = datetime.datetime.now()

        try:
            result = await self.context.modules.result_storage.get()
        except Exception as error:
            logger.exception("[BaseHander.execute_image_operations] %s", error)
            self._error(
                500,
                "Error while trying to get the image "
                "from the result storage: {}".format(error),
            )
            return

        finish = datetime.datetime.now()

        # Incoming time is reported in milliseconds.
        self.context.metrics.timing(
            "result_storage.incoming_time",
            (finish - start).total_seconds() * 1000,
        )

        if result is None:
            self.context.metrics.incr("result_storage.miss")
        else:
            self.context.metrics.incr("result_storage.hit")
            self.context.metrics.incr("result_storage.bytes_read", len(result))
            logger.debug("[RESULT_STORAGE] IMAGE FOUND: %s", req.url)
            await self.finish_request(result)
            return

    # Width/height may be non-numeric strings (e.g. "orig"); only clamp numbers.
    if (conf.MAX_WIDTH and (not isinstance(req.width, str))
            and req.width > conf.MAX_WIDTH):
        req.width = conf.MAX_WIDTH
    if (conf.MAX_HEIGHT and (not isinstance(req.height, str))
            and req.height > conf.MAX_HEIGHT):
        req.height = conf.MAX_HEIGHT

    req.meta_callback = (conf.META_CALLBACK_NAME
                         or self.request.arguments.get("callback", [None])[0])

    self.filters_runner = self.context.filters_factory.create_instances(
        self.context, self.context.request.filters)

    # Apply all the filters from the PRE_LOAD phase
    # and call get_image() afterwards.
    await self.filters_runner.apply_filters(
        thumbor.filters.PHASE_PRE_LOAD,
    )
    await self.get_image()
def resize(self, width, height):
    """Resize the image, promoting bilevel/palette modes to RGBA first."""
    # '1' (bilevel) and 'P' (palette) modes resample poorly; convert them
    # to RGBA so the filter interpolates real colors.
    if self.image.mode in ['1', 'P']:
        logger.debug(
            'converting image from 8-bit/1-bit palette to 32-bit RGBA for resize'
        )
        self.image = self.image.convert('RGBA')
    resample = self.get_resize_filter()
    target_size = (int(width), int(height))
    self.image = self.image.resize(target_size, resample)
def get(self, path):
    """Fetch *path* from S3 via boto; return its bytes or None on failure."""
    logging.debug('Retrieving %s from S3' % (path,))
    k = Key(self.s3)
    k.key = path
    try:
        # FIX: the original called ``s3.get_contents_from_string(bytes)``,
        # referencing an undefined name ``s3`` and the builtin ``bytes``;
        # the boto Key API for reading an object is get_contents_as_string().
        return k.get_contents_as_string()
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; misses still return None.
        return None
def resize(self, width, height):
    """Resize the image using the configured resampling filter."""
    mode = self.image.mode
    if self.image.mode == 'P':
        logger.debug('converting image from 8-bit palette to 32-bit RGBA for resize')
        # Only the target mode passed to draft() changes here.
        # NOTE(review): unlike the sibling engines this never calls
        # convert('RGBA') on the image itself — confirm that is intended.
        mode = 'RGBA'
    resample = get_resize_filter(self.context.config)
    # draft() can configure a faster, reduced-scale decode for some formats;
    # the actual resize below produces the final dimensions.
    self.image.draft(mode, (int(width), int(height)))
    self.image = self.image.resize((int(width), int(height)), resample)