def signal_handler(server, config, sig, frame):
    # Gracefully shut down a Tornado HTTP server on a caught signal:
    # first stop accepting new requests, then drain pending IOLoop work
    # before stopping the loop itself.
    io_loop = tornado.ioloop.IOLoop.instance()

    def stop_loop(now, deadline):
        # Re-check once per second until the deadline; only stop the loop
        # once there are no pending callbacks/timeouts.
        # NOTE(review): _callbacks/_timeouts are Tornado private attributes
        # and may break across Tornado versions — confirm.
        if now < deadline and (io_loop._callbacks or io_loop._timeouts):
            logger.debug('Waiting for next tick')
            now += 1
            io_loop.add_timeout(now, stop_loop, now, deadline)
        else:
            io_loop.stop()
            logger.debug('Shutdown finally')

    def stop_server(now, deadline):
        # Keep the server alive until the deadline, then stop it and hand
        # over to stop_loop for the IOLoop shutdown phase.
        if now < deadline:
            logger.debug('Waiting for next tick')
            now += 1
            io_loop.add_timeout(now, stop_server, now, deadline)
        else:
            server.stop()
            logger.debug('Stopped http server.')
            logger.debug('Will shutdown io in maximum %d seconds ...',
                         config.MAX_WAIT_SECONDS_BEFORE_IO_SHUTDOWN)
            now = time.time()
            stop_loop(now, now + config.MAX_WAIT_SECONDS_BEFORE_IO_SHUTDOWN)

    def shutdown():
        logger.debug('Stopping http server (i.e. stopping accepting new requests) in %d seconds ...', config.MAX_WAIT_SECONDS_BEFORE_SERVER_SHUTDOWN)
        now = time.time()
        stop_server(now, now + config.MAX_WAIT_SECONDS_BEFORE_SERVER_SHUTDOWN)

    logger.warning('Caught signal: %d. Shutting down server.', sig)
    # add_callback_from_signal is the signal-safe way to schedule work on
    # the IOLoop from inside a signal handler.
    io_loop.add_callback_from_signal(shutdown)
def get(self, **kw):
    # Entry point for image requests: validates the source image, parses
    # request parameters, and verifies the URL signature (or the "unsafe"
    # marker) before running the image operations.
    url = self.request.uri

    if not self.validate(kw["image"]):
        self._error(404, "No original image was specified in the given URL")
        return

    self.context.request = RequestParameters(**kw)
    # "unsafe" arrives as a literal path segment; normalize it to a bool.
    self.context.request.unsafe = self.context.request.unsafe == "unsafe"

    # Keep the query string as part of the image URL, then re-quote so it
    # is safe to pass downstream.
    if self.request.query:
        self.context.request.image_url += "?%s" % self.request.query
    self.context.request.image_url = quote(self.context.request.image_url, "/:?%=&")

    # Exactly one of (hash, unsafe) must be present on the URL.
    has_none = not self.context.request.unsafe and not self.context.request.hash
    has_both = self.context.request.unsafe and self.context.request.hash

    if has_none or has_both:
        self._error(404, "URL does not have hash or unsafe, or has both: %s" % url)
        return

    if self.context.request.unsafe and not self.context.config.ALLOW_UNSAFE_URL:
        self._error(404, "URL has unsafe but unsafe is not allowed by the config: %s" % url)
        return

    url_signature = self.context.request.hash
    if url_signature:
        signer = Signer(self.context.server.security_key)

        # Signature covers the URL with the hash segment stripped out.
        url_to_validate = url.replace("/%s/" % self.context.request.hash, "")

        valid = signer.validate(url_signature, url_to_validate)

        if not valid and self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            # Retrieves security key for this image if it has been seen before
            security_key = self.context.modules.storage.get_crypto(self.context.request.image_url)
            if security_key is not None:
                signer = Signer(security_key)
                valid = signer.validate(url_signature, url_to_validate)

        if not valid:
            is_valid = True
            if self.context.config.ALLOW_OLD_URLS:
                # Last resort: try the legacy (pre-3.0) encrypted URL format.
                cr = Cryptor(self.context.server.security_key)
                options = cr.get_options(self.context.request.hash, self.context.request.image_url)
                if options is None:
                    is_valid = False
                else:
                    self.context.request = RequestParameters(**options)
                    logger.warning("OLD FORMAT URL DETECTED!!! This format of URL will be discontinued in upcoming versions. Please start using the new format as soon as possible. \nMore info at https://github.com/globocom/thumbor/wiki/3.0.0-release-changes")
            else:
                is_valid = False

            if not is_valid:
                self._error(404, "Malformed URL: %s" % url)
                return

    return self.execute_image_operations()
async def detect(self):
    """Corner-feature detector.

    Converts the image to grayscale, records good-to-track corners as
    focal points, then delegates to the next detector in the chain.
    """
    engine = self.context.modules.engine
    try:
        grayscale = engine.convert_to_grayscale(update_image=False, alpha=False)
        img = np.array(grayscale)
    except Exception as error:
        logger.exception(error)
        logger.warning(
            "Error during feature detection; skipping to next detector")
        return await self.next()  # pylint: disable=not-callable

    corners = cv2.goodFeaturesToTrack(  # pylint: disable=no-member
        img,
        maxCorners=20,
        qualityLevel=0.04,
        minDistance=1.0,
        useHarrisDetector=False,
    )
    if corners is not None:
        for corner in corners:
            x_coord, y_coord = corner.ravel()
            self.context.request.focal_points.append(
                FocalPoint(x_coord.item(), y_coord.item(), 1))
    return await self.next()  # pylint: disable=not-callable
def read(self, extension=None, quality=None):
    """Serialize the current image and return its bytes.

    Falls back to the request's quality when none is given, enables
    optimized/progressive output for JPEGs, and embeds the ICC profile
    captured at load time.
    """
    if quality is None:
        quality = self.context.request.quality
    # returns image buffer in byte format.
    img_buffer = StringIO()
    ext = extension or self.extension
    options = {'quality': quality}
    if ext == '.jpg' or ext == '.jpeg':
        options['optimize'] = True
        options['progressive'] = True
    if self.icc_profile is not None:
        options['icc_profile'] = self.icc_profile
    try:
        self.image.save(img_buffer, FORMATS[ext], **options)
    except IOError:
        # PIL raises IOError when the encoded output exceeds MAXBLOCK;
        # retry without the extra save options.
        logger.warning('Could not save as improved image, consider to increase ImageFile.MAXBLOCK')
        self.image.save(img_buffer, FORMATS[ext])
    except KeyError:
        # ROBUSTNESS: unknown extension previously crashed with KeyError.
        # Mirror the sibling read() implementations: force PNG for
        # palette/alpha modes, JPEG otherwise.
        if self.image.mode in ['P', 'RGBA', 'LA']:
            self.image.format = FORMATS['.png']
            self.image.save(img_buffer, FORMATS['.png'])
        else:
            self.image.format = FORMATS['.jpg']
            self.image.save(img_buffer, FORMATS['.jpg'])
    results = img_buffer.getvalue()
    img_buffer.close()
    return results
def load(context, path, callback):
    # Loader that serves an extracted frame for videos and the first page
    # for PDFs; everything else is delegated to the plain file loader.
    file_path = join(context.config.FILE_LOADER_ROOT_PATH.rstrip('/'),
                     unquote(path).lstrip('/'))
    file_path = abspath(file_path)
    # NOTE(review): plain string-prefix check — a sibling path that merely
    # starts with the root path string would also pass; confirm the root
    # path ends with a separator or tighten with os.path.commonpath.
    inside_root_path = file_path.startswith(
        context.config.FILE_LOADER_ROOT_PATH)

    if inside_root_path and is_video(file_path):
        # Extract a frame from the video and load it instead of the original path
        logger.warning('processing video... %s', file_path)
        with get_video_frame(context, file_path) as image_path:
            if image_path:
                callback(read_file(image_path))
                return
    elif inside_root_path and is_pdf(file_path):
        # extract first page of pdf and load it
        logger.warning('processing pdf... %s', file_path)
        with get_pdf_page(context, file_path) as image_path:
            if image_path:
                callback(read_file(image_path))
                return
    else:
        # First attempt to load with file_loader
        file_loader.load(context, path, callback)
        return

    # If we got here, there was a failure
    result = LoaderResult()
    result.error = LoaderResult.ERROR_NOT_FOUND
    result.successful = False
    callback(result)
async def _process_result_from_storage(self, result):
    # Honour If-Modified-Since and emit Last-Modified when the feature is
    # enabled in configuration.
    if self.context.config.SEND_IF_MODIFIED_LAST_MODIFIED_HEADERS:
        # Handle If-Modified-Since & Last-Modified header
        try:
            # A ResultStorageResult carries its own timestamp; otherwise
            # ask the result-storage backend for it.
            if isinstance(result, ResultStorageResult):
                result_last_modified = result.last_modified
            else:
                result_last_modified = await (
                    self.context.modules.result_storage.last_updated())
            if result_last_modified:
                if "If-Modified-Since" in self.request.headers:
                    date_modified_since = datetime.datetime.strptime(
                        self.request.headers["If-Modified-Since"],
                        HTTP_DATE_FMT,
                    ).replace(tzinfo=pytz.utc)
                    if result_last_modified <= date_modified_since:
                        # Client already has the latest copy.
                        self.set_status(304)
                        self.finish()
                        return
                self.set_header(
                    "Last-Modified",
                    result_last_modified.strftime(HTTP_DATE_FMT),
                )
        except NotImplementedError:
            # Not every storage backend implements last_updated().
            logger.warning(
                "last_updated method is not supported by your "
                "result storage service, hence If-Modified-Since & "
                "Last-Updated headers support is disabled.")
async def trim(self):
    # Trim uniform borders from the image unless trimming is disabled,
    # was not requested, or the gifsicle engine will handle the GIF.
    is_gifsicle = (self.context.request.engine.extension == ".gif"
                   and self.context.config.USE_GIFSICLE_ENGINE)
    if (self.context.request.trim is None or not TRIM_ENABLED
            or is_gifsicle):
        return

    mode, data = self.engine.image_data_as_rgb()
    # Bounding box of the remaining content: (left, top, right, bottom).
    box = _bounding_box.apply(
        mode,
        self.engine.size[0],
        self.engine.size[1],
        self.context.request.trim_pos,
        self.context.request.trim_tolerance,
        data,
    )

    if box[2] < box[0] or box[3] < box[1]:
        # Degenerate box: trimming would remove the whole image.
        logger.warning("Ignoring trim, there wouldn't be any image "
                       "left, check the tolerance.")
        return

    self.engine.crop(box[0], box[1], box[2] + 1, box[3] + 1)
    if self.context.request.should_crop:
        # Shift the user-requested crop window into trimmed coordinates.
        self.context.request.crop["left"] -= box[0]
        self.context.request.crop["top"] -= box[1]
        self.context.request.crop["right"] -= box[0]
        self.context.request.crop["bottom"] -= box[1]
def read(self, extension=None, quality=None):
    """Serialize the current image and return the raw bytes."""
    if quality is None:
        quality = self.context.request.quality
    # returns image buffer in byte format.
    buffer = StringIO()
    target_ext = extension or self.extension
    save_options = {'quality': quality}
    if target_ext in ('.jpg', '.jpeg'):
        save_options['optimize'] = True
        save_options['progressive'] = True
    if self.icc_profile is not None:
        save_options['icc_profile'] = self.icc_profile
    try:
        self.image.save(buffer, FORMATS[target_ext], **save_options)
    except IOError:
        logger.warning('Could not save as improved image, consider to increase ImageFile.MAXBLOCK')
        self.image.save(buffer, FORMATS[target_ext])
    except KeyError:
        #extension is not present or could not help determine format => force JPEG
        #TODO : guess format by image headers maybe
        fallback = '.png' if self.image.mode in ('P', 'RGBA', 'LA') else '.jpg'
        self.image.format = FORMATS[fallback]
        self.image.save(buffer, FORMATS[fallback])
    payload = buffer.getvalue()
    buffer.close()
    return payload
def read(self, extension=None, quality=None):
    """Return the image serialized to bytes.

    Uses the request's quality when none is passed; JPEGs are saved
    optimized/progressive and the stored ICC profile is embedded.
    """
    if quality is None:
        quality = self.context.request.quality
    #returns image buffer in byte format.
    img_buffer = StringIO()
    ext = extension or self.extension
    options = {'quality': quality}
    if ext == '.jpg' or ext == '.jpeg':
        options['optimize'] = True
        options['progressive'] = True
    if self.icc_profile is not None:
        options['icc_profile'] = self.icc_profile
    try:
        self.image.save(img_buffer, FORMATS[ext], **options)
    except IOError:
        # PIL raises IOError when the encoded output exceeds MAXBLOCK;
        # retry the save without the extra options.
        logger.warning(
            'Could not save as improved image, consider to increase ImageFile.MAXBLOCK'
        )
        self.image.save(img_buffer, FORMATS[ext])
    except KeyError:
        #extension is not present or could not help determine format => force JPEG
        #TODO : guess format by image headers maybe
        if self.image.mode in ['P', 'RGBA', 'LA']:
            self.image.format = FORMATS['.png']
            self.image.save(img_buffer, FORMATS['.png'])
        else:
            self.image.format = FORMATS['.jpg']
            self.image.save(img_buffer, FORMATS['.jpg'])
    results = img_buffer.getvalue()
    img_buffer.close()
    return results
def signal_handler(server, config, sig, frame):
    # Two-phase graceful shutdown on signal: stop the HTTP server after a
    # grace period, then wait for outstanding IOLoop work before stopping
    # the loop.
    io_loop = tornado.ioloop.IOLoop.instance()

    def stop_loop(now, deadline):
        # Poll once per second; stop the loop only when no callbacks or
        # timeouts remain (or the deadline passes).
        # NOTE(review): _callbacks/_timeouts are Tornado internals.
        if now < deadline and (io_loop._callbacks or io_loop._timeouts):
            logger.debug('Waiting for next tick')
            now += 1
            io_loop.add_timeout(now, stop_loop, now, deadline)
        else:
            io_loop.stop()
            logger.debug('Shutdown finally')

    def stop_server(now, deadline):
        # Count down to the server-stop deadline, then begin IOLoop drain.
        if now < deadline:
            logger.debug('Waiting for next tick')
            now += 1
            io_loop.add_timeout(now, stop_server, now, deadline)
        else:
            server.stop()
            logger.debug('Stopped http server.')
            logger.debug('Will shutdown io in maximum %d seconds ...',
                         config.MAX_WAIT_SECONDS_BEFORE_IO_SHUTDOWN)
            now = time.time()
            stop_loop(now, now + config.MAX_WAIT_SECONDS_BEFORE_IO_SHUTDOWN)

    def shutdown():
        logger.debug(
            'Stopping http server (i.e. stopping accepting new requests) in %d seconds ...',
            config.MAX_WAIT_SECONDS_BEFORE_SERVER_SHUTDOWN)
        now = time.time()
        stop_server(now, now + config.MAX_WAIT_SECONDS_BEFORE_SERVER_SHUTDOWN)

    logger.warning('Caught signal: %d. Shutting down server.', sig)
    # Signal-safe scheduling onto the IOLoop.
    io_loop.add_callback_from_signal(shutdown)
def import_item(self, config_key=None, class_name=None, is_multiple=False,
                item_value=None, ignore_errors=False):
    # Import a module/class named by configuration and bind it on self
    # under the lower-cased config key.
    #
    # :param config_key: config attribute naming what to import (also the
    #     destination attribute name, lower-cased)
    # :param class_name: class to import from each module; when None the
    #     module itself is imported
    # :param is_multiple: treat the config value as a list; bind a tuple
    # :param item_value: explicit value overriding the config lookup
    # :param ignore_errors: log-and-skip failed imports instead of raising
    if item_value is None:
        conf_value = getattr(self.config, config_key)
    else:
        conf_value = item_value

    if is_multiple:
        modules = []
        if conf_value:
            for module_name in conf_value:
                try:
                    if class_name is not None:
                        module = self.import_class('%s.%s' % (module_name, class_name))
                    else:
                        module = self.import_class(module_name, get_module=True)
                    modules.append(module)
                except ImportError as e:
                    if ignore_errors:
                        logger.warning('Module %s could not be imported: %s', module_name, e)
                    else:
                        raise
        # Always bind, even when the list is empty.
        setattr(self, config_key.lower(), tuple(modules))
    else:
        if class_name is not None:
            module = self.import_class('%s.%s' % (conf_value, class_name))
        else:
            module = self.import_class(conf_value, get_module=True)
        setattr(self, config_key.lower(), module)
def icc_profile_apply(self, profile=None):
    """Apply an ICC output profile to the engine's image.

    ``profile`` names the target profile (sanitized to its basename); when
    the image has no embedded profile the filter is a no-op.
    """
    # Check whether input image has color management.
    if not self.engine.icc_profile:
        logger.info('ICC: Image has no embedded profile. Skipping this image.')
        return

    # Sanitize profile parameter. (FIX: `!= None` -> `is not None`.)
    if profile is not None:
        profile = os.path.basename(profile).lstrip('.')
        if len(profile) == 0:
            logger.warning('ICC: Invalid profile name.')
            return

    # Find output profile.
    outprofile = self._find_profile(profile)
    if not outprofile:
        logger.warning('ICC: Failed to load profile: {:s}'.format(profile))
        return

    try:
        ext = self.engine.extension
        fmt = Image.EXTENSION[ext.lower()]
    # FIX: bare `except:` narrowed so SystemExit/KeyboardInterrupt
    # are no longer swallowed (applies to all four handlers below).
    except Exception:
        logger.exception('ICC: Failed to determine image format and extension before attempting to apply profile: {:s}'.format(profile))
        return

    try:
        inmode = self.engine.get_image_mode()
        insize = self.engine.size
        inimg = Image.frombytes(inmode, insize, self.engine.get_image_data())
        # In PIL>=3.0.0 / Thumbor 6, icc_profile is sometimes a tuple :/
        # https://github.com/python-pillow/Pillow/issues/1462
        profile_data = self.engine.icc_profile
        # FIX: isinstance instead of `type(...) == tuple`.
        if isinstance(profile_data, tuple):
            profile_data = profile_data[0]
        inprofile = StringIO(profile_data)
        outmode = 'RGBA' if 'A' in inmode else 'RGB'
    except Exception:
        logger.exception('ICC: Failed to determine image properties before attempting to apply profile: {:s}'.format(profile))
        return

    logger.info('ICC: Attempting to apply profile: {:s}, inmode: {:s}, outmode: {:s}'.format(profile, inmode, outmode))
    try:
        outimg = ImageCms.profileToProfile(inimg, inprofile, outprofile, outputMode=outmode)
    except Exception:
        logger.exception('ICC: Failed to apply profile: {:s}, inmode: {:s}, outmode: {:s}'.format(profile, inmode, outmode))
        return

    # Reload the image into the engine.
    outbuf = StringIO()
    try:
        outimg.save(outbuf, fmt)
        self.engine.load(outbuf.getvalue(), ext)
    except Exception:
        logger.exception('ICC: Failed load the image with an applied profile: {:s}, inmode: {:s}, outmode: {:s}'.format(profile, inmode, outmode))
        return
    finally:
        outbuf.close()
def read(self, extension=None, quality=None):
    """Encode the current image and return the resulting bytes."""
    effective_quality = self.context.request.quality if quality is None else quality
    # returns image buffer in byte format.
    out = StringIO()
    fmt_ext = extension or self.extension
    opts = {"quality": effective_quality}
    if fmt_ext in (".jpg", ".jpeg"):
        opts["optimize"] = True
        opts["progressive"] = True
    if self.icc_profile is not None:
        opts["icc_profile"] = self.icc_profile
    try:
        self.image.save(out, FORMATS[fmt_ext], **opts)
    except IOError:
        logger.warning("Could not save as improved image, consider to increase ImageFile.MAXBLOCK")
        self.image.save(out, FORMATS[fmt_ext])
    except KeyError:
        # extension is not present or could not help determine format => force JPEG
        # TODO : guess format by image headers maybe
        forced = ".png" if self.image.mode in ("P", "RGBA", "LA") else ".jpg"
        self.image.format = FORMATS[forced]
        self.image.save(out, FORMATS[forced])
    data = out.getvalue()
    out.close()
    return data
async def put(self, image_bytes):
    # Persist a result image in the file result storage; max_age == 0
    # means "do not cache", so skip the write entirely.
    if self.context.request.max_age is not None and self.context.request.max_age == 0:
        return

    file_abspath = self.normalize_path(self.context.request.url)
    if not self.validate_path(file_abspath):
        logger.warning(
            "[RESULT_STORAGE] unable to write outside root path: %s",
            file_abspath)
        return

    # Write to a unique temp file first, then move into place so readers
    # never observe a partially-written file.
    temp_abspath = "%s.%s" % (file_abspath, str(uuid4()).replace("-", ""))
    file_dir_abspath = dirname(file_abspath)
    logger.debug("[RESULT_STORAGE] putting at %s (%s)", file_abspath,
                 file_dir_abspath)
    self.ensure_dir(file_dir_abspath)
    with open(temp_abspath, "wb") as _file:
        _file.write(image_bytes)

    if self.context.request.max_age is not None:
        # Sidecar file recording the expiry (in seconds) next to the image.
        with open(temp_abspath + Storage.EXPIRE_EXT, "wb") as _file:
            _file.write(str.encode(str(self.context.request.max_age)))
        move(temp_abspath + Storage.EXPIRE_EXT,
             file_abspath + Storage.EXPIRE_EXT)

    move(temp_abspath, file_abspath)
def load(context, path, callback):
    """Load an image from a Rackspace Cloud Files container.

    Container handles are cached per (region, identity type, credentials
    file, public flag, container) so repeated loads reuse the connection.
    Invokes ``callback`` with the object payload, or ``None`` on failure.
    """
    key = (
        context.config.RACKSPACE_PYRAX_REGION,
        context.config.get('RACKSPACE_PYRAX_IDENTITY_TYPE', 'rackspace'),
        context.config.RACKSPACE_PYRAX_CFG,
        context.config.RACKSPACE_PYRAX_PUBLIC,
        context.config.RACKSPACE_LOADER_CONTAINER
    )
    if key not in CONNECTIONS:
        if context.config.RACKSPACE_PYRAX_REGION:
            pyrax.set_default_region(context.config.RACKSPACE_PYRAX_REGION)
        pyrax.set_setting('identity_type', context.config.get('RACKSPACE_PYRAX_IDENTITY_TYPE', 'rackspace'))
        pyrax.set_credential_file(expanduser(context.config.RACKSPACE_PYRAX_CFG))
        cf = pyrax.connect_to_cloudfiles(public=context.config.RACKSPACE_PYRAX_PUBLIC)
        CONNECTIONS[key] = cf.get_container(context.config.RACKSPACE_LOADER_CONTAINER)
    cont = CONNECTIONS[key]

    file_abspath = normalize_path(context, path)
    # Lazy %-style args instead of eager % interpolation.
    logger.debug("[LOADER] getting from %s/%s",
                 context.config.RACKSPACE_LOADER_CONTAINER, file_abspath)
    try:
        obj = cont.get_object(file_abspath)
        if obj:
            logger.debug("[LOADER] Found object at %s/%s",
                         context.config.RACKSPACE_LOADER_CONTAINER,
                         file_abspath)
        else:
            logger.warning("[LOADER] Unable to find object %s/%s",
                           context.config.RACKSPACE_LOADER_CONTAINER,
                           file_abspath)
    # FIX: was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
    # propagate; a storage failure still reports a miss to the caller.
    except Exception:
        logger.exception("[LOADER] Unable to find object %s/%s",
                         context.config.RACKSPACE_LOADER_CONTAINER,
                         file_abspath)
        callback(None)
    else:
        callback(obj.get())
def create_image(self, buffer):
    # Decode an image buffer with PIL, capturing metadata reused later on
    # save (ICC profile, EXIF, JPEG sampling and quantization tables).
    try:
        img = Image.open(BytesIO(buffer))
    except DECOMPRESSION_BOMB_EXCEPTIONS as error:
        logger.warning("[PILEngine] create_image failed: %s", error)
        return None
    self.icc_profile = img.info.get("icc_profile")
    self.exif = img.info.get("exif")
    self.original_mode = img.mode

    self.subsampling = JpegImagePlugin.get_sampling(img)
    if self.subsampling == -1:  # n/a for this file
        self.subsampling = None
    self.qtables = getattr(img, "quantization", None)

    if (self.context.config.ALLOW_ANIMATED_GIFS
            and self.extension == ".gif"):
        # Materialize every frame (palette mode) so the whole animation
        # can be processed, then rewind the source image.
        frames = []
        for frame in ImageSequence.Iterator(img):
            frames.append(frame.convert("P"))
        img.seek(0)
        self.frame_count = len(frames)
        return frames

    return img
def read(self, extension=None, quality=None):
    """Serialize the engine's image and hand back the raw bytes."""
    if quality is None:
        quality = self.context.request.quality
    # returns image buffer in byte format.
    sink = StringIO()
    chosen_ext = extension or self.extension
    params = {'quality': quality}
    if chosen_ext in ('.jpg', '.jpeg'):
        params['optimize'] = True
        params['progressive'] = True
    if self.icc_profile is not None:
        params['icc_profile'] = self.icc_profile
    try:
        self.image.save(sink, FORMATS[chosen_ext], **params)
    except IOError:
        logger.warning(
            'Could not save as improved image, consider to increase ImageFile.MAXBLOCK'
        )
        self.image.save(sink, FORMATS[chosen_ext])
    contents = sink.getvalue()
    sink.close()
    return contents
def get_image(self):
    """Fetch the requested image, map loader errors to HTTP statuses,
    then run the filter/transform pipeline."""
    try:
        result = yield self._fetch(self.context.request.image_url)

        if not result.successful:
            if result.loader_error == LoaderResult.ERROR_NOT_FOUND:
                self._error(404)
                return
            elif result.loader_error == LoaderResult.ERROR_UPSTREAM:
                # Return a Bad Gateway status if the error came from upstream
                self._error(502)
                return
            elif result.loader_error == LoaderResult.ERROR_TIMEOUT:
                # Return a Gateway Timeout status if upstream timed out (i.e. 599)
                self._error(504)
                return
            else:
                self._error(500)
                return

    except Exception as e:
        msg = "[BaseHandler] get_image failed for url `{url}`. error: `{error}`".format(
            url=self.context.request.image_url, error=e
        )

        self.log_exception(*sys.exc_info())

        # BUGFIX: `e.message` does not exist on Python 3 exceptions and
        # raised AttributeError here; use str(e) instead.
        if "cannot identify image file" in str(e):
            logger.warning(msg)
            self._error(400)
        else:
            logger.error(msg)
            self._error(500)
        return

    normalized = result.normalized
    buffer = result.buffer
    engine = result.engine

    req = self.context.request

    if engine is None:
        if buffer is None:
            self._error(504)
            return

        engine = self.context.request.engine
        engine.load(buffer, self.context.request.extension)

    def transform():
        # Runs after PHASE_AFTER_LOAD filters have been applied.
        self.normalize_crops(normalized, req, engine)

        if req.meta:
            self.context.request.engine = JSONEngine(engine, req.image_url, req.meta_callback)

        after_transform_cb = functools.partial(self.after_transform, self.context)
        Transformer(self.context).transform(after_transform_cb)

    self.filters_runner.apply_filters(thumbor.filters.PHASE_AFTER_LOAD, transform)
def validate(self, url):
    """Return True when the configured loader accepts *url* as a
    watermark source (loaders without a validate hook accept all)."""
    loader = self.context.modules.loader
    if not hasattr(loader, "validate"):
        return True
    if loader.validate(self.context, url):
        return True
    logger.warning('watermark source not allowed: "%s"', url)
    return False
def get_image(self):
    """Fetch the requested image and run the transform pipeline,
    translating loader failures into HTTP error statuses."""
    try:
        result = yield self._fetch(
            self.context.request.image_url
        )

        if not result.successful:
            if result.loader_error == LoaderResult.ERROR_NOT_FOUND:
                self._error(404)
                return
            elif result.loader_error == LoaderResult.ERROR_UPSTREAM:
                # Return a Bad Gateway status if the error came from upstream
                self._error(502)
                return
            else:
                self._error(500)
                return

    except Exception as e:
        msg = '[BaseHandler] get_image failed for url `{url}`. error: `{error}`'.format(
            url=self.context.request.image_url, error=e
        )

        self.log_exception(*sys.exc_info())

        # BUGFIX: Python 3 exceptions have no `.message` attribute; the
        # original raised AttributeError inside this handler. Use str(e).
        if 'cannot identify image file' in str(e):
            logger.warning(msg)
            self._error(400)
        else:
            logger.error(msg)
            self._error(500)
        return

    normalized = result.normalized
    buffer = result.buffer
    engine = result.engine

    req = self.context.request

    if engine is None:
        if buffer is None:
            self._error(504)
            return

        engine = self.context.request.engine
        engine.load(buffer, self.context.request.extension)

    def transform():
        # Runs after PHASE_AFTER_LOAD filters have been applied.
        self.normalize_crops(normalized, req, engine)

        if req.meta:
            self.context.request.engine = JSONEngine(engine, req.image_url, req.meta_callback)

        after_transform_cb = functools.partial(self.after_transform, self.context)
        Transformer(self.context).transform(after_transform_cb)

    self.filters_runner.apply_filters(thumbor.filters.PHASE_AFTER_LOAD, transform)
async def get(self):
    # Read a cached result from the file result storage, honoring the
    # expiry sidecar and migrating entries from the legacy path layout.
    path = self.context.request.url
    file_abspath = self.normalize_path(path)
    if not self.validate_path(file_abspath):
        logger.warning(
            "[RESULT_STORAGE] unable to read from outside root path: %s",
            file_abspath,
        )
        return None
    logger.debug("[RESULT_STORAGE] getting from %s", file_abspath)

    if isdir(file_abspath):
        logger.warning(
            "[RESULT_STORAGE] cache location is a directory: %s",
            file_abspath)
        return None

    if not exists(file_abspath):
        # Fall back to (and migrate from) the pre-normalized path layout.
        legacy_path = self.normalize_path_legacy(path)
        if isfile(legacy_path):
            logger.debug(
                "[RESULT_STORAGE] migrating image from old location at %s",
                legacy_path,
            )
            self.ensure_dir(dirname(file_abspath))
            move(legacy_path, file_abspath)
        else:
            logger.debug("[RESULT_STORAGE] image not found at %s",
                         file_abspath)
            return None

    expire_time = self.get_expire_time(file_abspath)
    if self.is_expired(file_abspath, expire_time):
        logger.debug("[RESULT_STORAGE] cached image has expired")
        return None

    with open(file_abspath, "rb") as image_file:
        buffer = image_file.read()

    if expire_time is not None:
        # Propagate the stored TTL so response caching headers match it.
        self.context.request.max_age = expire_time

    result = ResultStorageResult(
        buffer=buffer,
        metadata={
            "LastModified": datetime.fromtimestamp(
                getmtime(file_abspath)).replace(tzinfo=pytz.utc),
            "ContentLength": len(buffer),
            "ContentType": BaseEngine.get_mimetype(buffer),
        },
    )
    return result
def should_run(self, image_extension, image_buffer):
    """Run only for GIF inputs when the gifv filter was requested and the
    configured ffmpeg binary is present on disk."""
    wants_gifv = ("gif" in image_extension
                  and "gifv" in self.context.request.filters)
    if not wants_gifv:
        return False
    if exists(self.context.config.FFMPEG_PATH):
        return True
    logger.warning(
        "gifv optimizer enabled but binary FFMPEG_PATH does not exist"
    )
    return False
def format(self, format):
    """Filter that sets the requested output format.

    Disallowed formats reset the request format to None. (The parameter
    shadows the builtin ``format``; kept for filter-registration
    compatibility.)
    """
    # FIX: lazy %-style logging args instead of eager string
    # interpolation (the message is only built if the record is emitted).
    logger.warning('Setting format to %s', format)
    if format.lower() not in ALLOWED_FORMATS:
        logger.debug('Format not allowed: %s', format.lower())
        self.context.request.format = None
    else:
        logger.debug('Format specified: %s', format.lower())
        self.context.request.format = format.lower()
    return filter_retval()
def should_run(self, image_extension, image_buffer):
    """Run only on JPEGs, and only when the jpegtran binary is configured
    and actually present on disk."""
    if image_extension not in (".jpg", ".jpeg"):
        return False
    binary = self.context.config.JPEGTRAN_PATH
    if binary is not None and exists(binary):
        return True
    logger.warning(
        "jpegtran optimizer enabled but binary JPEGTRAN_PATH does not exist"
    )
    return False
def return_contents(response, url, context, req_start=None):
    # Convert an HTTP fetch response into a LoaderResult, recording fetch
    # timing and status metrics along the way.
    res = urlparse(url)
    if req_start:
        finish = datetime.datetime.now()
        context.metrics.timing(
            "original_image.fetch.{0}.{1}".format(
                response.code, res.netloc.replace(".", "_")),
            (finish - req_start).total_seconds() * 1000,
        )
        context.metrics.incr("original_image.fetch.{0}.{1}".format(
            response.code, res.netloc.replace(".", "_")))

    result = LoaderResult()
    context.metrics.incr("original_image.status." + str(response.code))
    context.metrics.incr("original_image.status.{0}.{1}".format(
        response.code, res.netloc.replace(".", "_")))
    if response.error:
        result.successful = False
        if response.code == 599:
            # Return a Gateway Timeout status downstream if upstream times out
            result.error = LoaderResult.ERROR_TIMEOUT
        else:
            result.error = LoaderResult.ERROR_NOT_FOUND

        logger.warning(u"ERROR retrieving image %s: %s", url,
                       str(response.error))
    elif response.body is None or len(response.body) == 0:
        # Upstream answered but sent nothing usable.
        result.successful = False
        result.error = LoaderResult.ERROR_UPSTREAM
        logger.warning(u"ERROR retrieving image %s: Empty response.", url)
    else:
        if response.time_info:
            # Per-phase timing plus derived throughput.
            for metric_name in response.time_info:
                context.metrics.timing(
                    "original_image.time_info." + metric_name,
                    response.time_info[metric_name] * 1000,
                )
            context.metrics.timing(
                "original_image.time_info.bytes_per_second",
                len(response.body) // response.time_info["total"],
            )
        result.buffer = response.body
        result.metadata.update(response.headers)
        context.metrics.incr("original_image.response_bytes",
                             len(response.body))

    return result
def validate(self, path):
    """Ask the configured loader whether *path* is acceptable.

    Loaders without a ``validate`` hook accept every path.
    """
    loader = self.context.modules.loader
    if not hasattr(loader, "validate"):
        return True
    is_valid = loader.validate(self.context, path)
    if not is_valid:
        logger.warning(
            ('Request denied because the specified path "%s" was '
             "not identified by the loader as a valid path"),
            path,
        )
    return is_valid
class Optimizer(gifv.Optimizer):
    # Deprecation shim: all behavior is inherited from the external
    # thumbor-plugins-gifv implementation. The warning below fires once,
    # at import time, when this class body is executed.
    logger.warning(""" This optimizer will be deprecated on the next major version. Add thumbor-plugins-gifv as a dependency, and change thumbor.optimizers.gifv to thumbor_plugins.optimizers.gifv on the OPTIMIZERS list on you thumbor.conf. """)
def last_updated(self):
    """Return the cached result's mtime as a timezone-aware datetime.

    Returns True (meaning "treat as updated/stale") when the path is
    invalid, missing, or expired.
    """
    file_abspath = self.normalize_path(self.context.request.url)

    if not self.validate_path(file_abspath):
        logger.warning(
            "[RESULT_STORAGE] unable to read from outside root path: %s",
            file_abspath,
        )
        return True

    logger.debug("[RESULT_STORAGE] getting from %s", file_abspath)

    missing_or_stale = not exists(file_abspath) or self.is_expired(file_abspath)
    if missing_or_stale:
        logger.debug("[RESULT_STORAGE] image not found at %s", file_abspath)
        return True

    modified_at = getmtime(file_abspath)
    return datetime.fromtimestamp(modified_at).replace(tzinfo=pytz.utc)
async def watermark(self, url, x, y, alpha, w_ratio=False, h_ratio=False):
    # Watermark filter entry point: resolves the watermark image (storage
    # first, loader as fallback) and records placement parameters for the
    # later composition step.
    self.url = url
    self.x = x
    self.y = y
    self.alpha = alpha
    # Ratios arrive as percentage strings; "none"/falsy disables them.
    self.w_ratio = (float(w_ratio) / 100.0
                    if w_ratio and w_ratio != "none" else False)
    self.h_ratio = (float(h_ratio) / 100.0
                    if h_ratio and h_ratio != "none" else False)
    self.watermark_engine = self.context.modules.engine.__class__(
        self.context)
    self.storage = self.context.modules.storage

    try:
        buffer = await self.storage.get(self.url)
        if buffer is not None:
            # Cache hit: skip the loader entirely.
            return self.on_image_ready(buffer)

        if not self.validate(self.url):
            raise tornado.web.HTTPError(400)

        result = await self.context.modules.loader.load(
            self.context, self.url)

        if isinstance(result, LoaderResult) and not result.successful:
            logger.warning(
                "bad watermark result error=%s metadata=%s",
                result.error,
                result.metadata,
            )
            raise tornado.web.HTTPError(400)

        # Loaders may return either a LoaderResult or the raw buffer.
        if isinstance(result, LoaderResult):
            buffer = result.buffer
        else:
            buffer = result

        # Cache the watermark (and its crypto entry) for subsequent requests.
        await self.storage.put(self.url, buffer)
        await self.storage.put_crypto(self.url)
        self.on_image_ready(buffer)
    except Exception as error:
        # Re-raise deliberate HTTP errors; anything else becomes a 500.
        if isinstance(error, tornado.web.HTTPError):
            raise error
        logger.warning("bad watermark")
        raise tornado.web.HTTPError(500)
def read(self, extension=None, quality=options.QUALITY):
    """Serialize the image and return its bytes.

    NOTE(review): the default quality is resolved from ``options.QUALITY``
    once, at definition time; the local ``options`` dict below shadows
    that module afterwards.
    """
    # returns image buffer in byte format.
    img_buffer = StringIO()
    ext = extension or self.extension
    options = {'quality': quality}
    if ext == '.jpg' or ext == '.jpeg':
        options['optimize'] = True
        options['progressive'] = True
    try:
        self.image.save(img_buffer, FORMATS[ext], **options)
    # FIX: legacy `except IOError, e` syntax with an unused binding.
    except IOError:
        logger.warning('Could not save as improved image, consider to increase ImageFile.MAXBLOCK')
        self.image.save(img_buffer, FORMATS[ext])
    # BUGFIX: the original never returned the serialized bytes (and left
    # the buffer open), so every caller received None.
    results = img_buffer.getvalue()
    img_buffer.close()
    return results
def get(self):
    """Fetch a cached result from the Rackspace Cloud Files result
    storage; returns the payload, or None when unavailable."""
    cf = pyrax.cloudfiles
    cont = cf.get_container(self.context.config.RACKSPACE_RESULT_STORAGES_CONTAINER)
    file_abspath = self.normalize_path(self.context.request.url)
    try:
        logger.debug("[RESULT_STORAGE] getting from %s/%s",
                     self.context.config.RACKSPACE_RESULT_STORAGES_CONTAINER,
                     file_abspath)
        obj = cont.get_object(file_abspath)
        if obj:
            logger.debug("[RESULT_STORAGE] Found object at %s/%s",
                         self.context.config.RACKSPACE_RESULT_STORAGES_CONTAINER,
                         file_abspath)
            if self.context.config.RACKSPACE_RESULT_STORAGE_EXPIRES:
                # NOTE(review): seconds is passed as a string; confirm
                # pyrax accepts this — an int would be safer.
                cont.delete_object_in_seconds(
                    obj=obj,
                    seconds=str(self.context.config.RESULT_STORAGE_EXPIRATION_SECONDS))
            return obj.get()
        else:
            logger.warning("[RESULT_STORAGE] Unable to find object %s/%s",
                           self.context.config.RACKSPACE_RESULT_STORAGES_CONTAINER,
                           file_abspath)
    # FIX: was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
    # propagate; a storage miss still yields None.
    except Exception:
        return None
    return None
def __init__(self, context, importer):
    '''
    Build the base context modules, then instantiate every registered
    community module exposed by the importer.

    :param context: thumbor context handed to each community module
    :param importer: importer holding the configured module factories
    '''
    ThumborContextImporter.__init__(self, context, importer)

    # Dynamically load registered modules
    for name in self._community_modules:
        if hasattr(importer, name):
            instance = getattr(importer, name)(context)
            setattr(self, name, instance)
        else:
            # Not configured: bind None so attribute access stays safe.
            logger.warning("Module {name} is not configured.".format(
                name=name.upper()
            ))
            setattr(self, name, None)
def import_community_modules(cls, instance):
    """Import every registered community module onto *instance*.

    Attributes are pre-bound to None so lookups stay safe even when a
    module's configuration key is absent.
    """
    for module in cls._community_modules:
        setattr(instance, module['config_key'].lower(), None)

    # Autoload
    for module in cls._community_modules:
        config_key = module['config_key']
        if hasattr(instance.config, config_key):
            instance.import_item(config_key, module['class_name'])
        else:
            # BUGFIX: the original format string referenced {config_name}
            # while the call supplied class_name, raising KeyError whenever
            # this warning fired. Use lazy %-style logging args instead.
            logger.warning(
                "Configuration not found for module %s %s",
                config_key,
                module['class_name'],
            )
def put(self, bytes):
    """Store a result image document in MongoDB.

    Skips caching when the payload exceeds the configured maximum cache
    size. Returns the storage key, or None when the payload is skipped.
    (``bytes`` shadows the builtin — kept for interface compatibility.)
    """
    connection, db, storage = self.__conn__()
    key = self.get_key_from_request()
    max_age = self.get_max_age()
    result_ttl = self.get_max_age()

    # Extract a 24-char object-id-like segment from the key as a
    # reference id, falling back to 'undef'.
    ref_img = re.findall(r'/[a-zA-Z0-9]{24}(?:$|/)', key)
    if ref_img:
        ref_img2 = ref_img[0].replace('/', '')
    else:
        ref_img2 = 'undef'

    if self.is_auto_webp:
        content_t = 'webp'
    else:
        content_t = 'default'

    doc = {
        'path': key,
        'created_at': datetime.utcnow(),
        'data': Binary(bytes),
        'content-type': content_t,
        'ref_id': ref_img2
    }

    doc_cpm = dict(doc)
    if result_ttl > 0:
        # TTL index field: absolute expiry timestamp.
        doc_cpm['expire'] = datetime.utcnow() + timedelta(seconds=result_ttl)

    # FIX: replaced a bare try/except around the config attribute lookup
    # with getattr and an explicit default of 0 (meaning "no limit").
    maxs = getattr(self.context.config, 'MONGO_RESULT_STORAGE_MAXCACHESIZE', 0)

    amd = getsizeof(bytes)
    if amd > maxs and maxs > 0:
        logger.warning(
            u"OVERSIZE %s: %s > %s pas de mise en cache possible",
            key, amd, maxs)
        return None
    storage.insert(doc_cpm)
    return key
async def detect(self):
    """Face detector: record each detected square as a focal point, then
    continue down the detector chain."""
    try:
        features = self.get_features()
    except Exception as error:
        logger.exception(error)
        logger.warning("Error during face detection; skipping to next detector")
        return await self.next()

    if not features:
        return await self.next()

    for (left, top, width, height), _ in features:
        adjusted_top = self.__add_hair_offset(top, height)
        self.context.request.focal_points.append(
            FocalPoint.from_square(
                left, adjusted_top, width, height, origin="Face Detection"
            )
        )
    return await self.next()
async def put(self, image_bytes):
    """Atomically write *image_bytes* into the file result storage."""
    target = self.normalize_path(self.context.request.url)
    if not self.validate_path(target):
        logger.warning(
            "[RESULT_STORAGE] unable to write outside root path: %s", target
        )
        return

    scratch = "%s.%s" % (target, str(uuid4()).replace("-", ""))
    parent_dir = dirname(target)
    logger.debug(
        "[RESULT_STORAGE] putting at %s (%s)", target, parent_dir
    )
    self.ensure_dir(parent_dir)
    with open(scratch, "wb") as handle:
        handle.write(image_bytes)
    # Rename last so readers never observe a partially-written file.
    move(scratch, target)
def return_contents(response, url, callback, context, req_start=None):
    """Translate an HTTP fetch response into a LoaderResult, record
    metrics, and invoke *callback* with the result."""
    if req_start:
        finish = datetime.datetime.now()
        res = urlparse(url)
        context.metrics.timing(
            'original_image.fetch.{0}.{1}'.format(response.code,
                                                  res.netloc.replace('.', '_')),
            (finish - req_start).total_seconds() * 1000,
        )

    result = LoaderResult()
    context.metrics.incr('original_image.status.' + str(response.code))
    if response.error:
        result.successful = False
        if response.code == 599:
            # Return a Gateway Timeout status downstream if upstream times out
            result.error = LoaderResult.ERROR_TIMEOUT
        else:
            result.error = LoaderResult.ERROR_NOT_FOUND

        # FIX: lazy %-style args instead of eager .format() — the string
        # is only built when the record is actually emitted.
        logger.warning(u"ERROR retrieving image %s: %s", url,
                       str(response.error))
    elif response.body is None or len(response.body) == 0:
        result.successful = False
        result.error = LoaderResult.ERROR_UPSTREAM
        # BUGFIX: logger.warn is a deprecated alias; use logger.warning.
        logger.warning(u"ERROR retrieving image %s: Empty response.", url)
    else:
        if response.time_info:
            # Per-phase timing plus derived throughput.
            for metric_name in response.time_info:
                context.metrics.timing(
                    'original_image.time_info.' + metric_name,
                    response.time_info[metric_name] * 1000)
            context.metrics.timing(
                'original_image.time_info.bytes_per_second',
                len(response.body) // response.time_info['total'])
        result.buffer = response.body
        result.metadata.update(response.headers)
        context.metrics.incr('original_image.response_bytes',
                             len(response.body))

    callback(result)
def load_multiple_item(
    self, config_key, conf_value, class_name, item_value, ignore_errors
):
    """Import every module named in conf_value and bind the resulting tuple.

    The tuple is stored on self under config_key.lower(); import failures
    are logged and skipped when ignore_errors is true, re-raised otherwise.
    """
    loaded = []
    for module_name in (conf_value or []):
        try:
            if class_name is None:
                imported = self.import_class(module_name, get_module=True)
            else:
                imported = self.import_class("%s.%s" % (module_name, class_name))
        except ImportError as error:
            if not ignore_errors:
                raise
            logger.warning(
                "Module %s could not be imported: %s",
                module_name,
                error,
            )
        else:
            loaded.append(imported)
    setattr(self, config_key.lower(), tuple(loaded))
def get_image(self):
    """Fetch the requested image, then apply AFTER_LOAD filters and transform.

    On fetch failure emits HTTP 400 when the payload could not be identified
    as an image, 500 otherwise; 504 when no engine and no buffer came back.
    """
    try:
        normalized, buffer, engine = yield self._fetch(
            self.context.request.image_url
        )
    except Exception as e:
        msg = '[BaseHandler] get_image failed for url `{url}`. error: `{error}`'.format(
            url=self.context.request.image_url, error=e
        )
        self.log_exception(*sys.exc_info())
        # BUG FIX: Python 3 exceptions have no `.message` attribute (removed
        # per PEP 352) — the old `e.message` raised AttributeError here.
        if 'cannot identify image file' in str(e):
            logger.warning(msg)
            self._error(400)
        else:
            logger.error(msg)
            self._error(500)
        return

    req = self.context.request

    if engine is None:
        if buffer is None:
            self._error(504)
            return

        engine = self.context.request.engine
        engine.load(buffer, self.context.request.extension)

    def transform():
        # Runs after AFTER_LOAD filters: adjust crops, then hand off to the
        # Transformer; meta requests are wrapped in a JSONEngine first.
        self.normalize_crops(normalized, req, engine)

        if req.meta:
            self.context.request.engine = JSONEngine(engine, req.image_url, req.meta_callback)

        after_transform_cb = functools.partial(self.after_transform, self.context)
        Transformer(self.context).transform(after_transform_cb)

    self.filters_runner.apply_filters(thumbor.filters.PHASE_AFTER_LOAD, transform)
def get_image(self):
    """Fetch the requested image, then apply AFTER_LOAD filters and transform.

    On fetch failure emits HTTP 400 when the payload could not be identified
    as an image, 500 otherwise; 504 when no engine and no buffer came back.
    """
    try:
        normalized, buffer, engine = yield self._fetch(
            self.context.request.image_url)
    except Exception as e:
        msg = '[BaseHandler] get_image failed for url `{url}`. error: `{error}`'.format(
            url=self.context.request.image_url, error=e)
        self.log_exception(*sys.exc_info())
        # BUG FIX: Python 3 exceptions have no `.message` attribute (removed
        # per PEP 352) — the old `e.message` raised AttributeError here.
        if 'cannot identify image file' in str(e):
            logger.warning(msg)
            self._error(400)
        else:
            logger.error(msg)
            self._error(500)
        return

    req = self.context.request

    if engine is None:
        if buffer is None:
            self._error(504)
            return

        engine = self.context.request.engine
        engine.load(buffer, self.context.request.extension)

    def transform():
        # Runs after AFTER_LOAD filters: adjust crops, then hand off to the
        # Transformer; meta requests are wrapped in a JSONEngine first.
        self.normalize_crops(normalized, req, engine)

        if req.meta:
            self.context.request.engine = JSONEngine(
                engine, req.image_url, req.meta_callback)

        after_transform_cb = functools.partial(self.after_transform, self.context)
        Transformer(self.context).transform(after_transform_cb)

    self.filters_runner.apply_filters(thumbor.filters.PHASE_AFTER_LOAD, transform)
def run_optimizer(self, image_extension, buffer):
    """Losslessly optimize a JPEG buffer via jpegtran.

    Returns the optimized bytes, or the original buffer untouched when the
    optimizer should not run or jpegtran exits with a non-zero status.
    """
    if not self.should_run(image_extension, buffer):
        return buffer

    # Copying only comments would drop the ICC profile, so restrict the
    # copied chunks only when the icc profile is being stripped anyway.
    if "strip_icc" in self.context.request.filters:
        chunks = "comments"
    else:
        chunks = "all"

    config = self.context.config
    command = [config.JPEGTRAN_PATH, "-copy", chunks, "-optimize"]

    if config.PROGRESSIVE_JPEG:
        command.append("-progressive")

    if config.JPEGTRAN_SCANS_FILE:
        if exists(config.JPEGTRAN_SCANS_FILE):
            command.extend(["-scans", config.JPEGTRAN_SCANS_FILE])
        else:
            logger.warning("jpegtran optimizer scans file does not exist")

    process = Popen(  # pylint: disable=consider-using-with
        command, stdin=PIPE, stdout=PIPE, stderr=PIPE
    )
    stdout_data, stderr_data = process.communicate(buffer)

    if process.returncode != 0:
        logger.warning(
            "jpegtran finished with non-zero return code (%d): %s",
            process.returncode,
            stderr_data,
        )
        return buffer

    return stdout_data
def create_image(self, buffer):
    """Build a PIL image from raw bytes, caching JPEG/GIF metadata on self.

    Returns the image, a list of palette-converted frames for animated GIFs
    (when allowed by config), or None if PIL refuses the buffer.
    """
    try:
        img = Image.open(BytesIO(buffer))
    except Image.DecompressionBombWarning as e:
        logger.warning("[PILEngine] create_image failed: {0}".format(e))
        return None

    info = img.info
    self.icc_profile = info.get('icc_profile')
    self.exif = info.get('exif')

    sampling = JpegImagePlugin.get_sampling(img)
    # get_sampling returns -1 when subsampling is n/a for this file
    self.subsampling = None if sampling == -1 else sampling
    self.qtables = getattr(img, 'quantization', None)

    if self.context.config.ALLOW_ANIMATED_GIFS and self.extension == '.gif':
        frames = [frame.convert('P') for frame in ImageSequence.Iterator(img)]
        img.seek(0)
        self.frame_count = len(frames)
        return frames

    return img
def __init__(self, context, importer):
    '''
    Build a community context importer, instantiating every registered
    community module exposed by the importer.

    :param context: thumbor context passed to each module factory
    :param importer: importer object whose attributes are module factories
    '''
    ThumborContextImporter.__init__(self, context, importer)

    # Dynamically load registered modules
    for name in self._community_modules:
        if hasattr(importer, name):
            init = getattr(importer, name)
            if not hasattr(init, '__call__'):
                # BUG FIX: previously the attribute was still invoked after
                # being found non-callable, raising TypeError. Log and skip.
                logger.error("Attr {attr} of object {obj} is not callable".format(
                    attr=name,
                    obj=importer,
                ))
                setattr(self, name, None)
                continue
            instance = init(context)
            setattr(self, name, instance)
        else:
            logger.warning("Module {name} is not configured.".format(
                name=name.upper()
            ))
            setattr(self, name, None)
def create_image(self, buffer):
    """Open raw bytes as a PIL image, caching transparency/JPEG/GIF metadata.

    Returns the image, a list of palette-converted frames for animated GIFs
    (when allowed by config), or None if PIL refuses the buffer.
    """
    try:
        img = Image.open(BytesIO(buffer))
    except Image.DecompressionBombWarning as e:
        logger.warning("[PILEngine] create_image failed: {0}".format(e))
        return None

    # Cache per-image metadata the engine needs when re-encoding later.
    self.icc_profile = img.info.get('icc_profile')
    self.transparency = img.info.get('transparency')
    self.exif = img.info.get('exif')

    self.subsampling = JpegImagePlugin.get_sampling(img)
    if self.subsampling == -1:
        # -1 means subsampling is n/a for this file
        self.subsampling = None
    self.qtables = getattr(img, 'quantization', None)

    is_animated_gif = (
        self.context.config.ALLOW_ANIMATED_GIFS and self.extension == '.gif'
    )
    if not is_animated_gif:
        return img

    frames = [frame.convert('P') for frame in ImageSequence.Iterator(img)]
    img.seek(0)
    self.frame_count = len(frames)
    return frames
def extract_whitelist_dimensions(self, blob):
    """
    Parse a newline-separated whitelist of dimensions into (w, h) tuples.

    :param blob: file contents of the form
        w1xh1
        w2xh2
        w3x ...
    :return: a list of tuples [(w1, h1), (w2, h2), (w3, h3)]; a bare "w"
        entry yields the square (w, w). On a malformed entry a 400 error is
        emitted and None is returned.
    """
    if not blob:
        return []

    parsed = []
    for raw_line in blob.split('\n'):
        if not raw_line:
            continue
        entry = raw_line.strip().lower()
        # filtering out the empty dimension
        pieces = [piece for piece in entry.split('x') if piece]
        if len(pieces) > 2:
            logger.warning("whitelist dimensions %s not in the form wxh ", entry)
            self._error(
                400,
                "Unsupported whitelist dimension expected wxh, given %s" % entry)
            return
        if len(pieces) == 1:
            width = height = pieces[0]
        else:
            width, height = pieces
        parsed.append((int(width), int(height)))
    return parsed
def get_image(self):
    """
    This function is called after the PRE_LOAD filters have been applied.
    It applies the AFTER_LOAD filters on the result, then crops the image.

    Maps loader/engine failures to HTTP statuses: 404 not found, 502 bad
    upstream, 504 upstream timeout or unloadable buffer, 400 unreadable
    image, 500 for anything else.
    """
    try:
        result = yield self._fetch(
            self.context.request.image_url
        )

        if not result.successful:
            if result.loader_error == LoaderResult.ERROR_NOT_FOUND:
                self._error(404)
                return
            elif result.loader_error == LoaderResult.ERROR_UPSTREAM:
                # Return a Bad Gateway status if the error came from upstream
                self._error(502)
                return
            elif result.loader_error == LoaderResult.ERROR_TIMEOUT:
                # Return a Gateway Timeout status if upstream timed out (i.e. 599)
                self._error(504)
                return
            elif isinstance(result.loader_error, int):
                self._error(result.loader_error)
                return
            elif hasattr(result, 'engine_error') and result.engine_error == EngineResult.COULD_NOT_LOAD_IMAGE:
                self._error(400)
                return
            else:
                self._error(500)
                return

    except Exception as e:
        msg = '[BaseHandler] get_image failed for url `{url}`. error: `{error}`'.format(
            url=self.context.request.image_url, error=e
        )

        self.log_exception(*sys.exc_info())

        # BUG FIX: Python 3 exceptions have no `.message` attribute (removed
        # per PEP 352) — the old `e.message` raised AttributeError here.
        if 'cannot identify image file' in str(e):
            logger.warning(msg)
            self._error(400)
        else:
            logger.error(msg)
            self._error(500)
        return

    normalized = result.normalized
    buffer = result.buffer
    engine = result.engine

    req = self.context.request

    if engine is None:
        if buffer is None:
            self._error(504)
            return

        engine = self.context.request.engine
        try:
            engine.load(buffer, self.context.request.extension)
        except Exception:
            self._error(504)
            return

    self.context.transformer = Transformer(self.context)

    def transform():
        # Runs after AFTER_LOAD filters: adjust crops, then hand off to the
        # Transformer; meta requests are wrapped in a JSONEngine first.
        self.normalize_crops(normalized, req, engine)

        if req.meta:
            self.context.transformer.engine = \
                self.context.request.engine = \
                JSONEngine(engine, req.image_url, req.meta_callback)

        self.context.transformer.transform(self.after_transform)

    self.filters_runner.apply_filters(thumbor.filters.PHASE_AFTER_LOAD, transform)
def check_image(self, kw):
    """Validate the incoming image request (tornado coroutine).

    Truncates the image id to MAX_ID_LENGTH when a stored image matches,
    validates the URL signature (including per-image crypto keys and the
    legacy URL format), and finally kicks off the image operations.
    Emits a 400 error and returns early on any validation failure.
    """
    if self.context.config.MAX_ID_LENGTH > 0:
        # Check if an image with an uuid exists in storage
        exists = yield gen.maybe_future(self.context.modules.storage.exists(kw['image'][:self.context.config.MAX_ID_LENGTH]))
        if exists:
            kw['image'] = kw['image'][:self.context.config.MAX_ID_LENGTH]

    url = self.request.uri

    if not self.validate(kw['image']):
        self._error(400, 'No original image was specified in the given URL')
        return

    kw['request'] = self.request
    self.context.request = RequestParameters(**kw)

    # Exactly one of "unsafe" or a signature hash must be present.
    has_none = not self.context.request.unsafe and not self.context.request.hash
    has_both = self.context.request.unsafe and self.context.request.hash

    if has_none or has_both:
        self._error(400, 'URL does not have hash or unsafe, or has both: %s' % url)
        return

    if self.context.request.unsafe and not self.context.config.ALLOW_UNSAFE_URL:
        self._error(400, 'URL has unsafe but unsafe is not allowed by the config: %s' % url)
        return

    if self.context.config.USE_BLACKLIST:
        blacklist = yield self.get_blacklist_contents()
        if self.context.request.image_url in blacklist:
            self._error(400, 'Source image url has been blacklisted: %s' % self.context.request.image_url)
            return

    url_signature = self.context.request.hash
    if url_signature:
        signer = Signer(self.context.server.security_key)

        # Validate the signature against the URL with the hash segment removed.
        url_to_validate = Url.encode_url(url).replace('/%s/' % self.context.request.hash, '')
        valid = signer.validate(url_signature, url_to_validate)

        if not valid and self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            # Retrieves security key for this image if it has been seen before
            security_key = yield gen.maybe_future(self.context.modules.storage.get_crypto(self.context.request.image_url))
            if security_key is not None:
                signer = Signer(security_key)
                valid = signer.validate(url_signature, url_to_validate)

        if not valid:
            # Fall back to decrypting the hash as a legacy (pre-3.0) URL.
            is_valid = True
            if self.context.config.ALLOW_OLD_URLS:
                cr = Cryptor(self.context.server.security_key)
                options = cr.get_options(self.context.request.hash, self.context.request.image_url)

                if options is None:
                    is_valid = False
                else:
                    options['request'] = self.request
                    self.context.request = RequestParameters(**options)
                    logger.warning(
                        'OLD FORMAT URL DETECTED!!! This format of URL will be discontinued in ' +
                        'upcoming versions. Please start using the new format as soon as possible. ' +
                        'More info at https://github.com/globocom/thumbor/wiki/3.0.0-release-changes'
                    )
            else:
                is_valid = False

            if not is_valid:
                self._error(400, 'Malformed URL: %s' % url)
                return

    self.execute_image_operations()
def get(self, **kw):
    """Legacy GET handler: validate the request URL and run image operations.

    Truncates the image id to a 32-char uuid when one exists in storage,
    re-encodes the image URL, validates the URL signature (including
    per-image crypto keys and the legacy URL format), and emits a 404 on
    any validation failure.
    """
    # Check if an image with an uuid exists in storage
    if self.context.modules.storage.exists(kw['image'][:32]):
        kw['image'] = kw['image'][:32]

    url = self.request.uri

    if not self.validate(kw['image']):
        self._error(404, 'No original image was specified in the given URL')
        return

    self.context.request = RequestParameters(**kw)
    self.context.request.unsafe = self.context.request.unsafe == 'unsafe'

    # Preserve the query string as part of the source image URL.
    if (self.request.query):
        self.context.request.image_url += '?%s' % self.request.query

    self.context.request.image_url = self.encode_url(self.context.request.image_url.encode('utf-8'))

    # Exactly one of "unsafe" or a signature hash must be present.
    has_none = not self.context.request.unsafe and not self.context.request.hash
    has_both = self.context.request.unsafe and self.context.request.hash

    if has_none or has_both:
        self._error(404, 'URL does not have hash or unsafe, or has both: %s' % url)
        return

    if self.context.request.unsafe and not self.context.config.ALLOW_UNSAFE_URL:
        self._error(404, 'URL has unsafe but unsafe is not allowed by the config: %s' % url)
        return

    url_signature = self.context.request.hash
    if url_signature:
        signer = Signer(self.context.server.security_key)

        # Validate the signature against the URL with the hash segment removed.
        url_to_validate = self.encode_url(url).replace('/%s/' % self.context.request.hash, '')
        valid = signer.validate(url_signature, url_to_validate)

        if not valid and self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
            # Retrieves security key for this image if it has been seen before
            security_key = self.context.modules.storage.get_crypto(self.context.request.image_url)
            if security_key is not None:
                signer = Signer(security_key)
                valid = signer.validate(url_signature, url_to_validate)

        if not valid:
            # Fall back to decrypting the hash as a legacy (pre-3.0) URL.
            is_valid = True
            if self.context.config.ALLOW_OLD_URLS:
                cr = Cryptor(self.context.server.security_key)
                options = cr.get_options(self.context.request.hash, self.context.request.image_url)
                if options is None:
                    is_valid = False
                else:
                    self.context.request = RequestParameters(**options)
                    logger.warning('OLD FORMAT URL DETECTED!!! This format of URL will be discontinued in upcoming versions. Please start using the new format as soon as possible. More info at https://github.com/globocom/thumbor/wiki/3.0.0-release-changes')
            else:
                is_valid = False

            if not is_valid:
                self._error(404, 'Malformed URL: %s' % url)
                return

    return self.execute_image_operations()