def get_page_image_thumb(workflow, seq_num, img_type, plugname):
    """ Return thumbnail for page image from requested workflow. """
    if img_type not in ('raw', 'processed'):
        raise ApiException("Image type must be one of 'raw' or 'processed', "
                           "not '{0}'".format(img_type), 400)
    if img_type == 'processed' and plugname is None:
        raise ApiException("Need to supply additional path parameter for "
                           "plugin to get processed file for.", 400)
    page = get_next(p for p in workflow.pages if p.sequence_num == seq_num)
    if not page:
        raise ApiException("Could not find page with sequence number {0}"
                           .format(seq_num), 404)
    if img_type == 'raw':
        fpath = page.raw_image
    elif plugname is None:
        fpath = page.get_latest_processed(image_only=True)
    else:
        fpath = page.processed_images[plugname]
    if fpath.suffix.lower() not in ('.jpg', '.jpeg', '.tif', '.tiff', '.png'):
        raise ApiException("Can not serve thumbnails for files with type {0}"
                           .format(fpath.suffix), 400)
    cache_key = "{0}.{1}.{2}".format(workflow.id, img_type, fpath.name)
    thumbnail = None
    if not request.args:
        thumbnail = cache.get(cache_key)
    if thumbnail is None:
        thumbnail = get_thumbnail(fpath)
        cache.set(cache_key, thumbnail)
    return Response(thumbnail, mimetype='image/jpeg')
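# Caching note (describes the handler above, not additional API): thumbnails
# are cached under "<workflow.id>.<img_type>.<file name>". A request carrying
# any query parameters skips the cache lookup, so a freshly generated
# thumbnail is returned and written back under the same key.
# crop_workflow_image() further down deletes the matching 'raw' key after
# cropping, so stale thumbnails are not served.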
def find_all(cls, location, key='slug', reload=False):
    """ List all workflows in the given location.

    :param location: Location where the workflows are located
    :type location:  unicode/pathlib.Path
    :param key:      Attribute to use as key for returned dict
    :type key:       str
    :param reload:   Do not load workflows from cache
    :type reload:    bool
    :return:         All found workflows
    :rtype:          dict
    """
    if not isinstance(location, Path):
        location = Path(location)
    if key not in ('slug', 'id'):
        raise ValueError("'key' must be one of ('id', 'slug')")
    if location in cls._cache and not reload:
        found = cls._cache[location]
    else:
        found = []
    for candidate in location.iterdir():
        is_workflow = (candidate.is_dir() and
                       ((candidate / 'bagit.txt').exists() or
                        (candidate / 'raw').exists()))
        if not is_workflow:
            continue
        if not util.get_next(wf for wf in found if wf.path == candidate):
            logging.debug(
                "Cache missed, instantiating workflow from {0}."
                .format(candidate))
            workflow = cls(candidate)
            found.append(workflow)
    cls._cache[location] = found
    return {getattr(wf, key): wf for wf in cls._cache[location]}
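# Usage sketch (hypothetical paths; assumes this classmethod is exposed on a
# Workflow class, as the `cls` parameter and the shared `_cache` suggest):
#
#     from pathlib import Path
#
#     # Keyed by slug, reusing cached instances where available:
#     by_slug = Workflow.find_all(Path('/var/spool/spreads'))
#
#     # Keyed by id, forcing every workflow to be re-instantiated:
#     by_id = Workflow.find_all('/var/spool/spreads', key='id', reload=True)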
def get_page_image(workflow, seq_num, img_type, plugname):
    """ Return image for requested page. """
    if img_type not in ('raw', 'processed'):
        raise ApiException("Image type must be one of 'raw' or 'processed', "
                           "not '{0}'".format(img_type), 400)
    # Scale image if requested
    width = request.args.get('width', None)
    img_format = request.args.get('format', None)
    page = get_next(p for p in workflow.pages if p.sequence_num == seq_num)
    if not page:
        raise ApiException("Could not find page with sequence number {0}"
                           .format(seq_num), 404)
    if img_type == 'raw':
        fpath = page.raw_image
    elif plugname is None:
        fpath = page.get_latest_processed(image_only=True)
    else:
        fpath = page.processed_images[plugname]
    if width and fpath.suffix.lower() in ('.jpg', '.jpeg', '.tif', '.tiff',
                                          '.png'):
        return scale_image(fpath, width=int(width))
    elif fpath.suffix.lower() in ('.tif', '.tiff') and img_format:
        img_format = 'png' if img_format == 'browser' else img_format
        return convert_image(fpath, img_format)
    else:
        return send_file(unicode(fpath))
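# Request sketch (route omitted, as it is not shown here; only the 'width'
# and 'format' query parameters are read by the handler above):
#
#     ...?width=640        -> JPEG/TIFF/PNG scaled to the given width
#     ...?format=browser   -> TIFFs converted to PNG for in-browser display
#     (no parameters)      -> the original file, served via send_file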
def delete_page(workflow, seq_num):
    """ Remove a single page from a workflow. """
    page = get_next(p for p in workflow.pages if p.sequence_num == seq_num)
    if not page:
        raise ApiException("Could not find page with sequence number {0}"
                           .format(seq_num), 404)
    workflow.remove_pages(page)
    return jsonify(page)
def crop_workflow_image(workflow, seq_num, img_type):
    # TODO: We have to update the checksum!
    page = get_next(p for p in workflow.pages if p.sequence_num == seq_num)
    if not page:
        raise ApiException("Could not find page with sequence number {0}"
                           .format(seq_num), 404)
    if img_type != 'raw':
        raise ApiException("Can only crop raw images.", 400)
    left = int(request.args.get('left', 0))
    top = int(request.args.get('top', 0))
    # A width/height of 0 (or no value at all) is passed on as None
    width = int(request.args.get('width', 0)) or None
    height = int(request.args.get('height', 0)) or None
    crop_image(unicode(page.raw_image), left, top, width, height)
    # The raw image has changed, so drop its cached thumbnail
    cache_key = "{0}.{1}.{2}".format(workflow.id, 'raw', page.raw_image.name)
    cache.delete(cache_key)
    return 'OK'
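# Request sketch (HTTP method and route are not shown in this handler and are
# therefore omitted; only the query string matters here):
#
#     ...?left=100&top=50&width=2000&height=3000
#
# left/top default to 0; a width/height of 0 or left out entirely is
# forwarded to crop_image as None.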
def find_page(workflow, number):
    page = get_next(p for p in workflow.pages if p.capture_num == number)
    if not page:
        raise ApiException("Could not find page with capture number {0}"
                           .format(number), 404)
    return page
def get_single_page(workflow, seq_num):
    page = get_next(p for p in workflow.pages if p.sequence_num == seq_num)
    if not page:
        raise ApiException("Could not find page with sequence number {0}"
                           .format(seq_num), 404)
    return jsonify(page)