def show_instrument(path: Path = Depends(imagepath_parameter)):
    """Return instrument information for the original representation of an image."""
    image = path.get_original()
    check_representation_existence(image)
    return InstrumentInfo.from_image(image)
def export_upload(
    background: BackgroundTasks,
    path: Path = Depends(imagepath_parameter),
):
    """Export the upload representation of an image.

    If the upload is a directory (the original archive has been deleted),
    it is zipped into a temporary file which is removed after the response
    has been sent.
    """
    image = path.get_original()
    check_representation_existence(image)

    upload_file = image.get_upload().resolve()
    media_type = image.media_type

    if upload_file.is_dir():
        # if archive has been deleted
        archive = Path(f"/tmp/{unique_name_generator()}")
        make_zip_archive(archive, upload_file)

        def _remove(tmp):
            # Best-effort cleanup once the response is done.
            tmp.unlink(missing_ok=True)

        background.add_task(_remove, archive)
        upload_file = archive
        media_type = "application/zip"

    return FileResponse(upload_file, media_type=media_type, filename=path.name)
def show_channels(path: Path = Depends(imagepath_parameter)):
    """Return channel information for the original representation of an image."""
    image = path.get_original()
    check_representation_existence(image)
    return response_list(ChannelsInfo.from_image(image))
def show_normalized_pyramid(path: Path = Depends(imagepath_parameter)):
    """Return the normalized pyramid description of an image."""
    image = path.get_original()
    check_representation_existence(image)
    return PyramidInfo.from_pyramid(image.normalized_pyramid)
def show_associated(path: Path = Depends(imagepath_parameter)):
    """Return associated-file information for an image."""
    image = path.get_original()
    check_representation_existence(image)
    return response_list(AssociatedInfo.from_image(image))
def show_image(path: Path = Depends(imagepath_parameter)):
    """Return standard image information."""
    image = path.get_original()
    check_representation_existence(image)
    return ImageInfo.from_image(image)
def _show_associated_image(
    request: Request,
    response: Response,  # required for @cache # noqa
    path: Path,
    height, width, length,
    associated_key,
    headers,
    config: Settings
):
    """Render an associated image (e.g. macro/label/thumb, per `associated_key`).

    Looks up the `associated_<key>` attribute on the spatial representation,
    fails with NoAppropriateRepresentationProblem when missing, then sizes and
    encodes the output according to request headers and server limits.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    # Resolve the requested associated representation dynamically by key.
    associated = getattr(in_image, f'associated_{associated_key.value}')
    if not associated or not associated.exists:
        raise NoAppropriateRepresentationProblem(path, associated_key)

    # No explicit extension here: format is negotiated from the Accept header.
    out_format, mimetype = get_output_format(OutputExtension.NONE, headers.accept, VISUALISATION_MIMETYPES)
    req_size = get_thumb_output_dimensions(associated, height, width, length)
    # Clamp requested size to the configured output limit (unless safe mode allows more).
    out_size = safeguard_output_dimensions(headers.safe_mode, config.output_size_limit, *req_size)
    out_width, out_height = out_size

    return AssociatedResponse(
        in_image, associated_key, out_width, out_height, out_format
    ).http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )
def show_plane_histogram(
    z_slices: conint(ge=0),
    timepoints: conint(ge=0),
    path: Path = Depends(imagepath_parameter),
    channels: Optional[List[conint(ge=0)]] = Query(
        None, description="Only return histograms for these channels"
    ),
):
    """Return histogram bounds for every requested (channel, z-slice, timepoint) plane."""
    image = path.get_spatial()
    check_representation_existence(image)

    channel_idxs = get_channel_indexes(image, ensure_list(channels))
    zslice_idxs = get_zslice_indexes(image, ensure_list(z_slices))
    timepoint_idxs = get_timepoint_indexes(image, ensure_list(timepoints))

    hist_kind = image.histogram_type()
    infos = []
    for c, z, t in itertools.product(channel_idxs, zslice_idxs, timepoint_idxs):
        low, high = image.plane_bounds(c, z, t)
        infos.append(
            PlaneHistogramInfo(
                channel=c, z_slice=z, timepoint=t, type=hist_kind,
                color=image.channels[c].hex_color,
                minimum=low, maximum=high
            )
        )

    return response_list(infos)
def show_channels_histogram_bounds(
    path: Path = Depends(imagepath_parameter),
    channels: Optional[List[conint(ge=0)]] = Query(
        None, description="Only return histograms for these channels"
    ),
):
    """Return histogram bounds per channel, with all planes (Z, T) merged.

    Fix: the previous implementation used `operator.itemgetter(*channels)`,
    which raises TypeError when the resolved channel list is empty and
    returns a bare tuple (not a list of tuples) for a single channel,
    requiring a special case. Direct indexing handles all sizes uniformly.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    channels = ensure_list(channels)
    channels = get_channel_indexes(in_image, channels)

    htype = in_image.histogram_type()
    # Fetch bounds for all channels once, then pick the requested ones.
    all_bounds = in_image.channels_bounds()

    hist_info = []
    for channel in channels:
        mini, maxi = all_bounds[channel]
        hist_info.append(
            ChannelHistogramInfo(
                channel=channel, type=htype,
                color=in_image.channels[channel].hex_color,
                minimum=mini, maximum=maxi
            )
        )

    return response_list(hist_info)
def show_metadata(path: Path = Depends(imagepath_parameter)):
    """Return the raw metadata store of an image as a list."""
    image = path.get_original()
    check_representation_existence(image)
    metadata_store = image.raw_metadata
    return response_list(
        [Metadata.from_metadata(item) for item in metadata_store.values()]
    )
def show_metadata_annotations(path: Path = Depends(imagepath_parameter)):
    """Return annotation metadata attached to an image."""
    image = path.get_original()
    check_representation_existence(image)
    annotations = [
        MetadataAnnotation.from_metadata_annotation(annot)
        for annot in image.annotations
    ]
    return response_list(annotations)
async def _show_drawing(
    request: Request,
    response: Response,  # required for @cache # noqa
    path: Path,
    annotations, context_factor, try_square, point_cross, point_envelope_length,
    height, width, length, zoom, level,
    channels, z_slices, timepoints,
    min_intensities, max_intensities, filters, gammas, threshold, log,
    extension, headers, config,
    colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None,
):
    """Render a window with annotations drawn (stroked) on top of the image.

    Parses the given annotations in DRAWING mode (strokes only, fill ignored),
    derives the window region from the annotation extent, and delegates the
    actual rendering to `_show_window` with a fixed 8-bit, auto-colorspace output.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    # Drawing mode: fill colors are irrelevant; default to a 1px stroke.
    annots = parse_annotations(
        ensure_list(annotations), ignore_fields=['fill_color'],
        default={'stroke_width': 1},
        point_envelope_length=point_envelope_length,
        origin=headers.annot_origin, im_height=in_image.height
    )

    # The window region covers the annotations, expanded by context_factor
    # (optionally squared).
    region = get_annotation_region(in_image, annots, context_factor, try_square)

    annot_style = dict(
        mode=AnnotationStyleMode.DRAWING,
        point_cross=point_cross,
        point_envelope_length=point_envelope_length
    )

    return await _show_window(
        request, response, path, region, height, width, length, zoom, level,
        channels, z_slices, timepoints, min_intensities, max_intensities,
        filters, gammas, threshold, 8, Colorspace.AUTO,
        annots, annot_style, extension, headers, config,
        colormaps, c_reduction, z_reduction, t_reduction
    )
def show_image_histogram_bounds(
    path: Path = Depends(imagepath_parameter)
):
    """Return histogram bounds for the full image, all planes (C, Z, T) merged."""
    image = path.get_spatial()
    check_representation_existence(image)
    hist_kind = image.histogram_type()
    low, high = image.image_bounds()
    return HistogramInfo(type=hist_kind, minimum=low, maximum=high)
async def _show_crop(
    request: Request,
    response: Response,
    path: Path,
    annotations, context_factor, background_transparency,
    height, width, length, zoom, level,
    channels, z_slices, timepoints,
    min_intensities, max_intensities, filters, gammas, threshold,
    bits, colorspace,
    extension, headers, config,
    colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None,
):
    """Render a crop of the image masked by the given annotations.

    Parses annotations in CROP mode (fills only, strokes ignored), derives the
    window region from the annotation extent, and delegates rendering to
    `_show_window` with the requested bit depth and colorspace.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    # Crop mode: stroke attributes are irrelevant; default to a white fill.
    annots = parse_annotations(
        ensure_list(annotations), ignore_fields=['stroke_width', 'stroke_color'],
        default={'fill_color': WHITE},
        origin=headers.annot_origin, im_height=in_image.height
    )

    region = get_annotation_region(in_image, annots, context_factor)

    annot_style = dict(
        mode=AnnotationStyleMode.CROP,
        background_transparency=background_transparency
    )

    return await _show_window(
        request, response, path, region, height, width, length, zoom, level,
        channels, z_slices, timepoints, min_intensities, max_intensities,
        filters, gammas, threshold, bits, colorspace,
        annots, annot_style, extension, headers, config,
        colormaps, c_reduction, z_reduction, t_reduction
    )
def show_info(path: Path = Depends(imagepath_parameter)):
    """Aggregate all image info: image, instrument, associated, channels, representations."""
    image = path.get_original()
    check_representation_existence(image)
    return {
        "image": ImageInfo.from_image(image),
        "instrument": InstrumentInfo.from_image(image),
        "associated": AssociatedInfo.from_image(image),
        "channels": ChannelsInfo.from_image(image),
        "representations": [
            RepresentationInfo.from_path(rpr)
            for rpr in image.get_representations()
        ],
    }
def show_image_histogram(
    path: Path = Depends(imagepath_parameter),
    hist_config: HistogramConfig = Depends()
):
    """Return the histogram of the full image, all planes (C, Z, T) merged."""
    image = path.get_spatial()
    check_representation_existence(image)

    bin_count = parse_n_bins(hist_config.n_bins, len(image.value_range))
    hist_kind = image.histogram_type()
    formatted = histogram_formatter(
        image.image_histogram(), image.image_bounds(),
        bin_count, hist_config.full_range
    )
    return Histogram(type=hist_kind, **formatted)
def _show_mask(
    request: Request,
    response: Response,  # required for @cache # noqa
    path: Path,
    annotations, context_factor,
    height, width, length, zoom, level,
    extension, headers, config
):
    """Render a binary mask (8-bit) of the given annotations over their region.

    The output window covers the annotation extent expanded by `context_factor`;
    an affine matrix maps annotation coordinates into the output raster.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    # Mask rendering: stroke attributes are irrelevant; default to a white fill.
    annots = parse_annotations(
        ensure_list(annotations), ignore_fields=['stroke_width', 'stroke_color'],
        default={'fill_color': WHITE},
        origin=headers.annot_origin, im_height=in_image.height
    )

    region = get_annotation_region(in_image, annots, context_factor)

    out_format, mimetype = get_output_format(extension, headers.accept, PROCESSING_MIMETYPES)
    check_zoom_validity(in_image.pyramid, zoom)
    check_level_validity(in_image.pyramid, level)
    req_size = get_window_output_dimensions(in_image, region, height, width, length, zoom, level)
    # Clamp requested size to the configured output limit (unless safe mode allows more).
    out_size = safeguard_output_dimensions(headers.safe_mode, config.output_size_limit, *req_size)
    out_width, out_height = out_size

    # Map annotation coordinates into the (possibly rescaled) output raster.
    affine = annotation_crop_affine_matrix(annots.region, region, out_width, out_height)

    return MaskResponse(
        in_image, annots, affine, out_width, out_height, 8, out_format
    ).http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )
# NOTE(review): a function with this same name is defined earlier in this
# module (the bounds-only variant). The later definition shadows the earlier
# name at module level; if both are registered as routes via decorators they
# still work, but consider renaming for clarity — confirm against the routers.
def show_plane_histogram(
    z_slices: conint(ge=0),
    timepoints: conint(ge=0),
    path: Path = Depends(imagepath_parameter),
    hist_config: HistogramConfig = Depends(),
    channels: Optional[List[conint(ge=0)]] = Query(
        None, description="Only return histograms for these channels"
    ),
):
    """Get full histogram data per plane, for every requested (C, Z, T) combination."""
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    # Normalize scalar-or-list parameters, then resolve them to actual indexes.
    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)
    channels = get_channel_indexes(in_image, channels)
    z_slices = get_zslice_indexes(in_image, z_slices)
    timepoints = get_timepoint_indexes(in_image, timepoints)

    histograms = []
    n_bins = parse_n_bins(hist_config.n_bins, len(in_image.value_range))
    htype = in_image.histogram_type()
    # One histogram per plane: full cartesian product of requested C x Z x T.
    for c, z, t in itertools.product(channels, z_slices, timepoints):
        histograms.append(
            PlaneHistogram(
                channel=c, z_slice=z, timepoint=t, type=htype,
                color=in_image.channels[c].hex_color,
                **histogram_formatter(
                    in_image.plane_histogram(c, z, t),
                    in_image.plane_bounds(c, z, t),
                    n_bins, hist_config.full_range
                )
            )
        )

    return response_list(histograms)
def compute_histogram(
    response: Response,
    background: BackgroundTasks,
    path: Path = Depends(imagepath_parameter),
    # companion_file_id: Optional[int] = Body(None, description="Cytomine ID for the histogram")
    sync: bool = True,
    overwrite: bool = True
):
    """Request histogram computation for an image.

    Runs synchronously (201 Created) or as a background task (202 Accepted).
    """
    image = path.get_spatial()
    check_representation_existence(image)

    hist_type = HistogramType.FAST  # TODO: allow to build complete histograms
    hist_path = image.processed_root() / Path(HISTOGRAM_STEM)

    if not sync:
        background.add_task(build_histogram_file, image, hist_path, hist_type, overwrite)
        response.status_code = status.HTTP_202_ACCEPTED
    else:
        build_histogram_file(image, hist_path, hist_type, overwrite)
        response.status_code = status.HTTP_201_CREATED
def show_channels_histogram(
    path: Path = Depends(imagepath_parameter),
    hist_config: HistogramConfig = Depends(),
    channels: Optional[List[conint(ge=0)]] = Query(
        None, description="Only return histograms for these channels"
    ),
):
    """Get full histogram data per channel, with all planes (Z, T) merged."""
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    channels = ensure_list(channels)
    channels = get_channel_indexes(in_image, channels)

    histograms = []
    n_bins = parse_n_bins(hist_config.n_bins, len(in_image.value_range))
    htype = in_image.histogram_type()
    # TODO: batch the per-channel fetches instead of calling
    #  channel_histogram/channel_bounds once per channel, e.g.:
    # hist_filter = operator.itemgetter(*channels)
    # channels_bounds = hist_filter(in_image.channels_bounds())
    # channels_histograms = hist_filter(in_image.channel_histogram())
    for channel in channels:
        histograms.append(
            ChannelHistogram(
                channel=channel, type=htype,
                color=in_image.channels[channel].hex_color,
                **histogram_formatter(
                    in_image.channel_histogram(channel),
                    in_image.channel_bounds(channel),
                    n_bins, hist_config.full_range
                )
            )
        )

    return response_list(histograms)
def _show_tile(
    request: Request,
    response: Response,  # required for @cache # noqa
    path: Path,
    normalized: bool,
    tile: dict,
    channels, z_slices, timepoints,
    min_intensities, max_intensities,
    filters, gammas, threshold, log,
    extension, headers, config,
    colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None
):
    """Render a pyramid tile, addressed by index (`ti`) or coordinates (`tx`/`ty`)
    at a given `zoom` or `level` tier, with intensity/filter/colormap processing.

    When a normalized pyramid must be emulated (the native pyramid is not
    normalized), the tile is served as a window over the normalized pyramid.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    # Choose which pyramid to address; a non-native normalized pyramid is
    # served through the window path instead of the raw tile path.
    if not normalized or in_image.is_pyramid_normalized:
        pyramid = in_image.pyramid
        is_window = False
    else:
        pyramid = in_image.normalized_pyramid
        is_window = True

    # The tier can be given either as a zoom or as a level.
    if 'zoom' in tile:
        reference_tier_index = tile['zoom']
        tier_index_type = TierIndexType.ZOOM
    else:
        reference_tier_index = tile['level']
        tier_index_type = TierIndexType.LEVEL

    # The tile can be addressed by flat index (ti) or by (tx, ty) coordinates.
    if 'ti' in tile:
        check_tileindex_validity(pyramid, tile['ti'], reference_tier_index, tier_index_type)
        tile_region = pyramid.get_tier_at(
            reference_tier_index, tier_index_type
        ).get_ti_tile(tile['ti'])
    else:
        check_tilecoord_validity(
            pyramid, tile['tx'], tile['ty'], reference_tier_index, tier_index_type
        )
        tile_region = pyramid.get_tier_at(
            reference_tier_index, tier_index_type
        ).get_txty_tile(tile['tx'], tile['ty'])

    out_format, mimetype = get_output_format(extension, headers.accept, VISUALISATION_MIMETYPES)
    req_size = tile_region.width, tile_region.height
    out_size = safeguard_output_dimensions(headers.safe_mode, config.output_size_limit, *req_size)
    out_width, out_height = out_size

    # Normalize scalar-or-list parameters and resolve plane indexes; each
    # reduction must be valid for the number of planes it reduces.
    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)
    channels = get_channel_indexes(in_image, channels)
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, z_slices)
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, timepoints)
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    # NOTE: check_array_size_parameters reads these variables by name from
    # locals(), so the names above must not change.
    array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas')
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False
    )
    intensities = parse_intensity_bounds(
        in_image, channels, z_slices, timepoints, min_intensities, max_intensities
    )
    min_intensities, max_intensities = intensities
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels)

    array_parameters = ('filters', )
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1], nullable=False
    )
    filters = parse_filter_ids(filters, FILTERS)

    if is_window:
        # Emulated tile over the normalized pyramid: fixed 8-bit, auto colorspace.
        tile = WindowResponse(
            in_image, channels, z_slices, timepoints,
            tile_region, out_format, out_width, out_height,
            c_reduction, z_reduction, t_reduction,
            gammas, filters, colormaps, min_intensities, max_intensities,
            log, 8, threshold, Colorspace.AUTO
        )
    else:
        tile = TileResponse(
            in_image, channels, z_slices, timepoints,
            tile_region, out_format, out_width, out_height,
            c_reduction, z_reduction, t_reduction,
            gammas, filters, colormaps, min_intensities, max_intensities,
            log, threshold
        )

    return tile.http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )
async def _show_window(
    request: Request,
    response: Response,  # required for @cache # noqa
    path: Path,
    region: Union[Region, dict],
    height, width, length, zoom, level,
    channels, z_slices, timepoints,
    min_intensities, max_intensities, filters, gammas, threshold,
    bits, colorspace,
    annotations: Union[ParsedAnnotations, dict, List[dict]],
    annotation_style: dict,
    extension, headers, config: Settings,
    colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None
):
    """Render an arbitrary window of the image, optionally styled with annotations.

    The window `region` may be a parsed `Region` or a raw dict describing either
    a top/left/width/height rectangle, a flat tile index (`ti`), or tile
    coordinates (`tx`/`ty`) at a given pyramid tier.

    Fix: the raw tile-coordinate branch previously tested
    `('tx', 'ty') in region`, i.e. whether the *tuple* is a dict key — which is
    never true for a string-keyed region dict, so WindowTileCoord regions fell
    through unparsed. It now checks both keys individually.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    if not isinstance(region, Region):
        tier_index_type = region['tier_index_type']
        reference_tier_index = region['reference_tier_index']
        if reference_tier_index is None:
            # Default to the most detailed tier for the given index type.
            if tier_index_type == TierIndexType.LEVEL:
                reference_tier_index = 0
            else:
                reference_tier_index = in_image.pyramid.max_zoom

        if 'top' in region:
            # Parse raw WindowRegion to Region
            region = parse_region(
                in_image, region['top'], region['left'],
                region['width'], region['height'],
                reference_tier_index, tier_index_type, silent_oob=False
            )
        elif 'ti' in region:
            # Parse raw WindowTileIndex region to Region
            check_tileindex_validity(
                in_image.pyramid, region['ti'],
                reference_tier_index, tier_index_type
            )
            region = in_image.pyramid.get_tier_at(
                reference_tier_index, tier_index_type
            ).get_ti_tile(region['ti'])
        elif 'tx' in region and 'ty' in region:
            # Parse raw WindowTileCoord region to Region
            check_tilecoord_validity(
                in_image.pyramid, region['tx'], region['ty'],
                reference_tier_index, tier_index_type
            )
            region = in_image.pyramid.get_tier_at(
                reference_tier_index, tier_index_type
            ).get_txty_tile(region['tx'], region['ty'])

    out_format, mimetype = get_output_format(extension, headers.accept, VISUALISATION_MIMETYPES)
    check_zoom_validity(in_image.pyramid, zoom)
    check_level_validity(in_image.pyramid, level)
    req_size = get_window_output_dimensions(in_image, region, height, width, length, zoom, level)
    # Clamp requested size to the configured output limit (unless safe mode allows more).
    out_size = safeguard_output_dimensions(headers.safe_mode, config.output_size_limit, *req_size)
    out_width, out_height = out_size

    # Normalize scalar-or-list parameters and resolve plane indexes; each
    # reduction must be valid for the number of planes it reduces.
    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)
    channels = get_channel_indexes(in_image, channels)
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, z_slices)
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, timepoints)
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    # NOTE: check_array_size_parameters reads these variables by name from
    # locals(), so the names above must not change.
    array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas')
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False
    )
    intensities = parse_intensity_bounds(
        in_image, channels, z_slices, timepoints, min_intensities, max_intensities
    )
    min_intensities, max_intensities = intensities
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels)

    array_parameters = ('filters',)
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1], nullable=False
    )
    filters = parse_filter_ids(filters, FILTERS)

    out_bitdepth = parse_bitdepth(in_image, bits)

    # Raw (unparsed) annotations: parse them according to the style mode.
    if annotations and annotation_style and not isinstance(annotations, ParsedAnnotations):
        if annotation_style['mode'] == AnnotationStyleMode.DRAWING:
            ignore_fields = ['fill_color']
            default = {'stroke_color': RED, 'stroke_width': 1}
            point_envelope_length = annotation_style['point_envelope_length']
        else:
            ignore_fields = ['stroke_width', 'stroke_color']
            default = {'fill_color': WHITE}
            point_envelope_length = None

        annotations = parse_annotations(
            ensure_list(annotations), ignore_fields, default, point_envelope_length,
            origin=headers.annot_origin, im_height=in_image.height
        )

    # Map annotation coordinates into the output raster, if any.
    affine = None
    if annotations:
        affine = annotation_crop_affine_matrix(annotations.region, region, *out_size)

    if annotations and annotation_style and \
            annotation_style['mode'] == AnnotationStyleMode.MASK:
        window = MaskResponse(
            in_image, annotations, affine,
            out_width, out_height, out_bitdepth, out_format
        )
    else:
        window = WindowResponse(
            in_image, channels, z_slices, timepoints,
            region, out_format, out_width, out_height,
            c_reduction, z_reduction, t_reduction,
            gammas, filters, colormaps, min_intensities, max_intensities,
            False, out_bitdepth, threshold, colorspace,
            annotations, affine, annotation_style
        )

    return window.http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )
def _show_resized(
    request: Request,
    response: Response,  # required for @cache # noqa
    path: Path,
    height, width, length, zoom, level,
    channels, z_slices, timepoints,
    min_intensities, max_intensities,
    filters, gammas, threshold, bits, colorspace,
    extension, headers, config: Settings,
    colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None
):
    """Render the whole image resized to the requested dimensions, with
    intensity/filter/colormap processing applied.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    out_format, mimetype = get_output_format(extension, headers.accept, PROCESSING_MIMETYPES)
    check_zoom_validity(in_image.pyramid, zoom)
    check_level_validity(in_image.pyramid, level)
    req_size = get_thumb_output_dimensions(in_image, height, width, length, zoom, level)
    # Clamp requested size to the configured output limit (unless safe mode allows more).
    out_size = safeguard_output_dimensions(headers.safe_mode, config.output_size_limit, *req_size)
    out_width, out_height = out_size

    # Normalize scalar-or-list parameters and resolve plane indexes; each
    # reduction must be valid for the number of planes it reduces.
    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)
    channels = get_channel_indexes(in_image, channels)
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, z_slices)
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, timepoints)
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    # NOTE: check_array_size_parameters reads these variables by name from
    # locals(), so the names above must not change.
    array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas')
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False
    )
    intensities = parse_intensity_bounds(
        in_image, channels, z_slices, timepoints, min_intensities, max_intensities
    )
    min_intensities, max_intensities = intensities
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels)

    array_parameters = ('filters',)
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1], nullable=False
    )
    filters = parse_filter_ids(filters, FILTERS)

    out_bitdepth = parse_bitdepth(in_image, bits)

    return ResizedResponse(
        in_image, channels, z_slices, timepoints,
        out_format, out_width, out_height,
        c_reduction, z_reduction, t_reduction,
        gammas, filters, colormaps, min_intensities, max_intensities,
        False, out_bitdepth, threshold, colorspace
    ).http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )