def show_plane_histogram(
    z_slices: conint(ge=0),
    timepoints: conint(ge=0),
    path: Path = Depends(imagepath_parameter),
    channels: Optional[List[conint(ge=0)]] = Query(
        None, description="Only return histograms for these channels"
    ),
):
    """
    Get histogram bounds per plane.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)

    channels = get_channel_indexes(in_image, channels)
    z_slices = get_zslice_indexes(in_image, z_slices)
    timepoints = get_timepoint_indexes(in_image, timepoints)

    hist_info = []
    htype = in_image.histogram_type()
    for c, z, t in itertools.product(channels, z_slices, timepoints):
        mini, maxi = in_image.plane_bounds(c, z, t)
        hist_info.append(
            PlaneHistogramInfo(
                channel=c, z_slice=z, timepoint=t, type=htype,
                color=in_image.channels[c].hex_color,
                minimum=mini, maximum=maxi
            )
        )

    return response_list(hist_info)

def show_channels_histogram_bounds(
    path: Path = Depends(imagepath_parameter),
    channels: Optional[List[conint(ge=0)]] = Query(
        None, description="Only return histograms for these channels"
    ),
):
    """
    Get histogram bounds per channel where all planes (Z,T) are merged.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    channels = ensure_list(channels)
    channels = get_channel_indexes(in_image, channels)

    hist_info = []
    htype = in_image.histogram_type()
    hist_filter = operator.itemgetter(*channels)
    channels_bounds = hist_filter(in_image.channels_bounds())
    if len(channels) == 1:
        # itemgetter with a single index returns the bare bounds tuple,
        # so re-wrap it to keep the zip below uniform.
        channels_bounds = [channels_bounds]

    for channel, bounds in zip(channels, channels_bounds):
        mini, maxi = bounds
        hist_info.append(
            ChannelHistogramInfo(
                channel=channel, type=htype,
                color=in_image.channels[channel].hex_color,
                minimum=mini, maximum=maxi
            )
        )

    return response_list(hist_info)

async def _show_drawing(
    request: Request, response: Response,  # required for @cache  # noqa
    path: Path, annotations, context_factor, try_square, point_cross,
    point_envelope_length, height, width, length, zoom, level, channels,
    z_slices, timepoints, min_intensities, max_intensities, filters, gammas,
    threshold, log, extension, headers, config, colormaps=None,
    c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None,
):
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    annots = parse_annotations(
        ensure_list(annotations), ignore_fields=['fill_color'],
        default={'stroke_width': 1},
        point_envelope_length=point_envelope_length,
        origin=headers.annot_origin, im_height=in_image.height
    )

    region = get_annotation_region(in_image, annots, context_factor, try_square)

    annot_style = dict(
        mode=AnnotationStyleMode.DRAWING, point_cross=point_cross,
        point_envelope_length=point_envelope_length
    )

    return await _show_window(
        request, response, path, region, height, width, length, zoom, level,
        channels, z_slices, timepoints, min_intensities, max_intensities,
        filters, gammas, threshold, 8, Colorspace.AUTO, annots, annot_style,
        extension, headers, config, colormaps, c_reduction, z_reduction,
        t_reduction
    )

def show_plane_histogram(
    z_slices: conint(ge=0),
    timepoints: conint(ge=0),
    path: Path = Depends(imagepath_parameter),
    hist_config: HistogramConfig = Depends(),
    channels: Optional[List[conint(ge=0)]] = Query(
        None, description="Only return histograms for these channels"
    ),
):
    """
    Get histogram per plane.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)

    channels = get_channel_indexes(in_image, channels)
    z_slices = get_zslice_indexes(in_image, z_slices)
    timepoints = get_timepoint_indexes(in_image, timepoints)

    histograms = []
    n_bins = parse_n_bins(hist_config.n_bins, len(in_image.value_range))
    htype = in_image.histogram_type()
    for c, z, t in itertools.product(channels, z_slices, timepoints):
        histograms.append(
            PlaneHistogram(
                channel=c, z_slice=z, timepoint=t, type=htype,
                color=in_image.channels[c].hex_color,
                **histogram_formatter(
                    in_image.plane_histogram(c, z, t),
                    in_image.plane_bounds(c, z, t),
                    n_bins, hist_config.full_range
                )
            )
        )

    return response_list(histograms)

def __init__(
    self, geometry: BaseGeometry,
    c: PlaneIndex, z: PlaneIndex, t: PlaneIndex,
    terms: Optional[List[str]] = None,
    properties: Optional[Dict[str, Any]] = None
):
    """
    Initialize an annotation from image metadata.

    Parameters
    ----------
    geometry
        A valid geometry
    c
        The channel(s) to which the annotation is linked
    z
        The z-slice(s) to which the annotation is linked
    t
        The timepoint(s) to which the annotation is linked
    terms
        The terms (labels) associated with the annotation
    properties
        Other properties (key-value pairs) associated with the annotation
    """
    self.geometry = geometry
    self.channels = ensure_list(c)
    self.z_slices = ensure_list(z)
    self.timepoints = ensure_list(t)

    if terms is None:
        terms = []
    self.terms = terms

    if properties is None:
        properties = dict()
    self.properties = properties

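# Hedged usage sketch of the constructor above. The enclosing class name is not
# shown in this snippet, so `Annotation` is a placeholder, and shapely's Point is
# assumed to be an acceptable BaseGeometry; only the attribute behaviour follows
# from the code itself:
#
#   from shapely.geometry import Point
#   annot = Annotation(geometry=Point(10, 20), c=0, z=[0, 1], t=0, terms=["tumor"])
#   annot.channels    # -> [0]      (scalar wrapped by ensure_list)
#   annot.z_slices    # -> [0, 1]   (list passed through unchanged)
#   annot.timepoints  # -> [0]
#   annot.terms       # -> ["tumor"]
#   annot.properties  # -> {}       (defaults to an empty dict)
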
def _pages_to_read(self, spp, channels, z, t):
    pages = OrderedDict()
    if channels is None:
        channels = list(range(self.format.main_imd.n_channels))
    for c in ensure_list(channels):
        intrinsic_c = c // spp
        s = c % spp
        page_index = self.format.planes_info.get(intrinsic_c, z, t, 'page_index')
        if page_index in pages:
            pages[page_index].append(s)
        else:
            pages[page_index] = [s]
    return pages

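# Worked example of the page/sample arithmetic in _pages_to_read, assuming an image
# whose planes are stored with spp = 3 samples per page (intrinsic channel = c // spp,
# sample within that page = c % spp). The page indexes below are illustrative only:
#
#   requested channels = [0, 1, 4], spp = 3
#     c = 0 -> intrinsic_c = 0, s = 0
#     c = 1 -> intrinsic_c = 0, s = 1
#     c = 4 -> intrinsic_c = 1, s = 1
#   so if intrinsic channel 0 lives on page 7 and intrinsic channel 1 on page 8,
#   the result is OrderedDict({7: [0, 1], 8: [1]}).
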
async def _show_crop(
    request: Request, response: Response, path: Path, annotations,
    context_factor, background_transparency, height, width, length, zoom,
    level, channels, z_slices, timepoints, min_intensities, max_intensities,
    filters, gammas, threshold, bits, colorspace, extension, headers, config,
    colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None,
    t_reduction=None,
):
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    annots = parse_annotations(
        ensure_list(annotations),
        ignore_fields=['stroke_width', 'stroke_color'],
        default={'fill_color': WHITE},
        origin=headers.annot_origin, im_height=in_image.height
    )

    region = get_annotation_region(in_image, annots, context_factor)

    annot_style = dict(
        mode=AnnotationStyleMode.CROP,
        background_transparency=background_transparency
    )

    return await _show_window(
        request, response, path, region, height, width, length, zoom, level,
        channels, z_slices, timepoints, min_intensities, max_intensities,
        filters, gammas, threshold, bits, colorspace, annots, annot_style,
        extension, headers, config, colormaps, c_reduction, z_reduction,
        t_reduction
    )

def parse_planes(
    planes_to_parse: List[Union[int, str]], n_planes: int,
    default: Union[int, List[int]] = 0, name: str = 'planes'
) -> List[int]:
    """
    Get a set of planes from a list of plane indexes and ranges.

    Parameters
    ----------
    planes_to_parse
        List of plane indexes and ranges to parse.
    n_planes
        Number of planes. It is the maximum output set size.
    default
        Plane index or list of plane indexes used as the default set if
        `planes_to_parse` is empty. The default is returned as a set, and
        default values are expected to be in the acceptable range.
    name
        Name of the plane dimension (e.g. 'channels', 'z_slices', ...) used
        in exception messages.

    Returns
    -------
    plane_set
        Ordered list of valid plane indexes (where duplicates have been removed).

    Raises
    ------
    BadRequestException
        If an item of `planes_to_parse` is invalid.
        If the set of valid planes is empty.
    """
    plane_indexes = list()

    if len(planes_to_parse) == 0:
        return sorted(set(ensure_list(default)))

    for plane in planes_to_parse:
        if type(plane) is int:
            plane_indexes.append(plane)
        elif is_range(plane):
            plane_indexes += [*parse_range(plane, 0, n_planes)]
        else:
            raise BadRequestException(
                detail=f'{plane} is not a valid index or range for {name}.'
            )

    plane_set = sorted(set([idx for idx in plane_indexes if 0 <= idx < n_planes]))
    if len(plane_set) == 0:
        raise BadRequestException(detail=f"No valid indexes for {name}")
    return plane_set

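# Hedged usage sketch of parse_planes, limited to integer inputs so it only relies
# on behaviour visible above (string ranges go through is_range / parse_range, which
# are not shown here, so their syntax is not exercised):
#
#   parse_planes([], n_planes=5, default=0)          # -> [0]     (default set)
#   parse_planes([3, 1, 3, 1], n_planes=5)           # -> [1, 3]  (sorted, de-duplicated)
#   parse_planes([9], n_planes=5, name='channels')   # raises BadRequestException
#                                                    # ("No valid indexes for channels")
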
def _show_mask(
    request: Request, response: Response,  # required for @cache  # noqa
    path: Path, annotations, context_factor, height, width, length, zoom,
    level, extension, headers, config
):
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    annots = parse_annotations(
        ensure_list(annotations),
        ignore_fields=['stroke_width', 'stroke_color'],
        default={'fill_color': WHITE},
        origin=headers.annot_origin, im_height=in_image.height
    )

    region = get_annotation_region(in_image, annots, context_factor)

    out_format, mimetype = get_output_format(
        extension, headers.accept, PROCESSING_MIMETYPES
    )
    check_zoom_validity(in_image.pyramid, zoom)
    check_level_validity(in_image.pyramid, level)
    req_size = get_window_output_dimensions(
        in_image, region, height, width, length, zoom, level
    )
    out_size = safeguard_output_dimensions(
        headers.safe_mode, config.output_size_limit, *req_size
    )
    out_width, out_height = out_size

    affine = annotation_crop_affine_matrix(
        annots.region, region, out_width, out_height
    )

    return MaskResponse(
        in_image, annots, affine, out_width, out_height, 8, out_format
    ).http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )

def show_channels_histogram(
    path: Path = Depends(imagepath_parameter),
    hist_config: HistogramConfig = Depends(),
    channels: Optional[List[conint(ge=0)]] = Query(
        None, description="Only return histograms for these channels"
    ),
):
    """
    Get histograms per channel where all planes (Z,T) are merged.
    """
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    channels = ensure_list(channels)
    channels = get_channel_indexes(in_image, channels)

    histograms = []
    n_bins = parse_n_bins(hist_config.n_bins, len(in_image.value_range))
    htype = in_image.histogram_type()
    # hist_filter = operator.itemgetter(*channels)
    # channels_bounds = hist_filter(in_image.channels_bounds())
    # channels_histograms = hist_filter(in_image.channel_histogram())  # TODO
    for channel in channels:
        histograms.append(
            ChannelHistogram(
                channel=channel, type=htype,
                color=in_image.channels[channel].hex_color,
                **histogram_formatter(
                    in_image.channel_histogram(channel),
                    in_image.channel_bounds(channel),
                    n_bins, hist_config.full_range
                )
            )
        )

    return response_list(histograms)

async def legacy_import(
    request: Request, background: BackgroundTasks,
    core: Optional[str] = None,
    cytomine: Optional[str] = None,
    storage: Optional[int] = None,
    id_storage: Optional[int] = Query(None, alias='idStorage'),
    projects: Optional[str] = None,
    id_project: Optional[str] = Query(None, alias='idProject'),
    sync: Optional[bool] = False,
    keys: Optional[str] = None,
    values: Optional[str] = None,
    upload_name: str = Form(..., alias="files[].name"),
    upload_path: str = Form(..., alias="files[].path"),
    upload_size: int = Form(..., alias="files[].size"),
    config: Settings = Depends(get_settings)
):
    """
    Import a file (legacy)
    """
    core = cytomine if cytomine is not None else core
    if not core:
        raise BadRequestException(detail="core or cytomine parameter missing.")

    id_storage = id_storage if id_storage is not None else storage
    if not id_storage:
        raise BadRequestException(detail="idStorage or storage parameter missing.")

    projects_to_parse = id_project if id_project is not None else projects
    try:
        id_projects = []
        if projects_to_parse:
            projects = ensure_list(projects_to_parse.split(","))
            id_projects = [int(p) for p in projects]
    except ValueError:
        raise BadRequestException(detail="Invalid projects or idProject parameter.")

    public_key, signature = parse_authorization_header(request.headers)
    cytomine_auth = (core, config.cytomine_public_key, config.cytomine_private_key)

    with Cytomine(*cytomine_auth, configure_logging=False) as c:
        if not c.current_user:
            raise AuthenticationException("PIMS authentication to Cytomine failed.")

        this = get_this_image_server(config.pims_url)
        cyto_keys = c.get(f"userkey/{public_key}/keys.json")
        private_key = cyto_keys["privateKey"]

        if sign_token(private_key, parse_request_token(request)) != signature:
            raise AuthenticationException("Authentication to Cytomine failed")

        c.set_credentials(public_key, private_key)
        user = c.current_user

        storage = Storage().fetch(id_storage)
        if not storage:
            raise CytomineProblem(f"Storage {id_storage} not found")

        projects = ProjectCollection()
        for pid in id_projects:
            project = Project().fetch(pid)
            if not project:
                raise CytomineProblem(f"Project {pid} not found")
            projects.append(project)

        keys = keys.split(',') if keys is not None else []
        values = values.split(',') if values is not None else []
        if len(keys) != len(values):
            raise CytomineProblem(f"Keys {keys} and values {values} have varying size.")
        user_properties = zip(keys, values)

        upload_name = sanitize_filename(upload_name)
        root = UploadedFile(
            upload_name, upload_path, upload_size, "", "", id_projects,
            id_storage, user.id, this.id, UploadedFile.UPLOADED
        )

        cytomine = CytomineListener(
            cytomine_auth, root, projects=projects,
            user_properties=user_properties
        )

        if sync:
            try:
                run_import(
                    upload_path, upload_name,
                    extra_listeners=[cytomine], prefer_copy=False
                )
                root = cytomine.initial_uf.fetch()
                images = cytomine.images
                return [{
                    "status": 200,
                    "name": upload_name,
                    "uploadedFile": serialize_cytomine_model(root),
                    "images": [{
                        "image": serialize_cytomine_model(image[0]),
                        "imageInstances": serialize_cytomine_model(image[1])
                    } for image in images]
                }]
            except Exception as e:
                traceback.print_exc()
                return JSONResponse(
                    content=[{
                        "status": 500,
                        "error": str(e),
                        "files": [{
                            "name": upload_name,
                            "size": 0,
                            "error": str(e)
                        }]
                    }],
                    status_code=400
                )
        else:
            send_task(
                Task.IMPORT_WITH_CYTOMINE,
                args=[cytomine_auth, upload_path, upload_name, cytomine, False],
                starlette_background=background
            )
            return JSONResponse(
                content=[{
                    "status": 200,
                    "name": upload_name,
                    "uploadedFile": serialize_cytomine_model(root),
                    "images": []
                }],
                status_code=200
            )

def test_ensure_list():
    assert ensure_list(3) == [3]
    assert ensure_list((2, 4)) == [(2, 4)]
    assert ensure_list("a") == ['a']
    assert ensure_list([2]) == [2]
    assert ensure_list(None) == []

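# A minimal sketch of what `ensure_list` presumably does, consistent with the
# assertions above (the real helper may differ; this is an assumption, not the
# actual implementation): lists pass through, None becomes an empty list, and any
# other value -- including tuples and strings -- is wrapped in a one-element list.
def ensure_list_sketch(value):
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [value]


assert ensure_list_sketch(None) == []
assert ensure_list_sketch((2, 4)) == [(2, 4)]
assert ensure_list_sketch("a") == ['a']
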
def _show_resized(
    request: Request, response: Response,  # required for @cache  # noqa
    path: Path, height, width, length, zoom, level, channels, z_slices,
    timepoints, min_intensities, max_intensities, filters, gammas, threshold,
    bits, colorspace, extension, headers, config: Settings, colormaps=None,
    c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None
):
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    out_format, mimetype = get_output_format(
        extension, headers.accept, PROCESSING_MIMETYPES
    )
    check_zoom_validity(in_image.pyramid, zoom)
    check_level_validity(in_image.pyramid, level)
    req_size = get_thumb_output_dimensions(
        in_image, height, width, length, zoom, level
    )
    out_size = safeguard_output_dimensions(
        headers.safe_mode, config.output_size_limit, *req_size
    )
    out_width, out_height = out_size

    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)

    channels = get_channel_indexes(in_image, channels)
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, z_slices)
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, timepoints)
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas')
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False
    )
    intensities = parse_intensity_bounds(
        in_image, channels, z_slices, timepoints, min_intensities, max_intensities
    )
    min_intensities, max_intensities = intensities
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels)

    array_parameters = ('filters',)
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1], nullable=False
    )
    filters = parse_filter_ids(filters, FILTERS)

    out_bitdepth = parse_bitdepth(in_image, bits)

    return ResizedResponse(
        in_image, channels, z_slices, timepoints, out_format, out_width,
        out_height, c_reduction, z_reduction, t_reduction, gammas, filters,
        colormaps, min_intensities, max_intensities, False, out_bitdepth,
        threshold, colorspace
    ).http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )

def _show_window( request: Request, response: Response, # required for @cache # noqa path: Path, region: Union[Region, dict], height, width, length, zoom, level, channels, z_slices, timepoints, min_intensities, max_intensities, filters, gammas, threshold, bits, colorspace, annotations: Union[ParsedAnnotations, dict, List[dict]], annotation_style: dict, extension, headers, config: Settings, colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None ): in_image = path.get_spatial() check_representation_existence(in_image) if not isinstance(region, Region): tier_index_type = region['tier_index_type'] reference_tier_index = region['reference_tier_index'] if reference_tier_index is None: if tier_index_type == TierIndexType.LEVEL: reference_tier_index = 0 else: reference_tier_index = in_image.pyramid.max_zoom if 'top' in region: # Parse raw WindowRegion to Region region = parse_region( in_image, region['top'], region['left'], region['width'], region['height'], reference_tier_index, tier_index_type, silent_oob=False ) elif 'ti' in region: # Parse raw WindowTileIndex region to Region check_tileindex_validity( in_image.pyramid, region['ti'], reference_tier_index, tier_index_type ) region = in_image.pyramid.get_tier_at( reference_tier_index, tier_index_type ).get_ti_tile(region['ti']) elif ('tx', 'ty') in region: # Parse raw WindowTileCoord region to Region check_tilecoord_validity( in_image.pyramid, region['tx'], region['ty'], reference_tier_index, tier_index_type ) region = in_image.pyramid.get_tier_at( reference_tier_index, tier_index_type ).get_txty_tile(region['tx'], region['ty']) out_format, mimetype = get_output_format(extension, headers.accept, VISUALISATION_MIMETYPES) check_zoom_validity(in_image.pyramid, zoom) check_level_validity(in_image.pyramid, level) req_size = get_window_output_dimensions(in_image, region, height, width, length, zoom, level) out_size = safeguard_output_dimensions(headers.safe_mode, config.output_size_limit, *req_size) out_width, out_height = out_size channels = ensure_list(channels) z_slices = ensure_list(z_slices) timepoints = ensure_list(timepoints) channels = get_channel_indexes(in_image, channels) check_reduction_validity(channels, c_reduction, 'channels') z_slices = get_zslice_indexes(in_image, z_slices) check_reduction_validity(z_slices, z_reduction, 'z_slices') timepoints = get_timepoint_indexes(in_image, timepoints) check_reduction_validity(timepoints, t_reduction, 'timepoints') min_intensities = ensure_list(min_intensities) max_intensities = ensure_list(max_intensities) colormaps = ensure_list(colormaps) filters = ensure_list(filters) gammas = ensure_list(gammas) array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas') check_array_size_parameters( array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False ) intensities = parse_intensity_bounds( in_image, channels, z_slices, timepoints, min_intensities, max_intensities ) min_intensities, max_intensities = intensities colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels) array_parameters = ('filters',) check_array_size_parameters( array_parameters, locals(), allowed=[0, 1], nullable=False ) filters = parse_filter_ids(filters, FILTERS) out_bitdepth = parse_bitdepth(in_image, bits) if annotations and annotation_style and not isinstance(annotations, ParsedAnnotations): if annotation_style['mode'] == AnnotationStyleMode.DRAWING: ignore_fields = ['fill_color'] default = {'stroke_color': RED, 'stroke_width': 1} 
point_envelope_length = annotation_style['point_envelope_length'] else: ignore_fields = ['stroke_width', 'stroke_color'] default = {'fill_color': WHITE} point_envelope_length = None annotations = parse_annotations( ensure_list(annotations), ignore_fields, default, point_envelope_length, origin=headers.annot_origin, im_height=in_image.height ) affine = None if annotations: affine = annotation_crop_affine_matrix(annotations.region, region, *out_size) if annotations and annotation_style and \ annotation_style['mode'] == AnnotationStyleMode.MASK: window = MaskResponse( in_image, annotations, affine, out_width, out_height, out_bitdepth, out_format ) else: window = WindowResponse( in_image, channels, z_slices, timepoints, region, out_format, out_width, out_height, c_reduction, z_reduction, t_reduction, gammas, filters, colormaps, min_intensities, max_intensities, False, out_bitdepth, threshold, colorspace, annotations, affine, annotation_style ) return window.http_response( mimetype, extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size) )
def _show_tile(
    request: Request, response: Response,  # required for @cache  # noqa
    path: Path, normalized: bool, tile: dict, channels, z_slices, timepoints,
    min_intensities, max_intensities, filters, gammas, threshold, log,
    extension, headers, config, colormaps=None,
    c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None
):
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    if not normalized or in_image.is_pyramid_normalized:
        pyramid = in_image.pyramid
        is_window = False
    else:
        pyramid = in_image.normalized_pyramid
        is_window = True

    if 'zoom' in tile:
        reference_tier_index = tile['zoom']
        tier_index_type = TierIndexType.ZOOM
    else:
        reference_tier_index = tile['level']
        tier_index_type = TierIndexType.LEVEL

    if 'ti' in tile:
        check_tileindex_validity(
            pyramid, tile['ti'], reference_tier_index, tier_index_type
        )
        tile_region = pyramid.get_tier_at(
            reference_tier_index, tier_index_type
        ).get_ti_tile(tile['ti'])
    else:
        check_tilecoord_validity(
            pyramid, tile['tx'], tile['ty'], reference_tier_index, tier_index_type
        )
        tile_region = pyramid.get_tier_at(
            reference_tier_index, tier_index_type
        ).get_txty_tile(tile['tx'], tile['ty'])

    out_format, mimetype = get_output_format(
        extension, headers.accept, VISUALISATION_MIMETYPES
    )
    req_size = tile_region.width, tile_region.height
    out_size = safeguard_output_dimensions(
        headers.safe_mode, config.output_size_limit, *req_size
    )
    out_width, out_height = out_size

    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)

    channels = get_channel_indexes(in_image, channels)
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, z_slices)
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, timepoints)
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas')
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False
    )
    intensities = parse_intensity_bounds(
        in_image, channels, z_slices, timepoints, min_intensities, max_intensities
    )
    min_intensities, max_intensities = intensities
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels)

    array_parameters = ('filters',)
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1], nullable=False
    )
    filters = parse_filter_ids(filters, FILTERS)

    if is_window:
        tile = WindowResponse(
            in_image, channels, z_slices, timepoints, tile_region, out_format,
            out_width, out_height, c_reduction, z_reduction, t_reduction,
            gammas, filters, colormaps, min_intensities, max_intensities,
            log, 8, threshold, Colorspace.AUTO
        )
    else:
        tile = TileResponse(
            in_image, channels, z_slices, timepoints, tile_region, out_format,
            out_width, out_height, c_reduction, z_reduction, t_reduction,
            gammas, filters, colormaps, min_intensities, max_intensities,
            log, threshold
        )

    return tile.http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )