Example #1
def test_parse_intensity_bounds():
    class FakeImage:
        def __init__(self, significant_bits, n_channels):
            self.n_channels = n_channels
            self.significant_bits = significant_bits

        def channel_bounds(self, channel):
            return [channel, channel + 10]

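    # 8-bit, single channel: empty bounds and AUTO_IMAGE default to the full
    # 0-255 range; STRETCH_IMAGE uses the fake channel bounds; numeric maxima
    # are clamped to 255.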
    assert parse_intensity_bounds(FakeImage(8, 1), [0], [0], [0], [], []) == ([0], [255])
    assert parse_intensity_bounds(
        FakeImage(8, 1), [0], [0], [0], [IntensitySelectionEnum.AUTO_IMAGE],
        [IntensitySelectionEnum.AUTO_IMAGE]
    ) == ([0], [255])
    assert parse_intensity_bounds(
        FakeImage(8, 1), [0], [0], [0], [IntensitySelectionEnum.STRETCH_IMAGE],
        [IntensitySelectionEnum.STRETCH_IMAGE]
    ) == ([0], [10])
    assert parse_intensity_bounds(FakeImage(8, 1), [0], [0], [0], [10], [100]) == ([10], [100])
    assert parse_intensity_bounds(FakeImage(8, 1), [0], [0], [0], [10], [1000]) == ([10], [255])

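    # 16-bit, single channel: AUTO_IMAGE and STRETCH_IMAGE both fall back to
    # the channel bounds; numeric maxima are clamped to 65535.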
    assert parse_intensity_bounds(FakeImage(16, 1), [0], [0], [0], [], []) == ([0], [65535])
    assert parse_intensity_bounds(
        FakeImage(16, 1), [0], [0], [0], [IntensitySelectionEnum.AUTO_IMAGE],
        [IntensitySelectionEnum.AUTO_IMAGE]
    ) == ([0], [10])
    assert parse_intensity_bounds(
        FakeImage(16, 1), [0], [0], [0], [IntensitySelectionEnum.STRETCH_IMAGE],
        [IntensitySelectionEnum.STRETCH_IMAGE]
    ) == ([0], [10])
    assert parse_intensity_bounds(FakeImage(16, 1), [0], [0], [0], [10], [100]) == ([10], [100])
    assert parse_intensity_bounds(FakeImage(16, 1), [0], [0], [0], [10], [1000]) == ([10], [1000])
    assert parse_intensity_bounds(
        FakeImage(16, 1), [0], [0], [0], [10], [100000]
    ) == ([10], [65535])

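    # 8-bit, two channels: single-element bounds are broadcast to every
    # channel; out-of-range maxima are clamped to 255.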
    assert parse_intensity_bounds(
        FakeImage(8, 2), [0, 1], [0], [0], [IntensitySelectionEnum.AUTO_IMAGE],
        [IntensitySelectionEnum.AUTO_IMAGE]
    ) == ([0, 0], [255, 255])
    assert parse_intensity_bounds(
        FakeImage(8, 2), [0, 1], [0], [0], [IntensitySelectionEnum.STRETCH_IMAGE],
        [IntensitySelectionEnum.STRETCH_IMAGE]
    ) == ([0, 1], [10, 11])
    assert parse_intensity_bounds(
        FakeImage(8, 2), [0, 1], [0], [0], [10], [100]
    ) == ([10, 10], [100, 100])
    assert parse_intensity_bounds(
        FakeImage(8, 2), [0, 1], [0], [0], [10], [1000, 20]
    ) == ([10, 10], [255, 20])

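    # 16-bit, two channels: per-channel values, clamping to 65535, and a mix of
    # numbers and IntensitySelectionEnum members in the same list.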
    assert parse_intensity_bounds(
        FakeImage(16, 2), [0, 1], [0], [0], [IntensitySelectionEnum.AUTO_IMAGE],
        [IntensitySelectionEnum.AUTO_IMAGE]
    ) == ([0, 1], [10, 11])
    assert parse_intensity_bounds(
        FakeImage(16, 2), [0, 1], [0], [0], [IntensitySelectionEnum.STRETCH_IMAGE],
        [IntensitySelectionEnum.STRETCH_IMAGE]
    ) == ([0, 1], [10, 11])
    assert parse_intensity_bounds(
        FakeImage(16, 2), [0, 1], [0], [0], [10], [100]
    ) == ([10, 10], [100, 100])
    assert parse_intensity_bounds(
        FakeImage(16, 2), [0, 1], [0], [0], [10], [1000, 20]
    ) == ([10, 10], [1000, 20])
    assert parse_intensity_bounds(
        FakeImage(16, 2), [0, 1], [0], [0], [10, 5], [100000, 20]
    ) == ([10, 5], [65535, 20])
    assert parse_intensity_bounds(
        FakeImage(16, 2), [0, 1], [0], [0], [10, IntensitySelectionEnum.AUTO_IMAGE],
        [100000, 20]
    ) == ([10, 1], [65535, 20])
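
The assertions above pin down the behaviour of parse_intensity_bounds fairly precisely. The sketch below reimplements those inferred rules for illustration only: it is not the library's code, the name sketch_parse_intensity_bounds is made up, and the z_slices/timepoints arguments of the real function are omitted because the fake image ignores them.

# Minimal sketch of the rules the assertions above imply; illustration only,
# not the library's implementation. Assumes IntensitySelectionEnum provides
# AUTO_IMAGE and STRETCH_IMAGE and that image.channel_bounds(c) returns [min, max].
def sketch_parse_intensity_bounds(image, channels, mins, maxs):
    max_allowed = 2 ** image.significant_bits - 1

    def broadcast(values, default):
        # Empty list -> defaults; single value -> repeated for every channel.
        if not values:
            return [default] * len(channels)
        if len(values) == 1:
            return list(values) * len(channels)
        return list(values)

    mins = broadcast(mins, 0)
    maxs = broadcast(maxs, max_allowed)

    out_mins, out_maxs = [], []
    for channel, lo, hi in zip(channels, mins, maxs):
        bounds = image.channel_bounds(channel)
        if lo == IntensitySelectionEnum.STRETCH_IMAGE:
            lo = bounds[0]
        elif lo == IntensitySelectionEnum.AUTO_IMAGE:
            # AUTO_IMAGE only stretches images deeper than 8 bits.
            lo = bounds[0] if image.significant_bits > 8 else 0
        if hi == IntensitySelectionEnum.STRETCH_IMAGE:
            hi = bounds[1]
        elif hi == IntensitySelectionEnum.AUTO_IMAGE:
            hi = bounds[1] if image.significant_bits > 8 else max_allowed
        # Plain numbers are clamped to the image's dynamic range.
        out_mins.append(min(max(lo, 0), max_allowed))
        out_maxs.append(min(max(hi, 0), max_allowed))
    return out_mins, out_maxs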
Example #2
def _show_window(
    request: Request, response: Response,  # required for @cache  # noqa
    path: Path,
    region: Union[Region, dict],
    height, width, length, zoom, level,
    channels, z_slices, timepoints,
    min_intensities, max_intensities, filters, gammas, threshold,
    bits, colorspace,
    annotations: Union[ParsedAnnotations, dict, List[dict]],
    annotation_style: dict,
    extension,
    headers,
    config: Settings,
    colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None
):
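    """
    Resolve the requested region, parse rendering and annotation parameters,
    and return the rendered window (or annotation mask) as an HTTP response.
    """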
    in_image = path.get_spatial()
    check_representation_existence(in_image)

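    # A dict region is a raw request payload; resolve it to a Region first.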
    if not isinstance(region, Region):
        tier_index_type = region['tier_index_type']
        reference_tier_index = region['reference_tier_index']
        if reference_tier_index is None:
            if tier_index_type == TierIndexType.LEVEL:
                reference_tier_index = 0
            else:
                reference_tier_index = in_image.pyramid.max_zoom

        if 'top' in region:
            # Parse raw WindowRegion to Region
            region = parse_region(
                in_image, region['top'], region['left'],
                region['width'], region['height'],
                reference_tier_index, tier_index_type,
                silent_oob=False
            )
        elif 'ti' in region:
            # Parse raw WindowTileIndex region to Region
            check_tileindex_validity(
                in_image.pyramid, region['ti'],
                reference_tier_index, tier_index_type
            )
            region = in_image.pyramid.get_tier_at(
                reference_tier_index,
                tier_index_type
            ).get_ti_tile(region['ti'])
        elif 'tx' in region and 'ty' in region:
            # Parse raw WindowTileCoord region to Region
            check_tilecoord_validity(
                in_image.pyramid, region['tx'], region['ty'],
                reference_tier_index, tier_index_type
            )
            region = in_image.pyramid.get_tier_at(
                reference_tier_index,
                tier_index_type
            ).get_txty_tile(region['tx'], region['ty'])

    out_format, mimetype = get_output_format(extension, headers.accept, VISUALISATION_MIMETYPES)
    check_zoom_validity(in_image.pyramid, zoom)
    check_level_validity(in_image.pyramid, level)
    req_size = get_window_output_dimensions(in_image, region, height, width, length, zoom, level)
    out_size = safeguard_output_dimensions(headers.safe_mode, config.output_size_limit, *req_size)
    out_width, out_height = out_size

    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)

    channels = get_channel_indexes(in_image, channels)
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, z_slices)
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, timepoints)
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas')
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False
    )
    intensities = parse_intensity_bounds(
        in_image, channels, z_slices, timepoints, min_intensities, max_intensities
    )
    min_intensities, max_intensities = intensities
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels)

    array_parameters = ('filters',)
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1], nullable=False
    )
    filters = parse_filter_ids(filters, FILTERS)

    out_bitdepth = parse_bitdepth(in_image, bits)

    if annotations and annotation_style and not isinstance(annotations, ParsedAnnotations):
        if annotation_style['mode'] == AnnotationStyleMode.DRAWING:
            ignore_fields = ['fill_color']
            default = {'stroke_color': RED, 'stroke_width': 1}
            point_envelope_length = annotation_style['point_envelope_length']
        else:
            ignore_fields = ['stroke_width', 'stroke_color']
            default = {'fill_color': WHITE}
            point_envelope_length = None

        annotations = parse_annotations(
            ensure_list(annotations), ignore_fields,
            default, point_envelope_length,
            origin=headers.annot_origin, im_height=in_image.height
        )

    affine = None
    if annotations:
        affine = annotation_crop_affine_matrix(annotations.region, region, *out_size)

    if annotations and annotation_style and \
            annotation_style['mode'] == AnnotationStyleMode.MASK:
        window = MaskResponse(
            in_image,
            annotations, affine,
            out_width, out_height, out_bitdepth, out_format
        )
    else:
        window = WindowResponse(
            in_image, channels, z_slices, timepoints,
            region, out_format, out_width, out_height,
            c_reduction, z_reduction, t_reduction,
            gammas, filters, colormaps,
            min_intensities, max_intensities, False,
            out_bitdepth, threshold, colorspace,
            annotations, affine, annotation_style
        )

    return window.http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )
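
The region dict accepted by _show_window can take three shapes, matching the dispatch above. The payloads below are only illustrative: the keys come from the code, the values are invented.

# Illustrative region payloads for _show_window (keys taken from the dispatch
# above; the concrete values are invented for the example).
window_region = {
    'top': 0, 'left': 0, 'width': 256, 'height': 256,
    'tier_index_type': TierIndexType.LEVEL, 'reference_tier_index': None,
}
tile_index_region = {
    'ti': 4,
    'tier_index_type': TierIndexType.ZOOM, 'reference_tier_index': 2,
}
tile_coord_region = {
    'tx': 1, 'ty': 2,
    'tier_index_type': TierIndexType.LEVEL, 'reference_tier_index': 0,
}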
Example #3
def _show_resized(
    request: Request, response: Response,  # required for @cache  # noqa
    path: Path,
    height, width, length, zoom, level,
    channels, z_slices, timepoints,
    min_intensities, max_intensities, filters, gammas, threshold,
    bits, colorspace,
    extension,
    headers,
    config: Settings,
    colormaps=None, c_reduction=ChannelReduction.ADD, z_reduction=None, t_reduction=None
):
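    """
    Parse rendering parameters and return the image, resized to the requested
    output dimensions, as an HTTP response.
    """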
    in_image = path.get_spatial()
    check_representation_existence(in_image)

    out_format, mimetype = get_output_format(extension, headers.accept, PROCESSING_MIMETYPES)
    check_zoom_validity(in_image.pyramid, zoom)
    check_level_validity(in_image.pyramid, level)
    req_size = get_thumb_output_dimensions(in_image, height, width, length, zoom, level)
    out_size = safeguard_output_dimensions(headers.safe_mode, config.output_size_limit, *req_size)
    out_width, out_height = out_size

    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)

    channels = get_channel_indexes(in_image, channels)
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, z_slices)
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, timepoints)
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas')
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False
    )
    intensities = parse_intensity_bounds(
        in_image, channels, z_slices, timepoints, min_intensities, max_intensities
    )
    min_intensities, max_intensities = intensities
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels)

    array_parameters = ('filters',)
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1], nullable=False
    )
    filters = parse_filter_ids(filters, FILTERS)

    out_bitdepth = parse_bitdepth(in_image, bits)

    return ResizedResponse(
        in_image, channels, z_slices, timepoints,
        out_format, out_width, out_height,
        c_reduction, z_reduction, t_reduction,
        gammas, filters, colormaps, min_intensities, max_intensities,
        False, out_bitdepth, threshold, colorspace
    ).http_response(
        mimetype,
        extra_headers=add_image_size_limit_header(dict(), *req_size, *out_size)
    )
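
_show_window, _show_resized and _show_tile repeat the same plane- and intensity-parsing block. A possible extraction of that shared pipeline is sketched below; the helper name and return shape are assumptions, and the real code may keep the blocks inline on purpose.

# Hypothetical refactoring: the parameter-parsing pipeline shared by the three
# handlers, extracted into one helper. Name and return shape are assumptions.
def _parse_plane_and_render_parameters(
    in_image, channels, z_slices, timepoints,
    min_intensities, max_intensities, colormaps, filters, gammas,
    c_reduction, z_reduction, t_reduction
):
    channels = get_channel_indexes(in_image, ensure_list(channels))
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, ensure_list(z_slices))
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, ensure_list(timepoints))
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    array_parameters = ('min_intensities', 'max_intensities', 'colormaps', 'gammas')
    check_array_size_parameters(
        array_parameters, locals(), allowed=[0, 1, len(channels)], nullable=False
    )
    min_intensities, max_intensities = parse_intensity_bounds(
        in_image, channels, z_slices, timepoints, min_intensities, max_intensities
    )
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels, in_image.channels)

    check_array_size_parameters(('filters',), locals(), allowed=[0, 1], nullable=False)
    filters = parse_filter_ids(filters, FILTERS)

    return (channels, z_slices, timepoints,
            min_intensities, max_intensities, colormaps, filters, gammas)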
Example #4
def _show_tile(
        request: Request,
        response: Response,  # required for @cache  # noqa
        path: Path,
        normalized: bool,
        tile: dict,
        channels,
        z_slices,
        timepoints,
        min_intensities,
        max_intensities,
        filters,
        gammas,
        threshold,
        log,
        extension,
        headers,
        config,
        colormaps=None,
        c_reduction=ChannelReduction.ADD,
        z_reduction=None,
        t_reduction=None):
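    """
    Resolve the requested tile, parse rendering parameters and return the tile
    as an HTTP response (served as a window when a normalized pyramid has to
    be emulated).
    """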
    in_image = path.get_spatial()
    check_representation_existence(in_image)

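    # Serve tiles from the image's own pyramid when possible; otherwise emulate
    # the normalized pyramid and answer the request as a window.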
    if not normalized or in_image.is_pyramid_normalized:
        pyramid = in_image.pyramid
        is_window = False
    else:
        pyramid = in_image.normalized_pyramid
        is_window = True

    if 'zoom' in tile:
        reference_tier_index = tile['zoom']
        tier_index_type = TierIndexType.ZOOM
    else:
        reference_tier_index = tile['level']
        tier_index_type = TierIndexType.LEVEL

    if 'ti' in tile:
        check_tileindex_validity(pyramid, tile['ti'], reference_tier_index,
                                 tier_index_type)
        tile_region = pyramid.get_tier_at(
            reference_tier_index, tier_index_type).get_ti_tile(tile['ti'])
    else:
        check_tilecoord_validity(pyramid, tile['tx'], tile['ty'],
                                 reference_tier_index, tier_index_type)
        tile_region = pyramid.get_tier_at(reference_tier_index,
                                          tier_index_type).get_txty_tile(
                                              tile['tx'], tile['ty'])

    out_format, mimetype = get_output_format(extension, headers.accept,
                                             VISUALISATION_MIMETYPES)
    req_size = tile_region.width, tile_region.height
    out_size = safeguard_output_dimensions(headers.safe_mode,
                                           config.output_size_limit, *req_size)
    out_width, out_height = out_size

    channels = ensure_list(channels)
    z_slices = ensure_list(z_slices)
    timepoints = ensure_list(timepoints)

    channels = get_channel_indexes(in_image, channels)
    check_reduction_validity(channels, c_reduction, 'channels')
    z_slices = get_zslice_indexes(in_image, z_slices)
    check_reduction_validity(z_slices, z_reduction, 'z_slices')
    timepoints = get_timepoint_indexes(in_image, timepoints)
    check_reduction_validity(timepoints, t_reduction, 'timepoints')

    min_intensities = ensure_list(min_intensities)
    max_intensities = ensure_list(max_intensities)
    colormaps = ensure_list(colormaps)
    filters = ensure_list(filters)
    gammas = ensure_list(gammas)

    array_parameters = ('min_intensities', 'max_intensities', 'colormaps',
                        'gammas')
    check_array_size_parameters(array_parameters,
                                locals(),
                                allowed=[0, 1, len(channels)],
                                nullable=False)
    intensities = parse_intensity_bounds(in_image, channels, z_slices,
                                         timepoints, min_intensities,
                                         max_intensities)
    min_intensities, max_intensities = intensities
    colormaps = parse_colormap_ids(colormaps, ALL_COLORMAPS, channels,
                                   in_image.channels)

    array_parameters = ('filters', )
    check_array_size_parameters(array_parameters,
                                locals(),
                                allowed=[0, 1],
                                nullable=False)
    filters = parse_filter_ids(filters, FILTERS)

    if is_window:
        tile = WindowResponse(in_image, channels, z_slices, timepoints,
                              tile_region, out_format, out_width, out_height,
                              c_reduction, z_reduction, t_reduction, gammas,
                              filters, colormaps, min_intensities,
                              max_intensities, log, 8, threshold,
                              Colorspace.AUTO)
    else:
        tile = TileResponse(in_image, channels, z_slices, timepoints,
                            tile_region, out_format, out_width, out_height,
                            c_reduction, z_reduction, t_reduction, gammas,
                            filters, colormaps, min_intensities,
                            max_intensities, log, threshold)

    return tile.http_response(mimetype,
                              extra_headers=add_image_size_limit_header(
                                  dict(), *req_size, *out_size))
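
For reference, the tile dict consumed by _show_tile combines a tier selector ('zoom' or 'level') with either a tile index ('ti') or tile coordinates ('tx', 'ty'). The payloads below are illustrative only; the keys come from the code, the values are invented.

# Illustrative tile payloads for _show_tile (keys from the dispatch above;
# values invented for the example).
tile_by_index = {'zoom': 3, 'ti': 7}            # tile index within a zoom tier
tile_by_coord = {'level': 0, 'tx': 1, 'ty': 2}  # tile column/row within a level tier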