Example #1
    def raw_tile(self, event, context):
        uuid = event_path_param(event, 'uuid')
        validate_uuid(uuid)
        self._has_image_permission(self.user_uuid, uuid, 'Read')

        x = int(event_path_param(event, 'x'))
        y = int(event_path_param(event, 'y'))
        z = int(event_path_param(event, 'z'))
        t = int(event_path_param(event, 't'))
        level = int(event_path_param(event, 'level'))
        channel = int(event_path_param(event, 'channels'))
        raw_format = self.get_raw_format(event)

        tile_provider = S3TileProvider(
            bucket.split(':')[-1],
            missing_tile_callback=handle_missing_tile,
            cache_client=redis_client_raw)
        tile = tile_provider.get_tile(uuid, x, y, z, t, channel, level,
                                      raw_format)

        # Encode rendered image as PNG
        img = BytesIO()
        imagecodecs.imwrite(img, tile, codec="png", level=1)
        img.seek(0)

        return img.read()
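The method above never writes the PNG to disk: imagecodecs.imwrite accepts a file-like object, so the tile is encoded straight into an in-memory BytesIO buffer. Below is a minimal, self-contained sketch of that round trip, assuming only imagecodecs and numpy; the synthetic array stands in for the tile returned by S3TileProvider.get_tile.

from io import BytesIO

import imagecodecs
import numpy as np

# Fake 16-bit grayscale tile in place of a real S3 tile
tile = np.random.randint(0, 2**16, size=(256, 256), dtype=np.uint16)

# Encode into memory; level=1 asks for fast, light PNG compression
buf = BytesIO()
imagecodecs.imwrite(buf, tile, codec="png", level=1)
buf.seek(0)
png_bytes = buf.read()

# Decode again to confirm the pixels survived the lossless round trip
decoded = imagecodecs.imread(png_bytes, codec="png")
assert np.array_equal(decoded, tile)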
Example #2
    def _render_tile(
        self,
        uuid,
        x,
        y,
        z,
        t,
        level,
        channels,
        gamma=1,
        codec="jpg",
        raw_format="tiff",
        tile_size=1024,
    ):
        # Prepare for blending
        args = [(uuid, x, y, z, t, channel["index"], level, raw_format)
                for channel in channels]

        # Fetch raw tiles in parallel
        tile_provider = S3TileProvider(
            bucket.split(":")[-1],
            missing_tile_callback=handle_missing_tile,
            cache_client=redis_client_raw,
            tile_size=tile_size,
        )
        images = pool.starmap(tile_provider.get_tile, args)

        # Update channel dictionary with image data
        for channel, image in zip(channels, images):
            channel["image"] = image

        # Blend the raw tiles
        composite = render.composite_channels(channels, gamma=gamma)

        # Encode rendered image with the requested codec (JPG by default)
        img = BytesIO()
        imagecodecs.imwrite(img, composite, codec=codec, level=85)
        img.seek(0)

        return img.read()
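_render_tile relies on multiprocessing.pool.ThreadPool.starmap to fetch one raw tile per channel concurrently; threads are enough because the work is I/O bound (S3 reads). A small sketch of that fan-out pattern, with a hypothetical fetch_tile standing in for S3TileProvider.get_tile:

from multiprocessing.pool import ThreadPool

def fetch_tile(uuid, x, y, z, t, channel, level, raw_format):
    # A real implementation would issue the S3 GET here; returning the
    # arguments keeps the example self-contained.
    return (uuid, x, y, z, t, channel, level, raw_format)

# One argument tuple per channel, mirroring the args list built above
args = [("image-uuid", 0, 0, 0, 0, c, 0, "tiff") for c in range(3)]

with ThreadPool(len(args)) as pool:
    images = pool.starmap(fetch_tile, args)

print(len(images))  # 3 results, in the same order as args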
Example #3
def imwrite(file_name, image, image_type='file-extension-default'):
    '''
    Save an image to disk.

    :param file_name:
        File name and path on disk that the function saves to.
    :param image:
        The image array that will be saved to disk as file_name.
    :param image_type:
        The type of image, one of:
            'kitti-disparity', 'optical-flow', 'disparity', 'depth',
            'file-extension-default'
    :return: None
    '''

    ext = os.path.splitext(file_name)[1].lower()[1:]
    image_type = image_type.lower()
    if image_type not in IMAGE_TYPES:
        print(IMAGE_TYPES_ERR_MSG)
        return

    if image_type == 'file-extension-default':
        if ext in NORMAL_IMAGE_FORMATS:
            return write_image(file_name, image)
        elif ext in ['flo']:
            return save_flo(file_name, image)
        elif ext in ['dpt']:
            return save_dpt(file_name, image)
        elif ext in ['pfm']:
            return save_pfm(file_name, image)
        elif ext in ['jxr']:
            return imagecodecs.imwrite(file_name,
                                       (image * (2**16 - 1)).astype(np.uint16),
                                       codec='jpegxr')
        elif ext in ['exr']:
            return cv2.imwrite(file_name, image, [cv2.IMWRITE_EXR_TYPE_HALF])
        else:
            print("ERROR: The image type is unknown.")
    elif image_type == 'optical-flow':
        if len(image.shape) == 3 and image.shape[2] == 2:
            return save_flo(file_name, image.astype(np.float32))
        else:
            print("ERROR: The image type is unknown.")
    elif image_type == 'disparity':
        return save_pfm(file_name, image)
    elif image_type == 'depth':
        return save_dpt(file_name, image)
    elif image_type == 'kitti-disparity':
        return write_image(file_name, (image * 256.).astype(np.uint16))
    else:
        print("This image type is not implemented.")
Example #4
    def render_region(self, event, context):
        '''Render the specified region with the given settings'''
        from minerva_db.sql.api import Client as db_client
        self._open_session()
        client = db_client(self.session)

        uuid = event_path_param(event, 'uuid')
        validate_uuid(uuid)
        self._has_image_permission(self.user_uuid, uuid, 'Read')

        x = int(event_path_param(event, 'x'))
        y = int(event_path_param(event, 'y'))
        width = int(event_path_param(event, 'width'))
        height = int(event_path_param(event, 'height'))
        z = int(event_path_param(event, 'z'))
        t = int(event_path_param(event, 't'))

        # Split the channels path parameters
        channel_path_params = event['pathParameters']['channels'].split('/')

        # Read the path parameter for the channels and convert
        channels = [
            _parse_channel_params(param) for param in channel_path_params
        ]

        # Read the optional query parameters for output shape
        output_width = event['queryStringParameters'].get('output-width')
        output_height = event['queryStringParameters'].get('output-height')
        output_width = int(output_width) if output_width is not None else None
        output_height = (int(output_height)
                         if output_height is not None else None)

        # Set prefer_higher_resolution from query parameter
        prefer_higher_resolution = (
            event['queryStringParameters'].get('prefer-higher-resolution'))
        prefer_higher_resolution = (prefer_higher_resolution.lower() == 'true'
                                    if prefer_higher_resolution is not None
                                    else False)

        # Query the shape of the full image
        image = client.get_image(uuid)
        fileset_uuid = image['data']['fileset_uuid']
        fileset = client.get_fileset(fileset_uuid)

        if fileset['data']['complete'] is not True:
            raise ValueError(
                f'Fileset has not had metadata extracted yet: {fileset_uuid}')

        obj = boto3.resource('s3').Object(
            bucket.split(':')[-1], f'{fileset_uuid}/metadata.xml')
        body = obj.get()['Body']
        data = body.read()
        import xml.etree.ElementTree as ET
        e_root = ET.fromstring(data.decode('UTF-8'))
        e_image = e_root.find('ome:Image[@ID="Image:{}"]'.format(uuid),
                              {'ome': OME_NS})
        e_pixels = e_image.find('ome:Pixels', {'ome': OME_NS})

        image_shape = (int(e_pixels.attrib['SizeX']),
                       int(e_pixels.attrib['SizeY']))

        # Query the number of levels available
        level_count = image['data']['pyramid_levels']

        # Create shape tuples
        tile_shape = (1024, 1024)
        target_shape = (height, width)

        # Get the optimum level of the pyramid from which to use tiles
        try:
            output_max = max(
                [d for d in (output_height, output_width) if d is not None])
            level = render.get_optimum_pyramid_level(image_shape, level_count,
                                                     output_max,
                                                     prefer_higher_resolution)
        except ValueError:
            level = 0

        # Transform origin and shape of target region into that required for
        # the pyramid level being used
        origin = render.transform_coordinates_to_level((x, y), level)
        shape = render.transform_coordinates_to_level(target_shape, level)

        # Calculate the scaling factor
        if output_width is not None:
            if output_height is not None:
                # Use both supplied scaling factors
                scaling_factor = (output_height / shape[0],
                                  output_width / shape[1])
            else:
                # Calculate scaling factor from output_width only
                scaling_factor = output_width / shape[1]
        else:
            if output_height is not None:
                # Calculate scaling factor from output_height only
                scaling_factor = output_height / shape[0]
            else:
                # No scaling
                scaling_factor = 1

        args = []
        tiles = []
        for channel in channels:

            color = channel['color']
            _id = channel['index']
            _min = channel['min']
            _max = channel['max']

            for indices in render.select_grids(tile_shape, origin, shape):

                (i, j) = indices

                # Disallow negative tiles
                if i < 0 or j < 0:
                    continue

                # Add to list of tiles to fetch
                args.append((uuid, j, i, z, t, _id, level))

                # Add to list of tiles
                tiles.append({
                    'grid': (i, j),
                    'color': color,
                    'min': _min,
                    'max': _max
                })

        # Fetch raw tiles in parallel
        s3_tile_provider = S3TileProvider(
            bucket.split(':')[-1], missing_tile_callback=handle_missing_tile)
        pool = ThreadPool(len(args))
        try:
            images = pool.starmap(s3_tile_provider.get_tile, args)
        finally:
            pool.close()

        # Update tiles dictionary with image data
        for image_tile, image in zip(tiles, images):
            image_tile['image'] = image

        # Blend the raw tiles
        composite = render.composite_subtiles(tiles, tile_shape, origin, shape)

        # Rescale for desired output size
        if scaling_factor != 1:
            scaled = render.scale_image_nearest_neighbor(
                composite, scaling_factor)
        else:
            scaled = composite

        # JPEG encoding requires 0 - 255 values
        scaled *= 255
        scaled = scaled.astype(np.uint8, copy=False)

        # Encode rendered image as JPG
        img = BytesIO()
        imagecodecs.imwrite(img, scaled, codec="jpg")
        img.seek(0)
        return img.read()
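The final step in render_region rescales the composite because the blended result is floating point (the in-place multiplication by 255 implies values in [0, 1]) while JPEG encoding wants uint8 samples. A stand-alone sketch of just that encode step, assuming imagecodecs and numpy; the random array stands in for the blended composite.

from io import BytesIO

import imagecodecs
import numpy as np

composite = np.random.rand(256, 256, 3).astype(np.float32)  # stand-in composite in [0, 1]

# JPEG needs 0 - 255 integer samples
scaled = (composite * 255).astype(np.uint8, copy=False)

buf = BytesIO()
imagecodecs.imwrite(buf, scaled, codec="jpg")
buf.seek(0)
jpeg_bytes = buf.read()
print(len(jpeg_bytes))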
Example #5
def _write_test_jpg(
    pth: os.PathLike[str],
    size: tuple[int, int],
):
    arr = np.random.randint(0, 255, size=(size[0], size[1], 3), dtype=np.uint8)
    imwrite(pth, arr)
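The snippet above does not show which imwrite it imports. One possible backend is imagecodecs.imwrite, which also accepts a file name; a hypothetical, self-contained variant under that assumption:

import os
import tempfile

import imagecodecs
import numpy as np

def write_test_jpg(path, size):
    # Random RGB noise of the requested size
    arr = np.random.randint(0, 256, size=(size[0], size[1], 3), dtype=np.uint8)
    imagecodecs.imwrite(path, arr, codec="jpg")

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "test.jpg")
    write_test_jpg(path, (64, 48))
    assert os.path.getsize(path) > 0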