Example 1
def _image_to_tensor(image):
    """Wraps image data in a pb2.ByteTensor for transfer to the bonnet.

    Accepts either raw encoded bytes (assumed JPEG) or a PIL-style image
    object exposing .size, .mode, .split() and .tobytes().

    Args:
      image: bytes/bytearray (JPEG payload) or an image in 'RGB' or 'L' mode.
    Returns:
      pb2.ByteTensor with a shape describing the pixel layout; encoded
      JPEG payloads use an all-zero height/width/depth shape.
    Raises:
      InferenceException: if the image mode is neither 'L' nor 'RGB'.
    """
    if isinstance(image, (bytes, bytearray)):
        # Only JPEG is supported on the bonnet side.
        shape = pb2.TensorShape(batch=1, height=0, width=0, depth=0)
        return pb2.ByteTensor(shape=shape, data=image)

    width, height = image.size
    mode = image.mode
    if mode == 'RGB':
        # Planar layout: all R bytes, then all G bytes, then all B bytes.
        channels = image.split()
        shape = pb2.TensorShape(batch=1, height=height, width=width, depth=3)
        return pb2.ByteTensor(shape=shape,
                              data=b''.join(c.tobytes() for c in channels))

    if mode == 'L':
        shape = pb2.TensorShape(batch=1, height=height, width=width, depth=1)
        return pb2.ByteTensor(shape=shape, data=image.tobytes())

    raise InferenceException(
        'Unsupported image format: %s. Must be L or RGB.' % mode)
Example 2
    def load_model(self, descriptor):
        """Loads model on VisionBonnet.

        Args:
          descriptor: ModelDescriptor, meta info that defines model name,
            where to get the model and etc.
        Returns:
          Model identifier (the descriptor's name).
        Raises:
          ValueError: if the descriptor's input shape has batch != 1 or
            depth != 3.
        """
        _check_firmware_info(self.get_firmware_info())
        mean, stddev = descriptor.input_normalizer
        batch, height, width, depth = descriptor.input_shape
        # Fix: the original raised the message with a bare '%d' placeholder,
        # never interpolating the actual value.
        if batch != 1:
            raise ValueError('Unsupported batch value: %d. Must be 1.' % batch)

        if depth != 3:
            raise ValueError('Unsupported depth value: %d. Must be 3.' % depth)

        try:
            logger.info('Load model "%s".', descriptor.name)
            self._communicate(
                pb2.Request(load_model=pb2.Request.LoadModel(
                    model_name=descriptor.name,
                    input_shape=pb2.TensorShape(
                        batch=batch, height=height, width=width, depth=depth),
                    input_normalizer=pb2.TensorNormalizer(mean=mean,
                                                          stddev=stddev),
                    compute_graph=descriptor.compute_graph)))
        except InferenceException as e:
            # Best-effort: the model may already be loaded on the bonnet;
            # log and continue rather than failing the caller.
            logger.warning(str(e))

        return descriptor.name
Example 3
def _image_to_tensor(image):
    """Converts a PIL-style image into a pb2.ByteTensor.

    Args:
      image: an image object in 'RGB' or 'L' mode exposing .size, .mode,
        .split() and .tobytes().
    Returns:
      pb2.ByteTensor whose shape carries batch=1 and the image geometry.
    Raises:
      InferenceException: if the image mode is neither 'L' nor 'RGB'.
    """
    width, height = image.size
    mode = image.mode
    if mode not in ('RGB', 'L'):
        raise InferenceException(
            'Unsupported image format: %s. Must be L or RGB.' % mode)

    if mode == 'RGB':
        # Planar layout: all R bytes, then all G bytes, then all B bytes.
        depth = 3
        r, g, b = image.split()
        payload = r.tobytes() + g.tobytes() + b.tobytes()
    else:
        depth = 1
        payload = image.tobytes()

    shape = pb2.TensorShape(batch=1, height=height, width=width, depth=depth)
    return pb2.ByteTensor(shape=shape, data=payload)