Example 1
    def get_bytes_batch(base64_or_bytes):
        response = Response()
        try:
            if isinstance(base64_or_bytes, bytes):
                bytes_batch = [base64_or_bytes]
            elif isinstance(base64_or_bytes, list):
                bytes_batch = [
                    base64.b64decode(i.encode('utf-8'))
                    for i in base64_or_bytes if isinstance(i, str)
                ]
                if not bytes_batch:
                    bytes_batch = [
                        base64.b64decode(i) for i in base64_or_bytes
                        if isinstance(i, bytes)
                    ]
            else:
                bytes_batch = base64.b64decode(
                    base64_or_bytes.encode('utf-8')).split(
                        SystemConfig.split_flag)
        except binascii.Error:
            return None, response.INVALID_BASE64_STRING
        what_img = [ImageUtils.test_image(i) for i in bytes_batch]
        if None in what_img:
            return None, response.INVALID_IMAGE_FORMAT
        return bytes_batch, response.SUCCESS
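
A minimal standalone sketch of the same idea, for clarity: normalize raw bytes, a single base64 string, or a list of base64 strings into a list of image bytes. It drops the SystemConfig.split_flag handling and the Response codes used above, so it is an illustration rather than the project's API.

import base64

def to_bytes_batch(payload):
    # Accept raw bytes, a single base64 string, or a list of base64 strings
    # and normalize everything to a list of bytes objects.
    if isinstance(payload, bytes):
        return [payload]
    if isinstance(payload, list):
        return [base64.b64decode(item) for item in payload]
    return [base64.b64decode(payload)]

print(to_bytes_batch(base64.b64encode(b"\x89PNG fake").decode()))  # [b'\x89PNG fake']
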
Example 2
    def get_image_batch(model: ModelConfig, bytes_batch, color=None):
        # Note that there are two return objects here.
        # 1.image_batch, 2.response

        response = Response()

        hsv_map = {
            "blue": {
                "lower_hsv": np.array([100, 128, 46]),
                "high_hsv": np.array([124, 255, 255])
            },
            "red": {
                "lower_hsv": np.array([0, 128, 46]),
                "high_hsv": np.array([5, 255, 255])
            },
            "yellow": {
                "lower_hsv": np.array([15, 128, 46]),
                "high_hsv": np.array([34, 255, 255])
            },
            "green": {
                "lower_hsv": np.array([35, 128, 46]),
                "high_hsv": np.array([77, 255, 255])
            },
            "black": {
                "lower_hsv": np.array([0, 0, 0]),
                "high_hsv": np.array([180, 255, 46])
            }
        }

        def separate_color(pil_image, color):
            hsv = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_BGR2HSV)
            lower_hsv = hsv_map[color]['lower_hsv']
            high_hsv = hsv_map[color]['high_hsv']
            mask = cv2.inRange(hsv, lowerb=lower_hsv, upperb=high_hsv)
            return mask

        def load_image(image_bytes, color=None):

            if color and color in ['red', 'blue', 'black', 'green', 'yellow']:
                image = np.asarray(bytearray(image_bytes), dtype="uint8")
                image = cv2.imdecode(image, -1)
                image = separate_color(image, color)
            else:
                data_stream = io.BytesIO(image_bytes)
                pil_image = PIL_Image.open(data_stream).convert('RGB')
                image = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2GRAY)
            image = preprocessing(image, model.binaryzation, model.smooth,
                                  model.blur).astype(np.float32)
            image = cv2.resize(image, (model.resize[0], model.resize[1]))
            image = image.swapaxes(0, 1)
            return image[:, :, np.newaxis] / 255.

        try:
            image_batch = [load_image(i, color=color) for i in bytes_batch]
            return image_batch, response.SUCCESS
        except OSError:
            return None, response.IMAGE_DAMAGE
        except ValueError as _e:
            print(_e)
            return None, response.IMAGE_SIZE_NOT_MATCH_GRAPH
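
The separate_color helper above is essentially cv2.inRange over an HSV conversion. A self-contained sketch of that masking step, using the "blue" bounds from hsv_map on a synthetic image:

import cv2
import numpy as np

# Build a solid blue-ish BGR image, convert it to HSV, and keep only pixels
# whose hue/saturation/value fall inside the "blue" band from hsv_map above.
bgr = np.full((60, 120, 3), (200, 80, 10), dtype=np.uint8)   # B=200, G=80, R=10
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([100, 128, 46]), np.array([124, 255, 255]))
print(mask.shape, int(mask.max()))  # (60, 120) 255 -> every pixel matched
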
Example 3
    def get_image_batch(model: ModelConfig, bytes_batch):
        # Note that there are two return objects here.
        # 1.image_batch, 2.response

        response = Response()

        def load_image(image_bytes):
            data_stream = io.BytesIO(image_bytes)
            pil_image = PIL_Image.open(data_stream)
            rgb = pil_image.split()
            size = pil_image.size

            if len(rgb) > 3 and model.replace_transparent:
                background = PIL_Image.new('RGB', pil_image.size, (255, 255, 255))
                background.paste(pil_image, (0, 0, size[0], size[1]), pil_image)
                pil_image = background

            if model.image_channel == 1:
                pil_image = pil_image.convert('L')

            # image = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2GRAY)
            image = preprocessing(np.asarray(pil_image), model.binaryzation, model.smooth, model.blur).astype(
                np.float32)
            image = cv2.resize(image, (model.resize[0], model.resize[1]))
            image = image.swapaxes(0, 1)
            return (image[:, :, np.newaxis] if model.image_channel == 1 else image[:, :]) / 255.

        try:
            image_batch = [load_image(i) for i in bytes_batch]
            return image_batch, response.SUCCESS
        except OSError:
            return None, response.IMAGE_DAMAGE
        except ValueError as _e:
            print(_e)
            return None, response.IMAGE_SIZE_NOT_MATCH_GRAPH
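
The replace_transparent branch above flattens an RGBA image onto a white canvas so transparent pixels do not turn black during conversion. A self-contained sketch of that step with a synthetic image:

from PIL import Image

# Paste an RGBA image onto a white RGB background, using its own alpha
# channel as the paste mask, so transparency becomes white.
rgba = Image.new('RGBA', (80, 30), (255, 0, 0, 0))        # fully transparent red
background = Image.new('RGB', rgba.size, (255, 255, 255))
background.paste(rgba, (0, 0), rgba)                       # alpha acts as the mask
print(background.mode, background.getpixel((0, 0)))        # RGB (255, 255, 255)
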
Example 4
    def get_bytes_batch(self, base64_or_bytes):
        response = Response(self.conf.response_def_map)
        b64_filter_s = lambda s: re.sub("data:image/.+?base64,", "", s, 1
                                        ) if ',' in s else s
        b64_filter_b = lambda s: re.sub(b"data:image/.+?base64,", b"", s, 1
                                        ) if b',' in s else s
        try:
            if isinstance(base64_or_bytes, bytes):
                if self.conf.split_flag in base64_or_bytes:
                    bytes_batch = base64_or_bytes.split(self.conf.split_flag)
                else:
                    bytes_batch = [base64_or_bytes]
            elif isinstance(base64_or_bytes, list):
                bytes_batch = [
                    base64.b64decode(b64_filter_s(i).encode('utf-8'))
                    for i in base64_or_bytes if isinstance(i, str)
                ]
                if not bytes_batch:
                    bytes_batch = [
                        base64.b64decode(b64_filter_b(i))
                        for i in base64_or_bytes if isinstance(i, bytes)
                    ]
            else:
                base64_or_bytes = b64_filter_s(base64_or_bytes)
                bytes_batch = base64.b64decode(
                    base64_or_bytes.encode('utf-8')).split(
                        self.conf.split_flag)

        except binascii.Error:
            return None, response.INVALID_BASE64_STRING
        what_img = [ImageUtils.test_image(i) for i in bytes_batch]

        if None in what_img:
            return None, response.INVALID_IMAGE_FORMAT
        return bytes_batch, response.SUCCESS
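
The b64_filter_s lambda above strips an optional data-URI prefix ("data:image/...;base64,") before decoding. A standalone sketch of that filter:

import base64
import re

def strip_data_uri(s: str) -> str:
    # Remove a leading "data:image/<type>;base64," prefix if one is present.
    return re.sub(r"data:image/.+?base64,", "", s, count=1) if ',' in s else s

data_uri = "data:image/png;base64," + base64.b64encode(b"\x89PNG fake").decode()
print(base64.b64decode(strip_data_uri(data_uri)))  # b'\x89PNG fake'
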
Example 5
    def get_image_batch(model: ModelConfig, bytes_batch):
        # Note that there are two return objects here.
        # 1.image_batch, 2.response

        response = Response()

        def load_image(image_bytes):
            data_stream = io.BytesIO(image_bytes)
            pil_image = PIL_Image.open(data_stream)
            rgb = pil_image.split()
            size = pil_image.size

            gif_handle = model.pre_concat_frames != -1 or model.pre_blend_frames != -1

            if len(rgb) > 3 and model.pre_replace_transparent and gif_handle:
                background = PIL_Image.new('RGB', pil_image.size,
                                           (255, 255, 255))
                background.paste(pil_image, (0, 0, size[0], size[1]),
                                 pil_image)
                pil_image = background

            if model.pre_concat_frames != -1:
                im = concat_frames(pil_image, model.pre_concat_frames)
            elif model.pre_blend_frames != -1:
                im = blend_frame(pil_image, model.pre_blend_frames)
            else:
                im = np.array(pil_image)

            if model.image_channel == 1 and len(im.shape) == 3:
                im = im.mean(axis=2).astype(np.float32)

            im = preprocessing(
                image=im,
                binaryzation=model.pre_binaryzation,
            )

            if model.pre_horizontal_stitching:
                up_slice = im[0:int(size[1] / 2), 0:size[0]]
                down_slice = im[int(size[1] / 2):size[1], 0:size[0]]
                im = np.concatenate((up_slice, down_slice), axis=1)

            image = im.astype(np.float32)
            if model.resize[0] == -1:
                ratio = model.resize[1] / size[1]
                resize_width = int(ratio * size[0])
                image = cv2.resize(image, (resize_width, model.resize[1]))
            else:
                image = cv2.resize(image, (model.resize[0], model.resize[1]))
            image = image.swapaxes(0, 1)
            return (image[:, :, np.newaxis]
                    if model.image_channel == 1 else image[:, :]) / 255.

        try:
            image_batch = [load_image(i) for i in bytes_batch]
            return image_batch, response.SUCCESS
        except OSError:
            return None, response.IMAGE_DAMAGE
        except ValueError as _e:
            print(_e)
            return None, response.IMAGE_SIZE_NOT_MATCH_GRAPH
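
When model.resize[0] is -1, the example above preserves the aspect ratio by scaling the width with the same factor applied to the height. A standalone sketch of that branch with illustrative sizes:

import cv2
import numpy as np

target_h = 64
img = np.zeros((32, 100), dtype=np.float32)           # (height, width)
ratio = target_h / img.shape[0]                       # same factor used for the height
resize_width = int(ratio * img.shape[1])
resized = cv2.resize(img, (resize_width, target_h))   # cv2.resize takes (width, height)
print(resized.shape)                                  # (64, 200): aspect ratio preserved
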
Example 6
    def get_image_batch(model: ModelConfig, bytes_batch):
        # Note that there are two return objects here.
        # 1.image_batch, 2.response

        response = Response()

        def load_image(image_bytes):
            data_stream = io.BytesIO(image_bytes)
            pil_image = PIL_Image.open(data_stream)
            rgb = pil_image.split()
            size = pil_image.size

            if len(rgb) > 3 and model.replace_transparent:
                background = PIL_Image.new('RGB', pil_image.size,
                                           (255, 255, 255))
                background.paste(pil_image, (0, 0, size[0], size[1]),
                                 pil_image)
                pil_image = background

            if model.image_channel == 1:
                pil_image = pil_image.convert('L')

            im = np.asarray(pil_image)
            if model.horizontal_stitching:
                up_slice = im[0:int(size[1] / 2), 0:size[0]]
                down_slice = im[int(size[1] / 2):size[1], 0:size[0]]
                im = np.concatenate((up_slice, down_slice), axis=1)
            # image = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2GRAY)
            image = preprocessing(im, model.binaryzation, model.smooth,
                                  model.blur).astype(np.float32)

            if model.resize[0] == -1:
                ratio = model.resize[1] / size[1]
                resize_width = int(ratio * size[0])
                image = cv2.resize(image, (resize_width, model.resize[1]))
            else:
                image = cv2.resize(image, (model.resize[0], model.resize[1]))
            if model.padding:
                image = tf.keras.preprocessing.sequence.pad_sequences(
                    sequences=image,
                    maxlen=model.padding if model.lower_padding
                    and model.resize[0] < model.lower_padding else None,
                    dtype='float32',
                    padding='post',
                    truncating='post',
                    value=0)
            image = image.swapaxes(0, 1)
            return (image[:, :, np.newaxis]
                    if model.image_channel == 1 else image[:, :]) / 255.

        try:
            image_batch = [load_image(i) for i in bytes_batch]
            return image_batch, response.SUCCESS
        except OSError:
            return None, response.IMAGE_DAMAGE
        except ValueError as _e:
            print(_e)
            return None, response.IMAGE_SIZE_NOT_MATCH_GRAPH
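
The horizontal_stitching step above cuts the image into top and bottom halves and lays them side by side, halving the height and doubling the width. A standalone numpy sketch:

import numpy as np

im = np.arange(8 * 6).reshape(8, 6)            # (height=8, width=6)
up_slice = im[0:4, :]                          # top half
down_slice = im[4:8, :]                        # bottom half
stitched = np.concatenate((up_slice, down_slice), axis=1)
print(stitched.shape)                          # (4, 12): half as tall, twice as wide
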
Example 7
    def get_image_batch(model: ModelConfig, bytes_batch):
        # Note that there are two return objects here.
        # 1.image_batch, 2.response

        response = Response()

        def load_image(image_bytes):
            if isinstance(image_bytes, bytes):
                nparr = np.frombuffer(image_bytes, np.uint8)  # np.fromstring is deprecated
                im = cv2.imdecode(nparr, cv2.IMREAD_GRAYSCALE)
            else:
                im = cv2.imread(image_bytes, cv2.IMREAD_GRAYSCALE)
            # OpenCV cannot decode GIF images; imdecode returns None for them.
            if im is None:
                data_stream = io.BytesIO(image_bytes)
                pil_image = PIL_Image.open(data_stream)
                rgb = pil_image.split()
                size = pil_image.size

                if len(rgb) > 3 and model.replace_transparent:
                    background = PIL_Image.new('RGB', pil_image.size,
                                               (255, 255, 255))
                    background.paste(pil_image, (0, 0, size[0], size[1]),
                                     pil_image)
                    pil_image = background

                if model.image_channel == 1:
                    pil_image = pil_image.convert('L')
                im = np.array(pil_image)
            else:
                # cv2 decoded the image directly; record (width, height) so the
                # aspect-ratio resize branch below has a defined size.
                size = (im.shape[1], im.shape[0])
            image = preprocessing(im, model.gamma, model.binaryzation,
                                  model.smooth, model.blur).astype(np.float32)
            if model.resize[0] == -1:
                ratio = model.resize[1] / size[1]
                resize_width = int(ratio * size[0])
                image = cv2.resize(image, (resize_width, model.resize[1]))
            else:
                image = cv2.resize(image, (model.resize[0], model.resize[1]))
            image = image.swapaxes(0, 1)
            return (image[:, :, np.newaxis]
                    if model.image_channel == 1 else image[:, :]) / 255.

        try:
            image_batch = [load_image(i) for i in bytes_batch]
            return image_batch, response.SUCCESS
        except OSError:
            return None, response.IMAGE_DAMAGE
        except ValueError as _e:
            print(_e)
            return None, response.IMAGE_SIZE_NOT_MATCH_GRAPH
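
A standalone sketch of the decode-with-fallback idea above: try cv2.imdecode on the raw bytes (np.frombuffer is the non-deprecated replacement for np.fromstring) and fall back to PIL for formats OpenCV cannot decode, such as GIF.

import io

import cv2
import numpy as np
from PIL import Image

def decode_gray(image_bytes: bytes) -> np.ndarray:
    arr = np.frombuffer(image_bytes, dtype=np.uint8)
    im = cv2.imdecode(arr, cv2.IMREAD_GRAYSCALE)
    if im is None:  # e.g. GIF: cv2.imdecode returns None
        im = np.array(Image.open(io.BytesIO(image_bytes)).convert('L'))
    return im

ok, png = cv2.imencode('.png', np.zeros((10, 10), dtype=np.uint8))
print(decode_gray(png.tobytes()).shape)  # (10, 10)
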
Example 8
    def get_image_batch(model: ModelConfig, bytes_batch):
        # Note that there are two return objects here.
        # 1.image_batch, 2.response

        response = Response()

        def load_image(image_bytes):
            data_stream = io.BytesIO(image_bytes)
            pil_image = PIL_Image.open(data_stream).convert('RGB')
            image = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2GRAY)
            image = preprocessing(image, model.binaryzation, model.smooth,
                                  model.blur).astype(np.float32)
            image = cv2.resize(image, (model.resize[0], model.resize[1]))
            image = image.swapaxes(0, 1)
            return image[:, :, np.newaxis] / 255.

        try:
            image_batch = [load_image(i) for i in bytes_batch]
            return image_batch, response.SUCCESS
        except OSError:
            return None, response.IMAGE_DAMAGE
        except ValueError as _e:
            print(_e)
            return None, response.IMAGE_SIZE_NOT_MATCH_GRAPH
Example 9
    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        self.exception = Response(system_config.response_def_map)
        self.executor = ThreadPoolExecutor(workers)
        self.image_utils = ImageUtils(system_config)
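
This handler keeps its own ThreadPoolExecutor so blocking work (decoding, inference) can run off the main loop; in Tornado such a pool is typically paired with the run_on_executor decorator. A minimal standalone sketch of the pool itself, where the worker count of 4 is only an illustrative value:

from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=4)
future = executor.submit(lambda payload: len(payload), b"\x89PNG fake")
print(future.result())  # 9
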
Example 10
    def get_image_batch(model: ModelConfig,
                        bytes_batch,
                        param_key=None,
                        extract_rgb: list = None):
        # Note that there are two return objects here.
        # 1.image_batch, 2.response

        response = Response(model.conf.response_def_map)

        def load_image(image_bytes: bytes):
            data_stream = io.BytesIO(image_bytes)
            pil_image = PIL_Image.open(data_stream)

            gif_handle = model.pre_concat_frames != -1 or model.pre_blend_frames != -1

            if pil_image.mode == 'P' and not gif_handle:
                pil_image = pil_image.convert('RGB')

            rgb = pil_image.split()
            size = pil_image.size

            if (len(rgb) > 3
                    and model.pre_replace_transparent) and not gif_handle:
                background = PIL_Image.new('RGB', pil_image.size,
                                           (255, 255, 255))
                try:
                    background.paste(pil_image, (0, 0, size[0], size[1]),
                                     pil_image)
                    pil_image = background
                except Exception:
                    # paste() can fail on unusual palette/alpha combinations;
                    # fall back to a plain RGB conversion.
                    pil_image = pil_image.convert('RGB')

            if len(pil_image.split()) > 3 and model.image_channel == 3:
                pil_image = pil_image.convert('RGB')

            if model.pre_concat_frames != -1:
                im = concat_frames(pil_image, model.pre_concat_frames)
            elif model.pre_blend_frames != -1:
                im = blend_frame(pil_image, model.pre_blend_frames)
            else:
                im = np.asarray(pil_image)

            if extract_rgb:
                im = rgb_filter(im, extract_rgb)

            im = preprocessing_by_func(exec_map=model.exec_map,
                                       key=param_key,
                                       src_arr=im)

            if model.image_channel == 1 and len(im.shape) == 3:
                im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)

            im = preprocessing(
                image=im,
                binaryzation=model.pre_binaryzation,
            )

            if model.pre_horizontal_stitching:
                up_slice = im[0:int(size[1] / 2), 0:size[0]]
                down_slice = im[int(size[1] / 2):size[1], 0:size[0]]
                im = np.concatenate((up_slice, down_slice), axis=1)

            image = im.astype(np.float32)
            if model.resize[0] == -1:
                ratio = model.resize[1] / size[1]
                resize_width = int(ratio * size[0])
                image = cv2.resize(image, (resize_width, model.resize[1]))
            else:
                image = cv2.resize(image, (model.resize[0], model.resize[1]))
            image = image.swapaxes(0, 1)
            return (image[:, :, np.newaxis]
                    if model.image_channel == 1 else image[:, :]) / 255.

        try:
            image_batch = [load_image(i) for i in bytes_batch]
            return image_batch, response.SUCCESS
        except OSError:
            return None, response.IMAGE_DAMAGE
        except ValueError as _e:
            print(_e)
            return None, response.IMAGE_SIZE_NOT_MATCH_GRAPH
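
concat_frames and blend_frame are not shown in this listing. As a hypothetical illustration only (not the project's implementation), frame handling with PIL could look like this: iterate the animation's frames and stack the first few side by side.

import numpy as np
from PIL import Image, ImageSequence

def concat_first_frames(pil_image: Image.Image, n: int) -> np.ndarray:
    # Grayscale each of the first n frames and concatenate them along the width.
    frames = [np.array(f.convert('L')) for f in ImageSequence.Iterator(pil_image)][:n]
    return np.concatenate(frames, axis=1)
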
Example 11
from geventwebsocket.handler import WebSocketHandler
from config import Config
from utils import ImageUtils
from constants import Response

from interface import InterfaceManager
from signature import Signature, ServerType
from watchdog.observers import Observer
from event_handler import FileEventHandler
from middleware import *
# The import order matters: middleware must be imported before the Flask app is created.

app = Flask(__name__)
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
sign = Signature(ServerType.FLASK)
_except = Response()

conf_path = 'config.yaml'
model_path = 'model'
graph_path = 'graph'

system_config = Config(conf_path=conf_path,
                       model_path=model_path,
                       graph_path=graph_path)
route_map = {i['Class']: i['Route'] for i in system_config.route_map}
sign.set_auth([{
    'accessKey': system_config.access_key,
    'secretKey': system_config.secret_key
}])
logger = system_config.logger
interface_manager = InterfaceManager()
Example 12
    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        self.exception = Response()
Example 13
    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        self.exception = Response()
        self.executor = ThreadPoolExecutor(workers)
Example 14
from event_handler import FileEventHandler
from middleware import *
# The import order matters: middleware must be imported before the Flask app is created.

app = Flask(__name__)
cache = Cache(app, config={'CACHE_TYPE': 'simple'})

conf_path = '../config.yaml'
model_path = '../model'
graph_path = '../graph'

system_config = Config(conf_path=conf_path,
                       model_path=model_path,
                       graph_path=graph_path)
sign = Signature(ServerType.FLASK, system_config)
_except = Response(system_config.response_def_map)
route_map = {i['Class']: i['Route'] for i in system_config.route_map}
sign.set_auth([{
    'accessKey': system_config.access_key,
    'secretKey': system_config.secret_key
}])
logger = system_config.logger
interface_manager = InterfaceManager()
image_utils = ImageUtils(system_config)


@app.after_request
def after_request(response):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
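
The Observer and FileEventHandler imports above suggest a hot-reload watcher over the model directory, but that wiring is not shown in this listing. A hedged, generic sketch using watchdog's own FileSystemEventHandler in place of the project's FileEventHandler (it assumes the model/ directory from the config above exists):

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class ReloadHandler(FileSystemEventHandler):
    def on_modified(self, event):
        # In the real service this is where a model/graph reload would be triggered.
        print('changed:', event.src_path)

observer = Observer()
observer.schedule(ReloadHandler(), path='model', recursive=True)
observer.start()
try:
    time.sleep(1)   # stand-in for the server's main loop
finally:
    observer.stop()
    observer.join()
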