Exemplo n.º 1
0
def auth_request():
    """
    Captcha-prediction endpoint with authentication.

    Expects a JSON body with at least an ``image`` field (base64 image data,
    per ``ImageUtils.get_bytes_batch``). Optional fields:
    ``model_site`` / ``model_type`` / ``model_name`` select the model
    (checked in that order, falling back to image-size routing),
    ``split_char`` overrides the result separator, and ``need_color``
    requests color separation before prediction.

    :return: JSON string (optionally with HTTP status 200) whose
        ``message`` field carries the prediction result; error paths return
        ``success: False`` with a numeric ``code``.
    """
    start_time = time.time()
    if not request.json or 'image' not in request.json:
        abort(400)

    # No model deployed at all -> service unavailable.
    if interface_manager.total == 0:
        logger.info('There is currently no model deployment and services are not available.')
        return json.dumps({"message": "", "success": False, "code": -999})

    bytes_batch, response = ImageUtils.get_bytes_batch(request.json['image'])

    if not bytes_batch:
        logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            request.json.get('model_type'), request.json.get('model_site'), response,
            (time.time() - start_time) * 1000)
        )
        return json.dumps(response), 200

    # Route by the size of the first image in the batch.
    image_sample = bytes_batch[0]
    image_size = ImageUtils.size_of_image(image_sample)
    size_string = "{}x{}".format(image_size[0], image_size[1])

    if 'model_site' in request.json:
        interface = interface_manager.get_by_sites(request.json['model_site'], size_string, strict=system_config.strict_sites)
    elif 'model_type' in request.json:
        interface = interface_manager.get_by_type_size(size_string, request.json['model_type'])
    elif 'model_name' in request.json:
        interface = interface_manager.get_by_name(request.json['model_name'])
    else:
        interface = interface_manager.get_by_size(size_string)

    # Bug fix: the lookups above may return None when no deployed model
    # matches; previously this fell through to an AttributeError on
    # interface.model_conf. Mirror the code-999 response used elsewhere.
    if not interface:
        logger.info('Service is not ready!')
        return json.dumps({"message": "", "success": False, "code": 999})

    split_char = request.json['split_char'] if 'split_char' in request.json else interface.model_conf.split_char

    if 'need_color' in request.json and request.json['need_color']:
        bytes_batch = [interface.separate_color(_, color_map[request.json['need_color']]) for _ in bytes_batch]

    image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)

    if not image_batch:
        logger.error('[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            interface.name, size_string, request.json.get('model_type'), request.json.get('model_site'), response,
            (time.time() - start_time) * 1000)
        )
        return json.dumps(response), 200

    result = interface.predict_batch(image_batch, split_char)
    logger.info('[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.format(
        interface.name,
        size_string,
        request.json.get('model_type'),
        request.json.get('model_site'),
        result,
        (time.time() - start_time) * 1000
    ))
    response['message'] = result
    return json.dumps(response), 200
Exemplo n.º 2
0
    def post(self):
        """
        Captcha-prediction HTTP handler.

        Parses the request parameters, selects a model interface by site,
        type, name or image size, runs prediction and writes the JSON
        response; error paths finish early with ``success: False``.
        """
        start_time = time.time()
        data = self.parse_param()
        if 'image' not in data.keys():
            raise tornado.web.HTTPError(400)

        # # You can separate the http service and the gRPC service like this:
        # response = rpc_request(
        #     data['image'],
        #     data['model_name'] if 'model_name' in data else '',
        #     data['model_type'] if 'model_type' in data else ''
        # )
        model_type = ParamUtils.filter(data.get('model_type'))
        model_site = ParamUtils.filter(data.get('model_site'))
        model_name = ParamUtils.filter(data.get('model_name'))
        split_char = ParamUtils.filter(data.get('split_char'))
        need_color = ParamUtils.filter(data.get('need_color'))

        bytes_batch, response = ImageUtils.get_bytes_batch(data['image'])

        if not bytes_batch:
            logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                model_type, model_site, response,
                (time.time() - start_time) * 1000)
            )
            return self.finish(json_encode(response))

        # Route by the size of the first image in the batch.
        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if 'model_site' in data:
            interface = interface_manager.get_by_sites(model_site, size_string)
        elif 'model_type' in data:
            interface = interface_manager.get_by_type_size(size_string, model_type)
        elif 'model_name' in data:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)

        # Bug fix: the lookups above may return None when no deployed model
        # matches; previously this crashed with an AttributeError on
        # interface.model_conf.split_char below.
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(json_encode({"message": "", "success": False, "code": 999}))

        split_char = split_char if 'split_char' in data else interface.model_conf.split_char

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch, color=need_color)

        if not image_batch:
            logger.error('[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                interface.name, size_string, model_type, model_site, response,
                (time.time() - start_time) * 1000)
            )
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, split_char)
        logger.info('[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.format(
            interface.name, size_string, model_type, model_site, result, (time.time() - start_time) * 1000)
        )
        response['message'] = result
        return self.write(json_encode(response))
Exemplo n.º 3
0
    def predict(self, request, context):
        """
        gRPC captcha-prediction servicer method.

        Decodes the image bytes from the request, selects a model interface
        by site, name, type or image size, and returns a
        ``grpc_pb2.PredictResult`` message. Error paths return an empty
        result with ``success=False`` and a diagnostic ``code``.
        """
        start_time = time.time()

        # Check availability before doing any decoding work.
        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            # Bug fix: a gRPC servicer must return the protobuf message type,
            # not a plain dict (the framework cannot serialize a dict).
            return grpc_pb2.PredictResult(result="", success=False, code=-999)

        bytes_batch, status = ImageUtils.get_bytes_batch(request.image)

        if not bytes_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        # Route by the size of the first image in the batch.
        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if request.model_site:
            interface = interface_manager.get_by_sites(request.model_site,
                                                       size_string)
        elif request.model_name:
            interface = interface_manager.get_by_name(request.model_name)
        elif request.model_type:
            interface = interface_manager.get_by_type_size(
                size_string, request.model_type)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            # Bug fix: same as above — return the protobuf message, not a dict.
            return grpc_pb2.PredictResult(result="", success=False, code=999)

        if request.need_color:
            bytes_batch = [
                color_extract.separate_color(_, color_map[request.need_color])
                for _ in bytes_batch
            ]

        image_batch, status = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch)

        if not image_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        result = interface.predict_batch(image_batch, request.split_char)
        logger.info(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'
            .format(interface.name, size_string, request.model_type,
                    request.model_site, result,
                    (time.time() - start_time) * 1000))
        return grpc_pb2.PredictResult(result=result,
                                      success=status['success'],
                                      code=status['code'])
Exemplo n.º 4
0
def auth_request():
    """
    Captcha-prediction endpoint with authentication.

    Expects a JSON body with at least an ``image`` field. Optional
    ``model_site`` / ``model_type`` / ``model_name`` fields select the model
    (checked in that order, falling back to image-size routing), and
    ``split_char`` overrides the result separator.

    :return: JSON string with HTTP status 200; the prediction result is in
        the ``message`` field.
    """
    start_time = time.time()
    if not request.json or 'image' not in request.json:
        abort(400)

    bytes_batch, response = ImageUtils.get_bytes_batch(request.json['image'])

    if not bytes_batch:
        logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            request.json.get('model_type'), request.json.get('model_site'),
            response, (time.time() - start_time) * 1000))
        return json.dumps(response), 200

    # Route by the size of the first image in the batch.
    image_sample = bytes_batch[0]
    image_size = ImageUtils.size_of_image(image_sample)
    size_string = "{}x{}".format(image_size[0], image_size[1])

    if 'model_site' in request.json:
        interface = interface_manager.get_by_sites(request.json['model_site'],
                                                   size_string)
    elif 'model_type' in request.json:
        interface = interface_manager.get_by_type_size(
            size_string, request.json['model_type'])
    elif 'model_name' in request.json:
        # Bug fix: get_by_name takes only the model name — every other call
        # site passes a single argument; size_string was an extraneous
        # positional argument here.
        interface = interface_manager.get_by_name(request.json['model_name'])
    else:
        interface = interface_manager.get_by_size(size_string)

    split_char = request.json[
        'split_char'] if 'split_char' in request.json else interface.model_conf.split_char

    image_batch, response = ImageUtils.get_image_batch(interface.model_conf,
                                                       bytes_batch)

    if not image_batch:
        logger.error(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.
            format(interface.name, size_string, request.json.get('model_type'),
                   request.json.get('model_site'), response,
                   (time.time() - start_time) * 1000))
        return json.dumps(response), 200

    result = interface.predict_batch(image_batch, split_char)
    logger.info(
        '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.
        format(interface.name, size_string, request.json.get('model_type'),
               request.json.get('model_site'), result,
               (time.time() - start_time) * 1000))
    response['message'] = result
    return json.dumps(response), 200
Exemplo n.º 5
0
    def post(self):
        """
        Captcha-prediction HTTP handler (non-blocking variant).

        Parses the request parameters, selects a model interface by site,
        type, name or image size, then delegates the actual prediction to
        ``self.predict`` via ``yield`` before finishing the response.
        """
        start_time = time.time()
        data = self.parse_param()
        if 'image' not in data:
            raise tornado.web.HTTPError(400)

        model_type = ParamUtils.filter(data.get('model_type'))
        model_site = ParamUtils.filter(data.get('model_site'))
        model_name = ParamUtils.filter(data.get('model_name'))
        split_char = ParamUtils.filter(data.get('split_char'))
        need_color = ParamUtils.filter(data.get('need_color'))

        # No model deployed at all -> service unavailable.
        if interface_manager.total == 0:
            logger.info('There is currently no model deployment and services are not available.')
            return self.finish(json_encode({"message": "", "success": False, "code": -999}))

        bytes_batch, response = ImageUtils.get_bytes_batch(data['image'])
        if not bytes_batch:
            logger.error('[{} {}] | Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                self.request.remote_ip, self.request.uri, model_type, model_site, response,
                (time.time() - start_time) * 1000)
            )
            return self.finish(json_encode(response))

        # The first image of the batch determines the routing size.
        first_image = bytes_batch[0]
        dims = ImageUtils.size_of_image(first_image)
        size_string = "{}x{}".format(dims[0], dims[1])

        if data.get('model_site'):
            interface = interface_manager.get_by_sites(model_site, size_string, strict=system_config.strict_sites)
        elif data.get('model_type'):
            interface = interface_manager.get_by_type_size(size_string, model_type)
        elif data.get('model_name'):
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)

        if not interface:
            logger.info('Service is not ready!')
            return self.finish(json_encode({"message": "", "success": False, "code": 999}))

        # Fall back to the model's own separator when none was supplied.
        if 'split_char' not in data:
            split_char = interface.model_conf.split_char

        if need_color:
            target_color = color_map[need_color]
            bytes_batch = [color_extract.separate_color(img, target_color) for img in bytes_batch]

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)
        if not image_batch:
            logger.error('[{} {}] | [{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                self.request.remote_ip, self.request.uri, interface.name, size_string, model_type, model_site, response,
                round((time.time() - start_time) * 1000))
            )
            return self.finish(json_encode(response))

        response['message'] = yield self.predict(interface, image_batch, split_char, size_string, model_type, model_site, start_time)
        return self.finish(json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
Exemplo n.º 6
0
    def post(self):
        """
        Size-routed captcha prediction.

        The raw request body is the image; the model interface is chosen
        purely from the image dimensions, with no parameters parsed.
        """
        start_time = time.time()

        # No model deployed at all -> service unavailable.
        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(json_encode({"message": "", "success": False, "code": -999}))

        bytes_batch, response = ImageUtils.get_bytes_batch(self.request.body)
        if not bytes_batch:
            logger.error('Response[{}] - {} ms'.format(
                response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        # The first image of the batch determines the routing size.
        dims = ImageUtils.size_of_image(bytes_batch[0])
        size_string = "{}x{}".format(dims[0], dims[1])

        interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(json_encode({"message": "", "success": False, "code": 999}))

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)
        if not image_batch:
            logger.error(
                '[{}] | [{}] - Size[{}] - Response[{}] - {} ms'.format(
                    self.request.remote_ip, interface.name, size_string,
                    response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, None)
        logger.info('[{}] | [{}] - Size[{}] - Predict[{}] - {} ms'.format(
            self.request.remote_ip, interface.name, size_string, result,
            (time.time() - start_time) * 1000))
        response['message'] = result
        # Escape "</" so the JSON payload is safe to embed in HTML.
        return self.write(json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
Exemplo n.º 7
0
    def predict(self, request, context):
        """
        gRPC captcha-prediction servicer method.

        Decodes the image bytes from the request, selects a model interface
        by site, name, type or image size, and returns a
        ``grpc_pb2.PredictResult`` message. Error paths return an empty
        result with ``success=False`` and a diagnostic ``code``.
        """
        start_time = time.time()
        bytes_batch, status = ImageUtils.get_bytes_batch(request.image)
        if not bytes_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        # Route by the size of the first image in the batch.
        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if request.model_site:
            interface = interface_manager.get_by_sites(request.model_site,
                                                       size_string)
        elif request.model_name:
            interface = interface_manager.get_by_name(request.model_name)
        elif request.model_type:
            interface = interface_manager.get_by_type_size(
                size_string, request.model_type)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            # Bug fix: a gRPC servicer must return the protobuf message type,
            # not a plain dict (the framework cannot serialize a dict).
            return grpc_pb2.PredictResult(result="", success=False, code=999)
        image_batch, status = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch)

        if not image_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        result = interface.predict_batch(image_batch, request.split_char)
        logger.info(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'
            .format(interface.name, size_string, request.model_type,
                    request.model_site, result,
                    (time.time() - start_time) * 1000))
        return grpc_pb2.PredictResult(result=result,
                                      success=status['success'],
                                      code=status['code'])