def auth_request():
    """
    This API performs captcha prediction with authentication.
    :return:
    """
    start_time = time.time()
    if not request.json or 'image' not in request.json:
        abort(400)

    if interface_manager.total == 0:
        logger.info('There is currently no model deployment and services are not available.')
        return json.dumps({"message": "", "success": False, "code": -999})

    bytes_batch, response = ImageUtils.get_bytes_batch(request.json['image'])

    if not bytes_batch:
        logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            request.json.get('model_type'), request.json.get('model_site'), response,
            (time.time() - start_time) * 1000)
        )
        return json.dumps(response), 200

    image_sample = bytes_batch[0]
    image_size = ImageUtils.size_of_image(image_sample)
    size_string = "{}x{}".format(image_size[0], image_size[1])

    if 'model_site' in request.json:
        interface = interface_manager.get_by_sites(request.json['model_site'], size_string, strict=system_config.strict_sites)
    elif 'model_type' in request.json:
        interface = interface_manager.get_by_type_size(size_string, request.json['model_type'])
    elif 'model_name' in request.json:
        interface = interface_manager.get_by_name(request.json['model_name'])
    else:
        interface = interface_manager.get_by_size(size_string)

    split_char = request.json['split_char'] if 'split_char' in request.json else interface.model_conf.split_char

    if 'need_color' in request.json and request.json['need_color']:
        bytes_batch = [interface.separate_color(_, color_map[request.json['need_color']]) for _ in bytes_batch]

    image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)

    if not image_batch:
        logger.error('[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            interface.name, size_string, request.json.get('model_type'), request.json.get('model_site'), response,
            (time.time() - start_time) * 1000)
        )
        return json.dumps(response), 200

    result = interface.predict_batch(image_batch, split_char)
    logger.info('[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.format(
        interface.name,
        size_string,
        request.json.get('model_type'),
        request.json.get('model_site'),
        result,
        (time.time() - start_time) * 1000
    ))
    response['message'] = result
    return json.dumps(response), 200
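
For context, a minimal client sketch for this Flask endpoint might look like the following. Only the 'image' field and the optional 'model_type'/'model_site'/'model_name'/'split_char' keys come from the handler above; the route, port, and the base64 encoding of the payload are assumptions.

# Hedged client sketch; the URL and encoding are assumptions, not the project's documented API.
import base64
import requests

def request_prediction(image_path, url="http://127.0.0.1:19952/captcha/auth/v2"):
    with open(image_path, "rb") as f:
        payload = {"image": base64.b64encode(f.read()).decode()}
    # Optional routing hints accepted by the handler:
    # payload["model_type"] = "..."; payload["model_site"] = "..."
    resp = requests.post(url, json=payload)
    return resp.json()  # e.g. {"message": <prediction>, "success": ..., "code": ...}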
Example #2
    def post(self):
        start_time = time.time()
        data = self.parse_param()
        if 'image' not in data.keys():
            raise tornado.web.HTTPError(400)

        # # You can separate the http service and the gRPC service like this:
        # response = rpc_request(
        #     data['image'],
        #     data['model_name'] if 'model_name' in data else '',
        #     data['model_type'] if 'model_type' in data else ''
        # )
        model_type = ParamUtils.filter(data.get('model_type'))
        model_site = ParamUtils.filter(data.get('model_site'))
        model_name = ParamUtils.filter(data.get('model_name'))
        split_char = ParamUtils.filter(data.get('split_char'))
        need_color = ParamUtils.filter(data.get('need_color'))

        bytes_batch, response = ImageUtils.get_bytes_batch(data['image'])

        if not bytes_batch:
            logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                model_type, model_site, response,
                (time.time() - start_time) * 1000)
            )
            return self.finish(json_encode(response))

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if 'model_site' in data:
            interface = interface_manager.get_by_sites(model_site, size_string)
        elif 'model_type' in data:
            interface = interface_manager.get_by_type_size(size_string, model_type)
        elif 'model_name' in data:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)

        split_char = split_char if 'split_char' in data else interface.model_conf.split_char

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch, color=need_color)

        if not image_batch:
            logger.error('[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                interface.name, size_string, model_type, model_site, response,
                (time.time() - start_time) * 1000)
            )
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, split_char)
        logger.info('[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.format(
            interface.name, size_string, model_type, model_site, result, (time.time() - start_time) * 1000)
        )
        response['message'] = result
        return self.write(json_encode(response))
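
The commented-out block above refers to an rpc_request helper that would hand the work to a separate gRPC service. A hedged sketch of such a helper, with the stub, message, and field names modeled on the servicer in Example #3 below (all of them assumptions):

# Hypothetical helper; grpc_pb2 / grpc_pb2_grpc stand for the generated proto modules.
import grpc

def rpc_request(image, model_name='', model_type=''):
    channel = grpc.insecure_channel('127.0.0.1:50054')  # address is an assumption
    stub = grpc_pb2_grpc.PredictStub(channel)           # assumed codegen name
    req = grpc_pb2.PredictRequest(image=image, model_name=model_name, model_type=model_type)
    result = stub.predict(req)
    return {"message": result.result, "success": result.success, "code": result.code}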
Example #3
    def predict(self, request, context):
        start_time = time.time()
        bytes_batch, status = ImageUtils.get_bytes_batch(request.image)

        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return {"result": "", "success": False, "code": -999}

        if not bytes_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if request.model_site:
            interface = interface_manager.get_by_sites(request.model_site,
                                                       size_string)
        elif request.model_name:
            interface = interface_manager.get_by_name(request.model_name)
        elif request.model_type:
            interface = interface_manager.get_by_type_size(
                size_string, request.model_type)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return {"result": "", "success": False, "code": 999}

        if request.need_color:
            bytes_batch = [
                color_extract.separate_color(_, color_map[request.need_color])
                for _ in bytes_batch
            ]

        image_batch, status = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch)

        if not image_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        result = interface.predict_batch(image_batch, request.split_char)
        logger.info(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'
            .format(interface.name, size_string, request.model_type,
                    request.model_site, result,
                    (time.time() - start_time) * 1000))
        return grpc_pb2.PredictResult(result=result,
                                      success=status['success'],
                                      code=status['code'])
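
For completeness, a sketch of how a servicer like the one above is typically wired into a grpcio server. The add_PredictServicer_to_server name follows the standard grpc codegen convention but is an assumption here:

from concurrent import futures
import grpc

def serve(servicer, port=50054):  # port is an assumption
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    grpc_pb2_grpc.add_PredictServicer_to_server(servicer, server)  # assumed codegen name
    server.add_insecure_port('[::]:{}'.format(port))
    server.start()
    server.wait_for_termination()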
Example #4
def auth_request():
    """
    This API performs captcha prediction with authentication.
    :return:
    """
    start_time = time.time()
    if not request.json or 'image' not in request.json:
        abort(400)

    bytes_batch, response = ImageUtils.get_bytes_batch(request.json['image'])

    if not bytes_batch:
        logger.error('Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
            request.json.get('model_type'), request.json.get('model_site'),
            response, (time.time() - start_time) * 1000))
        return json.dumps(response), 200

    image_sample = bytes_batch[0]
    image_size = ImageUtils.size_of_image(image_sample)
    size_string = "{}x{}".format(image_size[0], image_size[1])

    if 'model_site' in request.json:
        interface = interface_manager.get_by_sites(request.json['model_site'],
                                                   size_string)
    elif 'model_type' in request.json:
        interface = interface_manager.get_by_type_size(
            size_string, request.json['model_type'])
    elif 'model_name' in request.json:
        # get_by_name takes only the model name; passing size_string here was a bug
        interface = interface_manager.get_by_name(request.json['model_name'])
    else:
        interface = interface_manager.get_by_size(size_string)

    split_char = request.json.get('split_char', interface.model_conf.split_char)

    image_batch, response = ImageUtils.get_image_batch(interface.model_conf,
                                                       bytes_batch)

    if not image_batch:
        logger.error(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.
            format(interface.name, size_string, request.json.get('model_type'),
                   request.json.get('model_site'), response,
                   (time.time() - start_time) * 1000))
        return json.dumps(response), 200

    result = interface.predict_batch(image_batch, split_char)
    logger.info(
        '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'.
        format(interface.name, size_string, request.json.get('model_type'),
               request.json.get('model_site'), result,
               (time.time() - start_time) * 1000))
    response['message'] = result
    return json.dumps(response), 200
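
Every example routes requests through a size_string of the form "WIDTHxHEIGHT". A minimal sketch of a manager keyed that way; the class itself is hypothetical, only the key format and the method names come from the handlers:

class SimpleInterfaceManager:
    """Hypothetical manager keyed by 'WIDTHxHEIGHT' strings and model names."""

    def __init__(self):
        self._by_size = {}  # e.g. {"100x40": interface}
        self._by_name = {}

    @property
    def total(self):
        return len(self._by_name)

    def add(self, interface, size_string):
        self._by_name[interface.name] = interface
        self._by_size[size_string] = interface

    def get_by_name(self, name):
        return self._by_name.get(name)

    def get_by_size(self, size_string):
        return self._by_size.get(size_string)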
Example #5
    def post(self):
        start_time = time.time()
        data = self.parse_param()
        if 'image' not in data.keys():
            raise tornado.web.HTTPError(400)

        model_type = ParamUtils.filter(data.get('model_type'))
        model_site = ParamUtils.filter(data.get('model_site'))
        model_name = ParamUtils.filter(data.get('model_name'))
        split_char = ParamUtils.filter(data.get('split_char'))
        need_color = ParamUtils.filter(data.get('need_color'))
        if interface_manager.total == 0:
            logger.info('There is currently no model deployment and services are not available.')
            return self.finish(json_encode({"message": "", "success": False, "code": -999}))
        bytes_batch, response = ImageUtils.get_bytes_batch(data['image'])

        if not bytes_batch:
            logger.error('[{} {}] | Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                self.request.remote_ip, self.request.uri, model_type, model_site, response,
                (time.time() - start_time) * 1000)
            )
            return self.finish(json_encode(response))

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if 'model_site' in data and data['model_site']:
            interface = interface_manager.get_by_sites(model_site, size_string, strict=system_config.strict_sites)
        elif 'model_type' in data and data['model_type']:
            interface = interface_manager.get_by_type_size(size_string, model_type)
        elif 'model_name' in data and data['model_name']:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(json_encode({"message": "", "success": False, "code": 999}))

        split_char = split_char if 'split_char' in data else interface.model_conf.split_char

        if need_color:
            bytes_batch = [color_extract.separate_color(_, color_map[need_color]) for _ in bytes_batch]

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)

        if not image_batch:
            logger.error('[{} {}] | [{}] - Size[{}] - Type[{}] - Site[{}] - Response[{}] - {} ms'.format(
                self.request.remote_ip, self.request.uri, interface.name, size_string, model_type, model_site, response,
                round((time.time() - start_time) * 1000))
            )
            return self.finish(json_encode(response))
        response['message'] = yield self.predict(interface, image_batch, split_char, size_string, model_type, model_site, start_time)
        # escape "</" so the JSON can be embedded inline in HTML without closing a <script> tag
        return self.finish(json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
Example #6
    def post(self):
        start_time = time.time()
        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(
                json_encode({
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: -999
                }))

        bytes_batch, response = self.image_utils.get_bytes_batch(
            self.request.body)

        if not bytes_batch:
            logger.error('Response[{}] - {} ms'.format(
                response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])

        interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(
                json_encode({
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: 999
                }))

        image_batch, response = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch, param_key=None)

        if not image_batch:
            logger.error(
                '[{}] | [{}] - Size[{}] - Response[{}] - {} ms'.format(
                    self.request.remote_ip, interface.name, size_string,
                    response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, None)
        logger.info('[{}] | [{}] - Size[{}] - Predict[{}] - {} ms'.format(
            self.request.remote_ip, interface.name, size_string, result,
            (time.time() - start_time) * 1000))
        response[self.message_key] = result
        return self.write(
            json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
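
Unlike the JSON handlers, this one reads self.request.body directly, so a client posts the raw image bytes. A minimal sketch, with a hypothetical route:

import requests

def request_raw(image_path, url="http://127.0.0.1:19952/captcha/v1"):  # route is an assumption
    with open(image_path, "rb") as f:
        return requests.post(url, data=f.read()).json()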
Example #7
    def post(self):
        start_time = time.time()

        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(
                json_encode({
                    "message": "",
                    "success": False,
                    "code": -999
                }))

        bytes_batch, response = ImageUtils.get_bytes_batch(self.request.body)

        if not bytes_batch:
            logger.error('Response[{}] - {} ms'.format(
                response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])

        interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(
                json_encode({
                    "message": "",
                    "success": False,
                    "code": 999
                }))

        image_batch, response = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch)

        if not image_batch:
            logger.error('[{}] - Size[{}] - Response[{}] - {} ms'.format(
                interface.name, size_string, response,
                (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, None)
        logger.info('[{}] - Size[{}] - Predict[{}] - {} ms'.format(
            interface.name, size_string, result,
            (time.time() - start_time) * 1000))
        response['message'] = result
        return self.write(json_encode(response))
Example #8
    def predict(self, request, context):
        start_time = time.time()
        bytes_batch, status = ImageUtils.get_bytes_batch(request.image)
        if not bytes_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if request.model_site:
            interface = interface_manager.get_by_sites(request.model_site,
                                                       size_string)
        elif request.model_name:
            interface = interface_manager.get_by_name(request.model_name)
        elif request.model_type:
            interface = interface_manager.get_by_type_size(
                size_string, request.model_type)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return {"result": "", "success": False, "code": 999}
        image_batch, status = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch)

        if not image_batch:
            return grpc_pb2.PredictResult(result="",
                                          success=status['success'],
                                          code=status['code'])

        result = interface.predict_batch(image_batch, request.split_char)
        logger.info(
            '[{}] - Size[{}] - Type[{}] - Site[{}] - Predict Result[{}] - {} ms'
            .format(interface.name, size_string, request.model_type,
                    request.model_site, result,
                    (time.time() - start_time) * 1000))
        return grpc_pb2.PredictResult(result=result,
                                      success=status['success'],
                                      code=status['code'])
Example #9
    def post(self):
        uid = str(uuid.uuid1())
        start_time = time.time()
        data = self.parse_param()
        request_def_map = system_config.request_def_map
        input_data_key = request_def_map['InputData']
        model_name_key = request_def_map['ModelName']
        if input_data_key not in data.keys():
            raise tornado.web.HTTPError(400)

        model_name = ParamUtils.filter(data.get(model_name_key))
        output_split = ParamUtils.filter(data.get('output_split'))
        need_color = ParamUtils.filter(data.get('need_color'))
        param_key = ParamUtils.filter(data.get('param_key'))

        request_incr = self.request_incr
        request_count = " - Count[{}]".format(request_incr)
        log_params = " - ParamKey[{}]".format(param_key) if param_key else ""
        log_params += " - NeedColor[{}]".format(
            need_color) if need_color else ""

        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: -999
                }))
        bytes_batch, response = self.image_utils.get_bytes_batch(
            data[input_data_key])

        if not bytes_batch:
            logger.error('[{}] - [{} {}] | - Response[{}] - {} ms'.format(
                uid, self.request.remote_ip, self.request.uri, response,
                (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        # auxiliary_result = None

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])

        if request_limit != -1 and request_incr > request_limit:
            logger.info(
                '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri, size_string,
                    request_count, log_params,
                    "Maximum number of requests exceeded",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key:
                    "The maximum number of requests has been exceeded",
                    self.status_bool_key: False,
                    self.status_code_key: -444
                }))
        if model_name_key in data and data[model_name_key]:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: 999
                }))

        output_split = output_split if 'output_split' in data else interface.model_conf.output_split

        if need_color:
            bytes_batch = [
                color_extract.separate_color(_, color_map[need_color])
                for _ in bytes_batch
            ]

        if interface.model_conf.corp_params:
            bytes_batch = corp_to_multi.parse_multi_img(
                bytes_batch, interface.model_conf.corp_params)

        if interface.model_conf.exec_map and not param_key:
            logger.info(
                '[{}] - [{} {}] | [{}] - Size[{}]{}{} - Error[{}] - {} ms'.
                format(
                    uid, self.request.remote_ip, self.request.uri,
                    interface.name, size_string, request_count, log_params,
                    "The model is missing the param_key parameter because the model is configured with ExecuteMap.",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "Missing the parameter [param_key].",
                    self.status_bool_key: False,
                    self.status_code_key: 474
                }))

        if interface.model_conf.external_model and interface.model_conf.corp_params:
            result = []
            len_of_result = []
            pre_corp_num = 0
            for corp_param in interface.model_conf.corp_params:
                corp_size = corp_param['corp_size']
                corp_num_list = corp_param['corp_num']
                corp_num = corp_num_list[0] * corp_num_list[1]
                sub_bytes_batch = bytes_batch[pre_corp_num:pre_corp_num + corp_num]
                # accumulate the offset so each corp group reads its own slice
                pre_corp_num += corp_num
                size_string = "{}x{}".format(corp_size[0], corp_size[1])

                sub_interface = interface_manager.get_by_size(size_string)

                image_batch, response = ImageUtils.get_image_batch(
                    sub_interface.model_conf,
                    sub_bytes_batch,
                    param_key=param_key)

                text = yield self.predict(sub_interface,
                                          image_batch,
                                          output_split,
                                          size_string,
                                          start_time,
                                          log_params,
                                          request_count,
                                          uid=uid)
                result.append(text)
                len_of_result.append(
                    len(result[0].split(
                        sub_interface.model_conf.category_split)))

            response[self.message_key] = interface.model_conf.output_split.join(result)
            if interface.model_conf.corp_params and interface.model_conf.output_coord:
                # final_result = auxiliary_result + "," + response[self.message_key]
                # if auxiliary_result else response[self.message_key]
                final_result = response[self.message_key]
                response[self.message_key] = corp_to_multi.get_coordinate(
                    label=final_result,
                    param_group=interface.model_conf.corp_params,
                    title_index=[i for i in range(len_of_result[0])])
            return self.finish(
                json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
        else:
            image_batch, response = ImageUtils.get_image_batch(
                interface.model_conf, bytes_batch, param_key=param_key)

        # if interface.model_conf.batch_model:
        #     auxiliary_index = list(interface.model_conf.batch_model.keys())[0]
        #     auxiliary_name = list(interface.model_conf.batch_model.values())[0]
        #     auxiliary_interface = interface_manager.get_by_name(auxiliary_name)
        #     auxiliary_image_batch, response = ImageUtils.get_image_batch(
        #         auxiliary_interface.model_conf,
        #         bytes_batch,
        #         param_key=param_key
        #     )
        #     auxiliary_result = yield self.predict(
        #         auxiliary_interface,
        #         auxiliary_image_batch[auxiliary_index: auxiliary_index+1],
        #         output_split,
        #         size_string,
        #         start_time
        #     )
        #     image_batch = np.delete(image_batch, auxiliary_index, axis=0).tolist()

        if not image_batch:
            logger.error(
                '[{}] - [{} {}] | [{}] - Size[{}] - Response[{}] - {} ms'.
                format(uid, self.request.remote_ip, self.request.uri,
                       interface.name, size_string, response,
                       round((time.time() - start_time) * 1000)))
            response[self.uid_key] = uid
            return self.finish(json_encode(response))

        response[self.message_key] = yield self.predict(interface,
                                                        image_batch,
                                                        output_split,
                                                        size_string,
                                                        start_time,
                                                        log_params,
                                                        request_count,
                                                        uid=uid)
        response[self.uid_key] = uid
        self.executor.submit(self.save_image, uid, response[self.message_key],
                             bytes_batch[0])
        # if interface.model_conf.corp_params and interface.model_conf.output_coord:
        #     # final_result = auxiliary_result + "," + response[self.message_key]
        #     # if auxiliary_result else response[self.message_key]
        #     final_result = response[self.message_key]
        #     response[self.message_key] = corp_to_multi.get_coordinate(
        #         label=final_result,
        #         param_group=interface.model_conf.corp_params,
        #         title_index=[0]
        #     )
        return self.finish(
            json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
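
A small worked example of the corp_params slicing used above, with made-up crop groups. Each group consumes corp_num[0] * corp_num[1] crops from bytes_batch, so the offset must accumulate across groups:

corp_params = [
    {"corp_size": [60, 60], "corp_num": [2, 2]},   # 2 * 2 = 4 crops
    {"corp_size": [100, 40], "corp_num": [1, 3]},  # 1 * 3 = 3 crops
]
bytes_batch = list(range(7))  # stand-ins for 7 cropped images
pre_corp_num = 0
for corp_param in corp_params:
    corp_num = corp_param["corp_num"][0] * corp_param["corp_num"][1]
    sub_bytes_batch = bytes_batch[pre_corp_num:pre_corp_num + corp_num]
    pre_corp_num += corp_num
    print(corp_param["corp_size"], sub_bytes_batch)
# [60, 60] [0, 1, 2, 3]
# [100, 40] [4, 5, 6]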
Example #10
    def post(self):
        uid = str(uuid.uuid1())
        param_key = None
        start_time = time.time()
        if interface_manager.total == 0:
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: -999
                }))

        bytes_batch, response = self.image_utils.get_bytes_batch(
            self.request.body)

        if not bytes_batch:
            logger.error('Response[{}] - {} ms'.format(
                response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])

        interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(
                json_encode({
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: 999
                }))

        exec_map = interface.model_conf.exec_map
        if exec_map and len(exec_map.keys()) > 1:
            logger.info(
                '[{}] - [{} {}] | [{}] - Size[{}] - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri,
                    interface.name, size_string,
                    "The model is configured with ExecuteMap, but the api do not support this param.",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.message_key: "the api do not support [ExecuteMap].",
                    self.status_bool_key: False,
                    self.status_code_key: 474
                }))
        elif exec_map and len(exec_map.keys()) == 1:
            param_key = list(interface.model_conf.exec_map.keys())[0]

        image_batch, response = ImageUtils.get_image_batch(
            interface.model_conf, bytes_batch, param_key=param_key)

        if not image_batch:
            logger.error(
                '[{}] - [{}] | [{}] - Size[{}] - Response[{}] - {} ms'.format(
                    uid, self.request.remote_ip, interface.name, size_string,
                    response, (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        result = interface.predict_batch(image_batch, None)
        logger.info(
            '[{}] - [{}] | [{}] - Size[{}] - Predict[{}] - {} ms'.format(
                uid, self.request.remote_ip, interface.name, size_string,
                result, (time.time() - start_time) * 1000))
        response[self.uid_key] = uid
        response[self.message_key] = result
        return self.write(
            json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
Example #11
    def post(self):
        uid = str(uuid.uuid1())
        start_time = time.time()
        data = self.parse_param()
        request_def_map = system_config.request_def_map
        input_data_key = request_def_map['InputData']
        model_name_key = request_def_map['ModelName']
        if input_data_key not in data.keys():
            raise tornado.web.HTTPError(400)

        model_name = ParamUtils.filter(data.get(model_name_key))
        output_split = ParamUtils.filter(data.get('output_split'))
        need_color = ParamUtils.filter(data.get('need_color'))
        param_key = ParamUtils.filter(data.get('param_key'))
        extract_rgb = ParamUtils.filter(data.get('extract_rgb'))

        request_incr = self.request_incr
        global_count = self.global_request_incr
        request_count = " - Count[{}]".format(request_incr)
        log_params = " - ParamKey[{}]".format(param_key) if param_key else ""
        log_params += " - NeedColor[{}]".format(
            need_color) if need_color else ""

        if interface_manager.total == 0:
            self.request_desc()
            self.global_request_desc()
            logger.info(
                'There is currently no model deployment and services are not available.'
            )
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: -999
                }))
        bytes_batch, response = self.image_utils.get_bytes_batch(
            data[input_data_key])

        if not bytes_batch:
            logger.error('[{}] - [{} {}] | - Response[{}] - {} ms'.format(
                uid, self.request.remote_ip, self.request.uri, response,
                (time.time() - start_time) * 1000))
            return self.finish(json_encode(response))

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if system_config.request_size_limit and size_string not in system_config.request_size_limit:
            self.request_desc()
            self.global_request_desc()
            logger.info(
                '[{}] - [{} {}] | Size[{}] - [{}][{}] - Error[{}] - {} ms'.
                format(uid, self.request.remote_ip, self.request.uri,
                       size_string, global_count, log_params,
                       "Image size is invalid.",
                       round((time.time() - start_time) * 1000)))
            msg = system_config.request_size_limit.get("msg")
            msg = msg if msg else "The size of the picture is wrong. " \
                                  "Only the original image is supported. " \
                                  "Please do not take a screenshot!"
            return self.finish(
                json.dumps(
                    {
                        self.uid_key: uid,
                        self.message_key: msg,
                        self.status_bool_key: False,
                        self.status_code_key: -250
                    },
                    ensure_ascii=False))

        if system_config.use_whitelist:
            assert_whitelist = self.match_whitelist(self.request.remote_ip)
            if not assert_whitelist:
                logger.info(
                    '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                        uid, self.request.remote_ip, self.request.uri,
                        size_string, request_count, log_params,
                        "Whitelist limit",
                        round((time.time() - start_time) * 1000)))
                return self.finish(
                    json.dumps(
                        {
                            self.uid_key: uid,
                            self.message_key:
                            "Only allow IP access in the whitelist",
                            self.status_bool_key: False,
                            self.status_code_key: -111
                        },
                        ensure_ascii=False))

        if global_request_limit != -1 and global_count > global_request_limit:
            logger.info(
                '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri, size_string,
                    global_count, log_params,
                    "Maximum number of requests exceeded (G)",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json.dumps(
                    {
                        self.uid_key: uid,
                        self.message_key: system_config.exceeded_msg,
                        self.status_bool_key: False,
                        self.status_code_key: -555
                    },
                    ensure_ascii=False))

        assert_blacklist = self.match_blacklist(self.request.remote_ip)
        if assert_blacklist:
            logger.info(
                '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri, size_string,
                    request_count, log_params,
                    "The ip is on the risk blacklist (IP)",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json.dumps(
                    {
                        self.uid_key: uid,
                        self.message_key: system_config.exceeded_msg,
                        self.status_bool_key: False,
                        self.status_code_key: -110
                    },
                    ensure_ascii=False))
        if request_limit != -1 and request_incr > request_limit:
            self.risk_ip_count(self.request.remote_ip)
            assert_blacklist_trigger = system_config.blacklist_trigger_times != -1
            if self.risk_ip(self.request.remote_ip) > system_config.blacklist_trigger_times and assert_blacklist_trigger:
                if self.request.remote_ip not in blacklist():
                    set_blacklist(self.request.remote_ip)
                    update_blacklist()
            logger.info(
                '[{}] - [{} {}] | Size[{}]{}{} - Error[{}] - {} ms'.format(
                    uid, self.request.remote_ip, self.request.uri, size_string,
                    request_count, log_params,
                    "Maximum number of requests exceeded (IP)",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json.dumps(
                    {
                        self.uid_key: uid,
                        self.message_key: system_config.exceeded_msg,
                        self.status_bool_key: False,
                        self.status_code_key: -444
                    },
                    ensure_ascii=False))
        if model_name_key in data and data[model_name_key]:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            self.request_desc()
            self.global_request_desc()
            logger.info('Service is not ready!')
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "",
                    self.status_bool_key: False,
                    self.status_code_key: 999
                }))

        output_split = output_split if 'output_split' in data else interface.model_conf.output_split

        if interface.model_conf.corp_params:
            bytes_batch = corp_to_multi.parse_multi_img(
                bytes_batch, interface.model_conf.corp_params)

        exec_map = interface.model_conf.exec_map
        if exec_map and len(exec_map.keys()) > 1 and not param_key:
            self.request_desc()
            self.global_request_desc()
            logger.info(
                '[{}] - [{} {}] | [{}] - Size[{}]{}{} - Error[{}] - {} ms'.
                format(
                    uid, self.request.remote_ip, self.request.uri,
                    interface.name, size_string, request_count, log_params,
                    "The model is missing the param_key parameter because the model is configured with ExecuteMap.",
                    round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "Missing the parameter [param_key].",
                    self.status_bool_key: False,
                    self.status_code_key: 474
                }))
        elif exec_map and param_key and param_key not in exec_map:
            self.request_desc()
            self.global_request_desc()
            logger.info(
                '[{}] - [{} {}] | [{}] - Size[{}]{}{} - Error[{}] - {} ms'.
                format(uid, self.request.remote_ip, self.request.uri,
                       interface.name, size_string, request_count, log_params,
                       "The param_key parameter is not support in the model.",
                       round((time.time() - start_time) * 1000)))
            return self.finish(
                json_encode({
                    self.uid_key: uid,
                    self.message_key: "Not support the parameter [param_key].",
                    self.status_bool_key: False,
                    self.status_code_key: 474
                }))
        elif exec_map and len(exec_map.keys()) == 1:
            param_key = list(interface.model_conf.exec_map.keys())[0]

        if interface.model_conf.external_model and interface.model_conf.corp_params:
            result = []
            len_of_result = []
            pre_corp_num = 0
            for corp_param in interface.model_conf.corp_params:
                corp_size = corp_param['corp_size']
                corp_num_list = corp_param['corp_num']
                corp_num = corp_num_list[0] * corp_num_list[1]
                sub_bytes_batch = bytes_batch[pre_corp_num:pre_corp_num + corp_num]
                # accumulate the offset so each corp group reads its own slice
                pre_corp_num += corp_num
                size_string = "{}x{}".format(corp_size[0], corp_size[1])

                sub_interface = interface_manager.get_by_size(size_string)

                image_batch, response = ImageUtils.get_image_batch(
                    sub_interface.model_conf,
                    sub_bytes_batch,
                    param_key=param_key)

                text = yield self.predict(sub_interface,
                                          image_batch,
                                          output_split,
                                          size_string,
                                          start_time,
                                          log_params,
                                          request_count,
                                          uid=uid)
                result.append(text)
                len_of_result.append(
                    len(result[0].split(
                        sub_interface.model_conf.category_split)))

            response[self.message_key] = interface.model_conf.output_split.join(result)
            if interface.model_conf.corp_params and interface.model_conf.output_coord:
                # final_result = auxiliary_result + "," + response[self.message_key]
                # if auxiliary_result else response[self.message_key]
                final_result = response[self.message_key]
                response[self.message_key] = corp_to_multi.get_coordinate(
                    label=final_result,
                    param_group=interface.model_conf.corp_params,
                    title_index=[i for i in range(len_of_result[0])])
            return self.finish(
                json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
        else:
            image_batch, response = ImageUtils.get_image_batch(
                interface.model_conf,
                bytes_batch,
                param_key=param_key,
                extract_rgb=extract_rgb)

        if not image_batch:
            self.request_desc()
            self.global_request_desc()
            logger.error(
                '[{}] - [{} {}] | [{}] - Size[{}] - Response[{}] - {} ms'.
                format(uid, self.request.remote_ip, self.request.uri,
                       interface.name, size_string, response,
                       round((time.time() - start_time) * 1000)))
            response[self.uid_key] = uid
            return self.finish(json_encode(response))

        predict_result = yield self.predict(interface, image_batch,
                                            output_split)

        # if need_color:
        #     # only support six label and size [90x35].
        #     color_batch = np.resize(image_batch[0], (90, 35, 3))
        #     need_index = color_extract.predict_color(image_batch=[color_batch], color=color_map[need_color])
        #     predict_result = "".join([v for i, v in enumerate(predict_result) if i in need_index])

        uid_str = "[{}] - ".format(uid)
        logger.info(
            '{}[{} {}] | [{}] - Size[{}]{}{} - Predict[{}] - {} ms'.format(
                uid_str, self.request.remote_ip, self.request.uri,
                interface.name, size_string, request_count, log_params,
                predict_result, round((time.time() - start_time) * 1000)))
        response[self.message_key] = predict_result
        response[self.uid_key] = uid
        self.executor.submit(self.save_image, uid, response[self.message_key],
                             bytes_batch[0])
        if interface.model_conf.corp_params and interface.model_conf.output_coord:
            # final_result = auxiliary_result + "," + response[self.message_key]
            # if auxiliary_result else response[self.message_key]
            final_result = response[self.message_key]
            response[self.message_key] = corp_to_multi.get_coordinate(
                label=final_result,
                param_group=interface.model_conf.corp_params,
                title_index=[0])
        return self.finish(
            json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
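
The handler above layers several guards: a per-IP counter (request_incr) checked against request_limit, a global counter checked against global_request_limit, and a blacklist for repeat offenders. A minimal fixed-window per-IP counter in the same spirit; every name here is hypothetical:

import time
from collections import defaultdict

class FixedWindowCounter:
    """Hypothetical per-IP fixed-window request counter."""

    def __init__(self, window_seconds=60):
        self.window = window_seconds
        self.window_start = time.time()
        self.counts = defaultdict(int)

    def incr(self, ip):
        if time.time() - self.window_start >= self.window:
            self.counts.clear()  # start a new window
            self.window_start = time.time()
        self.counts[ip] += 1
        return self.counts[ip]

counter = FixedWindowCounter()
request_limit = 100
if request_limit != -1 and counter.incr("1.2.3.4") > request_limit:
    print("reject with code -444, as the handler does")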
Example #12
    def post(self):
        start_time = time.time()
        data = self.parse_param()
        if 'image' not in data.keys():
            raise tornado.web.HTTPError(400)

        model_name = ParamUtils.filter(data.get('model_name'))
        output_split = ParamUtils.filter(data.get('output_split'))
        need_color = ParamUtils.filter(data.get('need_color'))

        if interface_manager.total == 0:
            logger.info('There is currently no model deployment and services are not available.')
            return self.finish(json_encode({"message": "", "success": False, "code": -999}))
        bytes_batch, response = self.image_utils.get_bytes_batch(data['image'])

        if not bytes_batch:
            logger.error('[{} {}] | - Response[{}] - {} ms'.format(
                self.request.remote_ip, self.request.uri, response,
                (time.time() - start_time) * 1000)
            )
            return self.finish(json_encode(response))
        auxiliary_result = None

        image_sample = bytes_batch[0]
        image_size = ImageUtils.size_of_image(image_sample)
        size_string = "{}x{}".format(image_size[0], image_size[1])
        if 'model_name' in data and data['model_name']:
            interface = interface_manager.get_by_name(model_name)
        else:
            interface = interface_manager.get_by_size(size_string)
        if not interface:
            logger.info('Service is not ready!')
            return self.finish(json_encode({"message": "", "success": False, "code": 999}))

        output_split = output_split if 'output_split' in data else interface.model_conf.output_split

        if need_color:
            bytes_batch = [color_extract.separate_color(_, color_map[need_color]) for _ in bytes_batch]
        if interface.model_conf.corp_params:
            bytes_batch = corp_to_multi.parse_multi_img(bytes_batch, interface.model_conf.corp_params)

        image_batch, response = ImageUtils.get_image_batch(interface.model_conf, bytes_batch)
        if interface.model_conf.batch_model:
            auxiliary_index = list(interface.model_conf.batch_model.keys())[0]
            auxiliary_name = list(interface.model_conf.batch_model.values())[0]
            auxiliary_interface = interface_manager.get_by_name(auxiliary_name)
            auxiliary_image_batch, response = ImageUtils.get_image_batch(auxiliary_interface.model_conf, bytes_batch)
            auxiliary_result = yield self.predict(
                auxiliary_interface,
                auxiliary_image_batch[auxiliary_index: auxiliary_index+1],
                output_split,
                size_string,
                start_time
            )
            image_batch = np.delete(image_batch, auxiliary_index, axis=0).tolist()

        if not image_batch:
            logger.error('[{} {}] | [{}] - Size[{}] - Response[{}] - {} ms'.format(
                self.request.remote_ip, self.request.uri, interface.name, size_string, response,
                round((time.time() - start_time) * 1000))
            )
            return self.finish(json_encode(response))

        response['message'] = yield self.predict(interface, image_batch, output_split, size_string, start_time)

        if interface.model_conf.corp_params and interface.model_conf.output_coord:
            final_result = auxiliary_result + "," + response['message'] if auxiliary_result else response['message']
            response['message'] = corp_to_multi.get_coordinate(
                label=final_result,
                param_group=interface.model_conf.corp_params,
                title_index=[0]
            )
        return self.finish(json.dumps(response, ensure_ascii=False).replace("</", "<\\/"))
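
The batch_model branch above runs an auxiliary model on a single sample and then removes that sample from the main batch with np.delete. A tiny demonstration of that call, with illustrative shapes:

import numpy as np

image_batch = np.zeros((4, 40, 100, 1))  # a batch of 4 samples
auxiliary_index = 0
main_batch = np.delete(image_batch, auxiliary_index, axis=0)
print(main_batch.shape)  # (3, 40, 100, 1) - the auxiliary sample is gone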