def pre_eval(net, batch_size, reqs):
    '''
        prepare net forward data
        Parameters
        ----------
        net: net created by net_init
        batch_size: max batch size supported by the net
        reqs: parsed reqs from net_inference
        Return
        ----------
        image_shape_list_h_w: list of [height, width] for each input image
        images: list of preprocessed images ready for net forward
    '''
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    image_shape_list_h_w = []
    images = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        img = load_image(reqs[i]["data"]["uri"])
        if img.ndim != 3:
            raise ErrorBase(400,
                            "image ndim is " + str(img.ndim) + ", should be 3")
        height, width, _ = img.shape
        if height <= 32 or width <= 32:
            raise ErrorBase(
                400, "image too small " + str(height) + "x" + str(width))
        image_shape_list_h_w.append([height, width])
        images.append(preProcessImage(oriImage=img))
    _t2 = time.time()
    CTX.logger.info("read image and transform: %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return image_shape_list_h_w, images
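For reference, a minimal sketch (not part of the original example) of the request layout this pre_eval expects, inferred from its field accesses; the URIs and batch size are placeholders.

# Hypothetical reqs layout for the pre_eval above; values are placeholders.
reqs = [
    {"data": {"uri": "/path/to/image_0.jpg"}},
    {"data": {"uri": "/path/to/image_1.jpg"}},
]
# image_shape_list_h_w, images = pre_eval(net, batch_size=4, reqs=reqs)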
Example #2
def pre_eval(net, batch_size, reqs):
    '''
        prepare net forward data
        Parameters
        ----------
        net: net created by net_init
        batch_size: max batch size supported by the net
        reqs: parsed reqs from net_inference
        Return
        ----------
        images: list of preprocessed images ready for net forward
    '''
    cur_batchsize = len(reqs)
    CTX.logger.info("cur_batchsize: %d\n", cur_batchsize)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    images = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        data = reqs[i]
        img = load_image(data["data"]["uri"], body=data['data']['body'])
        if img is None:
            CTX.logger.info("input data is none : %s\n", data)
            raise ErrorBase(400, "image data is None ")
        if img.ndim != 3:
            raise ErrorBase(400,
                            "image ndim is " + str(img.ndim) + ", should be 3")
        images.append(preProcessImage(img))
    _t2 = time.time()
    CTX.logger.info("read image and transform: %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return images
Example #3
def create_net(configs):
    '''
        net init
    '''
    _t1 = time.time()

    try:
        CTX.logger.debug("enter net_init")
        CTX.logger.debug("configs:%s", configs)
        tar_files = value_check(configs, "model_files")
        digit_model = file_check(tar_files, "digits_model.pth")
        crnn_model = file_check(tar_files, "netcrnn_model.pth")
        id_card.config.RECOGNITION.ADDRESS_MODEL_PATH = crnn_model
        id_card.config.RECOGNITION.NAME_MODEL_PATH = crnn_model
        id_card.config.RECOGNITION.DIGITS_MODEL_PATH = digit_model
        id_card.config.PLATFORM = value_check(configs,
                                              'use_device',
                                              default="GPU").upper()
        net = IDCardRecognizeHandler()
    except Exception as e:
        CTX.logger.error("load error:%s", traceback.format_exc())
        return {}, 599, str(e)
    _t2 = time.time()
    monitor_rt_load().observe(_t2 - _t1)
    CTX.logger.info("load time:%f", _t2 - _t1)

    return {"net": net}, 0, ''
Example #4
def create_net(configs):
    '''
        net init
    '''
    _t1 = time.time()

    try:
        CTX.logger.debug("enter net_init")
        CTX.logger.debug("configs:%s", configs)
        batch_size = value_check(configs, 'batch_size', False, 1)
        tar_files = value_check(configs, "model_files")
        synset = str(file_check(tar_files, "lsvc_class_index.txt"))
        prefix = os.path.abspath(synset + "/..") + "/netvlad"

        net = FeatureCoding(featureDim=config.FEATURE_CODING.FEATURE_DIM,
                            batchsize=batch_size,
                            modelPrefix=prefix,
                            modelEpoch=config.FEATURE_CODING.MODEL_EPOCH,
                            synset=synset,
                            gpu_id=0)
    except Exception as e:
        CTX.logger.error("load error:%s", traceback.format_exc())
        return {}, 599, str(e)
    _t2 = time.time()
    monitor_rt_load().observe(_t2 - _t1)
    CTX.logger.info("load time:%f", _t2 - _t1)

    return {"net": net}, 0, ''
Example #5
def create_net(configs):
    '''
        net init
    '''
    _t1 = time.time()

    try:
        CTX.logger.debug("enter net_init")
        CTX.logger.debug("configs:%s", configs)

        tar_files = value_check(configs, "model_files")
        prototxt = str(file_check(tar_files, "deploy.prototxt"))
        model = str(file_check(tar_files, "weight.caffemodel"))
        net = FeatureExtraction(
            modelPrototxt=prototxt,
            modelFile=model,
            featureLayer=config.FEATURE_EXTRACTION.FEATURE_LAYER)
    except Exception as e:
        CTX.logger.error("load error:%s", traceback.format_exc())
        return {}, 599, str(e)
    _t2 = time.time()
    monitor_rt_load().observe(_t2 - _t1)
    CTX.logger.info("load time:%f", _t2 - _t1)

    return {"net": net}, 0, ''
Example #6
def decodeReqsImage(reqs):
    """
        process reqs image
        ----
        reqs :
        return :
            imageReadInfoDict,  # reqs image load info
            normalImageMapToOriginalImage, #  normal image id map to original image id
            images # load image result data
    """
    cur_batchsize = len(reqs)
    _t1 = time.time()
    imageReadInfoDict = dict()  # per-request image load info
    normalImageMapToOriginalImage = dict()  # normal image index -> original request index
    images = []
    normalImageIndex = 0
    for i in range(cur_batchsize):
        data = reqs[i]
        infoOfImage = dict()
        img = None
        try:
            # load the image; may return None or raise on error
            img = load_image(data["data"]["uri"], body=data['data']['body'])
            if img is None:
                CTX.logger.info("input data is none : %s\n", data)
                infoOfImage['errorInfo'] = "image data is None"
                infoOfImage['errorCode'] = 400
                infoOfImage['flag'] = 1
            elif img.ndim != 3:
                CTX.logger.info("image ndim is " + str(img.ndim) +
                                ", should be 3\n")
                infoOfImage['errorInfo'] = "image ndim is " + \
                    str(img.ndim) + ", should be 3"
                infoOfImage['errorCode'] = 400
                infoOfImage['flag'] = 1
        except ErrorBase as e:
            CTX.logger.info("image of index: %d, preProcess error: %s\n", i,
                            str(e))
            infoOfImage['errorInfo'] = str(e)
            infoOfImage['errorCode'] = e.code
            infoOfImage['flag'] = 1  # flag 1 marks an image preprocess error
        if infoOfImage.get('flag', 0) == 1:  # the image failed to preprocess
            imageReadInfoDict[i] = infoOfImage
            continue
        # some images may fail, so keep the mapping between normal and original indices
        infoOfImage['flag'] = 0  # image preprocessed normally
        infoOfImage['normalImageIndex'] = normalImageIndex
        # new (normal) image index maps to the original request index
        normalImageMapToOriginalImage[normalImageIndex] = i
        imageReadInfoDict[i] = infoOfImage
        normalImageIndex += 1
        images.append(img)
    _t2 = time.time()
    CTX.logger.info("load images : %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return imageReadInfoDict, normalImageMapToOriginalImage, images
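The mapping returned above lets a caller put per-image results back into the original request order. Below is a minimal sketch of that reassembly (not part of the original example); `results` is a hypothetical list of inference outputs, one per successfully decoded image.

def assemble_results(reqs, imageReadInfoDict, normalImageMapToOriginalImage, results):
    # results: hypothetical list of outputs, one per successfully decoded image
    out = [None] * len(reqs)
    for i, info in imageReadInfoDict.items():
        if info.get('flag') == 1:  # image failed to load or decode
            out[i] = {"code": info['errorCode'], "message": info['errorInfo']}
    for normal_idx, original_idx in normalImageMapToOriginalImage.items():
        out[original_idx] = {"code": 0, "result": results[normal_idx]}
    return out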
def pre_eval(net, batch_size, reqs):
    '''
        prepare net forward data
    '''
    imageReadInfoDict = dict()
    images = []
    netinfeImageIdToinputImageId = dict()  # net inference image index -> input image index
    cur_batchsize = len(reqs)
    CTX.logger.info("cur_batchsize: %d\n", cur_batchsize)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    _t1 = time.time()
    normalImageIndex = 0
    for i in range(cur_batchsize):
        data = reqs[i]
        infoOfImage = dict()
        img = None
        try:
            # load the image; may return None or raise on error
            img = load_image(data["data"]["uri"], body=data['data']['body'])
            if img is None:
                CTX.logger.info("input data is none : %s\n", data)
                infoOfImage['errorInfo'] = "image data is None"
                infoOfImage['errorCode'] = 400
                infoOfImage['flag'] = 1
            elif img.ndim != 3:
                CTX.logger.info("image ndim is " +
                                str(img.ndim) + ", should be 3\n")
                infoOfImage['errorInfo'] = "image ndim is " + \
                    str(img.ndim) + ", should be 3"
                infoOfImage['errorCode'] = 400
                infoOfImage['flag'] = 1
        except ErrorBase as e:
            CTX.logger.info("image of index: %d, preProcess error: %s\n", i, str(e))
            infoOfImage['errorInfo'] = str(e)
            infoOfImage['errorCode'] = e.code
            infoOfImage['flag'] = 1  # flag 1 marks an image preprocess error
        if infoOfImage.get('flag', 0) == 1:  # the image failed to preprocess
            imageReadInfoDict[i] = infoOfImage
            continue
        height, width, _ = img.shape
        infoOfImage['flag'] = 0  # image preprocessed normally
        infoOfImage['height'] = height
        infoOfImage['width'] = width
        # some images may fail, so keep the mapping between normal and original indices
        infoOfImage['normalImageIndex'] = normalImageIndex
        # new (normal) image index maps to the original request index
        netinfeImageIdToinputImageId[normalImageIndex] = i
        imageReadInfoDict[i] = infoOfImage
        normalImageIndex += 1
        images.append(preProcessImage(oriImage=img))
    _t2 = time.time()
    CTX.logger.info("read image and transform: %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return imageReadInfoDict, netinfeImageIdToinputImageId, images
Example #8
def pre_eval(batch_size, reqs):
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    ret = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        img = load_image(reqs[i]["data"]["uri"], body=reqs[i]['data']['body'])
        img_type = reqs[i]["data"]["attribute"].get("image_type", 0)
        img_name = reqs[i]["data"]["attribute"].get("name", "")
        video = reqs[i]["data"]["attribute"].get("video", False)
        lane_pts = reqs[i]["data"]["attribute"].get("lane_pts")
        ret.append((img, img_type, video, img_name, lane_pts))
    _t2 = time.time()
    CTX.logger.info("load: %f", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return ret
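A hedged placeholder request illustrating the attribute fields read above; the values and the lane_pts format are assumptions, not a documented schema.

# Hypothetical request for the pre_eval above; values are placeholders.
req = {
    "data": {
        "uri": "/path/to/frame.jpg",
        "body": None,            # raw bytes may be passed instead of reading the uri
        "attribute": {
            "image_type": 0,
            "name": "frame_0001",
            "video": True,
            "lane_pts": None,    # point-list format is not specified in the example
        },
    }
}
# batch = pre_eval(batch_size=8, reqs=[req])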
Example #9
def pre_eval(batch_size, reqs):

    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    ret = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        img = load_image(reqs[i]["data"]["uri"], body=reqs[i]['data']['body'])
        ret.append(img)

    _t2 = time.time()
    CTX.logger.info("load: %f", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)

    return ret
Example #10
def pre_eval(net, batch_size, reqs):
    '''
        prepare net forward data
        Parameters
        ----------
        net: net created by net_init
        batch_size: max batch size supported by the net
        reqs: parsed reqs from net_inference
        Return
        ----------
        image_shape_list_h_w: list of [height, width] for each input image
        images: list of preprocessed images ready for net forward
    '''
    cur_batchsize = len(reqs)
    CTX.logger.info("cur_batchsize: %d\n", cur_batchsize)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    image_shape_list_h_w = []
    images = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        data = reqs[i]
        image_path = data["data"]["uri"]
        if data['data']['body'] is not None:
            # the request carries raw image bytes; cache them to a temp file
            hash_sha1 = hashlib.sha1()
            hash_sha1.update(data['data']['body'])
            image_path = os.path.join("/tmp", hash_sha1.hexdigest())
            with open(image_path, "wb") as f:
                f.write(data['data']['body'])
        img = load_image(image_path)
        if img is None:
            CTX.logger.info("input data is none : %s\n", reqs[i])
            raise ErrorBase(400, "image data is None ")
        if img.ndim != 3:
            raise ErrorBase(400,
                            "image ndim is " + str(img.ndim) + ", should be 3")
        height, width, _ = img.shape
        image_shape_list_h_w.append([height, width])
        images.append(preProcessImage(oriImage=img))
    _t2 = time.time()
    CTX.logger.info("read image and transform: %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return image_shape_list_h_w, images
Example #11
def pre_eval(batch_size, reqs):
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    ret = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        img = load_image(reqs[i]["data"]["uri"], body=reqs[i]['data']['body'])
        if img.shape[2] == 4:
            # drop the alpha channel from RGBA / BGRA images
            img = img[:, :, :3]
        ret.append(img)

    _t2 = time.time()
    CTX.logger.info("load: %f", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    #print("ret:"+str(ret[0]))
    #print("ret_size:"+str(ret[0].shape))
    return ret
Example #12
def pre_eval(batch_size, reqs):
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    ret = []
    _t1 = time.time()
    for req in reqs:
        img = load_image(req["data"]["uri"], body=req['data']['body'])
        detections = req["data"]["attribute"]["detections"]
        pts_arr = []
        for det in detections:
            pts_arr.append(det["pts"])

        ret.append((img, pts_arr))

    _t2 = time.time()
    CTX.logger.info("load: %f", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)

    return ret
Example #13
def pre_eval(model, req):
    CTX.logger.info("---> Inference pre_eval() begin ...\n")
    # feature_extractor = model["feature_extractor"]
    msg = 'success'

    big_endian = model.get('endian', 'big') == 'big'

    if 'datas' not in req:
        msg = "No 'datas' field found in request"
        CTX.logger.error(msg)

        return False, msg, []

    if len(req["datas"]) < 2:
        msg = "Must have len(req['datas']) > 1"
        CTX.logger.error(msg)

        return False, msg, []

    _t1 = time.time()

    features = []

    last_feat_len = 0
    for req_data in req['datas']:
        body = req_data.get('body', None)

        if not body:
            # -- if uri is a local file, 'body' can be omitted
            if osp.exists(req_data["uri"]):
                CTX.logger.info("Try to load feature from local file: %s\n" %
                                req_data["uri"])
                try:
                    # if req_data["uri"].endswith('.npy'):
                    #     feat_npy=np.load(req_data["uri"]).flatten()
                    #     feat_len=len(feat_npy)
                    #     req_data["feature"]=feat_npy
                    # else:
                    with open(req_data["uri"], 'rb') as fp:
                        req_data["body"] = fp.read()
                except Exception as e:
                    msg = ('Error when loading feature from local file: ' +
                           traceback.format_exc())
                    CTX.logger.error(msg)
                    return False, msg, []
            else:  # -- if uri is a web url, try to download it
                CTX.logger.info("Download feature file: %s\n", req_data["uri"])
                try:
                    feat_data = download_url(req_data["uri"])
                    req_data["body"] = feat_data
                except Exception as e:
                    msg = ('Error when downloading feature from URI: ' +
                           traceback.format_exc())
                    CTX.logger.error(msg)
                    return False, msg, []

        body = req_data.get('body', None)  # update body
        if not body:
            msg = 'all elements in req["datas"] must have valid "body" field or valid "uri" field'

            return False, msg, []
        else:
            try:
                feat_len, feat_npy = unpack_feature_from_stream(
                    body, big_endian)
            except Exception as e:
                msg = ('Error when unpacking one of input features: ' +
                       traceback.format_exc())
                CTX.logger.error(msg)
                return False, msg, []

            if last_feat_len <= 0:
                last_feat_len = feat_len

            if feat_len < 1 or feat_len != last_feat_len:
                msg = 'invalid unpacked feature length. (feat_len=%d vs. last_feat_len=%d)' % (
                    feat_len, last_feat_len)
                CTX.logger.error(msg)
                return False, msg, []
            else:
                attr = req_data.get('attribute', {})
                face_id = attr.get('face_id', 'null')
                group_id = attr.get('group_id', -2)

                gt_id = attr.get('gt_id', -1)

                tmp = {
                    "face_id": face_id,
                    "feature": feat_npy,
                    "group_id": group_id,
                    "gt_id": gt_id
                }

                features.append(tmp)

    _t2 = time.time()
    CTX.logger.info(
        "===> Pre-eval Time (loading and unpacking features): %f\n",
        _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)

    return True, msg, features
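As a follow-up sketch (not from the original source): each entry in the features list returned above holds a numpy vector under "feature"; one common way to compare two of them is cosine similarity, assuming numpy is available.

import numpy as np

def cosine_similarity(a, b):
    # compare two unpacked feature vectors from pre_eval's `features` output
    a = np.asarray(a, dtype=np.float64).ravel()
    b = np.asarray(b, dtype=np.float64).ravel()
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

# ok, msg, features = pre_eval(model, req)
# if ok and len(features) >= 2:
#     sim = cosine_similarity(features[0]["feature"], features[1]["feature"])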
Example #14
def pre_eval(model, reqs):
    CTX.logger.info("---> Inference pre_eval() begin ...\n")
    # feature_extractor = model["feature_extractor"]
    face_aligner = model["face_aligner"]
    batch_size = model["batch_size"]

    output_square = model["input_height"] == model["input_width"]

    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    face_chips = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        # print reqs[i]
        CTX.logger.info('---> req[%d] image uri: %s', i,
                        reqs[i]["data"]["uri"])
        check_req_data_body(reqs[i])
        img = load_image(reqs[i]["data"]["uri"], body=reqs[i]["data"]["body"])

        CTX.logger.info('---> req[%d] image shape: %s', i, str(img.shape))

        landmarks = None
        pts = None

        try:
            if "attribute" in reqs[i]["data"]:
                if "landmarks" in reqs[i]["data"]["attribute"]:
                    landmarks = reqs[i]["data"]["attribute"]["landmarks"]
                    CTX.logger.info('---> req[%d] face landmarks: %s', i,
                                    str(landmarks))
                    checked_landmarks = check_landmarks(landmarks)

                    _faces = face_aligner.get_face_chips(
                        img, [], [checked_landmarks],
                        output_square=output_square)
                    face_chips.extend(_faces)

                elif "pts" in reqs[i]["data"]["attribute"]:
                    pts = reqs[i]["data"]["attribute"]["pts"]
                    roi_scale = model["roi_scale"]
                    if "params" in reqs[i] and "roi_scale" in reqs[i]["params"]:
                        roi_scale = reqs[i]["params"]["roi_scale"]
                        CTX.logger.info('---> req[%d] roi_scale: %s', i,
                                        str(roi_scale))

                    checked_pts = check_bbox(pts, roi_scale)

                    CTX.logger.info('---> req[%d] face bbox pts: %s', i,
                                    str(pts))
                    CTX.logger.info('---> req[%d] checked face bbox pts: %s',
                                    i, str(checked_pts))

                    _faces = face_aligner.get_face_chips(
                        img, [checked_pts], output_square=output_square)
                    face_chips.extend(_faces)

        except Exception as e:
            CTX.logger.error("Error when aligning and cropping face: %s\n",
                             traceback.format_exc())
            raise Exception("Error when aligning and cropping face")

        if landmarks is None and pts is None:
            if (img.shape[0] == model["input_height"]
                    and img.shape[1] == model["input_width"]):
                face_chips.append(img)
            else:
                raise ErrorNoPTS(reqs[i]["data"]["uri"])


    _t2 = time.time()
    CTX.logger.info(
        "===> Pre-eval Time (loading images and aligning faces): %f\n",
        _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)

    return face_chips
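A hedged placeholder request for the face pre_eval above, showing one of the two attribute paths it handles (a bbox in pts with an optional per-request roi_scale; landmarks would be supplied instead of pts). The coordinate values and the assumed corner-point format are invented for illustration.

# Hypothetical face request; coordinates are placeholders.
req_with_pts = {
    "data": {
        "uri": "/path/to/face.jpg",
        "body": None,
        "attribute": {
            "pts": [[10, 20], [110, 20], [110, 140], [10, 140]],  # assumed bbox corner format
        },
    },
    "params": {"roi_scale": 1.2},  # optional per-request override of model["roi_scale"]
}
# face_chips = pre_eval(model, [req_with_pts])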