Example #1
def predetect(idcardcls, idcardfront, idcardback, batch_size, req):
    _t1 = time.time()
    # loading image
    img = load_image(req["data"]["uri"], body=req['data']['body'])
    _t2 = time.time()
    CTX.logger.info("inference :: cost for loading image: %f", _t2 - _t1)

    # classify idcard
    cls = idcardcls.run(img)
    _t3 = time.time()
    CTX.logger.info("inference :: cost for loading image: %f", _t3 - _t2)

    # preprocess for adjust idcard
    # 0: front side;    1: back side;
    if cls == 0:
        alignedImg, names, regions, boxes = idcardfront.predet(img)
    else:
        alignedImg, names, regions, boxes = idcardback.predet(img)

    _t4 = time.time()
    CTX.logger.info("inference :: cost for image preprocessing: %f", _t4 - _t3)

    return {
        "class": cls,
        "alignedImg": base64.b64encode(cv2.imencode('.jpg', alignedImg)[1]),
        "names": names,
        "regions": regions,
        "bboxes": boxes
    }
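
The "alignedImg" field above is a base64-encoded JPEG. Assuming OpenCV and NumPy on the consumer side, it can be decoded back into an image roughly as in the sketch below (the blank 4x4 image is just a stand-in for a real aligned crop):

import base64
import cv2
import numpy as np

# encode a placeholder image the same way predetect does
aligned_b64 = base64.b64encode(cv2.imencode('.jpg', np.zeros((4, 4, 3), np.uint8))[1])
# decode: base64 -> byte buffer -> image array
buf = np.frombuffer(base64.b64decode(aligned_b64), dtype=np.uint8)
img = cv2.imdecode(buf, cv2.IMREAD_COLOR)
print(img.shape)  # (4, 4, 3)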
Example #2
def prerecog(handler, batch_size, req):
    _t1 = time.time()
    # loading image
    img = load_image(req["data"]["uri"], body=req['data']['body'])
    _t2 = time.time()
    CTX.logger.info("inference :: cost for loading image: %f", _t2 - _t1)

    CTX.logger.info("input names: %s", req["params"]["names"])
    names = map(lambda name: name.encode('utf8'), req["params"]["names"])

    detboxes = req["params"]["detectedBoxes"]
    if len(detboxes[0]) == 8:
        # transform from [x0,y0,x1,y1,x2,y2,x3,y3] to [[x0,y0],[x1,y1],[x2,y2],[x3,y3]]
        detboxes = list(
            map(
                lambda x: [[x[0], x[1]], [x[2], x[3]], [x[4], x[5]],
                           [x[6], x[7]]], detboxes))

    bboxes = handler.prerecog(detboxes, img, names, req["params"]["regions"],
                              req["params"]["bboxes"])
    _t3 = time.time()
    CTX.logger.info("inference :: cost for image pre-recognize process: %f",
                    _t3 - _t2)

    return {"bboxes": bboxes}
Example #3
def pre_eval(net, batch_size, reqs):
    '''
        prepare net forward data
        Parameters
        ----------
        net: net created by net_init
        batch_size: max batch size allowed
        reqs: parsed reqs from net_inference
        Return
        ----------
        image_shape_list_h_w: list of [height, width] for each image
        images: list of preprocessed images
    '''
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    image_shape_list_h_w = []
    images = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        img = load_image(reqs[i]["data"]["uri"])
        height, width, _ = img.shape
        if height <= 32 or width <= 32:
            raise ErrorBase(
                400, "image too small " + str(height) + "x" + str(width))
        if img.ndim != 3:
            raise ErrorBase(400,
                            "image ndim is " + str(img.ndim) + ", should be 3")
        image_shape_list_h_w.append([height, width])
        images.append(preProcessImage(oriImage=img))
    _t2 = time.time()
    CTX.logger.info("read image and transform: %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return image_shape_list_h_w, images
Example #4
def net_inference(model, reqs):

    CTX.logger.info("inference begin...")
    # datas = json.loads(args)
    predictor = model['predictor']
    classes_dict = model['labels']['class']
    # per-class default thresholds
    threshold_dict = model['labels']['minMt']  # minModelThreshold
    rets = []
    nms = py_nms_wrapper(config.TEST.NMS)
    box_voting = py_box_voting_wrapper(config.TEST.BOX_VOTING_IOU_THRESH, config.TEST.BOX_VOTING_SCORE_THRESH,
                                      with_nms=True)

    try:
        for data in reqs:
            try:
                im = load_image(data['data']['uri'], body=data['data']['body'])
            except ErrorBase as e:
                rets.append({"code":e.code, "message": e.message, "result": None})
                continue
                # return [], 400, 'load image error'

            if im.shape[0] > im.shape[1]:
                long_side, short_side = im.shape[0], im.shape[1]
            else:
                long_side, short_side = im.shape[1], im.shape[0]

            if short_side > 0 and float(long_side)/float(short_side) > 50.0:
                msg = "aspect ration is too large, long_size:short_side should not larger than 50.0"
                # raise ErrorBase.__init__(400, msg)
                rets.append({"code": 400, "message": msg, "result": None})
                continue

            data_batch, data_names, im_scale = generate_batch(im)
            scores, boxes, data_dict = im_detect(predictor,
                                                data_batch,
                                                data_names,
                                                im_scale,
                                                config)
            det_ret = []
            # labels.csv does not include the background class
            for cls_index in sorted(classes_dict.keys()):
                cls_ind = cls_index
                cls_name = classes_dict.get(cls_ind)
                cls_boxes = boxes[0][:, 4:8] if config.CLASS_AGNOSTIC else boxes[0][:, 4 * cls_ind:4 * (cls_ind + 1)]
                cls_scores = scores[0][:, cls_ind, np.newaxis]
                threshold = float(threshold_dict[cls_ind])
                keep = np.where(cls_scores > threshold)[0]
                dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
                keep = nms(dets)
                det_ret.extend(_build_result(det, cls_name, cls_ind, model['labels'])
                    for det in dets[keep, :])
            # get review value
            rets.append(dict(code=0,message='',result=dict(detections=det_ret)))

    except Exception as e:
        # print(traceback.format_exc())
        CTX.logger.info("inference error:%s"%(traceback.format_exc()))
        return [], 599, str(e)
    return rets, 0, ''
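
The per-class filtering above keeps boxes whose score exceeds the class threshold and stacks boxes with their scores before NMS. A toy, NumPy-only illustration of that step (the nms / box-voting wrappers are omitted and the values are made up):

import numpy as np

cls_boxes = np.array([[0, 0, 10, 10], [5, 5, 20, 20]], dtype=np.float32)
cls_scores = np.array([[0.9], [0.2]], dtype=np.float32)
threshold = 0.5
# indices of detections above the class threshold
keep = np.where(cls_scores > threshold)[0]
# stack [x1, y1, x2, y2, score] rows and keep only the selected ones
dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
print(dets)  # only the first row (score 0.9) survives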
Example #5
def pre_eval(net, batch_size, reqs):
    '''
        prepare net forward data
        Parameters
        ----------
        net: net created by net_init
        batch_size: max batch size allowed
        reqs: parsed reqs from net_inference
        Return
        ----------
        images: list of preprocessed images
    '''
    cur_batchsize = len(reqs)
    CTX.logger.info("cur_batchsize: %d\n", cur_batchsize)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    images = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        data = reqs[i]
        img = load_image(data["data"]["uri"], body=data['data']['body'])
        if img is None:
            CTX.logger.info("input data is none : %s\n", data)
            raise ErrorBase(400, "image data is None ")
        if img.ndim != 3:
            raise ErrorBase(400,
                            "image ndim is " + str(img.ndim) + ", should be 3")
        images.append(preProcessImage(img))
    _t2 = time.time()
    CTX.logger.info("read image and transform: %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return images
Example #6
def _load_image(req,
                width,
                height,
                mean_value=[0.0, 0.0, 0.0],
                std_value=[1.0, 1.0, 1.0]):
    try:
        img = load_image(req["uri"], body=req['body'])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if img is None:
            return None, {"code": 400, "message": "cv2 load image failed"}

            # convert into format (batch, RGB, width, height)
        img = img.astype(float)
        img = cv2.resize(img, (width, height))
        img -= mean_value
        img /= std_value
        img = np.swapaxes(img, 0, 2)
        img = np.swapaxes(img, 1, 2)
        return img, None
    except Exception as _e:
        logger.error("_load_image error: %s",
                     traceback.format_exc(),
                     extra={"reqid": ""})
        if isinstance(_e, ErrorBase):
            return None, {"code": _e.code, "message": str(_e)}
        return None, {"code": 400, "message": str(_e)}
Example #7
def postrecog(daikai_model, fapaiolian_model, req):
    img = load_image(req["data"]["uri"], body=req['data']['body'])
    # CTX.logger.info("load param - dict: %s", req["params"]["dict"])
    # CTX.logger.info("load param - texts: %s", req["params"]["texts"])
    # boxes_dict = json.loads(req["params"]["dict"])
    rec_result = req["params"]["texts"]
    CTX.logger.info("load param - texts: %s", rec_result)

    _t1 = time.time()
    vat = ZenZhuiShui_reco()  # cannot pass an image_dict parameter, so create a new instance and regenerate it
    _, boxes_dict = vat.gen_img_dict(img)
    # vat.gen_img_dict_base(img)
    vat.predict_oridinary(boxes_dict, rec_result)
    vat.predict_other(boxes_dict, rec_result)
    vat.predict_XiaoShouMingXi(boxes_dict, rec_result)
    vat.predict_svm(daikai_model)
    vat.predict_FaPiaoLianCi(fapaiolian_model)
    vat.predict_XiaoLeiMingCheng()
    res = postProcess(vat.out_dict)
    _t2 = time.time()

    CTX.logger.info("post: %f", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)

    return res
Example #8
def decodeReqsImage(reqs):
    """
        process reqs image
        ----
        reqs :
        return :
            imageReadInfoDict,  # reqs image load info
            normalImageMapToOriginalImage, #  normal image id map to original image id
            images # load image result data
    """
    cur_batchsize = len(reqs)
    _t1 = time.time()
    imageReadInfoDict = dict()  # reqs image info
    normalImageMapToOriginalImage = dict(
    )  # normal image id map to original image
    images = []
    normalImageIndex = 0
    for i in range(cur_batchsize):
        data = reqs[i]
        infoOfImage = dict()
        img = None
        try:
            # load the image; load errors are handled in the except block below
            img = load_image(data["data"]["uri"], body=data['data']['body'])
            if img is None:
                CTX.logger.info("input data is none : %s\n", data)
                infoOfImage['errorInfo'] = "image data is None"
                infoOfImage['errorCode'] = 400
                infoOfImage['flag'] = 1
            elif img.ndim != 3:
                CTX.logger.info("image ndim is " + str(img.ndim) +
                                ", should be 3\n")
                infoOfImage['errorInfo'] = "image ndim is " + \
                    str(img.ndim) + ", should be 3"
                infoOfImage['errorCode'] = 400
                infoOfImage['flag'] = 1
        except ErrorBase as e:
            CTX.logger.info("image of index : %d,preProcess error: %s\n", i,
                            str(e))
            infoOfImage['errorInfo'] = str(e)
            infoOfImage['errorCode'] = e.code
            infoOfImage['flag'] = 1  # flag 1: image preprocessing failed
        if infoOfImage.get('flag', 0) == 1:  # the image failed preprocessing
            imageReadInfoDict[i] = infoOfImage
            continue
        # some images may fail, so keep an index mapping for all of them
        infoOfImage['flag'] = 0  # flag 0: image loaded normally
        infoOfImage['normalImageIndex'] = normalImageIndex
        # normal image index -> original request index
        normalImageMapToOriginalImage[normalImageIndex] = i
        imageReadInfoDict[i] = infoOfImage
        normalImageIndex += 1
        images.append(img)
    _t2 = time.time()
    CTX.logger.info("load images : %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return imageReadInfoDict, normalImageMapToOriginalImage, images
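
One way a caller might use the two dictionaries returned by decodeReqsImage is to scatter per-image inference results back into the original request order. This is only a sketch; scatter_results and the result dict layout are illustrative, not part of the original code:

def scatter_results(imageReadInfoDict, normalImageMapToOriginalImage, results):
    rets = [None] * len(imageReadInfoDict)
    # requests whose image failed to load keep their error info
    for i, info in imageReadInfoDict.items():
        if info.get('flag', 0) == 1:
            rets[i] = {"code": info['errorCode'], "message": info['errorInfo'], "result": None}
    # successfully loaded images receive the corresponding inference result
    for normal_idx, original_idx in normalImageMapToOriginalImage.items():
        rets[original_idx] = {"code": 0, "message": "", "result": results[normal_idx]}
    return rets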
Example #9
def _load_image(req):
    try:
        img = load_image(req["uri"], body=req['body'])
    except Exception as _e:
        logger.info("load image error:%s, trackback:%s",
                    str(_e),
                    traceback.format_exc(),
                    extra={"reqid": ""})
        return None, None, {"code": 400, "message": str(_e)}
    return img, img.shape, None
Example #10
def pre_eval(net, batch_size, reqs):
    '''
        prepare net forward data
    '''
    imageReadInfoDict = dict()
    images = []
    netinfeImageIdToinputImageId = dict()  # net inference image index -> input request index
    cur_batchsize = len(reqs)
    CTX.logger.info("cur_batchsize: %d\n", cur_batchsize)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    # image_shape_list_h_w = []
    # images = []
    _t1 = time.time()
    normalImageIndex = 0
    for i in range(cur_batchsize):
        data = reqs[i]
        infoOfImage = dict()
        img = None
        try:
            # load the image; load errors are handled in the except block below
            img = load_image(data["data"]["uri"], body=data['data']['body'])
            if img is None:
                CTX.logger.info("input data is none : %s\n", data)
                infoOfImage['errorInfo'] = "image data is None"
                infoOfImage['errorCode'] = 400
                infoOfImage['flag'] = 1
            elif img.ndim != 3:
                CTX.logger.info("image ndim is " +
                                str(img.ndim) + ", should be 3\n")
                infoOfImage['errorInfo'] = "image ndim is " + \
                    str(img.ndim) + ", should be 3"
                infoOfImage['errorCode'] = 400
                infoOfImage['flag'] = 1
        except ErrorBase as e:
            CTX.logger.info("image of index : %d,preProcess error: %s\n", i,str(e))
            infoOfImage['errorInfo'] = str(e)
            infoOfImage['errorCode'] = e.code
            infoOfImage['flag'] = 1 # 1 is  the image preprocess error
        if infoOfImage.get('flag',0) == 1: # the image preProcess error
            imageReadInfoDict[i] = infoOfImage
            continue
        height, width, _ = img.shape
        infoOfImage['flag'] = 0 # normal image preProcess
        infoOfImage['height'] = height
        infoOfImage['width'] = width
        infoOfImage['normalImageIndex'] = normalImageIndex # because , some images error, so need all images's map relation .
        netinfeImageIdToinputImageId[normalImageIndex] = i # new image id map to old image id
        imageReadInfoDict[i] = infoOfImage
        normalImageIndex += 1
        images.append(preProcessImage(oriImage=img))
    _t2 = time.time()
    CTX.logger.info("read image and transform: %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return imageReadInfoDict, netinfeImageIdToinputImageId, images
Example #11
def detect(req):
    _t1 = time.time()
    # loading image
    img = load_image(req["data"]["uri"], body=req['data']['body'])
    _t2 = time.time()
    CTX.logger.info("inference :: cost for loading image: %f", _t2 - _t1)

    _t3 = time.time()
    vat = ZenZhuiShui_reco()  # create new instance
    rect_boxes, boxes_dict = vat.gen_img_dict(img)
    _t4 = time.time()

    CTX.logger.info("forward: %f", _t4 - _t3)
    monitor_rt_forward().observe(_t4 - _t1)
    return {"bboxes": rect_boxes, "dict": boxes_dict}
Example #12
def net_inference(model, reqs):
    '''
        net inference
    '''
    CTX.logger.debug("enter net_inference")
    handler = model['net']
    rets = []

    _t1 = time.time()

    try:
        for data in reqs:
            CTX.logger.debug("data:%s", data)
            img = data["data"]["uri"]
            im = load_image(data["data"]["uri"], data['data']['body'])
            if im is None:
                CTX.logger.debug("read image failed, path:%s",
                                 data['data']['uri'])
                rets.append({"code": 400, "message": "read image failed"})
                continue

            feature = handler.ext_process([im])
            if feature is None or len(feature) == 0:
                rets.append({
                    "code": 400,
                    "message": "failed to get features of the image"
                })
                continue
            CTX.logger.info("feature length info:" + str(len(feature)) +
                            "len(feature[0]): " + str(len(feature[0])))
            stream = struct.pack('>' + str(len(feature[0])) + 'f', *feature[0])
            #CTX.logger.error("struct.unpack info:" + ">" +str(len(stream) / 4) + "f")
            #hash_sha1 = hashlib.sha1()
            #hash_sha1.update(stream)
            #feature_file_name = os.path.join("/tmp/eval/", hash_sha1.hexdigest())
            #file = open(feature_file_name, "wb")
            #file.write(stream)
            #file.close()
            #rets.append({"code": 0, "message": "", "result_file": str(feature_file_name)})
            rets.append({"code": 0, "message": "", "body": stream})
    except Exception as e:
        CTX.logger.error("inference error:%s", traceback.format_exc())
        return [], 599, str(e)
    _t2 = time.time()
    monitor_rt_forward().observe(_t2 - _t1)

    CTX.logger.debug("rets:%s", rets)
    return rets, 0, ''
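
The feature vector above is serialised with struct.pack as big-endian 32-bit floats, and the commented-out unpack line hints at the inverse. A minimal round-trip sketch using only the standard library (the feature values are made up):

import struct

feature = [0.25, -1.5, 3.0]
stream = struct.pack('>' + str(len(feature)) + 'f', *feature)     # pack as big-endian floats
decoded = struct.unpack('>' + str(len(stream) // 4) + 'f', stream)  # 4 bytes per float
print(list(decoded))  # [0.25, -1.5, 3.0]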
Example #13
def net_inference(model, reqs):
    '''
        net inference
    '''
    CTX.logger.debug("enter net_inference")
    handler = model['net']
    rets = []

    _t1 = time.time()

    try:
        for data in reqs:
            CTX.logger.debug("data:%s", data)
            img = data["data"]["uri"]
            if data['data']['body'] is not None:
                hash_sha1 = hashlib.sha1()
                hash_sha1.update(str(data['data']['body']))
                img = os.path.join("/tmp", hash_sha1.hexdigest())
                file = open(img, "wb")
                file.write(data['data']['body'])
                file.close()
            im = load_image(img)
            if im is None:
                CTX.logger.debug("read image failed, path:%s",
                                 data['data']['uri'])
                rets.append({"code": 400, "message": "read image failed"})
                continue

            ret = handler.recognize(im)
            if ret["status"] == -1:
                rets.append({
                    "code": 400,
                    "message": "no valid id info obtained"
                })
                continue
            rets.append(dict(code=0, message='', result=ret['id_res']))
            if data['data']['body'] is not None and os.path.exists(img):
                os.remove(img)
    except Exception as e:
        if data['data']['body'] is not None and os.path.exists(img):
            os.remove(img)
        CTX.logger.error("inference error:%s", traceback.format_exc())
        return [], 599, str(e)
    _t2 = time.time()
    monitor_rt_forward().observe(_t2 - _t1)

    CTX.logger.debug("rets:%s", rets)
    return rets, 0, ''
Example #14
def pre_eval(net, batch_size, reqs):
    '''
        prepare net forward data
        Parameters
        ----------
        net: net created by net_init
        batch_size: max batch size allowed
        reqs: parsed reqs from net_inference
        Return
        ----------
        image_shape_list_h_w: list of [height, width] for each image
        images: list of preprocessed images
    '''
    cur_batchsize = len(reqs)
    CTX.logger.info("cur_batchsize: %d\n", cur_batchsize)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    image_shape_list_h_w = []
    images = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        data = reqs[i]
        image_path = data["data"]["uri"]
        if data['data']['body'] is not None:
            print("body")
            hash_sha1 = hashlib.sha1()
            hash_sha1.update(str(data['data']['body']))
            image_path = os.path.join("/tmp", hash_sha1.hexdigest())
            file = open(image_path, "wb")
            file.write(data['data']['body'])
            file.close()
        img = load_image(image_path)
        if img is None:
            CTX.logger.info("input data is none : %s\n", reqs[i])
            raise ErrorBase(400, "image data is None ")
        height, width, _ = img.shape
        if img.ndim != 3:
            raise ErrorBase(400,
                            "image ndim is " + str(img.ndim) + ", should be 3")
        image_shape_list_h_w.append([height, width])
        images.append(preProcessImage(oriImage=img))
    _t2 = time.time()
    CTX.logger.info("read image and transform: %f\n", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return image_shape_list_h_w, images
Example #15
def pre_eval(batch_size, reqs):
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)
    ret = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        img = load_image(reqs[i]["data"]["uri"], body=reqs[i]['data']['body'])
        img_type = reqs[i]["data"]["attribute"].get("image_type", 0)
        img_name = reqs[i]["data"]["attribute"].get("name", "")
        video = reqs[i]["data"]["attribute"].get("video", False)
        lane_pts = reqs[i]["data"]["attribute"].get("lane_pts")
        ret.append((img, img_type, video, img_name, lane_pts))
    _t2 = time.time()
    CTX.logger.info("load: %f", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    return ret
Example #16
def pre_eval(batch_size, reqs):

    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    ret = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        img = load_image(reqs[i]["data"]["uri"], body=reqs[i]['data']['body'])
        ret.append(img)

    _t2 = time.time()
    CTX.logger.info("load: %f", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)

    return ret
Example #17
def pre_eval(batch_size, reqs):
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    ret = []
    _t1 = time.time()
    for req in reqs:
        img = load_image(req["data"]["uri"], body=req['data']['body'])
        bboxes = req["params"]["bboxes"]
        if len(bboxes[0]) == 8:
            # transform from [x0,y0,x1,y1,x2,y2,x3,y3] to [[x0,y0],[x1,y1],[x2,y2],[x3,y3]]
            bboxes = list(map(lambda x:[[x[0],x[1]],[x[2],x[3]],[x[4],x[5]],[x[6],x[7]]],bboxes))
        ret.append((img, bboxes))

    _t2 = time.time()
    CTX.logger.info("image load cost: %f", _t2 - _t1)
    # monitor_rt_load().observe(_t2 - _t1)

    return ret
Example #18
def pre_eval(batch_size, reqs):
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    ret = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        img = load_image(reqs[i]["data"]["uri"], body=reqs[i]['data']['body'])
        if img.shape[2] == 4:
            img = img[:, :, :3]
        #print(img.shape)
        ret.append(img)

    _t2 = time.time()
    CTX.logger.info("load: %f", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)
    #print("ret:"+str(ret[0]))
    #print("ret_size:"+str(ret[0].shape))
    return ret
Example #19
def net_inference(model, reqs):
    text_classify = model['text_classify']
    batch_size = model['batch_size']
    threshold = model['threshold']
    CTX.logger.info("inference begin ...")
    CTX.logger.info("requests: %s", reqs)

    try:
        ret = []
        for i in range(len(reqs)):
            img = load_image(reqs[i]["data"]["uri"], body=reqs[i]['data']['body'])
            resp = process_image_fun(text_classify, img, threshold)
            ret.append(resp)

    except ErrorBase as e:
        return [], e.code, str(e)
    except Exception as e:
        CTX.logger.error("inference error: %s", traceback.format_exc())
        return [], 599, str(e)

    return ret, 0, ''
Example #20
def pre_eval(batch_size, reqs):
    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    ret = []
    _t1 = time.time()
    for req in reqs:
        img = load_image(req["data"]["uri"], body=req['data']['body'])
        detections = req["data"]["attribute"]["detections"]
        pts_arr = []
        for det in detections:
            pts_arr.append(det["pts"])

        ret.append((img, pts_arr))

    _t2 = time.time()
    CTX.logger.info("load: %f", _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)

    return ret
Example #21
def net_inference(model):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :return: None
    """

    # datas = json.loads(args)
    predictor = model['predictor']
    classes = model['classes']
    threshold = model['threshold']
    thresholds = model['thresholds']
    rets = []
    nms = py_nms_wrapper(config.TEST.NMS)
    box_voting = py_box_voting_wrapper(config.TEST.BOX_VOTING_IOU_THRESH,
                                       config.TEST.BOX_VOTING_SCORE_THRESH,
                                       with_nms=True)
    try:
        time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        fileOp = "/tmp/eval/init/20170930_guns_1083-begin-" + time_str + '.csv'
        fileOp_op = open(fileOp, 'w')
        time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        fileOp_1 = "/tmp/eval/init/20170930_guns_1083-end-" + time_str + '.csv'
        fileOp_1_op = open(fileOp_1, 'w')
        fileOp_2 = "/tmp/eval/init/20170930_guns_1083-image-" + time_str + '.jpg'
        for i in sorted(os.listdir("/tmp/eval/init/images")):
            imageFile = os.path.join("/tmp/eval/init/images", i)
            try:
                im = load_image(imageFile, 50.0)
                # cv2.imwrite('fileOp_2', im)
                print(im[0, :])
                # np.savetxt(fileOp_2, im, delimiter=",")
            except ErrorBase as e:
                rets.append({"code": e.code, "message": e.message})
                continue

            data_batch, data_names, im_scale = generate_batch(im)
            print("*" * 100)
            scores, boxes, data_dict = im_detect(predictor, data_batch,
                                                 data_names, im_scale, config)
            det_ret = []
            for cls_index, cls in enumerate(classes[1:], start=1):
                if len(cls) > 1:
                    cls_ind = int(cls[0])
                    cls_name = cls[1]
                else:
                    cls_ind = cls_index
                    cls_name = cls[0]
                cls_boxes = boxes[0][:,
                                     4:8] if config.CLASS_AGNOSTIC else boxes[
                                         0][:, 4 * cls_ind:4 * (cls_ind + 1)]
                cls_scores = scores[0][:, cls_ind, np.newaxis]
                if len(classes) <= len(thresholds):
                    threshold = thresholds[cls_ind]
                keep = np.where(cls_scores >= threshold)[0]
                dets = np.hstack(
                    (cls_boxes, cls_scores)).astype(np.float32)[keep, :]
                if "20170930_guns_1083.jpg" in imageFile:
                    # print(dets)
                    # print('*'*100)
                    # for i in dets:
                    #     fileOp_op.write(i)
                    #     fileOp_op.write('\n')
                    np.savetxt(fileOp, dets, delimiter=",")
                    pass
                keep = nms(dets)
                if "20170930_guns_1083.jpg" in imageFile:
                    # print("end"*10)
                    # print(dets[keep, :])
                    # print('*'*100)
                    # for i in dets:
                    #     fileOp_1_op.write(i)
                    #     fileOp_1_op.write('\n')
                    np.savetxt(fileOp_1, dets[keep, :], delimiter=",")
                det_ret.extend(
                    _build_result(det, cls_name, cls_ind)
                    for det in dets[keep, :])

            rets.append(
                dict(code=0,
                     message=imageFile,
                     result=json.dumps(dict(detections=det_ret))))

    except Exception as e:
        print(traceback.format_exc())
Example #22
def pre_eval(model, reqs):
    CTX.logger.info("---> Inference pre_eval() begin ...\n")
    # feature_extractor = model["feature_extractor"]
    face_aligner = model["face_aligner"]
    batch_size = model["batch_size"]

    output_square = model["input_height"] == model["input_width"]

    cur_batchsize = len(reqs)
    if cur_batchsize > batch_size:
        raise ErrorOutOfBatchSize(batch_size)

    face_chips = []
    _t1 = time.time()
    for i in range(cur_batchsize):
        # print reqs[i]
        CTX.logger.info('---> req[%d] image uri: %s', i,
                        reqs[i]["data"]["uri"])
        check_req_data_body(reqs[i])
        img = load_image(reqs[i]["data"]["uri"], body=reqs[i]["data"]["body"])

        CTX.logger.info('---> req[%d] image shape: %s', i, str(img.shape))

        landmarks = None
        pts = None

        try:
            if "attribute" in reqs[i]["data"]:
                if "landmarks" in reqs[i]["data"]["attribute"]:
                    landmarks = reqs[i]["data"]["attribute"]["landmarks"]
                    CTX.logger.info('---> req[%d] face landmarks: %s', i,
                                    str(landmarks))
                    checked_landmarks = check_landmarks(landmarks)

                    _faces = face_aligner.get_face_chips(
                        img, [], [checked_landmarks],
                        output_square=output_square)
                    face_chips.extend(_faces)

                elif "pts" in reqs[i]["data"]["attribute"]:
                    pts = reqs[i]["data"]["attribute"]["pts"]
                    roi_scale = model["roi_scale"]
                    if "params" in reqs[i] and "roi_scale" in reqs[i]["params"]:
                        roi_scale = reqs[i]["params"]["roi_scale"]
                        CTX.logger.info('---> req[%d] roi_scale: %s', i,
                                        str(roi_scale))

                    checked_pts = check_bbox(pts, roi_scale)

                    CTX.logger.info('---> req[%d] face bbox pts: %s', i,
                                    str(pts))
                    CTX.logger.info('---> req[%d] checked face bbox pts: %s',
                                    i, str(checked_pts))

                    _faces = face_aligner.get_face_chips(
                        img, [checked_pts], output_square=output_square)
                    face_chips.extend(_faces)

        except Exception as e:
            CTX.logger.error("Error when align and crop face: %s\n",
                             traceback.format_exc())
            raise Exception("Error align and crop face")

        if landmarks is None and pts is None:
            if (img.shape[0] == model["input_height"]
                    and img.shape[1] == model["input_width"]):
                face_chips.append(img)
            else:
                raise ErrorNoPTS(reqs[i]["data"]["uri"])

        # if pts is None:
        #     # if img.shape[0] != model["input_height"] or img.shape[1] != model["input_width"]:
        #     #     img = cv2.resize(
        #     #         img, (model["input_width"], model["input_height"]))
        #     #     face_chips.append(img)
        #         face_chips.append(img)
        # else:
        #         _faces = face_aligner.get_face_chips(
        #             img, [pts], output_square=output_square)
        #         face_chips.extend(_faces)

        # for i, chip in enumerate(face_chips):
        #     print "face chip #%d, shape: %s" % (i, str(chip.shape))
        #     win_name = "face_%d" % i
        #     cv2.imshow(win_name, chip)

        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

    _t2 = time.time()
    CTX.logger.info(
        "===> Pre-eval Time (loading images and aligning faces): %f\n",
        _t2 - _t1)
    monitor_rt_load().observe(_t2 - _t1)

    return face_chips