def eval(net, images):
    '''
        eval forward inference

        Parameters
        ----------
        images: iterable of preprocessed image arrays; image i is copied
            into batch slot i of the net's 'data' blob

        Return
        ---------
        output: network numpy.mdarray

        Raises
        ------
        ErrorForwardInference: when the forward pass yields no
            'detection_out' entries
    '''
    _t1 = time.time()
    # copy each image into its batch slot of the input blob
    for index, i_data in enumerate(images):
        net.blobs['data'].data[index] = i_data
    _t2 = time.time()
    CTX.logger.info("load image to net: %f\n", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)

    _t1 = time.time()
    output = net.forward()
    _t2 = time.time()
    CTX.logger.info("forward: %f\n", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)

    # Validate BEFORE logging: the original logged output['detection_out']
    # first, so a missing key raised KeyError instead of the intended
    # ErrorForwardInference.
    if 'detection_out' not in output or len(output['detection_out']) < 1:
        raise ErrorForwardInference()

    CTX.logger.info('detection_out: {}'.format(output['detection_out']))
    return output
# --- Esempio n. 2 (scrape-artifact separator, commented out so the file parses) ---
def net_inference(model, reqs):
    '''
        net inference

        Decodes big-endian float32 feature vectors from reqs[0]['data']
        (inline body or file at 'uri'), classifies them with model['net'],
        and returns results sorted by descending score.

        Parameters
        ----------
        model: dict holding the classifier under key 'net'
        reqs: request list; only reqs[0]['data'] is consumed

        Return
        ------
        (rets, code, err): per-item result dicts, overall status code,
        error string
    '''
    CTX.logger.debug("enter net_inference")
    handler = model['net']
    rets = []
    features = []
    length = -1  # feature length of the first item; all items must match
    _t1 = time.time()

    datas = reqs[0].get("data", [])
    if len(datas) == 0:
        rets.append({"code": 400, "message": "no enough params provided"})
        return rets, 406, ''

    try:
        for data in datas:

            if data['uri'] is None and data['body'] is None:
                CTX.logger.debug("read data failed, None")
                rets.append({"code": 400, "message": "read data failed"})
                return rets, 400, ''

            # NOTE: integer division is required — 'len(...) / 4' yields a
            # float on Python 3 and produces an invalid struct format string.
            if data['body'] is not None:
                feature = struct.unpack(
                    '>' + str(len(data['body']) // 4) + 'f', data['body'])
            else:
                with open(data['uri'], 'rb') as fp:
                    fdt = fp.read()
                feature = struct.unpack('>' + str(len(fdt) // 4) + 'f', fdt)
            if length == -1:
                length = len(feature)
            if feature is None or len(feature) == 0 or len(feature) != length:
                CTX.logger.debug("json.loads failed:%s", str(len(feature)))
                rets.append({"code": 400, "message": "load data failed"})
                return rets, 400, ''
            features.append(feature)
        features = np.asarray(features, dtype=np.float32)
        ret = handler.classify(features)
        if ret is None:
            rets.append({"code": 400, "message": "inference failed"})
            return rets, 599, ''
        # one {label: score} dict per entry, highest score first
        ret = [{
            x: y
        } for x, y in sorted(
            ret.items(), key=lambda item: item[1], reverse=True)]
        rets.append({"code": 0, "message": "success", "result": ret})
    except Exception as e:
        CTX.logger.error("inference error:%s", traceback.format_exc())
        return [], 599, str(e)
    _t2 = time.time()
    monitor_rt_forward().observe(_t2 - _t1)

    CTX.logger.debug("rets:%s", rets)
    return rets, 0, ''
# --- Esempio n. 3 (scrape-artifact separator, commented out so the file parses) ---
def eval(text_recognizer, imges_with_pts):
    """Run text recognition over (image, rects) pairs.

    Returns a list of (predictions, rects) tuples, one per input pair.
    """
    _t1 = time.time()
    output = [
        (text_recog(text_recognizer, image, boxes), boxes)
        for image, boxes in imges_with_pts
    ]
    _t2 = time.time()
    CTX.logger.info("forward: %f", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)
    return output
# --- Esempio n. 4 (scrape-artifact separator, commented out so the file parses) ---
def eval(text_detector, imges_with_type):
    """Detect text on each item of imges_with_type.

    Each item is indexable: item[0] is the image, item[1] its type.
    Returns a list of (text_bboxes, text_area_ratio, type) tuples.
    """
    output = []
    _t1 = time.time()
    # iterate items directly instead of the range(len(...)) anti-idiom;
    # indexing (not tuple-unpacking) is kept because items may carry
    # extra fields beyond [0] and [1]
    for item in imges_with_type:
        text_bboxes, text_area_ratio = text_detect(text_detector, item[0],
                                                   item[1])
        output.append((text_bboxes, text_area_ratio, item[1]))
    _t2 = time.time()
    CTX.logger.info("forward: %f", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)
    return output
# --- Esempio n. 5 (scrape-artifact separator, commented out so the file parses) ---
def eval(context, imges_no_type):
    """Detect text boxes on each image in imges_no_type.

    Returns a flat list of per-image bbox results. (The original's
    'append((text_bboxes))' was a parenthesized name, not a tuple —
    that flat-list behavior is preserved.)
    """
    _t1 = time.time()
    # comprehension replaces the range(len(...)) loop
    output = [text_detect(context, image) for image in imges_no_type]
    _t2 = time.time()
    CTX.logger.info("forward: %f", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)
    return output
# --- Esempio n. 6 (scrape-artifact separator, commented out so the file parses) ---
def eval(crann_recog, imges_with_bboxes):
    """Crop each image by its rects and run CRANN recognition.

    Returns a list of (recognition_result, rects) tuples.
    """
    output = []
    _t1 = time.time()
    for image, boxes in imges_with_bboxes:
        crops = crann_recog.cutimagezz(image, boxes)
        result = crann_recog.deploy(crops)
        output.append((result, boxes))
    _t2 = time.time()
    CTX.logger.info("forward: %f", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)
    return output
# --- Esempio n. 7 (scrape-artifact separator, commented out so the file parses) ---
def eval(text_recognizer, imges_with_bboxes_type):
    """Recognize text for each item of imges_with_bboxes_type.

    Each item is indexable; text_recog receives item[1] then item[0]
    (the original argument order is preserved), and item[2] is passed
    through in the output.
    Returns a list of (predictions, text_bboxes, item[2]) tuples.
    """
    output = []
    _t1 = time.time()
    # iterate items directly instead of the range(len(...)) anti-idiom
    for item in imges_with_bboxes_type:
        predictions, text_bboxes = text_recog(text_recognizer, item[1],
                                              item[0])
        output.append((predictions, text_bboxes, item[2]))
    _t2 = time.time()
    CTX.logger.info("forward: %f", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)
    return output
# --- Esempio n. 8 (scrape-artifact separator, commented out so the file parses) ---
def eval(text_detector, imges_no_type, f_score, f_geometry, input_images):
    """Detect text boxes on each image in imges_no_type.

    f_score / f_geometry / input_images are forwarded verbatim to
    text_detect (presumably detector-model tensors/placeholders —
    TODO confirm against text_detect).
    Returns a flat list of per-image bbox results.
    """
    _t1 = time.time()
    # comprehension replaces the range(len(...)) loop; commented-out
    # debug print removed
    output = [
        text_detect(text_detector, image, f_score, f_geometry, input_images)
        for image in imges_no_type
    ]
    _t2 = time.time()
    CTX.logger.info("forward: %f", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)
    return output
# --- Esempio n. 9 (scrape-artifact separator, commented out so the file parses) ---
def eval(netmare, imges_with_type):
    """Classify each item of imges_with_type via netmare.process.

    Each item is indexable with at least 5 fields; fields 0, 2, 3 and 4
    are passed to process() — index 1 is intentionally skipped, as in
    the original.
    Returns a flat list of classification outputs.
    """
    output = []
    _t1 = time.time()
    # iterate items directly instead of the range(len(...)) anti-idiom
    for item in imges_with_type:
        classify_out = netmare.process(item[0], item[2], item[3], item[4])
        # label 0 means illegal slug car, label 1 means other legal slug car
        output.append(classify_out)
    _t2 = time.time()
    CTX.logger.info("forward: %f", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)
    return output
# --- Esempio n. 10 (scrape-artifact separator, commented out so the file parses) ---
def detect(req):
    """Load the request image and run ZenZhuiShui box/dict recognition.

    Returns {"bboxes": rect_boxes, "dict": boxes_dict}.
    """
    _t1 = time.time()
    # loading image
    img = load_image(req["data"]["uri"], body=req['data']['body'])
    _t2 = time.time()
    CTX.logger.info("inference :: cost for loading image: %f", _t2 - _t1)

    _t3 = time.time()
    vat = ZenZhuiShui_reco()  # create new instance
    rect_boxes, boxes_dict = vat.gen_img_dict(img)
    _t4 = time.time()

    CTX.logger.info("forward: %f", _t4 - _t3)
    # observe the same forward interval that is logged; the original
    # observed _t4 - _t1 (load + forward), inconsistent with both the log
    # line above and the other inference blocks in this file
    monitor_rt_forward().observe(_t4 - _t3)
    return {"bboxes": rect_boxes, "dict": boxes_dict}
# --- Esempio n. 11 (scrape-artifact separator, commented out so the file parses) ---
def net_inference(model, reqs):
    '''
        net inference

        Loads each request image, extracts its feature vector with
        model['net'], and returns the vector packed as big-endian float32
        bytes in the response body.

        Parameters
        ----------
        model: dict holding the feature extractor under key 'net'
        reqs: list of request dicts, each with data.uri / data.body

        Return
        ------
        (rets, code, err): per-item result dicts, overall status code,
        error string
    '''
    CTX.logger.debug("enter net_inference")
    handler = model['net']
    rets = []

    _t1 = time.time()

    try:
        for data in reqs:
            CTX.logger.debug("data:%s", data)
            im = load_image(data["data"]["uri"], data['data']['body'])
            if im is None:
                CTX.logger.debug("read image failed, path:%s",
                                 data['data']['uri'])
                rets.append({"code": 400, "message": "read image failed"})
                continue

            feature = handler.ext_process([im])
            if feature is None or len(feature) == 0:
                rets.append({
                    "code": 400,
                    "message": "failed to get features of the image"
                })
                continue
            CTX.logger.info("feature length info:" + str(len(feature)) +
                            "len(feature[0]): " + str(len(feature[0])))
            # pack the first feature vector as big-endian float32 bytes
            stream = struct.pack('>' + str(len(feature[0])) + 'f', *feature[0])
            rets.append({"code": 0, "message": "", "body": stream})
    except Exception as e:
        CTX.logger.error("inference error:%s", traceback.format_exc())
        return [], 599, str(e)
    _t2 = time.time()
    monitor_rt_forward().observe(_t2 - _t1)

    CTX.logger.debug("rets:%s", rets)
    return rets, 0, ''
# --- Esempio n. 12 (scrape-artifact separator, commented out so the file parses) ---
def net_inference(model, reqs):
    '''
        net inference

        For each request: if an inline body is present, spill it to a
        temp file named by its SHA-1, load the image, run id recognition
        with model['net'], and clean the temp file up afterwards.

        Return
        ------
        (rets, code, err): per-item result dicts, overall status code,
        error string
    '''
    CTX.logger.debug("enter net_inference")
    handler = model['net']
    rets = []
    # pre-bind so the except-handler below never hits an unbound name
    # (the original referenced data/img, which were undefined when the
    # exception fired before the first loop iteration)
    data = None
    img = None

    _t1 = time.time()

    try:
        for data in reqs:
            CTX.logger.debug("data:%s", data)
            img = data["data"]["uri"]
            if data['data']['body'] is not None:
                hash_sha1 = hashlib.sha1()
                # hash the raw bytes directly: wrapping in str() raised
                # TypeError on Python 3 (and was a no-op on Python 2)
                hash_sha1.update(data['data']['body'])
                img = os.path.join("/tmp", hash_sha1.hexdigest())
                with open(img, "wb") as fp:
                    fp.write(data['data']['body'])
            im = load_image(img)
            if im is None:
                CTX.logger.debug("read image failed, path:%s",
                                 data['data']['uri'])
                rets.append({"code": 400, "message": "read image failed"})
                continue

            ret = handler.recognize(im)
            if ret["status"] == -1:
                rets.append({
                    "code": 400,
                    "message": "no valid id info obtained"
                })
                continue
            rets.append(dict(code=0, message='', result=ret['id_res']))
            if data['data']['body'] is not None and os.path.exists(img):
                os.remove(img)
    except Exception as e:
        # best-effort cleanup of the temp file for the failing item
        if data is not None and data['data']['body'] is not None \
                and img is not None and os.path.exists(img):
            os.remove(img)
        CTX.logger.error("inference error:%s", traceback.format_exc())
        return [], 599, str(e)
    _t2 = time.time()
    monitor_rt_forward().observe(_t2 - _t1)

    CTX.logger.debug("rets:%s", rets)
    return rets, 0, ''
def eval(net, reqs):
    '''
        eval forward inference

        Return
        ---------
        output: network numpy.mdarray

        Raises
        ------
        ErrorForwardInference: when the forward pass yields no
            'detection_out' entries
    '''
    _t1 = time.time()
    output = net.forward()
    _t2 = time.time()
    CTX.logger.info("forward: %f\n", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)

    # Validate BEFORE logging: the original logged output['detection_out']
    # first, so a missing key raised KeyError instead of the intended
    # ErrorForwardInference.
    if 'detection_out' not in output or len(output['detection_out']) < 1:
        raise ErrorForwardInference()

    CTX.logger.info('detection_out: {}'.format(output['detection_out']))
    return output
# --- Esempio n. 14 (scrape-artifact separator, commented out so the file parses) ---
def eval(model, face_chips):
    """Extract face features from face_chips in batches.

    Batch size comes from model['batch_size']; the extractor from
    model['feature_extractor'].  Returns a flat list of features.
    """
    CTX.logger.info("---> Inference eval() begin ...\n")
    extractor = model["feature_extractor"]
    # face_aligner = model["face_aligner"]
    size = model["batch_size"]

    _t1 = time.time()
    features = []
    # slice the chips into consecutive batches and extract each batch
    batches = (face_chips[pos:pos + size]
               for pos in range(0, len(face_chips), size))
    for batch in batches:
        features.extend(extractor.extract_features_batch(batch))
    _t2 = time.time()

    CTX.logger.info("===> Eval Time (Extracting features): %f\n", _t2 - _t1)
    monitor_rt_forward().observe(_t2 - _t1)

    return features