Example #1
def postrecog(daikai_model, fapaiolian_model, req):
    img = load_image(req["data"]["uri"], body=req["data"]["body"])
    # CTX.logger.info("load param - dict: %s", req["params"]["dict"])
    # CTX.logger.info("load param - texts: %s", req["params"]["texts"])
    # boxes_dict = json.loads(req["params"]["dict"])
    rec_result = req["params"]["texts"]
    CTX.logger.info("load param - texts: %s", rec_result)

    _t1 = time.time()
    vat = ZenZhuiShui_reco()  # cannot pass an image_dict argument here, so create a new instance and regenerate the parameters
    _, boxes_dict = vat.gen_img_dict(img)
    # vat.gen_img_dict_base(img)
    vat.predict_oridinary(boxes_dict, rec_result)
    vat.predict_other(boxes_dict, rec_result)
    vat.predict_XiaoShouMingXi(boxes_dict, rec_result)
    vat.predict_svm(daikai_model)
    vat.predict_FaPiaoLianCi(fapaiolian_model)
    vat.predict_XiaoLeiMingCheng()
    res = postProcess(vat.out_dict)
    _t2 = time.time()

    CTX.logger.info("post: %f", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)

    return res
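For reference, a minimal sketch of the request shape postrecog() reads; only the fields accessed above are shown, and the sample values are made up:

# Hypothetical request, shaped after the accesses in postrecog() above.
req = {
    "data": {
        "uri": "file:///tmp/invoice.jpg",  # image location for load_image()
        "body": None,                      # optional raw image bytes
    },
    "params": {
        "texts": [],  # per-box recognition results, consumed as rec_result
    },
}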
Example #2
def post_eval(output, reqs=None):
    resps = []
    cur_batchsize = len(output)
    _t1 = time.time()
    for i in xrange(cur_batchsize):
        result = dump_result(output[i])
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
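dump_result() comes from the surrounding module and is not shown; a hypothetical stand-in, just to make the loop's contract concrete (the real helper may differ):

# Hypothetical stand-in for the module's dump_result(); the real one
# presumably serializes a single network output into a JSON-safe dict.
def dump_result(single_output):
    return {"raw": single_output}

# With the framework's CTX logger and monitor_rt_post metric available,
# post_eval(["hit", "miss"]) would return:
# [{"code": 0, "message": "", "result": {"raw": "hit"}},
#  {"code": 0, "message": "", "result": {"raw": "miss"}}]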
Example #3
def post_eval(output):
    resps = []

    _t1 = time.time()
    for res, bboxes in output:
        result = {}
        result["text"] = res
        result["bboxes"] = bboxes
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
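This variant expects output to be an iterable of (text, bboxes) pairs; a made-up call for illustration (CTX and monitor_rt_post come from the serving framework):

# Hypothetical output: one (text, bboxes) pair per image.
sample_output = [
    ("hello world", [[0, 0], [100, 0], [100, 30], [0, 30]]),
]
resps = post_eval(sample_output)
# resps[0]["result"] == {"text": "hello world",
#                        "bboxes": [[0, 0], [100, 0], [100, 30], [0, 30]]}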
Example #4
def post_eval(text_recognizer, output, reqs=None):

    resps = []
    cur_batchsize = len(output)
    _t1 = time.time()
    for i in xrange(cur_batchsize):
        predictions = output[i][0]
        text_bboxes = output[i][1]
        img_type = output[i][2]
        text_recog_result = combine_text(predictions, text_bboxes, img_type)
        result = dump_result(text_recog_result)
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
Example #5
def post_eval(output, reqs=None):
    resps = []
    cur_batchsize = len(output)
    _t1 = time.time()
    for i in xrange(cur_batchsize):
        text_bboxes = output[i]
        if len(text_bboxes) == 0:
            CTX.logger.info("no text detected")
            resps.append({"code": 0, "message": "", "result": {}})
            continue
        result = dump_result(text_bboxes)
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
Example #6
def post_eval(output, threshold, cur_batchsize, label_list):
    '''
        parse net output, as numpy.ndarray, to EvalResponse
        Parameters
        ----------
        output: net output dict; output['prob'] holds per-image class probabilities
        threshold: minimum score for a non-'normal' top-1 class to keep its index
        cur_batchsize: number of valid images in the batch
        label_list: label list of labels.csv
        Return
        ----------
        resps: list of EvalResponse{
            "code": <code|int>,
            "message": <error message|str>,
            "result": <eval result|object>,
            "result_file": <eval result file path|string>
        }
    '''
    resps = []
    _t1 = time.time()

    for index, output_prob in enumerate(output['prob']):
        if index >= cur_batchsize:
            break
        output_prob = np.squeeze(output_prob)

        result = {}
        confidences = []
        top_index = int(output_prob.argsort()[-1])  # top-1 class index
        class_name = str(label_list[top_index])
        score = float(output_prob[top_index])
        if score < threshold and class_name != 'normal':
            top_index = -1  # low-confidence non-'normal' prediction is masked
        confidence = {"index": top_index, "class": class_name, "score": score}
        if class_name != "":
            confidences.append(confidence)
        result["confidences"] = confidences
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f\n", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
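To make the gating above concrete, a self-contained numpy sketch of the top-1 pick and threshold check (labels and scores are made up):

import numpy as np

label_list = ['normal', 'cat', 'dog']        # made-up labels
threshold = 0.6
output_prob = np.squeeze(np.array([0.10, 0.55, 0.35]))

top_index = int(output_prob.argsort()[-1])   # 1 -> 'cat'
class_name = str(label_list[top_index])
score = float(output_prob[top_index])        # 0.55
if score < threshold and class_name != 'normal':
    top_index = -1                           # below threshold, index suppressed
# confidence == {'index': -1, 'class': 'cat', 'score': 0.55}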
Example #7
def post_eval(text_recognizer, output, reqs=None):
    resps = []

    _t1 = time.time()
    for predictions, text_pts in output:
        result = {}
        items = []

        for prediction, text_bbox in zip(predictions, text_pts):
            item = {}
            item['pts'] = text_bbox
            item['text'] = ' '.join(prediction.split())
            items.append(item)

        result["texts"] = items
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
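One detail worth noting above: ' '.join(prediction.split()) collapses any run of whitespace in a prediction to single spaces, e.g.:

prediction = "  hello   world\n"
' '.join(prediction.split())   # -> 'hello world'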
Example #8
def post_eval(output, thresholds, image_shape_list_h_w):
    '''
        parse net output, as numpy.ndarray, to EvalResponse
        Parameters
        ----------
        output: net output dict; output['detection_out'] holds the detection rows
        thresholds: per-class score thresholds
        image_shape_list_h_w: list of (height, width) pairs, one per input image
        Return
        ----------
        resps: list of EvalResponse{
            "code": <code|int>,
            "message": <error message|str>,
            "result": <eval result|object>,
            "result_file": <eval result file path|string>
        }
    '''
    # thresholds = [0,0.1,0.1,0.1,0.1,0.1,1.0]
    resps = []
    cur_batchsize = len(image_shape_list_h_w)
    _t1 = time.time()
    # output_bbox_list : bbox_count * 7
    output_bbox_list = output['detection_out'][0][0]
    image_result_dict = dict()  # image_id : bbox_list
    for i_bbox in output_bbox_list:
        image_id = int(i_bbox[0])
        if image_id >= cur_batchsize:
            break
        h = image_shape_list_h_w[image_id][0]
        w = image_shape_list_h_w[image_id][1]
        class_index = int(i_bbox[1])
        # [background,guns,knives,tibetan flag,islamic flag,isis flag,not terror]
        if class_index < 1 or class_index >= 6:
            continue
        score = float(i_bbox[2])
        if score < thresholds[class_index]:
            continue
        bbox_dict = dict()
        bbox_dict['index'] = class_index
        bbox_dict['score'] = score
        bbox = i_bbox[3:7] * np.array([w, h, w, h])
        bbox_dict['pts'] = []
        xmin = int(bbox[0]) if int(bbox[0]) > 0 else 0
        ymin = int(bbox[1]) if int(bbox[1]) > 0 else 0
        xmax = int(bbox[2]) if int(bbox[2]) < w else w
        ymax = int(bbox[3]) if int(bbox[3]) < h else h
        bbox_dict['pts'].append([xmin, ymin])
        bbox_dict['pts'].append([xmax, ymin])
        bbox_dict['pts'].append([xmax, ymax])
        bbox_dict['pts'].append([xmin, ymax])
        if image_id in image_result_dict:
            image_result_dict[image_id].append(bbox_dict)
        else:
            image_result_dict[image_id] = [bbox_dict]
    for image_id in range(cur_batchsize):
        res_list = image_result_dict.get(image_id, [])
        result = {"detections": res_list}
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f\n", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
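Columns 3:7 of each detection row hold normalized corner coordinates; a small self-contained numpy sketch of the rescaling and clamping performed above (all values are made up):

import numpy as np

h, w = 480, 640                          # image height, width
i_bbox = np.array([0, 2, 0.93, -0.01, 0.10, 0.50, 1.02])  # made-up row

bbox = i_bbox[3:7] * np.array([w, h, w, h])  # -> [-6.4, 48.0, 320.0, 489.6]
xmin = max(int(bbox[0]), 0)                  # clamp left edge to 0
ymin = max(int(bbox[1]), 0)
xmax = min(int(bbox[2]), w)                  # clamp right edge to image width
ymax = min(int(bbox[3]), h)                  # -> (0, 48, 320, 480)
pts = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]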
Example #9
def post_eval(output, thresholds, image_shape_list_h_w, labelmap):
    '''
        parse net output, as numpy.ndarray, to EvalResponse
        Parameters
        ----------
        output: net output dict; output['detection_out'] holds the detection rows
        thresholds: per-class score thresholds
        image_shape_list_h_w: list of (height, width) pairs, one per input image
        labelmap: file path of labels.csv
        Return
        ----------
        resps: list of EvalResponse{
            "code": <code|int>,
            "message": <error message|str>,
            "result": <eval result|object>,
            "result_file": <eval result file path|string>
        }
    '''
    # thresholds = [0, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8]
    resps = []
    cur_batchsize = len(image_shape_list_h_w)
    _t1 = time.time()
    # output_bbox_list : bbox_count * 5
    output_bbox_list = output['detection_out'][0][0]
    image_result_dict = dict()  # image_id : bbox_list
    labelList = get_labellist(labelmap)
    for i_bbox in output_bbox_list:
        image_id = int(i_bbox[0])
        if image_id >= cur_batchsize:
            break
        h = image_shape_list_h_w[image_id][0]
        w = image_shape_list_h_w[image_id][1]
        class_index = int(i_bbox[1])
        # [background, police_badge, police_car_moto, police_car_vehicle, police_uniform]
        if class_index < 1:
            continue
        score = float(i_bbox[2])
        if score < thresholds[class_index]:
            continue
        name = labelList[class_index]
        bbox_dict = dict()
        bbox_dict['index'] = class_index
        bbox_dict['score'] = score
        bbox_dict['class'] = name
        bbox = i_bbox[3:7] * np.array([w, h, w, h])
        bbox_dict['bboxes'] = []
        xmin = int(bbox[0]) if int(bbox[0]) > 0 else 0
        ymin = int(bbox[1]) if int(bbox[1]) > 0 else 0
        xmax = int(bbox[2]) if int(bbox[2]) < w else w
        ymax = int(bbox[3]) if int(bbox[3]) < h else h
        bbox_dict['bboxes'].append([xmin, ymin])
        bbox_dict['bboxes'].append([xmax, ymin])
        bbox_dict['bboxes'].append([xmax, ymax])
        bbox_dict['bboxes'].append([xmin, ymax])
        if image_id in image_result_dict:
            image_result_dict[image_id].append(bbox_dict)
        else:
            image_result_dict[image_id] = [bbox_dict]
    for image_id in range(cur_batchsize):
        res_list = image_result_dict.get(image_id, [])
        result = {"items": res_list}
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f\n", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
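get_labellist() is defined elsewhere; a plausible stand-in, assuming labels.csv holds one class name per line (the real file format may differ):

def get_labellist(labelmap):
    # Hypothetical reader: one label per line, first CSV column,
    # with index 0 conventionally being the background class.
    with open(labelmap) as f:
        return [line.strip().split(',')[0] for line in f if line.strip()]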
Example #10
def post_eval(net, output, thresholds, reqs=None):
    '''
        parse net output, as numpy.ndarray, to EvalResponse
        Parameters
        ----------
        net: net created by net_init
        output: net output dict; output['detection_out'] holds the detection rows
        thresholds: per-class score thresholds
        reqs: parsed reqs from net_inference
        Return
        ----------
        resps: list of EvalResponse{
            "code": <code|int>,
            "message": <error message|str>,
            "result": <eval result|object>,
            "result_file": <eval result file path|string>
        }
    '''
    resps = []
    # cur_batchsize = len(output['detection_out']) # len(output['detection_out'])  always 1
    cur_batchsize = len(reqs)
    _t1 = time.time()
    # output_bbox_list : bbox_count * 7
    output_bbox_list = output['detection_out'][0][0]
    image_result_dict = dict()  # image_id : bbox_list
    for i_bbox in output_bbox_list:
        image_id = int(i_bbox[0])
        if image_id >= cur_batchsize:
            break
        image_data = net.images[image_id]
        h = image_data[0]
        w = image_data[1]
        class_index = int(i_bbox[1])
        score = float(i_bbox[2])
        if class_index == 4 or class_index < 0:
            continue
        if score < thresholds[class_index]:
            continue
        if image_id in image_result_dict:
            the_image_bbox_list = image_result_dict[image_id]
        else:
            the_image_bbox_list = []
            image_result_dict[image_id] = the_image_bbox_list
        bbox_dict = dict()
        bbox_dict['cls'] = class_index
        bbox_dict['score'] = score
        bbox = i_bbox[3:7] * np.array([w, h, w, h])
        bbox_dict['bbox'] = bbox.tolist()
        the_image_bbox_list.append(bbox_dict)
    for image_id in range(cur_batchsize):
        res_list = image_result_dict.get(image_id, [])
        result = {"detections": res_list}
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f\n", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
Example #11
def post_eval(imageIdMapDict, imageReadInfoDict, output, thresholds, label_list):
    '''
        parse net output, as numpy.ndarray, to EvalResponse
        imageIdMapDict: maps net input image id to the original input image id
    '''
    resps = []
    _t1 = time.time()
    # output_bbox_list : bbox_count * 7
    output_bbox_list = output['detection_out'][0][0]
    image_result_dict = dict()  # image_id : bbox_list
    for i_bbox in output_bbox_list:
        # i_bbox : length == 7 ; 0==image_id,1==class_index,2==score,3==bbox_xmin,4==bbox_ymin,5==bbox_xmax,6==bbox_ymax
        image_id = int(i_bbox[0])
        if image_id >= len(imageIdMapDict):
            break
        inputImageId = imageIdMapDict[image_id]
        h = imageReadInfoDict[inputImageId]['height']
        w = imageReadInfoDict[inputImageId]['width']
        class_index = int(i_bbox[1])
        if class_index < 1:  # background index == 0; RefineDet does not emit background rows, so this is just a safeguard
            continue
        score = float(i_bbox[2])
        if score < thresholds[class_index]:
            continue
        name = label_list[class_index]
        bbox_dict = dict()
        bbox_dict['index'] = class_index
        bbox_dict['score'] = score
        bbox_dict['class'] = name
        bbox = i_bbox[3:7] * np.array([w, h, w, h])
        bbox_dict['pts'] = []
        xmin = int(bbox[0]) if int(bbox[0]) > 0 else 0
        ymin = int(bbox[1]) if int(bbox[1]) > 0 else 0
        xmax = int(bbox[2]) if int(bbox[2]) < w else w
        ymax = int(bbox[3]) if int(bbox[3]) < h else h
        bbox_dict['pts'].append([xmin, ymin])
        bbox_dict['pts'].append([xmax, ymin])
        bbox_dict['pts'].append([xmax, ymax])
        bbox_dict['pts'].append([xmin, ymax])
        if image_id in image_result_dict:
            image_result_dict[image_id].append(bbox_dict)
        else:
            image_result_dict[image_id] = [bbox_dict]
    # merge net inference output and preprocessing results into resps
    resps = []
    for original_image_id in range(len(imageReadInfoDict)):
        if imageReadInfoDict[original_image_id]['flag'] == 1: # original input image error
            resps.append({
                "code": imageReadInfoDict[original_image_id]['errorCode'], 
                "message": imageReadInfoDict[original_image_id]['errorInfo'],
                "result": {}
            })
            continue
        inputNet_image_id = imageReadInfoDict[original_image_id]['normalImageIndex']
        res_list = image_result_dict.get(inputNet_image_id, [])
        result = {"detections": res_list}
        resps.append({"code": 0, "message": "", "result": result})
    _t2 = time.time()
    CTX.logger.info("post: %f\n", _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resps
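For illustration, made-up inputs matching the fields read above: a two-image batch where original image 0 failed preprocessing and image 1 was decoded normally:

# Hypothetical preprocessing bookkeeping (field names taken from the code above).
imageReadInfoDict = {
    0: {'flag': 1, 'errorCode': 400, 'errorInfo': 'image decode failed'},
    1: {'flag': 0, 'normalImageIndex': 0, 'height': 480, 'width': 640},
}
imageIdMapDict = {0: 1}   # net input image id 0 -> original input image id 1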
Example #12
def eval(model, features):
    CTX.logger.info("---> Inference eval() begin ...\n")
    _t1 = time.time()

    inter_index = "_inter_index"

    min_samples = model['min_samples_per_cluster']
    dist_thresh = 1.0 - model['sim_thresh']

    clustered_data_list = []
    single_data_list = []
    new_data_list = []
    max_gid = -1

    all_ft_list = features

    # print "features: ", features

    for i in range(len(all_ft_list)):
        all_ft_list[i][inter_index] = i

    # calc_feature_list_norm_inv(all_ft_list)
    # print "all_ft_list: ", all_ft_list

    for ft in all_ft_list:
        if 'feature' not in ft or len(ft['feature']) < 1:
            continue

        if model['incrementally']:
            if ft['group_id'] >= 0:
                clustered_data_list.append(ft)
            elif ft['group_id'] == -1:
                single_data_list.append(ft)
            else:
                new_data_list.append(ft)

            if ft['group_id'] > max_gid:
                max_gid = ft['group_id']
        else:
            new_data_list.append(ft)

    rlt_list = all_ft_list

    if len(new_data_list) > 0:
        if len(clustered_data_list) < 1:
            (rlt_clustered_list,
             rlt_single_list) = cluster_all_features(all_ft_list, dist_thresh,
                                                     min_samples)

        else:
            (rlt_clustered_list, rlt_single_list) = incrementally_cluster(
                clustered_data_list, single_data_list, new_data_list,
                dist_thresh, min_samples, max_gid)
        rlt_list = rlt_clustered_list + rlt_single_list
    else:
        for item in single_data_list:
            item['group_id'] = -1
            item['distance_to_center'] = 0.0
        for item in clustered_data_list:
            item['distance_to_center'] = 0.0
        rlt_list = clustered_data_list + single_data_list

    # indices = [ft[inter_index] for ft in rlt_list]
    # new_rlt_list = [rlt_list[i] for i in indices]

    cluster_rlt = []
    for ft in rlt_list:
        tmp = {
            'face_id': ft['face_id'],
            'group_id': ft['group_id'],
            'distance_to_center': round(ft['distance_to_center'], 6)
        }

        cluster_rlt.append(tmp)

    _t2 = time.time()

    CTX.logger.info("===>clustering {} features costs {} seconds\n".format(
        len(features), _t2 - _t1))
    monitor_rt_post().observe(_t2 - _t1)

    dict_info = {"code": 0, "message": "", "result": json.dumps(cluster_rlt)}

    return dict_info
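The fields read above suggest inputs of roughly this shape; a made-up sketch, not the service's actual schema:

model = {
    'min_samples_per_cluster': 2,
    'sim_thresh': 0.75,        # dist_thresh = 1.0 - 0.75 = 0.25
    'incrementally': False,    # if True, features are routed by group_id
}
# group_id semantics when incrementally is True:
#   >= 0  already clustered, -1  previously single, anything else  new data
features = [
    {'face_id': 'f1', 'group_id': -2, 'feature': [0.10, 0.20, 0.30]},
    {'face_id': 'f2', 'group_id': -2, 'feature': [0.11, 0.20, 0.30]},
]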
Example #13
def post_eval(model, features, reqs):
    '''
        parse net output, as numpy.ndarray, to EvalResponse
        Parameters
        ----------
        model: model dict from net_init; model["workspace"] is the feature dump directory
        features: list of feature vectors from net_inference
        reqs: parsed reqs from net_inference
        Return
        ----------
        resp: list of EvalResponse{
            "code": <code|int>,
            "message": <error message|str>,
            "body": <eval result|object>,
        }
    '''
    CTX.logger.info("---> Inference post_eval() begin ...\n")

    resp = []
    # reqid = None
    workspace = model["workspace"]

    _t1 = time.time()
    for i, feature in enumerate(features):
        #CTX.logger.debug("featuer len {} and feature {}".format(len(feature),feature),extra = {"reqid": reqid})
        # print '---> feature: ', feature
        np.save(osp.join(workspace, '%d.npy' % i), feature)
        # stream = struct.pack('>' + str(len(feature)) + 'f', *feature)
        stream = pack_feature_into_stream(feature)
        # print '---> packed stream: ', stream

        # feature_unpack = np.array(unpack_feature_from_stream(stream), np.float32)
        # print '---> unpacked feature from stream: ', feature_unpack
        # print '---> sum(feature-feature_unpack): ',
        # (feature-feature_unpack).sum()

        CTX.logger.info("struct.unpack info:" + ">" + str(len(stream) / 4) +
                        "f")

        #---- old response format (face-feature-v1, v2, v3)
        # hash_sha1 = hashlib.sha1()
        # hash_sha1.update(stream)
        # feature_file_name = os.path.join(workspace, hash_sha1.hexdigest())
        # file = open(feature_file_name, "wb")
        # file.write(stream)
        # file.close()
        # resp.append({"code": 0, "message": "",
        #               "result_file": str(feature_file_name)})

        #---- new response format (face-feature-v4)
        res = {
            "code": 0,
            "message": "",
            "result": {
                "uri": reqs[i]["data"]["uri"]
            },
            "body": stream
        }
        resp.append(res)

    _t2 = time.time()
    CTX.logger.info("===> Post-eval Time (assembling responses): %f\n",
                    _t2 - _t1)
    monitor_rt_post().observe(_t2 - _t1)
    return resp
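pack_feature_into_stream() is not shown; judging from the struct.pack call left in the comments above, it plausibly amounts to the following (an assumption, not the module's actual code):

import struct

def pack_feature_into_stream(feature):
    # Pack the feature vector as big-endian float32s, mirroring the
    # commented-out struct.pack('>' + str(len(feature)) + 'f', *feature).
    return struct.pack('>' + str(len(feature)) + 'f', *feature)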