def show_card_detect(candis, cap, values):
    """Interactively step through candidate frames and show card detections.

    For each candidate group, seeks the capture to up to 20 selected frame
    indices, runs the ID-card detector, and displays either the raw frame
    annotated with the rectangle score (no card found) or the cropped card
    image. Press 'q' to stop a group early; any key dismisses the final
    window.

    Args:
        candis: iterable of candidate frame-index groups.
        cap: an opened cv.VideoCapture on the source video.
        values: per-frame scores consumed by pick_up_candi.
    """
    rect_args = load_model()
    for candi in candis:
        # Narrow each group to at most 20 promising indices.
        idxs = pick_up_candi(candi, 20, values)
        for idx in idxs:
            cap.set(cv.CAP_PROP_POS_FRAMES, START_FRAME + idx)
            res, frame = cap.read()
            if not res:
                # Seek or decode failed; skip this frame index.
                continue
            # Trim the frame border before detection.
            frame = frame[250:-250, 500:-500]
            print(START_FRAME + idx)
            croped_img, rect_score = id_card_detect(frame, rect_args)
            if not croped_img.width:
                # No card found: show the raw frame with the rectangle score.
                cv.imshow('image', put_text(frame, str(rect_score)))
                if cv.waitKey(1) == ord('q'):
                    break
            else:
                croped_img = cv.cvtColor(np.asarray(croped_img), cv.COLOR_RGB2BGR)
                # Right-middle region of the card (where the portrait sits).
                half = croped_img[int(croped_img.shape[0] * 0.1):int(croped_img.shape[0] * 0.7),
                                  int(croped_img.shape[1] * 0.55):int(croped_img.shape[1] * 0.95)]
                # Result was never used by the original; the call is kept in
                # case retina_face_detect has display side effects.
                retina_face_detect(half)
                cv.imshow('image', put_text(croped_img, str(rect_score)))
                if cv.waitKey(1) == ord('q'):
                    break
    cv.waitKey(0)
def find_id_card(candis, cap, values):
    """Pick the frame index whose detected ID card contains the best face.

    Seeks the capture to up to 10 indices per candidate group, crops each
    frame's border, runs the card detector, and scores the card's portrait
    region with the face detector.

    Args:
        candis: iterable of candidate frame-index groups.
        cap: an opened cv.VideoCapture on the source video.
        values: per-frame scores consumed by pick_up_candi.

    Returns:
        (best_idx, id_card_index_candidate): the index with the highest
        face score, and all indices where a face was found.

    Raises:
        ValueError: when no candidate frame yields a face.
    """
    id_card_index_candidate = []
    face_scores = []
    rect_args = load_model()
    for candi in candis:
        idxs = pick_up_candi(candi, 10, values)
        for idx in idxs:
            cap.set(cv.CAP_PROP_POS_FRAMES, START_FRAME + idx)
            res, frame = cap.read()
            if not res:
                # Seek or decode failed; skip this frame index.
                continue
            # Trim the frame border before detection.
            frame = frame[250:-250, 500:-500]
            print(START_FRAME + idx)
            croped_img, rect_score = id_card_detect(frame, rect_args)
            if not croped_img.width:
                continue
            croped_img = cv.cvtColor(np.asarray(croped_img), cv.COLOR_RGB2BGR)
            # Right-middle region of the card (where the portrait sits).
            half = croped_img[int(croped_img.shape[0] * 0.1):int(croped_img.shape[0] * 0.7),
                              int(croped_img.shape[1] * 0.55):int(croped_img.shape[1] * 0.95)]
            has_face, score, _ = retina_face_distinguish(half)
            if has_face:
                id_card_index_candidate.append(idx)
                face_scores.append(score)
    if not face_scores:
        # np.argmax on an empty array raises a cryptic error; fail loudly
        # instead (consistent with the other find_id_card variant).
        raise ValueError('id_card_candidates wrong')
    best_face_score_idx = np.argmax(np.array(face_scores))
    best_idx = id_card_index_candidate[best_face_score_idx]
    return best_idx, id_card_index_candidate
def detect_id_card(img):
    """Look for an ID card in a frame and return its face confidence score.

    Runs the card detector (using the module-level rect_args), then runs
    the face detector on the right-middle portrait region of the cropped
    card. Returns 0 when no card or no face is found.
    """
    croped_img, rect_score = id_card_detect(img, rect_args)
    if not croped_img.width:
        return 0
    # Convert the detector's RGB output to OpenCV's BGR channel order.
    card = cv.cvtColor(np.asarray(croped_img), cv.COLOR_RGB2BGR)
    height, width = card.shape[0], card.shape[1]
    # The portrait sits in the right-middle part of the card.
    portrait = card[int(height * 0.1):int(height * 0.7),
                    int(width * 0.55):int(width * 0.95)]
    has_face, score, _ = retina_face_distinguish(portrait)
    return score if has_face else 0
def find_id_card(candis, cap, values):
    """Pick the frame index whose detected ID card contains the best face.

    Variant that runs detection on the full (uncropped) frame and briefly
    shows each detected card in its own window while searching.

    Args:
        candis: iterable of candidate frame-index groups.
        cap: an opened cv.VideoCapture on the source video.
        values: per-frame scores consumed by pick_up_candi.

    Returns:
        (best_idx, id_card_index_candidate): the index with the highest
        face score, and all indices where a face was found.

    Raises:
        ValueError: when no candidate frame yields a face.
    """
    id_card_index_candidate = []
    face_scores = []
    rect_args = load_model()
    for candi in candis:
        idxs = pick_up_candi(candi, 10, values)
        for idx in idxs:
            cap.set(cv.CAP_PROP_POS_FRAMES, START_FRAME + idx)
            res, frame = cap.read()
            if not res:
                # Seek or decode failed; skip this frame index.
                continue
            print('find id ', START_FRAME + idx)
            croped_img, rect_score = id_card_detect(frame, rect_args)
            if not croped_img.width:
                continue
            croped_img = cv.cvtColor(np.asarray(croped_img), cv.COLOR_RGB2BGR)
            # Right-middle region of the card (where the portrait sits).
            half = croped_img[int(croped_img.shape[0] * 0.1): int(croped_img.shape[0] * 0.7),
                              int(croped_img.shape[1] * 0.55): int(croped_img.shape[1] * 0.95)]
            has_face, score, _ = retina_face_distinguish(half)
            # Debug display: one short-lived window per inspected index.
            cv.imshow(f'{idx} finding id-{has_face}', croped_img)
            cv.waitKey(5)
            if has_face:
                id_card_index_candidate.append(idx)
                face_scores.append(score)
    if len(face_scores) == 0:
        raise ValueError('id_card_candidates wrong')
    best_face_score_idx = np.argmax(np.array(face_scores))
    best_idx = id_card_index_candidate[best_face_score_idx]
    return best_idx, id_card_index_candidate