def response(text, name):
    """Predict slots for *text* with model *name* and return the result as a JSON string.

    The returned JSON carries the original text under 'content' and the merged
    slot dictionary under 'slot'; the serialized payload is also logged.
    """
    preds = predict(text, name)
    slots = merge(text, preds)
    payload = {'content': text, 'slot': slots}
    serialized = json.dumps(payload, ensure_ascii=False)
    logger.info(serialized)
    return serialized
def response():
    """HTTP handler: tag the posted text and attach a slot dict to the payload.

    Reads JSON with a 'content' field from the request, runs the tagger,
    keeps only non-'O' words (mapped through zh_en), and returns the
    augmented payload serialized as JSON (also logged).
    """
    data = request.get_json()
    words, preds = predict(data['content'])
    ents, tags = [], []
    for word, ind in zip(words, preds):
        label = ind_labels[ind]
        if label != 'O':
            ents.append(word)
            tags.append(zh_en[label])
    data['slot'] = make_dict(ents, tags)
    serialized = json.dumps(data, ensure_ascii=False)
    logger.info(serialized)
    return serialized
def rec():
    """HTTP handler: run prediction on the posted base64 payload.

    Returns a JSON response {'status': ..., 'data': ...} with a permissive
    CORS header. On success 'data' is the integer prediction; on failure it
    is the formatted traceback.
    """
    try:
        base64_data = request.get_data().strip()
        ret_num = int(predict(base64_data))
        print(ret_num)
        result = {'status': 'success', 'data': ret_num}
    except Exception:  # fix: the original bound `as e` but never used it
        # NOTE(review): sending a full traceback to the client leaks server
        # internals — consider logging it and returning a generic message.
        result = {
            'status': 'error',
            'data': traceback.format_exc(),
        }
    response = make_response(json.dumps(result))
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
def test(name, sents):
    """Evaluate model *name* on *sents* and report per-label and overall metrics.

    Writes a 'label,prec,rec' CSV (classes 1..class_num-1) to the path mapped
    from *name*, and prints the weighted F1 and accuracy.
    """
    # Both lists are seeded with a single class-0 pair — presumably to guarantee
    # class 0 appears in the metric computation; TODO confirm with the author.
    flat_labels, flat_preds = [0], [0]
    for text, pairs in sents.items():
        gold = [label_inds[pair['label']] for pair in pairs]
        # Keep only the last seq_len gold labels when the text exceeds seq_len,
        # matching the window the model predicts over.
        bound = len(text) - seq_len if len(text) > seq_len else 0
        flat_labels.extend(gold[bound:])
        flat_preds.extend(predict(text, name))
    precs = precision_score(flat_labels, flat_preds, average=None)
    recs = recall_score(flat_labels, flat_preds, average=None)
    with open(map_item(name, paths), 'w') as f:
        f.write('label,prec,rec' + '\n')
        for ind in range(1, class_num):
            f.write('%s,%.2f,%.2f\n' % (ind_labels[ind], precs[ind], recs[ind]))
    f1 = f1_score(flat_labels, flat_preds, average='weighted', labels=label_set)
    print('\n%s f1: %.2f - acc: %.2f' % (name, f1, accuracy_score(flat_labels, flat_preds)))
def test(sents):
    """Evaluate the CRF tagger on *sents* and report per-label and overall metrics.

    For each text, aligns the gold (word, label) triples with the predicted
    (word, label) pairs position by position, keeping only positions where
    the words agree, then writes a 'label,prec,rec' CSV to path_crf and
    prints the weighted F1 and accuracy.
    """
    flat_labels, flat_preds = list(), list()
    for text, triples in sents.items():
        word1s, labels = list(), list()
        for triple in triples:
            word1s.append(triple['word'])
            labels.append(label_inds[triple['label']])
        word2s, preds = predict(text)
        # BUG FIX: the original tested `word2s[i] == word2s[i]`, a self-comparison
        # that is always True, so every position was kept regardless of alignment.
        # Compare gold word vs predicted word instead. zip() also prevents the
        # IndexError the index loop hit when word2s was longer than word1s.
        for gold_word, gold_label, pred_word, pred_label in zip(word1s, labels, word2s, preds):
            if gold_word == pred_word:
                flat_labels.append(gold_label)
                flat_preds.append(pred_label)
    precs = precision_score(flat_labels, flat_preds, average=None)
    recs = recall_score(flat_labels, flat_preds, average=None)
    with open(path_crf, 'w') as f:
        f.write('label,prec,rec' + '\n')
        for i in range(class_num):
            f.write('%s,%.2f,%.2f\n' % (ind_labels[i], precs[i], recs[i]))
    f1 = f1_score(flat_labels, flat_preds, average='weighted', labels=label_set)
    print('\n%s f1: %.2f - acc: %.2f' % ('crf', f1, accuracy_score(flat_labels, flat_preds)))
# Brief pause before starting capture — presumably to let the camera warm up;
# TODO confirm.
time.sleep(0.1)
# Load the LBP frontal-face cascade and the pre-trained LBPH recognizer model.
lbp_face_cascade = cv2.CascadeClassifier(
    'opencv-source/data/lbpcascades/lbpcascade_frontalface.xml')
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read('data/model.yml')
# Main loop: stream BGR frames from the camera, run recognition on each,
# display the result, and exit when the user presses 'q'.
for frame in camera.capture_continuous(rawCapture, format="bgr"):
    image = frame.array
    image = np.array(image)
    if image is None:
        continue
    # rec.predict returns the frame to display — presumably annotated with
    # recognition results; verify against rec's definition.
    image = rec.predict(image)
    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF
    # The capture buffer must be cleared each iteration so capture_continuous
    # can reuse it for the next frame.
    rawCapture.truncate(0)
    if key == ord("q"):
        break