Ejemplo n.º 1 (puntuación: 0)
def main(_):
    """Entry point: dispatch on FLAGS.mode to train, validate, or infer.

    Modes:
      - "train": run the training loop.
      - "validation": run validation and pickle the resulting dict to disk.
      - "inference": predict labels for every image under ./tmp/, log the
        top-3 candidates per image, and write the top-1 text to
        ./result/outcome.txt before handing it to outcome().
    """
    print(FLAGS.mode)
    if FLAGS.mode == "train":
        train()
    elif FLAGS.mode == 'validation':
        dct = validation()
        result_file = 'result.dict'
        logger.info('Write result into {0}'.format(result_file))
        with open(result_file, 'wb') as f:
            pickle.dump(dct, f)
        logger.info('Write file ends')
    elif FLAGS.mode == 'inference':
        label_dict = get_label_dict()
        name_list = get_file_list('./tmp/')
        # Feed the list of image names to inference() to get the predicted
        # values and their indices.
        final_predict_val, final_predict_index = inference(name_list)
        final_reco_text = []  # final recognized text, top-1 per image
        # Log the top-3 predictions; candidate1 is the most probable one.
        for i, name in enumerate(name_list):
            candidate1 = final_predict_index[i][0][0]
            candidate2 = final_predict_index[i][0][1]
            candidate3 = final_predict_index[i][0][2]
            final_reco_text.append(label_dict[int(candidate1)])
            logger.info(
                '[the result info] image: {0} predict: {1} {2} {3}; predict index {4} predict_val {5}'
                .format(name, label_dict[int(candidate1)],
                        label_dict[int(candidate2)],
                        label_dict[int(candidate3)], final_predict_index[i],
                        final_predict_val[i]))
        print('=====================OCR RESULT=======================\n')
        # Print every recognized character (top-1) and persist them.
        # FIX: the original opened the file with 'r+' (leaked the handle and
        # fails when the file does not exist yet) and then re-opened it in
        # 'a+' mode on every loop iteration, which made the utf-8-sig codec
        # emit a spurious BOM before every character. A single 'w' open
        # truncates, writes one BOM, and closes deterministically.
        with open('./result/outcome.txt', 'w', encoding="utf-8-sig") as f:
            for text in final_reco_text:
                print(text, end=" ")
                f.write("%s" % text)
        time.sleep(1)
        outcome('./result/outcome.txt')
Ejemplo n.º 2 (puntuación: 0)
 def run(self):
     """Camera loop: read frames from the capture device until it fails.

     Each frame is converted from BGR to RGB and wrapped as a QImage.
     When self.paizhao is set to 1 (take-photo request), the current frame
     is saved to ./origin/takephoto.png, shown scaled in self.imgLab, and
     object detection plus the audio outcome are run on it. The device is
     always released on exit.
     """
     if self.device.isOpened():
         try:
             while True:
                 ret, frame = self.device.read()
                 # FIX: the original ignored `ret`; a failed read returns
                 # frame=None and `frame.shape` raised AttributeError.
                 # Stop the loop cleanly so `finally` releases the device.
                 if not ret:
                     break
                 height, width, bytesPerComponent = frame.shape
                 bytesPerLine = bytesPerComponent * width
                 # Convert colour channel order in place (BGR -> RGB).
                 cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, frame)
                 # Wrap the frame buffer as a QImage (no copy).
                 image = QImage(frame.data, width, height, bytesPerLine,
                                QImage.Format_RGB888)
                 if self.paizhao == 1:
                     image.save('./origin/takephoto.png')
                     self.paizhao = 0
                     pixmap = QPixmap.fromImage(image)
                     pixmap = pixmap.scaled(400, 300,
                                            QtCore.Qt.KeepAspectRatio)
                     self.imgLab.setPixmap(pixmap)
                     objectdetection()
                     outcome('./result/audio/objoutcome.txt')
         finally:
             self.device.release()
Ejemplo n.º 3 (puntuación: 0)
def run():
    """Listen for one voice command and dispatch to the matching mode.

    Returns 0 when the user asks to stop ("沒事"/"結束"); otherwise returns
    None after the chosen action's audio outcome has been played.
    """
    spoken = command()
    if spoken in ("閱讀", "閱讀模式"):
        # Reading mode: photograph the page, reject blurry shots, OCR it.
        takephoto()
        notblurry()
        ocr()
        outcome('./result/audio/outcome.txt')
    elif spoken in ("用餐", "用餐模式"):
        # Dining mode: photograph the scene and run object detection.
        takephoto()
        objectdetection()
        outcome('./result/audio/objoutcome.txt')
    elif spoken in ("沒事", "結束"):
        return 0
    else:
        # Unrecognized command: play the "didn't understand" audio.
        outcome('./result/audio/responsenoidea.txt')