Example #1
def write(x, batches, results):
    # x is one detection row: x[0] is the image index, x[1:5] the box corners, x[-1] the class id
    c1 = tuple(x[1:3].int())
    c2 = tuple(x[3:5].int())
    #print("c1 is {}, c2 is {}".format(c1,c2))
    img = results[int(x[0])]
    cls = int(x[-1])
    label = "{0}".format(classes[cls])
    color = random.choice(colors)
    cv2.rectangle(img, c1, c2, color, 1)
    # the imshow/waitKey calls below only visualize each drawing step
    cv2.imshow('rect1', img)
    cv2.waitKey(3000)
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 30
    cv2.rectangle(img, c1, c2, color, -1)
    cv2.imshow('rect2', img)
    cv2.waitKey(3000)
    # ft2 renders the label via freetype, so it may contain Chinese characters that cv2.putText cannot draw
    ft = ft2.put_chinese_text('msyh.ttf')
    #print("label is {}".format(label))
    ft.draw_text(image=img,
                 pos=(c1[0], c1[1] + t_size[1] - 7),
                 text=label,
                 text_size=20,
                 text_color=[255, 255, 255])
    # cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1)
    cv2.imshow('hello', img)
    cv2.waitKey(3000)
    return img
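
All of the snippets on this page follow the same pattern: draw the detection box with cv2.rectangle, then render the (possibly Chinese) label with ft2's freetype helper instead of cv2.putText. Below is a minimal, self-contained sketch of that pattern; it is only an illustration that assumes the ft2 module and a CJK-capable font file (msyh.ttf) are available locally, and it uses the draw_text(image, pos, text, text_size, text_color) signature that the snippets themselves rely on.

import cv2
import numpy as np
import ft2  # assumed: the same freetype wrapper used throughout these examples

ft = ft2.put_chinese_text('msyh.ttf')  # assumed font path, as in the snippets on this page

img = np.zeros((200, 400, 3), dtype=np.uint8)  # blank canvas standing in for a video frame
c1, c2 = (30, 40), (200, 160)                  # hypothetical detection box corners
cv2.rectangle(img, c1, c2, (0, 255, 0), 1)     # box outline
# draw_text returns the image with the label rendered at pos
img = ft.draw_text(image=img,
                   pos=(c1[0] + 4, c1[1] + 4),
                   text='人脸 face',
                   text_size=20,
                   text_color=[255, 255, 255])
cv2.imwrite('label_demo.jpg', img)
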
Example #2
def write(x, img):
    c1 = tuple(x[1:3].int())
    c2 = tuple(x[3:5].int())
    cls = int(x[-1])
    conf = str(x[-2].data)
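    # NOTE: this pulls the confidence out of the tensor's string repr (e.g. "tensor(0.1234)"),
    # so the [7:11] slice below is fragile; float(x[-2]) would read the value directly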
    label = "{} {}%".format(classes[cls], float(conf[7:11])*100)
    color = random.choice(colors)
    cv2.rectangle(img, c1, c2, color, 1)
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
    #c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    #cv2.rectangle(img, c1, c2,color, -1)
    #cv2.rectangle(img, c1, c2, color, -1)
    ft = ft2.put_chinese_text('msyh.ttf')
    ft.draw_text(image=img, pos=(c1[0], c1[1] + t_size[1] - 7), text=label, text_size=15, text_color=[255, 255, 255])

    #cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [225,255,255], 1);
    return img
Example #3
def write(x, img):
    c1 = tuple(x[1:3].int())
    c2 = tuple(x[3:5].int())
    cls = int(x[-1])
    #label = "{0}".format(classes[cls])
    label = classes[cls]
    color = random.choice(colors)
    cv2.rectangle(img, c1, c2, color, 1)
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2, color, -1)
    ft = ft2.put_chinese_text('msyh.ttf')
    ft.draw_text(image=img,
                 pos=(c1[0], c1[1] + t_size[1] - 7),
                 text=label,
                 text_size=15,
                 text_color=[255, 255, 255])
    #cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);
    return img
Example #4
    def data_init(self):
        config = Read_config()
        self.my_config = config.my_config
        self.font_path = "font//msyh.ttc"
        self.font_size = 18
        self.font_color = (255, 255, 255)
        self.font_position = {
            "start_x": 800,
            "start_y": 10,
            "add_x": 0,
            "add_y": 30
        }

        self.video_num = 4
        self.video_path = "video/"
        self.video_x_num = 2
        self.video_y_num = 2
        self.video_width = 500
        self.video_height = 300
        self.ft = ft2.put_chinese_text(self.font_path)
        self.detector = [Detector(self.video_width, self.video_height, self.my_config) for i in range(self.video_num)]
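Example #5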
def Run(self, input_dir, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    self.ft = ft2.put_chinese_text(os.path.join(ft_path, 'msyh.ttf'))
    image_list = glob.glob(os.path.join(input_dir, "*.jpg"))
    for start in range(0, len(image_list), batch_size):
        try:
            cur_image_list = image_list[
                start:min(start + batch_size, len(image_list))]
            l = [cv2.imread(image) for image in cur_image_list]
            t0 = time.time()  # renamed from "start" to avoid shadowing the loop index
            ocr_result = self.ocr(l)
            print('cost', (time.time() - t0))
            for image, result, name in zip(l, ocr_result['result'],
                                           cur_image_list):
                cur_image = numpy.zeros(image.shape)
                print(result['rect'])
                for rect in result['rect']:
                    # rect[0:2]/rect[2:4] are the box corners, rect[5] the recognized text
                    if not rect[5]: continue
                    cur_image = cv2.rectangle(cur_image, tuple(rect[0:2]),
                                              tuple(rect[2:4]),
                                              (0, 255, 0), 1)
                    cur_image = self.ft.draw_text(cur_image, rect[0:2],
                                                  rect[5], 15, (0, 255, 0))
                new_name = os.path.join(
                    output_dir,
                    os.path.splitext(os.path.basename(name))[0] +
                    "_debug.jpg")
                cur_image = numpy.hstack((image, cur_image))
                cv2.imwrite(new_name, cur_image)
                json_name = os.path.splitext(new_name)[0] + ".json"
                f = open(json_name, "w")  # json.dump requires a text-mode handle, not "wb"
                json.dump(ocr_result['result'], f)
                f.close()
        except Exception as e:
            print(e)
Example #6
# -*- coding: UTF-8 -*-
import face_recognition
import cv2
import os
import ft2
# Chinese text support: load the Microsoft YaHei font
ft = ft2.put_chinese_text('msyh.ttf')
# Open camera # 0 (the default)
video_capture = cv2.VideoCapture(0)
# Load the reference face images and encode them.
basefacefilespath = "images"  # the images folder holds frontal photos of the people to recognize; the file name is the person's name and is shown in the results
baseface_titles = []  # list of image names
baseface_face_encodings = []  # face encodings used for recognition
# Read the face resources
for fn in os.listdir(basefacefilespath):  # fn: face image file name
    baseface_face_encodings.append(
        face_recognition.face_encodings(
            face_recognition.load_image_file(basefacefilespath + "/" + fn))[0])
    fn = fn[:(len(fn) - 4)]
    baseface_titles.append(fn)
while True:
    # Grab one frame of video
    ret, frame = video_capture.read()
    # Detect faces and compute the encoding of every face in the frame
    face_locations = face_recognition.face_locations(frame)
    face_encodings = face_recognition.face_encodings(frame, face_locations)
    # Iterate over every face encoding in the frame
    for (top, right, bottom,
         left), face_encoding in zip(face_locations, face_encodings):
        # Does it match any entry in baseface_face_encodings?
        for i, v in enumerate(baseface_face_encodings):
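Example #7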
    def show_camera(self):  # show the camera feed and run face recognition
        print('show_camera is open ')
        if self.video_btn == 0:  # video_btn was set to 0 earlier; face recognition flips it, so recognition and plain camera display stay separate
            if self.source == "rtsp://*****:*****@192.168.1.61/Streaming/Channels/1":
                self.pushButton_8.setText(u'关闭摄像头1')
            else:
                self.pushButton_6.setText(u'关闭摄像头2')
            while (self.cap.isOpened()):

                ret, self.image = self.cap.read()
                QApplication.processEvents(
                )  # let Qt handle any pending events and return control to the caller, so the UI stays responsive
                show = cv2.resize(self.image, (800, 494))
                show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)  # display the original image
                # An image read by OpenCV cannot be shown in a QLabel directly; it must be converted to a QImage: QImage(uchar * data, int width,
                self.showImage = QImage(show.data, show.shape[1],
                                        show.shape[0], QImage.Format_RGB888)
                self.label_5.setPixmap(QPixmap.fromImage(self.showImage))

            # the last frame would linger on the label, so clear label_5
            self.label_5.clear()
            print('打开摄像头时间', time() - self.t3)

        elif self.video_btn == 1:
            # this block loads the information of each person in the photo folder
            filepath = 'photo'
            filename_list = listdir(filepath)
            known_face_names = []
            known_face_encodings = []
            a = 0
            print('2')
            for filename in filename_list:  # read each entry in the list in turn
                a += 1
                QApplication.processEvents()
                if filename.endswith('jpg'):  # match the 'jpg' extension
                    known_face_names.append(
                        filename[:-4])  # strip the trailing ".jpg" to get the person's name
                    file_str = 'photo\\' + filename
                    a_images = face_recognition.load_image_file(file_str)
                    print(file_str)
                    a_face_encoding = face_recognition.face_encodings(
                        a_images)[0]
                    known_face_encodings.append(a_face_encoding)
            print(known_face_names, a)
            # known_face_names holds each person's name; known_face_encodings holds the face encoding extracted for each person

            face_locations = []
            face_encodings = []
            face_names = []
            process_this_frame = True
            while (self.cap.isOpened()):
                ret, frame = self.cap.read()
                QApplication.processEvents()
                # shrink the camera frame: a smaller image means less computation
                small_frame = cv2.resize(frame, (0, 0), fx=0.33, fy=0.33)
                # print('3 is running')
                # OpenCV images are BGR, but face_recognition expects RGB, so convert
                rgb_small_frame = small_frame[:, :, ::-1]
                #print('4 is running')
                if process_this_frame:
                    QApplication.processEvents()
                    # compare encodings to decide whether this is the same person: True if it matches, False otherwise
                    face_locations = face_recognition.face_locations(
                        rgb_small_frame)
                    face_encodings = face_recognition.face_encodings(
                        rgb_small_frame, face_locations)
                    face_names = []
                    # print('5 is  running')
                    for face_encoding in face_encodings:
                        # defaults to "Unknown"
                        matches = face_recognition.compare_faces(
                            known_face_encodings,
                            face_encoding,
                            tolerance=0.42)
                        # too low a tolerance makes faces hard to match, too high causes mix-ups; the default tolerance is 0.6
                        #print(matches)
                        name = "Unknown"

                        if True in matches:
                            first_match_index = matches.index(True)
                            name = known_face_names[first_match_index]

                        face_names.append(name)
                process_this_frame = not process_this_frame
                # show the captured faces
                self.set_name = set(face_names)
                self.set_names = tuple(
                    self.set_name
                )  # turn the names into a set to drop duplicates, then into a tuple for the display and record-keeping below
                voice_syn = str()
                print(self.set_names)  # the people recognized in the frame, collected in set_names
                self.write_record()  # write the names to the Excel record
                #self.video_announce()
                for (top, right, bottom,
                     left), name in zip(face_locations, face_names):
                    # the detection ran on a frame scaled down to 1/3 size, so scale the face locations back up
                    top *= 3
                    right *= 3
                    bottom *= 3
                    left *= 3
                    # bounding box
                    cv2.rectangle(frame, (left, top), (right, bottom),
                                  (0, 0, 255), 3)
                    ft = ft2.put_chinese_text('msyh.ttf')
                    # load the font via ft2
                    # cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.8, (255, 255, 255), 1)  # usable when the name has no Chinese characters

                    frame = ft.draw_text(frame, (left + 10, bottom), name, 25,
                                         (0, 0, 255))
                    print('face recognition is running')
                    #def draw_text(self, image, pos, text, text_size, text_color)

                self.show_picture()  # call the function that shows the detailed information

                show_video = cv2.resize(frame, (800, 494))
                show_video = cv2.cvtColor(show_video,
                                          cv2.COLOR_BGR2RGB)  # display the original image
                # An image read by OpenCV cannot be shown in a QLabel directly; it must be converted to a QImage: QImage(uchar * data, int width,
                self.showImage = QImage(show_video.data, show_video.shape[1],
                                        show_video.shape[0],
                                        QImage.Format_RGB888)
                self.label_5.setPixmap(QPixmap.fromImage(self.showImage))
            print('打开人脸识别所需要的时间', time() - self.t2)
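Example #8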
        if not q_frame_main2face_detect.empty():  # queue is not empty
            frame = q_frame_main2face_detect.get()  # get() shrinks the queue by one
            # face detection with OpenCV
            face_bbox = face_opencv_detect(frame)
            if len(face_bbox) > 0:
                q_frame_face_detect2face_recognize.put(frame)
                q_face_bbox.put(face_bbox)
                print('detected')
            else:
                print('not detected')
        else:
            time.sleep(0.02)  # queue is empty, sleep 20 ms


import ft2
ft = ft2.put_chinese_text('simhei.ttf')
id_more = []
count = 0
# create the worker processes
p1 = multiprocessing.Process(target=Face_detect)
p2 = Face_recongnize()
p1.daemon = True  # daemon processes are force-terminated when the main process exits
p2.daemon = True

p2.start()  # start the workers
p1.start()

while (1):
    start = time.time()
    ret, frame = videoCapture.read()
Example #9
                    input=True,
                    frames_per_buffer=CHUNK,
                    input_device_index=4)
    camera1 = cv2.VideoCapture(0)  # capture camera
    #camera2 = cv2.VideoCapture(1)
    camera2 = camera1
    cv2.namedWindow("Camera", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("Camera", cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    video_default = cv2.VideoCapture(VIDEO_DEFAULT)
    video_60 = cv2.VideoCapture(VIDEO_60)

    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 1
    ft = ft2.put_chinese_text('wqy-zenhei.ttc')

    max_volume = 0

    while (True):
        data = stream.read(CHUNK, exception_on_overflow=False)
        volume = volume_of(data)
        if volume > max_volume:
            max_volume = volume
            #print max_volume

        if volume < SWITCH:
            playing_video = video_default
        else:
            playing_video = video_60
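Example #10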
def __init__(self):
    self.ft = ft2.put_chinese_text(os.path.join(ft_path, 'msyh.ttf'))
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
Example #11
        right = right * 2 + 10
        top = top * 2 - 10
        bottom = bottom * 2 + 10
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # draw a filled label with the name just below the box
        cv2.rectangle(frame, (left, bottom + 35), (right, bottom), (0, 0, 255),
                      cv2.FILLED)

        line = name

        color = (255, 255, 255)
        pos = (left + 40, bottom + 5)
        text_size = 25

        # ft = put_chinese_text('wqy-zenhei.ttc')
        ft = put_chinese_text(
            '/home/nvidia/.local/share/fonts/KaiTi_GB2312.ttf')
        frame = ft.draw_text(frame, pos, line, text_size, color)

        #font = cv2.FONT_HERSHEY_DUPLEX
        #cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)
    # show the resulting image
    cv2.imshow('人脸识别系统', frame)
    # press q to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # p pauses, c resumes
    elif cv2.waitKey(1) & 0xFF == ord('p'):
        while True:
            if cv2.waitKey(1) & 0xFF == ord('c'):
                break