Example #1
    def __init__(self):
        # Load the emotion-classification and face-detection models
        self.emotion_classifier = load_model('models/emotion_model.hdf5')
        self.face_cascade = cv2.CascadeClassifier(
            'models/haarcascade_frontalface_default.xml')
        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)
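These fragments omit their imports. A minimal header they appear to assume (the oldcare.facial module path comes from Example #9; the Keras-style load_model is an assumption):

import time
from math import sqrt

import cv2
import imutils
import numpy as np
from keras.models import load_model  # assumption: Keras .hdf5 models
from oldcare.facial import FaceUtil  # project-local, see Example #9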
Example #2
class Stranger:
    def __init__(self):
        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)

    def facedetect(self, picpath, picname, timestamp):

        pic = cv2.imread(picpath)
        frame = cv2.flip(pic, 1)
        frame = imutils.resize(frame, width=600)
        face_location_list, names = self.faceutil.get_face_location_and_name(
            frame)

        for ((left, top, right, bottom), name) in zip(face_location_list,
                                                      names):
            # display label and bounding box rectangle on the output frame
            if name == 'Unknown':
                print('A stranger has appeared!')
                event = {
                    'event_type': 2,
                    'event_date': time.strftime("%Y-%m-%d %H:%M:%S",
                                                time.localtime(timestamp)),
                    'event_location': 'BJTU',
                    'event_desc': 'A stranger arrived',
                    'oldperson_id': 0,
                    'pic_name': picname
                }
                return event

    def detectID(self, picpath):

        pic = cv2.imread(picpath)
        frame = cv2.flip(pic, 1)
        frame = imutils.resize(frame, width=600)
        face_location_list, names = self.faceutil.get_face_location_and_name(
            frame)
        person_id = None  # ID of the recognized old person, if any
        if len(face_location_list) == 1:
            if 'old_1' in names:
                person_id = 0
            elif 'old_2' in names:
                person_id = 1
            return person_id
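A minimal usage sketch for the class above; the snapshot path and file name are hypothetical:

stranger = Stranger()
event = stranger.facedetect('snapshots/frame_001.jpg', 'frame_001.jpg',
                            time.time())
if event is not None:
    print(event['event_desc'], event['event_date'])

Track, Interact, and Forbidden in the examples below are driven the same way: pass a picture path, a picture name, and a timestamp, then handle the returned event if one is produced.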
Example #3
class Track:
    def __init__(self):

        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)
        self.turnArea = (140, 120, 500, 370)  # turn area: (left, top, right, bottom)

    def test_track(self, picpath, picname, timestamp):
        pic = cv2.imread(picpath)
        frame = cv2.flip(pic, 1)
        frame = imutils.resize(frame, width=600)
        face_location_list, names = self.faceutil.get_face_location_and_name(
            frame)
        if 'Unknown' in names:
            # assumes the first detected face is the unknown one
            (left, top, right, bottom) = face_location_list[0]
            if left < self.turnArea[0]:
                print("Turn the camera right")
                event = {
                    'event_type': 4,
                    'event_date': time.strftime("%Y-%m-%d %H:%M:%S",
                                                time.localtime(timestamp)),
                    'event_location': 'in the courtyard',
                    'event_desc': 'Turn the camera right',
                    'oldperson_id': 0,
                    'pic_name': picname
                }
                return event
            elif right > self.turnArea[2]:  # right edge of the turn area
                print("Turn the camera left")
                event = {
                    'event_type': 4,
                    'event_date': time.strftime("%Y-%m-%d %H:%M:%S",
                                                time.localtime(timestamp)),
                    'event_location': 'in the courtyard',
                    'event_desc': 'Turn the camera left',
                    'oldperson_id': 0,
                    'pic_name': picname
                }
                return event
            else:
                print("Stranger detected within the normal area")
Example #4
class Interact:
    def __init__(self):
        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)

    def test_distance(self, picpath, picname, timestamp):
        pic = cv2.imread(picpath)
        frame = cv2.flip(pic, 1)
        frame = imutils.resize(frame, width=600)
        face_location_list, names = self.faceutil.get_face_location_and_name(
            frame)
        if len(face_location_list) == 2:
            # centers of the two detected faces
            (l1, t1, r1, b1), (l2, t2, r2, b2) = face_location_list
            (x1, y1) = ((l1 + r1) / 2, (t1 + b1) / 2)
            (x2, y2) = ((l2 + r2) / 2, (t2 + b2) / 2)
            pixel_distance = sqrt((x1 - x2)**2 + (y1 - y2)**2)
            # assume a real face is about 20 cm wide: cm per pixel
            measure = 20 / abs(face_location_list[0][0] -
                               face_location_list[0][2])
            actual_distance = pixel_distance * measure
            print("actual_distance", actual_distance)
            if actual_distance <= 50:
                event = {
                    'event_type': 1,
                    'event_date': time.strftime("%Y-%m-%d %H:%M:%S",
                                                time.localtime(timestamp)),
                    'event_location': 'BJTU',
                    'event_desc': 'A volunteer and an old person are interacting!',
                    'oldperson_id': 0,
                    'pic_name': picname
                }
                print('The two of them are chatting')
                return event
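A worked check of the calibration above, with hypothetical numbers: an 80 px wide face box gives 20 / 80 = 0.25 cm per pixel, so a 160 px center-to-center distance maps to 40 cm, under the 50 cm threshold.

face_width_px = 80            # hypothetical face-box width
measure = 20 / face_width_px  # 0.25 cm per pixel
assert 160 * measure == 40.0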
Example #5
class Forbidden:
    def __init__(self):
        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)
        self.forbidden = (190, 140, 430, 340)  # safe area: (left, top, right, bottom)
        print(self.forbidden)

    def ifInForbidden(self, left, top, right, bottom, picname, timestamp):
        # fires when the face box is not entirely inside the safe area
        if not (left > self.forbidden[0] and top > self.forbidden[1]
                and right < self.forbidden[2] and bottom < self.forbidden[3]):
            print("Someone has intruded into the safe area")
            event = {
                'event_type': 4,
                'event_date': time.strftime("%Y-%m-%d %H:%M:%S",
                                            time.localtime(timestamp)),
                'event_location': 'BJTU',
                'event_desc': 'Someone has intruded into the safe area',
                'oldperson_id': 0,
                'pic_name': picname
            }
            return event

    def testForbidden(self, picpath, picname, timestamp):

        pic = cv2.imread(picpath)
        frame = cv2.flip(pic, 1)
        frame = cv2.resize(frame, (640, 480))  # detect at 640x480
        face_location_list, names = self.faceutil.get_face_location_and_name(
            frame)
        events = []
        for ((left, top, right, bottom), name) in zip(face_location_list,
                                                      names):
            # collect an event for every face outside the safe area
            event = self.ifInForbidden(left, top, right, bottom, picname,
                                       timestamp)
            if event is not None:
                events.append(event)
        return events
Example #6
# Global constants
FACIAL_EXPRESSION_TARGET_WIDTH = 28
FACIAL_EXPRESSION_TARGET_HEIGHT = 28

# load the face detector cascade and smile detector CNN
# (model_path is defined earlier in the full script, not shown here)
model = load_model(model_path)

# if a video path was not supplied, grab the reference to the webcam
if not input_video:
    camera = cv2.VideoCapture(0)
    time.sleep(2)
else:
    camera = cv2.VideoCapture(input_video)

faceutil = FaceUtil()

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame, then we
    # have reached the end of the video
    if input_video and not grabbed:
        break

    if not input_video:
        frame = cv2.flip(frame, 1)

    # resize the frame, convert it to grayscale, and then clone the
    # original frame so we can draw on it later in the program
Example #7
ap.add_argument("-f", "--filename", required=False, default='',
                help="path to the optional input video file")
args = vars(ap.parse_args())

# Global variables
facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
input_video = args['filename']

# Initialize the camera
if not input_video:
    vs = cv2.VideoCapture(0)
    time.sleep(2)
else:
    vs = cv2.VideoCapture(input_video)

# Initialize the face recognition model
faceutil = FaceUtil(facial_recognition_model_path)
# Main loop
while True:
    # grab the current frame
    (grabbed, frame) = vs.read()
    # if we are viewing a video and we did not grab a frame, then we
    # have reached the end of the video
    if input_video and not grabbed:
        break
    if not input_video:
        frame = cv2.flip(frame, 1)
    # resize the frame, convert it to grayscale, and then clone the
    # original frame so we can draw on it later in the program
    frame = imutils.resize(frame, width=600)
    face_location_list, names = faceutil.get_face_location_and_name(frame)
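    # The snippet is cut off here. A sketch of a typical continuation,
    # drawing each detected box and name on the frame (the drawing calls
    # are our choice, not taken from the original):
    for ((left, top, right, bottom), name) in zip(face_location_list, names):
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.putText(frame, name, (left, top - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xff == 27:  # press 'ESC' to exit
        break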
Example #8
]
action_map = {
    'blink': '请眨眼',
    'open_mouth': '请张嘴',
    'smile': '请笑一笑',
    'rise_head': '请抬头',
    'bow_head': '请低头',
    'look_left': '请看左边',
    'look_right': '请看右边'
}
# Set up the camera
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width (3 == cv2.CAP_PROP_FRAME_WIDTH)
cam.set(4, 480)  # set video height (4 == cv2.CAP_PROP_FRAME_HEIGHT)

faceutil = FaceUtil()

counter = 0
while True:
    counter += 1
    _, image = cam.read()
    if counter <= 10:  # skip the first 10 frames
        continue
    image = cv2.flip(image, 1)

    # error, start_time, and limit_time are set elsewhere in the full script
    if error == 1:
        end_time = time.time()
        difference = end_time - start_time
        print(difference)
        if difference >= limit_time:
            error = 0
Example #9
'''
Train the face recognition model
'''

from imutils import paths
from oldcare.facial import FaceUtil

# global variable
dataset_path = 'dataset/'
output_encoding_file_path = 'models/new_face_recognition_hog.pickle'
# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
image_paths = list(paths.list_images(dataset_path))

if len(image_paths) == 0:
    print('[ERROR] no images to train.')
else:
    faceutil = FaceUtil()
    print("[INFO] training face embeddings...")
    faceutil.save_embeddings(image_paths, output_encoding_file_path)
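save_embeddings walks the image paths it is given, so the script assumes one subdirectory per person under dataset/. A sketch of the assumed layout (the names are hypothetical, chosen to match the labels in the other examples):

# dataset/
#     old_1/
#         001.jpg
#         002.jpg
#     old_2/
#         001.jpg
#     volunteer/
#         001.jpg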
Example #10
ANGLE = 20
ACTUAL_DISTANCE_LIMIT = 100  # cm

# Build the ID->name map and the ID->role-type map
id_card_to_name, id_card_to_type = fileassistant.get_people_info(
    people_info_path)

# Initialize the camera
if not input_video:
    vs = cv2.VideoCapture(0)
    time.sleep(2)
else:
    vs = cv2.VideoCapture(input_video)

# Load the model
faceutil = FaceUtil(model_path)

print('[INFO] Start checking whether volunteers and old people interact...')
# Main loop
counter = 0
while True:
    counter += 1
    camera_turned = 0
    # grab the current frame
    (grabbed, frame) = vs.read()

    # if we are viewing a video and we did not grab a frame, then we
    # have reached the end of the video
    if input_video and not grabbed:
        break
Example #11
    def __init__(self):

        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)
        self.turnArea = (140, 120, 500, 370)
Example #12
    def __init__(self):
        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)
Example #13
class EmotionEstimate:
    def __init__(self):
        # Load the models
        self.emotion_classifier = load_model('models/emotion_model.hdf5')
        self.face_cascade = cv2.CascadeClassifier(
            'models/haarcascade_frontalface_default.xml')
        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)

    # Image preprocessing: scale pixels to [0, 1], and optionally (v2)
    # shift and scale them to [-1, 1]
    def preprocess_input(self, x, v2=True):
        x = x.astype('float32')
        x = x / 255.0
        if v2:
            x = x - 0.5
            x = x * 2.0
        return x

    def apply_offsets(self, face_coordinates, offsets):
        x, y, width, height = face_coordinates
        x_off, y_off = offsets
        return x - x_off, x + width + x_off, y - y_off, y + height + y_off

    def predict_emotion(self, oldperson_id, test_img_path, picname,
                        timestamp):
        pic = cv2.imread(test_img_path)
        frame = cv2.flip(pic, 1)
        frame = imutils.resize(frame, width=600)
        face_location_list, names = self.faceutil.get_face_location_and_name(
            frame)

        if len(face_location_list) == 1:

            # Emotion label dictionary
            emotion_labels = {
                0: 'angry',
                1: 'disgust',
                2: 'fear',
                3: 'happy',
                4: 'sad',
                5: 'surprise',
                6: 'neutral'
            }

            emotion_offsets = (20, 40)

            emotion_target_size = self.emotion_classifier.input_shape[1:3]

            # Read the original image
            bgr_image = cv2.imread(test_img_path)

            # Convert to grayscale
            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)

            # Get the list of face coordinates
            faces = self.face_cascade.detectMultiScale(
                gray_image,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE)

            # Process each detected face
            for face_coordinates in faces:
                x1, x2, y1, y2 = self.apply_offsets(face_coordinates,
                                                    emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    gray_face = cv2.resize(gray_face, emotion_target_size)
                except cv2.error:
                    continue

                gray_face = self.preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)

                # Get the model's prediction
                emotion_prediction = self.emotion_classifier.predict(gray_face)
                emotion_label_arg = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label_arg]
                print(emotion_text)

                # Compose the event description from all seven class
                # probabilities
                desc = ','.join(str(p) for p in emotion_prediction[0])

                event = {
                    'event_type': 0,
                    'event_date': time.strftime("%Y-%m-%d %H:%M:%S",
                                                time.localtime(timestamp)),
                    'event_location': 'BJTU',
                    'event_desc': desc,
                    'oldperson_id': oldperson_id,
                    'pic_name': picname
                }

                # Return the event
                return event
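A quick standalone check of preprocess_input in v2 mode: pixel values 0, 127.5, and 255 map to -1, 0, and 1.

import numpy as np

x = np.array([0, 127.5, 255]).astype('float32')
x = x / 255.0
x = (x - 0.5) * 2.0
print(x)  # [-1.  0.  1.]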
Example #14
    def __init__(self):
        self.model = self.model_loader()
        facial_recognition_model_path = 'models/face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)
Example #15
def push_frame():
    # Stream-push worker
    facial_recognition_model_path = 'info/face_recognition_hog.pickle'  # face detection
    facial_expression_model_path = 'models/face_expression_seven_class.hdf5'  # emotion analysis

    output_stranger_path = 'supervision/strangers'
    output_smile_path = 'supervision/smile'

    people_info_path = 'info/people_info.csv'
    facial_expression_info_path = 'info/facial_expression_info_seven_class.csv'

    # Initialize the face recognition and expression models
    faceutil = FaceUtil(facial_recognition_model_path)
    facial_expression_model = load_model(facial_expression_model_path,
                                         compile=False)

    # your python path
    python_path = '/root/anaconda3/envs/tensorflow/bin/python'

    # Global constants
    FACIAL_EXPRESSION_TARGET_WIDTH = 48
    FACIAL_EXPRESSION_TARGET_HEIGHT = 48

    ANGLE = 20

    # Build the ID->name map, the ID->role-type map,
    # and the expression-ID->expression-name map
    id_card_to_name, id_card_to_type = fileassistant.get_people_info(
        people_info_path)
    facial_expression_id_to_name = fileassistant.get_facial_expression_info(
        facial_expression_info_path)

    # Stranger-detection timing control
    strangers_timing = 0  # timing flag
    strangers_start_time = 0  # start time
    strangers_limit_time = 2  # if >= 2 seconds, then he/she is a stranger.

    # Smile-detection timing control
    facial_expression_timing = 0  # timing flag
    facial_expression_start_time = 0  # start time
    facial_expression_limit_time = 2  # if >= 2 seconds, he/she is smiling

    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"

    # Wait until `command` has been set by the other thread
    while True:
        print('command length', len(command))
        if len(command) > 0:
            # open the pipe to the streaming subprocess
            p = sp.Popen(command, stdin=sp.PIPE)
            break

    while True:
        # note: this reloads models that are already loaded above
        faceutil = FaceUtil(facial_recognition_model_path)
        facial_expression_model = load_model(facial_expression_model_path,
                                             compile=False)
        if not frame_queue.empty():
            counter = 0

            while True:
                counter += 1
                image = frame_queue.get()

                image = cv2.flip(image, 1)

                # downscale to speed up recognition
                image = imutils.resize(image,
                                       width=VIDEO_WIDTH,
                                       height=VIDEO_HEIGHT)

                # grayscale image for expression recognition
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

                face_location_list, names = faceutil.get_face_location_and_name(
                    image)

                # Compute the 1/4 and 3/4 horizontal positions and draw
                # vertical guide lines at each
                one_fourth_image_center = (int(VIDEO_WIDTH / 4),
                                           int(VIDEO_HEIGHT / 4))
                three_fourth_image_center = (int(VIDEO_WIDTH / 4 * 3),
                                             int(VIDEO_HEIGHT / 4 * 3))

                cv2.line(image, (one_fourth_image_center[0], 0),
                         (one_fourth_image_center[0], VIDEO_HEIGHT),
                         (0, 255, 255), 1)
                cv2.line(image, (three_fourth_image_center[0], 0),
                         (three_fourth_image_center[0], VIDEO_HEIGHT),
                         (0, 255, 255), 1)

                # Process each recognized face
                for ((left, top, right, bottom),
                     name) in zip(face_location_list, names):

                    # Draw a colored box around the face by person type
                    rectangle_color = (0, 0, 255)
                    face_type = id_card_to_type.get(name)  # None for 'Unknown'
                    if face_type == 'old_people':
                        rectangle_color = (0, 0, 128)
                    elif face_type == 'employee':
                        rectangle_color = (255, 0, 0)
                    elif face_type == 'volunteer':
                        rectangle_color = (0, 255, 0)
                    cv2.rectangle(image, (left, top), (right, bottom),
                                  rectangle_color, 2)

                    # Stranger-detection logic
                    if 'Unknown' in names:  # alert
                        if strangers_timing == 0:  # just start timing
                            strangers_timing = 1
                            strangers_start_time = time.time()
                        else:  # already started timing
                            strangers_end_time = time.time()
                            difference = strangers_end_time - strangers_start_time

                            current_time = time.strftime(
                                '%Y-%m-%d %H:%M:%S',
                                time.localtime(time.time()))

                            if difference < strangers_limit_time:
                                print('[INFO] %s, room, stranger seen for '
                                      'only %.1f s. Ignored.' %
                                      (current_time, difference))
                            else:  # a stranger has appeared
                                event_desc = 'Stranger detected!!!'
                                event_location = 'room'
                                print('[EVENT] %s, room, stranger detected!!!'
                                      % current_time)
                                # build the filename once so the saved
                                # snapshot and the database record match
                                pic_path = os.path.join(
                                    output_stranger_path, 'snapshot_%s.jpg' %
                                    (time.strftime('%Y%m%d_%H%M%S')))
                                cv2.imwrite(pic_path, image)  # snapshot
                                # insert into database
                                command1 = ('%s inserting.py '
                                            '--event_desc "%s" '
                                            '--event_type 2 '
                                            '--event_location "%s" '
                                            '--pic_path "%s"' %
                                            (python_path, event_desc,
                                             event_location, pic_path))
                                p2 = subprocess.Popen(command1, shell=True)

                                # Start tracking the stranger
                                unknown_face_center = (int(
                                    (right + left) / 2), int(
                                        (top + bottom) / 2))

                                cv2.circle(image, (unknown_face_center[0],
                                                   unknown_face_center[1]), 4,
                                           (0, 255, 0), -1)

                                direction = ''
                                # face locates too left, servo need to turn right,
                                # so that face turn right as well
                                if unknown_face_center[
                                        0] < one_fourth_image_center[0]:
                                    direction = 'right'
                                elif unknown_face_center[
                                        0] > three_fourth_image_center[0]:
                                    direction = 'left'

                                # adjust to servo
                                if direction:
                                    print('%d - camera needs to turn %s '
                                          'by %d degrees' %
                                          (counter, direction, ANGLE))

                    else:  # everything is ok
                        strangers_timing = 0

                    # Expression-detection logic:
                    # only for known faces belonging to old people
                    if name != 'Unknown' and id_card_to_type[
                            name] == 'old_people':
                        roi = gray[top:bottom, left:right]
                        roi = cv2.resize(roi,
                                         (FACIAL_EXPRESSION_TARGET_WIDTH,
                                          FACIAL_EXPRESSION_TARGET_HEIGHT))
                        roi = roi.astype("float") / 255.0
                        roi = img_to_array(roi)
                        roi = np.expand_dims(roi, axis=0)

                        # determine facial expression
                        emotions = [
                            'Angry', 'Disgust', 'Fear', 'Happy', 'Neutral',
                            'Sad', 'Surprise'
                        ]

                        emotion_value_list = facial_expression_model.predict(
                            roi)[0]

                        facial_expression_label = emotions[np.argmax(
                            emotion_value_list)]

                        if facial_expression_label == 'Happy':  # alert
                            if facial_expression_timing == 0:  # just start timing
                                facial_expression_timing = 1
                                facial_expression_start_time = time.time()
                            else:  # already started timing
                                facial_expression_end_time = time.time()
                                difference = facial_expression_end_time - facial_expression_start_time

                                current_time = time.strftime(
                                    '%Y-%m-%d %H:%M:%S',
                                    time.localtime(time.time()))
                                if difference < facial_expression_limit_time:
                                    print('[INFO] %s, room, %s smiled for '
                                          'only %.1f s. Ignored.' %
                                          (current_time, id_card_to_name[name],
                                           difference))
                                else:  # he/she is really smiling
                                    event_desc = '%s is smiling' % (
                                        id_card_to_name[name])
                                    event_location = 'room'
                                    print('[EVENT] %s, room, %s is smiling.' %
                                          (current_time,
                                           id_card_to_name[name]))
                                    pic_path = os.path.join(
                                        output_smile_path, 'snapshot_%s.jpg' %
                                        (time.strftime('%Y%m%d_%H%M%S')))
                                    cv2.imwrite(pic_path, image)  # snapshot

                                    # insert into database
                                    command1 = ('%s inserting.py '
                                                '--event_desc "%s" '
                                                '--event_type 0 '
                                                '--event_location "%s" '
                                                '--old_people_id %d '
                                                '--pic_path "%s"' %
                                                (python_path, event_desc,
                                                 event_location, int(name),
                                                 pic_path))
                                    p2 = subprocess.Popen(command1, shell=True)

                        else:  # everything is ok
                            facial_expression_timing = 0

                    else:  # strangers: skip expression detection
                        facial_expression_label = ''

                    # After recognition, draw the name and expression on the
                    # frame (via PIL, so CJK text renders correctly)
                    img_PIL = Image.fromarray(
                        cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

                    draw = ImageDraw.Draw(img_PIL)
                    person_name = id_card_to_name.get(name, name)
                    if facial_expression_label:
                        final_label = (person_name + ': ' +
                                       facial_expression_id_to_name[
                                           facial_expression_label])
                    else:
                        final_label = person_name
                    draw.text((left, top - 30),
                              final_label,
                              font=ImageFont.truetype('NotoSansCJK-Black.ttc',
                                                      40),
                              fill=(255, 0, 0))  # linux

                    # Convert back to OpenCV format
                    image = cv2.cvtColor(np.asarray(img_PIL),
                                         cv2.COLOR_RGB2BGR)

                # show our detected faces along with smiling/not smiling labels
                p.stdin.write(image.tobytes())  # raw BGR bytes for the pipe
                cv2.imshow(
                    "Checking Strangers and Old People's Face Expression",
                    image)

                # Press 'ESC' for exiting video
                k = cv2.waitKey(1) & 0xff
                if k == 27:
                    break
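command is built elsewhere in the full script; since the pipe is fed raw BGR frames, it is presumably an ffmpeg push command. A sketch under that assumption (the RTMP URL and frame rate are hypothetical):

command = [
    'ffmpeg', '-y',
    '-f', 'rawvideo', '-vcodec', 'rawvideo', '-pix_fmt', 'bgr24',
    '-s', '%dx%d' % (VIDEO_WIDTH, VIDEO_HEIGHT), '-r', '25',
    '-i', '-',
    '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
    '-f', 'flv', 'rtmp://localhost/live/stream'  # hypothetical RTMP target
]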
Example #16
    def __init__(self):
        facial_recognition_model_path = 'models/new_face_recognition_hog.pickle'
        self.faceutil = FaceUtil(facial_recognition_model_path)
        self.forbidden = (190, 140, 430, 340)  # safe area: (left, top, right, bottom)
        print(self.forbidden)