Example 1
                        else:
                            counter = 0
                            error = 0

                    if counter > 3:
                        font = ImageFont.truetype("simsun.ttc", 30, index=1)
                        img_rd = Image.fromarray(cv2.cvtColor(datum.cvOutputData, cv2.COLOR_BGR2RGB))
                        draw = ImageDraw.Draw(img_rd)
                        draw.text((10, 10), text="Fall Detected", font=font,
                                  fill=(255, 0, 0))
                        img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
                        time_snap = datetime.datetime.now()
                        cv2.imwrite('fall_detection' + str(time_snap).replace(':','') + '.jpg', frame)
                        if (datetime.datetime.now() - pre).total_seconds() > 5:
                            # pass `post` as the callable so it runs in the thread,
                            # rather than calling it inline and handing Thread its result
                            t = threading.Thread(
                                target=post,
                                kwargs={'event': 3,
                                        'imagePath': 'fall_detection' + str(time_snap).replace(':', '') + '.jpg'})
                            t.start()
                            # status = post(event=3, imagePath='fall_detection.jpg')
                            # print("fall")
                            pre = datetime.datetime.now()
                            # print(pre)

                    # update variables
                    frame_start_time = now
                    v0 = v
                    width0 = width.copy()
                    height0 = height.copy()
                # if width > height:
                #     print("alarm")
                firstFrame = None
            except Exception as e:
Example 2
def fall_detect(cnts, defined_min_area, frame, prevX, prevY, xList, yList,
                centerV, alert, pre):
    for c in cnts:
        # exclusion
        if cv2.contourArea(c) < defined_min_area:
            continue

        # outer bounding box
        (x_b, y_b, w_b, h_b) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x_b, y_b), (x_b + w_b, y_b + h_b), (0, 255, 255),
                      2)  # yellow rectangle

        # rotated bounding box
        rect = cv2.minAreaRect(c)  # minimum-area rectangle: (center (x, y), (width, height), rotation angle)
        box = cv2.boxPoints(rect)  # the 4 corner points of the minimum-area rectangle
        box = np.intp(box)  # np.intp replaces the removed np.int0 alias
        cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)

        # averaging line
        rows, cols = frame.shape[:2]
        [vx, vy, x, y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01)
        # fitLine returns one-element arrays; cv2.line needs plain ints
        lefty = int((-x * vy / vx) + y)
        righty = int(((cols - x) * vy / vx) + y)
        cv2.line(frame, (cols - 1, righty), (0, lefty), (255, 0, 0), 2)

        # ellipse
        elps = cv2.fitEllipse(c)  # fit once and unpack, instead of fitting twice
        (x, y), (MA, ma), angle = elps
        cv2.ellipse(frame, elps, (255, 0, 0), 3)  # blue in BGR

        # Aspect Ratio
        AR = MA / ma

        # center speed / acceleration
        # prevX/prevY are re-derived from the queue medians once the queues fill
        prevX = 0.0
        prevY = 0.0
        if xList.full():
            prevX = statistics.median(list(xList.queue))
            prevY = statistics.median(list(yList.queue))
            xList.get()
            yList.get()

        xList.put(elps[0][0])
        yList.put(elps[0][1])
        X = statistics.median(list(xList.queue))
        Y = statistics.median(list(yList.queue))

        if xList.full():
            dx = abs(prevX - X)
            dy = abs(prevY - Y)
            centerV = math.sqrt(dx**2 + dy**2)

        # calculate probabilities for the three features
        pAngle = (abs(angle - 90) - 50) / 10
        pAngle = 1 / (math.exp(pAngle) + 1)

        pAR = 10 * AR - 5
        pAR = 1 / (math.exp(pAR) + 1)

        ACS = centerV - 9
        try:
            ACS = 1 / (math.exp(ACS) + 1)
        except OverflowError:
            ACS = 0.0  # exp() overflowed, so the logistic term vanishes
        # print("pAngle : ", pAngle)
        # print("pAR : ", pAR)
        # print("ACS : ", ACS)

        # confidence
        P_FALL = pAngle * pAR * ACS + 0.5
        # print("P_FALL1 : ", P_FALL)

        P_FALL = 1 / (math.exp(-(P_FALL - 0.65) * 10) + 1)
        # print("P_FALL2: ", P_FALL)

        # status display
        # cv2.putText(frame, "Status : ", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 14)
        # cv2.putText(frame, "Fall Confidence: {0:.2f} ".format(P_FALL), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
        #             (0, 128, 255), 14)
        # cv2.putText(frame, "Angle: {0:.2f}".format(angle), (10, 220),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 14)
        # cv2.putText(frame, "AR: {0:.2f}".format(AR), (10, 237),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 14)
        # cv2.putText(frame, "Center Speed: {0:.2f}".format(centerV), (10, 256),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 14)

        # fall
        if P_FALL > 0.88:
            if alert > 3:
                # print("fall")
                font = ImageFont.truetype("simsun.ttc", 30, index=1)
                img_rd = Image.fromarray(cv2.cvtColor(frame,
                                                      cv2.COLOR_BGR2RGB))
                draw = ImageDraw.Draw(img_rd)
                draw.text((10, 10),
                          text="Fall Detected",
                          font=font,
                          fill=(255, 0, 0))
                frame = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
                time_snap = datetime.datetime.now()
                cv2.imwrite(
                    'fall_detection' + str(time_snap).replace(':', '') +
                    '.jpg', frame)
                if (datetime.datetime.now() - pre).total_seconds() > 5:
                    # pass `post` as the callable so it runs in the thread
                    t = threading.Thread(
                        target=post,
                        kwargs={'event': 3,
                                'imagePath': 'fall_detection' +
                                str(time_snap).replace(':', '') + '.jpg'})
                    t.daemon = False  # setDaemon() is deprecated
                    t.start()
                    pre = datetime.datetime.now()
                # cv2.imwrite("report.jpg", frame)
                # send_alert.SendMail("report.jpg")
                alert = alert + 1
            else:
                alert = alert + 1

    return frame, alert, pre
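
A minimal usage sketch for fall_detect (illustrative, not from the original project): the .full()/.queue/.get() calls above imply that xList and yList are bounded queue.Queue buffers, and the contours are assumed to come from some background-subtraction step, here a stand-in cv2.createBackgroundSubtractorMOG2 rather than the project's own frame_process helpers.

import datetime
import queue

import cv2

xList = queue.Queue(maxsize=10)  # assumed window size for the position medians
yList = queue.Queue(maxsize=10)
alert = 0
pre = datetime.datetime.min  # so the first alert is not rate-limited

cap = cv2.VideoCapture(0)
backsub = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if not ret:
        break
    mask = backsub.apply(frame)
    # OpenCV 4.x returns (contours, hierarchy); 3.x returns three values
    cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    frame, alert, pre = fall_detect(cnts, 3000, frame, 0.0, 0.0,
                                    xList, yList, 0.0, alert, pre)
    cv2.imshow("fall detection", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()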
Example 3
def tick(checkItem, url):
    data = checkItem.check()
    post(data, url)
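
tick is presumably driven on a schedule; a minimal sketch with threading.Timer, assuming checkItem exposes a check() method and that a fixed polling interval is acceptable (the 5-second default is an illustrative choice):

import threading

def schedule_tick(checkItem, url, interval=5.0):
    # re-arm the timer first so a slow check() does not delay the next tick
    threading.Timer(interval, schedule_tick,
                    args=(checkItem, url, interval)).start()
    tick(checkItem, url)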
Example 4
    def process(self, img_rd):
        img_with_name = img_rd
        data_type_three = {
            'old': 0,
            'employee': 0,
            'volunteer': 0,
            'stranger': 0
        }

        # load all stored faces
        if self.get_face_database():
            cv2.putText(img_rd, "Faces: " + str(self.faces_cnt), (20, 40), cv2.FONT_ITALIC, 0.8, (0, 255, 0), 1,
                        cv2.LINE_AA)
            # cv2.putText(img_rd, str(datetime.now()), (120, 40), cv2.FONT_ITALIC, 0.8, (0, 255, 0), 1,
            #             cv2.LINE_AA)
            self.features_camera_list = []
            self.faces_cnt = 0
            self.pos_camera_list = []
            self.name_camera_list = []
            self.type_camera_list = []
            self.id_camera_list = []

            (h, w) = img_rd.shape[:2]
            blob = cv2.dnn.blobFromImage(cv2.resize(img_rd, (300, 300)), 1.0,
                                         (300, 300), (104.0, 177.0, 123.0))
            self.detector.setInput(blob)
            faces = self.detector.forward()

            # faces were detected
            if faces.shape[2] != 0:
                # iterate over every face found in the captured image
                for k in range(0, faces.shape[2]):
                    # compute the bounding-box size
                    confidence = faces[0, 0, k, 2]

                    # filter out weak detections by ensuring the `confidence` is
                    # greater than the minimum confidence
                    if confidence < 0.5:
                        continue
                    self.faces_cnt += 1

                    # keep the name above the bounding box
                    # default every detected face to "unknown" first
                    self.name_camera_list.append("unknown")
                    self.type_camera_list.append('unknown')
                    self.id_camera_list.append('unknown')
                    # index of this face in the per-frame lists; k counts raw
                    # detections, including the low-confidence ones skipped above
                    idx = self.faces_cnt - 1

                    # name coordinates for each captured face
                    box = faces[0, 0, k, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    self.pos_camera_list.append(tuple(
                        [int(startX + 5), int(startY - 30)]))

                    # height = (endY - startY)
                    # width = (endX - startX)

                    img_blank = img_rd[startY:endY, startX:endX]
                    img_blank = img_blank[..., ::-1]
                    try:
                        # for ii in range(height):
                        #     for jj in range(width):
                        #         img_blank[ii][jj] = img_rd[startY + ii][startX + jj]

                        img = cv2.resize(img_blank, (96, 96))
                        img = (img / 255.).astype(np.float32)
                        img = self.nn4_small2.predict(np.expand_dims(img, axis=0))[0]

                        # compare this face against every stored face
                        e_distance_list = []
                        for i in range(0, len(self.embedded)):
                            e_distance_list.append(facenet.distance(self.embedded[i], img))

                        similar_person_num = e_distance_list.index(min(e_distance_list))
                        # print(min(e_distance_list))
                        if min(e_distance_list) < 0.58:
                            self.name_camera_list[idx] = self.id_known_list[similar_person_num % 8]
                            self.type_camera_list[idx] = self.type_known_list[similar_person_num % 8]
                            self.id_camera_list[idx] = self.name_known_list[similar_person_num % 8]

                            data_type_three[api_transfer[self.type_camera_list[idx]]] += 1
                            cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
                                          (0, 255, 0), 2)
                            cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
                                          (0, 255, 0), cv2.FILLED)
                            img_with_name = self.draw_name(img_rd)
                            if self.type_camera_list[idx] == 'elder':
                                mode = smile_detection.smile_detect(img_blank)
                                if mode == 'happy':
                                    # print("happy")
                                    cv2.rectangle(img_with_name, tuple([startX, startY - 70]),
                                                  tuple([endX, startY - 35]),
                                                  (0, 215, 255), cv2.FILLED)
                                    cv2.putText(img_with_name, 'happy', (startX + 5, startY - 45), cv2.FONT_ITALIC, 1,
                                                (255, 255, 255), 1, cv2.LINE_AA)
                                    time_snap = datetime.now()
                                    cv2.imwrite('smile_detection' + str(time_snap).replace(':','') + '.jpg', img_with_name)
                                    if (datetime.now() - self.pre).total_seconds() > 5:
                                        # pass `post` as the callable so it runs in the thread
                                        t = threading.Thread(
                                            target=post,
                                            kwargs={'elder_id': self.id_camera_list[idx],
                                                    'event': 0,
                                                    'imagePath': 'smile_detection' + str(time_snap).replace(':', '') + '.jpg'})
                                        t.daemon = False  # setDaemon() is deprecated
                                        t.start()
                                        self.pre = datetime.now()
                            # print("May be person " + str(self.name_known_list[similar_person_num]))
                        elif min(e_distance_list) > 0.75:
                            data_type_three['stranger'] += 1
                            self.name_camera_list[idx] = '陌生人'  # "stranger"
                            cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
                                          (0, 0, 255), 2)
                            cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
                                          (0, 0, 255), cv2.FILLED)
                            img_with_name = self.draw_name(img_rd)
                            time_snap = datetime.now()
                            cv2.imwrite('stranger_detection' + str(time_snap).replace(':', '') + '.jpg', img_with_name)
                            if (datetime.now() - self.pre).total_seconds() > 5:
                                # pass `post` as the callable so it runs in the thread
                                t = threading.Thread(
                                    target=post,
                                    kwargs={'event': 2,
                                            'imagePath': 'stranger_detection' + str(time_snap).replace(':', '') + '.jpg'})
                                t.daemon = False  # setDaemon() is deprecated
                                t.start()
                                self.pre = datetime.now()
                        else:
                            # distances between 0.58 and 0.75 are left undecided
                            pass

                    except Exception:
                        continue
            else:
                img_with_name = img_rd

            # update stream FPS
            # self.update_fps()
        if (datetime.now() - self.pre).total_seconds() > 5:
            post_person(data_type_three)
            self.pre = datetime.now()
        return img_with_name
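
facenet.distance is not shown in these excerpts; in the common Keras nn4.small2 face-recognition setups such a helper is the squared Euclidean distance between embeddings, which would make the 0.58 and 0.75 thresholds squared-distance cutoffs. A sketch under that assumption:

import numpy as np

def distance(emb1, emb2):
    # assumed definition: squared L2 distance between two face embeddings
    return np.sum(np.square(emb1 - emb2))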
Example 5
def tick(checkItem):
    data = checkItem.check()
    post(data, '/checkData')
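
post itself is not included in these excerpts, and it appears in two shapes: post(data, url) here, and keyword calls such as post(event=3, imagePath=...) in the detection examples. Given the http://zhuooyu.cn:8000 comment in Example 6, the event-reporting variant could look like the sketch below; the /api/event path and field names are assumptions, not the project's confirmed API.

import requests

BASE_URL = 'http://zhuooyu.cn:8000'

def post(event, imagePath=None, elder_id=None):
    # report an event, attaching the saved snapshot when there is one
    data = {'event': event}
    if elder_id is not None:
        data['elder_id'] = elder_id
    files = {'image': open(imagePath, 'rb')} if imagePath else None
    try:
        return requests.post(BASE_URL + '/api/event', data=data,
                             files=files, timeout=5)
    finally:
        if files:
            files['image'].close()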
Example 6
    def run(self, frame):
        height = []
        width = []
        center = []

        # Process Image
        datum = op.Datum()
        datum.cvInputData = frame
        self.opWrapper.emplaceAndPop([datum])
        img_rd = datum.cvOutputData

        # fall judge
        try:
            # key points have been identified
            x = datum.poseKeypoints[0][:, 0]
            y = datum.poseKeypoints[0][:, 1]

            width.append(np.max(x[np.nonzero(x)]) - np.min(x[np.nonzero(x)]))
            height.append(np.max(y[np.nonzero(y)]) - np.min(y[np.nonzero(y)]))

            center.append(np.mean(x[np.nonzero(x)]))
            center.append(np.mean(y[np.nonzero(y)]))

            if self.frame_start_time == 0:
                self.center0 = center.copy()
                self.width0 = width.copy()
                self.height0 = height.copy()
                self.frame_start_time = time.time()
            else:
                diff = np.array(
                    [center[0] - self.center0[0], center[1] - self.center0[1]])
                dist = math.sqrt(np.sum((diff * 10**(-4))**2))
                now = time.time()
                v = dist / (now - self.frame_start_time)
                a = (v**2 - self.v0**2) / (2 * dist)

                # print(v, abs(a))
                # compare scalar extents instead of one-element lists/arrays
                w, h = width[0], height[0]
                w0, h0 = self.width0[0], self.height0[0]
                if abs(a) > 0.2 and (w - h > w0 - h0) and (w - h > 0):
                    self.counter += 1
                    # print("alarm by v and a")
                elif w > h and (x[8] != 0 or x[9] != 0 or x[12] != 0) and v < 1:
                    self.counter += 1
                    # print("alarm by w and h")
                else:
                    if self.error == 0:
                        self.error += 1
                    else:
                        self.counter = 0
                        self.error = 0

                if self.counter > 3:
                    font = ImageFont.truetype("simsun.ttc", 30, index=1)
                    img_rd = Image.fromarray(
                        cv2.cvtColor(datum.cvOutputData, cv2.COLOR_BGR2RGB))
                    draw = ImageDraw.Draw(img_rd)
                    draw.text((10, 10),
                              text="Fall Detected",
                              font=font,
                              fill=(255, 0, 0))
                    img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
                    cv2.imwrite('fall_detection.jpg', frame)
                    # pass `post` as the callable so it runs in the thread
                    t = threading.Thread(
                        target=post, kwargs={'event': 3, 'imagePath': 'fall_detection.jpg'})
                    t.daemon = False  # setDaemon() is deprecated
                    t.start()
                    # status = post(event=3, imagePath='fall_detection.jpg')
                    # print("fall")

                # update variables
                self.frame_start_time = now
                self.v0 = v
                self.width0 = width.copy()
                self.height0 = height.copy()
            # if width > height:
            #     print("alarm")
            self.firstFrame = None
        except Exception as e:
            gray = frame_process.preprocess_frame(frame)

            if self.firstFrame is None:
                self.firstFrame = gray

            # frame differencing against the reference happens inside get_contours
            cnts = frame_process.get_contours(self.firstFrame, gray)

            defined_min_area = 3000
            frame, self.alert, self.pre = algorithm_fall.fall_detect(
                cnts, defined_min_area, frame, self.prevX, self.prevY,
                self.xList, self.yList, self.centerV, self.alert, self.pre)

            img_rd = frame
            # cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", frame)

        frame = cv2.resize(img_rd, (640, 480))
        # cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", img_rd)
        return frame


# http://zhuooyu.cn:8000/api/person/old/10
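
frame_process.preprocess_frame and frame_process.get_contours are not part of this excerpt; the fallback branch reads like classic frame differencing, so the helpers plausibly look like the sketch below (an assumption, not the project's confirmed code).

import cv2

def preprocess_frame(frame):
    # grayscale + blur so sensor noise does not register as motion
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.GaussianBlur(gray, (21, 21), 0)

def get_contours(first_frame, gray):
    # difference against the reference frame, threshold, dilate, find contours
    delta = cv2.absdiff(first_frame, gray)
    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    # OpenCV 4.x returns (contours, hierarchy); 3.x returns three values
    cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
    return cnts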
Example 7
    def process(self, frame):
        # process the next frame, which the caller grabbed from either
        # VideoCapture or VideoStream

        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if self.W is None or self.H is None:
            (self.H, self.W) = frame.shape[:2]

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if self.totalFrames % 20 == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            self.trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H), 127.5)
            self.net.setInput(blob)
            detections = self.net.forward()
            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > 0.5:

                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([self.W, self.H, self.W, self.H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    self.trackers.append(tracker)
        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in self.trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # draw a rectangle around the people
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (0, 255, 0), 2)

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        # cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = self.ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = self.trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < self.H // 2:
                        self.totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > self.H // 2:
                        self.totalDown += 1
                        to.counted = True

                        current_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                                     time.localtime(time.time()))
                        event_desc = '有人闯入禁止区域!!!'  # "someone broke into the restricted area!!!"
                        event_location = '院子'  # "yard"
                        print('[EVENT] %s, 院子, 有人闯入禁止区域!!!'
                              % current_time)
                        time_snap = datetime.now()
                        cv2.imwrite('intrusion' + str(time_snap).replace(':', '') + '.jpg', frame)
                        if (datetime.now() - self.pre).total_seconds() > 5:
                            # pass `post` as the callable so it runs in the thread
                            t = threading.Thread(
                                target=post,
                                kwargs={'event': 4,
                                        'imagePath': 'intrusion' + str(time_snap).replace(':', '') + '.jpg'})
                            t.daemon = False  # setDaemon() is deprecated
                            t.start()
                            self.pre = datetime.now()

                        # todo insert into database
                        # command = '%s inserting.py --event_desc %s--event_type4 - -event_location % s' % \
                        #           (python_path, event_desc, event_location)
                        # p = subprocess.Popen(command, shell=True)

            # store the trackable object in our dictionary
            self.trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4,
                       (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            # ("Up", totalUp),
            # ("Down", totalDown),
            ("Status", status),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # resize the output frame for display
        frame = cv2.resize(frame, (640, 480))

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        self.totalFrames += 1
        return frame
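
TrackableObject is used above but defined elsewhere; its usage (a centroid history plus a counted flag) is consistent with the minimal class below, offered as a sketch rather than the project's confirmed definition.

class TrackableObject:
    def __init__(self, objectID, centroid):
        self.objectID = objectID
        self.centroids = [centroid]  # history of centroid positions
        self.counted = False         # set once the object has been counted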
Example 8
    def process(self, img_rd, scale):
        img_with_name = img_rd
        data_type_three = {
            'old': 0,
            'employee': 0,
            'volunteer': 0,
            'stranger': 0
        }
        # load all stored faces
        if self.get_face_database():
            self.draw_note(img_rd)
            self.features_camera_list = []
            self.faces_cnt = 0
            self.pos_camera_list = []
            self.name_camera_list = []
            self.type_camera_list = []

            (h, w) = img_rd.shape[:2]
            blob = cv2.dnn.blobFromImage(cv2.resize(img_rd, (300, 300)), 1.0,
                                         (300, 300), (104.0, 177.0, 123.0))
            self.detector.setInput(blob)
            faces = self.detector.forward()

            # faces were detected
            if faces.shape[2] != 0:
                # iterate over every face found in the captured image
                for k in range(0, faces.shape[2]):
                    # compute the bounding-box size
                    confidence = faces[0, 0, k, 2]

                    # filter out weak detections by ensuring the `confidence` is
                    # greater than the minimum confidence
                    if confidence < 0.5:
                        continue
                    self.faces_cnt += 1

                    # keep the name above the bounding box
                    # default every detected face to "unknown" first
                    self.name_camera_list.append("unknown")
                    self.type_camera_list.append("unknown")
                    # index of this face in the per-frame lists; k counts raw
                    # detections, including the low-confidence ones skipped above
                    idx = self.faces_cnt - 1

                    # name coordinates for each captured face
                    box = faces[0, 0, k, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    self.pos_camera_list.append(tuple(
                        [int(startX + 5), int(startY - 30)]))

                    img_blank = img_rd[startY:endY, startX:endX]
                    img_blank = img_blank[..., ::-1]
                    try:
                        # for ii in range(height):
                        #     for jj in range(width):
                        #         img_blank[ii][jj] = img_rd[startY + ii][startX + jj]

                        img = cv2.resize(img_blank, (96, 96))
                        img = (img / 255.).astype(np.float32)
                        img = self.nn4_small2.predict(np.expand_dims(img, axis=0))[0]

                        # compare this face against every stored face
                        e_distance_list = []
                        for i in range(0, len(self.embedded)):
                            e_distance_list.append(facenet.distance(self.embedded[i], img))

                        similar_person_num = e_distance_list.index(min(e_distance_list))

                        # print(min(e_distance_list))
                        if min(e_distance_list) < 0.58:
                            self.name_camera_list[idx] = self.name_known_list[similar_person_num % 8]
                            self.type_camera_list[idx] = self.type_known_list[similar_person_num % 8]
                            data_type_three[api_transfer[self.type_camera_list[idx]]] += 1

                        # draw the bounding box
                        if self.name_camera_list[idx] != 'unknown':
                            cv2.rectangle(img_rd, tuple([startX, startY]), tuple([endX, endY]),
                                          (0, 255, 0), 2)
                            cv2.rectangle(img_rd, tuple([startX, startY - 35]), tuple([endX, startY]),
                                          (0, 255, 0), cv2.FILLED)

                    except Exception:
                        continue
                    print(self.type_camera_list)
                    img_with_name = self.draw_name(img_rd)
                    if 'unknown' in self.type_camera_list and len(self.type_camera_list) > 1:
                        index = self.type_camera_list.index('unknown')
                        pos_vol = np.array(self.pos_camera_list[index])
                        for i in range(0, len(self.type_camera_list)):
                            if self.type_camera_list[i] == "elder":
                                d = scale * np.sqrt(facenet.distance(pos_vol, np.array(self.pos_camera_list[i])))
                                if d < 50:
                                    if (datetime.now() - self.pre).total_seconds() > 5:
                                        time_snap = datetime.now()
                                        cv2.imwrite("interaction"+str(time_snap).replace(':','')+".jpg", img_with_name)
                                        # pass `post` as the callable so it runs in the thread
                                        t = threading.Thread(
                                            target=post,
                                            kwargs={'event': 1,
                                                    'imagePath': 'interaction' + str(time_snap).replace(':', '') + '.jpg'})
                                        t.daemon = False  # setDaemon() is deprecated
                                        t.start()
                                        self.pre = datetime.now()
            else:
                img_with_name = img_rd

            # if 'volunteer' in self.type_camera_list:

            # update stream FPS
            self.update_fps()
            # distance check
        if (datetime.now() - self.pre).total_seconds() > 5:
            post_person(data_type_three)
            self.pre = datetime.now()
        return img_with_name
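
api_transfer is referenced but never defined in these excerpts; it maps the stored person types onto the keys of data_type_three. Given the 'elder' checks above and the 'old'/'employee'/'volunteer' keys, a plausible mapping (an assumption) is:

api_transfer = {
    'elder': 'old',
    'employee': 'employee',
    'volunteer': 'volunteer',
}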