Example #1
0
    def processFrameOf(self, camera):
        """Grab one frame from *camera*, run the full detection pipeline
        on it and publish the annotated result on the camera object.

        Pipeline: frame acquisition (with retries) -> optional per-camera
        preprocessing -> person detection -> non-max suppression ->
        tracking -> face extraction and matching against known suspects ->
        drawing -> publish under ``self.lock``.

        Parameters:
            camera: camera object providing ``isUp()``, ``read()``, a
                tracker ``tk``, a ``track`` state object and an ``_id``.

        Returns:
            bool: True when a frame was fully processed and stored,
            False when no usable frame could be obtained.
        """
        if not camera.isUp():
            # Stream is down; nothing to process this tick.
            return False

        # Try up to `max_tries` reads and stop at the first valid frame.
        max_tries = 10
        frame = None
        for _attempt in range(1, max_tries + 1):
            try:
                ret, f = camera.read()
                if ret:
                    frame = f
                    break  # FIX: original kept reading all 10 times
            except Exception:
                # Best-effort: a failed read just means another retry.
                # (FIX: was a bare `except:` with a junk `yyyyy = 1`.)
                pass

        if frame is None:
            # No valid frame after all retries.
            return False

        if camera._id in self.preprocessings:
            self.log("INFO", "Pre-Processing frame of camera: " + camera._id)
            st = time.time()
            # Stacked text lines in the bottom-left corner of the frame.
            lineCoords = [(5, frame.shape[0] - 30 * (i + 1)) for i in range(3)]
            pp = self.preprocessings[camera._id]
            if 'brightness' in pp:
                bv = pp['brightness']
                frame = Preprocessing.adjustBrightness(frame, bv)
                Preprocessing.putText(frame, "Brightness: " + str(bv),
                                      lineCoords[0])

            if 'sharpness' in pp:
                sv = pp['sharpness']
                frame = Preprocessing.sharpenImage(frame, k=sv)
                Preprocessing.putText(frame, "Sharpness: " + str(sv),
                                      lineCoords[1])

            if 'denoise' in pp:
                dv = pp['denoise']
                if dv > 0:
                    frame = Preprocessing.denoiseImage(frame, strength=dv)
                    Preprocessing.putText(frame, "denoise: " + str(dv),
                                          lineCoords[2])

            et = time.time()
            self.log("TIME", "Action took {:2.6f}s".format((et - st)))

        # Person detection.
        self.log("INFO", "Detecting People in the frame")
        bboxes, conf = self.pd.detect(frame, drawOnFrame=False)
        # Merge overlapping bounding boxes.
        self.log("INFO", "Applying nms")
        bboxes = non_max_suppression(np.array(bboxes),
                                     probs=None,
                                     overlapThresh=0.65)
        # Tracking.
        if len(bboxes) > 0:
            tbboxes, tids = camera.tk.track(frame,
                                            bboxes,
                                            conf,
                                            drawOnFrame=False)
            if len(tbboxes) > 0:
                self.log("INFO", "Tracking people {}".format(len(tids)))
                for i in range(len(tbboxes)):
                    tbbox = np.array(tbboxes[i], np.int32)
                    tid = tids[i]
                    # Selective recognition keeps the FPS up: skip people
                    # recognized as suspects within `recognizeThresh`.
                    if (camera.track.hasPerson(tid)
                            and camera.track.people[tid].isSuspect()
                            and time.time() -
                            camera.track.people[tid].whenRecognized
                            < self.recognizeThresh):
                        continue

                    # Crop the tracked person and look for a face.
                    person = frame[tbbox[1]:tbbox[3], tbbox[0]:tbbox[2]]
                    faces = fdr.extractFaces(person, drawOnFrame=False)
                    if len(faces) <= 0:
                        continue

                    face = faces[0]
                    fe = fdr.getEmbedding(face[0])

                    # Compare the face embedding against every stored
                    # picture ({"face": ..., "em": ..., "path": ...})
                    # of every suspect; stop at the first match.
                    suspectDetected = False
                    for k, suspect in self.suspects.items():
                        for pic in suspect.pictures:
                            if fdr.is_match(pic['em'], fe):
                                camera.track.suspectDetected(
                                    tid, suspect, time.time(), frame,
                                    self.SERVER_ID, camera._id)
                                suspectDetected = True
                                break
                        if suspectDetected:
                            break

                # Update tracked positions with this frame's results.
                camera.track.updatePositions(tbboxes, tids)

        camera.track.clearForgotten()
        # Draw bounding boxes and track annotations onto the frame.
        camera.track.draw(frame)

        # Stamp the server time in the top-left corner.
        t = time.localtime()
        text = "Server: " + time.strftime("%H:%M:%S", t)
        cv2.putText(frame, text, (10, 60), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                    (0, 255, 255), 1)

        # Publish the processed frame atomically.
        with self.lock:
            camera.processedFrame = frame
            camera.processedFrameTime = time.time()
            self.xo = 1  # flag presumably read elsewhere; kept as-is
        return True
Example #2
0
    def processFrameOf(self, camera):
        """Grab one frame from *camera*, detect and track people, match
        extracted faces against the suspect inventory, and publish the
        annotated frame on the camera object.

        Unlike the plain variant, this version (re)connects the camera
        when the stream is down or produces no frames.

        Parameters:
            camera: camera object providing ``isUp()``, ``connect()``,
                ``reconnect()``, ``read()``, a tracker ``tk`` and ``_id``.

        Returns:
            bool: True when a frame was fully processed and stored,
            False otherwise (stream down, no frame, nothing detected).
        """
        if not camera.isUp():
            # Stream down: kick off a (re)connect and bail out this tick.
            camera.connect()
            return False

        # Try up to `max_tries` reads and stop at the first valid frame.
        max_tries = 10
        frame = None
        for _attempt in range(1, max_tries + 1):
            try:
                ret, f = camera.read()
                if ret:
                    frame = f
                    break  # FIX: original kept reading all 10 times
            except Exception:
                # Best-effort: a failed read just means another retry.
                # (FIX: was a bare `except:` with a junk `yyyyy = 1`.)
                pass

        if frame is None:
            # No usable frame; force a reconnect for the next round.
            camera.reconnect()
            return False

        if camera._id in self.preprocessings:
            self.log("INFO", "Pre-Processing frame of camera: " + camera._id)
            st = time.time()
            # Stacked text lines in the bottom-left corner of the frame.
            lineCoords = [(5, frame.shape[0] - 30 * (i + 1)) for i in range(3)]
            pp = self.preprocessings[camera._id]
            if 'brightness' in pp:
                bv = pp['brightness']
                frame = Preprocessing.adjustBrightness(frame, bv)
                Preprocessing.putText(frame, "Brightness: " + str(bv),
                                      lineCoords[0])

            if 'sharpness' in pp:
                sv = pp['sharpness']
                frame = Preprocessing.sharpenImage(frame, k=sv)
                Preprocessing.putText(frame, "Sharpness: " + str(sv),
                                      lineCoords[1])

            if 'denoise' in pp:
                dv = pp['denoise']
                if dv > 0:
                    frame = Preprocessing.denoiseImage(frame, strength=dv)
                    Preprocessing.putText(frame, "denoise: " + str(dv),
                                          lineCoords[2])

            et = time.time()
            self.log("TIME", "Action took {:2.6f}s".format((et - st)))

        cameraId = camera._id
        t = 0  # accumulated processing time for this frame (seconds)

        # Person detection.
        st = time.time()
        bboxes, conf = self.pd.detect(frame, drawOnFrame=False)
        t += time.time() - st
        if len(bboxes) == 0:
            return False

        # Tracking (also yields per-person centroids).
        st = time.time()
        bboxes, ids, cents = camera.tk.track(frame,
                                             bboxes,
                                             conf,
                                             returnCentroids=True,
                                             drawOnFrame=False)
        t += time.time() - st
        if len(bboxes) == 0:
            return False

        # Face extraction, each face assigned to a tracked person id.
        st = time.time()
        facesWithIds = fdr.extractFacesAndAssignToPeople(frame,
                                                         bboxes,
                                                         ids,
                                                         cents,
                                                         drawOnFrame=False)
        t += time.time() - st

        # Draw a green box and the track id for every detected person.
        for i in range(len(bboxes)):
            _bbox = bboxes[i]
            _id = ids[i]
            _cent = cents[i]
            # FIX: corner tuples no longer shadow the timer variable `st`.
            topLeft = (int(_bbox[0]), int(_bbox[1]))
            bottomRight = (int(_bbox[2]), int(_bbox[3]))
            cv2.rectangle(frame, topLeft, bottomRight, (0, 255, 0), 2)
            label = "ID:{}".format(_id)
            self.textOnFrame(frame,
                             label,
                             fc=(0, 0, 0),
                             bc=(0, 255, 0),
                             org=(int(_cent[0]), int(_cent[1])))

        # Draw a blue box around every extracted face.
        for fd in facesWithIds:
            _c = fd[1]
            cv2.rectangle(frame, (_c[0], _c[1]), (_c[2], _c[3]),
                          (255, 0, 0), 2)

        self.inventory.update()
        for suspect in self.inventory.suspects:
            # Recognition pass for suspects that are due for a check.
            if suspect.shouldRecognize():
                recognized = -1
                for embd in suspect.embds:
                    for index, facedata in enumerate(facesWithIds):
                        if fdr.is_match(facedata[3], embd):
                            recognized = index
                            break
                    if recognized >= 0:
                        faceWithId = facesWithIds.pop(recognized)
                        personId = cameraId + "_" + str(faceWithId[2])

                        # Map the face's track id back to its bbox; skip
                        # when the id has left the tracker's list.
                        # FIX: `list.index` raises ValueError rather than
                        # returning -1, so the old `> -1` check was dead.
                        try:
                            _trackIdIndex = ids.index(faceWithId[2])
                        except ValueError:
                            continue

                        _bbox = bboxes[_trackIdIndex]
                        self.markSuspectOnFrame(frame, suspect, _bbox)

                        suspect.recognized(personId, frame, self.SERVER_ID,
                                           cameraId)
                        break

                if recognized == -1:
                    # Drop a stale association after 20 s without a match.
                    if suspect.last_time_recognized is not None:
                        if time.time() - suspect.last_time_recognized > 20:
                            suspect.personId = None

            # Paint the currently associated suspect red on the frame.
            if suspect.personId is not None:
                _trackId = int(suspect.personId.split("_")[1])
                try:
                    _trackIdIndex = ids.index(_trackId)
                except ValueError:
                    continue

                _bbox = bboxes[_trackIdIndex]
                self.markSuspectOnFrame(frame, suspect, _bbox)

        print("processing one frame: {:2.4f}s".format(t))

        # Stamp the server time in the top-left corner.
        t = time.localtime()
        text = "Server: " + time.strftime("%H:%M:%S", t)
        cv2.putText(frame, text, (10, 60), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                    (0, 255, 255), 1)

        # Publish the processed frame atomically.
        with self.lock:
            camera.processedFrame = frame
            camera.processedFrameTime = time.time()
            self.xo = 1  # flag presumably read elsewhere; kept as-is
        return True