Example #1
    def set_state(self, state):
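        # Cool-down: if this person was confirmed less than 35 seconds ago,
        # reset the recognition accumulator and skip this update.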
        if self.checked_at and (time.time() - self.checked_at < 35):
            log('{} taking a nap... ({})'.format(self.name,
                                                 time.time() -
                                                 self.checked_at))
            self.reset()
            return
        # log('({}) set state {} in frame: {} score: {}'.format(self.name, state, self.in_the_frame, self.i_am_recognized.score()))
        self.i_am_recognized.update(int(state))
        if self.i_am_recognized.detected():
            if not self.in_the_frame:
                self.__update_house_state()

            self.in_the_frame = True
            self.checked_at = time.time()
        else:
            self.in_the_frame = False

    def recognize(self, aligned_image, image, rect_nums, XY):
        print('2')
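        # Run the age/gender model on the aligned face and take the first prediction.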
        ages, genders = self.eval(aligned_image, self.model_path)
        age = int(ages[0])
        # these values were changed from the original
        if age < 40:
            age = "teen"
        elif age < 50:
            age = "adult"
        else:
            age = "mature"

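        # genders[0] == 0 is treated as female, anything else as male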
        gender = "female" if genders[0] == 0 else "male"
        log("[gender_recognizer_tf] Person gender: {}, age: {}".format(
            gender, int(ages[0])))
        image_to_show_path_GA = "images/{}/{}".format(gender, age)
        return image_to_show_path_GA, gender, age

    def run(self):
        log('Live streaming...')
        # video_capture = VideoStream(src="rtsp://*****:*****@cyberlabsrio.ddns.net/cam/realmonitor?channel=5&subtype=0", usePiCamera=False).start(
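        # Build a GStreamer pipeline that reads 640x480 RGB frames from /dev/video1.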
        gst_str = (
            "v4l2src device=/dev/video{} ! "
            "video/x-raw, width=(int){}, height=(int){}, format=(string)RGB ! "
            "videoconvert ! appsink").format(1, 640, 480)
        print(gst_str)

        video_capture = VideoStream(src=gst_str, usePiCamera=False).start()
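        # Capture loop: skip empty frames, downscale to at most 256 px wide
        # and broadcast at roughly 30 fps.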
        while True:
            frame = video_capture.read()
            if frame is None:
                continue
            frame = imutils.resize(frame, width=min(256, frame.shape[1]))
            self.broadcast([frame])
            time.sleep(1.0 / 30.0)
Example #4
    def __update_house_state(self):
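        # Toggle the house occupancy flag; people without an id are ignored.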
        if not self.id:
            return
        log('********* update house state: {}'.format(not self.in_the_house))
        self.in_the_house = not self.in_the_house
Example #5
    def run(self):
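        # Stream the default advertise video, switching to a targeted ad on
        # "NoMatch" or to a welcome screen on "Match" from face_finder.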
        FACE_RECOGNIZER = True
        advertise_path = AdFeed.load_image(ADVERTISE_PATHS).replace("\\ ", " ")
        # video_capture = VideoStream(src=advertise_path, usePiCamera=False).start()
        video_capture = cv2.VideoCapture(advertise_path)
        log('Advertise streaming...')
        blank_frame = None
        welcome = Welcome()
        while True:
            if not self.empty():
                sender, data = self.recv()
                log('Sender to AdFeed...{}'.format(sender))
                if sender == 'face_finder':
                    state = data[0]
                    if state == "NoMatch":
                        # play the ad clip selected for this face, frame by frame
                        url = AdFeed.load_image(data[1]).replace("\\ ", " ")
                        print("Found a face and gender; opening video: " + url)
                        ad_capture = cv2.VideoCapture(url)
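                        # Play the ad clip until it ends, but bail out early if
                        # face_finder reports a match in the meantime.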
                        while True:
                            grb, framex = ad_capture.read()
                            if framex is None:
                                break
                            framex = imutils.resize(framex,
                                                    width=min(
                                                        1280, framex.shape[1]))
                            self.broadcast([framex])
                            time.sleep(1.0 / 23.97)
                            if not self.empty():
                                sender, data = self.recv()
                                if sender == 'face_finder':
                                    state = data[0]
                                    if data[0] == "Match" and FACE_RECOGNIZER:
                                        break

                    if state == "Match" and FACE_RECOGNIZER:
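                        # Recognized person: render the welcome overlay on a black
                        # frame and broadcast it for about 5 seconds.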
                        face = data[1]
                        people = data[2]
                        person = people[0]
                        gender = data[3]
                        age = data[4]
                        start_t = time.time()
                        elapse = time.time() - start_t
                        while elapse < 5:
                            if blank_frame is None:
                                blank_frame = np.zeros((720, 1280, 3),
                                                       dtype=np.uint8)
                            frame = blank_frame.copy()
                            # GUIFaceFinder().draw(frame, face, people,person.imgFile, 0.99)
                            welcome.render(frame, person.name, gender, age)

                            self.broadcast([frame])
                            time.sleep(1.0 / 23.97)
                            elapse = time.time() - start_t

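                    # Drain any messages that queued up while handling this event.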
                    while not self.empty():
                        self.recv()
            # log('Advertise streaming default...')
            grb, frame = video_capture.read()
            # log("grab : {} ".format(grb))
            if frame is None:
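                # The default ad clip ended: reopen it and keep looping.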
                print("END OF FILE Advertise")
                advertise_path = AdFeed.load_image(ADVERTISE_PATHS).replace(
                    "\\ ", " ")
                video_capture = cv2.VideoCapture(advertise_path)
                grb, frame = video_capture.read()

            frame = imutils.resize(frame, width=min(1280, frame.shape[1]))
            # print("Broadcast Ad Feed")
            self.broadcast([frame])
            time.sleep(1.0 / 24.0)