Example #1
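The code below is the tail of a motion-detection loop. It references a State enum, cv2, datetime, and a VideoRecorder that the excerpt does not define; a minimal sketch of the assumed setup (the real module may differ):

import datetime
import enum

import cv2


class State(enum.Enum):
    # Assumed states, inferred from the transitions in the excerpt below.
    IDLE = 0        # no motion and not recording
    ACTIVE = 1      # motion detected in the current frame
    RECORDING = 2   # still recording after motion has stopped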
            # May still need some tweaking for the lower limit of x, for hour,
            # day, year changes.  Probably not worth the effort though.
            #print("x:", x, "y:", y, "w:", w, "h:", h)
            new_state = State.ACTIVE

            # Draw the bounding box on the frame, and update the text.
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # Update the last_active_time to now (keep the recording going).
            last_active_time = datetime.datetime.now()

    if new_state == State.ACTIVE:
        # Motion has been detected, so the scene is now ACTIVE.
        if state == State.IDLE:
            # Transitioning from IDLE to ACTIVE, so we need to start recording.
            VideoRecorder.start()
        state = State.ACTIVE

    elif new_state == State.RECORDING:
        state = State.RECORDING

    elif new_state == State.IDLE:
        if state != State.IDLE:
            # Transitioning from RECORDING to IDLE.  Stop the recording.
            state = State.IDLE
            VideoRecorder.stop()

    # Draw the status text on the frame.
    if state == State.IDLE:
        text = "Idle."
    elif state == State.RECORDING:
        text = "Recording."
Example #2
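The class below references several module-level names (log, lock, the length and disk-space constants, keyboard, Button, pause) that the excerpt does not define. A plausible setup, assuming pynput for the keyboard listener and gpiozero for the GPIO button; the constant values are placeholders:

import datetime
import logging
import os
import shutil
import subprocess
import tempfile
import threading
import time
from signal import pause

from gpiozero import Button    # physical button wired to a GPIO pin
from pynput import keyboard    # global keyboard listener

log = logging.getLogger(__name__)
lock = threading.Lock()        # held while a recording is in progress

# Placeholder values; the real constants are not shown in the excerpt.
MAX_VIDEO_LENGTH = 600         # seconds
MIN_VIDEO_LENGTH = 3           # seconds
MIN_DISK_SPACE_MB = 500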
class App:
    def __init__(self, preview=False, max_video_length=MAX_VIDEO_LENGTH):
        log.info("booting up..")
        self.final_dir = self._setup_dirs()
        self.max_video_length = max_video_length
        self.video_recorder = VideoRecorder(preview=preview)
        self.audio_recorder = AudioRecorder()
        time.sleep(2)
        log.info("ready!")

    def _setup_dirs(self):
        final_dir = os.path.expanduser('~/media/')
        if not os.path.isdir(final_dir):
            os.mkdir(final_dir)
        return final_dir

    def _make_filename(self):
        return datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")

    def has_space(self):
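        # f_frsize is the fragment size in bytes and f_bavail the number of
        # blocks available to unprivileged processes, so their product is
        # the usable free space in bytes.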
        statvfs = os.statvfs("/")
        megabytes_available = int(statvfs.f_frsize * statvfs.f_bavail / 1024 /
                                  1024)
        log.info(f"Still {megabytes_available}MB left on device")
        return megabytes_available > MIN_DISK_SPACE_MB

    def on_keyboard_release(self, key):
        if key == keyboard.Key.enter:
            if lock.locked():
                self.stop_recording()
            elif self.has_space():
                self.start_recording()
            else:
                return False
        if key == keyboard.Key.esc:
            if lock.locked():
                self.stop_recording()
            return False

    def timer(self, seconds, current_video):
        log.info(f"going to sleep for {seconds}s and then stop recording")
        for i in range(seconds):
            if not lock.locked():
                log.info("looks like recording has ended before timeout")
                return
            elif current_video != self.file_name:
                log.info("there is a different ongoing recording")
                return
            time.sleep(1)
        log.info("time's up!, stopping recording")
        self.stop_recording()

    def start_recording(self):
        lock.acquire()
        self.start_datetime = datetime.datetime.now()
        self.file_name = self._make_filename()
        timer_thread = threading.Thread(target=self.timer,
                                        args=(self.max_video_length,
                                              self.file_name))
        timer_thread.start()
        self.tmp_dir = tempfile.mkdtemp()

        log.info("starting threads...")
        self.video_recorder.start(self.file_name, self.tmp_dir)
        self.audio_recorder.start(self.file_name, self.tmp_dir)

    def stop_recording(self):
        log.info("stopping threads...")
        if not self.audio_recorder.stop():
            return
        if not self.video_recorder.stop():
            return
        now = datetime.datetime.now()
        video_length = (now - self.start_datetime).seconds
        if video_length > MIN_VIDEO_LENGTH:
            log.info("starting mux...")
            cmd = (
                f"ffmpeg -i {self.tmp_dir}/{self.file_name}.wav -i {self.tmp_dir}/{self.file_name}.h264 "
                f"-c:v copy -c:a aac -strict experimental {self.final_dir}/{self.file_name}.mp4"
            )
            subprocess.run(cmd, capture_output=True, shell=True)
            log.info(f"{self.file_name}.mp4 is ready!")
        else:
            log.info(f"Video was to short: {video_length}, removing it")
        shutil.rmtree(self.tmp_dir)
        log.info(f"{self.tmp_dir} removed")
        lock.release()

    def run(self):
        def on_release(button):
            if lock.locked():
                self.stop_recording()
            elif self.has_space():
                self.start_recording()
            else:
                return False

        button = Button(2)
        button.when_released = on_release
        listener = keyboard.Listener(on_release=self.on_keyboard_release)
        listener.start()
        pause()
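A minimal entry point for this class might look like the sketch below; it is not part of the original excerpt and assumes the module-level setup sketched above:

if __name__ == "__main__":
    App(preview=False).run()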
Example #3
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_dialog()
        self.ui.setupUi(self)
        self.setFixedSize(self.width(), self.height())
        self.onBindingUI()

        ## Header labels for the Quadro W record table
        self.record_header_labels = [
            'who', 'what', 'when', 'where', 'speaking'
        ]

        ## Create detailed speaking information windows
        self.speaking_ui_cam1 = SpeakingMainWindow()
        self.speaking_ui_cam2 = SpeakingMainWindow()

        ## For audio and video analysis from the cameras
        global video_thread_cam1
        global video_thread_cam2
        global audio_thread_cam1
        global audio_thread_cam2

        ## For showing the Quadro W records
        global nobody_record
        global chung_chih_record
        global yu_sung_record
        global chang_yu_record
        global i_hsin_record
        global tzu_yuan_record
        global chih_yu_record
        global other_record

        ## For Q-Learning
        global nobody_q_table
        global chung_chih_q_table
        global yu_sung_q_table
        global chang_yu_q_table
        global i_hsin_q_table
        global tzu_yuan_q_table
        global chih_yu_q_table
        global other_q_table

        ## Create video and audio processing threads to extract Quadro W info
        audio_thread_cam1 = AudioRecorder(
            cam_ip='rtsp://*****:*****@192.168.65.66:554/stream1',
            audio_spath='temporarily_rtsp_data/for_speaking/cam1',
            audio_stime=5,
            pos_path='nlp_recognize/pos.json',
            dict_path='nlp_recognize/extra_dict.txt')
        video_thread_cam1 = VideoRecorder(
            cam_ip='rtsp://*****:*****@192.168.65.66:554/stream1',
            record_path='record/',
            qtable_path='q_table/',
            compress_result=True,
            fps=15,
            analysis_sec=1,
            prefix_name="cam1",
            audio_thread=audio_thread_cam1,
            human_model_path=
            "trans_model/model_simulated_RGB_mgpu_scaling_append.0024.pb",
            face_model_path='trans_model/face_new3.pb',
            place_model_path='trans_model/place_new3.pb',
            object_model_path=
            'object_recognize/code/workspace/training_demo/model/pb/frozen_inference_graph.pb',
            face_category_path='face_recognize/categories_human_uscc.txt',
            place_category_path='places_recognize/categories_places_uscc.txt',
            object_category_path=
            'object_recognize/code/workspace/training_demo/annotations/label_map.pbtxt'
        )
        audio_thread_cam2 = AudioRecorder(
            cam_ip='rtsp://*****:*****@192.168.65.41:554/stream1',
            audio_spath='temporarily_rtsp_data/for_speaking/cam2',
            audio_stime=5,
            pos_path='nlp_recognize/pos.json',
            dict_path='nlp_recognize/extra_dict.txt')
        video_thread_cam2 = VideoRecorder(
            cam_ip='rtsp://*****:*****@192.168.65.41:554/stream1',
            record_path='record/',
            qtable_path='q_table/',
            compress_result=True,
            fps=15,
            analysis_sec=1,
            prefix_name="cam2",
            audio_thread=audio_thread_cam2,
            human_model_path=
            "trans_model/model_simulated_RGB_mgpu_scaling_append.0024.pb",
            face_model_path='trans_model/face_new3.pb',
            place_model_path='trans_model/place_new3.pb',
            object_model_path=
            'object_recognize/code/workspace/training_demo/model/pb/frozen_inference_graph.pb',
            face_category_path='face_recognize/categories_human_uscc.txt',
            place_category_path='places_recognize/categories_places_uscc.txt',
            object_category_path=
            'object_recognize/code/workspace/training_demo/annotations/label_map.pbtxt'
        )

        ## Create threads to show the Quadro W records
        nobody_record = RecordUpdata(csv_path="record/Nobody_record.csv")
        chung_chih_record = RecordUpdata(
            csv_path="record/chung-chih_record.csv")
        yu_sung_record = RecordUpdata(csv_path="record/yu-sung_record.csv")
        chang_yu_record = RecordUpdata(csv_path="record/chang-yu_record.csv")
        i_hsin_record = RecordUpdata(csv_path="record/i-hsin_record.csv")
        tzu_yuan_record = RecordUpdata(csv_path="record/tzu-yuan_record.csv")
        chih_yu_record = RecordUpdata(csv_path="record/chih-yu_record.csv")

        ## Create threads to calculate and show Q values
        nobody_q_table = QLearningUpdata(
            in_record_path="record/Nobody_record.csv",
            out_table_path="q_table/Nobody_qtable.csv",
            where_pool=[[2], [1], []],
            where_category_path="places_recognize/categories_places_uscc.txt",
            care_number=100,
            decay_reward=0.98,
            base_reward=100,
            lower_limit=1,
            decay_qvalue=0.9,
            learning_rate=0.1)
        chung_chih_q_table = QLearningUpdata(
            in_record_path="record/chung-chih_record.csv",
            out_table_path="q_table/chung-chih_qtable.csv",
            where_pool=[[2], [1], []],
            where_category_path="places_recognize/categories_places_uscc.txt",
            care_number=100,
            decay_reward=0.98,
            base_reward=100,
            lower_limit=1,
            decay_qvalue=0.9,
            learning_rate=0.1)
        yu_sung_q_table = QLearningUpdata(
            in_record_path="record/yu-sung_record.csv",
            out_table_path="q_table/yu-sung_qtable.csv",
            where_pool=[[2], [1], []],
            where_category_path="places_recognize/categories_places_uscc.txt",
            care_number=100,
            decay_reward=0.98,
            base_reward=100,
            lower_limit=1,
            decay_qvalue=0.9,
            learning_rate=0.1)
        chang_yu_q_table = QLearningUpdata(
            in_record_path="record/chang-yu_record.csv",
            out_table_path="q_table/chang-yu_qtable.csv",
            where_pool=[[2], [1], []],
            where_category_path="places_recognize/categories_places_uscc.txt",
            care_number=100,
            decay_reward=0.98,
            base_reward=100,
            lower_limit=1,
            decay_qvalue=0.9,
            learning_rate=0.1)
        i_hsin_q_table = QLearningUpdata(
            in_record_path="record/i-hsin_record.csv",
            out_table_path="q_table/i-hsin_qtable.csv",
            where_pool=[[2], [1], []],
            where_category_path="places_recognize/categories_places_uscc.txt",
            care_number=100,
            decay_reward=0.98,
            base_reward=100,
            lower_limit=1,
            decay_qvalue=0.9,
            learning_rate=0.1)
        tzu_yuan_q_table = QLearningUpdata(
            in_record_path="record/tzu-yuan_record.csv",
            out_table_path="q_table/tzu-yuan_qtable.csv",
            where_pool=[[2], [1], []],
            where_category_path="places_recognize/categories_places_uscc.txt",
            care_number=100,
            decay_reward=0.98,
            base_reward=100,
            lower_limit=1,
            decay_qvalue=0.9,
            learning_rate=0.1)
        chih_yu_q_table = QLearningUpdata(
            in_record_path="record/chih-yu_record.csv",
            out_table_path="q_table/chih-yu_qtable.csv",
            where_pool=[[2], [1], []],
            where_category_path="places_recognize/categories_places_uscc.txt",
            care_number=100,
            decay_reward=0.98,
            base_reward=100,
            lower_limit=1,
            decay_qvalue=0.9,
            learning_rate=0.1)

        ## Start all threads
        video_thread_cam1.start()
        video_thread_cam2.start()
        audio_thread_cam1.start()
        audio_thread_cam2.start()

        nobody_record.start()
        chung_chih_record.start()
        yu_sung_record.start()
        chang_yu_record.start()
        i_hsin_record.start()
        tzu_yuan_record.start()
        chih_yu_record.start()

        nobody_q_table.start()
        chung_chih_q_table.start()
        yu_sung_q_table.start()
        chang_yu_q_table.start()
        i_hsin_q_table.start()
        tzu_yuan_q_table.start()
        chih_yu_q_table.start()

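        ## A QTimer started with a 0 ms interval fires on every pass of the
        ## Qt event loop, so the webcam view refreshes as often as possible.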
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.start_webcam)
        self.timer.start(0)
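The seven QLearningUpdata constructions above differ only in the person's name embedded in the file paths. A hypothetical module-level helper (make_q_table is not in the original) could remove the duplication:

def make_q_table(name):
    # Hypothetical helper: all q-tables share the same hyperparameters and
    # differ only in the name used for their input/output CSV paths.
    return QLearningUpdata(
        in_record_path=f"record/{name}_record.csv",
        out_table_path=f"q_table/{name}_qtable.csv",
        where_pool=[[2], [1], []],
        where_category_path="places_recognize/categories_places_uscc.txt",
        care_number=100,
        decay_reward=0.98,
        base_reward=100,
        lower_limit=1,
        decay_qvalue=0.9,
        learning_rate=0.1)

# e.g. chung_chih_q_table = make_q_table("chung-chih")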