Example #1
    def start_recording(self):
        """
        Set up the recorder
        """
        self.current_file = self.archive + "/" + self.detected_at + ".wav"

        Util.log(self.name, "Noise detected! Recording...")
    def find_fps(self, source):
        """
		Determine frames per second of the video source

		@param video source
		@return int
		"""
        Util.log(self.name, "Determining FPS...")

        # How many frames to capture
        num_frames = 120

        # Start time
        start = time.time()

        # Grab a few frames
        for _ in range(num_frames):
            source.read()

        # End time
        end = time.time()

        # Calculate frames per second
        fps = int(math.floor(num_frames / (end - start)))
        Util.log(self.name, "Setting FPS to " + str(fps))

        return fps
    def start_recording(self):
        """
		Setup the recorder
		"""

        self.current_file = self.archive + "/" + self.detected_at + ".avi"

        Util.log(self.name, "Motion detected! Recording...")

        # Create the video writer with output path, codec, FPS and frame size
        self.writer = cv2.VideoWriter(self.current_file, self.codec, self.fps,
                                      (self.width, self.height))
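
For context: find_fps() and start_recording() above rely on a video source, frame size, codec and FPS that the surrounding class is assumed to set up elsewhere. A minimal sketch of that setup, assuming cv2.VideoCapture on the first webcam and the XVID fourcc for the .avi container (both assumptions, not shown in the excerpt):

    def setup_source(self):
        """
        Hypothetical helper (not in the excerpt): open the capture, read its
        dimensions, pick a codec and measure the FPS empirically
        """
        self.source = cv2.VideoCapture(0)  # first webcam (assumption)
        self.width = int(self.source.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.source.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.codec = cv2.VideoWriter_fourcc(*"XVID")  # assumed codec for .avi
        self.fps = self.find_fps(self.source)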
Example #4
    def convert_to_mp3(self, path):
        """
        Convert wav-file to mp3

        @param string path
        """
        Util.log(self.name, "Converting audio...")

        try:
            cmd = 'lame --preset insane "{}" 2> /dev/null && rm "{}"'.format(
                path, path)
            # check=True makes a failed conversion raise CalledProcessError
            subprocess.run(cmd, shell=True, check=True)

        except subprocess.CalledProcessError:
            Util.log(self.name, "Error converting audio")
Example #5
    def save(self, data):
        """
        Save mic data to a WAV file.

        @param list data
        """
        Util.log(self.name, "Saving audio...")

        # Flatten the list
        data = b''.join(data)

        # Write converted data to file
        with open(self.current_file, "wb+") as file:
            file.write(self.generate_wav(data))

        # Convert
        self.convert_to_mp3(self.current_file)
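
save() calls a generate_wav() helper that the excerpt does not include. A hedged sketch of one way it could work, using the standard-library wave and io modules and assuming the stream settings (CHANNELS, SAMPLE_WIDTH, RATE) are stored on the class:

    def generate_wav(self, raw):
        """
        Hypothetical sketch: wrap raw PCM data in an in-memory WAV container

        @param bytes raw
        @return bytes
        """
        buffer = io.BytesIO()

        with wave.open(buffer, "wb") as wav:
            wav.setnchannels(self.CHANNELS)      # e.g. 1 for mono (assumption)
            wav.setsampwidth(self.SAMPLE_WIDTH)  # e.g. 2 bytes for 16-bit samples
            wav.setframerate(self.RATE)          # e.g. 44100 Hz (assumption)
            wav.writeframes(raw)

        return buffer.getvalue()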
    def convert_to_mp4(self, path):
        """
		Convert video file to mp4 using ffmpeg

		@param string path
		"""
        try:
            Util.log(self.name, "Converting video...")
            destination = os.path.splitext(path)[0] + '.mp4'
            cmd = 'ffmpeg -i "{}" "{}" 2> /dev/null && rm "{}"'.format(
                path, destination, path)
            #cmd = 'for i in ' + self.archive + '/*.avi; do ffmpeg -i "$i" "${i%.*}.mp4" 2> /dev/null && rm "$i"; done'
            # check=True makes a failed conversion raise CalledProcessError
            subprocess.run(cmd, shell=True, check=True)

        except subprocess.CalledProcessError:
            Util.log(self.name, "Error converting video")
Example #7
    def determine_threshold(self):
        """
        Determine threshold noise intensity using RMS
        Anything below the threshold is considered silence

        @return float
        """
        Util.log(self.name, "Determining threshold...")

        res = []
        for x in range(50):
            block = self.stream.read(self.CHUNK_SIZE)
            rms = self.get_rms(block)
            res.append(rms)

        # Set threshold to 20% above average
        threshold = (sum(res) / len(res)) * 1.2
        Util.log(self.name, "Setting threshold to: " + str(threshold))

        return threshold
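
determine_threshold() builds on a get_rms() helper that is not part of the excerpt. A minimal sketch of one way to compute it, assuming 16-bit little-endian samples and numpy (imported as np elsewhere in the listing):

    def get_rms(self, block):
        """
        Hypothetical sketch: root mean square of one chunk of 16-bit samples

        @param bytes block
        @return float
        """
        samples = np.frombuffer(block, dtype=np.int16).astype(np.float64)

        if samples.size == 0:
            return 0.0

        return math.sqrt(np.mean(samples ** 2))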
Example #8
    def run(self):
        """
        Detect noise from microphone and record
        Noise is defined as sound surrounded by silence (according to threshold)
        """

        # Stores audio intensity of previous sound-chunks
        # If one of these chunks is above threshold, recording gets triggered
        # Keep the last {OBSERVER_LENGTH} seconds in observer
        observer = deque(maxlen=self.OBSERVER_LENGTH * self.CHUNKS_PER_SEC)

        # Prepend audio from before noise was detected
        # Keep the last {HISTORY_LENGTH} seconds in history
        history = deque(maxlen=self.HISTORY_LENGTH * self.CHUNKS_PER_SEC)

        Util.log(self.name, "Listening...")

        try:
            while True:
                # Current chunk of audio data
                self.chunk = self.stream.read(self.CHUNK_SIZE)
                history.append(self.chunk)

                # Add noise level of this chunk to the sliding-window
                rms = self.get_rms(self.chunk)
                observer.append(rms)

                if self.detected(any(x > self.threshold for x in observer)):
                    # There's at least one chunk in the sliding window above threshold
                    if not self.recording():
                        self.start_recording()

                    self.record.append(self.chunk)

                    if (not self.notified and
                            len(self.record) > self.NOTIFICATION_LIMIT * self.CHUNKS_PER_SEC):
                        self.notify()
                elif self.recording():
                    # Silence limit was reached, finish recording and save
                    self.save(list(history) + self.record)
                    self.stop_recording()

                    Util.log(self.name, "Listening...")
        except KeyboardInterrupt:
            Util.log(self.name, "Interrupted.")
Example #9
    def notify(self):
        """
        Notify
        """
        Util.log(self.name, "Notifying")
        self.notified = True
    def run(self):
        """
		Main worker
		"""
        observer = deque(maxlen=self.fps * self.OBSERVER_LENGTH)
        previous_frame = None

        while True:
            # Grab a frame
            (grabbed, self.current_frame) = self.source.read()

            # End of feed
            if not grabbed:
                break

            # Gray frame
            frame_gray = cv2.cvtColor(self.current_frame, cv2.COLOR_BGR2GRAY)

            # Blur frame
            frame_blur = cv2.GaussianBlur(frame_gray, (21, 21), 0)

            # If there's no previous frame, use the current one
            if previous_frame is None:
                previous_frame = frame_blur
                continue

            # Delta frame
            delta_frame = cv2.absdiff(previous_frame, frame_blur)

            # Threshold frame
            threshold_frame = cv2.threshold(delta_frame, 15, 255,
                                            cv2.THRESH_BINARY)[1]

            # Dilate the thresholded image to fill in holes
            kernel = np.ones((5, 5), np.uint8)
            dilated_frame = cv2.dilate(threshold_frame, kernel, iterations=4)

            # Percentage of changed (non-zero) pixels in the frame
            res = dilated_frame.astype(np.uint8)
            movement = (np.count_nonzero(res) * 100) / res.size

            # Add movement percentage to observer
            observer.append(movement)

            if self.do_add_contours or self.do_add_target:
                self.current_frame, targets = self.add_contours(
                    self.current_frame, dilated_frame)

                if self.do_add_target:
                    self.current_frame = self.add_target(
                        self.current_frame, targets)

            if self.do_record and self.detected(
                    any(x > self.threshold for x in observer)):
                if not self.recording():
                    self.start_recording()

                self.writer.write(self.current_frame)
            elif self.recording():
                # Convert
                self.convert_to_mp4(self.current_file)

                # Reset all
                self.stop_recording()

                Util.log(self.name, "Observing...")

            # Set blurred frame as new previous frame
            previous_frame = frame_blur

            # Display
            if self.do_display:
                cv2.imshow("Current frame:", self.current_frame)

            # Exit on 'q'
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                break
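
The video worker above calls an add_contours() helper that the excerpt does not show. A hedged sketch of what it might do, assuming OpenCV 4 (where cv2.findContours returns two values) and an arbitrary minimum contour area of 500 px:

    def add_contours(self, frame, dilated_frame):
        """
        Hypothetical sketch: box the moving regions and return their centres

        @param ndarray frame
        @param ndarray dilated_frame
        @return (ndarray, list)
        """
        targets = []
        contours, _ = cv2.findContours(dilated_frame, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)

        for contour in contours:
            # Ignore small movements (assumed cut-off)
            if cv2.contourArea(contour) < 500:
                continue

            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            targets.append((x + w // 2, y + h // 2))

        return frame, targets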