def __init__(self,
                 capture,
                 in_file_name,
                 preview_window_manager=None,
                 should_mirror_preview=False):
        """Initialize the video processor.

        Parameters
        ----------
        capture : capture device/stream; handed to VideoManager and kept on
            ``self._capture``.
        in_file_name : source file name; also used by build_out_filename()
            to derive the output file name.
        preview_window_manager : optional manager used to display previews.
        should_mirror_preview : accepted for interface compatibility but not
            stored here — NOTE(review): confirm whether it should be kept on
            ``self`` for later use.
        """
        super(VideoProc, self).__init__()

        # initialize variables
        self.vm = VideoManager(capture, in_file_name)

        self.in_file_name = in_file_name
        self.preview_window_manager = preview_window_manager

        self._out_filename = None
        self._capture = capture
        self.channel = 0

        self.stopped = True

        self._in_frame = None
        self._out_frame = None

        # fixed output resolution
        self.out_width = 480
        self.out_height = 270

        # for numbering frames drawn onto the output
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.bottom_left_text = 10, self.out_height - 10
        self.font_scale = 1
        self.font_color = (255, 128, 128)
        self.thickness = 2

        self.counter = 0

        # Fix: the original called build_out_filename() twice (once mid-way
        # through initialization and again here); a single call suffices.
        self.build_out_filename()
    def extractData(self):
        """Read hidden data back out of the video at ``self.media_source``.

        Returns the recovered payload as a ``str``.

        NOTE(review): the main loop iterates over ``self.data`` and rebuilds
        the string with ``chr(byte)`` — it never calls an extract/read method
        on ``hot_frame``, so the "retrieved" data appears to come from the
        already-held ``self.data`` rather than from the video frames. Compare
        with the sibling ``embedData``; confirm whether a
        ``hot_frame.extract(...)`` call is missing here.
        """
        start_time = time.process_time()
        print("Initiating Data Retrieval Process")
        print("Reading Video...")
        # Stream frames from the source; the key is passed through to the
        # frame-level handling below.
        stream = VideoManager(self.media_source, self.key)
        frame = stream.getNextFrame()
        current_frame_index = stream.getCurrentFrame()
        # NOTE(review): if ``frame`` is a numpy array, ``frame.all()`` returns
        # a bool and can never equal -1, so this corruption check looks
        # ineffective. Was ``(frame == -1).all()`` intended?
        if frame.all() == -1:
            raise Exception("Video frames corrupted!")
        retrieved_data = ""
        # NOTE(review): fixed 5-second delay — purpose unclear; confirm it is
        # actually needed (process_time() below does not count it anyway).
        time.sleep(5)
        hot_frame = FrameManager(frame, self.key)
        # Byte counter; incremented per byte but otherwise unused.
        number = 1
        print("Decrypting Data...")
        for byte in self.data:
            if hot_frame.full():
                # Find the next empty frame. Exit with exception if not available
                while True:
                    if not stream.full():
                        frame = stream.getNextFrame()
                        current_frame_index = stream.getCurrentFrame()
                        if frame.all() == -1:
                            raise Exception("Video frames corrupted!")
                        hot_frame = FrameManager(frame, self.key)
                        if hot_frame.getBlobCount() != 0:
                            break
                    else:
                        raise Exception("Data storage capacity exceeded!")
            retrieved_data += chr(byte)
            number += 1
        print("Done!")
        # process_time() measures CPU time only, excluding the sleep above.
        execution_time = round(time.process_time() - start_time, 3)
        print("Completed in", execution_time, "seconds")

        return retrieved_data
    def test_can_transcribe_long_speech(self):
        """Transcribing the audio of test1.mp4 yields at least one result."""
        clip_path = os.path.join(os.path.dirname(__file__), "input", "test1.mp4")
        clip = VideoManager(clip_path)

        # Extract the audio track into a fresh temporary WAV file.
        _, wav_path = tempfile.mkstemp(".wav")
        clip.extract_audio(wav_path)

        results = list(Transcriber(AudioManager(wav_path)).transcribe())

        self.assertGreaterEqual(len(results), 1)
Esempio n. 4
0
    def test_can_extract_audio(self):
        """extract_audio returns the target path and creates the file."""
        clip_path = os.path.join(os.path.dirname(__file__), "input", "test1.mp4")
        clip = VideoManager(clip_path)
        _, wav_path = tempfile.mkstemp(".wav")

        result = clip.extract_audio(wav_path)

        self.assertEqual(wav_path, result)
        self.assertTrue(os.path.exists(result))

        # Clean up the temporary WAV file.
        os.remove(wav_path)
Esempio n. 5
0
    def test_can_extract_thumbnail(self):
        """extract_thumbnail returns the target path and creates the file."""
        clip_path = os.path.join(os.path.dirname(__file__), "input", "test2.mp4")
        clip = VideoManager(clip_path)
        _, jpg_path = tempfile.mkstemp(".jpg")

        result = clip.extract_thumbnail(jpg_path)

        self.assertEqual(jpg_path, result)
        self.assertTrue(os.path.exists(result))

        # Clean up the temporary thumbnail file.
        os.remove(jpg_path)
Esempio n. 6
0
def main() -> int:
    """Pick a random collected video URL and forward it to the webhook.

    Returns 0 on success, -1 when no URL is found or the config is unusable.
    """
    collector = VideoManager(LOGGER)
    collector.collect()

    url = collector.choose_random_video_url()
    if not url:
        return -1
    print(url)

    settings = Util.read_config("conf.json")
    if not settings or "google_api_key" not in settings:
        return -1

    call_webhook(settings, url)
    return 0
 def __init__(self,
              good_pose,
              bad_pose,
              good_bboxes,
              bad_bboxes,
              good_pose_video=None,
              bad_pose_video=None,
              weights=None):
     """Load pose arrays, bounding boxes and (optionally) the source videos."""
     # Poses come from .npy files; bbox archives are pickled, with the
     # payload stored as the first element.
     self.good_poses = npy_to_poses(good_pose)
     self.bad_poses = npy_to_poses(bad_pose)
     self.good_bboxes = np.load(good_bboxes, allow_pickle=True)[0]
     self.bad_bboxes = np.load(bad_bboxes, allow_pickle=True)[0]

     # Fall back to the class-level default weights when none are supplied.
     self.weights = list(
         self.DEFAULT_WEIGHTS.values()) if weights is None else weights

     # Videos are optional; the attributes stay None when no path is given.
     self.good_poses_video = VideoManager() if good_pose_video is not None else None
     self.bad_poses_video = VideoManager() if bad_pose_video is not None else None
     if self.good_poses_video is not None:
         self.good_poses_video.get_video(good_pose_video)
     if self.bad_poses_video is not None:
         self.bad_poses_video.get_video(bad_pose_video)
def main():
    """Censor mature words in the input video.

    Beeps the offending audio spans, masks the matching video spans, and
    prints a JSON summary (thumbnail, filtered sections, output path).
    """
    log = logging.getLogger()
    log.info("start processing %s", input_file)

    video = VideoManager(input_file)
    audio_path = video.extract_audio(settings.ROOT / "output/a.wav")
    thumb_path = video.extract_thumbnail(settings.ROOT / "output/a.jpg")

    audio = AudioManager(audio_path)
    transcriber = Transcriber(audio)

    sections = []
    for item in transcriber.transcribe():
        log.debug("transcription: %s", item)
        word, start, end = item
        if detect_mature_word(word):
            log.debug("mature word: %s, %s", word, detect_mature_word(word))
            # Beep the audio and mask the video over the same span.
            audio.apply_beep(start, end)
            video.apply_mask(start, end)
            sections.append({
                "start_time": start,
                "end_time": end,
                "word": word
            })

    # Re-attach the beeped audio track and write the censored video.
    beeped = audio.save(settings.ROOT / "output/a_beep.wav")
    video.apply_audio(beeped)
    video.save(settings.ROOT / "output/a.mp4")

    summary = {
        "thumbnail": str(thumb_path),
        "filter_sections": sections,
        "filter_video": str(settings.ROOT / "output" / "a.mp4"),
    }
    print(json.dumps(summary))
 def embedData(self):
     """Encrypt ``self.data`` and embed it byte-by-byte into the video frames.

     Each byte is written into the current frame until it is full, then the
     stream is advanced to the next usable frame; finally the modified frames
     are re-assembled into a video via ``stream.generate_video()``.
     """
     start_time = time.process_time()
     print("Encrypting Data...")
     # NOTE(review): ``crypto.mac(...)`` suggests a MAC, not encryption —
     # confirm the crypto helper's semantics match the log message.
     encrypted_data = self.crypto.mac(self.data)
     print("Reading Video...")
     stream = VideoManager(self.media_source, self.key)
     frame = stream.getNextFrame()
     current_frame_index = stream.getCurrentFrame()
     # NOTE(review): if ``frame`` is a numpy array, ``frame.all()`` returns a
     # bool and can never equal -1 — this corruption check looks ineffective
     # (same pattern as in extractData).
     if frame.all() == -1:
         raise Exception("Video frames corrupted!")
     hot_frame = FrameManager(frame, self.key)
     # Sequence number used to label the before/after debug frame dumps.
     number = 1
     print("Embedding Data...")
     for byte in encrypted_data:
         if hot_frame.full():
             # Find the next empty frame. Exit with exception if not available
             while True:
                 if not stream.full():
                     frame = stream.getNextFrame()
                     current_frame_index = stream.getCurrentFrame()
                     if frame.all() == -1:
                         raise Exception("Video frames corrupted!")
                     hot_frame = FrameManager(frame, self.key)
                     if hot_frame.getBlobCount() != 0:
                         break
                 else:
                     raise Exception("Data storage capacity exceeded!")
         # Dump the frame before and after embedding for debugging/inspection.
         hot_frame.save_frame("frame_before_" + str(number))
         hot_frame.embed(byte)
         # Write the modified frame back at its original index.
         stream.write_frame(current_frame_index, hot_frame.getFrame())
         hot_frame.save_frame("frame_after_" + str(number))
         number += 1
     print("Generating Video...")
     stream.generate_video()
     print("Done!")
     # process_time() measures CPU time only.
     execution_time = round(time.process_time() - start_time, 3)
     print("Completed in", execution_time, "seconds")
Esempio n. 10
0
 def __init__(self, input, output, max_frames, skip, show, detector):
     """Wire up the video pipeline and detector; no horse is tracked yet."""
     # NOTE: ``input`` shadows the builtin, but the parameter names are part
     # of the public interface and are kept as-is.
     self.video = VideoManager(input, output, max_frames, skip, show)
     self.detector = detector
     self.horse = None  # populated once a horse is detected
Esempio n. 11
0
import paho.mqtt.client as mqtt
import time
import serial

from video_manager import VideoManager
import sys

import struct

# RGB camera: /dev/video0, 1920x1080 @ 30 fps, streamed to
# 192.168.10.236:5000 at 2 Mbit/s; the trailing arguments presumably select
# scaling to 1280x720 — TODO confirm against VideoManager's signature.
videomanager = VideoManager("/dev/video0", 1920, 1080, 30, "192.168.10.236",
                            5000, "RGB", 2000000, True, 1280, 720)
videomanager.run()

# LWIR (thermal) camera: /dev/video1, 640x480 @ 25 fps, streamed to
# 192.168.10.236:5001 at 1 Mbit/s, no scaling.
videomanager_lwir = VideoManager("/dev/video1", 640, 480, 25, "192.168.10.236",
                                 5001, "LWIR", 1000000, False, 640, 480)
videomanager_lwir.run()

# Timestamp of the last handled MQTT message (updated by the handlers below).
last_message = 0.0

# Serial link to the gimbal controller.
ser = serial.Serial(port='/dev/ttyTHS1', baudrate=38400)

ser.isOpen()  # NOTE(review): isOpen() only reports state; its result is discarded


def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: log the result code and (re)subscribe.

    Subscribing in on_connect (rather than once at startup) means the
    subscription is renewed automatically after a reconnect.
    """
    # Fix: the original message read "Conencted with result code<rc>"
    # (typo plus missing space before the code).
    print("Connected with result code " + str(rc))
    client.subscribe("gimbal/#")


def on_message(client, userdata, msg):
    if (msg.topic == 'gimbal/cameras/rgb/zoom'):
Esempio n. 12
0
 def __init__(self, input, output, max_frames, skip, show, detector):
     """Set up the video pipeline, detector and an empty horse registry."""
     # NOTE: ``input`` shadows the builtin, but the parameter names are part
     # of the public interface and are kept as-is.
     self.video = VideoManager(input, output, max_frames, skip, show)
     self.detector = detector
     # No horses tracked yet; numbering for new horses starts at 1.
     self.horses = np.array([], dtype=Horse)
     self.global_horse_number = 1
Esempio n. 13
0
    #     for r in range(height):
    #         for c in range(width):
    #             gx,gy = y_gradient[r,c], x_gradient[r,c]
    #             avg_green_change = (gx+gy)/2
    #             green_channel[r,c,0] = gx
    #             # green_channel[r,c,2] = gy
    greens = green_channel[:, :, 1]
    high_greens = np.zeros(greens.shape)
    high_greens[np.where(greens > 200)] = 1
    green_channel[np.nonzero(high_greens)] = [255, 255, 255]
    green_channel[np.where(high_greens == 0)] = [0, 0, 0]
    visualize.show_image(green_channel)

    # green_channel[np.where(gradient==0)] = [255, 0, 0]


if __name__ == '__main__':
    # Work only with the first recording found in the overhead scene folder.
    clip_paths = file_utils.getPaths('scenes/overhead')
    video = VideoManager(clip_paths[0])
    frame = video.getFrame(0)

    # Detect and draw the field lines on the frame.
    lines = findFieldLines(frame)
    for line in lines:
        line.draw(frame)

    # Hash marks are located relative to the detected field lines.
    marks = findHashMarks(frame, lines)
    marks.draw(frame)
    visualize.show_image(frame)