    for cam_id in cameras
}
json_locaizations_file = os.path.join(options.folder, 'p{:03d}g{:02d}_3d.json'.format(
    person_id, gesture_id))

if not all(
        map(os.path.exists,
            list(video_files.values()) + list(json_files.values()) + [json_locaizations_file])):
    log.critical('Missing one of video or annotations files from PERSON_ID {} and GESTURE_ID {}',
                 person_id, gesture_id)

size = (2 * options.cameras[0].config.image.resolution.height,
        2 * options.cameras[0].config.image.resolution.width, 3)
full_image = np.zeros(size, dtype=np.uint8)

video_loader = MultipleVideoLoader(video_files)
# load annotations
annotations = {}
for cam_id, filename in json_files.items():
    with open(filename, 'r') as f:
        annotations[cam_id] = json.load(f)['annotations']
# load localizations
with open(json_locaizations_file, 'r') as f:
    localizations = json.load(f)['localizations']

plt.ioff()
fig = plt.figure(figsize=(5, 5))
ax = Axes3D(fig)

update_image = True
output_file = 'p{:03d}g{:02d}_output.mp4'.format(person_id, gesture_id)
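
# Illustrative sketch (not from the original script): `full_image` above is allocated as
# 2*h x 2*w x 3, which suggests the four camera frames are tiled into a 2x2 mosaic.
# The tiling order below (cameras 0 and 1 on the top row, 2 and 3 on the bottom) and the
# assumption that each frame has shape (h, w, 3) are only for illustration.
def tile_cameras(frames, mosaic):
    h, w = mosaic.shape[0] // 2, mosaic.shape[1] // 2
    for cam_id, frame in frames.items():
        row, col = cam_id // 2, cam_id % 2
        mosaic[row * h:(row + 1) * h, col * w:(col + 1) * w] = frame
    return mosaic
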
class FramesLoader:
    def __init__(self, video_list, fps=10.0):
        """Given a list a video, uses MultipleVideoLoader object to loaded all videos and
        sample at spefic fps on a diferent thread.
        Args:
            video_list (dict): contais the path of all four video where the key identifies
            the ID of the camera.
            fps (float): frequency of sample of the video.
        """
        self.log = Logger("VideoLoaderThread")

        if len(video_list) == 0:
            self.log.warn("You are trying to initialize with a empty list of videos")

        self.video_loader = MultipleVideoLoader(video_list)
        self.num_samples = self.video_loader.n_frames()
        self.count_sample = 0
        self.fps = fps
        info = {
            "total_samples": self.num_samples
        }
        self.log.info('{}', str(info).replace("'", '"'))
        self.run = True
        self.queue = queue.Queue()
        self.thread = threading.Thread(target=self._reader, daemon=True)
        self.thread.start()

    def _reader(self):
        """Samples the videos and stores the frames in a Queue object.
        This method is the target of the reader thread.
        """
        while self.run:
            t_o = time.time()
            _ = self.video_loader.load_next()
            frames = self.video_loader[0]
            if frames is not None:
                self.count_sample += 1
                # keep only the most recent sample: drop any unread item before putting
                if not self.queue.empty():
                    try:
                        self.queue.get_nowait()
                    except queue.Empty:
                        pass
                self.queue.put([self.count_sample, frames])
                self.video_loader.release_memory()
            if self.count_sample == self.num_samples:
                self.run = False
                break
            # sleep for the remainder of the sampling period to keep the requested fps
            t_f = time.time()
            took_s = t_f - t_o
            dt = (1 / self.fps) - took_s
            if dt > 0:
                time.sleep(dt)

    def read(self):
        """Returns what it is stored at the Queue.
        That is a list, where the first position is the frame_id and
        the second position is a dictionary containing frames from all cameras at same time
        """
        return self.queue.get()

    def release(self):
        """Finish the thread
        """
        self.run = False
        with self.queue.mutex:
            self.queue.queue.clear()
        self.thread.join()
        self.video_loader.release_memory()
        del self.video_loader
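
# Usage sketch (illustrative, not part of the original example). The video paths are
# assumptions; read() blocks until the reader thread publishes the next
# [frame_id, frames] pair, where frames maps camera IDs to images.
if __name__ == '__main__':
    videos = {0: 'cam0.mp4', 1: 'cam1.mp4', 2: 'cam2.mp4', 3: 'cam3.mp4'}
    loader = FramesLoader(videos, fps=10.0)
    while True:
        frame_id, frames = loader.read()
        # ... consume the synchronized frames here ...
        if frame_id >= loader.num_samples:
            break
    loader.release()
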
import cv2
import numpy as np
from sys import exit
import json
import shutil
import time
from collections import OrderedDict
from is_wire.core import Logger
from video_loader import MultipleVideoLoader

log = Logger(name='DisplayGestures')

with open('gestures.json', 'r') as f:
    gestures = json.load(f)
    gestures = OrderedDict(sorted(gestures.items(), key=lambda kv: int(kv[0])))

vl = MultipleVideoLoader({0: 'gestures_.MOV'})

it = 0
labels = set([0, vl.n_frames() - 1])
n_labels = 2 + 2 * len(gestures)
while True:
    n_loaded = vl.load_next()

    image = np.copy(vl[it][0])
    if it in labels:
        cv2.circle(image, (20, 20), 5, (255, 0, 0), 2, -1)
    cv2.imshow('', image)

    key = cv2.waitKey(1)

    if key == -1:
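        # (assumption, not in the original excerpt) cv2.waitKey returns -1 when no key
        # was pressed within the timeout, so keep polling.
        continue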
        options.folder,
        'p{:03d}g{:02d}c{:02d}_2d.json'.format(person_id, gesture_id, cam_id))
    for cam_id in cameras
}
if not all(
        map(os.path.exists,
            list(video_files.values()) + list(json_files.values()))):
    log.critical(
        'Missing one of video or annotations files from PERSON_ID {} and GESTURE_ID {}',
        person_id, gesture_id)

size = (2 * options.cameras[0].config.image.resolution.height,
        2 * options.cameras[0].config.image.resolution.width, 3)
full_image = np.zeros(size, dtype=np.uint8)

video_loader = MultipleVideoLoader(video_files)
# load annotations
annotations = {}
for cam_id, filename in json_files.items():
    with open(filename, 'r') as f:
        annotations[cam_id] = json.load(f)['annotations']

fourcc = cv2.VideoWriter_fourcc(*'XVID')
out0 = cv2.VideoWriter('cam0.avi', fourcc, 7.0, (1288, 728))
out1 = cv2.VideoWriter('cam1.avi', fourcc, 7.0, (1288, 728))
out2 = cv2.VideoWriter('cam2.avi', fourcc, 7.0, (1288, 728))
out3 = cv2.VideoWriter('cam3.avi', fourcc, 7.0, (1288, 728))

update_image = True
it_frames = 0
while True:
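    # Plausible continuation (the original loop body is truncated here): load the next
    # set of frames, write each camera view to its VideoWriter, and stop once every
    # frame has been written. It assumes the camera IDs are 0-3 and mirrors the writer
    # size above; drawing the annotations onto the frames is omitted from this sketch.
    video_loader.load_next()
    if it_frames >= video_loader.n_frames():
        break
    frames = video_loader[it_frames]
    for cam_id, writer in enumerate([out0, out1, out2, out3]):
        writer.write(cv2.resize(frames[cam_id], (1288, 728)))
    it_frames += 1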