class CameraStream:
    def __init__(self, camera=0, video_file=None, video=False):
        from cv2 import VideoCapture

        if video:
            # Use a video file as input.
            self.stream = VideoCapture(video_file)
        else:
            # Use a camera as input.
            self.stream = VideoCapture(camera)

        # Check that the stream opened successfully.
        if not self.stream.isOpened():
            name = video_file if video else camera
            raise IOError("Error opening video stream or file '{}'".format(name))

    def __del__(self):
        """ Destructor to close everything. """
        # When everything is done, release the video capture object.
        self.stream.release()

    def get_frame(self):
        """ Retrieve frame. """
        # Read until video is completed
        while(self.stream.isOpened()):
            # Capture frame-by-frame
            ret,frame = self.stream.read()
            if(ret == True):
                yield frame
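
A minimal usage sketch for the class above (a hypothetical webcam at index 0; get_frame is a generator, so it is iterated rather than called once per frame):

stream = CameraStream(camera=0)
for frame in stream.get_frame():
    print(frame.shape)  # e.g. (480, 640, 3)
    break  # stop after the first frame
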
def video_to_frames(video: cv2.VideoCapture,
                    out_path,
                    start=0,
                    end=-1,
                    step=24):
    if not video.isOpened():
        print('Video capture is not opened')
        return []

    video.set(cv2.CAP_PROP_POS_FRAMES, start)

    if end == -1:
        end = video.get(cv2.CAP_PROP_FRAME_COUNT)

    paths = []
    f_count = 0
    while video.isOpened() and video.get(cv2.CAP_PROP_POS_FRAMES) < end:
        ret, img = video.read()
        if not ret:
            break
        path = os.path.join(out_path, f'video-{f_count}.png')
        # question_2.write_image is a project-local helper that saves the frame.
        question_2.write_image(img, path)
        paths.append(path)
        f_count += 1
        current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
        video.set(cv2.CAP_PROP_POS_FRAMES, current_frame + step)

    return paths
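
A hedged usage sketch for video_to_frames (the cv2 and os imports are assumed, question_2.write_image is the project-local helper used above, and 'input.mp4' is a hypothetical file):

import cv2

cap = cv2.VideoCapture('input.mp4')
frame_paths = video_to_frames(cap, out_path='frames', step=24)
cap.release()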
Example #3
def open_camera(cam='/dev/psEye', w=320, h=240, fps=100):
    """
    Opens camera and configures height, width, and fps settings.

    INPUTS
        (optional)
        cam -- str/int -- camera descriptor for VideoCapture; '/dev/psEye'
        w   -- int     -- width (px) to set camera frame; 320
        h   -- int     -- height (px) to set camera frame; 240
        fps -- int     -- frames per second to set camera; 100

    OUTPUTS
        cv2.VideoCapture object

    EXCEPTIONS
        Raises RuntimeError when unable to open camera
    """
    if not isinstance(cam, (str, int)):
        raise TypeError("'cam' must be int or str")
    if not isinstance(w, int):
        raise TypeError("'w' must be int")
    if not isinstance(h, int):
        raise TypeError("'h' must be int")
    if not isinstance(fps, int):
        raise TypeError("'fps' must be int")
    cap = VideoCapture(cam)
    if not cap.isOpened():
        cap.release()
        raise RuntimeError("failed to open camera '%s'" % cam)
    cap.set(CAP_PROP_FRAME_HEIGHT, h)
    cap.set(CAP_PROP_FRAME_WIDTH, w)
    cap.set(CAP_PROP_FPS, fps)
    return cap
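
Usage might look like this sketch (the bare cv2 names the function relies on are imported here; camera index 0 stands in for the original '/dev/psEye' device):

from cv2 import (VideoCapture, CAP_PROP_FRAME_HEIGHT,
                 CAP_PROP_FRAME_WIDTH, CAP_PROP_FPS)

cap = open_camera(cam=0, w=320, h=240, fps=30)
ok, frame = cap.read()
cap.release()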
Example #4
class Camera(object):
    def __init__(self, cam_num=0):
        self.cam_num = cam_num
        self.cap = None
        self.initialized = False
        self.last_frame = None
        self.data = ""

    def initialize(self):
        self.cap = VideoCapture(self.cam_num)
        if not self.cap.isOpened():
            raise Exception("Could not open video device")
        self.initialized = True

    def is_initialized(self):
        return self.initialized

    def get_frame(self):
        _, self.last_frame = self.cap.read()
        if self.last_frame is not None:
            cvtColor(self.last_frame, COLOR_BGR2RGB, self.last_frame)
        return self.last_frame

    def read_qrcode(self, frame):
        detector = QRCodeDetector()
        self.data, _, _ = detector.detectAndDecode(frame)
        return self.data

    def close_camera(self):
        self.cap.release()
        self.initialized = False

    def __str__(self):
        return 'OpenCV Camera {}'.format(self.cam_num)
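
A brief usage sketch for the Camera class (assumes VideoCapture, cvtColor, COLOR_BGR2RGB, and QRCodeDetector were imported from cv2):

cam = Camera(0)
cam.initialize()
frame = cam.get_frame()
if frame is not None:
    print(cam.read_qrcode(frame))  # empty string when no QR code is found
cam.close_camera()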
Example #5
class FileVideoSource(VideoSource):
    '''
    Load a video file as video source, for test or flight replay purposes.
    '''

    def __init__(self, fileName, parent = None):
        super().__init__(parent)
        self.cap = VideoCapture(fileName)
        self.frameRate = self.cap.get(CAP_PROP_FPS)
        self.__delay = 1.0 / self.frameRate

    def run(self):
        self.running = True
        while self.cap.isOpened():
            if self.pause:
                sleep(0.01)  # avoid a busy-wait while paused
                continue
            _s0 = time()
            ret, frame = self.cap.read()
            if not ret:
                break  # end of file: stop the replay loop
            rgbImage = cvtColor(frame, COLOR_BGR2RGB)
            h, w, ch = rgbImage.shape
            bytesPerLine = ch * w
            convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
            self.newFrameAvailable.emit(convertToQtFormat)
            _s0 = time() - _s0
            sleep(0 if _s0 >= self.__delay else self.__delay - _s0)
        self.running = False
        self.__cleanup()

    def __cleanup(self):
        self.cap.release()
Example #6
def WebCam_OnOff(device_num: int, cam: cv2.VideoCapture = None):
    """
    WebCameraを読み込む関数

    Args:
        device_num(int): カメラデバイスを番号で指定
            0:PC内臓カメラ
            1:外部カメラ
        cam(cv2.VideoCapture optional): 接続しているカメラ情報

    Returns:
        response(int): 動作終了を表すフラグ
            0: connect
            1: release
            2: NotFound
        capture(cv2.VideoCapture): 接続したデバイス情報を返す
            cv2.VideoCapture: connect
            None: release or NotFound
    """
    if cam is None:  # カメラが接続されていないとき
        cam = cv2.VideoCapture(device_num)
        # カメラに接続できなかった場合
        if not cam.isOpened():
            return 2, None
        # 接続できた場合
        else:
            return 0, cam

    else:  # カメラに接続されていたとき
        cam.release()
        return 1, None
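
A sketch of toggling the camera with the same call, using the status codes documented above:

status, cam = WebCam_OnOff(0)               # connect: status == 0
if status == 0:
    status, cam = WebCam_OnOff(0, cam=cam)  # release: status == 1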
Example #7
def draw_speed(video_path: str, speed_path: str, output_video: str) -> None:
    reader = VideoCapture(video_path)
    # NOTE: frames are written at a fixed 20 fps and must match (640, 480).
    writer = VideoWriter(
        output_video,
        VideoWriter_fourcc(*"mp4v"),
        20,
        (640, 480),
    )
    data = loadtxt(speed_path, delimiter="\n", dtype="float32")

    frame_id = 0
    while reader.isOpened():
        ret, frame = reader.read()
        if not ret:
            break

        putText(
            frame,
            f"{data[frame_id]:0.3f}",
            (250, 420),
            FONT_HERSHEY_SIMPLEX,
            0.7,
            (255, 255, 255),
            2,
        )
        writer.write(frame)

        frame_id += 1
        if frame_id == data.shape[0]:
            break

    reader.release()
    writer.release()
Example #8
def captureCamera(camera: cv2.VideoCapture,
                  tag: str = None,
                  resolution: list = None):
    resolution = [
        512, 512
    ] if resolution is None else resolution  # default parameter
    frame = np.zeros((512, 512, 3), np.uint8)  # empty frame
    DATA[tag] = [True, "", "", frame]  # initial camera results
    CAMERA_SETTINGS[tag] = {"HSV": False}  # default camera settings
    yield None  # end of the camera-generator initialisation

    if argv.debug and argv.show:
        cv2.namedWindow(tag)
        cv2.setMouseCallback(tag, mouseCallback, param=tag)

    while camera.isOpened():
        if DATA[tag][0]:
            flag, frame = camera.read()

            if flag:
                DATA[tag] = [
                    True,
                    getLetterFromFrame(frame),
                    getColorFromFrame(frame), frame
                ]

        if argv.show:
            frame = cv2.resize(frame, CAMERA_SIZE)
            cv2.resizeWindow(tag, *CAMERA_SIZE)
            if argv.debug and CAMERA_SETTINGS[tag]["HSV"]:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                DATA[tag][3] = frame
            cv2.imshow(tag, frame)

        yield None
Example #9
class VideoStream(object):
    def __init__(self, device, width, height):
        self.capture = VideoCapture(device)
        assert self.capture.isOpened(), 'failed to open stream %s' % device
        # cv2.cv.CV_CAP_PROP_* is the legacy OpenCV 2.x constant namespace;
        # on OpenCV 3+ the equivalents are cv2.CAP_PROP_FRAME_WIDTH/HEIGHT.
        self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
        self.texture = pyglet.gl.GLuint()
        glGenTextures(1, self.texture)
        glBindTexture(GL_TEXTURE_2D, self.texture)
        glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
                        GL_LINEAR_MIPMAP_LINEAR)

    def next(self):
        status, frame = self.capture.read()
        if status:
            width = frame.shape[1]
            height = frame.shape[0]
            glBindTexture(GL_TEXTURE_2D, self.texture)
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR,
                         GL_UNSIGNED_BYTE, frame.ctypes.data)

    def bind(self):
        glBindTexture(GL_TEXTURE_2D, self.texture)
Example #10
    def __to_save_img_seq(self, video_capture: VideoCapture, video_name: str):
        # Frame counter
        i = 0

        # Fail fast if no video name was provided.
        if video_name is None:
            raise Exception(
                'VideoHandlerError: if video_to_image_sequence receives "save_sequences=True" then "video_name" must also receive a value.'
            )

        # To be 100% sure that we get the filename without the extension.
        video_name = os_path_splitext(video_name)[0]
        image_sequence_dir = os_path_join(self.im_seqs_dir, video_name)
        while video_capture.isOpened():
            ret, frame = video_capture.read()
            if ret:
                os_makedirs(image_sequence_dir, exist_ok=True)
                frame_name = '{0:06}'.format(i + 1) + '_' + video_name + '.jpg'
                # Save image
                cv2_imwrite(os_path_join(image_sequence_dir, frame_name),
                            frame)
                # wait 1ms and make another check before 'breaking'
                if cv2_waitKey(1) & 0xFF == ord('q'):
                    break
                i += 1
            else:
                break

        print('Total frames of sequence read:', i)
Example #11
def extractFrames(frames, src, dst):
    reader = VideoCapture(src)

    frame_num = 1

    while reader.isOpened():
        running, frame = reader.read()
        if not running:
            break
        if frame_num > frames:
            break

        # Extract the largest detected face, with a 25-pixel margin.
        loc = face_recognition.face_locations(frame)

        if len(loc) == 0:
            face = frame
        else:
            # Sort detections by area, largest first.
            loc = sorted(
                loc,
                key=cmp_to_key(lambda x, y: (y[2] - y[0]) * (y[1] - y[3]) -
                               (x[2] - x[0]) * (x[1] - x[3])))
            face = frame[loc[0][0] - 25:loc[0][2] + 25,
                         loc[0][3] - 25:loc[0][1] + 25]

        face = cv2.resize(face, (299, 299))
        imwrite(join(dst, '%d.jpg' % frame_num), face)
        frame_num += 1

    reader.release()
Example #12
    def run(self):
        print("starting", flush=True)
        print("bueno")
        model = TFModel(resource_path("output_graph.pb"), resource_path("output_labels.txt"), "Placeholder",
                        "final_result")
        print("model loaded")
        # self.label_load.setText("Loading: Model")
        count = 0
        available = []
        while True:
            test = VideoCapture(count)
            if test is None or not test.isOpened():
                break
            test.release()  # release the probe capture so the device stays usable
            available.append(count)
            count += 1
        print(count)
        if count > 0:
            vid = VideoCapture(0)
            _, frame = vid.read()

            load = rt.Recoginize(model)
            load.daemon = True
            load.start()
            load.predict(frame)
            from time import sleep
            sleep(1)
            load.predict("kill")
            load.join()
            vid.release()
            print("model loaded")
            # self.label_load.setText("Loaded Model")
        print("donezo")
        # self.label_load.setText("Finalizing")
        self.progress.emit(model, len(available))
Example #13
def main(filename, win_size, win_name, screen_fit=False, speed=1):
    '''
    opens window and responds to user input
    closes when user presses "q"
    '''
    cap = VideoCapture(filename)
    namedWindow(win_name, 1)

    follow = None

    while cap.isOpened():
        if follow == 'q':
            break
        if follow in binds:
            follow = play(binds[follow][0],
                          binds[follow][1],
                          cap,
                          win_size=win_size,
                          win_name=win_name,
                          speed=speed,
                          screen_fit=screen_fit)
        else:
            follow = wait(cap,
                          win_size=win_size,
                          win_name=win_name,
                          screen_fit=screen_fit)

    cap.release()
    destroyAllWindows()
Example #14
    def video2imgs(self, video, size):

        from cv2 import VideoCapture
        from cv2 import cvtColor, resize
        from cv2 import COLOR_BGR2GRAY
        from cv2 import INTER_AREA

        img_list = []

        # Create a VideoCapture object from the given file.
        cap = VideoCapture(video)

        # isOpened() returns True once the capture is initialised,
        # so this is effectively a "while True" loop over the video.
        while cap.isOpened():
            # cap.read() returns:
            #   ret   -- whether a frame was read
            #   frame -- the image, a numpy.ndarray
            ret, frame = cap.read()
            if ret:
                # Convert to grayscale; skipping this step would yield a
                # colour character-art video instead.
                gray = cvtColor(frame, COLOR_BGR2GRAY)

                # Resize so the character art fits the terminal.
                img = resize(gray, size, interpolation=INTER_AREA)

                # Store the converted frame.
                img_list.append(img)
            else:
                break

        # Release resources when finished.
        cap.release()

        return img_list
Example #15
def captureImage(image_dir, number_of_images=3):
    img_files_count = len([
        name for name in os.listdir(image_dir)
        if os.path.isfile(os.path.join(image_dir, name))
    ])

    image_files = []
    for i in range(0, number_of_images):
        camera = VideoCapture(0)
        if not camera.isOpened():
            with open(LOGS_DIR + "error.log", "a") as f:
                f.write("[Error " +
                        str(datetime.now().strftime("%b %d, %Y %H:%M:%S")) +
                        "] Could not open video device\n")
            camera.release()
            continue
        return_value, image = camera.read()
        camera.release()
        if not return_value:
            continue
        img_file = datetime.now().strftime(image_dir + "/image_%d%b%y-" +
                                           str(img_files_count) + ".jpg")
        imwrite(img_file, image)
        image_files.append(img_file)
        img_files_count += 1
        sleep(3)
    return image_files
Example #16
def WebCam_OnOff(device_num: int,
                 cam: cv2.VideoCapture = None) -> "tuple[int, cv2.VideoCapture]":
    """
    Open or release a web camera.

    Args:
        device_num (int):
            camera device number
            0: built-in PC camera
            1: external camera
        cam (cv2.VideoCapture):
            handle of the currently connected camera

    Return:
        response (int):
            status flag
            0: camera released
            1: camera connected
            -1: error
        cam (cv2.VideoCapture):
            the opened device
    """
    if cam is None:  # no camera is connected yet
        cam = cv2.VideoCapture(device_num)
        # could not connect to the camera
        if not cam.isOpened():
            return -1, None
        # connected successfully
        else:
            return 1, cam

    else:  # a camera is already connected
        cam.release()
        return 0, None
Example #17
    def solveFrameDifferences(self, cap: VideoCapture, crop: List[Rect],
                              fold) -> Iterator[Frame]:
        require(cap.isOpened(), "failed to open capture")
        postprocess = lambda mat, index: self.postprocessUMat(
            self.cropUMat(mat, crop, index), index)
        if self.is_crop_debug:
            cv2NormalWin(ExtractSubtitles.WIN_LAST_IMAGE)
            cv2NormalWin(ExtractSubtitles.WIN_LAST_FRAME)
        reducer = fold(cap, crop)

        index = 0
        prev_frame, curr_frame = None, None
        unfinished, img = cap.read()
        prev_frame = postprocess(img, 0)  #< initial (prev == curr)
        while unfinished:
            curr_frame = postprocess(img, index)
            if self.is_crop_debug:
                cv2.imshow(ExtractSubtitles.WIN_LAST_IMAGE, img)
                cv2.imshow(ExtractSubtitles.WIN_LAST_FRAME,
                           curr_frame)  #< must have single title, to animate
                if cv2WaitKey() == 'q': break
            if curr_frame is not None:  # and prev_frame is not None
                try:
                    diff = cv2.absdiff(curr_frame,
                                       prev_frame)  #< main algorithm goes here
                    yield Frame(index, curr_frame, np.sum(diff))
                except cv2.error:
                    pass
            prev_frame = curr_frame
            unfinished, img = cap.read()
            index = index + 1
            reducer.accept(index)
        reducer.finish()
Example #18
def generate_flow_from_capture(capture: VideoCapture, magnitude_threshold=2
                               ) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
    # Get the first frame (init_frame is a project-local helper) and convert it to grayscale.
    frame_current = cvtColor(init_frame(capture), COLOR_BGR2GRAY)

    while capture.isOpened():
        frame_previous = frame_current

        # Get new frame and check if capture is working (detects last frame in a video file).
        status, frame_current = capture.read()
        if not status:
            break
        # Convert new frame to grayscale
        frame_current = cvtColor(frame_current, COLOR_BGR2GRAY)

        # Calculate optical flow (movement) between the previous and current frame
        flow = calcOpticalFlowFarneback(
            prev=frame_previous,
            next=frame_current,
            flow=None,
            pyr_scale=0.5,
            levels=3,
            winsize=15,
            iterations=3,
            poly_n=5,
            poly_sigma=1.2,
            flags=0
        )
        magnitude, angle = cartToPolar(flow[..., 0], flow[..., 1])

        # Ignores very small movements by setting magnitude to 0 (magnitude is in range 0.0 - 100.0)
        magnitude[magnitude < magnitude_threshold] = 0.0
        magnitude[magnitude > 100.0] = 100.0

        yield magnitude, angle
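
A usage sketch for the flow generator (assumes the bare cv2 names were imported as above and that the project-local init_frame helper is available; the file name is hypothetical):

cap = VideoCapture('flight.mp4')
for magnitude, angle in generate_flow_from_capture(cap):
    print(magnitude.mean())  # average motion magnitude for this frame pair
cap.release()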
Example #19
class OverlayBackground():
    """
    Provides the background image for the overlay
    window.
    """
    def __init__(self, config):
        """
        Initialises and configures class to provide a background image.
        Image can be a WEISS logo, a blank image, or an image from an
        OpenCV video source.
        :param config: a configuration dictionary
        :raises: RuntimeError, KeyError
        """
        self._video_loop_buffer = []
        self._logo_maker = None
        self._blank_image = None
        if "source" in config:
            self.source = VideoCapture(config.get("source"))
            if not self.source.isOpened():
                raise RuntimeError("Failed to open Video camera:" +
                                   str(config.get("source")))

            self.source_name = config.get("source")

            if "loop" in config:
                if config.get("loop"):
                    video_buffer = []
                    ret, image = self.source.read()
                    while ret:
                        video_buffer.append(image)
                        ret, image = self.source.read()

                    self._video_loop_buffer = cycle(video_buffer)
        else:
            if config.get("blank") or config.get("logo"):
                if config.get("logo"):
                    self._logo_maker = WeissLogo()
                else:
                    self._blank_image = zeros(shape=[512, 512, 3], dtype=uint8)
            else:
                raise KeyError("Configuration must contain a "
                               "video source, blank, or logo")

    def next_image(self):
        """
        Returns a background image.
        The behaviour is determined by the configuration
        dictionary used at init.
        """
        if self._video_loop_buffer:
            image = next(self._video_loop_buffer)
        elif self._logo_maker is not None:
            image = self._logo_maker.get_noisy_logo()
        elif self._blank_image is not None:
            image = self._blank_image
        else:
            _, image = self.source.read()
        return image
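
A construction sketch for OverlayBackground (WeissLogo comes from the original context; the camera index is hypothetical):

bg = OverlayBackground({"logo": True})   # noisy WEISS logo background
# bg = OverlayBackground({"source": 0})  # or a live camera / video file
image = bg.next_image()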
Example #20
def video_player(vc: cv2.VideoCapture, frame_title: str, skip_frames: int,
                 draw: Callable[[int, object], None]):
    width, height, frameCount, fps = video_properties(vc)
    duration = frameCount / fps
    minutes = int(duration / 60)
    seconds = int(duration % 60)
    spf = 1 / fps
    mspf = int(spf * 1000)
    print('Duration = ' + str(minutes) + ':' + str(seconds) + ' (' +
          str(duration) + ' seconds)')
    frame_id = 0
    while vc.isOpened():
        # Capture frame-by-frame
        ret, frame = vc.read()
        if ret:
            frame_id = frame_id + 1
            if frame_id <= skip_frames:
                continue

            video_drawer(frame_id, frame)
            draw(frame_id, frame)

            # Display the resulting frame
            cv2.imshow(frame_title, frame)
            # Press Q on the keyboard to exit
            if cv2.waitKey(mspf) & 0xFF == ord('q'):
                break
        # Break the loop
        else:
            break

    # Closes all the frames
    cv2.destroyAllWindows()
    return
Example #21
def frameiter(cap: cv2.VideoCapture, n: int = None, rgb: bool = True):
    """
    Yield frames in numpy array of shape (H, W, 3) where '3' stands for RGB,
    'H' the height and 'W' the width. The number of frames is at most ``n``.
    If ``n`` is not specified, it's default to infinity.

    :param cap: the video capture object
    :param n: at most this number of frames are to be yielded; ``n`` should be
           a nonnegative integer
    :param rgb: True to returns array with color channel RGB, otherwise BGR
    :return: the frames in numpy array
    """
    if n is None:
        n = np.inf
    else:
        n = max(0, int(n))
    yielded_count = 0
    while cap.isOpened():
        if n == yielded_count:
            break
        s, f = cap.read()
        if not s:
            break
        yielded_count += 1
        if len(f.shape) == 3 and f.shape[2] == 3 and rgb:
            f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
        yield f
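
A usage sketch for frameiter (the file name is hypothetical):

import cv2

cap = cv2.VideoCapture('clip.mp4')
for frame in frameiter(cap, n=10, rgb=True):
    print(frame.shape)  # (H, W, 3) in RGB channel order
cap.release()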
Example #22
def detect_face_cv2_webcam():
    cap = VideoCapture(0)
    classifier = CascadeClassifier('haarcascade_frontalface_default.xml')

    # Check if the webcam is opened correctly
    if not cap.isOpened():
        raise IOError("Cannot open webcam")

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = flip(frame, 1)
        gray = cvtColor(frame, COLOR_BGR2GRAY)
        faces = classifier.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            # flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        for (x, y, w, h) in faces:
            rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        imshow("Video", frame)
        if waitKey(1) == ord('q'):
            break

    cap.release()
    destroyAllWindows()
Example #23
    def find_frames_with_faces(cls,
                               cap: cv2.VideoCapture,
                               model_path: str,
                               frame_step: int = 5) -> List[dict]:

        model_path = str(model_path)
        face_cascade = cv2.CascadeClassifier(model_path)

        idx = -1
        frame_with_face_idx = []
        while cap.isOpened():
            idx += 1
            ret, frame = cap.read()

            if not ret:
                cls._close_video(cap=cap)
                return frame_with_face_idx
            elif idx % frame_step != 0:
                continue
            else:
                im = Image.fromarray(frame)
                im = cls.preprocess_im(im=im,
                                       height_offset=100,
                                       rotation=180,
                                       brightness_factor=None)

                gray = cv2.cvtColor(np.array(im), cv2.COLOR_BGR2GRAY)

                faces = face_cascade.detectMultiScale(gray, 1.1, 4)
                if len(faces) >= 1:
                    face = cls.get_largest_face(faces=faces)
                    tmp_dict = dict(idx=idx, face=face)
                    frame_with_face_idx.append(tmp_dict)

        return frame_with_face_idx
Example #24
def main(args):

	print('Extracting frames...')
	t_inicio = time.time()
	final_path = []

	ravdess_folder = args.video_folder
	save_frames_to = args.output_folder

	for actor in sorted(os.listdir(ravdess_folder)): 
		actor_folder = ravdess_folder + '/' + actor
		for video in sorted(os.listdir(actor_folder)): 
			final_path.append(actor_folder + '/' + video)

	n_videos = len(final_path)
	done = 0

	for video_path in final_path:

		total_frames_for_video = 0
		i = 1
		print(video_path)
		actor = video_path.split('/')[-2]
		video = video_path.split('/')[-1].split('.')[0]
		
		if not os.path.exists(save_frames_to + '/' + actor):
			os.makedirs(save_frames_to + '/' + actor)

		cap = VideoCapture(video_path)

		while cap.isOpened():

			ret, frame = cap.read()
			if not ret:
				break

			if i > 25:

				img = Image.fromarray(cvtColor(frame, COLOR_BGR2RGB))
				img.save(save_frames_to + '/' + actor + '/' + video + '_frame-' + str(i) + '.jpg')

			i += 1
			total_frames_for_video += 1
			# print("Frame saved: {}".format(save_frames_to + '/' + actor + '/' + video + '_frame-' + str(i) + '.jpg'))

		done += 1
		print("Processing... {}/{} ({} %)".format(done, (n_videos), round((done / (n_videos)) * 100, 2)))

		remove_until = total_frames_for_video - 25

		if total_frames_for_video > 0:
			while total_frames_for_video > remove_until:
				os.remove(save_frames_to + '/' + actor + '/' + video + '_frame-' + str(total_frames_for_video) + '.jpg')
				total_frames_for_video -= 1



	t2 = time.time()
	print('Extracting frames complete!')
	print("Total time: {}".format(t2 - t_inicio))
Example #25
class WebCamStream(object):
    """
    A class that handles threaded video streaming through a USB webcam.
    based on imutils library for python: https://github.com/jrosebr1/imutils

    """

    def __init__(self, src=0):
        """
        Initialize an object that controls a video
        stream from a USB Webcam on a separate thread.
        Args:
          src: the source for the USB webcam, 0 is the default camera
        """
        self.src = src
        self.stream = VideoCapture(src)  # initialize video source
        self.grabbed, self.frame = self.stream.read()  # grab initial frame
        self.stopped = False
        self.has_frame = Event()

    def start(self):
        """
        Start the video stream on a separate daemon thread once the
        capturing device is opened.
        """
        while not self.stream.isOpened():  # wait for I/O to come online
            sleep(8)  # wait 8 seconds between re-open attempts
            self.stream.open(self.src)  # attempt to open again
        thread = Thread(target=self.update, args=())
        thread.daemon = True  # set thread to daemon
        thread.start()  # start thread

    def update(self):
        """
        Continuously update the stream with the most recent frame
        until stopped.
        """
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()
            if self.grabbed:
                self.has_frame.set()  # notify

    def read(self):
        """
        Read the current frame in the video stream.
        Returns:
          The most recent frame captured
        """
        self.has_frame.wait()
        frame = self.frame
        self.has_frame.clear()
        return frame

    def stop(self):
        """
        Stop the video stream.
        """
        self.stopped = True
        self.stream.release()  # close capturing device
Example #26
def stream_status(stream: VideoCapture) -> Union[bool, None]:
    """Check if stream is live."""
    # TODO(xames3): Fix "XXXX is not a known member of module" warning.
    try:
        if check_internet():
            return stream is not None and stream.isOpened()
        return None
    except Exception:
        return False
Example #27
def get_video_duration(filename):
    cap = VideoCapture(filename)
    if cap.isOpened():
        rate = cap.get(CAP_PROP_FPS)               # property id 5
        frame_num = cap.get(CAP_PROP_FRAME_COUNT)  # property id 7
        cap.release()
        return frame_num / rate
    return -1
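
Usage sketch (the file name is hypothetical; a negative result means the file could not be opened):

duration = get_video_duration('clip.mp4')
if duration > 0:
    print('%.1f seconds' % duration)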
Example #28
def frameiter(cap: cv2.VideoCapture, rgb: bool = True):
    while cap.isOpened():
        s, f = cap.read()
        if not s:
            break
        # Only 3-channel BGR frames need the colour conversion.
        if len(f.shape) == 3 and f.shape[2] == 3 and rgb:
            f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
        yield f
Example #29
    def __init__(self,
                 video: cv.VideoCapture,
                 window_size: int,
                 predictions: PredictionList,
                 true_actions: ActionList,
                 label_dict: ActionLabels,
                 target_fps: int = 30,
                 *args):
        assert video.isOpened()
        self._video: cv.VideoCapture = video
        self._window_size: int = window_size
        self._predictions: PredictionList = predictions
        self._true_actions: ActionList = true_actions
        self._label_dict: ActionLabels = label_dict
        self._target_fps: int = target_fps
        self._fps: int = target_fps

        # Precompute the prediction performance (in correct frames percentage) for each prediction model
        local_predictions, remote_predictions, fusion_predictions = zip(
            *predictions)
        self._local_correct_frame_counts: list[
            float] = self._compute_correct_frame_percentages(
                local_predictions, true_actions)
        self._remote_correct_frame_counts: list[
            float] = self._compute_correct_frame_percentages(
                remote_predictions, true_actions)
        self._fusion_correct_frame_counts: list[
            float] = self._compute_correct_frame_percentages(
                fusion_predictions, true_actions)

        # Extract video meta information
        self._frame_count: int = int(video.get(cv.CAP_PROP_FRAME_COUNT))

        # Initialize playing state
        self._playing: bool = False
        self._frame_id: int = 0

        # Setup main window
        self._main_window = MainWindow(*args)
        self._main_window.set_frame_count(self._frame_count)
        self._main_window.resize_labels_to_required_size(
            list(label_dict.values()))
        self._main_window.setup_framerate(1, target_fps * 3, self._fps)
        self._main_window.playButton.clicked.connect(self._play_or_pause)
        self._main_window.restartButton.clicked.connect(self._restart)
        self._main_window.frameScrollbar.valueChanged.connect(
            self._jump_to_frame)
        self._main_window.framerateSlider.valueChanged.connect(
            self._adjust_fps)

        # Next frame cache
        self._next_frame_cache: Optional[tuple[bool, np.ndarray]] = None

        # Setup and start frame timer
        self._timer: QTimer = QTimer()
        self._timer.timeout.connect(self._timeout)
        interval = int(1000.0 / float(self._fps))
        self._timer.start(interval)
Example #30
class CV2Capture(Iterator[Image]):
    def __init__(self, capture_params: Iterable):
        self._cap = VideoCapture(*capture_params)
        assert self._cap.isOpened()

    #     self._cap.set(CAP_PROP_BUFFERSIZE, 0)

    def __next__(self) -> Image:
        success, frame = self._cap.read()

        if success:
            return frame

        raise StopIteration()

    def __del__(self):
        if self._cap.isOpened():
            self._cap.release()
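
An iteration sketch for CV2Capture (capture_params is unpacked into VideoCapture, so (0,) opens the default camera):

for frame in CV2Capture((0,)):
    print(frame.shape)
    break  # stop after the first frame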
Example #31
def capture():
    # open the camera
    cam = VideoCapture(0)
    if not cam.isOpened():
        LOGGER.debug('FAILED to open camera!!!')
        return None

    # read a burst of frames so the camera can settle; keep the last one
    for i in range(100):
        status, img = cam.read()

    cam.release()
    if not status:
        LOGGER.debug('FAILED to capture image!!!')
        return None
    return img
Example #32
#! /usr/bin/python

from cv2 import VideoCapture,imshow,destroyAllWindows,waitKey,pyrDown,imread,pyrUp,GaussianBlur
from glasses import _putglass_
from moustache import _putmoustache_
from itertools import count

backgroundTreshold = 35

beach = imread('manzara640.png', -1)

video_capture = VideoCapture(0)

if not video_capture.isOpened():
    exit('The Camera is not opened')

counter = count(1)

for i in range(0, 50):
    ret, background = video_capture.read()
    imshow("Video", background)
    waitKey(10)
    print("!!! Step out of the frame !!!")
    print("Background will be detected in %d seconds" % (7 - i))
ret, background = video_capture.read()
background = pyrDown(background)
#background = pyrDown(background)

while True:
    print("Iteration %d" % next(counter))
Example #33
def captureTStamp(files, duration, cod,  fps=0, verbose=True):
    '''
    Records video from the address given in `files` for `duration` minutes,
    saving it to the indicated file together with files holding the
    timestamps of each frame.

    files = [url, saveVideoFile, saveDateFile, saveMillisecondFile]
    duration = time in minutes
    cod = codec
    fps = frames per second for the video to be saved
    verbose = print messages to screen

    If fps=0 the value is read from the capture; for the FE camera it must be
    specified explicitly.

    Written for OpenCV '2.4.9.1'.

    Examples
    --------

    from cameraUtils import captureTStamp

    # for the FE camera
    duration = 1 # in minutes
    files = ['rtsp://192.168.1.48/live.sdp',
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_tsFrame.txt"]
    fpsCam = 12
    cod = 'XVID'

    captureTStamp(files, duration, cod, fps=fpsCam)

    # %% for the PTZ camera
    duration = 0.2 # in minutes
    files = ["rtsp://192.168.1.49/live.sdp",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_tsFrame.txt"]

    fpsCam = 20
    cod = 'XVID'

    captureTStamp(files, duration, cod, fpsCam)

    '''
    
    fcc = fourcc(*cod)  # video codec
    
    if verbose:
        print(files)
        print("Duration",duration,"minutes")
        print("fps",fps)
        print("codec",cod)
    
    # Initialisation
    tFin = datetime.datetime.now() + datetime.timedelta(minutes=duration)

    ts = list()  # capture timestamps

    # open the capture, retrying until it succeeds
    cap = VideoCapture(files[0])
    while not cap.isOpened():
        cap = VideoCapture(files[0])
    
    print("capture opened")
	# configurar writer
    w = int(cap.get(frame_width))
    h = int(cap.get(frame_height))
    if not fps:
        fps = cap.get(prop_fps)
    #para fe especificar los fps pq toma cualquier cosa con la propiedad
    
    out = VideoWriter(files[1], fcc, fps, (w, h), True)
    
    if verbose:
        print("capture open",cap.isOpened())
        print("frame size",w,h)
        print("output opened",out.isOpened())
    
    if not out.isOpened() or not cap.isOpened():
        out.release()
        cap.release()
        # exit function if unable to open cap or out
        return
    
    s0 = getsize(files[1]) # initial filesize before writing frame
    # First capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("first frame captured")
    # Second capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("second frame captured")
    # Third capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("third frame captured")
    
    s1 = getsize(files[1])  # size after saving 3 frames
    
    if s1==s0:
        out.release()
        cap.release()
        print("error when saving 3 frames, exiting")
        return 1 # error while saving first frame to file
    print(tFin)
    # loop
    while (t <= tFin):
        ret, frame = cap.read()
        
        if ret:
            t = datetime.datetime.now()
            ts.append(t)
            out.write(frame)
            if verbose:
                print(tFin,t)
                print("seconds elapsed",cap.get(pos_msec)/1000)
                print(frame.size)

    # end of loop
    
    # release and save
    out.release()
    cap.release()
    
    if verbose:
        print('loop exited, cap, out released, times saved to files')
        
    savetxt(files[2],ts, fmt= ["%s"])
    
    return 0  # success
Example #34
class camera(object):
    '''
    Object containing camera information
    Call-able, retrieve current frame in camera buffer
    
    User accessible attributes:
        device        system device number
        resolution    camera resolution
        BGRimage      image in BGR format
        HSVimage      image in HSV format
        RGBimage      image in RGB format
        FPS           camera speed in FPS
        
    User accessible methods:
        close         close camera device
    '''
    def __init__(self, cam_num = -1, resolution = (640, 480)):
        '''
        create camera object
            cam_num            device number (integer)
            resolution         image resolution (tuple width x height)
        '''
        self.device = cam_num
        self.resolution = resolution
        self.BGRimage = []
        self.HSVimage = []
        self.RGBimage = []
        self.FPS = [0, 0]
        self.__avr = 0
        #assign and open device
        self.__capture = VideoCapture(cam_num)
        self.__capture.set(CV_CAP_PROP_FRAME_WIDTH,resolution[0])
        self.__capture.set(CV_CAP_PROP_FRAME_HEIGHT,resolution[1])
        # VideoCapture(cam_num) above already opens the device, so no explicit open() call is needed
        self.__flag = False
        t0 = time()
        self.__flag, self.BGRimage = self.__capture.read()
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = self.FPS[0]
        self.__avr = self.FPS[0]
        print "camera", self.device, "ready @", self.FPS[0], "fps"
        return
    def __call__(self, frame_delay = 0, fast = False):
        '''
        retrieve current frame in camera buffer
            frame_delay        delay the frame decoding (integer)
            fast               if true, don't decode image to RGB format (boolean)
        '''
        #set timer to measure fps
        self.__avr = self.FPS[1]
        t0 = time()
        #try to retrieve current frame
        while not self.__flag:
            if frame_delay > 0:
                for i in range(frame_delay + 1):
                    self.__capture.grab()
                self.__flag, self.BGRimage = self.__capture.retrieve()
                del i
            else:
                self.__flag, self.BGRimage = self.__capture.read()
        self.__flag = False
        #decode bgr format to hsv
        self.HSVimage = cvtColor(self.BGRimage, CV_BGR2HSV)
        if fast:
            self.FPS[0] = 1/(time()-t0)
            self.FPS[1] = (self.FPS[0]+self.__avr)/2
            return
        #decode bgr format to rgb
        self.RGBimage = cvtColor(self.BGRimage, CV_BGR2RGB)
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = (self.FPS[0]+self.__avr)/2
        return
    def __str__(self):
        '''
        return camera information;
            device number
            device resolution
            instant speed
            average speed
        '''
        tmp = "camera object @ dev "+str(self.device)+", resolution: "+str(self.resolution)
        tmp = tmp +", fps: "+str(self.FPS[0])+", Avr. fps: "+str(self.FPS[1])
        return tmp
    def __del__(self):
        '''
        when the object is deleted, it closes the device
        '''
        self.close()
        return
    def close(self):
        '''
        close device, making it available to use 
        '''
        #if the device is open then close it
        if self.__capture.isOpened():
            self.__capture.release()
            print "camera", self.device, "closed"
        return