def read_frames_at_indices(path: str, capture: cv2.VideoCapture, frame_idxs: np.ndarray) -> Optional[np.ndarray]:
    """Decode only the requested frames from an already-opened video capture.

    Grabs every frame from frame_idxs[0] through frame_idxs[-1] (grab() is
    cheap) but retrieves/decodes only the frames whose index appears in
    frame_idxs. Assumes frame_idxs is sorted ascending and that `capture` is
    positioned at frame_idxs[0] — caller's responsibility; TODO confirm.

    Args:
        path: Source path, used only in diagnostic messages.
        capture: Opened cv2.VideoCapture to read from.
        frame_idxs: Sorted frame indices to decode.

    Returns:
        Array of shape (n_decoded, H, W, 3) in RGB channel order, or None
        when no frame could be read (or no index was requested).
    """
    # Fix: an empty index array previously raised IndexError on frame_idxs[0],
    # which the broad except below mis-reported as a generic read failure.
    if len(frame_idxs) == 0:
        print('No frames have been read from %s' % path)
        return None
    try:
        frames = []
        next_idx = 0
        for frame_idx in range(frame_idxs[0], frame_idxs[-1] + 1):
            ret = capture.grab()
            if not ret:
                print('Unable to grab frame %d from %s' % (frame_idx, path))
                break
            if frame_idx == frame_idxs[next_idx]:
                ret, frame = capture.retrieve()
                if not ret or frame is None:
                    print('Unable to retrieve frame %d from %s' % (frame_idx, path))
                    break
                # OpenCV decodes BGR; convert once here so callers get RGB.
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frames.append(frame)
                next_idx += 1
        if len(frames) > 0:
            return np.stack(frames)
        print('No frames have been read from %s' % path)
        return None
    except Exception as e:
        # Best-effort reader: a corrupt container must not kill the caller.
        print('Unable to read %s' % path)
        print(e)
        return None
def read(self, i: int, cap: cv2.VideoCapture, sleep: int) -> None:
    """Poll `cap` forever, decoding every 4th grabbed frame into self.frames[i].

    Intended to run on its own thread; sleeps `sleep` seconds between grabs.
    """
    grabs_since_decode = 0
    while cap.isOpened():
        grabs_since_decode += 1
        cap.grab()
        if grabs_since_decode == 4:
            # Decode only one in four grabbed frames to keep up with the stream.
            _, self.frames[i] = cap.retrieve()
            grabs_since_decode = 0
        time.sleep(sleep)
def grab(cam, queue, width, height):
    """Capture frames from device `cam` at width x height and push each onto
    `queue` as a {"img": frame} dict until the global `running` flag clears."""
    global running
    capture = VideoCapture(cam)
    capture.set(CAP_PROP_FRAME_WIDTH, width)
    capture.set(CAP_PROP_FRAME_HEIGHT, height)
    while running:
        capture.grab()
        retval, img = capture.retrieve(0)
        # Wrapped in a dict so extra metadata can ride along later.
        queue.put({"img": img})
def from_capture(cls, vc: cv2.VideoCapture):
    """Build an instance from the most recently grabbed frame of `vc`.

    Uses only the red channel (it shows the timecode best), normalized to
    [0, 1] float32, together with the capture's current frame number and
    timestamp in seconds.
    """
    success, frame = vc.retrieve()
    assert success
    channel = RED  # red color shows timecode better
    normalized = frame[..., channel].astype('float32') / 255
    return cls(
        frame_data=normalized,
        frame_number=int(vc.get(cv2.CAP_PROP_POS_FRAMES)),
        time=vc.get(cv2.CAP_PROP_POS_MSEC) / 1000,
    )
def __load_video(self, video_raw: cv2.VideoCapture):
    """Decode frames [self.start_frame, self.end_frame) into self.frames.

    Clamps self.end_frame to the container's reported frame count, seeks to
    self.start_frame, then grabs and retrieves each frame while updating a
    progress bar. Releases the capture when done.

    Raises:
        ValueError: if any frame in the range cannot be grabbed.
    """
    if not video_raw.isOpened():
        # NOTE(review): execution continues after this error; the frame count
        # will read as 0 and self.frames stays empty — consider raising or
        # returning here instead. Confirm intent.
        Logger.error("Unable to read {} feed".format(self.video_path))
    self.frames = []
    num_video_frames = int(video_raw.get(cv2.CAP_PROP_FRAME_COUNT))
    if self.end_frame is None or self.end_frame > num_video_frames:
        Logger.warning("Setting end_frame to {}".format(num_video_frames))
        self.end_frame = num_video_frames
    num_frames = 0
    # Skip the first frames until the self_start frame.
    video_raw.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)
    Logger.info("Loading {} frames...".format(self.end_frame - self.start_frame))
    # NOTE(review): maxval uses self.num_frames but the loop below runs
    # end_frame - start_frame times — verify these agree, otherwise the bar's
    # range is wrong (or self.num_frames may not even be set at this point).
    bar = progressbar.ProgressBar(maxval=self.num_frames, widgets=[
        progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()
    ])
    bar.start()
    for i in range(self.end_frame - self.start_frame):
        # grab() advances the decoder cheaply; retrieve() below does the decode.
        ret = video_raw.grab()
        if not ret:
            Logger.error(
                "Could not load frame {}".format(i + self.start_frame))
            raise ValueError(
                "Could not load frame {}".format(i + self.start_frame))
        self.frames.append(video_raw.retrieve()[1])
        num_frames += 1
        bar.update(num_frames)
    bar.finish()
    video_raw.release()
class camera(object):
    '''Object containing camera information.

    Call-able: calling the instance retrieves the current frame from the
    camera buffer.

    User accessible attributes:
        device      system device number
        resolution  camera resolution
        BGRimage    image in BGR format
        HSVimage    image in HSV format
        RGBimage    image in RGB format
        FPS         camera speed in FPS ([instant, running average])

    User accessible methods:
        close       close camera device

    NOTE(review): this class is Python 2 (print statements, xrange).
    '''

    def __init__(self, cam_num = -1, resolution = (640, 480)):
        '''Create camera object.

        cam_num     device number (integer)
        resolution  image resolution (tuple width x height)
        '''
        self.device = cam_num
        self.resolution = resolution
        self.BGRimage = []
        self.HSVimage = []
        self.RGBimage = []
        self.FPS = [0, 0]
        self.__avr = 0
        # assign and open device
        self.__capture = VideoCapture(cam_num)
        self.__capture.set(CV_CAP_PROP_FRAME_WIDTH,resolution[0])
        self.__capture.set(CV_CAP_PROP_FRAME_HEIGHT,resolution[1])
        # NOTE(review): missing call parentheses — this only references the
        # bound method, it never invokes open(); confirm intent.
        self.__capture.open
        self.__flag = False
        # time the first read to seed the FPS estimate
        t0 = time()
        self.__flag, self.BGRimage = self.__capture.read()
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = self.FPS[0]
        self.__avr = self.FPS[0]
        print "camera", self.device, "ready @", self.FPS[0], "fps"
        return

    def __call__(self, frame_delay = 0, fast = False):
        '''Retrieve current frame in camera buffer.

        frame_delay delay the frame decoding (integer)
        fast        if true don't decode image to RGB format (logic)
        '''
        # set timer to measure fps
        self.__avr = self.FPS[1]
        t0 = time()
        # try to retrieve current frame; loop until a read succeeds
        while not self.__flag:
            if frame_delay > 0:
                # discard frame_delay frames before decoding one
                for i in xrange(frame_delay + 1):
                    self.__capture.grab()
                self.__flag, self.BGRimage = self.__capture.retrieve()
                del i
            else:
                self.__flag, self.BGRimage = self.__capture.read()
        # reset the flag so the next call performs a fresh read
        self.__flag = False
        # decode bgr format to hsv
        self.HSVimage = cvtColor(self.BGRimage, CV_BGR2HSV)
        if fast:
            # skip the RGB conversion; update FPS estimate and return early
            self.FPS[0] = 1/(time()-t0)
            self.FPS[1] = (self.FPS[0]+self.__avr)/2
            return
        # decode bgr format to rgb
        self.RGBimage = cvtColor(self.BGRimage, CV_BGR2RGB)
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = (self.FPS[0]+self.__avr)/2
        return

    def __str__(self):
        '''Return camera information: device number, resolution, instant
        speed, average speed.'''
        tmp = "camera object @ dev "+str(self.device)+", resolution: "+str(self.resolution)
        tmp = tmp +", fps: "+str(self.FPS[0])+", Avr. fps: "+str(self.FPS[1])
        return tmp

    def __del__(self):
        '''When the object is deleted, close the device.'''
        self.close()
        return

    def close(self):
        '''Close device, making it available to use.'''
        # if the device is open then close it
        if self.__capture.isOpened():
            self.__capture.release()
            print "camera", self.device, "closed"
        return
def video_retrieve_frame(cap: cv2.VideoCapture) -> Tuple[bool, np.ndarray]:
    """Decode the most recently grabbed frame from `cap`.

    Returns (success, frame); the frame is converted BGR -> RGB only when the
    retrieve succeeded.
    """
    success, image = cap.retrieve()
    if not success:
        return success, image
    return success, cv2.cvtColor(image, cv2.COLOR_BGR2RGB)