def update(self, conn: Connection, rtsp_url: str, buffer: bool):
    """
    Runs the RTSP camera capture loop in a child process, grabbing data and
    keeping the buffer empty.

    :param conn: the Pipe connection used to transmit data
    :param rtsp_url: the URL of the RTSP camera
    :param buffer: should the client read frame by frame from the buffer,
        or just grab the latest frame?
    :return: None
    """
    self.log.info(f"Starting video capture client for {rtsp_url}")
    cap = VideoCapture(rtsp_url, CAP_FFMPEG)
    self.log.info("Started...")

    run = True
    while run:
        # While no request is pending, keep draining the driver buffer so
        # the next requested frame is the freshest one.
        if not conn.poll() and not buffer:
            self.log.debug("clearing buffer frame")
            cap.grab()
            continue

        rec_dat = conn.recv()
        if rec_dat == self.SEND_FRAME:
            self.log.debug("Sending next frame to parent process")
            return_value, frame = cap.read()
            conn.send(frame)
        elif rec_dat == self.END_PROCESS:
            self.log.debug("Closing connection")
            cap.release()
            run = False

    self.log.info("Camera Connection Closed")
    conn.close()
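# Usage sketch (assumption, not from the original source): a hypothetical
# parent process driving update() over a multiprocessing Pipe. The class name
# RtspCamClient and the stream URL are placeholders; SEND_FRAME / END_PROCESS
# are whatever sentinel values the real class defines.
from multiprocessing import Pipe, Process

def main():
    client = RtspCamClient()  # hypothetical class owning update()
    parent_conn, child_conn = Pipe()
    proc = Process(target=client.update,
                   args=(child_conn, "rtsp://example.com/stream", False))
    proc.start()
    for _ in range(10):
        parent_conn.send(client.SEND_FRAME)  # request the next frame
        frame = parent_conn.recv()           # blocks until the frame arrives
    parent_conn.send(client.END_PROCESS)     # ask the child to shut down
    proc.join()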
def read(self, i: int, cap: cv2.VideoCapture, sleep: float) -> None:
    c = 0
    while cap.isOpened():
        c += 1
        cap.grab()
        # Decode only every 4th grabbed frame: grab() is cheap, retrieve()
        # pays the decoding cost, so this thread stays ahead of the buffer.
        if c == 4:
            _, self.frames[i] = cap.retrieve()
            # print(f"Thread {threading.get_ident()} put a frame at i = {i}")
            c = 0
        time.sleep(sleep)
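# Usage sketch (assumption, not from the original source): a minimal owner
# class for the read() method above, starting one daemon reader thread per
# stream. MultiCamReader and the URLs are hypothetical.
import threading
import cv2

class MultiCamReader:
    # the read() method shown above belongs here

    def __init__(self, urls, sleep=0.05):
        self.frames = [None] * len(urls)
        for i, url in enumerate(urls):
            cap = cv2.VideoCapture(url)
            threading.Thread(target=self.read, args=(i, cap, sleep),
                             daemon=True).start()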
def grab(cam, queue, width, height):
    global running
    capture = VideoCapture(cam)
    capture.set(CAP_PROP_FRAME_WIDTH, width)
    capture.set(CAP_PROP_FRAME_HEIGHT, height)
    while running:
        frame = {}
        capture.grab()
        # retrieve() takes an optional output image first, so the flag must
        # not be passed positionally; the default is what we want here.
        retval, img = capture.retrieve()
        frame["img"] = img
        queue.put(frame)
    capture.release()
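# Consumer sketch (assumption, not from the original source): since grab()
# above enqueues every frame, a consumer that only wants the freshest image
# should drain the queue and keep the last entry.
from queue import Empty

def latest_frame(queue):
    frame = queue.get()  # block until at least one frame is available
    while True:
        try:
            frame = queue.get_nowait()  # keep only the newest entry
        except Empty:
            return frame["img"]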
def read_frames_at_indices(path: str, capture: cv2.VideoCapture,
                           frame_idxs: np.ndarray) -> Optional[np.ndarray]:
    try:
        frames = []
        next_idx = 0
        for frame_idx in range(frame_idxs[0], frame_idxs[-1] + 1):
            # grab() advances the stream without decoding; only the frames
            # listed in frame_idxs are actually retrieved and decoded.
            ret = capture.grab()
            if not ret:
                print('Unable to grab frame %d from %s' % (frame_idx, path))
                break
            if frame_idx == frame_idxs[next_idx]:
                ret, frame = capture.retrieve()
                if not ret or frame is None:
                    print('Unable to retrieve frame %d from %s' % (frame_idx, path))
                    break
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frames.append(frame)
                next_idx += 1
        if len(frames) > 0:
            return np.stack(frames)
        else:
            print('No frames have been read from %s' % path)
            return None
    except Exception as e:
        print('Unable to read %s' % path)
        print(e)
        return None
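# Usage sketch (assumption, not from the original source): sampling evenly
# spaced frames, the usual way this helper is driven. Indices must be sorted
# ascending because grabbing proceeds sequentially; "video.mp4" is a
# placeholder path.
import cv2
import numpy as np

path = "video.mp4"
cap = cv2.VideoCapture(path)
n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
idxs = np.unique(np.linspace(0, n - 1, num=16, dtype=int))
frames = read_frames_at_indices(path, cap, idxs)
cap.release()
if frames is not None:
    print(frames.shape)  # (len(idxs), height, width, 3), RGB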
def list_cameras():
    cam = []
    for x in range(0, 10):
        camera = VideoCapture(x)
        # grab() succeeds only if a device is actually attached at this index
        if camera.grab():
            cam.append('Camera ID: %d' % x)
        camera.release()
    return '\n'.join(cam)
def newest_frame(
    source: cv2.VideoCapture,
) -> Tuple[bool, Optional[np.ndarray]]:
    """
    Capture the newest frame from the webcam.

    This should not be hard, but the cv2.CAP_PROP_BUFFERSIZE property can be
    neither set nor retrieved for this camera, and we do not know exactly
    why. Of the several proposed workarounds for this problem, simply
    skipping over the frame buffer to capture the newest frame has so far
    been the most reliable and least laggy in our tests.

    :param source: Capture source
    :return: (success, frame)
    """
    # Discard the buffered frames, then decode the next (freshest) one.
    for _ in range(4):
        source.grab()
    return source.read()
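# Usage sketch (assumption, not from the original source): polling
# newest_frame() in a display loop so every iteration shows the most recent
# camera image rather than a stale buffered one.
import cv2

cap = cv2.VideoCapture(0)
while True:
    ok, frame = newest_frame(cap)
    if not ok or frame is None:
        break
    cv2.imshow("live", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()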
def __load_video(self, video_raw: cv2.VideoCapture):
    if not video_raw.isOpened():
        Logger.error("Unable to read {} feed".format(self.video_path))

    self.frames = []
    num_video_frames = int(video_raw.get(cv2.CAP_PROP_FRAME_COUNT))
    if self.end_frame is None or self.end_frame > num_video_frames:
        Logger.warning("Setting end_frame to {}".format(num_video_frames))
        self.end_frame = num_video_frames

    num_frames = 0
    # Skip the first frames until the start frame.
    video_raw.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)

    Logger.info("Loading {} frames...".format(self.end_frame - self.start_frame))
    bar = progressbar.ProgressBar(
        maxval=self.end_frame - self.start_frame,
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    for i in range(self.end_frame - self.start_frame):
        ret = video_raw.grab()
        if not ret:
            Logger.error("Could not load frame {}".format(i + self.start_frame))
            raise ValueError(
                "Could not load frame {}".format(i + self.start_frame))
        self.frames.append(video_raw.retrieve()[1])
        num_frames += 1
        bar.update(num_frames)
    bar.finish()
    video_raw.release()
class camera(object):
    '''
    Object containing camera information

    Call-able, retrieves the current frame in the camera buffer

    User accessible attributes:
        device          system device number
        resolution      camera resolution
        BGRimage        image in BGR format
        HSVimage        image in HSV format
        RGBimage        image in RGB format
        FPS             camera speed in FPS

    User accessible methods:
        close           close camera device
    '''
    def __init__(self, cam_num=-1, resolution=(640, 480)):
        '''
        create camera object

        cam_num         device number (integer)
        resolution      image resolution (tuple width x height)
        '''
        self.device = cam_num
        self.resolution = resolution
        self.BGRimage = []
        self.HSVimage = []
        self.RGBimage = []
        self.FPS = [0, 0]
        self.__avr = 0
        # assign and open device
        self.__capture = VideoCapture(cam_num)
        self.__capture.set(CAP_PROP_FRAME_WIDTH, resolution[0])
        self.__capture.set(CAP_PROP_FRAME_HEIGHT, resolution[1])
        self.__flag = False
        t0 = time()
        self.__flag, self.BGRimage = self.__capture.read()
        self.FPS[0] = 1 / (time() - t0)
        self.FPS[1] = self.FPS[0]
        self.__avr = self.FPS[0]
        print("camera", self.device, "ready @", self.FPS[0], "fps")

    def __call__(self, frame_delay=0, fast=False):
        '''
        retrieve current frame in camera buffer

        frame_delay     delay the frame decoding (integer)
        fast            if true don't decode image to RGB format (logic)
        '''
        # set timer to measure fps
        self.__avr = self.FPS[1]
        t0 = time()
        # try to retrieve current frame
        while not self.__flag:
            if frame_delay > 0:
                # skip frame_delay buffered frames before decoding one
                for _ in range(frame_delay + 1):
                    self.__capture.grab()
                self.__flag, self.BGRimage = self.__capture.retrieve()
            else:
                self.__flag, self.BGRimage = self.__capture.read()
        self.__flag = False
        # decode bgr format to hsv
        self.HSVimage = cvtColor(self.BGRimage, COLOR_BGR2HSV)
        if fast:
            self.FPS[0] = 1 / (time() - t0)
            self.FPS[1] = (self.FPS[0] + self.__avr) / 2
            return
        # decode bgr format to rgb
        self.RGBimage = cvtColor(self.BGRimage, COLOR_BGR2RGB)
        self.FPS[0] = 1 / (time() - t0)
        self.FPS[1] = (self.FPS[0] + self.__avr) / 2

    def __str__(self):
        '''
        return camera information:
        device number, resolution, instant speed, average speed
        '''
        tmp = "camera object @ dev " + str(self.device) + ", resolution: " + str(self.resolution)
        tmp = tmp + ", fps: " + str(self.FPS[0]) + ", Avr. fps: " + str(self.FPS[1])
        return tmp

    def __del__(self):
        '''
        when the object is deleted, it closes the device
        '''
        self.close()

    def close(self):
        '''
        close device, making it available to use
        '''
        # if the device is open then close it
        if self.__capture.isOpened():
            self.__capture.release()
            print("camera", self.device, "closed")
class PyCamera(PiyCamera):
    def __init__(self, device_no=0):
        global imwrite
        from cv2 import VideoCapture, imwrite
        from time import sleep
        super(PyCamera, self).__init__()
        self._video_capture = VideoCapture(device_no)
        self._validate_module()

    @staticmethod
    def _validate_module():
        if OSDetector.is_embedded():
            from subprocess import call
            # call() returns the exit code; grep exits with 0 only when the
            # module shows up in the lsmod output
            response = call('lsmod | grep bcm...._v4l2', shell=True)
            if response != 0:
                logger.warning(
                    'It seems you are trying to use OpenCV camera on RaspberryPi. '
                    'Make sure that v4l2 module is loaded as it was not detected'
                )

    def get_frame(self):
        return self._frame

    def read_frame(self):
        if self._camera_thread is None:
            # no background worker: drain the buffer so the frame is fresh
            for _ in range(5):
                self._video_capture.grab()
            self.update_frame()
        return self.get_frame()

    def update_frame(self):
        got_frame, frame = self._video_capture.read()
        while not got_frame:
            got_frame, frame = self._video_capture.read()
        self._frame = frame

    def camera_worker(self):
        sleep(0.01)
        while self._run:
            logger.info('frame')
            self._video_capture.grab()

    def save_image(self, img_path):
        status = imwrite(img_path, self._frame)
        return status

    @change_settings
    def set_resolution(self, width=1280, height=1024):
        self._video_capture.set(3, width)   # 3 == cv2.CAP_PROP_FRAME_WIDTH
        self._video_capture.set(4, height)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT

    def get_resolution(self):
        return int(self._video_capture.get(3)), int(self._video_capture.get(4))

    @change_settings
    def set_brightness(self, brightness):
        self._video_capture.set(10, brightness)  # 10 == cv2.CAP_PROP_BRIGHTNESS

    def get_brightness(self):
        return self._video_capture.get(10)

    @change_settings
    def set_contrast(self, contrast):
        self._video_capture.set(11, contrast)  # 11 == cv2.CAP_PROP_CONTRAST

    def get_contrast(self):
        return self._video_capture.get(11)

    @change_settings
    def set_exposure(self, exposure):
        self._video_capture.set(15, exposure)  # 15 == cv2.CAP_PROP_EXPOSURE

    def get_exposure(self):
        return self._video_capture.get(15)

    @change_settings
    def set_fps(self, fps):
        self._video_capture.set(5, fps)  # 5 == cv2.CAP_PROP_FPS

    def get_fps(self):
        return self._video_capture.get(5)

    @change_settings
    def set_iso(self, iso):
        logger.warning('Opencv does not support ISO setting')

    def get_iso(self):
        logger.warning('Opencv does not support ISO setting')
        return -1
def _load_cuboid(self, cuboid: tuple, prep_fn=None):
    """Loads a cuboid from its video frame files."""
    fname = cuboid[0]
    frames_range = cuboid[1:-1]
    stride = cuboid[-1]

    cuboid_data = None
    n_frames = frames_range[0]  # Number of frames read so far

    # Load the desired video frames
    if (self.is_shuffled() or self.__cap_opened is None or
            self.__cap_opened[0] != fname or
            frames_range[0] != self.__cap_opened[2] + 1):

        if self.__cap_opened is not None:
            self.__cap_opened[1].release()

        vid = VideoCapture(fname)
        if not vid.isOpened():
            raise ValueError('Cannot load frames {} from video '
                             'file {}'.format(frames_range, fname))
        vid.set(1, frames_range[0])  # 1 == CAP_PROP_POS_FRAMES

        # Store the VideoCapture for retrieval of the next cuboid belonging
        # to the same video
        if not self.is_shuffled():
            self.__cap_opened = [fname, vid, frames_range[1]]
    else:
        vid = self.__cap_opened[1]
        self.__cap_opened[2] = frames_range[1]  # Update the last frame taken

    for i in range(self.cub_frames):
        if n_frames <= frames_range[1]:
            # Take consecutive frames separated by the stride
            ret, fr = vid.read()
            if not ret:
                raise ValueError('Failed to load frame {} from video '
                                 'file {}'.format(i, fname))
            n_frames += 1

            # Apply preprocessing function if specified
            if prep_fn:
                fr = prep_fn(fr)

            # Add the frame read to the cuboid
            if cuboid_data is None:
                cuboid_data = np.zeros((self.cub_frames, *fr.shape),
                                       dtype=fr.dtype)
            cuboid_data[i] = fr

            # Exclude the next middle frames if stride is set; grab()
            # advances without the cost of decoding
            for _ in range(stride - 1):
                if n_frames <= frames_range[1]:
                    ret = vid.grab()
                    n_frames += 1
        else:
            # Repeat the last frame if a complete cuboid cannot be taken
            # from the remaining video frames
            cuboid_data[i] = cuboid_data[i - 1]

    # Check loaded images have the same format
    # if frames and (frames[-1].shape != fr.shape or
    #                frames[-1].dtype != fr.dtype):
    #     raise ValueError('Different sizes or types detected for loaded '
    #                      'images for image "{}"'.format(fn))

    # Add the remaining cuboid and fill it with repetitions of the last frame
    # until the cuboid has the desired number of frames
    # if len(frames) < self.cub_frames:
    #     rep = len(frames) - self.cub_frames
    #     frames.extend([frames[-1]] * rep)
    # frames = np.array(frames)
    # vid.release()

    return cuboid_data
def process_video(self,
                  cap: cv2.VideoCapture,
                  video_meta: Dict,
                  out: cv2.VideoWriter,
                  ball_detector,
                  tracker: AbstractTracker,
                  process_nth_frame: int = 1,
                  ):
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    pbar = tqdm(total=video_meta['n_frames'])
    history = {}
    start = 0  # accumulated detector time, used for the FPS printout below
    reading_first_frame = True
    for count_frame in range(video_meta['n_frames']):
        ret = cap.grab()
        if not ret:
            break
        if count_frame % process_nth_frame == 0:
            ret, frame = video_retrieve_frame(cap)

            if reading_first_frame:
                self.M, self.Minv = self.get_camera_perspective(
                    frame,
                    [self.config.markup_bot_left,
                     self.config.markup_bot_right,
                     self.config.markup_top_left,
                     self.config.markup_top_right])
                reading_first_frame = False

            # Ball detector
            before = time.time()
            cur = ball_detector(frame)
            start += time.time() - before
            yolo_preds = cur.xyxy[0].cpu().numpy()
            # print a running FPS estimate every 20 frames
            if count_frame % 20 == 0:
                print("FPS")
                print(count_frame / start)

            boxes = yolo_preds[:, :4]  # xmin, ymin, xmax, ymax
            confs = yolo_preds[:, 4]
            labels = yolo_preds[:, 5]

            # Filter boxes by class
            person_boxes, table_confs = self.filter_class(
                labels=labels, boxes=boxes, confs=confs,
                label_to_filter='person')

            # Tracker
            tracks = tracker.update(person_boxes).astype(int)
            tracked_balls_boxes = tracks[:, :4]
            track_ids = tracks[:, 4]

            # Project each person's foot point into the 2D markup plane
            person_centers = list(
                map(lambda bbox: [int((bbox[0] + bbox[2]) / 2), int(bbox[3])],
                    tracked_balls_boxes))
            person_centers = list(
                map(lambda pts: cv2.perspectiveTransform(
                    np.array([[pts]], dtype='float32'), self.M)[0][0],
                    person_centers))
            person_centers = list(
                map(lambda point: ((point[0] / width) * self.config.markup_width,
                                   (point[1] / height) * self.config.markup_height),
                    person_centers))

            for box, track_id, center_2d in zip(tracked_balls_boxes,
                                                track_ids, person_centers):
                draw_rectangle(frame, box, color=(0, 255, 0), thickness=2)
                # put_text(frame, str(track_id), (box[0], box[1]))
                # put_text(frame, str(center_2d), (box[0], box[1]))
                if track_id not in history:
                    history[track_id] = OrderedDict()
                history[track_id][count_frame] = {'bbox': box,
                                                  'coord_2d': center_2d}
                # Average the speed estimate over windows of 10 to 14 frames
                speeds = [self.evaluate_speed(history=history[track_id],
                                              last_frame_num=count_frame,
                                              for_frames_amount=n)
                          for n in range(10, 15)]
                speed = sum(speeds) / len(speeds)
                history[track_id][count_frame]['speed'] = speed
                put_text(frame, '%.3f' % speed + 'm/s', (box[0], box[1]))

            if self.config.markup_visualize:
                self.visualize_markup(frame)
            video_write_frame(out, frame)
        pbar.update()
    cap.release()
    out.release()
def get_latest_image(capture: cv2.VideoCapture):
    # Throw away buffered frames so read() decodes the most recent one
    for _ in range(4):
        capture.grab()
    is_frame_returned, frame = capture.read()
    return frame