class noUVCCapture():
    """
    VideoCapture without uvc control
    """
    def __init__(self, src, size=(640, 480)):
        self.controls = None
        self.cvId = src
        # add cv videocapture capabilities
        self.cap = VideoCapture(src)
        self.set_size(size)
        self.read = self.cap.read

    def set_size(self, size):
        width, height = size
        self.cap.set(3, width)   # CAP_PROP_FRAME_WIDTH
        self.cap.set(4, height)  # CAP_PROP_FRAME_HEIGHT

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def read_RGB(self):
        s, img = self.read()
        if s:
            # frames from cv2 are BGR; convert in place to RGB
            cvtColor(img, COLOR_BGR2RGB, img)
        return s, img

    def read_HSV(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_BGR2HSV, img)
        return s, img
class CameraCapture(uvc.Camera):
    """
    CameraCapture class for image grabbing and control.
    Inherits from an OS-specific Camera that defines all uvc control methods.
    """
    def __init__(self, cam, size=(640, 480)):
        super(CameraCapture, self).__init__(cam)
        # add cv videocapture capabilities
        self.cap = VideoCapture(self.cvId)
        self.set_size(size)
        self.read = self.cap.read

    def set_size(self, size):
        width, height = size
        self.cap.set(3, width)
        self.cap.set(4, height)

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def read_RGB(self):
        s, img = self.read()
        if s:
            # frames from cv2 are BGR; convert in place to RGB
            cvtColor(img, COLOR_BGR2RGB, img)
        return s, img

    def read_HSV(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_BGR2HSV, img)
        return s, img
class Camera_Capture():
    """
    VideoCapture without uvc control, using cv2.VideoCapture
    """
    def __init__(self, src_id, size=(640, 480), fps=None, timebase=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        # add cv videocapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

        if timebase is None:
            logger.debug("Capture will run with default system timebase")
            self.timebase = c_double(0)
        elif isinstance(timebase, c_double):
            logger.debug("Capture will run with app wide adjustable timebase")
            self.timebase = timebase
        else:
            logger.error("Invalid timebase variable type. Will use default system timebase")
            self.timebase = c_double(0)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp, img)

    def set_size(self, size):
        width, height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self, fps):
        self.capture.set(5, fps)

    def get_fps(self):
        return self.capture.get(5)

    def get_now(self):
        return time()

    def create_atb_bar(self, pos):
        size = 0, 0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self, src):
        self.auto_rewind = True
        self.controls = None  # No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)
        timestamps_loc = os.path.join(src.rsplit(os.path.sep, 1)[0], 'eye_timestamps.npy')
        logger.info("trying to load timestamps with video at: %s" % timestamps_loc)
        try:
            self.timestamps = np.load(timestamps_loc).tolist()
            logger.info("loaded %s timestamps" % len(self.timestamps))
        except Exception:
            logger.info("did not find timestamps")
            self.timestamps = None
        self._get_frame_ = self.cap.read

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def set_fps(self):
        pass

    def get_fps(self):
        return None

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def get_frame(self):
        s, img = self.read()
        if self.timestamps:
            timestamp = self.timestamps.pop(0)
        else:
            timestamp = time()
        return Frame(timestamp, img)

    def rewind(self):
        self.cap.set(1, 0)  # seek to the beginning

    def create_atb_bar(self, pos):
        return 0, 0

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
def video_properties(vc: cv2.VideoCapture):
    """
    Return basic properties of an opened capture.
    :param vc: opened cv2.VideoCapture
    :return: width, height, frame count, fps
    """
    fps = vc.get(cv2.CAP_PROP_FPS)  # OpenCV 2.x used "CV_CAP_PROP_FPS"
    width = vc.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = vc.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frameCount = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
    return width, height, frameCount, fps
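# A minimal usage sketch for video_properties above (not part of the original snippet;
# "sample.mp4" is a hypothetical file path):
import cv2

vc = cv2.VideoCapture("sample.mp4")
if vc.isOpened():
    width, height, frame_count, fps = video_properties(vc)
    print(f"{width:.0f}x{height:.0f}, {frame_count} frames @ {fps:.2f} fps")
vc.release()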
def from_capture(cls, vc: cv2.VideoCapture):
    success, frame = vc.retrieve()
    assert success
    color_layer = RED  # the red channel shows the timecode best
    self = cls(
        frame_data=frame[..., color_layer].astype('float32') / 255,
        frame_number=int(vc.get(cv2.CAP_PROP_POS_FRAMES)),
        time=vc.get(cv2.CAP_PROP_POS_MSEC) / 1000,
    )
    return self
def get_video_info(video_source: cv.VideoCapture):
    fps = video_source.get(cv.CAP_PROP_FPS)
    size = (video_source.get(cv.CAP_PROP_FRAME_WIDTH),
            video_source.get(cv.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(video_source.get(cv.CAP_PROP_FRAME_COUNT))

    print('Video name:', VIDEO_SOURCE_NAME)
    print('[FPS] {}\n[SIZE] {}'.format(fps, size))
    print('[INFO] {} total frames'.format(total_frames))

    return {
        'video_name': VIDEO_SOURCE_NAME,
        'video_fps': fps,
        'video_size': size,
        'total_frames': total_frames,
    }
def video_create(self, image_path=None, dcp_path=''):
    assert image_path
    # Video capture to get shapes and stats.
    # Only supports 1 video at a time, but this can still collect the mp4 files.
    vid_list = []
    for file in os.listdir(image_path):
        if file.endswith('mp4') or file.endswith('MP4'):
            vid_list.append(image_path + '/' + file)

    video_path = vid_list[0]  # ONLY works with 1 video for now
    vcapture = VideoCapture(video_path)
    width = int(vcapture.get(CAP_PROP_FRAME_WIDTH))
    height = int(vcapture.get(CAP_PROP_FRAME_HEIGHT))
    fps = vcapture.get(CAP_PROP_FPS)

    # Define codec and create video writer. The video output is purely for
    # debugging and educational purposes; it is not used in decensoring.
    file_name = "uncensored_video.avi"
    vwriter = VideoWriter(file_name, VideoWriter_fourcc(*'MJPG'), fps, (width, height))
    count = 0
    print("Beginning build. Do ensure only relevant images are in source directory")

    input_path = dcp_path + '/decensor_output/'
    img_list = []
    # output of the video detection should be in order anyway
    # os.chdir(input_path)
    # files = filter(os.path.isfile, os.listdir(input_path))
    # files = [os.path.join( f) for f in files]
    # files.sort(key=lambda x: os.path.getmtime(x))
    # for file in files:
    for file in os.listdir(input_path):
        # TODO: check what other filetypes are supported
        if file.endswith('.png') or file.endswith('.PNG'):
            img_list.append(input_path + file)
            print('adding image ', input_path + file)

    for img in img_list:
        print("frame: ", count)
        # Read next image; there should be no alpha channel in the created image.
        image = skimage.io.imread(img)
        # Add image to the video writer after flipping R and B channels (RGB -> BGR).
        image = image[..., ::-1]
        vwriter.write(image)
        count += 1

    vwriter.release()
    print('video complete')
def to_numpy(video: cv2.VideoCapture, input_shape: Sequence[int], dtype: type) -> np.ndarray:
    num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    if w != input_shape[1] or h != input_shape[0]:
        raise ValueError("Video frame dimension does not match input_shape")
    output = np.empty((num_frames, *input_shape), dtype=dtype)
    for frame_idx, frame in enumerate(frame_iterator(video)):
        output[frame_idx] = frame
    return output
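# Hedged usage sketch for to_numpy: it relies on the frame_iterator() helper referenced
# above, and "clip.mp4" is assumed to hold 640x480 BGR frames (both are assumptions):
import cv2
import numpy as np

cap = cv2.VideoCapture("clip.mp4")
frames = to_numpy(cap, input_shape=(480, 640, 3), dtype=np.uint8)
print(frames.shape)  # (num_frames, 480, 640, 3)
cap.release()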
def OpenOutputVideo(outputVideoFilename: str, inputFileStream: cv2.VideoCapture,
                    outputVideoEncoding='DIVX') -> cv2.VideoWriter:
    '''
    Open the output video file.
    :param outputVideoFilename: output file name
    :param inputFileStream: input file stream (used to obtain basic video information)
    :param outputVideoEncoding: output file encoding
    :return: output file stream
    '''
    # Get the frame rate and frame size from the input stream.
    fps = int(inputFileStream.get(cv2.CAP_PROP_FPS))
    size = (int(inputFileStream.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(inputFileStream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    return cv2.VideoWriter(outputVideoFilename,
                           cv2.VideoWriter_fourcc(*outputVideoEncoding),
                           fps, size, False)
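# Illustrative use of OpenOutputVideo (file names are made up). Note that the writer is
# created with isColor=False, so frames written to it should be single-channel:
import cv2

reader = cv2.VideoCapture("input.mp4")
writer = OpenOutputVideo("output.avi", reader)
ok, frame = reader.read()
while ok:
    writer.write(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))  # grayscale to match isColor=False
    ok, frame = reader.read()
reader.release()
writer.release()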
def ssd_resize_factor(video: cv2.VideoCapture):
    """
    Gets a multiplier to scale the bounding box positions to
    their correct position in the frame.

    Args:
        video: Video capture object, contains information about data source.

    Returns:
        Resizing factor to scale box coordinates to output frame size.
    """
    frame_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
    return max(frame_height, frame_width)
def video_create(self, image_path=None, dcp_path=''):
    assert image_path
    # Video capture to get shapes and stats
    vid_list = []
    for file in os.listdir(str(image_path)):
        file_s = str(file)
        if len(vid_list) == 1:
            print("WARNING: More than 1 video in input directory! Assuming you want the first video.")
            break
        if file_s.endswith('mp4') or file_s.endswith('MP4'):
            vid_list.append(image_path + '/' + file_s)

    video_path = vid_list[0]  # ONLY works with 1 video for now
    vcapture = VideoCapture(video_path)
    width = int(vcapture.get(CAP_PROP_FRAME_WIDTH))
    height = int(vcapture.get(CAP_PROP_FRAME_HEIGHT))
    fps = vcapture.get(CAP_PROP_FPS)

    # Define codec and create video writer. The video output is purely for
    # debugging and educational purposes; it is not used in decensoring.
    file_name = str(file) + '_uncensored.avi'
    vwriter = VideoWriter(file_name, VideoWriter_fourcc(*'MJPG'), fps, (width, height))
    count = 0
    print("Beginning build. Do ensure only relevant images are in source directory")

    input_path = dcp_path + '/decensor_output/'
    img_list = []
    for file in os.listdir(input_path):
        # TODO: check what other filetypes are supported
        file_s = str(file)
        if file_s.endswith('.png') or file_s.endswith('.PNG'):
            img_list.append(input_path + file_s)
            # print('adding image ', input_path + file_s)

    for img in img_list:
        print("frame: ", count)
        # Read next image; there should be no alpha channel in the created image.
        image = skimage.io.imread(img)
        # Add image to the video writer after flipping R and B channels (RGB -> BGR).
        image = image[..., ::-1]
        vwriter.write(image)
        count += 1

    vwriter.release()
    print('video complete')
def GetFps(videoStream: cv2.VideoCapture) -> int:
    '''
    Get the FPS of a video stream.
    :param videoStream: video input stream
    :return: frames per second
    '''
    return int(videoStream.get(cv2.CAP_PROP_FPS))
def videoconvert(inp):
    capture = VideoCapture(inp)
    inp_ext = inp.split(".")
    fpsin = capture.get(CAP_PROP_FPS)
    count = 0
    success = True
    # Dump every frame to disk as a JPEG.
    while success:
        success, image = capture.read()
        if not success or image is None:
            pass
        else:
            imwrite("zzimg%d.jpg" % count, image)
            count += 1

    outfile = inp_ext[0] + '_output.mp4'
    fourcc = VideoWriter_fourcc(*'DIVX')
    fpsout = fpsin
    img = imread("zzimg0.jpg")
    height, width, layers = img.shape
    size = (width, height)
    # isColor=0 -> grayscale output
    out = VideoWriter(outfile, fourcc, fpsout, size, 0)
    for i in range(count):
        img = imread("zzimg%d.jpg" % i, 0)  # read back as grayscale
        out.write(img)
    print("Video Converted to Grayscale, Please check the folder for the output file: ", outfile)
    out.release()
    capture.release()
    return outfile
class VideoLoader:
    def __init__(self, video_file):
        self._frame_pointer = -1
        self._video_file = video_file
        self._capture = VideoCapture(video_file)

    def __getitem__(self, idx):
        if idx >= len(self) or idx < 0:
            raise IndexError
        # cv2 captures only move forward, so reopen the file to seek backwards.
        if idx < self._frame_pointer:
            self._capture = VideoCapture(self._video_file)
            self._frame_pointer = -1
        image = None
        while self._frame_pointer < idx:
            _, image = self._capture.read()
            self._frame_pointer += 1
        if image is not None:
            return image

    def __len__(self):
        return int(self._capture.get(CAP_PROP_FRAME_COUNT))
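# Possible usage of VideoLoader ("movie.mp4" is a placeholder path). Random access is
# emulated: seeking backwards reopens the file, so sequential access is the cheap case.
loader = VideoLoader("movie.mp4")
print(len(loader))  # total frame count
first = loader[0]   # decoded BGR frame as a numpy array
tenth = loader[9]   # intermediate frames are read and discarded to reach index 9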
def readCapture(cap: cv2.VideoCapture, frame) -> np.ndarray:
    # Only seek when the capture is not already positioned at the requested frame.
    cursor = cap.get(cv2.CAP_PROP_POS_FRAMES)
    if frame != cursor:
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame)
    ret, img = cap.read()
    return img
def extract_img(save_filepath, bucket_name, num=10):
    """Evenly capture num frames from the video."""
    cap = VideoCapture(save_filepath)
    length_video = int(cap.get(CAP_PROP_FRAME_COUNT))
    parts = length_video // num  # the part of the video from which 1 frame is extracted
    start_part_num = 0
    res = []
    for currentFrame in range(length_video):
        if currentFrame == start_part_num:
            # Pick one random frame within the current part.
            add_frame_num = random.randint(start_part_num, start_part_num + parts - 1)
            start_part_num += parts
            if add_frame_num > length_video:
                break
        _, frame = cap.read()
        if currentFrame == add_frame_num:
            img_name = 'frame' + str(currentFrame) + '.jpg'
            save_imgs_filepath = os.path.join(IMGS_FOLDER, img_name)
            imwrite(save_imgs_filepath, frame)
            upload_blob(save_imgs_filepath, img_name, bucket_name)
            os.remove(save_imgs_filepath)
class FileVideoSource(VideoSource):
    '''
    Load a video file as video source, for test or flight replay purposes.
    '''
    def __init__(self, fileName, parent=None):
        super().__init__(parent)
        self.cap = VideoCapture(fileName)
        self.frameRate = self.cap.get(CAP_PROP_FPS)
        self.__delay = 1.0 / self.frameRate

    def run(self):
        self.running = True
        while self.cap.isOpened():
            if self.pause:
                continue
            _s0 = time()
            ret, frame = self.cap.read()
            if ret:
                rgbImage = cvtColor(frame, COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                self.newFrameAvailable.emit(convertToQtFormat)
            # Sleep the remainder of the frame interval to keep playback at the native frame rate.
            _s0 = time() - _s0
            sleep(0 if _s0 >= self.__delay else self.__delay - _s0)
        self.running = False
        self.__cleanup()

    def __cleanup(self):
        self.cap.release()
def Extract_Frames(source, fps=1, dest=None):
    '''Extracts frames from a given source animation, with optional fps and destination'''
    from Webscraping import USER
    from pathlib import Path
    from cv2 import VideoCapture, imencode, CAP_PROP_POS_FRAMES

    path = Path(source)
    if dest is None:
        dest = USER / 'Pictures' / 'Screenshots' / path.stem
    if dest.exists():
        for file in dest.iterdir():
            file.unlink()
    dest.mkdir(exist_ok=True)

    vidcap = VideoCapture(source)
    success, frame = vidcap.read()
    while success:
        if ((vidcap.get(CAP_PROP_POS_FRAMES) % fps) - 1) in (-1, 0):
            image = dest / f'{vidcap.get(CAP_PROP_POS_FRAMES)}.jpg'
            image.write_bytes(imencode('.jpg', frame)[-1])
        success, frame = vidcap.read()
    else:
        vidcap.release()
def yolo_resize_factor(video: cv2.VideoCapture, input_data_shape: tuple):
    """
    Gets a multiplier to scale the bounding box positions to
    their correct position in the frame.

    Args:
        video: Video capture object, contains information about data source.
        input_data_shape: Contains shape of model input layer.

    Returns:
        Resizing factor to scale box coordinates to output frame size.
    """
    frame_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
    _, model_height, model_width, _ = input_data_shape
    return max(frame_height, frame_width) / max(model_height, model_width)
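# Illustrative usage sketch for yolo_resize_factor (the 416x416 input shape, the video
# path, and the box values are invented for the example):
import cv2

capture = cv2.VideoCapture("traffic.mp4")
factor = yolo_resize_factor(capture, (1, 416, 416, 3))
x, y, w, h = 10, 20, 50, 80  # box in model-input coordinates
frame_box = [int(v * factor) for v in (x, y, w, h)]  # box rescaled to frame coordinates
capture.release()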
def read_images(photos_path, video_path):
    cap = VideoCapture(video_path)  # Play the video to capture it frame by frame
    fps = cap.get(CAP_PROP_FPS)  # Get the fps of the video
    print(f'fps: {fps}')
    mkdir(photos_path)
    currentFrame = 0
    while True:
        _, frame = cap.read()  # Get 1 frame
        if frame is None:
            break
        # Save the capture
        name = prt(photos_path, currentFrame)
        imwrite(name, frame)
        currentFrame += 1  # Image index
    # Close the video
    cap.release()
    destroyAllWindows()
def make_video(photos_path, video_path, video_name):
    # NOTE: `v_path` is not defined in this scope; it should point to the source video
    # whose frame rate is being reused.
    cap = VideoCapture(v_path)
    fps = cap.get(CAP_PROP_FPS)
    cap.release()

    mkdir(video_path)
    images_cnt = jpgcount(photos_path)
    print(f'{images_cnt} images')

    # Load the images
    img = []
    for i in range(images_cnt):
        img.append(imread(photos_path + '/frame' + str(i) + '.jpg'))
    height, width, _ = img[1].shape

    # Initialize the video
    video = VideoWriter(video_path + '/' + video_name + '.mp4',
                        VideoWriter_fourcc(*'MP4V'), fps, (width, height))
    print('Encoding video')
    # Insert each image
    for j in range(images_cnt):
        video.write(img[j])

    # Close the process
    destroyAllWindows()
    video.release()
    print('Video encoded')
def read_frames(video: cv2.VideoCapture, n_frames: int):
    # Yield n_frames frames sampled evenly across the video.
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    step = int(frame_count / n_frames)
    for frame_number in range(0, frame_count, step):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
        response, frame = video.read()
        yield frame
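# Sketch of how read_frames might be consumed ("long_video.mp4" is a placeholder);
# the generator seeks before each read, yielding roughly n_frames evenly spaced frames.
import cv2

cap = cv2.VideoCapture("long_video.mp4")
for i, frame in enumerate(read_frames(cap, n_frames=10)):
    if frame is not None:
        cv2.imwrite(f"sample_{i:02d}.jpg", frame)
cap.release()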
def GetVideoFileFrameCount(videoFileStream: cv2.VideoCapture) -> int:
    '''
    Get the total number of frames of a video file.
    :param videoFileStream: video file stream
    :return: total frame count of the video file
    '''
    return int(videoFileStream.get(cv2.CAP_PROP_FRAME_COUNT))
def list_supported_capture_properties(cap: cv2.VideoCapture):
    """List the properties supported by the capture device."""
    # thanks: https://stackoverflow.com/q/47935846/782170
    supported = list()
    for attr in dir(cv2):
        if attr.startswith("CAP_PROP") and cap.get(getattr(cv2, attr)) != -1:
            supported.append(attr)
    return supported
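# Quick check of a device's supported properties using the helper above (device index 0
# is an assumption; properties for which get() returns -1 are treated as unsupported):
import cv2

cap = cv2.VideoCapture(0)
if cap.isOpened():
    print(list_supported_capture_properties(cap))
cap.release()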
def get_tags(driver, path, filter=False):
    tags = set()
    frames = []
    video = path.suffix in ('.gif', '.webm', '.mp4')

    if video:
        tags.add('animated')
        if path.suffix in ('.webm', '.mp4'):
            try:
                for stream in FFProbe(str(path)).streams:
                    if stream.codec_type == 'audio':
                        tags.add('audio')
                        break
            except:
                pass
            vidcap = VideoCapture(str(path))
            frame_count = int(vidcap.get(CAP_PROP_FRAME_COUNT))
            vidcap.release()
        elif path.suffix == '.gif':
            gifcap = GifImagePlugin.GifImageFile(str(path))
            frame_count = gifcap.n_frames
            gifcap.close()
        step = 90 * log((frame_count * .002) + 1) + 1
        frames = video_generator(path, round(step))
    else:
        frames.append(path)

    for frame in frames:
        driver.get('http://dev.kanotype.net:8003/deepdanbooru/')
        driver.find('//*[@id="exampleFormControlFile1"]', str(frame))
        driver.find('//button[@type="submit"]', click=True)
        for _ in range(4):
            html = bs4.BeautifulSoup(driver.page_source(), 'lxml')
            try:
                tags.update([tag.text for tag in html.find('tbody').findAll(href=True)])
                break
            except AttributeError:
                if driver.current_url().endswith('deepdanbooru/'):
                    driver.find('//*[@id="exampleFormControlFile1"]', str(frame))
                    driver.find('//button[@type="submit"]', click=True)
                driver.refresh()

    if filter:
        tags.difference_update(REMOVE)
    return ' '.join(tags)
def __init__(self, video: cv.VideoCapture, window_size: int, predictions: PredictionList,
             true_actions: ActionList, label_dict: ActionLabels, target_fps: int = 30, *args):
    assert video.isOpened()
    self._video: cv.VideoCapture = video
    self._window_size: int = window_size
    self._predictions: PredictionList = predictions
    self._true_actions: ActionList = true_actions
    self._label_dict: ActionLabels = label_dict
    self._target_fps: int = target_fps
    self._fps: int = target_fps

    # Precompute the prediction performance (in correct frames percentage) for each prediction model
    local_predictions, remote_predictions, fusion_predictions = zip(*predictions)
    self._local_correct_frame_counts: list[float] = self._compute_correct_frame_percentages(
        local_predictions, true_actions)
    self._remote_correct_frame_counts: list[float] = self._compute_correct_frame_percentages(
        remote_predictions, true_actions)
    self._fusion_correct_frame_counts: list[float] = self._compute_correct_frame_percentages(
        fusion_predictions, true_actions)

    # Extract video meta information
    self._frame_count: int = int(video.get(cv.CAP_PROP_FRAME_COUNT))

    # Initialize playing state
    self._playing: bool = False
    self._frame_id: int = 0

    # Setup main window
    self._main_window = MainWindow(*args)
    self._main_window.set_frame_count(self._frame_count)
    self._main_window.resize_labels_to_required_size(list(label_dict.values()))
    self._main_window.setup_framerate(1, target_fps * 3, self._fps)
    self._main_window.playButton.clicked.connect(self._play_or_pause)
    self._main_window.restartButton.clicked.connect(self._restart)
    self._main_window.frameScrollbar.valueChanged.connect(self._jump_to_frame)
    self._main_window.framerateSlider.valueChanged.connect(self._adjust_fps)

    # Next frame cache
    self._next_frame_cache: Optional[tuple[bool, np.ndarray]] = None

    # Setup and start frame timer
    self._timer: QTimer = QTimer()
    self._timer.timeout.connect(self._timeout)
    interval = int(1000.0 / float(self._fps))
    self._timer.start(interval)
class CV2WebCamSource(VideoSource):
    def __init__(self, name, camNumber):
        VideoSource.__init__(self, name + "-CV2Cam" + str(camNumber))
        self.camNumber = camNumber

    def getFrame(self):
        ret, rawFrame = self.cap.read()
        return rawFrame

    def getFPS(self):
        return 28

    def start(self):
        self.cap = VideoCapture(self.camNumber)
        self.size = (int(self.cap.get(3)), int(self.cap.get(4)))  # frame width, height

    def stop(self):
        self.cap.release()
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self, src):
        self.auto_rewind = True
        self.controls = None  # No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)
        self._get_frame_ = self.cap.read

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def set_fps(self):
        pass

    def get_fps(self):
        return None

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def get_frame(self):
        s, img = self.read()
        timestamp = time()
        return Frame(timestamp, img)

    def rewind(self):
        self.cap.set(1, 0)  # seek to the beginning

    def create_atb_bar(self, pos):
        return 0, 0

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
def videotape_seconds(self, carNo, t_seconds, show_view=True):
    """
    Record video for a given number of seconds.
    :param carNo:
    :param show_view:
    :return:
    """
    try:
        # Open the rtsp stream
        cap = VideoCapture(self.URL)
        # Video resolution
        size = (int(cap.get(CAP_PROP_FRAME_WIDTH)), int(cap.get(CAP_PROP_FRAME_HEIGHT)))
        # Frame rate
        fps = cap.get(CAP_PROP_FPS)
        # Save the video in avi format
        fourcc = VideoWriter_fourcc(*'XVID')
        # Video writer object
        outfile = VideoWriter(self.video_path.format(carNo=carNo, now=fmt_date(fmt=FMT_DATETIME)),
                              fourcc, fps, size)
        if show_view:
            # Preview window
            namedWindow('view', WINDOW_NORMAL | WINDOW_KEEPRATIO)
        ret, frame = cap.read()
        t1 = time()
        while ret:
            if time() - t1 >= t_seconds:
                break
            ret, frame = cap.read()
            outfile.write(frame)
            if show_view:
                imshow("view", frame)
                waitKey(1)
        else:
            consoleLog(self.logPre, "No frame captured")
    except Exception as e:
        consoleLog(self.logPre, "Video recording exception:", repr(e))
    finally:
        if cap:
            cap.release()
        if outfile:
            outfile.release()
        destroyAllWindows()
def GetPosition(stream: cv2.VideoCapture) -> int:
    """
    Get the current frame position of the video input stream.

    Args:
        stream: video input stream

    Returns:
        current frame index
    """
    return int(stream.get(cv2.CAP_PROP_POS_FRAMES))
def dominant_color(cap: cv2.VideoCapture, pt1: tuple, pt2: tuple):
    x1, y1 = pt1
    x2, y2 = pt2

    fourcc = cv2.VideoWriter_fourcc(*'avc1')
    size = (int(cap.get(3)), int(cap.get(4)))  # frame width, height
    out = cv2.VideoWriter('output.mp4', fourcc, 20.0, size)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Get region of interest from frame.
        roi = frame[y1:y2, x1:x2]
        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)

        # Convert roi to 2d array.
        x, y, z = roi.shape
        roi_2d = roi.reshape(x * y, z)

        # Perform K-means clustering to find dominant color.
        kmeans = KMeans(n_clusters=3)
        kmeans.fit(roi_2d)
        most_label = np.bincount(kmeans.labels_).argmax()
        dc = [int(i) for i in kmeans.cluster_centers_[most_label]]

        # Draw rectangle and print dominant color.
        rect_color = (dc[2], dc[1], dc[0])
        rect = cv2.rectangle(frame, (x1, y1), (x2, y2), rect_color, 2)
        cv2.putText(rect, "Dominant RGB: {}".format(dc), (x1, y1), 0, 0.5, rect_color)
        print(dc)

        out.write(frame)
        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    out.release()
    return
def play(markdown: Dict, capture: cv2.VideoCapture):
    resize_resolution = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH) * RESIZE_SCALE),
                         int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT) * RESIZE_SCALE))
    persons = markdown['viper']['data']['sourcefile']['object']
    if DEBUG:
        for person_id in persons:
            check_for_gaps(person_id)
    events: List[EventWithId] = create_event_list(persons)

    show_frames = False
    frame_no = 0
    expected_event_id = 0
    active_persons_ids = set()
    if DEBUG:
        frame_no = 3400 - 1
        capture.set(cv2.CAP_PROP_POS_FRAMES, 3400)

    while capture.isOpened():
        ret, frame = capture.read()
        if ret is False:
            break
        if frame_no == events[expected_event_id].framespan:
            show_frames = True
        expected_event_id = update_active_persons(frame_no, events, expected_event_id,
                                                  active_persons_ids)
        current_bboxes = get_bboxes_for_active(active_persons_ids, frame_no, persons)
        frame = put_bboxes_on_frame(current_bboxes, frame)
        if show_frames:
            frame = cv2.resize(frame, resize_resolution)
            cv2.imshow('vid', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        frame_no += 1
def get_frames(video: cv.VideoCapture, grayscale: bool = False) -> np.ndarray:
    """AssertionError is raised if a frame cannot be read."""
    video.set(cv.CAP_PROP_POS_AVI_RATIO, 0)  # rewind to the start
    frame_count = int(video.get(cv.CAP_PROP_FRAME_COUNT))
    width = int(video.get(cv.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv.CAP_PROP_FRAME_HEIGHT))
    shape = ((frame_count, height, width) if grayscale
             else (frame_count, height, width, 3))
    result = np.zeros(shape, np.uint8)  # type: ignore
    for index in range(frame_count):
        success, frame = video.read()
        assert success, f"Error reading frame {index}"
        result[index] = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) if grayscale else frame
    return result
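# Hedged usage sketch for get_frames ("scan.avi" is a placeholder path); loading all
# frames at once is memory-hungry, roughly frame_count * height * width * 3 bytes for color.
import cv2 as cv

video = cv.VideoCapture("scan.avi")
gray_stack = get_frames(video, grayscale=True)  # shape: (frames, height, width)
video.release()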
class Camera_Capture():
    """
    VideoCapture without uvc control, using cv2.VideoCapture
    """
    def __init__(self, src_id, size=(640, 480), fps=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        # add cv videocapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp, img)

    def set_size(self, size):
        width, height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self, fps):
        self.capture.set(5, fps)

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self, pos):
        size = 0, 0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
def main():
    # Get frames from video
    video = input('Video file: ')
    vidcap = VideoCapture(video)
    print('Total frame: {}'.format(vidcap.get(CAP_PROP_FRAME_COUNT)))
    print('Printing frames ...')
    success, image = vidcap.read()
    count = 1
    if success:
        if not os.path.isdir('Frame'):
            os.makedirs('Frame')
    else:
        print("Can't find video file")
        exit()
    while success:
        imwrite(os.path.join("Frame", "%d.jpg" % count), image)  # save frame as JPEG file
        success, image = vidcap.read()
        count += 1
    print("Done printing")

    # Render every frame
    frames = os.listdir("Frame")
    frames.sort(key=lambda name: int(name.split('.')[0]))
    for frame in frames:
        render_image(os.path.join("Frame", frame), frame.split('.')[0])

    # Write video
    structure = imread(os.path.join("Frame", frames[0]))
    height, width, layers = structure.shape
    vidwrite = VideoWriter('result.AVI', -1, vidcap.get(CAP_PROP_FPS), (width, height))
    for frame in frames:
        img = imread(os.path.join("Frame", frame))
        vidwrite.write(img)
    cv2.destroyAllWindows()
    vidwrite.release()
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self, src):
        self.auto_rewind = True
        self.controls = None  # No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)
        self._get_frame_ = self.cap.read

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def read_RGB(self):
        s, img = self.read()
        if s:
            # frames from cv2 are BGR; convert in place to RGB
            cvtColor(img, COLOR_BGR2RGB, img)
        return s, img

    def read_HSV(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_BGR2HSV, img)
        return s, img

    def rewind(self):
        self.cap.set(1, 0)  # seek to the beginning
class Camera_Capture(object):
    """docstring for uvcc_camera"""
    def __init__(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls['UVCC_REQ_FOCUS_AUTO'].set_val(0)
        except KeyError:
            pass

        if '6000' in self.name and False:
            # on mac we don't have enough controls to use this right.
            logger.info("adjusting exposure for HD-6000 camera")
            try:
                self.controls['UVCC_REQ_EXPOSURE_AUTOMODE'].set_val(1)
                self.controls['UVCC_REQ_EXPOSURE_ABS'].set_val(156)
            except KeyError:
                pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp, img)

    def set_size(self, size):
        width, height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self, fps):
        self.capture.set(5, fps)

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self, pos):
        # add uvc camera controls to a separate ATB bar
        size = (200, 200)
        self.bar = atb.Bar(name="Camera_Controls", label=self.name,
                           help="UVC Camera Controls", color=(50, 50, 50), alpha=100,
                           text='light', position=pos, refresh=2., size=size)
        sorted_controls = [c for c in self.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        for control in sorted_controls:
            name = control.atb_name
            if control.type == "bool":
                self.bar.add_var(name, vtype=atb.TW_TYPE_BOOL8,
                                 getter=control.get_val, setter=control.set_val)
            elif control.type == 'int':
                self.bar.add_var(name, vtype=atb.TW_TYPE_INT32,
                                 getter=control.get_val, setter=control.set_val)
                self.bar.define(definition='min=' + str(control.min), varname=name)
                self.bar.define(definition='max=' + str(control.max), varname=name)
                self.bar.define(definition='step=' + str(control.step), varname=name)
            elif control.type == "menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype = atb.enum(name, control.menu)
                self.bar.add_var(name, vtype=vtype,
                                 getter=control.get_val, setter=control.set_val)
                if control.menu is None:
                    self.bar.define(definition='min=' + str(control.min), varname=name)
                    self.bar.define(definition='max=' + str(control.max), varname=name)
                    self.bar.define(definition='step=' + str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # self.bar.define(definition='readonly=1',varname=control.name)

        self.bar.add_button("refresh", self.controls.update_from_device)
        self.bar.add_button("load defaults", self.controls.load_defaults)
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
def captureTStamp(files, duration, cod, fps=0, verbose=True):
    '''
    Saves the video captured from the given address to the given file for a
    time in minutes (duration), together with files holding the time stamps
    of each frame.

    files = [url, saveVideoFile, saveDateFile, saveMillisecondFile]
    duration = time in minutes
    cod = codec
    fps = frames per second for the video to be saved
    verbose = print messages to screen

    If fps=0 it tries to read it from the capture. For the FE camera it has
    to be specified, for opencv '2.4.9.1'.

    Examples
    --------

    from cameraUtils import captureTStamp

    # for the FE
    duration = 1  # in minutes
    files = ['rtsp://192.168.1.48/live.sdp',
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_tsFrame.txt"]
    fpsCam = 12
    cod = 'XVID'

    captureTStamp(files, duration, cod, fps=fpsCam)

    # %% for the PTZ
    duration = 0.2  # in minutes
    files = ["rtsp://192.168.1.49/live.sdp",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_tsFrame.txt"]

    fpsCam = 20
    cod = 'XVID'

    captureTStamp(files, duration, cod, fpsCam)
    '''
    fcc = fourcc(cod[0], cod[1], cod[2], cod[3])  # video codec

    if verbose:
        print(files)
        print("Duration", duration, "minutes")
        print("fps", fps)
        print("codec", cod)

    # Initialization
    tFin = datetime.datetime.now() + datetime.timedelta(minutes=duration)

    ts = list()  # timestamps of the capture

    # open capture
    cap = VideoCapture(files[0])
    while not cap.isOpened():
        cap = VideoCapture(files[0])

    print("capture opened")

    # configure writer
    w = int(cap.get(frame_width))
    h = int(cap.get(frame_height))
    if not fps:
        fps = cap.get(prop_fps)
        # for the FE specify the fps explicitly because the property returns garbage

    out = VideoWriter(files[1], fcc, fps, (w, h), True)

    if verbose:
        print("capture open", cap.isOpened())
        print("frame size", w, h)
        print("output opened", out.isOpened())

    if not out.isOpened() or not cap.isOpened():
        out.release()
        cap.release()
        # exit function if unable to open cap or out
        return

    s0 = getsize(files[1])  # initial filesize before writing frames

    # First capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("first frame captured")

    # Second capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("second frame captured")

    # Third capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("third frame captured")

    s1 = getsize(files[1])  # size after saving 3 frames

    if s1 == s0:
        out.release()
        cap.release()
        print("error when saving 3 frames, exiting")
        return 1  # error while saving first frames to file

    print(tFin)
    # loop
    while t <= tFin:
        ret, frame = cap.read()
        if ret:
            t = datetime.datetime.now()
            ts.append(t)
            out.write(frame)
            if verbose:
                print(tFin, t)
                print("seconds elapsed", cap.get(pos_msec) / 1000)
                print(frame.size)
    # end of loop

    # release and save
    out.release()
    cap.release()

    if verbose:
        print('loop exited, cap, out released, times saved to files')

    savetxt(files[2], ts, fmt=["%s"])

    return 0  # success
class Camera_Capture(object):
    """docstring for uvcc_camera"""
    def __init__(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0)
        except KeyError:
            pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

    def re_init(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0)
        except KeyError:
            pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

        # recreate the bar with new values
        bar_pos = self.bar._get_position()
        self.bar.destroy()
        self.create_atb_bar(bar_pos)

    def re_init_cam_by_src_id(self, src_id):
        try:
            cam = Camera_List()[src_id]
        except KeyError:
            logger.warning("could not reinit capture, src_id not valid anymore")
            return
        self.re_init(cam, self.get_size())

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp, img)

    def set_size(self, size):
        width, height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self, fps):
        self.capture.set(5, fps)

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self, pos):
        # add uvc camera controls to a separate ATB bar
        size = (200, 200)
        self.bar = atb.Bar(name="Camera_Controls", label=self.name,
                           help="UVC Camera Controls", color=(50, 50, 50), alpha=100,
                           text="light", position=pos, refresh=2.0, size=size)
        sorted_controls = [c for c in self.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        cameras_enum = atb.enum("Capture", dict([(c.name, c.src_id) for c in Camera_List()]))
        self.bar.add_var("Capture", vtype=cameras_enum,
                         getter=lambda: self.src_id, setter=self.re_init_cam_by_src_id)

        for control in sorted_controls:
            name = control.atb_name
            if control.type == "bool":
                self.bar.add_var(name, vtype=atb.TW_TYPE_BOOL8,
                                 getter=control.get_val, setter=control.set_val)
            elif control.type == "int":
                self.bar.add_var(name, vtype=atb.TW_TYPE_INT32,
                                 getter=control.get_val, setter=control.set_val)
                self.bar.define(definition="min=" + str(control.min), varname=name)
                self.bar.define(definition="max=" + str(control.max), varname=name)
                self.bar.define(definition="step=" + str(control.step), varname=name)
            elif control.type == "menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype = atb.enum(name, control.menu)
                self.bar.add_var(name, vtype=vtype,
                                 getter=control.get_val, setter=control.set_val)
                if control.menu is None:
                    self.bar.define(definition="min=" + str(control.min), varname=name)
                    self.bar.define(definition="max=" + str(control.max), varname=name)
                    self.bar.define(definition="step=" + str(control.step), varname=name)
            else:
                pass
            if control.flags == "inactive":
                pass
                # self.bar.define(definition='readonly=1',varname=control.name)

        self.bar.add_button("refresh", self.controls.update_from_device)
        self.bar.add_button("load defaults", self.controls.load_defaults)
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        self.control = None
        logger.info("Capture released")
# Python 2 script (print statements and raw_input); left in its original dialect.
from cv2 import VideoCapture
from processFrame import *
from PIL import Image, ImageDraw
import numpy as np
import matplotlib.pyplot as plt
from time import sleep

t = time()
count = 0
filename = raw_input()
video = VideoCapture(filename)
frame_count = int(np.ceil(video.get(7)) / 3)  # property 7 = CAP_PROP_FRAME_COUNT
internal = 0

# while internal < 2507:
#     condition = video.grab()
#     print "Skipping frame %d" % internal
#     count += 1
#     internal += 1

print "At frame: ", video.get(1)  # property 1 = CAP_PROP_POS_FRAMES
print "Total frames: ", frame_count, "vs. ", video.get(7)
# sleep(10)
width, height = frame_count, int(np.ceil(frame_count / (16.0 / 9)))
# barcode = Image.new('RGB', (width, height), (255,255,255))
# draw = ImageDraw.Draw(barcode)
# f = open("barcode.jpg", 'w')
f = open("color_codes.txt", 'a')

condition, frame = video.read()
while condition:
    print "Processing frame %d" % count
    # color = findColor(frame)
    if count % 3 == 0:
        color = findColor(frame)