def test_large_small_skip_equal():
    sequential_reader = FFMPEG_VideoReader("media/big_buck_bunny_0_30.webm")
    small_skip_reader = FFMPEG_VideoReader("media/big_buck_bunny_0_30.webm")
    large_skip_reader = FFMPEG_VideoReader("media/big_buck_bunny_0_30.webm")
    assert small_skip_reader.fps == large_skip_reader.fps == sequential_reader.fps == 24

    # Read every frame sequentially
    for t in np.arange(0, 10, 1 / 24):
        sequential_reader.get_frame(t)
    sequential_final_frame = sequential_reader.get_frame(10)

    # Read in increments of 24 frames
    for t in range(10):
        small_skip_reader.get_frame(t)
    small_skip_final_frame = small_skip_reader.get_frame(10)

    # Jump straight forward 240 frames. This is greater than 100, so the
    # reader uses FFmpeg to re-seek at the right position.
    large_skip_final_frame = large_skip_reader.get_frame(10)

    assert (sequential_reader.pos
            == small_skip_reader.pos
            == large_skip_reader.pos
            == 241)

    # All readers have advanced by the same amount, so the frames should be equal
    assert np.array_equal(sequential_final_frame, small_skip_final_frame)
    assert np.array_equal(small_skip_final_frame, large_skip_final_frame)
def test_autorotate():
    # This test requires ffmpeg >= 2.7
    video_file = 'media/ficus_vertical.mp4'
    reader = FFMPEG_VideoReader(video_file)
    assert reader.infos['video_size'] == [1920, 1080]
    assert reader.infos['video_rotation'] == 90
    assert reader.size == [1080, 1920]
    reader.close()

    reader = FFMPEG_VideoReader(video_file, ffmpeg_params=['-noautorotate'])
    assert reader.size == [1920, 1080]
    assert reader.rotation == 90
    reader.close()
def __init__(self, swf_mp4, grf_mp4, chat_mp4, output, fps=10):
    self._swf_mp4 = swf_mp4
    self._grf_mp4 = grf_mp4
    self._chat_mp4 = chat_mp4
    self._fq = FfmpegQuerier()
    self._swf_reader = FFMPEG_VideoReader(swf_mp4)
    self._grf_reader = FFMPEG_VideoReader(grf_mp4)
    self._chat_reader = FFMPEG_VideoReader(chat_mp4)
    self._output = output
    self._fps = fps
    self._output_size = self.cal_output_size()
    self._writer = FFMPEG_VideoWriter(output, self._output_size, fps,
                                      audiofile=grf_mp4)
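# A minimal usage sketch for the constructor above, assuming the enclosing
# class is named `VideoMerger` (hypothetical); the file names are placeholders.
merger = VideoMerger('swf.mp4', 'grf.mp4', 'chat.mp4', 'merged.mp4', fps=10)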
def test_moviepy(infile, out_dir):
    import os
    import numpy as np
    import pipi
    from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
    from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter

    chunksize = 128

    vr = FFMPEG_VideoReader(infile)
    w, h = vr.size
    chunk = np.zeros((h, w, vr.depth, chunksize), dtype=np.uint8)
    with pipi.Timer("mp read..."):
        for i in range(chunksize):
            frame = vr.read_frame()
            chunk[..., i] = frame

    vw = FFMPEG_VideoWriter(os.path.join(out_dir, "mp_ov.mkv"), (w, h), 30,
                            codec="libx264", preset="fast",
                            ffmpeg_params=["-crf", "0"])
    with pipi.Timer("mp write..."):
        for i in range(chunksize):
            vw.write_frame(chunk[..., i])

    vr.close()
    vw.close()
def __load_set(self, set_file):
    with open(set_file) as f:
        lines = f.readlines()

    video_list = []
    text_list = []
    gt_list = []
    for line in lines:
        line = line.strip('\n')
        segs = line.split(' ')
        print('=> Load Video', segs)
        assert len(segs) == 3
        segs = [os.path.join(self.root, seg) for seg in segs]

        video_list.append(segs[0])
        cap = FFMPEG_VideoReader(segs[0])
        cap.initialize()
        # video_list.append(cap)
        print('Video: frames({})'.format(int(cap.nframes)))

        # Load the text JSON file
        text = json.load(open(segs[1]))

        # Load the ground-truth numpy file
        gt = np.load(segs[2])
        print('Gt   : frames({})'.format(len(gt)))

        text_list.append(text)
        gt_list.append(gt)

    self.video_list = video_list
    self.text_list = text_list
    self.gt_list = gt_list
def get_video(vid_path, color=True, size=True):
    """Get video by given video path.

    Parameters
    ----------
    vid_path : string
        target video absolute path
    color : bool
        if True, return color frames with BGR encoding;
        if False, return grayscale frames.
    size : bool
        if True, also return the size of the frame;
        if False, just return the frames.

    Returns
    -------
    frames : list
        a list of frames that make up the video
    size : tuple
        size of the frame (only returned if ``size`` is True).
    """
    vid_container = FFMPEG_VideoReader(vid_path)
    frames = []
    for i in range(vid_container.nframes):
        frame_t = vid_container.read_frame()
        frame_t = cv2.cvtColor(frame_t, cv2.COLOR_RGB2BGR)
        if color is False:
            frame_t = cv2.cvtColor(frame_t, cv2.COLOR_BGR2GRAY)
        frames.append(frame_t)
    if size is True:
        return frames, frames[0].shape
    else:
        return frames
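# A usage sketch for get_video() above; the path is a placeholder.
frames, frame_shape = get_video('/abs/path/clip.mp4', color=True, size=True)
gray_frames = get_video('/abs/path/clip.mp4', color=False, size=False)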
def test_sequential_frame_pos():
    """test_video.mp4 contains 5 frames at 1 fps.

    Each frame is 1x1 pixels and the sequence is Red, Green, Blue, Black,
    White. The RGB values are not pure due to compression.
    """
    reader = FFMPEG_VideoReader("media/test_video.mp4")
    assert reader.pos == 1

    # Get first frame
    frame_1 = reader.get_frame(0)
    assert reader.pos == 1
    assert np.array_equal(frame_1, [[[254, 0, 0]]])

    # Get a specific sequential frame
    frame_2 = reader.get_frame(1)
    assert reader.pos == 2
    assert np.array_equal(frame_2, [[[0, 255, 1]]])

    # Get next frame. Note `.read_frame()` instead of `.get_frame()`
    frame_3 = reader.read_frame()
    assert reader.pos == 3
    assert np.array_equal(frame_3, [[[0, 0, 255]]])

    # Skip a frame
    skip_frame = reader.get_frame(4)
    assert reader.pos == 5
    assert np.array_equal(skip_frame, [[[255, 255, 255]]])
def __init__(self, filename, ismask=False, has_mask=False,
             audio=True, audio_buffersize=200000,
             audio_fps=44100, audio_nbytes=2, verbose=False):

    VideoClip.__init__(self, ismask)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
def video_uniform_sample_n_frames_old(video_path, n_samples, max_dim):
    """
    Sample only n frames from the video.
    """
    raise Exception('Needs to add argument about resizing type')

    cap = FFMPEG_VideoReader(video_path, False)
    cap.initialize()
    fps = cap.fps
    n_frames = cap.nframes
    duration = cap.duration
    step = duration / n_samples

    frames = []
    for i in range(n_samples):
        time_sec = i * step
        frame = cap.get_frame(time_sec)
        # resize the frame to fit in the array; it's going to be used by caffe anyway
        frame = image_utils.resize_keep_aspect_ratio_max_dim(frame, max_dim)
        # the frame is encoded as uint8 with values 0-255,
        # but caffe needs float32 with values 0-1
        frame = frame.astype('float32') / float(255)
        frames.append(frame)

    # very important to close the reader, or we'd have memory leakage
    cap.close()

    return frames
def __play_video_ffmpeg(video_path, caption, window_name='window', speed=1):
    is_playing = True

    cap = FFMPEG_VideoReader(video_path, False)
    cap.initialize()
    fps = float(cap.fps)
    n_frames = cap.nframes

    index = 0
    while True:
        if is_playing:
            time_sec = index / fps
            # increment by speed
            index += speed
            frame = cap.get_frame(time_sec)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_size = frame.shape

            # resize the frame
            f_width = 800
            resize_factor = float(f_width) / frame_size[1]
            f_height = int(frame_size[0] * resize_factor)
            frame_size = (f_width, f_height)
            frame = cv2.resize(src=frame, dsize=frame_size, interpolation=cv2.INTER_AREA)

            # write the caption on the frame
            top = int(f_height * 0.9)
            text_width = cv2.getTextSize(caption, font, 1.2, 1)[0][0] + 20
            cv2.rectangle(frame, (0, top - 22), (text_width, top + 10), black_color,
                          cv2.FILLED)  # was cv2.cv.CV_FILLED; cv2.cv was removed in OpenCV 3
            cv2.putText(img=frame, text=caption, org=(10, top), fontFace=font,
                        fontScale=1.2, color=white_color, thickness=1, lineType=8)

            # show the frame
            cv2.imshow(window_name, frame)
            e = cv2.waitKey(2)
            # quit with 'esc'
            if e == 27:
                break
            # pause with 'space'
            if e == 32:
                is_playing = False
                print('Pause video')
            # if we've shown as many frames as the video contains, we stop
            if index >= n_frames:
                break
        else:
            # toggle pause with 'space'
            e = cv2.waitKey(2)
            if e == 32:
                is_playing = True
                print('Play video')
def __init__(
    self,
    filename,
    has_mask=False,
    audio=True,
    audio_buffersize=200000,
    target_resolution=None,
    resize_algorithm="bicubic",
    audio_fps=44100,
    audio_nbytes=2,
    fps_source="tbr",
):
    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(
        filename,
        pix_fmt=pix_fmt,
        target_resolution=target_resolution,
        resize_algo=resize_algorithm,
        fps_source=fps_source,
    )

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.rotation = self.reader.rotation
    self.filename = filename

    if has_mask:
        self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]

        def mask_mf(t):
            return self.reader.get_frame(t)[:, :, 3] / 255.0

        self.mask = VideoClip(
            ismask=True, make_frame=mask_mf).set_duration(self.duration)
        self.mask.fps = self.fps
    else:
        self.make_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos["audio_found"]:
        self.audio = AudioFileClip(
            filename,
            buffersize=audio_buffersize,
            fps=audio_fps,
            nbytes=audio_nbytes,
        )
def get_regions(video_path, annot, resize_type, verbose=False):
    """
    Get the frames whose numbers are given in the "annot" dictionary.
    Then, for each frame, extract the regions specified in the "annot"
    dictionary. Finally, return these regions.
    """
    assert resize_type in ['resize', 'resize_crop', 'resize_crop_scaled']

    resize_function = None
    if resize_type == 'resize':
        resize_function = image_utils.resize_frame
    elif resize_type == 'resize_crop':
        resize_function = image_utils.resize_crop
    elif resize_type == 'resize_crop_scaled':
        resize_function = image_utils.resize_crop_scaled

    cap = FFMPEG_VideoReader(video_path, False)
    cap.initialize()
    fps = float(cap.fps)
    n_frames = cap.nframes
    duration = cap.duration

    n_regions = sum([len(v) for k, v in annot.items()])
    frame_size = 224
    bbox_resize_factor = 2
    regions = np.zeros(shape=(n_regions, frame_size, frame_size, 3), dtype='float32')

    region_idx = -1
    frame_nums = annot.keys()
    for frame_num in frame_nums:
        if (region_idx + 1) % 100 == 0 and verbose:
            print(' ... reading region %d/%d' % (region_idx + 1, n_regions))

        # get the frame
        i = frame_num - 1
        time_sec = i / fps
        frame = cap.get_frame(time_sec)

        # get the (resized) regions from the frame
        regions_info = annot[frame_num]
        for region_info in regions_info:
            region_idx += 1
            bbox = region_info[1:5]
            bbox = np.multiply(bbox, bbox_resize_factor).astype(int)
            x1, y1, x2, y2 = bbox
            region = frame[y1:y2, x1:x2]
            # resize the region to fit in the array; it's going to be used by caffe anyway
            region = resize_function(region)
            # the region is encoded as uint8 with values 0-255,
            # but caffe needs float32 with values 0-1
            region = region.astype('float32') / float(255)
            regions[region_idx] = region

    # very important to close the reader, or we'd have memory leakage
    cap.close()

    return regions
def test_seeking_beyond_file_end():
    reader = FFMPEG_VideoReader("media/test_video.mp4")
    frame_1 = reader.get_frame(0)

    with pytest.warns(UserWarning, match="Using the last valid frame instead"):
        end_of_file_frame = reader.get_frame(5)
    assert np.array_equal(frame_1, end_of_file_frame)
    assert reader.pos == 6

    # Try again with a jump larger than 100 frames
    # (which triggers different behaviour in `.get_frame()`)
    reader = FFMPEG_VideoReader("media/big_buck_bunny_0_30.webm")
    frame_1 = reader.get_frame(0)
    with pytest.warns(UserWarning, match="Using the last valid frame instead"):
        end_of_file_frame = reader.get_frame(30)
    assert np.array_equal(frame_1, end_of_file_frame)
    assert reader.pos == 30 * 24 + 1
def __setstate__(self, newstate):
    """ Custom unpickling """
    # Recreate the video segment's audio and reader
    newstate['reader'] = FFMPEG_VideoReader(newstate['filename'])
    newstate['audio'] = AudioFileClip(newstate['filename']).subclip(
        newstate['source_start_time'],
        newstate['source_start_time'] + newstate['duration'])
    self.__dict__.update(newstate)
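# A hedged sketch of how the __setstate__ hook above is exercised, assuming
# the enclosing class is named `VideoSegment` (hypothetical) and defines a
# matching __getstate__ that drops the unpicklable reader and audio.
import pickle

segment = VideoSegment('clip.mp4')  # hypothetical constructor
restored = pickle.loads(pickle.dumps(segment))  # __setstate__ rebuilds reader/audio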
def comparechunk(queryVidName, dbVidName, dbStart, dbEnd, thresh):
    """
    Compare the query video against a chunk of the comparison video,
    starting from a given frame. The threshold is a user-determined value
    chosen qualitatively, which the GUI turns into a quantitative number.

    Args:
        queryVidName: path to the query video
        dbVidName: path to the comparison video
        dbStart: start frame number
        dbEnd: end frame number
        thresh: RMSE threshold (a number from 0-255; realistically it
            should be between 10 and 50)
    """
    # Create the FFMPEG class variables
    dbVid = FFMPEG_VideoReader(dbVidName)
    queryVid = FFMPEG_VideoReader(queryVidName)

    # Skip to the correct frames in the video
    frameQ = queryVid.get_frame(0)
    dbVid.skip_frames(dbStart)
    frameD = dbVid.read_frame()

    scores = []
    # Compare the first frame in the query video to every frame in the chunk
    for i in range(dbStart, dbEnd):
        score = frame_rmse(frameQ, frameD)
        # Immediately look at start frames below the threshold
        if score < thresh:
            print("Found a frame below the threshold. Scanning sequence...")
            score = startpointCompare(queryVidName, dbVidName, i)
            if score is not None and score < thresh:
                scores.append({
                    "Video Name": dbVidName,
                    "Timestamp": secondsToTimestamp(i / dbVid.fps),
                    "Frame Number": i,
                    "Score": score
                })
                return scores
            else:
                print("A sequence had a poor score of", score, ". Moving on...")
        frameD = dbVid.read_frame()
    return scores
def get_video_info(video_path):
    cap = FFMPEG_VideoReader(video_path, False)
    cap.initialize()
    fps = cap.fps
    n_frames = cap.nframes
    duration = cap.duration
    cap.close()
    del cap
    return fps, n_frames, duration
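# A usage sketch for get_video_info() above; the path is a placeholder.
fps, n_frames, duration = get_video_info('media/clip.mp4')
print('fps=%s, frames=%s, duration=%ss' % (fps, n_frames, duration))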
def get_frame(video, t_frame=0.0):
    """Crop a single frame from the video to check the cropping result.
    Stored alongside the video.

    :param video: path to video
    :param t_frame: position to extract the example frame from, in seconds.
        Default: 0
    :return: image
    """
    clip = FFMPEG_VideoReader(str(video))
    image = clip.get_frame(t_frame)
    return image
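# A usage sketch for get_frame() above; the path is a placeholder.
preview = get_frame('media/clip.mp4', t_frame=2.5)  # RGB array of the frame at 2.5 s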
def __init__(self, filename, has_mask=False,
             audio=True, audio_buffersize=200000,
             target_resolution=None, resize_algorithm='bicubic',
             audio_fps=44100, audio_nbytes=2, verbose=False,
             fps_source='tbr'):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = None  # need this just in case FFMPEG has issues (__del__ complains)
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                     target_resolution=target_resolution,
                                     resize_algo=resize_algorithm,
                                     fps_source=fps_source)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.rotation = self.reader.rotation
    self.filename = self.reader.filename

    if has_mask:
        self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
        mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
        self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                     .set_duration(self.duration))
        self.mask.fps = self.fps
    else:
        self.make_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
def test_large_skip_frame_pos():
    reader = FFMPEG_VideoReader("media/big_buck_bunny_0_30.webm")
    assert reader.fps == 24

    # 10 sec * 24 fps = 240 frames
    reader.get_frame(240 // 24)
    assert reader.pos == 241

    reader.get_frame(719 / 24)
    assert reader.pos == 720

    # Go backwards
    reader.get_frame(120 // 24)
    assert reader.pos == 121
def get_frames(video_path: str) -> List[np.ndarray]:
    """
    Load frames from video.

    :param video_path: path to video.
    :return: loaded frames.
    """
    video_reader = FFMPEG_VideoReader(video_path)
    frames = []
    for _ in tqdm(range(video_reader.nframes), desc='Getting video frames'):
        frames.append(video_reader.read_frame())
    return frames
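# A usage sketch for get_frames() above; the path is a placeholder.
frames = get_frames('media/clip.mp4')
print('read %d frames of shape %s' % (len(frames), frames[0].shape))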
def video_uniform_sample_and_save_old(spf, video_path, frames_path, image_name_format,
                                      resize_type, verbose=False):
    if resize_type is not None:
        assert resize_type in ['resize', 'resize_crop', 'resize_crop_scaled']

    resize_function = None
    if resize_type == 'resize':
        resize_function = image_utils.resize_frame
    elif resize_type == 'resize_crop':
        resize_function = image_utils.resize_crop
    elif resize_type == 'resize_crop_scaled':
        resize_function = image_utils.resize_crop_scaled

    cap = FFMPEG_VideoReader(video_path, False)
    cap.initialize()
    fps = cap.fps
    n_frames = cap.nframes
    duration = cap.duration

    # check if there are no samples because the video duration is less than spf;
    # then at least get 1 frame of the video
    n_samples = int(duration / float(spf))
    if n_samples == 0:
        n_samples = 1

    for i in range(n_samples):
        num = i + 1
        if verbose:
            print(' ... reading frame %d/%d' % (num, n_samples))
        time_sec = i * spf
        frame = cap.get_frame(time_sec)
        if resize_type is not None:
            # resize the frame to fit in the array; it's going to be used by caffe anyway
            frame = resize_function(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        image_name = image_name_format % (num,)
        frame_path = os.path.join(frames_path, image_name)
        cv2.imwrite(frame_path, frame)

    # very important to close the reader, or we'd have memory leakage
    cap.close()

    return fps, n_frames, duration
def video_save_frames_specific_duration(action_num, video_num, video_path,
                                        frames_root_pathes, start_stop_sec,
                                        image_name_format, verbose=False):
    assert len(frames_root_pathes) == len(start_stop_sec)

    cap = FFMPEG_VideoReader(video_path, False)
    cap.initialize()
    fps = float(cap.fps)
    duration_sec = cap.duration

    img_dim = 224
    start_stop_sec = np.array(start_stop_sec)
    for i, s_s_sec in enumerate(start_stop_sec):
        start_sec, stop_sec = s_s_sec
        frame_root_path = frames_root_pathes[i]

        # offset of starting/stopping the action
        sec_offset = 0.25
        start_idx = int((start_sec + sec_offset) * fps)
        stop_idx = int((stop_sec + sec_offset) * fps) + 1

        if verbose:
            print('action, video: %d, %d' % (action_num, video_num))
            print('%d/%d' % (start_sec, stop_sec))
            print('%d/%d' % (start_idx, stop_idx))

        for idx_frame in range(start_idx, stop_idx):
            time_sec = idx_frame / fps
            if verbose and idx_frame % 100 == 0:
                print('... time_sec, frame: %d/%d' % (time_sec, idx_frame))
            frame = cap.get_frame(time_sec)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            frame = image_utils.resize_crop(frame, target_width=img_dim,
                                            target_height=img_dim)
            image_name = image_name_format % (idx_frame,)
            frame_path = os.path.join(frame_root_path, image_name)
            cv2.imwrite(frame_path, frame)

    # very important to close the reader, or we'd have memory leakage
    cap.close()
def save_frames_from_vid(video_name, category_id, train_or_test, row=0):
    make_dir_structure(row)

    # Initialize FFMPEG_VideoReader
    fvr = FFMPEG_VideoReader(filename=video_name)
    fvr.initialize()

    vid = os.path.split(video_name)[1]
    for i in range(0, fvr.nframes):
        frame_name = vid + '_' + str(i)
        frame = fvr.read_frame()
        imsave(os.path.join('trafficdb', 'eval_' + str(row), train_or_test,
                            str(category_id), frame_name + '.jpg'), frame)
    return True
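# A usage sketch for save_frames_from_vid() above; the path and labels are
# placeholders, and the 'trafficdb' directory tree is assumed to be created
# by make_dir_structure().
save_frames_from_vid('videos/cam01.mp4', category_id=2, train_or_test='train', row=0)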
def get_frames_from_vid(video_name, category_id):
    # Initialize FFMPEG_VideoReader
    fvr = FFMPEG_VideoReader(filename=video_name)
    fvr.initialize()

    shape_for_stack = (1, fvr.size[0], fvr.size[1], fvr.depth)
    img_stack = np.zeros(shape_for_stack)
    for i in range(0, fvr.nframes):
        frame = fvr.read_frame()
        frame = frame.reshape(shape_for_stack)
        img_stack = np.vstack((img_stack, frame))
    # drop the all-zeros frame used to seed the stack
    img_stack = img_stack[1:]
    cat_stack = np.ones((len(img_stack), 1)) * category_id
    return img_stack, cat_stack
def video_uniform_sampling(spf, video_path, resize_type, is_local, verbose=False):
    assert resize_type in ['resize', 'resize_crop', 'resize_crop_scaled']

    resize_function = None
    if resize_type == 'resize':
        resize_function = image_utils.resize_frame
    elif resize_type == 'resize_crop':
        resize_function = image_utils.resize_crop
    elif resize_type == 'resize_crop_scaled':
        resize_function = image_utils.resize_crop_scaled

    cap = FFMPEG_VideoReader(video_path, False)
    cap.initialize()
    fps = cap.fps
    n_frames = cap.nframes
    duration = cap.duration

    # check if there are no samples because the video duration is less than spf;
    # then at least get 1 frame of the video
    n_samples = int(duration / float(spf))
    if n_samples == 0:
        n_samples = 1

    frame_size = 224
    frames = np.zeros(shape=(n_samples, frame_size, frame_size, 3), dtype='float32')
    for i in range(n_samples):
        num = i + 1
        if num % 100 == 0 and verbose:
            print(' ... reading frame %d/%d' % (num, n_samples))
        time_sec = i * spf
        frame = cap.get_frame(time_sec)
        # resize the frame to fit in the array; it's going to be used by caffe anyway
        frame = resize_function(frame)
        # the frame is encoded as uint8 with values 0-255,
        # but caffe needs float32 with values 0-1
        frame = frame.astype('float32') / float(255)
        frames[i] = frame

    # very important to close the reader, or we'd have memory leakage
    cap.close()

    return frames, fps, n_frames, duration
def __init__(self, filename, ismask=False, has_mask=False,
             audio=True, audio_buffersize=200000,
             audio_fps=44100, audio_nbytes=2, verbose=False):

    VideoClip.__init__(self, ismask)

    # We store the construction parameters in case we need to make
    # a copy (a 'co-reader').
    self.parameters = {'filename': filename, 'ismask': ismask,
                       'has_mask': has_mask, 'audio': audio,
                       'audio_buffersize': audio_buffersize}

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio:
        try:
            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
        except Exception:
            if verbose:
                print("No audio found in %s" % filename)
def test_unusual_order_frame_pos():
    reader = FFMPEG_VideoReader("media/test_video.mp4")
    assert reader.pos == 1

    # Go straight to the end
    end_frame = reader.get_frame(4)
    assert reader.pos == 5
    assert np.array_equal(end_frame, [[[255, 255, 255]]])

    # Repeat the previous frame
    second_end_frame = reader.get_frame(4)
    assert reader.pos == 5
    assert np.array_equal(second_end_frame, [[[255, 255, 255]]])

    # Go backwards
    previous_frame = reader.get_frame(3)
    assert reader.pos == 4
    assert np.array_equal(previous_frame, [[[0, 0, 0]]])

    # Go back to the start
    start_frame = reader.get_frame(0)
    assert reader.pos == 1
    assert np.array_equal(start_frame, [[[254, 0, 0]]])
def __init__(self, input, output, width=None, height=None, log_level=FFMPEG_LOGLEVEL):
    self._reader = FFMPEG_VideoReader(input)
    self._fps = self._reader.fps
    # self.cur_frame = 1
    # self._frame = None
    self._querier = FfmpegQuerier()
    self._info = self._querier(input)
    self._duration = self._querier.duration
    self.draw_dict = {}
    self._resize = (int(width), int(height)) if (width and height) else None
    self._loglevel = log_level
    self._output = output
    self._tmp_file = self._make_tmp_file(input) if self._resize else None
    self._writer = FFMPEG_VideoWriter(
        self._tmp_file if self.need_resize else output,
        size=self._reader.size,
        fps=self._reader.fps)
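# A minimal usage sketch for the constructor above, assuming the enclosing
# class is named `VideoProcessor` (hypothetical); the file names are placeholders.
processor = VideoProcessor('in.mp4', 'out.mp4', width=1280, height=720)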
def __init__(self, filename, has_mask=False,
             audio=True, audio_buffersize=200000,
             audio_fps=44100, audio_nbytes=2, verbose=False):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
    self.reader = reader

    # Make some of the reader's attributes accessible from the clip
    self.duracion = self.reader.duracion
    self.fin = self.reader.duracion
    self.fps = self.reader.fps
    self.tamano = self.reader.tamano

    if has_mask:
        self.make_frame = lambda t: reader.get_frame(t)[:, :, :3]
        mask_mf = lambda t: reader.get_frame(t)[:, :, 3] / 255.0
        self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                     .set_duracion(self.duracion))
        self.mask.fps = self.fps
    else:
        self.make_frame = lambda t: reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
startFrame = 0
if exists:
    # Continue from where it left off
    print("Resuming processing...")
    print("Moving processed video to temporary storage...")
    tempDir = "temp"
    tempDest = tempDir + "/" + os.path.basename(arguments_strVideoOut)
    if os.path.exists(tempDir):
        shutil.rmtree(tempDir)
    os.makedirs(tempDir)
    shutil.move(arguments_strVideoOut, tempDest)

    readerCont = FFMPEG_VideoReader(tempDest, False)
    totalFramesCont = readerCont.nframes
    if totalFramesCont % 2 == 0:
        # Even number: ended on an interpolated frame.
        # startFrame = number of the last original frame,
        # which means this frame will be skipped.
        startFrame = totalFramesCont // 2
    else:
        # Odd number: ended on an original frame.
        # startFrame = number of the second-to-last original frame,
        # which means processing moves on to the current frame.
        startFrame = (totalFramesCont - 1) // 2

reader = FFMPEG_VideoReader(arguments_strVideo, False)

if arguments_strVideoAudio:
    writer = FFMPEG_VideoWriter(arguments_strVideoOut,