def test_scene_list(test_video_file):
    """ Test SceneManager get_scene_list method with VideoManager/ContentDetector. """
    video_manager = VideoManager([test_video_file])
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    try:
        base_timecode = video_manager.get_base_timecode()
        fps = video_manager.get_framerate()
        start_time = FrameTimecode('00:00:00', fps)
        end_time = FrameTimecode('00:00:10', fps)
        video_manager.set_duration(start_time=start_time, end_time=end_time)
        video_manager.set_downscale_factor()
        video_manager.start()
        frames_read = scene_manager.detect_scenes(frame_source=video_manager)
        # Frames 0..end are read inclusively, hence the +1.
        assert frames_read == end_time.get_frames() + 1
        scene_list = scene_manager.get_scene_list(base_timecode)
        # The scene list must be contiguous: each scene's end timecode is the
        # start timecode of the scene that follows it.
        for previous_scene, current_scene in zip(scene_list, scene_list[1:]):
            assert previous_scene[1] == current_scene[0]
    finally:
        video_manager.release()
def test_content_detector(test_movie_clip):
    """ Test SceneManager with VideoManager and ContentDetector. """
    for threshold, start_frames in TEST_MOVIE_CLIP_GROUND_TRUTH_CONTENT:
        video_manager = VideoManager([test_movie_clip])
        scene_manager = SceneManager()
        scene_manager.add_detector(ContentDetector(threshold=threshold))
        try:
            fps = video_manager.get_framerate()
            video_manager.set_duration(
                start_time=FrameTimecode('00:00:50', fps),
                end_time=FrameTimecode('00:01:19', fps))
            video_manager.set_downscale_factor()
            video_manager.start()
            scene_manager.detect_scenes(frame_source=video_manager)
            scene_list = scene_manager.get_scene_list()
            assert len(scene_list) == len(start_frames)
            # Compare each detected scene start frame against the ground truth.
            for expected, (scene_start, _) in zip(start_frames, scene_list):
                assert scene_start.get_frames() == expected
        finally:
            video_manager.release()
def test_adaptive_detector(test_movie_clip):
    """ Test SceneManager with VideoManager and AdaptiveDetector. """
    # Ground truth taken from ContentDetector with threshold=27.
    start_frames = TEST_MOVIE_CLIP_GROUND_TRUTH_CONTENT[1][1]
    video_manager = VideoManager([test_movie_clip])
    scene_manager = SceneManager()
    assert scene_manager._stats_manager is None
    # AdaptiveDetector requires a StatsManager, so adding it should make the
    # SceneManager create one implicitly.
    scene_manager.add_detector(AdaptiveDetector(video_manager=video_manager))
    assert scene_manager._stats_manager is not None
    try:
        fps = video_manager.get_framerate()
        video_manager.set_duration(
            start_time=FrameTimecode('00:00:50', fps),
            end_time=FrameTimecode('00:01:19', fps))
        video_manager.set_downscale_factor()
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager)
        scene_list = scene_manager.get_scene_list()
        assert len(scene_list) == len(start_frames)
        # Compare each detected scene start frame against the ground truth.
        for expected, (scene_start, _) in zip(start_frames, scene_list):
            assert scene_start.get_frames() == expected
    finally:
        video_manager.release()
def test_get_seconds():
    ''' Test FrameTimecode get_seconds() method. '''
    # FIX: these previously read `assert expr, pytest.approx(...)` — the approx
    # object was only the assertion *message*, so the test merely checked that
    # get_seconds() was truthy. Compare with `==` so values are actually verified.
    assert FrameTimecode(timecode=1, fps=1.0).get_seconds() == pytest.approx(1.0 / 1.0)
    assert FrameTimecode(timecode=1000, fps=60.0).get_seconds() == pytest.approx(1000 / 60.0)
    assert FrameTimecode(timecode=1000000000, fps=29.97).get_seconds() == pytest.approx(1000000000 / 29.97)
    assert FrameTimecode(timecode=1.0, fps=1.0).get_seconds() == pytest.approx(1.0)
    assert FrameTimecode(timecode=1000.0, fps=60.0).get_seconds() == pytest.approx(1000.0)
    assert FrameTimecode(timecode=1000000000.0, fps=29.97).get_seconds() == pytest.approx(1000000000.0)
    assert FrameTimecode(timecode='00:00:02.0000', fps=1).get_seconds() == pytest.approx(2.0)
    assert FrameTimecode(timecode='00:00:00.5', fps=10).get_seconds() == pytest.approx(0.5)
    assert FrameTimecode(timecode='00:00:01', fps=10).get_seconds() == pytest.approx(1.0)
    assert FrameTimecode(timecode='00:01:00.000', fps=1).get_seconds() == pytest.approx(60.0)
def test_scene_list(test_video_file):
    """ Test SceneManager get_scene_list method with VideoManager/ContentDetector. """
    vm = VideoManager([test_video_file])
    sm = SceneManager()
    sm.add_detector(ContentDetector())
    try:
        base_timecode = vm.get_base_timecode()
        fps = vm.get_framerate()
        begin = FrameTimecode('00:00:00', fps)
        finish = FrameTimecode('00:00:10', fps)
        vm.set_duration(start_time=begin, end_time=finish)
        vm.set_downscale_factor()
        vm.start()
        total_frames = sm.detect_scenes(frame_source=vm)
        # Frames 0..end are read inclusively, hence the +1.
        assert total_frames == finish.get_frames() + 1
        scenes = sm.get_scene_list(base_timecode)
        # Verify contiguity: every scene must end exactly where the next begins.
        for idx in range(1, len(scenes)):
            assert scenes[idx - 1][1] == scenes[idx][0]
    finally:
        vm.release()
def test_addition():
    ''' Test FrameTimecode addition (+/+=, __add__/__iadd__) operator. '''
    base = FrameTimecode(timecode=1.0, fps=10.0)
    # Adding a frame count produces an equal timecode however it is expressed.
    assert base + 1 == FrameTimecode(timecode=1.1, fps=10.0)
    assert base + 1 == FrameTimecode(1.1, base)
    for equivalent in (20, 2.0, '00:00:02.000'):
        assert base + 10 == equivalent
    # Comparing timecodes with mismatched framerates must raise TypeError.
    with pytest.raises(TypeError):
        FrameTimecode('00:00:02.000', fps=20.0) == base + 10
def test_get_frames():
    ''' Test FrameTimecode get_frames() method. '''
    # FIX: these previously read `assert expr, <expected>` — the expected value
    # was only the assertion *message*, so the test merely checked truthiness.
    # Compare with `==` so values are actually verified.
    assert FrameTimecode(timecode=1, fps=1.0).get_frames() == 1
    assert FrameTimecode(timecode=1000, fps=60.0).get_frames() == 1000
    assert FrameTimecode(timecode=1000000000, fps=29.97).get_frames() == 1000000000
    assert FrameTimecode(timecode=1.0, fps=1.0).get_frames() == int(1.0 * 1.0)
    assert FrameTimecode(timecode=1000.0, fps=60.0).get_frames() == int(1000.0 * 60.0)
    # At this magnitude float rounding can shift the last digit depending on
    # whether seconds->frames truncates or rounds, so compare approximately.
    assert FrameTimecode(timecode=1000000000.0, fps=29.97).get_frames() == pytest.approx(1000000000.0 * 29.97)
    assert FrameTimecode(timecode='00:00:02.0000', fps=1).get_frames() == 2
    assert FrameTimecode(timecode='00:00:00.5', fps=10).get_frames() == 5
    assert FrameTimecode(timecode='00:00:01', fps=10).get_frames() == 10
    assert FrameTimecode(timecode='00:01:00.000', fps=1).get_frames() == 60
def test_subtraction():
    ''' Test FrameTimecode subtraction (-/-=, __sub__) operator. '''
    base = FrameTimecode(timecode=1.0, fps=10.0)
    assert (base - 1) == FrameTimecode(timecode=0.9, fps=10.0)
    assert base - 2 == FrameTimecode(0.8, base)
    # Subtracting the full frame count (or more) never goes below frame 0.
    for amount in (10, 11, 100, 1.0, 100.0):
        assert base - amount == FrameTimecode(0.0, base)
    assert base - 1 == FrameTimecode(timecode=0.9, fps=10.0)
    # Comparing timecodes with mismatched framerates must raise TypeError.
    with pytest.raises(TypeError):
        FrameTimecode('00:00:02.000', fps=20.0) == base - 10
def test_content_detect_opencv_videocap(test_video_file):
    """ Test SceneManager with cv2.VideoCapture and ContentDetector. """
    capture = cv2.VideoCapture(test_video_file)
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    try:
        fps = capture.get(cv2.CAP_PROP_FPS)
        end_time = FrameTimecode('00:00:05', fps)
        frames_read = scene_manager.detect_scenes(frame_source=capture, end_time=end_time)
        assert frames_read == end_time.get_frames()
    finally:
        capture.release()
def get_cut_list(self, base_timecode=None):
    # type: (FrameTimecode) -> List[FrameTimecode]
    """ Return the detected scene cut points as FrameTimecode objects.

    Whereas get_scene_list returns (start, end) pairs, this returns only the
    frames at which a new scene begins, i.e. the points where the input
    should be cut/split.  The scene list is derived from this cutting list,
    with each scene running contiguously from the first frame to the last.

    If only sparse detectors are used (e.g. MotionDetector), the result is
    always empty.

    Args:
        base_timecode: Framerate reference used to construct the returned
            timecodes; defaults to this SceneManager's own base timecode.

    Returns:
        List of FrameTimecode objects marking each detected scene change,
        suitable for passing to external splitting tools.  Empty when no
        base timecode is available.
    """
    reference = self._base_timecode if base_timecode is None else base_timecode
    if reference is None:
        return []
    return [FrameTimecode(cut, reference) for cut in self._get_cutting_list()]
def test_load_corrupt_stats():
    """ Test loading a corrupted stats file created by outputting data in
    the wrong format. """
    stats_manager = StatsManager()
    with open(TEST_STATS_FILES[0], 'wt') as stats_file:
        stats_writer = get_csv_writer(stats_file)
        metric_key = 'some_metric'
        metric_value = str(1.2)
        frame_key = 100
        frame_timecode = FrameTimecode(0, 29.97) + frame_key
        # Invalid file: the timecode and frame number header columns are
        # swapped, which the loader must reject as StatsFileCorrupt.
        stats_writer.writerow(
            [COLUMN_NAME_TIMECODE, COLUMN_NAME_FRAME_NUMBER, metric_key])
        stats_writer.writerow(
            [frame_key, frame_timecode.get_timecode(), metric_value])
    with pytest.raises(StatsFileCorrupt):
        stats_manager.load_from_csv(TEST_STATS_FILES[0])
def test_load_hardcoded_file():
    """ Test loading a stats file with some hard-coded data generated by this
    test case. """
    stats_manager = StatsManager()
    some_metric_key = 'some_metric'
    some_metric_value = 1.2
    some_frame_key = 100
    base_timecode = FrameTimecode(0, 29.97)
    some_frame_timecode = base_timecode + some_frame_key
    # Write out a valid file.
    with open(TEST_STATS_FILES[0], 'w') as stats_file:
        stats_writer = get_csv_writer(stats_file)
        stats_writer.writerow(
            [COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE, some_metric_key])
        stats_writer.writerow([
            some_frame_key,
            some_frame_timecode.get_timecode(),
            str(some_metric_value)])
    # FIX: the read handle was previously opened but never closed (resource
    # leak); use a context manager so it is released even if loading fails.
    with open(TEST_STATS_FILES[0], 'r') as stats_file:
        stats_manager.load_from_csv(csv_file=stats_file)
    # Check that we decoded the correct values.
    assert stats_manager.metrics_exist(some_frame_key, [some_metric_key])
    assert stats_manager.get_metrics(
        some_frame_key, [some_metric_key])[0] == pytest.approx(some_metric_value)
def process_youtube_video(youtube_url):
    """ Run scene detection over a YouTube video and return the generated
    thumbnail URLs.

    Args:
        youtube_url: Full YouTube watch URL of the video to process.

    Returns:
        Whatever generate_images() returns for the detected scene list.
    """
    video = pafy.new(youtube_url)
    best_stream = video.getbest(preftype="mp4")
    cap = cv2.VideoCapture(best_stream.url)
    sm = SceneManager()
    sm.add_detector(ContentDetector())
    try:
        video_fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # Process the full video: end time is total duration in seconds.
        dur = FrameTimecode(frame_count / video_fps, video_fps)
        sm.detect_scenes(frame_source=cap, end_time=dur)
        scene_list = sm.get_scene_list(dur)
        print("Scene List Count: " + str(len(scene_list)))
        result_urls = generate_images(cap, scene_list, 1, "testvid")
    finally:
        cap.release()
    return result_urls
def test_detectors_with_stats(test_video_file):
    """ Test all detectors functionality with a StatsManager. """
    for detector_type in [ContentDetector, ThresholdDetector, AdaptiveDetector]:
        video_manager = VideoManager([test_video_file])
        stats = StatsManager()
        scene_manager = SceneManager(stats_manager=stats)
        scene_manager.add_detector(create_detector(detector_type, video_manager))
        try:
            end_time = FrameTimecode('00:00:15', video_manager.get_framerate())
            video_manager.set_duration(end_time=end_time)
            video_manager.set_downscale_factor()
            video_manager.start()
            scene_manager.detect_scenes(frame_source=video_manager)
            initial_scene_len = len(scene_manager.get_scene_list())
            # The test clip must contain at least one scene.
            assert initial_scene_len > 0
            # Re-analyze with a fresh SceneManager backed by the same stats;
            # the cached metrics must produce an identical result.
            scene_manager = SceneManager(stats_manager=stats)
            scene_manager.add_detector(create_detector(detector_type, video_manager))
            video_manager.release()
            video_manager.reset()
            video_manager.set_duration(end_time=end_time)
            video_manager.set_downscale_factor()
            video_manager.start()
            scene_manager.detect_scenes(frame_source=video_manager)
            assert len(scene_manager.get_scene_list()) == initial_scene_len
        finally:
            video_manager.release()
def test_timecode_numeric():
    ''' Test FrameTimecode constructor argument "timecode" with numeric arguments. '''
    # Negative frame counts / seconds are rejected.
    for bad_value in (-1, -1.0, -0.1, -1.0 / 1000):
        with pytest.raises(ValueError):
            FrameTimecode(timecode=bad_value, fps=1.0)
    # Valid int and float timecodes map directly onto frame numbers.
    assert FrameTimecode(timecode=0, fps=1).frame_num == 0
    assert FrameTimecode(timecode=1, fps=1).frame_num == 1
    assert FrameTimecode(timecode=0.0, fps=1.0).frame_num == 0
    assert FrameTimecode(timecode=1.0, fps=1.0).frame_num == 1
def test_save_load_from_video(test_video_file):
    """ Test generating and saving some frame metrics from TEST_VIDEO_FILE to
    a file on disk, and loading the file back to ensure the loaded frame metrics
    agree with those that were saved. """
    video_manager = VideoManager([test_video_file])
    stats_manager = StatsManager()
    scene_manager = SceneManager(stats_manager)
    base_timecode = video_manager.get_base_timecode()
    scene_manager.add_detector(ContentDetector())
    try:
        fps = video_manager.get_framerate()
        video_manager.set_duration(
            start_time=FrameTimecode('00:00:00', fps),
            end_time=FrameTimecode('00:00:20', fps))
        video_manager.set_downscale_factor()
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager)
        with open(TEST_STATS_FILES[0], 'w') as stats_file:
            stats_manager.save_to_csv(stats_file, base_timecode)
        loaded_stats = StatsManager()
        with open(TEST_STATS_FILES[0], 'r') as stats_file:
            loaded_stats.load_from_csv(stats_file, base_timecode)
        # Compare every registered metric for the first frame in both managers.
        frame_key = min(stats_manager._frame_metrics.keys())
        metric_keys = list(stats_manager._registered_metrics)
        assert stats_manager.metrics_exist(frame_key, metric_keys)
        saved_values = stats_manager.get_metrics(frame_key, metric_keys)
        loaded_values = loaded_stats.get_metrics(frame_key, metric_keys)
        for saved_value, loaded_value in zip(saved_values, loaded_values):
            assert saved_value == pytest.approx(loaded_value)
    finally:
        os.remove(TEST_STATS_FILES[0])
        video_manager.release()
def test_save_images(test_video_file):
    """ Test scenedetect.scene_manager.save_images function. """
    video_manager = VideoManager([test_video_file])
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    image_name_glob = 'scenedetect.tempfile.*.jpg'
    image_name_template = 'scenedetect.tempfile.$SCENE_NUMBER.$IMAGE_NUMBER'
    try:
        fps = video_manager.get_framerate()
        video_manager.set_duration(
            start_time=FrameTimecode('00:00:05', fps),
            end_time=FrameTimecode('00:00:15', fps))
        video_manager.set_downscale_factor()
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager)
        scene_list = scene_manager.get_scene_list()
        assert scene_list
        image_filenames = save_images(
            scene_list=scene_list,
            video_manager=video_manager,
            num_images=3,
            image_extension='jpg',
            image_name_template=image_name_template)
        # Every reported file must exist, and the reported total must match
        # what is actually on disk.
        total_images = 0
        for scene_number in image_filenames:
            for path in image_filenames[scene_number]:
                assert os.path.exists(path)
                total_images += 1
        assert total_images == len(glob.glob(image_name_glob))
    finally:
        video_manager.release()
        for path in glob.glob(image_name_glob):
            os.remove(path)
def test_content_detect(test_video_file):
    """ Test SceneManager with VideoManager and ContentDetector. """
    video_manager = VideoManager([test_video_file])
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    try:
        fps = video_manager.get_framerate()
        end_time = FrameTimecode('00:00:05', fps)
        video_manager.set_duration(
            start_time=FrameTimecode('00:00:00', fps), end_time=end_time)
        video_manager.set_downscale_factor()
        video_manager.start()
        frames_read = scene_manager.detect_scenes(frame_source=video_manager)
        # Frames 0..end are read inclusively, hence the +1.
        assert frames_read == end_time.get_frames() + 1
    finally:
        video_manager.release()
def test_get_timecode():
    ''' Test FrameTimecode get_timecode() method. '''
    # (timecode argument, fps, expected HH:MM:SS.nnn string)
    cases = [
        (1.0, 1.0, '00:00:01.000'),
        (60.117, 60.0, '00:01:00.117'),
        (3600.234, 29.97, '01:00:00.234'),
        ('00:00:02.0000', 1, '00:00:02.000'),
        ('00:00:00.5', 10, '00:00:00.500'),
        ('00:00:01.501', 10, '00:00:01.500'),
        ('00:01:00.000', 1, '00:01:00.000'),
    ]
    for timecode, fps, expected in cases:
        assert FrameTimecode(timecode=timecode, fps=fps).get_timecode() == expected
def test_detector_metrics(test_video_file):
    """ Test passing StatsManager to a SceneManager and using it for storing
    the frame metrics from a ContentDetector. """
    video_manager = VideoManager([test_video_file])
    stats_manager = StatsManager()
    scene_manager = SceneManager(stats_manager)
    assert not stats_manager._registered_metrics
    # add_detector should trigger register_metrics in the StatsManager.
    scene_manager.add_detector(ContentDetector())
    assert stats_manager._registered_metrics
    try:
        fps = video_manager.get_framerate()
        video_manager.set_duration(
            start_time=FrameTimecode('00:00:00', fps),
            end_time=FrameTimecode('00:00:20', fps))
        video_manager.set_downscale_factor()
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager)
        # Metrics must now have been written to the StatsManager.
        assert stats_manager._frame_metrics
        frame_key = min(stats_manager._frame_metrics.keys())
        assert stats_manager._frame_metrics[frame_key]
        registered = list(stats_manager._registered_metrics)
        assert stats_manager.metrics_exist(frame_key, registered)
        # With a single detector, get_metrics must return exactly one value
        # per registered metric key.
        assert len(stats_manager.get_metrics(frame_key, registered)) == len(registered)
    finally:
        video_manager.release()
def find_scenes(self):
    """ Detect scenes in self.video_path using ContentDetector.

    Returns:
        List of (start, end) FrameTimecode tuples, one per detected scene,
        e.g. [(FrameTimecode(frame=0, fps=23.976024),
               FrameTimecode(frame=90, fps=23.976024)), ...].
        If no scenes are detected, a single tuple spanning the whole video
        is returned instead.
    """
    video_manager = VideoManager([self.video_path])
    fps = video_manager.get_framerate()
    total_frames = count_frames(self.video_path)
    stats_manager = StatsManager()
    scene_manager = SceneManager(stats_manager)
    # ContentDetector default threshold is 30; lower it (e.g. 27) for darker
    # footage if the detected cuts look wrong against the generated images.
    scene_manager.add_detector(ContentDetector())
    base_timecode = video_manager.get_base_timecode()
    scene_list = []
    try:
        # Downscale according to resolution to improve processing speed.
        video_manager.set_downscale_factor()
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager)
        scene_list = scene_manager.get_scene_list(base_timecode)
    finally:
        video_manager.release()
    if not scene_list:
        # Fall back to a single scene covering the entire video.
        scene_list = [(FrameTimecode(0, fps), FrameTimecode(total_frames, fps))]
    return scene_list
def test_load_hardcoded_file(test_video_file):
    """ Test loading a stats file with some hard-coded data generated by this
    test case. """
    from scenedetect.stats_manager import COLUMN_NAME_FPS
    from scenedetect.stats_manager import COLUMN_NAME_FRAME_NUMBER
    from scenedetect.stats_manager import COLUMN_NAME_TIMECODE
    stats_manager = StatsManager()
    some_metric_key = 'some_metric'
    some_metric_value = 1.2
    some_frame_key = 100
    base_timecode = FrameTimecode(0, 29.97)
    some_frame_timecode = base_timecode + some_frame_key
    try:
        # Write out a valid stats file: FPS header, column names, one row.
        with open(TEST_STATS_FILES[0], 'w') as stats_file:
            writer = get_csv_writer(stats_file)
            writer.writerow(
                [COLUMN_NAME_FPS, '%.10f' % base_timecode.get_framerate()])
            writer.writerow(
                [COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE, some_metric_key])
            writer.writerow(
                [some_frame_key, some_frame_timecode.get_timecode(),
                 str(some_metric_value)])
        with open(TEST_STATS_FILES[0], 'r') as stats_file:
            stats_manager.load_from_csv(csv_file=stats_file, base_timecode=base_timecode)
        # Check that we decoded the correct values.
        assert stats_manager.metrics_exist(some_frame_key, [some_metric_key])
        assert stats_manager.get_metrics(
            some_frame_key, [some_metric_key])[0] == pytest.approx(some_metric_value)
    finally:
        os.remove(TEST_STATS_FILES[0])
def test_detect_scenes_callback(test_video_file):
    """ Test SceneManager detect_scenes method with a callback function.

    Note that the API signature of the callback will undergo breaking changes in v0.6.
    """
    video_manager = VideoManager([test_video_file])
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    fake_callback = FakeCallback()
    try:
        fps = video_manager.get_framerate()
        video_manager.set_duration(
            start_time=FrameTimecode('00:00:05', fps),
            end_time=FrameTimecode('00:00:15', fps))
        video_manager.set_downscale_factor()
        video_manager.start()
        scene_manager.detect_scenes(
            frame_source=video_manager, callback=fake_callback.get_callback())
        # The callback fires once per detected cut, i.e. once per scene boundary.
        assert fake_callback.num_invoked() == (len(scene_manager.get_scene_list()) - 1)
    finally:
        video_manager.release()
def _process_frame(self, frame_num, frame_im, base_timecode):
    # type: (int, numpy.ndarray, FrameTimecode) -> None
    """ Adds any cuts detected with the current frame to the cutting list.

    Runs every registered detector over the frame; each detected cut is
    appended to self._cutting_list and a '%cut:<start>-<end>' line (times in
    seconds) is printed for the scene that just ended.  Sparse detectors are
    also run, but their event output is not supported and raises.
    """
    cut_output = []
    event_output = []
    for detector in self._detector_list:
        cut_output = detector.process_frame(frame_num, frame_im)
        if len(cut_output) > 0:
            # Only a single cut per frame is supported by this modified path.
            if len(cut_output) > 1:
                raise Exception('Multiple cuts are not supported ' + ','.join(
                    str(FrameTimecode(cut, base_timecode).get_seconds())
                    for cut in cut_output))
            # cut output list is 0 or 1
            if len(self._cutting_list) == 0:
                # First cut: the ended scene started at frame 0.
                start_scene = str(
                    FrameTimecode(0, base_timecode).get_seconds())  # need start time hack
            else:
                # Otherwise the ended scene started at the previous cut.
                start_scene = str(
                    FrameTimecode(self._cutting_list[-1], base_timecode).get_seconds())
            # print ('%cut:' + ','.join(str(FrameTimecode(cut, base_timecode)) for cut in cut_output))
            # print ('timecode ' + str(base_timecode) + ' ' + str(cut_output[0]))
            print('%cut:' + start_scene + '-' + str(
                FrameTimecode(cut_output[0], base_timecode).get_seconds()))
            self._cutting_list += cut_output
    for detector in self._sparse_detector_list:
        event_output = detector.process_frame(frame_num, frame_im)
        self._event_list += event_output
        if len(event_output) > 0:
            # print ('%event: ' + ','.join(str(FrameTimecode(cut, base_timecode)) for cut in event_output))
            raise Exception('Event output not implemented')
def get_cut_list(self, base_timecode):
    # type: (FrameTimecode) -> List[FrameTimecode]
    """ Returns a list of FrameTimecodes of the detected scene changes/cuts.

    Unlike get_scene_list, the cutting list returns a list of FrameTimecodes
    representing the point in the input video(s) where a new scene was detected,
    and thus the frame where the input should be cut/split. The cutting list, in
    turn, is used to generate the scene list, noting that each scene is contiguous
    starting from the first frame and ending at the last frame detected.

    Args:
        base_timecode: FrameTimecode whose framerate is used for the
            returned timecodes.

    Returns:
        List of FrameTimecode objects denoting the points in time where a
        scene change was detected in the input video(s), which can also be
        passed to external tools for automated splitting of the input into
        individual scenes.
    """
    # FIX: FrameTimecode takes (timecode, fps); passing a FrameTimecode as the
    # second argument copies its framerate. The previous call used a
    # non-existent `new_time` keyword, which raises TypeError at runtime.
    return [
        FrameTimecode(cut, base_timecode)
        for cut in self._get_cutting_list()
    ]
def parse_timecode(cli_ctx, value):
    # type: (CliContext, str) -> Union[FrameTimecode, None]
    """ Parse a user-supplied timecode string using the CLI context's framerate.

    Returns:
        (FrameTimecode) Timecode set to value with the CliContext VideoManager
        framerate, or None when value is None (no processing performed).
    Raises:
        click.BadParameter: value could not be parsed as a timecode.
    """
    cli_ctx.check_input_open()
    if value is None:
        return None
    try:
        return FrameTimecode(timecode=value, fps=cli_ctx.video_manager.get_framerate())
    except (ValueError, TypeError):
        raise click.BadParameter(
            'timecode must be in frames (1234), seconds (123.4s), or HH:MM:SS (00:02:03.400)')
def test_scene_list(test_video_file):
    """ Test SceneManager get_scene_list method with VideoManager/ContentDetector. """
    video_manager = VideoManager([test_video_file])
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector())
    try:
        fps = video_manager.get_framerate()
        start_time = FrameTimecode('00:00:05', fps)
        end_time = FrameTimecode('00:00:15', fps)
        assert end_time.get_frames() > start_time.get_frames()
        video_manager.set_duration(start_time=start_time, end_time=end_time)
        video_manager.set_downscale_factor()
        video_manager.start()
        frames_read = scene_manager.detect_scenes(frame_source=video_manager)
        # Both endpoints are read, hence the +1.
        assert frames_read == (1 + end_time.get_frames() - start_time.get_frames())
        scene_list = scene_manager.get_scene_list()
        assert scene_list
        # Each entry is a (start timecode, end timecode) pair.
        assert len(scene_list[0]) == 2
        for index in range(len(scene_list)):
            scene_start, scene_end = scene_list[index]
            assert scene_start.get_frames() < scene_end.get_frames()
            if index > 0:
                # Scenes are contiguous: each begins where the previous ended.
                assert scene_list[index - 1][1] == scene_start
    finally:
        video_manager.release()
def get_base_timecode(self):
    # type: () -> FrameTimecode
    """ Get Base Timecode - a FrameTimecode at frame 0 / time 00:00:00 whose
    framerate matches this VideoManager.

    The returned object supports arithmetic (e.g. addition), and the results
    can be passed back to this VideoManager (e.g. to set_duration()), since
    the framerates agree.  For a VideoManager `obj`, the following holds:

        obj.get_base_timecode() == FrameTimecode(0, obj.get_framerate())

    Do not reuse a base timecode from one VideoManager with a different one
    unless you have verified their framerates match.

    Returns:
        FrameTimecode set to frame 0 / time 00:00:00 with the video(s) framerate.
    """
    return FrameTimecode(0, self._cap_framerate)
def test_scenes(url):
    """ Run scene detection over the video at url and return the generated
    image URLs from generate_images(). """
    cap = cv2.VideoCapture(url)
    sm = SceneManager()
    sm.add_detector(ContentDetector())
    try:
        video_fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # Process the full video: end time is total duration in seconds.
        dur = FrameTimecode(total_frames / video_fps, video_fps)
        sm.detect_scenes(frame_source=cap, end_time=dur)
        scene_list = sm.get_scene_list(dur)
        print("Scene List Count: " + str(len(scene_list)))
        result_urls = generate_images(cap, scene_list, 1, "testvid")
    finally:
        cap.release()
    return result_urls
def test_load_hardcoded_file_backwards_compat():
    """ Test loading a stats file with some hard-coded data generated by this
    test case. Ensures backwards compatibility with old statsfiles which
    included an additional header. """
    stats_manager = StatsManager()
    some_metric_key = 'some_metric'
    some_metric_value = 1.2
    some_frame_key = 100
    base_timecode = FrameTimecode(0, 29.97)
    some_frame_timecode = base_timecode + some_frame_key
    with open(TEST_STATS_FILES[0], 'w') as stats_file:
        stats_writer = get_csv_writer(stats_file)
        # Write out a valid file as per PySceneDetect v0.5.4 and prior, which
        # included a framerate header row before the column names.
        stats_writer.writerow(['Video Framerate', '23.976'])
        stats_writer.writerow(
            [COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE, some_metric_key])
        stats_writer.writerow([
            some_frame_key,
            some_frame_timecode.get_timecode(),
            str(some_metric_value)])
    # FIX: the read handle was previously opened but never closed (resource
    # leak); use a context manager so it is released even if loading fails.
    with open(TEST_STATS_FILES[0], 'r') as stats_file:
        stats_manager.load_from_csv(csv_file=stats_file)
    # Check that we decoded the correct values.
    assert stats_manager.metrics_exist(some_frame_key, [some_metric_key])
    assert stats_manager.get_metrics(
        some_frame_key, [some_metric_key])[0] == pytest.approx(some_metric_value)
def _generate_images(self,
                     scene_list,
                     video_name,
                     image_name_template='$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER',
                     output_dir=None,
                     downscale_factor=1):
    # type: (List[Tuple[FrameTimecode, FrameTimecode]]) -> None
    """ Save self.num_images still frames from each scene in scene_list to disk.

    Filenames are built from image_name_template ($VIDEO_NAME, $SCENE_NUMBER,
    $IMAGE_NUMBER and $FRAME_NUMBER are substituted), and all output paths are
    collected into self.image_filenames keyed by scene index.

    Args:
        scene_list: List of (start, end) FrameTimecode pairs, one per scene.
        video_name: Value substituted for $VIDEO_NAME in the template.
        image_name_template: string.Template pattern for output filenames
            (extension is appended from self.image_extension).
        output_dir: Directory to write into; defaults to self.output_directory.
        downscale_factor: If != 1, each saved frame is resized by
            1/downscale_factor before writing.
    """
    if not scene_list:
        return
    if not self.options_processed:
        return
    if self.num_images <= 0:
        raise ValueError()
    self.check_input_open()
    # Optional encoder parameter (e.g. JPEG quality) forwarded to cv2.imwrite.
    imwrite_param = []
    if self.image_param is not None:
        imwrite_param = [
            self.imwrite_params[self.image_extension], self.image_param
        ]
    # Reset video manager and downscale factor (frames are re-read at full
    # resolution so saved images are not downscaled by the manager itself).
    self.video_manager.release()
    self.video_manager.reset()
    self.video_manager.set_downscale_factor(1)
    self.video_manager.start()
    # Setup flags and init progress bar if available.
    completed = True
    logging.info('Generating output images (%d per scene)...', self.num_images)
    progress_bar = None
    if tqdm and not self.quiet_mode:
        progress_bar = tqdm(total=len(scene_list) * self.num_images, unit='images')
    filename_template = Template(image_name_template)
    # Zero-padded format strings sized to the scene/image counts
    # (at least 3 digits for scene numbers, at least 2 for image numbers).
    scene_num_format = '%0'
    scene_num_format += str(
        max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
    image_num_format = '%0'
    image_num_format += str(math.floor(math.log(self.num_images, 10)) + 2) + 'd'
    timecode_list = dict()  # NOTE: immediately replaced by the list built below.
    fps = scene_list[0][0].framerate
    # For each scene, split its frame range into num_images chunks and pick one
    # frame per chunk: interior chunks use their middle frame, the first chunk
    # is offset forward by image_frame_margin and the last chunk backward by
    # the same margin (both clamped to the chunk). Scenes shorter than
    # num_images frames are padded by repeating the final frame.
    timecode_list = [
        [
            FrameTimecode(int(f), fps=fps)
            for f in [
                # middle frames
                a[len(a) // 2] if (
                    0 < j < self.num_images - 1) or self.num_images == 1
                # first frame
                else min(a[0] + self.image_frame_margin, a[-1]) if j == 0
                # last frame
                else max(a[-1] - self.image_frame_margin, a[0])
                # for each evenly-split array of frames in the scene list
                for j, a in enumerate(np.array_split(r, self.num_images))
            ]
        ]
        for i, r in enumerate([
            # pad ranges to number of images
            r if r.stop - r.start >= self.num_images
            else list(r) + [r.stop - 1] * (self.num_images - len(r))
            # create range of frames in scene for r in (
            for r in (
                range(start.get_frames(), end.get_frames())
                # for each scene in scene list
                for start, end in scene_list)
        ])
    ]
    self.image_filenames = {i: [] for i in range(len(timecode_list))}
    for i, tl in enumerate(timecode_list):
        for j, image_timecode in enumerate(tl):
            self.video_manager.seek(image_timecode)
            self.video_manager.grab()
            ret_val, frame_im = self.video_manager.retrieve()
            if downscale_factor != 1:
                # NOTE(review): frame_im may be None when retrieve() fails, yet
                # this resize runs before ret_val is checked — verify upstream.
                logging.info("resizing thumb")
                scale_percent = 1 / downscale_factor
                width = int(frame_im.shape[1] * scale_percent)
                height = int(frame_im.shape[0] * scale_percent)
                resized = cv2.resize(
                    frame_im, (width, height), interpolation=cv2.INTER_AREA)
                frame_im = resized
            if ret_val:
                file_path = '%s.%s' % (filename_template.safe_substitute(
                    VIDEO_NAME=video_name,
                    SCENE_NUMBER=scene_num_format % (i + 1),
                    IMAGE_NUMBER=image_num_format % (j + 1),
                    FRAME_NUMBER=image_timecode.get_frames()), self.image_extension)
                self.image_filenames[i].append(file_path)
                cv2.imwrite(
                    get_and_create_path(
                        file_path,
                        output_dir if output_dir is not None else self.output_directory),
                    frame_im, imwrite_param)
            else:
                # Frame could not be read; abort this scene's remaining images.
                completed = False
                break
            if progress_bar:
                progress_bar.update(1)
    if not completed:
        logging.error('Could not generate all output images.')
def test_framerate():
    ''' Test FrameTimecode constructor argument "fps". '''
    # Missing or non-numeric framerates raise TypeError.
    with pytest.raises(TypeError):
        FrameTimecode()
    with pytest.raises(TypeError):
        FrameTimecode(timecode=0, fps=None)
    with pytest.raises(TypeError):
        FrameTimecode(timecode=None, fps=FrameTimecode(timecode=0, fps=None))
    # Zero, negative, or below-minimum framerates raise ValueError.
    for invalid_fps in (0, -1, -100, 0.0, -1.0, -1000.0,
                        MINIMUM_FRAMES_PER_SECOND_FLOAT / 2):
        with pytest.raises(ValueError):
            FrameTimecode(timecode=0, fps=invalid_fps)
    # Any framerate at or above the minimum is accepted.
    for valid_fps in (1, MINIMUM_FRAMES_PER_SECOND_FLOAT, 10,
                      MINIMUM_FRAMES_PER_SECOND_FLOAT * 2, 1000, 1000.0):
        assert FrameTimecode(timecode=0, fps=valid_fps).frame_num == 0
def test_load_corrupt_stats(test_video_file):
    """ Test loading a corrupted stats file created by outputting data in
    the wrong format.

    Writes four differently-corrupted stats files and asserts that
    load_from_csv raises the expected exception for each one.
    """
    from scenedetect.stats_manager import COLUMN_NAME_FPS
    from scenedetect.stats_manager import COLUMN_NAME_FRAME_NUMBER
    from scenedetect.stats_manager import COLUMN_NAME_TIMECODE
    stats_manager = StatsManager()
    stats_files = [open(stats_file, 'wt') for stats_file in TEST_STATS_FILES]
    try:
        stats_writers = [get_csv_writer(stats_file) for stats_file in stats_files]
        some_metric_key = 'some_metric'
        some_metric_value = str(1.2)
        some_frame_key = 100
        base_timecode = FrameTimecode(0, 29.97)
        some_frame_timecode = base_timecode + some_frame_key
        # Write out some invalid files.
        # File 0: Blank FPS [StatsFileCorrupt]
        stats_writers[0].writerow([COLUMN_NAME_FPS])
        stats_writers[0].writerow(
            [COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE, some_metric_key])
        stats_writers[0].writerow(
            [some_frame_key, some_frame_timecode.get_timecode(), some_metric_value])
        # File 1: Invalid FPS (below minimum) [StatsFileCorrupt]
        stats_writers[1].writerow([COLUMN_NAME_FPS, '%0.10f' % 0.0000001])
        stats_writers[1].writerow(
            [COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE, some_metric_key])
        stats_writers[1].writerow(
            [some_frame_key, some_frame_timecode.get_timecode(), some_metric_value])
        # File 2: Wrong FPS (half the expected rate) [StatsFileFramerateMismatch]
        stats_writers[2].writerow(
            [COLUMN_NAME_FPS, '%.10f' % (base_timecode.get_framerate() / 2.0)])
        stats_writers[2].writerow(
            [COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE, some_metric_key])
        stats_writers[2].writerow(
            [some_frame_key, some_frame_timecode.get_timecode(), some_metric_value])
        # File 3: Wrong Header Names (timecode/frame columns swapped) [StatsFileCorrupt]
        stats_writers[3].writerow([COLUMN_NAME_FPS, '%.10f' % base_timecode.get_framerate()])
        stats_writers[3].writerow(
            [COLUMN_NAME_TIMECODE, COLUMN_NAME_FRAME_NUMBER, some_metric_key])
        stats_writers[3].writerow(
            [some_frame_key, some_frame_timecode.get_timecode(), some_metric_value])
        # Flush everything to disk, then reopen for reading.
        for stats_file in stats_files:
            stats_file.close()
        stats_files = [open(stats_file, 'rt') for stats_file in TEST_STATS_FILES]
        with pytest.raises(StatsFileCorrupt):
            stats_manager.load_from_csv(stats_files[0], base_timecode)
        with pytest.raises(StatsFileCorrupt):
            stats_manager.load_from_csv(stats_files[1], base_timecode)
        with pytest.raises(StatsFileFramerateMismatch):
            stats_manager.load_from_csv(stats_files[2], base_timecode)
        with pytest.raises(StatsFileCorrupt):
            stats_manager.load_from_csv(stats_files[3], base_timecode)
    finally:
        # Always close the handles and remove the temporary files.
        for stats_file in stats_files:
            stats_file.close()
        for stats_file in TEST_STATS_FILES:
            os.remove(stats_file)