def detect_events_in_video(filename, config=None):
    """Detect flash events in a single video file.

    Parameters
    ----------
    filename : str
        Path to the video file.
    config : dict, optional
        Detection parameters. Missing keys fall back to the defaults:
        ``hidden_scanlines`` (0), ``diff_max_peak_thresh`` (20),
        ``ramp_detection_thresh`` (4).

    Returns
    -------
    Events as returned by ``detect_events()``.
    """
    defaults = {
        'hidden_scanlines': 0,
        'diff_max_peak_thresh': 20,
        'ramp_detection_thresh': 4,
    }
    # Merge the caller's config over the defaults: the original replaced a
    # falsy config wholesale and raised KeyError on a partial config dict.
    cfg = dict(defaults, **(config or {}))
    features = extract_features(filename)
    source = imagesource.TimedVideoSource(filename)
    source.extract_timestamps()
    events = detect_events(features, source.timestamps_ms,
                           cfg['hidden_scanlines'],
                           cfg['diff_max_peak_thresh'],
                           cfg['ramp_detection_thresh'])
    return events
def detect_events_in_video(filename, config=None):
    """Run flash-event detection on one video.

    A falsy *config* is replaced by the built-in default parameter set;
    otherwise the given dict must contain all three detection keys.
    """
    if not config:
        config = {
            "hidden_scanlines": 0,
            "diff_max_peak_thresh": 20,
            "ramp_detection_thresh": 4,
        }
    features = extract_features(filename)
    timed_source = imagesource.TimedVideoSource(filename)
    timed_source.extract_timestamps()
    return detect_events(
        features,
        timed_source.timestamps_ms,
        config["hidden_scanlines"],
        config["diff_max_peak_thresh"],
        config["ramp_detection_thresh"],
    )
def load(self, filename):
    """Load the synchronization model and base camera from a YAML file.

    Parameters
    ----------
    filename : str
        Path to the YAML file; must contain 'model' and 'base_cam' keys.
    """
    with open(filename, 'r') as fr:
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and can execute arbitrary code on untrusted input.
        data = yaml.safe_load(fr)
    self.model = data['model']
    self.base_cam = data['base_cam']


if __name__ == '__main__':
    # example 4 camera synchronization
    cameras = [1, 2, 3, 4]
    filenames = {cam: 'data/ice_hockey/%d.mp4' % cam for cam in cameras}

    # load video files and extract frame timestamps
    sources = {
        cam: imagesource.TimedVideoSource(filenames[cam])
        for cam in cameras
    }
    # .values(): dict.itervalues() is Python 2 only and raises
    # AttributeError on Python 3.
    for source in sources.values():
        source.extract_timestamps()

    # detect flash events
    sync = FlashVideoSynchronization()
    sync.detect_flash_events(filenames)

    # # save all detected events for analysis
    # features = {cam: extract_features(filenames[cam], compute_luminance_median, dtype=np.uint8) for cam in cameras}
    # sync.save_event_images(sources, features, 'out/events')

    # manually set rough offset by matching an event
    sync.show_events()
def test_timedvideo():
    """Timestamps must be populated after extract_timestamps()."""
    src = imagesource.TimedVideoSource(video)
    src.extract_timestamps()
    assert src.timestamps_ms is not None