コード例 #1
0
def process_youtube_video(youtube_url):
    """Detect scene cuts in a YouTube video and produce one image per scene.

    The best-quality mp4 stream is resolved via pafy, scenes are detected
    with PySceneDetect's ContentDetector over the clip's full duration, and
    images are produced by generate_images.

    :param youtube_url: YouTube watch URL of the video to process
    :return: the URLs returned by generate_images
    """
    stream = pafy.new(youtube_url).getbest(preftype="mp4")
    capture = cv2.VideoCapture(stream.url)

    manager = SceneManager()
    manager.add_detector(ContentDetector())

    try:
        fps = capture.get(cv2.CAP_PROP_FPS)
        total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        # End-of-video timecode derived from frame count and fps.
        end_time = FrameTimecode(total_frames / fps, fps)

        manager.detect_scenes(frame_source=capture, end_time=end_time)
        scenes = manager.get_scene_list(end_time)
        print("Scene List Count: " + str(len(scenes)))

        image_urls = generate_images(capture, scenes, 1, "testvid")
    finally:
        capture.release()

    return image_urls
コード例 #2
0
def find_scenes(video_path):
    """Run PySceneDetect on *video_path* and return per-scene frame ranges.

    :param video_path: path of the video file to analyze
    :return: list of (start_frame, end_frame) int tuples, one per scene
    """
    manager = VideoManager([video_path])
    # A StatsManager lets the SceneManager cache per-frame metrics.
    scene_manager = SceneManager(StatsManager())
    scene_manager.add_detector(ContentDetector())
    base_timecode = manager.get_base_timecode()

    detected = []
    try:
        # Downscale for speed, then run detection over the whole video.
        manager.set_downscale_factor()
        manager.start()
        scene_manager.detect_scenes(frame_source=manager)
        detected = scene_manager.get_scene_list(base_timecode)
    finally:
        manager.release()

    # Convert (start, end) FrameTimecode pairs into plain frame numbers.
    return [(start.get_frames(), end.get_frames()) for start, end in detected]
コード例 #3
0
def find_scenes(video_path):
    """Detect scene boundaries based on changes between frames in the HSV
    color space.

    Per-frame metrics are cached in '{video_path}.stats.csv' so repeated
    runs on the same video can skip recomputation.

    :param video_path: path of the video file to analyze
    :return: list of (start, end) FrameTimecode tuples, one per scene
    """
    manager = VideoManager([video_path])
    stats = StatsManager()
    detector = SceneManager(stats)
    detector.add_detector(ContentDetector())
    base_timecode = manager.get_base_timecode()

    # The stats cache lives next to the video file.
    stats_path = '%s.stats.csv' % video_path

    scene_list = []

    try:
        # Reuse previously computed metrics when available.
        if os.path.exists(stats_path):
            with open(stats_path, 'r') as handle:
                stats.load_from_csv(handle, base_timecode)

        # Downscale for speed, then run detection over the whole video.
        manager.set_downscale_factor()
        manager.start()
        detector.detect_scenes(frame_source=manager)
        scene_list = detector.get_scene_list(base_timecode)

        print('List of scenes obtained:')
        for index, (start, end) in enumerate(scene_list, start=1):
            print('Scene %2d: Start %s / Frame %d, End %s / Frame %d' % (
                index,
                start.get_timecode(),
                start.get_frames(),
                end.get_timecode(),
                end.get_frames(),
            ))

        # Persist metrics only when something new was computed.
        if stats.is_save_required():
            with open(stats_path, 'w') as handle:
                stats.save_to_csv(handle, base_timecode)

    finally:
        manager.release()

    return scene_list
コード例 #4
0
def find_scenes(video_path):
    # type: (str) -> List[str]
    """Detect scenes in a YouTube video and return their start timecodes.

    Parameters
    ----------
    video_path : str
        YouTube watch URL of the video to analyze.

    Returns
    -------
    list of str
        Start timecode string of every detected scene.
    """
    # Bug fix: the parameter was previously ignored in favour of a
    # hard-coded watch URL.
    videoPafy = pafy.new(video_path)
    best = videoPafy.getbest(preftype="webm")

    # Bug fix: VideoManager expects a list of path/URL strings, not an
    # already-opened cv2.VideoCapture object.
    video_manager = VideoManager([best.url])

    scene_manager = SceneManager()

    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold).
    scene_manager.add_detector(ContentDetector())
    base_timecode = video_manager.get_base_timecode()

    # Initialized before the try so the name always exists.
    timecodes = []

    try:
        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # Obtain list of detected scenes.
        # Each scene is a tuple of (start, end) FrameTimecodes.
        scene_list = scene_manager.get_scene_list(base_timecode)

        print('List of scenes obtained:')
        for i, scene in enumerate(scene_list):
            timecodes.append(scene[0].get_timecode())
            print('Scene %2d: Start %s / Frame %d, End %s / Frame %d' % (
                i + 1,
                scene[0].get_timecode(),
                scene[0].get_frames(),
                scene[1].get_timecode(),
                scene[1].get_frames(),
            ))

    finally:
        video_manager.release()

    return timecodes
コード例 #5
0
def find_shots(video_path, stats_file, threshold):
    """Detect shots in a video with PySceneDetect's ContentDetector.

    Parameters
    ----------
    video_path : str
        Path of the video to analyze.
    stats_file : str
        Path of the CSV file the per-frame stats are written to.
    threshold : float
        ContentDetector cut threshold.

    Returns
    -------
    list of (FrameTimecode, FrameTimecode)
        One (start, end) tuple per detected shot; empty on failure.
    """
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    # Construct our SceneManager and pass it our StatsManager.
    scene_manager = SceneManager(stats_manager)

    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold).
    scene_manager.add_detector(ContentDetector(threshold=threshold))
    base_timecode = video_manager.get_base_timecode()

    scene_list = []

    try:
        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # Obtain list of detected scenes.
        scene_list = scene_manager.get_scene_list(base_timecode)

        # Each scene is a tuple of (start, end) FrameTimecodes.
        print('List of shots obtained:')
        for i, scene in enumerate(scene_list):
            print('Scene %2d: Start %s / Frame %d, End %s / Frame %d' % (
                i + 1,
                scene[0].get_timecode(),
                scene[0].get_frames(),
                scene[1].get_timecode(),
                scene[1].get_frames(),
            ))

        # Save a list of stats to a csv.
        # Bug fix: bind the handle to a distinct name so the stats_file
        # path parameter is still intact in the except-branch below.
        if stats_manager.is_save_required():
            with open(stats_file, 'w') as stats_handle:
                stats_manager.save_to_csv(stats_handle, base_timecode)
    except Exception as err:
        # Bug fix: threshold may be numeric; string concatenation with it
        # raised a TypeError that masked the original error.
        print(
            "Failed to find shots for: video: %s, stats: %s, threshold: %s"
            % (video_path, stats_file, threshold), err)
        traceback.print_exc()
    finally:
        video_manager.release()

    return scene_list
コード例 #6
0
def make_elements(video_path, video_name, save_dir, threshold=30.0):
    # type: (str, str, str, float) -> None
    """Split *video_path* into per-scene mp4 clips under save_dir/video_name.

    Parameters
    ----------
    video_path : str
        Path of the source video.
    video_name : str
        Base name used for the stats cache and the output directory.
    save_dir : str
        Directory under which a <video_name> folder is created.
    threshold : float, optional
        ContentDetector cut threshold. Bug fix: `threshold` was previously
        an undefined name, so this function always raised NameError; it is
        now a backward-compatible parameter defaulting to PySceneDetect's
        standard 30.0.
    """
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    # Construct our SceneManager and pass it our StatsManager.
    scene_manager = SceneManager(stats_manager)

    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold).
    scene_manager.add_detector(ContentDetector(threshold=threshold))
    base_timecode = video_manager.get_base_timecode()

    # We save our stats file to stats/{VIDEO_NAME}.stats.csv.
    stats_file_path = 'stats/%s.stats.csv' % video_name

    scene_list = []

    try:
        # If stats file exists, load it to skip recomputing metrics.
        if os.path.exists(stats_file_path):
            # Read stats from CSV file opened in read mode:
            with open(stats_file_path, 'r') as stats_file:
                stats_manager.load_from_csv(stats_file, base_timecode)

        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # Obtain list of detected scenes.
        # Each scene is a tuple of (start, end) FrameTimecodes.
        scene_list = scene_manager.get_scene_list(base_timecode)

    finally:
        video_manager.release()

    video_dir = os.path.join(save_dir, video_name)
    if not os.path.exists(video_dir):
        os.makedirs(video_dir)

    # One clip per scene, named {VIDEO_NAME}-{SCENE_NUMBER}.mp4.
    split_video_ffmpeg([video_path], scene_list,
                       os.path.join(video_dir,
                                    "${VIDEO_NAME}-${SCENE_NUMBER}.mp4"),
                       video_name)
コード例 #7
0
ファイル: clip.py プロジェクト: xlelou/auto_clip
def find_scenes(video_path, threshold=30.0):
    """Detect scenes, trimming one frame from each end of every scene.

    Parameters
    ----------
    video_path : str
        Path of the video to analyze.
    threshold : float, optional
        ContentDetector cut threshold. Bug fix: `threshold` was an
        undefined name here (NameError unless a module-level global
        happened to exist — TODO confirm against the original module);
        it is now an explicit parameter defaulting to PySceneDetect's
        standard 30.0.

    Returns
    -------
    list of (FrameTimecode, FrameTimecode)
        (start + 1 frame, end - 1 frame) per detected scene.
    """
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    # Construct our SceneManager and pass it our StatsManager.
    scene_manager = SceneManager(stats_manager)

    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold).
    scene_manager.add_detector(ContentDetector(threshold=threshold))
    base_timecode = video_manager.get_base_timecode()

    # Initialized before the try so the name always exists at return.
    final_scene_list = []

    try:
        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # Obtain list of detected scenes.
        # Each scene is a tuple of (start, end) FrameTimecodes.
        scene_list = scene_manager.get_scene_list(base_timecode)

        print('List of scenes obtained:')
        for scene in scene_list:
            start, end = scene
            # Shrink each scene by one frame on both sides to avoid
            # including the transition frames themselves.
            final_scene_list.append((start + 1, end - 1))

    finally:
        video_manager.release()

    return final_scene_list
def pyscenedetect(path, output_csv_file):
    """Detect scenes in *path* and write the scene list to a CSV file.

    Parameters
    ----------
    path : str
        Path of the video to analyze.
    output_csv_file : str
        Destination CSV path for the scene list.
    """
    # For content-aware scene detection:
    from scenedetect.detectors.content_detector import ContentDetector

    video_manager = VideoManager([path])
    stats_manager = StatsManager()
    # Construct our SceneManager and pass it our StatsManager.
    scene_manager = SceneManager(stats_manager)
    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold). A threshold of 12 is far
    # more sensitive than PySceneDetect's default of 30.
    scene_manager.add_detector(ContentDetector(threshold=12))
    base_timecode = video_manager.get_base_timecode()

    scene_list = []
    try:
        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # Obtain list of detected scenes.
        # Each scene is a tuple of (start, end) FrameTimecodes.
        scene_list = scene_manager.get_scene_list(base_timecode)
    finally:
        # Bug fix: the VideoManager was never released before.
        video_manager.release()

    print('List of scenes obtained:')
    for i, scene in enumerate(scene_list):
        print('Scene %2d: Start %s / Frame %d, End %s / Frame %d' % (
            i + 1,
            scene[0].get_timecode(),
            scene[0].get_frames(),
            scene[1].get_timecode(),
            scene[1].get_frames(),
        ))

    # Writing scene list to .csv file.
    # Bug fix: use a context manager so the handle is closed
    # (it previously leaked).
    with open(output_csv_file, "w+") as f:
        scenedetect.scene_manager.write_scene_list(f, scene_list,
                                                   cut_list=None)
コード例 #9
0
def test_scenes(url):
    """Detect scenes in the video at *url* and generate one image per scene.

    :param url: video URL (or path) that cv2.VideoCapture can open
    :return: the URLs returned by generate_images
    """
    capture = cv2.VideoCapture(url)
    manager = SceneManager()
    manager.add_detector(ContentDetector())

    try:
        fps = capture.get(cv2.CAP_PROP_FPS)
        total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        # End-of-stream timecode computed from the frame count.
        end_time = FrameTimecode(total_frames / fps, fps)

        manager.detect_scenes(frame_source=capture, end_time=end_time)
        scenes = manager.get_scene_list(end_time)
        print("Scene List Count: " + str(len(scenes)))

        result_urls = generate_images(capture, scenes, 1, "testvid")
    finally:
        capture.release()

    return result_urls
コード例 #10
0
    def run_sd(mmif):
        """Run PySceneDetect over the MMIF video document.

        Returns a list of (start_frame, end_frame) int pairs, one per
        detected scene.
        """
        # [7:] strips the leading "file://" scheme from the document
        # location — assumes a file:// URI; TODO confirm against callers.
        video_manager = VideoManager(
            [mmif.get_document_location(DocumentTypes.VideoDocument)[7:]])
        scene_manager = SceneManager()

        scene_manager.add_detector(ContentDetector())
        ##todo 2020-12-01 kelleylynch incorporate option for threshold detector
        base_timecode = video_manager.get_base_timecode()

        try:
            # Set downscale factor to improve processing speed.
            video_manager.set_downscale_factor()
            # Start video_manager.
            video_manager.start()
            # Perform scene detection on video_manager.
            scene_manager.detect_scenes(frame_source=video_manager)
            # Obtain list of detected scenes.
            # Each scene is a tuple of (start, end) FrameTimecodes.
            scene_list = scene_manager.get_scene_list(base_timecode)
            # Bug fix: materialize into a list instead of returning a lazy
            # one-shot map iterator (which could only be consumed once).
            scenes = [(start.get_frames(), end.get_frames())
                      for start, end in scene_list]
        finally:
            video_manager.release()
        return scenes
コード例 #11
0
    def find_scene_changes(self,
                           video_path,
                           method='threshold',
                           new_stat_file=True):
        """
        Detect scene changes in given video.

        Args:
            video_path: Path to video to analyze
            method: Method for detecting scene changes; 'content' uses
                ContentDetector, any other value falls back to
                ThresholdDetector
            new_stat_file: Option to save results; when False, a stats
                file left by a previous run is reused to skip
                recomputing per-frame metrics

        Returns:
            Scene changes + their corresponding time codes

        """
        # type: (str) -> List[Tuple[FrameTimecode, FrameTimecode]]

        video_manager = VideoManager([video_path])
        stats_manager = StatsManager()

        # Construct our SceneManager and pass it our StatsManager.
        scene_manager = SceneManager(stats_manager)

        # Add the detector selected by `method` (each detector's
        # constructor takes detector options, e.g. threshold).
        if method == 'content':
            scene_manager.add_detector(
                ContentDetector(threshold=30, min_scene_len=40))
        else:
            scene_manager.add_detector(
                ThresholdDetector(min_scene_len=40,
                                  threshold=125,
                                  min_percent=0.5))

        base_timecode = video_manager.get_base_timecode()

        # We save our stats file to {VIDEO_PATH}.{METHOD}.stats.csv.
        stats_file_path = '%s.%s.stats.csv' % (video_path, method)

        scene_list = []

        try:
            # If reuse is allowed and a stats file exists, load it.
            if not new_stat_file and os.path.exists(stats_file_path):
                # Read stats from CSV file opened in read mode:
                with open(stats_file_path, 'r') as stats_file:
                    stats_manager.load_from_csv(stats_file, base_timecode)

            # Set downscale factor to improve processing speed
            # (2 = half the original frame size).
            video_manager.set_downscale_factor(2)

            # Start video_manager.
            video_manager.start()

            # Perform scene detection on video_manager.
            scene_manager.detect_scenes(frame_source=video_manager)

            # Obtain list of detected scenes.
            scene_list = scene_manager.get_scene_list(base_timecode)
            # Each scene is a tuple of (start, end) FrameTimecodes.

            # We only write to the stats file if a save is required:
            if stats_manager.is_save_required():
                with open(stats_file_path, 'w') as stats_file:
                    stats_manager.save_to_csv(stats_file, base_timecode)

        finally:
            video_manager.release()

        return scene_list
コード例 #12
0
def split_video(video_path, video_name, save_dir_path, threshold):
    """
    Oversegment the input video using PySceneDetect

    Parameters
    ----------
    video_path : string
        path to input video
    video_name : string
        name of input video (with extension; the last 4 characters are
        stripped when naming the ffmpeg output)
    save_dir_path : string
        path to saving split elements
    threshold : int
        PySceneDetect threshold

    Returns
    -------
    list of (str, str)
        (start, end) timecode strings for each detected scene
    """

    # Limit OpenCV to a single thread; ffmpeg below is also pinned to one
    # thread, presumably so several of these run in parallel — TODO confirm.
    cv2.setNumThreads(1)

    # type: (str) -> List[Tuple[FrameTimecode, FrameTimecode]]
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    # Construct our SceneManager and pass it our StatsManager.
    scene_manager = SceneManager(stats_manager)

    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold). min_scene_len=180 frames
    # enforces a minimum scene length.
    scene_manager.add_detector(
        ContentDetector(threshold=threshold, min_scene_len=180))
    base_timecode = video_manager.get_base_timecode()

    # We save our stats file to {VIDEO_PATH}.stats.csv.
    stats_file_path = 'stats/%s.stats.csv' % video_name

    scene_list = []

    try:
        # If stats file exists, load it.
        if os.path.exists(stats_file_path):
            # Read stats from CSV file opened in read mode:
            with open(stats_file_path, 'r') as stats_file:
                stats_manager.load_from_csv(stats_file, base_timecode)

        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # Obtain list of detected scenes.
        scene_list = scene_manager.get_scene_list(base_timecode)
        # Each scene is a tuple of (start, end) FrameTimecodes.

    finally:
        video_manager.release()

    if not os.path.exists(save_dir_path):
        os.makedirs(save_dir_path)

    # Write one clip per scene via ffmpeg; single-threaded x264 encode.
    split_video_ffmpeg(
        [video_path],
        scene_list,
        os.path.join(save_dir_path, "${VIDEO_NAME}-${SCENE_NUMBER}.mp4"),
        video_name[:-4],
        arg_override='-threads 1 -c:v libx264 -preset fast -crf 21 -c:a aac')

    # Collect (start, end) timecode strings for the caller.
    timecode_list = []
    for scene in scene_list:
        start = scene[0].get_timecode()
        end = scene[1].get_timecode()
        timecode_list.append((start, end))

    return timecode_list
コード例 #13
0
def find_scenes(video_path):
    """Detect scenes in *video_path*, record statistics, and split the
    video into per-scene clips with ffmpeg.

    Skips all work (returning an empty list) if a folder named after the
    video already exists, on the assumption the video was already split.
    Relies on module-level constants FILE_SCENE_LENGH, FILE_SCENE_NUMBER
    and VIDEO_SPLIT_TEMPLATE — defined elsewhere in the module.

    Returns the list of (start, end) FrameTimecode tuples (empty when
    skipped or when ffmpeg is unavailable).
    """
    start_time = time.time()
    print("Analyzing video " + video_path)

    # type: (str) -> List[Tuple[FrameTimecode, FrameTimecode]]
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()

    # Pass StatsManager to SceneManager to accelerate computing time
    scene_manager = SceneManager(stats_manager)

    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold).
    scene_manager.add_detector(ContentDetector())
    base_timecode = video_manager.get_base_timecode()

    # We save our stats file to {VIDEO_PATH}.stats.csv.
    stats_file_path = '%s.stats.csv' % (video_path)

    scene_list = []

    # Output folder: the video path without its extension.
    folder = os.path.splitext(video_path)[0]

    if os.path.exists(folder):
        print(
            '--- STOP : The folder for this video already exists, it is probably already split.'
        )

    else:
        try:
            # If stats file exists, load it.
            if os.path.exists(stats_file_path):
                # Read stats from CSV file opened in read mode:
                with open(stats_file_path, 'r') as stats_file:
                    stats_manager.load_from_csv(stats_file, base_timecode)

            if video_splitter.is_ffmpeg_available():
                # Set downscale factor to improve processing speed.
                video_manager.set_downscale_factor()

                # Start video_manager.
                video_manager.start()

                # Perform scene detection on video_manager.
                scene_manager.detect_scenes(frame_source=video_manager)

                # Obtain list of detected scenes.
                scene_list = scene_manager.get_scene_list(base_timecode)
                # Each scene is a tuple of (start, end) FrameTimecodes.

                print('%s scenes obtained' % len(scene_list))

                if len(scene_list) > 0:
                    # STATISTICS : Store scenes length
                    # (video name, length in frames, length in seconds).
                    with open(FILE_SCENE_LENGH, 'a') as myfile:
                        for i, scene in enumerate(scene_list):
                            myfile.write(
                                '%s, %d, %f\n' %
                                (os.path.splitext(
                                    os.path.basename(video_path))[0],
                                 scene[1].get_frames() - scene[0].get_frames(),
                                 (scene[1] - scene[0]).get_seconds()))

                    # STATISTICS : Store number of scenes
                    with open(FILE_SCENE_NUMBER, 'a') as myfile:
                        myfile.write('%s,%d\n' % (os.path.splitext(
                            os.path.basename(video_path))[0], len(scene_list)))

                    # Split the video
                    print('Splitting the video. Put scenes in %s/%s' %
                          (folder, VIDEO_SPLIT_TEMPLATE))
                    os.mkdir(folder)
                    video_splitter.split_video_ffmpeg(
                        [video_path],
                        scene_list,
                        folder + "/" + VIDEO_SPLIT_TEMPLATE + ".mp4",
                        os.path.basename(folder),
                        suppress_output=True)

                print("-- Finished video splitting in {:.2f}s --".format(
                    time.time() - start_time))
            else:
                print(
                    'Ffmpeg is not installed on your computer. Please install it before running this code'
                )

        finally:
            video_manager.release()

    return scene_list
コード例 #14
0
def make_dataset(video_path, video_name, timecodes, save_dir):
    # type: (str) -> List[Tuple[FrameTimecode, FrameTimecode]]
    """Two-pass scene split of *video_path* guided by four timecodes.

    Pass 1 computes per-frame content metrics and finds, inside the
    (timecodes[0], timecodes[1]) and (timecodes[2], timecodes[3]) windows,
    the frames with the highest content value; the smaller of the two
    peaks becomes the detection threshold. Pass 2 re-runs detection with
    that threshold and splits the video into per-scene mp4 clips under
    save_dir/video_name.

    NOTE(review): timecodes are compared as strings (e.g. 'HH:MM:SS.nnn');
    this works lexicographically only while all parts are zero-padded —
    presumably they are; verify against the caller.

    Returns (start_video_num, end_video_num, number_of_scenes).
    """
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    # Construct our SceneManager and pass it our StatsManager.
    scene_manager = SceneManager(stats_manager)

    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold).
    scene_manager.add_detector(ContentDetector())
    base_timecode = video_manager.get_base_timecode()

    # We save our stats file to {VIDEO_PATH}.stats.csv.
    stats_file_path = 'stats/%s.stats.csv' % video_name

    scene_list = []

    try:
        # If stats file exists, load it.
        if os.path.exists(stats_file_path):
            # Read stats from CSV file opened in read mode:
            with open(stats_file_path, 'r') as stats_file:
                stats_manager.load_from_csv(stats_file, base_timecode)

        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # We only write to the stats file if a save is required:
        if stats_manager.is_save_required():
            with open(stats_file_path, 'w') as stats_file:
                stats_manager.save_to_csv(stats_file, base_timecode)

        start_timecode = ""
        start_content_val = 0
        end_timecode = ""
        end_content_val = 0
        # NOTE(review): reaches into StatsManager internals
        # (_registered_metrics, _loaded_metrics, _frame_metrics) — private
        # attributes, fragile across PySceneDetect versions.
        metric_keys = sorted(
            list(
                stats_manager._registered_metrics.union(
                    stats_manager._loaded_metrics)))
        frame_keys = sorted(stats_manager._frame_metrics.keys())
        # Find the peak content value inside each of the two windows; the
        # corresponding timecodes mark the desired start/end cuts.
        for frame_key in frame_keys:
            frame_timecode = base_timecode + frame_key
            timecode = frame_timecode.get_timecode()
            if timecode > timecodes[0] and timecode < timecodes[1]:
                content_val = stats_manager.get_metrics(
                    frame_key, metric_keys)[0]
                if start_content_val < content_val:
                    start_content_val = content_val
                    start_timecode = timecode
            if timecode > timecodes[2] and timecode < timecodes[3]:
                content_val = stats_manager.get_metrics(
                    frame_key, metric_keys)[0]
                if end_content_val < content_val:
                    end_content_val = content_val
                    end_timecode = timecode
        # The weaker of the two peaks becomes the pass-2 threshold, so
        # both desired cuts will be detected.
        threshold = min(start_content_val, end_content_val)

        print(f"Start Time: {start_timecode}, End Time: {end_timecode}")

    finally:
        video_manager.release()

    # --- Pass 2: re-detect with the derived threshold. ---
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    scene_manager = SceneManager(stats_manager)
    scene_manager.add_detector(ContentDetector(threshold=threshold))
    base_timecode = video_manager.get_base_timecode()

    scene_list = []

    try:
        # If stats file exists, load it.
        if os.path.exists(stats_file_path):
            # Read stats from CSV file opened in read mode:
            with open(stats_file_path, 'r') as stats_file:
                stats_manager.load_from_csv(stats_file, base_timecode)

        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # Obtain list of detected scenes.
        scene_list = scene_manager.get_scene_list(base_timecode)
        # Each scene is a tuple of (start, end) FrameTimecodes.

        # Locate the indices of the first scenes at/after the start and
        # end cut timecodes (again via string comparison).
        start_video_num = 0
        end_video_num = 0
        for i, scene in enumerate(scene_list):
            if scene[0].get_timecode(
            ) >= start_timecode and start_video_num == 0:
                start_video_num = i
                print(f"start video: {start_video_num}")
            if scene[1].get_timecode() >= end_timecode and end_video_num == 0:
                end_video_num = i
                print(f"end video: {end_video_num}")

    finally:
        video_manager.release()

    video_dir = os.path.join(save_dir, video_name)
    if not os.path.exists(video_dir):
        os.makedirs(video_dir)

    # One clip per scene, named {VIDEO_NAME}-{SCENE_NUMBER}.mp4.
    split_video_ffmpeg([video_path], scene_list,
                       os.path.join(video_dir,
                                    "${VIDEO_NAME}-${SCENE_NUMBER}.mp4"),
                       video_name)

    return start_video_num, end_video_num, len(scene_list)
コード例 #15
0
def getScenes(video_path,
              threshold=30.0,
              minSceneDur=500,
              windowSize=50,
              fadeThreshold=3.0):
    """Detect scenes in *video_path*, optionally recovering crossfade cuts.

    Args:
        video_path: path of the video to analyze.
        threshold: ContentDetector cut threshold.
        minSceneDur: minimum scene duration in milliseconds.
        windowSize: smoothing/search window, in frames, used for fade
            detection and plotting.
        fadeThreshold: smoothed content value above which a frame far
            from any hard cut is treated as part of a crossfade.

    Returns:
        List of scene dicts with keys: filename, index, start, end, dur
        (all times in ms), frameStart, frameEnd.

    Relies on module-level names defined elsewhere: progress, fileCount,
    CHECK_FOR_FADE, PLOT, SAVE_STATS, OUTPUT_FILE, roundInt, readCsv,
    timecodeToMs.
    """
    global progress
    global fileCount

    basename = os.path.basename(video_path)
    # Raw stats are needed for fade detection, plotting, or persistence.
    doStats = CHECK_FOR_FADE or PLOT or SAVE_STATS

    # type: (str) -> List[Tuple[FrameTimecode, FrameTimecode]]
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    # Construct our SceneManager and pass it our StatsManager.
    scene_manager = SceneManager(stats_manager)

    base_timecode = video_manager.get_base_timecode()
    framerate = video_manager.get_framerate()

    # Add ContentDetector algorithm (each detector's constructor
    # takes detector options, e.g. threshold). Convert the minimum scene
    # duration from ms to frames at this video's framerate.
    min_scene_len = roundInt(minSceneDur / 1000.0 * framerate)
    scene_manager.add_detector(
        ContentDetector(threshold=threshold, min_scene_len=min_scene_len))

    # We save our stats file to {VIDEO_PATH}.stats.csv.
    stats_file_path = OUTPUT_FILE.replace(".csv", "%s.csv")
    stats_file_path = stats_file_path % ("_" + basename + "_stats")

    scene_list = []

    print("Looking for scenes in %s" % video_path)
    try:
        # If stats file exists, load it.
        if doStats and os.path.exists(stats_file_path):
            # Read stats from CSV file opened in read mode:
            with open(stats_file_path, 'r') as stats_file:
                stats_manager.load_from_csv(stats_file, base_timecode)

        # Set downscale factor to improve processing speed.
        video_manager.set_downscale_factor()

        # Start video_manager.
        video_manager.start()

        # Perform scene detection on video_manager.
        scene_manager.detect_scenes(frame_source=video_manager)

        # Obtain list of detected scenes.
        scenes = scene_manager.get_scene_list(base_timecode)
        # Each scene is a tuple of (start, end) FrameTimecodes.

        # Convert each scene into a plain dict with times in ms.
        for i, scene in enumerate(scenes):
            start = roundInt(scene[0].get_seconds() * 1000)
            end = roundInt(scene[1].get_seconds() * 1000)
            scene_list.append({
                "filename": basename,
                "index": i,
                "start": start,
                "end": end,
                "dur": end - start,
                "frameStart": scene[0].get_frames(),
                "frameEnd": scene[1].get_frames()
            })

        # We only write to the stats file if a save is required:
        if doStats and stats_manager.is_save_required():
            with open(stats_file_path, 'w') as stats_file:
                stats_manager.save_to_csv(stats_file, base_timecode)

        # Retrieve raw data for plotting and additional analysis
        fieldNames, sceneData = readCsv(stats_file_path, skipLines=1)
        dlen = len(sceneData)

        # Add smoothed data: rolling mean of content_val over windowSize.
        windowLeft = int(windowSize / 2)
        windowRight = windowSize - windowLeft
        for i, d in enumerate(sceneData):
            i0 = max(i - windowLeft, 0)
            i1 = min(i + windowRight, dlen - 1)
            sceneData[i]["smoothed"] = np.mean(
                [d["content_val"] for d in sceneData[i0:i1]])
            sceneData[i]["ms"] = timecodeToMs(d["Timecode"])

        # Add crossfade cuts: high smoothed content far from any hard cut
        # is treated as a fade, and the containing scene is split there.
        if CHECK_FOR_FADE:
            for i, d in enumerate(sceneData):
                ms = d["ms"]
                value = d["smoothed"]
                frame = d["Frame Number"]
                neighboringCuts = [
                    s for s in scene_list
                    if abs(frame - s["frameStart"]) <= windowSize
                    or abs(frame - s["frameEnd"]) <= windowSize
                ]

                # if there's no nearby cuts and we've reached the fade threshold
                if len(neighboringCuts) <= 0 and value >= fadeThreshold:
                    # retrieve the scene right before this one
                    sortedList = sorted(scene_list,
                                        key=lambda k: k['frameStart'])
                    prev = [s for s in sortedList if s["frameStart"] < frame]
                    if len(prev) > 0:
                        prev = prev[-1]
                    else:
                        prev = sortedList[0]

                    # Find local minimums to determine fade start/end
                    leftWindow = sorted([
                        d for d in sceneData
                        if frame - windowSize < d["Frame Number"] < frame
                    ],
                                        key=lambda k: k['smoothed'])
                    rightWindow = sorted([
                        d for d in sceneData
                        if frame < d["Frame Number"] < frame + windowSize
                    ],
                                         key=lambda k: k['smoothed'])
                    fadeStart = leftWindow[0]
                    fadeEnd = rightWindow[0]

                    # Add new cut if we're not too close to the edges
                    if fadeStart["ms"] - prev["start"] >= minSceneDur and prev[
                            "end"] - fadeEnd["ms"] >= minSceneDur:
                        # Add the new scene
                        scene_list.append({
                            "filename": basename,
                            "index": prev["index"] + 1,
                            "frameStart": fadeEnd["Frame Number"],
                            "frameEnd": prev["frameEnd"],
                            "start": fadeEnd["ms"],
                            "end": prev["end"],
                            "dur": prev["end"] - fadeEnd["ms"]
                        })

                        # Update the previous scene
                        scene_list[prev["index"]]["end"] = fadeStart["ms"]
                        scene_list[prev["index"]][
                            "dur"] = fadeStart["ms"] - prev["start"]
                        scene_list[prev["index"]]["frameEnd"] = fadeStart[
                            "Frame Number"]

                        # Sort and update indices
                        scene_list = sorted(scene_list,
                                            key=lambda k: k['frameStart'])
                        for j, s in enumerate(scene_list):
                            scene_list[j]["index"] = j

        # PLOT is presumably falsy or an (f0, f1) frame-range pair —
        # TODO confirm where it is defined.
        if PLOT:
            f0, f1 = PLOT
            # add raw data
            xs = [
                d["Frame Number"] - 1 for d in sceneData
                if f0 <= d["Frame Number"] <= f1
            ]
            ys = [
                d["content_val"] for d in sceneData
                if f0 <= d["Frame Number"] <= f1
            ]
            plt.plot(xs, ys)

            # add smoothed data
            ys = [
                d["smoothed"] for d in sceneData
                if f0 <= d["Frame Number"] <= f1
            ]
            plt.plot(xs, ys, "c")

            # add horizontal line for threshold
            plt.plot([xs[0], xs[-1]], [threshold, threshold], "g--")

            # add scenes as plot data
            xs = [
                d["frameEnd"] - 1 for d in scene_list
                if f0 <= d["frameEnd"] <= f1
            ]
            ys = [
                sceneData[d["frameEnd"] - 1]["content_val"] for d in scene_list
                if f0 <= d["frameEnd"] <= f1
            ]
            plt.scatter(xs, ys, c="red")
            plt.show()

        # Remove the stats file unless the caller asked to keep it.
        if os.path.exists(stats_file_path) and not SAVE_STATS:
            os.remove(stats_file_path)

    finally:
        video_manager.release()

    # Update and display overall progress across all files.
    progress += 1
    sys.stdout.write('\r')
    sys.stdout.write("%s%%" % round(1.0 * progress / fileCount * 100, 1))
    sys.stdout.flush()

    return scene_list
コード例 #16
0
def find_scenes(video_path, generate_images=False):
    """
    Slice a video into a list of scenes, where each scene has a similar
    color distribution, and extract ``num_images`` representative frames
    per scene.

    :param video_path: The path to the video for finding scenes.
    :param generate_images: Whether to also write the extracted frames to
        disk (under the "output" directory) or not.
    :return: A list of extracted frames (images), ``num_images`` per
        detected scene; an empty list when no scenes are found.
    """
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    scene_manager = SceneManager(stats_manager)

    scene_manager.add_detector(ContentDetector())
    base_timecode = video_manager.get_base_timecode()

    stats_file_path = '%s.stats.csv' % video_path

    scene_list = []
    output = []

    try:
        # Reuse cached per-frame metrics when a stats file already exists,
        # so repeated runs skip recomputing detector values.
        if os.path.exists(stats_file_path):
            with open(stats_file_path, 'r') as stats_file:
                stats_manager.load_from_csv(stats_file, base_timecode)

        video_manager.set_downscale_factor()
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager)
        scene_list = scene_manager.get_scene_list(base_timecode)

        print("Starting to generate images from scenelist")
        num_images = 2

        if not scene_list:
            # Fix: return the (empty) frame list rather than None, so the
            # function's return type is consistent for callers.
            return output

        available_extensions = get_cv2_imwrite_params()
        image_extension = "jpg"

        # JPEG quality 100 for the extracted frames.
        imwrite_param = [available_extensions[image_extension], 100]

        # Restart the video at full resolution (downscale factor 1) for
        # image extraction; detection above ran on downscaled frames.
        video_manager.release()
        video_manager.reset()
        video_manager.set_downscale_factor(1)
        video_manager.start()

        completed = True
        # Fix: actually %-format the count instead of passing it as a
        # second print() argument (logging-style call pasted into print).
        print('Generating output images (%d per scene)...' % num_images)

        filename_template = Template(
            "$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER")

        # Zero-padded number formats sized to the scene/image counts
        # (at least 3 digits for scenes).
        scene_num_format = '%0'
        scene_num_format += str(
            max(3,
                math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
        image_num_format = '%0'
        image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd'

        fps = scene_list[0][0].framerate

        # For every scene, split its frame range into num_images chunks and
        # pick one representative frame per chunk: the first frame of the
        # first chunk, the last frame of the last chunk, and chunk midpoints
        # in between. Scenes shorter than num_images frames are padded with
        # their final frame.
        timecode_list = [[
            FrameTimecode(int(f), fps=fps) for f in [
                a[len(a) // 2] if (0 < j < num_images - 1) or num_images == 1
                else min(a[0] + 0, a[-1]) if j == 0 else max(a[-1] - 0, a[0])
                for j, a in enumerate(np.array_split(r, num_images))
            ]
        ] for i, r in enumerate([
            r if r.stop - r.start >= num_images else list(r) + [r.stop - 1] *
            (num_images - len(r))
            for r in (range(start.get_frames(), end.get_frames())
                      for start, end in scene_list)
        ])]

        image_filenames = {i: [] for i in range(len(timecode_list))}

        for i, tl in enumerate(timecode_list):
            for j, image_timecode in enumerate(tl):
                video_manager.seek(image_timecode)
                video_manager.grab()
                ret_val, frame_im = video_manager.retrieve()
                if ret_val:
                    file_path = '%s.%s' % (filename_template.safe_substitute(
                        VIDEO_NAME=video_path,
                        SCENE_NUMBER=scene_num_format % (i + 1),
                        IMAGE_NUMBER=image_num_format % (j + 1),
                        FRAME_NUMBER=image_timecode.get_frames()),
                                           image_extension)
                    image_filenames[i].append(file_path)
                    abs_file_path = get_and_create_path(file_path, "output")
                    output.append(frame_im)
                    if generate_images:
                        print(abs_file_path)
                        cv2.imwrite(abs_file_path, frame_im, imwrite_param)
                else:
                    # Retrieval failed (e.g. seek past end); stop early.
                    completed = False
                    break

        if not completed:
            print('Could not generate all output images.')

    finally:
        video_manager.release()

    return output
コード例 #17
0
def find_scenes(video_path):
    """
    Detect scenes in a video and save one representative JPEG (the middle
    frame) per scene under ``DATA_DIR/<video name>/``.

    :param video_path: Path to the video file to analyze.
    :return: A JSON string encoding a list of
        ``{"start": ..., "end": ..., "img_file": ...}`` dicts, one per scene.
    :raises Exception: Re-raises any failure after logging it.
    """
    try:
        start_time = perf_counter()
        print(f"find_scenes({video_path})")

        # Fix: derive "<name>" from ".../<name>.<ext>" with basename +
        # splitext. The original `video_path.find('.')` used the FIRST dot
        # in the whole path, which produced an empty/garbage name for paths
        # like "./videos/x.mp4".
        file_name = os.path.splitext(os.path.basename(video_path))[0]
        out_dir = os.path.join(DATA_DIR, file_name)
        os.makedirs(out_dir, exist_ok=True)

        video_manager = VideoManager([video_path])
        stats_manager = StatsManager()
        # Construct our SceneManager and pass it our StatsManager.
        scene_manager = SceneManager(stats_manager)

        # Very sensitive threshold (2) paired with a 100-frame minimum
        # scene length to avoid over-segmentation.
        scene_manager.add_detector(
            ContentDetector(threshold=2, min_scene_len=100))
        base_timecode = video_manager.get_base_timecode()

        # We save our stats file to {VIDEO_PATH}.stats.csv.
        stats_file_path = f'{video_path}.stats.csv'
        scene_list = []

        try:
            # If a stats file exists, load cached frame metrics from it.
            if os.path.exists(stats_file_path):
                with open(stats_file_path, 'r') as stats_file:
                    stats_manager.load_from_csv(stats_file, base_timecode)

            # Downscale to improve processing speed, then detect scenes.
            video_manager.set_downscale_factor()
            video_manager.start()
            scene_manager.detect_scenes(frame_source=video_manager)
            scene_list = scene_manager.get_scene_list(base_timecode)

            # Only write the stats file when new metrics were computed.
            if stats_manager.is_save_required():
                with open(stats_file_path, 'w') as stats_file:
                    stats_manager.save_to_csv(stats_file, base_timecode)
        finally:
            video_manager.release()

        print(f"find_scenes({video_path}) - phase 2, Extract jpg")

        cap = cv2.VideoCapture(video_path)
        verbose = False
        scenes = []

        try:
            # Each scene is a tuple of (start, end) FrameTimecodes.
            for i, scene in enumerate(scene_list):
                start_frame = scene[0].get_frames()
                end_frame = scene[1].get_frames()
                # Seek to the middle frame of the scene for its thumbnail.
                mid_frame = start_frame + (end_frame - start_frame) // 2

                if verbose:
                    print('Scene %2d: Start %s / Frame %d, End %s / Frame %d' % (
                        i + 1,
                        scene[0].get_timecode(),
                        start_frame,
                        scene[1].get_timecode(),
                        end_frame,
                    ))
                    print('Frame no.', mid_frame)

                cap.set(cv2.CAP_PROP_POS_FRAMES, mid_frame)
                res, frame = cap.read()
                img_file = os.path.join(DATA_DIR, file_name, "%d.jpg" % i)
                # Fix: only write when the read succeeded; imwrite would
                # fail on a None frame.
                if res:
                    cv2.imwrite(img_file, frame)
                scenes.append({
                    "start": scene[0].get_timecode(),
                    "img_file": img_file,
                    "end": scene[1].get_timecode()
                })
        finally:
            # Fix: the original leaked the VideoCapture handle.
            cap.release()

        end_time = perf_counter()
        print(
            f"findScene() Complete. Returning {len(scenes)} scene(s). Duration {int(end_time - start_time)} seconds"
        )

        return json.dumps(scenes)
    except Exception as e:
        print("findScene() throwing Exception:" + str(e))
        # Fix: bare raise preserves the original traceback.
        raise