Example #1
def get_frame_timestamps(filename):
    path = get_absolute_path(os.path.join('.', 'videos', filename))
    content_detector = scenedetect.detectors.ContentDetector()
    smgr = scenedetect.manager.SceneManager(detector=content_detector)
    # detect_scenes_file() returns (video_fps, frames_read, frames_processed)
    # when given a SceneManager; capture the real framerate instead of
    # hard-coding 0.
    video_framerate, _, _ = scenedetect.detect_scenes_file(path, smgr)
    return video_framerate, smgr.scene_list
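Despite its name, the function above returns raw frame numbers plus the framerate; converting to timestamps in seconds is one division per cut. A minimal usage sketch (the file name is hypothetical):

framerate, cut_frames = get_frame_timestamps('clip.mp4')  # hypothetical file under ./videos
cut_seconds = [frame / float(framerate) for frame in cut_frames]  # frame index -> seconds
print(cut_seconds)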
Example #2
def main():

    print("Running PySceneDetect API test...")

    print("Version being used:")
    print(scenedetect.__version__)

    content_detector = scenedetect.detectors.ContentDetector()
    smgr = scenedetect.manager.SceneManager(detector=content_detector)

    scenedetect.detect_scenes_file("goldeneye.mp4", smgr)

    print("Detected %d scenes in video." % (len(smgr.scene_list)))
Example #3
def countCuts(path):
    if not os.path.exists(path):
        print(path, "does not exist")
        return None

    print "Handling", os.path.basename(path)

    scene_list = []
    detector_list = [scenedetect.detectors.ContentDetector(threshold=4)]

    video_framerate, frames_read = scenedetect.detect_scenes_file(
        path, scene_list, detector_list)

    numcuts = len(scene_list)

    print(numcuts, "cuts:", scene_list)

    #scene_list_msec = [(1000.0 * x) / float(video_framerate) for x in scene_list]
    #scene_length_msec = []

    #for scene in xrange(len(scene_list_msec)):
    #    if scene == 0:
    #        scene_length_msec.append(scene_list_msec[scene])
    #    else:
    #        scene_length_msec.append(scene_list_msec[scene] - scene_list_msec[scene - 1])

    cutspersecond = numcuts / (frames_read / float(video_framerate))

    print(cutspersecond, "cuts/second")

    return cutspersecond, video_framerate
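For example, 120 cuts in a 7200-frame video at 24 fps works out to 120 / (7200 / 24.0) = 0.4 cuts per second.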
Example #4
def detect_first_scene(path, start_minutes=0, end_minutes=12, threshold=20.0):
    """
    Detect transition from static image into the actual program.
    
    Requires PySceneDetect and OpenCV compiled with FFmpeg support.
    
    :param path: path to file
    :param start_minutes: when to start looking
    :param end_minutes: when to stop looking
    :param threshold: how big a change in frames to detect
    :return: time in seconds of the first detected scene boundary, or None if none found
    """
    import scenedetect
    # detect_scenes_file is unfortunately not really designed to be used like
    # this; it is tightly coupled to the command-line arguments passed by
    # scenedetect.cli.
    # TODO: rework the relevant PySceneDetect functions to be easier to call
    # programmatically, or write a detector that stops after the first match
    # (see detect_threshold).
    scene_detectors = scenedetect.detectors.get_available()
    args = DumbNamespace(threshold=threshold,
                         detection_method='content',
                         downscale_factor=2,
                         start_time=[0, start_minutes, 0],
                         duration=[0, end_minutes, 0],
                         quiet_mode=True,
                         # end custom arguments, begin defaults
                         min_scene_len=15,
                         frame_skip=0)
    scene_manager = scenedetect.manager.SceneManager(args=args, scene_detectors=scene_detectors)

    video_fps, frames_read, frames_processed = scenedetect.detect_scenes_file(path, scene_manager)

    scene_list_sec = [x / float(video_fps) for x in scene_manager.scene_list]

    if not scene_list_sec:
        return None  # no transition found in the search window

    return scene_list_sec[0]
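DumbNamespace is not shown in this snippet; the SceneManager only needs an object that exposes those keyword arguments as attributes, so a stand-in could be as small as the following (an assumption, not the author's definition):

import types

# Hypothetical stand-in: exposes keyword arguments as attributes,
# mimicking the argparse namespace scenedetect.cli would normally pass.
DumbNamespace = types.SimpleNamespace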
Example #5
def videoDetection(path, save_images, detector):
    print "*-*-*- Start processing the file " + path
    scene_list = []  # Scenes will be added to this list in detect_scenes().

    # Usually use one detector, but multiple can be used.
    if detector == 'content':
        detector_list = [
            scenedetect.detectors.ContentDetector(threshold=30,
                                                  min_scene_len=15)
        ]
    else:
        detector_list = [
            scenedetect.detectors.ThresholdDetector(threshold=16,
                                                    min_percent=0.9)
        ]

    video_fps, frames_read = scenedetect.detect_scenes_file(
        path, scene_list, detector_list, save_images=save_images)
    print('scene_list now contains the frame numbers of scene boundaries.')
    print(scene_list)

    # create new list with scene boundaries in milliseconds instead of frame #.
    scene_list_msec = [(1000.0 * x) / float(video_fps) for x in scene_list]
    print('Boundaries in milliseconds instead of frame numbers:')
    print(scene_list_msec)

    # create new list with scene boundaries in timecode strings ("HH:MM:SS.nnn").
    scene_list_tc = [
        scenedetect.timecodes.get_string(x) for x in scene_list_msec
    ]
    print('Boundaries in timecode strings ("HH:MM:SS.nnn"):')
    print(scene_list_tc)
    print('End processing ' + path + ' *-*-*-')
Example #6
def scene_detect(videoPath):
    sceneDetect = []
    detector_list = [
        scenedetect.detectors.ThresholdDetector(threshold=30, min_percent=0.9)
    ]
    print(videoPath)
    video_framerate, frames_read = scenedetect.detect_scenes_file(
        videoPath, sceneDetect, detector_list)
    return sceneDetect
Example #7
    def __init__(self, video):
        self.VIDEO = video
        self.BOUNDARIES = []
        self.FRAME_THRESHOLD = int(video.get_video_information()['FRAMES']/20)
        self.DETECTOR = [scenedetect.detectors.ContentDetector(threshold=50, min_scene_len=self.FRAME_THRESHOLD)]
        self.CUT_FPS, _ = scenedetect.detect_scenes_file(
            self.VIDEO.get_path(), self.BOUNDARIES, self.DETECTOR
        )

        self.BOUNDARIES.insert(0, 0)
        self.BOUNDARIES.append(self.VIDEO.get_video_information()['FRAMES'])
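With 0 prepended and the total frame count appended, consecutive entries of BOUNDARIES bound complete scenes. A small sketch of pairing them up (the helper name is hypothetical):

def boundary_pairs(boundaries):
    # [0, c1, c2, N] -> [(0, c1), (c1, c2), (c2, N)]
    return list(zip(boundaries[:-1], boundaries[1:]))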
Example #8
 def process_scenes(self):
     """
     pick a random window of up to five minutes to examine for scenes, then pass them out to create GIFs
     returns number of clips created
     """
     import scenedetect
     
     scene_list = []
     out_dir = self.clip_dir
     path = self.ogv_loc()
     
     detector_list = [
         scenedetect.detectors.ContentDetector(threshold=5, min_scene_len=4)
         #scenedetect.detectors.ThresholdDetector(threshold = 16, min_percent = 0.9)
     ]
     
     random.seed()
     total_seconds = self.get_length()
     if total_seconds < 60 * 10:
         start = random.uniform(60, total_seconds - 60)  # ignore the first and last minute
         timecode_list = [start, start + 60.0, 0]
     else:
         start = random.uniform(300, total_seconds - 600)  # ignore the first and last five minutes
         timecode_list = [start, start + 60 * 5.0, 0]
     print("starting at {0}".format(start))
     
     if start < 0:
         return 0
     
     video_framerate, frames_read = scenedetect.detect_scenes_file(
         path, scene_list, detector_list, timecode_list=timecode_list)

     print("detected")
     scene_list_sec = [x / float(video_framerate) for x in scene_list]
     
     ten_frame_offset = (1/video_framerate) * 10
     
     def yield_pairs():
         last = None
         for s in scene_list_sec:
             if last:
                 if 4 <= s - last <= 6 + ten_frame_offset * 2:  # keep scenes of roughly four to six seconds
                     yield (last + ten_frame_offset, s - ten_frame_offset)  # trim ten frames from each end
             last = s
     
     clip_count = 0
     
     for start, end in yield_pairs():
         clip_count += make_clip(path, out_dir, video_framerate, start, end)
     return clip_count
Example #9
def extract_shots_with_pyscenedetect(src_video,
                                     threshold=0,
                                     min_scene_length=15,
                                     fps=25):
    """
    uses pyscenedetect to produce a list of shot 
    boundaries (in seconds)
    
    Args:
        src_video (string): the path to the source 
            video
        threshold (int): the minimum value used 
            by pyscenedetect to classify a shot boundary
        min_scene_length (int): the minimum number of frames
            permitted per shot. 
        fps (int): the frame rate of the video
    
    Returns: 
        List[(float, float)]: a list of tuples of floats 
        representing predicted shot boundaries (in seconds) and 
        their associated scores
    """
    scene_detectors = scenedetect.detectors.get_available()
    timecode_formats = scenedetect.timecodes.get_available()
    detection_method = 'content'
    detector = None
    start_time, duration, end_time = None, None, None

    # Setup scenedetect defaults
    downscale_factor = 1
    frame_skip = 0
    stats_writer = None
    quiet_mode, save_images = False, False

    detector = scene_detectors['content'](threshold, min_scene_length)
    scene_list = list()
    timecode_list = [start_time, duration, end_time]
    video_fps, frames_read = scenedetect.detect_scenes_file(
        path=src_video,
        scene_list=scene_list,
        detector_list=[detector],
        stats_writer=stats_writer,
        downscale_factor=downscale_factor,
        frame_skip=frame_skip,
        quiet_mode=quiet_mode,
        save_images=save_images,
        timecode_list=timecode_list)
    # Note: integer division would truncate in Python 2, so cast fps to float;
    # the returned video_fps could also be used instead of the fps argument.
    boundaries = [(pair[0] / float(fps), pair[1]) for pair in scene_list]
    return boundaries
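A usage sketch (the file name and threshold are illustrative); each returned tuple pairs a boundary time in seconds with its detector score:

shots = extract_shots_with_pyscenedetect('interview.mp4', threshold=30)
for seconds, score in shots:
    print('cut at %.2f s (score %.1f)' % (seconds, score))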
Example #10
def scene_detect(opt):

  scene_list = []

  detector_list = [scenedetect.detectors.ContentDetector(threshold=32)]

  video_framerate, frames_read = scenedetect.detect_scenes_file(
      os.path.join(opt.avi_dir, opt.reference, 'video.avi'), scene_list, detector_list)

  savepath = os.path.join(opt.work_dir,opt.reference,'scene.pckl')

  with open(savepath, 'wb') as fil:
    pickle.dump([frames_read, scene_list], fil)

  print('%s - scenes detected %d from %d frames' % (
      os.path.join(opt.avi_dir, opt.reference, 'video.avi'), len(scene_list), frames_read))

  return [frames_read, scene_list]
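Loading the pickled result back is symmetric; a minimal sketch assuming the same opt paths:

import os
import pickle

with open(os.path.join(opt.work_dir, opt.reference, 'scene.pckl'), 'rb') as fil:
    frames_read, scene_list = pickle.load(fil)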
Example #11
def upload(request):

    # home view that allows the user to upload a video and set parameters.

    uploaded = False
    form = VideoForm(request.POST or None, request.FILES)

    fps, read, processed = 0, 0, 0

    if form.is_valid():

        video_file, scenedetect_object = form_cleaner(form)
        scene_detectors = scenedetect.detectors.get_available()
        sc_man = scenedetect.manager.SceneManager(scenedetect_object,
                                                  scene_detectors)
        f_r_p = scenedetect.detect_scenes_file(path=video_file.absolute_path,
                                               scene_manager=sc_man)
        # add the first frame (not taken into account by PySceneDetect)
        sc_man.scene_list.insert(0, 0)

        output_file(sc_man.scene_list, scenedetect_object.output_file,
                    f_r_p[0], f_r_p[1])

        video_list = ffmpeg_split(sc_man.scene_list, video_file.name, f_r_p[0],
                                  f_r_p[1])

        # save the variables the result view needs in the session
        request.session['fps_read_proc'] = f_r_p
        request.session['det_thres_down'] = (sc_man.detection_method,
                                             sc_man.args.threshold,
                                             sc_man.downscale_factor)
        request.session['video_list'] = video_list
        request.session['scene_list'] = sc_man.scene_list
        request.session['view'] = "display"

        # redirect to the result page
        return redirect(result)

    return render(
        request, 'interface/upload.html', {
            'form': form,
            'uploaded': uploaded,
            'fps': fps,
            'read': read,
            'processed': processed
        })
Example #13
def crap_detect(video_dir):
    detector_list = [
        scenedetect.detectors.ThresholdDetector(threshold=16, min_percent=0.6)
    ]

    file_paths = gfile.Glob(os.path.join(video_dir, '*.avi'))
    l = []  # paths of videos with at least one detected scene cut
    for file_path in file_paths:
        try:
            print(file_path)
            scene_list = []
            video_framerate, frames_read = scenedetect.detect_scenes_file(
                file_path, scene_list, detector_list)

            # scene_list now contains the frame numbers of scene boundaries.
            print(l)
            if len(scene_list) >= 1:
                l.append(file_path)
        except Exception:
            # skip files OpenCV/PySceneDetect cannot read
            pass
Example #14
def keyFrame():
    scene_list = []  # Scenes will be added to this list in detect_scenes().
    path = 'Files/Lecture1.mp4'  # Path to video file.

    # Usually use one detector, but multiple can be used.
    detector_list = [
        scenedetect.detectors.ThresholdDetector(threshold=1, min_percent=0.2)
    ]

    video_framerate, frames_read = scenedetect.detect_scenes_file(
        path, scene_list, detector_list)

    # scene_list now contains the frame numbers of scene boundaries.
    print(scene_list)
    cap = cv2.VideoCapture(path)  # open the video once rather than per boundary frame
    for frame_no in scene_list:
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)  # seek to the boundary frame
        ret, frame = cap.read()  # read the frame
        if ret:
            # cv2.imshow('window_name', frame)
            cv2.imwrite("Images/frame_" + str(frame_no) + ".jpg", frame)
    cap.release()
Example #15
def main():

    print("Running PySceneDetect API test...")

    print("PySceneDetect version being used: %s" % str(scenedetect.__version__))

    content_detector = scenedetect.detectors.ContentDetector()
    smgr = scenedetect.manager.SceneManager(detector=content_detector)
    scenedetect.detect_scenes_file("goldeneye.mp4", smgr)
    print("Detected %d scenes in video (algorithm = content, threshold = default)." % len(smgr.scene_list))

    content_detector = scenedetect.detectors.ContentDetector(threshold=27)
    smgr = scenedetect.manager.SceneManager(detector=content_detector, downscale_factor=2)
    scenedetect.detect_scenes_file("goldeneye.mp4", smgr)
    print("Detected %d scenes in video (algorithm = content, threshold = 27)." % len(smgr.scene_list))

    threshold_detector = scenedetect.detectors.ThresholdDetector(threshold=100)
    smgr = scenedetect.manager.SceneManager(detector=threshold_detector, perf_update_rate=5)
    scenedetect.detect_scenes_file("goldeneye.mp4", smgr)
    print("Detected %d scenes in video (algorithm = threshold, threshold = 100)." % len(smgr.scene_list))
Example #16
# open the file that will receive the scene-cuts-per-frame value of each video
fp = open("nbscene.txt", "w")

# iterate over the video files (9800 clips in the original run)
for videocount in range(l_range, r_range):
    videoname = ("ACCEDE%05i.mp4" % videocount)

    # scene_list will receive the frame index of every detected scene cut
    # in the current video
    scene_list = []
    path = videoname

    # use the content-based scene cut detector with a threshold of 30
    detector_list = [scenedetect.detectors.ContentDetector(threshold=30)]

    # run detection; detect_scenes_file fills scene_list in place
    scenedetect.detect_scenes_file(path, scene_list, detector_list)
    scene_cuts = len(scene_list)

    # count the number of frames with OpenCV
    cap = cv2.VideoCapture(videoname)
    length = float(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # write the cuts-per-frame ratio to the file
    fp.write("%s - %f\n" % (videoname, scene_cuts / length))

fp.close()
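Since each line has the form "name - value", reading the file back into a mapping is short; a small sketch:

with open("nbscene.txt") as fp:
    rates = {name: float(value)
             for name, value in (line.rsplit(" - ", 1) for line in fp if line.strip())}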
Example #17
def __detect_shot_list_by_path(path):
    # note: relies on a module-level detector_list defined elsewhere
    scene_list = []
    video_framerate, frames_read = scenedetect.detect_scenes_file(
        path, scene_list, detector_list, quiet_mode=True)
    # pad with the first and last frame so the list bounds every shot
    scene_list = [0] + scene_list + [int(frames_read)]
    return scene_list
Example #18
import scenedetect

scene_list = []        # Scenes will be added to this list in detect_scenes().
path = 'test.mp4'  # Path to video file.

# Usually use one detector, but multiple can be used.
detector_list = [
    scenedetect.detectors.ThresholdDetector(threshold=16, min_percent=0.9)
]

video_framerate, frames_read = scenedetect.detect_scenes_file(path, scene_list, detector_list)

# scene_list now contains the frame numbers of scene boundaries.
print(scene_list)
# create new list with scene boundaries in milliseconds instead of frame #.
scene_list_msec = [(1000.0 * x) / float(video_framerate) for x in scene_list]

# create new list with scene boundaries in timecode strings ("HH:MM:SS.nnn").
scene_list_tc = [scenedetect.timecodes.get_string(x) for x in scene_list_msec]
Example #19
    def extract_chunks(self,
                       video_path,
                       chunks_path=CHUNKS_PATH,
                       out_fps=OUT_FPS):
        scene_list = []

        video_fps, frames_read = scenedetect.detect_scenes_file(
            video_path, scene_list, self.detectors)

        scene_list.sort()
        scene_list_msec = [(1000.0 * x) / float(video_fps) for x in scene_list]

        chunks_list = []

        last = 0
        chunk_number = 0
        for time in scene_list_msec:
            if time - last < self.min_len:
                continue
            elif time - last > self.max_len:
                subchunks_number = int((time - last) / self.max_len)
                step = (time - last) / subchunks_number
                now_time = last + step
                while now_time <= time:
                    out_filename = os.path.basename(video_path).split(".")[0] + str(chunk_number) + "." + \
                                   os.path.basename(video_path).split(".")[1]
                    out_path = os.path.join(chunks_path, out_filename)
                    ffmpeg_start_time = self.convert_msecs_to_ffmpeg_time(last)
                    ffmpeg_end_time = self.convert_msecs_to_ffmpeg_time(
                        now_time)
                    ff = ffmpy.FFmpeg(inputs={video_path: None},
                                      outputs={
                                          out_path: [
                                              "-ss", ffmpeg_start_time, "-to",
                                              ffmpeg_end_time, "-y", "-r",
                                              str(out_fps), "-strict", "-2"
                                          ]
                                      })
                    ff.run()
                    chunks_list.append([out_path, now_time - last])
                    last = now_time
                    now_time += step
                    chunk_number += 1
            else:
                out_filename = os.path.basename(video_path).split(".")[0] + str(chunk_number) + "." + \
                               os.path.basename(video_path).split(".")[1]
                out_path = os.path.join(chunks_path, out_filename)
                ffmpeg_start_time = self.convert_msecs_to_ffmpeg_time(last)
                ffmpeg_end_time = self.convert_msecs_to_ffmpeg_time(time)
                ff = ffmpy.FFmpeg(inputs={video_path: None},
                                  outputs={
                                      out_path: [
                                          "-ss", ffmpeg_start_time, "-to",
                                          ffmpeg_end_time, "-y", "-r",
                                          str(out_fps), "-strict", "-2"
                                      ]
                                  })
                ff.run()
                chunks_list.append([out_path, time - last])
                last = time
                chunk_number += 1

        return chunks_list
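convert_msecs_to_ffmpeg_time is referenced but not shown; a plausible implementation (written here as a plain function; in the class above it would take self), assuming ffmpeg's HH:MM:SS.mmm time format:

def convert_msecs_to_ffmpeg_time(msecs):
    # e.g. 61234.0 -> "00:01:01.234"
    seconds, ms = divmod(int(msecs), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%02d:%02d:%02d.%03d" % (hours, minutes, seconds, ms)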
Example #20
 def process_thread(self):
     scenedetect.detect_scenes_file(self.videoname, self.smgr)
Example #21
            col = row
    return col
try:
    for path in file_list:
        filename = os.path.splitext(os.path.basename(path))[0]
        print("Analyzing \"%s\"..." % filename)
        scene_list = []
        cap = cv2.VideoCapture(path)
        videoHeight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        videoWidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        # downscale if the video is too large
        if videoWidth > 320:
            downscale = int(videoWidth // 320)
        else:
            downscale = None
        video_fps, frames_read = scenedetect.detect_scenes_file(
            path, scene_list, detector_list, downscale_factor=downscale)
        if len(scene_list) == 0:
            print("cannot find cuts in file %s" % os.path.basename(path))
            continue
        else:
            # build (first, last) frame ranges from the scene boundaries
            print("scene lists:")
            print(scene_list)
            frame_list = []
            for i in range(0, len(scene_list) + 1):
                if i == 0:
                    first = 0
                    last = scene_list[i]
                elif i == (len(scene_list)):
                    first = scene_list[i - 1]
                    last = frames_read
Example #22
def video_detection(filepath, detector, username, save_images=True):
    """Performs scene detection on passed file using given scene detector.

    args:
        filepath:    A string containing the path/to/file/filename.
        detector:    Scene detection mode, with only 2 options: content or threshold
        username:    Username calling the fuction, to organize outputfiles
    kwargs:
        save_images: Boolean to save images on disk 
    
    Returns:
        Dictionary with 2 fields
             scenes_time:  Array with scene boundaries in timecode strings ("HH:MM:SS.nnn")
             scene_files:  Array with scene files path. Empty id save_images is False
    """

    scene_list = []  # Scenes will be added to this list in detect_scenes().

    # Usually use one detector, but multiple can be used.
    # By default it is content detector
    detector = detector.lower()
    if detector == 'threshold':
        detector_list = [
            scenedetect.detectors.ThresholdDetector(threshold=16,
                                                    min_percent=0.9)
        ]
    else:
        detector_list = [
            scenedetect.detectors.ContentDetector(threshold=30,
                                                  min_scene_len=15)
        ]

    video_fps, frames_read = scenedetect.detect_scenes_file(
        filepath,
        scene_list,
        detector_list,
        save_images=save_images,
        quiet_mode=True)

    # Organize frames
    path, filename = os.path.split(filepath)
    scene_files = glob.glob(filename + '.Scene*-OUT.jpg')
    output_path = reduce(
        os.path.join,
        [os.getenv('APP_TEMP_FOLDER'), username,
         filename.split('.')[:-1][0]])
    output_lambda = lambda x: os.path.join(output_path, x.replace('-OUT', ''))
    if save_images:
        if not os.path.exists(output_path):
            os.makedirs(output_path)

        # Preserve only OUT scene files to bind with time
        [os.rename(x, output_lambda(x)) for x in scene_files]

        # Remove all other files
        [os.remove(x) for x in glob.glob(filename + '.Scene*-IN.jpg')]

    # create new list with scene boundaries in milliseconds instead of frame #.
    scene_list_msec = [(1000.0 * x) / float(video_fps) for x in scene_list]
    # create new list with scene boundaries in timecode strings ("HH:MM:SS.nnn").
    scene_list_tc = [
        scenedetect.timecodes.get_string(x) for x in scene_list_msec
    ]
    return {
        'scenes_time':
        scene_list_tc,
        'scenes_file':
        [output_lambda(x) for x in scene_files] if scene_files else []
    }
Example #23
import scenedetect

scene_list = []  # Scenes will be added to this list in detect_scenes().
path = 'test.mp4'  # Path to video file.

# Usually use one detector, but multiple can be used.
detector_list = [
    scenedetect.detectors.ThresholdDetector(threshold=16, min_percent=0.9)
]

video_framerate, frames_read = scenedetect.detect_scenes_file(
    path, scene_list, detector_list)

# scene_list now contains the frame numbers of scene boundaries.
print(scene_list)
# create new list with scene boundaries in milliseconds instead of frame #.
scene_list_msec = [(1000.0 * x) / float(video_framerate) for x in scene_list]

# create new list with scene boundaries in timecode strings ("HH:MM:SS.nnn").
scene_list_tc = [scenedetect.timecodes.get_string(x) for x in scene_list_msec]
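All of the examples above target the pre-0.5 detect_scenes_file API, which has since been removed from PySceneDetect. For reference, a rough modern (0.6-era) equivalent of this last snippet; a sketch, not a drop-in replacement:

from scenedetect import detect, ThresholdDetector

# detect() returns a list of (start, end) FrameTimecode pairs.
scenes = detect('test.mp4', ThresholdDetector(threshold=16))
for start, end in scenes:
    print(start.get_timecode(), end.get_timecode())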