def SelectFrames(videopath, filename, x1, x2, y1, y2, cropping, videotype,
                 start, stop, Task, selectionalgorithm):
    ''' Selecting frames from videos for labeling.'''
    if start > 1.0 or stop > 1.0 or start < 0 or stop < 0 or start >= stop:
        print(
            "Please change start & stop, they should form a normalized interval with 0<= start < stop<=1."
        )
    else:
        basefolder = 'data-' + Task + '/'
        auxiliaryfunctions.attempttomakefolder(basefolder)
        videos = auxiliaryfunctions.GetVideoList(filename, videopath,
                                                 videotype)
        for vindex, video in enumerate(videos):
            print("Loading ", video)
            clip = VideoFileClip(os.path.join(videopath, video))
            print("Duration of video [s], ", clip.duration, "fps, ", clip.fps,
                  "Cropped frame dimensions: ", clip.size)

            ####################################################
            # Creating folder with name of experiment and extract random frames
            ####################################################
            folder = video.split('.')[0]
            auxiliaryfunctions.attempttomakefolder(
                os.path.join(basefolder, folder))
            indexlength = int(np.ceil(np.log10(clip.duration * clip.fps)))
            # Extract the first frame (not cropped!) - useful for data augmentation
            index = 0
            image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
            io.imsave(
                os.path.join(basefolder, folder,
                             "img" + str(index).zfill(indexlength) + ".png"),
                image)

            if cropping:
                # Select ROI of interest by adjusting values in myconfig.py
                clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)
            print("Extracting frames ...")
            if selectionalgorithm == 'uniform':
                frames2pick = frameselectiontools.UniformFrames(
                    clip, numframes2pick, start, stop)
            elif selectionalgorithm == 'kmeans':
                frames2pick = frameselectiontools.KmeansbasedFrameselection(
                    clip, numframes2pick, start, stop)
            else:
                print(
                    "Please implement this method yourself and send us a pull request!"
                )
                frames2pick = []

            indexlength = int(np.ceil(np.log10(clip.duration * clip.fps)))
            for index in frames2pick:
                try:
                    image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                    io.imsave(
                        os.path.join(
                            basefolder, folder,
                            "img" + str(index).zfill(indexlength) + ".png"),
                        image)
                except FileNotFoundError:
                    print("Frame # ", index, " does not exist.")
Example #2
def cut_crop_video(vidpath='',  save_format=['avi'],
                   cut=False, starts=0., fins=-1.,
                   crop_sel=False, crop_coord=[0, 100, 0, 100], ret=False):
    clip = VideoFileClip(vidpath)

    duration = clip.duration
    fps = clip.fps
    savename = vidpath.split('.')[0]

    if crop_sel:
        # crop_coord = [x1, width, y1, height]
        clip = clip.crop(x1=crop_coord[0], width=crop_coord[1], y1=crop_coord[2], height=crop_coord[3])

    if cut:
        clip = clip.subclip(starts, fins)

    if save_format:
        if 'avi' in save_format:
            clip.write_videofile(savename+'_edited'+'.avi', codec='png')
        elif 'mp4' in save_format:
            clip.write_videofile(savename+'_edited'+'.mp4', codec='mpeg4')
        elif 'gif' in save_format:
            clip.write_gif(savename+'_edited'+'.gif', fps=30)

    if ret:
        return clip
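
# A hedged usage sketch for cut_crop_video above: crop a 100x100-pixel box, keep
# the first ten seconds, and write an .avi copy. The input path is a placeholder,
# and the function needs `from moviepy.editor import VideoFileClip` in scope.
edited = cut_crop_video(vidpath='session1.mp4', save_format=['avi'],
                        cut=True, starts=0., fins=10.,
                        crop_sel=True, crop_coord=[0, 100, 0, 100], ret=True)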
    def CheckCropping(self):
        ''' Display frame at time "time" for video to check if cropping is fine.
        Select ROI of interest by adjusting values in myconfig.py

        USAGE for cropping:
        clip.crop(x1=None, y1=None, x2=None, y2=None, width=None, height=None, x_center=None, y_center=None)

        Returns a new clip in which just a rectangular subregion of the
        original clip is conserved. x1,y1 indicates the top left corner and
        x2,y2 is the lower right corner of the cropped region.

        All coordinates are in pixels. Float numbers are accepted.
        '''
        from skimage import io
        videosource = self.video_source
        filename = self.filename
        time = self.start
        crop_vals = self.cfg['video_sets'][videosource]['crop'].split(',')
        self.x1, self.x2, self.y1, self.y2 = (int(v) for v in crop_vals)

        clip = VideoFileClip(videosource)

        ny, nx = clip.size  # clip.size is (width, height)
        if self.cropping:
            # Select ROI of interest by adjusting values in myconfig.py
            clip = clip.crop(y1=self.y1, y2=self.y2, x1=self.x1, x2=self.x2)


#            time = start
        image = clip.get_frame(
            time * clip.duration
        )  #frame is accessed by index *1./clip.fps (fps cancels)
        fname = Path(filename)
        output_path = Path(
            self.config_path).parents[0] / 'labeled-data' / fname.stem
        if output_path.exists():
            saveimg = str(
                Path(self.config_path).parents[0] / 'labeled-data'
            ) + '/IsCroppingOK_' + str(
                fname
            ) + ".png"  #str(self.currFrame).zfill(int(np.ceil(np.log10(self.numberFrames)))) + '.png'
            io.imsave(saveimg, image)
            print(
                'Image cropped. Check %s to verify that the cropping is ok; otherwise change the parameters in the config file.'
                % saveimg)
        else:
            print(
                'The path %s does not exist in the config file. Use add function to add this video in the config file and retry.'
                % output_path)
            self.Close(True)
        return image
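
# CheckCropping above assumes each cfg['video_sets'][video]['crop'] entry is one
# comma-separated "x1, x2, y1, y2" string. A small sketch of that format (the
# values are hypothetical):
crop_string = "0, 640, 0, 480"
x1, x2, y1, y2 = (int(v) for v in crop_string.split(','))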
Example #4
 async def roundcmd(self, message):
     """.round <Reply to image/sticker or video/gif>"""
     reply = None
     if message.is_reply:
         reply = await message.get_reply_message()
         data = await check_media(reply)
         if isinstance(data, bool):
             await utils.answer(
                 message, "<b>Reply to image/sticker or video/gif!</b>")
             return
     else:
         await utils.answer(message,
                            "<b>Reply to image/sticker or video/gif!</b>")
         return
     data, media_type = data
     if media_type == "img":
         await message.edit("<b>Processing image</b>📷")
         img = io.BytesIO()
         await message.client.download_file(data, img)
         im = Image.open(img)
         w, h = im.size
         img = Image.new("RGBA", (w, h), (0, 0, 0, 0))
         img.paste(im, (0, 0))
         m = min(w, h)
         img = img.crop(
             ((w - m) // 2, (h - m) // 2, (w + m) // 2, (h + m) // 2))
         w, h = img.size
         mask = Image.new('L', (w, h), 0)
         draw = ImageDraw.Draw(mask)
         draw.ellipse((10, 10, w - 10, h - 10), fill=255)
         mask = mask.filter(ImageFilter.GaussianBlur(2))
         img = ImageOps.fit(img, (w, h))
         img.putalpha(mask)
         im = io.BytesIO()
         im.name = "img.webp"
         img.save(im)
         im.seek(0)
         await message.client.send_file(message.to_id, im, reply_to=reply)
     else:
         await message.edit("<b>Processing video</b>🎥")
         await message.client.download_file(data, "video.mp4")
         video = VideoFileClip("video.mp4")
         video.reader.close()
         w, h = video.size
         m = min(w, h)
         box = [(w - m) // 2, (h - m) // 2, (w + m) // 2, (h + m) // 2]
         video = video.crop(*box)
         await message.edit("<b>Saving video</b>📼")
         video.write_videofile("result.mp4")
         await message.client.send_file(message.to_id,
                                        "result.mp4",
                                        video_note=True,
                                        reply_to=reply)
         os.remove("video.mp4")
         os.remove("result.mp4")
     await message.delete()
Example #5
 def crop_video(self,
                source: str,
                destination: str,
                x1: float = None,
                x2: float = None,
                y1: float = None,
                y2: float = None) -> None:
     video = VideoFileClip(source)
     video = video.crop(x1=x1, x2=x2, y1=y1, y2=y2)
     video.write_videofile(destination)
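
# The method above is a thin wrapper over moviepy's crop, where x1,y1 is the
# top-left and x2,y2 the bottom-right corner in pixels. A standalone sketch with
# placeholder file names:
from moviepy.editor import VideoFileClip

clip = VideoFileClip('input.mp4')
clip = clip.crop(x1=10, y1=10, x2=310, y2=250)  # keep a 300x240-pixel region
clip.write_videofile('output.mp4')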
def select_frames():
    if start > 1.0 or stop > 1.0 or start < 0 or stop < 0 or start >= stop:
        raise ValueError('Please change start & stop, they should form a '
                         'normalized interval with 0 <= start < stop <= 1.')
    else:
        base_folder = os.path.join(video_path, 'data-' + task + '/')
        auxiliary_functions.attempt_to_make_folder(base_folder)
        videos = auxiliary_functions.get_video_list(filename, video_path,
                                                    video_type)
        for vindex, video in enumerate(videos):
            print("Loading ", video, '# ', str(vindex + 1), ' of ',
                  str(len(videos)))
            clip = VideoFileClip(os.path.join(video_path, video))
            # print("Duration of video [s], ", clip.duration, "fps, ", clip.fps,
            #       "Cropped frame dimensions: ", clip.size)

            # Create folder with experiment name and extract random frames
            folder = 'selected'
            v_name = video.split('.')[0]
            auxiliary_functions.attempt_to_make_folder(
                os.path.join(base_folder, folder))
            index_length = int(np.ceil(np.log10(clip.duration * clip.fps)))

            # extract first frame (uncropped) - useful for data augmentation
            # index = 0
            # image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
            # io.imsave(os.path.join(base_folder, folder, 'img' + v_name + '-'
            #                        + str(index).zfill(index_length) + '.png'), image)

            if cropping is True:
                clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)

            # print("Extracting frames")
            if selection_algorithm == 'uniform':
                frames_to_pick = frame_selection_tools.uniform_frames(
                    clip, num_frames_to_pick, start, stop)
            elif selection_algorithm == 'kmeans':
                frames_to_pick = frame_selection_tools.k_means_based_frame_selection(
                    clip, num_frames_to_pick, start, stop)
            else:
                print('not implemented')
                frames_to_pick = []

            index_length = int(np.ceil(np.log10(clip.duration * clip.fps)))
            for index in frames_to_pick:
                try:
                    image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                    io.imsave(
                        os.path.join(
                            base_folder, folder, 'img' + v_name + '-' +
                            str(index).zfill(index_length) + '.png'), image)
                except FileNotFoundError:
                    print('Frame # ', index, ' does not exist.')

            clip.close()
Example #7
def process_video(filename, overwrite=False, max_width=1600, max_height=1600, max_file_size=5*1024**2, gifdir='gifs/'):

    gif_name = gifdir + filename + '.gif'

    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return
    
    video_file = VideoFileClip(filename)

    try:
        assert_approx_equal(float(video_file.w)/float(video_file.h),16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        print("Not resizing video.")

    if video_file.h > max_height:
        video_file = video_file.resize(height=max_height)

    if video_file.w > max_width:
        video_file = video_file.resize(width=max_width)

    end_image = video_file.to_ImageClip(video_file.end-(1/video_file.fps)).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])
    fadein_video_file = CompositeVideoClip(
        [video_file,
         (video_file.to_ImageClip()
          .set_duration(0.7)
          .crossfadein(0.4)
          .set_start(video_file.duration-0.7)),
     ]
    )
    
    logo_size = video_file.h/6
    text = ImageClip(
        expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(
            video_file.duration).resize(width=logo_size).set_pos(
                (video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([fadein_video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > max_file_size:
        process_video(filename,
                      max_height=video_file.h*0.95,
                      overwrite=True,
                      gifdir=gifdir,
                      max_file_size=max_file_size)
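
# A hedged invocation sketch for process_video above. It assumes gifsicle and
# ImageMagick's `convert` are on PATH and that the gifdir directory exists; the
# file name is a placeholder.
process_video('talk_clip.mp4', overwrite=True, max_width=800, max_height=450,
              max_file_size=5 * 1024**2, gifdir='gifs/')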
Example #8
def cutVideos():
    '''
    Video cropping (reference: https://www.freesion.com/article/3449998295/)
    crop(clip, x1=None, y1=None, x2=None, y2=None, width=None,
         height=None, x_center=None, y_center=None)
    '''
    # Resize the video to a fixed size, then crop it
    clip1 = VideoFileClip("mhls.mp4").resize([540, 960])
    clip1 = clip1.crop(0, 200, 540, 760)
    clip1.write_videofile("cut_video.mp4")

    clip2 = VideoFileClip("cut_video.mp4")
    print(clip2.size)
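
# Note that clip1.crop(0, 200, 540, 760) above passes positional arguments, which
# the signature quoted in the docstring maps to x1=0, y1=200, x2=540, y2=760,
# i.e. a 540x560-pixel region. The keyword form below is equivalent and harder to
# misread:
clip1 = VideoFileClip("mhls.mp4").resize([540, 960])
clip1 = clip1.crop(x1=0, y1=200, x2=540, y2=760)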
Example #9
def video_to_gif(input_file,
                 output_file=None,
                 start=0,
                 end=None,
                 scale=DEFAULT_SCALE,
                 crop=None,
                 fps=None):
    if output_file is None:
        output_file = splitext(input_file)[0] + ".gif"

    clip = VideoFileClip(input_file).subclip(start, end).resize(scale)

    if crop:
        clip = clip.crop(**crop)

    clip.write_gif(output_file, fps=fps)
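
# A usage sketch for video_to_gif above: the crop dict is forwarded verbatim to
# clip.crop() as keyword arguments. The input file is a placeholder; scale falls
# back to the module's DEFAULT_SCALE.
video_to_gif('demo.mp4', start=2, end=7, fps=12,
             crop={'x1': 0, 'y1': 0, 'x2': 320, 'y2': 240})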
def write_cropped_video(cwd, video, interval, length, end):
    clip = VideoFileClip(cwd + '/' + video)
    try:
        with open(cwd + '/' + 'config.py', 'r+') as f:
            coords = ['x0 = \n', 'y0 = \n', 'x1 = \n', 'y1 = \n']
            intcoords = []
            for coord in range(len(coords)):
                coords[coord] = f.readline()
                nums = re.findall(r'\d+', coords[coord])[1]
                intcoords.append(int(nums))  # ints, so crop() receives numbers

        print(video.split('.')[0] + 'cropped.avi')
        cropped = clip.crop(x1=intcoords[0],
                            y1=intcoords[1],
                            x2=intcoords[2],
                            y2=intcoords[3])
        ## We want to split our video into manageable segments.
        ## Account for the case that our video analysis failed somewhere in the middle:
        # We want to be able to extract out the things that have been done so far:

        for segment in interval:
            print('moving to ' + str(segment) + ' of ' + str(interval))
            try:
                # Ensures that the last clip is the right length
                if segment == end:  # Corner case for the last video segment.

                    endseg = -1
                else:
                    endseg = length * (segment + 1)
                cropped_cutout = cropped.subclip(t_start=segment * length,
                                                 t_end=endseg)
                cropped_cutout.write_videofile(
                    cwd + '/' + video.split('.')[0] + 'cropped_' + 'part' +
                    str(segment) + '.mp4',
                    codec='mpeg4',
                    bitrate="1500k",
                    threads=2,
                    logger=None)

            except OSError as e:
                print('segment not viable')
            gc.collect()
    except OSError as e:

        print(e.errno)
        print('configuration not loaded')
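
# A hypothetical call to write_cropped_video above, splitting a video into three
# 1200-second segments; `end` names the last segment index so its subclip is
# handled by the corner case in the loop. Directory and file names are placeholders.
write_cropped_video(cwd='/data/session1', video='trial.avi',
                    interval=range(3), length=1200, end=2)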
def CheckCropping(videopath,
                  filename,
                  x1,
                  x2,
                  y1,
                  y2,
                  cropping,
                  videotype,
                  time=start):  # 'start' presumably comes from a module-level myconfig import
    ''' Display frame at time "time" for video to check if cropping is fine. 
    Select ROI of interest by adjusting values in myconfig.py
    
    USAGE for cropping:
    clip.crop(x1=None, y1=None, x2=None, y2=None, width=None, height=None, x_center=None, y_center=None)
    
    Returns a new clip in which just a rectangular subregion of the
    original clip is conserved. x1,y1 indicates the top left corner and
    x2,y2 is the lower right corner of the cropped region.
    
    All coordinates are in pixels. Float numbers are accepted.
    '''
    videos = auxiliaryfunctions.GetVideoList(filename, videopath, videotype)
    if filename != 'all':
        videotype = filename.split('.')[1]

    for vindex, video in enumerate(videos):
        clip = VideoFileClip(os.path.join(videopath, video))
        print("Extracting ", video)

        ny, nx = clip.size  # clip.size is (width, height)
        if cropping:
            # Select ROI of interest by adjusting values in myconfig.py
            clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)

        image = clip.get_frame(
            time * clip.duration
        )  #frame is accessed by index *1./clip.fps (fps cancels)
        io.imsave("IsCroppingOK" + video.split('.')[0] + ".png", image)

        if vindex == len(videos) - 1:
            print(
                "--> Open the CroppingOK-videofilename-.png file(s) to set the output range! <---"
            )
            print("--> Adjust shiftx, shifty, fx and fy accordingly! <---")
    return image
Example #12
def process_video(filename, video_height=480, overwrite=False):

    gif_name = 'gifs/' + filename + '.gif'

    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return
    
    video_file = VideoFileClip(filename)

    try:
        assert_approx_equal(float(video_file.w)/float(video_file.h),16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        print("Not resizing video.")


    video_file = video_file.resize(height=video_height)

    end_image = video_file.to_ImageClip(0).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])

    logo_size = video_height/6
    text = ImageClip(expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(video_file.duration).resize(width=logo_size).set_pos((video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > 5*1024**2:
        process_video(filename, video_height=video_height*0.75, overwrite=True)
Example #13
def extract_frames(config,mode,algo='uniform',crop=False,checkcropping=False):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.\n
    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.
    
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
        
    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.
        
    algo : string 
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only required for ``automatic`` mode and the default is ``uniform``.
        
    crop : bool, optional
        If this is set to True, the selected frames are cropped based on the ``crop`` parameters in the config.yaml file. 
        The default is ``False``; if provided it must be either ``True`` or ``False``.
        
    checkcropping: bool, optional
        If this is set to True, the cropping parameters are overlayed in a plot of the first frame to check and the user can decide if the program should proceed 
        with those parameters, or perhaps edit them. The default is ``False``; if provided it must be either ``True`` or ``False``.
        
    Examples
    --------
    To select frames automatically with 'kmeans' and crop them based on the ``crop`` parameters in config.yaml:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    To select frames automatically with 'uniform' and crop them based on the ``crop`` parameters in config.yaml:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    To select frames automatically with 'uniform', crop them based on the ``crop`` parameters in config.yaml, and check the cropping first:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True,checkcropping=True)
    --------
    To select frames manually:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphical user interface to choose
    if you need to crop or not.
    --------
    
    """
    import os
    import sys
    import yaml
    import numpy as np
    from moviepy.editor import VideoFileClip
    from pathlib import Path
    from skimage import io
    from skimage.util import img_as_ubyte
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    
    from deeplabcut.generate_training_dataset import frameselectiontools

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.generate_training_dataset import frame_extraction_toolbox 
        frame_extraction_toolbox.show(config)
        
    elif mode == "automatic":
        config_file = Path(config).resolve()
        with open(str(config_file), 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)  # safe_load avoids PyYAML's unsafe default loader
        print("Reading config file successfully...")
        
        numframes2pick = cfg['numframes2pick']
        start = cfg['start']
        stop = cfg['stop']
        
        # Check for variable correctness
        if start>1 or stop>1 or start<0 or stop<0 or start>=stop:
            raise Exception("Erroneous start or stop values. Please correct it in the config file.")
        if numframes2pick<1 and not int(numframes2pick):
            raise Exception("Perhaps consider extracting more, or a natural number of frames.")
            
        videos = cfg['video_sets'].keys()
        for vindex,video in enumerate(videos):
            plt.close("all")
            #update to openCV
            clip = VideoFileClip(video)
            indexlength = int(np.ceil(np.log10(clip.duration * clip.fps)))
            if crop:
                print("Make sure you change the crop parameters in the config.yaml file. The default parameters are set to the video dimensions.")
                coords = cfg['video_sets'][video]['crop'].split(',')
                image = clip.get_frame(start*clip.duration) #frame is accessed by index *1./clip.fps (fps cancels)
                
                fname = Path(video)
                output_path = Path(config).parents[0] / 'labeled-data' / fname.stem
                
                if output_path.exists() and checkcropping:
                    fig,ax = plt.subplots(1)
                    # Display the image
                    ax.imshow(image)
                    # Create a Rectangle 
                    x1, x2, y1, y2 = [int(s) for s in coords]
                    rect = patches.Rectangle((x1, y1), x2-x1, y2-y1,
                                             linewidth=3,
                                             edgecolor='r',
                                             facecolor='none')
                    # Add the patch to the Axes
                    ax.add_patch(rect)
                    plt.show()
                    
                    print("The red boundary indicates how the cropped image will look.")
                    #saveimg = str(Path(config).parents[0] / Path('labeled-data','IsCroppingOK_'+fname.stem +".png")) 
                    #io.imsave(saveimg, image)
                    
                    msg = input("Is the cropping ok? (yes/no): ")
                    if msg.lower() in ("yes", "y"):
                        if len(os.listdir(output_path)) == 0:  # check if empty
                            # store full frame (good for augmentation)
                            index = np.random.randint(int(clip.duration * clip.fps / 2))
                            image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                            output_path = Path(config).parents[0] / 'labeled-data' / Path(video).stem
                            saveimg = str(output_path) + '/img' + str(index).zfill(indexlength) + ".png"
                            io.imsave(saveimg, image)

                            # crop and move on with extraction of frames:
                            clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)
                        else:
                            askuser = input("The directory already contains some frames. Do you want to add to it? (yes/no): ")
                            if askuser.lower() in ('y', 'yes'):
                                clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)
                            else:
                                sys.exit("Delete the frames and try again later!")
                    else:
                        sys.exit("Correct the crop parameters in the config.yaml file and try again!")
                
                elif output_path.exists():  # cropping without checking
                    coords = cfg['video_sets'][video]['crop'].split(',')
                    index = np.random.randint(int(clip.duration * clip.fps / 2))
                    image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                    output_path = Path(config).parents[0] / 'labeled-data' / Path(video).stem
                    saveimg = str(output_path) + '/img' + str(index).zfill(indexlength) + ".png"
                    io.imsave(saveimg, image)

                    # crop and move on with extraction of frames:
                    clip = clip.crop(y1=int(coords[2]), y2=int(coords[3]),
                                     x1=int(coords[0]), x2=int(coords[1]))
            
            print("Extracting frames based on %s ..." %algo)
            if algo =='uniform': #extract n-1 frames (0 was already stored)
                frames2pick=frameselectiontools.UniformFrames(clip,numframes2pick-1,start,stop)
            elif algo =='kmeans':
                frames2pick=frameselectiontools.KmeansbasedFrameselection(clip,numframes2pick-1,start,stop)
            else:
                print("Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'.")
                frames2pick=[]
            
            indexlength = int(np.ceil(np.log10(clip.duration * clip.fps))) 
            for index in frames2pick:
                try:
                    image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                    output_path = Path(config).parents[0] / 'labeled-data' / Path(video).stem
                    img_name = str(output_path) +'/img'+ str(index).zfill(indexlength) + ".png"
                    io.imsave(img_name,image)
                except FileNotFoundError:
                    print("Frame # ", index, " does not exist.")
            
            #close video. 
            clip.close()
            del clip
    else:
        print("Invalid MODE. Choose either 'manual' or 'automatic'. Check ``help(deeplabcut.extract_frames)`` on python and ``deeplabcut.extract_frames?`` \
              for ipython/jupyter notebook for more details.")
    
    print("\nFrames are selected.\nYou can now label the frames using the function 'label_frames'.")
def crop_videos(cwd=None):
    length = 1200
    if cwd is None:
        # First get current directory
        cwd = os.getcwd()
    # First get all subdirectories:
    print(cwd)
    all_sub = next(os.walk(cwd))[1]
    print(all_sub[5:])
    for sub in all_sub:
        files = os.listdir(cwd + '/' + sub)
        # Only look at videos
        videos = [video for video in files if video.split('.')[-1] == 'avi']
        print(videos)
        for video in videos:
            ## Load in cropped version of
            print('loading ' + video)
            #clip = VideoFileClip(cwd+'/'+sub+'/'+video)
            print(cwd, sub)
            clip = VideoFileClip(cwd + '/' + sub + "/" + video)
            print('IT LOADED')
            try:
                with open(
                        cwd + '/' + sub + "/" + video.split('.')[0] +
                        'config.py', 'r+') as f:
                    coords = ['x0 = \n', 'y0 = \n', 'x1 = \n', 'y1 = \n']
                    intcoords = []
                    for coord in range(len(coords)):
                        coords[coord] = f.readline()
                        nums = re.findall(r'\d+', coords[coord])[1]
                        intcoords.append(int(nums))  # ints, so crop() receives numbers

                print(video.split('.')[0] + 'cropped.avi')
                cropped = clip.crop(x1=intcoords[0],
                                    y1=intcoords[1],
                                    x2=intcoords[2],
                                    y2=intcoords[3])
                ## We want to split our video into manageable segments.
                ## Account for the case that our video analysis failed somewhere in the middle:
                # We want to be able to extract out the things that have been done so far:

                ## First get the duration in seconds:
                seconds = cropped.duration
                # If analysis has been found:
                ident = video.split('.')[0] + 'cropped_' + 'part'
                print(files, ident)
                print([part for part in files if ident in part.split('.')[0]])
                if len([part for part in files
                        if ident in part.split('.')[0]]):
                    done = [
                        int(re.findall(r'\d+',
                                       part.split('.')[0])[-1])
                        for part in files if ident in part.split('.')[0]
                    ]
                    presegs = range(np.ceil(seconds / length).astype(int))
                    print(done)
                    print(presegs)
                    segments = [
                        segment for segment in presegs if segment not in done
                    ]
                    print(segments)

                else:
                    segments = range(
                        np.ceil(seconds / length).astype(int)
                    )  # rounds up to give the number of distinct segments we need
                    presegs = segments
                for segment in segments:
                    try:
                        # Ensures that the last clip is the right length
                        print("producing segment " + str(segment) + 'of ' +
                              str(segments))
                        if segment == presegs[-1]:

                            endseg = -1
                        else:
                            endseg = length * (segment + 1)
                        cropped_cutout = cropped.subclip(t_start=segment *
                                                         length,
                                                         t_end=endseg)
                        cropped_cutout.write_videofile(
                            cwd + '/' + sub + '/' + video.split('.')[0] +
                            'cropped_' + 'part' + str(segment) + '.mp4',
                            codec='mpeg4',
                            bitrate="1500k",
                            threads=2)
                    except OSError as e:
                        print('segment not viable')
                    gc.collect()
            except OSError as e:

                print(e.errno)
                print('configuration not loaded')
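
# crop_videos above reads four lines from a per-video config.py and takes
# re.findall(r'\d+', line)[1]; index [1] is needed because the first regex match
# is the digit inside the coordinate name itself. A sketch of the assumed file
# layout (values hypothetical):
#     x0 = 10
#     y0 = 20
#     x1 = 400
#     y1 = 300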
def crop_videos_special(videopath, videoname, directorypath, length=1200):
    # length: seconds per video segment (same default as crop_videos above)
    videoid = videopath + '/' + videoname
    files = os.listdir(directorypath)
    ## Load in cropped version of
    print('loading ' + videoid)
    clip = VideoFileClip(videoid)
    try:
        with open(directorypath + '/' + videoname + 'config.py', 'r+') as f:
            coords = ['x0 = \n', 'y0 = \n', 'x1 = \n', 'y1 = \n']
            intcoords = []
            for coord in range(len(coords)):
                coords[coord] = f.readline()
                nums = re.findall(r'\d+', coords[coord])[1]
                intcoords.append(int(nums))  # ints, so crop() receives numbers

        print(videoname + 'cropped.avi')
        cropped = clip.crop(x1=intcoords[0],
                            y1=intcoords[1],
                            x2=intcoords[2],
                            y2=intcoords[3])
        ## We want to split our video into manageable segments.
        ## Account for the case that our video analysis failed somewhere in the middle:
        # We want to be able to extract out the things that have been done so far:

        ## First get the duration in seconds:
        seconds = cropped.duration
        # If analysis has been found:
        # We need to identify the specific video that we are building from.

        ident = videoname + 'cropped_' + 'part'
        print(files, ident)
        print([part for part in files if ident in part.split('.')[0]])
        if len([part for part in files if ident in part.split('.')[0]]):
            done = [
                int(re.findall(r'\d+',
                               part.split('.')[0])[-1]) for part in files
                if ident in part.split('.')[0]
            ]
            presegs = range(np.ceil(seconds / length).astype(int))
            print(done)
            print(presegs)
            segments = [segment for segment in presegs if segment not in done]
            print(segments)

        else:

            segments = range(
                np.ceil(seconds / length).astype(int)
            )  # rounds up to give the number of distinct segments we need
            presegs = segments
        for segment in segments:
            try:
                # Ensures that the last clip is the right length
                print("producing segment " + str(segment) + 'of ' +
                      str(segments))
                if segment == presegs[-1]:

                    endseg = -1
                else:
                    endseg = length * (segment + 1)
                cropped_cutout = cropped.subclip(t_start=segment * length,
                                                 t_end=endseg)
                cropped_cutout.write_videofile(
                    directorypath + '/' + videoname + 'cropped_' + 'part' +
                    str(segment) + '.mp4',
                    codec='mpeg4',
                    bitrate="1500k",
                    threads=2)
            except OSError as e:
                print('segment not viable')
            gc.collect()
    except OSError as e:
        print(e.errno)
        print('configuration not loaded')
Example #16
def ExtractFramesbasedonPreselection(Index,
                                     extractionalgorithm,
                                     Dataframe,
                                     dataname,
                                     scorer,
                                     video,
                                     cfg,
                                     config,
                                     opencv=True,
                                     cluster_resizewidth=30,
                                     cluster_color=False,
                                     savelabeled=True):
    from deeplabcut.create_project import add
    start = cfg['start']
    stop = cfg['stop']
    numframes2extract = cfg['numframes2pick']
    bodyparts = cfg['bodyparts']

    videofolder = str(Path(video).parents[0])
    vname = str(Path(video).stem)
    tmpfolder = os.path.join(cfg['project_path'], 'labeled-data', vname)
    if os.path.isdir(tmpfolder):
        print("Frames from video", vname,
              " already extracted (more will be added)!")
    else:
        auxiliaryfunctions.attempttomakefolder(tmpfolder)

    nframes = np.size(Dataframe.index)
    print("Loading video...")
    if opencv:
        import cv2
        cap = cv2.VideoCapture(video)
        fps = cap.get(5)  # CAP_PROP_FPS
        duration = nframes * 1. / fps
        size = (int(cap.get(4)), int(cap.get(3)))  # (height, width)
    else:
        from moviepy.editor import VideoFileClip
        clip = VideoFileClip(video)
        fps = clip.fps
        duration = clip.duration
        size = clip.size

    if cfg['cropping']:  # one might want to adjust
        coords = (cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2'])
    else:
        coords = None

    print("Duration of video [s]: ", duration, ", recorded @ ", fps, "fps!")
    print(
        "Overall # of frames: ",
        nframes,
        "with (cropped) frame dimensions: ",
    )
    if extractionalgorithm == 'uniform':
        if opencv:
            frames2pick = frameselectiontools.UniformFramescv2(
                cap, numframes2extract, start, stop, Index)
        else:
            frames2pick = frameselectiontools.UniformFrames(
                clip, numframes2extract, start, stop, Index)
    elif extractionalgorithm == 'kmeans':
        if opencv:
            frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                cap,
                numframes2extract,
                start,
                stop,
                cfg['cropping'],
                coords,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color)
        else:
            if cfg['cropping']:
                clip = clip.crop(y1=cfg['y1'],
                                 y2=cfg['y2'],
                                 x1=cfg['x1'],
                                 x2=cfg['x2'])
            frames2pick = frameselectiontools.KmeansbasedFrameselection(
                clip,
                numframes2extract,
                start,
                stop,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color)

    else:
        print(
            "Please implement this method yourself! Currently the options are 'kmeans', 'jump', 'uniform'."
        )
        frames2pick = []

    # Extract frames + frames with plotted labels and store them in a folder (with name derived from the video name) under labeled-data
    print("Let's select frame indices:", frames2pick)
    colors = visualization.get_cmap(len(bodyparts), cfg['colormap'])
    strwidth = int(np.ceil(np.log10(nframes)))  #width for strings
    for index in frames2pick:  ##tqdm(range(0,nframes,10)):
        if opencv:
            PlottingSingleFramecv2(cap, cv2, cfg['cropping'], coords,
                                   Dataframe, bodyparts, tmpfolder, index,
                                   scorer, cfg['dotsize'], cfg['pcutoff'],
                                   cfg['alphavalue'], colors, strwidth,
                                   savelabeled)
        else:
            PlottingSingleFrame(clip, Dataframe, bodyparts, tmpfolder, index,
                                scorer, cfg['dotsize'], cfg['pcutoff'],
                                cfg['alphavalue'], colors, strwidth,
                                savelabeled)
        plt.close("all")

    #close videos
    if opencv:
        cap.release()
    else:
        clip.close()
        del clip

    # Extract annotations based on DeepLabCut and store in the folder (with name derived from video name) under labeled-data
    if len(frames2pick) > 0:
        #Dataframe = pd.read_hdf(os.path.join(videofolder,dataname+'.h5'))
        DF = Dataframe.loc[frames2pick]  # label-based row selection (pandas removed .ix)
        DF.index = [
            os.path.join('labeled-data', vname,
                         "img" + str(index).zfill(strwidth) + ".png")
            for index in DF.index
        ]  #exchange index number by file names.

        machinefile = os.path.join(
            tmpfolder, 'machinelabels-iter' + str(cfg['iteration']) + '.h5')
        if Path(machinefile).is_file():
            Data = pd.read_hdf(machinefile, 'df_with_missing')
            DataCombined = pd.concat([Data, DF])
            #drop duplicate labels:
            DataCombined = DataCombined[~DataCombined.index.duplicated(
                keep='first')]

            DataCombined.to_hdf(machinefile, key='df_with_missing', mode='w')
            DataCombined.to_csv(
                os.path.join(tmpfolder, "machinelabels.csv")
            )  #this is always the most current one (as reading is from h5)
        else:
            DF.to_hdf(machinefile, key='df_with_missing', mode='w')
            DF.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))
        try:
            if cfg['cropping']:
                add.add_new_videos(
                    config, [video],
                    coords=[coords])  # make sure you pass coords as a list
            else:
                add.add_new_videos(config, [video], coords=None)
        except Exception:  #can we make a catch here? - in fact we should drop indices from DataCombined if they are in CollectedData.. [ideal behavior; currently this is pretty unlikely]
            print(
                "AUTOMATIC ADDING OF VIDEO TO CONFIG FILE FAILED! You need to do this manually for including it in the config.yaml file!"
            )
            print("Videopath:", video, "Coordinates for cropping:", coords)
            pass

        print(
            "The outlier frames are extracted. They are stored in the subdirectory labeled-data/%s."
            % vname)
        print(
            "Once you extracted frames for all videos, use 'refine_labels' to manually correct the labels."
        )
    else:
        print("No frames were extracted.")
Example #17
def AnalyzeVideosTrial(video_file):
    """
    DeepLabCut Toolbox
    https://github.com/AlexEMG/DeepLabCut

    A Mathis, [email protected]
    M Mathis, [email protected]

    This script analyzes videos based on a trained network (as specified in myconfig_analysis.py)

    You need tensorflow for evaluation. Run by:

    python3 AnalyzeVideosTrial.py video_file

    Functionalized by Adam S. Lowet, 10/25/19
    """

    ####################################################
    # Dependencies
    ####################################################

    import os.path
    import sys
    subfolder = os.getcwd().split('analysis-tools')[0]
    sys.path.append(subfolder)
    # add parent directory: (where nnet & config are!)
    sys.path.append(os.path.join(subfolder, "pose-tensorflow"))
    sys.path.append(os.path.join(subfolder, "config"))

    from myconfig_analysis import cropping, Task, date, \
        trainingsFraction, resnet, snapshotindex, shuffle,x1, x2, y1, y2, videotype, storedata_as_csv

    # Deep-cut dependencies
    from config import load_config
    from nnet import predict
    from dataset.pose_dataset import data_to_input

    # Dependencies for video:
    import pickle
    # import matplotlib.pyplot as plt
    import imageio
    from skimage.util import img_as_ubyte
    from moviepy.editor import VideoFileClip
    import skimage
    import skimage.color
    import time
    import pandas as pd
    import numpy as np
    import os
    from tqdm import tqdm


    def getpose(image, cfg, outputs, outall=False):
        ''' Adapted from DeeperCut, see pose-tensorflow folder'''
        image_batch = data_to_input(skimage.color.gray2rgb(image))
        outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
        scmap, locref = predict.extract_cnn_output(outputs_np, cfg)
        pose = predict.argmax_pose_predict(scmap, locref, cfg.stride)
        if outall:
            return scmap, locref, pose
        else:
            return pose


    ####################################################
    # Loading data, and defining model folder
    ####################################################

    basefolder = os.path.join('..','..','pose-tensorflow','models')
    modelfolder = os.path.join(basefolder, Task + str(date) + '-trainset' +
                   str(int(trainingsFraction * 100)) + 'shuffle' + str(shuffle))

    cfg = load_config(os.path.join(modelfolder , 'test' ,"pose_cfg.yaml"))

    ##################################################
    # Load and setup CNN part detector
    ##################################################

    # Check which snapshots are available and sort them by # iterations
    Snapshots = np.array([
        fn.split('.')[0]
        for fn in os.listdir(os.path.join(modelfolder , 'train'))
        if "index" in fn
    ])
    increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
    Snapshots = Snapshots[increasing_indices]

    print(modelfolder)
    print(Snapshots)

    ##################################################
    # Compute predictions over images
    ##################################################

    # Check if data already was generated:
    cfg['init_weights'] = os.path.join(modelfolder, 'train', Snapshots[snapshotindex])

    # Name for scorer:
    trainingsiterations = (cfg['init_weights'].split('/')[-1]).split('-')[-1]
    scorer = 'DeepCut' + "_resnet" + str(resnet) + "_" + Task + str(
        date) + 'shuffle' + str(shuffle) + '_' + str(trainingsiterations)

    sess, inputs, outputs = predict.setup_pose_prediction(cfg)
    pdindex = pd.MultiIndex.from_product(
        [[scorer], cfg['all_joints_names'], ['x', 'y', 'likelihood']],
        names=['scorer', 'bodyparts', 'coords'])

    ##################################################
    # Datafolder
    ##################################################

    # video_dir='../videos/' #where your folder with videos is.
    frame_buffer = 10

    #os.chdir(video_dir)
    #videos = np.sort([fn for fn in os.listdir(os.curdir) if (videotype in fn)])
    #print("Starting ", video_dir, videos)
    #for video in videos:
    video = video_file
    dataname = video.split('.')[0] + scorer + '.h5'
    try:
        # Attempt to load data...
        pd.read_hdf(dataname)
        print("Video already analyzed!", dataname)
    except FileNotFoundError:
        print("Loading ", video)
        clip = VideoFileClip(video)
        ny, nx = clip.size  # clip.size is (width, height)
        fps = clip.fps
        #nframes = np.sum(1 for j in clip.iter_frames()) #this is slow (but accurate)
        nframes_approx = int(np.ceil(clip.duration * clip.fps) + frame_buffer)
        # this will overestimate the number of frames (see https://github.com/AlexEMG/DeepLabCut/issues/9). This is especially a problem
        # for high frame rates and long durations due to rounding errors (as Rich Warren found). Later we crop the result.
        
        if cropping:
            clip = clip.crop(
                y1=y1, y2=y2, x1=x1, x2=x2)  # one might want to adjust

        print("Duration of video [s]: ", clip.duration, ", recorded with ", fps,
              "fps!")
        print("Overall # of frames: ", nframes_approx,"with cropped frame dimensions: ", clip.size)

        start = time.time()
        PredicteData = np.zeros((nframes_approx, 3 * len(cfg['all_joints_names'])))
        clip.reader.initialize()
        print("Starting to extract posture")
        for index in tqdm(range(nframes_approx)):
            #image = img_as_ubyte(clip.get_frame(index * 1. / fps))
            image = img_as_ubyte(clip.reader.read_frame())
            # Thanks to Rick Warren for the following snippet:
            # if close to end of video, start checking whether two adjacent frames are identical
            # this should only happen when moviepy has reached the final frame
            # if two adjacent frames are identical, terminate the loop
            if index==int(nframes_approx-frame_buffer*2):
                last_image = image
            elif index>int(nframes_approx-frame_buffer*2):
                if (image==last_image).all():
                    nframes = index
                    print("Detected frames: ", nframes)
                    break
                else:
                    last_image = image
            pose = getpose(image, cfg, outputs)
            PredicteData[index, :] = pose.flatten()  # NOTE: thereby cfg['all_joints_names'] should be same order as bodyparts!

        stop = time.time()

        dictionary = {
            "start": start,
            "stop": stop,
            "run_duration": stop - start,
            "Scorer": scorer,
            "config file": cfg,
            "fps": fps,
            "frame_dimensions": (ny, nx),
            "nframes": nframes
        }
        metadata = {'data': dictionary}

        print("Saving results...")
        DataMachine = pd.DataFrame(PredicteData[:nframes,:], columns=pdindex, index=range(nframes)) #slice pose data to have same # as # of frames.
        DataMachine.to_hdf(dataname, 'df_with_missing', format='table', mode='w')
        
        if storedata_as_csv:
            DataMachine.to_csv(video.split('.')[0] + scorer+'.csv')
        
        with open(dataname.split('.')[0] + 'includingmetadata.pickle',
                  'wb') as f:
            pickle.dump(metadata, f, pickle.HIGHEST_PROTOCOL)
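
# A hedged follow-up sketch: the pose table written above can be read back with
# pandas. Columns form a (scorer, bodyparts, coords) MultiIndex whose coords level
# holds x, y and likelihood. The file name is a placeholder and 'nose' a
# hypothetical bodypart.
import pandas as pd

df = pd.read_hdf('myvideoDeepCut_resnet50_taskshuffle1_100000.h5')
scorer = df.columns.levels[0][0]  # recover the scorer name from the table itself
nose_x = df[scorer]['nose']['x']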
Example #18
def process_video(url, output, i):
    agent = ImageProcessor(i)
    clip = VideoFileClip(url)
    clip = clip.crop(x_center=600, y_center=360, width=700, height=700)
    clip = clip.fl_image(agent.process)
    clip.write_videofile(output, audio=False)
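
# A minimal usage sketch for process_video above. ImageProcessor comes from the
# snippet's own project and is not shown here, so this only illustrates the
# calling convention; the paths are placeholders.
process_video('input.mp4', 'output.mp4', 0)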
def extract_frames(config,
                   mode='automatic',
                   algo='kmeans',
                   crop=False,
                   userfeedback=True,
                   cluster_step=1,
                   cluster_resizewidth=30,
                   cluster_color=False,
                   opencv=True,
                   flymovie=False,
                   slider_width=25):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.\n
    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.
    
    The provided function either selects frames from the videos randomly in a temporally uniform way (uniform),
    by clustering based on visual appearance (k-means), or by manual selection.
    
    Three important parameters for automatic extraction: numframes2pick, start and stop are set in the config file. 
    
    Please refer to the user guide for more details on methods and parameters https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf
    
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
        
    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.
        
    algo : string 
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only required for ``automatic`` mode and the default is ``uniform``. For uniform, frames are picked in temporally uniform way, kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this. 
        
    crop : bool, optional
        If this is set to True, a user interface pops up with a frame to select the cropping parameters. Use the left click to draw a cropping area and hit the button set cropping parameters to save the cropping parameters for a video.
        The default is ``False``; if provided it must be either ``True`` or ``False``.
            
    userfeedback: bool, optional
        If this is set to false during automatic mode then frames for all videos are extracted. The user can set this to true, which will result in a dialog,
        where the user is asked for each video if (additional/any) frames from this video should be extracted. Use this, e.g. if you have already labeled
        some folders and want to extract data for new videos. 
    
    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).
    
    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could only use every nth frame (set by: cluster_step). This saves memory before clustering can start, however, 
        reading the individual frames takes longer due to the skipping.
    
    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases 
        the computational complexity. 
    
    opencv: bool, default: True
        Uses openCV for loading & extraction (otherwise moviepy (legacy))
        
    slider_width: number, default: 25
        Width of the video frames slider, in percent of window
        
    Examples
    --------
    To select frames automatically with 'kmeans' and crop the frames:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    To select frames automatically with 'kmeans' while considering color information:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    To select frames automatically with 'uniform' and crop the frames:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    To select frames manually:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    --------
    To select frames manually, with a 60%-wide frames slider:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual', slider_width=60)

    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphical user interface to choose
    if you need to crop or not.
    --------
    
    """
    import os
    import sys
    import numpy as np
    from pathlib import Path
    from skimage import io
    import skimage
    from skimage.util import img_as_ubyte
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    from deeplabcut.utils import frameselectiontools
    from deeplabcut.utils import auxiliaryfunctions
    from matplotlib.widgets import RectangleSelector

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.generate_training_dataset import frame_extraction_toolbox
        from deeplabcut.utils import select_crop_parameters
        frame_extraction_toolbox.show(config, slider_width)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")

        numframes2pick = cfg['numframes2pick']
        start = cfg['start']
        stop = cfg['stop']

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct them in the config file."
            )
        if numframes2pick < 1 or numframes2pick != int(numframes2pick):
            raise Exception(
                "numframes2pick must be a natural number (>= 1). Please correct it in the config file."
            )

        videos = cfg['video_sets'].keys()
        if opencv:
            import cv2
        elif flymovie:
            from motmot.FlyMovieFormat import FlyMovieFormat as FMF
            import cv2
        else:
            from moviepy.editor import VideoFileClip
        for vindex, video in enumerate(videos):
            #plt.close("all")
            global coords
            coords = cfg['video_sets'][video]['crop'].split(',')

            if userfeedback:
                print(
                    "Do you want to extract (perhaps additional) frames for video:",
                    video, "?")
                askuser = input("yes/no")
            else:
                askuser = "******"

            if askuser == 'y' or askuser == 'yes' or askuser == 'Ja' or askuser == 'ha':  # multilanguage support :)
                if opencv:
                    cap = cv2.VideoCapture(video)
                    fps = cap.get(
                        5
                    )  #https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
                    nframes = int(cap.get(7))
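                    # note: properties 5 and 7 used above are CAP_PROP_FPS and CAP_PROP_FRAME_COUNT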
                    duration = nframes * 1. / fps
                elif flymovie:
                    cap = FMF.FlyMovie(video)
                    nframes = cap.n_frames
                    while True:
                        try:
                            cap.get_frame(nframes)
                        except FMF.NoMoreFramesException:
                            nframes -= 1
                            continue
                        break
                    fps = 1. / (cap.get_frame(min(100, nframes))[1] -
                                cap.get_frame(min(100, nframes) - 1)[1])
                    duration = cap.get_frame(nframes)[1]
                else:
                    #Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    duration = clip.duration
                    nframes = int(np.ceil(clip.duration * fps))

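                # zero-padding width so that frame filenames (img0001.png, ...) sort lexicographically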
                indexlength = int(np.ceil(np.log10(nframes)))

                if crop:
                    from deeplabcut.utils import select_crop_parameters
                    if opencv:
                        cap.set(2, start)  # CAP_PROP_POS_AVI_RATIO (property 2) expects a relative position in [0, 1]
                        ret, frame = cap.read()
                        if ret:
                            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    elif flymovie:
                        frame = cap.get_frame(int(nframes * start))[0]
                        if frame.ndim != 3:
                            frame = skimage.color.gray2rgb(frame)
                        image = frame
                    else:
                        image = clip.get_frame(
                            start * clip.duration
                        )  #frame is accessed by index *1./clip.fps (fps cancels)

                    fname = Path(video)
                    output_path = Path(
                        config).parents[0] / 'labeled-data' / fname.stem

                    if output_path.exists():
                        fig, ax = plt.subplots(1)
                        # Call the GUI to select the cropping parameters
                        coords = select_crop_parameters.show(config, image)
                        # Update the config.yaml file with current cropping parameters
                        cfg['video_sets'][video] = {
                            'crop':
                            ', '.join(
                                map(str, [
                                    int(coords[0]),
                                    int(coords[1]),
                                    int(coords[2]),
                                    int(coords[3])
                                ]))
                        }
                        auxiliaryfunctions.write_config(config_file, cfg)

                        if len(os.listdir(output_path)) == 0:  #check if empty
                            #store full frame from random location (good for augmentation)
                            index = int(start * duration +
                                        np.random.rand() * duration *
                                        (stop - start))
                            if opencv:
                                cap.set(1, index)
                                ret, frame = cap.read()
                                if ret:
                                    image = img_as_ubyte(
                                        cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            elif flymovie:
                                frame = cap.get_frame(int(nframes * start))[0]
                                if frame.ndim != 3:
                                    frame = skimage.color.gray2rgb(frame)
                                image = img_as_ubyte(frame)
                            else:
                                image = img_as_ubyte(
                                    clip.get_frame(index * 1. / clip.fps))
                                clip = clip.crop(
                                    y1=int(coords[2]),
                                    y2=int(coords[3]),
                                    x1=int(coords[0]),
                                    x2=int(coords[1]))  #now crop clip

                            saveimg = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            io.imsave(saveimg, image)

                        else:
                            askuser = input(
                                "The directory already contains some frames. Do you want to add to it?(yes/no): "
                            )
                            if askuser == 'y' or askuser == 'yes' or askuser == 'Y' or askuser == 'Yes':
                                #clip=clip.crop(y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]))
                                index = int(start * duration +
                                            np.random.rand() * duration *
                                            (stop - start))
                                if opencv:
                                    cap.set(1, index)
                                    ret, frame = cap.read()
                                    if ret:
                                        image = img_as_ubyte(
                                            cv2.cvtColor(
                                                frame, cv2.COLOR_BGR2RGB))
                                elif flymovie:
                                    frame = cap.get_frame(int(nframes *
                                                              start))[0]
                                    if frame.ndim != 3:
                                        frame = skimage.color.gray2rgb(frame)
                                    image = img_as_ubyte(frame)
                                else:
                                    image = img_as_ubyte(
                                        clip.get_frame(index * 1. / clip.fps))
                                    clip = clip.crop(y1=int(coords[2]),
                                                     y2=int(coords[3]),
                                                     x1=int(coords[0]),
                                                     x2=int(coords[1]))

                                saveimg = str(output_path) + '/img' + str(
                                    index).zfill(indexlength) + ".png"
                                io.imsave(saveimg, image)
                                pass
                            else:
                                sys.exit(
                                    "Delete the frames and try again later!")

                else:
                    numframes2pick = cfg[
                        'numframes2pick'] + 1  # without cropping, no full-size frame was stored above, so one extra frame is selected in the next stage.

                print("Extracting frames based on %s ..." % algo)

                if algo == 'uniform':  #extract n-1 frames (0 was already stored)
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick - 1, start, stop)
                    elif flymovie:
                        frames2pick = frameselectiontools.UniformFramesfmf(
                            cap, numframes2pick - 1, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick - 1, start, stop)
                elif algo == 'kmeans':
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap,
                            numframes2pick - 1,
                            start,
                            stop,
                            crop,
                            coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                    elif flymovie:
                        print("FMF not supported by kmeans as of now!")
                        frames2pick = []
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip,
                            numframes2pick - 1,
                            start,
                            stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
                    )
                    frames2pick = []

                output_path = Path(config).parents[0] / 'labeled-data' / Path(
                    video).stem
                if opencv:
                    for index in frames2pick:
                        cap.set(1, index)  #extract a particular frame
                        ret, frame = cap.read()
                        if ret:
                            image = img_as_ubyte(
                                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            img_name = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            if crop:
                                io.imsave(
                                    img_name,
                                    image[int(coords[2]):int(coords[3]),
                                          int(coords[0]):int(coords[1]), :]
                                )  # crop: image[y1:y2, x1:x2]
                            else:
                                io.imsave(img_name, image)
                        else:
                            print("Frame", index, " not found!")
                    cap.release()
                elif flymovie:
                    for index in frames2pick:
                        print(index)
                        frame = cap.get_frame(int(index))[0]
                        if frame.ndim != 3:
                            frame = skimage.color.gray2rgb(frame)
                        image = img_as_ubyte(frame)
                        img_name = str(output_path) + '/img' + str(
                            index).zfill(indexlength) + ".png"
                        if crop:
                            io.imsave(
                                img_name,
                                image[int(coords[2]):int(coords[3]),
                                      int(coords[0]):int(coords[1]), :]
                            )  # crop: image[y1:y2, x1:x2]
                        else:
                            io.imsave(img_name, image)
                    cap.close()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1. / clip.fps))
                            img_name = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  #constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
                                )

                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")

                    #close video.
                    clip.close()
                    del clip
    else:
        print(
            "Invalid MODE. Choose either 'manual' or 'automatic'. Check ``help(deeplabcut.extract_frames)`` on python and ``deeplabcut.extract_frames?`` \
              for ipython/jupyter notebook for more details.")

    print(
        "\nFrames were selected.\nYou can now label the frames using the function 'label_frames' (if you extracted enough frames for all videos)."
    )
def extract_frames(
    config,
    mode="automatic",
    algo="kmeans",
    crop=False,
    userfeedback=True,
    cluster_step=1,
    cluster_resizewidth=30,
    cluster_color=False,
    opencv=True,
    slider_width=25,
    config3d=None,
    extracted_cam=0,
    videos_list=None,
):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.\n
    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.

    The provided function either selects frames from the videos in a random, temporally uniform way (uniform), \n
    by clustering based on visual appearance (k-means), or by manual selection.

    Three important parameters for automatic extraction: numframes2pick, start and stop are set in the config file.
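
    For instance, the corresponding entries in the config.yaml file could look as follows
    (values are illustrative; start and stop must satisfy 0 <= start < stop <= 1)::

        numframes2pick: 20
        start: 0
        stop: 1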

    After frames have been extracted from all videos from one camera, matched frames from other cameras can be extracted using mode = ``match``.
    This is necessary if you plan to use epipolar lines to improve labeling across multiple camera angles. It will overwrite previously extracted
    images from the second camera angle if necessary.

    Please refer to the user guide for more details on methods and parameters https://www.nature.com/articles/s41596-019-0176-0
    or the preprint: https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual`` to extract the initial set of frames. It can also be ``match`` to match frames between
        the cameras in preparation for the use of epipolar lines during labeling; namely, extract from camera_1 first, then run this to extract the matched frames in camera_2.
        WARNING: if you use match, and you previously extracted and labeled frames from the second camera, this will overwrite your data. You will then need to delete the
        collectdata.h5/.csv files before labeling... Use with caution!

    algo : string
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only required for ``automatic`` mode and the default is ``kmeans``. For uniform, frames are picked in a temporally uniform way; kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this.

    crop : bool, optional
        If True, video frames are cropped according to the corresponding coordinates stored in the config.yaml.
        Alternatively, if cropping coordinates are not known yet, crop='GUI' triggers a user interface
        where the cropping area can be manually drawn and saved.

    userfeedback: bool, optional
        If this is set to false during automatic mode then frames for all videos are extracted. The user can set this to true, which will result in a dialog,
        where the user is asked for each video if (additional/any) frames from this video should be extracted. Use this, e.g. if you have already labeled
        some folders and want to extract data for new videos.

    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).

    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could use only every nth frame (set by: cluster_step). This reduces memory usage before clustering starts; however,
        reading the individual frames takes longer due to the skipping.

    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases
        the computational complexity.

    opencv: bool, default: True
        Uses openCV for loading & extracting (otherwise moviepy (legacy))

    slider_width: number, default: 25
        Width of the video frames slider, in percent of window

    config3d: string, optional
        Path to the config.yaml file in the 3D project. This will be used to match frames extracted from all cameras present in the field 'camera_names' to the
        frames extracted from the camera given by the parameter 'extracted_cam'

    extracted_cam: number, default: 0
        The index of the camera that already has extracted frames. This will match frame numbers to extract for all other cameras.
        This parameter is necessary if you wish to use epipolar lines in the labeling toolbox. Only use if mode = 'match' and config3d is provided.

    videos_list: list, default: None
        A list of strings containing the full paths of the videos to extract frames from. If left as None, all videos specified in the config file will have frames extracted.
        Otherwise one can select a subset by passing those paths.

    Examples
    --------
    for selecting frames automatically with 'kmeans' and cropping the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    for selecting frames automatically with 'kmeans' and defining the cropping area at runtime.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans','GUI')
    --------
    for selecting frames automatically with 'kmeans' and considering the color information.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    for selecting frames automatically with 'uniform' and cropping the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    for selecting frames manually,
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    --------
    for selecting frames manually, with a 60% wide frames slider
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual', slider_width=60)
    --------
    for extracting frames from a second camera that match the frames extracted from the first
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml', mode='match', extracted_cam=0)
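    --------
    for extracting frames from only a subset of the videos listed in the config file (the video path below is illustrative),
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',videos_list=['/analysis/project/reaching-task/videos/reachingvideo1.avi'])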

    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphical user interface to choose
    if you need to crop or not.
    --------

    """
    import os
    import sys
    import re
    import glob
    import numpy as np
    from pathlib import Path
    from skimage import io
    from skimage.util import img_as_ubyte
    from deeplabcut.utils import frameselectiontools
    from deeplabcut.utils import auxiliaryfunctions

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.gui import frame_extraction_toolbox

        frame_extraction_toolbox.show(config, slider_width)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")

        numframes2pick = cfg["numframes2pick"]
        start = cfg["start"]
        stop = cfg["stop"]

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct them in the config file."
            )
        if numframes2pick < 1 or numframes2pick != int(numframes2pick):
            raise Exception(
                "numframes2pick must be a natural number (>= 1). Please correct it in the config file."
            )
        if videos_list is None:
            videos = cfg.get("video_sets_original") or cfg["video_sets"]
        else:  #filter video_list by the ones in the config file
            videos = [v for v in cfg["video_sets"] if v in videos_list]

        if opencv:
            from deeplabcut.utils.auxfun_videos import VideoReader
        else:
            from moviepy.editor import VideoFileClip

        has_failed = []
        for video in videos:
            if userfeedback:
                print(
                    "Do you want to extract (perhaps additional) frames for video:",
                    video,
                    "?",
                )
                askuser = input("yes/no")
            else:
                askuser = "******"

            if (askuser == "y" or askuser == "yes" or askuser == "Ja"
                    or askuser == "ha" or askuser == "oui"
                    or askuser == "ouais"):  # multilanguage support :)

                if opencv:
                    cap = VideoReader(video)
                    nframes = len(cap)
                else:
                    # Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    nframes = int(np.ceil(clip.duration * fps))
                if not nframes:
                    print("Video could not be opened. Skipping...")
                    continue

                indexlength = int(np.ceil(np.log10(nframes)))

                fname = Path(video)
                output_path = Path(
                    config).parents[0] / "labeled-data" / fname.stem

                if output_path.exists():
                    if len(os.listdir(output_path)):
                        if userfeedback:
                            askuser = input(
                                "The directory already contains some frames. Do you want to add to it?(yes/no): "
                            )
                        if not (askuser == "y" or askuser == "yes"
                                or askuser == "Y" or askuser == "Yes"):
                            sys.exit("Delete the frames and try again later!")

                if crop == "GUI":
                    cfg = select_cropping_area(config, [video])
                try:
                    coords = cfg["video_sets"][video]["crop"].split(",")
                except KeyError:
                    coords = cfg["video_sets_original"][video]["crop"].split(
                        ",")

                if crop and not opencv:
                    clip = clip.crop(
                        y1=int(coords[2]),
                        y2=int(coords[3]),
                        x1=int(coords[0]),
                        x2=int(coords[1]),
                    )
                elif not crop:
                    coords = None

                print("Extracting frames based on %s ..." % algo)
                if algo == "uniform":
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick, start, stop)
                elif algo == "kmeans":
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap,
                            numframes2pick,
                            start,
                            stop,
                            crop,
                            coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color,
                        )
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip,
                            numframes2pick,
                            start,
                            stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color,
                        )
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
                    )
                    frames2pick = []

                if not len(frames2pick):
                    print("Frame selection failed...")
                    return

                output_path = (Path(config).parents[0] / "labeled-data" /
                               Path(video).stem)
                is_valid = []
                if opencv:
                    for index in frames2pick:
                        cap.set_to_frame(index)  # extract a particular frame
                        frame = cap.read_frame()
                        if frame is not None:
                            image = img_as_ubyte(frame)
                            img_name = (str(output_path) + "/img" +
                                        str(index).zfill(indexlength) + ".png")
                            if crop:
                                io.imsave(
                                    img_name,
                                    image[int(coords[2]):int(coords[3]),
                                          int(coords[0]):int(coords[1]), :, ],
                                )  # crop: image[y1:y2, x1:x2]
                            else:
                                io.imsave(img_name, image)
                            is_valid.append(True)
                        else:
                            print("Frame", index, " not found!")
                            is_valid.append(False)
                    cap.close()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1.0 / clip.fps))
                            img_name = (str(output_path) + "/img" +
                                        str(index).zfill(indexlength) + ".png")
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  # constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
                                )
                            is_valid.append(True)
                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")
                            is_valid.append(False)
                    clip.close()
                    del clip

                if not any(is_valid):
                    has_failed.append(True)
                else:
                    has_failed.append(False)

            else:  # NO!
                has_failed.append(False)

        if all(has_failed):
            print("Frame extraction failed. Video files must be corrupted.")
            return
        elif any(has_failed):
            print("Although most frames were extracted, some were invalid.")
        else:
            print(
                "Frames were successfully extracted, for the videos listed in the config.yaml file."
            )
        print(
            "\nYou can now label the frames using the function 'label_frames' "
            "(Note, you should label frames extracted from diverse videos (and many videos; we do not recommend training on single videos!))."
        )

    elif mode == "match":
        import cv2

        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")
        videos = sorted(cfg["video_sets"].keys())
        project_path = Path(config).parents[0]
        labels_path = os.path.join(project_path, "labeled-data/")
        video_dir = os.path.join(project_path, "videos/")
        try:
            cfg_3d = auxiliaryfunctions.read_config(config3d)
        except:
            raise Exception(
                "You must create a 3D project and edit the 3D config file before extracting matched frames. \n"
            )
        cams = cfg_3d["camera_names"]
        extCam_name = cams[extracted_cam]
        del cams[extracted_cam]
        label_dirs = sorted(
            glob.glob(os.path.join(labels_path, "*" + extCam_name + "*")))

        # select crop method
        crop_list = []
        for video in videos:
            if extCam_name not in video:
                if crop == "GUI":
                    cfg = select_cropping_area(config, [video])
                    print("in gui code")
                coords = cfg["video_sets"][video]["crop"].split(",")

                if crop and not opencv:
                    clip = clip.crop(
                        y1=int(coords[2]),
                        y2=int(coords[3]),
                        x1=int(coords[0]),
                        x2=int(coords[1]),
                    )
                elif not crop:
                    coords = None
                crop_list.append(coords)
        print(crop_list)

        for coords, dirPath in zip(crop_list, label_dirs):
            extracted_images = glob.glob(os.path.join(dirPath, "*png"))

            imgPattern = re.compile("[0-9]{1,10}")
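            # the pattern matches the numeric frame index in filenames such as img0042.png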
            for cam in cams:
                output_path = re.sub(extCam_name, cam, dirPath)

                for fname in os.listdir(output_path):
                    if fname.endswith(".png"):
                        os.remove(os.path.join(output_path, fname))

                vid = os.path.join(video_dir,
                                   os.path.basename(output_path)) + ".avi"
                cap = cv2.VideoCapture(vid)
                print("\n extracting matched frames from " +
                      os.path.basename(output_path) + ".avi")
                for img in extracted_images:
                    imgNum = re.findall(imgPattern, os.path.basename(img))[0]
                    cap.set(1, int(imgNum))
                    ret, frame = cap.read()
                    if ret:
                        image = img_as_ubyte(
                            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                        img_name = str(output_path) + "/img" + imgNum + ".png"
                        if crop:
                            io.imsave(
                                img_name,
                                image[int(coords[2]):int(coords[3]),
                                      int(coords[0]):int(coords[1]), :, ],
                            )
                        else:
                            io.imsave(img_name, image)
        print(
            "\n Done extracting matched frames. You can now begin labeling frames using the function label_frames\n"
        )

    else:
        print(
            "Invalid MODE. Choose either 'manual', 'automatic' or 'match'. Check ``help(deeplabcut.extract_frames)`` on python and ``deeplabcut.extract_frames?`` \
              for ipython/jupyter notebook for more details.")
Example #21
def extract_frames(config,
                   mode='automatic',
                   algo='kmeans',
                   crop=False,
                   userfeedback=True,
                   cluster_step=1,
                   cluster_resizewidth=30,
                   cluster_color=False,
                   opencv=True,
                   slider_width=25):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.\n
    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.

    The provided function either selects frames from the videos in a random, temporally uniform way (uniform), \n
    by clustering based on visual appearance (k-means), or by manual selection.

    Three important parameters for automatic extraction: numframes2pick, start and stop are set in the config file.

    Please refer to the user guide for more details on methods and parameters https://www.nature.com/articles/s41596-019-0176-0
    or the preprint: https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.

    algo : string
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only required for ``automatic`` mode and the default is ``kmeans``. For uniform, frames are picked in a temporally uniform way; kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this.

    crop : bool, optional
        If True, video frames are cropped according to the corresponding coordinates stored in the config.yaml.
        Alternatively, if cropping coordinates are not known yet, crop='GUI' triggers a user interface
        where the cropping area can be manually drawn and saved.

    userfeedback: bool, optional
        If this is set to false during automatic mode then frames for all videos are extracted. The user can set this to true, which will result in a dialog,
        where the user is asked for each video if (additional/any) frames from this video should be extracted. Use this, e.g. if you have already labeled
        some folders and want to extract data for new videos.

    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).

    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could use only every nth frame (set by: cluster_step). This reduces memory usage before clustering starts; however,
        reading the individual frames takes longer due to the skipping.

    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases
        the computational complexity.

    opencv: bool, default: True
        Uses openCV for loading & extracting (otherwise moviepy (legacy))

    slider_width: number, default: 25
        Width of the video frames slider, in percent of window

    Examples
    --------
    for selecting frames automatically with 'kmeans' and cropping the frames.
    >>> deeplabcutcore.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    for selecting frames automatically with 'kmeans' and defining the cropping area at runtime.
    >>> deeplabcutcore.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans','GUI')
    --------
    for selecting frames automatically with 'kmeans' and considering the color information.
    >>> deeplabcutcore.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    for selecting frames automatically with 'uniform' and cropping the frames.
    >>> deeplabcutcore.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    for selecting frames manually,
    >>> deeplabcutcore.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    --------
    for selecting frames manually, with a 60% wide frames slider
    >>> deeplabcutcore.extract_frames('/analysis/project/reaching-task/config.yaml','manual', slider_width=60)
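    --------
    for extracting frames from all videos without a per-video confirmation dialog,
    >>> deeplabcutcore.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','uniform',userfeedback=False)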

    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphical user interface to choose
    if you need to crop or not.
    --------

    """
    import os
    import sys
    import numpy as np
    from pathlib import Path
    from skimage import io
    from skimage.util import img_as_ubyte
    from deeplabcutcore.utils import frameselectiontools
    from deeplabcutcore.utils import auxiliaryfunctions

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcutcore.generate_training_dataset import frame_extraction_toolbox
        from deeplabcutcore.utils import select_crop_parameters
        frame_extraction_toolbox.show(config, slider_width)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")

        numframes2pick = cfg['numframes2pick']
        start = cfg['start']
        stop = cfg['stop']

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct them in the config file."
            )
        if numframes2pick < 1 or numframes2pick != int(numframes2pick):
            raise Exception(
                "numframes2pick must be a natural number (>= 1). Please correct it in the config file."
            )

        videos = cfg['video_sets'].keys()
        if opencv:
            import cv2
        else:
            from moviepy.editor import VideoFileClip

        has_failed = []
        for vindex, video in enumerate(videos):
            if userfeedback:
                print(
                    "Do you want to extract (perhaps additional) frames for video:",
                    video, "?")
                askuser = input("yes/no")
            else:
                askuser = "******"

            if askuser == 'y' or askuser == 'yes' or askuser == 'Ja' or askuser == 'ha'\
                    or askuser == 'oui' or askuser == 'ouais':  # multilanguage support :)
                if opencv:
                    cap = cv2.VideoCapture(video)
                    fps = cap.get(
                        5
                    )  # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
                    nframes = int(cap.get(7))
                else:
                    # Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    nframes = int(np.ceil(clip.duration * fps))
                if not nframes:
                    print('Video could not be opened. Skipping...')
                    continue

                indexlength = int(np.ceil(np.log10(nframes)))

                fname = Path(video)
                output_path = Path(
                    config).parents[0] / 'labeled-data' / fname.stem

                if output_path.exists():
                    if len(os.listdir(output_path)):
                        if userfeedback:
                            askuser = input(
                                "The directory already contains some frames. Do you want to add to it?(yes/no): "
                            )
                        if not (askuser == 'y' or askuser == 'yes'
                                or askuser == 'Y' or askuser == 'Yes'):
                            sys.exit("Delete the frames and try again later!")

                if crop == 'GUI':
                    cfg = select_cropping_area(config, [video])
                coords = cfg['video_sets'][video]['crop'].split(',')
                if crop and not opencv:
                    clip = clip.crop(y1=int(coords[2]),
                                     y2=int(coords[3]),
                                     x1=int(coords[0]),
                                     x2=int(coords[1]))
                elif not crop:
                    coords = None

                print("Extracting frames based on %s ..." % algo)
                if algo == 'uniform':
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick, start, stop)
                elif algo == 'kmeans':
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap,
                            numframes2pick,
                            start,
                            stop,
                            crop,
                            coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip,
                            numframes2pick,
                            start,
                            stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
                    )
                    frames2pick = []

                if not len(frames2pick):
                    print('Frame selection failed...')
                    return

                output_path = Path(config).parents[0] / 'labeled-data' / Path(
                    video).stem
                is_valid = []
                if opencv:
                    for index in frames2pick:
                        cap.set(1, index)  # extract a particular frame
                        ret, frame = cap.read()
                        if ret:
                            image = img_as_ubyte(
                                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            img_name = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            if crop:
                                io.imsave(
                                    img_name,
                                    image[int(coords[2]):int(coords[3]),
                                          int(coords[0]):int(coords[1]), :]
                                )  # crop: image[y1:y2, x1:x2]
                            else:
                                io.imsave(img_name, image)
                            is_valid.append(True)
                        else:
                            print("Frame", index, " not found!")
                            is_valid.append(False)
                    cap.release()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1. / clip.fps))
                            img_name = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  # constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
                                )
                            is_valid.append(True)
                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")
                            is_valid.append(False)
                    clip.close()
                    del clip

                if not any(is_valid):
                    has_failed.append(True)
                else:
                    has_failed.append(False)

        if all(has_failed):
            print('Frame extraction failed. The video files may be corrupted.')
            return
        elif any(has_failed):
            print('Although most frames were extracted, some were invalid.')
        else:
            print("Frames were successfully extracted.")
        print(
            "\nYou can now label the frames using the function 'label_frames' "
            "(if you extracted enough frames for all videos).")
    else:
        print(
            "Invalid MODE. Choose either 'manual' or 'automatic'. Check ``help(deeplabcutcore.extract_frames)`` on python and ``deeplabcutcore.extract_frames?`` \
              for ipython/jupyter notebook for more details.")
def ExtractFramesbasedonPreselection(
    Index,
    extractionalgorithm,
    data,
    video,
    cfg,
    config,
    opencv=True,
    cluster_resizewidth=30,
    cluster_color=False,
    savelabeled=True,
    with_annotations=True,
):
    from deeplabcut.create_project import add

    start = cfg["start"]
    stop = cfg["stop"]
    numframes2extract = cfg["numframes2pick"]
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, "all")

    videofolder = str(Path(video).parents[0])
    vname = str(Path(video).stem)
    tmpfolder = os.path.join(cfg["project_path"], "labeled-data", vname)
    if os.path.isdir(tmpfolder):
        print("Frames from video", vname,
              " already extracted (more will be added)!")
    else:
        auxiliaryfunctions.attempttomakefolder(tmpfolder, recursive=True)

    nframes = len(data)
    print("Loading video...")
    if opencv:
        vid = VideoWriter(video)
        fps = vid.fps
        duration = vid.calc_duration()
    else:
        from moviepy.editor import VideoFileClip

        clip = VideoFileClip(video)
        fps = clip.fps
        duration = clip.duration

    if cfg["cropping"]:  # one might want to adjust
        coords = (cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"])
    else:
        coords = None
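    # note: coords is ordered (x1, x2, y1, y2), matching the cropping fields in the config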

    print("Duration of video [s]: ", duration, ", recorded @ ", fps, "fps!")
    print("Overall # of frames: ", nframes,
          "with (cropped) frame dimensions: ")
    if extractionalgorithm == "uniform":
        if opencv:
            frames2pick = frameselectiontools.UniformFramescv2(
                vid, numframes2extract, start, stop, Index)
        else:
            frames2pick = frameselectiontools.UniformFrames(
                clip, numframes2extract, start, stop, Index)
    elif extractionalgorithm == "kmeans":
        if opencv:
            frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                vid,
                numframes2extract,
                start,
                stop,
                cfg["cropping"],
                coords,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color,
            )
        else:
            if cfg["cropping"]:
                clip = clip.crop(y1=cfg["y1"],
                                 y2=cfg["x2"],
                                 x1=cfg["x1"],
                                 x2=cfg["x2"])
            frames2pick = frameselectiontools.KmeansbasedFrameselection(
                clip,
                numframes2extract,
                start,
                stop,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color,
            )

    else:
        print(
            "Please implement this method yourself! Currently the options are 'kmeans', 'jump', 'uniform'."
        )
        frames2pick = []

    # Extract frames + frames with plotted labels and store them in a folder (with name derived from video name) under labeled-data
    print("Let's select frames indices:", frames2pick)
    colors = visualization.get_cmap(len(bodyparts), cfg["colormap"])
    strwidth = int(np.ceil(np.log10(nframes)))  # width for strings
    for index in frames2pick:  ##tqdm(range(0,nframes,10)):
        if opencv:
            PlottingSingleFramecv2(
                vid,
                cfg["cropping"],
                coords,
                data,
                bodyparts,
                tmpfolder,
                index,
                cfg["dotsize"],
                cfg["pcutoff"],
                cfg["alphavalue"],
                colors,
                strwidth,
                savelabeled,
            )
        else:
            PlottingSingleFrame(
                clip,
                data,
                bodyparts,
                tmpfolder,
                index,
                cfg["dotsize"],
                cfg["pcutoff"],
                cfg["alphavalue"],
                colors,
                strwidth,
                savelabeled,
            )
        plt.close("all")

    # close videos
    if opencv:
        vid.close()
    else:
        clip.close()
        del clip

    # Extract annotations based on DeepLabCut and store in the folder (with name derived from video name) under labeled-data
    if len(frames2pick) > 0:
        try:
            if cfg["cropping"]:
                add.add_new_videos(
                    config, [video],
                    coords=[coords])  # make sure you pass coords as a list
            else:
                add.add_new_videos(config, [video], coords=None)
        except:  # can we make a catch here? - in fact we should drop indices from DataCombined if they are in CollectedData.. [ideal behavior; currently this is pretty unlikely]
            print(
                "AUTOMATIC ADDING OF VIDEO TO CONFIG FILE FAILED! You need to do this manually for including it in the config.yaml file!"
            )
            print("Videopath:", video, "Coordinates for cropping:", coords)
            pass

        if with_annotations:
            machinefile = os.path.join(
                tmpfolder,
                "machinelabels-iter" + str(cfg["iteration"]) + ".h5")
            if isinstance(data, pd.DataFrame):
                df = data.loc[frames2pick]
                df.index = [
                    os.path.join(
                        "labeled-data",
                        vname,
                        "img" + str(index).zfill(strwidth) + ".png",
                    ) for index in df.index
                ]  # replace index numbers with file names.
            elif isinstance(data, dict):
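                # build one row per picked frame from the per-frame detection dict,
                # padding with NaN wherever fewer detections than columns are available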
                idx = [
                    os.path.join(
                        "labeled-data",
                        vname,
                        "img" + str(index).zfill(strwidth) + ".png",
                    ) for index in frames2pick
                ]
                filename = os.path.join(str(tmpfolder),
                                        f"CollectedData_{cfg['scorer']}.h5")
                try:
                    df_temp = pd.read_hdf(filename, "df_with_missing")
                    columns = df_temp.columns
                except FileNotFoundError:
                    columns = pd.MultiIndex.from_product(
                        [
                            [cfg["scorer"]],
                            cfg["individuals"],
                            cfg["multianimalbodyparts"],
                            ["x", "y"],
                        ],
                        names=["scorer", "individuals", "bodyparts", "coords"],
                    )
                    if cfg["uniquebodyparts"]:
                        columns2 = pd.MultiIndex.from_product(
                            [
                                [cfg["scorer"]],
                                ["single"],
                                cfg["uniquebodyparts"],
                                ["x", "y"],
                            ],
                            names=[
                                "scorer", "individuals", "bodyparts", "coords"
                            ],
                        )
                        df_temp = pd.concat((
                            pd.DataFrame(columns=columns),
                            pd.DataFrame(columns=columns2),
                        ))
                        columns = df_temp.columns
                array = np.full((len(frames2pick), len(columns)), np.nan)
                for i, index in enumerate(frames2pick):
                    data_temp = data.get(index)
                    if data_temp is not None:
                        vals = np.concatenate(data_temp)[:, :2].flatten()
                        array[i, :len(vals)] = vals
                df = pd.DataFrame(array, index=idx, columns=columns)
            else:
                return
            if Path(machinefile).is_file():
                Data = pd.read_hdf(machinefile, "df_with_missing")
                DataCombined = pd.concat([Data, df])
                # drop duplicate labels:
                DataCombined = DataCombined[~DataCombined.index.duplicated(
                    keep="first")]

                DataCombined.to_hdf(machinefile,
                                    key="df_with_missing",
                                    mode="w")
                DataCombined.to_csv(
                    os.path.join(tmpfolder, "machinelabels.csv")
                )  # this is always the most current one (as reading is from h5)
            else:
                df.to_hdf(machinefile, key="df_with_missing", mode="w")
                df.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))

        print(
            "The outlier frames are extracted. They are stored in the subdirectory labeled-data\%s."
            % vname)
        print(
            "Once you extracted frames for all videos, use 'refine_labels' to manually correct the labels."
        )
    else:
        print("No frames were extracted.")
def analyse(tf_setting, videofolder: str, clips_l: list):
    """  analyse the videos in videofolder that are also listed in clips_l"""
    # Load TENSORFLOW settings
    cfg = tf_setting['cfg']
    scorer = tf_setting['scorer']
    sess = tf_setting['sess']
    inputs = tf_setting['inputs']
    outputs = tf_setting['outputs']

    pdindex = pd.MultiIndex.from_product(
        [[scorer], cfg['all_joints_names'], ['x', 'y', 'likelihood']],
        names=['scorer', 'bodyparts', 'coords'])
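
    # frame_buffer: extra frames beyond moviepy's duration-based frame estimate;
    # the true frame count is detected below by spotting repeated trailing frames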
    frame_buffer = 10

    os.chdir(videofolder)
    videos = np.sort([fn for fn in os.listdir(os.curdir) if (videotype in fn)])
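    # flatten clips_l (a list of lists) into a flat list of clip names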
    clips_l = [item for sublist in clips_l for item in sublist]
    for video in videos:
        try:
            if video.split('.')[0] not in clips_l:
                continue

            dataname = video.split('.')[0] + scorer + '.h5'
            try:
                # Attempt to load data...
                pd.read_hdf(dataname)
                print("            ... video already analyzed!", dataname)
            except FileNotFoundError:
                print("                 ... loading ", video)
                clip = VideoFileClip(video)
                nx, ny = clip.size  # moviepy's clip.size is (width, height)
                fps = clip.fps
                nframes_approx = int(
                    np.ceil(clip.duration * clip.fps) + frame_buffer)
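                # duration * fps only approximates the frame count, so a small
                # buffer is added and the true end of the video is detected
                # below via repeated (identical) frames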

                if cropping:
                    clip = clip.crop(y1=y1, y2=y2, x1=x1,
                                     x2=x2)  # one might want to adjust

                start = time.time()
                PredicteData = np.zeros(
                    (nframes_approx, 3 * len(cfg['all_joints_names'])))

                temp_image = img_as_ubyte(clip.get_frame(0))
                scmap, locref, pose = getpose(sess,
                                              inputs,
                                              temp_image,
                                              cfg,
                                              outputs,
                                              outall=True)
                PredictedScmap = np.zeros(
                    (nframes_approx, scmap.shape[0], scmap.shape[1],
                     len(cfg['all_joints_names'])))

                nframes = nframes_approx  # fallback if the end-of-video check below never fires
                for index in tqdm(range(nframes_approx)):
                    image = img_as_ubyte(clip.reader.read_frame())

                    if index == int(nframes_approx - frame_buffer * 2):
                        last_image = image
                    elif index > int(nframes_approx - frame_buffer * 2):
                        if (image == last_image).all():
                            nframes = index
                            print("Detected frames: ", nframes)
                            break
                        else:
                            last_image = image
                    try:
                        # fast path: without outall, getpose returns just the
                        # pose array (outall=True returns a tuple, which would
                        # make .flatten() fail and always fall into except)
                        pose = getpose(sess, inputs, image, cfg, outputs)
                        PredicteData[index, :] = pose.flatten()
                    except Exception:
                        scmap, locref, pose = getpose(sess,
                                                      inputs,
                                                      image,
                                                      cfg,
                                                      outputs,
                                                      outall=True)
                        PredicteData[index, :] = pose.flatten()
                        PredictedScmap[index, :, :, :] = scmap

                stop = time.time()

                dictionary = {
                    "start": start,
                    "stop": stop,
                    "run_duration": stop - start,
                    "Scorer": scorer,
                    "config file": cfg,
                    "fps": fps,
                    "frame_dimensions": (ny, nx),
                    "nframes": nframes
                }
                metadata = {'data': dictionary}

                print("Saving results...")
                DataMachine = pd.DataFrame(
                    PredicteData[:nframes, :],
                    columns=pdindex,
                    index=range(nframes)
                )  # slice pose data to have same # as # of frames.
                DataMachine.to_hdf(dataname,
                                   'df_with_missing',
                                   format='table',
                                   mode='w')

                with open(
                        dataname.split('.')[0] + 'includingmetadata.pickle',
                        'wb') as f:
                    pickle.dump(metadata, f, pickle.HIGHEST_PROTOCOL)
        except Exception:
            from warnings import warn
            warn('Could not do DLC tracking on video {}'.format(video))
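# A minimal usage sketch for analyse(). How sess/inputs/outputs are created
# depends on the pose-estimation setup and is not shown in this snippet, so
# the assembly below is an assumption; clips_l is a list of lists of names:
#
# tf_setting = {'cfg': cfg, 'scorer': 'DLC_resnet50', 'sess': sess,
#               'inputs': inputs, 'outputs': outputs}
# analyse(tf_setting, videofolder='/data/videos',
#         clips_l=[['mouse1_trial1', 'mouse1_trial2']])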
Example #24
print("Starting ", videofolder, videos)
for video in videos:
    dataname = video.split('.')[0] + scorer + '.h5'
    try:
        # Attempt to load data...
        pd.read_hdf(dataname)
        print("Video already analyzed!", dataname)
    except FileNotFoundError:
        print("Loading ", video)
        clip = VideoFileClip(video)
        nx, ny = clip.size  # moviepy's clip.size is (width, height)
        fps = clip.fps
        nframes = sum(1 for _ in clip.iter_frames())  # exact frame count (slow: full pass)

        if cropping:
            clip = clip.crop(y1=y1, y2=y2, x1=x1,
                             x2=x2)  # one might want to adjust

        print("Duration of video [s]: ", clip.duration, ", recorded with ",
              fps, "fps!")
        print("Overall # of frames: ", nframes,
              "with cropped frame dimensions: ", clip.size)

        start = time.time()
        PredicteData = np.zeros((nframes, 3 * len(cfg['all_joints_names'])))

        print("Starting to extract posture")
        for index in tqdm(range(nframes)):
            image = img_as_ubyte(clip.get_frame(index * 1. / fps))
            pose = getpose(image, cfg, outputs)
            PredicteData[index, :] = pose.flatten()
            # NOTE: cfg['all_joints_names'] must be in the same order as the bodyparts!
Example #25
def extract_frames(config,
                   mode='automatic',
                   algo='kmeans',
                   crop=False,
                   checkcropping=False,
                   userfeedback=False,
                   cluster_step=1,
                   cluster_resizewidth=30,
                   cluster_color=False,
                   opencv=True,
                   Screens=1,
                   scale_w=.8,
                   scale_h=.8):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos listed in the config.yaml will be used to select frames.
    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.

    The function selects frames either randomly and temporally uniformly distributed (uniform), by clustering based on visual appearance (k-means), or by manual selection.

    Three important parameters for automatic extraction (numframes2pick, start, and stop) are set in the config file.

    Please refer to the user guide for more details on methods and parameters: https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf
    
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
        
    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.
        
    algo : string
        String specifying the algorithm used to select the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` selection. This flag is
        only relevant in ``automatic`` mode and the default is ``kmeans``. For uniform, frames are picked in a temporally uniform way; kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, so e.g. for clustering a camouflaged octopus one might want to change this (see ``cluster_color``).
        
    crop : bool, optional
        If this is set to True, the selected frames are cropped based on the ``crop`` parameters in the config.yaml file. 
        The default is ``False``; if provided it must be either ``True`` or ``False``.
        
    checkcropping: bool, optional
        If this is set to True, the cropping parameters are overlaid on a plot of the first frame so the user can decide whether the program should proceed
        with those parameters or edit them first. The default is ``False``; if provided it must be either ``True`` or ``False``.
    
    userfeedback: bool, optional
        If this is set to False during automatic mode, frames for all videos are extracted. Setting it to True results in a dialog
        where the user is asked, for each video, whether (additional/any) frames should be extracted from it. Use this, e.g., if you have already labeled
        some folders and want to extract data for new videos only.
    
    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).
    
    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could only use every nth frame (set by: cluster_step). This saves memory before clustering can start, however, 
        reading the individual frames takes longer due to the skipping.
    
    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases 
        the computational complexity. 

    The three parameters Screens=1, scale_w=.8, scale_h=.8 define the number of screens (horizontally), the relative width (scale_w), and the relative height (scale_h), and thereby
    set the dimensions of the manual frame-extraction GUI.
        
    Examples
    --------
    To select frames automatically with 'kmeans' and crop them based on the ``crop`` parameters in config.yaml:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    To select frames automatically with 'kmeans', considering the color information:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    To select frames automatically with 'uniform' and crop them based on the ``crop`` parameters in config.yaml:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    To select frames automatically with 'uniform', crop them based on the ``crop`` parameters in config.yaml, and check the cropping first:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True,checkcropping=True)
    --------
    To select frames manually:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')

    While selecting frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphical user interface to choose
    whether you want to crop or not.
    --------
    
    """
    import os
    import sys
    import yaml
    import numpy as np
    from pathlib import Path
    from skimage import io
    from skimage.util import img_as_ubyte
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    from deeplabcut.generate_training_dataset import frameselectiontools

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.generate_training_dataset import frame_extraction_toolbox
        frame_extraction_toolbox.show(config, Screens, scale_w, scale_h)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        with open(str(config_file), 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)  # safe_load avoids executing arbitrary YAML tags
        print("Config file read successfully.")

        numframes2pick = cfg['numframes2pick']
        start = cfg['start']
        stop = cfg['stop']

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct it in the config file."
            )
        if numframes2pick < 1 or not float(numframes2pick).is_integer():
            raise Exception(
                "numframes2pick must be a positive integer. Please correct it in the config file."
            )

        videos = cfg['video_sets'].keys()
        if opencv:
            import cv2
        else:
            from moviepy.editor import VideoFileClip
        for vindex, video in enumerate(videos):
            #plt.close("all")
            coords = cfg['video_sets'][video]['crop'].split(',')

            if userfeedback == True:
                print(
                    "Do you want to extract (perhaps additional) frames for video:",
                    video, "?")
                askuser = input("yes/no")
            else:
                askuser = "******"

            if askuser == 'y' or askuser == 'yes' or askuser == 'Ja' or askuser == 'ha':  # multilanguage support :)
                #indexlength = int(np.ceil(np.log10(clip.duration * clip.fps)))
                if opencv:
                    cap = cv2.VideoCapture(video)
                    fps = cap.get(cv2.CAP_PROP_FPS)
                    nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                    duration = nframes * 1. / fps
                else:
                    #Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    duration = clip.duration
                    nframes = int(np.ceil(clip.duration * fps))
                indexlength = int(np.ceil(np.log10(nframes)))
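                # zero-pad frame indices so image filenames sort in frame order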
                if crop == True:
                    print(
                        "Make sure you change the crop parameters in the config.yaml file. The default parameters are set to the video dimensions."
                    )
                    if opencv:
                        cap.set(cv2.CAP_PROP_POS_FRAMES, int(start * nframes))
                        ret, frame = cap.read()
                        if ret:
                            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    else:
                        image = clip.get_frame(
                            start * clip.duration
                        )  #frame is accessed by index *1./clip.fps (fps cancels)

                    fname = Path(video)
                    output_path = Path(
                        config).parents[0] / 'labeled-data' / fname.stem

                    if output_path.exists() and checkcropping == True:
                        fig, ax = plt.subplots(1)
                        # Display the image
                        ax.imshow(image)
                        # Create a Rectangle patch
                        rect = patches.Rectangle(
                            (int(coords[0]), int(coords[2])),
                            int(coords[1]) - int(coords[0]),
                            int(coords[3]) - int(coords[2]),
                            linewidth=3,
                            edgecolor='r',
                            facecolor='none')
                        # Add the patch to the Axes
                        ax.add_patch(rect)
                        plt.show()

                        print(
                            "The red boundary indicates how the cropped image will look."
                        )
                        #saveimg = str(Path(config).parents[0] / Path('labeled-data','IsCroppingOK_'+fname.stem +".png"))
                        #io.imsave(saveimg, image)

                        msg = input("Is the cropping ok? (yes/no): ")
                        if msg == "yes" or msg == "y" or msg == "Yes" or msg == "Y":
                            if len(os.listdir(
                                    output_path)) == 0:  #check if empty

                                #store full frame from random location (good for augmentation)
                                index = int(start * duration +
                                            np.random.rand() * duration *
                                            (stop - start))
                                if opencv:
                                    cap.set(cv2.CAP_PROP_POS_FRAMES, index)
                                    ret, frame = cap.read()  # grab the frame at this index
                                    if ret:
                                        image = img_as_ubyte(
                                            cv2.cvtColor(
                                                frame, cv2.COLOR_BGR2RGB))
                                else:
                                    image = img_as_ubyte(
                                        clip.get_frame(index * 1. / clip.fps))
                                    clip = clip.crop(
                                        y1=int(coords[2]),
                                        y2=int(coords[3]),
                                        x1=int(coords[0]),
                                        x2=int(coords[1]))  #now crop clip

                                saveimg = str(output_path) + '/img' + str(
                                    index).zfill(indexlength) + ".png"
                                io.imsave(saveimg, image)

                            else:
                                askuser = input(
                                    "The directory already contains some frames. Do you want to add to it?(yes/no): "
                                )
                                if askuser == 'y' or askuser == 'yes' or askuser == 'Y' or askuser == 'Yes':
                                    #clip=clip.crop(y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]))
                                    pass
                                else:
                                    sys.exit(
                                        "Delete the frames and try again later!"
                                    )
                        else:
                            sys.exit(
                                "Correct the crop parameters in the config.yaml file and try again!"
                            )

                    elif output_path.exists():  #cropping without checking:
                        index = int(start * duration +
                                    np.random.rand() * duration *
                                    (stop - start))
                        if opencv:
                            cap.set(cv2.CAP_PROP_POS_FRAMES, index)
                            ret, frame = cap.read()  # grab the frame at this index
                            if ret:
                                image = img_as_ubyte(
                                    cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                        else:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1. / clip.fps))
                            clip = clip.crop(y1=int(coords[2]),
                                             y2=int(coords[3]),
                                             x1=int(coords[0]),
                                             x2=int(coords[1]))

                        saveimg = str(output_path) + '/img' + str(index).zfill(
                            indexlength) + ".png"
                        io.imsave(saveimg, image)

                else:
                    # Without cropping, no full-size frame is stored above, so
                    # one extra frame is selected in the next stage.
                    numframes2pick = cfg['numframes2pick'] + 1

                print("Extracting frames based on %s ..." % algo)
                if algo == 'uniform':  #extract n-1 frames (0 was already stored)
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick - 1, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick - 1, start, stop)
                elif algo == 'kmeans':
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap,
                            numframes2pick - 1,
                            start,
                            stop,
                            crop,
                            coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip,
                            numframes2pick - 1,
                            start,
                            stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
                    )
                    frames2pick = []

                output_path = Path(config).parents[0] / 'labeled-data' / Path(
                    video).stem
                if opencv:
                    for index in frames2pick:
                        cap.set(cv2.CAP_PROP_POS_FRAMES, index)  # seek to a particular frame
                        ret, frame = cap.read()
                        if ret:
                            image = img_as_ubyte(
                                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            img_name = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            if crop:
                                # crop rows y1:y2 and columns x1:x2
                                io.imsave(
                                    img_name,
                                    image[int(coords[2]):int(coords[3]),
                                          int(coords[0]):int(coords[1]), :])
                            else:
                                io.imsave(img_name, image)
                        else:
                            print("Frame", index, " not found!")
                    cap.release()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1. / clip.fps))
                            img_name = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  #constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
                                )

                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")

                    #close video.
                    clip.close()
                    del clip
    else:
        print(
            "Invalid mode. Choose either 'manual' or 'automatic'. See ``help(deeplabcut.extract_frames)`` in Python, or ``deeplabcut.extract_frames?`` in an ipython/jupyter notebook, for more details."
        )

    print(
        "\nFrames were selected.\nYou can now label the frames using the function 'label_frames' (if you extracted enough frames for all videos)."
    )
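# For reference, a sketch of the config.yaml entries this function reads
# (values are illustrative; the crop string is 'x1, x2, y1, y2'):
config_sketch = {
    'numframes2pick': 20,
    'start': 0.0,  # normalized interval: 0 <= start < stop <= 1
    'stop': 1.0,
    'video_sets': {
        '/analysis/project/reaching-task/videos/reach.avi': {
            'crop': '0, 640, 0, 480',
        },
    },
}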
Example #26
    else:
        print("Loading ", video, "and data.")
        dataname = video.split('.')[0] + scorer + '.h5'
        try:
            Dataframe = pd.read_hdf(dataname)
            clip = VideoFileClip(video)
        except FileNotFoundError:
            print("Data was not analyzed (run AnalysisVideos.py first).")

        nx, ny = clip.size  # moviepy's clip.size is (width, height)
        fps = clip.fps
        nframes = sum(1 for _ in clip.iter_frames())  # exact frame count (slow: full pass)

        if cropping:
            # one might want to adjust
            clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)

        print("Duration of video [s]: ", clip.duration, ", recorded with ",
              fps, "fps!")
        print("Overall # of frames: ", nframes,
              "with cropped frame dimensions: ", clip.size)

        print("Generating frames")
        for index in tqdm(range(nframes)):
            imagename = tmpfolder + "/file%04d.png" % index
            if not os.path.isfile(imagename):
                plt.axis('off')
                image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
Example #27
def crop_one(vid_path):
    clip = VideoFileClip(str(vid_path))
    x = get_split(clip.get_frame(2))  # x-coordinate of the split, from the frame at t=2s
    if x:
        return clip.crop(x2=x), clip.crop(x1=x)
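# Usage sketch for crop_one. get_split (defined elsewhere) is assumed to
# return the x-coordinate of the dividing line, or a falsy value if none:
#
# parts = crop_one('two_arenas.avi')
# if parts:
#     left, right = parts
#     left.write_videofile('left.avi', codec='png')
#     right.write_videofile('right.avi', codec='png')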