def create_labeled_video(config, videos, videotype='avi', shuffle=1,
                         trainingsetindex=0, save_frames=False,
                         Frames2plot=None, delete=False,
                         displayedbodyparts='all', codec='mp4v',
                         outputframerate=None, destfolder=None):
    """Label the body parts in videos that were already analyzed.

    Make sure the video is already analyzed by the function 'analyze_videos',
    so that a matching <video><scorer>.h5 prediction file exists next to each
    video (or in ``destfolder``).

    NOTE(review): this definition is shadowed by a later
    ``create_labeled_video`` in this module (the one adding ``filtered`` /
    ``draw_skeleton`` support); only the later definition is reachable after
    import.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
    videos : list
        A list of strings containing the full paths to videos for analysis,
        or a path to a directory where all videos with the same extension
        are stored.
    videotype : string, optional
        Extension filter used when the input is a directory; only videos
        with this extension are analyzed. The default is ``.avi``.
    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.
    trainingsetindex : int, optional
        Integer specifying which TrainingsetFraction to use. By default the
        first (note that TrainingFraction is a list in config.yaml).
    save_frames : bool
        If True, creates each frame individually and then combines them into
        a video. Relatively slow (stores all individual frames) but uses
        matplotlib and is therefore much more flexible (transparency of
        markers, cropping, easy customization).
    Frames2plot : list of indices, optional
        If not None and save_frames=True, only the frames at these indices
        are plotted, e.g. Frames2plot=[0, 11] plots the first and the 12th
        frame.
    delete : bool
        If True, the individual frames created during video generation are
        deleted afterwards.
    displayedbodyparts : list of strings, optional
        Body parts plotted in the video: either ``all`` (every body part
        from config.yaml) or a list that is a subset of the full list, e.g.
        ['hand', 'Joystick'] for the demo
        Reaching-Mackenzie-2018-08-30/config.yaml.
    codec : string
        Codec for the labeled video. For options see
        http://www.fourcc.org/codecs.php [depends on your ffmpeg
        installation].
    outputframerate : positive number, optional
        Output frame rate for the labeled video (only available in the
        save_frames mode). Default None keeps the original video rate.
    destfolder : string, optional
        Destination folder that was used for storing analysis data (default
        is the path of the video).

    Examples
    --------
    Create the labeled video for one video:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])

    Create the labeled video for one video and store the individual frames:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],save_frames=True)

    Create labeled videos for multiple videos:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])

    Create labeled videos for all .avi videos in a directory:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])

    Create labeled videos for all .mp4 videos in a directory:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')
    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    # Automatically loads corresponding model (even training iteration based
    # on snapshot index).
    DLCscorer = auxiliaryfunctions.GetScorerName(cfg, shuffle, trainFraction)
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        if destfolder is None:
            videofolder = Path(video).parents[0]  # folder containing the video
        else:
            videofolder = destfolder
        os.chdir(str(videofolder))
        videotype = Path(video).suffix
        print("Starting % ", videofolder, videos)
        vname = str(Path(video).stem)
        if os.path.isfile(
                os.path.join(str(videofolder),
                             vname + DLCscorer + '_labeled.mp4')):
            print("Labeled video already created.")
            continue

        print("Loading ", video, "and data.")
        dataname = os.path.join(str(videofolder), vname + DLCscorer + '.h5')
        try:
            Dataframe = pd.read_hdf(dataname)
            metadata = auxiliaryfunctions.LoadVideoMetadata(dataname)
            datanames = [dataname]
        except FileNotFoundError:
            # Fall back to results produced by any other scorer for this video.
            datanames = [
                fn for fn in os.listdir(os.curdir)
                if (vname in fn) and (".h5" in fn) and "resnet" in fn
            ]
            if not datanames:
                print("The video was not analyzed with this scorer:",
                      DLCscorer)
                print(
                    "No other scorers were found, please use the function 'analyze_videos' first."
                )
                # FIX: skip this video; the original fell through and later
                # referenced Dataframe/cropping that were never assigned.
                continue
            print("The video was not analyzed with this scorer:", DLCscorer)
            print("Other scorers were found, however:", datanames)
            DLCscorer = 'DeepCut' + (
                datanames[0].split('DeepCut')[1]).split('.h5')[0]
            print("Creating labeled video for:", DLCscorer, " instead.")
            Dataframe = pd.read_hdf(datanames[0])
            metadata = auxiliaryfunctions.LoadVideoMetadata(datanames[0])

        # Loading cropping data used during analysis.
        cropping = metadata['data']["cropping"]
        [x1, x2, y1, y2] = metadata['data']["cropping_parameters"]
        print(cropping, x1, x2, y1, y2)
        if save_frames:
            tmpfolder = os.path.join(str(videofolder), 'temp-' + vname)
            auxiliaryfunctions.attempttomakefolder(tmpfolder)
            clip = vp(video)
            CreateVideoSlow(clip, Dataframe, tmpfolder, cfg["dotsize"],
                            cfg["colormap"], cfg["alphavalue"],
                            cfg["pcutoff"], cropping, x1, x2, y1, y2, delete,
                            DLCscorer, bodyparts, outputframerate,
                            Frames2plot)
        else:
            clip = vp(fname=video,
                      sname=os.path.join(vname + DLCscorer + '_labeled.mp4'),
                      codec=codec)
            if cropping:
                print(
                    "Fast video creation has currently not been implemented for cropped videos. Please use 'save_frames=True' to get the video."
                )
            else:
                CreateVideo(clip, Dataframe, cfg["pcutoff"], cfg["dotsize"],
                            cfg["colormap"], DLCscorer, bodyparts, cropping,
                            x1, x2, y1, y2)  # NEED TO ADD CROPPING!
def create_labeled_video(config, videos, videotype='avi', shuffle=1,
                         trainingsetindex=0, filtered=False,
                         save_frames=False, Frames2plot=None, delete=False,
                         displayedbodyparts='all', codec='mp4v',
                         outputframerate=None, destfolder=None,
                         draw_skeleton=False, trailpoints=0,
                         displaycropped=False):
    """Label the body parts in videos that were already analyzed.

    Make sure the video is already analyzed by the function 'analyze_videos',
    so that a matching <video><scorer>.h5 prediction file exists next to each
    video (or in ``destfolder``).

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
    videos : list
        A list of strings containing the full paths to videos for analysis,
        or a path to a directory where all videos with the same extension
        are stored.
    videotype : string, optional
        Extension filter used when the input is a directory; only videos
        with this extension are analyzed. The default is ``.avi``.
    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.
    trainingsetindex : int, optional
        Integer specifying which TrainingsetFraction to use. By default the
        first (note that TrainingFraction is a list in config.yaml).
    filtered : bool, default False
        Boolean variable indicating if filtered output should be plotted
        rather than frame-by-frame predictions. The filtered version can be
        calculated with deeplabcut.filterpredictions.
    save_frames : bool
        If True, creates each frame individually and then combines them into
        a video. Relatively slow (stores all individual frames) but uses
        matplotlib and is therefore much more flexible (transparency of
        markers, cropping, easy customization).
    Frames2plot : list of indices, optional
        If not None and save_frames=True, only the frames at these indices
        are plotted, e.g. Frames2plot=[0, 11] plots the first and the 12th
        frame.
    delete : bool
        If True, the individual frames created during video generation are
        deleted afterwards.
    displayedbodyparts : list of strings, optional
        Body parts plotted in the video: either ``all`` (every body part
        from config.yaml) or a list that is a subset of the full list, e.g.
        ['hand', 'Joystick'] for the demo
        Reaching-Mackenzie-2018-08-30/config.yaml.
    codec : string
        Codec for the labeled video. For options see
        http://www.fourcc.org/codecs.php [depends on your ffmpeg
        installation].
    outputframerate : positive number, optional
        Output frame rate for the labeled video (only available in the
        save_frames mode). Default None keeps the original video rate.
    destfolder : string, optional
        Destination folder that was used for storing analysis data (default
        is the path of the video).
    draw_skeleton : bool
        If ``True`` adds lines connecting the body parts, making a skeleton
        on each frame. The body parts to be connected and the color of the
        connecting lines are specified in the config file. By default:
        ``False``.
    trailpoints : int
        Number of previous frames whose body parts are plotted in a frame
        (for displaying history). Default is set to 0.
    displaycropped : bool, optional
        Specifies whether only the cropped frame is displayed (with labels
        analyzed therein), or the original frame with the labels analyzed
        in the cropped subset.

    Examples
    --------
    Create the labeled video for one video:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])

    Create the labeled video for one video and store the individual frames:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],save_frames=True)

    Create labeled videos for multiple videos:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])

    Create labeled videos for all .avi videos in a directory:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])

    Create labeled videos for all .mp4 videos in a directory:

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')
    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    # Automatically loads corresponding model (even training iteration based
    # on snapshot index).
    DLCscorer = auxiliaryfunctions.GetScorerName(cfg, shuffle, trainFraction)
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)
    if draw_skeleton:
        print("\n\n----------- DRAW Skeleton is TRUE -------------\n\n")
        bodyparts2connect = cfg['skeleton']
        print("Body parts to connect: ", bodyparts2connect)
        skeleton_color = cfg['skeleton_color']
        print("Skeleton color: ", skeleton_color)
    else:
        print("\n\n----------- DRAW Skeleton is FALSE!! -------------\n\n")
        bodyparts2connect = None
        skeleton_color = None

    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        if destfolder is None:
            videofolder = Path(video).parents[0]  # folder containing the video
        else:
            videofolder = destfolder
        os.chdir(str(videofolder))
        videotype = Path(video).suffix
        print("Starting % ", videofolder, videos)
        vname = str(Path(video).stem)
        if filtered:
            videooutname = vname + DLCscorer + 'filtered_labeled.mp4'
        else:
            videooutname = vname + DLCscorer + '_labeled.mp4'
        # FIX: check for the actual output file (filtered or not); the
        # original always tested the unfiltered name, so a filtered video was
        # never created once the unfiltered one existed.
        if os.path.isfile(os.path.join(str(videofolder), videooutname)):
            print("\n\nLabeled video already created.")
            continue

        print("\n\nLoading ", video, "and data.")
        # NOTE(review): even with filtered=True this loads the unfiltered
        # predictions file; only the output name changes. Presumably the
        # filtered .h5 should be read instead — confirm against
        # deeplabcut.filterpredictions' output naming.
        dataname = os.path.join(vname + DLCscorer + '.h5')
        cwd = os.getcwd()
        print("\n\n CWD = {} \n".format(cwd))
        try:
            print("\nData name = {}\n".format(dataname))
            Dataframe = pd.read_hdf(dataname)
            metadata = auxiliaryfunctions.LoadVideoMetadata(dataname)
            datanames = [dataname]
        except FileNotFoundError:
            # Fall back to results produced by any other scorer for this video.
            datanames = [
                fn for fn in os.listdir(os.curdir)
                if (vname in fn) and (".h5" in fn) and "resnet" in fn
            ]
            if not datanames:
                print("The video was not analyzed with this scorer:",
                      DLCscorer)
                print(
                    "No other scorers were found, please use the function 'analyze_videos' first."
                )
                # FIX: skip this video; the original fell through and later
                # referenced Dataframe/cropping that were never assigned.
                continue
            print("The video was not analyzed with this scorer:", DLCscorer)
            print("Other scorers were found, however:", datanames)
            DLCscorer = 'DeepCut' + (
                datanames[0].split('DeepCut')[1]).split('.h5')[0]
            print("Creating labeled video for:", DLCscorer, " instead.")
            Dataframe = pd.read_hdf(datanames[0])
            metadata = auxiliaryfunctions.LoadVideoMetadata(datanames[0])

        # Loading cropping data used during analysis.
        cropping = metadata['data']["cropping"]
        [x1, x2, y1, y2] = metadata['data']["cropping_parameters"]
        print(cropping, x1, x2, y1, y2)
        if save_frames:
            tmpfolder = os.path.join(str(videofolder), 'temp-' + vname)
            auxiliaryfunctions.attempttomakefolder(tmpfolder)
            clip = vp(video)
            CreateVideoSlow(videooutname, clip, Dataframe, tmpfolder,
                            cfg["dotsize"], cfg["colormap"],
                            cfg["alphavalue"], cfg["pcutoff"], trailpoints,
                            cropping, x1, x2, y1, y2, delete, DLCscorer,
                            bodyparts, outputframerate, Frames2plot,
                            bodyparts2connect, skeleton_color, draw_skeleton,
                            displaycropped)
        elif displaycropped:
            # The cropped video + the labels is depicted.
            clip = vp(fname=video, sname=videooutname, codec=codec,
                      sw=x2 - x1, sh=y2 - y1)
            CreateVideo(clip, Dataframe, cfg["pcutoff"], cfg["dotsize"],
                        cfg["colormap"], DLCscorer, bodyparts, trailpoints,
                        cropping, x1, x2, y1, y2, bodyparts2connect,
                        skeleton_color, draw_skeleton, displaycropped)
        else:
            # The full video + the (perhaps in cropped mode analyzed) labels
            # are depicted.
            # FIX: use the full video path; the original used
            # video.split('/')[1], which is wrong for paths with more than
            # one separator and on Windows.
            print("\n\nVideo = {}\n".format(video))
            clip = vp(fname=video, sname=videooutname, codec=codec)
            CreateVideo(clip, Dataframe, cfg["pcutoff"], cfg["dotsize"],
                        cfg["colormap"], DLCscorer, bodyparts, trailpoints,
                        cropping, x1, x2, y1, y2, bodyparts2connect,
                        skeleton_color, draw_skeleton, displaycropped)