def plot_trajectories(
    config,
    videos,
    videotype=".avi",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    displayedbodyparts="all",
    displayedindividuals="all",
    showfigures=False,
    destfolder=None,
    modelprefix="",
    imagetype=".png",
    resolution=100,
    linewidth=1.0,
    track_method="",
):
    """Plots the trajectories of various bodyparts across the video.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a
        path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a
        directory. Only videos with this extension are analyzed.
        The default is ``.avi``.

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset.
        The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first
        (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default false
        Boolean variable indicating if filtered output should be plotted rather
        than frame-by-frame predictions. Filtered version can be calculated with
        ``deeplabcut.filterpredictions``.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``,
        then all body parts from config.yaml are used, or a list of strings that
        are a subset of the full list. E.g. ['hand','Joystick'] for the demo
        Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body
        parts.

    displayedindividuals: list of strings, optional
        Individuals plotted. Either ``all`` or a subset of the individuals
        declared in config.yaml.

    showfigures: bool, default false
        If true then plots are also displayed.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data
        (default is the path of the video).

    modelprefix: string, optional
        Directory containing the deeplabcut models to use. By default, the models
        are assumed to exist in the project folder.

    imagetype: string, default ".png"
        Specifies the output image format, tested '.tif', '.jpg', '.svg' and
        ".png".

    resolution: int, default 100
        Specifies the resolution (in dpi) of saved figures. Note higher
        resolution figures take longer to generate.

    linewidth: float, default 1.0
        Specifies width of line for line and histogram plots.

    track_method: string, optional
        Specifies the tracker used to generate the data. Empty by default
        (corresponding to a single animal project). For multiple animals, must be
        either 'box', 'skeleton', or 'ellipse' and will be taken from the
        config.yaml file if none is given.

    Example
    --------
    for labeling the frames

    >>> deeplabcut.plot_trajectories('home/alex/analysis/project/reaching-task/config.yaml',['/home/alex/analysis/project/videos/reachingvideo1.avi'])
    --------
    """
    cfg = auxiliaryfunctions.read_config(config)
    track_method = auxfun_multianimal.get_track_method(cfg, track_method=track_method)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    # Automatically loads the corresponding model (even the training iteration,
    # based on the snapshot index).
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix
    )
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts
    )
    individuals = auxfun_multianimal.IntersectionofIndividualsandOnesGivenbyUser(
        cfg, displayedindividuals
    )
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    if not Videos:
        print(
            "No videos found. Make sure you passed a list of videos and that *videotype* is right."
        )
        return

    failed = []  # one entry per video: False on success, True on failure
    for video in Videos:
        if destfolder is None:
            videofolder = str(Path(video).parents[0])
        else:
            videofolder = destfolder
        vname = str(Path(video).stem)
        print("Loading ", video, "and data.")
        try:
            df, _, _, suffix = auxiliaryfunctions.load_analyzed_data(
                videofolder, vname, DLCscorer, filtered, track_method
            )
            failed.append(False)
            tmpfolder = os.path.join(videofolder, "plot-poses", vname)
            auxiliaryfunctions.attempttomakefolder(tmpfolder, recursive=True)
            # Keep only the individuals and bodyparts that were labeled
            labeled_bpts = [
                bp
                for bp in df.columns.get_level_values("bodyparts").unique()
                if bp in bodyparts
            ]
            # Either display the animals defined in the config if they are found
            # in the dataframe, or all the trajectories regardless of their names
            try:
                animals = set(df.columns.get_level_values("individuals"))
            except KeyError:
                # Single-animal data has no "individuals" column level
                animals = {""}
            for animal in animals.intersection(individuals) or animals:
                PlottingResults(
                    tmpfolder,
                    df,
                    cfg,
                    labeled_bpts,
                    animal,
                    showfigures,
                    suffix + animal + imagetype,
                    resolution=resolution,
                    linewidth=linewidth,
                )
        except FileNotFoundError as e:
            failed.append(True)
            print(e)
            try:
                _ = auxiliaryfunctions.load_detection_data(
                    video, DLCscorer, track_method
                )
                print(
                    'Call "deeplabcut.stitch_tracklets()"'
                    " prior to plotting the trajectories."
                )
            except FileNotFoundError as e:
                print(e)
                print(
                    f"Make sure {video} was previously analyzed, and that "
                    f'detections were successively converted to tracklets using "deeplabcut.convert_detections2tracklets()" '
                    f'and "deeplabcut.stitch_tracklets()".'
                )

    # Report success if at least one video could be plotted.
    if not all(failed):
        print(
            'Plots created! Please check the directory "plot-poses" within the video directory'
        )
    else:
        print(
            f"Plots could not be created! "
            f"Videos were not evaluated with the current scorer {DLCscorer}."
        )
def create_labeled_video(
    config,
    videos,
    videotype="avi",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    fastmode=True,
    save_frames=False,
    Frames2plot=None,
    displayedbodyparts="all",
    displayedindividuals="all",
    codec="mp4v",
    outputframerate=None,
    destfolder=None,
    draw_skeleton=False,
    trailpoints=0,
    displaycropped=False,
    color_by="bodypart",
    modelprefix="",
    track_method="",
):
    """Labels the bodyparts in a video.

    Make sure the video is already analyzed by the function 'analyze_video'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a
        path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a
        directory. Only videos with this extension are analyzed.
        The default is ``.avi``.

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first
        (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default false
        Boolean variable indicating if filtered output should be plotted rather
        than frame-by-frame predictions. Filtered version can be calculated with
        ``deeplabcut.filterpredictions``.

    fastmode: bool
        If true uses openCV (much faster but less customization of video) vs
        matplotlib (if false). You can also "save_frames" individually or not in
        the matplotlib mode (if you set the "save_frames" variable accordingly).
        However, using matplotlib to create the frames it therefore allows much
        more flexible (one can set transparency of markers, crop, and easily
        customize).

    save_frames: bool
        If true creates each frame individual and then combines into a video.
        This variant is relatively slow as it stores all individual frames.

    Frames2plot: List of indices
        If not None & save_frames=True then the frames corresponding to the index
        will be plotted. For example, Frames2plot=[0,11] will plot the first and
        the 12th frame.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``,
        then all body parts from config.yaml are used, or a list of strings that
        are a subset of the full list. E.g. ['hand','Joystick'] for the demo
        Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body
        parts.

    displayedindividuals: list of strings, optional
        Individuals plotted in the video. By default, all individuals present in
        the config will be showed.

    codec: codec for labeled video. Options see http://www.fourcc.org/codecs.php
        [depends on your ffmpeg installation.]

    outputframerate: positive number, output frame rate for labeled video (only
        available for the mode with saving frames.) By default: None, which
        results in the original video rate.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data
        (default is the path of the video).

    draw_skeleton: bool
        If ``True`` adds a line connecting the body parts making a skeleton on
        each frame. The body parts to be connected and the color of these
        connecting lines are specified in the config file. By default: ``False``

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for
        displaying history). Default is set to 0.

    displaycropped: bool, optional
        Specifies whether only cropped frame is displayed (with labels analyzed
        therein), or the original frame with the labels analyzed in the cropped
        subset.

    color_by : string, optional (default='bodypart')
        Coloring rule. By default, each bodypart is colored differently.
        If set to 'individual', points belonging to a single individual are
        colored the same.

    Examples
    --------
    If you want to create the labeled video for only 1 video

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------
    If you want to create the labeled video for only 1 video and store the individual frames

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],fastmode=True, save_frames=True)
    --------
    If you want to create the labeled video for multiple videos

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------
    If you want to create the labeled video for all the videos (as .avi extension) in a directory.

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])
    --------
    If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')
    --------
    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    # Automatically loads the corresponding model (even the training iteration,
    # based on the snapshot index).
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix
    )
    if save_frames:
        fastmode = False  # otherwise one cannot save frames

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts
    )
    individuals = auxfun_multianimal.IntersectionofIndividualsandOnesGivenbyUser(
        cfg, displayedindividuals
    )
    if draw_skeleton:
        bodyparts2connect = cfg["skeleton"]
        skeleton_color = cfg["skeleton_color"]
    else:
        bodyparts2connect = None
        skeleton_color = None

    start_path = os.getcwd()
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    if not Videos:
        print("No video(s) were found. Please check your paths and/or 'video_type'.")
        return

    for video in Videos:
        videofolder = Path(video).parents[0]
        # BUGFIX: use a per-video destination instead of overwriting the
        # `destfolder` parameter — previously, once the first iteration assigned
        # destfolder = videofolder, every subsequent video's output landed in the
        # first video's folder.
        dest = videofolder if destfolder is None else destfolder
        auxiliaryfunctions.attempttomakefolder(dest)
        os.chdir(dest)  # NOTE: the video itself is still in the video folder
        vname = str(Path(video).stem)
        print("Starting to process video:", video, "in", dest)

        if filtered:
            videooutname1 = os.path.join(vname + DLCscorer + "filtered_labeled.mp4")
            videooutname2 = os.path.join(
                vname + DLCscorerlegacy + "filtered_labeled.mp4"
            )
        else:
            videooutname1 = os.path.join(vname + DLCscorer + "_labeled.mp4")
            videooutname2 = os.path.join(vname + DLCscorerlegacy + "_labeled.mp4")

        if os.path.isfile(videooutname1) or os.path.isfile(videooutname2):
            print("Labeled video already created.")
        else:
            print("Loading ", video, "and data.")
            try:
                df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
                    dest, vname, DLCscorer, filtered, track_method
                )
                metadata = auxiliaryfunctions.load_video_metadata(
                    dest, vname, DLCscorer
                )
                # Suffix encodes the coloring rule for multi-animal projects.
                if cfg.get("multianimalproject", False):
                    s = "_id" if color_by == "individual" else "_bp"
                else:
                    s = ""
                videooutname = filepath.replace(".h5", f"{s}_labeled.mp4")
                if os.path.isfile(videooutname):
                    print("Labeled video already created. Skipping...")
                    continue

                if all(individuals):
                    df = df.loc(axis=1)[:, individuals]
                cropping = metadata["data"]["cropping"]
                [x1, x2, y1, y2] = metadata["data"]["cropping_parameters"]
                labeled_bpts = [
                    bp
                    for bp in df.columns.get_level_values("bodyparts").unique()
                    if bp in bodyparts
                ]
                if not fastmode:
                    # Matplotlib path: slower, but more customizable; can also
                    # save the individual frames.
                    tmpfolder = os.path.join(str(videofolder), "temp-" + vname)
                    if save_frames:
                        auxiliaryfunctions.attempttomakefolder(tmpfolder)
                    clip = vp(video)
                    CreateVideoSlow(
                        videooutname,
                        clip,
                        df,
                        tmpfolder,
                        cfg["dotsize"],
                        cfg["colormap"],
                        cfg["alphavalue"],
                        cfg["pcutoff"],
                        trailpoints,
                        cropping,
                        x1,
                        x2,
                        y1,
                        y2,
                        save_frames,
                        labeled_bpts,
                        outputframerate,
                        Frames2plot,
                        bodyparts2connect,
                        skeleton_color,
                        draw_skeleton,
                        displaycropped,
                        color_by,
                    )
                else:
                    if displaycropped:
                        # Only the cropped video + the labels is depicted
                        clip = vp(
                            fname=video,
                            sname=videooutname,
                            codec=codec,
                            sw=x2 - x1,
                            sh=y2 - y1,
                        )
                    else:
                        # The full video + the (perhaps in cropped mode analyzed
                        # labels) are depicted
                        clip = vp(fname=video, sname=videooutname, codec=codec)
                    CreateVideo(
                        clip,
                        df,
                        cfg["pcutoff"],
                        cfg["dotsize"],
                        cfg["colormap"],
                        labeled_bpts,
                        trailpoints,
                        cropping,
                        x1,
                        x2,
                        y1,
                        y2,
                        bodyparts2connect,
                        skeleton_color,
                        draw_skeleton,
                        displaycropped,
                        color_by,
                    )
            except FileNotFoundError as e:
                print(e)
                continue

    os.chdir(start_path)
def create_labeled_video(
    config,
    videos,
    videotype="avi",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    fastmode=True,
    save_frames=False,
    keypoints_only=False,
    Frames2plot=None,
    displayedbodyparts="all",
    displayedindividuals="all",
    codec="mp4v",
    outputframerate=None,
    destfolder=None,
    draw_skeleton=False,
    trailpoints=0,
    displaycropped=False,
    color_by="bodypart",
    modelprefix="",
    track_method="",
):
    """Labels the bodyparts in a video.

    Make sure the video is already analyzed by the function 'analyze_video'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a
        path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a
        directory. Only videos with this extension are analyzed.
        The default is ``.avi``.

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first
        (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default false
        Boolean variable indicating if filtered output should be plotted rather
        than frame-by-frame predictions. Filtered version can be calculated with
        ``deeplabcut.filterpredictions``.

    fastmode: bool
        If true uses openCV (much faster but less customization of video) vs
        matplotlib (if false). You can also "save_frames" individually or not in
        the matplotlib mode (if you set the "save_frames" variable accordingly).
        However, using matplotlib to create the frames it therefore allows much
        more flexible (one can set transparency of markers, crop, and easily
        customize).

    save_frames: bool
        If true creates each frame individual and then combines into a video.
        This variant is relatively slow as it stores all individual frames.

    keypoints_only: bool, optional
        By default, both video frames and keypoints are visible. If true, only
        the keypoints are shown. These clips are an hommage to Johansson movies,
        see https://www.youtube.com/watch?v=1F5ICP9SYLU and of course his seminal
        paper: "Visual perception of biological motion and a model for its
        analysis" by Gunnar Johansson in Perception & Psychophysics 1973.

    Frames2plot: List of indices
        If not None & save_frames=True then the frames corresponding to the index
        will be plotted. For example, Frames2plot=[0,11] will plot the first and
        the 12th frame.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``,
        then all body parts from config.yaml are used, or a list of strings that
        are a subset of the full list. E.g. ['hand','Joystick'] for the demo
        Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body
        parts.

    displayedindividuals: list of strings, optional
        Individuals plotted in the video. By default, all individuals present in
        the config will be showed.

    codec: codec for labeled video. Options see http://www.fourcc.org/codecs.php
        [depends on your ffmpeg installation.]

    outputframerate: positive number, output frame rate for labeled video (only
        available for the mode with saving frames.) By default: None, which
        results in the original video rate.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data
        (default is the path of the video).

    draw_skeleton: bool
        If ``True`` adds a line connecting the body parts making a skeleton on
        each frame. The body parts to be connected and the color of these
        connecting lines are specified in the config file. By default: ``False``

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for
        displaying history). Default is set to 0.

    displaycropped: bool, optional
        Specifies whether only cropped frame is displayed (with labels analyzed
        therein), or the original frame with the labels analyzed in the cropped
        subset.

    color_by : string, optional (default='bodypart')
        Coloring rule. By default, each bodypart is colored differently.
        If set to 'individual', points belonging to a single individual are
        colored the same.

    Examples
    --------
    If you want to create the labeled video for only 1 video

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------
    If you want to create the labeled video for only 1 video and store the individual frames

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],fastmode=True, save_frames=True)
    --------
    If you want to create the labeled video for multiple videos

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------
    If you want to create the labeled video for all the videos (as .avi extension) in a directory.

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])
    --------
    If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.

    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')
    --------
    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    # Automatically loads the corresponding model (even the training iteration,
    # based on the snapshot index).
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix
    )
    if save_frames:
        # Saving individual frames requires the matplotlib path with full frames.
        fastmode = False
        keypoints_only = False

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts
    )
    individuals = auxfun_multianimal.IntersectionofIndividualsandOnesGivenbyUser(
        cfg, displayedindividuals
    )
    if draw_skeleton:
        bodyparts2connect = cfg["skeleton"]
        skeleton_color = cfg["skeleton_color"]
    else:
        bodyparts2connect = None
        skeleton_color = None

    start_path = os.getcwd()
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    if not Videos:
        print("No video(s) were found. Please check your paths and/or 'video_type'.")
        return

    func = partial(
        proc_video,
        videos,
        destfolder,
        filtered,
        DLCscorer,
        DLCscorerlegacy,
        track_method,
        cfg,
        individuals,
        color_by,
        bodyparts,
        codec,
        bodyparts2connect,
        trailpoints,
        save_frames,
        outputframerate,
        Frames2plot,
        draw_skeleton,
        skeleton_color,
        displaycropped,
        fastmode,
        keypoints_only,
    )

    # BUGFIX: os.cpu_count() may return None (per the stdlib docs), which would
    # make min() raise a TypeError; fall back to a single worker in that case.
    n_workers = min(os.cpu_count() or 1, len(Videos))
    with Pool(n_workers) as pool:
        pool.map(func, Videos)

    os.chdir(start_path)
def create_labeled_video(
    config,
    videos,
    videotype="",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    fastmode=True,
    save_frames=False,
    keypoints_only=False,
    Frames2plot=None,
    displayedbodyparts="all",
    displayedindividuals="all",
    codec="mp4v",
    outputframerate=None,
    destfolder=None,
    draw_skeleton=False,
    trailpoints=0,
    displaycropped=False,
    color_by="bodypart",
    modelprefix="",
    track_method="",
):
    """Labels the bodyparts in a video.

    Make sure the video is already analyzed by the function
    ``deeplabcut.analyze_videos``.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file.

    videos : list[str]
        A list of strings containing the full paths to videos for analysis or a
        path to the directory, where all the videos with same extension are
        stored.

    videotype: str, optional, default=""
        Checks for the extension of the video in case the input to the video is
        a directory. Only videos with this extension are analyzed. If left
        unspecified, videos with common extensions
        ('avi', 'mp4', 'mov', 'mpeg', 'mkv') are kept.

    shuffle : int, optional, default=1
        Number of shuffles of training dataset.

    trainingsetindex: int, optional, default=0
        Integer specifying which TrainingsetFraction to use.
        Note that TrainingFraction is a list in config.yaml.

    filtered: bool, optional, default=False
        Boolean variable indicating if filtered output should be plotted rather
        than frame-by-frame predictions. Filtered version can be calculated with
        ``deeplabcut.filterpredictions``.

    fastmode: bool, optional, default=True
        If ``True``, uses openCV (much faster but less customization of video)
        instead of matplotlib if ``False``. You can also "save_frames"
        individually or not in the matplotlib mode (if you set the "save_frames"
        variable accordingly). However, using matplotlib to create the frames it
        therefore allows much more flexible (one can set transparency of markers,
        crop, and easily customize).

    save_frames: bool, optional, default=False
        If ``True``, creates each frame individual and then combines into a
        video. Setting this to ``True`` is relatively slow as it stores all
        individual frames.

    keypoints_only: bool, optional, default=False
        By default, both video frames and keypoints are visible. If ``True``,
        only the keypoints are shown. These clips are an hommage to Johansson
        movies, see https://www.youtube.com/watch?v=1F5ICP9SYLU and of course
        his seminal paper: "Visual perception of biological motion and a model
        for its analysis" by Gunnar Johansson in Perception & Psychophysics
        1973.

    Frames2plot: List[int] or None, optional, default=None
        If not ``None`` and ``save_frames=True`` then the frames corresponding
        to the index will be plotted. For example, ``Frames2plot=[0,11]`` will
        plot the first and the 12th frame.

    displayedbodyparts: list[str] or str, optional, default="all"
        This selects the body parts that are plotted in the video. If ``all``,
        then all body parts from config.yaml are used. If a list of strings
        that are a subset of the full list. E.g. ['hand','Joystick'] for the
        demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these
        body parts.

    displayedindividuals: list[str] or str, optional, default="all"
        Individuals plotted in the video.
        By default, all individuals present in the config will be showed.

    codec: str, optional, default="mp4v"
        Codec for labeled video. For available options, see
        http://www.fourcc.org/codecs.php. Note that this depends on your ffmpeg
        installation.

    outputframerate: int or None, optional, default=None
        Positive number, output frame rate for labeled video (only available
        for the mode with saving frames.) If ``None``, which results in the
        original video rate.

    destfolder: string or None, optional, default=None
        Specifies the destination folder that was used for storing analysis
        data. If ``None``, the path of the video file is used.

    draw_skeleton: bool, optional, default=False
        If ``True`` adds a line connecting the body parts making a skeleton on
        each frame. The body parts to be connected and the color of these
        connecting lines are specified in the config file.

    trailpoints: int, optional, default=0
        Number of previous frames whose body parts are plotted in a frame
        (for displaying history).

    displaycropped: bool, optional, default=False
        Specifies whether only cropped frame is displayed (with labels analyzed
        therein), or the original frame with the labels analyzed in the cropped
        subset.

    color_by : string, optional, default='bodypart'
        Coloring rule. By default, each bodypart is colored differently.
        If set to 'individual', points belonging to a single individual are
        colored the same.

    modelprefix: str, optional, default=""
        Directory containing the deeplabcut models to use when evaluating the
        network. By default, the models are assumed to exist in the project
        folder.

    track_method: string, optional, default=""
        Specifies the tracker used to generate the data.
        Empty by default (corresponding to a single animal project).
        For multiple animals, must be either 'box', 'skeleton', or 'ellipse'
        and will be taken from the config.yaml file if none is given.

    Returns
    -------
    None

    Examples
    --------

    Create the labeled video for a single video

    >>> deeplabcut.create_labeled_video(
            '/analysis/project/reaching-task/config.yaml',
            ['/analysis/project/videos/reachingvideo1.avi'],
        )

    Create the labeled video for a single video and store the individual frames

    >>> deeplabcut.create_labeled_video(
            '/analysis/project/reaching-task/config.yaml',
            ['/analysis/project/videos/reachingvideo1.avi'],
            fastmode=True,
            save_frames=True,
        )

    Create the labeled video for multiple videos

    >>> deeplabcut.create_labeled_video(
            '/analysis/project/reaching-task/config.yaml',
            [
                '/analysis/project/videos/reachingvideo1.avi',
                '/analysis/project/videos/reachingvideo2.avi',
            ],
        )

    Create the labeled video for all the videos with an .avi extension in a
    directory.

    >>> deeplabcut.create_labeled_video(
            '/analysis/project/reaching-task/config.yaml',
            ['/analysis/project/videos/'],
        )

    Create the labeled video for all the videos with an .mp4 extension in a
    directory.

    >>> deeplabcut.create_labeled_video(
            '/analysis/project/reaching-task/config.yaml',
            ['/analysis/project/videos/'],
            videotype='mp4',
        )
    """
    cfg = auxiliaryfunctions.read_config(config)
    track_method = auxfun_multianimal.get_track_method(cfg, track_method=track_method)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    # Automatically loads the corresponding model (even the training iteration,
    # based on the snapshot index).
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix
    )
    if save_frames:
        # Saving individual frames requires the matplotlib path with full frames.
        fastmode = False
        keypoints_only = False

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts
    )
    individuals = auxfun_multianimal.IntersectionofIndividualsandOnesGivenbyUser(
        cfg, displayedindividuals
    )
    if draw_skeleton:
        bodyparts2connect = cfg["skeleton"]
        skeleton_color = cfg["skeleton_color"]
    else:
        bodyparts2connect = None
        skeleton_color = None

    start_path = os.getcwd()
    Videos = auxiliaryfunctions.get_list_of_videos(videos, videotype)
    if not Videos:
        return

    func = partial(
        proc_video,
        videos,
        destfolder,
        filtered,
        DLCscorer,
        DLCscorerlegacy,
        track_method,
        cfg,
        individuals,
        color_by,
        bodyparts,
        codec,
        bodyparts2connect,
        trailpoints,
        save_frames,
        outputframerate,
        Frames2plot,
        draw_skeleton,
        skeleton_color,
        displaycropped,
        fastmode,
        keypoints_only,
    )

    # BUGFIX: os.cpu_count() may return None (per the stdlib docs), which would
    # make min() raise a TypeError; fall back to a single worker in that case.
    n_workers = min(os.cpu_count() or 1, len(Videos))
    with Pool(n_workers) as pool:
        pool.map(func, Videos)

    os.chdir(start_path)