def create_video_with_all_detections(
    config,
    videos,
    shuffle=1,
    trainingsetindex=0,
    displayedbodyparts="all",
    destfolder=None,
    modelprefix="",
):
    """
    Create a video labeled with all the detections stored in a '*_full.pickle' file.

    Parameters
    ----------
    config : str
        Absolute path to the config.yaml file

    videos : list of str
        A list of strings containing the full paths to videos for analysis, or a path to a directory
        where all the videos with the same extension are stored.

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that is a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    """
    from deeplabcut.pose_estimation_tensorflow.lib.inferenceutils import Assembler
    import pickle, re
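    # NOTE (assumption): these snippets also rely on module-level imports from the original
    # file that are not shown in this excerpt, e.g. os, numpy as np, pandas as pd,
    # matplotlib.pyplot as plt, pathlib.Path, tqdm.trange, collections.abc.Iterable,
    # skimage.draw.disk/circle, the VideoProcessor alias `vp`, and DeepLabCut helpers
    # (auxiliaryfunctions, auxfun_multianimal, CreateVideo, CreateVideoSlow, get_segment_indices).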

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    DLCscorername, _ = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix)

    for video in videos:
        videofolder = os.path.splitext(video)[0]

        if destfolder is None:
            outputname = "{}_full.mp4".format(videofolder + DLCscorername)
            full_pickle = os.path.join(videofolder + DLCscorername +
                                       "_full.pickle")
        else:
            auxiliaryfunctions.attempttomakefolder(destfolder)
            outputname = os.path.join(
                destfolder,
                str(Path(video).stem) + DLCscorername + "_full.mp4")
            full_pickle = os.path.join(
                destfolder,
                str(Path(video).stem) + DLCscorername + "_full.pickle")

        if not (os.path.isfile(outputname)):
            print("Creating labeled video for ", str(Path(video).stem))
            with open(full_pickle, "rb") as file:
                data = pickle.load(file)

            header = data.pop("metadata")
            all_jointnames = header["all_joints_names"]

            if displayedbodyparts == "all":
                numjoints = len(all_jointnames)
                bpts = range(numjoints)
            else:  # select only "displayedbodyparts"
                bpts = []
                for bptindex, bp in enumerate(all_jointnames):
                    if bp in displayedbodyparts:
                        bpts.append(bptindex)
                numjoints = len(bpts)

            frame_names = list(data)
            frames = [int(re.findall(r"\d+", name)[0]) for name in frame_names]
            colorclass = plt.cm.ScalarMappable(cmap=cfg["colormap"])
            C = colorclass.to_rgba(np.linspace(0, 1, numjoints))
            colors = (C[:, :3] * 255).astype(np.uint8)

            pcutoff = cfg["pcutoff"]
            dotsize = cfg["dotsize"]
            clip = vp(fname=video, sname=outputname, codec="mp4v")
            ny, nx = clip.height(), clip.width()

            for n in trange(clip.nframes):
                frame = clip.load_frame()
                try:
                    ind = frames.index(n)
                    dets = Assembler._flatten_detections(
                        data[frame_names[ind]])
                    for det in dets:
                        if det.label not in bpts or det.confidence < pcutoff:
                            continue
                        x, y = det.pos
                        rr, cc = disk((y, x), dotsize, shape=(ny, nx))
                        frame[rr, cc] = colors[bpts.index(det.label)]
                except ValueError:  # No data stored for that particular frame
                    print(n, "no data")
                    pass
                try:
                    clip.save_frame(frame)
                except:
                    print(n, "frame writing error.")
                    pass
            clip.close()
        else:
            print("Detections already plotted, ", outputname)
def _create_labeled_video(
    video,
    h5file,
    keypoints2show="all",
    animals2show="all",
    skeleton_edges=None,
    pcutoff=0.6,
    dotsize=8,
    cmap="cool",
    color_by="bodypart",
    skeleton_color="k",
    trailpoints=0,
    bbox=None,
    codec="mp4v",
    fps=None,
    output_path="",
):
    if color_by not in ("bodypart", "individual"):
        raise ValueError("`color_by` should be either 'bodypart' or 'individual'.")

    if not output_path:
        s = "_id" if color_by == "individual" else "_bp"
        output_path = h5file.replace(".h5", f"{s}_labeled.mp4")
    try:
        x1, x2, y1, y2 = bbox
        display_cropped = True
        sw = x2 - x1
        sh = y2 - y1
    except TypeError:
        x1 = x2 = y1 = y2 = 0
        display_cropped = False
        sw = ""
        sh = ""

    clip = vp(
        fname=video,
        sname=output_path,
        codec=codec,
        sw=sw,
        sh=sh,
        fps=fps,
    )
    df = pd.read_hdf(h5file)
    try:
        animals = df.columns.get_level_values("individuals").unique().to_list()
        if animals2show != "all" and isinstance(animals, Iterable):
            animals = [a for a in animals if a in animals2show]
        df = df.loc(axis=1)[:, animals]
    except KeyError:
        pass
    kpts = df.columns.get_level_values("bodyparts").unique().to_list()
    if keypoints2show != "all" and isinstance(keypoints2show, Iterable):
        kpts = [kpt for kpt in kpts if kpt in keypoints2show]
    CreateVideo(
        clip,
        df,
        pcutoff,
        dotsize,
        cmap,
        kpts,
        trailpoints,
        False,
        x1,
        x2,
        y1,
        y2,
        skeleton_edges,
        skeleton_color,
        bool(skeleton_edges),
        display_cropped,
        color_by,
    )
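

# Hedged usage sketch for _create_labeled_video above; the paths are placeholders, and the
# skeleton is assumed to be a list of bodypart-name pairs (mirroring cfg["skeleton"]).
def _example_create_labeled_video_usage():
    _create_labeled_video(
        "/data/videos/mouse1.mp4",
        "/data/videos/mouse1DLC_resnet50_demoMar1shuffle1_50000.h5",
        keypoints2show=["snout", "tailbase"],
        skeleton_edges=[["snout", "tailbase"]],
        pcutoff=0.6,
        color_by="bodypart",
    )
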
def proc_video(
    videos,
    destfolder,
    filtered,
    DLCscorer,
    DLCscorerlegacy,
    track_method,
    cfg,
    individuals,
    color_by,
    bodyparts,
    codec,
    bodyparts2connect,
    trailpoints,
    save_frames,
    outputframerate,
    Frames2plot,
    draw_skeleton,
    skeleton_color,
    displaycropped,
    fastmode,
    keypoints_only,
    video,
):
    """Helper function for create_videos

    Parameters
    ----------


    """
    videofolder = Path(video).parents[0]
    if destfolder is None:
        destfolder = videofolder  # where your folder with videos is.

    auxiliaryfunctions.attempttomakefolder(destfolder)

    os.chdir(destfolder)  # THE VIDEO IS STILL IN THE VIDEO FOLDER
    print("Starting to process video: {}".format(video))
    vname = str(Path(video).stem)

    if filtered:
        videooutname1 = os.path.join(vname + DLCscorer +
                                     "filtered_labeled.mp4")
        videooutname2 = os.path.join(vname + DLCscorerlegacy +
                                     "filtered_labeled.mp4")
    else:
        videooutname1 = os.path.join(vname + DLCscorer + "_labeled.mp4")
        videooutname2 = os.path.join(vname + DLCscorerlegacy + "_labeled.mp4")

    if os.path.isfile(videooutname1) or os.path.isfile(videooutname2):
        print("Labeled video {} already created.".format(vname))
    else:
        print("Loading {} and data.".format(video))
        try:
            df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
                destfolder, vname, DLCscorer, filtered, track_method)
            metadata = auxiliaryfunctions.load_video_metadata(
                destfolder, vname, DLCscorer)
            if cfg.get("multianimalproject", False):
                s = "_id" if color_by == "individual" else "_bp"
            else:
                s = ""
            videooutname = filepath.replace(".h5", f"{s}_labeled.mp4")
            if os.path.isfile(videooutname):
                print("Labeled video already created. Skipping...")
                return

            if all(individuals):
                df = df.loc(axis=1)[:, individuals]
            cropping = metadata["data"]["cropping"]
            [x1, x2, y1, y2] = metadata["data"]["cropping_parameters"]
            labeled_bpts = [
                bp for bp in df.columns.get_level_values("bodyparts").unique()
                if bp in bodyparts
            ]

            if keypoints_only:
                # Mask rather than drop unwanted bodyparts to ensure consistent coloring
                mask = df.columns.get_level_values("bodyparts").isin(bodyparts)
                df.loc[:, ~mask] = np.nan
                inds = None
                if bodyparts2connect:
                    all_bpts = df.columns.get_level_values("bodyparts")[::3]
                    inds = get_segment_indices(bodyparts2connect, all_bpts)
                create_video_with_keypoints_only(
                    df,
                    videooutname,
                    inds,
                    cfg["pcutoff"],
                    cfg["dotsize"],
                    cfg["alphavalue"],
                    skeleton_color=skeleton_color,
                    color_by=color_by,
                    colormap=cfg["colormap"],
                )
            elif not fastmode:
                tmpfolder = os.path.join(str(videofolder), "temp-" + vname)
                if save_frames:
                    auxiliaryfunctions.attempttomakefolder(tmpfolder)
                clip = vp(video)
                CreateVideoSlow(
                    videooutname,
                    clip,
                    df,
                    tmpfolder,
                    cfg["dotsize"],
                    cfg["colormap"],
                    cfg["alphavalue"],
                    cfg["pcutoff"],
                    trailpoints,
                    cropping,
                    x1,
                    x2,
                    y1,
                    y2,
                    save_frames,
                    labeled_bpts,
                    outputframerate,
                    Frames2plot,
                    bodyparts2connect,
                    skeleton_color,
                    draw_skeleton,
                    displaycropped,
                    color_by,
                )
            else:
                if displaycropped:  # then the cropped video + the labels is depicted
                    clip = vp(
                        fname=video,
                        sname=videooutname,
                        codec=codec,
                        sw=x2 - x1,
                        sh=y2 - y1,
                        fps=outputframerate,
                    )
                else:  # then the full video + the (perhaps in cropped mode analyzed labels) are depicted
                    clip = vp(fname=video,
                              sname=videooutname,
                              codec=codec,
                              fps=outputframerate)
                CreateVideo(
                    clip,
                    df,
                    cfg["pcutoff"],
                    cfg["dotsize"],
                    cfg["colormap"],
                    labeled_bpts,
                    trailpoints,
                    cropping,
                    x1,
                    x2,
                    y1,
                    y2,
                    bodyparts2connect,
                    skeleton_color,
                    draw_skeleton,
                    displaycropped,
                    color_by,
                )

        except FileNotFoundError as e:
            print(e)
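

# Hedged sketch of how proc_video above is typically driven: the caller binds the shared
# arguments with functools.partial and maps the helper over the videos (DeepLabCut does
# this with a multiprocessing Pool). The placeholder values below are assumptions.
def _example_proc_video_usage(cfg, videos, DLCscorer, DLCscorerlegacy, bodyparts):
    from functools import partial

    func = partial(
        proc_video,
        videos,
        None,         # destfolder
        False,        # filtered
        DLCscorer,
        DLCscorerlegacy,
        "",           # track_method
        cfg,
        [""],         # individuals (single-animal projects)
        "bodypart",   # color_by
        bodyparts,
        "mp4v",       # codec
        None,         # bodyparts2connect
        0,            # trailpoints
        False,        # save_frames
        None,         # outputframerate
        None,         # Frames2plot
        False,        # draw_skeleton
        None,         # skeleton_color
        False,        # displaycropped
        True,         # fastmode
        False,        # keypoints_only
    )
    for video in videos:
        func(video)
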
def create_labeled_video(config,
                         videos,
                         videotype='avi',
                         shuffle=1,
                         trainingsetindex=0,
                         save_frames=False,
                         Frames2plot=None,
                         delete=False,
                         displayedbodyparts='all',
                         codec='mp4v',
                         outputframerate=None,
                         destfolder=None):
    """
    Labels the bodyparts in a video. Make sure the video has already been analyzed by the function 'analyze_videos'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis, or a path to a directory where all the videos with the same extension are stored.
    
    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).
     
    save_frames: bool
        If true, creates each frame individually and then combines them into a video. This variant is relatively slow as
        it stores all individual frames. However, it uses matplotlib to create the frames and is therefore much more flexible (one can set transparency of markers, crop, and easily customize).

    Frames2plot: List of indices
        If not None & save_frames=True then the frames corresponding to the index will be plotted. For example, Frames2plot=[0,11] will plot the first and the 12th frame.
        
    delete: bool
        If true then the individual frames created during the video generation will be deleted.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that is a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    codec: codec for the labeled video. For options see http://www.fourcc.org/codecs.php (depends on your ffmpeg installation).
    
    outputframerate: positive number, output frame rate for labeled video (only available for the mode with saving frames.) By default: None, which results in the original video rate.
    
    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video). 
    
    Examples
    --------
    If you want to create the labeled video for only 1 video
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------

    If you want to create the labeled video for only 1 video and store the individual frames
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],save_frames=True)
    --------

    If you want to create the labeled video for multiple videos
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------

    If you want to create the labeled video for all the videos (as .avi extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])

    --------
    If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')

    --------

    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    DLCscorer = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction
    )  #automatically loads corresponding model (even training iteration based on snapshot index)

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)

    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:

        if destfolder is None:
            #videofolder = str(Path(video).parents[0])
            videofolder = Path(video).parents[0]  # where your folder with videos is.
        else:
            videofolder = destfolder

        os.chdir(str(videofolder))
        videotype = Path(video).suffix
        print("Starting % ", videofolder, videos)
        vname = str(Path(video).stem)
        if os.path.isfile(
                os.path.join(str(videofolder),
                             vname + DLCscorer + '_labeled.mp4')):
            print("Labeled video already created.")
        else:
            print("Loading ", video, "and data.")
            dataname = os.path.join(str(videofolder),
                                    vname + DLCscorer + '.h5')
            try:
                Dataframe = pd.read_hdf(dataname)
                metadata = auxiliaryfunctions.LoadVideoMetadata(dataname)
                #print(metadata)
                datanames = [dataname]
            except FileNotFoundError:
                datanames = [
                    fn for fn in os.listdir(os.curdir)
                    if (vname in fn) and (".h5" in fn) and "resnet" in fn
                ]
                if len(datanames) == 0:
                    print("The video was not analyzed with this scorer:",
                          DLCscorer)
                    print(
                        "No other scorers were found, please use the function 'analyze_videos' first."
                    )
                elif len(datanames) > 0:
                    print("The video was not analyzed with this scorer:",
                          DLCscorer)
                    print("Other scorers were found, however:", datanames)
                    DLCscorer = 'DeepCut' + (
                        datanames[0].split('DeepCut')[1]).split('.h5')[0]
                    print("Creating labeled video for:", DLCscorer,
                          " instead.")
                    Dataframe = pd.read_hdf(datanames[0])
                    metadata = auxiliaryfunctions.LoadVideoMetadata(
                        datanames[0])

            if len(datanames) > 0:
                #Loading cropping data used during analysis
                cropping = metadata['data']["cropping"]
                [x1, x2, y1, y2] = metadata['data']["cropping_parameters"]
                print(cropping, x1, x2, y1, y2)

                if save_frames == True:
                    tmpfolder = os.path.join(str(videofolder), 'temp-' + vname)
                    auxiliaryfunctions.attempttomakefolder(tmpfolder)
                    clip = vp(video)
                    #CreateVideoSlow(clip,Dataframe,tmpfolder,cfg["dotsize"],cfg["colormap"],cfg["alphavalue"],cfg["pcutoff"],cfg["cropping"],cfg["x1"],cfg["x2"],cfg["y1"],cfg["y2"],delete,DLCscorer,bodyparts)
                    CreateVideoSlow(clip, Dataframe, tmpfolder, cfg["dotsize"],
                                    cfg["colormap"], cfg["alphavalue"],
                                    cfg["pcutoff"], cropping, x1, x2, y1, y2,
                                    delete, DLCscorer, bodyparts,
                                    outputframerate, Frames2plot)
                else:
                    clip = vp(fname=video,
                              sname=os.path.join(vname + DLCscorer +
                                                 '_labeled.mp4'),
                              codec=codec)
                    if cropping:
                        print(
                            "Fast video creation has currently not been implemented for cropped videos. Please use 'save_frames=True' to get the video."
                        )
                    else:
                        CreateVideo(clip, Dataframe, cfg["pcutoff"],
                                    cfg["dotsize"], cfg["colormap"], DLCscorer,
                                    bodyparts, cropping, x1, x2, y1,
                                    y2)  #NEED TO ADD CROPPING!
Example #5
def create_video_with_all_detections(
    config, videos, DLCscorername, displayedbodyparts="all", destfolder=None
):
    """
    Create a video labeled with all the detections stored in a '*_full.pickle' file.

    Parameters
    ----------
    config : str
        Absolute path to the config.yaml file

    videos : list of str
        A list of strings containing the full paths to videos for analysis, or a path to a directory
        where all the videos with the same extension are stored.

    DLCscorername: str
        Name of the network. E.g. 'DLC_resnet50_project_userMar23shuffle1_50000'.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that is a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    """
    from deeplabcut.pose_estimation_tensorflow.lib.inferenceutils import (
        convertdetectiondict2listoflist,
    )
    import pickle, re

    cfg = auxiliaryfunctions.read_config(config)

    for video in videos:
        videofolder = os.path.splitext(video)[0]

        if destfolder is None:
            outputname = "{}_full.mp4".format(videofolder + DLCscorername)
            full_pickle = os.path.join(videofolder + DLCscorername + "_full.pickle")
        else:
            auxiliaryfunctions.attempttomakefolder(destfolder)
            outputname = os.path.join(
                destfolder, str(Path(video).stem) + DLCscorername + "_full.mp4"
            )
            full_pickle = os.path.join(
                destfolder, str(Path(video).stem) + DLCscorername + "_full.pickle"
            )

        if not (os.path.isfile(outputname)):
            print("Creating labeled video for ", str(Path(video).stem))
            with open(full_pickle, "rb") as file:
                data = pickle.load(file)

            header = data.pop("metadata")
            all_jointnames = header["all_joints_names"]

            if displayedbodyparts == "all":
                numjoints = len(all_jointnames)
                bpts = range(numjoints)
            else:  # select only "displayedbodyparts"
                bpts = []
                for bptindex, bp in enumerate(all_jointnames):
                    if bp in displayedbodyparts:
                        bpts.append(bptindex)
                numjoints = len(bpts)

            frame_names = list(data)
            frames = [int(re.findall(r"\d+", name)[0]) for name in frame_names]
            colorclass = plt.cm.ScalarMappable(cmap=cfg["colormap"])
            C = colorclass.to_rgba(np.linspace(0, 1, numjoints))
            colors = (C[:, :3] * 255).astype(np.uint8)

            pcutoff = cfg["pcutoff"]
            dotsize = cfg["dotsize"]
            clip = vp(fname=video, sname=outputname, codec="mp4v")
            ny, nx = clip.height(), clip.width()

            for n in trange(clip.nframes):
                frame = clip.load_frame()
                try:
                    ind = frames.index(n)
                    dets = convertdetectiondict2listoflist(data[frame_names[ind]], bpts)
                    for i, det in enumerate(dets):
                        color = colors[i]
                        for x, y, p, _ in det:
                            if p > pcutoff:
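                                # Note: skimage.draw.circle was removed in newer scikit-image
                                # releases; skimage.draw.disk((y, x), ...) is the drop-in
                                # replacement used in the first example above.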
                                rr, cc = circle(y, x, dotsize, shape=(ny, nx))
                                frame[rr, cc] = color
                except ValueError:  # No data stored for that particular frame
                    print(n, "no data")
                    pass
                try:
                    clip.save_frame(frame)
                except:
                    print(n, "frame writing error.")
                    pass
            clip.close()
        else:
            print("Detections already plotted, ", outputname)
Example #6
bodyparts2connect = cfg["skeleton"]
os.chdir(r'C:\Users\Windows\Desktop\Lab\2020-06-06 - grantGavinVid\file')

#########################################
##########################################

bpts = Dataframe.columns.get_level_values(
    "bodyparts"
)  # recover all the body parts (3 repetitions each: x, y, and likelihood)
all_bpts = bpts.values[::3]  # obtain unique body parts by taking every 3rd entry
bpts2connect = get_segment_indices(bodyparts2connect, all_bpts)
skeleton_color = 'orange'  # look into how to convert

video = path + os.sep + '628shockPRO_cut_9809.mp4'
clip = vp(video)
fps = clip.fps()
nframes = len(Dataframe.index)
duration = nframes / fps

df_x, df_y, df_likelihood = Dataframe.values.reshape((nframes, -1, 3)).T
colorclass = plt.cm.ScalarMappable(cmap='jet')
bplist = bpts.unique().to_list()
nbodyparts = len(bplist)

nindividuals = 1
map2bp = list(range(len(all_bpts)))
map2id = [0 for _ in map2bp]

bodyparts2plot = ['snoutL', 'snoutR', 'snoutTip']
keep = np.flatnonzero(np.isin(all_bpts, bodyparts2plot))
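
# Hedged continuation of the snippet above (not in the original): one possible way to use
# `keep` is to blank out the coordinate columns of bodyparts that are not in bodyparts2plot,
# so that only the selected keypoints get drawn downstream.
mask = ~np.isin(np.arange(len(all_bpts)), keep)
df_x[mask] = np.nan
df_y[mask] = np.nan
df_likelihood[mask] = np.nan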
Example #7
def create_labeled_video(config,
                         videos,
                         videotype='avi',
                         shuffle=1,
                         trainingsetindex=0,
                         filtered=False,
                         save_frames=False,
                         Frames2plot=None,
                         delete=False,
                         displayedbodyparts='all',
                         codec='mp4v',
                         outputframerate=None,
                         destfolder=None,
                         draw_skeleton=False,
                         trailpoints=0,
                         displaycropped=False):
    """
    Labels the bodyparts in a video. Make sure the video has already been analyzed by the function 'analyze_videos'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis, or a path to a directory where all the videos with the same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default false
        Boolean variable indicating if filtered output should be plotted rather than frame-by-frame predictions. Filtered version can be calculated with deeplabcut.filterpredictions

    save_frames: bool
        If true, creates each frame individually and then combines them into a video. This variant is relatively slow as
        it stores all individual frames. However, it uses matplotlib to create the frames and is therefore much more flexible (one can set transparency of markers, crop, and easily customize).

    Frames2plot: List of indices
        If not None & save_frames=True then the frames corresponding to the index will be plotted. For example, Frames2plot=[0,11] will plot the first and the 12th frame.

    delete: bool
        If true then the individual frames created during the video generation will be deleted.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that is a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    codec: codec for the labeled video. For options see http://www.fourcc.org/codecs.php (depends on your ffmpeg installation).

    outputframerate: positive number, output frame rate for labeled video (only available for the mode with saving frames.) By default: None, which results in the original video rate.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    draw_skeleton: bool
        If ``True`` adds a line connecting the body parts, making a skeleton on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``False``

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    displaycropped: bool, optional
        Specifies whether only the cropped frame is displayed (with labels analyzed therein), or the original frame with the labels analyzed in the cropped subset.

    Examples
    --------
    If you want to create the labeled video for only 1 video
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------

    If you want to create the labeled video for only 1 video and store the individual frames
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],save_frames=True)
    --------

    If you want to create the labeled video for multiple videos
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------

    If you want to create the labeled video for all the videos (as .avi extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])

    --------
    If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')

    --------

    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction
    )  #automatically loads corresponding model (even training iteration based on snapshot index)

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)
    if draw_skeleton:
        bodyparts2connect = cfg['skeleton']
        skeleton_color = cfg['skeleton_color']
    else:
        bodyparts2connect = None
        skeleton_color = None

    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        if destfolder is None:
            videofolder = Path(video).parents[0]  # where your folder with videos is.
        else:
            videofolder = destfolder

        os.chdir(str(videofolder))
        videotype = Path(video).suffix
        print("Starting % ", videofolder, videos)
        vname = str(Path(video).stem)

        #if notanalyzed:
        #notanalyzed,outdataname,sourcedataname,DLCscorer=auxiliaryfunctions.CheckifPostProcessing(folder,vname,DLCscorer,DLCscorerlegacy,suffix='checking')

        if filtered == True:
            videooutname1 = os.path.join(vname + DLCscorer +
                                         'filtered_labeled.mp4')
            videooutname2 = os.path.join(vname + DLCscorerlegacy +
                                         'filtered_labeled.mp4')
        else:
            videooutname1 = os.path.join(vname + DLCscorer + '_labeled.mp4')
            videooutname2 = os.path.join(vname + DLCscorerlegacy +
                                         '_labeled.mp4')

        if os.path.isfile(videooutname1) or os.path.isfile(videooutname2):
            print("Labeled video already created.")
        else:
            print("Loading ", video, "and data.")
            datafound, metadata, Dataframe, DLCscorer, suffix = auxiliaryfunctions.LoadAnalyzedData(
                str(videofolder), vname, DLCscorer, filtered
            )  #returns boolean variable if data was found and metadata + pandas array
            videooutname = os.path.join(vname + DLCscorer + suffix +
                                        '_labeled.mp4')
            if datafound and not os.path.isfile(
                    videooutname
            ):  #checking again, for this loader video could exist
                #Loading cropping data used during analysis
                cropping = metadata['data']["cropping"]
                [x1, x2, y1, y2] = metadata['data']["cropping_parameters"]
                if save_frames == True:
                    tmpfolder = os.path.join(str(videofolder), 'temp-' + vname)
                    auxiliaryfunctions.attempttomakefolder(tmpfolder)
                    clip = vp(video)

                    CreateVideoSlow(videooutname, clip, Dataframe, tmpfolder,
                                    cfg["dotsize"], cfg["colormap"],
                                    cfg["alphavalue"], cfg["pcutoff"],
                                    trailpoints, cropping, x1, x2, y1, y2,
                                    delete, DLCscorer, bodyparts,
                                    outputframerate, Frames2plot,
                                    bodyparts2connect, skeleton_color,
                                    draw_skeleton, displaycropped)
                else:
                    if displaycropped:  #then the cropped video + the labels is depicted
                        clip = vp(fname=video,
                                  sname=videooutname,
                                  codec=codec,
                                  sw=x2 - x1,
                                  sh=y2 - y1)
                        CreateVideo(clip, Dataframe, cfg["pcutoff"],
                                    cfg["dotsize"], cfg["colormap"], DLCscorer,
                                    bodyparts, trailpoints, cropping, x1, x2,
                                    y1, y2, bodyparts2connect, skeleton_color,
                                    draw_skeleton, displaycropped)
                    else:  #then the full video + the (perhaps in cropped mode analyzed labels) are depicted
                        clip = vp(fname=video, sname=videooutname, codec=codec)
                        CreateVideo(clip, Dataframe, cfg["pcutoff"],
                                    cfg["dotsize"], cfg["colormap"], DLCscorer,
                                    bodyparts, trailpoints, cropping, x1, x2,
                                    y1, y2, bodyparts2connect, skeleton_color,
                                    draw_skeleton, displaycropped)
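

# Hedged usage sketch for the variant above: the paths are placeholders; filtered,
# draw_skeleton, and trailpoints are shown because the docstring examples do not cover them.
def _example_skeleton_usage():
    create_labeled_video(
        '/analysis/project/reaching-task/config.yaml',
        ['/analysis/project/videos/reachingvideo1.avi'],
        filtered=True,       # plot deeplabcut.filterpredictions output
        draw_skeleton=True,  # connect bodyparts as defined by `skeleton` in config.yaml
        trailpoints=5,       # also plot each bodypart's 5 previous positions
    )
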
Example #8
def create_labeled_video(
    config,
    videos,
    videotype="avi",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    fastmode=True,
    save_frames=False,
    Frames2plot=None,
    displayedbodyparts="all",
    displayedindividuals="all",
    codec="mp4v",
    outputframerate=None,
    destfolder=None,
    draw_skeleton=False,
    trailpoints=0,
    displaycropped=False,
    color_by="bodypart",
    modelprefix="",
    track_method="",
):
    """
    Labels the bodyparts in a video. Make sure the video has already been analyzed by the function 'analyze_videos'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis, or a path to a directory where all the videos with the same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default false
        Boolean variable indicating if filtered output should be plotted rather than frame-by-frame predictions. Filtered version can be calculated with deeplabcut.filterpredictions

    fastmode: bool
        If true, uses OpenCV (much faster but with less customization of the video); if false, uses matplotlib.
        In the matplotlib mode you can additionally save the individual frames by setting the "save_frames" variable accordingly.
        Using matplotlib to create the frames is much more flexible (one can set transparency of markers, crop, and easily customize).

    save_frames: bool
        If true, creates each frame individually and then combines them into a video. This variant is relatively slow as
        it stores all individual frames.

    Frames2plot: List of indices
        If not None & save_frames=True then the frames corresponding to the index will be plotted. For example, Frames2plot=[0,11] will plot the first and the 12th frame.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that is a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    displayedindividuals: list of strings, optional
        Individuals plotted in the video. By default, all individuals present in the config will be shown.

    codec: codec for the labeled video. For options see http://www.fourcc.org/codecs.php (depends on your ffmpeg installation).

    outputframerate: positive number, output frame rate for labeled video (only available for the mode with saving frames.) By default: None, which results in the original video rate.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    draw_skeleton: bool
        If ``True`` adds a line connecting the body parts, making a skeleton on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``False``

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    displaycropped: bool, optional
        Specifies whether only the cropped frame is displayed (with labels analyzed therein), or the original frame with the labels analyzed in the cropped subset.

    color_by : string, optional (default='bodypart')
        Coloring rule. By default, each bodypart is colored differently.
        If set to 'individual', points belonging to a single individual are colored the same.

    Examples
    --------
    If you want to create the labeled video for only 1 video
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------

    If you want to create the labeled video for only 1 video and store the individual frames
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],fastmode=True, save_frames=True)
    --------

    If you want to create the labeled video for multiple videos
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------

    If you want to create the labeled video for all the videos (as .avi extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])

    --------
    If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')

    --------

    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix
    )  # automatically loads corresponding model (even training iteration based on snapshot index)

    if save_frames:
        fastmode = False  # otherwise one cannot save frames

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)
    individuals = auxfun_multianimal.IntersectionofIndividualsandOnesGivenbyUser(
        cfg, displayedindividuals)
    if draw_skeleton:
        bodyparts2connect = cfg["skeleton"]
        skeleton_color = cfg["skeleton_color"]
    else:
        bodyparts2connect = None
        skeleton_color = None

    start_path = os.getcwd()
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)

    if not len(Videos):
        print(
            "No video(s) were found. Please check your paths and/or 'video_type'."
        )
        return

    for video in Videos:
        videofolder = Path(video).parents[0]
        if destfolder is None:
            destfolder = videofolder  # where your folder with videos is.
        auxiliaryfunctions.attempttomakefolder(destfolder)

        os.chdir(destfolder)  # THE VIDEO IS STILL IN THE VIDEO FOLDER
        videotype = Path(video).suffix
        print("Starting % ", destfolder, videos)
        vname = str(Path(video).stem)

        # if notanalyzed:
        # notanalyzed,outdataname,sourcedataname,DLCscorer=auxiliaryfunctions.CheckifPostProcessing(folder,vname,DLCscorer,DLCscorerlegacy,suffix='checking')

        if filtered == True:
            videooutname1 = os.path.join(vname + DLCscorer +
                                         "filtered_labeled.mp4")
            videooutname2 = os.path.join(vname + DLCscorerlegacy +
                                         "filtered_labeled.mp4")
        else:
            videooutname1 = os.path.join(vname + DLCscorer + "_labeled.mp4")
            videooutname2 = os.path.join(vname + DLCscorerlegacy +
                                         "_labeled.mp4")

        if os.path.isfile(videooutname1) or os.path.isfile(videooutname2):
            print("Labeled video already created.")
        else:
            print("Loading ", video, "and data.")
            try:
                df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
                    destfolder, vname, DLCscorer, filtered, track_method)
                metadata = auxiliaryfunctions.load_video_metadata(
                    destfolder, vname, DLCscorer)
                if cfg.get("multianimalproject", False):
                    s = "_id" if color_by == "individual" else "_bp"
                else:
                    s = ""
                videooutname = filepath.replace(".h5", f"{s}_labeled.mp4")
                if os.path.isfile(videooutname):
                    print("Labeled video already created. Skipping...")
                    continue

                if all(individuals):
                    df = df.loc(axis=1)[:, individuals]
                cropping = metadata["data"]["cropping"]
                [x1, x2, y1, y2] = metadata["data"]["cropping_parameters"]
                labeled_bpts = [
                    bp for bp in df.columns.get_level_values(
                        "bodyparts").unique() if bp in bodyparts
                ]
                if not fastmode:
                    tmpfolder = os.path.join(str(videofolder), "temp-" + vname)
                    if save_frames:
                        auxiliaryfunctions.attempttomakefolder(tmpfolder)
                    clip = vp(video)
                    CreateVideoSlow(
                        videooutname,
                        clip,
                        df,
                        tmpfolder,
                        cfg["dotsize"],
                        cfg["colormap"],
                        cfg["alphavalue"],
                        cfg["pcutoff"],
                        trailpoints,
                        cropping,
                        x1,
                        x2,
                        y1,
                        y2,
                        save_frames,
                        labeled_bpts,
                        outputframerate,
                        Frames2plot,
                        bodyparts2connect,
                        skeleton_color,
                        draw_skeleton,
                        displaycropped,
                        color_by,
                    )
                else:
                    if displaycropped:  # then the cropped video + the labels is depicted
                        clip = vp(
                            fname=video,
                            sname=videooutname,
                            codec=codec,
                            sw=x2 - x1,
                            sh=y2 - y1,
                        )
                    else:  # then the full video + the (perhaps in cropped mode analyzed labels) are depicted
                        clip = vp(fname=video, sname=videooutname, codec=codec)
                    CreateVideo(
                        clip,
                        df,
                        cfg["pcutoff"],
                        cfg["dotsize"],
                        cfg["colormap"],
                        labeled_bpts,
                        trailpoints,
                        cropping,
                        x1,
                        x2,
                        y1,
                        y2,
                        bodyparts2connect,
                        skeleton_color,
                        draw_skeleton,
                        displaycropped,
                        color_by,
                    )

            except FileNotFoundError as e:
                print(e)
                continue

    os.chdir(start_path)
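

# Hedged usage sketch for the multi-animal variant above: the project/video paths and the
# track_method value are placeholders; the call assumes the video was analyzed (and the
# tracklets stitched) beforehand.
def _example_multianimal_usage():
    create_labeled_video(
        "/analysis/maDLC-project/config.yaml",
        ["/analysis/maDLC-project/videos/mice.mp4"],
        videotype=".mp4",
        filtered=True,
        color_by="individual",   # color points per animal rather than per bodypart
        draw_skeleton=True,
        track_method="ellipse",  # should match the tracker used during analysis
    )
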
        self.project_path = self.config['project_path']
        self.path_to_video = os.path.join(
            self.project_path, 'videos', self._case_full_name + '.avi')
        self.path_to_analysis = os.path.join(
            self.project_path, 'analysis', self._case_full_name)

        # inference_case_list = [os.path.basename(os.path.normpath(video_path)).split(
        #     '.')[0] for video_path in list(dict(self.cropping_config).keys())]
        if self.path_to_video not in dict(self.cropping_config).keys():
            raise ValueError(
                "case {} is not added to the cropping_config.yaml yet! \
                    Also make sure you analyze the video without any cropping".format(self.case))

        self.label_path = os.path.join(
            self.project_path, 'analysis', self._case_full_name)
        self.clip = vp(fname=self.path_to_video)

        self._trainFraction = self.config['TrainingFraction'][self.trainingsetindex]
        self._DLCscorer = auxiliaryfunctions.GetScorerName(
            self.config, self.shuffle, self._trainFraction)

        self.df_label = pd.read_hdf(os.path.join(
            self.label_path, self._case_full_name + self._DLCscorer + '.h5'))

        # TODO maybe rename the variable
        self._orig_df_bodyparts = self.df_label[self._DLCscorer][self.bodyparts]
        self.df_bodyparts_likelihood = self._orig_df_bodyparts.iloc[:, self._orig_df_bodyparts.columns.get_level_values(
            1) == 'likelihood']

        input_cropping = input("cropping for inference ? True/False: ")
def proc_video(
    videos,
    destfolder,
    filtered,
    DLCscorer,
    DLCscorerlegacy,
    track_method,
    cfg,
    individuals,
    color_by,
    bodyparts,
    codec,
    bodyparts2connect,
    trailpoints,
    save_frames,
    outputframerate,
    Frames2plot,
    draw_skeleton,
    skeleton_color,
    displaycropped,
    fastmode,
    video,
):
    """Helper function for create_videos

    Parameters
    ----------


    """
    videofolder = Path(video).parents[0]
    if destfolder is None:
        destfolder = videofolder  # where your folder with videos is.

    auxiliaryfunctions.attempttomakefolder(destfolder)

    os.chdir(destfolder)  # THE VIDEO IS STILL IN THE VIDEO FOLDER
    print("Starting to process video: {}".format(video))
    vname = str(Path(video).stem)

    if filtered:
        videooutname1 = os.path.join(vname + DLCscorer + "filtered_labeled.mp4")
        videooutname2 = os.path.join(vname + DLCscorerlegacy + "filtered_labeled.mp4")
    else:
        videooutname1 = os.path.join(vname + DLCscorer + "_labeled.mp4")
        videooutname2 = os.path.join(vname + DLCscorerlegacy + "_labeled.mp4")

    if os.path.isfile(videooutname1) or os.path.isfile(videooutname2):
        print("Labeled video {} already created.".format(vname))
    else:
        print("Loading {} and data.".format(video))
        try:
            df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
                destfolder, vname, DLCscorer, filtered, track_method
            )
            metadata = auxiliaryfunctions.load_video_metadata(
                destfolder, vname, DLCscorer
            )
            if cfg.get("multianimalproject", False):
                s = "_id" if color_by == "individual" else "_bp"
            else:
                s = ""
                # Adds support for multi_output mode. Adds extra bodyparts found in the data but not in the config.yaml.
                # Only works if "multi_output_format" is set to "separate-bodyparts".
                cmp_set = set(
                    idx[1]
                    for idx in df
                    if (idx[1] in bodyparts or is_extension_part(idx[1], bodyparts))
                )
                bodyparts = [
                    bp
                    for bp in df.columns.get_level_values("bodyparts").unique()
                    if (bp in cmp_set)
                ]
                # print(bodyparts)

            videooutname = filepath.replace(".h5", f"{s}_labeled.mp4")
            if os.path.isfile(videooutname):
                print("Labeled video already created. Skipping...")
                return

            if all(individuals):
                df = df.loc(axis=1)[:, individuals]
            cropping = metadata["data"]["cropping"]
            [x1, x2, y1, y2] = metadata["data"]["cropping_parameters"]
            labeled_bpts = [
                bp
                for bp in df.columns.get_level_values("bodyparts").unique()
                if bp in bodyparts
            ]
            if not fastmode:
                tmpfolder = os.path.join(str(videofolder), "temp-" + vname)
                if save_frames:
                    auxiliaryfunctions.attempttomakefolder(tmpfolder)
                clip = vp(video)
                CreateVideoSlow(
                    videooutname,
                    clip,
                    df,
                    tmpfolder,
                    cfg["dotsize"],
                    cfg["colormap"],
                    cfg["alphavalue"],
                    cfg["pcutoff"],
                    trailpoints,
                    cropping,
                    x1,
                    x2,
                    y1,
                    y2,
                    save_frames,
                    labeled_bpts,
                    outputframerate,
                    Frames2plot,
                    bodyparts2connect,
                    skeleton_color,
                    draw_skeleton,
                    displaycropped,
                    color_by,
                )
            else:
                if displaycropped:  # then the cropped video + the labels is depicted
                    clip = vp(
                        fname=video,
                        sname=videooutname,
                        codec=codec,
                        sw=x2 - x1,
                        sh=y2 - y1,
                    )
                else:  # then the full video + the (perhaps in cropped mode analyzed labels) are depicted
                    clip = vp(fname=video, sname=videooutname, codec=codec)
                CreateVideo(
                    clip,
                    df,
                    cfg["pcutoff"],
                    cfg["dotsize"],
                    cfg["colormap"],
                    labeled_bpts,
                    trailpoints,
                    cropping,
                    x1,
                    x2,
                    y1,
                    y2,
                    bodyparts2connect,
                    skeleton_color,
                    draw_skeleton,
                    displaycropped,
                    color_by,
                )

        except FileNotFoundError as e:
            print(e)