Example #2
def adddatasetstovideolistandviceversa(config):
    """
    First run comparevideolistsanddatafolders(config) to compare the folders in labeled-data with the ones listed under video_sets (in the config file).
    If you detect differences, this function can be used to make sure each folder has a video entry and vice versa.

    It corrects the problem in the following way:

    If a video entry in the config file has no corresponding folder in labeled-data, the entry is removed.
    If a folder in labeled-data has no video entry in the config file, the matching video file is looked up in the project's
    videos directory and added to the config file together with its full-frame cropping coordinates.

    Handle with care!

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.
    """
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg["video_sets"]
    video_names = [Path(i).stem for i in videos]

    alldatafolders = [
        fn for fn in os.listdir(Path(config).parent / "labeled-data")
        if "_labeled" not in fn and not fn.startswith(".")
    ]

    print("Config file contains:", len(video_names))
    print("Labeled-data contains:", len(alldatafolders))

    toberemoved = []
    for vn in video_names:
        if vn not in alldatafolders:
            print(vn, " is missing as a labeled folder >> removing key!")
            for fullvideo in videos:
                if vn in fullvideo:
                    toberemoved.append(fullvideo)

    for vid in toberemoved:
        del videos[vid]

    # Load updated lists:
    video_names = [Path(i).stem for i in videos]
    for vn in alldatafolders:
        if vn not in video_names:
            print(vn, " is missing in config file >> adding it!")
            # Find the corresponding video file
            found = False
            for file in os.listdir(os.path.join(cfg["project_path"],
                                                "videos")):
                if os.path.splitext(file)[0] == vn:
                    found = True
                    break
            if found:
                video_path = os.path.join(cfg["project_path"], "videos", file)
                clip = VideoReader(video_path)
                videos.update({
                    video_path: {
                        "crop": ", ".join(map(str, clip.get_bbox()))
                    }
                })

    auxiliaryfunctions.write_config(config, cfg)
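
A minimal usage sketch of the workflow described in the docstring above, assuming ``comparevideolistsanddatafolders`` lives in the same module; the config path is hypothetical:

>>> config = '/analysis/project/reaching-task/config.yaml'
>>> comparevideolistsanddatafolders(config)        # first, report any mismatches
>>> adddatasetstovideolistandviceversa(config)     # then reconcile config <-> labeled-data
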
def extract_frames(
    config,
    mode="automatic",
    algo="kmeans",
    crop=False,
    userfeedback=True,
    cluster_step=1,
    cluster_resizewidth=30,
    cluster_color=False,
    opencv=True,
    slider_width=25,
    config3d=None,
    extracted_cam=0,
    videos_list=None,
):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.
    Use the function ``add_new_videos`` at any stage of the project to add new videos to the config file and extract their frames.

    The provided function either selects frames from the videos in a random, temporally uniformly distributed way (uniform),
    by clustering based on visual appearance (k-means), or by manual selection.

    Three important parameters for automatic extraction (numframes2pick, start, and stop) are set in the config file.

    After frames have been extracted from all videos from one camera, matched frames from other cameras can be extracted using mode = ``match``.
    This is necessary if you plan to use epipolar lines to improve labeling across multiple camera angles. It will overwrite previously extracted
    images from the second camera angle if necessary.

    Please refer to the user guide for more details on methods and parameters https://www.nature.com/articles/s41596-019-0176-0
    or the preprint: https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual`` to extract the initial set of frames. It can also be ``match`` to match frames between
        the cameras in preparation for the use of epipolar lines during labeling; namely, extract from camera_1 first, then run this to extract the matched frames in camera_2.
        WARNING: if you use match, and you previously extracted and labeled frames from the second camera, this will overwrite your data. You will then need to delete the
        CollectedData (.h5/.csv) files before labeling... Use with caution!

    algo : string
        String specifying the algorithm used to select the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only relevant for ``automatic`` mode and the default is ``kmeans``. For uniform, frames are picked in a temporally uniform way; kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this (see ``cluster_color``).

    crop : bool, optional
        If True, video frames are cropped according to the corresponding coordinates stored in the config.yaml.
        Alternatively, if cropping coordinates are not known yet, crop='GUI' triggers a user interface
        where the cropping area can be manually drawn and saved.

    userfeedback: bool, optional
        If this is set to False during automatic mode, then frames for all videos are extracted. The user can set this to True, which will result in a dialog
        where the user is asked, for each video, whether (additional/any) frames from this video should be extracted. Use this, e.g., if you have already labeled
        some folders and want to extract data for new videos.

    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).

    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could use only every nth frame (set by cluster_step). This saves memory before clustering starts; however,
        reading the individual frames takes longer due to the skipping.

    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases
        the computational complexity.

    opencv: bool, default: True
        Uses OpenCV for loading & extracting frames (otherwise moviepy is used (legacy)).

    slider_width: number, default: 25
        Width of the video frame slider, in percent of the window.

    config3d: string, optional
        Path to the config.yaml file in the 3D project. This will be used to match frames extracted from all cameras present in the field 'camera_names' to the
        frames extracted from the camera given by the parameter 'extracted_cam'

    extracted_cam: number, default: 0
        The index of the camera that already has extracted frames. This will match frame numbers to extract for all other cameras.
        This parameter is necessary if you wish to use epipolar lines in the labeling toolbox. Only use if mode = 'match' and config3d is provided.

    videos_list: list, default: None
            A list of strings containing the full paths of the videos to extract frames from. If this is left as None, frames are extracted from all videos specified in the config file.
            Otherwise one can select a subset by passing those paths.

    Examples
    --------
    To select frames automatically with 'kmeans' and crop the frames:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    To select frames automatically with 'kmeans' and define the cropping area at runtime:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans','GUI')
    --------
    To select frames automatically with 'kmeans' and consider the color information:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    To select frames automatically with 'uniform' and crop the frames:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','uniform',crop=True)
    --------
    To select frames manually:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    --------
    To select frames manually, with a 60% wide frame slider:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual', slider_width=60)
    --------
    To extract frames from a second camera that match the frames extracted from the first:
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml', mode='match', extracted_cam=0)

    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphical user interface to choose
    whether you need to crop or not.
    --------

    """
    import os
    import sys
    import re
    import glob
    import numpy as np
    from pathlib import Path
    from skimage import io
    from skimage.util import img_as_ubyte
    from deeplabcut.utils import frameselectiontools
    from deeplabcut.utils import auxiliaryfunctions

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.gui import frame_extraction_toolbox

        frame_extraction_toolbox.show(config, slider_width)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")

        numframes2pick = cfg["numframes2pick"]
        start = cfg["start"]
        stop = cfg["stop"]

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct it in the config file."
            )
        if numframes2pick < 1 or numframes2pick != int(numframes2pick):
            raise Exception(
                "numframes2pick must be a positive integer. Please correct it in the config file."
            )
        if videos_list is None:
            videos = cfg.get("video_sets_original") or cfg["video_sets"]
        else:  # filter videos_list by the ones in the config file
            videos = [v for v in cfg["video_sets"] if v in videos_list]

        if opencv:
            from deeplabcut.utils.auxfun_videos import VideoReader
        else:
            from moviepy.editor import VideoFileClip

        has_failed = []
        for video in videos:
            if userfeedback:
                print(
                    "Do you want to extract (perhaps additional) frames for video:",
                    video,
                    "?",
                )
                askuser = input("yes/no")
            else:
                askuser = "yes"  # no user feedback requested; extract from every video

            if (askuser == "y" or askuser == "yes" or askuser == "Ja"
                    or askuser == "ha" or askuser == "oui"
                    or askuser == "ouais"):  # multilanguage support :)

                if opencv:
                    cap = VideoReader(video)
                    nframes = len(cap)
                else:
                    # Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    nframes = int(np.ceil(clip.duration * 1.0 * fps))  # duration [s] * fps = frame count
                if not nframes:
                    print("Video could not be opened. Skipping...")
                    continue

                indexlength = int(np.ceil(np.log10(nframes)))

                fname = Path(video)
                output_path = Path(
                    config).parents[0] / "labeled-data" / fname.stem

                if output_path.exists():
                    if len(os.listdir(output_path)):
                        if userfeedback:
                            askuser = input(
                                "The directory already contains some frames. Do you want to add to it? (yes/no): "
                            )
                        if not (askuser == "y" or askuser == "yes"
                                or askuser == "Y" or askuser == "Yes"):
                            sys.exit("Delete the frames and try again later!")

                if crop == "GUI":
                    cfg = select_cropping_area(config, [video])
                try:
                    coords = cfg["video_sets"][video]["crop"].split(",")
                except KeyError:
                    coords = cfg["video_sets_original"][video]["crop"].split(
                        ",")

                if crop and not opencv:
                    clip = clip.crop(
                        y1=int(coords[2]),
                        y2=int(coords[3]),
                        x1=int(coords[0]),
                        x2=int(coords[1]),
                    )
                elif not crop:
                    coords = None

                print("Extracting frames based on %s ..." % algo)
                if algo == "uniform":
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick, start, stop)
                elif algo == "kmeans":
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap,
                            numframes2pick,
                            start,
                            stop,
                            crop,
                            coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color,
                        )
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip,
                            numframes2pick,
                            start,
                            stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color,
                        )
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
                    )
                    frames2pick = []

                if not len(frames2pick):
                    print("Frame selection failed...")
                    return

                output_path = (Path(config).parents[0] / "labeled-data" /
                               Path(video).stem)
                is_valid = []
                if opencv:
                    for index in frames2pick:
                        cap.set_to_frame(index)  # extract a particular frame
                        frame = cap.read_frame()
                        if frame is not None:
                            image = img_as_ubyte(frame)
                            img_name = (str(output_path) + "/img" +
                                        str(index).zfill(indexlength) + ".png")
                            if crop:
                                io.imsave(
                                    img_name,
                                    image[int(coords[2]):int(coords[3]),
                                          int(coords[0]):int(coords[1]), :, ],
                                )  # y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]
                            else:
                                io.imsave(img_name, image)
                            is_valid.append(True)
                        else:
                            print("Frame", index, " not found!")
                            is_valid.append(False)
                    cap.close()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1.0 / clip.fps))
                            img_name = (str(output_path) + "/img" +
                                        str(index).zfill(indexlength) + ".png")
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  # constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
                                )
                            is_valid.append(True)
                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")
                            is_valid.append(False)
                    clip.close()
                    del clip

                if not any(is_valid):
                    has_failed.append(True)
                else:
                    has_failed.append(False)

            else:  # NO!
                has_failed.append(False)

        if all(has_failed):
            print("Frame extraction failed. Video files may be corrupted.")
            return
        elif any(has_failed):
            print("Although most frames were extracted, some were invalid.")
        else:
            print(
                "Frames were successfully extracted, for the videos listed in the config.yaml file."
            )
        print(
            "\nYou can now label the frames using the function 'label_frames' "
            "(Note, you should label frames extracted from diverse videos (and many videos; we do not recommend training on single videos!))."
        )

    elif mode == "match":
        import cv2

        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")
        videos = sorted(cfg["video_sets"].keys())
        project_path = Path(config).parents[0]
        labels_path = os.path.join(project_path, "labeled-data/")
        video_dir = os.path.join(project_path, "videos/")
        try:
            cfg_3d = auxiliaryfunctions.read_config(config3d)
        except Exception:
            raise Exception(
                "You must create a 3D project and edit the 3D config file before extracting matched frames. \n"
            )
        cams = cfg_3d["camera_names"]
        extCam_name = cams[extracted_cam]
        del cams[extracted_cam]
        label_dirs = sorted(
            glob.glob(os.path.join(labels_path, "*" + extCam_name + "*")))

        # select crop method
        crop_list = []
        for video in videos:
            if extCam_name not in video:
                if crop == "GUI":
                    cfg = select_cropping_area(config, [video])
                    print("in gui code")
                coords = cfg["video_sets"][video]["crop"].split(",")

                if crop and not opencv:
                    clip = clip.crop(
                        y1=int(coords[2]),
                        y2=int(coords[3]),
                        x1=int(coords[0]),
                        x2=int(coords[1]),
                    )
                elif not crop:
                    coords = None
                crop_list.append(coords)
        print(crop_list)

        for coords, dirPath in zip(crop_list, label_dirs):
            extracted_images = glob.glob(os.path.join(dirPath, "*png"))

            imgPattern = re.compile("[0-9]{1,10}")
            for cam in cams:
                output_path = re.sub(extCam_name, cam, dirPath)

                for fname in os.listdir(output_path):
                    if fname.endswith(".png"):
                        os.remove(os.path.join(output_path, fname))

                vid = os.path.join(video_dir,
                                   os.path.basename(output_path)) + ".avi"
                cap = cv2.VideoCapture(vid)
                print("\n extracting matched frames from " +
                      os.path.basename(output_path) + ".avi")
                for img in extracted_images:
                    imgNum = re.findall(imgPattern, os.path.basename(img))[0]
                    cap.set(cv2.CAP_PROP_POS_FRAMES, int(imgNum))  # seek to the matched frame
                    ret, frame = cap.read()
                    if ret:
                        image = img_as_ubyte(
                            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                        img_name = str(output_path) + "/img" + imgNum + ".png"
                        if crop:
                            io.imsave(
                                img_name,
                                image[int(coords[2]):int(coords[3]),
                                      int(coords[0]):int(coords[1]), :, ],
                            )
                        else:
                            io.imsave(img_name, image)
        print(
            "\n Done extracting matched frames. You can now begin labeling frames using the function label_frames\n"
        )

    else:
        print(
            "Invalid MODE. Choose either 'manual', 'automatic' or 'match'. "
            "Check ``help(deeplabcut.extract_frames)`` in python or ``deeplabcut.extract_frames?`` "
            "in ipython/jupyter notebooks for more details."
        )
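
A hedged sketch of calling ``extract_frames`` on a subset of the registered videos, using only parameters from the signature above (the paths are hypothetical); ``userfeedback=False`` skips the per-video prompt:

>>> import deeplabcut
>>> deeplabcut.extract_frames(
...     '/analysis/project/reaching-task/config.yaml',
...     mode='automatic',
...     algo='kmeans',
...     userfeedback=False,
...     videos_list=['/data/videos/mouse1.avi'],
... )
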
Example #4
def add_new_videos(config, videos, copy_videos=False, coords=None):
    """
    Add new videos to the config file at any stage of the project.

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.

    videos : list
        A list of string containing the full paths of the videos to include in the project.

    copy_videos : bool, optional
        If this is set to True, the videos are copied to the project/videos directory; otherwise, symbolic links to the videos are created there. The default is
        ``False``; if provided it must be either ``True`` or ``False``.

    coords: list, optional
        A list containing the lists of cropping coordinates of the videos. The default is set to None.
    Examples
    --------
    A video will be added, with cropping dimensions set to the frame dimensions of mouse5.avi:
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'])

    A video will be added, with cropping dimensions [0,100,0,200]:
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'],copy_videos=False,coords=[[0,100,0,200]])

    Two videos will be added, with cropping dimensions [0,100,0,200] and [0,100,0,250], respectively:
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi','/data/videos/mouse6.avi'],copy_videos=False,coords=[[0,100,0,200],[0,100,0,250]])

    """
    import os
    import shutil
    from pathlib import Path

    from deeplabcut.utils import auxiliaryfunctions
    from deeplabcut.utils.auxfun_videos import VideoReader

    # Read the config file
    cfg = auxiliaryfunctions.read_config(config)

    video_path = Path(config).parents[0] / "videos"
    data_path = Path(config).parents[0] / "labeled-data"
    videos = [Path(vp) for vp in videos]

    dirs = [data_path / Path(i.stem) for i in videos]

    for p in dirs:
        # Create a directory under labeled-data for each video
        p.mkdir(parents=True, exist_ok=True)

    destinations = [video_path.joinpath(vp.name) for vp in videos]
    if copy_videos:
        for src, dst in zip(videos, destinations):
            if not dst.exists():
                print("Copying the videos")
                shutil.copy(os.fspath(src), os.fspath(dst))
    else:
        for src, dst in zip(videos, destinations):
            if not dst.exists():
                print("Creating the symbolic link of the video")
                os.symlink(str(src), str(dst))

    if copy_videos:
        videos = destinations  # in this case the *new* location should be added to the config file
    # adds the video list to the config.yaml file
    for idx, video in enumerate(videos):
        try:
            # On Windows, os.path.realpath does not resolve symlinks to the real video.
            video_path = str(Path.resolve(Path(video)))
        except OSError:
            video_path = os.readlink(video)

        vid = VideoReader(video_path)
        if coords is not None:
            c = coords[idx]
        else:
            c = vid.get_bbox()
        params = {video_path: {"crop": ", ".join(map(str, c))}}
        if "video_sets_original" not in cfg:
            cfg["video_sets"].update(params)
        else:
            cfg["video_sets_original"].update(params)

    auxiliaryfunctions.write_config(config, cfg)
    print(
        "New videos were added to the project! Use the function 'extract_frames' to select frames for labeling."
    )
Example #5
# Note that cropping requires a GUI, which may be a little buggy - if it is, you may get an error.

"""## Analysis Stage
The first thing we must do is import deeplabcut.
"""

import deeplabcut

"""### Check for corruption
Video corruption may occur as you move a video between computers.
We can check for corrupted videos and, if needed, fix them with the ffmpeg recipe below.
"""

if vid_check:
  from deeplabcut.utils.auxfun_videos import VideoReader
  vid = VideoReader(vid_path)
  vid.check_integrity()

# you should not get any printed output from this.

#################################################
# if you do, you need to run the next steps (line-by-line, outside of ipython!)
# see manual for more info, and don't forget to uncomment!

# exit() # to exit ipython if you are still inside it
# cd /d <path_to_the_folder_holding_your_video>
# ffmpeg -i <video_name> -c:v h264 -crf 18 -preset fast <fixed_video_name>

# for example:
# exit() # to exit ipython if you are still inside it
# cd /d ??????
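
# Alternatively, the same re-encode can be scripted from Python via subprocess.
# A hedged sketch: the output filename is an assumption, and fix_video is a
# hypothetical flag you set yourself if check_integrity reported errors.

fix_video = False
if fix_video:
  import subprocess
  fixed_path = vid_path.rsplit('.', 1)[0] + '_fixed.mp4'  # hypothetical output name
  subprocess.check_call(['ffmpeg', '-i', vid_path,
                         '-c:v', 'h264', '-crf', '18', '-preset', 'fast',
                         fixed_path])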
Example #6
class TrackletVisualizer:
    def __init__(self, manager, videoname, trail_len=50):
        self.manager = manager
        self.cmap = plt.cm.get_cmap(manager.cfg["colormap"],
                                    len(set(manager.tracklet2id)))
        self.videoname = videoname
        self.video = VideoReader(videoname)
        self.nframes = len(self.video)
        # Take into consideration imprecise OpenCV estimation of total number of frames
        if abs(self.nframes - manager.nframes) >= 0.05 * manager.nframes:
            print(
                "Video duration and data length do not match. Continuing nonetheless..."
            )
        self.trail_len = trail_len
        self.help_text = ""
        self.draggable = False
        self._curr_frame = 0
        self.curr_frame = 0

        self.picked = []
        self.picked_pair = []
        self.cuts = []

        self.player = BackgroundPlayer(self)
        self.thread_player = Thread(target=self.player.run, daemon=True)
        self.thread_player.start()

        self.dps = []

    def _prepare_canvas(self, manager, fig):
        params = {
            "keymap.save": "s",
            "keymap.back": "left",
            "keymap.forward": "right",
            "keymap.yscale": "l",
        }
        for k, v in params.items():
            if v in plt.rcParams[k]:
                plt.rcParams[k].remove(v)

        self.dotsize = manager.cfg["dotsize"]
        self.alpha = manager.cfg["alphavalue"]

        if fig is None:
            self.fig = plt.figure(figsize=(13, 8))
        else:
            self.fig = fig
        gs = self.fig.add_gridspec(2, 2)
        self.ax1 = self.fig.add_subplot(gs[:, 0])
        self.ax2 = self.fig.add_subplot(gs[0, 1])
        self.ax3 = self.fig.add_subplot(gs[1, 1], sharex=self.ax2)
        plt.subplots_adjust(bottom=0.2)
        for ax in self.ax1, self.ax2, self.ax3:
            ax.axis("off")

        self.colors = self.cmap(manager.tracklet2id)
        self.colors[:, -1] = self.alpha

        img = self.video.read_frame()
        self.im = self.ax1.imshow(img)
        self.scat = self.ax1.scatter([], [], s=self.dotsize**2, picker=True)
        self.scat.set_offsets(manager.xy[:, 0])
        self.scat.set_color(self.colors)
        self.trails = sum(
            [self.ax1.plot([], [], "-", lw=2, c=c) for c in self.colors], [])
        self.lines_x = sum(
            [
                self.ax2.plot([], [], "-", lw=1, c=c, pickradius=5)
                for c in self.colors
            ],
            [],
        )
        self.lines_y = sum(
            [
                self.ax3.plot([], [], "-", lw=1, c=c, pickradius=5)
                for c in self.colors
            ],
            [],
        )
        self.vline_x = self.ax2.axvline(0, 0, 1, c="k", ls=":")
        self.vline_y = self.ax3.axvline(0, 0, 1, c="k", ls=":")
        custom_lines = [
            plt.Line2D([0], [0], color=self.cmap(i), lw=4)
            for i in range(len(manager.individuals))
        ]
        self.leg = self.fig.legend(
            custom_lines,
            manager.individuals,
            frameon=False,
            fancybox=None,
            ncol=len(manager.individuals),
            fontsize="small",
            bbox_to_anchor=(0, 0.9, 1, 0.1),
            loc="center",
        )
        for line in self.leg.get_lines():
            line.set_picker(5)

        self.ax_slider = self.fig.add_axes([0.1, 0.1, 0.5, 0.03],
                                           facecolor="lightgray")
        self.ax_slider2 = self.fig.add_axes([0.1, 0.05, 0.3, 0.03],
                                            facecolor="darkorange")
        self.slider = Slider(
            self.ax_slider,
            "# Frame",
            self.curr_frame,
            manager.nframes - 1,
            valinit=0,
            valstep=1,
            valfmt="%i",
        )
        self.slider.on_changed(self.on_change)
        self.slider2 = Slider(
            self.ax_slider2,
            "Marker size",
            1,
            30,
            valinit=self.dotsize,
            valstep=1,
            valfmt="%i",
        )
        self.slider2.on_changed(self.update_dotsize)
        self.ax_drag = self.fig.add_axes([0.65, 0.1, 0.05, 0.03])
        self.ax_lasso = self.fig.add_axes([0.7, 0.1, 0.05, 0.03])
        self.ax_flag = self.fig.add_axes([0.75, 0.1, 0.05, 0.03])
        self.ax_save = self.fig.add_axes([0.80, 0.1, 0.05, 0.03])
        self.ax_help = self.fig.add_axes([0.85, 0.1, 0.05, 0.03])
        self.save_button = Button(self.ax_save, "Save", color="darkorange")
        self.save_button.on_clicked(self.save)
        self.help_button = Button(self.ax_help, "Help")
        self.help_button.on_clicked(self.display_help)
        self.drag_toggle = CheckButtons(self.ax_drag, ["Drag"])
        self.drag_toggle.on_clicked(self.toggle_draggable_points)
        self.flag_button = Button(self.ax_flag, "Flag")
        self.flag_button.on_clicked(self.flag_frame)

        self.fig.canvas.mpl_connect("pick_event", self.on_pick)
        self.fig.canvas.mpl_connect("key_press_event", self.on_press)
        self.fig.canvas.mpl_connect("button_press_event", self.on_click)
        self.fig.canvas.mpl_connect("close_event", self.player.terminate)

        self.selector = PointSelector(self, self.ax1, self.scat, self.alpha)
        self.lasso_toggle = CheckButtons(self.ax_lasso, ["Lasso"])
        self.lasso_toggle.on_clicked(self.selector.toggle)
        self.display_traces(only_picked=False)
        self.ax1_background = self.fig.canvas.copy_from_bbox(self.ax1.bbox)
        plt.show()

    def show(self, fig=None):
        self._prepare_canvas(self.manager, fig)

    def fill_shaded_areas(self):
        self.clean_collections()
        if self.picked_pair:
            mask = self.manager.get_nonoverlapping_segments(*self.picked_pair)
            for ax in self.ax2, self.ax3:
                ax.fill_between(
                    self.manager.times,
                    *ax.dataLim.intervaly,
                    mask,
                    facecolor="darkgray",
                    alpha=0.2,
                )
            trans = mtransforms.blended_transform_factory(
                self.ax_slider.transData, self.ax_slider.transAxes)
            self.ax_slider.vlines(np.flatnonzero(mask),
                                  0,
                                  0.5,
                                  color="darkorange",
                                  transform=trans)

    def toggle_draggable_points(self, *args):
        self.draggable = not self.draggable
        if self.draggable:
            self._curr_frame = self.curr_frame
            self.scat.set_offsets(np.empty((0, 2)))
            self.add_draggable_points()
        else:
            self.save_coords()
            self.clean_points()
            self.display_points(self._curr_frame)
        self.fig.canvas.draw_idle()

    def add_point(self, center, animal, bodypart, **kwargs):
        circle = patches.Circle(center, **kwargs)
        self.ax1.add_patch(circle)
        dp = auxfun_drag.DraggablePoint(circle, bodypart, animal)
        dp.connect()
        self.dps.append(dp)

    def clean_points(self):
        for dp in self.dps:
            dp.annot.set_visible(False)
            dp.disconnect()
        self.dps = []
        for patch in self.ax1.patches[::-1]:
            patch.remove()

    def add_draggable_points(self):
        self.clean_points()
        xy, _, inds = self.manager.get_non_nan_elements(self.curr_frame)
        for i, (animal, bodypart) in enumerate(self.manager._label_pairs):
            if i in inds:
                coords = xy[inds == i].squeeze()
                self.add_point(
                    coords,
                    animal,
                    bodypart,
                    radius=self.dotsize,
                    fc=self.colors[i],
                    alpha=self.alpha,
                )

    def save_coords(self):
        coords, nonempty, inds = self.manager.get_non_nan_elements(
            self._curr_frame)
        if not inds.size:
            return
        prob = self.manager.prob[:, self._curr_frame]
        for dp in self.dps:
            label = dp.individual_names, dp.bodyParts
            ind = self.manager._label_pairs.index(label)
            nrow = np.flatnonzero(inds == ind)
            if not nrow.size:
                return
            nrow = nrow[0]
            if not np.array_equal(
                    coords[nrow],
                    dp.point.center):  # Keypoint has been displaced
                coords[nrow] = dp.point.center
                prob[ind] = 1
        self.manager.xy[nonempty, self._curr_frame] = coords

    def flag_frame(self, *args):
        self.cuts.append(self.curr_frame)
        self.ax_slider.axvline(self.curr_frame, color="r")
        if len(self.cuts) == 2:
            self.cuts.sort()
            mask = np.zeros_like(self.manager.times, dtype=bool)
            mask[self.cuts[0]:self.cuts[1] + 1] = True
            for ax in self.ax2, self.ax3:
                ax.fill_between(
                    self.manager.times,
                    *ax.dataLim.intervaly,
                    mask,
                    facecolor="darkgray",
                    alpha=0.2,
                )
            trans = mtransforms.blended_transform_factory(
                self.ax_slider.transData, self.ax_slider.transAxes)
            self.ax_slider.vlines(np.flatnonzero(mask),
                                  0,
                                  0.5,
                                  color="darkorange",
                                  transform=trans)
        self.fig.canvas.draw_idle()

    def on_scroll(self, event):
        cur_xlim = self.ax1.get_xlim()
        cur_ylim = self.ax1.get_ylim()
        xdata = event.xdata
        ydata = event.ydata
        if event.button == "up":
            scale_factor = 0.5
        elif event.button == "down":
            scale_factor = 2
        else:  # This should never happen anyway
            scale_factor = 1

        self.ax1.set_xlim([
            xdata - (xdata - cur_xlim[0]) / scale_factor,
            xdata + (cur_xlim[1] - xdata) / scale_factor,
        ])
        self.ax1.set_ylim([
            ydata - (ydata - cur_ylim[0]) / scale_factor,
            ydata + (cur_ylim[1] - ydata) / scale_factor,
        ])
        self.fig.canvas.draw()

    def on_press(self, event):
        if event.key == "n" or event.key == "right":
            self.move_forward()
        elif event.key == "b" or event.key == "left":
            self.move_backward()
        elif event.key == "s":
            self.swap()
        elif event.key == "i":
            self.invert()
        elif event.key == "x":
            self.flag_frame()
            if len(self.cuts) > 1:
                self.cuts.sort()
                if self.picked_pair:
                    self.manager.tracklet_swaps[self.picked_pair][
                        self.cuts] = ~self.manager.tracklet_swaps[
                            self.picked_pair][self.cuts]
                    self.fill_shaded_areas()
                    self.cuts = []
                    self.ax_slider.lines.clear()
        elif event.key == "backspace":
            if not self.dps:  # Last flag deletion
                try:
                    self.cuts.pop()
                    self.ax_slider.lines.pop()
                    if len(self.cuts) != 2:
                        self.clean_collections()
                except IndexError:
                    pass
            else:  # Smart point removal
                i = np.nanargmin([
                    self.calc_distance(*dp.point.center, event.xdata,
                                       event.ydata) for dp in self.dps
                ])
                closest_dp = self.dps[i]
                label = closest_dp.individual_names, closest_dp.bodyParts
                closest_dp.disconnect()
                closest_dp.point.remove()
                self.dps.remove(closest_dp)
                ind = self.manager._label_pairs.index(label)
                self.manager.xy[ind, self._curr_frame] = np.nan
                self.manager.prob[ind, self._curr_frame] = np.nan
            self.fig.canvas.draw_idle()
        elif event.key == "l":
            self.lasso_toggle.set_active(0)  # CheckButtons.set_active(index) toggles the box at that index
        elif event.key == "d":
            self.drag_toggle.set_active(0)
        elif event.key == "alt+right":
            self.player.forward()
        elif event.key == "alt+left":
            self.player.rewind()
        elif event.key == " " or event.key == "tab":
            self.player.toggle()

    def move_forward(self):
        if self.curr_frame < self.manager.nframes - 1:
            self.curr_frame += 1
            self.slider.set_val(self.curr_frame)

    def move_backward(self):
        if self.curr_frame > 0:
            self.curr_frame -= 1
            self.slider.set_val(self.curr_frame)

    def swap(self):
        if self.picked_pair:
            swap_inds = self.manager.get_swap_indices(*self.picked_pair)
            inds = np.insert(swap_inds, [0, len(swap_inds)],
                             [0, self.manager.nframes - 1])
            if len(inds):
                ind = np.argmax(inds > self.curr_frame)
                self.manager.swap_tracklets(
                    *self.picked_pair, range(inds[ind - 1], inds[ind] + 1))
                self.display_traces()
                self.slider.set_val(self.curr_frame)

    def invert(self):
        if not self.picked_pair and len(self.picked) == 2:
            self.picked_pair = self.picked
        if self.picked_pair:
            self.manager.swap_tracklets(*self.picked_pair, [self.curr_frame])
            self.display_traces()
            self.slider.set_val(self.curr_frame)

    def on_pick(self, event):
        artist = event.artist
        if artist.axes == self.ax1:
            self.picked = list(event.ind)
        elif artist.axes == self.ax2:
            if isinstance(artist, plt.Line2D):
                self.picked = [self.lines_x.index(artist)]
        elif artist.axes == self.ax3:
            if isinstance(artist, plt.Line2D):
                self.picked = [self.lines_y.index(artist)]
        else:  # Click on the legend lines
            if self.picked:
                num_individual = self.leg.get_lines().index(artist)
                nrow = self.manager.tracklet2id.index(num_individual)
                inds = [
                    nrow + self.manager.to_num_bodypart(pick)
                    for pick in self.picked
                ]
                xy = self.manager.xy[self.picked]
                p = self.manager.prob[self.picked]
                mask = np.zeros(xy.shape[1], dtype=bool)
                if len(self.cuts) > 1:
                    mask[self.cuts[-2]:self.cuts[-1] + 1] = True
                    self.cuts = []
                    self.ax_slider.lines.clear()
                    self.clean_collections()
                else:
                    return
                sl_inds = np.ix_(inds, mask)
                sl_picks = np.ix_(self.picked, mask)
                old_xy = self.manager.xy[sl_inds].copy()
                old_prob = self.manager.prob[sl_inds].copy()
                self.manager.xy[sl_inds] = xy[:, mask]
                self.manager.prob[sl_inds] = p[:, mask]
                self.manager.xy[sl_picks] = old_xy
                self.manager.prob[sl_picks] = old_prob
        self.picked_pair = []
        if len(self.picked) == 1:
            for pair in self.manager.swapping_pairs:
                if self.picked[0] in pair:
                    self.picked_pair = pair
                    break
        self.clean_collections()
        self.display_traces()
        if self.picked_pair:
            self.fill_shaded_areas()
        self.slider.set_val(self.curr_frame)

    def on_click(self, event):
        if (event.inaxes in (self.ax2, self.ax3) and event.button == 1
                and not any(
                    line.contains(event)[0]
                    for line in self.lines_x + self.lines_y)):
            x = max(0, min(event.xdata, self.manager.nframes - 1))
            self.update_vlines(x)
            self.slider.set_val(x)
        elif event.inaxes == self.ax1 and not self.scat.contains(event)[0]:
            self.display_traces(only_picked=False)
            self.clean_collections()

    def clean_collections(self):
        for coll in (self.ax2.collections + self.ax3.collections +
                     self.ax_slider.collections):
            coll.remove()

    def display_points(self, val):
        data = self.manager.xy[:, val]
        self.scat.set_offsets(data)

    def display_trails(self, val):
        sl = slice(val - self.trail_len // 2, val + self.trail_len // 2)
        for n, trail in enumerate(self.trails):
            if n in self.picked:
                xy = self.manager.xy[n, sl]
                trail.set_data(*xy.T)
            else:
                trail.set_data([], [])

    def display_traces(self, only_picked=True):
        if only_picked:
            inds = self.picked + list(self.picked_pair)
        else:
            inds = self.manager.swapping_bodyparts
        for n, (line_x, line_y) in enumerate(zip(self.lines_x, self.lines_y)):
            if n in inds:
                line_x.set_data(self.manager.times, self.manager.xy[n, :, 0])
                line_y.set_data(self.manager.times, self.manager.xy[n, :, 1])
            else:
                line_x.set_data([], [])
                line_y.set_data([], [])
        for ax in self.ax2, self.ax3:
            ax.relim()
            ax.autoscale_view()

    def display_help(self, event):
        if not self.help_text:
            self.help_text = """
            Key D: activate "drag" so you can adjust bodyparts in that particular frame
            Key I: invert the position of a pair of bodyparts
            Key L: toggle the lasso selector
            Key S: swap two tracklets
            Key X: cut swapping tracklets
            Left/Right arrow OR Key B/Key N: navigate through the video (back/next)
            Tab or SPACE: play/pause the video
            Alt+Right/Left: fast forward/rewind - toggles through 5 speed levels
            Backspace: deletes last flag (if set) or deletes point
            Key P: toggles on pan/zoom tool - left button and drag to pan, right button and drag to zoom
            """
            self.text = self.fig.text(
                0.5,
                0.5,
                self.help_text,
                horizontalalignment="center",
                verticalalignment="center",
                fontsize=12,
                color="red",
            )
        else:
            self.help_text = ""
            self.text.remove()

    def update_vlines(self, val):
        self.vline_x.set_xdata([val, val])
        self.vline_y.set_xdata([val, val])

    def on_change(self, val):
        self.curr_frame = int(val)
        self.video.set_to_frame(self.curr_frame)
        img = self.video.read_frame()
        if img is not None:
            # Automatically disable the draggable points
            if self.draggable:
                self.drag_toggle.set_active(False)

            self.im.set_array(img)
            self.display_points(self.curr_frame)
            self.display_trails(self.curr_frame)
            self.update_vlines(self.curr_frame)

    def update_dotsize(self, val):
        self.dotsize = val
        self.scat.set_sizes([self.dotsize**2])

    @staticmethod
    def calc_distance(x1, y1, x2, y2):
        return np.sqrt((x1 - x2)**2 + (y1 - y2)**2)

    def save(self, *args):
        self.save_coords()
        self.manager.save()

    def export_to_training_data(self, pcutoff=0.1):
        import os
        from skimage import io

        inds = self.manager.find_edited_frames()
        if not len(inds):
            print("No frames have been manually edited.")
            return

        # Save additional frames to the labeled-data directory
        strwidth = int(np.ceil(np.log10(self.nframes)))
        tmpfolder = os.path.join(self.manager.cfg["project_path"],
                                 "labeled-data", self.video.name)
        if os.path.isdir(tmpfolder):
            print(
                "Frames from video",
                self.video.name,
                " already extracted (more will be added)!",
            )
        else:
            attempttomakefolder(tmpfolder)
        index = []
        for ind in inds:
            imagename = os.path.join(tmpfolder,
                                     "img" + str(ind).zfill(strwidth) + ".png")
            index.append(os.path.join(*imagename.rsplit(os.path.sep, 3)[-3:]))
            if not os.path.isfile(imagename):
                self.video.set_to_frame(ind)
                frame = self.video.read_frame()
                if frame is None:
                    print("Frame could not be read. Skipping...")
                    continue
                frame = frame.astype(np.ubyte)
                if self.manager.cfg["cropping"]:
                    x1, x2, y1, y2 = [
                        int(self.manager.cfg[key])
                        for key in ("x1", "x2", "y1", "y2")
                    ]
                    frame = frame[y1:y2, x1:x2]
                io.imsave(imagename, frame)

        # Store the newly-refined data
        data = self.manager.format_data()
        df = data.iloc[inds]

        # Uncertain keypoints are ignored
        def filter_low_prob(cols, prob):
            mask = cols.iloc[:, 2] < prob
            cols.loc[mask] = np.nan
            return cols

        df = df.groupby(level="bodyparts", axis=1).apply(filter_low_prob,
                                                         prob=pcutoff)
        df.index = index
        machinefile = os.path.join(
            tmpfolder,
            "machinelabels-iter" + str(self.manager.cfg["iteration"]) + ".h5")
        if os.path.isfile(machinefile):
            df_old = pd.read_hdf(machinefile)
            df_joint = pd.concat([df_old, df])
            df_joint = df_joint[~df_joint.index.duplicated(keep="first")]
            df_joint.to_hdf(machinefile, key="df_with_missing", mode="w")
            df_joint.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))
        else:
            df.to_hdf(machinefile, key="df_with_missing", mode="w")
            df.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))

        # Merge with the already existing annotated data
        df.columns.set_levels([self.manager.cfg["scorer"]],
                              level="scorer",
                              inplace=True)
        df.drop("likelihood", level="coords", axis=1, inplace=True)
        output_path = os.path.join(
            tmpfolder, f'CollectedData_{self.manager.cfg["scorer"]}.h5')
        if os.path.isfile(output_path):
            print(
                "A training dataset file was already found for this video. The refined machine labels will be merged into it!"
            )
            df_orig = pd.read_hdf(output_path)
            df_joint = pd.concat([df, df_orig])
            # Now drop redundant ones keeping the first one [this will make sure that the refined machine file gets preference]
            df_joint = df_joint[~df_joint.index.duplicated(keep="first")]
            df_joint.sort_index(inplace=True)
            df_joint.to_hdf(output_path, key="df_with_missing", mode="w")
            df_joint.to_csv(output_path.replace("h5", "csv"))
        else:
            df.sort_index(inplace=True)
            df.to_hdf(output_path, key="df_with_missing", mode="w")
            df.to_csv(output_path.replace("h5", "csv"))
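
A hedged usage sketch for the class above: construct it with a tracklet manager and a video path (both hypothetical; the manager is assumed to expose the attributes used above, e.g. ``cfg``, ``xy``, ``nframes``), then open the interactive canvas:

>>> viz = TrackletVisualizer(manager, '/data/videos/mouse1.avi', trail_len=50)
>>> viz.show()                                # opens the interactive matplotlib canvas
>>> viz.export_to_training_data(pcutoff=0.1)  # after refining, export edited frames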
Example #7
def create_new_project(
    project,
    experimenter,
    videos,
    working_directory=None,
    copy_videos=False,
    videotype=".avi",
    multianimal=False,
):
    """Creates a new project directory, sub-directories and a basic configuration file. The configuration file is loaded with the default values. Change its parameters to your project's needs.

    Parameters
    ----------
    project : string
        String containing the name of the project.

    experimenter : string
        String containing the name of the experimenter.

    videos : list
        A list of strings containing the full paths of the videos to include in the project.
        Attention: this can also be a directory, in which case all videos of the given videotype will be imported.

    working_directory : string, optional
        The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.

    copy_videos : bool, optional
        If this is set to True, the videos are copied to the ``videos`` directory. If it is False, symbolic links to the videos are created in the project/videos directory. The default is ``False``; if provided it must be either
        ``True`` or ``False``.

    multianimal: bool, optional. Default: False.
        For creating a multi-animal project (introduced in DLC 2.2)

    Example
    --------
    Linux/MacOs
    >>> deeplabcut.create_new_project('reaching-task','Linus',['/data/videos/mouse1.avi','/data/videos/mouse2.avi','/data/videos/mouse3.avi'],'/analysis/project/')
    >>> deeplabcut.create_new_project('reaching-task','Linus',['/data/videos'],videotype='.mp4')

    Windows:
    >>> deeplabcut.create_new_project('reaching-task','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'], copy_videos=True)
    Users must format Windows paths as either a raw string, r'C:\...', or with double backslashes, 'C:\\...'.

    """
    from datetime import datetime as dt
    from deeplabcut.utils import auxiliaryfunctions

    date = dt.today()
    month = date.strftime("%B")
    day = date.day
    d = str(month[0:3] + str(day))
    date = dt.today().strftime("%Y-%m-%d")
    if working_directory is None:
        working_directory = "."
    wd = Path(working_directory).resolve()
    project_name = "{pn}-{exp}-{date}".format(pn=project, exp=experimenter, date=date)
    project_path = wd / project_name

    # Create project and sub-directories
    if not DEBUG and project_path.exists():
        print('Project "{}" already exists!'.format(project_path))
        return
    video_path = project_path / "videos"
    data_path = project_path / "labeled-data"
    shuffles_path = project_path / "training-datasets"
    results_path = project_path / "dlc-models"
    for p in [video_path, data_path, shuffles_path, results_path]:
        p.mkdir(parents=True, exist_ok=DEBUG)
        print('Created "{}"'.format(p))

    # Add all videos in the folder. Multiple folders can be passed in a list, similar to the video files. Folders and video files can also be passed!
    vids = []
    for i in videos:
        # Check if it is a folder
        if os.path.isdir(i):
            vids_in_dir = [
                os.path.join(i, vp) for vp in os.listdir(i) if videotype in vp
            ]
            vids = vids + vids_in_dir
            if len(vids_in_dir) == 0:
                print("No videos found in", i)
                print(
                    "Perhaps change the videotype, which is currently set to:",
                    videotype,
                )
            else:
                videos = vids
                print(
                    len(vids_in_dir),
                    " videos from the directory",
                    i,
                    "were added to the project.",
                )
        else:
            if os.path.isfile(i):
                vids = vids + [i]
            videos = vids

    videos = [Path(vp) for vp in videos]
    dirs = [data_path / Path(i.stem) for i in videos]
    for p in dirs:
        # Create a directory under labeled-data for each video
        p.mkdir(parents=True, exist_ok=True)

    destinations = [video_path.joinpath(vp.name) for vp in videos]
    if copy_videos:
        print("Copying the videos")
        for src, dst in zip(videos, destinations):
            shutil.copy(
                os.fspath(src), os.fspath(dst)
            )  # https://www.python.org/dev/peps/pep-0519/
    else:
        # Creates symlinks of the videos and puts them in the videos directory.
        print("Attempting to create a symbolic link of the video ...")
        for src, dst in zip(videos, destinations):
            if dst.exists() and not DEBUG:
                raise FileExistsError("Video {} exists already!".format(dst))
            try:
                src = str(src)
                dst = str(dst)
                os.symlink(src, dst)
            except OSError:
                import subprocess

                subprocess.check_call("mklink %s %s" % (dst, src), shell=True)
            print("Created the symlink of {} to {}".format(src, dst))
        videos = destinations

    if copy_videos:
        videos = destinations  # in this case the *new* location should be added to the config file

    # adds the video list to the config.yaml file
    video_sets = {}
    for video in videos:
        print(video)
        try:
            # On Windows, os.path.realpath does not resolve to the real video,
            # so use Path.resolve instead.
            rel_video_path = str(Path(video).resolve())
        except OSError:
            rel_video_path = os.readlink(str(video))

        try:
            vid = VideoReader(rel_video_path)
            video_sets[rel_video_path] = {"crop": ", ".join(map(str, vid.get_bbox()))}
        except IOError:
            warnings.warn("Cannot open the video file! Skipping to the next one...")
            os.remove(video)  # Removing the video or link from the project

    if not len(video_sets):
        # Silently sweep the files that were already written.
        shutil.rmtree(project_path, ignore_errors=True)
        warnings.warn(
            "No valid videos were found. The project was not created... "
            "Verify the video files and re-create the project."
        )
        return "nothingcreated"

    # Set values to config file:
    if multianimal:  # parameters specific to multianimal project
        cfg_file, ruamelFile = auxiliaryfunctions.create_config_template(multianimal)
        cfg_file["multianimalproject"] = multianimal
        cfg_file["identity"] = False
        cfg_file["individuals"] = ["individual1", "individual2", "individual3"]
        cfg_file["multianimalbodyparts"] = ["bodypart1", "bodypart2", "bodypart3"]
        cfg_file["uniquebodyparts"] = []
        cfg_file["bodyparts"] = "MULTI!"
        cfg_file["skeleton"] = [
            ["bodypart1", "bodypart2"],
            ["bodypart2", "bodypart3"],
            ["bodypart1", "bodypart3"],
        ]
        cfg_file["default_augmenter"] = "multi-animal-imgaug"
    else:
        cfg_file, ruamelFile = auxiliaryfunctions.create_config_template()
        cfg_file["multianimalproject"] = False
        cfg_file["bodyparts"] = ["bodypart1", "bodypart2", "bodypart3", "objectA"]
        cfg_file["skeleton"] = [["bodypart1", "bodypart2"], ["objectA", "bodypart3"]]
        cfg_file["default_augmenter"] = "default"
    cfg_file["croppedtraining"] = False

    # common parameters:
    cfg_file["Task"] = project
    cfg_file["scorer"] = experimenter
    cfg_file["video_sets"] = video_sets
    cfg_file["project_path"] = str(project_path)
    cfg_file["date"] = d
    cfg_file["cropping"] = False
    cfg_file["start"] = 0
    cfg_file["stop"] = 1
    cfg_file["numframes2pick"] = 20
    cfg_file["TrainingFraction"] = [0.95]
    cfg_file["iteration"] = 0
    cfg_file["default_net_type"] = "resnet_50"
    cfg_file["snapshotindex"] = -1
    cfg_file["x1"] = 0
    cfg_file["x2"] = 640
    cfg_file["y1"] = 277
    cfg_file["y2"] = 624
    cfg_file["batch_size"] = 8  # batch size during inference (video analysis); see https://www.biorxiv.org/content/early/2018/10/30/457242
    cfg_file["corner2move2"] = (50, 50)
    cfg_file["move2corner"] = True
    cfg_file["skeleton_color"] = "black"
    cfg_file["pcutoff"] = 0.6
    cfg_file["dotsize"] = 12  # for plots size of dots
    cfg_file["alphavalue"] = 0.7  # for plots transparency of markers
    cfg_file["colormap"] = "rainbow"  # for plots type of colormap

    projconfigfile = os.path.join(str(project_path), "config.yaml")
    # Write dictionary to yaml  config file
    auxiliaryfunctions.write_config(projconfigfile, cfg_file)

    print('Generated "{}"'.format(project_path / "config.yaml"))
    print(
        "\nA new project with name %s is created at %s and a configurable file (config.yaml) is stored there. "
        "Change the parameters in this file to adapt it to your project's needs.\n"
        "Once you have changed the configuration file, use the function 'extract_frames' to select frames for labeling.\n"
        "[OPTIONAL] Use the function 'add_new_videos' to add new videos to your project (at any stage)."
        % (project_name, str(wd))
    )
    return projconfigfile
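
For orientation, a minimal end-to-end call assembled from the docstring above; the task name, experimenter, paths, and working directory are placeholders, and copy_videos=True keeps the project self-contained even if the source videos later move:

import deeplabcut

# Hypothetical paths -- substitute your own task name, experimenter, and videos.
config_path = deeplabcut.create_new_project(
    "reaching-task",                      # stored as cfg_file["Task"]
    "Bill",                               # stored as the scorer
    ["/data/videos/reachingvideo1.avi"],
    working_directory="/data/projects",
    copy_videos=True,                     # copy instead of symlinking
)
# The returned path points at the freshly written config.yaml.
print(config_path)
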
Example #8
def create_labeled_video_3d(
    config,
    path,
    videofolder=None,
    start=0,
    end=None,
    trailpoints=0,
    videotype="avi",
    view=[-113, -270],
    xlim=[None, None],
    ylim=[None, None],
    zlim=[None, None],
    draw_skeleton=True,
):
    """
    Creates a video with views from the two cameras and the 3d reconstruction for a selected number of frames.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    path : list
        A list of strings containing the full paths to triangulated files for analysis or a path to the directory, where all the triangulated files are stored.

    videofolder: string
        Full path of the folder where the videos are stored. Use this if the videos are stored in a location other than where the triangulation files are stored. By default it is ``None``, and video files are then looked for in the directory where the triangulation file is stored.

    start: int
        Integer specifying the start of frame index to select. Default is set to 0.

    end: int
        Integer specifying the end of frame index to select. Default is set to None, where all the frames of the video are used for creating the labeled video.

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\nOnly videos with this extension are analyzed. The default is ``.avi``

    view: list
        A list that sets the elevation angle in z plane and azimuthal angle in x,y plane of 3d view. Useful for rotating the axis for 3d view

    xlim: list
        A list of integers specifying the limits for xaxis of 3d view. By default it is set to [None,None], where the x limit is set by taking the minimum and maximum value of the x coordinates for all the bodyparts.

    ylim: list
        A list of integers specifying the limits for yaxis of 3d view. By default it is set to [None,None], where the y limit is set by taking the minimum and maximum value of the y coordinates for all the bodyparts.

    zlim: list
        A list of integers specifying the limits for zaxis of 3d view. By default it is set to [None,None], where the z limit is set by taking the minimum and maximum value of the z coordinates for all the bodyparts.

    draw_skeleton: bool
        If ``True``, adds a line connecting the body parts, making a skeleton on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``True``

    Example
    -------
    Linux/MacOs
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos/3d.h5'],start=100, end=500)

    To create labeled videos for all the triangulated files in the folder
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500)

    To set the xlim, ylim, zlim and rotate the view of the 3d axis
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500,view=[30,90],xlim=[-12,12],ylim=[15,25],zlim=[20,30])

    """
    start_path = os.getcwd()

    # Read the config file and related variables
    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    markerSize = cfg_3d["dotsize"]
    alphaValue = cfg_3d["alphaValue"]
    cmap = cfg_3d["colormap"]
    bodyparts2connect = cfg_3d["skeleton"]
    skeleton_color = cfg_3d["skeleton_color"]
    scorer_3d = cfg_3d["scorername_3d"]

    # Flatten the list of bodyparts to connect
    bodyparts2plot = list(
        np.unique([val for sublist in bodyparts2connect for val in sublist]))
    color = plt.cm.get_cmap(cmap, len(bodyparts2plot))
    file_list = auxiliaryfunctions_3d.Get_list_of_triangulated_and_videoFiles(
        path, videotype, scorer_3d, cam_names, videofolder)
    print(file_list)
    if not file_list:
        raise Exception(
            "No corresponding video file(s) found for the specified triangulated file or folder. Did you specify the video file type? If videos are stored in a different location, please use the ``videofolder`` argument to specify their path."
        )

    for file in file_list:
        path_h5_file = Path(file[0]).parents[0]
        triangulate_file = file[0]
        # triangulated file is a list which is always sorted as [triangulated.h5,camera-1.videotype,camera-2.videotype]
        # name for output video
        file_name = str(Path(triangulate_file).stem)
        if os.path.isfile(os.path.join(path_h5_file, file_name + ".mpg")):
            print("Video already created...")
        else:
            string_to_remove = str(Path(triangulate_file).suffix)
            pickle_file = triangulate_file.replace(string_to_remove,
                                                   "_meta.pickle")
            metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)

            base_filename_cam1 = str(Path(file[1]).stem).split(videotype)[
                0]  # required for searching the filtered file
            base_filename_cam2 = str(Path(file[2]).stem).split(videotype)[
                0]  # required for searching the filtered file
            cam1_view_video = file[1]
            cam2_view_video = file[2]
            cam1_scorer = metadata_["scorer_name"][cam_names[0]]
            cam2_scorer = metadata_["scorer_name"][cam_names[1]]
            print("Creating 3D video from %s and %s using %s" % (
                Path(cam1_view_video).name,
                Path(cam2_view_video).name,
                Path(triangulate_file).name,
            ))

            # Read the video files and corresponding h5 files
            vid_cam1 = VideoReader(cam1_view_video)
            vid_cam2 = VideoReader(cam2_view_video)

            # Look for the filtered predictions file
            try:
                print("Looking for filtered predictions...")
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam1 + cam1_scorer +
                                "*filtered.h5"),
                        ))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam2 + cam2_scorer +
                                "*filtered.h5"),
                        ))[0])
                print(
                    "Found the following filtered data: ",
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam1 + cam1_scorer +
                            "*filtered.h5"),
                    ),
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam2 + cam2_scorer +
                            "*filtered.h5"),
                    ),
                )
            except FileNotFoundError:
                print(
                    "No filtered predictions found, the unfiltered predictions will be used instead."
                )
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam1 + cam1_scorer +
                                "*.h5")))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam2 + cam2_scorer +
                                "*.h5")))[0])

            df_3d = pd.read_hdf(triangulate_file, "df_with_missing")
            plt.rcParams.update({"figure.max_open_warning": 0})

            if end is None:
                end = len(df_3d)  # All the frames
            frames = list(range(start, end, 1))

            # Start plotting for every frame
            for k in tqdm(frames):
                output_folder, num_frames = plot2D(
                    cfg_3d,
                    k,
                    bodyparts2plot,
                    vid_cam1,
                    vid_cam2,
                    bodyparts2connect,
                    df_cam1,
                    df_cam2,
                    df_3d,
                    pcutoff,
                    markerSize,
                    alphaValue,
                    color,
                    path_h5_file,
                    file_name,
                    skeleton_color,
                    view,
                    draw_skeleton,
                    trailpoints,
                    xlim,
                    ylim,
                    zlim,
                )

            # Once all the frames are saved, then make a movie using ffmpeg.
            cwd = os.getcwd()
            os.chdir(str(output_folder))
            subprocess.call([
                "ffmpeg",
                "-start_number",
                str(start),
                "-framerate",
                str(30),
                "-i",
                str("img%0" + str(num_frames) + "d.png"),
                "-r",
                str(30),
                "-vb",
                "20M",
                os.path.join(output_folder, str("../" + file_name + ".mpg")),
            ])
            os.chdir(cwd)

    os.chdir(start_path)
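
The closing ffmpeg call is easy to exercise in isolation. A minimal sketch, assuming ffmpeg is on the PATH and the frames follow the zero-padded img%0Nd.png naming used above; frame_dir, out_path, and digits are placeholders:

import os
import subprocess

def frames_to_movie(frame_dir, out_path, start=0, fps=30, digits=4):
    # Stitch numbered PNGs (img0000.png, img0001.png, ...) into a movie,
    # mirroring the subprocess.call above.
    pattern = "img%0{}d.png".format(digits)
    subprocess.check_call([
        "ffmpeg",
        "-start_number", str(start),          # index of the first frame
        "-framerate", str(fps),               # input frame rate
        "-i", os.path.join(frame_dir, pattern),
        "-r", str(fps),                       # output frame rate
        "-vb", "20M",                         # generous bitrate, as above
        out_path,
    ])
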
Example #9
def extract_frames(config,
                   mode="automatic",
                   algo="kmeans",
                   crop=False,
                   userfeedback=True,
                   cluster_step=1,
                   cluster_resizewidth=30,
                   cluster_color=False,
                   opencv=True,
                   slider_width=25,
                   user_index=None):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos listed in the config.yaml will be used to select frames.\n
    Use the function ``add_new_videos`` at any stage of the project to add new videos to the config file and extract their frames.

    The function either selects frames from the videos at random in a temporally uniform way (uniform),\n
    by clustering based on visual appearance (k-means), or by manual selection.

    Three important parameters for automatic extraction (numframes2pick, start, and stop) are set in the config file.

    Please refer to the user guide for more details on methods and parameters: https://www.nature.com/articles/s41596-019-0176-0
    or the preprint: https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.

    algo : string
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only relevant for ``automatic`` mode, and the default is ``kmeans``. For uniform, frames are picked at random in a temporally uniform way; kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this.

    crop : bool, optional
        If True, video frames are cropped according to the corresponding coordinates stored in the config.yaml.
        Alternatively, if cropping coordinates are not known yet, crop='GUI' triggers a user interface
        where the cropping area can be manually drawn and saved.

    userfeedback: bool, optional
        If this is set to False during automatic mode, then frames for all videos are extracted. If set to True, a dialog asks, for each video,
        whether (additional/any) frames from this video should be extracted. Use this, e.g., if you have already labeled
        some folders and want to extract data for new videos.

    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).

    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could use only every nth frame (set by cluster_step). This saves memory before clustering starts; however,
        reading the individual frames takes longer due to the skipping.

    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases
        the computational complexity.

    opencv: bool, default: True
        Uses OpenCV for loading & extracting (otherwise moviepy (legacy))

    slider_width: number, default: 25
        Width of the video frames slider, in percent of window

    Examples
    --------
    for selecting frames automatically with 'kmeans' and cropping the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    for selecting frames automatically with 'kmeans' and defining the cropping area at runtime.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans','GUI')
    --------
    for selecting frames automatically with 'kmeans' and considering the color information.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    for selecting frames automatically with 'uniform' and cropping the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    for selecting frames manually,
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    --------
    for selecting frames manually, with a 60% wide frames slider
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual', slider_width=60)

    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphical user interface to choose
    if you need to crop or not.
    --------

    """
    import os
    import sys
    import numpy as np
    from pathlib import Path
    from skimage import io
    from skimage.util import img_as_ubyte
    from deeplabcut.utils import frameselectiontools
    from deeplabcut.utils import auxiliaryfunctions

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.generate_training_dataset import frame_extraction_toolbox

        frame_extraction_toolbox.show(config, slider_width)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")

        numframes2pick = cfg["numframes2pick"]
        start = cfg["start"]
        stop = cfg["stop"]

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct it in the config file."
            )
        if numframes2pick < 1 or numframes2pick != int(numframes2pick):
            raise Exception(
                "Perhaps consider extracting more, or a natural number of frames."
            )

        videos = cfg["video_sets"].keys()
        if opencv:
            from deeplabcut.utils.auxfun_videos import VideoReader
        else:
            from moviepy.editor import VideoFileClip

        has_failed = []
        for vindex, video in enumerate(videos):
            if userfeedback:
                print(
                    "Do you want to extract (perhaps additional) frames for video:",
                    video,
                    "?",
                )
                askuser = input("yes/no")
            else:
                askuser = "yes"

            if (askuser == "y" or askuser == "yes" or askuser == "Ja"
                    or askuser == "ha" or askuser == "oui"
                    or askuser == "ouais"):  # multilanguage support :)

                if opencv:
                    cap = VideoReader(video)
                    nframes = len(cap)
                else:
                    # Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    nframes = int(np.ceil(clip.duration * 1.0 / fps))
                if not nframes:
                    print("Video could not be opened. Skipping...")
                    continue

                indexlength = int(np.ceil(np.log10(nframes)))

                fname = Path(video)
                output_path = Path(
                    config).parents[0] / "labeled-data" / fname.stem

                if output_path.exists():
                    if len(os.listdir(output_path)):
                        if userfeedback:
                            askuser = input(
                                "The directory already contains some frames. Do you want to add to it?(yes/no): "
                            )
                        if not (askuser == "y" or askuser == "yes"
                                or askuser == "Y" or askuser == "Yes"):
                            sys.exit("Delete the frames and try again later!")

                if crop == "GUI":
                    cfg = select_cropping_area(config, [video])
                coords = cfg["video_sets"][video]["crop"].split(",")
                if crop and not opencv:
                    clip = clip.crop(
                        y1=int(coords[2]),
                        y2=int(coords[3]),
                        x1=int(coords[0]),
                        x2=int(coords[1]),
                    )
                elif not crop:
                    coords = None

                print("Extracting frames based on %s ..." % algo)
                if algo == "uniform":
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick, start, stop)
                elif algo == "kmeans":
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap,
                            numframes2pick,
                            start,
                            stop,
                            crop,
                            coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color,
                        )
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip,
                            numframes2pick,
                            start,
                            stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color,
                        )
                elif algo == "user_supplied":
                    frames2pick = user_index[vindex]
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
                    )
                    frames2pick = []

                if not len(frames2pick):
                    print("Frame selection failed...")
                    return

                output_path = (Path(config).parents[0] / "labeled-data" /
                               Path(video).stem)
                is_valid = []
                if opencv:
                    for index in frames2pick:
                        cap.set_to_frame(index)  # extract a particular frame
                        frame = cap.read_frame()
                        if frame is not None:
                            image = img_as_ubyte(frame)
                            img_name = (str(output_path) + "/img" +
                                        str(index).zfill(indexlength) + ".png")
                            if crop:
                                io.imsave(
                                    img_name,
                                    image[int(coords[2]):int(coords[3]),
                                          int(coords[0]):int(coords[1]), :],
                                )  # crop as [y1:y2, x1:x2]
                            else:
                                io.imsave(img_name, image)
                            is_valid.append(True)
                        else:
                            print("Frame", index, " not found!")
                            is_valid.append(False)
                    cap.close()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1.0 / clip.fps))
                            img_name = (str(output_path) + "/img" +
                                        str(index).zfill(indexlength) + ".png")
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  # constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
                                )
                            is_valid.append(True)
                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")
                            is_valid.append(False)
                    clip.close()
                    del clip

                has_failed.append(not any(is_valid))

            else:  # NO!
                has_failed.append(False)

        if all(has_failed):
            print("Frame extraction failed. Video files must be corrupted.")
            return
        elif any(has_failed):
            print("Although most frames were extracted, some were invalid.")
        else:
            print(
                "Frames were successfully extracted, for the videos of interest."
            )
        print(
            "\nYou can now label the frames using the function 'label_frames' "
            "(if you extracted enough frames for all videos).")
    else:
        print(
            "Invalid MODE. Choose either 'manual' or 'automatic'. "
            "Check ``help(deeplabcut.extract_frames)`` in Python or "
            "``deeplabcut.extract_frames?`` in IPython/Jupyter for more details."
        )
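
To make the ``uniform`` mode concrete, here is a standalone sketch of temporally uniform random selection; this is an illustration under stated assumptions, not the library's frameselectiontools implementation:

import numpy as np

def uniform_frames(nframes, numframes2pick, start, stop, seed=None):
    # Pick frame indices uniformly at random from the [start, stop]
    # fraction of a video -- a rough stand-in for UniformFramescv2.
    rng = np.random.default_rng(seed)
    lo, hi = int(start * nframes), int(stop * nframes)
    candidates = np.arange(lo, hi)
    size = min(numframes2pick, candidates.size)
    return np.sort(rng.choice(candidates, size=size, replace=False))

# e.g. 20 frames from the middle 80% of a 10,000-frame video:
print(uniform_frames(10000, 20, 0.1, 0.9, seed=0))
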
Example #10
def add_new_videos(config, videos, copy_videos=False, coords=None, extract_frames=False):
    """
    Add new videos to the config file at any stage of the project.

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.

    videos : list
        A list of strings containing the full paths of the videos to include in the project.

    copy_videos : bool, optional
        If True, the videos are copied to the ``project/videos`` directory; otherwise symbolic links
        are created. The default is ``False``; if provided it must be either ``True`` or ``False``.

    coords: list, optional
        A list containing the list of cropping coordinates of the video. The default is set to None.

    extract_frames: bool, optional
        If this is set to True, ``extract_frames`` will be run on the newly added videos.

    Examples
    --------
    Video will be added, with cropping dimensions according to the frame dimensions of mouse5.avi
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'])

    Video will be added, with cropping dimensions [0,100,0,200]
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'],copy_videos=False,coords=[[0,100,0,200]])

    Two videos will be added, with cropping dimensions [0,100,0,200] and [0,100,0,250], respectively.
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi','/data/videos/mouse6.avi'],copy_videos=False,coords=[[0,100,0,200],[0,100,0,250]])

    """
    import os
    import shutil
    from pathlib import Path

    from deeplabcut.utils import auxiliaryfunctions
    from deeplabcut.utils.auxfun_videos import VideoReader
    from deeplabcut.generate_training_dataset import frame_extraction

    # Read the config file
    cfg = auxiliaryfunctions.read_config(config)

    video_path = Path(config).parents[0] / "videos"
    data_path = Path(config).parents[0] / "labeled-data"
    videos = [Path(vp) for vp in videos]

    dirs = [data_path / Path(i.stem) for i in videos]

    for p in dirs:
        # Create one labeled-data subdirectory per video
        p.mkdir(parents=True, exist_ok=True)

    destinations = [video_path.joinpath(vp.name) for vp in videos]
    if copy_videos:
        for src, dst in zip(videos, destinations):
            if not dst.exists():
                print("Copying the videos")
                shutil.copy(os.fspath(src), os.fspath(dst))
    else:
        # creates the symlinks of the video and puts it in the videos directory.
        print("Attempting to create a symbolic link of the video ...")
        for src, dst in zip(videos, destinations):
            if dst.exists():
                continue  # video already in place; skip it
            try:
                src = str(src)
                dst = str(dst)
                os.symlink(src, dst)
                print("Created the symlink of {} to {}".format(src, dst))
            except OSError:
                try:
                    import subprocess

                    subprocess.check_call("mklink %s %s" % (dst, src), shell=True)
                except (OSError, subprocess.CalledProcessError):
                    print(
                        "Symlink creation impossible (exFat architecture?): "
                        "cutting/pasting the video instead."
                    )
                    shutil.move(os.fspath(src), os.fspath(dst))
                    print("{} moved to {}".format(src, dst))
        videos = destinations

    if copy_videos:
        videos = destinations  # in this case the *new* location should be added to the config file
    # adds the video list to the config.yaml file
    for idx, video in enumerate(videos):
        try:
            # On Windows, os.path.realpath does not resolve to the real video,
            # so use Path.resolve instead.
            video_path = str(Path(video).resolve())
        except OSError:
            video_path = os.readlink(video)

        vid = VideoReader(video_path)
        if coords is not None:
            c = coords[idx]
        else:
            c = vid.get_bbox()
        params = {video_path: {"crop": ", ".join(map(str, c))}}
        if "video_sets_original" not in cfg:
            cfg["video_sets"].update(params)
        else:
            cfg["video_sets_original"].update(params)
    videos_str = [str(video) for video in videos]
    if extract_frames:
        frame_extraction.extract_frames(config, userfeedback=False, videos_list=videos_str)
        print(
            "New videos were added to the project and frames have been extracted for labeling!"
        )
    else:
        print(
            "New videos were added to the project! Use the function 'extract_frames' to select frames for labeling."
        )
    auxiliaryfunctions.write_config(config, cfg)
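
A minimal call built from the docstring above; the config and video paths are placeholders, and extract_frames=True immediately queues the new video for labeling:

import deeplabcut

# Hypothetical paths -- substitute your own project config and video.
deeplabcut.add_new_videos(
    "/home/project/reaching-task-Tanmay-2018-08-23/config.yaml",
    ["/data/videos/mouse7.avi"],
    copy_videos=True,
    coords=[[0, 100, 0, 200]],  # cropping coordinates, as in the docstring example
    extract_frames=True,
)
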
Example #11
def create_labeled_video_3d(
        config,
        path,
        videofolder=None,
        start=0,
        end=None,
        trailpoints=0,
        videotype="avi",
        view=(-113, -270),
        xlim=None,
        ylim=None,
        zlim=None,
        draw_skeleton=True,
        color_by="bodypart",
        figsize=(20, 8),
        fps=30,
        dpi=300,
):
    """
    Creates a video with views from the two cameras and the 3d reconstruction for a selected number of frames.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    path : list
        A list of strings containing the full paths to triangulated files for analysis or a path to the directory, where all the triangulated files are stored.

    videofolder: string
        Full path of the folder where the videos are stored. Use this if the videos are stored in a location other than where the triangulation files are stored. By default it is ``None``, and video files are then looked for in the directory where the triangulation file is stored.

    start: int
        Integer specifying the start of frame index to select. Default is set to 0.

    end: int
        Integer specifying the end of frame index to select. Default is set to None, where all the frames of the video are used for creating the labeled video.

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\nOnly videos with this extension are analyzed. The default is ``.avi``

    view: list
        A list that sets the elevation angle in z plane and azimuthal angle in x,y plane of 3d view. Useful for rotating the axis for 3d view

    xlim: list
        A list of integers specifying the limits for the x axis of the 3d view. By default it is ``None``, and the x limits are computed from the x coordinates of all the bodyparts.

    ylim: list
        A list of integers specifying the limits for the y axis of the 3d view. By default it is ``None``, and the y limits are computed from the y coordinates of all the bodyparts.

    zlim: list
        A list of integers specifying the limits for the z axis of the 3d view. By default it is ``None``, and the z limits are computed from the z coordinates of all the bodyparts.

    draw_skeleton: bool
        If ``True``, adds a line connecting the body parts, making a skeleton on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``True``

    color_by : string, optional (default='bodypart')
        Coloring rule. By default, each bodypart is colored differently.
        If set to 'individual', points belonging to a single individual are colored the same.

    Example
    -------
    Linux/MacOs
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos/3d.h5'],start=100, end=500)

    To create labeled videos for all the triangulated files in the folder
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500)

    To set the xlim, ylim, zlim and rotate the view of the 3d axis
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500,view=[30,90],xlim=[-12,12],ylim=[15,25],zlim=[20,30])

    """
    start_path = os.getcwd()

    # Read the config file and related variables
    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    markerSize = cfg_3d["dotsize"]
    alphaValue = cfg_3d["alphaValue"]
    cmap = cfg_3d["colormap"]
    bodyparts2connect = cfg_3d["skeleton"]
    skeleton_color = cfg_3d["skeleton_color"]
    scorer_3d = cfg_3d["scorername_3d"]

    if color_by not in ("bodypart", "individual"):
        raise ValueError(f"Invalid color_by={color_by}")

    file_list = auxiliaryfunctions_3d.Get_list_of_triangulated_and_videoFiles(
        path, videotype, scorer_3d, cam_names, videofolder)
    print(file_list)
    if not file_list:
        raise Exception(
            "No corresponding video file(s) found for the specified triangulated file or folder. Did you specify the video file type? If videos are stored in a different location, please use the ``videofolder`` argument to specify their path."
        )

    for file in file_list:
        path_h5_file = Path(file[0]).parents[0]
        triangulate_file = file[0]
        # triangulated file is a list which is always sorted as [triangulated.h5,camera-1.videotype,camera-2.videotype]
        # name for output video
        file_name = str(Path(triangulate_file).stem)
        videooutname = os.path.join(path_h5_file, file_name + ".mp4")
        if os.path.isfile(videooutname):
            print("Video already created...")
        else:
            string_to_remove = str(Path(triangulate_file).suffix)
            pickle_file = triangulate_file.replace(string_to_remove,
                                                   "_meta.pickle")
            metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)

            base_filename_cam1 = str(Path(file[1]).stem).split(videotype)[
                0]  # required for searching the filtered file
            base_filename_cam2 = str(Path(file[2]).stem).split(videotype)[
                0]  # required for searching the filtered file
            cam1_view_video = file[1]
            cam2_view_video = file[2]
            cam1_scorer = metadata_["scorer_name"][cam_names[0]]
            cam2_scorer = metadata_["scorer_name"][cam_names[1]]
            print("Creating 3D video from %s and %s using %s" % (
                Path(cam1_view_video).name,
                Path(cam2_view_video).name,
                Path(triangulate_file).name,
            ))

            # Read the video files and corresponding h5 files
            vid_cam1 = VideoReader(cam1_view_video)
            vid_cam2 = VideoReader(cam2_view_video)

            # Look for the filtered predictions file
            try:
                print("Looking for filtered predictions...")
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam1 + cam1_scorer +
                                "*filtered.h5"),
                        ))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam2 + cam2_scorer +
                                "*filtered.h5"),
                        ))[0])
                print(
                    "Found the following filtered data: ",
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam1 + cam1_scorer +
                            "*filtered.h5"),
                    ),
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam2 + cam2_scorer +
                            "*filtered.h5"),
                    ),
                )
            except FileNotFoundError:
                print(
                    "No filtered predictions found, the unfiltered predictions will be used instead."
                )
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam1 + cam1_scorer +
                                "*.h5")))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam2 + cam2_scorer +
                                "*.h5")))[0])

            df_3d = pd.read_hdf(triangulate_file)
            try:
                num_animals = df_3d.columns.get_level_values(
                    "individuals").unique().size
            except KeyError:
                num_animals = 1

            if end is None:
                end = len(df_3d)  # All the frames
            end = min(end, min(len(vid_cam1), len(vid_cam2)))
            frames = list(range(start, end))

            output_folder = Path(
                os.path.join(path_h5_file, "temp_" + file_name))
            output_folder.mkdir(parents=True, exist_ok=True)

            # Flatten the list of bodyparts to connect
            bodyparts2plot = list(
                np.unique(
                    [val for sublist in bodyparts2connect for val in sublist]))

            # Format data
            mask2d = df_cam1.columns.get_level_values('bodyparts').isin(
                bodyparts2plot)
            xy1 = df_cam1.loc[:, mask2d].to_numpy().reshape(
                (len(df_cam1), -1, 3))
            visible1 = xy1[..., 2] >= pcutoff
            xy1[~visible1] = np.nan
            xy2 = df_cam2.loc[:, mask2d].to_numpy().reshape(
                (len(df_cam1), -1, 3))
            visible2 = xy2[..., 2] >= pcutoff
            xy2[~visible2] = np.nan
            mask = df_3d.columns.get_level_values('bodyparts').isin(
                bodyparts2plot)
            xyz = df_3d.loc[:, mask].to_numpy().reshape((len(df_3d), -1, 3))
            xyz[~(visible1 & visible2)] = np.nan

            bpts = df_3d.columns.get_level_values('bodyparts')[mask][::3]
            links = make_labeled_video.get_segment_indices(
                bodyparts2connect,
                bpts,
            )
            ind_links = tuple(zip(*links))

            if color_by == "bodypart":
                color = plt.cm.get_cmap(cmap, len(bodyparts2plot))
                colors_ = color(range(len(bodyparts2plot)))
                colors = np.tile(colors_, (num_animals, 1))
            elif color_by == "individual":
                color = plt.cm.get_cmap(cmap, num_animals)
                colors_ = color(range(num_animals))
                colors = np.repeat(colors_, len(bodyparts2plot), axis=0)

            # Trick to force equal aspect ratio of 3D plots
            minmax = np.nanpercentile(xyz[frames], q=[25, 75], axis=(0, 1)).T
            minmax *= 1.1
            minmax_range = (minmax[:, 1] - minmax[:, 0]).max() / 2
            if xlim is None:
                mid_x = np.mean(minmax[0])
                xlim = mid_x - minmax_range, mid_x + minmax_range
            if ylim is None:
                mid_y = np.mean(minmax[1])
                ylim = mid_y - minmax_range, mid_y + minmax_range
            if zlim is None:
                mid_z = np.mean(minmax[2])
                zlim = mid_z - minmax_range, mid_z + minmax_range

            # Set up the matplotlib figure beforehand
            fig, axes1, axes2, axes3 = set_up_grid(figsize, xlim, ylim, zlim,
                                                   view)
            points_2d1 = axes1.scatter(
                *np.zeros((2, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            im1 = axes1.imshow(np.zeros((vid_cam1.height, vid_cam1.width)))
            points_2d2 = axes2.scatter(
                *np.zeros((2, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            im2 = axes2.imshow(np.zeros((vid_cam2.height, vid_cam2.width)))
            points_3d = axes3.scatter(
                *np.zeros((3, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            if draw_skeleton:
                # Set up skeleton LineCollections
                segs = np.zeros((2, len(ind_links), 2))
                coll1 = LineCollection(segs, colors=skeleton_color)
                coll2 = LineCollection(segs, colors=skeleton_color)
                axes1.add_collection(coll1)
                axes2.add_collection(coll2)
                segs = np.zeros((2, len(ind_links), 3))
                coll_3d = Line3DCollection(segs, colors=skeleton_color)
                axes3.add_collection(coll_3d)

            writer = FFMpegWriter(fps=fps)
            with writer.saving(fig, videooutname, dpi=dpi):
                for k in tqdm(frames):
                    vid_cam1.set_to_frame(k)
                    vid_cam2.set_to_frame(k)
                    frame_cam1 = vid_cam1.read_frame()
                    frame_cam2 = vid_cam2.read_frame()
                    if frame_cam1 is None or frame_cam2 is None:
                        raise IOError("A video frame is empty.")

                    im1.set_data(frame_cam1)
                    im2.set_data(frame_cam2)

                    sl = slice(max(0, k - trailpoints), k + 1)
                    coords3d = xyz[sl]
                    coords1 = xy1[sl, :, :2]
                    coords2 = xy2[sl, :, :2]
                    points_3d._offsets3d = coords3d.reshape((-1, 3)).T
                    points_3d.set_color(colors)
                    points_2d1.set_offsets(coords1.reshape((-1, 2)))
                    points_2d1.set_color(colors)
                    points_2d2.set_offsets(coords2.reshape((-1, 2)))
                    points_2d2.set_color(colors)
                    if draw_skeleton:
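                        # ind_links holds the two endpoint-index rows of every skeleton link;
                        # fancy indexing yields shape (2, n_links, 3), swapped to per-link segments.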
                        segs3d = xyz[k][tuple([ind_links])].swapaxes(0, 1)
                        coll_3d.set_segments(segs3d)
                        segs1 = xy1[k, :, :2][tuple([ind_links
                                                     ])].swapaxes(0, 1)
                        coll1.set_segments(segs1)
                        segs2 = xy2[k, :, :2][tuple([ind_links
                                                     ])].swapaxes(0, 1)
                        coll2.set_segments(segs2)

                    writer.grab_frame()
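
Unlike Example #8, this version streams frames straight into the encoder instead of writing PNGs to disk. The matplotlib pattern it relies on, shown in isolation below; ffmpeg must be installed, and the figure contents, output name, and frame count are placeholders:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter

fig, ax = plt.subplots()
im = ax.imshow(np.zeros((480, 640)), cmap="gray", vmin=0, vmax=1)

writer = FFMpegWriter(fps=30)
with writer.saving(fig, "demo.mp4", dpi=100):   # placeholder output name
    for _ in range(90):                         # placeholder frame count
        im.set_data(np.random.rand(480, 640))   # update artists in place
        writer.grab_frame()                     # encode the current canvas
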