Example #1
0
    def is_crop_ok(self, event):
        """
        Confirm the interactively selected cropping area.

        Restores the frame-grabbing UI (button label/binding, slider,
        start/end frame controls), stores the selected crop coordinates as
        integers, redraws the current frame, and writes the crop string back
        to the project's config.yaml.

        Parameters
        ----------
        event : wx.Event
            Button event that triggered the confirmation (not used directly).
        """

        self.grab.SetLabel("Grab Frames")
        self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
        # Re-show navigation widgets and refresh the panel layout.
        self.slider.Show()
        self.start_frames_sizer.ShowItems(show=True)
        self.end_frames_sizer.ShowItems(show=True)
        self.widget_panel.Layout()
        self.slider.SetMax(self.numberFrames)
        self.startFrame.SetMax(self.numberFrames - 1)
        self.endFrame.SetMax(self.numberFrames)
        # Persist the chosen crop rectangle (x1, x2, y1, y2) as integers.
        self.x1 = int(self.new_x1)
        self.x2 = int(self.new_x2)
        self.y1 = int(self.new_y1)
        self.y2 = int(self.new_y2)
        # Stop listening for further matplotlib draw events, then redraw.
        self.canvas.mpl_disconnect(self.cid)
        self.axes.clear()
        self.currFrame = self.slider.GetValue()
        self.update()
        # Update the config.yaml file with the crop as "x1, x2, y1, y2".
        self.cfg["video_sets"][self.video_source] = {
            "crop": ", ".join(map(str, [self.x1, self.x2, self.y1, self.y2]))
        }
        auxiliaryfunctions.write_config(self.config_path, self.cfg)
def merge_datasets(config, forceiterate=None):
    """
    Checks if the original training dataset can be merged with the newly refined training dataset. To do so it will check
    if the frames in all extracted video sets were relabeled. If this is the case then the iterate variable is advanced by 1.

    Parameter
    ----------
    config : string
        Full path of the config.yaml file as a string.

    forceiterate: int, optional
        If an integer is given the iteration variable is set to this value (this is only done if all datasets were labeled or refined)

    Example
    --------
    >>> deeplabcut.merge_datasets('/analysis/project/reaching-task/config.yaml')
    --------
    """

    cfg = auxiliaryfunctions.read_config(config)
    config_path = Path(config).parents[0]

    bf = Path(str(config_path / "labeled-data"))
    allfolders = [
        os.path.join(bf, fn) for fn in os.listdir(bf)
        if "_labeled" not in fn and not fn.startswith(".")
    ]  # exclude labeled data folders and temporary files
    flagged = False
    for findex, folder in enumerate(allfolders):
        if os.path.isfile(os.path.join(folder, "MachineLabelsRefine.h5")
                          ):  # folder was manually refined
            pass
        elif os.path.isfile(
                os.path.join(folder, "CollectedData_" + cfg["scorer"] +
                             ".h5")):  # folder contains a human data set
            pass
        else:
            # Neither refined machine labels nor human labels were found.
            print("The following folder was not manually refined,...", folder)
            flagged = True

    if not flagged:
        # Advance (or force) the refinement iteration and persist it.
        iter_prev = cfg["iteration"]
        # BUGFIX: compare against None explicitly — the previous
        # `if not forceiterate` silently ignored forceiterate=0.
        if forceiterate is None:
            cfg["iteration"] = int(iter_prev + 1)
        else:
            cfg["iteration"] = forceiterate

        auxiliaryfunctions.write_config(config, cfg)

        print("Merged data sets and updated refinement iteration to " +
              str(cfg["iteration"]) + ".")
        print(
            "Now you can create a new training set for the expanded annotated images (use create_training_dataset)."
        )
    else:
        print("Please label, or remove the un-corrected folders.")
def select_cropping_area(config, videos=None):
    """
    Interactively select the cropping area of all videos in the config.
    A user interface pops up with a frame to select the cropping parameters.
    Use the left click to draw a box and hit the button 'set cropping parameters'
    to store the cropping parameters for a video in the config.yaml file.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : optional (default=None)
        List of videos whose cropping areas are to be defined. Note that full paths are required.
        By default, all videos in the config are successively loaded.

    Returns
    -------
    cfg : dict
        Updated project configuration
    """

    import cv2
    from deeplabcut.utils import auxiliaryfunctions
    from deeplabcut.utils import select_crop_parameters

    cfg = auxiliaryfunctions.read_config(config)

    if videos is None:
        videos = cfg['video_sets']

    for video in videos:
        capture = cv2.VideoCapture(video)
        if not capture.isOpened():
            print('Video could not be opened. Skipping...')
            continue

        ok, frame = capture.read()
        if not ok:
            print('Frame could not be read. Skipping...')
            continue

        # OpenCV returns BGR; flip the channel axis so the GUI shows RGB.
        coords = select_crop_parameters.show(config, frame[:, :, ::-1])
        # Store the crop rectangle as a "x1, x2, y1, y2" string.
        crop_string = ', '.join(str(int(coords[i])) for i in range(4))
        cfg['video_sets'][video] = {'crop': crop_string}

    auxiliaryfunctions.write_config(config, cfg)
    return cfg
Example #4
0
def DLC_edit_bodyparts(config_path, new_bodyparts):
    """
    Utility to facilitate changing the names and number of bodyparts in a DeepLabCut model from the MesoNet GUI.

    :param config_path: (required) The path to a DeepLabCut configuration file (.yaml).
    :param new_bodyparts: (required) A list of new bodypart names to write
    """
    # Load the existing DLC config, swap in the new bodypart list, persist it.
    config_data = read_config(config_path)
    config_data["bodyparts"] = new_bodyparts
    write_config(config_path, config_data)
Example #5
0
def adddatasetstovideolistandviceversa(config,prefix,width,height,suffix='.mp4'):
    """
    First run comparevideolistsanddatafolders(config) to compare the folders in labeled-data and the ones listed under video_sets (in the config file).
    If you detect differences this function can be used to maker sure each folder has a video entry & vice versa.

    It corrects this problem in the following way:

    If a folder in labeled-data does not contain a video entry in the config file then the prefix path will be added in front of the name of the labeled-data folder and combined
    with the suffix variable as an ending. Width and height will be added as cropping variables as passed on. TODO: This should be written from the actual images!

    If a video entry in the config file does not contain a folder in labeled-data, then the entry is removed.

    Handle with care!

    Parameter
    ----------
    config : string
        String containing the full path of the config file in the project.

    """
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg['video_sets'].keys()
    video_names = [Path(i).stem for i in videos]

    alldatafolders = [fn for fn in os.listdir(Path(config).parent / 'labeled-data') if '_labeled' not in fn]

    print("Config file contains:", len(video_names))
    print("Labeled-data contains:", len(alldatafolders))

    # BUGFIX: use a set — the old list could collect the same video key twice
    # (when several folder names match one video path), making the later
    # deletion loop raise KeyError on the second `del`.
    toberemoved = set()
    for vn in video_names:
        if vn not in alldatafolders:
            print(vn, " is missing as a labeled folder >> removing key!")
            for fullvideo in cfg['video_sets'].keys():
                if vn in fullvideo:
                    toberemoved.add(fullvideo)

    for vid in toberemoved:
        del cfg['video_sets'][vid]

    # Load updated lists:
    videos = cfg['video_sets'].keys()
    video_names = [Path(i).stem for i in videos]

    # Add a synthetic video entry for each labeled-data folder without one.
    for vn in alldatafolders:
        if vn not in video_names:
            print(vn, " is missing in config file >> adding it!")
            cfg['video_sets'].update({os.path.join(prefix,vn+suffix) : {'crop': ', '.join(map(str, [0, width, 0, height]))}})

    auxiliaryfunctions.write_config(config,cfg)
def select_cropping_area(config, videos=None):
    """
    Interactively select the cropping area of all videos in the config.
    A user interface pops up with a frame to select the cropping parameters.
    Use the left click to draw a box and hit the button 'set cropping parameters'
    to store the cropping parameters for a video in the config.yaml file.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : optional (default=None)
        List of videos whose cropping areas are to be defined. Note that full paths are required.
        By default, all videos in the config are successively loaded.

    Returns
    -------
    cfg : dict
        Updated project configuration
    """
    from deeplabcut.utils import auxiliaryfunctions, auxfun_videos

    cfg = auxiliaryfunctions.read_config(config)
    if videos is None:
        # Prefer the pristine video list if the project keeps one.
        videos = list(cfg.get("video_sets_original") or cfg["video_sets"])

    for video in videos:
        coords = auxfun_videos.draw_bbox(video)
        if not coords:
            continue
        # draw_bbox returns (x1, y1, x2, y2); config stores "x1, x2, y1, y2".
        crop_entry = {
            "crop": ", ".join(str(int(coords[i])) for i in (0, 2, 1, 3))
        }
        try:
            cfg["video_sets"][video] = crop_entry
        except KeyError:
            # No "video_sets" mapping — fall back to the original list.
            cfg["video_sets_original"][video] = crop_entry

    auxiliaryfunctions.write_config(config, cfg)
    return cfg
def transform_data(config):
    """
    This function adds the full path to labeling dataset.
    It also adds the correct path to the video file in the config file.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
    """
    # NOTE: removed an unused function-scope `import pandas as pd`.

    cfg = auxiliaryfunctions.read_config(config)
    project_path = str(Path(config).parents[0])

    # Point the project at its actual on-disk location.
    cfg['project_path'] = project_path
    video_file = os.path.join(project_path, 'videos', 'reachingvideo1.avi')
    # Replace the demo placeholder key with the real video path.
    if 'WILL BE AUTOMATICALLY UPDATED BY DEMO CODE' in cfg['video_sets'].keys(
    ):
        cfg['video_sets'][str(video_file)] = cfg['video_sets'].pop(
            'WILL BE AUTOMATICALLY UPDATED BY DEMO CODE')

    auxiliaryfunctions.write_config(config, cfg)
    def create_project(self):
        """
        Create a new DeepLabCut project for this task/subject if it does not
        already exist, then overwrite selected config.yaml values with this
        object's settings and update the project paths.

        Side effects: creates the project directory tree (via
        deeplabcut.create_new_project with copied videos), renames it to
        ``self.project_dir``, and writes bodyparts/date/numframes2pick into
        the project's config.yaml.
        """
        # Get project directory
        project_dir = self.project_dir
        # Check if project exists
        if project_dir.exists():
            print("The project directory already exists.")
        else:
            # Creates new project then renames according to task and subject
            print("The project directory does not exist.")
            #  Create file name as is created in deeplabcut.create_new_project
            from datetime import datetime as dt
            # Deeplabcut uses the creation name in the date. We want to change this to the study date
            date = dt.today().strftime('%Y-%m-%d')
            # Name DLC will give the freshly created project directory.
            old_project_dir = Path("-".join([self.task, self.subject, date]))
            # Create new project
            deeplabcut.create_new_project(
                self.task,
                self.subject,
                self.video,
                working_directory=self.project_dir.resolve().parent,
                copy_videos=True)
            # Rename project to have project date not current date
            # NOTE(review): old_project_dir is relative — assumes the current
            # working directory is the DLC working directory; confirm.
            old_project_dir.rename(project_dir)

            from deeplabcut.utils import auxiliaryfunctions

            # Get config file path
            main_config_file = Path(self.project_dir).resolve() / self.config

            #  load config.yaml
            main_config = auxiliaryfunctions.read_config(main_config_file)

            #  Update values in config file (note: self.date, not the local
            #  `date` computed above, is what gets persisted):
            main_config['bodyparts'] = self.bodyparts
            main_config['date'] = self.date
            main_config['numframes2pick'] = self.numframes2pick

            # Write dictionary to yaml  config file
            auxiliaryfunctions.write_config(main_config_file, main_config)

            # Update project paths
            self.update_project_paths()
        return
def adddatasetstovideolist(config,prefix,width,height,suffix='.mp4'):
    """
    Auxiliary function, compares data sets in labeled-data & listed under video_sets. Adjust both to match up. Handle with care!
    For the videos the prefix path will be added in front of the name of the labeled-data folder and the suffix ending. Width and height
    are added as presented manually.
    To do: This should be written from the actual images!

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
    prefix : string
        Path prefix placed in front of each synthesized video name.
    width, height : int
        Frame dimensions used to build the default crop string.
    suffix : string, optional
        Video file extension appended to each folder name (default '.mp4').
    """
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg['video_sets'].keys()
    video_names = [Path(i).stem for i in videos]

    alldatafolders = [fn for fn in os.listdir(Path(config).parent / 'labeled-data') if '_labeled' not in fn]

    print("Config file contains:", len(video_names))
    print("Labeled-data contains:", len(alldatafolders))

    # BUGFIX: use a set — the old list could collect the same video key twice
    # (when several folder names match one video path), making the later
    # deletion loop raise KeyError on the second `del`.
    toberemoved = set()
    for vn in video_names:
        if vn not in alldatafolders:
            print(vn, " is missing as a labeled folder >> removing key!")
            for fullvideo in cfg['video_sets'].keys():
                if vn in fullvideo:
                    toberemoved.add(fullvideo)

    for vid in toberemoved:
        del cfg['video_sets'][vid]

    # Load updated lists:
    videos = cfg['video_sets'].keys()
    video_names = [Path(i).stem for i in videos]

    # Add a synthetic video entry for each labeled-data folder without one.
    for vn in alldatafolders:
        if vn not in video_names:
            print(vn, " is missing in config file >> adding it!")
            cfg['video_sets'].update({os.path.join(prefix,vn+suffix) : {'crop': ', '.join(map(str, [0, width, 0, height]))}})

    auxiliaryfunctions.write_config(config,cfg)
def get_track_method(cfg, track_method=""):
    """
    Resolve which tracker to use for a project.

    Parameters
    ----------
    cfg : dict
        Project configuration dictionary.
    track_method : string, optional
        Explicit tracker ("box", "skeleton" or "ellipse"). Empty string
        means "use the project default".

    Returns
    -------
    string
        The tracker name for multi-animal projects, "" otherwise.

    Raises
    ------
    ValueError
        If an unsupported tracker name is given.
    """
    if cfg.get("multianimalproject", False):
        if track_method != "":
            # check if it exists:
            if track_method not in ("box", "skeleton", "ellipse"):
                raise ValueError(
                    "Invalid tracking method. Only `box`, `skeleton` and `ellipse` are currently supported."
                )
            return track_method
        else:  # default
            # BUGFIX: `cfg.get(..., "") is None` could never detect a missing
            # key (get returns "" then), so an empty default was never
            # repaired. Use truthiness to catch both None and "".
            default = cfg.get("default_track_method", "")
            if not default:
                print(
                    "Empty def. tracker in config file found, overwritten by ellipse tracker."
                )
                default = "ellipse"
                cfg["default_track_method"] = default
                # BUGFIX: the original wrote to the undefined name `config`
                # (NameError); derive the config path from the project path.
                auxiliaryfunctions.write_config(
                    str(Path(cfg["project_path"]) / "config.yaml"), cfg)
            return default

    else:  # no tracker for single-animal projects
        return ""
def add_CLARA_videos(config, videos):
    """
    Add new videos to the config file at any stage of the project.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
    videos : list
        Full paths of the videos to add.
    """
    from deeplabcut.utils import auxiliaryfunctions

    # Read the config file
    cfg = auxiliaryfunctions.read_config(config)
    video_path = Path(config).parents[0] / 'videos'
    data_path = Path(config).parents[0] / 'labeled-data'
    videos = [Path(vp) for vp in videos]
    dirs = [data_path / Path(i.stem) for i in videos]
    for p in dirs:
        # Create one labeled-data directory per video.
        p.mkdir(parents=True, exist_ok=True)

    for idx, video in enumerate(videos):
        try:
            video_path = str(Path.resolve(Path(video)))
        except OSError:
            # BUGFIX: narrowed from a bare `except:`; resolution failures are
            # OS-level — fall back to reading the symlink target directly.
            video_path = os.readlink(video)

        vcap = cv2.VideoCapture(video_path)
        if vcap.isOpened():
            # Record the video with a full-frame crop "0, width, 0, height".
            width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            cfg['video_sets'].update({
                video_path: {
                    'crop': ', '.join(map(str, [0, width, 0, height]))
                }
            })
        else:
            print("Cannot open the video file!")
    auxiliaryfunctions.write_config(config, cfg)
Example #12
0
def get_track_method(cfg, track_method=""):
    """
    Resolve which tracker a project should use.

    For multi-animal projects, return the explicitly requested tracker (after
    validating it), or fall back to the project's default — repairing an
    empty/missing default to "ellipse" and persisting it. Single-animal
    projects use no tracker and get "".
    """
    # Single-animal projects never use a tracker.
    if not cfg.get("multianimalproject", False):
        return ""

    if track_method != "":
        # Validate the explicitly requested tracker.
        if track_method not in TRACK_METHODS:
            raise ValueError(
                f"Invalid tracking method. Only {', '.join(TRACK_METHODS)} are currently supported."
            )
        return track_method

    chosen = cfg.get("default_track_method", "")
    if not chosen:
        warnings.warn(
            "default_track_method` is undefined in the config.yaml file and will be set to `ellipse`."
        )
        chosen = "ellipse"
        cfg["default_track_method"] = chosen
        # Persist the repaired default back to the project config.
        auxiliaryfunctions.write_config(
            str(Path(cfg["project_path"]) / "config.yaml"), cfg)
    return chosen
Example #13
0
def transform_data(config):
    """
    This function adds the full path to labeling dataset.
    It also adds the correct path to the video file in the config file.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
    """

    cfg = auxiliaryfunctions.read_config(config)
    project_path = str(Path(config).parents[0])

    cfg["project_path"] = project_path
    # BUGFIX: initialize so an unrecognized dataset no longer raises
    # NameError below when the placeholder key exists.
    video_file = None
    if "Reaching" in project_path:
        video_file = os.path.join(project_path, "videos", "reachingvideo1.avi")
    elif "openfield" in project_path:
        video_file = os.path.join(project_path, "videos", "m4s1.mp4")
    else:
        print("This is not an official demo dataset.")

    # Replace the demo placeholder key with the real video path, if known.
    if video_file is not None and (
            "WILL BE AUTOMATICALLY UPDATED BY DEMO CODE" in cfg["video_sets"]):
        cfg["video_sets"][str(video_file)] = cfg["video_sets"].pop(
            "WILL BE AUTOMATICALLY UPDATED BY DEMO CODE")

    auxiliaryfunctions.write_config(config, cfg)
def extract_frames(config,
                   mode='automatic',
                   algo='kmeans',
                   crop=False,
                   userfeedback=True,
                   cluster_step=1,
                   cluster_resizewidth=30,
                   cluster_color=False,
                   opencv=True,
                   flymovie=False,
                   slider_width=25):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.\n
    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.
    
    The provided function either selects frames from the videos in a randomly and temporally uniformly distributed way (uniform), \n 
    by clustering based on visual appearance (k-means), or by manual selection. 
    
    Three important parameters for automatic extraction: numframes2pick, start and stop are set in the config file. 
    
    Please refer to the user guide for more details on methods and parameters https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf
    
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
        
    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.
        
    algo : string 
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only required for ``automatic`` mode and the default is ``uniform``. For uniform, frames are picked in temporally uniform way, kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this. 
        
    crop : bool, optional
        If this is set to True, a user interface pops up with a frame to select the cropping parameters. Use the left click to draw a cropping area and hit the button set cropping parameters to save the cropping parameters for a video.
        The default is ``False``; if provided it must be either ``True`` or ``False``.
            
    userfeedback: bool, optional
        If this is set to false during automatic mode then frames for all videos are extracted. The user can set this to true, which will result in a dialog,
        where the user is asked for each video if (additional/any) frames from this video should be extracted. Use this, e.g. if you have already labeled
        some folders and want to extract data for new videos. 
    
    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).
    
    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could only use every nth frame (set by: cluster_step). This saves memory before clustering can start, however, 
        reading the individual frames takes longer due to the skipping.
    
    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases 
        the computational complexity. 
    
    opencv: bool, default: True
        Uses openCV for loading & extractiong (otherwise moviepy (legacy))
        
    slider_width: number, default: 25
        Width of the video frames slider, in percent of window
        
    Examples
    --------
    for selecting frames automatically with 'kmeans' and want to crop the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    for selecting frames automatically with 'kmeans' and considering the color information.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    for selecting frames automatically with 'uniform' and want to crop the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    for selecting frames manually,
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    --------
    for selecting frames manually, with a 60% wide frames slider
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual', slider_width=60)
    
    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphic user interface to choose 
    if you need to crop or not.
    --------
    
    """
    import os
    import sys
    import numpy as np
    from pathlib import Path
    from skimage import io
    import skimage
    from skimage.util import img_as_ubyte
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    from deeplabcut.utils import frameselectiontools
    from deeplabcut.utils import auxiliaryfunctions
    from matplotlib.widgets import RectangleSelector

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.generate_training_dataset import frame_extraction_toolbox
        from deeplabcut.utils import select_crop_parameters
        frame_extraction_toolbox.show(config, slider_width)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")

        numframes2pick = cfg['numframes2pick']
        start = cfg['start']
        stop = cfg['stop']

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct it in the config file."
            )
        if numframes2pick < 1 and not int(numframes2pick):
            raise Exception(
                "Perhaps consider extracting more, or a natural number of frames."
            )

        videos = cfg['video_sets'].keys()
        if opencv:
            import cv2
        elif flymovie:
            from motmot.FlyMovieFormat import FlyMovieFormat as FMF
            import cv2
        else:
            from moviepy.editor import VideoFileClip
        for vindex, video in enumerate(videos):
            #plt.close("all")
            global coords
            coords = cfg['video_sets'][video]['crop'].split(',')

            if userfeedback:
                print(
                    "Do you want to extract (perhaps additional) frames for video:",
                    video, "?")
                askuser = input("yes/no")
            else:
                askuser = "******"

            if askuser == 'y' or askuser == 'yes' or askuser == 'Ja' or askuser == 'ha':  # multilanguage support :)
                if opencv:
                    cap = cv2.VideoCapture(video)
                    fps = cap.get(
                        5
                    )  #https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
                    nframes = int(cap.get(7))
                    duration = nframes * 1. / fps
                elif flymovie:
                    cap = FMF.FlyMovie(video)
                    nframes = cap.n_frames
                    while True:
                        try:
                            cap.get_frame(nframes)
                        except FMF.NoMoreFramesException:
                            nframes -= 1
                            continue
                        break
                    fps = 1. / (cap.get_frame(min(100, nframes))[1] -
                                cap.get_frame(min(100, nframes) - 1)[1])
                    duration = cap.get_frame(nframes)[1]
                else:
                    #Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    duration = clip.duration
                    nframes = int(np.ceil(clip.duration * 1. / fps))

                indexlength = int(np.ceil(np.log10(nframes)))

                if crop == True:
                    from deeplabcut.utils import select_crop_parameters
                    if opencv:
                        cap.set(2, start * duration)
                        ret, frame = cap.read()
                        if ret:
                            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    elif flymovie:
                        frame = cap.get_frame(int(nframes * start))[0]
                        if frame.ndim != 3:
                            frame = skimage.color.gray2rgb(frame)
                        image = frame
                    else:
                        image = clip.get_frame(
                            start * clip.duration
                        )  #frame is accessed by index *1./clip.fps (fps cancels)

                    fname = Path(video)
                    output_path = Path(
                        config).parents[0] / 'labeled-data' / fname.stem

                    if output_path.exists():
                        fig, ax = plt.subplots(1)
                        # Call the GUI to select the cropping parameters
                        coords = select_crop_parameters.show(config, image)
                        # Update the config.yaml file with current cropping parameters
                        cfg['video_sets'][video] = {
                            'crop':
                            ', '.join(
                                map(str, [
                                    int(coords[0]),
                                    int(coords[1]),
                                    int(coords[2]),
                                    int(coords[3])
                                ]))
                        }
                        auxiliaryfunctions.write_config(config_file, cfg)

                        if len(os.listdir(output_path)) == 0:  #check if empty
                            #store full frame from random location (good for augmentation)
                            index = int(start * duration +
                                        np.random.rand() * duration *
                                        (stop - start))
                            if opencv:
                                cap.set(1, index)
                                ret, frame = cap.read()
                                if ret:
                                    image = img_as_ubyte(
                                        cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            elif flymovie:
                                frame = cap.get_frame(int(nframes * start))[0]
                                if frame.ndim != 3:
                                    frame = skimage.color.gray2rgb(frame)
                                image = img_as_ubyte(frame)
                            else:
                                image = img_as_ubyte(
                                    clip.get_frame(index * 1. / clip.fps))
                                clip = clip.crop(
                                    y1=int(coords[2]),
                                    y2=int(coords[3]),
                                    x1=int(coords[0]),
                                    x2=int(coords[1]))  #now crop clip

                            saveimg = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            io.imsave(saveimg, image)

                        else:
                            askuser = input(
                                "The directory already contains some frames. Do you want to add to it?(yes/no): "
                            )
                            if askuser == 'y' or askuser == 'yes' or askuser == 'Y' or askuser == 'Yes':
                                #clip=clip.crop(y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]))
                                index = int(start * duration +
                                            np.random.rand() * duration *
                                            (stop - start))
                                if opencv:
                                    cap.set(1, index)
                                    ret, frame = cap.read()
                                    if ret:
                                        image = img_as_ubyte(
                                            cv2.cvtColor(
                                                frame, cv2.COLOR_BGR2RGB))
                                elif flymovie:
                                    frame = cap.get_frame(int(nframes *
                                                              start))[0]
                                    if frame.ndim != 3:
                                        frame = skimage.color.gray2rgb(frame)
                                    image = img_as_ubyte(frame)
                                else:
                                    image = img_as_ubyte(
                                        clip.get_frame(index * 1. / clip.fps))
                                    clip = clip.crop(y1=int(coords[2]),
                                                     y2=int(coords[3]),
                                                     x1=int(coords[0]),
                                                     x2=int(coords[1]))

                                saveimg = str(output_path) + '/img' + str(
                                    index).zfill(indexlength) + ".png"
                                io.imsave(saveimg, image)
                                pass
                            else:
                                sys.exit(
                                    "Delete the frames and try again later!")

                else:
                    numframes2pick = cfg[
                        'numframes2pick'] + 1  # without cropping a full size frame will not be extracted >> thus one more frame should be selected in next stage.

                print("Extracting frames based on %s ..." % algo)

                if algo == 'uniform':  #extract n-1 frames (0 was already stored)
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick - 1, start, stop)
                    elif flymovie:
                        frames2pick = frameselectiontools.UniformFramesfmf(
                            cap, numframes2pick - 1, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick - 1, start, stop)
                elif algo == 'kmeans':
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap,
                            numframes2pick - 1,
                            start,
                            stop,
                            crop,
                            coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                    elif flymovie:
                        print("FMF not supported by kmeans as of now!")
                        frames2pick = []
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip,
                            numframes2pick - 1,
                            start,
                            stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
                    )
                    frames2pick = []

                output_path = Path(config).parents[0] / 'labeled-data' / Path(
                    video).stem
                if opencv:
                    for index in frames2pick:
                        cap.set(1, index)  #extract a particular frame
                        ret, frame = cap.read()
                        if ret:
                            image = img_as_ubyte(
                                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            img_name = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            if crop:
                                io.imsave(
                                    img_name,
                                    image[int(coords[2]):int(coords[3]),
                                          int(coords[0]):int(coords[1]), :]
                                )  #y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]
                            else:
                                io.imsave(img_name, image)
                        else:
                            print("Frame", index, " not found!")
                    cap.release()
                elif flymovie:
                    for index in frames2pick:
                        print(index)
                        frame = cap.get_frame(int(index))[0]
                        if frame.ndim != 3:
                            frame = skimage.color.gray2rgb(frame)
                        image = img_as_ubyte(frame)
                        img_name = str(output_path) + '/img' + str(
                            index).zfill(indexlength) + ".png"
                        if crop:
                            io.imsave(
                                img_name,
                                image[int(coords[2]):int(coords[3]),
                                      int(coords[0]):int(coords[1]), :]
                            )  #y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]
                        else:
                            io.imsave(img_name, image)
                    cap.close()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1. / clip.fps))
                            img_name = str(output_path) + '/img' + str(
                                index).zfill(indexlength) + ".png"
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  #constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
                                )

                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")

                    #close video.
                    clip.close()
                    del clip
    else:
        print(
            "Invalid MODE. Choose either 'manual' or 'automatic'. Check ``help(deeplabcut.extract_frames)`` on python and ``deeplabcut.extract_frames?`` \
              for ipython/jupyter notebook for more details.")

    print(
        "\nFrames were selected.\nYou can now label the frames using the function 'label_frames' (if you extracted enough frames for all videos)."
    )
Example #15
0
def add_new_videos(config, videos, copy_videos=False, coords=None, extract_frames=False):
    """
    Add new videos to the config file at any stage of the project.

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.

    videos : list
        A list of strings containing the full paths of the videos to include in the project.

    copy_videos : bool, optional
        If this is set to True, the videos are copied into the project/videos directory;
        otherwise symlinks are created there. The default is ``False``; if provided it must
        be either ``True`` or ``False``.

    coords: list, optional
        A list containing the list of cropping coordinates of the video. The default is set to None.

    extract_frames: bool, optional
        if this is set to True extract_frames will be run on the new videos

    Examples
    --------
    Video will be added, with cropping dimensions according to the frame dimensions of mouse5.avi
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'])

    Video will be added, with cropping dimensions [0,100,0,200]
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'],copy_videos=False,coords=[[0,100,0,200]])

    Two videos will be added, with cropping dimensions [0,100,0,200] and [0,100,0,250], respectively.
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi','/data/videos/mouse6.avi'],copy_videos=False,coords=[[0,100,0,200],[0,100,0,250]])

    """
    import os
    import shutil
    from pathlib import Path

    from deeplabcut.utils import auxiliaryfunctions
    from deeplabcut.utils.auxfun_videos import VideoReader
    from deeplabcut.generate_training_dataset import frame_extraction

    # Read the config file
    cfg = auxiliaryfunctions.read_config(config)

    video_path = Path(config).parents[0] / "videos"
    data_path = Path(config).parents[0] / "labeled-data"
    videos = [Path(vp) for vp in videos]

    # Create one labeled-data subfolder per video (named after the video stem).
    for p in (data_path / vp.stem for vp in videos):
        p.mkdir(parents=True, exist_ok=True)

    destinations = [video_path.joinpath(vp.name) for vp in videos]
    if copy_videos:
        for src, dst in zip(videos, destinations):
            if not dst.exists():
                print("Copying the videos")
                shutil.copy(os.fspath(src), os.fspath(dst))
    else:
        # Creates the symlinks of the videos and puts them in the videos directory.
        print("Attempting to create a symbolic link of the video ...")
        for src, dst in zip(videos, destinations):
            if dst.exists():
                # BUGFIX: previously this fell through to the symlink attempt and
                # could end up cutting/pasting the video over an existing destination;
                # an already-present destination is simply skipped now.
                continue
            try:
                src = str(src)
                dst = str(dst)
                os.symlink(src, dst)
                print("Created the symlink of {} to {}".format(src, dst))
            except OSError:
                # Symlinks need elevated privileges on Windows; try mklink, and as a
                # last resort (e.g. exFAT) move the file into the project.
                try:
                    import subprocess

                    subprocess.check_call("mklink %s %s" % (dst, src), shell=True)
                except (OSError, subprocess.CalledProcessError):
                    print(
                        "Symlink creation impossible (exFat architecture?): "
                        "cutting/pasting the video instead."
                    )
                    shutil.move(os.fspath(src), os.fspath(dst))
                    print("{} moved to {}".format(src, dst))

    # In both cases the *new* location inside the project is what goes into the config.
    videos = destinations

    # adds the video list to the config.yaml file
    for idx, video in enumerate(videos):
        try:
            # For windows os.path.realpath does not work and does not link to the real video.
            video_path = str(Path.resolve(Path(video)))
        except OSError:
            # Unresolvable (e.g. dangling) link: fall back to the raw link target.
            video_path = os.readlink(video)

        vid = VideoReader(video_path)
        if coords is not None:
            c = coords[idx]
        else:
            c = vid.get_bbox()
        params = {video_path: {"crop": ", ".join(map(str, c))}}
        if "video_sets_original" not in cfg:
            cfg["video_sets"].update(params)
        else:
            cfg["video_sets_original"].update(params)

    # BUGFIX: write the updated config *before* extracting frames, so that
    # frame extraction sees the newly added videos in config.yaml.
    auxiliaryfunctions.write_config(config, cfg)

    if extract_frames:
        videos_str = [str(video) for video in videos]
        frame_extraction.extract_frames(config, userfeedback=False, videos_list=videos_str)
        print(
            "New videos were added to the project and frames have been extracted for labeling!"
        )
    else:
        print(
            "New videos were added to the project! Use the function 'extract_frames' to select frames for labeling."
        )
def update_config_crop_coords(config):
    """
    Given the list of videos, users can manually zoom in the area they want to crop and update the coordinates in config.yaml 

    Opens one interactive matplotlib window per video in ``video_sets``: the
    right axes can be zoomed to the desired region, and the final view limits
    (read back from the ``ZoomedDisplay`` helper) are stored as the crop
    coordinates ``x1, x2, y1, y2`` for that video. The updated dictionary is
    written back to config.yaml at the end.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.
    """
    config_file = Path(config).resolve()
    cfg = auxiliaryfunctions.read_config(config_file)

    video_sets = cfg['video_sets'].keys()
    for vindex, video_path in enumerate(video_sets):

        cap = cv2.VideoCapture(video_path)
        if cap.isOpened():
            nframes = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            height, width = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
                cap.get(cv2.CAP_PROP_FRAME_WIDTH))

            print("video {}: {} has original dim in {} by {}".format(
                vindex, video_path, width, height))

            # putting the frame to read at the very middle of the video
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((nframes - 1) / 2))
            res, frame = cap.read()

            # ZoomedDisplay tracks the zoomed view limits chosen by the user.
            display = ZoomedDisplay(frame=frame, height=height, width=width)

            # Left axes: overview with a red rectangle mirroring the zoom;
            # right axes: the zoomable view itself.
            fig1, (ax1, ax2) = plt.subplots(1, 2)

            ax1.imshow(frame)
            ax2.imshow(frame)

            rect = UpdatingRect([0, 0],
                                0,
                                0,
                                facecolor='None',
                                edgecolor='red',
                                linewidth=1.0)
            rect.set_bounds(*ax2.viewLim.bounds)
            ax1.add_patch(rect)

            # Connect for changing the view limits
            # NOTE: callbacks must be wired before plt.show() blocks below.
            ax2.callbacks.connect('xlim_changed', rect)
            ax2.callbacks.connect('ylim_changed', rect)

            ax2.callbacks.connect('xlim_changed', display.ax_update)
            ax2.callbacks.connect('ylim_changed', display.ax_update)
            ax2.set_title("Zoom here")

            # Blocks until the user closes the window; display then holds
            # the final xstart/xend/ystart/yend of the zoomed region.
            plt.show()

            new_width = display.xend - display.xstart
            new_height = display.yend - display.ystart

            print(
                "your cropped coords are {} {} {} {} with dim of {} by {} \n".
                format(display.xstart, display.xend, display.ystart,
                       display.yend, new_width, new_height))

            # Crop is stored as the string "x1, x2, y1, y2".
            cfg['video_sets'][video_path] = {
                'crop':
                ', '.join(
                    map(str, [
                        display.xstart, display.xend, display.ystart,
                        display.yend
                    ]))
            }

            cap.release()
            plt.close("all")

        else:
            print("Cannot open the video file: {} !".format(video_path))

    # Update the yaml config file
    auxiliaryfunctions.write_config(config, cfg)
Example #17
0
    def update_project_paths(self):
        """
        Rewrite the absolute paths stored in this project's configuration files
        after the project directory has been moved or copied (e.g. from another
        machine or from Windows, given the backslash handling below).

        Updates, in order:
        1. ``project_path`` and the ``video_sets`` keys in the main config.yaml.
        2. The train ``pose_cfg.yaml``: project path, dataset/metadataset paths
           (converting Windows ``\\`` separators), and ``init_weights``.
        3. The MATLAB training-dataset .mat file image paths, if present.
        4. The test ``pose_cfg.yaml`` analogously.

        NOTE(review): ``init_weights`` is pointed at ``resnet_v1_50.ckpt`` in the
        parent of the *current working directory* — presumably where the checkpoint
        was downloaded; confirm this matches your setup.
        """

        print('Updating paths..')
        from deeplabcut.utils import auxiliaryfunctions

        #  load config.yaml
        main_config = auxiliaryfunctions.read_config(self.full_config_path())

        # Update path in main config
        new_project_dir = self.project_dir.resolve()
        main_config['project_path'] = str(new_project_dir)

        # Update video path. NOTE: the video path name
        # (each old absolute key is re-rooted under <new_project_dir>/videos,
        # keeping only the file name of the original video).
        for old_vid in list(main_config["video_sets"]):
            new_vid = str(new_project_dir / "videos" / Path(old_vid).name)
            main_config["video_sets"][new_vid] = main_config["video_sets"].pop(
                old_vid)

        # Write dictionary to yaml  config file
        auxiliaryfunctions.write_config(self.full_config_path(), main_config)

        # Update train and test config.yaml paths
        # NOTE(review): hard-coded to trainingset index 0 / shuffle 1 — other
        # shuffles or training fractions are not updated.
        trainingsetindex = 0
        shuffle = 1
        modelfoldername = auxiliaryfunctions.GetModelFolder(
            main_config["TrainingFraction"][trainingsetindex], shuffle,
            main_config)
        path_train_config = os.path.join(main_config['project_path'],
                                         Path(modelfoldername), 'train',
                                         'pose_cfg.yaml')
        path_test_config = os.path.join(main_config['project_path'],
                                        Path(modelfoldername), 'test',
                                        'pose_cfg.yaml')

        # Update training pose_cfg.yaml
        if os.path.exists(path_train_config):
            #train(str(poseconfigfile),displayiters,saveiters,maxiters,max_to_keep=max_snapshots_to_keep) #pass on path and file name for pose_cfg.yaml!
            with open(path_train_config, "r") as ymlfile:
                cfg_train = yaml.load(ymlfile, Loader=yaml.FullLoader)

            cfg_train['project_path'] = str(new_project_dir)
            # Split on Windows separators and rejoin with the local os.path.sep,
            # so paths written on Windows work on the current platform.
            old_dataset_train = os.path.join(*cfg_train['dataset'].split(
                '\\'))  #str(Path(cfg_train['dataset']))
            cfg_train['dataset'] = old_dataset_train
            old_metadataset = os.path.join(*cfg_train['metadataset'].split(
                '\\'))  #str(Path(cfg_train['metadataset']))
            cfg_train['metadataset'] = old_metadataset
            # if Path(Path.cwd().parent /
            # init_loc = input("Please specificy directory to resnet_v1_50.ckpt")
            cfg_train['init_weights'] = str(Path.cwd().parent /
                                            "resnet_v1_50.ckpt")
            with open(path_train_config, 'w') as ymlfile:
                yaml.dump(cfg_train, ymlfile)

            # Update MATLAB file contining training files
            if os.path.exists(self.project_dir / cfg_train['dataset']):
                import scipy.io as sio
                # Load Matlab file dataset annotation
                mlab = sio.loadmat(self.project_dir / cfg_train['dataset'])
                num_images = mlab['dataset'].shape[1]
                for i in range(num_images):
                    # Same backslash-to-native-separator conversion for each image path.
                    oldFilePath = mlab['dataset'][0, i][0][0]
                    newFilePath = os.path.join(
                        *oldFilePath.split('\\'))  #str(Path(oldFilePath))
                    mlab['dataset'][0, i][0][0] = newFilePath
                # Saving mat file
                sio.savemat(
                    os.path.join(self.project_dir / cfg_train['dataset']),
                    mlab)

        # Update testing pose_cfg.yaml
        if os.path.exists(path_test_config):
            #train(str(poseconfigfile),displayiters,saveiters,maxiters,max_to_keep=max_snapshots_to_keep) #pass on path and file name for pose_cfg.yaml!
            with open(path_test_config, "r") as ymlfile:
                cfg_test = yaml.load(ymlfile, Loader=yaml.FullLoader)
            cfg_test['init_weights'] = str(Path.cwd().parent /
                                           "resnet_v1_50.ckpt")
            old_dataset_test = os.path.join(*cfg_test['dataset'].split(
                '\\'))  #str(Path(cfg_test['dataset']))
            cfg_test['dataset'] = old_dataset_test
            with open(path_test_config, 'w') as ymlfile:
                yaml.dump(cfg_test, ymlfile)

        print('done.')
Example #18
0
def create_new_project(project,
                       experimenter,
                       videos,
                       working_directory=None,
                       copy_videos=False,
                       videotype='.avi'):
    """Creates a new project directory, sub-directories and a basic configuration file. The configuration file is loaded with the default values. Change its parameters to your projects need.

    Parameters
    ----------
    project : string
        String containing the name of the project.

    experimenter : string
        String containing the name of the experimenter.

    videos : list
        A list of string containing the full paths of the videos to include in the project.
        Attention: Can also be a directory, then all videos of videotype will be imported. Do not pass it as a list!

    working_directory : string, optional
        The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.

    copy_videos : bool, optional
        If this is set to True, the videos are copied to the ``videos`` directory. If it is False,symlink of the videos are copied to the project/videos directory. The default is ``False``; if provided it must be either
        ``True`` or ``False``.

    videotype : string, optional
        Substring used to filter files when ``videos`` is a directory. Default '.avi'.

    Returns
    -------
    string
        Full path of the created config.yaml (or None if the project already exists).

    Example
    --------
    Linux/MacOs
    >>> deeplabcut.create_new_project('reaching-task','Linus',['/data/videos/mouse1.avi','/data/videos/mouse2.avi','/data/videos/mouse3.avi'],'/analysis/project/')
    >>> deeplabcut.create_new_project('reaching-task','Linus','/data/videos',videotype='.mp4')

    Windows:
    >>> deeplabcut.create_new_project('reaching-task','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'], copy_videos=True)
    Users must format paths with either:  r'C:\ OR 'C:\\ <- i.e. a double backslash \ \ )

    """
    from datetime import datetime as dt
    from deeplabcut.utils import auxiliaryfunctions

    # Short date tag (e.g. 'Sep3') goes into the config; the ISO date into the folder name.
    date = dt.today()
    month = date.strftime("%B")
    day = date.day
    d = str(month[0:3] + str(day))
    date = dt.today().strftime('%Y-%m-%d')
    if working_directory is None:  # FIX: identity comparison instead of '== None'
        working_directory = '.'
    wd = Path(working_directory).resolve()
    project_name = '{pn}-{exp}-{date}'.format(pn=project,
                                              exp=experimenter,
                                              date=date)
    project_path = wd / project_name

    # Create project and sub-directories
    if not DEBUG and project_path.exists():
        print('Project "{}" already exists!'.format(project_path))
        return
    video_path = project_path / 'videos'
    data_path = project_path / 'labeled-data'
    shuffles_path = project_path / 'training-datasets'
    results_path = project_path / 'dlc-models'
    for p in [video_path, data_path, shuffles_path, results_path]:
        p.mkdir(parents=True, exist_ok=DEBUG)
        print('Created "{}"'.format(p))

    # Import all videos in a folder or if just one video withouth [] passed, then make it a list.
    if isinstance(videos, str):
        #there are two cases:
        if os.path.isdir(videos):  # it is a path!
            path = videos
            videos = [
                os.path.join(path, vp) for vp in os.listdir(path)
                if videotype in vp
            ]
            if len(videos) == 0:
                print("No videos found in", path, os.listdir(path))
                print(
                    "Perhaps change the videotype, which is currently set to:",
                    videotype)
            else:
                print("Directory entered, ", len(videos),
                      " videos were found.")
        else:
            if os.path.isfile(videos):
                videos = [videos]

    videos = [Path(vp) for vp in videos]
    dirs = [data_path / Path(i.stem) for i in videos]
    for p in dirs:
        # Creates a labeled-data directory per video.
        p.mkdir(parents=True, exist_ok=True)

    destinations = [video_path.joinpath(vp.name) for vp in videos]
    if copy_videos:
        print("Copying the videos")
        for src, dst in zip(videos, destinations):
            # os.fspath for windows compatibility; see PEP 519 and
            # https://github.com/AlexEMG/DeepLabCut/issues/105
            shutil.copy(os.fspath(src), os.fspath(dst))
    else:
        # creates the symlinks of the video and puts it in the videos directory.
        print("Creating the symbolic link of the video")
        for src, dst in zip(videos, destinations):
            if dst.exists() and not DEBUG:
                raise FileExistsError('Video {} exists already!'.format(dst))
            try:
                src = str(src)
                dst = str(dst)
                os.symlink(src, dst)
            except OSError:
                # Symlinks need elevated privileges on Windows; fall back to mklink.
                import subprocess
                subprocess.check_call('mklink %s %s' % (dst, src), shell=True)
            print('Created the symlink of {} to {}'.format(src, dst))

    # In both cases the *new* location inside the project is added to the config file.
    videos = destinations

    # adds the video list to the config.yaml file
    video_sets = {}
    for video in videos:
        print(video)
        try:
            # For windows os.path.realpath does not work and does not link to the real video. [old: rel_video_path = os.path.realpath(video)]
            rel_video_path = str(Path.resolve(Path(video)))
        except OSError:
            # Unresolvable (dangling) link: fall back to the raw link target.
            rel_video_path = os.readlink(str(video))

        vcap = cv2.VideoCapture(rel_video_path)
        if vcap.isOpened():
            width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # Default crop is the full frame, stored as "x1, x2, y1, y2".
            video_sets[rel_video_path] = {
                'crop': ', '.join(map(str, [0, width, 0, height]))
            }
        else:
            print("Cannot open the video file!")
            video_sets = None
        vcap.release()  # FIX: release the capture handle (was leaked before)

    # Set values to config file:
    cfg_file, ruamelFile = auxiliaryfunctions.create_config_template()
    cfg_file['Task'] = project
    cfg_file['scorer'] = experimenter
    cfg_file['video_sets'] = video_sets
    cfg_file['project_path'] = str(project_path)
    cfg_file['date'] = d
    cfg_file['bodyparts'] = ['Hand', 'Finger1', 'Finger2', 'Joystick']
    cfg_file['cropping'] = False
    cfg_file['start'] = 0
    cfg_file['stop'] = 1
    cfg_file['numframes2pick'] = 20
    cfg_file['TrainingFraction'] = [0.95]
    cfg_file['iteration'] = 0
    cfg_file['resnet'] = 50
    cfg_file['snapshotindex'] = -1
    cfg_file['x1'] = 0
    cfg_file['x2'] = 640
    cfg_file['y1'] = 277
    cfg_file['y2'] = 624
    # batch size during inference (video - analysis); see https://www.biorxiv.org/content/early/2018/10/30/457242
    cfg_file['batch_size'] = 4
    cfg_file['corner2move2'] = (50, 50)
    cfg_file['move2corner'] = True
    cfg_file['pcutoff'] = 0.1
    cfg_file['dotsize'] = 12  #for plots size of dots
    cfg_file['alphavalue'] = 0.7  #for plots transparency of markers
    cfg_file['colormap'] = 'jet'  #for plots type of colormap

    projconfigfile = os.path.join(str(project_path), 'config.yaml')
    # Write dictionary to yaml  config file
    auxiliaryfunctions.write_config(projconfigfile, cfg_file)

    print('Generated "{}"'.format(project_path / 'config.yaml'))
    print(
        "\nA new project with name %s is created at %s and a configurable file (config.yaml) is stored there. Change the parameters in this file to adapt to your project's needs.\n Once you have changed the configuration file, use the function 'extract_frames' to select frames for labeling.\n. [OPTIONAL] Use the function 'add_new_videos' to add new videos to your project (at any stage)."
        % (project_name, str(wd)))
    return projconfigfile
Example #19
0
def cropimagesandlabels(
    config,
    numcrops=10,
    size=(400, 400),
    userfeedback=True,
    cropdata=True,
    excludealreadycropped=True,
    updatevideoentries=True,
):
    """
    Crop images into multiple random crops (defined by numcrops) of size dimensions. If cropdata=True then the
    annotation data is loaded and labels for cropped images are inherited.
    If false, then one can make crops for unlabeled folders.

    This can be helpul for large frames with multiple animals. Then a smaller set of equally sized images is created.

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.

    numcrops: number of random crops (around random bodypart)

    size: height x width in pixels

    userfeedback: bool, optional
        If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
        want to assure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.

    cropdata: bool, default True:
        If true creates corresponding annotation data (from ground truth)

    excludealreadycropped: bool, def true
        If true excludes folders that already contain _cropped in their name.

    updatevideoentries, bool, default true
        If true updates video_list entries to refer to cropped frames instead. This makes sense for subsequent processing.

    Example
    --------
    for labeling the frames
    >>> deeplabcut.cropimagesandlabels('/analysis/project/reaching-task/config.yaml')

    --------
    """
    from tqdm import trange

    # Number of digits needed to zero-pad the crop index in output file names.
    indexlength = int(np.ceil(np.log10(numcrops)))
    project_path = os.path.dirname(config)
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg["video_sets"].keys()
    video_names = []
    for video in videos:
        parent, filename, ext = _robust_path_split(video)
        if excludealreadycropped and "_cropped" in filename:
            continue
        video_names.append([parent, filename, ext])

    if ("video_sets_original" not in cfg.keys() and updatevideoentries
        ):  # this dict is kept for storing links to original full-sized videos
        cfg["video_sets_original"] = {}

    for vidpath, vidname, videotype in video_names:
        folder = os.path.join(project_path, "labeled-data", vidname)
        if userfeedback:
            print("Do you want to crop frames for folder: ", folder, "?")
            askuser = input("(yes/no):")
        else:
            # FIX: this was the masked literal "******" (a scraping artifact),
            # which never matched the 'yes' check below, so userfeedback=False
            # silently skipped every folder.
            askuser = "yes"
        if askuser in ("y", "yes", "Y", "Yes"):
            new_vidname = vidname + "_cropped"
            new_folder = os.path.join(project_path, "labeled-data",
                                      new_vidname)
            auxiliaryfunctions.attempttomakefolder(new_folder)

            AnnotationData = []
            pd_index = []

            fn = os.path.join(folder, f"CollectedData_{cfg['scorer']}.h5")
            df = pd.read_hdf(fn, "df_with_missing")
            # (n_images, n_bodyparts, 2) array of x/y label coordinates.
            data = df.values.reshape((df.shape[0], -1, 2))
            sep = "/" if "/" in df.index[0] else "\\"
            if sep != os.path.sep:
                # Normalize index paths stored with the other platform's separator.
                df.index = df.index.str.replace(sep, os.path.sep)
            images = project_path + os.path.sep + df.index
            # Avoid cropping already cropped images
            cropped_images = auxiliaryfunctions.grab_files_in_folder(
                new_folder, "png")
            cropped_names = set(map(lambda x: x.split("c")[0], cropped_images))
            imnames = [
                im for im in images.to_list()
                if Path(im).stem not in cropped_names
            ]
            ic = io.imread_collection(imnames)
            for i in trange(len(ic)):
                frame = ic[i]
                h, w = np.shape(frame)[:2]
                if size[0] >= h or size[1] >= w:
                    shutil.rmtree(new_folder, ignore_errors=True)
                    raise ValueError(
                        "Crop dimensions are larger than image size")

                imagename = os.path.relpath(ic.files[i], project_path)
                ind = np.flatnonzero(df.index == imagename)[0]
                cropindex = 0
                attempts = -1
                while cropindex < numcrops:
                    # Sample a random crop window; keep it only if at least one
                    # labeled bodypart falls inside (or after >10 failed attempts).
                    dd = np.array(data[ind].copy(), dtype=float)
                    y0, x0 = (
                        np.random.randint(h - size[0]),
                        np.random.randint(w - size[1]),
                    )
                    y1 = y0 + size[0]
                    x1 = x0 + size[1]
                    with np.errstate(invalid="ignore"):
                        within = np.all((dd >= [x0, y0]) & (dd < [x1, y1]),
                                        axis=1)
                    if cropdata:
                        # Shift labels into crop coordinates; drop those outside.
                        dd[within] -= [x0, y0]
                        dd[~within] = np.nan
                    attempts += 1
                    if within.any() or attempts > 10:
                        newimname = str(
                            Path(imagename).stem + "c" +
                            str(cropindex).zfill(indexlength) + ".png")
                        croppedimgname = os.path.join(new_folder, newimname)
                        io.imsave(croppedimgname, frame[y0:y1, x0:x1])
                        cropindex += 1
                        pd_index.append(
                            os.path.join("labeled-data", new_vidname,
                                         newimname))
                        AnnotationData.append(dd.flatten())

            if cropdata:
                # Save the inherited annotations next to the cropped images.
                df = pd.DataFrame(AnnotationData,
                                  index=pd_index,
                                  columns=df.columns)
                fn_new = fn.replace(folder, new_folder)
                df.to_hdf(fn_new, key="df_with_missing", mode="w")
                df.to_csv(fn_new.replace(".h5", ".csv"))

            if updatevideoentries and cropdata:
                # moving old entry to _original, dropping it from video_set and update crop parameters
                video_orig = sep.join((vidpath, vidname + videotype))
                cfg["video_sets_original"][video_orig] = cfg["video_sets"][
                    video_orig]
                cfg["video_sets"].pop(video_orig)
                cfg["video_sets"][sep.join(
                    (vidpath, new_vidname + videotype))] = {
                        "crop": ", ".join(map(str, [0, size[1], 0, size[0]]))
                    }

    cfg["croppedtraining"] = True
    auxiliaryfunctions.write_config(config, cfg)
Example #20
0
def create_pretrained_project(
    project,
    experimenter,
    videos,
    model="full_human",
    working_directory=None,
    copy_videos=False,
    videotype=None,
    analyzevideo=True,
    filtered=True,
    createlabeledvideo=True,
    trainFraction=None,
):
    """
    Creates a new project directory, sub-directories and a basic configuration file.
    Change its parameters to your projects need.

    The project will also be initialized with a pre-trained model from the DeepLabCut model zoo!

    http://www.mousemotorlab.org/dlc-modelzoo

    Parameters
    ----------
    project : string
        String containing the name of the project.

    experimenter : string
        String containing the name of the experimenter.

    model: string, options see  http://www.mousemotorlab.org/dlc-modelzoo
        Current option and default: 'full_human'  Creates a demo human project and analyzes a video with ResNet 101 weights pretrained on MPII Human Pose. This is from the DeeperCut paper
        by Insafutdinov et al. https://arxiv.org/abs/1605.03170 Please make sure to cite it too if you use this code!

    videos : list
        A list of string containing the full paths of the videos to include in the project.

    working_directory : string, optional
        The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.

    copy_videos : bool, optional  ON WINDOWS: TRUE is often necessary!
        If this is set to True, the videos are copied to the ``videos`` directory. If it is False, symlinks of the videos are placed in the project/videos directory. The default is ``False``; if provided it must be either
        ``True`` or ``False``.

    analyzevideo : bool, optional
        If true, then the video is analyzed and a labeled video is created. If false, then only the project will be created and the weights downloaded. You can then access them

    filtered: bool, default false
        Boolean variable indicating if filtered pose data output should be plotted rather than frame-by-frame predictions.
        Filtered version can be calculated with deeplabcut.filterpredictions

    trainFraction: By default value from *new* projects. (0.95)
            Fraction that will be used in dlc-model/trainingset folder name.

    Example
    --------
    Linux/MacOs loading full_human model and analzying video /homosapiens1.avi
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Linus',['/data/videos/homosapiens1.avi'], copy_videos=False)

    Loading full_cat model and analzying video "felixfeliscatus3.avi"
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Linus',['/data/videos/felixfeliscatus3.avi'], model='full_cat')

    Windows:
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'],r'C:\yourusername\analysis\project', copy_videos=True)
    Users must format paths with either:  r'C:\ OR 'C:\\ <- i.e. a double backslash \ \ )

    """
    if model in globals()["Modeloptions"]:
        cwd = os.getcwd()

        cfg = deeplabcut.create_new_project(project, experimenter, videos,
                                            working_directory, copy_videos,
                                            videotype)
        if trainFraction is not None:
            # BUG FIX: the original referenced the undefined name 'tranFraction'
            # here, which raised a NameError whenever trainFraction was given.
            auxiliaryfunctions.edit_config(
                cfg, {"TrainingFraction": [trainFraction]})

        config = auxiliaryfunctions.read_config(cfg)
        if model == "full_human":
            # Hard-coded MPII-style human keypoints and skeleton for the demo model.
            config["bodyparts"] = [
                "ankle1",
                "knee1",
                "hip1",
                "hip2",
                "knee2",
                "ankle2",
                "wrist1",
                "elbow1",
                "shoulder1",
                "shoulder2",
                "elbow2",
                "wrist2",
                "chin",
                "forehead",
            ]
            config["skeleton"] = [
                ["ankle1", "knee1"],
                ["ankle2", "knee2"],
                ["knee1", "hip1"],
                ["knee2", "hip2"],
                ["hip1", "hip2"],
                ["shoulder1", "shoulder2"],
                ["shoulder1", "hip1"],
                ["shoulder2", "hip2"],
                ["shoulder1", "elbow1"],
                ["shoulder2", "elbow2"],
                ["chin", "forehead"],
                ["elbow1", "wrist1"],
                ["elbow2", "wrist2"],
            ]
            config["default_net_type"] = "resnet_101"
        else:  # just make a case and put the stuff you want.
            # TBD: 'partaffinityfield_graph' >> use to set skeleton!
            pass

        auxiliaryfunctions.write_config(cfg, config)
        config = auxiliaryfunctions.read_config(cfg)

        # Shuffle-1 model folders for the first training fraction.
        train_dir = Path(
            os.path.join(
                config["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction=config["TrainingFraction"][0],
                        shuffle=1,
                        cfg=config,
                    )),
                "train",
            ))
        test_dir = Path(
            os.path.join(
                config["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction=config["TrainingFraction"][0],
                        shuffle=1,
                        cfg=config,
                    )),
                "test",
            ))

        # Create the model directory
        train_dir.mkdir(parents=True, exist_ok=True)
        test_dir.mkdir(parents=True, exist_ok=True)

        modelfoldername = auxiliaryfunctions.GetModelFolder(
            trainFraction=config["TrainingFraction"][0], shuffle=1, cfg=config)
        path_train_config = str(
            os.path.join(config["project_path"], Path(modelfoldername),
                         "train", "pose_cfg.yaml"))
        path_test_config = str(
            os.path.join(config["project_path"], Path(modelfoldername), "test",
                         "pose_cfg.yaml"))

        # Download the weights and put then in appropriate directory
        print("Dowloading weights...")
        auxfun_models.DownloadModel(model, train_dir)

        pose_cfg = deeplabcut.auxiliaryfunctions.read_plainconfig(
            path_train_config)
        print(path_train_config)
        # Updating config file (renamed from 'dict': do not shadow the builtin):
        config_updates = {
            "default_net_type": pose_cfg["net_type"],
            "default_augmenter": pose_cfg["dataset_type"],
            "bodyparts": pose_cfg["all_joints_names"],
            "skeleton": [],  # TODO: update with paf_graph
            "dotsize": 6,
        }
        auxiliaryfunctions.edit_config(cfg, config_updates)

        # Create the pose_config.yaml files
        parent_path = Path(os.path.dirname(deeplabcut.__file__))
        defaultconfigfile = str(parent_path / "pose_cfg.yaml")
        trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(config)
        datafilename, metadatafilename = auxiliaryfunctions.GetDataandMetaDataFilenames(
            trainingsetfolder,
            trainFraction=config["TrainingFraction"][0],
            shuffle=1,
            cfg=config,
        )

        # downloading base encoder / not required unless on re-trains (but when a training set is created this happens anyway)
        # model_path, num_shuffles=auxfun_models.Check4weights(pose_cfg['net_type'], parent_path, num_shuffles= 1)

        # Updating training and test pose_cfg:
        # The snapshot name is derived from the downloaded checkpoint's .meta file.
        snapshotname = [fn for fn in os.listdir(train_dir)
                        if ".meta" in fn][0].split(".meta")[0]
        dict2change = {
            "init_weights": str(os.path.join(train_dir, snapshotname)),
            "project_path": str(config["project_path"]),
        }

        UpdateTrain_pose_yaml(pose_cfg, dict2change, path_train_config)
        keys2save = [
            "dataset",
            "dataset_type",
            "num_joints",
            "all_joints",
            "all_joints_names",
            "net_type",
            "init_weights",
            "global_scale",
            "location_refinement",
            "locref_stdev",
        ]

        MakeTest_pose_yaml(pose_cfg, keys2save, path_test_config)

        video_dir = os.path.join(config["project_path"], "videos")
        if analyzevideo:
            print("Analyzing video...")
            deeplabcut.analyze_videos(cfg, [video_dir],
                                      videotype,
                                      save_as_csv=True)

        if createlabeledvideo:
            if filtered:
                deeplabcut.filterpredictions(cfg, [video_dir], videotype)

            print("Plotting results...")
            deeplabcut.create_labeled_video(cfg, [video_dir],
                                            videotype,
                                            draw_skeleton=True,
                                            filtered=filtered)
            deeplabcut.plot_trajectories(cfg, [video_dir],
                                         videotype,
                                         filtered=filtered)

        os.chdir(cwd)
        return cfg, path_train_config

    else:
        return "N/A", "N/A"
def create_CLARA_project(videos, project, experimenter, working_directory):
    """Creates a new CLARA project directory, sub-directories and a basic configuration file.

    The configuration file is loaded with CLARA-specific default values
    (grouped bodyparts, fixed crop window, resnet-152). Change its
    parameters to your project's needs.

    Parameters
    ----------
    videos : list
        Full paths of the videos to include in the project.
    project : string
        Name of the project.
    experimenter : string
        Name of the experimenter.
    working_directory : string
        Directory in which the project folder is created.

    Returns
    -------
    str or None
        Path of the written config.yaml, or None if the project folder
        already exists.
    """
    from datetime import datetime as dt
    from deeplabcut import DEBUG
    from deeplabcut.utils import auxiliaryfunctions

    date = dt.today()
    month = date.strftime("%B")
    day = date.day
    d = str(month[0:3] + str(day))
    date = dt.today().strftime('%Y-%m-%d')
    wd = Path(working_directory).resolve()
    project_name = '{pn}-{exp}-{date}'.format(pn=project,
                                              exp=experimenter,
                                              date=date)
    project_path = wd / project_name

    # Create project and sub-directories
    if not DEBUG and project_path.exists():
        print('Project "{}" already exists!'.format(project_path))
        return
    video_path = project_path / 'videos'
    data_path = project_path / 'labeled-data'
    shuffles_path = project_path / 'training-datasets'
    results_path = project_path / 'dlc-models'
    for p in [video_path, data_path, shuffles_path, results_path]:
        p.mkdir(parents=True, exist_ok=DEBUG)
        print('Created "{}"'.format(p))

    # adds the video list to the config.yaml file
    video_sets = {}
    for video in videos:
        print(video)
        try:
            # For windows os.path.realpath does not work and does not link to the real video. [old: rel_video_path = os.path.realpath(video)]
            rel_video_path = str(Path.resolve(Path(video)))
        except:
            rel_video_path = os.readlink(str(video))

        vcap = cv2.VideoCapture(rel_video_path)
        if vcap.isOpened():
            width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # Default crop covers the full frame: x1, x2, y1, y2.
            video_sets[rel_video_path] = {
                'crop': ', '.join(map(str, [0, width, 0, height]))
            }
        else:
            print("Cannot open the video file!")
            video_sets = None

    # Set values to config file:
    # (removed a stray no-op 'cfg_file' expression statement that was here)
    cfg_file, ruamelFile = auxiliaryfunctions.create_config_template()
    cfg_file['Task'] = project
    cfg_file['scorer'] = experimenter
    cfg_file['video_sets'] = video_sets
    cfg_file['project_path'] = str(project_path)
    cfg_file['date'] = d
    # CLARA groups bodyparts by object category rather than a flat list.
    bpset = {
        'Hand': ['Flat', 'Spread', 'Grab'],
        'Pellet': ['Free', 'InHand'],
        'Other': ['Tongue', 'Mouth']
    }
    cfg_file['bodyparts'] = bpset
    cfg_file['cropping'] = False
    cfg_file['start'] = 0
    cfg_file['stop'] = 1
    cfg_file['numframes2pick'] = 20
    cfg_file['TrainingFraction'] = [0.95]
    cfg_file['iteration'] = 0
    cfg_file['resnet'] = 152
    cfg_file['snapshotindex'] = -1
    cfg_file['x1'] = 0
    cfg_file['x2'] = 640
    cfg_file['y1'] = 277
    cfg_file['y2'] = 624
    cfg_file[
        'batch_size'] = 15  #batch size during inference (video - analysis); see https://www.biorxiv.org/content/early/2018/10/30/457242
    cfg_file['corner2move2'] = (50, 50)
    cfg_file['move2corner'] = True
    cfg_file['pcutoff'] = 0.1
    cfg_file['dotsize'] = 8  #for plots size of dots
    cfg_file['alphavalue'] = 0.7  #for plots transparency of markers
    cfg_file['colormap'] = 'jet'  #for plots type of colormap

    projconfigfile = os.path.join(str(project_path), 'config.yaml')
    # Write dictionary to yaml  config file
    auxiliaryfunctions.write_config(projconfigfile, cfg_file)

    print('Generated "{}"'.format(project_path / 'config.yaml'))
    print(
        "\nA new project with name %s is created at %s and a configurable file (config.yaml) is stored there. Change the parameters in this file to adapt to your project's needs.\n Once you have changed the configuration file, use the function 'extract_frames' to select frames for labeling.\n. [OPTIONAL] Use the function 'add_new_videos' to add new videos to your project (at any stage)."
        % (project_name, str(wd)))
    return projconfigfile
Exemple #22
0
def add_new_videos(config, videos, copy_videos=False, coords=None):
    """
    Add new videos to the config file at any stage of the project.

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.

    videos : list
        A list of string containing the full paths of the videos to include in the project.

    copy_videos : bool, optional
        If this is set to True, the symlink of the videos are copied to the project/videos directory. The default is
        ``False``; if provided it must be either ``True`` or ``False``.
    coords: list, optional
      A list containing the list of cropping coordinates of the video. The default is set to None.
    Examples
    --------
    Video will be added, with cropping dimenions according to the frame dimensinos of mouse5.avi
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'])

    Video will be added, with cropping dimenions [0,100,0,200]
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'],copy_videos=False,coords=[[0,100,0,200]])

    Two videos will be added, with cropping dimenions [0,100,0,200] and [0,100,0,250], respectively.
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi','/data/videos/mouse6.avi'],copy_videos=False,coords=[[0,100,0,200],[0,100,0,250]])

    """
    import os
    import shutil
    from pathlib import Path

    from deeplabcut import DEBUG
    from deeplabcut.utils import auxiliaryfunctions
    import cv2

    # Read the config file
    cfg = auxiliaryfunctions.read_config(config)

    for idx, video in enumerate(videos):
        try:
            video_path = os.path.realpath(video)
        except:
            # realpath failed (e.g. on Windows); fall back to the link target.
            video_path = os.readlink(video)

        vcap = cv2.VideoCapture(video_path)
        if vcap.isOpened():
            # get vcap property
            width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # FIX: compare to None with 'is', not '=='.
            if coords is None:
                # Default crop spans the full frame.
                cfg['video_sets'].update({
                    video_path: {
                        'crop': ', '.join(map(str, [0, width, 0, height]))
                    }
                })
            else:
                c = coords[idx]
                cfg['video_sets'].update(
                    {video_path: {
                        'crop': ', '.join(map(str, c))
                    }})
        else:
            print("Cannot open the video file!")

    auxiliaryfunctions.write_config(config, cfg)

    video_path = Path(config).parents[0] / 'videos'
    data_path = Path(config).parents[0] / 'labeled-data'
    videos = [Path(vp) for vp in videos]

    dirs = [data_path / Path(i.stem) for i in videos]

    for p in dirs:
        # Creates the per-video directory under labeled-data.
        p.mkdir(parents=True, exist_ok=True)
    destinations = [video_path.joinpath(vp.name) for vp in videos]
    if copy_videos:
        print("Copying the videos")
        for src, dst in zip(videos, destinations):
            shutil.copy(os.fspath(src), os.fspath(dst))
    else:
        print("Creating the symbolic link of the video")
        for src, dst in zip(videos, destinations):
            if dst.exists() and not DEBUG:
                raise FileExistsError('Video {} exists already!'.format(dst))
            try:
                src = str(src)
                dst = str(dst)
                os.symlink(src, dst)
            # NOTE(review): os.symlink never raises shutil.SameFileError
            # (that exception is specific to shutil.copy), so this handler
            # looks dead — confirm the intended exception before changing it.
            except shutil.SameFileError:
                if not DEBUG:
                    raise
    print(
        "New video was added to the project! Use the function 'extract_frames' to select frames for labeling."
    )
Exemple #23
0
    cfg_file['project_path']=str(project_path)
    cfg_file['date']=d
    cfg_file['bodyparts']=['WhiskerA1','WhiskerA2','WhiskerA3','WhiskerA4','WhiskerA5','WhiskerA6','WhiskerB1','WhiskerB2','WhiskerB3','WhiskerB4','WhiskerB4','WhiskerB5','WhiskerB6']
    cfg_file['cropping']=False
    cfg_file['start']=0
    cfg_file['stop']=1
	cfg_file['numframes2pick'] = int(numpy.load('C:\\Users\\tex_analysis\\Anaconda3\\envs\\dlc-windowsGPU\\Lib\\numframes.npy'))
    cfg_file['TrainingFraction']=[0.95]
    cfg_file['iteration']=0
    cfg_file['resnet']=50
    cfg_file['snapshotindex']=-1
    cfg_file['x1']=0
    cfg_file['x2']=640
    cfg_file['y1']=277
    cfg_file['y2']=624
    cfg_file['batch_size']=4 #batch size during inference (video - analysis); see https://www.biorxiv.org/content/early/2018/10/30/457242
    cfg_file['corner2move2']=(50,50)
    cfg_file['move2corner']=True
    cfg_file['pcutoff']=0.1
    cfg_file['dotsize']=3 #for plots size of dots
    cfg_file['alphavalue']=0.7 #for plots transparency of markers
    cfg_file['colormap']='hsv' #for plots type of colormap

    projconfigfile=os.path.join(str(project_path),'config.yaml')
    # Write dictionary to yaml  config file
    auxiliaryfunctions.write_config(projconfigfile,cfg_file)

    print('Generated "{}"'.format(project_path / 'config.yaml'))
    print("\nA new project with name %s is created at %s and a configurable file (config.yaml) is stored there. Change the parameters in this file to adapt to your project's needs.\n Once you have changed the configuration file, use the function 'extract_frames' to select frames for labeling.\n. [OPTIONAL] Use the function 'add_new_videos' to add new videos to your project (at any stage)." %(project_name,str(wd)))
    return projconfigfile
Exemple #24
0
def create_new_project(
    project,
    experimenter,
    videos,
    working_directory=None,
    copy_videos=False,
    videotype=".avi",
    multianimal=False,
):
    """Creates a new project directory, sub-directories and a basic configuration file. The configuration file is loaded with the default values. Change its parameters to your projects need.

    Parameters
    ----------
    project : string
        String containing the name of the project.

    experimenter : string
        String containing the name of the experimenter.

    videos : list
        A list of string containing the full paths of the videos to include in the project.
        Attention: Can also be a directory, then all videos of videotype will be imported.

    working_directory : string, optional
        The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.

    copy_videos : bool, optional
        If this is set to True, the videos are copied to the ``videos`` directory. If it is False,symlink of the videos are copied to the project/videos directory. The default is ``False``; if provided it must be either
        ``True`` or ``False``.

    multianimal: bool, optional. Default: False.
        For creating a multi-animal project (introduced in DLC 2.2)

    Example
    --------
    Linux/MacOs
    >>> deeplabcut.create_new_project('reaching-task','Linus',['/data/videos/mouse1.avi','/data/videos/mouse2.avi','/data/videos/mouse3.avi'],'/analysis/project/')
    >>> deeplabcut.create_new_project('reaching-task','Linus',['/data/videos'],videotype='.mp4')

    Windows:
    >>> deeplabcut.create_new_project('reaching-task','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'], copy_videos=True)
    Users must format paths with either:  r'C:\ OR 'C:\\ <- i.e. a double backslash \ \ )

    """
    from datetime import datetime as dt
    from deeplabcut.utils import auxiliaryfunctions

    # Date strings: short form ("Mar4") for the config, ISO form for the folder name.
    date = dt.today()
    month = date.strftime("%B")
    day = date.day
    d = str(month[0:3] + str(day))
    date = dt.today().strftime("%Y-%m-%d")
    # FIX: compare to None with 'is', not '=='.
    if working_directory is None:
        working_directory = "."
    wd = Path(working_directory).resolve()
    project_name = "{pn}-{exp}-{date}".format(pn=project,
                                              exp=experimenter,
                                              date=date)
    project_path = wd / project_name

    # Create project and sub-directories
    if not DEBUG and project_path.exists():
        print('Project "{}" already exists!'.format(project_path))
        return
    video_path = project_path / "videos"
    data_path = project_path / "labeled-data"
    shuffles_path = project_path / "training-datasets"
    results_path = project_path / "dlc-models"
    for p in [video_path, data_path, shuffles_path, results_path]:
        p.mkdir(parents=True, exist_ok=DEBUG)
        print('Created "{}"'.format(p))

    # Add all videos in the folder. Multiple folders can be passed in a list, similar to the video files. Folders and video files can also be passed!
    vids = []
    for i in videos:
        # Check if it is a folder
        if os.path.isdir(i):
            # NOTE(review): substring match — 'videotype in vp' also matches
            # e.g. 'clip.avi.bak'; consider endswith if that is a problem.
            vids_in_dir = [
                os.path.join(i, vp) for vp in os.listdir(i) if videotype in vp
            ]
            vids = vids + vids_in_dir
            if len(vids_in_dir) == 0:
                print("No videos found in", i)
                print(
                    "Perhaps change the videotype, which is currently set to:",
                    videotype,
                )
            else:
                videos = vids
                print(
                    len(vids_in_dir),
                    " videos from the directory",
                    i,
                    "were added to the project.",
                )
        else:
            if os.path.isfile(i):
                vids = vids + [i]
            videos = vids

    videos = [Path(vp) for vp in videos]
    dirs = [data_path / Path(i.stem) for i in videos]
    for p in dirs:
        # Creates the per-video directory under labeled-data.
        p.mkdir(parents=True, exist_ok=True)

    destinations = [video_path.joinpath(vp.name) for vp in videos]
    if copy_videos:
        print("Copying the videos")
        for src, dst in zip(videos, destinations):
            shutil.copy(
                os.fspath(src),
                os.fspath(dst))  # https://www.python.org/dev/peps/pep-0519/
    else:
        # creates the symlinks of the video and puts it in the videos directory.
        print("Attempting to create a symbolic link of the video ...")
        for src, dst in zip(videos, destinations):
            if dst.exists() and not DEBUG:
                raise FileExistsError("Video {} exists already!".format(dst))
            try:
                src = str(src)
                dst = str(dst)
                os.symlink(src, dst)
            except OSError:
                # Symlinking can fail on Windows without privileges; fall
                # back to the mklink shell command.
                import subprocess

                subprocess.check_call("mklink %s %s" % (dst, src), shell=True)
            print("Created the symlink of {} to {}".format(src, dst))
            videos = destinations

    if copy_videos:
        videos = destinations  # in this case the *new* location should be added to the config file

    # adds the video list to the config.yaml file
    video_sets = {}
    for video in videos:
        print(video)
        try:
            # For windows os.path.realpath does not work and does not link to the real video. [old: rel_video_path = os.path.realpath(video)]
            rel_video_path = str(Path.resolve(Path(video)))
        except OSError:
            # FIX: narrowed from a bare 'except:'; resolve() failures are OSError.
            rel_video_path = os.readlink(str(video))

        vcap = cv2.VideoCapture(rel_video_path)
        if vcap.isOpened():
            width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # Default crop spans the full frame: x1, x2, y1, y2.
            video_sets[rel_video_path] = {
                "crop": ", ".join(map(str, [0, width, 0, height]))
            }
        else:
            print("Cannot open the video file! Skipping to the next one...")
            os.remove(video)  # Removing the video or link from the project

    if not len(video_sets):
        # Silently sweep the files that were already written.
        shutil.rmtree(project_path, ignore_errors=True)
        print(
            "WARNING: No valid videos were found. The project was not created ..."
        )
        print("Verify the video files and re-create the project.")
        return "nothingcreated"

    # Set values to config file:
    if multianimal:  # parameters specific to multianimal project
        cfg_file, ruamelFile = auxiliaryfunctions.create_config_template(
            multianimal)
        cfg_file["multianimalproject"] = multianimal
        cfg_file["individuals"] = ["individual1", "individual2", "individual3"]
        cfg_file["multianimalbodyparts"] = [
            "bodypart1", "bodypart2", "bodypart3"
        ]
        cfg_file["uniquebodyparts"] = []
        cfg_file["bodyparts"] = "MULTI!"
        cfg_file["skeleton"] = [
            ["bodypart1", "bodypart2"],
            ["bodypart2", "bodypart3"],
            ["bodypart1", "bodypart3"],
        ]
        cfg_file["default_augmenter"] = "multi-animal-imgaug"
    else:
        cfg_file, ruamelFile = auxiliaryfunctions.create_config_template()
        cfg_file["multianimalproject"] = False
        cfg_file["bodyparts"] = [
            "bodypart1", "bodypart2", "bodypart3", "objectA"
        ]
        cfg_file["skeleton"] = [["bodypart1", "bodypart2"],
                                ["objectA", "bodypart3"]]
        cfg_file["default_augmenter"] = "default"
    cfg_file["croppedtraining"] = False

    # common parameters:
    cfg_file["Task"] = project
    cfg_file["scorer"] = experimenter
    cfg_file["video_sets"] = video_sets
    cfg_file["project_path"] = str(project_path)
    cfg_file["date"] = d
    cfg_file["cropping"] = False
    cfg_file["start"] = 0
    cfg_file["stop"] = 1
    cfg_file["numframes2pick"] = 20
    cfg_file["TrainingFraction"] = [0.95]
    cfg_file["iteration"] = 0
    cfg_file["default_net_type"] = "resnet_50"
    cfg_file["snapshotindex"] = -1
    cfg_file["x1"] = 0
    cfg_file["x2"] = 640
    cfg_file["y1"] = 277
    cfg_file["y2"] = 624
    cfg_file[
        "batch_size"] = 8  # batch size during inference (video - analysis); see https://www.biorxiv.org/content/early/2018/10/30/457242
    cfg_file["corner2move2"] = (50, 50)
    cfg_file["move2corner"] = True
    cfg_file["skeleton_color"] = "black"
    cfg_file["pcutoff"] = 0.6
    cfg_file["dotsize"] = 12  # for plots size of dots
    cfg_file["alphavalue"] = 0.7  # for plots transparency of markers
    cfg_file["colormap"] = "plasma"  # for plots type of colormap

    projconfigfile = os.path.join(str(project_path), "config.yaml")
    # Write dictionary to yaml  config file
    auxiliaryfunctions.write_config(projconfigfile, cfg_file)

    print('Generated "{}"'.format(project_path / "config.yaml"))
    print(
        "\nA new project with name %s is created at %s and a configurable file (config.yaml) is stored there. Change the parameters in this file to adapt to your project's needs.\n Once you have changed the configuration file, use the function 'extract_frames' to select frames for labeling.\n. [OPTIONAL] Use the function 'add_new_videos' to add new videos to your project (at any stage)."
        % (project_name, str(wd)))
    return projconfigfile
Exemple #25
0
def add_new_videos(config, videos, copy_videos=False, coords=None):
    """
    Add new videos to the config file at any stage of the project.

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.

    videos : list
        A list of string containing the full paths of the videos to include in the project.

    copy_videos : bool, optional
        If this is set to True, the symlink of the videos are copied to the project/videos directory. The default is
        ``False``; if provided it must be either ``True`` or ``False``.
    coords: list, optional
      A list containing the list of cropping coordinates of the video. The default is set to None.
    Examples
    --------
    Video will be added, with cropping dimenions according to the frame dimensinos of mouse5.avi
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'])

    Video will be added, with cropping dimenions [0,100,0,200]
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'],copy_videos=False,coords=[[0,100,0,200]])

    Two videos will be added, with cropping dimenions [0,100,0,200] and [0,100,0,250], respectively.
    >>> deeplabcut.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi','/data/videos/mouse6.avi'],copy_videos=False,coords=[[0,100,0,200],[0,100,0,250]])

    """
    import os
    import shutil
    from pathlib import Path

    from deeplabcut.utils import auxiliaryfunctions
    from deeplabcut.utils.auxfun_videos import VideoReader

    # Read the config file
    cfg = auxiliaryfunctions.read_config(config)

    video_path = Path(config).parents[0] / "videos"
    data_path = Path(config).parents[0] / "labeled-data"
    videos = [Path(vp) for vp in videos]

    dirs = [data_path / Path(i.stem) for i in videos]

    for p in dirs:
        # Creates directory under labeled-data & perhaps copies videos (to /videos).
        p.mkdir(parents=True, exist_ok=True)

    destinations = [video_path.joinpath(vp.name) for vp in videos]
    if copy_videos:
        for src, dst in zip(videos, destinations):
            if dst.exists():
                pass
            else:
                print("Copying the videos")
                shutil.copy(os.fspath(src), os.fspath(dst))
    else:
        for src, dst in zip(videos, destinations):
            if dst.exists():
                pass
            else:
                print("Creating the symbolic link of the video")
                src = str(src)
                dst = str(dst)
                os.symlink(src, dst)

    if copy_videos:
        videos = destinations  # in this case the *new* location should be added to the config file
    # adds the video list to the config.yaml file
    for idx, video in enumerate(videos):
        try:
            # For windows os.path.realpath does not work and does not link to the real video.
            video_path = str(Path.resolve(Path(video)))
        #           video_path = os.path.realpath(video)
        except OSError:
            # FIX: narrowed from a bare 'except:'; resolve() failures are OSError.
            video_path = os.readlink(video)

        vid = VideoReader(video_path)
        if coords is not None:
            c = coords[idx]
        else:
            # No explicit crop given: use the video's own bounding box.
            c = vid.get_bbox()
        params = {video_path: {"crop": ", ".join(map(str, c))}}
        # Cropped-training projects keep the originals under 'video_sets_original'.
        if "video_sets_original" not in cfg:
            cfg["video_sets"].update(params)
        else:
            cfg["video_sets_original"].update(params)

    auxiliaryfunctions.write_config(config, cfg)
    print(
        "New video was added to the project! Use the function 'extract_frames' to select frames for labeling."
    )
Exemple #26
0
def adddatasetstovideolistandviceversa(config):
    """
    Synchronize the ``video_sets`` entries in the config file with the folders
    present in ``labeled-data``.

    First run comparevideolistsanddatafolders(config) to compare the folders in
    labeled-data and the ones listed under video_sets (in the config file).
    If you detect differences this function can be used to make sure each folder
    has a video entry & vice versa.

    It corrects this problem in the following way:

    If a video entry in the config file does not have a matching folder in
    labeled-data, then the entry is removed.
    If a folder in labeled-data does not have a video entry in the config file,
    then a video with the same stem is searched for in the project's ``videos``
    directory and added with its bounding box as the crop parameters.

    Handle with care!

    Parameter
    ----------
    config : string
        String containing the full path of the config file in the project.
    """
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg["video_sets"]
    video_names = [Path(i).stem for i in videos]

    alldatafolders = [
        fn for fn in os.listdir(Path(config).parent / "labeled-data")
        if "_labeled" not in fn and not fn.startswith(".")
    ]

    print("Config file contains:", len(video_names))
    print("Labeled-data contains:", len(alldatafolders))

    # Collect keys first, then delete: a set avoids a KeyError if several
    # missing folder names resolve to the same video entry, and we never
    # mutate ``videos`` while iterating over it.
    toberemoved = set()
    for vn in video_names:
        if vn not in alldatafolders:
            print(vn, " is missing as a labeled folder >> removing key!")
            for fullvideo in videos:
                # Compare the exact stem rather than a substring so that e.g.
                # "vid1" does not also match (and remove) "vid10".
                if Path(fullvideo).stem == vn:
                    toberemoved.add(fullvideo)

    for vid in toberemoved:
        del videos[vid]

    # Load updated lists:
    video_names = [Path(i).stem for i in videos]
    for vn in alldatafolders:
        if vn not in video_names:
            print(vn, " is missing in config file >> adding it!")
            # Find the corresponding video file in the project's videos folder.
            video_path = None
            for file in os.listdir(os.path.join(cfg["project_path"],
                                                "videos")):
                if os.path.splitext(file)[0] == vn:
                    video_path = os.path.join(cfg["project_path"], "videos",
                                              file)
                    break
            if video_path is not None:
                clip = VideoReader(video_path)
                videos.update({
                    video_path: {
                        "crop": ", ".join(map(str, clip.get_bbox()))
                    }
                })

    auxiliaryfunctions.write_config(config, cfg)
Example #27
0
def create_pretrained_human_project(project,
                                    experimenter,
                                    videos,
                                    working_directory=None,
                                    copy_videos=False,
                                    videotype='.avi',
                                    createlabeledvideo=True,
                                    analyzevideo=True):
    r"""
    Creates a demo human project and analyzes a video with ResNet 101 weights pretrained on MPII Human Pose. This is from the DeeperCut paper by Insafutdinov et al. https://arxiv.org/abs/1605.03170 Please make sure to cite it too if you use this code!

    Parameters
    ----------
    project : string
        String containing the name of the project.

    experimenter : string
        String containing the name of the experimenter.

    videos : list
        A list of string containing the full paths of the videos to include in the project.

    working_directory : string, optional
        The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.

    copy_videos : bool, optional
        If this is set to True, the videos are copied to the ``videos`` directory. If it is False, symlinks of the videos are created in the project/videos directory. The default is ``False``; if provided it must be either
        ``True`` or ``False``.

    analyzevideo : bool, optional
        If true, then the video is analyzed and a labeled video is created. If false, then only the project will be created and the weights downloaded. You can then access them.

    createlabeledvideo : bool, optional
        If true, a labeled video (with skeleton drawn) and trajectory plots are created after analysis.

    Returns
    -------
    (cfg, path_train_config) : tuple of strings
        The path of the project config file and the path of the train pose_cfg.yaml.

    Example
    --------
    Linux/MacOs
    >>> deeplabcut.create_pretrained_human_project('human','Linus',['/data/videos/mouse1.avi'],'/analysis/project/',copy_videos=False)

    Windows:
    >>> deeplabcut.create_pretrained_human_project('human','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'],r'C:\yourusername\analysis\project' copy_videos=False)
    Users must format paths with either:  r'C:\ OR 'C:\\ <- i.e. a double backslash \ \ )
    --------
    """

    # Create the base project and overwrite its bodyparts/skeleton with the
    # 14-keypoint MPII human pose layout expected by the pretrained weights.
    cfg = deeplabcut.create_new_project(project, experimenter, videos,
                                        working_directory, copy_videos,
                                        videotype)

    config = auxiliaryfunctions.read_config(cfg)
    config['bodyparts'] = [
        'ankle1', 'knee1', 'hip1', 'hip2', 'knee2', 'ankle2', 'wrist1',
        'elbow1', 'shoulder1', 'shoulder2', 'elbow2', 'wrist2', 'chin',
        'forehead'
    ]
    config['skeleton'] = [['ankle1', 'knee1'], ['ankle2', 'knee2'],
                          ['knee1', 'hip1'], ['knee2', 'hip2'],
                          ['hip1', 'hip2'], ['shoulder1', 'shoulder2'],
                          ['shoulder1', 'hip1'], ['shoulder2', 'hip2'],
                          ['shoulder1', 'elbow1'], ['shoulder2', 'elbow2'],
                          ['chin', 'forehead'], ['elbow1', 'wrist1'],
                          ['elbow2', 'wrist2']]
    config['default_net_type'] = 'resnet_101'
    auxiliaryfunctions.write_config(cfg, config)
    # Re-read so downstream code sees exactly what was persisted to disk.
    config = auxiliaryfunctions.read_config(cfg)

    train_dir = Path(
        os.path.join(
            config['project_path'],
            str(
                auxiliaryfunctions.GetModelFolder(
                    trainFraction=config['TrainingFraction'][0],
                    shuffle=1,
                    cfg=config)), 'train'))
    test_dir = Path(
        os.path.join(
            config['project_path'],
            str(
                auxiliaryfunctions.GetModelFolder(
                    trainFraction=config['TrainingFraction'][0],
                    shuffle=1,
                    cfg=config)), 'test'))

    # Create the model directory
    train_dir.mkdir(parents=True, exist_ok=True)
    test_dir.mkdir(parents=True, exist_ok=True)

    modelfoldername = auxiliaryfunctions.GetModelFolder(
        trainFraction=config['TrainingFraction'][0], shuffle=1, cfg=config)

    path_train_config = str(
        os.path.join(config['project_path'], Path(modelfoldername), 'train',
                     'pose_cfg.yaml'))
    path_test_config = str(
        os.path.join(config['project_path'], Path(modelfoldername), 'test',
                     'pose_cfg.yaml'))

    # Download the weights and put them in the appropriate directory.
    # The download helper works relative to the cwd, hence the chdir dance.
    cwd = os.getcwd()
    os.chdir(train_dir)
    print(
        "Checking if the weights are already available, otherwise I will download them!"
    )
    weightfilename = auxfun_models.download_mpii_weigths(train_dir)
    os.chdir(cwd)

    # Create the pose_config.yaml files
    parent_path = Path(os.path.dirname(deeplabcut.__file__))
    defaultconfigfile = str(parent_path / 'pose_cfg.yaml')
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(config)
    datafilename, metadatafilename = auxiliaryfunctions.GetDataandMetaDataFilenames(
        trainingsetfolder,
        trainFraction=config['TrainingFraction'][0],
        shuffle=1,
        cfg=config)
    bodyparts = config['bodyparts']
    net_type = 'resnet_101'
    num_shuffles = 1
    model_path, num_shuffles = auxfun_models.Check4weights(
        net_type, parent_path, num_shuffles)
    items2change = {
        # NOTE(review): hard-coded placeholder instead of ``datafilename`` —
        # looks intentional for the pretrained demo, but verify.
        "dataset": 'dataset-test.mat',  #datafilename,
        "metadataset": metadatafilename,
        "num_joints": len(bodyparts),
        "all_joints": [[i] for i in range(len(bodyparts))],
        "all_joints_names": [str(bpt) for bpt in bodyparts],
        # Strip the ".index" suffix so TF sees the checkpoint prefix.
        "init_weights":
        weightfilename.split('.index')[0],  #'models/mpii/snapshot-1030000',
        "project_path": str(config['project_path']),
        "net_type": net_type,
        "dataset_type": "default"
    }
    trainingdata = MakeTrain_pose_yaml(items2change, path_train_config,
                                       defaultconfigfile)

    keys2save = [
        "dataset", "dataset_type", "num_joints", "all_joints",
        "all_joints_names", "net_type", 'init_weights', 'global_scale',
        'location_refinement', 'locref_stdev'
    ]
    MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)

    video_dir = os.path.join(config['project_path'], 'videos')

    if analyzevideo:
        # Analyze the videos
        deeplabcut.analyze_videos(cfg, [video_dir],
                                  videotype,
                                  save_as_csv=True)
    if createlabeledvideo:
        deeplabcut.create_labeled_video(cfg, [video_dir],
                                        videotype,
                                        draw_skeleton=True)
        deeplabcut.plot_trajectories(cfg, [video_dir], videotype)
    return cfg, path_train_config