Example #1
0
def undistort_points(config, dataframe, camera_pair, destfolder):
    cfg_3d = auxiliaryfunctions.read_config(config)
    (
        img_path,
        path_corners,
        path_camera_matrix,
        path_undistort,
    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    """
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    """
    if True:
        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        dataframe_cam1 = pd.read_hdf(dataframe[0])
        dataframe_cam2 = pd.read_hdf(dataframe[1])
        scorer_cam1 = dataframe_cam1.columns.get_level_values(0)[0]
        scorer_cam2 = dataframe_cam2.columns.get_level_values(0)[0]
        path_stereo_file = os.path.join(path_camera_matrix,
                                        "stereo_params.pickle")
        stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
        mtx_l = stereo_file[camera_pair]["cameraMatrix1"]
        dist_l = stereo_file[camera_pair]["distCoeffs1"]

        mtx_r = stereo_file[camera_pair]["cameraMatrix2"]
        dist_r = stereo_file[camera_pair]["distCoeffs2"]

        R1 = stereo_file[camera_pair]["R1"]
        P1 = stereo_file[camera_pair]["P1"]

        R2 = stereo_file[camera_pair]["R2"]
        P2 = stereo_file[camera_pair]["P2"]
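        # R1/P1 and R2/P2 are the rectification rotations and projection matrices
        # (as produced by stereo calibration, e.g. cv2.stereoRectify) for cameras 1 and 2.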

        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        (
            dataFrame_cam1_undistort,
            scorer_cam1,
            bodyparts,
        ) = auxiliaryfunctions_3d.create_empty_df(dataframe_cam1,
                                                  scorer_cam1,
                                                  flag="2d")
        (
            dataFrame_cam2_undistort,
            scorer_cam2,
            bodyparts,
        ) = auxiliaryfunctions_3d.create_empty_df(dataframe_cam2,
                                                  scorer_cam2,
                                                  flag="2d")

        for bpindex, bp in tqdm(enumerate(bodyparts)):
            # Undistorting the points from cam1 camera
            points_cam1 = np.array([
                dataframe_cam1[scorer_cam1][bp]["x"].values[:],
                dataframe_cam1[scorer_cam1][bp]["y"].values[:],
            ])
            points_cam1 = points_cam1.T
            points_cam1 = np.expand_dims(points_cam1, axis=1)
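            # cv2.undistortPoints expects an (N, 1, 2) float array; passing R and P
            # maps the undistorted points into the rectified pixel coordinates of camera 1.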
            points_cam1_remapped = cv2.undistortPoints(src=points_cam1,
                                                       cameraMatrix=mtx_l,
                                                       distCoeffs=dist_l,
                                                       P=P1,
                                                       R=R1)

            dataFrame_cam1_undistort.loc[:, (scorer_cam1, bp, "x")] = (
                points_cam1_remapped[:, 0, 0])
            dataFrame_cam1_undistort.loc[:, (scorer_cam1, bp, "y")] = (
                points_cam1_remapped[:, 0, 1])
            dataFrame_cam1_undistort.loc[:, (scorer_cam1, bp, "likelihood")] = (
                dataframe_cam1[scorer_cam1][bp]["likelihood"].values)

            # Undistorting the points from cam2 camera
            points_cam2 = np.array([
                dataframe_cam2[scorer_cam2][bp]["x"].values[:],
                dataframe_cam2[scorer_cam2][bp]["y"].values[:],
            ])
            points_cam2 = points_cam2.T
            points_cam2 = np.expand_dims(points_cam2, axis=1)
            points_cam2_remapped = cv2.undistortPoints(src=points_cam2,
                                                       cameraMatrix=mtx_r,
                                                       distCoeffs=dist_r,
                                                       P=P2,
                                                       R=R2)

            dataFrame_cam2_undistort.loc[:, (scorer_cam2, bp, "x")] = (
                points_cam2_remapped[:, 0, 0])
            dataFrame_cam2_undistort.loc[:, (scorer_cam2, bp, "y")] = (
                points_cam2_remapped[:, 0, 1])
            dataFrame_cam2_undistort.loc[:, (scorer_cam2, bp, "likelihood")] = (
                dataframe_cam2[scorer_cam2][bp]["likelihood"].values)

        # Sort the undistorted dataframes (saving to disk is currently disabled, see above)
        dataFrame_cam1_undistort.sort_index(inplace=True)
        dataFrame_cam2_undistort.sort_index(inplace=True)

    return (
        dataFrame_cam1_undistort,
        dataFrame_cam2_undistort,
        stereo_file[camera_pair],
        path_stereo_file,
    )
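
# A minimal usage sketch (all paths and the camera-pair key below are hypothetical):
# df_cam1_undist, df_cam2_undist, stereo_params, stereo_path = undistort_points(
#     config="/analysis/project/3d-project/config.yaml",
#     dataframe=("videos/video_camera-1DLC.h5", "videos/video_camera-2DLC.h5"),
#     camera_pair="camera-1-camera-2",
#     destfolder=None,
# )
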
def create_multianimaltraining_dataset(
    config,
    num_shuffles=1,
    Shuffles=None,
    windows2linux=False,
    net_type=None,
    numdigits=2,
):
    """
    Creates a training dataset for multi-animal datasets. Labels from all the extracted frames are merged into a single .h5 file.\n
    Only the videos included in the config file are used to create this dataset.\n
    [OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.

    Important differences to the standard training dataset:
     - label coordinates are stored rounded to ``numdigits`` decimal digits
     - the training data are stored as a pickle file (rather than a .mat file)

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    Shuffles: list of int, optional
        Alternatively, the user can provide a list of shuffle indices (integers).

    windows2linux: bool
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a Unix system (e.g. Ubuntu, Colab, macOS), set this variable to True to convert the paths.

    net_type: string
        Type of network. Currently resnet_50, resnet_101, and resnet_152 are supported (not the MobileNets!).

    numdigits: int, optional
        Number of decimal digits used to store the label coordinates. Default is 2.

    Example
    --------
    >>> deeplabcut.create_multianimaltraining_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)

    Windows:
    >>> deeplabcut.create_multianimaltraining_dataset(r'C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
    --------
    """
    from skimage import io

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg["scorer"]
    project_path = cfg["project_path"]
    # Create path for training sets & store data there
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
        cfg)  # Path concatenatn OS platform independent
    auxiliaryfunctions.attempttomakefolder(Path(
        os.path.join(project_path, str(trainingsetfolder))),
                                           recursive=True)

    Data = trainingsetmanipulation.merge_annotateddatasets(
        cfg, Path(os.path.join(project_path, trainingsetfolder)),
        windows2linux)
    if Data is None:
        return
    Data = Data[scorer]  # extract labeled data

    # actualbpts=set(Data.columns.get_level_values(0))

    def strip_cropped_image_name(path):
        # utility function to split different crops from same image into either train or test!
        filename = os.path.split(path)[1]
        return filename.split("c")[0]

    img_names = Data.index.map(strip_cropped_image_name).unique()
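    # Grouping crops of the same source image ensures that all crops of a frame
    # end up entirely in either the train or the test split (no leakage).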

    # loading & linking pretrained models
    # CURRENTLY ONLY ResNet supported!
    if net_type is None:  # loading & linking pretrained models
        net_type = cfg.get("default_net_type", "resnet_50")
    else:
        if "resnet" in net_type:  # or 'mobilenet' in net_type:
            pass
        else:
            raise ValueError("Currently only resnet is supported.")

    # multianimal case:
    dataset_type = "multi-animal-imgaug"
    partaffinityfield_graph = auxfun_multianimal.getpafgraph(cfg)
    # ATTENTION: order has to be multibodyparts, then uniquebodyparts (for indexing)
    print("Utilizing the following graph:", partaffinityfield_graph)
    num_limbs = len(partaffinityfield_graph)
    partaffinityfield_predict = True

    # Loading the encoder (if necessary downloading from TF)
    dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
    defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
    model_path, num_shuffles = auxfun_models.Check4weights(
        net_type, Path(dlcparent_path), num_shuffles)

    if Shuffles is None:
        Shuffles = range(1, num_shuffles + 1, 1)
    else:
        Shuffles = [i for i in Shuffles if isinstance(i, int)]

    (
        individuals,
        uniquebodyparts,
        multianimalbodyparts,
    ) = auxfun_multianimal.extractindividualsandbodyparts(cfg)

    TrainingFraction = cfg["TrainingFraction"]
    for shuffle in Shuffles:  # Creating shuffles starting from 1
        for trainFraction in TrainingFraction:
            train_inds_temp, test_inds_temp = trainingsetmanipulation.SplitTrials(
                range(len(img_names)), trainFraction)
            # Map back to the original indices.
            temp = [
                name for i, name in enumerate(img_names) if i in test_inds_temp
            ]
            mask = Data.index.str.contains("|".join(temp))
            testIndexes = np.flatnonzero(mask)
            trainIndexes = np.flatnonzero(~mask)
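            # str.contains does (regex) substring matching, so every crop whose path
            # contains a held-out image name is assigned to the test split.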

            ####################################################
            # Generating data structure with labeled information & frame metadata (for deep cut)
            ####################################################

            # Make training file!
            data = []
            print("Creating training data for ", shuffle, trainFraction)
            print("This can take some time...")
            for jj in tqdm(trainIndexes):
                jointsannotated = False
                H = {}
                # load image to get dimensions:
                filename = Data.index[jj]
                im = io.imread(os.path.join(cfg["project_path"], filename))
                H["image"] = filename

                try:
                    H["size"] = np.array(
                        [np.shape(im)[2],
                         np.shape(im)[0],
                         np.shape(im)[1]])
                except IndexError:
                    # grayscale image: only height and width are available
                    H["size"] = np.array([1, np.shape(im)[0], np.shape(im)[1]])

                Joints = {}
                for prfxindex, prefix in enumerate(individuals):
                    joints = (np.zeros(
                        (len(uniquebodyparts) + len(multianimalbodyparts), 3))
                              * np.nan)
                    if prefix != "single":  # first ones are multianimalparts!
                        indexjoints = 0
                        for bpindex, bodypart in enumerate(
                                multianimalbodyparts):
                            socialbdpt = bodypart  # prefix+bodypart #build names!
                            # if socialbdpt in actualbpts:
                            try:
                                x, y = (
                                    Data[prefix][socialbdpt]["x"][jj],
                                    Data[prefix][socialbdpt]["y"][jj],
                                )
                                joints[indexjoints, 0] = int(bpindex)
                                joints[indexjoints, 1] = round(x, numdigits)
                                joints[indexjoints, 2] = round(y, numdigits)
                                indexjoints += 1
                            except Exception:
                                pass  # label missing for this bodypart/frame
                    else:
                        indexjoints = len(multianimalbodyparts)
                        for bpindex, bodypart in enumerate(uniquebodyparts):
                            socialbdpt = bodypart  # prefix+bodypart #build names!
                            # if socialbdpt in actualbpts:
                            try:
                                x, y = (
                                    Data[prefix][socialbdpt]["x"][jj],
                                    Data[prefix][socialbdpt]["y"][jj],
                                )
                                joints[indexjoints, 0] = len(
                                    multianimalbodyparts) + int(bpindex)
                                joints[indexjoints, 1] = round(x, numdigits)
                                joints[indexjoints, 2] = round(y, numdigits)
                                indexjoints += 1
                            except Exception:
                                pass  # label missing for this bodypart/frame

                    # Drop missing body parts
                    joints = joints[~np.isnan(joints).any(axis=1)]
                    # Drop points lying outside the image
                    inside = np.logical_and.reduce((
                        joints[:, 1] < im.shape[1],
                        joints[:, 1] > 0,
                        joints[:, 2] < im.shape[0],
                        joints[:, 2] > 0,
                    ))
                    joints = joints[inside]

                    if np.size(joints) > 0:  # exclude images without labels
                        jointsannotated = True

                    Joints[prfxindex] = joints  # np.array(joints, dtype=int)

                H["joints"] = Joints
                if jointsannotated:  # exclude images without labels
                    data.append(H)

            if len(trainIndexes) > 0:
                (
                    datafilename,
                    metadatafilename,
                ) = auxiliaryfunctions.GetDataandMetaDataFilenames(
                    trainingsetfolder, trainFraction, shuffle, cfg)
                ################################################################################
                # Saving metadata and data file (Pickle file)
                ################################################################################
                auxiliaryfunctions.SaveMetadata(
                    os.path.join(project_path, metadatafilename),
                    data,
                    trainIndexes,
                    testIndexes,
                    trainFraction,
                )

                datafilename = datafilename.split(".mat")[0] + ".pickle"
                import pickle

                with open(os.path.join(project_path, datafilename), "wb") as f:
                    # Pickle the 'labeled-data' dictionary using the highest protocol available.
                    pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)

                ################################################################################
                # Creating file structure for training &
                # Test files as well as pose_yaml files (containing training and testing information)
                #################################################################################

                modelfoldername = auxiliaryfunctions.GetModelFolder(
                    trainFraction, shuffle, cfg)
                auxiliaryfunctions.attempttomakefolder(
                    Path(config).parents[0] / modelfoldername, recursive=True)
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername / "train"))
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername / "test"))

                path_train_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "train",
                        "pose_cfg.yaml",
                    ))
                path_test_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "test",
                        "pose_cfg.yaml",
                    ))
                path_inference_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "test",
                        "inference_cfg.yaml",
                    ))

                jointnames = [str(bpt) for bpt in multianimalbodyparts]
                jointnames.extend([str(bpt) for bpt in uniquebodyparts])
                items2change = {
                    "dataset": datafilename,
                    "metadataset": metadatafilename,
                    "num_joints": len(multianimalbodyparts) + len(uniquebodyparts),
                    "all_joints": [
                        [i]
                        for i in range(
                            len(multianimalbodyparts) + len(uniquebodyparts))
                    ],
                    "all_joints_names": jointnames,
                    "init_weights": model_path,
                    "project_path": str(cfg["project_path"]),
                    "net_type": net_type,
                    "pairwise_loss_weight": 0.1,
                    "pafwidth": 20,
                    "partaffinityfield_graph": partaffinityfield_graph,
                    "partaffinityfield_predict": partaffinityfield_predict,
                    "weigh_only_present_joints": False,
                    "num_limbs": len(partaffinityfield_graph),
                    "dataset_type": dataset_type,
                    "optimizer": "adam",
                    "batch_size": 8,
                    "multi_step": [[1e-4, 7500], [5 * 1e-5, 12000], [1e-5, 200000]],
                    "save_iters": 10000,
                    "display_iters": 500,
                }
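                # multi_step is the Adam learning-rate schedule: 1e-4 until iteration
                # 7500, 5e-5 until 12000, then 1e-5 until 200000 iterations.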

                defaultconfigfile = os.path.join(dlcparent_path,
                                                 "pose_cfg.yaml")
                trainingdata = trainingsetmanipulation.MakeTrain_pose_yaml(
                    items2change, path_train_config, defaultconfigfile)
                keys2save = [
                    "dataset",
                    "num_joints",
                    "all_joints",
                    "all_joints_names",
                    "net_type",
                    "init_weights",
                    "global_scale",
                    "location_refinement",
                    "locref_stdev",
                    "dataset_type",
                    "partaffinityfield_predict",
                    "pairwise_predict",
                    "partaffinityfield_graph",
                    "num_limbs",
                    "dataset_type",
                ]

                trainingsetmanipulation.MakeTest_pose_yaml(
                    trainingdata,
                    keys2save,
                    path_test_config,
                    nmsradius=5.0,
                    minconfidence=0.01,
                )  # setting important def. values for inference

                # Setting inference cfg file:
                defaultinference_configfile = os.path.join(
                    dlcparent_path, "inference_cfg.yaml")
                items2change = {
                    "minimalnumberofconnections":
                    int(len(cfg["multianimalbodyparts"]) / 2),
                    "topktoretain":
                    len(cfg["individuals"]) + 1 *
                    (len(cfg["uniquebodyparts"]) > 0),
                }
                # TODO:   "distnormalization":  could be calculated here based on data and set
                # >> now we calculate this during evaluation (which is a good spot...)
                trainingsetmanipulation.MakeInference_yaml(
                    items2change, path_inference_config,
                    defaultinference_configfile)

                print(
                    "The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
                )
            else:
                pass
Example #3
0
def create_labeled_video_3d(
        config,
        path,
        videofolder=None,
        start=0,
        end=None,
        trailpoints=0,
        videotype="avi",
        view=(-113, -270),
        xlim=None,
        ylim=None,
        zlim=None,
        draw_skeleton=True,
        color_by="bodypart",
        figsize=(20, 8),
        fps=30,
        dpi=300,
):
    """
    Creates a video with views from the two cameras and the 3d reconstruction for a selected number of frames.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    path : list
        A list of strings containing the full paths to triangulated files for analysis or a path to the directory, where all the triangulated files are stored.

    videofolder: string
        Full path of the folder where the videos are stored. Use this if the videos are stored in a location other than where the triangulation files are stored. By default it is ``None`` and the function therefore looks for video files in the directory where the triangulation file is stored.

    start: int
        Integer specifying the start of frame index to select. Default is set to 0.

    end: int
        Integer specifying the end of frame index to select. Default is set to None, where all the frames of the video are used for creating the labeled video.

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\nOnly videos with this extension are analyzed. The default is ``.avi``

    view: list
        A list that sets the elevation angle in z plane and azimuthal angle in x,y plane of 3d view. Useful for rotating the axis for 3d view

    xlim: list
        A list of integers specifying the limits for xaxis of 3d view. By default it is set to [None,None], where the x limit is set by taking the minimum and maximum value of the x coordinates for all the bodyparts.

    ylim: list
        A list of integers specifying the limits for yaxis of 3d view. By default it is set to [None,None], where the y limit is set by taking the minimum and maximum value of the y coordinates for all the bodyparts.

    zlim: list
        A list of integers specifying the limits for zaxis of 3d view. By default it is set to [None,None], where the z limit is set by taking the minimum and maximum value of the z coordinates for all the bodyparts.

    draw_skeleton: bool
        If ``True`` adds a line connecting the body parts, making a skeleton on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``True``

    color_by : string, optional (default='bodypart')
        Coloring rule. By default, each bodypart is colored differently.
        If set to 'individual', points belonging to a single individual are colored the same.

    Example
    -------
    Linux/MacOs
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos/3d.h5'],start=100, end=500)

    To create labeled videos for all the triangulated files in the folder
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500)

    To set the xlim, ylim, zlim and rotate the view of the 3d axis
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500,view=[30,90],xlim=[-12,12],ylim=[15,25],zlim=[20,30])

    """
    start_path = os.getcwd()

    # Read the config file and related variables
    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    markerSize = cfg_3d["dotsize"]
    alphaValue = cfg_3d["alphaValue"]
    cmap = cfg_3d["colormap"]
    bodyparts2connect = cfg_3d["skeleton"]
    skeleton_color = cfg_3d["skeleton_color"]
    scorer_3d = cfg_3d["scorername_3d"]

    if color_by not in ("bodypart", "individual"):
        raise ValueError(f"Invalid color_by={color_by}")

    file_list = auxiliaryfunctions_3d.Get_list_of_triangulated_and_videoFiles(
        path, videotype, scorer_3d, cam_names, videofolder)
    print(file_list)
    if not file_list:
        raise Exception(
            "No corresponding video file(s) found for the specified triangulated file or folder. Did you specify the video file type? If videos are stored in a different location, please use the ``videofolder`` argument to specify their path."
        )

    for file in file_list:
        path_h5_file = Path(file[0]).parents[0]
        triangulate_file = file[0]
        # triangulated file is a list which is always sorted as [triangulated.h5,camera-1.videotype,camera-2.videotype]
        # name for output video
        file_name = str(Path(triangulate_file).stem)
        videooutname = os.path.join(path_h5_file, file_name + ".mp4")
        if os.path.isfile(videooutname):
            print("Video already created...")
        else:
            string_to_remove = str(Path(triangulate_file).suffix)
            pickle_file = triangulate_file.replace(string_to_remove,
                                                   "_meta.pickle")
            metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)

            base_filename_cam1 = str(Path(file[1]).stem).split(videotype)[
                0]  # required for searching the filtered file
            base_filename_cam2 = str(Path(file[2]).stem).split(videotype)[
                0]  # required for searching the filtered file
            cam1_view_video = file[1]
            cam2_view_video = file[2]
            cam1_scorer = metadata_["scorer_name"][cam_names[0]]
            cam2_scorer = metadata_["scorer_name"][cam_names[1]]
            print("Creating 3D video from %s and %s using %s" % (
                Path(cam1_view_video).name,
                Path(cam2_view_video).name,
                Path(triangulate_file).name,
            ))

            # Read the video files and corresponding h5 files
            vid_cam1 = VideoReader(cam1_view_video)
            vid_cam2 = VideoReader(cam2_view_video)

            # Look for the filtered predictions file
            try:
                print("Looking for filtered predictions...")
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam1 + cam1_scorer +
                                "*filtered.h5"),
                        ))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam2 + cam2_scorer +
                                "*filtered.h5"),
                        ))[0])
                # print("Found filtered predictions, will be use these for triangulation.")
                print(
                    "Found the following filtered data: ",
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam1 + cam1_scorer +
                            "*filtered.h5"),
                    ),
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam2 + cam2_scorer +
                            "*filtered.h5"),
                    ),
                )
            except (FileNotFoundError, IndexError):
                print(
                    "No filtered predictions found, the unfiltered predictions will be used instead."
                )
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam1 + cam1_scorer +
                                "*.h5")))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam2 + cam2_scorer +
                                "*.h5")))[0])

            df_3d = pd.read_hdf(triangulate_file)
            try:
                num_animals = df_3d.columns.get_level_values(
                    "individuals").unique().size
            except KeyError:
                num_animals = 1

            if end is None:
                end = len(df_3d)  # All the frames
            end = min(end, min(len(vid_cam1), len(vid_cam2)))
            frames = list(range(start, end))

            output_folder = Path(
                os.path.join(path_h5_file, "temp_" + file_name))
            output_folder.mkdir(parents=True, exist_ok=True)

            # Flatten the list of bodyparts to connect
            bodyparts2plot = list(
                np.unique(
                    [val for sublist in bodyparts2connect for val in sublist]))

            # Format data
            mask2d = df_cam1.columns.get_level_values('bodyparts').isin(
                bodyparts2plot)
            xy1 = df_cam1.loc[:, mask2d].to_numpy().reshape(
                (len(df_cam1), -1, 3))
            visible1 = xy1[..., 2] >= pcutoff
            xy1[~visible1] = np.nan
            xy2 = df_cam2.loc[:, mask2d].to_numpy().reshape(
                (len(df_cam1), -1, 3))
            visible2 = xy2[..., 2] >= pcutoff
            xy2[~visible2] = np.nan
            mask = df_3d.columns.get_level_values('bodyparts').isin(
                bodyparts2plot)
            xyz = df_3d.loc[:, mask].to_numpy().reshape((len(df_3d), -1, 3))
            xyz[~(visible1 & visible2)] = np.nan
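            # Keep a 3D point only if it was confidently detected (likelihood >= pcutoff)
            # in both camera views; otherwise it is masked with NaN.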

            bpts = df_3d.columns.get_level_values('bodyparts')[mask][::3]
            links = make_labeled_video.get_segment_indices(
                bodyparts2connect,
                bpts,
            )
            ind_links = tuple(zip(*links))

            if color_by == "bodypart":
                color = plt.cm.get_cmap(cmap, len(bodyparts2plot))
                colors_ = color(range(len(bodyparts2plot)))
                colors = np.tile(colors_, (num_animals, 1))
            elif color_by == "individual":
                color = plt.cm.get_cmap(cmap, num_animals)
                colors_ = color(range(num_animals))
                colors = np.repeat(colors_, len(bodyparts2plot), axis=0)

            # Trick to force equal aspect ratio of 3D plots
            minmax = np.nanpercentile(xyz[frames], q=[25, 75], axis=(0, 1)).T
            minmax *= 1.1
            minmax_range = (minmax[:, 1] - minmax[:, 0]).max() / 2
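            # Equal-aspect trick: compute the 25th/75th percentiles of the 3D coordinates,
            # scale them by 1.1, and use the largest half-range on all three axes so that
            # the 3D view keeps an equal aspect ratio.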
            if xlim is None:
                mid_x = np.mean(minmax[0])
                xlim = mid_x - minmax_range, mid_x + minmax_range
            if ylim is None:
                mid_y = np.mean(minmax[1])
                ylim = mid_y - minmax_range, mid_y + minmax_range
            if zlim is None:
                mid_z = np.mean(minmax[2])
                zlim = mid_z - minmax_range, mid_z + minmax_range

            # Set up the matplotlib figure beforehand
            fig, axes1, axes2, axes3 = set_up_grid(figsize, xlim, ylim, zlim,
                                                   view)
            points_2d1 = axes1.scatter(
                *np.zeros((2, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            im1 = axes1.imshow(np.zeros((vid_cam1.height, vid_cam1.width)))
            points_2d2 = axes2.scatter(
                *np.zeros((2, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            im2 = axes2.imshow(np.zeros((vid_cam2.height, vid_cam2.width)))
            points_3d = axes3.scatter(
                *np.zeros((3, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            if draw_skeleton:
                # Set up skeleton LineCollections
                segs = np.zeros((2, len(ind_links), 2))
                coll1 = LineCollection(segs, colors=skeleton_color)
                coll2 = LineCollection(segs, colors=skeleton_color)
                axes1.add_collection(coll1)
                axes2.add_collection(coll2)
                segs = np.zeros((2, len(ind_links), 3))
                coll_3d = Line3DCollection(segs, colors=skeleton_color)
                axes3.add_collection(coll_3d)

            writer = FFMpegWriter(fps=fps)
            with writer.saving(fig, videooutname, dpi=dpi):
                for k in tqdm(frames):
                    vid_cam1.set_to_frame(k)
                    vid_cam2.set_to_frame(k)
                    frame_cam1 = vid_cam1.read_frame()
                    frame_cam2 = vid_cam2.read_frame()
                    if frame_cam1 is None or frame_cam2 is None:
                        raise IOError("A video frame is empty.")

                    im1.set_data(frame_cam1)
                    im2.set_data(frame_cam2)

                    sl = slice(max(0, k - trailpoints), k + 1)
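                    # The slice keeps up to `trailpoints` previous frames so the
                    # plotted markers leave a short history trail.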
                    coords3d = xyz[sl]
                    coords1 = xy1[sl, :, :2]
                    coords2 = xy2[sl, :, :2]
                    points_3d._offsets3d = coords3d.reshape((-1, 3)).T
                    points_3d.set_color(colors)
                    points_2d1.set_offsets(coords1.reshape((-1, 2)))
                    points_2d1.set_color(colors)
                    points_2d2.set_offsets(coords2.reshape((-1, 2)))
                    points_2d2.set_color(colors)
                    if draw_skeleton:
                        segs3d = xyz[k][tuple([ind_links])].swapaxes(0, 1)
                        coll_3d.set_segments(segs3d)
                        segs1 = xy1[k, :, :2][tuple([ind_links
                                                     ])].swapaxes(0, 1)
                        coll1.set_segments(segs1)
                        segs2 = xy2[k, :, :2][tuple([ind_links
                                                     ])].swapaxes(0, 1)
                        coll2.set_segments(segs2)

                    writer.grab_frame()
Example #4
0
                Full path to the cropping_config.yaml file
            case: string
                case to plot
            bodyparts: list
                A list that contains bodyparts to plot. Each bodypart is in a string format
            shuffle: int, optional
                Integer value specifying the shuffle index to select for training. Default is set to 1
            trainingsetindex: int, optional
                Integer specifying which TrainingsetFraction to use.
                By default the first (note that TrainingFraction is a list in config.yaml).

        """
        self.path_to_config = path_to_config
        self.path_to_cropping_config = path_to_cropping_config

        self.config = auxiliaryfunctions.read_config(self.path_to_config)
        self.cropping_config = auxiliaryfunctions.read_config(
            self.path_to_cropping_config)
        self.case = case
        self.bodyparts = bodyparts  # make it as a property and cascade down all the others
        self.shuffle = shuffle
        self.trainingsetindex = trainingsetindex

        self._case_full_name = case + '_beh'
        self.project_path = self.config['project_path']
        self.path_to_video = os.path.join(
            self.project_path, 'videos', self._case_full_name + '.avi')
        self.path_to_analysis = os.path.join(
            self.project_path, 'analysis', self._case_full_name)

        # inference_case_list = [os.path.basename(os.path.normpath(video_path)).split(
Example #5
0
def extract_maps(
    config,
    shuffle=0,
    trainingsetindex=0,
    gputouse=None,
    rescale=False,
    Indices=None,
    modelprefix="",
):
    """
    Extracts the scoremap, locref, partaffinityfields (if available).

    Returns a nested dictionary indexed by training set fraction, snapshot index, and image index.
    Each entry contains: (image, scmap, locref, paf, bodypart names, part-affinity graph, image name,
    and a flag that is True if the image was part of the training set).

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 0.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    rescale: bool, default False
        Evaluate the model at the 'global_scale' variable (as set in the test/pose_cfg.yaml file for a particular project), i.e. every
        image is resized according to that scale and the prediction is compared to the rescaled ground truth. The error is reported
        in pixels rescaled to the *original* size, i.e. for a [200,200] pixel image evaluated at global_scale=.5, the predictions are calculated
        on [100,100] pixel images, compared to 1/2 the ground truth, and this error is then multiplied by 2. The evaluation images are also shown
        at the original size!

    gputouse: int, optional
        Natural number indicating the id of your GPU (see number in nvidia-smi). If you do not have a GPU, put None.

    Indices: list of int, optional
        Indices of the images (rows of the collected data file) for which to extract the maps. By default, all images are used.

    modelprefix: string, optional
        Directory prefix of the model folder, if a non-default location was used when creating the training dataset.

    Examples
    --------
    To extract the data for images 0 and 103 (of the training set) for the model trained with shuffle 0:
    >>> deeplabcut.extract_maps(configfile,0,Indices=[0,103])

    """
    from deeplabcut.utils.auxfun_videos import imread, imresize
    from deeplabcut.pose_estimation_tensorflow.nnet import predict
    from deeplabcut.pose_estimation_tensorflow.nnet import (
        predict_multianimal as predictma, )
    from deeplabcut.pose_estimation_tensorflow.config import load_config
    from deeplabcut.pose_estimation_tensorflow.dataset.pose_dataset import data_to_input
    from deeplabcut.utils import auxiliaryfunctions
    from tqdm import tqdm
    import tensorflow as tf

    vers = (tf.__version__).split(".")
    if int(vers[0]) == 1 and int(vers[1]) > 12:
        TF = tf.compat.v1
    else:
        TF = tf
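    # For TensorFlow 1.13+ the graph-mode API lives under tf.compat.v1;
    # the TF alias keeps the code below version agnostic.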

    import pandas as pd
    from pathlib import Path
    import numpy as np

    TF.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    #    tf.logging.set_verbosity(tf.logging.WARN)

    start_path = os.getcwd()
    # Read file path for pose_config file. >> pass it on
    cfg = auxiliaryfunctions.read_config(config)

    if gputouse is not None:  # gpu selection
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        if trainingsetindex < len(
                cfg["TrainingFraction"]) and trainingsetindex >= 0:
            TrainingFractions = [
                cfg["TrainingFraction"][int(trainingsetindex)]
            ]
        else:
            raise Exception(
                "Please check the trainingsetindex! ",
                trainingsetindex,
                " should be an integer from 0 .. ",
                int(len(cfg["TrainingFraction"]) - 1),
            )

    # Loading human annotatated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )

    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        os.path.join(cfg["project_path"], "evaluation-results"))

    Maps = {}
    for trainFraction in TrainingFractions:
        Maps[trainFraction] = {}
        ##################################################
        # Load and setup CNN part detector
        ##################################################
        datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
            trainingsetfolder, trainFraction, shuffle, cfg)

        modelfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetModelFolder(trainFraction,
                                                  shuffle,
                                                  cfg,
                                                  modelprefix=modelprefix)),
        )
        path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
        # Load meta data
        (
            data,
            trainIndices,
            testIndices,
            trainFraction,
        ) = auxiliaryfunctions.LoadMetadata(
            os.path.join(cfg["project_path"], metadatafn))
        try:
            dlc_cfg = load_config(str(path_test_config))
        except FileNotFoundError:
            raise FileNotFoundError(
                "It seems the model for shuffle %s and trainFraction %s does not exist."
                % (shuffle, trainFraction))

        # change batch size, if it was edited during analysis!
        dlc_cfg["batch_size"] = 1  # in case this was edited for analysis.

        # Create folder structure to store results.
        evaluationfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetEvaluationFolder(
                    trainFraction, shuffle, cfg, modelprefix=modelprefix)),
        )
        auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                               recursive=True)
        # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

        # Check which snapshots are available and sort them by # iterations
        Snapshots = np.array([
            fn.split(".")[0]
            for fn in os.listdir(os.path.join(str(modelfolder), "train"))
            if "index" in fn
        ])
        try:  # check if any where found?
            Snapshots[0]
        except IndexError:
            raise FileNotFoundError(
                "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                % (shuffle, trainFraction))

        increasing_indices = np.argsort(
            [int(m.split("-")[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]
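        # Snapshot files are named like "snapshot-<iterations>.index"; sorting by the
        # number after the dash orders them by training iterations.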

        if cfg["snapshotindex"] == -1:
            snapindices = [-1]
        elif cfg["snapshotindex"] == "all":
            snapindices = range(len(Snapshots))
        elif cfg["snapshotindex"] < len(Snapshots):
            snapindices = [cfg["snapshotindex"]]
        else:
            # Without a valid snapshot index we cannot proceed
            raise ValueError(
                "Invalid choice, only -1 (last), any integer up to last, or 'all' (as string)!"
            )

        ########################### RESCALING (to global scale)
        scale = dlc_cfg["global_scale"] if rescale else 1
        Data *= scale
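        # When rescaling, the ground-truth coordinates are scaled to match predictions
        # computed on the resized images (see the `rescale` docstring above).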

        bptnames = [
            dlc_cfg["all_joints_names"][i]
            for i in range(len(dlc_cfg["all_joints"]))
        ]

        for snapindex in snapindices:
            dlc_cfg["init_weights"] = os.path.join(
                str(modelfolder), "train", Snapshots[snapindex]
            )  # setting weights to corresponding snapshot.
            trainingsiterations = (
                dlc_cfg["init_weights"].split(os.sep)[-1]
            ).split("-")[
                -1]  # read how many training siterations that corresponds to.

            # Name for deeplabcut net (based on its parameters)
            # DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction,trainingsiterations)
            # notanalyzed, resultsfilename, DLCscorer=auxiliaryfunctions.CheckifNotEvaluated(str(evaluationfolder),DLCscorer,DLCscorerlegacy,Snapshots[snapindex])
            # print("Extracting maps for ", DLCscorer, " with # of trainingiterations:", trainingsiterations)
            # if notanalyzed: #this only applies to ask if h5 exists...

            # Specifying state of model (snapshot / training state)
            sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
            Numimages = len(Data.index)
            PredicteData = np.zeros(
                (Numimages, 3 * len(dlc_cfg["all_joints_names"])))
            print("Analyzing data...")
            # Keep the original dataset row indices so the train/test lookup
            # below is correct and the selection can be reused across snapshots.
            if Indices is None:
                image_indices = list(range(len(Data.index)))
            else:
                image_indices = list(Indices)

            DATA = {}
            for imageindex in tqdm(image_indices):
                imagename = Data.index[imageindex]
                image = imread(os.path.join(cfg["project_path"], imagename),
                               mode="RGB")
                if scale != 1:
                    image = imresize(image, scale)

                image_batch = data_to_input(image)
                # Compute prediction with the CNN
                outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})

                if cfg.get("multianimalproject", False):
                    scmap, locref, paf = predictma.extract_cnn_output(
                        outputs_np, dlc_cfg)
                    pagraph = dlc_cfg["partaffinityfield_graph"]
                else:
                    scmap, locref = predict.extract_cnn_output(
                        outputs_np, dlc_cfg)
                    paf = None
                    pagraph = []
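                # scmap: per-bodypart score maps, locref: location-refinement offsets,
                # paf: part-affinity fields (multi-animal models only).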

                trainingframe = imageindex not in testIndices

                DATA[imageindex] = [
                    image,
                    scmap,
                    locref,
                    paf,
                    bptnames,
                    pagraph,
                    imagename,
                    trainingframe,
                ]
            Maps[trainFraction][Snapshots[snapindex]] = DATA
    os.chdir(str(start_path))
    return Maps
Example #6
0
def plot_trajectories(config, videos, videotype='.avi', shuffle=1,
                      trainingsetindex=0, filtered=False,
                      displayedbodyparts='all', showfigures=False,
                      destfolder=None, outformat='.pdf'):
    """
    Plots the trajectories of various bodyparts across the video.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default False
        Boolean variable indicating if the filtered output should be plotted rather than frame-by-frame predictions. The filtered version can be calculated with deeplabcut.filterpredictions.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video.
        Either ``all``, then all body parts from config.yaml are used,
        or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    showfigures: bool, default False
        If True, the plots are also displayed.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    outformat: string, optional
        File format for the saved plots, e.g. ``.pdf`` or ``.png``. The default is ``.pdf``.

    Example
    --------
    To plot the trajectories of the body parts across the video:
    >>> deeplabcut.plot_trajectories('home/alex/analysis/project/reaching-task/config.yaml',['/home/alex/analysis/project/videos/reachingvideo1.avi'])
    --------

    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg, shuffle, trainFraction)  # automatically loads corresponding model (even training iteration based on snapshot index)
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(cfg, displayedbodyparts)
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        print(video)
        if destfolder is None:
            videofolder = str(Path(video).parents[0])
        else:
            videofolder = destfolder

        vname = str(Path(video).stem)
        print("Starting % ", videofolder, video)
        print("Video Name is: " + vname)
        notanalyzed, dataname, DLCscorer = auxiliaryfunctions.CheckifNotAnalyzed(
            videofolder, vname, DLCscorer, DLCscorerlegacy, flag='checking')
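        # CheckifNotAnalyzed returns True if no results file from this scorer exists
        # for the video, i.e. the video has not been analyzed yet.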

        if notanalyzed:
            print("The video was not analyzed with this scorer:", DLCscorer)
        else:
            #LoadData
            print("Loading ", video, "and data.")
            datafound, metadata, Dataframe, DLCscorer, suffix = auxiliaryfunctions.LoadAnalyzedData(
                str(videofolder), vname, DLCscorer, filtered)  # returns boolean whether data was found, plus metadata and the pandas DataFrame
            if datafound:
                basefolder = videofolder
                auxiliaryfunctions.attempttomakefolder(basefolder)
                auxiliaryfunctions.attempttomakefolder(
                    os.path.join(basefolder, 'plot-poses'))
                tmpfolder = os.path.join(basefolder, 'plot-poses', vname)
                auxiliaryfunctions.attempttomakefolder(tmpfolder)
                PlottingResults(tmpfolder, Dataframe, DLCscorer, cfg, bodyparts,
                                showfigures, suffix + outformat, vname=vname)

    print('Plots created! Please check the directory "plot-poses" within the video directory')
Example #7
0
def analyze_time_lapse_frames(config,
                              directory,
                              frametype='.png',
                              shuffle=1,
                              trainingsetindex=0,
                              gputouse=None,
                              save_as_csv=False,
                              rgb=True):
    """
    Analyzes all images (of type = frametype) in a folder and stores the output in one file.

    You can crop the frames (before analysis), by changing 'cropping'=True and setting 'x1','x2','y1','y2' in the config file.

    Output: The labels are stored as MultiIndex Pandas Array, which contains the name of the network, body part name, (x, y) label position \n
            in pixels, and the likelihood for each frame per body part. These arrays are stored in an efficient Hierarchical Data Format (HDF) \n
            in the same directory, where the video is stored. However, if the flag save_as_csv is set to True, the data can also be exported in \n
            comma-separated values format (.csv), which in turn can be imported in many programs, such as MATLAB, R, Prism, etc.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    directory: string
        Full path to directory containing the frames that shall be analyzed

    frametype: string, optional
        Checks for the file extension of the frames. Only images with this extension are analyzed. The default is ``.png``

    shuffle: int, optional
        An integer specifying the shuffle index of the training dataset used for training the network. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    gputouse: int, optional
        Natural number indicating the id of your GPU (see number in nvidia-smi). If you do not have a GPU, put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``

    rgb: bool, optional
        Whether to load the image as RGB. Note that e.g. some TIFFs do not allow that option in imread; in that case set this to False.

    Examples
    --------
    If you want to analyze all frames in /analysis/project/timelapseexperiment1
    >>> deeplabcut.analyze_time_lapse_frames('/analysis/project/reaching-task/config.yaml','/analysis/project/timelapseexperiment1')
    --------

    Note: for test purposes one can extract all frames from a video with ffmpeg, e.g. ffmpeg -i testvideo.avi thumb%04d.png
    """
    if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
        del os.environ[
            'TF_CUDNN_USE_AUTOTUNE']  #was potentially set during training

    if gputouse is not None:  # gpu selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    vers = (tf.__version__).split('.')
    if int(vers[0]) == 1 and int(vers[1]) > 12:
        TF = tf.compat.v1
    else:
        TF = tf

    TF.reset_default_graph()
    start_path = os.getcwd(
    )  #record cwd to return to this directory in the end

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    modelfolder = os.path.join(
        cfg["project_path"],
        str(auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)))
    path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
    try:
        dlc_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
        raise FileNotFoundError(
            "It seems the model for shuffle %s and trainFraction %s does not exist."
            % (shuffle, trainFraction))
    # Check which snapshots are available and sort them by # iterations
    try:
        Snapshots = np.array([
            fn.split('.')[0]
            for fn in os.listdir(os.path.join(modelfolder, 'train'))
            if "index" in fn
        ])
    except FileNotFoundError:
        raise FileNotFoundError(
            "Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\n Please train it before using it to analyze videos.\n Use the function 'train_network' to train the network for shuffle %s."
            % (shuffle, shuffle))

    if cfg['snapshotindex'] == 'all':
        print(
            "Snapshotindex is set to 'all' in the config.yaml file. Running video analysis with all snapshots is very costly! Use the function 'evaluate_network' to choose the best the snapshot. For now, changing snapshot index to -1!"
        )
        snapshotindex = -1
    else:
        snapshotindex = cfg['snapshotindex']

    increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
    Snapshots = Snapshots[increasing_indices]

    print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)

    ##################################################
    # Load and setup CNN part detector
    ##################################################

    # Check if data already was generated:
    dlc_cfg['init_weights'] = os.path.join(modelfolder, 'train',
                                           Snapshots[snapshotindex])
    trainingsiterations = (dlc_cfg['init_weights'].split(
        os.sep)[-1]).split('-')[-1]

    #update batchsize (based on parameters in config.yaml)
    dlc_cfg['batch_size'] = cfg['batch_size']

    # Name for scorer:
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, trainingsiterations=trainingsiterations)
    sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)

    # update number of outputs and adjust pandas indices
    dlc_cfg['num_outputs'] = cfg.get('num_outputs', 1)

    xyz_labs_orig = ['x', 'y', 'likelihood']
    suffix = [str(s + 1) for s in range(dlc_cfg['num_outputs'])]
    suffix[0] = ''  # first one has empty suffix for backwards compatibility
    xyz_labs = [x + s for s in suffix for x in xyz_labs_orig]

    pdindex = pd.MultiIndex.from_product(
        [[DLCscorer], dlc_cfg['all_joints_names'], xyz_labs],
        names=['scorer', 'bodyparts', 'coords'])
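    # Illustration: with num_outputs=2, xyz_labs becomes
    # ['x', 'y', 'likelihood', 'x2', 'y2', 'likelihood2'], and pdindex repeats
    # these coords per bodypart under the scorer name.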

    if gputouse is not None:  # gpu selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    ##################################################
    # Loading the images
    ##################################################
    #checks if input is a directory
    if os.path.isdir(directory):
        """
        Analyzes all the frames in the directory.
        """
        print("Analyzing all frames in the directory: ", directory)
        os.chdir(directory)
        framelist = np.sort(
            [fn for fn in os.listdir(os.curdir) if (frametype in fn)])
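        # e.g. with frametype='.png' this keeps frames such as thumb0001.png,
        # thumb0002.png, ... (cf. the ffmpeg note in the docstring).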
        vname = Path(directory).stem
        notanalyzed, dataname, DLCscorer = auxiliaryfunctions.CheckifNotAnalyzed(
            directory, vname, DLCscorer, DLCscorerlegacy, flag='framestack')
        if notanalyzed:
            nframes = len(framelist)
            if nframes > 0:
                start = time.time()

                PredictedData, nframes, nx, ny = GetPosesofFrames(
                    cfg, dlc_cfg, sess, inputs, outputs, directory, framelist,
                    nframes, dlc_cfg['batch_size'], rgb)
                stop = time.time()

                if cfg['cropping']:
                    coords = [cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2']]
                else:
                    coords = [0, nx, 0, ny]

                dictionary = {
                    "start": start,
                    "stop": stop,
                    "run_duration": stop - start,
                    "Scorer": DLCscorer,
                    "config file": dlc_cfg,
                    "batch_size": dlc_cfg["batch_size"],
                    "num_outputs": dlc_cfg["num_outputs"],
                    "frame_dimensions": (ny, nx),
                    "nframes": nframes,
                    "cropping": cfg['cropping'],
                    "cropping_parameters": coords
                }
                metadata = {'data': dictionary}

                print("Saving results in %s..." % (directory))

                auxiliaryfunctions.SaveData(PredictedData[:nframes, :],
                                            metadata, dataname, pdindex,
                                            framelist, save_as_csv)
                print(
                    "The folder was analyzed. Now your research can truly start!"
                )
                print(
                    "If the tracking is not satisfactory for some frome, consider expanding the training set."
                )
            else:
                print(
                    "No frames were found. Consider changing the path or the frametype."
                )

    os.chdir(str(start_path))
def create_training_dataset(
    config,
    num_shuffles=1,
    Shuffles=None,
    windows2linux=False,
    userfeedback=False,
    trainIndices=None,
    testIndices=None,
    net_type=None,
    augmenter_type=None,
    posecfg_template=None,
):
    """Creates a training dataset.

    Labels from all the extracted frames are merged into a single .h5 file.
    Only the videos included in the config file are used to create this dataset.

    Parameters
    ----------
    config : string
        Full path of the ``config.yaml`` file as a string.

    num_shuffles : int, optional, default=1
        Number of shuffles of training dataset to create, i.e. ``[1,2,3]`` for
        ``num_shuffles=3``.

    Shuffles: list[int], optional
        Alternatively the user can also give a list of shuffles.

    userfeedback: bool, optional, default=False
        If ``False``, all requested train/test splits are created (no matter if they
        already exist). If you want to assure that previous splits etc. are not
        overwritten, set this to ``True`` and you will be asked for each split.

    trainIndices: list of lists, optional, default=None
        List of one or multiple lists containing train indexes.
        A list containing two lists of training indexes will produce two splits.

    testIndices: list of lists, optional, default=None
        List of one or multiple lists containing test indexes.

    net_type: list, optional, default=None
        Type of networks. Currently supported options are

        * ``resnet_50``
        * ``resnet_101``
        * ``resnet_152``
        * ``mobilenet_v2_1.0``
        * ``mobilenet_v2_0.75``
        * ``mobilenet_v2_0.5``
        * ``mobilenet_v2_0.35``
        * ``efficientnet-b0``
        * ``efficientnet-b1``
        * ``efficientnet-b2``
        * ``efficientnet-b3``
        * ``efficientnet-b4``
        * ``efficientnet-b5``
        * ``efficientnet-b6``

    augmenter_type: string, optional, default=None
        Type of augmenter. Currently supported augmenters are
        
        * ``default``
        * ``scalecrop``
        * ``imgaug``
        * ``tensorpack``
        * ``deterministic``

    posecfg_template: string, optional, default=None
        Path to a ``pose_cfg.yaml`` file to use as a template for generating the new
        one for the current iteration. Useful if you would like to start with the same
        parameters as a previous training iteration. None uses the default
        ``pose_cfg.yaml``.

    Returns
    -------
    list(tuple) or None
        If training dataset was successfully created, a list of tuples is returned.
        The first two elements in each tuple represent the training fraction and the
        shuffle value. The last two elements in each tuple are arrays of integers
        representing the training and test indices.

        Returns None if training dataset could not be created.

    Notes
    -----
    Use the function ``add_new_videos`` at any stage of the project to add more videos
    to the project.

    Examples
    --------

    Linux/MacOS

    >>> deeplabcut.create_training_dataset(
            '/analysis/project/reaching-task/config.yaml', num_shuffles=1,
        )

    Windows

    >>> deeplabcut.create_training_dataset(
            'C:\\Users\\Ulf\\looming-task\\config.yaml', Shuffles=[3,17,5],
        )
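
    To pass an explicit train/test split and inspect the returned splits
    (indices here are purely illustrative):

    >>> splits = deeplabcut.create_training_dataset(
            '/analysis/project/reaching-task/config.yaml',
            Shuffles=[1], trainIndices=[[0, 1, 2, 3]], testIndices=[[4]],
        )
    >>> trainFraction, shuffle, (train_inds, test_inds) = splits[0]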
    """
    import scipy.io as sio

    if windows2linux:
        # DeprecationWarnings are silenced since Python 3.2 unless triggered in __main__
        warnings.warn(
            "`windows2linux` has no effect since 2.2.0.4 and will be removed in 2.2.1.",
            FutureWarning,
        )

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    if posecfg_template:
        if not posecfg_template.endswith("pose_cfg.yaml"):
            raise ValueError(
                "posecfg_template argument must contain path to a pose_cfg.yaml file"
            )
        else:
            print("Reloading pose_cfg parameters from " + posecfg_template +
                  '\n')
            from deeplabcut.utils.auxiliaryfunctions import read_plainconfig

            prior_cfg = read_plainconfig(posecfg_template)
    if cfg.get("multianimalproject", False):
        from deeplabcut.generate_training_dataset.multiple_individuals_trainingsetmanipulation import (
            create_multianimaltraining_dataset, )

        create_multianimaltraining_dataset(config,
                                           num_shuffles,
                                           Shuffles,
                                           net_type=net_type)
    else:
        scorer = cfg["scorer"]
        project_path = cfg["project_path"]
        # Create path for training sets & store data there
        trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
            cfg)  # Path concatenation OS platform independent
        auxiliaryfunctions.attempttomakefolder(Path(
            os.path.join(project_path, str(trainingsetfolder))),
                                               recursive=True)

        Data = merge_annotateddatasets(
            cfg,
            Path(os.path.join(project_path, trainingsetfolder)),
        )
        if Data is None:
            return
        Data = Data[scorer]  # extract labeled data

        # loading & linking pretrained models
        if net_type is None:  # loading & linking pretrained models
            net_type = cfg.get("default_net_type", "resnet_50")
        else:
            if ("resnet" in net_type or "mobilenet" in net_type
                    or "efficientnet" in net_type):
                pass
            else:
                raise ValueError("Invalid network type:", net_type)

        if augmenter_type is None:
            augmenter_type = cfg.get("default_augmenter", "imgaug")
            if augmenter_type is None:  # this could be in config.yaml for old projects!
                # updating variable if null/None! (backwards compatibility)
                auxiliaryfunctions.edit_config(config,
                                               {"default_augmenter": "imgaug"})
                augmenter_type = "imgaug"
        elif augmenter_type not in [
                "default",
                "scalecrop",
                "imgaug",
                "tensorpack",
                "deterministic",
        ]:
            raise ValueError("Invalid augmenter type:", augmenter_type)

        if posecfg_template:
            if net_type != prior_cfg["net_type"]:
                print(
                    "WARNING: Specified net_type does not match net_type from posecfg_template path entered. Proceed with caution."
                )
            if augmenter_type != prior_cfg["dataset_type"]:
                print(
                    "WARNING: Specified augmenter_type does not match dataset_type from posecfg_template path entered. Proceed with caution."
                )

        # Loading the encoder (if necessary downloading from TF)
        dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
        if not posecfg_template:
            defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
        else:
            defaultconfigfile = posecfg_template
        model_path, num_shuffles = auxfun_models.check_for_weights(
            net_type, Path(dlcparent_path), num_shuffles)

        if Shuffles is None:
            Shuffles = range(1, num_shuffles + 1)
        else:
            Shuffles = [i for i in Shuffles if isinstance(i, int)]

        # print(trainIndices,testIndices, Shuffles, augmenter_type,net_type)
        if trainIndices is None and testIndices is None:
            splits = [(
                trainFraction,
                shuffle,
                SplitTrials(range(len(Data.index)), trainFraction),
            ) for trainFraction in cfg["TrainingFraction"]
                      for shuffle in Shuffles]
        else:
            if not (len(trainIndices) == len(testIndices) == len(Shuffles)):
                raise ValueError(
                    "Number of Shuffles and train and test indexes should be equal."
                )
            splits = []
            for shuffle, (train_inds, test_inds) in enumerate(
                    zip(trainIndices, testIndices)):
                trainFraction = round(
                    len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)),
                    2)
                print(
                    f"You passed a split with the following fraction: {int(100 * trainFraction)}%"
                )
                # Now that the training fraction is guaranteed to be correct,
                # the values added to pad the indices are removed.
                train_inds = np.asarray(train_inds)
                train_inds = train_inds[train_inds != -1]
                test_inds = np.asarray(test_inds)
                test_inds = test_inds[test_inds != -1]
                splits.append((trainFraction, Shuffles[shuffle], (train_inds,
                                                                  test_inds)))

        bodyparts = cfg["bodyparts"]
        nbodyparts = len(bodyparts)
        for trainFraction, shuffle, (trainIndices, testIndices) in splits:
            if len(trainIndices) > 0:
                if userfeedback:
                    trainposeconfigfile, _, _ = training.return_train_network_path(
                        config,
                        shuffle=shuffle,
                        trainingsetindex=cfg["TrainingFraction"].index(
                            trainFraction),
                    )
                    if trainposeconfigfile.is_file():
                        askuser = input(
                            "The model folder is already present. If you continue, it will overwrite the existing model (split). Do you want to continue?(yes/no): "
                        )
                        if (askuser == "no" or askuser == "No"
                                or askuser == "N" or askuser == "No"):
                            raise Exception(
                                "Use the Shuffles argument as a list to specify a different shuffle index. Check out the help for more details."
                            )

                ####################################################
                # Generating data structure with labeled information & frame metadata (for deep cut)
                ####################################################
                # Make training file!
                (
                    datafilename,
                    metadatafilename,
                ) = auxiliaryfunctions.GetDataandMetaDataFilenames(
                    trainingsetfolder, trainFraction, shuffle, cfg)

                ################################################################################
                # Saving data file (convert to training file for deeper cut (*.mat))
                ################################################################################
                data, MatlabData = format_training_data(
                    Data, trainIndices, nbodyparts, project_path)
                sio.savemat(os.path.join(project_path, datafilename),
                            {"dataset": MatlabData})

                ################################################################################
                # Saving metadata (Pickle file)
                ################################################################################
                auxiliaryfunctions.SaveMetadata(
                    os.path.join(project_path, metadatafilename),
                    data,
                    trainIndices,
                    testIndices,
                    trainFraction,
                )

                ################################################################################
                # Creating file structure for training &
                # Test files as well as pose_yaml files (containing training and testing information)
                #################################################################################
                modelfoldername = auxiliaryfunctions.get_model_folder(
                    trainFraction, shuffle, cfg)
                auxiliaryfunctions.attempttomakefolder(
                    Path(config).parents[0] / modelfoldername, recursive=True)
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername) + "/train")
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername) + "/test")

                path_train_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "train",
                        "pose_cfg.yaml",
                    ))
                path_test_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "test",
                        "pose_cfg.yaml",
                    ))
                # str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test'  /  'pose_cfg.yaml')
                items2change = {
                    "dataset": datafilename,
                    "metadataset": metadatafilename,
                    "num_joints": len(bodyparts),
                    "all_joints": [[i] for i in range(len(bodyparts))],
                    "all_joints_names": [str(bpt) for bpt in bodyparts],
                    "init_weights": model_path,
                    "project_path": str(cfg["project_path"]),
                    "net_type": net_type,
                    "dataset_type": augmenter_type,
                }

                items2drop = {}
                if augmenter_type == "scalecrop":
                    # these values are dropped as scalecrop
                    # doesn't have rotation implemented
                    items2drop = {"rotation": 0, "rotratio": 0.0}
                # Also drop maDLC smart cropping augmentation parameters
                for key in [
                        "pre_resize", "crop_size", "max_shift", "crop_sampling"
                ]:
                    items2drop[key] = None

                trainingdata = MakeTrain_pose_yaml(items2change,
                                                   path_train_config,
                                                   defaultconfigfile,
                                                   items2drop)

                keys2save = [
                    "dataset",
                    "num_joints",
                    "all_joints",
                    "all_joints_names",
                    "net_type",
                    "init_weights",
                    "global_scale",
                    "location_refinement",
                    "locref_stdev",
                ]
                MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
                print(
                    "The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
                )

        return splits
Example #9
0
def create_labeled_video(
    config,
    videos,
    videotype="avi",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    fastmode=True,
    save_frames=False,
    Frames2plot=None,
    displayedbodyparts="all",
    displayedindividuals="all",
    codec="mp4v",
    outputframerate=None,
    destfolder=None,
    draw_skeleton=False,
    trailpoints=0,
    displaycropped=False,
    color_by="bodypart",
    modelprefix="",
    track_method="",
):
    """
    Labels the bodyparts in a video. Make sure the video is already analyzed by the function 'analyze_videos'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, optional, default=False
        Boolean variable indicating if filtered output should be plotted rather than frame-by-frame predictions. The filtered version can be calculated with deeplabcut.filterpredictions.

    fastmode: bool
        If ``True``, uses openCV (much faster but less customization of the video) instead of matplotlib (``False``).
        In the matplotlib mode you can additionally store each frame individually (by setting "save_frames" accordingly);
        creating the frames with matplotlib is slower but much more flexible (one can set the transparency of markers, crop, and easily customize).

    save_frames: bool
        If ``True``, creates each frame individually and then combines them into a video. This variant is relatively slow as
        it stores all individual frames.

    Frames2plot: List of indices
        If not None & save_frames=True then the frames corresponding to the index will be plotted. For example, Frames2plot=[0,11] will plot the first and the 12th frame.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, then all body parts
        from config.yaml are used, or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    displayedindividuals: list of strings, optional
        Individuals plotted in the video. By default, all individuals present in the config will be shown.

    codec: string, optional
        Codec for the labeled video. For options see http://www.fourcc.org/codecs.php [depends on your ffmpeg installation].

    outputframerate: positive number, optional
        Output frame rate for the labeled video (only available for the mode with saving frames). By default None, which results in the original video rate.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    draw_skeleton: bool
        If ``True`` adds a line connecting the body parts, making a skeleton on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``False``

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    displaycropped: bool, optional
        Specifies whether only cropped frame is displayed (with labels analyzed therein), or the original frame with the labels analyzed in the cropped subset.

    color_by : string, optional (default='bodypart')
        Coloring rule. By default, each bodypart is colored differently.
        If set to 'individual', points belonging to a single individual are colored the same.

    Examples
    --------
    If you want to create the labeled video for only 1 video
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------

    If you want to create the labeled video for only 1 video and store the individual frames
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],fastmode=True, save_frames=True)
    --------

    If you want to create the labeled video for multiple videos
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------

    If you want to create the labeled video for all the videos (as .avi extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])

    --------
    If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')

    --------
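
    If you want to draw the skeleton and color points by individual (for multi-animal projects; see color_by above)
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],draw_skeleton=True, color_by='individual')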

    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix
    )  # automatically loads corresponding model (even training iteration based on snapshot index)

    if save_frames:
        fastmode = False  # otherwise one cannot save frames

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts
    )
    individuals = auxfun_multianimal.IntersectionofIndividualsandOnesGivenbyUser(
        cfg, displayedindividuals
    )
    if draw_skeleton:
        bodyparts2connect = cfg["skeleton"]
        skeleton_color = cfg["skeleton_color"]
    else:
        bodyparts2connect = None
        skeleton_color = None

    start_path = os.getcwd()
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)

    if not Videos:
        print("No video(s) were found. Please check your paths and/or 'video_type'.")
        return

    func = partial(
        proc_video,
        videos,
        destfolder,
        filtered,
        DLCscorer,
        DLCscorerlegacy,
        track_method,
        cfg,
        individuals,
        color_by,
        bodyparts,
        codec,
        bodyparts2connect,
        trailpoints,
        save_frames,
        outputframerate,
        Frames2plot,
        draw_skeleton,
        skeleton_color,
        displaycropped,
        fastmode,
    )

    with Pool(min(os.cpu_count(), len(Videos))) as pool:
        pool.map(func, Videos)

    os.chdir(start_path)
def check_labels(
    config,
    Labels=["+", ".", "x"],
    scale=1,
    dpi=100,
    draw_skeleton=True,
    visualizeindividuals=True,
):
    """Check the labeled frames.

    Double check if the labels were at the correct locations and stored in the proper
    file format.

    This creates a new subdirectory for each video under the 'labeled-data' and all the
    frames are plotted with the labels.

    Make sure that these labels are fine.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    Labels: list, optional, default=["+", ".", "x"]
        List of at least 3 matplotlib markers. The first one will be used to indicate
        the human ground truth location.

    scale : float, default=1
        Change the relative size of the output images.

    dpi : int, optional, default=100
        Output resolution in dpi.

    draw_skeleton: bool, default=True
        Plot skeleton overlaid over body parts.

    visualizeindividuals: bool, default: True.
        For a multianimal project, if True, the different individuals have different
        colors (and all bodyparts the same). If False, the colors change over bodyparts
        rather than individuals.

    Returns
    -------
    None

    Examples
    --------
    >>> deeplabcut.check_labels('/analysis/project/reaching-task/config.yaml')
    """

    from deeplabcut.utils import visualization

    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg["video_sets"].keys()
    video_names = [_robust_path_split(video)[1] for video in videos]

    folders = [
        os.path.join(cfg["project_path"], "labeled-data", str(Path(i)))
        for i in video_names
    ]
    print("Creating images with labels by %s." % cfg["scorer"])
    for folder in folders:
        try:
            DataCombined = pd.read_hdf(
                os.path.join(str(folder),
                             "CollectedData_" + cfg["scorer"] + ".h5"))
            conversioncode.guarantee_multiindex_rows(DataCombined)
            if cfg.get("multianimalproject", False):
                color_by = "individual" if visualizeindividuals else "bodypart"
            else:  # for single animal projects
                color_by = "bodypart"

            visualization.make_labeled_images_from_dataframe(
                DataCombined,
                cfg,
                folder,
                scale,
                dpi=dpi,
                keypoint=Labels[0],
                draw_skeleton=draw_skeleton,
                color_by=color_by,
            )
        except FileNotFoundError:
            print("Attention:", folder,
                  "does not appear to have labeled data!")

    print(
        "If all the labels are ok, then use the function 'create_training_dataset' to create the training dataset!"
    )
def adddatasetstovideolistandviceversa(config):
    """
    First run comparevideolistsanddatafolders(config) to compare the folders in labeled-data and the ones listed under video_sets (in the config file).
    If you detect differences, this function can be used to make sure each folder has a video entry & vice versa.

    It corrects this problem in the following way:

    If a video entry in the config file does not have a corresponding folder in labeled-data, then the entry is removed.
    If a folder in labeled-data does not have a corresponding video entry in the config file, then the matching video file is looked up
    in the project's 'videos' folder and added to the config file together with its crop parameters.

    Handle with care!

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.
    """
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg["video_sets"]
    video_names = [Path(i).stem for i in videos]

    alldatafolders = [
        fn for fn in os.listdir(Path(config).parent / "labeled-data")
        if "_labeled" not in fn and not fn.startswith(".")
    ]

    print("Config file contains:", len(video_names))
    print("Labeled-data contains:", len(alldatafolders))

    toberemoved = []
    for vn in video_names:
        if vn not in alldatafolders:
            print(vn, " is missing as a labeled folder >> removing key!")
            for fullvideo in videos:
                if vn in fullvideo:
                    toberemoved.append(fullvideo)

    for vid in toberemoved:
        del videos[vid]

    # Load updated lists:
    video_names = [Path(i).stem for i in videos]
    for vn in alldatafolders:
        if vn not in video_names:
            print(vn, " is missing in config file >> adding it!")
            # Find the corresponding video file
            found = False
            for file in os.listdir(os.path.join(cfg["project_path"],
                                                "videos")):
                if os.path.splitext(file)[0] == vn:
                    found = True
                    break
            if found:
                video_path = os.path.join(cfg["project_path"], "videos", file)
                clip = VideoReader(video_path)
                videos.update({
                    video_path: {
                        "crop": ", ".join(map(str, clip.get_bbox()))
                    }
                })

    auxiliaryfunctions.write_config(config, cfg)
def create_training_model_comparison(
    config,
    trainindex=0,
    num_shuffles=1,
    net_types=["resnet_50"],
    augmenter_types=["imgaug"],
    userfeedback=False,
    windows2linux=False,
):
    """Creates a training dataset to compare networks and augmentation types.

    The datasets are created such that the shuffles have same training and testing
    indices. Therefore, this function is useful for benchmarking the performance of
    different network and augmentation types on the same training/testdata.

    Parameters
    ----------
    config: str
        Full path of the config.yaml file.

    trainindex: int, optional, default=0
        Either (in case uniform = True) indexes which element of TrainingFraction in
        the config file should be used (note it is a list!).
        Alternatively (uniform = False) indexes which folder is dropped, i.e. the first
        if trainindex=0, the second if trainindex=1, etc.

    num_shuffles : int, optional, default=1
        Number of shuffles of training dataset to create,
        i.e. [1,2,3] for num_shuffles=3.

    net_types: list[str], optional, default=["resnet_50"]
        Currently supported networks are

        * ``"resnet_50"``
        * ``"resnet_101"``
        * ``"resnet_152"``
        * ``"mobilenet_v2_1.0"``
        * ``"mobilenet_v2_0.75"``
        * ``"mobilenet_v2_0.5"``
        * ``"mobilenet_v2_0.35"``
        * ``"efficientnet-b0"``
        * ``"efficientnet-b1"``
        * ``"efficientnet-b2"``
        * ``"efficientnet-b3"``
        * ``"efficientnet-b4"``
        * ``"efficientnet-b5"``
        * ``"efficientnet-b6"``

    augmenter_types: list[str], optional, default=["imgaug"]
        Currently supported augmenters are

        * ``"default"``
        * ``"imgaug"``
        * ``"tensorpack"``
        * ``"deterministic"``

    userfeedback: bool, optional, default=False
        If ``False``, then all requested train/test splits are created, no matter if
        they already exist. If you want to assure that previous splits etc. are not
        overwritten, then set this to True and you will be asked for each split.

    windows2linux

        .. deprecated::
            Has no effect since 2.2.0.4 and will be removed in 2.2.1.

    Returns
    -------
    shuffle_list: list
        List of indices corresponding to the trainingsplits/models that were created.

    Examples
    --------
    On Linux/MacOS

    >>> shuffle_list = deeplabcut.create_training_model_comparison(
            '/analysis/project/reaching-task/config.yaml',
            num_shuffles=1,
            net_types=['resnet_50','resnet_152'],
            augmenter_types=['tensorpack','deterministic'],
        )

    On Windows

    >>> shuffle_list = deeplabcut.create_training_model_comparison(
            'C:\\Users\\Ulf\\looming-task\\config.yaml',
            num_shuffles=1,
            net_types=['resnet_50','resnet_152'],
            augmenter_types=['tensorpack','deterministic'],
        )

    See ``examples/testscript_openfielddata_augmentationcomparison.py`` for an example
    of how to use ``shuffle_list``.
    """
    # read cfg file
    cfg = auxiliaryfunctions.read_config(config)

    if windows2linux:
        warnings.warn(
            "`windows2linux` has no effect since 2.2.0.4 and will be removed in 2.2.1.",
            FutureWarning,
        )

    # create log file
    log_file_name = os.path.join(cfg["project_path"],
                                 "training_model_comparison.log")
    logger = logging.getLogger("training_model_comparison")
    if not logger.handlers:
        logger = logging.getLogger("training_model_comparison")
        hdlr = logging.FileHandler(log_file_name)
        formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
        logger.setLevel(logging.INFO)
    else:
        pass

    largestshuffleindex = get_largestshuffle_index(config)

    shuffle_list = []
    for shuffle in range(num_shuffles):
        trainIndices, testIndices = mergeandsplit(config,
                                                  trainindex=trainindex,
                                                  uniform=True)
        for idx_net, net in enumerate(net_types):
            for idx_aug, aug in enumerate(augmenter_types):
                get_max_shuffle_idx = (
                    largestshuffleindex + idx_aug +
                    idx_net * len(augmenter_types) +
                    shuffle * len(augmenter_types) * len(net_types))
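                # Illustration: with largestshuffleindex=4, two net_types and two
                # augmenter_types, the pass over shuffle 0 yields indices 4, 5, 6, 7.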

                shuffle_list.append(get_max_shuffle_idx)
                log_info = str("Shuffle index:" + str(get_max_shuffle_idx) +
                               ", net_type:" + net + ", augmenter_type:" +
                               aug + ", trainsetindex:" + str(trainindex) +
                               ", frozen shuffle ID:" + str(shuffle))
                create_training_dataset(
                    config,
                    Shuffles=[get_max_shuffle_idx],
                    net_type=net,
                    trainIndices=[trainIndices],
                    testIndices=[testIndices],
                    augmenter_type=aug,
                    userfeedback=userfeedback,
                )
                logger.info(log_info)

    return shuffle_list
    def __init__(self, parent, config, video, shuffle, Dataframe, savelabeled,
                 multianimal):
        super(MainFrame, self).__init__(
            "DeepLabCut2.0 - Manual Outlier Frame Extraction",
            parent,
        )

        ###################################################################################################################################################
        # Spliting the frame into top and bottom panels. Bottom panels contains the widgets. The top panel is for showing images and plotting!
        # topSplitter = wx.SplitterWindow(self)
        #
        # self.image_panel = ImagePanel(topSplitter, config,video,shuffle,Dataframe,self.gui_size)
        # self.widget_panel = WidgetPanel(topSplitter)
        #
        # topSplitter.SplitHorizontally(self.image_panel, self.widget_panel,sashPosition=self.gui_size[1]*0.83)#0.9
        # topSplitter.SetSashGravity(1)
        # sizer = wx.BoxSizer(wx.VERTICAL)
        # sizer.Add(topSplitter, 1, wx.EXPAND)
        # self.SetSizer(sizer)

        # Spliting the frame into top and bottom panels. Bottom panels contains the widgets. The top panel is for showing images and plotting!

        topSplitter = wx.SplitterWindow(self)
        vSplitter = wx.SplitterWindow(topSplitter)

        self.image_panel = ImagePanel(vSplitter, self.gui_size)
        self.choice_panel = ScrollPanel(vSplitter)

        vSplitter.SplitVertically(self.image_panel,
                                  self.choice_panel,
                                  sashPosition=self.gui_size[0] * 0.8)
        vSplitter.SetSashGravity(1)
        self.widget_panel = WidgetPanel(topSplitter)
        topSplitter.SplitHorizontally(vSplitter,
                                      self.widget_panel,
                                      sashPosition=self.gui_size[1] *
                                      0.83)  # 0.9
        topSplitter.SetSashGravity(1)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(topSplitter, 1, wx.EXPAND)
        self.SetSizer(sizer)

        ###################################################################################################################################################
        # Add Buttons to the WidgetPanel and bind them to their respective functions.

        widgetsizer = wx.WrapSizer(orient=wx.HORIZONTAL)

        self.load_button_sizer = wx.BoxSizer(wx.VERTICAL)
        self.help_button_sizer = wx.BoxSizer(wx.VERTICAL)

        self.help = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Help")
        self.help_button_sizer.Add(self.help, 1, wx.ALL, 15)
        #        widgetsizer.Add(self.help , 1, wx.ALL, 15)
        self.help.Bind(wx.EVT_BUTTON, self.helpButton)

        widgetsizer.Add(self.help_button_sizer, 1, wx.ALL, 0)

        self.grab = wx.Button(self.widget_panel,
                              id=wx.ID_ANY,
                              label="Grab Frames")
        widgetsizer.Add(self.grab, 1, wx.ALL, 15)
        self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
        self.grab.Enable(True)

        widgetsizer.AddStretchSpacer(5)
        self.slider = wx.Slider(
            self.widget_panel,
            id=wx.ID_ANY,
            value=0,
            minValue=0,
            maxValue=1,
            size=(200, -1),
            style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS,
        )
        widgetsizer.Add(self.slider, 1, wx.ALL, 5)
        self.slider.Bind(wx.EVT_SLIDER, self.OnSliderScroll)

        widgetsizer.AddStretchSpacer(5)
        self.start_frames_sizer = wx.BoxSizer(wx.VERTICAL)
        self.end_frames_sizer = wx.BoxSizer(wx.VERTICAL)

        self.start_frames_sizer.AddSpacer(15)
        #        self.startFrame = wx.SpinCtrl(self.widget_panel, value='0', size=(100, -1), min=0, max=120)
        self.startFrame = wx.SpinCtrl(self.widget_panel,
                                      value="0",
                                      size=(100, -1))  # ,style=wx.SP_VERTICAL)
        self.startFrame.Enable(False)
        self.start_frames_sizer.Add(self.startFrame, 1,
                                    wx.EXPAND | wx.ALIGN_LEFT, 15)
        start_text = wx.StaticText(self.widget_panel,
                                   label="Start Frame Index")
        self.start_frames_sizer.Add(start_text, 1, wx.EXPAND | wx.ALIGN_LEFT,
                                    15)
        self.checkBox = wx.CheckBox(self.widget_panel,
                                    id=wx.ID_ANY,
                                    label="Range of frames")
        self.checkBox.Bind(wx.EVT_CHECKBOX, self.activate_frame_range)
        self.start_frames_sizer.Add(self.checkBox, 1,
                                    wx.EXPAND | wx.ALIGN_LEFT, 15)
        #
        self.end_frames_sizer.AddSpacer(15)
        self.endFrame = wx.SpinCtrl(self.widget_panel,
                                    value="1",
                                    size=(160, -1))  # , min=1, max=120)
        self.endFrame.Enable(False)
        self.end_frames_sizer.Add(self.endFrame, 1, wx.EXPAND | wx.ALIGN_LEFT,
                                  15)
        end_text = wx.StaticText(self.widget_panel, label="Number of Frames")
        self.end_frames_sizer.Add(end_text, 1, wx.EXPAND | wx.ALIGN_LEFT, 15)
        self.updateFrame = wx.Button(self.widget_panel,
                                     id=wx.ID_ANY,
                                     label="Update")
        self.end_frames_sizer.Add(self.updateFrame, 1,
                                  wx.EXPAND | wx.ALIGN_LEFT, 15)
        self.updateFrame.Bind(wx.EVT_BUTTON, self.updateSlider)
        self.updateFrame.Enable(False)

        widgetsizer.Add(self.start_frames_sizer, 1, wx.ALL, 0)
        widgetsizer.AddStretchSpacer(5)
        widgetsizer.Add(self.end_frames_sizer, 1, wx.ALL, 0)
        widgetsizer.AddStretchSpacer(15)

        self.quit = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Quit")
        widgetsizer.Add(self.quit, 1, wx.ALL, 15)
        self.quit.Bind(wx.EVT_BUTTON, self.quitButton)
        self.quit.Enable(True)

        self.widget_panel.SetSizer(widgetsizer)
        self.widget_panel.SetSizerAndFit(widgetsizer)

        # Variables initialization
        self.numberFrames = 0
        self.currFrame = 0
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        self.drs = []
        self.extract_range_frame = False
        self.firstFrame = 0
        self.Colorscheme = []

        # Read config file
        self.cfg = auxiliaryfunctions.read_config(config)
        self.Task = self.cfg["Task"]
        self.start = self.cfg["start"]
        self.stop = self.cfg["stop"]
        self.date = self.cfg["date"]
        self.trainFraction = self.cfg["TrainingFraction"]
        self.trainFraction = self.trainFraction[0]
        self.videos = self.cfg["video_sets"].keys()
        self.bodyparts = self.cfg["bodyparts"]
        self.colormap = plt.get_cmap(self.cfg["colormap"])
        self.colormap = self.colormap.reversed()
        self.markerSize = self.cfg["dotsize"]
        self.alpha = self.cfg["alphavalue"]
        self.iterationindex = self.cfg["iteration"]
        self.cropping = self.cfg["cropping"]
        self.video_names = [Path(i).stem for i in self.videos]
        self.config_path = Path(config)
        self.video_source = Path(video).resolve()
        self.shuffle = shuffle
        self.Dataframe = Dataframe
        self.savelabeled = savelabeled
        self.multianimal = multianimal
        if self.multianimal:
            from deeplabcut.utils import auxfun_multianimal

            (
                self.individual_names,
                self.uniquebodyparts,
                self.multianimalbodyparts,
            ) = auxfun_multianimal.extractindividualsandbodyparts(self.cfg)
            self.choiceBox, self.visualization_rdb = self.choice_panel.addRadioButtons(
            )
            self.Colorscheme = visualization.get_cmap(
                len(self.individual_names), self.cfg["colormap"])
            self.visualization_rdb.Bind(wx.EVT_RADIOBOX, self.clear_plot)
        # Read the video file
        self.vid = VideoWriter(str(self.video_source))
        if self.cropping:
            self.vid.set_bbox(self.cfg["x1"], self.cfg["x2"], self.cfg["y1"],
                              self.cfg["y2"])
        self.filename = Path(self.video_source).name
        self.numberFrames = len(self.vid)
        self.strwidth = int(np.ceil(np.log10(self.numberFrames)))
        # Set the values of slider and range of frames
        self.startFrame.SetMax(self.numberFrames - 1)
        self.slider.SetMax(self.numberFrames - 1)
        self.endFrame.SetMax(self.numberFrames - 1)
        self.startFrame.Bind(wx.EVT_SPINCTRL, self.updateSlider)  # wx.EVT_SPIN
        # Set the status bar
        self.statusbar.SetStatusText("Working on video: {}".format(
            self.filename))
        # Adding the video file to the config file.
        if self.vid.name not in self.video_names:
            add.add_new_videos(self.config_path, [self.video_source])

        self.update()
        self.plot_labels()
        self.widget_panel.Layout()
Example #14
0
def triangulate(dlc_3d_config, video_cfg, origins, table_corners, tocchini, video_path, filterpredictions=True, destfolder=None,
                save_as_csv=False):

    cfg_3d = auxiliaryfunctions.read_config(dlc_3d_config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    cam_names = cfg_3d['camera_names']
    pcutoff = cfg_3d['pcutoff']
    scorer_3d = cfg_3d['scorername_3d']

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str('config_file_' + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(str("It seems the file specified in the variable config_file_" + str(cam)) +
                            " does not exist. Please edit the config file with correct file path and retry.")

    path_stereo_file = os.path.join(path_camera_matrix, 'stereo_params.pickle')
    stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)

    position=np.array([0.0,0.0,0.0])
    rotation=np.array([[1.0, 0.0, 0.0],[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    rotations=[np.linalg.inv(rotation)]
    translations=[np.matmul(-rotation, position)]
    projections=[]

    for cam_idx in range(1,len(cam_names)):
        pair_rotation=stereo_file[cam_names[0] + '-' + cam_names[cam_idx]]['R']
        pair_translation = stereo_file[cam_names[0] + '-' + cam_names[cam_idx]]['T']
        rotations.append(np.matmul(pair_rotation,rotations[0]))
        translations.append(np.matmul(pair_rotation, translations[0])+np.transpose(pair_translation))
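        # Each additional camera's pose is expressed relative to the reference camera:
        # R_i = R_pair @ R_0 and t_i = R_pair @ t_0 + T_pair, so the extrinsics map
        # reference-camera coordinates into camera i.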

    for cam_idx in range(len(cam_names)):
        path_camera_file = os.path.join(path_camera_matrix, '%s_intrinsic_params.pickle' % cam_names[cam_idx])
        intrinsic_file = auxiliaryfunctions.read_pickle(path_camera_file)

        projection=np.zeros((3,4))
        projection[:,0:3]=rotations[cam_idx]
        projection[:,3]=translations[cam_idx]

        projection=np.matmul(intrinsic_file[cam_names[cam_idx]]['mtx'], projection)
        projections.append(projection)

    for view in cam_names:
        origins[view]=np.array(origins[view])
        origins[view][0]=origins[view][0]+video_cfg['crop_limits'][view][0]
        origins[view][1]=origins[view][1]+video_cfg['crop_limits'][view][2]
    [origin, pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1}, origins, pcutoff, projections, reconMode='all')


    table_coords_3d=[]
    for corner_idx in range(len(table_corners['front'])):
        corner={}
        for view in cam_names:
            corner[view]=table_corners[view][corner_idx]
            corner[view] =np.array(corner[view])
            corner[view][0]=corner[view][0]+video_cfg['crop_limits'][view][0]
            corner[view][1]=corner[view][1]+video_cfg['crop_limits'][view][2]
        [coord,pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1},corner,pcutoff,projections, reconMode='all')
        table_coords_3d.append(coord)

    xy_norm=np.array([[0,0,1]])

    table_center=np.mean(np.array(table_coords_3d),axis=0)

    table_vec1=table_coords_3d[0]-table_center
    table_vec2=table_coords_3d[1]-table_center
    table_norm = unit_vector(np.cross(np.transpose(table_vec1), np.transpose(table_vec2)))

    rot_vec=unit_vector(np.cross(xy_norm, table_norm), axis=1)
    rot_angle=-np.arccos(np.abs(np.sum(xy_norm*table_norm))/np.sqrt(np.sum(table_norm**2)))

    rot_mat=rotation_matrix(rot_angle, np.transpose(rot_vec))
    rot_mat=np.matmul(rotation_matrix(np.pi, [1, 0, 0]), rot_mat)
    rot_mat=rot_mat[0:3,0:3]
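    # rot_mat brings the fitted table plane into the global xy-plane (its normal is
    # rotated onto the z-axis), followed by a flip about the x-axis.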

    origin=np.matmul(rot_mat, origin)

    for idx, coord in enumerate(table_coords_3d):
        coord=np.matmul(rot_mat, coord)-origin
        table_coords_3d[idx]=coord

    tocchini_coords_3d=[]
    for tocchino_idx in range(len(tocchini['front'])):
        tocchino={}
        for view in cam_names:
            tocchino[view]=tocchini[view][tocchino_idx]
            tocchino[view]=np.array(tocchino[view])
            tocchino[view][0]=tocchino[view][0]+video_cfg['crop_limits'][view][0]
            tocchino[view][1]=tocchino[view][1]+video_cfg['crop_limits'][view][2]
        [coord,pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1},tocchino,pcutoff,projections, reconMode='all')
        tocchino_coord=np.matmul(rot_mat,coord)-origin
        tocchini_coords_3d.append(tocchino_coord)

    file_name_3d_scorer = []
    dataname = []
    for cam_name in cam_names:
        dlc_3d_config = snapshots[cam_name]
        cfg = auxiliaryfunctions.read_config(dlc_3d_config)

        shuffle = cfg_3d[str('shuffle_' + cam_name)]
        trainingsetindex = cfg_3d[str('trainingsetindex_' + cam_name)]

        video=video_path[cam_name]
        vname = Path(video).stem


        trainFraction = cfg['TrainingFraction'][trainingsetindex]
        modelfolder = os.path.join(cfg["project_path"],
                                   str(auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)))
        path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
        dlc_cfg = load_config(str(path_test_config))
        Snapshots = np.array([fn.split('.')[0] for fn in os.listdir(os.path.join(modelfolder, 'train')) if "index" in fn])
        snapshotindex = cfg['snapshotindex']

        increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]

        dlc_cfg['init_weights'] = os.path.join(modelfolder, 'train', Snapshots[snapshotindex])
        trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1]
        DLCscorer = auxiliaryfunctions.GetScorerName(cfg, shuffle, trainFraction,
                                                     trainingsiterations=trainingsiterations)

        file_name_3d_scorer.append(DLCscorer)

        if filterpredictions:
            dataname.append(os.path.join(destfolder, cam_name, vname + DLCscorer + 'filtered.h5'))
        else:
            dataname.append(os.path.join(destfolder, cam_name, vname + DLCscorer + '.h5'))

    output_filename = os.path.join(destfolder, vname + '_' + scorer_3d)
    if os.path.isfile(output_filename + '.h5'):  # TODO: don't check twice and load the pickle file to check if the same snapshots + camera matrices were used.
        print("Already analyzed...", output_filename+'.h5')
    else:
        if len(dataname) > 0:
            df = pd.read_hdf(dataname[0])
            df_3d, scorer_3d, bodyparts = auxiliaryfunctions_3d.create_empty_df(df, scorer_3d, flag='3d')

            for bpindex, bp in enumerate(bodyparts):
                bp_coords=np.zeros((3,len(df_3d)))
                for f_idx in range(len(df_3d)):
                    likelihoods={}
                    coords={}
                    for cam_idx,cam_name in enumerate(cam_names):
                        dataframe_cam = pd.read_hdf(dataname[cam_idx])
                        scorer_cam = dataframe_cam.columns.get_level_values(0)[0]
                        likelihoods[cam_name]=dataframe_cam[scorer_cam][bp]['likelihood'].values[f_idx]
                        coords[cam_name]=np.array([dataframe_cam[scorer_cam][bp]['x'].values[f_idx], dataframe_cam[scorer_cam][bp]['y'].values[f_idx]])
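                        # Detections are in cropped-frame coordinates; the per-view
                        # crop offsets below shift them back into full-frame coordinates.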
                        coords[cam_name][0]=coords[cam_name][0]+video_cfg['crop_limits'][cam_name][0]
                        coords[cam_name][1]=coords[cam_name][1]+video_cfg['crop_limits'][cam_name][2]
                    [coord, pairs_used] = locate(cam_names, likelihoods, coords, pcutoff, projections, reconMode='bestpossible')

                    coord=np.matmul(rot_mat, coord)-origin
                    if pairs_used < 3:
                        coord[0]=np.nan
                        coord[1]=np.nan
                        coord[2]=np.nan
                    bp_coords[:,f_idx]=np.squeeze(coord)
                df_3d.iloc[:][scorer_3d, bp, 'x'] = bp_coords[0,:]
                df_3d.iloc[:][scorer_3d, bp, 'y'] = bp_coords[1,:]
                df_3d.iloc[:][scorer_3d, bp, 'z'] = bp_coords[2,:]

            df_3d.to_hdf(str(output_filename + '.h5'), 'df_with_missing', format='table', mode='w')

            if save_as_csv:
                df_3d.to_csv(str(output_filename + '.csv'))

            print("Triangulated data for video", vname)
            print("Results are saved under: ", destfolder)

    return str(output_filename + '.h5'), table_coords_3d, tocchini_coords_3d
Example #15
0
def create_training_model_comparison(config,trainindex=0,num_shuffles=1,net_types=['resnet_50'],augmenter_types=['default'],userfeedback=False,windows2linux=False):
    """
    Creates a training dataset with different networks and augmentation types (dataset_loader) so that the shuffles
    have same training and testing indices.

    Therefore, this function is useful for benchmarking the performance of different network and augmentation types on the same training/testdata.\n

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    trainindex: int, optional
        Either (in case uniform = True) indexes which element of TrainingFraction in the config file should be used (note it is a list!).
        Alternatively (uniform = False) indexes which folder is dropped, i.e. the first if trainindex=0, the second if trainindex =1, etc.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    net_types: list
        Type of networks. Currently resnet_50, resnet_101, resnet_152, mobilenet_v2_1.0,mobilenet_v2_0.75, mobilenet_v2_0.5, and mobilenet_v2_0.35 are supported.

    augmenter_types: list
        Type of augmenters. Currently "default", "imgaug", "tensorpack", and "deterministic" are supported.

    userfeedback: bool, optional
        If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
        want to assure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.

    windows2linux: bool.
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a unix system (e.g. Ubuntu, Colab, Mac) set this variable to True to convert the paths.

    Example
    --------
    >>> deeplabcut.create_training_model_comparison('/analysis/project/reaching-task/config.yaml',num_shuffles=1,net_types=['resnet_50','resnet_152'],augmenter_types=['tensorpack','deterministic'])

    Windows:
    >>> deeplabcut.create_training_model_comparison('C:\\Users\\Ulf\\looming-task\\config.yaml',num_shuffles=1,net_types=['resnet_50','resnet_152'],augmenter_types=['tensorpack','deterministic'])

    --------
    """
    # read cfg file
    cfg = auxiliaryfunctions.read_config(config)

    # create log file
    log_file_name = os.path.join(cfg['project_path'],'training_model_comparison.log')
    logger = logging.getLogger('training_model_comparison')
    if not logger.handlers:
        logger = logging.getLogger('training_model_comparison')
        hdlr = logging.FileHandler(log_file_name)
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
        logger.setLevel(logging.INFO)

    largestshuffleindex = get_largestshuffle_index(config)

    for shuffle in range(num_shuffles):
        trainIndexes, testIndexes=mergeandsplit(config,trainindex=trainindex,uniform=True)
        for idx_net,net in enumerate(net_types):
            for idx_aug,aug in enumerate(augmenter_types):
                get_max_shuffle_idx=(largestshuffleindex+idx_aug+idx_net*len(augmenter_types)+shuffle*len(augmenter_types)*len(net_types))+1  # get shuffle index; offsets start at 0, so add 1
                log_info = str("Shuffle index:" + str(get_max_shuffle_idx) + ", net_type:"+net +", augmenter_type:"+aug + ", trainsetindex:" +str(trainindex))
                create_training_dataset(config,Shuffles=[get_max_shuffle_idx],net_type=net,trainIndexes=trainIndexes,testIndexes=testIndexes,augmenter_type=aug,userfeedback=userfeedback,windows2linux=windows2linux)
                logger.info(log_info)
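The shuffle index above is derived from the largest existing shuffle index plus an offset that enumerates the (shuffle, net_type, augmenter_type) grid. A small worked example of that arithmetic (all values hypothetical):

# Worked example of the shuffle-index arithmetic used above (values made up):
# the offset walks the (shuffle, net, augmenter) grid, and +1 moves past the
# largest shuffle index that already exists in the project.
largestshuffleindex = 0
net_types = ['resnet_50', 'resnet_152']
augmenter_types = ['tensorpack', 'deterministic']
num_shuffles = 1

for shuffle in range(num_shuffles):
    for idx_net, net in enumerate(net_types):
        for idx_aug, aug in enumerate(augmenter_types):
            idx = (largestshuffleindex
                   + idx_aug
                   + idx_net * len(augmenter_types)
                   + shuffle * len(augmenter_types) * len(net_types)) + 1
            print(idx, net, aug)   # -> shuffles 1..4, one per combination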
Example #16
def create_video_with_all_detections(
    config, videos, DLCscorername, displayedbodyparts="all", destfolder=None
):
    """
    Create a video labeled with all the detections stored in a '*_full.pickle' file.

    Parameters
    ----------
    config : str
        Absolute path to the config.yaml file

    videos : list of str
        A list of strings containing the full paths to videos for analysis or a path to a directory
        where all the videos with the same extension are stored.

    DLCscorername: str
        Name of the network. E.g. 'DLC_resnet50_project_userMar23shuffle1_50000'

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that is a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    """
    from deeplabcut.pose_estimation_tensorflow.lib.inferenceutils import (
        convertdetectiondict2listoflist,
    )
    import pickle, re

    cfg = auxiliaryfunctions.read_config(config)

    for video in videos:
        videofolder = os.path.splitext(video)[0]

        if destfolder is None:
            outputname = "{}_full.mp4".format(videofolder + DLCscorername)
            full_pickle = os.path.join(videofolder + DLCscorername + "_full.pickle")
        else:
            auxiliaryfunctions.attempttomakefolder(destfolder)
            outputname = os.path.join(
                destfolder, str(Path(video).stem) + DLCscorername + "_full.mp4"
            )
            full_pickle = os.path.join(
                destfolder, str(Path(video).stem) + DLCscorername + "_full.pickle"
            )

        if not (os.path.isfile(outputname)):
            print("Creating labeled video for ", str(Path(video).stem))
            with open(full_pickle, "rb") as file:
                data = pickle.load(file)

            header = data.pop("metadata")
            all_jointnames = header["all_joints_names"]

            if displayedbodyparts == "all":
                numjoints = len(all_jointnames)
                bpts = range(numjoints)
            else:  # select only "displayedbodyparts"
                bpts = []
                for bptindex, bp in enumerate(all_jointnames):
                    if bp in displayedbodyparts:
                        bpts.append(bptindex)
                numjoints = len(bpts)

            frame_names = list(data)
            frames = [int(re.findall(r"\d+", name)[0]) for name in frame_names]
            colorclass = plt.cm.ScalarMappable(cmap=cfg["colormap"])
            C = colorclass.to_rgba(np.linspace(0, 1, numjoints))
            colors = (C[:, :3] * 255).astype(np.uint8)

            pcutoff = cfg["pcutoff"]
            dotsize = cfg["dotsize"]
            clip = vp(fname=video, sname=outputname, codec="mp4v")
            ny, nx = clip.height(), clip.width()

            for n in trange(clip.nframes):
                frame = clip.load_frame()
                try:
                    ind = frames.index(n)
                    dets = convertdetectiondict2listoflist(data[frame_names[ind]], bpts)
                    for i, det in enumerate(dets):
                        color = colors[i]
                        for x, y, p, _ in det:
                            if p > pcutoff:
                                rr, cc = circle(y, x, dotsize, shape=(ny, nx))
                                frame[rr, cc] = color
                except ValueError:  # No data stored for that particular frame
                    print(n, "no data")
                    pass
                try:
                    clip.save_frame(frame)
                except:
                    print(n, "frame writing error.")
                    pass
            clip.close()
        else:
            print("Detections already plotted, ", outputname)
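A hypothetical call, assuming the video has already been analyzed and a '*_full.pickle' file exists next to it (the config path, video path, and scorer name below are placeholders):

# Hypothetical usage; all paths and the scorer name are placeholders.
config = '/analysis/project/reaching-task/config.yaml'
videos = ['/analysis/project/videos/reachingvideo1.avi']
create_video_with_all_detections(
    config,
    videos,
    DLCscorername='DLC_resnet50_reachingtaskMar23shuffle1_50000',
    displayedbodyparts='all',
)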
def analyzeskeleton(
    config,
    videos,
    videotype="avi",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    save_as_csv=False,
    destfolder=None,
    modelprefix="",
    track_method="",
):
    """
    Extracts length and orientation of each "bone" of the skeleton as defined in the config file.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a path to a directory where all the videos with the same extension are stored.

    shuffle : int, optional
        The shuffle index of the training dataset. The extracted frames will be stored in the labeled-dataset for
        the corresponding shuffle of the training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default false
        Boolean variable indicating if filtered output should be plotted rather than frame-by-frame predictions. Filtered version can be calculated with deeplabcut.filterpredictions

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video). Note that for subsequent analysis this
        folder also needs to be passed.

    track_method: string, optional
         Specifies the tracker used to generate the data. Empty by default (corresponding to a single animal project).
         For multiple animals, must be either 'box', 'skeleton', or 'ellipse' and will be taken from the config.yaml file if none is given.

    """
    # Load config file, scorer and videos
    cfg = auxiliaryfunctions.read_config(config)
    if not cfg["skeleton"]:
        raise ValueError("No skeleton defined in the config.yaml.")

    track_method = auxfun_multianimal.get_track_method(
        cfg, track_method=track_method)
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg,
        shuffle,
        trainFraction=cfg["TrainingFraction"][trainingsetindex],
        modelprefix=modelprefix,
    )

    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        print("Processing %s" % (video))
        if destfolder is None:
            destfolder = str(Path(video).parents[0])

        vname = Path(video).stem
        try:
            df, filepath, scorer, _ = auxiliaryfunctions.load_analyzed_data(
                destfolder, vname, DLCscorer, filtered, track_method)
            output_name = filepath.replace(".h5", f"_skeleton.h5")
            if os.path.isfile(output_name):
                print(
                    f"Skeleton in video {vname} already processed. Skipping..."
                )
                continue

            bones = {}
            if "individuals" in df.columns.names:
                for animal_name, df_ in df.groupby(level="individuals",
                                                   axis=1):
                    temp = df_.droplevel(["scorer", "individuals"], axis=1)
                    if animal_name != "single":
                        for bp1, bp2 in cfg["skeleton"]:
                            name = "{}_{}_{}".format(animal_name, bp1, bp2)
                            bones[name] = analyzebone(temp[bp1], temp[bp2])
            else:
                for bp1, bp2 in cfg["skeleton"]:
                    name = "{}_{}".format(bp1, bp2)
                    bones[name] = analyzebone(df[scorer][bp1], df[scorer][bp2])

            skeleton = pd.concat(bones, axis=1)
            skeleton.to_hdf(output_name,
                            "df_with_missing",
                            format="table",
                            mode="w")
            if save_as_csv:
                skeleton.to_csv(output_name.replace(".h5", ".csv"))

        except FileNotFoundError as e:
            print(e)
            continue
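For intuition, each skeleton "bone" connects two bodyparts, and its length and orientation can be computed per frame from their (x, y) trajectories. The sketch below illustrates that idea with made-up data; it is not the library's analyzebone implementation:

import numpy as np
import pandas as pd

# Illustration only (not the library's analyzebone): per-frame bone length and
# orientation from two hypothetical bodypart trajectories.
bp1 = pd.DataFrame({'x': [0.0, 1.0, 2.0], 'y': [0.0, 0.0, 1.0]})
bp2 = pd.DataFrame({'x': [1.0, 2.0, 2.0], 'y': [1.0, 1.0, 3.0]})

dx = bp2['x'] - bp1['x']
dy = bp2['y'] - bp1['y']
length = np.sqrt(dx ** 2 + dy ** 2)             # bone length per frame
orientation = np.degrees(np.arctan2(dy, dx))    # bone orientation per frame
print(pd.DataFrame({'length': length, 'orientation': orientation}))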
Example #18
def create_pretrained_project(
    project,
    experimenter,
    videos,
    model="full_human",
    working_directory=None,
    copy_videos=False,
    videotype=None,
    analyzevideo=True,
    filtered=True,
    createlabeledvideo=True,
    trainFraction=None,
):
    """
    Creates a new project directory, sub-directories and a basic configuration file.
    Change its parameters to your projects need.

    The project will also be initialized with a pre-trained model from the DeepLabCut model zoo!

    http://modelzoo.deeplabcut.org

    Parameters
    ----------
    project : string
        String containing the name of the project.

    experimenter : string
        String containing the name of the experimenter.

    model: string, options see  http://www.mousemotorlab.org/dlc-modelzoo
        Current option and default: 'full_human'  Creates a demo human project and analyzes a video with ResNet 101 weights pretrained on MPII Human Pose. This is from the DeeperCut paper
        by Insafutdinov et al. https://arxiv.org/abs/1605.03170 Please make sure to cite it too if you use this code!

    videos : list
        A list of string containing the full paths of the videos to include in the project.

    working_directory : string, optional
        The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.

    copy_videos : bool, optional  ON WINDOWS: TRUE is often necessary!
        If this is set to True, the videos are copied to the ``videos`` directory. If it is False,symlink of the videos are copied to the project/videos directory. The default is ``False``; if provided it must be either
        ``True`` or ``False``.

    analyzevideo : bool, optional
        If true, then the video is analyzed and a labeled video is created. If false, then only the project will be created and the weights downloaded, and you can use them later.

    filtered: bool, default false
        Boolean variable indicating if filtered pose data output should be plotted rather than frame-by-frame predictions.
        Filtered version can be calculated with deeplabcut.filterpredictions

    trainFraction: float, optional
        By default, the value used for *new* projects (0.95). This fraction will be used in the dlc-models/trainingset folder name.

    Example
    --------
    Linux/MacOS: loading the full_human model and analyzing the video /homosapiens1.avi
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Linus',['/data/videos/homosapiens1.avi'], copy_videos=False)

    Loading the full_cat model and analyzing the video "felixfeliscatus3.avi"
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Linus',['/data/videos/felixfeliscatus3.avi'], model='full_cat')

    Windows:
    >>> deeplabcut.create_pretrained_project('humanstrokestudy','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'],r'C:\yourusername\analysis\project', copy_videos=True)
    Note that Windows paths must be formatted either as raw strings (r'C:\...') or with double backslashes ('C:\\...').

    """
    if model in globals()["Modeloptions"]:
        cwd = os.getcwd()

        cfg = deeplabcut.create_new_project(project, experimenter, videos,
                                            working_directory, copy_videos,
                                            videotype)
        if trainFraction is not None:
            auxiliaryfunctions.edit_config(
                cfg, {"TrainingFraction": [trainFraction]})

        config = auxiliaryfunctions.read_config(cfg)
        if model == "full_human":
            config["bodyparts"] = [
                "ankle1",
                "knee1",
                "hip1",
                "hip2",
                "knee2",
                "ankle2",
                "wrist1",
                "elbow1",
                "shoulder1",
                "shoulder2",
                "elbow2",
                "wrist2",
                "chin",
                "forehead",
            ]
            config["skeleton"] = [
                ["ankle1", "knee1"],
                ["ankle2", "knee2"],
                ["knee1", "hip1"],
                ["knee2", "hip2"],
                ["hip1", "hip2"],
                ["shoulder1", "shoulder2"],
                ["shoulder1", "hip1"],
                ["shoulder2", "hip2"],
                ["shoulder1", "elbow1"],
                ["shoulder2", "elbow2"],
                ["chin", "forehead"],
                ["elbow1", "wrist1"],
                ["elbow2", "wrist2"],
            ]
            config["default_net_type"] = "resnet_101"
        else:  # just make a case and put the stuff you want.
            # TBD: 'partaffinityfield_graph' >> use to set skeleton!
            pass

        auxiliaryfunctions.write_config(cfg, config)
        config = auxiliaryfunctions.read_config(cfg)

        train_dir = Path(
            os.path.join(
                config["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction=config["TrainingFraction"][0],
                        shuffle=1,
                        cfg=config,
                    )),
                "train",
            ))
        test_dir = Path(
            os.path.join(
                config["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction=config["TrainingFraction"][0],
                        shuffle=1,
                        cfg=config,
                    )),
                "test",
            ))

        # Create the model directory
        train_dir.mkdir(parents=True, exist_ok=True)
        test_dir.mkdir(parents=True, exist_ok=True)

        modelfoldername = auxiliaryfunctions.GetModelFolder(
            trainFraction=config["TrainingFraction"][0], shuffle=1, cfg=config)
        path_train_config = str(
            os.path.join(config["project_path"], Path(modelfoldername),
                         "train", "pose_cfg.yaml"))
        path_test_config = str(
            os.path.join(config["project_path"], Path(modelfoldername), "test",
                         "pose_cfg.yaml"))

        # Download the weights and put them in the appropriate directory
        print("Downloading weights...")
        auxfun_models.DownloadModel(model, train_dir)

        pose_cfg = deeplabcut.auxiliaryfunctions.read_plainconfig(
            path_train_config)
        print(path_train_config)
        # Updating config file:
        dict_ = {
            "default_net_type": pose_cfg["net_type"],
            "default_augmenter": pose_cfg["dataset_type"],
            "bodyparts": pose_cfg["all_joints_names"],
            "dotsize": 6,
        }
        auxiliaryfunctions.edit_config(cfg, dict_)

        # downloading base encoder / not required unless one re-trains (but when a training set is created this happens anyway)
        # model_path, num_shuffles=auxfun_models.Check4weights(pose_cfg['net_type'], parent_path, num_shuffles= 1)

        # Updating training and test pose_cfg:
        snapshotname = [fn for fn in os.listdir(train_dir)
                        if ".meta" in fn][0].split(".meta")[0]
        dict2change = {
            "init_weights": str(os.path.join(train_dir, snapshotname)),
            "project_path": str(config["project_path"]),
        }

        UpdateTrain_pose_yaml(pose_cfg, dict2change, path_train_config)
        keys2save = [
            "dataset",
            "dataset_type",
            "num_joints",
            "all_joints",
            "all_joints_names",
            "net_type",
            "init_weights",
            "global_scale",
            "location_refinement",
            "locref_stdev",
        ]

        MakeTest_pose_yaml(pose_cfg, keys2save, path_test_config)

        video_dir = os.path.join(config["project_path"], "videos")
        if analyzevideo:
            print("Analyzing video...")
            deeplabcut.analyze_videos(cfg, [video_dir],
                                      videotype,
                                      save_as_csv=True)

        if createlabeledvideo:
            if filtered:
                deeplabcut.filterpredictions(cfg, [video_dir], videotype)

            print("Plotting results...")
            deeplabcut.create_labeled_video(cfg, [video_dir],
                                            videotype,
                                            draw_skeleton=True,
                                            filtered=filtered)
            deeplabcut.plot_trajectories(cfg, [video_dir],
                                         videotype,
                                         filtered=filtered)

        os.chdir(cwd)
        return cfg, path_train_config

    else:
        return "N/A", "N/A"
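The snapshot lookup above simply takes the first '.meta' file in the train directory, which is fine for a freshly downloaded model zoo checkpoint. When several snapshots are present, one would typically pick the one with the most training iterations; a small sketch of that selection (the file names below are made up):

# Sketch: choose the TensorFlow checkpoint with the highest iteration count.
# The file names are made up; in practice they would come from os.listdir(train_dir).
meta_files = ['snapshot-50000.meta', 'snapshot-100000.meta', 'snapshot-75000.meta']
snapshots = [fn.split('.meta')[0] for fn in meta_files]
latest = max(snapshots, key=lambda s: int(s.split('-')[-1]))
print(latest)   # -> snapshot-100000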
Example #19
def analyze_videos(config,
                   videos,
                   videotype='avi',
                   shuffle=1,
                   trainingsetindex=0,
                   gputouse=None,
                   save_as_csv=False,
                   destfolder=None,
                   batchsize=None,
                   crop=None,
                   get_nframesfrommetadata=True,
                   TFGPUinference=True,
                   dynamic=(False, .5, 10)):
    """
    Makes predictions based on a trained network. The index of the trained network is specified by parameters in the config file (in particular the variable 'snapshotindex').

    Output: The labels are stored as MultiIndex Pandas Array, which contains the name of the network, body part name, (x, y) label position \n
            in pixels, and the likelihood for each frame per body part. These arrays are stored in an efficient Hierarchical Data Format (HDF) \n
            in the same directory, where the video is stored. However, if the flag save_as_csv is set to True, the data can also be exported in \n
            comma-separated values format (.csv), which in turn can be imported in many programs, such as MATLAB, R, Prism, etc.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a path to a directory where all the videos with the same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle: int, optional
        An integer specifying the shuffle index of the training dataset used for training the network. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    gputouse: int, optional. Natural number indicating the number of your GPU (see number in nvidia-smi). If you do not have a GPU, put None.
    See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video). Note that for subsequent analysis this
        folder also needs to be passed.

    batchsize: int, default from pose_cfg.yaml
        Change batch size for inference; if given overwrites value in pose_cfg.yaml

    crop: list, optional (default=None)
        List of cropping coordinates as [x1, x2, y1, y2].
        Note that the same cropping parameters will then be used for all videos.
        If different video crops are desired, run 'analyze_videos' on individual videos
        with the corresponding cropping coordinates.

    TFGPUinference: bool, default: True
        Perform inference on GPU with Tensorflow code. Introduced in "Pretraining boosts out-of-domain robustness for pose estimation" by
        Alexander Mathis, Mert Yüksekgönül, Byron Rogers, Matthias Bethge, Mackenzie W. Mathis Source: https://arxiv.org/abs/1909.11229

    dynamic: triple containing (state, detection threshold, margin)
        If the state is true, then dynamic cropping will be performed. That means that if an object is detected (i.e. any body part > detection threshold),
        then object boundaries are computed according to the smallest/largest x position and smallest/largest y position of all body parts. This window is
        expanded by the margin, and from then on only the posture within this crop is analyzed (until the object is lost, i.e. < detection threshold). The
        current position is utilized for updating the crop window for the next frame (this is why the margin is important and should be set large
        enough given the movement of the animal). A minimal sketch of this crop-window computation is shown after this function.

    Examples
    --------

    Windows example for analyzing 1 video
    >>> deeplabcut.analyze_videos('C:\\myproject\\reaching-task\\config.yaml',['C:\\yourusername\\rig-95\\Videos\\reachingvideo1.avi'])
    --------

    If you want to analyze only 1 video
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------

    If you want to analyze all videos of type avi in a folder:
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos'],videotype='.avi')
    --------

    If you want to analyze multiple videos
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------

    If you want to analyze multiple videos with shuffle = 2
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'],shuffle=2)

    --------
    If you want to analyze multiple videos with shuffle = 2 and save results as an additional csv file too
    >>> deeplabcut.analyze_videos('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'],shuffle=2,save_as_csv=True)
    --------

    """
    if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
        del os.environ[
            'TF_CUDNN_USE_AUTOTUNE']  #was potentially set during training

    if gputouse is not None:  #gpu selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    tf.reset_default_graph()
    start_path = os.getcwd(
    )  #record cwd to return to this directory in the end

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]

    if crop is not None:
        cfg['cropping'] = True
        cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2'] = crop
        print("Overwriting cropping parameters:", crop)
        print(
            "These are used for all videos, but won't be saved to the cfg file."
        )

    modelfolder = os.path.join(
        cfg["project_path"],
        str(auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)))
    path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
    try:
        dlc_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
        raise FileNotFoundError(
            "It seems the model for shuffle %s and trainFraction %s does not exist."
            % (shuffle, trainFraction))

    # Check which snapshots are available and sort them by # iterations
    try:
        Snapshots = np.array([
            fn.split('.')[0]
            for fn in os.listdir(os.path.join(modelfolder, 'train'))
            if "index" in fn
        ])
    except FileNotFoundError:
        raise FileNotFoundError(
            "Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\n Please train it before using it to analyze videos.\n Use the function 'train_network' to train the network for shuffle %s."
            % (shuffle, shuffle))

    if cfg['snapshotindex'] == 'all':
        print(
            "Snapshotindex is set to 'all' in the config.yaml file. Running video analysis with all snapshots is very costly! Use the function 'evaluate_network' to choose the best snapshot. For now, changing snapshot index to -1!"
        )
        snapshotindex = -1
    else:
        snapshotindex = cfg['snapshotindex']

    increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
    Snapshots = Snapshots[increasing_indices]

    print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)

    ##################################################
    # Load and setup CNN part detector
    ##################################################

    # Check if data already was generated:
    dlc_cfg['init_weights'] = os.path.join(modelfolder, 'train',
                                           Snapshots[snapshotindex])
    trainingsiterations = (dlc_cfg['init_weights'].split(
        os.sep)[-1]).split('-')[-1]
    # Update number of output and batchsize
    dlc_cfg['num_outputs'] = cfg.get('num_outputs',
                                     dlc_cfg.get('num_outputs', 1))

    if batchsize is None:
        #update batchsize (based on parameters in config.yaml)
        dlc_cfg['batch_size'] = cfg['batch_size']
    else:
        dlc_cfg['batch_size'] = batchsize
        cfg['batch_size'] = batchsize

    if dynamic[0]:  #state=true
        #(state,detectiontreshold,margin)=dynamic
        print("Starting analysis in dynamic cropping mode with parameters:",
              dynamic)
        dlc_cfg['num_outputs'] = 1
        TFGPUinference = False
        dlc_cfg['batch_size'] = 1
        print(
            "Switching batchsize to 1, num_outputs (per animal) to 1 and TFGPUinference to False (all these features are not supported in this mode)."
        )

    # Name for scorer:
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, trainingsiterations=trainingsiterations)
    if dlc_cfg['num_outputs'] > 1:
        if TFGPUinference:
            print(
                "Switching to numpy-based keypoint extraction code, as multiple point extraction is not supported by TF code currently."
            )
            TFGPUinference = False
        print("Extracting ", dlc_cfg['num_outputs'], "instances per bodypart")
        xyz_labs_orig = ['x', 'y', 'likelihood']
        suffix = [str(s + 1) for s in range(dlc_cfg['num_outputs'])]
        suffix[
            0] = ''  # first one has empty suffix for backwards compatibility
        xyz_labs = [x + s for s in suffix for x in xyz_labs_orig]
    else:
        xyz_labs = ['x', 'y', 'likelihood']

    #sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
    if TFGPUinference:
        sess, inputs, outputs = predict.setup_GPUpose_prediction(dlc_cfg)
    else:
        sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)

    pdindex = pd.MultiIndex.from_product(
        [[DLCscorer], dlc_cfg['all_joints_names'], xyz_labs],
        names=['scorer', 'bodyparts', 'coords'])

    ##################################################
    # Datafolder
    ##################################################
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    if len(Videos) > 0:
        #looping over videos
        for video in Videos:
            DLCscorer = AnalyzeVideo(video, DLCscorer, DLCscorerlegacy,
                                     trainFraction, cfg, dlc_cfg, sess, inputs,
                                     outputs, pdindex, save_as_csv, destfolder,
                                     TFGPUinference, dynamic)

        os.chdir(str(start_path))
        print(
            "The videos are analyzed. Now your research can truly start! \n You can create labeled videos with 'create_labeled_video'."
        )
        print(
            "If the tracking is not satisfactory for some videos, consider expanding the training set. You can use the function 'extract_outlier_frames' to extract any outlier frames!"
        )
        return DLCscorer  #note: this is either DLCscorer or DLCscorerlegacy depending on what was used!
    else:
        os.chdir(str(start_path))
        print("No video/s found. Please check your path!")
        return DLCscorer
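As noted in the docstring above, the dynamic-cropping option boils down to: keep the bodyparts whose likelihood exceeds the detection threshold, take their bounding box, and pad it by the margin. A minimal sketch of that computation, with made-up detections and parameter values:

import numpy as np

# Sketch of the dynamic crop-window computation (all values are made up):
# keep detections above the threshold, bound them, and expand by the margin.
detections = np.array([            # columns: x, y, likelihood
    [120.0, 80.0, 0.95],
    [130.0, 95.0, 0.80],
    [400.0, 300.0, 0.10],          # below threshold -> ignored
])
threshold, margin = 0.5, 10

good = detections[detections[:, 2] > threshold]
if len(good):
    x1, y1 = good[:, 0].min() - margin, good[:, 1].min() - margin
    x2, y2 = good[:, 0].max() + margin, good[:, 1].max() + margin
    print('crop window:', x1, y1, x2, y2)   # -> 110.0 70.0 140.0 105.0
else:
    print('object lost; fall back to the full frame')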
Example #20
def create_training_dataset(config,num_shuffles=1,Shuffles=None,windows2linux=False):
    """
    Creates a training dataset. Labels from all the extracted frames are merged into a single .h5 file.\n
    Only the videos included in the config file are used to create this dataset.\n
    [OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    Shuffles: list of shuffles.
        Alternatively the user can also give a list of shuffles (integers!).

    windows2linux: bool.
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a Unix system (e.g. Ubuntu, Colab, Mac), set this variable to True to convert the paths.
    
    Example
    --------
    >>> deeplabcut.create_training_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)
    Windows:
    >>> deeplabcut.create_training_dataset('C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
    --------
    """
    from skimage import io
    import scipy.io as sio
    import deeplabcut
    import subprocess

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg['scorer']
    project_path = cfg['project_path']
    # Create path for training sets & store data there
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg) #Path concatenation OS platform independent
    auxiliaryfunctions.attempttomakefolder(Path(os.path.join(project_path,str(trainingsetfolder))),recursive=True)
    
    Data = merge_annotateddatasets(cfg,project_path,Path(os.path.join(project_path,trainingsetfolder)),windows2linux)
    Data = Data[scorer] #extract labeled data

    #set model type. we will allow more in the future.
    if cfg['resnet']==50:
        net_type ='resnet_'+str(cfg['resnet'])
        resnet_path = str(Path(deeplabcut.__file__).parents[0] / 'pose_estimation_tensorflow/models/pretrained/resnet_v1_50.ckpt')
    elif cfg['resnet']==101:
        net_type ='resnet_'+str(cfg['resnet'])
        resnet_path = str(Path(deeplabcut.__file__).parents[0] / 'pose_estimation_tensorflow/models/pretrained/resnet_v1_101.ckpt')
    else:
        print("Currently only ResNet 50 or 101 supported, please change 'resnet' entry in config.yaml!")
        num_shuffles=-1 #thus the loop below is empty...

    if not Path(resnet_path).is_file():
        """
        Downloads the ImageNet pretrained weights for ResNet.
        """
        start = os.getcwd()
        os.chdir(str(Path(resnet_path).parents[0]))
        print("Downloading the pretrained model (ResNets)....")
        subprocess.call("download.sh", shell=True)
        os.chdir(start)

    if Shuffles is None:
        Shuffles=range(1,num_shuffles+1,1)
    else:
        Shuffles=[i for i in Shuffles if isinstance(i,int)]

    bodyparts = cfg['bodyparts']
    TrainingFraction = cfg['TrainingFraction']
    for shuffle in Shuffles: # Creating shuffles starting from 1
        for trainFraction in TrainingFraction:
            trainIndexes, testIndexes = SplitTrials(range(len(Data.index)), trainFraction)

            ####################################################
            # Generating data structure with labeled information & frame metadata (for deep cut)
            ####################################################

            # Make matlab train file!
            data = []
            for jj in trainIndexes:
                H = {}
                # load image to get dimensions:
                filename = Data.index[jj]
                im = io.imread(os.path.join(cfg['project_path'],filename))
                H['image'] = filename

                try:
                    H['size'] = np.array(
                        [np.shape(im)[2],
                         np.shape(im)[0],
                         np.shape(im)[1]])
                except:
                    # print "Grayscale!"
                    H['size'] = np.array([1, np.shape(im)[0], np.shape(im)[1]])

                indexjoints=0
                joints=np.zeros((len(bodyparts),3))*np.nan
                for bpindex,bodypart in enumerate(bodyparts):
                    if Data[bodypart]['x'][jj]<np.shape(im)[1] and Data[bodypart]['y'][jj]<np.shape(im)[0]: #are labels in image?
                        joints[indexjoints,0]=int(bpindex)
                        joints[indexjoints,1]=Data[bodypart]['x'][jj]
                        joints[indexjoints,2]=Data[bodypart]['y'][jj]
                        indexjoints+=1

                joints = joints[np.where(
                    np.prod(np.isfinite(joints),
                            1))[0], :]  # drop NaN, i.e. lines for missing body parts

                assert (np.prod(np.array(joints[:, 2]) < np.shape(im)[0])
                        )  # y coordinate within image?
                assert (np.prod(np.array(joints[:, 1]) < np.shape(im)[1])
                        )  # x coordinate within image?

                H['joints'] = np.array(joints, dtype=int)
                if np.size(joints)>0: #exclude images without labels
                        data.append(H)

            if len(trainIndexes)>0:
                datafilename,metadatafilename=auxiliaryfunctions.GetDataandMetaDataFilenames(trainingsetfolder,trainFraction,shuffle,cfg)
                ################################################################################
                # Saving metadata (Pickle file)
                ################################################################################
                auxiliaryfunctions.SaveMetadata(os.path.join(project_path,metadatafilename),data, trainIndexes, testIndexes, trainFraction)
                ################################################################################
                # Saving data file (convert to training file for deeper cut (*.mat))
                ################################################################################

                DTYPE = [('image', 'O'), ('size', 'O'), ('joints', 'O')]
                MatlabData = np.array(
                    [(np.array([data[item]['image']], dtype='U'),
                      np.array([data[item]['size']]),
                      boxitintoacell(data[item]['joints']))
                     for item in range(len(data))],
                    dtype=DTYPE)

                sio.savemat(os.path.join(project_path,datafilename), {'dataset': MatlabData})

                ################################################################################
                # Creating file structure for training &
                # Test files as well as pose_yaml files (containing training and testing information)
                #################################################################################

                modelfoldername=auxiliaryfunctions.GetModelFolder(trainFraction,shuffle,cfg)
                auxiliaryfunctions.attempttomakefolder(Path(config).parents[0] / modelfoldername,recursive=True)
                auxiliaryfunctions.attempttomakefolder(str(Path(config).parents[0] / modelfoldername) + '/train')
                auxiliaryfunctions.attempttomakefolder(str(Path(config).parents[0] / modelfoldername) + '/test')

                path_train_config = str(os.path.join(cfg['project_path'],Path(modelfoldername),'train','pose_cfg.yaml'))
                path_test_config = str(os.path.join(cfg['project_path'],Path(modelfoldername),'test','pose_cfg.yaml'))
                #str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test'  /  'pose_cfg.yaml')

                items2change = {
                    "dataset": datafilename,
                    "metadataset": metadatafilename,
                    "num_joints": len(bodyparts),
                    "all_joints": [[i] for i in range(len(bodyparts))],
                    "all_joints_names": bodyparts,
                    "init_weights": resnet_path,
                    "project_path": cfg['project_path'],
                    "net_type": net_type
                }

                defaultconfigfile = str(Path(deeplabcut.__file__).parents[0] / 'pose_cfg.yaml')

                trainingdata = MakeTrain_pose_yaml(items2change,path_train_config,defaultconfigfile)
                keys2save = [
                    "dataset", "num_joints", "all_joints", "all_joints_names",
                    "net_type", 'init_weights', 'global_scale', 'location_refinement',
                    'locref_stdev'
                ]
                MakeTest_pose_yaml(trainingdata, keys2save,path_test_config)
                print("The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!")
            else:
                pass
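SplitTrials above partitions the labeled-frame indices into train and test sets according to trainFraction. A minimal sketch of such a fraction-based split (this is not the library's implementation):

import numpy as np

# Illustration of a fraction-based train/test split (not the library's SplitTrials):
# shuffle the frame indices, then cut the permutation at trainFraction.
rng = np.random.default_rng(0)
indices = np.arange(100)                    # e.g. 100 labeled frames
trainFraction = 0.95

shuffled = rng.permutation(indices)
n_train = int(round(trainFraction * len(indices)))
trainIndexes, testIndexes = shuffled[:n_train], shuffled[n_train:]
print(len(trainIndexes), len(testIndexes))  # -> 95 5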
Example #21
    def __init__(self, parent, gui_size, cfg):
        """Constructor"""
        wx.Panel.__init__(self, parent=parent)

        # variable initialization
        self.config = cfg
        self.bodyparts = []
        # design the panel
        self.sizer = wx.GridBagSizer(5, 5)

        text = wx.StaticText(self,
                             label="DeepLabCut - Step 6. Evaluate Network")
        self.sizer.Add(text,
                       pos=(0, 0),
                       flag=wx.TOP | wx.LEFT | wx.BOTTOM,
                       border=15)
        # Add logo of DLC
        icon = wx.StaticBitmap(self, bitmap=wx.Bitmap(LOGO_PATH))
        self.sizer.Add(icon,
                       pos=(0, 4),
                       flag=wx.TOP | wx.RIGHT | wx.ALIGN_RIGHT,
                       border=5)

        line1 = wx.StaticLine(self)
        self.sizer.Add(line1,
                       pos=(1, 0),
                       span=(1, 5),
                       flag=wx.EXPAND | wx.BOTTOM,
                       border=10)

        self.cfg_text = wx.StaticText(self, label="Select the config file")
        self.sizer.Add(self.cfg_text,
                       pos=(2, 0),
                       flag=wx.TOP | wx.LEFT,
                       border=5)

        if sys.platform == "darwin":
            self.sel_config = wx.FilePickerCtrl(
                self,
                path="",
                style=wx.FLP_USE_TEXTCTRL,
                message="Choose the config.yaml file",
                wildcard="*.yaml",
            )
        else:
            self.sel_config = wx.FilePickerCtrl(
                self,
                path="",
                style=wx.FLP_USE_TEXTCTRL,
                message="Choose the config.yaml file",
                wildcard="config.yaml",
            )
        # self.sel_config = wx.FilePickerCtrl(self, path="",style=wx.FLP_USE_TEXTCTRL,message="Choose the config.yaml file", wildcard="config.yaml")
        self.sizer.Add(self.sel_config,
                       pos=(2, 1),
                       span=(1, 3),
                       flag=wx.TOP | wx.EXPAND,
                       border=5)
        self.sel_config.SetPath(self.config)
        self.sel_config.Bind(wx.EVT_FILEPICKER_CHANGED, self.select_config)

        sb = wx.StaticBox(self, label="Attributes")
        boxsizer = wx.StaticBoxSizer(sb, wx.VERTICAL)

        self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        self.hbox3 = wx.BoxSizer(wx.HORIZONTAL)

        config_file = auxiliaryfunctions.read_config(self.config)

        shuffles_text = wx.StaticBox(self, label="Specify the shuffle")
        shuffles_text_boxsizer = wx.StaticBoxSizer(shuffles_text, wx.VERTICAL)
        self.shuffles = wx.SpinCtrl(self, value="1", min=0, max=100)
        shuffles_text_boxsizer.Add(self.shuffles, 1,
                                   wx.EXPAND | wx.TOP | wx.BOTTOM, 10)

        #trainingset = wx.StaticBox(self, label="Specify the trainingset index")
        #trainingset_boxsizer = wx.StaticBoxSizer(trainingset, wx.VERTICAL)
        #self.trainingset = wx.SpinCtrl(self, value="0", min=0, max=100)
        #trainingset_boxsizer.Add(
        #    self.trainingset, 1, wx.EXPAND | wx.TOP | wx.BOTTOM, 10
        #)

        self.plot_choice = wx.RadioBox(
            self,
            label="Want to plot predictions (as in standard DLC projects)?",
            choices=["Yes", "No"],
            majorDimension=1,
            style=wx.RA_SPECIFY_COLS,
        )
        self.plot_choice.SetSelection(0)

        self.plot_scoremaps = wx.RadioBox(
            self,
            label="Want to plot maps (ALL images): scoremaps, PAFs, locrefs?",
            choices=["Yes", "No"],
            majorDimension=1,
            style=wx.RA_SPECIFY_COLS,
        )
        self.plot_scoremaps.SetSelection(1)

        self.bodypart_choice = wx.RadioBox(
            self,
            label="Compare all bodyparts?",
            choices=["Yes", "No"],
            majorDimension=1,
            style=wx.RA_SPECIFY_COLS,
        )
        self.bodypart_choice.Bind(wx.EVT_RADIOBOX, self.chooseOption)

        if config_file.get("multianimalproject", False):
            bodyparts = config_file["multianimalbodyparts"]
        else:
            bodyparts = config_file["bodyparts"]
        self.bodyparts_to_compare = wx.CheckListBox(
            self, choices=bodyparts, style=0, name="Select the bodyparts")
        self.bodyparts_to_compare.Bind(wx.EVT_CHECKLISTBOX, self.getbp)
        self.bodyparts_to_compare.Hide()

        self.hbox1.Add(shuffles_text_boxsizer, 5,
                       wx.EXPAND | wx.TOP | wx.BOTTOM, 5)
        #self.hbox1.Add(trainingset_boxsizer, 5, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)
        self.hbox1.Add(self.plot_scoremaps, 5, wx.EXPAND | wx.TOP | wx.BOTTOM,
                       5)

        self.hbox2.Add(self.plot_choice, 5, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)
        self.hbox2.Add(self.bodypart_choice, 5, wx.EXPAND | wx.TOP | wx.BOTTOM,
                       5)
        self.hbox2.Add(self.bodyparts_to_compare, 10,
                       wx.EXPAND | wx.TOP | wx.BOTTOM, 5)

        boxsizer.Add(self.hbox1, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 10)
        boxsizer.Add(self.hbox2, 5, wx.EXPAND | wx.TOP | wx.BOTTOM, 10)

        self.sizer.Add(
            boxsizer,
            pos=(3, 0),
            span=(1, 5),
            flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
            border=10,
        )

        self.help_button = wx.Button(self, label="Help")
        self.sizer.Add(self.help_button, pos=(4, 0), flag=wx.LEFT, border=10)
        self.help_button.Bind(wx.EVT_BUTTON, self.help_function)

        self.ok = wx.Button(self, label="RUN: Evaluate Network")
        self.sizer.Add(self.ok, pos=(4, 3))
        self.ok.Bind(wx.EVT_BUTTON, self.evaluate_network)

        self.ok = wx.Button(self, label="Optional: Plot 3 test maps")
        self.sizer.Add(self.ok, pos=(5, 3))
        self.ok.Bind(wx.EVT_BUTTON, self.plot_maps)

        self.cancel = wx.Button(self, label="Reset")
        self.sizer.Add(self.cancel,
                       pos=(4, 1),
                       span=(1, 1),
                       flag=wx.BOTTOM | wx.RIGHT,
                       border=10)
        self.cancel.Bind(wx.EVT_BUTTON, self.cancel_evaluate_network)

        if config_file.get("multianimalproject", False):
            self.inf_cfg_text = wx.Button(
                self, label="Edit the inference_config.yaml")
            self.inf_cfg_text.Bind(wx.EVT_BUTTON, self.edit_inf_config)
            self.sizer.Add(
                self.inf_cfg_text,
                pos=(4, 2),
                span=(1, 1),
                flag=wx.BOTTOM | wx.RIGHT,
                border=10,
            )

        self.sizer.AddGrowableCol(2)

        self.SetSizer(self.sizer)
        self.sizer.Fit(self)
Example #22
def extract_frames(
    config,
    mode="automatic",
    algo="kmeans",
    crop=False,
    userfeedback=True,
    cluster_step=1,
    cluster_resizewidth=30,
    cluster_color=False,
    opencv=True,
    slider_width=25,
):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.\n
    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.

    The provided function selects frames from the videos either in a random, temporally uniformly distributed way (uniform), \n
    by clustering based on visual appearance (k-means), or by manual selection.

    Three important parameters for automatic extraction (numframes2pick, start, and stop) are set in the config file.

    Please refer to the user guide for more details on methods and parameters https://www.nature.com/articles/s41596-019-0176-0
    or the preprint: https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.

    algo : string
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is
        only required for ``automatic`` mode and the default is ``kmeans``. For uniform, frames are picked in a temporally uniform way; kmeans performs clustering on downsampled frames (see user guide for details).
        Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this.

    crop : bool, optional
        If True, video frames are cropped according to the corresponding coordinates stored in the config.yaml.
        Alternatively, if cropping coordinates are not known yet, crop='GUI' triggers a user interface
        where the cropping area can be manually drawn and saved.

    userfeedback: bool, optional
        If this is set to false during automatic mode then frames for all videos are extracted. The user can set this to true, which will result in a dialog,
        where the user is asked for each video if (additional/any) frames from this video should be extracted. Use this, e.g. if you have already labeled
        some folders and want to extract data for new videos.

    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).

    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could only use every nth frame (set by: cluster_step). This saves memory before clustering can start, however,
        reading the individual frames takes longer due to the skipping.

    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases
        the computational complexity.

    opencv: bool, default: True
        Uses OpenCV for loading & extracting frames (otherwise moviepy (legacy)).

    slider_width: number, default: 25
        Width of the video frames slider, in percent of window

    Examples
    --------
    for selecting frames automatically with 'kmeans' and cropping the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    for selecting frames automatically with 'kmeans' and defining the cropping area at runtime.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans','GUI')
    --------
    for selecting frames automatically with 'kmeans' and considering the color information.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    for selecting frames automatically with 'uniform' and cropping the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    for selecting frames manually,
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    --------
    for selecting frames manually, with a 60% wide frames slider
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual', slider_width=60)

    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphic user interface to choose
    if you need to crop or not.
    --------

    """
    import os
    import sys
    import numpy as np
    from pathlib import Path
    from skimage import io
    from skimage.util import img_as_ubyte
    from deeplabcut.utils import frameselectiontools
    from deeplabcut.utils import auxiliaryfunctions

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.generate_training_dataset import frame_extraction_toolbox
        from deeplabcut.utils import select_crop_parameters

        frame_extraction_toolbox.show(config, slider_width)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")

        numframes2pick = cfg["numframes2pick"]
        start = cfg["start"]
        stop = cfg["stop"]

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct it in the config file."
            )
        if numframes2pick < 1 and not int(numframes2pick):
            raise Exception(
                "Perhaps consider extracting more, or a natural number of frames."
            )

        videos = cfg["video_sets"].keys()
        if opencv:
            import cv2
        else:
            from moviepy.editor import VideoFileClip

        has_failed = []
        for vindex, video in enumerate(videos):
            if userfeedback:
                print(
                    "Do you want to extract (perhaps additional) frames for video:",
                    video,
                    "?",
                )
                askuser = input("yes/no")
            else:
                askuser = "yes"  # no user feedback requested, so extract frames for every video

            if (askuser == "y" or askuser == "yes" or askuser == "Ja"
                    or askuser == "ha" or askuser == "oui"
                    or askuser == "ouais"):  # multilanguage support :)
                if opencv:
                    cap = cv2.VideoCapture(video)
                    fps = cap.get(
                        5
                    )  # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
                    nframes = int(cap.get(7))
                else:
                    # Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    nframes = int(np.ceil(clip.duration * 1.0 / fps))
                if not nframes:
                    print("Video could not be opened. Skipping...")
                    continue

                indexlength = int(np.ceil(np.log10(nframes)))

                fname = Path(video)
                output_path = Path(
                    config).parents[0] / "labeled-data" / fname.stem

                if output_path.exists():
                    if len(os.listdir(output_path)):
                        if userfeedback:
                            askuser = input(
                                "The directory already contains some frames. Do you want to add to it?(yes/no): "
                            )
                        if not (askuser == "y" or askuser == "yes"
                                or askuser == "Y" or askuser == "Yes"):
                            sys.exit("Delete the frames and try again later!")

                if crop == "GUI":
                    cfg = select_cropping_area(config, [video])
                coords = cfg["video_sets"][video]["crop"].split(",")
                if crop and not opencv:
                    clip = clip.crop(
                        y1=int(coords[2]),
                        y2=int(coords[3]),
                        x1=int(coords[0]),
                        x2=int(coords[1]),
                    )
                elif not crop:
                    coords = None

                print("Extracting frames based on %s ..." % algo)
                if algo == "uniform":
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick, start, stop)
                elif algo == "kmeans":
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap,
                            numframes2pick,
                            start,
                            stop,
                            crop,
                            coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color,
                        )
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip,
                            numframes2pick,
                            start,
                            stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color,
                        )
                else:
                    print(
                        "Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
                    )
                    frames2pick = []

                if not len(frames2pick):
                    print("Frame selection failed...")
                    return

                output_path = (Path(config).parents[0] / "labeled-data" /
                               Path(video).stem)
                is_valid = []
                if opencv:
                    for index in frames2pick:
                        cap.set(1, index)  # extract a particular frame
                        ret, frame = cap.read()
                        if ret:
                            image = img_as_ubyte(
                                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            img_name = (str(output_path) + "/img" +
                                        str(index).zfill(indexlength) + ".png")
                            if crop:
                                io.imsave(
                                    img_name,
                                    image[int(coords[2]):int(coords[3]),
                                          int(coords[0]):int(coords[1]), :, ],
                                )  # y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]
                            else:
                                io.imsave(img_name, image)
                            is_valid.append(True)
                        else:
                            print("Frame", index, " not found!")
                            is_valid.append(False)
                    cap.release()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1.0 / clip.fps))
                            img_name = (str(output_path) + "/img" +
                                        str(index).zfill(indexlength) + ".png")
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  # constant image
                                print(
                                    "Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
                                )
                            is_valid.append(True)
                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")
                            is_valid.append(False)
                    clip.close()
                    del clip

                if not any(is_valid):
                    has_failed.append(True)
                else:
                    has_failed.append(False)

        if all(has_failed):
            print("Frame extraction failed. Video files must be corrupted.")
            return
        elif any(has_failed):
            print("Although most frames were extracted, some were invalid.")
        else:
            print("Frames were successfully extracted.")
        print(
            "\nYou can now label the frames using the function 'label_frames' "
            "(if you extracted enough frames for all videos).")
    else:
        print(
            "Invalid MODE. Choose either 'manual' or 'automatic'. "
            "Check ``help(deeplabcut.extract_frames)`` on python and "
            "``deeplabcut.extract_frames?`` for ipython/jupyter notebooks for more details.")
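
# frameselectiontools.UniformFramescv2 / UniformFrames are imported elsewhere and are not part of
# this listing. Purely as an illustrative sketch (not the actual implementation), a uniform
# selector only needs to pick `numframes2pick` indices spread over the [start, stop] fraction of
# the clip:
import numpy as np

def uniform_frame_indices(n_frames, numframes2pick, start=0.0, stop=1.0):
    """Hypothetical helper: evenly sample frame indices from the [start, stop] fraction."""
    first = int(np.floor(n_frames * start))
    last = int(np.ceil(n_frames * stop))
    candidates = np.arange(first, last)
    if len(candidates) <= numframes2pick:
        return candidates
    # sample without replacement so no frame is extracted twice
    return np.sort(np.random.choice(candidates, numframes2pick, replace=False))

# e.g. pick 20 indices from the middle 80% of a 1000-frame clip:
# indices = uniform_frame_indices(1000, 20, start=0.1, stop=0.9)
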
def create_labeled_video(config,
                         videos,
                         shuffle=1,
                         trainingsetindex=0,
                         videotype='avi',
                         save_frames=False,
                         delete=False,
                         displayedbodyparts='all',
                         codec='mp4v'):
    """
    Labels the bodyparts in a video. Make sure the video is already analyzed by the function 'analyze_video'

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths of the videos to analyze.

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\nOnly videos with this extension are analyzed. The default is ``.avi``

    save_frames: bool
        If true, creates each frame individually and then combines them into a video. This variant is relatively slow as
        it stores all individual frames. However, it uses matplotlib to create the frames and is therefore much more flexible (one can set transparency of markers, crop, and easily customize).

    delete: bool
        If true then the individual frames created during the video generation will be deleted.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that is a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    codec: string, optional
        Codec for the labeled video. For options see http://www.fourcc.org/codecs.php (depends on your ffmpeg installation).

    Examples
    --------
    If you want to create the labeled video for only 1 video
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------

    If you want to create the labeled video for only 1 video and store the individual frames
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],save_frames=True)
    --------

    If you want to create the labeled video for multiple videos
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------

    If you want to create the labeled video for all the videos (as .avi extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])

    --------
    If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.
    >>> deeplabcut.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')

    --------

    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    DLCscorer = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction
    )  #automatically loads corresponding model (even training iteration based on snapshot index)

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)

    if len(videos) == 1 and os.path.isdir(videos[0]):  # a single directory was passed
        print("Analyzing all the videos in the directory")
        videofolder = videos[0]
        os.chdir(videofolder)
        Videos = np.sort(
            [fn for fn in os.listdir(os.curdir) if (videotype in fn)])
        print("Starting ", videofolder, Videos)
    else:
        Videos = videos

    for video in Videos:
        videofolder = Path(video).parents[
            0]  #where your folder with videos is.
        #os.chdir(str(videofolder))
        videotype = Path(video).suffix
        print("Starting % ", videofolder, videos)
        vname = str(Path(video).stem)
        if os.path.isfile(
                os.path.join(str(videofolder),
                             vname + DLCscorer + '_labeled.mp4')):
            print("Labeled video already created.")
        else:
            print("Loading ", video, "and data.")
            dataname = os.path.join(str(videofolder),
                                    vname + DLCscorer + '.h5')
            try:
                Dataframe = pd.read_hdf(dataname)
                metadata = auxiliaryfunctions.LoadVideoMetadata(dataname)
                #print(metadata)
                datanames = [dataname]
            except FileNotFoundError:
                datanames = [
                    fn for fn in os.listdir(os.curdir)
                    if (vname in fn) and (".h5" in fn) and "resnet" in fn
                ]
                if len(datanames) == 0:
                    print("The video was not analyzed with this scorer:",
                          DLCscorer)
                    print(
                        "No other scorers were found, please use the function 'analyze_videos' first."
                    )
                elif len(datanames) > 0:
                    print("The video was not analyzed with this scorer:",
                          DLCscorer)
                    print("Other scorers were found, however:", datanames)
                    DLCscorer = 'DeepCut' + (
                        datanames[0].split('DeepCut')[1]).split('.h5')[0]
                    print("Creating labeled video for:", DLCscorer,
                          " instead.")
                    Dataframe = pd.read_hdf(datanames[0])
                    metadata = auxiliaryfunctions.LoadVideoMetadata(
                        datanames[0])

            if len(datanames) > 0:
                #Loading cropping data used during analysis
                cropping = metadata['data']["cropping"]
                [x1, x2, y1, y2] = metadata['data']["cropping_parameters"]
                print(cropping, x1, x2, y1, y2)

                if save_frames == True:
                    tmpfolder = os.path.join(str(videofolder), 'temp-' + vname)
                    auxiliaryfunctions.attempttomakefolder(tmpfolder)
                    clip = vp(video)
                    #CreateVideoSlow(clip,Dataframe,tmpfolder,cfg["dotsize"],cfg["colormap"],cfg["alphavalue"],cfg["pcutoff"],cfg["cropping"],cfg["x1"],cfg["x2"],cfg["y1"],cfg["y2"],delete,DLCscorer,bodyparts)
                    CreateVideoSlow(clip, Dataframe, tmpfolder, cfg["dotsize"],
                                    cfg["colormap"], cfg["alphavalue"],
                                    cfg["pcutoff"], cropping, x1, x2, y1, y2,
                                    delete, DLCscorer, bodyparts)
                else:
                    clip = vp(fname=video,
                              sname=os.path.join(vname + DLCscorer +
                                                 '_labeled.mp4'),
                              codec=codec)
                    if cropping:
                        print(
                            "Fast video creation has currently not been implemented for cropped videos. Please use 'save_frames=True' to get the video."
                        )
                    else:
                        CreateVideo(clip, Dataframe, cfg["pcutoff"],
                                    cfg["dotsize"], cfg["colormap"], DLCscorer,
                                    bodyparts, cropping, x1, x2, y1,
                                    y2)  #NEED TO ADD CROPPING!
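
# CreateVideo / CreateVideoSlow are imported elsewhere and are not part of this listing. As a
# rough, hypothetical sketch of the fast (OpenCV) path only: read each frame, draw a dot for
# every bodypart whose likelihood exceeds pcutoff, and write the frame back out. The column
# layout (scorer, bodypart, x/y/likelihood) follows the DLC .h5 file read above.
import cv2
import numpy as np

def draw_predictions_on_video(video, df, pcutoff=0.6, dotsize=5, out="labeled.mp4"):
    """Illustrative sketch only; the real CreateVideo may differ in details."""
    cap = cv2.VideoCapture(video)
    fps = cap.get(cv2.CAP_PROP_FPS)
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    writer = cv2.VideoWriter(out, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
    scorer = df.columns.get_level_values(0)[0]
    bodyparts = df.columns.get_level_values(1).unique()
    for i in range(len(df)):
        ret, frame = cap.read()
        if not ret:
            break
        for bp in bodyparts:
            x, y, p = df[scorer][bp].iloc[i][["x", "y", "likelihood"]]
            if p > pcutoff and np.isfinite(x) and np.isfinite(y):
                cv2.circle(frame, (int(x), int(y)), dotsize, (0, 0, 255), -1)  # red marker
        writer.write(frame)
    cap.release()
    writer.release()
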
Example #24
def triangulate(
    config,
    video_path,
    videotype="avi",
    filterpredictions=True,
    filtertype="median",
    gputouse=None,
    destfolder=None,
    save_as_csv=False,
):
    """
    This function triangulates the detected DLC-keypoints from the two camera views
    using the camera matrices (derived from calibration) to calculate 3D predictions.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    video_path : string/list of list
        Full path of the directory where videos are saved. If the user wants to analyze
        only a pair of videos, the user needs to pass them as a list of list of videos,
        i.e. [['video1-camera-1.avi','video1-camera-2.avi']]

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.\n
        Only videos with this extension are analyzed. The default is ``.avi``

    filterpredictions: Bool, optional
        Filter the predictions with filter specified by "filtertype". If specified it
        should be either ``True`` or ``False``.

    filtertype: string
        Select which filter to use: 'arima' or 'median' (currently supported).

    gputouse: int, optional. Natural number indicating the number of your GPU (see number in nvidia-smi).
        If you do not have a GPU put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video)

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``

    Example
    -------
    Linux/MacOS
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'/data/project1/videos/')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['/data/project1/videos/video1-camera-1.avi','/data/project1/videos/video1-camera-2.avi'],['/data/project1/videos/video2-camera-1.avi','/data/project1/videos/video2-camera-2.avi']])


    Windows
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'C:\\yourusername\\rig-95\\Videos')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['C:\\yourusername\\rig-95\\Videos\\video1-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video1-camera-2.avi'],['C:\\yourusername\\rig-95\\Videos\\video2-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video2-camera-2.avi']])
    """
    from deeplabcut.pose_estimation_tensorflow import predict_videos
    from deeplabcut.post_processing import filtering

    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    scorer_3d = cfg_3d["scorername_3d"]

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str("config_file_" + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(
                str("It seems the file specified in the variable config_file_"
                    + str(cam)) +
                " does not exist. Please edit the config file with correct file path and retry."
            )

    # flag to check if the video_path variable is a string or a list of list
    flag = False  # assumes that video path is a list
    if isinstance(video_path, str):
        flag = True
        video_list = auxiliaryfunctions_3d.get_camerawise_videos(
            video_path, cam_names, videotype=videotype)
    else:
        video_list = video_path

    if video_list == []:
        print("No videos found in the specified video path.", video_path)
        print(
            "Please make sure that the video names contain the camera names as entered in the config file, or"
        )
        print(
            "that the videotype matches the videos in the path; I was looking for:",
            videotype,
        )

    print("List of pairs:", video_list)
    scorer_name = {}
    run_triangulate = False
    for i in range(len(video_list)):
        dataname = []
        for j in range(len(video_list[i])):  # looping over cameras
            if cam_names[j] in video_list[i][j]:
                print("Analyzing video %s using %s" %
                      (video_list[i][j], str("config_file_" + cam_names[j])))

                config_2d = snapshots[cam_names[j]]
                cfg = auxiliaryfunctions.read_config(config_2d)
                shuffle = cfg_3d[str("shuffle_" + cam_names[j])]
                trainingsetindex = cfg_3d[str("trainingsetindex_" +
                                              cam_names[j])]
                trainFraction = cfg["TrainingFraction"][trainingsetindex]
                if flag:
                    video = os.path.join(video_path, video_list[i][j])
                else:
                    video_path = str(Path(video_list[i][j]).parents[0])
                    video = os.path.join(video_path, video_list[i][j])

                if destfolder is None:
                    destfolder = str(Path(video).parents[0])

                vname = Path(video).stem
                prefix = str(vname).split(cam_names[j])[0]
                suffix = str(vname).split(cam_names[j])[-1]
                if prefix == "":
                    pass
                elif prefix[-1] == "_" or prefix[-1] == "-":
                    prefix = prefix[:-1]

                if suffix == "":
                    pass
                elif suffix[0] == "_" or suffix[0] == "-":
                    suffix = suffix[1:]

                if prefix == "":
                    output_file = os.path.join(destfolder, suffix)
                else:
                    if suffix == "":
                        output_file = os.path.join(destfolder, prefix)
                    else:
                        output_file = os.path.join(destfolder,
                                                   prefix + "_" + suffix)

                output_filename = os.path.join(
                    output_file + "_" + scorer_3d
                )  # Check if the videos are already analyzed for 3d
                if os.path.isfile(output_filename + ".h5"):
                    if save_as_csv is True and not os.path.exists(
                            output_filename + ".csv"):
                        # In case user adds save_as_csv is True after triangulating
                        pd.read_hdf(output_filename + ".h5").to_csv(
                            str(output_filename + ".csv"))

                    print(
                        "Already analyzed...Checking the meta data for any change in the camera matrices and/or scorer names",
                        vname,
                    )
                    pickle_file = str(output_filename + "_meta.pickle")
                    metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(
                        pickle_file)
                    (
                        img_path,
                        path_corners,
                        path_camera_matrix,
                        path_undistort,
                        _,
                    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
                    path_stereo_file = os.path.join(path_camera_matrix,
                                                    "stereo_params.pickle")
                    stereo_file = auxiliaryfunctions.read_pickle(
                        path_stereo_file)
                    cam_pair = str(cam_names[0] + "-" + cam_names[1])
                    if_video_analyzed = False  # variable to keep track if the video was already analyzed
                    # Check for the camera matrix
                    for k in metadata_["stereo_matrix"].keys():
                        if np.all(metadata_["stereo_matrix"][k] ==
                                  stereo_file[cam_pair][k]):
                            pass
                        else:
                            run_triangulate = True

                    # Check for scorer names in the pickle file of 3d output
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations="unknown")

                    if (metadata_["scorer_name"][cam_names[j]] == DLCscorer
                        ):  # TODO: CHECK FOR BOTH?
                        if_video_analyzed = True
                    elif metadata_["scorer_name"][
                            cam_names[j]] == DLCscorerlegacy:
                        if_video_analyzed = True
                    else:
                        if_video_analyzed = False
                        run_triangulate = True

                    if if_video_analyzed:
                        print("This file is already analyzed!")
                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))
                        scorer_name[cam_names[j]] = DLCscorer
                    else:
                        # Analyze video if score name is different
                        DLCscorer = predict_videos.analyze_videos(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            gputouse=gputouse,
                            destfolder=destfolder,
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                        if_video_analyzed = False
                        run_triangulate = True
                        if filterpredictions:
                            filtering.filterpredictions(
                                config_2d,
                                [video],
                                videotype=videotype,
                                shuffle=shuffle,
                                trainingsetindex=trainingsetindex,
                                filtertype=filtertype,
                                destfolder=destfolder,
                            )

                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))

                else:  # need to do the whole jam.
                    DLCscorer = predict_videos.analyze_videos(
                        config_2d,
                        [video],
                        videotype=videotype,
                        shuffle=shuffle,
                        trainingsetindex=trainingsetindex,
                        gputouse=gputouse,
                        destfolder=destfolder,
                    )
                    scorer_name[cam_names[j]] = DLCscorer
                    run_triangulate = True
                    print(destfolder, vname, DLCscorer)
                    if filterpredictions:
                        filtering.filterpredictions(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            filtertype=filtertype,
                            destfolder=destfolder,
                        )
                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))

        if run_triangulate:
            #        if len(dataname)>0:
            # undistort points for this pair
            print("Undistorting...")
            (
                dataFrame_camera1_undistort,
                dataFrame_camera2_undistort,
                stereomatrix,
                path_stereo_file,
            ) = undistort_points(config, dataname,
                                 str(cam_names[0] + "-" + cam_names[1]))
            if len(dataFrame_camera1_undistort) != len(
                    dataFrame_camera2_undistort):
                import warnings

                warnings.warn(
                    "The number of frames does not match between the two videos. Please make sure that your videos have the same number of frames and then retry! Excluding the extra frames from the longer video."
                )
                if len(dataFrame_camera1_undistort) > len(
                        dataFrame_camera2_undistort):
                    dataFrame_camera1_undistort = dataFrame_camera1_undistort[:len(
                        dataFrame_camera2_undistort)]
                if len(dataFrame_camera2_undistort) > len(
                        dataFrame_camera1_undistort):
                    dataFrame_camera2_undistort = dataFrame_camera2_undistort[:len(
                        dataFrame_camera1_undistort)]
            #                raise Exception("The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry!")
            X_final = []
            triangulate = []
            scorer_cam1 = dataFrame_camera1_undistort.columns.get_level_values(
                0)[0]
            scorer_cam2 = dataFrame_camera2_undistort.columns.get_level_values(
                0)[0]
            df_3d, scorer_3d, bodyparts = auxiliaryfunctions_3d.create_empty_df(
                dataFrame_camera1_undistort, scorer_3d, flag="3d")
            P1 = stereomatrix["P1"]
            P2 = stereomatrix["P2"]

            print("Computing the triangulation...")
            for bpindex, bp in enumerate(bodyparts):
                # Extract the indices of frames where the likelihood of a bodypart in either camera is less than the pcutoff
                likelihoods = np.array([
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["likelihood"].values[:],
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["likelihood"].values[:],
                ])
                likelihoods = likelihoods.T

                # Flag frames where the likelihood in either view is below the pcutoff (np.any); use np.all to require both views instead
                low_likelihood_frames = np.any(likelihoods < pcutoff, axis=1)
                # low_likelihood_frames = np.all(likelihoods < pcutoff, axis=1)

                low_likelihood_frames = np.where(low_likelihood_frames)[0]
                points_cam1_undistort = np.array([
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["x"].values[:],
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["y"].values[:],
                ])
                points_cam1_undistort = points_cam1_undistort.T

                # For the cam1 camera: assign NaNs to x and y values of a bodypart where the likelihood is below the pcutoff
                points_cam1_undistort[low_likelihood_frames] = np.nan, np.nan
                points_cam1_undistort = np.expand_dims(points_cam1_undistort,
                                                       axis=1)

                points_cam2_undistort = np.array([
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["x"].values[:],
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["y"].values[:],
                ])
                points_cam2_undistort = points_cam2_undistort.T

                # For the cam2 camera: assign NaNs to x and y values of a bodypart where the likelihood is below the pcutoff
                points_cam2_undistort[low_likelihood_frames] = np.nan, np.nan
                points_cam2_undistort = np.expand_dims(points_cam2_undistort,
                                                       axis=1)

                X_l = auxiliaryfunctions_3d.triangulatePoints(
                    P1, P2, points_cam1_undistort, points_cam2_undistort)

                # ToDo: speed up func. below by saving in numpy.array
                X_final.append(X_l)
            triangulate.append(X_final)
            triangulate = np.asanyarray(triangulate)
            metadata = {}
            metadata["stereo_matrix"] = stereomatrix
            metadata["stereo_matrix_file"] = path_stereo_file
            metadata["scorer_name"] = {
                cam_names[0]: scorer_name[cam_names[0]],
                cam_names[1]: scorer_name[cam_names[1]],
            }

            # Fill the 3D dataframe with the triangulated x, y, z coordinates per bodypart
            for bpindex, bp in enumerate(bodyparts):
                df_3d.iloc[:][scorer_3d, bp, "x"] = triangulate[0, bpindex,
                                                                0, :]
                df_3d.iloc[:][scorer_3d, bp, "y"] = triangulate[0, bpindex,
                                                                1, :]
                df_3d.iloc[:][scorer_3d, bp, "z"] = triangulate[0, bpindex,
                                                                2, :]

            df_3d.to_hdf(
                str(output_filename + ".h5"),
                "df_with_missing",
                format="table",
                mode="w",
            )
            auxiliaryfunctions_3d.SaveMetadata3d(
                str(output_filename + "_meta.pickle"), metadata)

            if save_as_csv:
                df_3d.to_csv(str(output_filename + ".csv"))

            print("Triangulated data for video", video_list[i])
            print("Results are saved under: ", destfolder)
            # have to make the dest folder none so that it can be updated for a new pair of videos
            if destfolder == str(Path(video).parents[0]):
                destfolder = None

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print(
            "Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d"
        )
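
# auxiliaryfunctions_3d.triangulatePoints is imported elsewhere and not shown in this listing.
# One common way to triangulate a matched pair of undistorted 2D points with the projection
# matrices P1 and P2 is OpenCV's cv2.triangulatePoints; the sketch below is an assumption of
# that approach, not the library's actual implementation. Inputs are (N, 1, 2) arrays as
# produced by cv2.undistortPoints above.
import cv2
import numpy as np

def triangulate_pair(P1, P2, pts_cam1, pts_cam2):
    """Sketch: triangulate matched, undistorted points from two views into homogeneous 3D."""
    p1 = pts_cam1.reshape(-1, 2).T.astype(np.float64)  # shape (2, N)
    p2 = pts_cam2.reshape(-1, 2).T.astype(np.float64)
    X_h = cv2.triangulatePoints(P1, P2, p1, p2)  # homogeneous coordinates, shape (4, N)
    X_h /= X_h[3]  # normalize so the last row is 1; rows 0-2 are then X, Y, Z
    return X_h
# A (>=3, N) per-bodypart array of this kind would be consistent with how
# triangulate[0, bpindex, 0/1/2, :] is mapped onto x, y, z in df_3d above.
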
Example #25
def extract_save_all_maps(
    config,
    shuffle=1,
    trainingsetindex=0,
    comparisonbodyparts="all",
    all_paf_in_one=True,
    gputouse=None,
    rescale=False,
    Indices=None,
    modelprefix="",
    dest_folder=None,
):
    """
    Extracts the scoremap, location refinement field and part affinity field predictions of the model. The maps
    will be rescaled to the size of the input image and stored in the corresponding model folder in /evaluation-results.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    shuffle: integer
        Integer specifying the shuffle index of the training dataset. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    comparisonbodyparts: list of bodyparts, Default is "all".
        The average error will be computed for those body parts only (Has to be a subset of the body parts).

    all_paf_in_one : bool
        By default, all part affinity fields are displayed on a single frame.
        If false, individual fields are shown on separate frames.

    Indices: list of integers, optional (default=None)
        For which images should the scmap/locref and paf be computed? Give a list of image indices.

    nplots_per_row: int, optional (default=None)
        Number of plots per row in grid plots. By default, calculated to approximate a squared grid of plots

    Examples
    --------
    Calculate maps for images 0, 1 and 33.
    >>> deeplabcut.extract_save_all_maps('/analysis/project/reaching-task/config.yaml', shuffle=1,Indices=[0,1,33])

    """

    from deeplabcut.utils.auxiliaryfunctions import (
        read_config,
        attempttomakefolder,
        GetEvaluationFolder,
        IntersectionofBodyPartsandOnesGivenbyUser,
    )
    from tqdm import tqdm

    cfg = read_config(config)
    data = extract_maps(config, shuffle, trainingsetindex, gputouse, rescale,
                        Indices, modelprefix)

    comparisonbodyparts = IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts)

    print("Saving plots...")
    for frac, values in data.items():
        if not dest_folder:
            dest_folder = os.path.join(
                cfg["project_path"],
                str(
                    GetEvaluationFolder(frac,
                                        shuffle,
                                        cfg,
                                        modelprefix=modelprefix)),
                "maps",
            )
        attempttomakefolder(dest_folder)
        filepath = "{imname}_{map}_{label}_{shuffle}_{frac}_{snap}.png"
        dest_path = os.path.join(dest_folder, filepath)
        for snap, maps in values.items():
            for imagenr in tqdm(maps):
                (
                    image,
                    scmap,
                    locref,
                    paf,
                    bptnames,
                    pafgraph,
                    impath,
                    trainingframe,
                ) = maps[imagenr]
                label = "train" if trainingframe else "test"
                imname = os.path.split(os.path.splitext(impath)[0])[1]
                scmap, (locref_x, locref_y), paf = resize_all_maps(
                    image, scmap, locref, paf)
                to_plot = [
                    i for i, bpt in enumerate(bptnames)
                    if bpt in comparisonbodyparts
                ]
                list_of_inds = []
                for n, edge in enumerate(pafgraph):
                    if any(ind in to_plot for ind in edge):
                        list_of_inds.append([(2 * n, 2 * n + 1),
                                             (bptnames[edge[0]],
                                              bptnames[edge[1]])])
                if len(to_plot) > 1:
                    map_ = scmap[:, :, to_plot].sum(axis=2)
                    locref_x_ = locref_x[:, :, to_plot].sum(axis=2)
                    locref_y_ = locref_y[:, :, to_plot].sum(axis=2)
                elif len(to_plot) == 1 and len(bptnames) > 1:
                    map_ = scmap[:, :, to_plot]
                    locref_x_ = locref_x[:, :, to_plot]
                    locref_y_ = locref_y[:, :, to_plot]
                else:
                    map_ = scmap[:, :]
                    locref_x_ = locref_x[:, :]
                    locref_y_ = locref_y[:, :]
                fig1, _ = visualize_scoremaps(image, map_)
                temp = dest_path.format(
                    imname=imname,
                    map="scmap",
                    label=label,
                    shuffle=shuffle,
                    frac=frac,
                    snap=snap,
                )
                fig1.savefig(temp)

                fig2, _ = visualize_locrefs(image, map_, locref_x_, locref_y_)
                temp = dest_path.format(
                    imname=imname,
                    map="locref",
                    label=label,
                    shuffle=shuffle,
                    frac=frac,
                    snap=snap,
                )
                fig2.savefig(temp)

                if paf is not None:
                    if not all_paf_in_one:
                        for inds, names in list_of_inds:
                            fig3, _ = visualize_paf(image, paf[:, :, [inds]])
                            temp = dest_path.format(
                                imname=imname,
                                map=f'paf_{"_".join(names)}',
                                label=label,
                                shuffle=shuffle,
                                frac=frac,
                                snap=snap,
                            )
                            fig3.savefig(temp)
                    else:
                        inds = [elem[0] for elem in list_of_inds]
                        n_inds = len(inds)
                        cmap = plt.cm.get_cmap(cfg["colormap"], n_inds)
                        colors = cmap(range(n_inds))
                        fig3, _ = visualize_paf(image,
                                                paf[:, :, inds],
                                                colors=colors)
                        temp = dest_path.format(
                            imname=imname,
                            map=f"paf",
                            label=label,
                            shuffle=shuffle,
                            frac=frac,
                            snap=snap,
                        )
                        fig3.savefig(temp)
                plt.close("all")
Example #26
def undistort_points(config, dataframe, camera_pair):
    cfg_3d = auxiliaryfunctions.read_config(config)
    (
        img_path,
        path_corners,
        path_camera_matrix,
        path_undistort,
        _,
    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    """
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    """
    if True:
        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        dataframe_cam1 = pd.read_hdf(dataframe[0])
        dataframe_cam2 = pd.read_hdf(dataframe[1])
        scorer_cam1 = dataframe_cam1.columns.get_level_values(0)[0]
        scorer_cam2 = dataframe_cam2.columns.get_level_values(0)[0]
        path_stereo_file = os.path.join(path_camera_matrix,
                                        "stereo_params.pickle")
        stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
        mtx_l = stereo_file[camera_pair]["cameraMatrix1"]
        dist_l = stereo_file[camera_pair]["distCoeffs1"]

        mtx_r = stereo_file[camera_pair]["cameraMatrix2"]
        dist_r = stereo_file[camera_pair]["distCoeffs2"]

        R1 = stereo_file[camera_pair]["R1"]
        P1 = stereo_file[camera_pair]["P1"]

        R2 = stereo_file[camera_pair]["R2"]
        P2 = stereo_file[camera_pair]["P2"]

        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        (
            dataFrame_cam1_undistort,
            scorer_cam1,
            bodyparts,
        ) = auxiliaryfunctions_3d.create_empty_df(dataframe_cam1,
                                                  scorer_cam1,
                                                  flag="2d")
        (
            dataFrame_cam2_undistort,
            scorer_cam2,
            bodyparts,
        ) = auxiliaryfunctions_3d.create_empty_df(dataframe_cam2,
                                                  scorer_cam2,
                                                  flag="2d")

        for bpindex, bp in tqdm(enumerate(bodyparts)):
            # Undistort the points from the cam1 camera
            points_cam1 = dataframe_cam1.xs(bp, level="bodyparts",
                                            axis=1).values[:, :2]
            points_cam1_remapped = cv2.undistortPoints(
                src=points_cam1.astype(np.float32),
                cameraMatrix=mtx_l,
                distCoeffs=dist_l,
                P=P1,
                R=R1,
            )
            dataFrame_cam1_undistort.loc(
                axis=1)[scorer_cam1, bp,
                        ["x", "y"]] = points_cam1_remapped.squeeze()
            dataFrame_cam1_undistort.loc(
                axis=1)[scorer_cam1, bp, "likelihood"] = dataframe_cam1.xs(
                    [bp, "likelihood"], level=["bodyparts",
                                               "coords"], axis=1).values

            # Undistorting the points from cam2 camera
            points_cam2 = dataframe_cam2.xs(bp, level="bodyparts",
                                            axis=1).values[:, :2]
            points_cam2_remapped = cv2.undistortPoints(
                src=points_cam2.astype(np.float32),
                cameraMatrix=mtx_r,
                distCoeffs=dist_r,
                P=P2,
                R=R2,
            )
            dataFrame_cam2_undistort.loc(
                axis=1)[scorer_cam2, bp,
                        ["x", "y"]] = points_cam2_remapped.squeeze()
            dataFrame_cam2_undistort.loc(
                axis=1)[scorer_cam2, bp, "likelihood"] = dataframe_cam2.xs(
                    [bp, "likelihood"], level=["bodyparts",
                                               "coords"], axis=1).values

        # Sort the columns of the undistorted dataframes (intermediate saving is currently disabled, see above)
        dataFrame_cam1_undistort.sort_index(inplace=True)
        dataFrame_cam2_undistort.sort_index(inplace=True)

    return (
        dataFrame_cam1_undistort,
        dataFrame_cam2_undistort,
        stereo_file[camera_pair],
        path_stereo_file,
    )
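
# For reference, cv2.undistortPoints expects points shaped (N, 1, 2). Without R/P it returns
# normalized camera coordinates; passing R and P from stereo rectification (as above) maps the
# points into the rectified image plane, which is what the triangulation step expects. A tiny
# self-contained sketch with made-up intrinsics (illustration only, not project data):
import cv2
import numpy as np

K = np.array([[800.0, 0.0, 320.0],
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])               # hypothetical pinhole intrinsics
dist = np.array([-0.2, 0.05, 0.0, 0.0, 0.0])  # mild radial distortion, made up

pts = np.array([[[100.0, 80.0]], [[320.0, 240.0]]], dtype=np.float32)  # shape (N, 1, 2)
normalized = cv2.undistortPoints(pts, cameraMatrix=K, distCoeffs=dist)       # normalized coords
pixels = cv2.undistortPoints(pts, cameraMatrix=K, distCoeffs=dist, P=K)      # back to pixel coords
print(normalized.squeeze(), pixels.squeeze())
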
Example #27
def extract_outlier_frames(config, videos, videotype='avi', shuffle=1, trainingsetindex=0,
                           outlieralgorithm='jump', comparisonbodyparts='all', epsilon=20,
                           p_bound=.01, ARdegree=3, MAdegree=1, alpha=.01,
                           extractionalgorithm='kmeans', automatic=False,
                           cluster_resizewidth=30, cluster_color=False, opencv=True,
                           savelabeled=True, destfolder=None):
    """
    Extracts the outlier frames in case the predictions are not correct for a certain video, from the cropped video running from
    start to stop as defined in config.yaml.

    Another crucial parameter in config.yaml is how many frames to extract: 'numframes2pick'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle : int, optional
        The shuffle index of the training dataset. The extracted frames will be stored in the labeled-dataset for
        the corresponding shuffle of the training dataset. Default is set to 1

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    outlieralgorithm: 'fitting', 'jump', 'uncertain', or 'manual'
        String specifying the algorithm used to detect the outliers. Currently, deeplabcut supports three methods + a manual GUI option. 'Fitting'
        fits an Auto-Regressive Integrated Moving Average (ARIMA) model to the data and computes the distance to the estimated data. Distances larger than
        epsilon are then potentially identified as outliers. The method 'jump' identifies jumps larger than 'epsilon' in any body part; and 'uncertain'
        looks for frames with confidence below p_bound. The default is set to ``jump``.

    comparisonbodyparts: list of strings, optional
        This selects the body parts for which the comparisons with the outliers are carried out. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that is a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    p_bound: float between 0 and 1, optional
        For outlieralgorithm 'uncertain' this parameter defines the likelihood threshold below which a body part will be flagged as a putative outlier.

    epsilon: float, optional
        Meaning depends on outlieralgorithm. The default is set to 20 pixels.
        For outlieralgorithm 'fitting': Float bound according to which frames are picked when the (average) body part estimate deviates from model fit
        For outlieralgorithm 'jump': Float bound specifying the distance by which body points jump from one frame to next (Euclidean distance)

    ARdegree: int, optional
        For outlieralgorithm 'fitting': Autoregressive degree of the ARIMA model. (Note we use SARIMAX without exogenous and seasonal parts)
        see https://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html

    MAdegree: int
        For outlieralgorithm 'fitting': Moving-average degree of the ARIMA model. (Note we use SARIMAX without exogenous and seasonal parts)
        See https://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html

    alpha: float
        Significance level for detecting outliers based on confidence interval of fitted ARIMA model. Only the distance is used however.

    extractionalgorithm : string, optional
        String specifying the algorithm to use for selecting the frames from the identified putative outlier frames. Currently, deeplabcut
        supports either ``kmeans`` or ``uniform`` based selection (same logic as for extract_frames).
        The default is set to ``kmeans``; if provided it must be either ``uniform`` or ``kmeans``.

    automatic : bool, optional
        Set it to True, if you want to extract outliers without being asked for user feedback.

    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).

    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases
        the computational complexity.

    opencv: bool, default: True
        Uses OpenCV for loading & extracting (otherwise moviepy (legacy))

    savelabeled: bool, default: True
        If true also saves frame with predicted labels in each folder.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    Examples
    --------
    Windows example for extracting the frames with default settings
    >>> deeplabcut.extract_outlier_frames('C:\\myproject\\reaching-task\\config.yaml',['C:\\yourusername\\rig-95\\Videos\\reachingvideo1.avi'])
    --------
    for extracting the frames with default settings
    >>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachinvideo1.avi'])
    --------
    for extracting the frames with kmeans
    >>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachinvideo1.avi'],extractionalgorithm='kmeans')
    --------
    for extracting the frames with kmeans and epsilon = 5 pixels.
    >>> deeplabcut.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachinvideo1.avi'],epsilon = 5,extractionalgorithm='kmeans')
    --------
    """

    cfg = auxiliaryfunctions.read_config(config)
    DLCscorer,DLCscorerlegacy=auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction = cfg['TrainingFraction'][trainingsetindex])
    Videos=auxiliaryfunctions.Getlistofvideos(videos,videotype)
    for video in Videos:
      if destfolder is None:
            videofolder = str(Path(video).parents[0])
      else:
            videofolder=destfolder

      notanalyzed,dataname,DLCscorer=auxiliaryfunctions.CheckifNotAnalyzed(videofolder,str(Path(video).stem),DLCscorer,DLCscorerlegacy,flag='checking')
      if notanalyzed:
          print("It seems the video has not been analyzed yet, or the video is not found! You can only refine the labels after the a video is analyzed. Please run 'analyze_video' first. Or, please double check your video file path")
      else:
          Dataframe = pd.read_hdf(dataname,'df_with_missing')
          scorer=Dataframe.columns.get_level_values(0)[0] #reading scorer from
          nframes=np.size(Dataframe.index)
          # extract min and max index based on start stop interval.
          startindex=max([int(np.floor(nframes*cfg['start'])),0])
          stopindex=min([int(np.ceil(nframes*cfg['stop'])),nframes])
          Index=np.arange(stopindex-startindex)+startindex
          #figure out body part list:
          bodyparts=auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(cfg,comparisonbodyparts)

          Indices=[]
          if outlieralgorithm=='uncertain': #necessary parameters: considered body parts and
              for bpindex,bp in enumerate(bodyparts):
                  if bp in cfg['bodyparts']: #filter [who knows what users put in...]
                      p=Dataframe[scorer][bp]['likelihood'].values[Index]
                      Indices.extend(np.where(p<p_bound)[0]+startindex) # all indices between start and stop that are below p_bound.

          elif outlieralgorithm=='jump':
              for bpindex,bp in enumerate(bodyparts):
                  if bp in cfg['bodyparts']: #filter [who knows what users put in...]
                      dx=np.diff(Dataframe[scorer][bp]['x'].values[Index])
                      dy=np.diff(Dataframe[scorer][bp]['y'].values[Index])
                      # all indices between start and stop with jump larger than epsilon (leading up to this point!)
                      Indices.extend(np.where((dx**2+dy**2)>epsilon**2)[0]+startindex+1)
          elif outlieralgorithm=='fitting':
              #deviation_dataname = str(Path(videofolder)/Path(dataname))
              # Calculate deviatons for video
              [d,o] = ComputeDeviations(Dataframe,cfg,bodyparts,scorer,dataname,p_bound,alpha,ARdegree,MAdegree)

              #Some heuristics for extracting frames based on distance:
              Indices=np.where(d>epsilon)[0] # time points with at least average difference of epsilon

              if len(Index)<cfg['numframes2pick']*2 and len(d)>cfg['numframes2pick']*2: # if too few points qualify, extract the most distant ones.
                  Indices=np.argsort(d)[::-1][:cfg['numframes2pick']*2]
          elif outlieralgorithm=='manual':
              wd = Path(config).resolve().parents[0]
              os.chdir(str(wd))
              from deeplabcut.refine_training_dataset import outlier_frame_extraction_toolbox

              outlier_frame_extraction_toolbox.show(config,video,shuffle,Dataframe,scorer,savelabeled)
          # Run always except when the outlieralgorithm == manual.
          if not outlieralgorithm=='manual':
              Indices=np.sort(list(set(Indices))) #remove repetitions.
              print("Method ", outlieralgorithm, " found ", len(Indices)," putative outlier frames.")
              print("Do you want to proceed with extracting ", cfg['numframes2pick'], " of those?")
              if outlieralgorithm=='uncertain':
                  print("If this list is very large, perhaps consider changing the paramters (start, stop, p_bound, comparisonbodyparts) or use a different method.")
              elif outlieralgorithm=='jump':
                  print("If this list is very large, perhaps consider changing the paramters (start, stop, epsilon, comparisonbodyparts) or use a different method.")
              elif outlieralgorithm=='fitting':
                  print("If this list is very large, perhaps consider changing the paramters (start, stop, epsilon, ARdegree, MAdegree, alpha, comparisonbodyparts) or use a different method.")

              if automatic==False:
                  askuser = input("yes/no")
              else:
                  askuser='yes'

              if askuser=='y' or askuser=='yes' or askuser=='Ja' or askuser=='ha': # multilanguage support :)
                  #Now extract from those Indices!
                  ExtractFramesbasedonPreselection(Indices,extractionalgorithm,Dataframe,dataname,scorer,video,cfg,config,opencv,cluster_resizewidth,cluster_color,savelabeled)
              else:
                  print("Nothing extracted, please change the parameters and start again...")
Example #28
def create_training_dataset(config,num_shuffles=1,Shuffles=None,windows2linux=False,userfeedback=False,
        trainIndexes=None,testIndexes=None,
        net_type=None,augmenter_type=None):
    """
    Creates a training dataset. Labels from all the extracted frames are merged into a single .h5 file.\n
    Only the videos included in the config file are used to create this dataset.\n

    [OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    Shuffles: list of shuffles.
        Alternatively the user can also give a list of shuffles (integers!).

    windows2linux: bool.
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a Unix system (e.g. Ubuntu, Colab, macOS) set this variable to True to convert the paths.

    userfeedback: bool, optional
        If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
        want to assure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.

    net_type: string
        Type of networks. Currently resnet_50, resnet_101, resnet_152, mobilenet_v2_1.0,mobilenet_v2_0.75, mobilenet_v2_0.5, and mobilenet_v2_0.35 are supported.

    augmenter_type: string
        Type of augmenter. Currently default, imgaug, tensorpack, and deterministic are supported.

    Example
    --------
    >>> deeplabcut.create_training_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)
    Windows:
    >>> deeplabcut.create_training_dataset('C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
    --------
    """
    from skimage import io
    import scipy.io as sio

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg['scorer']
    project_path = cfg['project_path']
    # Create path for training sets & store data there
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg) #Path concatenation OS platform independent
    auxiliaryfunctions.attempttomakefolder(Path(os.path.join(project_path,str(trainingsetfolder))),recursive=True)

    Data = merge_annotateddatasets(cfg,project_path,Path(os.path.join(project_path,trainingsetfolder)),windows2linux)
    Data = Data[scorer] #extract labeled data

    #loading & linking pretrained models
    if net_type is None: #loading & linking pretrained models
        net_type =cfg.get('default_net_type', 'resnet_50')
    else:
        if 'resnet' in net_type or 'mobilenet' in net_type:
            pass
        else:
            raise ValueError('Invalid network type:', net_type)

    if augmenter_type is None:
        augmenter_type=cfg.get('default_augmenter', 'default')
    else:
        if augmenter_type in ['default','imgaug','tensorpack','deterministic']:
            pass
        else:
            raise ValueError('Invalid augmenter type:', augmenter_type)

    import deeplabcut
    parent_path = Path(os.path.dirname(deeplabcut.__file__))
    defaultconfigfile = str(parent_path / 'pose_cfg.yaml')
    model_path,num_shuffles=auxfun_models.Check4weights(net_type,parent_path,num_shuffles) #if the model does not exist >> throws error!

    if Shuffles is None:
        Shuffles=range(1,num_shuffles+1,1)
    else:
        Shuffles=[i for i in Shuffles if isinstance(i,int)]

    bodyparts = cfg['bodyparts']
    TrainingFraction = cfg['TrainingFraction']
    for shuffle in Shuffles: # Creating shuffles starting from 1
        for trainingsetindex,trainFraction in enumerate(TrainingFraction):
            if userfeedback:
                trainposeconfigfile,testposeconfigfile,snapshotfolder =  training.return_train_network_path(config,shuffle=shuffle,trainingsetindex=trainingsetindex)
                if os.path.isfile(trainposeconfigfile):
                    askuser=input ("The model folder is already present. If you continue, it will overwrite the existing model (split). Do you want to continue?(yes/no): ")
                    if askuser=='no'or askuser=='No' or askuser=='N' or askuser=='No':
                        raise Exception("Use the Shuffles argument as a list to specify a different shuffle index. Check out the help for more details.")
                    else:
                        pass
            #trainIndexes, testIndexes = SplitTrials(range(len(Data.index)), trainFraction)
            if trainIndexes is None and testIndexes is None:
                trainIndexes, testIndexes = SplitTrials(range(len(Data.index)), trainFraction)
            else:
                print("You passed a split with the following fraction:", len(trainIndexes)*1./(len(testIndexes)+len(trainIndexes))*100)
            ####################################################
            # Generating data structure with labeled information & frame metadata (for deep cut)
            ####################################################
            # Make training file!
            data = []
            for jj in trainIndexes:
                H = {}
                # load image to get dimensions:
                filename = Data.index[jj]
                im = io.imread(os.path.join(cfg['project_path'],filename))
                H['image'] = filename

                if np.ndim(im)==3:
                    H['size'] = np.array(
                        [np.shape(im)[2],
                         np.shape(im)[0],
                         np.shape(im)[1]])
                else:
                    # print "Grayscale!"
                    H['size'] = np.array([1, np.shape(im)[0], np.shape(im)[1]])

                indexjoints=0
                joints=np.zeros((len(bodyparts),3))*np.nan
                for bpindex,bodypart in enumerate(bodyparts):
                    # check whether the labels are positive and inside the img
                    x_pos_n_inside = 0 <= Data[bodypart]['x'][jj] < np.shape(im)[1]
                    y_pos_n_inside = 0 <= Data[bodypart]['y'][jj] < np.shape(im)[0]
                    if x_pos_n_inside and y_pos_n_inside:
                        joints[indexjoints,0]=int(bpindex)
                        joints[indexjoints,1]=Data[bodypart]['x'][jj]
                        joints[indexjoints,2]=Data[bodypart]['y'][jj]
                        indexjoints+=1

                joints = joints[np.where(
                    np.prod(np.isfinite(joints),
                            1))[0], :]  # drop NaN, i.e. lines for missing body parts

                assert np.all(joints[:, 2] < np.shape(im)[0])  # y coordinate within image?
                assert np.all(joints[:, 1] < np.shape(im)[1])  # x coordinate within image?

                H['joints'] = np.array(joints, dtype=int)
                if np.size(joints) > 0:  # exclude images without labels
                    data.append(H)

            if len(trainIndexes)>0:

                datafilename,metadatafilename=auxiliaryfunctions.GetDataandMetaDataFilenames(trainingsetfolder,trainFraction,shuffle,cfg)
                ################################################################################
                # Saving metadata (Pickle file)
                ################################################################################
                auxiliaryfunctions.SaveMetadata(os.path.join(project_path,metadatafilename),data, trainIndexes, testIndexes, trainFraction)
                ################################################################################
                # Saving data file (convert to training file for deeper cut (*.mat))
                ################################################################################

                DTYPE = [('image', 'O'), ('size', 'O'), ('joints', 'O')]
                MatlabData = np.array(
                    [(np.array([data[item]['image']], dtype='U'),
                      np.array([data[item]['size']]),
                      boxitintoacell(data[item]['joints']))
                     for item in range(len(data))],
                    dtype=DTYPE)

                sio.savemat(os.path.join(project_path,datafilename), {'dataset': MatlabData})

                ################################################################################
                # Creating file structure for training &
                # Test files as well as pose_yaml files (containing training and testing information)
                #################################################################################
                modelfoldername=auxiliaryfunctions.GetModelFolder(trainFraction,shuffle,cfg)
                auxiliaryfunctions.attempttomakefolder(Path(config).parents[0] / modelfoldername, recursive=True)
                auxiliaryfunctions.attempttomakefolder(os.path.join(str(Path(config).parents[0] / modelfoldername), 'train'))
                auxiliaryfunctions.attempttomakefolder(os.path.join(str(Path(config).parents[0] / modelfoldername), 'test'))

                path_train_config = str(os.path.join(cfg['project_path'],Path(modelfoldername),'train','pose_cfg.yaml'))
                path_test_config = str(os.path.join(cfg['project_path'],Path(modelfoldername),'test','pose_cfg.yaml'))
                #str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test'  /  'pose_cfg.yaml')

                items2change = {
                    "dataset": datafilename,
                    "metadataset": metadatafilename,
                    "num_joints": len(bodyparts),
                    "all_joints": [[i] for i in range(len(bodyparts))],
                    "all_joints_names": [str(bpt) for bpt in bodyparts],
                    "init_weights": model_path,
                    "project_path": str(cfg['project_path']),
                    "net_type": net_type,
                    "dataset_type": augmenter_type
                }
                trainingdata = MakeTrain_pose_yaml(items2change,path_train_config,defaultconfigfile)
                keys2save = [
                    "dataset", "num_joints", "all_joints", "all_joints_names",
                    "net_type", 'init_weights', 'global_scale', 'location_refinement',
                    'locref_stdev'
                ]
                MakeTest_pose_yaml(trainingdata, keys2save,path_test_config)
                print("The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!")
Example #29
0
def mergeandsplit(config, trainindex=0, uniform=True, windows2linux=False):
    """
    This function allows additional control over "create_training_dataset".

    Merge annotated data sets (from different folders) and split the data in a specific way; returns the split variables (train/test indices).
    Importantly, this allows one to freeze a split.

    One can either create a uniform split (uniform = True; thereby indexing TrainingFraction in the config file) or a leave-one-folder-out split
    by passing the index of the corresponding video from the config.yaml file as the variable trainindex.
    
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    trainindex: int, optional
        Either (in case uniform = True) indexes which element of TrainingFraction in the config file should be used (note it is a list!).
        Alternatively (uniform = False) indexes which folder is dropped, i.e. the first if trainindex=0, the second if trainindex =1, etc.

    uniform: bool, optional
        Perform uniform split (disregarding folder structure in labeled data), or (if False) leave one folder out.

    windows2linux: bool, optional
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a Unix system (e.g. Ubuntu, Colab, macOS), set this variable to True to convert the paths.
    
    Examples
    --------
    To create a leave-one-folder-out model:
    >>> trainIndexes, testIndexes=deeplabcut.mergeandsplit(config,trainindex=0,uniform=False)
    returns the indices for the first video folder (as defined in config file) as testIndexes and all others as trainIndexes.
    You can then create the training set by calling (e.g. defining it as Shuffle 3):
    >>> deeplabcut.create_training_dataset(config,Shuffles=[3],trainIndexes=trainIndexes,testIndexes=testIndexes)
    
    To freeze a (uniform) split:
    >>> trainIndexes, testIndexes=deeplabcut.mergeandsplit(config,trainindex=0,uniform=True)
    You can then create two model instances that have an identical training set, which allows you to assess the role of various parameters on the performance of DLC.
    
    >>> deeplabcut.create_training_dataset(config,Shuffles=[0],trainIndexes=trainIndexes,testIndexes=testIndexes)
    >>> deeplabcut.create_training_dataset(config,Shuffles=[1],trainIndexes=trainIndexes,testIndexes=testIndexes)
    --------
    
    """
    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg['scorer']
    project_path = cfg['project_path']
    # Create path for training sets & store data there
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
        cfg)  #Path concatenation OS platform independent
    auxiliaryfunctions.attempttomakefolder(Path(
        os.path.join(project_path, str(trainingsetfolder))),
                                           recursive=True)
    fn = os.path.join(project_path, trainingsetfolder,
                      'CollectedData_' + cfg['scorer'])

    try:
        Data = pd.read_hdf(fn + '.h5', 'df_with_missing')
    except FileNotFoundError:
        Data = merge_annotateddatasets(
            cfg,
            project_path,
            Path(os.path.join(project_path, trainingsetfolder)),
            windows2linux=windows2linux)

    Data = Data[scorer]  #extract labeled data

    if uniform:
        TrainingFraction = cfg['TrainingFraction']
        trainFraction = TrainingFraction[trainindex]
        trainIndexes, testIndexes = SplitTrials(range(len(Data.index)),
                                                trainFraction)
    else:  #leave one folder out split
        videos = cfg['video_sets'].keys()
        test_video_name = [Path(i).stem for i in videos][trainindex]
        print("Excluding the following folder (from training):",
              test_video_name)
        trainIndexes, testIndexes = [], []
        for index, name in enumerate(Data.index):
            #print(index,name.split(os.sep)[1])
            if test_video_name == name.split(os.sep)[1]:  # this is the video name
                #print(name,test_video_name)
                testIndexes.append(index)
            else:
                trainIndexes.append(index)

    return trainIndexes, testIndexes
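A toy illustration of the leave-one-folder-out branch above, using hypothetical index entries: frames whose folder (video) name matches the excluded video go to the test set, all others to the training set.
import os

# hypothetical labeled-data index entries (relative image paths)
index = [os.path.join("labeled-data", "video1", "img000.png"),
         os.path.join("labeled-data", "video1", "img001.png"),
         os.path.join("labeled-data", "video2", "img000.png")]
test_video_name = "video2"

trainIndexes, testIndexes = [], []
for i, name in enumerate(index):
    if test_video_name == name.split(os.sep)[1]:  # second path component is the video name
        testIndexes.append(i)
    else:
        trainIndexes.append(i)

print(trainIndexes, testIndexes)  # [0, 1] [2]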
Example #30
0
def convert_single2multiplelegacyAM(config, userfeedback=True, target=None):
    """ Convert multi animal to single animal code and vice versa. Note that by providing target='single'/'multi' this will be target! """
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg["video_sets"].keys()
    video_names = [Path(i).stem for i in videos]
    folders = [
        Path(config).parent / "labeled-data" / Path(i) for i in video_names
    ]

    prefixes, uniquebodyparts, multianimalbodyparts = extractindividualsandbodyparts(
        cfg)
    for folder in folders:
        if userfeedback:
            print("Do you want to convert the annotation file in folder:",
                  folder, "?")
            askuser = input("yes/no: ")
        else:
            askuser = "yes"

        if askuser in ("y", "yes", "Ja", "ha"):  # multilanguage support :)
            fn = os.path.join(str(folder), "CollectedData_" + cfg["scorer"])
            Data = pd.read_hdf(fn + ".h5")
            imindex = Data.index

            if "individuals" in Data.columns.names and (target == None
                                                        or target == "single"):
                print(
                    "This is a multianimal data set, converting to single...",
                    folder)
                for prfxindex, prefix in enumerate(prefixes):
                    if prefix == "single":
                        for j, bpt in enumerate(uniquebodyparts):
                            index = pd.MultiIndex.from_product(
                                [[cfg["scorer"]], [bpt], ["x", "y"]],
                                names=["scorer", "bodyparts", "coords"],
                            )
                            frame = pd.DataFrame(
                                Data[cfg["scorer"]][prefix][bpt].values,
                                columns=index,
                                index=imindex,
                            )
                            if j == 0:
                                dataFrame = frame
                            else:
                                dataFrame = pd.concat([dataFrame, frame],
                                                      axis=1)
                    else:
                        for j, bpt in enumerate(multianimalbodyparts):
                            index = pd.MultiIndex.from_product(
                                [[cfg["scorer"]], [prefix + bpt], ["x", "y"]],
                                names=["scorer", "bodyparts", "coords"],
                            )
                            frame = pd.DataFrame(
                                Data[cfg["scorer"]][prefix][bpt].values,
                                columns=index,
                                index=imindex,
                            )
                            if j == 0:
                                dataFrame = frame
                            else:
                                dataFrame = pd.concat([dataFrame, frame],
                                                      axis=1)
                    if prfxindex == 0:
                        DataFrame = dataFrame
                    else:
                        DataFrame = pd.concat([DataFrame, dataFrame], axis=1)

                Data.to_hdf(fn + "multianimal.h5",
                            "df_with_missing",
                            format="table",
                            mode="w")
                Data.to_csv(fn + "multianimal.csv")

                DataFrame.to_hdf(fn + ".h5",
                                 "df_with_missing",
                                 format="table",
                                 mode="w")
                DataFrame.to_csv(fn + ".csv")
            elif target in (None, "multi"):
                print(
                    "This is a single animal data set, converting to multi...",
                    folder)
                for prfxindex, prefix in enumerate(prefixes):
                    if prefix == "single":
                        if cfg["uniquebodyparts"] != [None]:
                            for j, bpt in enumerate(uniquebodyparts):
                                index = pd.MultiIndex.from_arrays(
                                    np.array([
                                        2 * [cfg["scorer"]],
                                        2 * [prefix],
                                        2 * [bpt],
                                        ["x", "y"],
                                    ]),
                                    names=[
                                        "scorer",
                                        "individuals",
                                        "bodyparts",
                                        "coords",
                                    ],
                                )
                                if bpt in Data[cfg["scorer"]].keys():
                                    frame = pd.DataFrame(
                                        Data[cfg["scorer"]][bpt].values,
                                        columns=index,
                                        index=imindex,
                                    )
                                else:  # fill with nans...
                                    frame = pd.DataFrame(
                                        np.ones((len(imindex), 2)) * np.nan,
                                        columns=index,
                                        index=imindex,
                                    )

                                if j == 0:
                                    dataFrame = frame
                                else:
                                    dataFrame = pd.concat([dataFrame, frame],
                                                          axis=1)
                        else:
                            dataFrame = None
                    else:
                        for j, bpt in enumerate(multianimalbodyparts):
                            index = pd.MultiIndex.from_arrays(
                                np.array([
                                    2 * [cfg["scorer"]],
                                    2 * [prefix],
                                    2 * [bpt],
                                    ["x", "y"],
                                ]),
                                names=[
                                    "scorer", "individuals", "bodyparts",
                                    "coords"
                                ],
                            )
                            if prefix + "_" + bpt in Data[
                                    cfg["scorer"]].keys():
                                frame = pd.DataFrame(
                                    Data[cfg["scorer"]][prefix + "_" +
                                                        bpt].values,
                                    columns=index,
                                    index=imindex,
                                )
                            else:
                                frame = pd.DataFrame(
                                    np.ones((len(imindex), 2)) * np.nan,
                                    columns=index,
                                    index=imindex,
                                )

                            if j == 0:
                                dataFrame = frame
                            else:
                                dataFrame = pd.concat([dataFrame, frame],
                                                      axis=1)
                    if prfxindex == 0:
                        DataFrame = dataFrame
                    else:
                        DataFrame = pd.concat([DataFrame, dataFrame], axis=1)

                Data.to_hdf(fn + "singleanimal.h5",
                            "df_with_missing",
                            format="table",
                            mode="w")
                Data.to_csv(fn + "singleanimal.csv")

                DataFrame.to_hdf(fn + ".h5",
                                 "df_with_missing",
                                 format="table",
                                 mode="w")
                DataFrame.to_csv(fn + ".csv")
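A minimal sketch (with hypothetical scorer, animal, and body part names) of the multi-animal column layout that the single-to-multi conversion above produces: a four-level MultiIndex of scorer / individuals / bodyparts / coords.
import numpy as np
import pandas as pd

index = pd.MultiIndex.from_arrays(
    np.array([
        2 * ["scorer_name"],   # hypothetical scorer
        2 * ["animal1"],       # hypothetical individual
        2 * ["snout"],         # hypothetical body part
        ["x", "y"],
    ]),
    names=["scorer", "individuals", "bodyparts", "coords"],
)
frame = pd.DataFrame(np.full((3, 2), np.nan), columns=index)  # three unlabeled frames
print(frame.columns.names)  # FrozenList(['scorer', 'individuals', 'bodyparts', 'coords'])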