def RunTrajectoryAnalysis(video, basefolder, scorer, videofolder, cfg,
                          showfigures):
    vname = str(Path(video).stem)
    auxiliaryfunctions.attempttomakefolder(
        os.path.join(basefolder, 'plot-poses'))
    tmpfolder = os.path.join(basefolder, 'plot-poses', vname)
    auxiliaryfunctions.attempttomakefolder(tmpfolder)

    print("Loading ", video, "and data.")
    dataname = str(Path(video).stem) + scorer + '.h5'
    try:
        Dataframe = pd.read_hdf(os.path.join(videofolder, dataname))
        PlottingResults(video, tmpfolder, Dataframe, scorer, cfg, showfigures)
    except FileNotFoundError:
        datanames = [
            fn for fn in os.listdir(videofolder)
            if (vname in fn) and (".h5" in fn) and "resnet" in fn
        ]
        if len(datanames) == 0:
            print("The video was not analyzed with this scorer:", scorer)
            print(
                "No other scorers were found, please run AnalysisVideos.py first."
            )
        elif len(datanames) > 0:
            print("The video was not analyzed with this scorer:", scorer)
            print("Other scorers were found, however:", datanames)
            print("Creating labeled video for:", datanames[0], " instead.")
            Dataframe = pd.read_hdf(os.path.join(videofolder, datanames[0]))
            PlottingResults(video, tmpfolder, Dataframe, scorer, cfg,
                            showfigures)
Example #2
def plot_trajectories(config,
                      videos,
                      videotype='.avi',
                      shuffle=1,
                      trainingsetindex=0,
                      showfigures=False):
    """
    Plots the trajectories of various bodyparts across the video.
    
    Parameters
    ----------
     config : string
    Full path of the config.yaml file as a string.
    
    videos : list
        A list of strings containing the full paths to videos for analysis or a path to the directory, where all the videos with same extension are stored.
    
    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``
    
    shuffle: list, optional
    List of integers specifying the shuffle indices of the training dataset. The default is [1]
    
     trainingsetindex: int, optional
    Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).
    
    showfigures: bool, default false
    If true then plots are also displayed.
    
    
    Example
    --------
    for labeling the frames
    >>> deeplabcut.plot_trajectories('home/alex/analysis/project/reaching-task/config.yaml',['/home/alex/analysis/project/videos/reachingvideo1.avi'])
    --------
    
    """

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    DLCscorer = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction
    )  #automatically loads corresponding model (even training iteration based on snapshot index)

    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        print(video)
        videofolder = str(
            Path(video).parents[0])  # directory containing the video
        videotype = str(Path(video).suffix)
        print("Starting analysis in", videofolder, "for:", videos)
        basefolder = videofolder
        auxiliaryfunctions.attempttomakefolder(basefolder)
        RunTrajectoryAnalysis(video, basefolder, DLCscorer, videofolder, cfg,
                              showfigures)
        print(
            'Plots created! Please check the directory "plot-poses" within the video directory'
        )
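A brief usage sketch (paths hypothetical): per the docstring, a directory can be passed instead of a list of video paths, in which case every video with the given extension inside it is plotted.

import deeplabcut

# Hypothetical paths; passing a directory plots all '.avi' videos inside it.
config = '/home/alex/analysis/project/reaching-task/config.yaml'
deeplabcut.plot_trajectories(config, ['/home/alex/analysis/project/videos'],
                             videotype='.avi', shuffle=1, showfigures=False)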
Example #3
def RunTrajectoryAnalysis(video,
                          basefolder,
                          DLCscorer,
                          videofolder,
                          cfg,
                          showfigures,
                          bodyparts2plot,
                          filtered=False):
    vname = str(Path(video).stem)
    auxiliaryfunctions.attempttomakefolder(
        os.path.join(basefolder, 'plot-poses'))
    tmpfolder = os.path.join(basefolder, 'plot-poses', vname)
    auxiliaryfunctions.attempttomakefolder(tmpfolder)

    print("Loading ", video, "and data.")
    try:
        if not filtered:
            dataname = str(Path(video).stem) + DLCscorer + '.h5'
            Dataframe = pd.read_hdf(os.path.join(videofolder, dataname))
            suffix = '.png'
        else:  #for filtered output
            try:
                Dataframe = pd.read_hdf(
                    os.path.join(
                        videofolder,
                        str(Path(video).stem) + DLCscorer + 'filtered.h5'))
            except FileNotFoundError:
                print(
                    "No filtered predictions found, using frame-by-frame output instead."
                )
                Dataframe = pd.read_hdf(
                    os.path.join(videofolder,
                                 str(Path(video).stem) + DLCscorer + '.h5'))
            suffix = '_filtered.png'

        PlottingResults(video, tmpfolder, Dataframe, DLCscorer, cfg,
                        showfigures, bodyparts2plot, suffix)

    except FileNotFoundError:
        datanames = [
            fn for fn in os.listdir(videofolder)
            if (vname in fn) and (".h5" in fn) and "resnet" in fn
        ]
        if len(datanames) == 0:
            print("The video was not analyzed with this scorer:", DLCscorer)
            print(
                "No other scorers were found, please run AnalysisVideos.py first."
            )

        elif len(datanames) > 0:
            print("The video was not analyzed with this scorer:", DLCscorer)
            print("Other scorers were found, however:", datanames)
            print("Creating plots for:", datanames[0], " instead.")

            Dataframe = pd.read_hdf(os.path.join(videofolder, datanames[0]))
            suffix = '.png'  # suffix is otherwise undefined on this code path
            PlottingResults(video, tmpfolder, Dataframe, DLCscorer, cfg,
                            showfigures, bodyparts2plot, suffix)
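A sketch of the lookup order used above when filtered=True (names hypothetical): the helper first tries the filtered output <stem><scorer>filtered.h5 and falls back to the frame-by-frame <stem><scorer>.h5.

stem = "reachingvideo1"  # hypothetical video stem
scorer = "DeepCut_resnet50_reachingJan30shuffle1_50000"  # hypothetical scorer
filtered_name = stem + scorer + "filtered.h5"  # preferred, filtered predictions
fallback_name = stem + scorer + ".h5"          # frame-by-frame predictions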
Example #4
def MakeLabeledPlots(folder, DataCombined, cfg, Labels, Colorscheme, cc,
                     scale):
    tmpfolder = str(folder) + '_labeled'
    auxiliaryfunctions.attempttomakefolder(tmpfolder)
    for index, imagename in enumerate(DataCombined.index.values):
        image = io.imread(os.path.join(cfg['project_path'], imagename))

        if np.ndim(image) == 2:
            h, w = np.shape(image)
        else:
            h, w, nc = np.shape(image)

        plt.figure(frameon=False,
                   figsize=(w * 1. / 100 * scale, h * 1. / 100 * scale))
        plt.subplots_adjust(left=0,
                            bottom=0,
                            right=1,
                            top=1,
                            wspace=0,
                            hspace=0)

        plt.imshow(image, 'gray')
        if index == 0:
            print("They are stored in the following folder: %s." % tmpfolder)

        for c, bp in enumerate(cfg['bodyparts']):
            plt.plot(DataCombined[cfg['scorer']][bp]['x'].values[index],
                     DataCombined[cfg['scorer']][bp]['y'].values[index],
                     Labels[cc],
                     color=Colorscheme(c),
                     alpha=cfg['alphavalue'],
                     ms=cfg['dotsize'])

        plt.xlim(0, w)
        plt.ylim(0, h)
        plt.axis('off')
        plt.subplots_adjust(left=0,
                            bottom=0,
                            right=1,
                            top=1,
                            wspace=0,
                            hspace=0)
        plt.gca().invert_yaxis()

        #plt.savefig(str(Path(tmpfolder)/imagename.split(os.sep)[-1]))
        plt.savefig(
            os.path.join(tmpfolder, str(Path(imagename).name))
        )  #create file name also on Windows for Unix projects (and vice versa)
        plt.close("all")
def evaluate_multianimal_full(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    plotting=False,
    show_errors=True,
    comparisonbodyparts="all",
    gputouse=None,
    modelprefix="",
):
    from deeplabcut.pose_estimation_tensorflow.core import (
        predict,
        predict_multianimal as predictma,
    )
    from deeplabcut.utils import (
        auxiliaryfunctions,
        auxfun_multianimal,
        auxfun_videos,
        conversioncode,
    )

    import tensorflow as tf

    if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
        del os.environ["TF_CUDNN_USE_AUTOTUNE"]  # was potentially set during training

    tf.compat.v1.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    if gputouse is not None:  # GPU selection
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    start_path = os.getcwd()

    if plotting is True:
        plotting = "bodypart"

    ##################################################
    # Load data...
    ##################################################
    cfg = auxiliaryfunctions.read_config(config)
    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        TrainingFractions = [cfg["TrainingFraction"][trainingsetindex]]

    # Loading human annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        )
    )
    conversioncode.guarantee_multiindex_rows(Data)

    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts
    )
    all_bpts = np.asarray(
        len(cfg["individuals"]) * cfg["multianimalbodyparts"] + cfg["uniquebodyparts"]
    )
    colors = visualization.get_cmap(len(comparisonbodyparts), name=cfg["colormap"])
    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        str(cfg["project_path"] + "/evaluation-results/")
    )
    for shuffle in Shuffles:
        for trainFraction in TrainingFractions:
            ##################################################
            # Load and setup CNN part detector
            ##################################################
            datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg
            )
            modelfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix
                    )
                ),
            )
            path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"

            # Load meta data
            (
                data,
                trainIndices,
                testIndices,
                trainFraction,
            ) = auxiliaryfunctions.LoadMetadata(
                os.path.join(cfg["project_path"], metadatafn)
            )

            try:
                dlc_cfg = load_config(str(path_test_config))
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It seems the model for shuffle %s and trainFraction %s does not exist."
                    % (shuffle, trainFraction)
                )

            pipeline = iaa.Sequential(random_order=False)
            pre_resize = dlc_cfg.get("pre_resize")
            if pre_resize:
                width, height = pre_resize
                pipeline.add(iaa.Resize({"height": height, "width": width}))

            # TODO: IMPLEMENT for different batch sizes?
            dlc_cfg["batch_size"] = 1  # due to differently sized images!!!

            stride = dlc_cfg["stride"]
            # Ignore best edges possibly defined during a prior evaluation
            _ = dlc_cfg.pop("paf_best", None)
            joints = dlc_cfg["all_joints_names"]

            # Create folder structure to store results.
            evaluationfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetEvaluationFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix
                    )
                ),
            )
            auxiliaryfunctions.attempttomakefolder(evaluationfolder, recursive=True)
            # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

            # Check which snapshots are available and sort them by # iterations
            Snapshots = np.array(
                [
                    fn.split(".")[0]
                    for fn in os.listdir(os.path.join(str(modelfolder), "train"))
                    if "index" in fn
                ]
            )
            if len(Snapshots) == 0:
                print(
                    "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                    % (shuffle, trainFraction)
                )
            else:
                increasing_indices = np.argsort(
                    [int(m.split("-")[1]) for m in Snapshots]
                )
                Snapshots = Snapshots[increasing_indices]

                if cfg["snapshotindex"] == -1:
                    snapindices = [-1]
                elif cfg["snapshotindex"] == "all":
                    snapindices = range(len(Snapshots))
                elif cfg["snapshotindex"] < len(Snapshots):
                    snapindices = [cfg["snapshotindex"]]
                else:
                    raise ValueError(
                        "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                    )

                final_result = []
                ##################################################
                # Compute predictions over images
                ##################################################
                for snapindex in snapindices:
                    dlc_cfg["init_weights"] = os.path.join(
                        str(modelfolder), "train", Snapshots[snapindex]
                    )  # setting weights to corresponding snapshot.
                    trainingsiterations = (
                        dlc_cfg["init_weights"].split(os.sep)[-1]
                    ).split("-")[-1]  # read how many training iterations that corresponds to.

                    # name for deeplabcut net (based on its parameters)
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations,
                        modelprefix=modelprefix,
                    )
                    print(
                        "Running ",
                        DLCscorer,
                        " with # of trainingiterations:",
                        trainingsiterations,
                    )
                    (
                        notanalyzed,
                        resultsfilename,
                        DLCscorer,
                    ) = auxiliaryfunctions.CheckifNotEvaluated(
                        str(evaluationfolder),
                        DLCscorer,
                        DLCscorerlegacy,
                        Snapshots[snapindex],
                    )

                    data_path = resultsfilename.split(".h5")[0] + "_full.pickle"

                    if plotting:
                        foldername = os.path.join(
                            str(evaluationfolder),
                            "LabeledImages_" + DLCscorer + "_" + Snapshots[snapindex],
                        )
                        auxiliaryfunctions.attempttomakefolder(foldername)
                        if plotting == "bodypart":
                            fig, ax = visualization.create_minimal_figure()

                    if os.path.isfile(data_path):
                        print("Model already evaluated.", resultsfilename)
                    else:
                        sess, inputs, outputs = predict.setup_pose_prediction(
                            dlc_cfg
                        )

                        PredicteData = {}
                        dist = np.full((len(Data), len(all_bpts)), np.nan)
                        conf = np.full_like(dist, np.nan)
                        print("Network Evaluation underway...")
                        for imageindex, imagename in tqdm(enumerate(Data.index)):
                            image_path = os.path.join(cfg["project_path"], *imagename)
                            frame = auxfun_videos.imread(image_path, mode="skimage")

                            GT = Data.iloc[imageindex]
                            if not GT.any():
                                continue

                            # Pass the image and the keypoints through the resizer;
                            # this has no effect if no augmenters were added to it.
                            keypoints = [GT.to_numpy().reshape((-1, 2)).astype(float)]
                            frame_, keypoints = pipeline(
                                images=[frame], keypoints=keypoints
                            )
                            frame = frame_[0]
                            GT[:] = keypoints[0].flatten()

                            df = GT.unstack("coords").reindex(joints, level="bodyparts")

                            # FIXME Is having an empty array vs nan really that necessary?!
                            groundtruthidentity = list(
                                df.index.get_level_values("individuals")
                                .to_numpy()
                                .reshape((-1, 1))
                            )
                            groundtruthcoordinates = list(df.values[:, np.newaxis])
                            for i, coords in enumerate(groundtruthcoordinates):
                                if np.isnan(coords).any():
                                    groundtruthcoordinates[i] = np.empty(
                                        (0, 2), dtype=float
                                    )
                                    groundtruthidentity[i] = np.array([], dtype=str)

                            # Form 2D array of shape (n_rows, 4) where the last dimension
                            # is (sample_index, peak_y, peak_x, bpt_index) to slice the PAFs.
                            temp = df.reset_index(level="bodyparts").dropna()
                            temp["bodyparts"].replace(
                                dict(zip(joints, range(len(joints)))), inplace=True,
                            )
                            temp["sample"] = 0
                            peaks_gt = temp.loc[
                                :, ["sample", "y", "x", "bodyparts"]
                            ].to_numpy()
                            peaks_gt[:, 1:3] = (peaks_gt[:, 1:3] - stride // 2) / stride

                            pred = predictma.predict_batched_peaks_and_costs(
                                dlc_cfg,
                                np.expand_dims(frame, axis=0),
                                sess,
                                inputs,
                                outputs,
                                peaks_gt.astype(int),
                            )

                            if not pred:
                                continue
                            else:
                                pred = pred[0]

                            PredicteData[imagename] = {}
                            PredicteData[imagename]["index"] = imageindex
                            PredicteData[imagename]["prediction"] = pred
                            PredicteData[imagename]["groundtruth"] = [
                                groundtruthidentity,
                                groundtruthcoordinates,
                                GT,
                            ]

                            coords_pred = pred["coordinates"][0]
                            probs_pred = pred["confidence"]
                            for bpt, xy_gt in df.groupby(level="bodyparts"):
                                inds_gt = np.flatnonzero(
                                    np.all(~np.isnan(xy_gt), axis=1)
                                )
                                n_joint = joints.index(bpt)
                                xy = coords_pred[n_joint]
                                if inds_gt.size and xy.size:
                                    # Pick the predictions closest to ground truth,
                                    # rather than the ones the model is most confident in.
                                    xy_gt_values = xy_gt.iloc[inds_gt].values
                                    neighbors = _find_closest_neighbors(
                                        xy_gt_values, xy, k=3
                                    )
                                    found = neighbors != -1
                                    min_dists = np.linalg.norm(
                                        xy_gt_values[found] - xy[neighbors[found]],
                                        axis=1,
                                    )
                                    inds = np.flatnonzero(all_bpts == bpt)
                                    sl = imageindex, inds[inds_gt[found]]
                                    dist[sl] = min_dists
                                    conf[sl] = probs_pred[n_joint][
                                        neighbors[found]
                                    ].squeeze()

                            if plotting == "bodypart":
                                temp_xy = GT.unstack("bodyparts")[joints].values
                                gt = temp_xy.reshape(
                                    (-1, 2, temp_xy.shape[1])
                                ).T.swapaxes(1, 2)
                                h, w, _ = np.shape(frame)
                                fig.set_size_inches(w / 100, h / 100)
                                ax.set_xlim(0, w)
                                ax.set_ylim(0, h)
                                ax.invert_yaxis()
                                ax = visualization.make_multianimal_labeled_image(
                                    frame,
                                    gt,
                                    coords_pred,
                                    probs_pred,
                                    colors,
                                    cfg["dotsize"],
                                    cfg["alphavalue"],
                                    cfg["pcutoff"],
                                    ax=ax,
                                )
                                visualization.save_labeled_frame(
                                    fig,
                                    image_path,
                                    foldername,
                                    imageindex in trainIndices,
                                )
                                visualization.erase_artists(ax)

                        sess.close()  # closes the current tf session

                        # Compute all distance statistics
                        df_dist = pd.DataFrame(dist, columns=df.index)
                        df_conf = pd.DataFrame(conf, columns=df.index)
                        df_joint = pd.concat(
                            [df_dist, df_conf],
                            keys=["rmse", "conf"],
                            names=["metrics"],
                            axis=1,
                        )
                        df_joint = df_joint.reorder_levels(
                            list(np.roll(df_joint.columns.names, -1)), axis=1
                        )
                        df_joint.sort_index(
                            axis=1,
                            level=["individuals", "bodyparts"],
                            ascending=[True, True],
                            inplace=True,
                        )
                        write_path = os.path.join(
                            evaluationfolder, f"dist_{trainingsiterations}.csv"
                        )
                        df_joint.to_csv(write_path)

                        # Calculate overall prediction error
                        error = df_joint.xs("rmse", level="metrics", axis=1)
                        mask = (
                            df_joint.xs("conf", level="metrics", axis=1)
                            >= cfg["pcutoff"]
                        )
                        error_masked = error[mask]
                        error_train = np.nanmean(error.iloc[trainIndices])
                        error_train_cut = np.nanmean(error_masked.iloc[trainIndices])
                        error_test = np.nanmean(error.iloc[testIndices])
                        error_test_cut = np.nanmean(error_masked.iloc[testIndices])
                        results = [
                            trainingsiterations,
                            int(100 * trainFraction),
                            shuffle,
                            np.round(error_train, 2),
                            np.round(error_test, 2),
                            cfg["pcutoff"],
                            np.round(error_train_cut, 2),
                            np.round(error_test_cut, 2),
                        ]
                        final_result.append(results)

                        if show_errors:
                            string = (
                                "Results for {} training iterations, training fraction of {}, and shuffle {}:\n"
                                "Train error: {} pixels. Test error: {} pixels.\n"
                                "With pcutoff of {}:\n"
                                "Train error: {} pixels. Test error: {} pixels."
                            )
                            print(string.format(*results))

                            print("##########################################")
                            print(
                                "Average Euclidean distance to GT per individual (in pixels; test-only)"
                            )
                            print(
                                error_masked.iloc[testIndices]
                                .groupby("individuals", axis=1)
                                .mean()
                                .mean()
                                .to_string()
                            )
                            print(
                                "Average Euclidean distance to GT per bodypart (in pixels; test-only)"
                            )
                            print(
                                error_masked.iloc[testIndices]
                                .groupby("bodyparts", axis=1)
                                .mean()
                                .mean()
                                .to_string()
                            )

                        PredicteData["metadata"] = {
                            "nms radius": dlc_cfg["nmsradius"],
                            "minimal confidence": dlc_cfg["minconfidence"],
                            "sigma": dlc_cfg.get("sigma", 1),
                            "PAFgraph": dlc_cfg["partaffinityfield_graph"],
                            "PAFinds": np.arange(
                                len(dlc_cfg["partaffinityfield_graph"])
                            ),
                            "all_joints": [
                                [i] for i in range(len(dlc_cfg["all_joints"]))
                            ],
                            "all_joints_names": [
                                dlc_cfg["all_joints_names"][i]
                                for i in range(len(dlc_cfg["all_joints"]))
                            ],
                            "stride": dlc_cfg.get("stride", 8),
                        }
                        print(
                            "Done and results stored for snapshot: ",
                            Snapshots[snapindex],
                        )

                        dictionary = {
                            "Scorer": DLCscorer,
                            "DLC-model-config file": dlc_cfg,
                            "trainIndices": trainIndices,
                            "testIndices": testIndices,
                            "trainFraction": trainFraction,
                        }
                        metadata = {"data": dictionary}
                        _ = auxfun_multianimal.SaveFullMultiAnimalData(
                            PredicteData, metadata, resultsfilename
                        )

                        tf.compat.v1.reset_default_graph()

                    n_multibpts = len(cfg["multianimalbodyparts"])
                    if n_multibpts == 1:
                        continue

                    # Skip data-driven skeleton selection unless
                    # the model was trained on the full graph.
                    max_n_edges = n_multibpts * (n_multibpts - 1) // 2
                    n_edges = len(dlc_cfg["partaffinityfield_graph"])
                    if n_edges == max_n_edges:
                        print("Selecting best skeleton...")
                        n_graphs = 10
                        paf_inds = None
                    else:
                        n_graphs = 1
                        paf_inds = [list(range(n_edges))]
                    (
                        results,
                        paf_scores,
                        best_assemblies,
                    ) = crossvalutils.cross_validate_paf_graphs(
                        config,
                        str(path_test_config).replace("pose_", "inference_"),
                        data_path,
                        data_path.replace("_full.", "_meta."),
                        n_graphs=n_graphs,
                        paf_inds=paf_inds,
                        oks_sigma=dlc_cfg.get("oks_sigma", 0.1),
                        margin=dlc_cfg.get("bbox_margin", 0),
                        symmetric_kpts=dlc_cfg.get("symmetric_kpts"),
                    )
                    if plotting == "individual":
                        assemblies, assemblies_unique, image_paths = best_assemblies
                        fig, ax = visualization.create_minimal_figure()
                        n_animals = len(cfg["individuals"])
                        if cfg["uniquebodyparts"]:
                            n_animals += 1
                        colors = visualization.get_cmap(n_animals, name=cfg["colormap"])
                        for k, v in tqdm(assemblies.items()):
                            imname = image_paths[k]
                            image_path = os.path.join(cfg["project_path"], *imname)
                            frame = auxfun_videos.imread(image_path, mode="skimage")

                            h, w, _ = np.shape(frame)
                            fig.set_size_inches(w / 100, h / 100)
                            ax.set_xlim(0, w)
                            ax.set_ylim(0, h)
                            ax.invert_yaxis()

                            gt = [
                                s.to_numpy().reshape((-1, 2))
                                for _, s in Data.loc[imname].groupby("individuals")
                            ]
                            coords_pred = []
                            coords_pred += [ass.xy for ass in v]
                            probs_pred = []
                            probs_pred += [ass.data[:, 2:3] for ass in v]
                            if assemblies_unique is not None:
                                unique = assemblies_unique.get(k, None)
                                if unique is not None:
                                    coords_pred.append(unique[:, :2])
                                    probs_pred.append(unique[:, 2:3])
                            while len(coords_pred) < len(gt):
                                coords_pred.append(np.full((1, 2), np.nan))
                                probs_pred.append(np.full((1, 2), np.nan))
                            ax = visualization.make_multianimal_labeled_image(
                                frame,
                                gt,
                                coords_pred,
                                probs_pred,
                                colors,
                                cfg["dotsize"],
                                cfg["alphavalue"],
                                cfg["pcutoff"],
                                ax=ax,
                            )
                            visualization.save_labeled_frame(
                                fig, image_path, foldername, k in trainIndices,
                            )
                            visualization.erase_artists(ax)

                    df = results[1].copy()
                    df.loc(axis=0)[("mAP_train", "mean")] = [
                        d[0]["mAP"] for d in results[2]
                    ]
                    df.loc(axis=0)[("mAR_train", "mean")] = [
                        d[0]["mAR"] for d in results[2]
                    ]
                    df.loc(axis=0)[("mAP_test", "mean")] = [
                        d[1]["mAP"] for d in results[2]
                    ]
                    df.loc(axis=0)[("mAR_test", "mean")] = [
                        d[1]["mAR"] for d in results[2]
                    ]
                    with open(data_path.replace("_full.", "_map."), "wb") as file:
                        pickle.dump((df, paf_scores), file)

                if len(final_result) > 0:  # Only append if results were calculated
                    make_results_file(final_result, evaluationfolder, DLCscorer)

    os.chdir(str(start_path))
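A usage sketch (path hypothetical); for multi-animal projects this function is normally reached through deeplabcut.evaluate_network, which delegates to it (see the next example).

# Hypothetical call; plotting="individual" draws assembled animals,
# while plotting="bodypart" (or True) draws per-keypoint predictions.
evaluate_multianimal_full(
    config="/analysis/project/reaching-task/config.yaml",
    Shuffles=[1],
    trainingsetindex=0,
    plotting="bodypart",
)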
Example #6
def evaluate_network(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    plotting=False,
    show_errors=True,
    comparisonbodyparts="all",
    gputouse=None,
    rescale=False,
    modelprefix="",
):
    """

    Evaluates the network based on the saved models at different stages of the training network.\n
    The evaluation results are stored in the .h5 and .csv file under the subdirectory 'evaluation-results'.
    Change the snapshotindex parameter in the config file to 'all' in order to evaluate all the saved models.
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    Shuffles: list, optional
        List of integers specifying the shuffle indices of the training dataset. The default is [1]

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    plotting: bool or str, optional
        Plots the predictions on the train and test images.
        The default is ``False``; if provided it must be either ``True``, ``False``, "bodypart", or "individual".
        Setting it to ``True`` defaults to "bodypart" for multi-animal projects.

    show_errors: bool, optional
        Display train and test errors. The default is ``True``.

    comparisonbodyparts: list of bodyparts, Default is "all".
        The average error will be computed for those body parts only (Has to be a subset of the body parts).

    gputouse: int, optional
        Natural number indicating the number of your GPU (see number in nvidia-smi). If you do not have a GPU put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    rescale: bool, default False
        Evaluate the model at the 'global_scale' variable (as set in the test/pose_config.yaml file for a particular project). I.e. every
        image will be resized according to that scale and the prediction will be compared to the resized ground truth. The error will be reported
        in pixels at the *original* size. I.e. for a [200,200] pixel image evaluated at global_scale=.5, the predictions are calculated
        on [100,100] pixel images, compared to 1/2*ground truth, and this error is then multiplied by 2. The evaluation images are also shown at the
        original size!

    Examples
    --------
    If you do not want to plot, just evaluate shuffle 1.
    >>> deeplabcut.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[1])
    --------
    If you want to plot and evaluate shuffle 0 and 1.
    >>> deeplabcut.evaluate_network('/analysis/project/reaching-task/config.yaml',Shuffles=[0, 1],plotting = True)

    --------
    If you want to plot assemblies for a maDLC project:
    >>> deeplabcut.evaluate_network('/analysis/project/reaching-task/config.yaml',Shuffles=[1],plotting = "individual")

    Note: this defaults to standard plotting for single-animal projects.

    """
    if plotting not in (True, False, "bodypart", "individual"):
        raise ValueError(f"Unknown value for `plotting`={plotting}")

    import os

    start_path = os.getcwd()
    from deeplabcut.utils import auxiliaryfunctions

    cfg = auxiliaryfunctions.read_config(config)

    if cfg.get("multianimalproject", False):
        from .evaluate_multianimal import evaluate_multianimal_full

        # TODO: Make this code not so redundant!
        evaluate_multianimal_full(
            config=config,
            Shuffles=Shuffles,
            trainingsetindex=trainingsetindex,
            plotting=plotting,
            comparisonbodyparts=comparisonbodyparts,
            gputouse=gputouse,
            modelprefix=modelprefix,
        )
    else:
        from deeplabcut.utils.auxfun_videos import imread, imresize
        from deeplabcut.pose_estimation_tensorflow.core import predict
        from deeplabcut.pose_estimation_tensorflow.config import load_config
        from deeplabcut.pose_estimation_tensorflow.datasets.utils import data_to_input
        from deeplabcut.utils import auxiliaryfunctions, conversioncode
        import tensorflow as tf

        # If a string was passed in, auto-convert to True for backward compatibility
        plotting = bool(plotting)

        if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
            del os.environ[
                "TF_CUDNN_USE_AUTOTUNE"]  # was potentially set during training

        tf.compat.v1.reset_default_graph()
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
        #    tf.logging.set_verbosity(tf.logging.WARN)

        start_path = os.getcwd()
        # Read file path for pose_config file. >> pass it on
        cfg = auxiliaryfunctions.read_config(config)
        if gputouse is not None:  # GPU selection
            os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

        if trainingsetindex == "all":
            TrainingFractions = cfg["TrainingFraction"]
        else:
            if (trainingsetindex < len(cfg["TrainingFraction"])
                    and trainingsetindex >= 0):
                TrainingFractions = [
                    cfg["TrainingFraction"][int(trainingsetindex)]
                ]
            else:
                raise Exception(
                    "Please check the trainingsetindex! ",
                    trainingsetindex,
                    " should be an integer from 0 .. ",
                    int(len(cfg["TrainingFraction"]) - 1),
                )

        # Loading human annotated data
        trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
        Data = pd.read_hdf(
            os.path.join(
                cfg["project_path"],
                str(trainingsetfolder),
                "CollectedData_" + cfg["scorer"] + ".h5",
            ))

        # Get list of body parts to evaluate network for
        comparisonbodyparts = (
            auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
                cfg, comparisonbodyparts))
        # Make folder for evaluation
        auxiliaryfunctions.attempttomakefolder(
            str(cfg["project_path"] + "/evaluation-results/"))
        for shuffle in Shuffles:
            for trainFraction in TrainingFractions:
                ##################################################
                # Load and setup CNN part detector
                ##################################################
                datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                    trainingsetfolder, trainFraction, shuffle, cfg)
                modelfolder = os.path.join(
                    cfg["project_path"],
                    str(
                        auxiliaryfunctions.GetModelFolder(
                            trainFraction,
                            shuffle,
                            cfg,
                            modelprefix=modelprefix)),
                )

                path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
                # Load meta data
                (
                    data,
                    trainIndices,
                    testIndices,
                    trainFraction,
                ) = auxiliaryfunctions.LoadMetadata(
                    os.path.join(cfg["project_path"], metadatafn))

                try:
                    dlc_cfg = load_config(str(path_test_config))
                except FileNotFoundError:
                    raise FileNotFoundError(
                        "It seems the model for shuffle %s and trainFraction %s does not exist."
                        % (shuffle, trainFraction))

                # change batch size, if it was edited during analysis!
                dlc_cfg["batch_size"] = 1  # in case this was edited for analysis.

                # Create folder structure to store results.
                evaluationfolder = os.path.join(
                    cfg["project_path"],
                    str(
                        auxiliaryfunctions.GetEvaluationFolder(
                            trainFraction,
                            shuffle,
                            cfg,
                            modelprefix=modelprefix)),
                )
                auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                                       recursive=True)
                # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

                # Check which snapshots are available and sort them by # iterations
                Snapshots = np.array([
                    fn.split(".")[0] for fn in os.listdir(
                        os.path.join(str(modelfolder), "train"))
                    if "index" in fn
                ])
                if len(Snapshots) == 0:  # check if any were found
                    raise FileNotFoundError(
                        "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                        % (shuffle, trainFraction))

                increasing_indices = np.argsort(
                    [int(m.split("-")[1]) for m in Snapshots])
                Snapshots = Snapshots[increasing_indices]

                if cfg["snapshotindex"] == -1:
                    snapindices = [-1]
                elif cfg["snapshotindex"] == "all":
                    snapindices = range(len(Snapshots))
                elif cfg["snapshotindex"] < len(Snapshots):
                    snapindices = [cfg["snapshotindex"]]
                else:
                    raise ValueError(
                        "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                    )

                final_result = []

                ########################### RESCALING (to global scale)
                if rescale:
                    scale = dlc_cfg["global_scale"]
                    Data = (pd.read_hdf(
                        os.path.join(
                            cfg["project_path"],
                            str(trainingsetfolder),
                            "CollectedData_" + cfg["scorer"] + ".h5",
                        )) * scale)
                else:
                    scale = 1

                conversioncode.guarantee_multiindex_rows(Data)
                ##################################################
                # Compute predictions over images
                ##################################################
                for snapindex in snapindices:
                    dlc_cfg["init_weights"] = os.path.join(
                        str(modelfolder), "train", Snapshots[snapindex]
                    )  # setting weights to corresponding snapshot.
                    trainingsiterations = (
                        dlc_cfg["init_weights"].split(os.sep)[-1]
                    ).split("-")[-1]  # read how many training iterations that corresponds to.

                    # Name for deeplabcut net (based on its parameters)
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations,
                        modelprefix=modelprefix,
                    )
                    print(
                        "Running ",
                        DLCscorer,
                        " with # of training iterations:",
                        trainingsiterations,
                    )
                    (
                        notanalyzed,
                        resultsfilename,
                        DLCscorer,
                    ) = auxiliaryfunctions.CheckifNotEvaluated(
                        str(evaluationfolder),
                        DLCscorer,
                        DLCscorerlegacy,
                        Snapshots[snapindex],
                    )
                    if notanalyzed:
                        # Specifying state of model (snapshot / training state)
                        sess, inputs, outputs = predict.setup_pose_prediction(
                            dlc_cfg)
                        Numimages = len(Data.index)
                        PredicteData = np.zeros(
                            (Numimages, 3 * len(dlc_cfg["all_joints_names"])))
                        print("Running evaluation ...")
                        for imageindex, imagename in tqdm(enumerate(
                                Data.index)):
                            image = imread(
                                os.path.join(cfg["project_path"], *imagename),
                                mode="skimage",
                            )
                            if scale != 1:
                                image = imresize(image, scale)

                            image_batch = data_to_input(image)
                            # Compute prediction with the CNN
                            outputs_np = sess.run(
                                outputs, feed_dict={inputs: image_batch})
                            scmap, locref = predict.extract_cnn_output(
                                outputs_np, dlc_cfg)

                            # Extract maximum scoring location from the heatmap, assuming a single animal
                            pose = predict.argmax_pose_predict(
                                scmap, locref, dlc_cfg["stride"])
                            PredicteData[imageindex, :] = (
                                pose.flatten()
                            )  # NOTE: thereby cfg_test['all_joints_names'] should be in the same order as bodyparts!

                        sess.close()  # closes the current tf session

                        index = pd.MultiIndex.from_product(
                            [
                                [DLCscorer],
                                dlc_cfg["all_joints_names"],
                                ["x", "y", "likelihood"],
                            ],
                            names=["scorer", "bodyparts", "coords"],
                        )

                        # Saving results
                        DataMachine = pd.DataFrame(PredicteData,
                                                   columns=index,
                                                   index=Data.index)
                        DataMachine.to_hdf(resultsfilename, "df_with_missing")

                        print(
                            "Analysis is done and the results are stored (see evaluation-results) for snapshot: ",
                            Snapshots[snapindex],
                        )
                        DataCombined = pd.concat([Data.T, DataMachine.T],
                                                 axis=0,
                                                 sort=False).T

                        RMSE, RMSEpcutoff = pairwisedistances(
                            DataCombined,
                            cfg["scorer"],
                            DLCscorer,
                            cfg["pcutoff"],
                            comparisonbodyparts,
                        )
                        testerror = np.nanmean(
                            RMSE.iloc[testIndices].values.flatten())
                        trainerror = np.nanmean(
                            RMSE.iloc[trainIndices].values.flatten())
                        testerrorpcutoff = np.nanmean(
                            RMSEpcutoff.iloc[testIndices].values.flatten())
                        trainerrorpcutoff = np.nanmean(
                            RMSEpcutoff.iloc[trainIndices].values.flatten())
                        results = [
                            trainingsiterations,
                            int(100 * trainFraction),
                            shuffle,
                            np.round(trainerror, 2),
                            np.round(testerror, 2),
                            cfg["pcutoff"],
                            np.round(trainerrorpcutoff, 2),
                            np.round(testerrorpcutoff, 2),
                        ]
                        final_result.append(results)

                        if show_errors:
                            print(
                                "Results for",
                                trainingsiterations,
                                " training iterations:",
                                int(100 * trainFraction),
                                shuffle,
                                "train error:",
                                np.round(trainerror, 2),
                                "pixels. Test error:",
                                np.round(testerror, 2),
                                " pixels.",
                            )
                            print(
                                "With pcutoff of",
                                cfg["pcutoff"],
                                " train error:",
                                np.round(trainerrorpcutoff, 2),
                                "pixels. Test error:",
                                np.round(testerrorpcutoff, 2),
                                "pixels",
                            )
                            if scale != 1:
                                print(
                                    "The predictions have been calculated for rescaled images (and rescaled ground truth). Scale:",
                                    scale,
                                )
                            print(
                                "The errors are given as the average distances between the labels placed by DLC and the (human) scorer."
                            )

                        if plotting:
                            print("Plotting...")
                            foldername = os.path.join(
                                str(evaluationfolder),
                                "LabeledImages_" + DLCscorer + "_" +
                                Snapshots[snapindex],
                            )
                            auxiliaryfunctions.attempttomakefolder(foldername)
                            Plotting(
                                cfg,
                                comparisonbodyparts,
                                DLCscorer,
                                trainIndices,
                                DataCombined * 1.0 / scale,
                                foldername,
                            )  # Rescaling coordinates to have figure in original size!

                        tf.compat.v1.reset_default_graph()
                        # print(final_result)
                    else:
                        DataMachine = pd.read_hdf(resultsfilename)
                        conversioncode.guarantee_multiindex_rows(DataMachine)
                        if plotting:
                            DataCombined = pd.concat([Data.T, DataMachine.T],
                                                     axis=0,
                                                     sort=False).T
                            print(
                                "Plotting... (note: the scale might be inconsistent with when the data was analyzed, e.g. if you used rescale)"
                            )
                            foldername = os.path.join(
                                str(evaluationfolder),
                                "LabeledImages_" + DLCscorer + "_" +
                                Snapshots[snapindex],
                            )
                            auxiliaryfunctions.attempttomakefolder(foldername)
                            Plotting(
                                cfg,
                                comparisonbodyparts,
                                DLCscorer,
                                trainIndices,
                                DataCombined * 1.0 / scale,
                                foldername,
                            )

                if len(final_result) > 0:  # Only append if results were calculated
                    make_results_file(final_result, evaluationfolder,
                                      DLCscorer)
                    print(
                        "The network is evaluated and the results are stored in the subdirectory 'evaluation-results'."
                    )
                    print(
                        "Please check the results, then choose the best model (snapshot) for prediction. You can update the config.yaml file with the appropriate index for the 'snapshotindex'.\nUse the function 'analyze_video' to make predictions on new videos."
                    )
                    print(
                        "Otherwise, consider adding more labeled-data and retraining the network (see DeepLabCut workflow Fig 2, Nath 2019)"
                    )

    # returning to initial folder
    os.chdir(str(start_path))
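A usage sketch of the rescale behavior described in the docstring (path hypothetical): with global_scale=0.5 in the test pose_cfg.yaml, a [200,200] image is evaluated at [100,100] and the reported error is scaled back to original-size pixels.

import deeplabcut

# Hypothetical project; errors are reported in original-size pixels
# even though inference ran at the reduced global_scale.
deeplabcut.evaluate_network(
    "/analysis/project/reaching-task/config.yaml",
    Shuffles=[1],
    rescale=True,
    plotting=False,
)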
def extract_save_all_maps(
    config,
    shuffle=1,
    trainingsetindex=0,
    comparisonbodyparts="all",
    gputouse=None,
    rescale=False,
    Indices=None,
    modelprefix="",
    dest_folder=None,
    nplots_per_row=None,
):
    """
    Extracts the scoremap, location refinement field and part affinity field prediction of the model. The maps
    will be rescaled to the size of the input image and stored in the corresponding model folder in /evaluation-results.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    comparisonbodyparts: list of bodyparts, Default is "all".
        The average error will be computed for those body parts only (Has to be a subset of the body parts).

    Indices: list, optional (default=None)
        List of indices specifying the images for which the scmap, locref and paf should be computed.

    nplots_per_row: int, optional (default=None)
        Number of plots per row in grid plots. By default, calculated to approximate a squared grid of plots

    Examples
    --------
    Calculate maps for images 0, 1 and 33.
    >>> deeplabcut.extract_save_all_maps('/analysis/project/reaching-task/config.yaml', shuffle=1,Indices=[0,1,33])

    """

    from deeplabcut.utils.auxiliaryfunctions import (
        read_config,
        attempttomakefolder,
        GetEvaluationFolder,
    )
    from tqdm import tqdm

    cfg = read_config(config)
    data = extract_maps(
        config,
        shuffle,
        trainingsetindex,
        comparisonbodyparts,
        gputouse,
        rescale,
        Indices,
        modelprefix,
    )

    if not nplots_per_row:
        from deeplabcut.utils import auxiliaryfunctions

        bpts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
            cfg, comparisonbodyparts)
        nplots_per_row = int(np.floor(np.sqrt(len(bpts))))

    print("Saving plots...")
    for frac, values in data.items():
        if not dest_folder:
            dest_folder = os.path.join(
                cfg["project_path"],
                str(
                    GetEvaluationFolder(frac,
                                        shuffle,
                                        cfg,
                                        modelprefix=modelprefix)),
                "maps",
            )
        attempttomakefolder(dest_folder)
        dest_path = os.path.join(dest_folder, "{}_{}_{}_{}_{}_{}.png")

        for snap, maps in values.items():
            for imagenr in tqdm(maps):
                (
                    image,
                    scmap,
                    locref,
                    paf,
                    bptnames,
                    pafgraph,
                    impath,
                    trainingframe,
                ) = maps[imagenr]
                label = "train" if trainingframe else "test"
                imname = os.path.split(os.path.splitext(impath)[0])[1]
                if not os.path.isfile(
                        dest_path.format(imname, "scmap", label, shuffle,
                                         frac, snap)):  # matches the name used by savefig below
                    scmap, (locref_x, locref_y), paf = resize_all_maps(
                        image, scmap, locref, paf)
                    fig1, _ = visualize_scoremaps(
                        image,
                        scmap,
                        labels=bptnames,
                        nplots_per_row=nplots_per_row)
                    fig2, _ = visualize_locrefs(
                        image,
                        scmap,
                        locref_x,
                        locref_y,
                        labels=bptnames,
                        nplots_per_row=nplots_per_row,
                    )
                    fig3, _ = visualize_locrefs(
                        image,
                        scmap,
                        locref_x,
                        locref_y,
                        zoom_width=100,
                        labels=bptnames,
                        nplots_per_row=nplots_per_row,
                    )
                    if paf is not None:
                        fig4, _ = visualize_paf(
                            image,
                            paf,
                            pafgraph,
                            labels=bptnames,
                            nplots_per_row=nplots_per_row,
                        )

                    fig1.savefig(
                        dest_path.format(imname, "scmap", label, shuffle, frac,
                                         snap))
                    fig2.savefig(
                        dest_path.format(imname, "locref", label, shuffle,
                                         frac, snap))
                    fig3.savefig(
                        dest_path.format(imname, "locrefzoom", label, shuffle,
                                         frac, snap))
                    if paf is not None:
                        fig4.savefig(
                            dest_path.format(imname, "paf", label, shuffle,
                                             frac, snap))

                    plt.close("all")
def AnalyzeMultiAnimalVideo(
    video,
    DLCscorer,
    trainFraction,
    cfg,
    dlc_cfg,
    sess,
    inputs,
    outputs,
    pdindex,
    save_as_csv,
    destfolder=None,
    c_engine=False,
    robust_nframes=False,
):
    """ Helper function for analyzing a video with multiple individuals """

    print("Starting to analyze % ", video)
    vname = Path(video).stem
    videofolder = str(Path(video).parents[0])
    if destfolder is None:
        destfolder = videofolder
    auxiliaryfunctions.attempttomakefolder(destfolder)
    dataname = os.path.join(destfolder, vname + DLCscorer + ".h5")

    if os.path.isfile(dataname.split(".h5")[0] + "_full.pickle"):
        print("Video already analyzed!", dataname)
    else:
        print("Loading ", video)
        vid = VideoWriter(video)
        if robust_nframes:
            nframes = vid.get_n_frames(robust=True)
            duration = vid.calc_duration(robust=True)
            fps = nframes / duration
        else:
            nframes = len(vid)
            duration = vid.calc_duration(robust=False)
            fps = vid.fps

        nx, ny = vid.dimensions
        print(
            "Duration of video [s]: ",
            round(duration, 2),
            ", recorded with ",
            round(fps, 2),
            "fps!",
        )
        print(
            "Overall # of frames: ",
            nframes,
            " found with (before cropping) frame dimensions: ",
            nx,
            ny,
        )
        start = time.time()

        print("Starting to extract posture")
        if int(dlc_cfg["batch_size"]) > 1:
            PredictedData, nframes = GetPoseandCostsF(
                cfg,
                dlc_cfg,
                sess,
                inputs,
                outputs,
                vid,
                nframes,
                int(dlc_cfg["batch_size"]),
                c_engine=c_engine,
            )
        else:
            PredictedData, nframes = GetPoseandCostsS(cfg,
                                                      dlc_cfg,
                                                      sess,
                                                      inputs,
                                                      outputs,
                                                      vid,
                                                      nframes,
                                                      c_engine=c_engine)

        stop = time.time()

        if cfg["cropping"] == True:
            coords = [cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"]]
        else:
            coords = [0, nx, 0, ny]

        dictionary = {
            "start": start,
            "stop": stop,
            "run_duration": stop - start,
            "Scorer": DLCscorer,
            "DLC-model-config file": dlc_cfg,
            "fps": fps,
            "batch_size": dlc_cfg["batch_size"],
            "frame_dimensions": (ny, nx),
            "nframes": nframes,
            "iteration (active-learning)": cfg["iteration"],
            "training set fraction": trainFraction,
            "cropping": cfg["cropping"],
            "cropping_parameters": coords,
        }
        metadata = {"data": dictionary}
        print("Saving results in %s..." % (destfolder))

        auxfun_multianimal.SaveFullMultiAnimalData(PredictedData, metadata,
                                                   dataname)
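
# Reading the results back (sketch): SaveFullMultiAnimalData writes a
# "<video><scorer>_full.pickle" next to the video (see the isfile check above).
# The filename below is hypothetical and the dictionary layout is whatever
# SaveFullMultiAnimalData produced:
import pickle
with open("/videos/vid1DLC_resnet50_demoMay1shuffle1_50000_full.pickle", "rb") as f:
    full_data = pickle.load(f)
print(type(full_data))
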
Example #9
def evaluate_multianimal_crossvalidate(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    pbounds=None,
    edgewisecondition=True,
    target="rpck_train",
    inferencecfg=None,
    init_points=20,
    n_iter=50,
    dcorr=10.0,
    leastbpts=1,
    printingintermediatevalues=True,
    modelprefix="",
    plotting=False,
):
    """
    Cross-validate inference parameters on evaluation data; optimal parameters will be stored in "inference_cfg.yaml".

    They will then be used for inference (for analysis of videos). Performs Bayesian Optimization with https://github.com/fmfn/BayesianOptimization

    This is a crucial step. The most important variable (in inferencecfg) to cross-validate is minimalnumberofconnections. Pass
    a reasonable range to optimize over (e.g. from 1 to 5 if you have 5 edges, or from 3 to 9 if you have 4 bodyparts and 11 connections).

    Parameters
    ----------
    config: string
        Full path of the config.yaml file as a string.

    Shuffles: list, optional
        A list of integers specifying the shuffle indices of the training dataset used for training the network. The default is [1].

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    pbounds: dictionary of variables with ranges to crossvalidate.
        By default: pbounds = {
                        'pafthreshold': (0.05, 0.7),
                        'detectionthresholdsquare': (0, 0.9),
                        'minimalnumberofconnections': (1, # connections in your skeleton),
                    }

    inferencecfg: dict, optional
        For the variables that are *not* crossvalidated the parameters from inference_cfg.yaml are used, or
        you can overwrite them by passing a dictionary with your preferred parameters.

    edgewisecondition: bool, default True
        Estimates Euclidean distances for each skeleton edge and uses those distances for excluding possible connections.
        If false, uses only one distance for all bodyparts (which is obviously suboptimal).

    target: string, default='rpck_train'
        What metric to optimize. Options are pck/rpck/rmse on train/test set.

    init_points: int, optional (default=20)
        Number of random initial explorations. Probing random regions helps diversify the exploration space.
        Parameter from BayesianOptimization.

    n_iter: int, optional (default=50)
        Number of iterations of Bayesian optimization to perform.
        The larger it is, the higher the likelihood of finding a good extremum.
        Parameter from BayesianOptimization.

    dcorr: float, optional (default=10.0)
        Distance threshold for percent correct keypoints / relative percent correct keypoints (see paper).

    leastbpts: int, optional (default=1; should be a small number)
        If an animal has this many or fewer body parts visible in an image, it will not be used
        for cross validation. Imagine e.g. if only a single bodypart is present: when animals
        need a certain minimal number of bodyparts for assembly (minimalnumberofconnections),
        this might not be predictable.

    printingintermediatevalues: bool, default True
        Whether intermediate metrics (RMSE, hits, etc.) should be printed per sample.


    Examples
    --------

    First run evaluate:

    >>> deeplabcut.evaluate_network(path_config_file,Shuffles=[shuffle],plotting=True)

    Then e.g. for finding inference parameters to minimize rmse on the test set:

    >>> deeplabcut.evaluate_multianimal_crossvalidate(path_config_file,Shuffles=[shuffle],target='rmse_test')
    """
    from deeplabcut.pose_estimation_tensorflow.lib import crossvalutils
    from deeplabcut.utils import auxfun_multianimal, auxiliaryfunctions
    from easydict import EasyDict as edict

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, "all")
    colors = visualization.get_cmap(len(comparisonbodyparts),
                                    name=cfg["colormap"])

    # wild guesses for a wide range:
    maxconnections = len(cfg["skeleton"])
    minconnections = 1  # len(cfg['multianimalbodyparts'])-1

    _pbounds = {
        "pafthreshold": (0.05, 0.7),
        "detectionthresholdsquare": (
            0,
            0.9,
        ),  # TODO: set to minimum (from pose_cfg.yaml)
        "minimalnumberofconnections": (minconnections, maxconnections),
    }
    if pbounds is not None:
        _pbounds.update(pbounds)

    if "rpck" in target or "pck" in target:
        maximize = True

    if "rmse" in target:
        maximize = False  # i.e. minimize

    for shuffle in Shuffles:
        evaluationfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetEvaluationFolder(
                    trainFraction, shuffle, cfg, modelprefix=modelprefix)),
        )
        auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                               recursive=True)

        datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
            trainingsetfolder, trainFraction, shuffle, cfg)
        _, trainIndices, testIndices, _ = auxiliaryfunctions.LoadMetadata(
            os.path.join(cfg["project_path"], metadatafn))
        modelfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetModelFolder(trainFraction,
                                                  shuffle,
                                                  cfg,
                                                  modelprefix=modelprefix)),
        )
        path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
        try:
            dlc_cfg = load_config(str(path_test_config))
        except FileNotFoundError:
            raise FileNotFoundError(
                "It seems the model for shuffle %s and trainFraction %s does not exist."
                % (shuffle, trainFraction))

        # Check which snapshots are available and sort them by # iterations
        Snapshots = np.array([
            fn.split(".")[0]
            for fn in os.listdir(os.path.join(str(modelfolder), "train"))
            if "index" in fn
        ])
        snapindex = -1
        dlc_cfg["init_weights"] = os.path.join(
            str(modelfolder), "train",
            Snapshots[snapindex])  # setting weights to corresponding snapshot.
        trainingsiterations = (dlc_cfg["init_weights"].split(
            os.sep)[-1]).split("-")[
                -1]  # read how many training iterations that corresponds to.

        DLCscorer, _ = auxiliaryfunctions.GetScorerName(
            cfg,
            shuffle,
            trainFraction,
            trainingsiterations,
            modelprefix=modelprefix)

        path_inference_config = Path(
            modelfolder) / "test" / "inference_cfg.yaml"
        if inferencecfg is None:  # then load or initialize
            inferencecfg = auxfun_multianimal.read_inferencecfg(
                path_inference_config, cfg)
        else:
            inferencecfg = edict(inferencecfg)
            auxfun_multianimal.check_inferencecfg_sanity(cfg, inferencecfg)

        inferencecfg.topktoretain = np.inf
        inferencecfg, opt = crossvalutils.bayesian_search(
            config,
            inferencecfg,
            _pbounds,
            edgewisecondition=edgewisecondition,
            shuffle=shuffle,
            trainingsetindex=trainingsetindex,
            target=target,
            maximize=maximize,
            init_points=init_points,
            n_iter=n_iter,
            acq="ei",
            dcorr=dcorr,
            leastbpts=leastbpts,
            modelprefix=modelprefix,
        )

        # update number of individuals to retain.
        inferencecfg.topktoretain = len(
            cfg["individuals"]) + 1 * (len(cfg["uniquebodyparts"]) > 0)

        # calculating result at best solution
        DataOptParams, poses_gt, poses = crossvalutils.compute_crossval_metrics(
            config, inferencecfg, shuffle, trainingsetindex, modelprefix)

        path_inference_config = str(path_inference_config)
        # print("Quantification:", DataOptParams.head())
        DataOptParams.to_hdf(
            path_inference_config.split(".yaml")[0] + ".h5",
            "df_with_missing",
            format="table",
            mode="w",
        )
        DataOptParams.to_csv(path_inference_config.split(".yaml")[0] + ".csv")
        print("Saving optimal inference parameters...")
        print(DataOptParams.to_string())
        auxiliaryfunctions.write_plainconfig(path_inference_config,
                                             dict(inferencecfg))

        # Store best predictions
        max_indivs = max(pose.shape[0] for pose in poses)
        bpts = dlc_cfg["all_joints_names"]
        container = np.full((len(poses), max_indivs * len(bpts) * 3), np.nan)
        for n, pose in enumerate(poses):
            temp = pose.flatten()
            container[n, :len(temp)] = temp

        header = pd.MultiIndex.from_product(
            [
                [DLCscorer],
                [f"individual{i}" for i in range(1, max_indivs + 1)],
                bpts,
                ["x", "y", "likelihood"],
            ],
            names=["scorer", "individuals", "bodyparts", "coords"],
        )

        df = pd.DataFrame(container, columns=header)
        df.to_hdf(os.path.join(evaluationfolder, f"{DLCscorer}.h5"),
                  key="df_with_missing")

        if plotting:
            foldername = os.path.join(
                str(evaluationfolder),
                "LabeledImages_" + DLCscorer + "_" + Snapshots[snapindex],
            )
            auxiliaryfunctions.attempttomakefolder(foldername)
            for imageindex, imagename in tqdm(enumerate(Data.index)):
                image_path = os.path.join(cfg["project_path"], imagename)
                image = io.imread(image_path)
                frame = img_as_ubyte(skimage.color.gray2rgb(image))
                groundtruthcoordinates = poses_gt[imageindex]
                coords_pred = poses[imageindex][:, :, :2]
                probs_pred = poses[imageindex][:, :, -1:]
                fig = visualization.make_multianimal_labeled_image(
                    frame,
                    groundtruthcoordinates,
                    coords_pred,
                    probs_pred,
                    colors,
                    cfg["dotsize"],
                    cfg["alphavalue"],
                    cfg["pcutoff"],
                )
                visualization.save_labeled_frame(fig, image_path, foldername,
                                                 imageindex in trainIndices)
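
# Usage sketch (hypothetical path): cross-validate with a narrower search range
# for the PAF threshold, minimizing RMSE on the test set:
import deeplabcut
deeplabcut.evaluate_multianimal_crossvalidate(
    "/analysis/project/multi-mouse/config.yaml",
    Shuffles=[1],
    pbounds={"pafthreshold": (0.1, 0.5)},
    target="rmse_test",
)
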
Example #10
def create_training_dataset(
    config,
    num_shuffles=1,
    Shuffles=None,
    windows2linux=False,
    userfeedback=False,
    trainIndices=None,
    testIndices=None,
    net_type=None,
    augmenter_type=None,
):
    """
    Creates a training dataset. Labels from all the extracted frames are merged into a single .h5 file.\n
    Only the videos included in the config file are used to create this dataset.\n

    [OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    Shuffles: list of integers, optional
        Alternatively the user can also give a list of shuffle indices (integers!).

    windows2linux: bool.
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a unix system (e.g. Ubuntu, Colab, Mac) set this variable to True to convert the paths.

    userfeedback: bool, optional
        If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
        want to ensure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.

    trainIndices: list of lists, optional (default=None)
        List of one or multiple lists containing train indexes.
        A list containing two lists of training indexes will produce two splits.

    testIndices: list of lists, optional (default=None)
        List of one or multiple lists containing test indexes.

    net_type: string
        Type of network. Currently resnet_50, resnet_101, resnet_152, mobilenet_v2_1.0, mobilenet_v2_0.75, mobilenet_v2_0.5, and mobilenet_v2_0.35 are supported.

    augmenter_type: string
        Type of augmenter. Currently default, imgaug, tensorpack, and deterministic are supported.

    Example
    --------
    >>> deeplabcut.create_training_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)
    Windows:
    >>> deeplabcut.create_training_dataset('C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
    --------
    """
    import scipy.io as sio

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    if cfg.get("multianimalproject", False):
        from deeplabcut.generate_training_dataset.multiple_individuals_trainingsetmanipulation import (
            create_multianimaltraining_dataset, )

        create_multianimaltraining_dataset(config, num_shuffles, Shuffles,
                                           windows2linux, net_type)
    else:
        scorer = cfg["scorer"]
        project_path = cfg["project_path"]
        # Create path for training sets & store data there
        trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
            cfg)  # Path concatenation OS platform independent
        auxiliaryfunctions.attempttomakefolder(Path(
            os.path.join(project_path, str(trainingsetfolder))),
                                               recursive=True)

        Data = merge_annotateddatasets(
            cfg, Path(os.path.join(project_path, trainingsetfolder)),
            windows2linux)
        if Data is None:
            return
        Data = Data[scorer]  # extract labeled data

        # loading & linking pretrained models
        if net_type is None:  # loading & linking pretrained models
            net_type = cfg.get("default_net_type", "resnet_50")
        else:
            if "resnet" in net_type or "mobilenet" in net_type:
                pass
            else:
                raise ValueError("Invalid network type:", net_type)

        if augmenter_type is None:
            augmenter_type = cfg.get("default_augmenter", "imgaug")
            if augmenter_type is None:  # this could be in config.yaml for old projects!
                # updating variable if null/None (backwards compatibility)
                auxiliaryfunctions.edit_config(config,
                                               {"default_augmenter": "imgaug"})
                augmenter_type = "imgaug"
        else:
            if augmenter_type in [
                    "default",
                    "scalecrop",
                    "imgaug",
                    "tensorpack",
                    "deterministic",
            ]:
                pass
            else:
                raise ValueError("Invalid augmenter type:", augmenter_type)

        # Loading the encoder (if necessary downloading from TF)
        dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
        defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
        model_path, num_shuffles = auxfun_models.Check4weights(
            net_type, Path(dlcparent_path), num_shuffles)

        if Shuffles is None:
            Shuffles = range(1, num_shuffles + 1)
        else:
            Shuffles = [i for i in Shuffles if isinstance(i, int)]

        if trainIndices is None and testIndices is None:
            splits = [(
                trainFraction,
                shuffle,
                SplitTrials(range(len(Data.index)), trainFraction),
            ) for trainFraction in cfg["TrainingFraction"]
                      for shuffle in Shuffles]
        else:
            if not (len(trainIndices) == len(testIndices) == len(Shuffles)):
                raise ValueError(
                    "Number of Shuffles and train and test indexes should be equal."
                )
            splits = []
            for shuffle, (train_inds, test_inds) in enumerate(
                    zip(trainIndices, testIndices)):
                trainFraction = round(
                    len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)),
                    2)
                print(
                    f"You passed a split with the following fraction: {int(100 * trainFraction)}%"
                )
                splits.append((trainFraction, Shuffles[shuffle], (train_inds,
                                                                  test_inds)))

        bodyparts = cfg["bodyparts"]
        nbodyparts = len(bodyparts)
        for trainFraction, shuffle, (trainIndices, testIndices) in splits:
            if len(trainIndices) > 0:
                if userfeedback:
                    trainposeconfigfile, _, _ = training.return_train_network_path(
                        config,
                        shuffle=shuffle,
                        trainingsetindex=cfg["TrainingFraction"].index(
                            trainFraction),
                    )
                    if trainposeconfigfile.is_file():
                        askuser = input(
                            "The model folder is already present. If you continue, it will overwrite the existing model (split). Do you want to continue?(yes/no): "
                        )
                        if (askuser == "no" or askuser == "No"
                                or askuser == "N" or askuser == "No"):
                            raise Exception(
                                "Use the Shuffles argument as a list to specify a different shuffle index. Check out the help for more details."
                            )

                ####################################################
                # Generating data structure with labeled information & frame metadata (for deep cut)
                ####################################################
                # Make training file!
                (
                    datafilename,
                    metadatafilename,
                ) = auxiliaryfunctions.GetDataandMetaDataFilenames(
                    trainingsetfolder, trainFraction, shuffle, cfg)

                ################################################################################
                # Saving data file (convert to training file for deeper cut (*.mat))
                ################################################################################
                data, MatlabData = format_training_data(
                    Data, trainIndices, nbodyparts, project_path)
                sio.savemat(os.path.join(project_path, datafilename),
                            {"dataset": MatlabData})

                ################################################################################
                # Saving metadata (Pickle file)
                ################################################################################
                auxiliaryfunctions.SaveMetadata(
                    os.path.join(project_path, metadatafilename),
                    data,
                    trainIndices,
                    testIndices,
                    trainFraction,
                )

                ################################################################################
                # Creating file structure for training &
                # Test files as well as pose_yaml files (containing training and testing information)
                #################################################################################
                modelfoldername = auxiliaryfunctions.GetModelFolder(
                    trainFraction, shuffle, cfg)
                auxiliaryfunctions.attempttomakefolder(
                    Path(config).parents[0] / modelfoldername, recursive=True)
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername) + "/train")
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername) + "/test")

                path_train_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "train",
                        "pose_cfg.yaml",
                    ))
                path_test_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "test",
                        "pose_cfg.yaml",
                    ))
                items2change = {
                    "dataset": datafilename,
                    "metadataset": metadatafilename,
                    "num_joints": len(bodyparts),
                    "all_joints": [[i] for i in range(len(bodyparts))],
                    "all_joints_names": [str(bpt) for bpt in bodyparts],
                    "init_weights": model_path,
                    "project_path": str(cfg["project_path"]),
                    "net_type": net_type,
                    "dataset_type": augmenter_type,
                }

                items2drop = {}
                if augmenter_type == "scalecrop":
                    # these values are dropped as scalecrop
                    # doesn't have rotation implemented
                    items2drop = {"rotation": 0, "rotratio": 0.0}

                trainingdata = MakeTrain_pose_yaml(items2change,
                                                   path_train_config,
                                                   defaultconfigfile,
                                                   items2drop)

                keys2save = [
                    "dataset",
                    "num_joints",
                    "all_joints",
                    "all_joints_names",
                    "net_type",
                    "init_weights",
                    "global_scale",
                    "location_refinement",
                    "locref_stdev",
                ]
                MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
                print(
                    "The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
                )
        return splits
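
# Usage sketch (hypothetical path and indices): two custom train/test splits,
# one per requested shuffle; trainIndices and testIndices must have the same
# length as Shuffles:
import deeplabcut
deeplabcut.create_training_dataset(
    "/analysis/project/reaching-task/config.yaml",
    Shuffles=[1, 2],
    trainIndices=[[0, 1, 2, 5], [1, 2, 3, 4]],
    testIndices=[[3, 4], [0, 5]],
)
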
def extract_bpt_feature_from_video(
    video,
    DLCscorer,
    trainFraction,
    cfg,
    dlc_cfg,
    sess,
    inputs,
    outputs,
    extra_dict,
    destfolder=None,
    robust_nframes=False,
):
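    """Helper function for extracting bodypart features from a video of multiple
    individuals; expects that the corresponding "_assemblies.pickle" file
    already exists next to the prediction file."""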
    print("Starting to analyze % ", video)
    vname = Path(video).stem
    videofolder = str(Path(video).parents[0])
    if destfolder is None:
        destfolder = videofolder
    auxiliaryfunctions.attempttomakefolder(destfolder)
    dataname = os.path.join(destfolder, vname + DLCscorer + ".h5")

    assemble_filename = dataname.split(".h5")[0] + "_assemblies.pickle"

    feature_dict = shelve.open(
        dataname.split(".h5")[0] + "_bpt_features.pickle",
        protocol=pickle.DEFAULT_PROTOCOL,
    )

    with open(assemble_filename, "rb") as f:
        assemblies = pickle.load(f)
        print("Loading ", video)
        vid = VideoWriter(video)
        if robust_nframes:
            nframes = vid.get_n_frames(robust=True)
            duration = vid.calc_duration(robust=True)
            fps = nframes / duration
        else:
            nframes = len(vid)
            duration = vid.calc_duration(robust=False)
            fps = vid.fps

        nx, ny = vid.dimensions
        print(
            "Duration of video [s]: ",
            round(duration, 2),
            ", recorded with ",
            round(fps, 2),
            "fps!",
        )
        print(
            "Overall # of frames: ",
            nframes,
            " found with (before cropping) frame dimensions: ",
            nx,
            ny,
        )
        start = time.time()

        print("Starting to extract posture")
        if int(dlc_cfg["batch_size"]) > 1:
            # for multi-animal projects, only the batched code path is used
            PredictedData, nframes = GetPoseandCostsF_from_assemblies(
                cfg,
                dlc_cfg,
                sess,
                inputs,
                outputs,
                vid,
                nframes,
                int(dlc_cfg["batch_size"]),
                assemblies,
                feature_dict,
                extra_dict,
            )
        else:
            raise NotImplementedError(
                "Not implemented yet; please open a GitHub issue if you need this."
            )
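
# Reading the features back (sketch): the shelve file is named
# "<video><scorer>_bpt_features.pickle" (see above). The key layout is produced
# by GetPoseandCostsF_from_assemblies and is assumed here; filename hypothetical:
import shelve
with shelve.open("/videos/vid1DLC_resnet50_demoMay1shuffle1_50000_bpt_features.pickle") as feats:
    print(list(feats.keys())[:5])
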
Example #12
def plot_trajectories(
    config,
    videos,
    videotype=".avi",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    displayedbodyparts="all",
    displayedindividuals="all",
    showfigures=False,
    destfolder=None,
    modelprefix="",
    imagetype=".png",
    resolution=100,
    linewidth=1.0,
    track_method="",
):
    """
    Plots the trajectories of various bodyparts across the video.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default false
        Boolean variable indicating if filtered output should be plotted rather than frame-by-frame predictions. The filtered version can be calculated with deeplabcut.filterpredictions.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video.
        Either ``all``, then all body parts from config.yaml are used,
        or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    displayedindividuals: list of strings, optional
        Selects the individuals whose trajectories are plotted. Either ``all`` or a list of strings that is a subset of the individuals in config.yaml.

    showfigures: bool, default false
        If true then plots are also displayed.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    imagetype: string, default ".png"
        Specifies the output image format, tested '.tif', '.jpg', '.svg' and ".png".

    resolution: int, default 100
        Specifies the resolution (in dpi) of saved figures. Note higher resolution figures take longer to generate.

    linewidth: float, default 1.0
        Specifies width of line for line and histogram plots.

    track_method: string, optional
         Specifies the tracker used to generate the data. Empty by default (corresponding to a single animal project).
         For multiple animals, must be either 'box', 'skeleton', or 'ellipse' and will be taken from the config.yaml file if none is given.

    Example
    --------
    for labeling the frames
    >>> deeplabcut.plot_trajectories('home/alex/analysis/project/reaching-task/config.yaml',['/home/alex/analysis/project/videos/reachingvideo1.avi'])
    --------

    """
    cfg = auxiliaryfunctions.read_config(config)
    track_method = auxfun_multianimal.get_track_method(
        cfg, track_method=track_method)

    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix
    )  # automatically loads corresponding model (even training iteration based on snapshot index)
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)
    individuals = auxfun_multianimal.IntersectionofIndividualsandOnesGivenbyUser(
        cfg, displayedindividuals)
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    if not len(Videos):
        print(
            "No videos found. Make sure you passed a list of videos and that *videotype* is right."
        )
        return

    failed = []
    for video in Videos:
        if destfolder is None:
            videofolder = str(Path(video).parents[0])
        else:
            videofolder = destfolder

        vname = str(Path(video).stem)
        print("Loading ", video, "and data.")
        try:
            df, _, _, suffix = auxiliaryfunctions.load_analyzed_data(
                videofolder, vname, DLCscorer, filtered, track_method)
            failed.append(False)
            tmpfolder = os.path.join(videofolder, "plot-poses", vname)
            auxiliaryfunctions.attempttomakefolder(tmpfolder, recursive=True)
            # Keep only the individuals and bodyparts that were labeled
            labeled_bpts = [
                bp for bp in df.columns.get_level_values("bodyparts").unique()
                if bp in bodyparts
            ]
            # Either display the animals defined in the config if they are found
            # in the dataframe, or all the trajectories regardless of their names
            try:
                animals = set(df.columns.get_level_values("individuals"))
            except KeyError:
                animals = {""}
            for animal in animals.intersection(individuals) or animals:
                PlottingResults(
                    tmpfolder,
                    df,
                    cfg,
                    labeled_bpts,
                    animal,
                    showfigures,
                    suffix + animal + imagetype,
                    resolution=resolution,
                    linewidth=linewidth,
                )
        except FileNotFoundError as e:
            failed.append(True)
            print(e)
            try:
                _ = auxiliaryfunctions.load_detection_data(
                    video, DLCscorer, track_method)
                print('Call "deeplabcut.stitch_tracklets()"'
                      " prior to plotting the trajectories.")
            except FileNotFoundError as e:
                print(e)
                print(
                    f"Make sure {video} was previously analyzed, and that "
                    f'detections were successively converted to tracklets using "deeplabcut.convert_detections2tracklets()" '
                    f'and "deeplabcut.stitch_tracklets()".')

    if not all(failed):
        print(
            'Plots created! Please check the directory "plot-poses" within the video directory'
        )
    else:
        print(
            f"Plots could not be created! "
            f"Videos were not evaluated with the current scorer {DLCscorer}.")
Example #13
def make_labeled_images_from_dataframe(
    df,
    cfg,
    destfolder="",
    scale=1.0,
    dpi=100,
    keypoint="+",
    draw_skeleton=True,
    color_by="bodypart",
):
    """
    Write labeled frames to disk from a DataFrame.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame containing the labeled data. Typically, the DataFrame is obtained
        through pandas.read_csv() or pandas.read_hdf().
    cfg : dict
        Project configuration.
    destfolder : string, optional
        Destination folder into which images will be stored. By default, same location as the labeled data.
        Note that the folder will be created if it does not exist.
    scale : float, optional
        Up/downscale the output dimensions.
        By default, outputs are of the same dimensions as the original images.
    dpi : int, optional
        Output resolution. 100 dpi by default.
    keypoint : str, optional
        Keypoint appearance. By default, keypoints are marked by a + sign.
        Refer to https://matplotlib.org/3.2.1/api/markers_api.html for a list of all possible options.
    draw_skeleton : bool, optional
        Whether to draw the animal skeleton as defined in *cfg*. True by default.
    color_by : str, optional
        Color scheme of the keypoints. Must be either 'bodypart' or 'individual'.
        By default, keypoints are colored relative to the bodypart they represent.
    """

    bodyparts = df.columns.get_level_values("bodyparts")
    bodypart_names = bodyparts.unique()
    nbodyparts = len(bodypart_names)
    bodyparts = bodyparts[::2]
    draw_skeleton = (draw_skeleton
                     and cfg["skeleton"])  # Only draw if a skeleton is defined

    if color_by == "bodypart":
        map_ = bodyparts.map(dict(zip(bodypart_names, range(nbodyparts))))
        cmap = get_cmap(nbodyparts, cfg["colormap"])
        colors = cmap(map_)
    elif color_by == "individual":
        try:
            individuals = df.columns.get_level_values("individuals")
            individual_names = individuals.unique().to_list()
            nindividuals = len(individual_names)
            individuals = individuals[::2]
            map_ = individuals.map(
                dict(zip(individual_names, range(nindividuals))))
            cmap = get_cmap(nindividuals, cfg["colormap"])
            colors = cmap(map_)
        except KeyError as e:
            raise Exception(
                "Coloring by individuals is only valid for multi-animal data"
            ) from e
    else:
        raise ValueError(
            "`color_by` must be either `bodypart` or `individual`.")

    bones = []
    if draw_skeleton:
        for bp1, bp2 in cfg["skeleton"]:
            match1, match2 = [], []
            for j, bp in enumerate(bodyparts):
                if bp == bp1:
                    match1.append(j)
                elif bp == bp2:
                    match2.append(j)
            bones.extend(zip(match1, match2))
    ind_bones = tuple(zip(*bones))

    sep = "/" if "/" in df.index[0] else "\\"
    images = cfg["project_path"] + sep + df.index
    if sep != os.path.sep:
        images = images.str.replace(sep, os.path.sep)
    if not destfolder:
        destfolder = os.path.dirname(images[0])
    tmpfolder = destfolder + "_labeled"
    attempttomakefolder(tmpfolder)
    images_list = images.to_list()
    ic = io.imread_collection(images_list)

    h, w = ic[0].shape[:2]
    all_same_shape = True
    for array in ic[1:]:
        if array.shape[:2] != (h, w):
            all_same_shape = False
            break

    xy = df.values.reshape((df.shape[0], -1, 2))
    segs = xy[:, ind_bones].swapaxes(1, 2)

    s = cfg["dotsize"]
    alpha = cfg["alphavalue"]
    if all_same_shape:  # Very efficient, avoid re-drawing the whole plot
        fig, ax = prepare_figure_axes(w, h, scale, dpi)
        im = ax.imshow(np.zeros((h, w)), "gray")
        pts = [
            ax.plot([], [], keypoint, ms=s, alpha=alpha, color=c)[0]
            for c in colors
        ]
        coll = LineCollection([], colors=cfg["skeleton_color"], alpha=alpha)
        ax.add_collection(coll)
        for i in trange(len(ic)):
            filename = ic.files[i]
            ind = images_list.index(filename)
            coords = xy[ind]
            img = ic[i]
            if img.ndim == 2 or img.shape[-1] == 1:
                img = color.gray2rgb(ic[i])
            im.set_data(img)
            for pt, coord in zip(pts, coords):
                pt.set_data(*coord)
            if ind_bones:
                coll.set_segments(segs[ind])
            imagename = os.path.basename(filename)
            fig.subplots_adjust(left=0,
                                bottom=0,
                                right=1,
                                top=1,
                                wspace=0,
                                hspace=0)
            fig.savefig(
                os.path.join(tmpfolder,
                             imagename.replace(".png", f"_{color_by}.png")),
                dpi=dpi,
            )
        plt.close(fig)

    else:  # Good old inelegant way
        for i in trange(len(ic)):
            filename = ic.files[i]
            ind = images_list.index(filename)
            coords = xy[ind]
            image = ic[i]
            h, w = image.shape[:2]
            fig, ax = prepare_figure_axes(w, h, scale, dpi)
            ax.imshow(image)
            for coord, c in zip(coords, colors):
                ax.plot(*coord, keypoint, ms=s, alpha=alpha, color=c)
            if ind_bones:
                coll = LineCollection(segs[ind],
                                      colors=cfg["skeleton_color"],
                                      alpha=alpha)
                ax.add_collection(coll)
            imagename = os.path.basename(filename)
            fig.subplots_adjust(left=0,
                                bottom=0,
                                right=1,
                                top=1,
                                wspace=0,
                                hspace=0)
            fig.savefig(
                os.path.join(tmpfolder,
                             imagename.replace(".png", f"_{color_by}.png")),
                dpi=dpi,
            )
            plt.close(fig)
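
# Usage sketch (hypothetical paths): label the frames stored in a
# CollectedData file, coloring keypoints by bodypart:
import pandas as pd
from deeplabcut.utils.auxiliaryfunctions import read_config
cfg = read_config("/analysis/project/reaching-task/config.yaml")
df = pd.read_hdf(
    "/analysis/project/reaching-task/labeled-data/vid1/CollectedData_Alex.h5"
)
make_labeled_images_from_dataframe(df, cfg, color_by="bodypart")
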
Example #14
def extract_save_all_maps(config,
                          shuffle=1,
                          trainingsetindex=0,
                          comparisonbodyparts="all",
                          all_paf_in_one=True,
                          gputouse=None,
                          rescale=False,
                          Indices=None,
                          modelprefix="",
                          dest_folder=None):
    """
    Extracts the scoremap, location refinement field and part affinity field prediction of the model. The maps
    will be rescaled to the size of the input image and stored in the corresponding model folder in /evaluation-results.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    comparisonbodyparts: list of bodyparts, Default is "all".
        The average error will be computed for those body parts only (Has to be a subset of the body parts).

    all_paf_in_one : bool
        By default, all part affinity fields are displayed on a single frame.
        If false, individual fields are shown on separate frames.

    Indices: list of int, optional (default=None)
        Indices of the images for which the scoremap, locref and PAF should be computed. By default, all images are used.

    Examples
    --------
    Calculate maps for images 0, 1 and 33.
    >>> deeplabcut.extract_save_all_maps('/analysis/project/reaching-task/config.yaml', shuffle=1,Indices=[0,1,33])

    """

    from deeplabcut.utils.auxiliaryfunctions import (
        read_config, attempttomakefolder, GetEvaluationFolder,
        IntersectionofBodyPartsandOnesGivenbyUser)
    from tqdm import tqdm

    cfg = read_config(config)
    data = extract_maps(
        config,
        shuffle,
        trainingsetindex,
        gputouse,
        rescale,
        Indices,
        modelprefix,
    )

    comparisonbodyparts = IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts)

    print("Saving plots...")
    for frac, values in data.items():
        if not dest_folder:
            dest_folder = os.path.join(
                cfg["project_path"],
                str(
                    GetEvaluationFolder(frac,
                                        shuffle,
                                        cfg,
                                        modelprefix=modelprefix)),
                "maps",
            )
        attempttomakefolder(dest_folder)
        filepath = "{imname}_{map}_{label}_{shuffle}_{frac}_{snap}.png"
        dest_path = os.path.join(dest_folder, filepath)
        for snap, maps in values.items():
            for imagenr in tqdm(maps):
                (
                    image,
                    scmap,
                    locref,
                    paf,
                    bptnames,
                    pafgraph,
                    impath,
                    trainingframe,
                ) = maps[imagenr]
                label = "train" if trainingframe else "test"
                imname = os.path.split(os.path.splitext(impath)[0])[1]
                scmap, (locref_x, locref_y), paf = resize_all_maps(
                    image, scmap, locref, paf)
                to_plot = [
                    i for i, bpt in enumerate(bptnames)
                    if bpt in comparisonbodyparts
                ]
                list_of_inds = []
                for n, edge in enumerate(pafgraph):
                    if any(ind in to_plot for ind in edge):
                        list_of_inds.append([(2 * n, 2 * n + 1),
                                             (bptnames[edge[0]],
                                              bptnames[edge[1]])])
                map_ = scmap[:, :, to_plot].sum(axis=2)
                locref_x_ = locref_x[:, :, to_plot].sum(axis=2)
                locref_y_ = locref_y[:, :, to_plot].sum(axis=2)

                fig1, _ = visualize_scoremaps(image, map_)
                temp = dest_path.format(imname=imname,
                                        map='scmap',
                                        label=label,
                                        shuffle=shuffle,
                                        frac=frac,
                                        snap=snap)
                fig1.savefig(temp)

                fig2, _ = visualize_locrefs(image, map_, locref_x_, locref_y_)
                temp = dest_path.format(imname=imname,
                                        map='locref',
                                        label=label,
                                        shuffle=shuffle,
                                        frac=frac,
                                        snap=snap)
                fig2.savefig(temp)

                if paf is not None:
                    if not all_paf_in_one:
                        for inds, names in list_of_inds:
                            fig3, _ = visualize_paf(image, paf[:, :, [inds]])
                            temp = dest_path.format(
                                imname=imname,
                                map=f'paf_{"_".join(names)}',
                                label=label,
                                shuffle=shuffle,
                                frac=frac,
                                snap=snap)
                            fig3.savefig(temp)
                    else:
                        inds = [elem[0] for elem in list_of_inds]
                        n_inds = len(inds)
                        cmap = plt.cm.get_cmap(cfg['colormap'], n_inds)
                        colors = cmap(range(n_inds))
                        fig3, _ = visualize_paf(image,
                                                paf[:, :, inds],
                                                colors=colors)
                        temp = dest_path.format(imname=imname,
                                                map='paf',
                                                label=label,
                                                shuffle=shuffle,
                                                frac=frac,
                                                snap=snap)
                        fig3.savefig(temp)
                plt.close("all")
Example #15
def extract_maps(
    config,
    shuffle=0,
    trainingsetindex=0,
    gputouse=None,
    rescale=False,
    Indices=None,
    modelprefix="",
):
    """
    Extracts the scoremap, locref, partaffinityfields (if available).

    Returns a dictionary indexed by: trainingsetfraction, snapshotindex, and imageindex
    for those keys, each item contains: (image, scmap, locref, paf, bpt names, partaffinity graph, imagename, True/False if this image was in trainingset)

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 0.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    rescale: bool, default False
        Evaluate the model at the 'global_scale' variable (as set in the test/pose_config.yaml file for a particular project). I.e. every
        image will be resized according to that scale and the prediction will be compared to the resized ground truth. The error will be reported
        in pixels rescaled to the *original* size. I.e. for a [200,200] pixel image evaluated at global_scale=.5, the predictions are calculated
        on [100,100] pixel images, compared to 1/2*ground truth, and this error is then multiplied by 2. The evaluation images are also shown at the
        original size.

    Examples
    --------
    To extract the data for images 0 and 103 (of the training set) for the model trained with shuffle 0:
    >>> deeplabcut.extract_maps(configfile,0,Indices=[0,103])

    """
    from deeplabcut.utils.auxfun_videos import imread, imresize
    from deeplabcut.pose_estimation_tensorflow.nnet import predict
    from deeplabcut.pose_estimation_tensorflow.nnet import (
        predict_multianimal as predictma, )
    from deeplabcut.pose_estimation_tensorflow.config import load_config
    from deeplabcut.pose_estimation_tensorflow.dataset.pose_dataset import data_to_input
    from deeplabcut.utils import auxiliaryfunctions
    from tqdm import tqdm
    import tensorflow as tf

    vers = (tf.__version__).split(".")
    if int(vers[0]) == 1 and int(vers[1]) > 12:
        TF = tf.compat.v1
    else:
        TF = tf

    import pandas as pd
    from pathlib import Path
    import numpy as np

    TF.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    #    tf.logging.set_verbosity(tf.logging.WARN)

    start_path = os.getcwd()
    # Read file path for pose_config file. >> pass it on
    cfg = auxiliaryfunctions.read_config(config)

    if gputouse is not None:  # GPU selection
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        if 0 <= trainingsetindex < len(cfg["TrainingFraction"]):
            TrainingFractions = [
                cfg["TrainingFraction"][int(trainingsetindex)]
            ]
        else:
            raise Exception(
                "Please check the trainingsetindex! ",
                trainingsetindex,
                " should be an integer from 0 .. ",
                int(len(cfg["TrainingFraction"]) - 1),
            )

    # Loading human annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )

    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        str(cfg["project_path"] + "/evaluation-results/"))

    Maps = {}
    for trainFraction in TrainingFractions:
        Maps[trainFraction] = {}
        ##################################################
        # Load and setup CNN part detector
        ##################################################
        datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
            trainingsetfolder, trainFraction, shuffle, cfg)

        modelfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetModelFolder(trainFraction,
                                                  shuffle,
                                                  cfg,
                                                  modelprefix=modelprefix)),
        )
        path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
        # Load meta data
        (
            data,
            trainIndices,
            testIndices,
            trainFraction,
        ) = auxiliaryfunctions.LoadMetadata(
            os.path.join(cfg["project_path"], metadatafn))
        try:
            dlc_cfg = load_config(str(path_test_config))
        except FileNotFoundError:
            raise FileNotFoundError(
                "It seems the model for shuffle %s and trainFraction %s does not exist."
                % (shuffle, trainFraction))

        # change batch size, if it was edited during analysis!
        dlc_cfg["batch_size"] = 1  # in case this was edited for analysis.

        # Create folder structure to store results.
        evaluationfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetEvaluationFolder(
                    trainFraction, shuffle, cfg, modelprefix=modelprefix)),
        )
        auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                               recursive=True)
        # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

        # Check which snapshots are available and sort them by # iterations
        Snapshots = np.array([
            fn.split(".")[0]
            for fn in os.listdir(os.path.join(str(modelfolder), "train"))
            if "index" in fn
        ])
        try:  # check if any were found
            Snapshots[0]
        except IndexError:
            raise FileNotFoundError(
                "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                % (shuffle, trainFraction))

        increasing_indices = np.argsort(
            [int(m.split("-")[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]

        if cfg["snapshotindex"] == -1:
            snapindices = [-1]
        elif cfg["snapshotindex"] == "all":
            snapindices = range(len(Snapshots))
        elif cfg["snapshotindex"] < len(Snapshots):
            snapindices = [cfg["snapshotindex"]]
        else:
            raise ValueError(
                "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
            )

        ########################### RESCALING (to global scale)
        scale = dlc_cfg["global_scale"] if rescale else 1
        Data *= scale

        bptnames = [
            dlc_cfg["all_joints_names"][i]
            for i in range(len(dlc_cfg["all_joints"]))
        ]

        for snapindex in snapindices:
            dlc_cfg["init_weights"] = os.path.join(
                str(modelfolder), "train", Snapshots[snapindex]
            )  # setting weights to corresponding snapshot.
            trainingsiterations = (
                dlc_cfg["init_weights"].split(os.sep)[-1]
            ).split("-")[-1]  # read how many training iterations this corresponds to.

            # Name for deeplabcut net (based on its parameters)
            # DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction,trainingsiterations)
            # notanalyzed, resultsfilename, DLCscorer=auxiliaryfunctions.CheckifNotEvaluated(str(evaluationfolder),DLCscorer,DLCscorerlegacy,Snapshots[snapindex])
            # print("Extracting maps for ", DLCscorer, " with # of trainingiterations:", trainingsiterations)
            # if notanalyzed: #this only applies to ask if h5 exists...

            # Specifying state of model (snapshot / training state)
            sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
            Numimages = len(Data.index)
            PredicteData = np.zeros(
                (Numimages, 3 * len(dlc_cfg["all_joints_names"])))
            print("Analyzing data...")
            if Indices is None:
                Indices = enumerate(Data.index)
            else:
                Ind = [Data.index[j] for j in Indices]
                Indices = enumerate(Ind)

            DATA = {}
            for imageindex, imagename in tqdm(Indices):
                image = imread(os.path.join(cfg["project_path"], imagename),
                               mode="RGB")
                if scale != 1:
                    image = imresize(image, scale)

                image_batch = data_to_input(image)
                # Compute prediction with the CNN
                outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})

                if cfg.get("multianimalproject", False):
                    scmap, locref, paf = predictma.extract_cnn_output(
                        outputs_np, dlc_cfg)
                    pagraph = dlc_cfg["partaffinityfield_graph"]
                else:
                    scmap, locref = predict.extract_cnn_output(
                        outputs_np, dlc_cfg)
                    paf = None
                    pagraph = []

                trainingframe = imageindex not in testIndices

                DATA[imageindex] = [
                    image,
                    scmap,
                    locref,
                    paf,
                    bptnames,
                    pagraph,
                    imagename,
                    trainingframe,
                ]
            Maps[trainFraction][Snapshots[snapindex]] = DATA
    os.chdir(str(start_path))
    return Maps
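
The returned Maps dictionary is nested by training fraction, snapshot name, and image index; each leaf is the list built above: [image, scmap, locref, paf, bptnames, pagraph, imagename, trainingframe]. Below is a minimal sketch for inspecting one score-map channel; the helper name and the matplotlib plotting are illustrative, not part of DeepLabCut.

import matplotlib.pyplot as plt

def show_scoremap(maps, train_fraction, snapshot, image_index, joint=0):
    # Unpack one leaf of the structure returned by extract_maps above.
    (image, scmap, locref, paf, bptnames, pagraph,
     imagename, trainingframe) = maps[train_fraction][snapshot][image_index]
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.imshow(image)
    ax1.set_title(imagename)
    ax2.imshow(scmap[:, :, joint])  # confidence map of a single body part
    ax2.set_title(bptnames[joint] + (" (train)" if trainingframe else " (test)"))
    plt.show()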
def evaluate_network(config,
                     Shuffles=[1],
                     plotting=None,
                     show_errors=True,
                     comparisonbodyparts="all",
                     gputouse=None):
    """
    Evaluates the network based on the saved models at different stages of training.\n
    The evaluation results are stored in .h5 and .csv files under the subdirectory 'evaluation-results'.
    Change the snapshotindex parameter in the config file to 'all' in order to evaluate all the saved models.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    Shuffles: list, optional
        List of integers specifying the shuffle indices of the training dataset. The default is [1]

    plotting: bool, optional
        Plots the predictions on the train and test images. The default is ``False``; if provided it must be either ``True`` or ``False``

    show_errors: bool, optional
        Display train and test errors. The default is ``True``.

    comparisonbodyparts: list of bodyparts, Default is "all".
        The average error will be computed for those body parts only (Has to be a subset of the body parts).

    gputouse: int, optional
        Natural number indicating the number of your GPU (see number in nvidia-smi). If you do not have a GPU put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    Examples
    --------
    If you do not want to plot
    >>> deeplabcut.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[1])
    --------

    If you want to plot
    >>> deeplabcut.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[1], plotting=True)
    """
    import os
    import numpy as np
    import pandas as pd
    from pathlib import Path
    from tqdm import tqdm
    from skimage import io
    import skimage.color

    from deeplabcut.pose_estimation_tensorflow.nnet import predict as ptf_predict
    from deeplabcut.pose_estimation_tensorflow.config import load_config
    from deeplabcut.pose_estimation_tensorflow.dataset.pose_dataset import data_to_input
    from deeplabcut.utils import auxiliaryfunctions, visualization
    import tensorflow as tf

    if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
        del os.environ['TF_CUDNN_USE_AUTOTUNE']  # was potentially set during training

    vers = (tf.__version__).split('.')
    if int(vers[0]) == 1 and int(vers[1]) > 12:
        TF = tf.compat.v1
    else:
        TF = tf

    TF.reset_default_graph()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    #    tf.logging.set_verbosity(tf.logging.WARN)

    start_path = os.getcwd()
    # Read file path for pose_config file. >> pass it on
    cfg = auxiliaryfunctions.read_config(config)
    if gputouse is not None:  # GPU selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    # Loading human-annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(cfg["project_path"], str(trainingsetfolder),
                     'CollectedData_' + cfg["scorer"] + '.h5'),
        'df_with_missing')
    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts)
    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        str(cfg["project_path"] + "/evaluation-results/"))
    for shuffle in Shuffles:
        for trainFraction in cfg["TrainingFraction"]:
            ##################################################
            # Load and setup CNN part detector
            ##################################################
            datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg)
            modelfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(trainFraction, shuffle,
                                                      cfg)))
            path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
            # Load meta data
            data, trainIndices, testIndices, trainFraction = auxiliaryfunctions.LoadMetadata(
                os.path.join(cfg["project_path"], metadatafn))

            try:
                dlc_cfg = load_config(str(path_test_config))
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It seems the model for shuffle %s and trainFraction %s does not exist."
                    % (shuffle, trainFraction))

            #change batch size, if it was edited during analysis!
            dlc_cfg['batch_size'] = 1  #in case this was edited for analysis.
            #Create folder structure to store results.
            evaluationfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetEvaluationFolder(
                        trainFraction, shuffle, cfg)))
            auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                                   recursive=True)
            #path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

            # Check which snapshots are available and sort them by # iterations
            Snapshots = np.array([
                fn.split('.')[0]
                for fn in os.listdir(os.path.join(str(modelfolder), 'train'))
                if "index" in fn
            ])
            try:  # check if any were found
                Snapshots[0]
            except IndexError:
                raise FileNotFoundError(
                    "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                    % (shuffle, trainFraction))

            increasing_indices = np.argsort(
                [int(m.split('-')[1]) for m in Snapshots])
            Snapshots = Snapshots[increasing_indices]

            if cfg["snapshotindex"] == -1:
                snapindices = [-1]
            elif cfg["snapshotindex"] == "all":
                snapindices = range(len(Snapshots))
            elif cfg["snapshotindex"] < len(Snapshots):
                snapindices = [cfg["snapshotindex"]]
            else:
                raise ValueError(
                    "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                )

            final_result = []
            ##################################################
            # Compute predictions over images
            ##################################################
            for snapindex in snapindices:
                dlc_cfg['init_weights'] = os.path.join(
                    str(modelfolder), 'train', Snapshots[snapindex]
                )  #setting weights to corresponding snapshot.
                trainingsiterations = (
                    dlc_cfg['init_weights'].split(os.sep)[-1]
                ).split('-')[-1]  # read how many training iterations this corresponds to.

                #name for deeplabcut net (based on its parameters)
                DLCscorer = auxiliaryfunctions.GetScorerName(
                    cfg, shuffle, trainFraction, trainingsiterations)
                print("Running ", DLCscorer, " with # of trainingiterations:",
                      trainingsiterations)
                resultsfilename = os.path.join(
                    str(evaluationfolder),
                    DLCscorer + '-' + Snapshots[snapindex] + '.h5')
                try:
                    DataMachine = pd.read_hdf(resultsfilename,
                                              'df_with_missing')
                    print("This net has already been evaluated!")
                except FileNotFoundError:
                    # Specifying state of model (snapshot / training state)
                    sess, inputs, outputs = ptf_predict.setup_pose_prediction(
                        dlc_cfg)

                    Numimages = len(Data.index)
                    PredicteData = np.zeros(
                        (Numimages, 3 * len(dlc_cfg['all_joints_names'])))
                    print("Analyzing data...")
                    for imageindex, imagename in tqdm(enumerate(Data.index)):
                        image = io.imread(os.path.join(cfg['project_path'],
                                                       imagename),
                                          mode='RGB')
                        image = skimage.color.gray2rgb(image)
                        image_batch = data_to_input(image)

                        # Compute prediction with the CNN
                        outputs_np = sess.run(outputs,
                                              feed_dict={inputs: image_batch})
                        scmap, locref = ptf_predict.extract_cnn_output(
                            outputs_np, dlc_cfg)

                        # Extract maximum scoring location from the heatmap, assume 1 person
                        pose = ptf_predict.argmax_pose_predict(
                            scmap, locref, dlc_cfg.stride)
                        PredicteData[imageindex, :] = pose.flatten(
                        )  # NOTE: dlc_cfg['all_joints_names'] must be in the same order as the bodyparts!

                    sess.close()  #closes the current tf session

                    index = pd.MultiIndex.from_product(
                        [[DLCscorer], dlc_cfg['all_joints_names'],
                         ['x', 'y', 'likelihood']],
                        names=['scorer', 'bodyparts', 'coords'])

                    # Saving results
                    DataMachine = pd.DataFrame(PredicteData,
                                               columns=index,
                                               index=Data.index.values)
                    DataMachine.to_hdf(resultsfilename,
                                       'df_with_missing',
                                       format='table',
                                       mode='w')

                    print("Done and results stored for snapshot: ",
                          Snapshots[snapindex])
                    DataCombined = pd.concat([Data.T, DataMachine.T], axis=0).T
                    RMSE, RMSEpcutoff = pairwisedistances(
                        DataCombined, cfg["scorer"], DLCscorer, cfg["pcutoff"],
                        comparisonbodyparts)
                    testerror = np.nanmean(
                        RMSE.iloc[testIndices].values.flatten())
                    trainerror = np.nanmean(
                        RMSE.iloc[trainIndices].values.flatten())
                    testerrorpcutoff = np.nanmean(
                        RMSEpcutoff.iloc[testIndices].values.flatten())
                    trainerrorpcutoff = np.nanmean(
                        RMSEpcutoff.iloc[trainIndices].values.flatten())
                    results = [
                        trainingsiterations,
                        int(100 * trainFraction), shuffle,
                        np.round(trainerror, 2),
                        np.round(testerror, 2), cfg["pcutoff"],
                        np.round(trainerrorpcutoff, 2),
                        np.round(testerrorpcutoff, 2)
                    ]
                    final_result.append(results)

                    if show_errors:
                        print("Results for", trainingsiterations,
                              "training iterations:", int(100 * trainFraction),
                              shuffle, "train error:", np.round(trainerror, 2),
                              "pixels. Test error:", np.round(testerror, 2),
                              "pixels.")
                        print("With pcutoff of", cfg["pcutoff"],
                              "train error:", np.round(trainerrorpcutoff, 2),
                              "pixels. Test error:",
                              np.round(testerrorpcutoff, 2), "pixels.")
                        print(
                            "The errors are given by the average distance between the labels placed by DLC and by the human scorer."
                        )

                    if plotting:
                        print("Plotting...")
                        colors = visualization.get_cmap(
                            len(comparisonbodyparts), name=cfg['colormap'])

                        foldername = os.path.join(
                            str(evaluationfolder), 'LabeledImages_' +
                            DLCscorer + '_' + Snapshots[snapindex])
                        auxiliaryfunctions.attempttomakefolder(foldername)
                        NumFrames = np.size(DataCombined.index)
                        for ind in np.arange(NumFrames):
                            visualization.PlottingandSaveLabeledFrame(
                                DataCombined, ind, trainIndices, cfg, colors,
                                comparisonbodyparts, DLCscorer, foldername)

                    TF.reset_default_graph()
                    #print(final_result)
            make_results_file(final_result, evaluationfolder, DLCscorer)
            print(
                "The network is evaluated and the results are stored in the subdirectory 'evaluation-results'."
            )
            print(
                "If it generalizes well, choose the best model for prediction and update the config file with the appropriate index for the 'snapshotindex'.\nUse the function 'analyze_videos' to make predictions on new videos."
            )
            print(
                "Otherwise consider retraining the network (see DeepLabCut workflow Fig 2)"
            )

    # returning to initial folder
    os.chdir(str(start_path))
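
evaluate_network relies on a pairwisedistances helper (called above but not shown in this excerpt) to turn the combined human/machine DataFrame into per-keypoint errors. A minimal sketch of that computation follows, assuming the usual (scorer, bodyparts, coords) column MultiIndex; the function name and exact signature here are illustrative, not the library's own.

import numpy as np

def pairwise_rmse(data_combined, human_scorer, dlc_scorer, pcutoff):
    # Squared differences align on the shared (bodyparts, coords) columns.
    sq = (data_combined[human_scorer] - data_combined[dlc_scorer]) ** 2
    rmse = np.sqrt(sq.xs("x", level="coords", axis=1)
                   + sq.xs("y", level="coords", axis=1))
    # Second variant: keep only predictions above the likelihood cutoff.
    mask = (data_combined[dlc_scorer]
            .xs("likelihood", level="coords", axis=1) >= pcutoff)
    return rmse, rmse[mask]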
def proc_video(
    videos,
    destfolder,
    filtered,
    DLCscorer,
    DLCscorerlegacy,
    track_method,
    cfg,
    individuals,
    color_by,
    bodyparts,
    codec,
    bodyparts2connect,
    trailpoints,
    save_frames,
    outputframerate,
    Frames2plot,
    draw_skeleton,
    skeleton_color,
    displaycropped,
    fastmode,
    keypoints_only,
    video,
):
    """Helper function for create_videos

    Parameters
    ----------


    """
    videofolder = Path(video).parents[0]
    if destfolder is None:
        destfolder = videofolder  # where your folder with videos is.

    auxiliaryfunctions.attempttomakefolder(destfolder)

    os.chdir(destfolder)  # THE VIDEO IS STILL IN THE VIDEO FOLDER
    print("Starting to process video: {}".format(video))
    vname = str(Path(video).stem)

    if filtered:
        videooutname1 = vname + DLCscorer + "filtered_labeled.mp4"
        videooutname2 = vname + DLCscorerlegacy + "filtered_labeled.mp4"
    else:
        videooutname1 = vname + DLCscorer + "_labeled.mp4"
        videooutname2 = vname + DLCscorerlegacy + "_labeled.mp4"

    if os.path.isfile(videooutname1) or os.path.isfile(videooutname2):
        print("Labeled video {} already created.".format(vname))
    else:
        print("Loading {} and data.".format(video))
        try:
            df, filepath, _, _ = auxiliaryfunctions.load_analyzed_data(
                destfolder, vname, DLCscorer, filtered, track_method)
            metadata = auxiliaryfunctions.load_video_metadata(
                destfolder, vname, DLCscorer)
            if cfg.get("multianimalproject", False):
                s = "_id" if color_by == "individual" else "_bp"
            else:
                s = ""
            videooutname = filepath.replace(".h5", f"{s}_labeled.mp4")
            if os.path.isfile(videooutname):
                print("Labeled video already created. Skipping...")
                return

            if all(individuals):
                df = df.loc(axis=1)[:, individuals]
            cropping = metadata["data"]["cropping"]
            [x1, x2, y1, y2] = metadata["data"]["cropping_parameters"]
            labeled_bpts = [
                bp for bp in df.columns.get_level_values("bodyparts").unique()
                if bp in bodyparts
            ]

            if keypoints_only:
                # Mask rather than drop unwanted bodyparts to ensure consistent coloring
                mask = df.columns.get_level_values("bodyparts").isin(bodyparts)
                df.loc[:, ~mask] = np.nan
                inds = None
                if bodyparts2connect:
                    all_bpts = df.columns.get_level_values("bodyparts")[::3]
                    inds = get_segment_indices(bodyparts2connect, all_bpts)
                create_video_with_keypoints_only(
                    df,
                    videooutname,
                    inds,
                    cfg["pcutoff"],
                    cfg["dotsize"],
                    cfg["alphavalue"],
                    skeleton_color=skeleton_color,
                    color_by=color_by,
                    colormap=cfg["colormap"],
                )
            elif not fastmode:
                tmpfolder = os.path.join(str(videofolder), "temp-" + vname)
                if save_frames:
                    auxiliaryfunctions.attempttomakefolder(tmpfolder)
                clip = vp(video)
                CreateVideoSlow(
                    videooutname,
                    clip,
                    df,
                    tmpfolder,
                    cfg["dotsize"],
                    cfg["colormap"],
                    cfg["alphavalue"],
                    cfg["pcutoff"],
                    trailpoints,
                    cropping,
                    x1,
                    x2,
                    y1,
                    y2,
                    save_frames,
                    labeled_bpts,
                    outputframerate,
                    Frames2plot,
                    bodyparts2connect,
                    skeleton_color,
                    draw_skeleton,
                    displaycropped,
                    color_by,
                )
            else:
                if displaycropped:  # then the cropped video + the labels is depicted
                    clip = vp(
                        fname=video,
                        sname=videooutname,
                        codec=codec,
                        sw=x2 - x1,
                        sh=y2 - y1,
                        fps=outputframerate,
                    )
                else:  # then the full video + the (perhaps in cropped mode analyzed labels) are depicted
                    clip = vp(fname=video,
                              sname=videooutname,
                              codec=codec,
                              fps=outputframerate)
                CreateVideo(
                    clip,
                    df,
                    cfg["pcutoff"],
                    cfg["dotsize"],
                    cfg["colormap"],
                    labeled_bpts,
                    trailpoints,
                    cropping,
                    x1,
                    x2,
                    y1,
                    y2,
                    bodyparts2connect,
                    skeleton_color,
                    draw_skeleton,
                    displaycropped,
                    color_by,
                )

        except FileNotFoundError as e:
            print(e)
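
The keypoints_only branch above masks unwanted body parts with NaN instead of dropping their columns, so each body part keeps its slot (and hence its colour) in the colormap. A self-contained toy illustration of that masking idiom, with made-up body-part names:

import numpy as np
import pandas as pd

cols = pd.MultiIndex.from_product(
    [["scorer"], ["nose", "tailbase"], ["x", "y", "likelihood"]],
    names=["scorer", "bodyparts", "coords"])
df = pd.DataFrame(np.random.rand(3, 6), columns=cols)
# Keep 'nose' visible; blank out 'tailbase' without changing the layout.
mask = df.columns.get_level_values("bodyparts").isin(["nose"])
df.loc[:, ~mask] = np.nan
print(df)  # tailbase columns are NaN but still occupy their slots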
Example #18
def cropimagesandlabels(
    config,
    numcrops=10,
    size=(400, 400),
    userfeedback=True,
    cropdata=True,
    excludealreadycropped=True,
    updatevideoentries=True,
):
    """
    Crop images into multiple random crops (defined by numcrops) of the given size. If cropdata=True, the
    annotation data is loaded and labels for the cropped images are inherited.
    If False, crops can also be made for unlabeled folders.

    This can be helpful for large frames with multiple animals: a smaller set of equally sized images is created.

    Parameters
    ----------
    config : string
        String containing the full path of the config file in the project.

    numcrops: int, optional
        Number of random crops per image (each around a random bodypart). Default is 10.

    size: tuple, optional
        Height x width of the crops in pixels. Default is (400, 400).

    userfeedback: bool, optional
        If True, you are asked for confirmation before the frames of each folder are cropped.
        If False, all folders are processed without asking.

    cropdata: bool, default True
        If True, creates the corresponding annotation data (from the ground truth) for each crop.

    excludealreadycropped: bool, default True
        If True, excludes folders that already contain _cropped in their name.

    updatevideoentries: bool, default True
        If True, updates the video_sets entries in the config to refer to the cropped frames instead. This makes sense for subsequent processing.

    Example
    --------
    for labeling the frames
    >>> deeplabcut.cropimagesandlabels('/analysis/project/reaching-task/config.yaml')

    --------
    """
    from tqdm import trange

    indexlength = int(np.ceil(np.log10(numcrops)))
    project_path = os.path.dirname(config)
    cfg = auxiliaryfunctions.read_config(config)
    videos = cfg["video_sets"].keys()
    video_names = []
    for video in videos:
        parent, filename, ext = _robust_path_split(video)
        if excludealreadycropped and "_cropped" in filename:
            continue
        video_names.append([parent, filename, ext])

    if ("video_sets_original" not in cfg.keys() and updatevideoentries
        ):  # this dict is kept for storing links to original full-sized videos
        cfg["video_sets_original"] = {}

    for vidpath, vidname, videotype in video_names:
        folder = os.path.join(project_path, "labeled-data", vidname)
        if userfeedback:
            print("Do you want to crop frames for folder: ", folder, "?")
            askuser = input("(yes/no):")
        else:
            askuser = "******"
        if askuser == "y" or askuser == "yes" or askuser == "Y" or askuser == "Yes":
            new_vidname = vidname + "_cropped"
            new_folder = os.path.join(project_path, "labeled-data",
                                      new_vidname)
            auxiliaryfunctions.attempttomakefolder(new_folder)

            AnnotationData = []
            pd_index = []

            fn = os.path.join(folder, f"CollectedData_{cfg['scorer']}.h5")
            df = pd.read_hdf(fn, "df_with_missing")
            data = df.values.reshape((df.shape[0], -1, 2))
            sep = "/" if "/" in df.index[0] else "\\"
            if sep != os.path.sep:
                df.index = df.index.str.replace(sep, os.path.sep)
            images = project_path + os.path.sep + df.index
            # Avoid cropping already cropped images
            cropped_images = auxiliaryfunctions.grab_files_in_folder(
                new_folder, "png")
            cropped_names = set(map(lambda x: x.split("c")[0], cropped_images))
            imnames = [
                im for im in images.to_list()
                if Path(im).stem not in cropped_names
            ]
            ic = io.imread_collection(imnames)
            for i in trange(len(ic)):
                frame = ic[i]
                h, w = np.shape(frame)[:2]
                if size[0] >= h or size[1] >= w:
                    shutil.rmtree(new_folder, ignore_errors=True)
                    raise ValueError(
                        "Crop dimensions are larger than image size")

                imagename = os.path.relpath(ic.files[i], project_path)
                ind = np.flatnonzero(df.index == imagename)[0]
                cropindex = 0
                attempts = -1
                while cropindex < numcrops:
                    dd = np.array(data[ind].copy(), dtype=float)
                    y0, x0 = (
                        np.random.randint(h - size[0]),
                        np.random.randint(w - size[1]),
                    )
                    y1 = y0 + size[0]
                    x1 = x0 + size[1]
                    with np.errstate(invalid="ignore"):
                        within = np.all((dd >= [x0, y0]) & (dd < [x1, y1]),
                                        axis=1)
                    if cropdata:
                        dd[within] -= [x0, y0]
                        dd[~within] = np.nan
                    attempts += 1
                    if within.any() or attempts > 10:
                        newimname = str(
                            Path(imagename).stem + "c" +
                            str(cropindex).zfill(indexlength) + ".png")
                        croppedimgname = os.path.join(new_folder, newimname)
                        io.imsave(croppedimgname, frame[y0:y1, x0:x1])
                        cropindex += 1
                        pd_index.append(
                            os.path.join("labeled-data", new_vidname,
                                         newimname))
                        AnnotationData.append(dd.flatten())

            if cropdata:
                df = pd.DataFrame(AnnotationData,
                                  index=pd_index,
                                  columns=df.columns)
                fn_new = fn.replace(folder, new_folder)
                df.to_hdf(fn_new, key="df_with_missing", mode="w")
                df.to_csv(fn_new.replace(".h5", ".csv"))

            if updatevideoentries and cropdata:
                # moving old entry to _original, dropping it from video_set and update crop parameters
                video_orig = sep.join((vidpath, vidname + videotype))
                cfg["video_sets_original"][video_orig] = cfg["video_sets"][
                    video_orig]
                cfg["video_sets"].pop(video_orig)
                cfg["video_sets"][sep.join(
                    (vidpath, new_vidname + videotype))] = {
                        "crop": ", ".join(map(str, [0, size[1], 0, size[0]]))
                    }

    cfg["croppedtraining"] = True
    auxiliaryfunctions.write_config(config, cfg)
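
The crop bookkeeping above boils down to a window test plus a translation: keypoints inside [x0, x1) x [y0, y1) are shifted into crop coordinates, everything else is invalidated with NaN. A self-contained toy version of that arithmetic:

import numpy as np

keypoints = np.array([[50.0, 60.0], [350.0, 10.0]])  # (x, y) pairs
x0, y0, x1, y1 = 30, 40, 230, 240                    # a 200x200 crop window
within = np.all((keypoints >= [x0, y0]) & (keypoints < [x1, y1]), axis=1)
keypoints[within] -= [x0, y0]   # shift into crop coordinates
keypoints[~within] = np.nan     # invalidate labels outside the crop
print(keypoints)                # [[20., 20.], [nan, nan]]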
def create_video_with_all_detections(
    config,
    videos,
    shuffle=1,
    trainingsetindex=0,
    displayedbodyparts="all",
    destfolder=None,
    modelprefix="",
):
    """
    Create a video labeled with all the detections stored in a '*_full.pickle' file.

    Parameters
    ----------
    config : str
        Absolute path to the config.yaml file

    videos : list of str
        A list of strings containing the full paths to videos for analysis or a path to the directory,
        where all the videos with same extension are stored.

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, then all body parts
        from config.yaml are used, or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    """
    from deeplabcut.pose_estimation_tensorflow.lib.inferenceutils import Assembler
    from skimage.draw import disk
    from tqdm import trange
    import pickle
    import re

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    DLCscorername, _ = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix)

    for video in videos:
        videofolder = os.path.splitext(video)[0]

        if destfolder is None:
            outputname = "{}_full.mp4".format(videofolder + DLCscorername)
            full_pickle = os.path.join(videofolder + DLCscorername +
                                       "_full.pickle")
        else:
            auxiliaryfunctions.attempttomakefolder(destfolder)
            outputname = os.path.join(
                destfolder,
                str(Path(video).stem) + DLCscorername + "_full.mp4")
            full_pickle = os.path.join(
                destfolder,
                str(Path(video).stem) + DLCscorername + "_full.pickle")

        if not (os.path.isfile(outputname)):
            print("Creating labeled video for ", str(Path(video).stem))
            with open(full_pickle, "rb") as file:
                data = pickle.load(file)

            header = data.pop("metadata")
            all_jointnames = header["all_joints_names"]

            if displayedbodyparts == "all":
                numjoints = len(all_jointnames)
                bpts = range(numjoints)
            else:  # select only "displayedbodyparts"
                bpts = []
                for bptindex, bp in enumerate(all_jointnames):
                    if bp in displayedbodyparts:
                        bpts.append(bptindex)
                numjoints = len(bpts)

            frame_names = list(data)
            frames = [int(re.findall(r"\d+", name)[0]) for name in frame_names]
            colorclass = plt.cm.ScalarMappable(cmap=cfg["colormap"])
            C = colorclass.to_rgba(np.linspace(0, 1, numjoints))
            colors = (C[:, :3] * 255).astype(np.uint8)

            pcutoff = cfg["pcutoff"]
            dotsize = cfg["dotsize"]
            clip = vp(fname=video, sname=outputname, codec="mp4v")
            ny, nx = clip.height(), clip.width()

            for n in trange(clip.nframes):
                frame = clip.load_frame()
                try:
                    ind = frames.index(n)
                    dets = Assembler._flatten_detections(
                        data[frame_names[ind]])
                    for det in dets:
                        if det.label not in bpts or det.confidence < pcutoff:
                            continue
                        x, y = det.pos
                        rr, cc = disk((y, x), dotsize, shape=(ny, nx))
                        frame[rr, cc] = colors[bpts.index(det.label)]
                except ValueError:  # no data stored for that particular frame
                    print(n, "no data")
                try:
                    clip.save_frame(frame)
                except Exception:
                    print(n, "frame writing error.")
            clip.close()
        else:
            print("Detections already plotted, ", outputname)
Example #20
    def export_to_training_data(self, pcutoff=0.1):
        import os
        from skimage import io

        inds = self.manager.find_edited_frames()
        if not len(inds):
            print("No frames have been manually edited.")
            return

        # Save additional frames to the labeled-data directory
        strwidth = int(np.ceil(np.log10(self.nframes)))
        vname = os.path.splitext(os.path.basename(self.videoname))[0]
        tmpfolder = os.path.join(self.manager.cfg["project_path"],
                                 "labeled-data", vname)
        if os.path.isdir(tmpfolder):
            print("Frames from video", vname,
                  " already extracted (more will be added)!")
        else:
            attempttomakefolder(tmpfolder)
        index = []
        for ind in inds:
            imagename = os.path.join(tmpfolder,
                                     "img" + str(ind).zfill(strwidth) + ".png")
            index.append(os.path.join(*imagename.rsplit(os.path.sep, 3)[-3:]))
            if not os.path.isfile(imagename):
                self.video.set(cv2.CAP_PROP_POS_FRAMES, ind)
                frame = self._read_frame()
                if frame is None:
                    print("Frame could not be read. Skipping...")
                    continue
                frame = frame.astype(np.ubyte)
                if self.manager.cfg["cropping"]:
                    x1, x2, y1, y2 = [
                        int(self.manager.cfg[key])
                        for key in ("x1", "x2", "y1", "y2")
                    ]
                    frame = frame[y1:y2, x1:x2]
                io.imsave(imagename, frame)

        # Store the newly-refined data
        data = self.manager.format_data()
        df = data.iloc[inds]

        # Uncertain keypoints are ignored
        def filter_low_prob(cols, prob):
            mask = cols.iloc[:, 2] < prob
            cols.loc[mask] = np.nan
            return cols

        df = df.groupby(level='bodyparts', axis=1).apply(filter_low_prob,
                                                         prob=pcutoff)
        df.index = index
        machinefile = os.path.join(
            tmpfolder,
            "machinelabels-iter" + str(self.manager.cfg["iteration"]) + ".h5")
        if os.path.isfile(machinefile):
            df_old = pd.read_hdf(machinefile, "df_with_missing")
            df_joint = pd.concat([df_old, df])
            df_joint = df_joint[~df_joint.index.duplicated(keep="first")]
            df_joint.to_hdf(machinefile, key="df_with_missing", mode="w")
            df_joint.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))
        else:
            df.to_hdf(machinefile, key="df_with_missing", mode="w")
            df.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))

        # Merge with the already existing annotated data
        df.columns.set_levels([self.manager.cfg["scorer"]],
                              level="scorer",
                              inplace=True)
        df.drop("likelihood", level="coords", axis=1, inplace=True)
        output_path = os.path.join(
            tmpfolder, f'CollectedData_{self.manager.cfg["scorer"]}.h5')
        if os.path.isfile(output_path):
            print(
                "A training dataset file is already found for this video. The refined machine labels are merged to this data!"
            )
            df_orig = pd.read_hdf(output_path, "df_with_missing")
            df_joint = pd.concat([df, df_orig])
            # Now drop redundant ones keeping the first one [this will make sure that the refined machine file gets preference]
            df_joint = df_joint[~df_joint.index.duplicated(keep="first")]
            df_joint.sort_index(inplace=True)
            df_joint.to_hdf(output_path, key="df_with_missing", mode="w")
            df_joint.to_csv(output_path.replace("h5", "csv"))
        else:
            df.sort_index(inplace=True)
            df.to_hdf(output_path, key="df_with_missing", mode="w")
            df.to_csv(output_path.replace("h5", "csv"))
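
The merge above uses a concat-then-deduplicate idiom: the refined labels are concatenated first, so duplicated(keep="first") discards the stale rows from the existing file. A toy illustration with hypothetical frame names:

import pandas as pd

original = pd.DataFrame({"x": [1.0, 2.0]}, index=["img0", "img1"])
refined = pd.DataFrame({"x": [9.0]}, index=["img1"])
# Refined rows come first, so they win on duplicated indices.
merged = pd.concat([refined, original])
merged = merged[~merged.index.duplicated(keep="first")].sort_index()
print(merged)  # img0 -> 1.0, img1 -> 9.0 (refined value kept)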
Example #21
def create_multianimaltraining_dataset(
    config,
    num_shuffles=1,
    Shuffles=None,
    windows2linux=False,
    net_type=None,
    numdigits=2,
    crop_size=(400, 400),
    crop_sampling="hybrid",
    paf_graph=None,
    trainIndices=None,
    testIndices=None,
):
    """
    Creates a training dataset for multi-animal datasets. Labels from all the extracted frames are merged into a single .h5 file.\n
    Only the videos included in the config file are used to create this dataset.\n
    [OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.

    Important differences to the standard training dataset:
     - stores coordinates with numdigits decimal digits
     - creates
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    Shuffles: list of shuffles.
        Alternatively the user can also give a list of shuffles (integers!).

    net_type: string
        Type of networks. Currently resnet_50, resnet_101, and resnet_152, efficientnet-b0, efficientnet-b1, efficientnet-b2, efficientnet-b3,
        efficientnet-b4, efficientnet-b5, and efficientnet-b6 as well as dlcrnet_ms5 are supported (not the MobileNets!).
        See Lauer et al. 2021 https://www.biorxiv.org/content/10.1101/2021.04.30.442096v1

    numdigits: int, optional

    crop_size: tuple of int, optional
        Dimensions (width, height) of the crops for data augmentation.
        Default is 400x400.

    crop_sampling: str, optional
        Crop centers sampling method. Must be either:
        "uniform" (randomly over the image),
        "keypoints" (randomly over the annotated keypoints),
        "density" (weighing preferentially dense regions of keypoints),
        or "hybrid" (alternating randomly between "uniform" and "density").
        Default is "hybrid".

    paf_graph: list of lists, optional (default=None)
        If not None, overwrite the default complete graph. This is useful for advanced users who
        already know a good graph, or simply want to use a specific one. Note that, in that case,
        the data-driven selection procedure upon model evaluation will be skipped.

    trainIndices: list of lists, optional (default=None)
        List of one or multiple lists containing train indexes.
        A list containing two lists of training indexes will produce two splits.

    testIndices: list of lists, optional (default=None)
        List of one or multiple lists containing test indexes.

    Example
    --------
    >>> deeplabcut.create_multianimaltraining_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)

    >>> deeplabcut.create_multianimaltraining_dataset('/analysis/project/reaching-task/config.yaml', Shuffles=[0,1,2], trainIndices=[trainInd1, trainInd2, trainInd3], testIndices=[testInd1, testInd2, testInd3])

    Windows:
    >>> deeplabcut.create_multianimaltraining_dataset(r'C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
    --------
    """
    if windows2linux:
        warnings.warn(
            "`windows2linux` has no effect since 2.2.0.4 and will be removed in 2.2.1.",
            FutureWarning,
        )

    if len(crop_size) != 2 or not all(isinstance(v, int) for v in crop_size):
        raise ValueError(
            "Crop size must be a tuple of two integers (width, height).")

    if crop_sampling not in ("uniform", "keypoints", "density", "hybrid"):
        raise ValueError(
            f"Invalid sampling {crop_sampling}. Must be "
            f"either 'uniform', 'keypoints', 'density', or 'hybrid.")

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg["scorer"]
    project_path = cfg["project_path"]
    # Create path for training sets & store data there
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    full_training_path = Path(project_path, trainingsetfolder)
    auxiliaryfunctions.attempttomakefolder(full_training_path, recursive=True)

    Data = merge_annotateddatasets(cfg, full_training_path)
    if Data is None:
        return
    Data = Data[scorer]

    if net_type is None:  # loading & linking pretrained models
        net_type = cfg.get("default_net_type", "dlcrnet_ms5")
    elif not any(net in net_type for net in ("resnet", "eff", "dlc", "mob")):
        raise ValueError(f"Unsupported network {net_type}.")

    multi_stage = False
    ### dlcnet_ms5: backbone resnet50 + multi-fusion & multi-stage module
    ### dlcr101_ms5/dlcr152_ms5: backbone resnet101/152 + multi-fusion & multi-stage module
    if all(net in net_type for net in ("dlcr", "_ms5")):
        num_layers = re.findall("dlcr([0-9]*)", net_type)[0]
        if num_layers == "":
            num_layers = 50
        net_type = "resnet_{}".format(num_layers)
        multi_stage = True

    dataset_type = "multi-animal-imgaug"
    (
        individuals,
        uniquebodyparts,
        multianimalbodyparts,
    ) = auxfun_multianimal.extractindividualsandbodyparts(cfg)

    if paf_graph is None:  # Automatically form a complete PAF graph
        partaffinityfield_graph = [
            list(edge)
            for edge in combinations(range(len(multianimalbodyparts)), 2)
        ]
    else:
        # Ignore possible connections between 'multi' and 'unique' body parts;
        # one can never be too careful...
        to_ignore = auxfun_multianimal.filter_unwanted_paf_connections(
            cfg, paf_graph)
        partaffinityfield_graph = [
            edge for i, edge in enumerate(paf_graph) if i not in to_ignore
        ]
        auxfun_multianimal.validate_paf_graph(cfg, partaffinityfield_graph)

    print("Utilizing the following graph:", partaffinityfield_graph)
    # Disable the prediction of PAFs if the graph is empty
    partaffinityfield_predict = bool(partaffinityfield_graph)

    # Loading the encoder (if necessary downloading from TF)
    dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
    defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
    model_path, num_shuffles = auxfun_models.Check4weights(
        net_type, Path(dlcparent_path), num_shuffles)

    if Shuffles is None:
        Shuffles = range(1, num_shuffles + 1, 1)
    else:
        Shuffles = [i for i in Shuffles if isinstance(i, int)]

    # print(trainIndices,testIndices, Shuffles, augmenter_type,net_type)
    if trainIndices is None and testIndices is None:
        splits = []
        for shuffle in Shuffles:  # Creating shuffles starting from 1
            for train_frac in cfg["TrainingFraction"]:
                train_inds, test_inds = SplitTrials(range(len(Data)),
                                                    train_frac)
                splits.append((train_frac, shuffle, (train_inds, test_inds)))
    else:
        if not (len(trainIndices) == len(testIndices) == len(Shuffles)):
            raise ValueError(
                "Number of Shuffles and train and test indexes should be equal."
            )
        splits = []
        for shuffle, (train_inds,
                      test_inds) in enumerate(zip(trainIndices, testIndices)):
            trainFraction = round(
                len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)), 2)
            print(
                f"You passed a split with the following fraction: {int(100 * trainFraction)}%"
            )
            # Now that the training fraction is guaranteed to be correct,
            # the values added to pad the indices are removed.
            train_inds = np.asarray(train_inds)
            train_inds = train_inds[train_inds != -1]
            test_inds = np.asarray(test_inds)
            test_inds = test_inds[test_inds != -1]
            splits.append(
                (trainFraction, Shuffles[shuffle], (train_inds, test_inds)))

    for trainFraction, shuffle, (trainIndices, testIndices) in splits:
        ####################################################
        # Generating data structure with labeled information & frame metadata (for deep cut)
        ####################################################
        print(
            "Creating training data for: Shuffle:",
            shuffle,
            "TrainFraction: ",
            trainFraction,
        )

        # Make training file!
        data = format_multianimal_training_data(
            Data,
            trainIndices,
            cfg["project_path"],
            numdigits,
        )

        if len(trainIndices) > 0:
            (
                datafilename,
                metadatafilename,
            ) = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg)
            ################################################################################
            # Saving metadata and data file (Pickle file)
            ################################################################################
            auxiliaryfunctions.SaveMetadata(
                os.path.join(project_path, metadatafilename),
                data,
                trainIndices,
                testIndices,
                trainFraction,
            )

            datafilename = datafilename.split(".mat")[0] + ".pickle"
            import pickle

            with open(os.path.join(project_path, datafilename), "wb") as f:
                # Pickle the 'labeled-data' dictionary using the highest protocol available.
                pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)

            ################################################################################
            # Creating file structure for training &
            # Test files as well as pose_yaml files (containing training and testing information)
            #################################################################################

            modelfoldername = auxiliaryfunctions.GetModelFolder(
                trainFraction, shuffle, cfg)
            auxiliaryfunctions.attempttomakefolder(Path(config).parents[0] /
                                                   modelfoldername,
                                                   recursive=True)
            auxiliaryfunctions.attempttomakefolder(
                str(Path(config).parents[0] / modelfoldername / "train"))
            auxiliaryfunctions.attempttomakefolder(
                str(Path(config).parents[0] / modelfoldername / "test"))

            path_train_config = str(
                os.path.join(
                    cfg["project_path"],
                    Path(modelfoldername),
                    "train",
                    "pose_cfg.yaml",
                ))
            path_test_config = str(
                os.path.join(
                    cfg["project_path"],
                    Path(modelfoldername),
                    "test",
                    "pose_cfg.yaml",
                ))
            path_inference_config = str(
                os.path.join(
                    cfg["project_path"],
                    Path(modelfoldername),
                    "test",
                    "inference_cfg.yaml",
                ))

            jointnames = [str(bpt) for bpt in multianimalbodyparts]
            jointnames.extend([str(bpt) for bpt in uniquebodyparts])
            items2change = {
                "dataset": datafilename,
                "metadataset": metadatafilename,
                "num_joints": len(multianimalbodyparts) + len(uniquebodyparts),
                "all_joints": [[i] for i in range(
                    len(multianimalbodyparts) + len(uniquebodyparts))],
                "all_joints_names": jointnames,
                "init_weights": model_path,
                "project_path": str(cfg["project_path"]),
                "net_type": net_type,
                "multi_stage": multi_stage,
                "pairwise_loss_weight": 0.1,
                "pafwidth": 20,
                "partaffinityfield_graph": partaffinityfield_graph,
                "partaffinityfield_predict": partaffinityfield_predict,
                "weigh_only_present_joints": False,
                "num_limbs": len(partaffinityfield_graph),
                "dataset_type": dataset_type,
                "optimizer": "adam",
                "batch_size": 8,
                "multi_step": [[1e-4, 7500], [5 * 1e-5, 12000], [1e-5, 200000]],
                "save_iters": 10000,
                "display_iters": 500,
                "num_idchannel": len(cfg["individuals"])
                if cfg.get("identity", False) else 0,
                "crop_size": list(crop_size),
                "crop_sampling": crop_sampling,
            }

            trainingdata = MakeTrain_pose_yaml(items2change, path_train_config,
                                               defaultconfigfile)
            keys2save = [
                "dataset",
                "num_joints",
                "all_joints",
                "all_joints_names",
                "net_type",
                "multi_stage",
                "init_weights",
                "global_scale",
                "location_refinement",
                "locref_stdev",
                "dataset_type",
                "partaffinityfield_predict",
                "pairwise_predict",
                "partaffinityfield_graph",
                "num_limbs",
                "num_idchannel",
            ]

            MakeTest_pose_yaml(
                trainingdata,
                keys2save,
                path_test_config,
                nmsradius=5.0,
                minconfidence=0.01,
                sigma=1,
                locref_smooth=False,
            )  # setting important def. values for inference

            # Setting inference cfg file:
            defaultinference_configfile = os.path.join(dlcparent_path,
                                                       "inference_cfg.yaml")
            items2change = {
                "minimalnumberofconnections":
                    int(len(cfg["multianimalbodyparts"]) / 2),
                "topktoretain":
                    len(cfg["individuals"]) + 1 * (len(cfg["uniquebodyparts"]) > 0),
                "withid": cfg.get("identity", False),
            }
            MakeInference_yaml(items2change, path_inference_config,
                               defaultinference_configfile)

            print(
                "The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
            )
        else:
            pass
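
When paf_graph is None, the default part-affinity-field graph above is simply the complete graph over the multi-animal body parts, built with itertools.combinations. A tiny illustration with hypothetical body-part names:

from itertools import combinations

multianimalbodyparts = ["snout", "leftear", "rightear", "tailbase"]
# Every unordered pair of body-part indices becomes one PAF edge.
graph = [list(edge)
         for edge in combinations(range(len(multianimalbodyparts)), 2)]
print(graph)  # [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]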
Example #22
def create_multianimaltraining_dataset(
    config,
    num_shuffles=1,
    Shuffles=None,
    windows2linux=False,
    net_type=None,
    numdigits=2,
    paf_graph=None,
):
    """
    Creates a training dataset for multi-animal datasets. Labels from all the extracted frames are merged into a single .h5 file.\n
    Only the videos included in the config file are used to create this dataset.\n
    [OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.

    Important differences to the standard training dataset:
     - stores coordinates with numdigits decimal digits
     - creates
    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    Shuffles: list of shuffles.
        Alternatively the user can also give a list of shuffles (integers!).

    windows2linux: bool.
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a Unix system (e.g. Ubuntu, Colab, macOS) set this variable to True to convert the paths.

    net_type: string
        Type of network. Currently resnet_50, resnet_101, and resnet_152, efficientnet-b0, efficientnet-b1, efficientnet-b2, efficientnet-b3,
        efficientnet-b4, efficientnet-b5, and efficientnet-b6 as well as dlcrnet_ms5 are supported (not the MobileNets!).
        See Lauer et al. 2021 https://www.biorxiv.org/content/10.1101/2021.04.30.442096v1

    numdigits: int, optional
        Number of digits to which labeled coordinates are rounded when stored. Default is 2.

    paf_graph: list of lists, optional (default=None)
        If not None, overwrite the default complete graph. This is useful for advanced users who
        already know a good graph, or simply want to use a specific one. Note that, in that case,
        the data-driven selection procedure upon model evaluation will be skipped.

    Example
    --------
    >>> deeplabcut.create_multianimaltraining_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)

    Windows:
    >>> deeplabcut.create_multianimaltraining_dataset(r'C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
    --------
    """

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg["scorer"]
    project_path = cfg["project_path"]
    # Create path for training sets & store data there
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    full_training_path = Path(project_path, trainingsetfolder)
    auxiliaryfunctions.attempttomakefolder(full_training_path, recursive=True)

    Data = merge_annotateddatasets(cfg, full_training_path, windows2linux)
    if Data is None:
        return
    Data = Data[scorer]

    def strip_cropped_image_name(path):
        # utility function to split different crops from same image into either train or test!
        head, filename = os.path.split(path)
        if cfg["croppedtraining"]:
            filename = filename.split("c")[0]
        return os.path.join(head, filename)
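    # e.g. 'labeled-data/vid1/img0042c0.png' and 'labeled-data/vid1/img0042c1.png'
    # both map to 'labeled-data/vid1/img0042', so all crops of one frame end up in
    # the same split.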

    img_names = Data.index.map(strip_cropped_image_name).unique()

    if net_type is None:  # loading & linking pretrained models
        net_type = cfg.get("default_net_type", "dlcrnet_ms5")
    elif not any(net in net_type for net in ("resnet", "eff", "dlc")):
        raise ValueError(f"Unsupported network {net_type}.")

    multi_stage = False
    if net_type == "dlcrnet_ms5":
        net_type = "resnet_50"
        multi_stage = True

    dataset_type = "multi-animal-imgaug"
    (
        individuals,
        uniquebodyparts,
        multianimalbodyparts,
    ) = auxfun_multianimal.extractindividualsandbodyparts(cfg)

    if paf_graph is None:  # Automatically form a complete PAF graph
        partaffinityfield_graph = [
            list(edge)
            for edge in combinations(range(len(multianimalbodyparts)), 2)
        ]
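        # e.g. three bodyparts yield the complete graph [[0, 1], [0, 2], [1, 2]].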
    else:
        # Ignore possible connections between 'multi' and 'unique' body parts;
        # one can never be too careful...
        to_ignore = auxfun_multianimal.filter_unwanted_paf_connections(
            cfg, paf_graph)
        partaffinityfield_graph = [
            edge for i, edge in enumerate(paf_graph) if i not in to_ignore
        ]
        auxfun_multianimal.validate_paf_graph(cfg, partaffinityfield_graph)

    print("Utilizing the following graph:", partaffinityfield_graph)
    partaffinityfield_predict = True

    # Loading the encoder (downloading from TF if necessary)
    dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
    defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
    model_path, num_shuffles = auxfun_models.Check4weights(
        net_type, Path(dlcparent_path), num_shuffles)

    if Shuffles is None:
        Shuffles = range(1, num_shuffles + 1, 1)
    else:
        Shuffles = [i for i in Shuffles if isinstance(i, int)]

    TrainingFraction = cfg["TrainingFraction"]
    for shuffle in Shuffles:  # Creating shuffles starting from 1
        for trainFraction in TrainingFraction:
            train_inds_temp, test_inds_temp = SplitTrials(
                range(len(img_names)), trainFraction)
            # Map back to the original indices.
            temp = [
                re.escape(name) for i, name in enumerate(img_names)
                if i in test_inds_temp
            ]
            mask = Data.index.str.contains("|".join(temp))
            testIndices = np.flatnonzero(mask)
            trainIndices = np.flatnonzero(~mask)
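            # The regex mask flags every row whose (possibly cropped) image stems from
            # a test frame, so crops of one frame never straddle the train/test split.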

            ####################################################
            # Generating data structure with labeled information & frame metadata (for deep cut)
            ####################################################
            print(
                "Creating training data for: Shuffle:",
                shuffle,
                "TrainFraction: ",
                trainFraction,
            )

            # Make training file!
            data = format_multianimal_training_data(
                Data,
                trainIndices,
                cfg["project_path"],
                numdigits,
            )

            if len(trainIndices) > 0:
                (
                    datafilename,
                    metadatafilename,
                ) = auxiliaryfunctions.GetDataandMetaDataFilenames(
                    trainingsetfolder, trainFraction, shuffle, cfg)
                ################################################################################
                # Saving metadata and data file (Pickle file)
                ################################################################################
                auxiliaryfunctions.SaveMetadata(
                    os.path.join(project_path, metadatafilename),
                    data,
                    trainIndices,
                    testIndices,
                    trainFraction,
                )

                datafilename = datafilename.split(".mat")[0] + ".pickle"
                import pickle

                with open(os.path.join(project_path, datafilename), "wb") as f:
                    # Pickle the 'labeled-data' dictionary using the highest protocol available.
                    pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)

                ################################################################################
                # Creating file structure for training &
                # Test files as well as pose_yaml files (containing training and testing information)
                #################################################################################

                modelfoldername = auxiliaryfunctions.GetModelFolder(
                    trainFraction, shuffle, cfg)
                auxiliaryfunctions.attempttomakefolder(
                    Path(config).parents[0] / modelfoldername, recursive=True)
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername / "train"))
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername / "test"))

                path_train_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "train",
                        "pose_cfg.yaml",
                    ))
                path_test_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "test",
                        "pose_cfg.yaml",
                    ))
                path_inference_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "test",
                        "inference_cfg.yaml",
                    ))

                jointnames = [str(bpt) for bpt in multianimalbodyparts]
                jointnames.extend([str(bpt) for bpt in uniquebodyparts])
                items2change = {
                    "dataset":
                    datafilename,
                    "metadataset":
                    metadatafilename,
                    "num_joints":
                    len(multianimalbodyparts) +
                    len(uniquebodyparts),  # cfg["uniquebodyparts"]),
                    "all_joints": [[i] for i in range(
                        len(multianimalbodyparts) + len(uniquebodyparts))
                                   ],  # cfg["uniquebodyparts"]))],
                    "all_joints_names":
                    jointnames,
                    "init_weights":
                    model_path,
                    "project_path":
                    str(cfg["project_path"]),
                    "net_type":
                    net_type,
                    "multi_stage":
                    multi_stage,
                    "pairwise_loss_weight":
                    0.1,
                    "pafwidth":
                    20,
                    "partaffinityfield_graph":
                    partaffinityfield_graph,
                    "partaffinityfield_predict":
                    partaffinityfield_predict,
                    "weigh_only_present_joints":
                    False,
                    "num_limbs":
                    len(partaffinityfield_graph),
                    "dataset_type":
                    dataset_type,
                    "optimizer":
                    "adam",
                    "batch_size":
                    8,
                    "multi_step": [[1e-4, 7500], [5 * 1e-5, 12000],
                                   [1e-5, 200000]],
                    "save_iters":
                    10000,
                    "display_iters":
                    500,
                    "num_idchannel":
                    len(cfg["individuals"])
                    if cfg.get("identity", False) else 0,
                }

                trainingdata = MakeTrain_pose_yaml(items2change,
                                                   path_train_config,
                                                   defaultconfigfile)
                keys2save = [
                    "dataset",
                    "num_joints",
                    "all_joints",
                    "all_joints_names",
                    "net_type",
                    "multi_stage",
                    "init_weights",
                    "global_scale",
                    "location_refinement",
                    "locref_stdev",
                    "dataset_type",
                    "partaffinityfield_predict",
                    "pairwise_predict",
                    "partaffinityfield_graph",
                    "num_limbs",
                    "dataset_type",
                    "num_idchannel",
                ]

                MakeTest_pose_yaml(
                    trainingdata,
                    keys2save,
                    path_test_config,
                    nmsradius=5.0,
                    minconfidence=0.01,
                )  # setting important def. values for inference

                # Setting inference cfg file:
                defaultinference_configfile = os.path.join(
                    dlcparent_path, "inference_cfg.yaml")
                items2change = {
                    "minimalnumberofconnections":
                    int(len(cfg["multianimalbodyparts"]) / 2),
                    "topktoretain":
                    len(cfg["individuals"]) + 1 *
                    (len(cfg["uniquebodyparts"]) > 0),
                    "withid":
                    cfg.get("identity", False),
                }
                MakeInference_yaml(items2change, path_inference_config,
                                   defaultinference_configfile)

                print(
                    "The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
                )
            else:
                pass
Example #23
def mergeandsplit(config, trainindex=0, uniform=True, windows2linux=False):
    """
    This function allows additional control over "create_training_dataset".

    Merge annotated data sets (from different folders) and split data in a specific way, returns the split variables (train/test indices).
    Importantly, this allows one to freeze a split.

    One can either create a uniform split (uniform = True, thereby indexing TrainingFraction in the config file) or a leave-one-folder-out split
    by passing the index of the corresponding video from the config.yaml file as the variable trainindex.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    trainindex: int, optional
        Either (in case uniform = True) indexes which element of TrainingFraction in the config file should be used (note it is a list!).
        Alternatively (uniform = False) indexes which folder is dropped, i.e. the first if trainindex=0, the second if trainindex =1, etc.

    uniform: bool, optional
        Perform uniform split (disregarding folder structure in labeled data), or (if False) leave one folder out.

    windows2linux: bool.
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a Unix system (e.g. Ubuntu, Colab, macOS), set this variable to True to convert the paths.

    Examples
    --------
    To create a leave-one-folder-out model:
    >>> trainIndices, testIndices=deeplabcut.mergeandsplit(config,trainindex=0,uniform=False)
    returns the indices for the first video folder (as defined in config file) as testIndices and all others as trainIndices.
    You can then create the training set by calling (e.g. defining it as Shuffle 3):
    >>> deeplabcut.create_training_dataset(config,Shuffles=[3],trainIndices=trainIndices,testIndices=testIndices)

    To freeze a (uniform) split (i.e. iid sampled from all the data):
    >>> trainIndices, testIndices=deeplabcut.mergeandsplit(config,trainindex=0,uniform=True)
    You can then create two model instances that have the identical trainingset. Thereby you can assess the role of various parameters on the performance of DLC.
    >>> deeplabcut.create_training_dataset(config,Shuffles=[0,1],trainIndices=[trainIndices, trainIndices],testIndices=[testIndices, testIndices])
    --------

    """
    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg["scorer"]
    project_path = cfg["project_path"]
    # Create path for training sets & store data there
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
        cfg)  # Path concatenation OS platform independent
    auxiliaryfunctions.attempttomakefolder(Path(
        os.path.join(project_path, str(trainingsetfolder))),
                                           recursive=True)
    fn = os.path.join(project_path, trainingsetfolder,
                      "CollectedData_" + cfg["scorer"])

    try:
        Data = pd.read_hdf(fn + ".h5", "df_with_missing")
    except FileNotFoundError:
        Data = merge_annotateddatasets(
            cfg,
            Path(os.path.join(project_path, trainingsetfolder)),
            windows2linux=windows2linux,
        )
        if Data is None:
            return [], []

    Data = Data[scorer]  # extract labeled data

    if uniform:
        TrainingFraction = cfg["TrainingFraction"]
        trainFraction = TrainingFraction[trainindex]
        trainIndices, testIndices = SplitTrials(range(len(Data.index)),
                                                trainFraction)
    else:  # leave one folder out split
        videos = cfg["video_sets"].keys()
        test_video_name = [Path(i).stem for i in videos][trainindex]
        print("Excluding the following folder (from training):",
              test_video_name)
        trainIndices, testIndices = [], []
        for index, name in enumerate(Data.index):
            # print(index,name.split(os.sep)[1])
            if test_video_name == name.split(
                    os.sep)[1]:  # this is the video name
                # print(name,test_video_name)
                testIndices.append(index)
            else:
                trainIndices.append(index)
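        # Index entries look like os.path.join('labeled-data', '<video>', 'img0000.png'),
        # so name.split(os.sep)[1] picks out the folder (video) name for the
        # leave-one-folder-out split.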

    return trainIndices, testIndices
Example #24
def create_video_with_all_detections(
    config, videos, DLCscorername, displayedbodyparts="all", destfolder=None
):
    """
    Create a video labeled with all the detections stored in a '*_full.pickle' file.

    Parameters
    ----------
    config : str
        Absolute path to the config.yaml file

    videos : list of str
        A list of strings containing the full paths to videos for analysis or a path to the directory,
        where all the videos with same extension are stored.

    DLCscorername: str
        Name of network. E.g. 'DLC_resnet50_project_userMar23shuffle1_50000'.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all body parts
        from config.yaml are used, or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    """
    from deeplabcut.pose_estimation_tensorflow.lib.inferenceutils import (
        convertdetectiondict2listoflist,
    )
    import pickle, re

    cfg = auxiliaryfunctions.read_config(config)

    for video in videos:
        videofolder = os.path.splitext(video)[0]

        if destfolder is None:
            outputname = "{}_full.mp4".format(videofolder + DLCscorername)
            full_pickle = os.path.join(videofolder + DLCscorername + "_full.pickle")
        else:
            auxiliaryfunctions.attempttomakefolder(destfolder)
            outputname = os.path.join(
                destfolder, str(Path(video).stem) + DLCscorername + "_full.mp4"
            )
            full_pickle = os.path.join(
                destfolder, str(Path(video).stem) + DLCscorername + "_full.pickle"
            )
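        # e.g. analyzing '/data/vid1.avi' with scorer 'DLC_resnet50_...' reads
        # '/data/vid1DLC_resnet50..._full.pickle' and writes
        # '/data/vid1DLC_resnet50..._full.mp4' (when destfolder is None).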

        if not (os.path.isfile(outputname)):
            print("Creating labeled video for ", str(Path(video).stem))
            with open(full_pickle, "rb") as file:
                data = pickle.load(file)

            header = data.pop("metadata")
            all_jointnames = header["all_joints_names"]

            if displayedbodyparts == "all":
                numjoints = len(all_jointnames)
                bpts = range(numjoints)
            else:  # select only "displayedbodyparts"
                bpts = []
                for bptindex, bp in enumerate(all_jointnames):
                    if bp in displayedbodyparts:
                        bpts.append(bptindex)
                numjoints = len(bpts)

            frame_names = list(data)
            frames = [int(re.findall(r"\d+", name)[0]) for name in frame_names]
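            # e.g. a frame key like 'frame0042' maps to the integer index 42.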
            colorclass = plt.cm.ScalarMappable(cmap=cfg["colormap"])
            C = colorclass.to_rgba(np.linspace(0, 1, numjoints))
            colors = (C[:, :3] * 255).astype(np.uint8)

            pcutoff = cfg["pcutoff"]
            dotsize = cfg["dotsize"]
            clip = vp(fname=video, sname=outputname, codec="mp4v")
            ny, nx = clip.height(), clip.width()

            for n in trange(clip.nframes):
                frame = clip.load_frame()
                try:
                    ind = frames.index(n)
                    dets = convertdetectiondict2listoflist(data[frame_names[ind]], bpts)
                    for i, det in enumerate(dets):
                        color = colors[i]
                        for x, y, p, _ in det:
                            if p > pcutoff:
                                rr, cc = circle(y, x, dotsize, shape=(ny, nx))
                                frame[rr, cc] = color
                except ValueError:  # No data stored for that particular frame
                    print(n, "no data")
                try:
                    clip.save_frame(frame)
                except Exception:
                    print(n, "frame writing error.")
            clip.close()
        else:
            print("Detections already plotted, ", outputname)
Example #25
def evaluate_multianimal_full(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    plotting=None,
    show_errors=True,
    comparisonbodyparts="all",
    gputouse=None,
    modelprefix="",
    c_engine=False,
):
    """
    WIP multi-animal project.
    """

    import os

    from deeplabcut.pose_estimation_tensorflow.nnet import predict
    from deeplabcut.pose_estimation_tensorflow.nnet import (
        predict_multianimal as predictma, )
    from deeplabcut.utils import auxiliaryfunctions, auxfun_multianimal

    import tensorflow as tf

    if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
        del os.environ[
            "TF_CUDNN_USE_AUTOTUNE"]  # was potentially set during training

    tf.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    if gputouse is not None:  # gpu selectinon
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    start_path = os.getcwd()

    ##################################################
    # Load data...
    ##################################################
    cfg = auxiliaryfunctions.read_config(config)
    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        TrainingFractions = [cfg["TrainingFraction"][trainingsetindex]]

    # Loading human-annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )
    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts)
    colors = visualization.get_cmap(len(comparisonbodyparts),
                                    name=cfg["colormap"])
    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        str(cfg["project_path"] + "/evaluation-results/"))
    for shuffle in Shuffles:
        for trainFraction in TrainingFractions:
            ##################################################
            # Load and setup CNN part detector
            ##################################################
            datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg)
            modelfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix)),
            )
            path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"

            # Load meta data
            (
                data,
                trainIndices,
                testIndices,
                trainFraction,
            ) = auxiliaryfunctions.LoadMetadata(
                os.path.join(cfg["project_path"], metadatafn))

            try:
                dlc_cfg = load_config(str(path_test_config))
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It seems the model for shuffle %s and trainFraction %s does not exist."
                    % (shuffle, trainFraction))

            # TODO: IMPLEMENT for different batch sizes?
            dlc_cfg["batch_size"] = 1  # due to differently sized images!!!

            # Create folder structure to store results.
            evaluationfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetEvaluationFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix)),
            )
            auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                                   recursive=True)
            # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

            # Check which snapshots are available and sort them by # iterations
            Snapshots = np.array([
                fn.split(".")[0]
                for fn in os.listdir(os.path.join(str(modelfolder), "train"))
                if "index" in fn
            ])
            if len(Snapshots) == 0:
                print(
                    "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                    % (shuffle, trainFraction))
            else:
                increasing_indices = np.argsort(
                    [int(m.split("-")[1]) for m in Snapshots])
                Snapshots = Snapshots[increasing_indices]

                if cfg["snapshotindex"] == -1:
                    snapindices = [-1]
                elif cfg["snapshotindex"] == "all":
                    snapindices = range(len(Snapshots))
                elif cfg["snapshotindex"] < len(Snapshots):
                    snapindices = [cfg["snapshotindex"]]
                else:
                    print(
                        "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                    )

                (
                    individuals,
                    uniquebodyparts,
                    multianimalbodyparts,
                ) = auxfun_multianimal.extractindividualsandbodyparts(cfg)

                final_result = []
                ##################################################
                # Compute predictions over images
                ##################################################
                for snapindex in snapindices:
                    dlc_cfg["init_weights"] = os.path.join(
                        str(modelfolder), "train", Snapshots[snapindex]
                    )  # setting weights to corresponding snapshot.
                    trainingsiterations = (
                        dlc_cfg["init_weights"].split(os.sep)[-1]
                    ).split("-")[-1]  # read how many training iterations this corresponds to.

                    # name for deeplabcut net (based on its parameters)
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations,
                        modelprefix=modelprefix,
                    )
                    print(
                        "Running ",
                        DLCscorer,
                        " with # of trainingiterations:",
                        trainingsiterations,
                    )
                    (
                        notanalyzed,
                        resultsfilename,
                        DLCscorer,
                    ) = auxiliaryfunctions.CheckifNotEvaluated(
                        str(evaluationfolder),
                        DLCscorer,
                        DLCscorerlegacy,
                        Snapshots[snapindex],
                    )

                    if os.path.isfile(
                            resultsfilename.split(".h5")[0] + "_full.pickle"):
                        print("Model already evaluated.", resultsfilename)
                    else:
                        if plotting:
                            foldername = os.path.join(
                                str(evaluationfolder),
                                "LabeledImages_" + DLCscorer + "_" +
                                Snapshots[snapindex],
                            )
                            auxiliaryfunctions.attempttomakefolder(foldername)

                        # print(dlc_cfg)
                        # Specifying state of model (snapshot / training state)
                        sess, inputs, outputs = predict.setup_pose_prediction(
                            dlc_cfg)

                        PredicteData = {}
                        print("Analyzing data...")
                        for imageindex, imagename in tqdm(enumerate(
                                Data.index)):
                            image_path = os.path.join(cfg["project_path"],
                                                      imagename)
                            image = io.imread(image_path)
                            frame = img_as_ubyte(skimage.color.gray2rgb(image))

                            GT = Data.iloc[imageindex]

                            # Storing GT data as dictionary, so it can be used for calculating connection costs
                            groundtruthcoordinates = []
                            groundtruthidentity = []
                            for bptindex, bpt in enumerate(
                                    dlc_cfg["all_joints_names"]):
                                coords = np.full((len(individuals), 2), np.nan)
                                identity = []
                                for prfxindex, prefix in enumerate(
                                        individuals):
                                    if bpt in uniquebodyparts and prefix == "single":
                                        coords[prfxindex, :] = np.array([
                                            GT[cfg["scorer"]][prefix][bpt]
                                            ["x"],
                                            GT[cfg["scorer"]][prefix][bpt]
                                            ["y"],
                                        ])
                                        identity.append(prefix)
                                    elif (bpt in multianimalbodyparts
                                          and prefix != "single"):
                                        coords[prfxindex, :] = np.array([
                                            GT[cfg["scorer"]][prefix][bpt]
                                            ["x"],
                                            GT[cfg["scorer"]][prefix][bpt]
                                            ["y"],
                                        ])
                                        identity.append(prefix)
                                    else:
                                        identity.append("nix")

                                groundtruthcoordinates.append(
                                    coords[np.isfinite(coords[:, 0]), :])
                                groundtruthidentity.append(
                                    np.array(identity)[np.isfinite(coords[:,
                                                                          0])])

                            PredicteData[imagename] = {}
                            PredicteData[imagename]["index"] = imageindex

                            pred = predictma.get_detectionswithcostsandGT(
                                frame,
                                groundtruthcoordinates,
                                dlc_cfg,
                                sess,
                                inputs,
                                outputs,
                                outall=False,
                                nms_radius=dlc_cfg.nmsradius,
                                det_min_score=dlc_cfg.minconfidence,
                                c_engine=c_engine,
                            )
                            PredicteData[imagename]["prediction"] = pred
                            PredicteData[imagename]["groundtruth"] = [
                                groundtruthidentity,
                                groundtruthcoordinates,
                                GT,
                            ]

                            if plotting:
                                coords_pred = pred["coordinates"][0]
                                probs_pred = pred["confidence"]
                                fig = visualization.make_multianimal_labeled_image(
                                    frame,
                                    groundtruthcoordinates,
                                    coords_pred,
                                    probs_pred,
                                    colors,
                                    cfg["dotsize"],
                                    cfg["alphavalue"],
                                    cfg["pcutoff"],
                                )

                                visualization.save_labeled_frame(
                                    fig,
                                    image_path,
                                    foldername,
                                    imageindex in trainIndices,
                                )

                        sess.close()  # closes the current tf session
                        PredicteData["metadata"] = {
                            "nms radius":
                            dlc_cfg.nmsradius,
                            "minimal confidence":
                            dlc_cfg.minconfidence,
                            "PAFgraph":
                            dlc_cfg.partaffinityfield_graph,
                            "all_joints":
                            [[i] for i in range(len(dlc_cfg.all_joints))],
                            "all_joints_names": [
                                dlc_cfg.all_joints_names[i]
                                for i in range(len(dlc_cfg.all_joints))
                            ],
                            "stride":
                            dlc_cfg.get("stride", 8),
                        }
                        print(
                            "Done and results stored for snapshot: ",
                            Snapshots[snapindex],
                        )

                        dictionary = {
                            "Scorer": DLCscorer,
                            "DLC-model-config file": dlc_cfg,
                            "trainIndices": trainIndices,
                            "testIndices": testIndices,
                            "trainFraction": trainFraction,
                        }
                        metadata = {"data": dictionary}
                        auxfun_multianimal.SaveFullMultiAnimalData(
                            PredicteData, metadata, resultsfilename)

                        tf.reset_default_graph()

    # returning to initial folder
    os.chdir(str(start_path))
Example #26
def evaluate_multianimal_full(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    plotting=None,
    show_errors=True,
    comparisonbodyparts="all",
    gputouse=None,
    modelprefix="",
    c_engine=False,
):
    from deeplabcut.pose_estimation_tensorflow.nnet import predict
    from deeplabcut.pose_estimation_tensorflow.nnet import (
        predict_multianimal as predictma, )
    from deeplabcut.utils import auxiliaryfunctions, auxfun_multianimal

    import tensorflow as tf

    if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
        del os.environ[
            "TF_CUDNN_USE_AUTOTUNE"]  # was potentially set during training

    tf.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    if gputouse is not None:  # gpu selectinon
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    start_path = os.getcwd()

    ##################################################
    # Load data...
    ##################################################
    cfg = auxiliaryfunctions.read_config(config)
    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        TrainingFractions = [cfg["TrainingFraction"][trainingsetindex]]

    # Loading human-annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )
    # Handle data previously annotated on a different platform
    sep = "/" if "/" in Data.index[0] else "\\"
    if sep != os.path.sep:
        Data.index = Data.index.str.replace(sep, os.path.sep, regex=False)  # literal replace; '\\' is not a valid regex
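    # e.g. labels saved on Windows ('labeled-data\\vid\\img0.png') are rewritten with
    # the local separator when evaluating on Unix, and vice versa.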
    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts)
    all_bpts = np.asarray(
        len(cfg["individuals"]) * cfg["multianimalbodyparts"] +
        cfg["uniquebodyparts"])
    colors = visualization.get_cmap(len(comparisonbodyparts),
                                    name=cfg["colormap"])
    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        str(cfg["project_path"] + "/evaluation-results/"))
    for shuffle in Shuffles:
        for trainFraction in TrainingFractions:
            ##################################################
            # Load and setup CNN part detector
            ##################################################
            datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg)
            modelfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix)),
            )
            path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"

            # Load meta data
            (
                data,
                trainIndices,
                testIndices,
                trainFraction,
            ) = auxiliaryfunctions.LoadMetadata(
                os.path.join(cfg["project_path"], metadatafn))

            try:
                dlc_cfg = load_config(str(path_test_config))
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It seems the model for shuffle %s and trainFraction %s does not exist."
                    % (shuffle, trainFraction))

            # TODO: IMPLEMENT for different batch sizes?
            dlc_cfg["batch_size"] = 1  # due to differently sized images!!!

            joints = dlc_cfg["all_joints_names"]

            # Create folder structure to store results.
            evaluationfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetEvaluationFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix)),
            )
            auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                                   recursive=True)
            # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

            # Check which snapshots are available and sort them by # iterations
            Snapshots = np.array([
                fn.split(".")[0]
                for fn in os.listdir(os.path.join(str(modelfolder), "train"))
                if "index" in fn
            ])
            if len(Snapshots) == 0:
                print(
                    "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                    % (shuffle, trainFraction))
            else:
                increasing_indices = np.argsort(
                    [int(m.split("-")[1]) for m in Snapshots])
                Snapshots = Snapshots[increasing_indices]

                if cfg["snapshotindex"] == -1:
                    snapindices = [-1]
                elif cfg["snapshotindex"] == "all":
                    snapindices = range(len(Snapshots))
                elif cfg["snapshotindex"] < len(Snapshots):
                    snapindices = [cfg["snapshotindex"]]
                else:
                    print(
                        "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                    )

                final_result = []
                ##################################################
                # Compute predictions over images
                ##################################################
                for snapindex in snapindices:
                    dlc_cfg["init_weights"] = os.path.join(
                        str(modelfolder), "train", Snapshots[snapindex]
                    )  # setting weights to corresponding snapshot.
                    trainingsiterations = (
                        dlc_cfg["init_weights"].split(os.sep)[-1]
                    ).split("-")[-1]  # read how many training iterations this corresponds to.

                    # name for deeplabcut net (based on its parameters)
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations,
                        modelprefix=modelprefix,
                    )
                    print(
                        "Running ",
                        DLCscorer,
                        " with # of trainingiterations:",
                        trainingsiterations,
                    )
                    (
                        notanalyzed,
                        resultsfilename,
                        DLCscorer,
                    ) = auxiliaryfunctions.CheckifNotEvaluated(
                        str(evaluationfolder),
                        DLCscorer,
                        DLCscorerlegacy,
                        Snapshots[snapindex],
                    )

                    if os.path.isfile(
                            resultsfilename.split(".h5")[0] + "_full.pickle"):
                        print("Model already evaluated.", resultsfilename)
                    else:
                        if plotting:
                            foldername = os.path.join(
                                str(evaluationfolder),
                                "LabeledImages_" + DLCscorer + "_" +
                                Snapshots[snapindex],
                            )
                            auxiliaryfunctions.attempttomakefolder(foldername)

                        # print(dlc_cfg)
                        # Specifying state of model (snapshot / training state)
                        sess, inputs, outputs = predict.setup_pose_prediction(
                            dlc_cfg)

                        PredicteData = {}
                        dist = np.full((len(Data), len(all_bpts)), np.nan)
                        conf = np.full_like(dist, np.nan)
                        distnorm = np.full(len(Data), np.nan)
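                        # dist/conf hold one entry per image and per (individual,
                        # bodypart) slot of `all_bpts`; they are filled below with the
                        # GT-matched prediction errors and confidences.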
                        print("Analyzing data...")
                        for imageindex, imagename in tqdm(enumerate(
                                Data.index)):
                            image_path = os.path.join(cfg["project_path"],
                                                      imagename)
                            image = io.imread(image_path)
                            frame = img_as_ubyte(skimage.color.gray2rgb(image))

                            GT = Data.iloc[imageindex]
                            df = GT.unstack("coords").reindex(
                                joints, level='bodyparts')

                            # Evaluate PAF edge lengths to calibrate `distnorm`
                            temp = GT.unstack("bodyparts")[joints]
                            xy = temp.values.reshape(
                                (-1, 2, temp.shape[1])).swapaxes(1, 2)
                            edges = xy[:, dlc_cfg["partaffinityfield_graph"]]
                            lengths = np.sum(
                                (edges[:, :, 0] - edges[:, :, 1])**2, axis=2)
                            distnorm[imageindex] = np.nanmax(lengths)
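                            # NB: `lengths` are squared edge lengths; the square root is
                            # taken only once, when the 'distnorm' row of the SD file is
                            # written out below.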

                            # FIXME Is having an empty array vs nan really that necessary?!
                            groundtruthidentity = list(
                                df.index.get_level_values(
                                    "individuals").to_numpy().reshape((-1, 1)))
                            groundtruthcoordinates = list(
                                df.values[:, np.newaxis])
                            for i, coords in enumerate(groundtruthcoordinates):
                                if np.isnan(coords).any():
                                    groundtruthcoordinates[i] = np.empty(
                                        (0, 2), dtype=float)
                                    groundtruthidentity[i] = np.array(
                                        [], dtype=str)

                            PredicteData[imagename] = {}
                            PredicteData[imagename]["index"] = imageindex

                            pred = predictma.get_detectionswithcostsandGT(
                                frame,
                                groundtruthcoordinates,
                                dlc_cfg,
                                sess,
                                inputs,
                                outputs,
                                outall=False,
                                nms_radius=dlc_cfg.nmsradius,
                                det_min_score=dlc_cfg.minconfidence,
                                c_engine=c_engine,
                            )
                            PredicteData[imagename]["prediction"] = pred
                            PredicteData[imagename]["groundtruth"] = [
                                groundtruthidentity,
                                groundtruthcoordinates,
                                GT,
                            ]

                            coords_pred = pred["coordinates"][0]
                            probs_pred = pred["confidence"]
                            for bpt, xy_gt in df.groupby(level="bodyparts"):
                                inds_gt = np.flatnonzero(
                                    np.all(~np.isnan(xy_gt), axis=1))
                                n_joint = joints.index(bpt)
                                xy = coords_pred[n_joint]
                                if inds_gt.size and xy.size:
                                    # Pick the predictions closest to ground truth,
                                    # rather than the ones the model has most confident in
                                    d = cdist(xy_gt.iloc[inds_gt], xy)
                                    rows, cols = linear_sum_assignment(d)
                                    min_dists = d[rows, cols]
                                    inds = np.flatnonzero(all_bpts == bpt)
                                    sl = imageindex, inds[inds_gt[rows]]
                                    dist[sl] = min_dists
                                    conf[sl] = probs_pred[n_joint][
                                        cols].squeeze()
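                                # linear_sum_assignment solves the Hungarian matching
                                # between GT and predicted keypoints, pairing each GT
                                # point with its nearest unique prediction.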

                            if plotting:
                                fig = visualization.make_multianimal_labeled_image(
                                    frame,
                                    groundtruthcoordinates,
                                    coords_pred,
                                    probs_pred,
                                    colors,
                                    cfg["dotsize"],
                                    cfg["alphavalue"],
                                    cfg["pcutoff"],
                                )

                                visualization.save_labeled_frame(
                                    fig,
                                    image_path,
                                    foldername,
                                    imageindex in trainIndices,
                                )

                        sess.close()  # closes the current tf session

                        # Compute all distance statistics
                        df_dist = pd.DataFrame(dist, columns=df.index)
                        df_conf = pd.DataFrame(conf, columns=df.index)
                        df_joint = pd.concat([df_dist, df_conf],
                                             keys=["rmse", "conf"],
                                             names=["metrics"],
                                             axis=1)
                        df_joint = df_joint.reorder_levels(list(
                            np.roll(df_joint.columns.names, -1)),
                                                           axis=1)
                        df_joint.sort_index(axis=1,
                                            level=["individuals", "bodyparts"],
                                            ascending=[True, True],
                                            inplace=True)
                        write_path = os.path.join(
                            evaluationfolder,
                            f"dist_{trainingsiterations}.csv")
                        df_joint.to_csv(write_path)

                        # Calculate overall prediction error
                        error = df_joint.xs("rmse", level="metrics", axis=1)
                        mask = df_joint.xs("conf", level="metrics",
                                           axis=1) >= cfg["pcutoff"]
                        error_masked = error[mask]
                        error_train = np.nanmean(error.iloc[trainIndices])
                        error_train_cut = np.nanmean(
                            error_masked.iloc[trainIndices])
                        error_test = np.nanmean(error.iloc[testIndices])
                        error_test_cut = np.nanmean(
                            error_masked.iloc[testIndices])
                        results = [
                            trainingsiterations,
                            int(100 * trainFraction),
                            shuffle,
                            np.round(error_train, 2),
                            np.round(error_test, 2),
                            cfg["pcutoff"],
                            np.round(error_train_cut, 2),
                            np.round(error_test_cut, 2),
                        ]
                        final_result.append(results)

                        # For OKS/PCK, compute the standard deviation error across all frames
                        sd = df_dist.groupby("bodyparts",
                                             axis=1).mean().std(axis=0)
                        sd["distnorm"] = np.sqrt(np.nanmax(distnorm))
                        # write_path is 'dist_<iters>.csv', so swap the prefix
                        sd.to_csv(write_path.replace("dist", "sd"))

                        if show_errors:
                            string = "Results for {} training iterations: {}, shuffle {}:\n" \
                                     "Train error: {} pixels. Test error: {} pixels.\n" \
                                     "With pcutoff of {}:\n" \
                                     "Train error: {} pixels. Test error: {} pixels."
                            print(string.format(*results))

                            print("##########################################")
                            print(
                                "Average Euclidean distance to GT per individual (in pixels)"
                            )
                            print(
                                error_masked.groupby(
                                    'individuals',
                                    axis=1).mean().mean().to_string())
                            print(
                                "Average Euclidean distance to GT per bodypart (in pixels)"
                            )
                            print(
                                error_masked.groupby(
                                    'bodyparts',
                                    axis=1).mean().mean().to_string())

                        PredicteData["metadata"] = {
                            "nms radius":
                            dlc_cfg.nmsradius,
                            "minimal confidence":
                            dlc_cfg.minconfidence,
                            "PAFgraph":
                            dlc_cfg.partaffinityfield_graph,
                            "all_joints":
                            [[i] for i in range(len(dlc_cfg.all_joints))],
                            "all_joints_names": [
                                dlc_cfg.all_joints_names[i]
                                for i in range(len(dlc_cfg.all_joints))
                            ],
                            "stride":
                            dlc_cfg.get("stride", 8),
                        }
                        print(
                            "Done and results stored for snapshot: ",
                            Snapshots[snapindex],
                        )

                        dictionary = {
                            "Scorer": DLCscorer,
                            "DLC-model-config file": dlc_cfg,
                            "trainIndices": trainIndices,
                            "testIndices": testIndices,
                            "trainFraction": trainFraction,
                        }
                        metadata = {"data": dictionary}
                        auxfun_multianimal.SaveFullMultiAnimalData(
                            PredicteData, metadata, resultsfilename)

                        tf.reset_default_graph()

                if len(final_result
                       ) > 0:  # Only append if results were calculated
                    make_results_file(final_result, evaluationfolder,
                                      DLCscorer)

    # returning to initial folder
    os.chdir(str(start_path))
def ExtractFramesbasedonPreselection(Index,
                                     extractionalgorithm,
                                     Dataframe,
                                     dataname,
                                     scorer,
                                     video,
                                     cfg,
                                     config,
                                     opencv=True,
                                     cluster_resizewidth=30,
                                     cluster_color=False,
                                     savelabeled=True):
    from deeplabcut.create_project import add
    start = cfg['start']
    stop = cfg['stop']
    numframes2extract = cfg['numframes2pick']
    bodyparts = cfg['bodyparts']

    videofolder = str(Path(video).parents[0])
    vname = str(Path(video).stem)
    tmpfolder = os.path.join(cfg['project_path'], 'labeled-data', vname)
    if os.path.isdir(tmpfolder):
        print("Frames from video", vname,
              " already extracted (more will be added)!")
    else:
        auxiliaryfunctions.attempttomakefolder(tmpfolder)

    nframes = np.size(Dataframe.index)
    print("Loading video...")
    if opencv:
        import cv2
        cap = cv2.VideoCapture(video)
        fps = cap.get(cv2.CAP_PROP_FPS)
        duration = nframes * 1. / fps
        size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
    else:
        from moviepy.editor import VideoFileClip
        clip = VideoFileClip(video)
        fps = clip.fps
        duration = clip.duration
        size = clip.size

    if cfg['cropping']:  # one might want to adjust
        coords = (cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2'])
    else:
        coords = None

    print("Duration of video [s]: ", duration, ", recorded @ ", fps, "fps!")
    print(
        "Overall # of frames: ",
        nframes,
        "with (cropped) frame dimensions: ",
        size,
    )
    if extractionalgorithm == 'uniform':
        if opencv:
            frames2pick = frameselectiontools.UniformFramescv2(
                cap, numframes2extract, start, stop, Index)
        else:
            frames2pick = frameselectiontools.UniformFrames(
                clip, numframes2extract, start, stop, Index)
    elif extractionalgorithm == 'kmeans':
        if opencv:
            frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                cap,
                numframes2extract,
                start,
                stop,
                cfg['cropping'],
                coords,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color)
        else:
            if cfg['cropping']:
                clip = clip.crop(y1=cfg['y1'],
                                 y2=cfg['y2'],
                                 x1=cfg['x1'],
                                 x2=cfg['x2'])
            frames2pick = frameselectiontools.KmeansbasedFrameselection(
                clip,
                numframes2extract,
                start,
                stop,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color)

    else:
        print(
            "Please implement this method yourself! Currently the options are 'kmeans', 'jump', 'uniform'."
        )
        frames2pick = []

    # Extract frames + frames with plotted labels and store them in a folder (with name derived from video name) under labeled-data
    print("Let's select frame indices:", frames2pick)
    colors = visualization.get_cmap(len(bodyparts), cfg['colormap'])
    strwidth = int(np.ceil(np.log10(nframes)))  #width for strings
    for index in frames2pick:  ##tqdm(range(0,nframes,10)):
        if opencv:
            PlottingSingleFramecv2(cap, cv2, cfg['cropping'], coords,
                                   Dataframe, bodyparts, tmpfolder, index,
                                   scorer, cfg['dotsize'], cfg['pcutoff'],
                                   cfg['alphavalue'], colors, strwidth,
                                   savelabeled)
        else:
            PlottingSingleFrame(clip, Dataframe, bodyparts, tmpfolder, index,
                                scorer, cfg['dotsize'], cfg['pcutoff'],
                                cfg['alphavalue'], colors, strwidth,
                                savelabeled)
        plt.close("all")

    #close videos
    if opencv:
        cap.release()
    else:
        clip.close()
        del clip

    # Extract annotations based on DeepLabCut and store in the folder (with name derived from video name) under labeled-data
    if len(frames2pick) > 0:
        #Dataframe = pd.read_hdf(os.path.join(videofolder,dataname+'.h5'))
        DF = Dataframe.loc[frames2pick]
        DF.index = [
            os.path.join('labeled-data', vname,
                         "img" + str(index).zfill(strwidth) + ".png")
            for index in DF.index
        ]  #exchange index number by file names.

        machinefile = os.path.join(
            tmpfolder, 'machinelabels-iter' + str(cfg['iteration']) + '.h5')
        if Path(machinefile).is_file():
            Data = pd.read_hdf(machinefile, 'df_with_missing')
            DataCombined = pd.concat([Data, DF])
            #drop duplicate labels:
            DataCombined = DataCombined[~DataCombined.index.duplicated(
                keep='first')]

            DataCombined.to_hdf(machinefile, key='df_with_missing', mode='w')
            DataCombined.to_csv(
                os.path.join(tmpfolder, "machinelabels.csv")
            )  #this is always the most current one (as reading is from h5)
        else:
            DF.to_hdf(machinefile, key='df_with_missing', mode='w')
            DF.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))
        try:
            if cfg['cropping']:
                add.add_new_videos(
                    config, [video],
                    coords=[coords])  # make sure you pass coords as a list
            else:
                add.add_new_videos(config, [video], coords=None)
        except:  #can we make a catch here? - in fact we should drop indices from DataCombined if they are in CollectedData.. [ideal behavior; currently this is pretty unlikely]
            print(
                "AUTOMATIC ADDING OF VIDEO TO CONFIG FILE FAILED! You need to do this manually for including it in the config.yaml file!"
            )
            print("Videopath:", video, "Coordinates for cropping:", coords)
            pass

        print(
            "The outlier frames are extracted. They are stored in the subdirectory labeled-data/%s."
            % vname)
        print(
            "Once you extracted frames for all videos, use 'refine_labels' to manually correct the labels."
        )
    else:
        print("No frames were extracted.")
def create_training_dataset(config,
                            num_shuffles=1,
                            Shuffles=None,
                            windows2linux=False):
    """
    Creates a training dataset. Labels from all the extracted frames are merged into a single .h5 file.\n
    Only the videos included in the config file are used to create this dataset.\n
    [OPTIONAL] Use the function 'add_new_videos' at any stage of the project to add more videos to the project.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    Shuffles: list of integers, optional
        Alternatively, the user can specify the exact shuffle indices to create.

    windows2linux: bool.
        The annotation files contain paths formatted according to your operating system. If you label on Windows
        but train & evaluate on a Unix system (e.g. Ubuntu, Colab, macOS), set this variable to True to convert the paths.
    
    Example
    --------
    >>> deeplabcut.create_training_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)
    Windows:
    >>> deeplabcut.create_training_dataset('C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
    --------
    """
    from skimage import io
    import scipy.io as sio
    import deeplabcut
    import subprocess

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    scorer = cfg['scorer']
    project_path = cfg['project_path']
    # Create path for training sets & store data there
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
        cfg)  #Path concatenation OS platform independent
    auxiliaryfunctions.attempttomakefolder(Path(
        os.path.join(project_path, str(trainingsetfolder))),
                                           recursive=True)

    Data = merge_annotateddatasets(
        cfg, project_path, Path(os.path.join(project_path, trainingsetfolder)),
        windows2linux)
    Data = Data[scorer]  #extract labeled data

    #set model type. we will allow more in the future.
    if cfg['resnet'] == 50:
        net_type = 'resnet_' + str(cfg['resnet'])
        resnet_path = str(
            Path(deeplabcut.__file__).parents[0] /
            'pose_estimation_tensorflow/models/pretrained/resnet_v1_50.ckpt')
    elif cfg['resnet'] == 101:
        net_type = 'resnet_' + str(cfg['resnet'])
        resnet_path = str(
            Path(deeplabcut.__file__).parents[0] /
            'pose_estimation_tensorflow/models/pretrained/resnet_v1_101.ckpt')
    else:
        print(
            "Currently only ResNet 50 or 101 supported, please change 'resnet' entry in config.yaml!"
        )
        num_shuffles = -1  #thus the loop below is empty...

    if not Path(resnet_path).is_file():
        """
        Downloads the ImageNet pretrained weights for ResNet.
        """
        start = os.getcwd()
        os.chdir(str(Path(resnet_path).parents[0]))
        print("Downloading the pretrained model (ResNets)....")
        subprocess.call("download.sh", shell=True)
        os.chdir(start)

    if Shuffles is None:
        Shuffles = range(1, num_shuffles + 1, 1)
    else:
        Shuffles = [i for i in Shuffles if isinstance(i, int)]

    bodyparts = cfg['bodyparts']
    TrainingFraction = cfg['TrainingFraction']
    for shuffle in Shuffles:  # Creating shuffles starting from 1
        for trainFraction in TrainingFraction:
            trainIndexes, testIndexes = SplitTrials(range(len(Data.index)),
                                                    trainFraction)

            ####################################################
            # Generating data structure with labeled information & frame metadata (for deep cut)
            ####################################################

            # Make training file!
            data = []
            for jj in trainIndexes:
                H = {}
                # load image to get dimensions:
                filename = Data.index[jj]
                im = io.imread(os.path.join(cfg['project_path'], filename))
                H['image'] = filename

                if np.ndim(im) == 3:
                    H['size'] = np.array(
                        [np.shape(im)[2],
                         np.shape(im)[0],
                         np.shape(im)[1]])
                else:
                    # print "Grayscale!"
                    H['size'] = np.array([1, np.shape(im)[0], np.shape(im)[1]])

                indexjoints = 0
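                # each joints row holds [bodypart index, x, y]; rows left NaN
                # (unlabeled or out-of-frame parts) are dropped further below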
                joints = np.zeros((len(bodyparts), 3)) * np.nan
                for bpindex, bodypart in enumerate(bodyparts):
                    # are the labels within the image bounds?
                    if (Data[bodypart]['x'][jj] < np.shape(im)[1]
                            and Data[bodypart]['y'][jj] < np.shape(im)[0]):
                        joints[indexjoints, 0] = int(bpindex)
                        joints[indexjoints, 1] = Data[bodypart]['x'][jj]
                        joints[indexjoints, 2] = Data[bodypart]['y'][jj]
                        indexjoints += 1

                joints = joints[np.where(np.prod(
                    np.isfinite(joints),
                    1))[0], :]  # drop NaN, i.e. lines for missing body parts

                assert np.all(joints[:, 2] < np.shape(im)[0])  # y coordinates within image?
                assert np.all(joints[:, 1] < np.shape(im)[1])  # x coordinates within image?

                H['joints'] = np.array(joints, dtype=int)
                if np.size(joints) > 0:  #exclude images without labels
                    data.append(H)

            if len(trainIndexes) > 0:
                datafilename, metadatafilename = auxiliaryfunctions.GetDataandMetaDataFilenames(
                    trainingsetfolder, trainFraction, shuffle, cfg)
                ################################################################################
                # Saving metadata (Pickle file)
                ################################################################################
                auxiliaryfunctions.SaveMetadata(
                    os.path.join(project_path, metadatafilename), data,
                    trainIndexes, testIndexes, trainFraction)
                ################################################################################
                # Saving data file (convert to training file for deeper cut (*.mat))
                ################################################################################

                DTYPE = [('image', 'O'), ('size', 'O'), ('joints', 'O')]
                MatlabData = np.array(
                    [(np.array([data[item]['image']],
                               dtype='U'), np.array([data[item]['size']]),
                      boxitintoacell(data[item]['joints']))
                     for item in range(len(data))],
                    dtype=DTYPE)

                sio.savemat(os.path.join(project_path, datafilename),
                            {'dataset': MatlabData})

                ################################################################################
                # Creating file structure for training &
                # Test files as well as pose_yaml files (containing training and testing information)
                #################################################################################

                modelfoldername = auxiliaryfunctions.GetModelFolder(
                    trainFraction, shuffle, cfg)
                auxiliaryfunctions.attempttomakefolder(
                    Path(config).parents[0] / modelfoldername, recursive=True)
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername) + '/train')
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername) + '/test')

                path_train_config = str(
                    os.path.join(cfg['project_path'], Path(modelfoldername),
                                 'train', 'pose_cfg.yaml'))
                path_test_config = str(
                    os.path.join(cfg['project_path'], Path(modelfoldername),
                                 'test', 'pose_cfg.yaml'))
                #str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test'  /  'pose_cfg.yaml')

                items2change = {
                    "dataset": datafilename,
                    "metadataset": metadatafilename,
                    "num_joints": len(bodyparts),
                    "all_joints": [[i] for i in range(len(bodyparts))],
                    "all_joints_names": [str(bpt) for bpt in bodyparts],
                    "init_weights": resnet_path,
                    "project_path": str(cfg['project_path']),
                    "net_type": net_type
                }

                defaultconfigfile = str(
                    Path(deeplabcut.__file__).parents[0] / 'pose_cfg.yaml')

                trainingdata = MakeTrain_pose_yaml(items2change,
                                                   path_train_config,
                                                   defaultconfigfile)
                keys2save = [
                    "dataset", "num_joints", "all_joints", "all_joints_names",
                    "net_type", 'init_weights', 'global_scale',
                    'location_refinement', 'locref_stdev'
                ]
                MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
                print(
                    "The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
                )
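
For orientation, a sketch (not from the source) of a single record in `data` before it is converted to the MATLAB structure, assuming a 480x640 RGB frame on which two of three bodyparts were labeled:

import numpy as np

H = {
    'image': 'labeled-data/vid1/img005.png',  # hypothetical frame path
    'size': np.array([3, 480, 640]),          # [channels, height, width]
    'joints': np.array([[0, 112, 240],        # rows are [bodypart index, x, y];
                        [2, 305, 198]]),      # bodypart 1 fell outside the image, so its row was dropped
}
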
Example #29
def plot_trajectories(config,
                      videos,
                      videotype='.avi',
                      shuffle=1,
                      trainingsetindex=0,
                      filtered=False,
                      displayedbodyparts='all',
                      showfigures=False,
                      destfolder=None):
    """
    Plots the trajectories of various bodyparts across the video.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis or a path to the directory, where all the videos with same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.\n Only videos with this extension are analyzed. The default is ``.avi``

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default false
        Boolean variable indicating whether filtered output should be plotted rather than frame-by-frame predictions. The filtered version can be calculated with deeplabcut.filterpredictions.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video.
        Either ``all``, then all body parts from config.yaml are used,
        or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    showfigures: bool, default false
        If true then plots are also displayed.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    Example
    --------
    To plot the trajectories:
    >>> deeplabcut.plot_trajectories('home/alex/analysis/project/reaching-task/config.yaml',['/home/alex/analysis/project/videos/reachingvideo1.avi'])
    --------

    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction
    )  #automatically loads corresponding model (even training iteration based on snapshot index)
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        print(video)
        if destfolder is None:
            videofolder = str(Path(video).parents[0])
        else:
            videofolder = destfolder

        vname = str(Path(video).stem)
        print("Starting % ", videofolder, video)
        notanalyzed, dataname, DLCscorer = auxiliaryfunctions.CheckifNotAnalyzed(
            videofolder, vname, DLCscorer, DLCscorerlegacy, flag='checking')

        if notanalyzed:
            print("The video was not analyzed with this scorer:", DLCscorer)
        else:
            #LoadData
            print("Loading ", video, "and data.")
            datafound, metadata, Dataframe, DLCscorer, suffix = auxiliaryfunctions.LoadAnalyzedData(
                str(videofolder), vname, DLCscorer, filtered
            )  # returns whether data was found, plus metadata and the pandas DataFrame
            if datafound:
                basefolder = videofolder
                auxiliaryfunctions.attempttomakefolder(basefolder)
                auxiliaryfunctions.attempttomakefolder(
                    os.path.join(basefolder, 'plot-poses'))
                tmpfolder = os.path.join(basefolder, 'plot-poses', vname)
                auxiliaryfunctions.attempttomakefolder(tmpfolder)
                PlottingResults(tmpfolder, Dataframe, DLCscorer, cfg,
                                bodyparts, showfigures, suffix + '.png')

    print(
        'Plots created! Please check the directory "plot-poses" within the video directory'
    )
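
A usage sketch for the variant above, assembled from its docstring (paths are placeholders): plot only two bodyparts from the filtered predictions produced by deeplabcut.filterpredictions.

import deeplabcut

config = '/home/alex/analysis/project/reaching-task/config.yaml'
video = '/home/alex/analysis/project/videos/reachingvideo1.avi'
deeplabcut.plot_trajectories(config, [video],
                             filtered=True,
                             displayedbodyparts=['hand', 'Joystick'],
                             showfigures=False)
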
def ExtractFramesbasedonPreselection(
    Index,
    extractionalgorithm,
    data,
    video,
    cfg,
    config,
    opencv=True,
    cluster_resizewidth=30,
    cluster_color=False,
    savelabeled=True,
    with_annotations=True,
):
    from deeplabcut.create_project import add

    start = cfg["start"]
    stop = cfg["stop"]
    numframes2extract = cfg["numframes2pick"]
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, "all")

    videofolder = str(Path(video).parents[0])
    vname = str(Path(video).stem)
    tmpfolder = os.path.join(cfg["project_path"], "labeled-data", vname)
    if os.path.isdir(tmpfolder):
        print("Frames from video", vname,
              " already extracted (more will be added)!")
    else:
        auxiliaryfunctions.attempttomakefolder(tmpfolder, recursive=True)

    nframes = len(data)
    print("Loading video...")
    if opencv:
        vid = VideoWriter(video)
        fps = vid.fps
        duration = vid.calc_duration()
    else:
        from moviepy.editor import VideoFileClip

        clip = VideoFileClip(video)
        fps = clip.fps
        duration = clip.duration

    if cfg["cropping"]:  # one might want to adjust
        coords = (cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"])
    else:
        coords = None

    print("Duration of video [s]: ", duration, ", recorded @ ", fps, "fps!")
    print("Overall # of frames: ", nframes,
          "with (cropped) frame dimensions: ")
    if extractionalgorithm == "uniform":
        if opencv:
            frames2pick = frameselectiontools.UniformFramescv2(
                vid, numframes2extract, start, stop, Index)
        else:
            frames2pick = frameselectiontools.UniformFrames(
                clip, numframes2extract, start, stop, Index)
    elif extractionalgorithm == "kmeans":
        if opencv:
            frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                vid,
                numframes2extract,
                start,
                stop,
                cfg["cropping"],
                coords,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color,
            )
        else:
            if cfg["cropping"]:
                clip = clip.crop(y1=cfg["y1"],
                                 y2=cfg["x2"],
                                 x1=cfg["x1"],
                                 x2=cfg["x2"])
            frames2pick = frameselectiontools.KmeansbasedFrameselection(
                clip,
                numframes2extract,
                start,
                stop,
                Index,
                resizewidth=cluster_resizewidth,
                color=cluster_color,
            )

    else:
        print(
            "Please implement this method yourself! Currently the options are 'kmeans', 'jump', 'uniform'."
        )
        frames2pick = []

    # Extract frames + frames with plotted labels and store them in a folder (with name derived from video name) under labeled-data
    print("Let's select frame indices:", frames2pick)
    colors = visualization.get_cmap(len(bodyparts), cfg["colormap"])
    strwidth = int(np.ceil(np.log10(nframes)))  # width for strings
    for index in frames2pick:  ##tqdm(range(0,nframes,10)):
        if opencv:
            PlottingSingleFramecv2(
                vid,
                cfg["cropping"],
                coords,
                data,
                bodyparts,
                tmpfolder,
                index,
                cfg["dotsize"],
                cfg["pcutoff"],
                cfg["alphavalue"],
                colors,
                strwidth,
                savelabeled,
            )
        else:
            PlottingSingleFrame(
                clip,
                data,
                bodyparts,
                tmpfolder,
                index,
                cfg["dotsize"],
                cfg["pcutoff"],
                cfg["alphavalue"],
                colors,
                strwidth,
                savelabeled,
            )
        plt.close("all")

    # close videos
    if opencv:
        vid.close()
    else:
        clip.close()
        del clip

    # Extract annotations based on DeepLabCut and store in the folder (with name derived from video name) under labeled-data
    if len(frames2pick) > 0:
        try:
            if cfg["cropping"]:
                add.add_new_videos(
                    config, [video],
                    coords=[coords])  # make sure you pass coords as a list
            else:
                add.add_new_videos(config, [video], coords=None)
        except:  # can we make a catch here? - in fact we should drop indices from DataCombined if they are in CollectedData.. [ideal behavior; currently this is pretty unlikely]
            print(
                "AUTOMATIC ADDING OF VIDEO TO CONFIG FILE FAILED! You need to do this manually for including it in the config.yaml file!"
            )
            print("Videopath:", video, "Coordinates for cropping:", coords)
            pass

        if with_annotations:
            machinefile = os.path.join(
                tmpfolder,
                "machinelabels-iter" + str(cfg["iteration"]) + ".h5")
            if isinstance(data, pd.DataFrame):
                df = data.loc[frames2pick]
                df.index = [
                    os.path.join(
                        "labeled-data",
                        vname,
                        "img" + str(index).zfill(strwidth) + ".png",
                    ) for index in df.index
                ]  # exchange index number by file names.
            elif isinstance(data, dict):
                idx = [
                    os.path.join(
                        "labeled-data",
                        vname,
                        "img" + str(index).zfill(strwidth) + ".png",
                    ) for index in frames2pick
                ]
                filename = os.path.join(str(tmpfolder),
                                        f"CollectedData_{cfg['scorer']}.h5")
                try:
                    df_temp = pd.read_hdf(filename, "df_with_missing")
                    columns = df_temp.columns
                except FileNotFoundError:
                    columns = pd.MultiIndex.from_product(
                        [
                            [cfg["scorer"]],
                            cfg["individuals"],
                            cfg["multianimalbodyparts"],
                            ["x", "y"],
                        ],
                        names=["scorer", "individuals", "bodyparts", "coords"],
                    )
                    if cfg["uniquebodyparts"]:
                        columns2 = pd.MultiIndex.from_product(
                            [
                                [cfg["scorer"]],
                                ["single"],
                                cfg["uniquebodyparts"],
                                ["x", "y"],
                            ],
                            names=[
                                "scorer", "individuals", "bodyparts", "coords"
                            ],
                        )
                        df_temp = pd.concat((
                            pd.DataFrame(columns=columns),
                            pd.DataFrame(columns=columns2),
                        ))
                        columns = df_temp.columns
                array = np.full((len(frames2pick), len(columns)), np.nan)
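                # one row per picked frame; frames absent from `data` keep NaN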
                for i, index in enumerate(frames2pick):
                    data_temp = data.get(index)
                    if data_temp is not None:
                        vals = np.concatenate(data_temp)[:, :2].flatten()
                        array[i, :len(vals)] = vals
                df = pd.DataFrame(array, index=idx, columns=columns)
            else:
                return
            if Path(machinefile).is_file():
                Data = pd.read_hdf(machinefile, "df_with_missing")
                DataCombined = pd.concat([Data, df])
                # drop duplicate labels:
                DataCombined = DataCombined[~DataCombined.index.duplicated(
                    keep="first")]

                DataCombined.to_hdf(machinefile,
                                    key="df_with_missing",
                                    mode="w")
                DataCombined.to_csv(
                    os.path.join(tmpfolder, "machinelabels.csv")
                )  # this is always the most current one (as reading is from h5)
            else:
                df.to_hdf(machinefile, key="df_with_missing", mode="w")
                df.to_csv(os.path.join(tmpfolder, "machinelabels.csv"))

        print(
            "The outlier frames are extracted. They are stored in the subdirectory labeled-data/%s."
            % vname)
        print(
            "Once you extracted frames for all videos, use 'refine_labels' to manually correct the labels."
        )
    else:
        print("No frames were extracted.")