Example #1
def plot_trajectories(config,
                      videos,
                      videotype='.avi',
                      shuffle=1,
                      trainingsetindex=0,
                      filtered=False,
                      displayedbodyparts='all',
                      showfigures=False,
                      destfolder=None):
    """
    Plots the trajectories of various bodyparts across the video.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis, or a path to a directory where all videos with the same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory. Only videos with this extension are analyzed. The default is ``.avi``

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default False
        Boolean variable indicating if filtered output should be plotted rather than frame-by-frame predictions. The filtered version can be computed with deeplabcutcore.filterpredictions.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video.
        Either ``all``, then all body parts from config.yaml are used,
        or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    showfigures: bool, default False
        If True, plots are also displayed.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    Example
    --------
    To plot the trajectories:
    >>> deeplabcutcore.plot_trajectories('/home/alex/analysis/project/reaching-task/config.yaml',['/home/alex/analysis/project/videos/reachingvideo1.avi'])

    """
    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction
    )  #automatically loads corresponding model (even training iteration based on snapshot index)
    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts)
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        print(video)
        if destfolder is None:
            videofolder = str(Path(video).parents[0])
        else:
            videofolder = destfolder

        vname = str(Path(video).stem)
        print("Starting % ", videofolder, video)
        notanalyzed, dataname, DLCscorer = auxiliaryfunctions.CheckifNotAnalyzed(
            videofolder, vname, DLCscorer, DLCscorerlegacy, flag='checking')

        if notanalyzed:
            print("The video was not analyzed with this scorer:", DLCscorer)
        else:
            #LoadData
            print("Loading ", video, "and data.")
            datafound, metadata, Dataframe, DLCscorer, suffix = auxiliaryfunctions.LoadAnalyzedData(
                str(videofolder), vname, DLCscorer, filtered
            )  #returns boolean variable if data was found and metadata + pandas array
            if datafound:
                basefolder = videofolder
                auxiliaryfunctions.attempttomakefolder(basefolder)
                auxiliaryfunctions.attempttomakefolder(
                    os.path.join(basefolder, 'plot-poses'))
                tmpfolder = os.path.join(basefolder, 'plot-poses', vname)
                auxiliaryfunctions.attempttomakefolder(tmpfolder)
                PlottingResults(tmpfolder, Dataframe, DLCscorer, cfg,
                                bodyparts, showfigures, suffix + '.png')

    print(
        'Plots created! Please check the directory "plot-poses" within the video directory'
    )
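
A minimal usage sketch (the project and video paths below are hypothetical; the call assumes the video was already analyzed so trajectory data exists):

import deeplabcutcore

config_path = '/home/alex/analysis/project/reaching-task/config.yaml'  # hypothetical path
videos = ['/home/alex/analysis/project/videos/reachingvideo1.avi']     # hypothetical path

# Plot only two body parts; figures are saved under <videofolder>/plot-poses/<videoname>/
# and also displayed on screen.
deeplabcutcore.plot_trajectories(config_path, videos,
                                 displayedbodyparts=['hand', 'Joystick'],
                                 showfigures=True)
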
Example #2
def return_evaluate_network_data(config,
                                 shuffle=0,
                                 trainingsetindex=0,
                                 comparisonbodyparts="all",
                                 Snapindex=None,
                                 rescale=False,
                                 fulldata=False,
                                 show_errors=True):
    """
    Returns the results for a (previously evaluated) network; see deeplabcutcore.evaluate_network(..).
    Returns a list of (per model): [trainingsiterations, trainfraction, shuffle, trainerror, testerror, pcutoff, trainerrorpcutoff, testerrorpcutoff, Snapshots[snapindex], scale, net_type]

    If fulldata=True, also returns the complete annotation and prediction arrays.
    Returns a list of: (DataMachine, Data, data, trainIndices, testIndices, trainFraction, DLCscorer, comparisonbodyparts, cfg, evaluationfolder, Snapshots[snapindex])

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    shuffle: int, optional
        Integer specifying the shuffle index of the training dataset. The default is 0.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    comparisonbodyparts: list of bodyparts, Default is "all".
        The average error will be computed for those body parts only (Has to be a subset of the body parts).

    rescale: bool, default False
        Evaluate the model at the 'global_scale' variable (as set in the test/pose_config.yaml file for a particular project), i.e. every
        image will be resized according to that scale and the prediction will be compared to the resized ground truth. The error will be
        reported in pixels at the *original* size: for a [200,200] pixel image evaluated at global_scale=.5, the predictions are calculated
        on [100,100] pixel images, compared to 1/2 * ground truth, and this error is then multiplied by 2. The evaluation images are also
        shown at the original size.

    Examples
    --------
    To retrieve the stored evaluation results:
    >>> deeplabcutcore.return_evaluate_network_data('/analysis/project/reaching-task/config.yaml', shuffle=1)
    --------
    To evaluate with plotting (see evaluate_network):
    >>> deeplabcutcore.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[1], plotting=True)
    """

    import os

    from deeplabcutcore.pose_estimation_tensorflow.config import load_config
    from deeplabcutcore.utils import auxiliaryfunctions

    start_path = os.getcwd()
    # Read file path for pose_config file. >> pass it on
    cfg = auxiliaryfunctions.read_config(config)

    # Loading human-annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    #Data=pd.read_hdf(os.path.join(cfg["project_path"],str(trainingsetfolder),'CollectedData_' + cfg["scorer"] + '.h5'),'df_with_missing')

    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts)
    ##################################################
    # Load data...
    ##################################################
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
        trainingsetfolder, trainFraction, shuffle, cfg)
    modelfolder = os.path.join(
        cfg["project_path"],
        str(auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)))
    path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
    # Load meta data
    data, trainIndices, testIndices, trainFraction = auxiliaryfunctions.LoadMetadata(
        os.path.join(cfg["project_path"], metadatafn))

    try:
        dlc_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
        raise FileNotFoundError(
            "It seems the model for shuffle %s and trainFraction %s does not exist."
            % (shuffle, trainFraction))

    ########################### RESCALING (to global scale)
    if rescale:
        scale = dlc_cfg['global_scale']
        print("Rescaling Data to ", scale)
        Data = pd.read_hdf(
            os.path.join(cfg["project_path"], str(trainingsetfolder),
                         'CollectedData_' + cfg["scorer"] + '.h5'),
            'df_with_missing') * scale
    else:
        scale = 1
        Data = pd.read_hdf(
            os.path.join(cfg["project_path"], str(trainingsetfolder),
                         'CollectedData_' + cfg["scorer"] + '.h5'),
            'df_with_missing')

    evaluationfolder = os.path.join(
        cfg["project_path"],
        str(auxiliaryfunctions.GetEvaluationFolder(trainFraction, shuffle,
                                                   cfg)))
    # Check which snapshots are available and sort them by # iterations
    Snapshots = np.array([
        fn.split('.')[0]
        for fn in os.listdir(os.path.join(str(modelfolder), 'train'))
        if "index" in fn
    ])

    if len(Snapshots) == 0:
        print(
            "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
            % (shuffle, trainFraction))
        snapindices = []
    else:
        increasing_indices = np.argsort(
            [int(m.split('-')[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]
        if Snapindex is None:
            Snapindex = cfg["snapshotindex"]

        if Snapindex == -1:
            snapindices = [-1]
        elif Snapindex == "all":
            snapindices = range(len(Snapshots))
        elif Snapindex < len(Snapshots):
            snapindices = [Snapindex]
        else:
            snapindices = []
            print(
                "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
            )

    DATA = []
    results = []
    for snapindex in snapindices:
        dlc_cfg['init_weights'] = os.path.join(
            str(modelfolder), 'train',
            Snapshots[snapindex])  #setting weights to corresponding snapshot.
        trainingsiterations = (dlc_cfg['init_weights'].split(
            os.sep)[-1]).split('-')[
                -1]  #read how many training iterations that corresponds to.

        #name for deeplabcut net (based on its parameters)
        #DLCscorer = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction,trainingsiterations)
        DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
            cfg, shuffle, trainFraction, trainingsiterations)
        print("Retrieving ", DLCscorer, " with # of trainingiterations:",
              trainingsiterations)
        notanalyzed, resultsfilename, DLCscorer = auxiliaryfunctions.CheckifNotEvaluated(
            str(evaluationfolder), DLCscorer, DLCscorerlegacy,
            Snapshots[snapindex])
        #resultsfilename=os.path.join(str(evaluationfolder),DLCscorer + '-' + str(Snapshots[snapindex])+  '.h5') # + '-' + str(snapshot)+  ' #'-' + Snapshots[snapindex]+  '.h5')
        print(resultsfilename)
        if not notanalyzed and os.path.isfile(resultsfilename):  #data exists..
            DataMachine = pd.read_hdf(resultsfilename, 'df_with_missing')
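            # combine ground-truth annotations and machine predictions into one frame-indexed table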
            DataCombined = pd.concat([Data.T, DataMachine.T], axis=0).T
            RMSE, RMSEpcutoff = pairwisedistances(DataCombined, cfg["scorer"],
                                                  DLCscorer, cfg["pcutoff"],
                                                  comparisonbodyparts)

            testerror = np.nanmean(RMSE.iloc[testIndices].values.flatten())
            trainerror = np.nanmean(RMSE.iloc[trainIndices].values.flatten())
            testerrorpcutoff = np.nanmean(
                RMSEpcutoff.iloc[testIndices].values.flatten())
            trainerrorpcutoff = np.nanmean(
                RMSEpcutoff.iloc[trainIndices].values.flatten())
            if show_errors:
                print("Results for",
                      trainingsiterations, " training iterations:",
                      int(100 * trainFraction), shuffle, "train error:",
                      np.round(trainerror, 2), "pixels. Test error:",
                      np.round(testerror, 2), " pixels.")
                print("With pcutoff of", cfg["pcutoff"], " train error:",
                      np.round(trainerrorpcutoff, 2), "pixels. Test error:",
                      np.round(testerrorpcutoff, 2), "pixels")
                print("Snapshot", Snapshots[snapindex])

            r = [
                trainingsiterations,
                int(100 * trainFraction), shuffle,
                np.round(trainerror, 2),
                np.round(testerror, 2), cfg["pcutoff"],
                np.round(trainerrorpcutoff, 2),
                np.round(testerrorpcutoff, 2), Snapshots[snapindex], scale,
                dlc_cfg['net_type']
            ]
            results.append(r)
            if fulldata:
                DATA.append([
                    DataMachine, Data, data, trainIndices, testIndices,
                    trainFraction, DLCscorer, comparisonbodyparts, cfg,
                    evaluationfolder, Snapshots[snapindex]
                ])
        else:
            print("Model not trained/evaluated!")

    os.chdir(start_path)
    if fulldata:
        return DATA, results
    else:
        return results
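
A minimal usage sketch (hypothetical path; assumes evaluate_network was already run for this shuffle):

import deeplabcutcore

config_path = '/analysis/project/reaching-task/config.yaml'  # hypothetical path
results = deeplabcutcore.return_evaluate_network_data(config_path, shuffle=1)

# one entry per evaluated snapshot
for (iters, trainfraction, shuffle, trainerror, testerror, pcutoff,
     trainerror_p, testerror_p, snapshot, scale, net_type) in results:
    print(snapshot, "train:", trainerror, "px  test:", testerror, "px")
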
Example #3
def extract_outlier_frames(
    config,
    videos,
    videotype="avi",
    shuffle=1,
    trainingsetindex=0,
    outlieralgorithm="jump",
    comparisonbodyparts="all",
    epsilon=20,
    p_bound=0.01,
    ARdegree=3,
    MAdegree=1,
    alpha=0.01,
    extractionalgorithm="kmeans",
    automatic=False,
    cluster_resizewidth=30,
    cluster_color=False,
    opencv=True,
    savelabeled=True,
    destfolder=None,
):
    """
    Extracts outlier frames in case the predictions are not correct for a certain video, from the (cropped) video running from
    'start' to 'stop' as defined in config.yaml.

    Another crucial parameter in config.yaml is how many frames to extract: 'numframes2pick'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis, or a path to a directory where all videos with the same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory. Only videos with this extension are analyzed. The default is ``.avi``

    shuffle : int, optional
        The shuffle index of the training dataset. The extracted frames will be stored in the labeled-dataset for
        the corresponding shuffle of the training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    outlieralgorithm: 'fitting', 'jump', 'uncertain', or 'manual'
        String specifying the algorithm used to detect the outliers. Currently, deeplabcut supports three methods + a manual GUI option. 'fitting'
        fits an Auto Regressive Integrated Moving Average model to the data and computes the distance to the estimated data. Distances larger than
        epsilon are then potentially identified as outliers. The method 'jump' identifies jumps larger than 'epsilon' in any body part; and 'uncertain'
        looks for frames with confidence below p_bound. The default is set to ``jump``.

    comparisonbodyparts: list of strings, optional
        This selects the body parts for which the comparisons with the outliers are carried out. Either ``all``, then all body parts
        from config.yaml are used, or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    p_bound: float between 0 and 1, optional
        For outlieralgorithm 'uncertain' this parameter defines the likelihood below which a body part will be flagged as a putative outlier.

    epsilon: float, optional
        Meaning depends on outlieralgorithm. The default is set to 20 pixels.
        For outlieralgorithm 'fitting': float bound according to which frames are picked when the (average) body part estimate deviates from the model fit.
        For outlieralgorithm 'jump': float bound specifying the distance by which body points jump from one frame to the next (Euclidean distance).

    ARdegree: int, optional
        For outlieralgorithm 'fitting': autoregressive degree of the ARIMA model. (Note we use SARIMAX without exogenous and seasonal parts.)
        See https://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html

    MAdegree: int
        For outlieralgorithm 'fitting': moving-average degree of the ARIMA model. (Note we use SARIMAX without exogenous and seasonal parts.)
        See https://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html

    alpha: float
        Significance level for detecting outliers based on the confidence interval of the fitted ARIMA model. Only the distance is used, however.

    extractionalgorithm : string, optional
        String specifying the algorithm to use for selecting the frames from the identified putative outlier frames. Currently, deeplabcut
        supports either ``kmeans`` or ``uniform`` based selection (same logic as for extract_frames).
        The default is set to ``kmeans``; if provided it must be either ``uniform`` or ``kmeans``.

    automatic : bool, optional
        Set it to True, if you want to extract outliers without being asked for user feedback.

    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).

    cluster_color: bool, default: False
        If False, each downsampled image is treated as a grayscale vector (discarding color information). If True, the color channels are considered. This increases
        the computational complexity.

    opencv: bool, default: True
        Uses OpenCV for loading & extracting (otherwise moviepy (legacy)).

    savelabeled: bool, default: True
        If True, also saves each frame with the predicted labels in each folder.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    Examples
    --------
    Windows example for extracting the frames with default settings
    >>> deeplabcutcore.extract_outlier_frames('C:\\myproject\\reaching-task\\config.yaml',['C:\\yourusername\\rig-95\\Videos\\reachingvideo1.avi'])
    --------
    for extracting the frames with default settings
    >>> deeplabcutcore.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'])
    --------
    for extracting the frames with kmeans
    >>> deeplabcutcore.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'],extractionalgorithm='kmeans')
    --------
    for extracting the frames with kmeans and epsilon = 5 pixels.
    >>> deeplabcutcore.extract_outlier_frames('/analysis/project/reaching-task/config.yaml',['/analysis/project/video/reachingvideo1.avi'],epsilon=5,extractionalgorithm='kmeans')
    --------
    """

    cfg = auxiliaryfunctions.read_config(config)
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction=cfg["TrainingFraction"][trainingsetindex]
    )
    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        if destfolder is None:
            videofolder = str(Path(video).parents[0])
        else:
            videofolder = destfolder

        notanalyzed, dataname, DLCscorer = auxiliaryfunctions.CheckifNotAnalyzed(
            videofolder,
            str(Path(video).stem),
            DLCscorer,
            DLCscorerlegacy,
            flag="checking",
        )
        if notanalyzed:
            print(
                "It seems the video has not been analyzed yet, or the video is not found! You can only refine the labels after the a video is analyzed. Please run 'analyze_video' first. Or, please double check your video file path"
            )
        else:
            Dataframe = pd.read_hdf(dataname, "df_with_missing")
            scorer = Dataframe.columns.get_level_values(0)[0]  # read scorer name from the dataframe header
            nframes = np.size(Dataframe.index)
            # extract min and max index based on start stop interval.
            startindex = max([int(np.floor(nframes * cfg["start"])), 0])
            stopindex = min([int(np.ceil(nframes * cfg["stop"])), nframes])
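            # all frame indices inside the [start, stop] analysis window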
            Index = np.arange(stopindex - startindex) + startindex
            # figure out body part list:
            bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
                cfg, comparisonbodyparts
            )

            Indices = []
            if outlieralgorithm == "uncertain":  # necessary parameters: considered body parts and p_bound
                for bpindex, bp in enumerate(bodyparts):
                    if bp in cfg["bodyparts"]:  # filter [who knows what users put in...]
                        p = Dataframe[scorer][bp]["likelihood"].values[Index]
                        Indices.extend(
                            np.where(p < p_bound)[0] + startindex
                        )  # all indices between start and stop that are below p_bound.

            elif outlieralgorithm == "jump":
                for bpindex, bp in enumerate(bodyparts):
                    if bp in cfg["bodyparts"]:  # filter [who knows what users put in...]
                        dx = np.diff(Dataframe[scorer][bp]["x"].values[Index])
                        dy = np.diff(Dataframe[scorer][bp]["y"].values[Index])
                        # all indices between start and stop with jump larger than epsilon (leading up to this point!)
                        Indices.extend(
                            np.where((dx ** 2 + dy ** 2) > epsilon ** 2)[0]
                            + startindex
                            + 1
                        )
            elif outlieralgorithm == "fitting":
                # deviation_dataname = str(Path(videofolder)/Path(dataname))
                # Calculate deviatons for video
                [d, o] = ComputeDeviations(
                    Dataframe,
                    cfg,
                    bodyparts,
                    scorer,
                    dataname,
                    p_bound,
                    alpha,
                    ARdegree,
                    MAdegree,
                )

                # Some heuristics for extracting frames based on distance:
                Indices = np.where(d > epsilon)[
                    0
                ]  # time points with at least average difference of epsilon

                if (
                    len(Indices) < cfg["numframes2pick"] * 2
                    and len(d) > cfg["numframes2pick"] * 2
                ):  # if too few points qualify, extract the most distant ones.
                    Indices = np.argsort(d)[::-1][: cfg["numframes2pick"] * 2]
            elif outlieralgorithm == "manual":
                wd = Path(config).resolve().parents[0]
                os.chdir(str(wd))
                from deeplabcutcore.refine_training_dataset import (
                    outlier_frame_extraction_toolbox,
                )

                outlier_frame_extraction_toolbox.show(
                    config, video, shuffle, Dataframe, scorer, savelabeled
                )
            # Run always except when outlieralgorithm == "manual".
            if outlieralgorithm != "manual":
                Indices = np.sort(list(set(Indices)))  # remove repetitions.
                print(
                    "Method ",
                    outlieralgorithm,
                    " found ",
                    len(Indices),
                    " putative outlier frames.",
                )
                print(
                    "Do you want to proceed with extracting ",
                    cfg["numframes2pick"],
                    " of those?",
                )
                if outlieralgorithm == "uncertain":
                    print(
                        "If this list is very large, perhaps consider changing the paramters (start, stop, p_bound, comparisonbodyparts) or use a different method."
                    )
                elif outlieralgorithm == "jump":
                    print(
                        "If this list is very large, perhaps consider changing the paramters (start, stop, epsilon, comparisonbodyparts) or use a different method."
                    )
                elif outlieralgorithm == "fitting":
                    print(
                        "If this list is very large, perhaps consider changing the paramters (start, stop, epsilon, ARdegree, MAdegree, alpha, comparisonbodyparts) or use a different method."
                    )

                if not automatic:
                    askuser = input("yes/no")
                else:
                    askuser = "yes"

                if (
                    askuser == "y"
                    or askuser == "yes"
                    or askuser == "Ja"
                    or askuser == "ha"
                ):  # multilanguage support :)
                    # Now extract from those Indices!
                    ExtractFramesbasedonPreselection(
                        Indices,
                        extractionalgorithm,
                        Dataframe,
                        dataname,
                        scorer,
                        video,
                        cfg,
                        config,
                        opencv,
                        cluster_resizewidth,
                        cluster_color,
                        savelabeled,
                    )
                else:
                    print(
                        "Nothing extracted, please change the parameters and start again..."
                    )
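
To make the 'jump' criterion above concrete, here is a minimal self-contained sketch of the same test on toy coordinates (synthetic data, not part of the library):

import numpy as np

# toy x/y trajectory of one body part: smooth except for one large jump
x = np.array([10., 11., 12., 60., 61., 62.])
y = np.array([5., 5., 6., 40., 41., 41.])
epsilon = 20  # pixels

dx = np.diff(x)
dy = np.diff(y)
# frames whose Euclidean displacement from the previous frame exceeds epsilon;
# +1 because np.diff shortens the array by one (the jump lands on the later frame)
outliers = np.where(dx ** 2 + dy ** 2 > epsilon ** 2)[0] + 1
print(outliers)  # -> [3]
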
Example #4
def evaluate_network(config,
                     Shuffles=[1],
                     trainingsetindex=0,
                     plotting=None,
                     show_errors=True,
                     comparisonbodyparts="all",
                     gputouse=None,
                     rescale=False):
    """

    Evaluates the network based on the saved models at different stages of training.
    The evaluation results are stored in .h5 and .csv files under the subdirectory 'evaluation-results'.
    Change the snapshotindex parameter in the config file to 'all' in order to evaluate all the saved models.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    Shuffles: list, optional
        List of integers specifying the shuffle indices of the training dataset. The default is [1]

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    plotting: bool, optional
        Plots the predictions on the train and test images. The default is ``None`` (equivalent to ``False``); if provided it must be either ``True`` or ``False``

    show_errors: bool, optional
        Display train and test errors. The default is ``True``

    comparisonbodyparts: list of bodyparts, Default is "all".
        The average error will be computed for those body parts only (Has to be a subset of the body parts).

    gputouse: int, optional
        Natural number indicating the index of your GPU (see number in nvidia-smi). If you do not have a GPU, put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    rescale: bool, default False
        Evaluate the model at the 'global_scale' variable (as set in the test/pose_config.yaml file for a particular project), i.e. every
        image will be resized according to that scale and the prediction will be compared to the resized ground truth. The error will be
        reported in pixels at the *original* size: for a [200,200] pixel image evaluated at global_scale=.5, the predictions are calculated
        on [100,100] pixel images, compared to 1/2 * ground truth, and this error is then multiplied by 2. The evaluation images are also
        shown at the original size.

    Examples
    --------
    If you do not want to plot
    >>> deeplabcutcore.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[1])
    --------
    If you want to plot
    >>> deeplabcutcore.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[1], plotting=True)

    """
    import os
    #import skimage.color
    #from skimage.io import imread

    from deeplabcutcore.utils.auxfun_videos import imread, imresize
    from deeplabcutcore.pose_estimation_tensorflow.nnet import predict
    from deeplabcutcore.pose_estimation_tensorflow.config import load_config
    from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset import data_to_input
    from deeplabcutcore.utils import auxiliaryfunctions
    import tensorflow as tf

    if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
        del os.environ[
            'TF_CUDNN_USE_AUTOTUNE']  #was potentially set during training

    tf.compat.v1.reset_default_graph()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  #
    #    tf.logging.set_verbosity(tf.logging.WARN)

    start_path = os.getcwd()
    # Read file path for pose_config file. >> pass it on
    cfg = auxiliaryfunctions.read_config(config)
    if gputouse is not None:  # GPU selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    if trainingsetindex == 'all':
        TrainingFractions = cfg["TrainingFraction"]
    else:
        if 0 <= trainingsetindex < len(cfg["TrainingFraction"]):
            TrainingFractions = [cfg["TrainingFraction"][int(trainingsetindex)]]
        else:
            raise Exception(
                "Please check the trainingsetindex! %s should be an integer from 0 .. %s"
                % (trainingsetindex, int(len(cfg["TrainingFraction"]) - 1)))

    # Loading human-annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(cfg["project_path"], str(trainingsetfolder),
                     'CollectedData_' + cfg["scorer"] + '.h5'),
        'df_with_missing')

    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts)
    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        os.path.join(cfg["project_path"], "evaluation-results"))
    for shuffle in Shuffles:
        for trainFraction in TrainingFractions:
            ##################################################
            # Load and setup CNN part detector
            ##################################################
            datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg)

            modelfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(trainFraction, shuffle,
                                                      cfg)))
            path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
            # Load meta data
            data, trainIndices, testIndices, trainFraction = auxiliaryfunctions.LoadMetadata(
                os.path.join(cfg["project_path"], metadatafn))

            try:
                dlc_cfg = load_config(str(path_test_config))
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It seems the model for shuffle %s and trainFraction %s does not exist."
                    % (shuffle, trainFraction))

            # change batch size, if it was edited during analysis!
            dlc_cfg['batch_size'] = 1

            #Create folder structure to store results.
            evaluationfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetEvaluationFolder(
                        trainFraction, shuffle, cfg)))
            auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                                   recursive=True)
            #path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

            # Check which snapshots are available and sort them by # iterations
            Snapshots = np.array([
                fn.split('.')[0]
                for fn in os.listdir(os.path.join(str(modelfolder), 'train'))
                if "index" in fn
            ])
            try:  #check if any where found?
                Snapshots[0]
            except IndexError:
                raise FileNotFoundError(
                    "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                    % (shuffle, trainFraction))

            increasing_indices = np.argsort(
                [int(m.split('-')[1]) for m in Snapshots])
            Snapshots = Snapshots[increasing_indices]

            if cfg["snapshotindex"] == -1:
                snapindices = [-1]
            elif cfg["snapshotindex"] == "all":
                snapindices = range(len(Snapshots))
            elif cfg["snapshotindex"] < len(Snapshots):
                snapindices = [cfg["snapshotindex"]]
            else:
                raise ValueError(
                    "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                )

            final_result = []

            ########################### RESCALING (to global scale)
            if rescale:
                scale = dlc_cfg['global_scale']
                Data = pd.read_hdf(
                    os.path.join(cfg["project_path"], str(trainingsetfolder),
                                 'CollectedData_' + cfg["scorer"] + '.h5'),
                    'df_with_missing') * scale
            else:
                scale = 1

            ##################################################
            # Compute predictions over images
            ##################################################
            for snapindex in snapindices:
                dlc_cfg['init_weights'] = os.path.join(
                    str(modelfolder), 'train', Snapshots[snapindex]
                )  #setting weights to corresponding snapshot.
                trainingsiterations = (
                    dlc_cfg['init_weights'].split(os.sep)[-1]
                ).split(
                    '-'
                )[-1]  #read how many training iterations that corresponds to.

                # Name for deeplabcut net (based on its parameters)
                DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                    cfg, shuffle, trainFraction, trainingsiterations)
                print("Running ", DLCscorer, " with # of trainingiterations:",
                      trainingsiterations)
                notanalyzed, resultsfilename, DLCscorer = auxiliaryfunctions.CheckifNotEvaluated(
                    str(evaluationfolder), DLCscorer, DLCscorerlegacy,
                    Snapshots[snapindex])
                if notanalyzed:
                    # Specifying state of model (snapshot / training state)
                    sess, inputs, outputs = predict.setup_pose_prediction(
                        dlc_cfg)
                    Numimages = len(Data.index)
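                    # one row per image: (x, y, likelihood) for each joint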
                    PredicteData = np.zeros(
                        (Numimages, 3 * len(dlc_cfg['all_joints_names'])))
                    print("Analyzing data...")
                    for imageindex, imagename in tqdm(enumerate(Data.index)):
                        image = imread(os.path.join(cfg['project_path'],
                                                    imagename),
                                       mode='RGB')
                        if scale != 1:
                            image = imresize(image, scale)

                        #image = skimage.color.gray2rgb(image)
                        image_batch = data_to_input(image)

                        # Compute prediction with the CNN
                        outputs_np = sess.run(outputs,
                                              feed_dict={inputs: image_batch})
                        scmap, locref = predict.extract_cnn_output(
                            outputs_np, dlc_cfg)

                        # Extract maximum scoring location from the heatmap, assume 1 person
                        pose = predict.argmax_pose_predict(
                            scmap, locref, dlc_cfg.stride)
                        PredicteData[imageindex, :] = pose.flatten(
                        )  # NOTE: thereby cfg_test['all_joints_names'] should be in the same order as bodyparts!

                    sess.close()  #closes the current tf session

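                    # column index: (scorer, bodypart, x/y/likelihood), matching the flattened pose vector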
                    index = pd.MultiIndex.from_product(
                        [[DLCscorer], dlc_cfg['all_joints_names'],
                         ['x', 'y', 'likelihood']],
                        names=['scorer', 'bodyparts', 'coords'])

                    # Saving results
                    DataMachine = pd.DataFrame(PredicteData,
                                               columns=index,
                                               index=Data.index.values)
                    DataMachine.to_hdf(resultsfilename,
                                       'df_with_missing',
                                       format='table',
                                       mode='w')

                    print("Done and results stored for snapshot: ",
                          Snapshots[snapindex])
                    DataCombined = pd.concat([Data.T, DataMachine.T],
                                             axis=0,
                                             sort=False).T

                    RMSE, RMSEpcutoff = pairwisedistances(
                        DataCombined, cfg["scorer"], DLCscorer, cfg["pcutoff"],
                        comparisonbodyparts)
                    testerror = np.nanmean(
                        RMSE.iloc[testIndices].values.flatten())
                    trainerror = np.nanmean(
                        RMSE.iloc[trainIndices].values.flatten())
                    testerrorpcutoff = np.nanmean(
                        RMSEpcutoff.iloc[testIndices].values.flatten())
                    trainerrorpcutoff = np.nanmean(
                        RMSEpcutoff.iloc[trainIndices].values.flatten())
                    results = [
                        trainingsiterations,
                        int(100 * trainFraction), shuffle,
                        np.round(trainerror, 2),
                        np.round(testerror, 2), cfg["pcutoff"],
                        np.round(trainerrorpcutoff, 2),
                        np.round(testerrorpcutoff, 2)
                    ]
                    final_result.append(results)

                    if show_errors:
                        print("Results for",
                              trainingsiterations, " training iterations:",
                              int(100 * trainFraction), shuffle,
                              "train error:",
                              np.round(trainerror, 2), "pixels. Test error:",
                              np.round(testerror, 2), " pixels.")
                        print("With pcutoff of",
                              cfg["pcutoff"], " train error:",
                              np.round(trainerrorpcutoff,
                                       2), "pixels. Test error:",
                              np.round(testerrorpcutoff, 2), "pixels")
                        if scale != 1:
                            print(
                                "The predictions have been calculated for rescaled images (and rescaled ground truth). Scale:",
                                scale)
                        print(
                            "Thereby, the errors are given by the average distances between the labels by DLC and the scorer."
                        )

                    if plotting:
                        print("Plotting...")
                        foldername = os.path.join(
                            str(evaluationfolder), 'LabeledImages_' +
                            DLCscorer + '_' + Snapshots[snapindex])
                        auxiliaryfunctions.attempttomakefolder(foldername)
                        Plotting(
                            cfg, comparisonbodyparts, DLCscorer, trainIndices,
                            DataCombined * 1. / scale, foldername
                        )  #Rescaling coordinates to have figure in original size!

                    tf.compat.v1.reset_default_graph()
                    #print(final_result)
                else:
                    DataMachine = pd.read_hdf(resultsfilename,
                                              'df_with_missing')
                    if plotting:
                        DataCombined = pd.concat([Data.T, DataMachine.T],
                                                 axis=0,
                                                 sort=False).T
                        print(
                            "Plotting...(attention scale might be inconsistent in comparison to when data was analyzed; i.e. if you used rescale)"
                        )
                        foldername = os.path.join(
                            str(evaluationfolder), 'LabeledImages_' +
                            DLCscorer + '_' + Snapshots[snapindex])
                        auxiliaryfunctions.attempttomakefolder(foldername)
                        Plotting(cfg, comparisonbodyparts, DLCscorer,
                                 trainIndices, DataCombined * 1. / scale,
                                 foldername)

            if len(final_result) > 0:  #Only append if results were calculated
                make_results_file(final_result, evaluationfolder, DLCscorer)
                print(
                    "The network is evaluated and the results are stored in the subdirectory 'evaluation-results'."
                )
                print(
                    "If it generalizes well, choose the best model for prediction and update the config file with the appropriate index for the 'snapshotindex'.\nUse the function 'analyze_videos' to make predictions on new videos."
                )
                print(
                    "Otherwise consider retraining the network (see DeepLabCut workflow Fig 2)"
                )

    # returning to initial folder
    os.chdir(str(start_path))
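
A minimal usage sketch (hypothetical path; assumes the network for shuffle 1 was trained with train_network):

import deeplabcutcore

config_path = '/analysis/project/reaching-task/config.yaml'  # hypothetical path

# Evaluate the snapshot(s) selected by 'snapshotindex' in config.yaml and
# plot predictions on the train/test images:
deeplabcutcore.evaluate_network(config_path, Shuffles=[1], plotting=True)
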
Example #5
def create_labeled_video(
    config,
    videos,
    videotype="avi",
    shuffle=1,
    trainingsetindex=0,
    filtered=False,
    save_frames=False,
    Frames2plot=None,
    delete=False,
    displayedbodyparts="all",
    codec="mp4v",
    outputframerate=None,
    destfolder=None,
    draw_skeleton=False,
    trailpoints=0,
    displaycropped=False,
):
    """
    Labels the bodyparts in a video. Make sure the video is already analyzed by the function 'analyze_videos'.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    videos : list
        A list of strings containing the full paths to videos for analysis, or a path to a directory where all videos with the same extension are stored.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory. Only videos with this extension are analyzed. The default is ``.avi``

    shuffle : int, optional
        Integer specifying the shuffle index of the training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    filtered: bool, default False
        Boolean variable indicating if filtered output should be plotted rather than frame-by-frame predictions. The filtered version can be computed with deeplabcutcore.filterpredictions.

    save_frames: bool
        If True, creates each frame individually and then combines them into a video. This variant is relatively slow as
        it stores all individual frames. However, it uses matplotlib to create the frames and is therefore much more flexible (one can set transparency of markers, crop, and easily customize).

    Frames2plot: list of indices
        If not None and save_frames=True, then the frames corresponding to these indices will be plotted. For example, Frames2plot=[0,11] will plot the first and the 12th frame.

    delete: bool
        If True, the individual frames created during the video generation will be deleted.

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, then all body parts
        from config.yaml are used, or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    codec: string, optional
        Codec for the labeled video. For options see http://www.fourcc.org/codecs.php [depends on your ffmpeg installation].

    outputframerate: positive number, optional
        Output frame rate for the labeled video (only available for the mode with saving frames). By default None, which results in the original video rate.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    draw_skeleton: bool
        If ``True`` adds a line connecting the body parts, making a skeleton on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``False``

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    displaycropped: bool, optional
        Specifies whether only cropped frame is displayed (with labels analyzed therein), or the original frame with the labels analyzed in the cropped subset.

    Examples
    --------
    If you want to create the labeled video for only 1 video
    >>> deeplabcutcore.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'])
    --------

    If you want to create the labeled video for only 1 video and store the individual frames
    >>> deeplabcutcore.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi'],save_frames=True)
    --------

    If you want to create the labeled video for multiple videos
    >>> deeplabcutcore.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/reachingvideo1.avi','/analysis/project/videos/reachingvideo2.avi'])
    --------

    If you want to create the labeled video for all the videos (as .avi extension) in a directory.
    >>> deeplabcutcore.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'])

    --------
    If you want to create the labeled video for all the videos (as .mp4 extension) in a directory.
    >>> deeplabcutcore.create_labeled_video('/analysis/project/reaching-task/config.yaml',['/analysis/project/videos/'],videotype='mp4')

    --------

    """
    cfg = auxiliaryfunctions.read_config(config)
    start_path = os.getcwd()  # record cwd to return to this directory in the end
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction
    )  # automatically loads corresponding model (even training iteration based on snapshot index)

    bodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, displayedbodyparts
    )
    if draw_skeleton:
        bodyparts2connect = cfg["skeleton"]
        skeleton_color = cfg["skeleton_color"]
    else:
        bodyparts2connect = None
        skeleton_color = None

    Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
    for video in Videos:
        if destfolder is None:
            videofolder = Path(video).parents[0]  # where your folder with videos is.
        else:
            videofolder = destfolder

        os.chdir(str(videofolder))
        videotype = Path(video).suffix
        print("Starting % ", videofolder, videos)
        vname = str(Path(video).stem)

        # if notanalyzed:
        # notanalyzed,outdataname,sourcedataname,DLCscorer=auxiliaryfunctions.CheckifPostProcessing(folder,vname,DLCscorer,DLCscorerlegacy,suffix='checking')

        if filtered:
            videooutname1 = vname + DLCscorer + "filtered_labeled.mp4"
            videooutname2 = vname + DLCscorerlegacy + "filtered_labeled.mp4"
        else:
            videooutname1 = vname + DLCscorer + "_labeled.mp4"
            videooutname2 = vname + DLCscorerlegacy + "_labeled.mp4"

        if os.path.isfile(videooutname1) or os.path.isfile(videooutname2):
            print("Labeled video already created.")
        else:
            print("Loading ", video, "and data.")
            datafound, metadata, Dataframe, DLCscorer, suffix = auxiliaryfunctions.LoadAnalyzedData(
                str(videofolder), vname, DLCscorer, filtered
            )  # returns boolean variable if data was found and metadata + pandas array
            videooutname = vname + DLCscorer + suffix + "_labeled.mp4"
            if datafound and not os.path.isfile(
                videooutname
            ):  # checking again, for this loader video could exist
                # Loading cropping data used during analysis
                cropping = metadata["data"]["cropping"]
                [x1, x2, y1, y2] = metadata["data"]["cropping_parameters"]
                if save_frames:
                    tmpfolder = os.path.join(str(videofolder), "temp-" + vname)
                    auxiliaryfunctions.attempttomakefolder(tmpfolder)
                    clip = vp(video)

                    CreateVideoSlow(
                        videooutname,
                        clip,
                        Dataframe,
                        tmpfolder,
                        cfg["dotsize"],
                        cfg["colormap"],
                        cfg["alphavalue"],
                        cfg["pcutoff"],
                        trailpoints,
                        cropping,
                        x1,
                        x2,
                        y1,
                        y2,
                        delete,
                        DLCscorer,
                        bodyparts,
                        outputframerate,
                        Frames2plot,
                        bodyparts2connect,
                        skeleton_color,
                        draw_skeleton,
                        displaycropped,
                    )
                else:
                    if displaycropped:  # then the cropped video + the labels is depicted
                        clip = vp(
                            fname=video,
                            sname=videooutname,
                            codec=codec,
                            sw=x2 - x1,
                            sh=y2 - y1,
                        )
                        CreateVideo(
                            clip,
                            Dataframe,
                            cfg["pcutoff"],
                            cfg["dotsize"],
                            cfg["colormap"],
                            DLCscorer,
                            bodyparts,
                            trailpoints,
                            cropping,
                            x1,
                            x2,
                            y1,
                            y2,
                            bodyparts2connect,
                            skeleton_color,
                            draw_skeleton,
                            displaycropped,
                        )
                    else:  # then the full video + the (perhaps in cropped mode analyzed labels) are depicted
                        clip = vp(fname=video, sname=videooutname, codec=codec)
                        CreateVideo(
                            clip,
                            Dataframe,
                            cfg["pcutoff"],
                            cfg["dotsize"],
                            cfg["colormap"],
                            DLCscorer,
                            bodyparts,
                            trailpoints,
                            cropping,
                            x1,
                            x2,
                            y1,
                            y2,
                            bodyparts2connect,
                            skeleton_color,
                            draw_skeleton,
                            displaycropped,
                        )

    os.chdir(str(start_path))
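
A minimal usage sketch (hypothetical paths; assumes the video was already analyzed and, for filtered=True, that deeplabcutcore.filterpredictions was run first):

import deeplabcutcore

config_path = '/analysis/project/reaching-task/config.yaml'  # hypothetical path
videos = ['/analysis/project/videos/reachingvideo1.avi']     # hypothetical path

# Fast OpenCV-based labeling with a skeleton overlay and a short marker trail:
deeplabcutcore.create_labeled_video(config_path, videos,
                                    draw_skeleton=True, trailpoints=5)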