Code Example #1
def triangulate(
    config,
    video_path,
    videotype="avi",
    filterpredictions=True,
    filtertype="median",
    gputouse=None,
    destfolder=None,
    save_as_csv=False,
):
    """
    This function triangulates the detected DLC-keypoints from the two camera views
    using the camera matrices (derived from calibration) to calculate 3D predictions.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    video_path : string/list of lists
        Full path of the directory where videos are saved. If the user wants to analyze
        only a pair of videos, they need to be passed as a list of lists,
        i.e. [['video1-camera-1.avi','video1-camera-2.avi']]

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.
        Only videos with this extension are analyzed. The default is ``avi``.

    filterpredictions: bool, optional
        If ``True``, the predictions are filtered with the filter specified by
        ``filtertype``. The default is ``True``.

    filtertype: string
        Select the filter type: 'arima' or 'median' (the currently supported filters).

    gputouse: int, optional
        Index of the GPU to use (see the device numbers in nvidia-smi).
        If you do not have a GPU, use ``None``.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video)

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``

    Example
    -------
    Linux/MacOS
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'/data/project1/videos/')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['/data/project1/videos/video1-camera-1.avi','/data/project1/videos/video1-camera-2.avi'],['/data/project1/videos/video2-camera-1.avi','/data/project1/videos/video2-camera-2.avi']])


    Windows
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'C:\\yourusername\\rig-95\\Videos')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['C:\\yourusername\\rig-95\\Videos\\video1-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video1-camera-2.avi'],['C:\\yourusername\\rig-95\\Videos\\video2-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video2-camera-2.avi']])
    """
    from deeplabcut.pose_estimation_tensorflow import predict_videos
    from deeplabcut.post_processing import filtering

    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    scorer_3d = cfg_3d["scorername_3d"]

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str("config_file_" + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(
                "It seems the file specified in the variable config_file_"
                + str(cam)
                + " does not exist. Please edit the config file with the correct file path and retry."
            )

    # flag to check if the video_path variable is a string or a list of lists
    flag = False  # assume that video_path is a list
    if isinstance(video_path, str):
        flag = True
        video_list = auxiliaryfunctions_3d.get_camerawise_videos(
            video_path, cam_names, videotype=videotype)
    else:
        video_list = video_path

    if not video_list:
        print("No videos found in the specified video path.", video_path)
        print(
            "Please make sure that the video names contain the camera names as entered in the config file,"
        )
        print(
            "or that the videotype matches the videos in the path. I was looking for:",
            videotype,
        )

    print("List of pairs:", video_list)
    scorer_name = {}
    run_triangulate = False
    for i in range(len(video_list)):
        dataname = []
        for j in range(len(video_list[i])):  # looping over cameras
            if cam_names[j] in video_list[i][j]:
                print("Analyzing video %s using %s" %
                      (video_list[i][j], str("config_file_" + cam_names[j])))

                config_2d = snapshots[cam_names[j]]
                cfg = auxiliaryfunctions.read_config(config_2d)
                shuffle = cfg_3d[str("shuffle_" + cam_names[j])]
                trainingsetindex = cfg_3d[str("trainingsetindex_" +
                                              cam_names[j])]
                trainFraction = cfg["TrainingFraction"][trainingsetindex]
                if flag:
                    video = os.path.join(video_path, video_list[i][j])
                else:
                    video_path = str(Path(video_list[i][j]).parents[0])
                    video = os.path.join(video_path, video_list[i][j])

                if destfolder is None:
                    destfolder = str(Path(video).parents[0])

                vname = Path(video).stem
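                # Derive a shared output name for the pair by removing the camera
                # name (and any '_'/'-' separator next to it) from the video stem,
                # e.g. 'video1-camera-1' with camera name 'camera-1' -> 'video1'.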
                prefix = str(vname).split(cam_names[j])[0]
                suffix = str(vname).split(cam_names[j])[-1]
                if prefix == "":
                    pass
                elif prefix[-1] == "_" or prefix[-1] == "-":
                    prefix = prefix[:-1]

                if suffix == "":
                    pass
                elif suffix[0] == "_" or suffix[0] == "-":
                    suffix = suffix[1:]

                if prefix == "":
                    output_file = os.path.join(destfolder, suffix)
                else:
                    if suffix == "":
                        output_file = os.path.join(destfolder, prefix)
                    else:
                        output_file = os.path.join(destfolder,
                                                   prefix + "_" + suffix)

                # Check if the videos are already analyzed for 3d
                output_filename = output_file + "_" + scorer_3d
                if os.path.isfile(output_filename + ".h5"):
                    if save_as_csv and not os.path.exists(output_filename + ".csv"):
                        # In case the user sets save_as_csv=True after triangulating
                        pd.read_hdf(output_filename + ".h5").to_csv(
                            str(output_filename + ".csv"))

                    print(
                        "Already analyzed...Checking the meta data for any change in the camera matrices and/or scorer names",
                        vname,
                    )
                    pickle_file = str(output_filename + "_meta.pickle")
                    metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(
                        pickle_file)
                    (
                        img_path,
                        path_corners,
                        path_camera_matrix,
                        path_undistort,
                    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
                    path_stereo_file = os.path.join(path_camera_matrix,
                                                    "stereo_params.pickle")
                    stereo_file = auxiliaryfunctions.read_pickle(
                        path_stereo_file)
                    cam_pair = str(cam_names[0] + "-" + cam_names[1])
                    if_video_analyzed = False  # tracks whether the video was already analyzed
                    # Check for the camera matrix
                    for k in metadata_["stereo_matrix"].keys():
                        if not np.all(metadata_["stereo_matrix"][k] ==
                                      stereo_file[cam_pair][k]):
                            run_triangulate = True

                    # Check for scorer names in the pickle file of 3d output
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations="unknown")

                    if (metadata_["scorer_name"][cam_names[j]] == DLCscorer
                        ):  # TODO: CHECK FOR BOTH?
                        if_video_analyzed = True
                    elif metadata_["scorer_name"][
                            cam_names[j]] == DLCscorerlegacy:
                        if_video_analyzed = True
                    else:
                        if_video_analyzed = False
                        run_triangulate = True

                    if if_video_analyzed:
                        print("This file is already analyzed!")
                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))
                        scorer_name[cam_names[j]] = DLCscorer
                    else:
                        # Analyze the video if the scorer name is different
                        DLCscorer = predict_videos.analyze_videos(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            gputouse=gputouse,
                            destfolder=destfolder,
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                        if_video_analyzed = False
                        run_triangulate = True
                        if filterpredictions:
                            filtering.filterpredictions(
                                config_2d,
                                [video],
                                videotype=videotype,
                                shuffle=shuffle,
                                trainingsetindex=trainingsetindex,
                                filtertype=filtertype,
                                destfolder=destfolder,
                            )

                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))

                else:  # need to do the whole jam.
                    DLCscorer = predict_videos.analyze_videos(
                        config_2d,
                        [video],
                        videotype=videotype,
                        shuffle=shuffle,
                        trainingsetindex=trainingsetindex,
                        gputouse=gputouse,
                        destfolder=destfolder,
                    )
                    scorer_name[cam_names[j]] = DLCscorer
                    run_triangulate = True
                    print(destfolder, vname, DLCscorer)
                    if filterpredictions:
                        filtering.filterpredictions(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            filtertype=filtertype,
                            destfolder=destfolder,
                        )
                    # Append unconditionally so the pair is complete even without filtering
                    dataname.append(
                        os.path.join(destfolder, vname + DLCscorer + ".h5"))

        if run_triangulate:
            #        if len(dataname)>0:
            # undistort points for this pair
            print("Undistorting...")
            (
                dataFrame_camera1_undistort,
                dataFrame_camera2_undistort,
                stereomatrix,
                path_stereo_file,
            ) = undistort_points(config, dataname,
                                 str(cam_names[0] + "-" + cam_names[1]))
            if len(dataFrame_camera1_undistort) != len(
                    dataFrame_camera2_undistort):
                import warnings

                warnings.warn(
                    "The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry! Excluding the extra frames from the longer video."
                )
                if len(dataFrame_camera1_undistort) > len(
                        dataFrame_camera2_undistort):
                    dataFrame_camera1_undistort = dataFrame_camera1_undistort[:len(
                        dataFrame_camera2_undistort)]
                if len(dataFrame_camera2_undistort) > len(
                        dataFrame_camera1_undistort):
                    dataFrame_camera2_undistort = dataFrame_camera2_undistort[:len(
                        dataFrame_camera1_undistort)]
            #                raise Exception("The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry!")
            X_final = []
            triangulate = []
            scorer_cam1 = dataFrame_camera1_undistort.columns.get_level_values(
                0)[0]
            scorer_cam2 = dataFrame_camera2_undistort.columns.get_level_values(
                0)[0]
            df_3d, scorer_3d, bodyparts = auxiliaryfunctions_3d.create_empty_df(
                dataFrame_camera1_undistort, scorer_3d, flag="3d")
            P1 = stereomatrix["P1"]
            P2 = stereomatrix["P2"]

            print("Computing the triangulation...")
            for bpindex, bp in enumerate(bodyparts):
                # Find the frames where the likelihood of a bodypart in either camera view is below the pcutoff
                likelihoods = np.array([
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["likelihood"].values[:],
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["likelihood"].values[:],
                ])
                likelihoods = likelihoods.T

                # Mark the frames where the likelihood in either view is below the pcutoff
                low_likelihood_frames = np.any(likelihoods < pcutoff, axis=1)
                # low_likelihood_frames = np.all(likelihoods < pcutoff, axis=1)

                low_likelihood_frames = np.where(low_likelihood_frames)[0]
                points_cam1_undistort = np.array([
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["x"].values[:],
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["y"].values[:],
                ])
                points_cam1_undistort = points_cam1_undistort.T

                # For camera 1: assign nan to the x and y values of a bodypart where the likelihood is below the pcutoff
                points_cam1_undistort[low_likelihood_frames] = np.nan, np.nan
                points_cam1_undistort = np.expand_dims(points_cam1_undistort,
                                                       axis=1)

                points_cam2_undistort = np.array([
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["x"].values[:],
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["y"].values[:],
                ])
                points_cam2_undistort = points_cam2_undistort.T

                # For camera 2: assign nan to the x and y values of a bodypart where the likelihood is below the pcutoff
                points_cam2_undistort[low_likelihood_frames] = np.nan, np.nan
                points_cam2_undistort = np.expand_dims(points_cam2_undistort,
                                                       axis=1)

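                # Triangulate each undistorted 2D correspondence into a 3D point
                # consistent with both camera projections P1 and P2 (presumably a
                # linear/DLT least-squares solve, as in cv2.triangulatePoints).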
                X_l = auxiliaryfunctions_3d.triangulatePoints(
                    P1, P2, points_cam1_undistort, points_cam2_undistort)

                # ToDo: speed up func. below by saving in numpy.array
                X_final.append(X_l)
            triangulate.append(X_final)
            triangulate = np.asanyarray(triangulate)
            metadata = {}
            metadata["stereo_matrix"] = stereomatrix
            metadata["stereo_matrix_file"] = path_stereo_file
            metadata["scorer_name"] = {
                cam_names[0]: scorer_name[cam_names[0]],
                cam_names[1]: scorer_name[cam_names[1]],
            }

            # Fill the dataframe with the x,y,z coordinates of the 3d data
            for bpindex, bp in enumerate(bodyparts):
                df_3d.loc[:, (scorer_3d, bp, "x")] = triangulate[0, bpindex, 0, :]
                df_3d.loc[:, (scorer_3d, bp, "y")] = triangulate[0, bpindex, 1, :]
                df_3d.loc[:, (scorer_3d, bp, "z")] = triangulate[0, bpindex, 2, :]

            df_3d.to_hdf(
                str(output_filename + ".h5"),
                "df_with_missing",
                format="table",
                mode="w",
            )
            auxiliaryfunctions_3d.SaveMetadata3d(
                str(output_filename + "_meta.pickle"), metadata)

            if save_as_csv:
                df_3d.to_csv(str(output_filename + ".csv"))

            print("Triangulated data for video", video_list[i])
            print("Results are saved under: ", destfolder)
            # Reset destfolder to None so it can be recomputed for the next pair of videos
            if destfolder == str(Path(video).parents[0]):
                destfolder = None

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print(
            "Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d"
        )
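A minimal usage sketch for this version. The 3D project config path and video folder below are hypothetical placeholders; the function itself, its defaults, and the follow-up step are taken from the code above.

import deeplabcut

config_3d = "/data/project1-3d/config.yaml"  # hypothetical 3D project config

# Analyze every camera pair found in the folder, filter the 2D predictions
# with the default median filter, and also save the triangulated data as .csv.
deeplabcut.triangulate(
    config_3d,
    "/data/project1/videos/",
    videotype="avi",
    filterpredictions=True,
    save_as_csv=True,
)

# Afterwards, 3D videos can be created with deeplabcut.create_labeled_video_3d.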
Code Example #2
def triangulate(
    config,
    video_path,
    videotype="",
    filterpredictions=True,
    filtertype="median",
    gputouse=None,
    destfolder=None,
    save_as_csv=False,
    track_method="",
):
    """
    This function triangulates the detected DLC-keypoints from the two camera views
    using the camera matrices (derived from calibration) to calculate 3D predictions.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    video_path : string/list of lists
        Full path of the directory where videos are saved. If the user wants to analyze
        only a pair of videos, they need to be passed as a list of lists,
        i.e. [['video1-camera-1.avi','video1-camera-2.avi']]

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.
        Only videos with this extension are analyzed.
        If left unspecified, videos with common extensions ('avi', 'mp4', 'mov', 'mpeg', 'mkv') are kept.


    filterpredictions: bool, optional
        If ``True``, the predictions are filtered with the filter specified by
        ``filtertype``. The default is ``True``.

    filtertype: string
        Select the filter type: 'arima' or 'median' (the currently supported filters).

    gputouse: int, optional
        Index of the GPU to use (see the device numbers in nvidia-smi).
        If you do not have a GPU, use ``None``.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video)

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``

    track_method: string, optional
        Specifies the tracker used to generate the data.
        Empty by default (corresponding to a single-animal project).
        For multiple animals, must be either 'box', 'skeleton', or 'ellipse'.

    Example
    -------
    Linux/MacOS
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'/data/project1/videos/')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['/data/project1/videos/video1-camera-1.avi','/data/project1/videos/video1-camera-2.avi'],['/data/project1/videos/video2-camera-1.avi','/data/project1/videos/video2-camera-2.avi']])


    Windows
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'C:\\yourusername\\rig-95\\Videos')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['C:\\yourusername\\rig-95\\Videos\\video1-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video1-camera-2.avi'],['C:\\yourusername\\rig-95\\Videos\\video2-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video2-camera-2.avi']])
    """
    import warnings

    from deeplabcut.pose_estimation_tensorflow import predict_videos
    from deeplabcut.post_processing import filtering

    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    scorer_3d = cfg_3d["scorername_3d"]

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str("config_file_" + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(
                "It seems the file specified in the variable config_file_"
                + str(cam)
                + " does not exist. Please edit the config file with the correct file path and retry."
            )

    # flag to check if the video_path variable is a string or a list of lists
    flag = False  # assume that video_path is a list
    if isinstance(video_path, str):
        flag = True
        video_list = auxiliaryfunctions_3d.get_camerawise_videos(
            video_path, cam_names, videotype=videotype
        )
    else:
        video_list = video_path

    if not video_list:
        print("No videos found in the specified video path.", video_path)
        print(
            "Please make sure that the video names contain the camera names as entered in the config file,"
        )
        print(
            "or that the videotype matches the videos in the path. I was looking for:",
            videotype,
        )

    print("List of pairs:", video_list)
    scorer_name = {}
    run_triangulate = False
    for i in range(len(video_list)):
        dataname = []
        for j in range(len(video_list[i])):  # looping over cameras
            if cam_names[j] in video_list[i][j]:
                print(
                    "Analyzing video %s using %s"
                    % (video_list[i][j], str("config_file_" + cam_names[j]))
                )

                config_2d = snapshots[cam_names[j]]
                cfg = auxiliaryfunctions.read_config(config_2d)

                # Get track_method and do related checks
                track_method = auxfun_multianimal.get_track_method(
                    cfg, track_method=track_method
                )
                if len(cfg.get("multianimalbodyparts", [])) == 1 and track_method != "box":
                    warnings.warn(
                        "Switching to `box` tracker for single point tracking..."
                    )
                    track_method = "box"

                # Get track method suffix
                tr_method_suffix = TRACK_METHODS.get(track_method, "")

                shuffle = cfg_3d[str("shuffle_" + cam_names[j])]
                trainingsetindex = cfg_3d[str("trainingsetindex_" + cam_names[j])]
                trainFraction = cfg["TrainingFraction"][trainingsetindex]
                if flag:
                    video = os.path.join(video_path, video_list[i][j])
                else:
                    video_path = str(Path(video_list[i][j]).parents[0])
                    video = os.path.join(video_path, video_list[i][j])

                if destfolder is None:
                    destfolder = str(Path(video).parents[0])

                vname = Path(video).stem
                prefix = str(vname).split(cam_names[j])[0]
                suffix = str(vname).split(cam_names[j])[-1]
                if prefix == "":
                    pass
                elif prefix[-1] == "_" or prefix[-1] == "-":
                    prefix = prefix[:-1]

                if suffix == "":
                    pass
                elif suffix[0] == "_" or suffix[0] == "-":
                    suffix = suffix[1:]

                if prefix == "":
                    output_file = os.path.join(destfolder, suffix)
                else:
                    if suffix == "":
                        output_file = os.path.join(destfolder, prefix)
                    else:
                        output_file = os.path.join(destfolder, prefix + "_" + suffix)

                # Check if the videos are already analyzed for 3d
                output_filename = output_file + "_" + scorer_3d
                if os.path.isfile(output_filename + ".h5"):
                    if save_as_csv and not os.path.exists(output_filename + ".csv"):
                        # In case the user sets save_as_csv=True after triangulating
                        pd.read_hdf(output_filename + ".h5").to_csv(
                            str(output_filename + ".csv")
                        )

                    print(
                        "Already analyzed...Checking the meta data for any change in the camera matrices and/or scorer names",
                        vname,
                    )
                    pickle_file = str(output_filename + "_meta.pickle")
                    metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)
                    (
                        img_path,
                        path_corners,
                        path_camera_matrix,
                        path_undistort,
                        _,
                    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
                    path_stereo_file = os.path.join(
                        path_camera_matrix, "stereo_params.pickle"
                    )
                    stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
                    cam_pair = str(cam_names[0] + "-" + cam_names[1])
                    is_video_analyzed = False  # tracks whether the video was already analyzed
                    # Check for the camera matrix
                    for k in metadata_["stereo_matrix"].keys():
                        if not np.all(
                            metadata_["stereo_matrix"][k] == stereo_file[cam_pair][k]
                        ):
                            run_triangulate = True

                    # Check for scorer names in the pickle file of 3d output
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg, shuffle, trainFraction, trainingsiterations="unknown"
                    )

                    if (
                        metadata_["scorer_name"][cam_names[j]] == DLCscorer
                    ):  # TODO: CHECK FOR BOTH?
                        is_video_analyzed = True
                    elif metadata_["scorer_name"][cam_names[j]] == DLCscorerlegacy:
                        is_video_analyzed = True
                    else:
                        is_video_analyzed = False
                        run_triangulate = True

                    if is_video_analyzed:
                        print("This file is already analyzed!")
                        dataname.append(
                            os.path.join(
                                destfolder, vname + DLCscorer + tr_method_suffix + ".h5"
                            )
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                    else:
                        # Analyze the video if the scorer name is different
                        DLCscorer = predict_videos.analyze_videos(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            gputouse=gputouse,
                            destfolder=destfolder,
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                        is_video_analyzed = False
                        run_triangulate = True
                        suffix = tr_method_suffix
                        if filterpredictions:
                            filtering.filterpredictions(
                                config_2d,
                                [video],
                                videotype=videotype,
                                shuffle=shuffle,
                                trainingsetindex=trainingsetindex,
                                filtertype=filtertype,
                                destfolder=destfolder,
                            )
                            suffix += "_filtered"

                        dataname.append(
                            os.path.join(
                                destfolder, vname + DLCscorer + suffix + ".h5"
                            )
                        )

                else:  # need to do the whole jam.
                    DLCscorer = predict_videos.analyze_videos(
                        config_2d,
                        [video],
                        videotype=videotype,
                        shuffle=shuffle,
                        trainingsetindex=trainingsetindex,
                        gputouse=gputouse,
                        destfolder=destfolder,
                    )
                    scorer_name[cam_names[j]] = DLCscorer
                    run_triangulate = True
                    print(destfolder, vname, DLCscorer)
                    suffix = tr_method_suffix
                    if filterpredictions:
                        filtering.filterpredictions(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            filtertype=filtertype,
                            destfolder=destfolder,
                        )
                        suffix += "_filtered"
                    # Append unconditionally (mirrors the branch above) so the pair
                    # is complete even without filtering
                    dataname.append(
                        os.path.join(
                            destfolder, vname + DLCscorer + suffix + ".h5"
                        )
                    )

        if run_triangulate:
            #        if len(dataname)>0:
            # undistort points for this pair
            print("Undistorting...")
            (
                dataFrame_camera1_undistort,
                dataFrame_camera2_undistort,
                stereomatrix,
                path_stereo_file,
            ) = undistort_points(
                config, dataname, str(cam_names[0] + "-" + cam_names[1])
            )
            if len(dataFrame_camera1_undistort) != len(dataFrame_camera2_undistort):
                warnings.warn(
                    "The number of frames does not match between the two videos. Please make sure that your videos have the same number of frames and then retry! Excluding the extra frames from the longer video."
                )
                if len(dataFrame_camera1_undistort) > len(dataFrame_camera2_undistort):
                    dataFrame_camera1_undistort = dataFrame_camera1_undistort[
                        : len(dataFrame_camera2_undistort)
                    ]
                if len(dataFrame_camera2_undistort) > len(dataFrame_camera1_undistort):
                    dataFrame_camera2_undistort = dataFrame_camera2_undistort[
                        : len(dataFrame_camera1_undistort)
                    ]
            #                raise Exception("The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry!")
            scorer_cam1 = dataFrame_camera1_undistort.columns.get_level_values(0)[0]
            scorer_cam2 = dataFrame_camera2_undistort.columns.get_level_values(0)[0]

            scorer_3d = scorer_cam1
            bodyparts = dataFrame_camera1_undistort.columns.get_level_values(
                "bodyparts"
            ).unique()

            P1 = stereomatrix["P1"]
            P2 = stereomatrix["P2"]
            F = stereomatrix["F"]

            print("Computing the triangulation...")

            num_frames = dataFrame_camera1_undistort.shape[0]
            ### Assign nan to [X,Y] of low likelihood predictions ###
            # Convert the data to a np array to easily mask out the low likelihood predictions
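            # (each bodypart contributes three columns (x, y, likelihood), so the
            # reshape to (num_frames, -1, 3) places the likelihood at index 2)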
            data_cam1_tmp = dataFrame_camera1_undistort.to_numpy().reshape(
                (num_frames, -1, 3)
            )
            data_cam2_tmp = dataFrame_camera2_undistort.to_numpy().reshape(
                (num_frames, -1, 3)
            )
            # Assign [X,Y] = nan to low likelihood predictions
            data_cam1_tmp[data_cam1_tmp[..., 2] < pcutoff, :2] = np.nan
            data_cam2_tmp[data_cam2_tmp[..., 2] < pcutoff, :2] = np.nan

            # Reshape data back to original shape
            data_cam1_tmp = data_cam1_tmp.reshape(num_frames, -1)
            data_cam2_tmp = data_cam2_tmp.reshape(num_frames, -1)

            # put data back to the dataframes
            dataFrame_camera1_undistort[:] = data_cam1_tmp
            dataFrame_camera2_undistort[:] = data_cam2_tmp

            if cfg.get("multianimalproject"):
                # Check individuals are the same in both views
                individuals_view1 = (
                    dataFrame_camera1_undistort.columns.get_level_values("individuals")
                    .unique()
                    .to_list()
                )
                individuals_view2 = (
                    dataFrame_camera2_undistort.columns.get_level_values("individuals")
                    .unique()
                    .to_list()
                )
                if individuals_view1 != individuals_view2:
                    raise ValueError(
                        "The individuals do not match between the two DataFrames"
                    )

                # Cross-view match individuals
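                # (presumably by scoring candidate pairings via epipolar distances
                # derived from F; `voting` maps each individual index in view 1 to
                # its matched index in view 2)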
                _, voting = auxiliaryfunctions_3d.cross_view_match_dataframes(
                    dataFrame_camera1_undistort, dataFrame_camera2_undistort, F
                )
            else:
                # Create dummy variables for a single-animal project
                individuals_view1 = ["indie"]
                voting = {0: 0}

            # Cleaner variable (since inds view1 == inds view2)
            individuals = individuals_view1

            # Reshape to (num_frames, num_individuals, num_bodyparts, 3) and keep only (x, y)
            all_points_cam1 = dataFrame_camera1_undistort.to_numpy().reshape(
                (num_frames, len(individuals), -1, 3)
            )[..., :2]
            all_points_cam2 = dataFrame_camera2_undistort.to_numpy().reshape(
                (num_frames, len(individuals), -1, 3)
            )[..., :2]

            # Triangulate data
            triangulate = []
            # use a distinct loop variable: `i` still indexes the current video pair
            for indv_idx, _ in enumerate(individuals):
                # indv_idx is the individual in view 1
                # voting[indv_idx] is the matched individual in view 2

                pts_indv_cam1 = all_points_cam1[:, indv_idx].reshape((-1, 2)).T
                pts_indv_cam2 = all_points_cam2[:, voting[indv_idx]].reshape((-1, 2)).T

                indv_points_3d = auxiliaryfunctions_3d.triangulatePoints(
                    P1, P2, pts_indv_cam1, pts_indv_cam2
                )

                indv_points_3d = indv_points_3d[:3].T.reshape((num_frames, -1, 3))

                triangulate.append(indv_points_3d)

            triangulate = np.asanyarray(triangulate)
            metadata = {}
            metadata["stereo_matrix"] = stereomatrix
            metadata["stereo_matrix_file"] = path_stereo_file
            metadata["scorer_name"] = {
                cam_names[0]: scorer_name[cam_names[0]],
                cam_names[1]: scorer_name[cam_names[1]],
            }

            # Create 3D DataFrame column and row indices
            axis_labels = ("x", "y", "z")
            if cfg.get("multianimalproject"):
                columns = pd.MultiIndex.from_product(
                    [[scorer_3d], individuals, bodyparts, axis_labels],
                    names=["scorer", "individuals", "bodyparts", "coords"],
                )

            else:
                columns = pd.MultiIndex.from_product(
                    [[scorer_3d], bodyparts, axis_labels],
                    names=["scorer", "bodyparts", "coords"],
                )

            inds = range(num_frames)

            # Swap num_animals with num_frames axes to ensure well-behaving reshape
            triangulate = triangulate.swapaxes(0, 1).reshape((num_frames, -1))

            # Fill up 3D dataframe
            df_3d = pd.DataFrame(triangulate, columns=columns, index=inds)

            df_3d.to_hdf(
                str(output_filename + ".h5"),
                "df_with_missing",
                format="table",
                mode="w",
            )

            # Reorder 2D dataframe in view 2 to match order of view 1
            if cfg.get("multianimalproject"):
                df_2d_view2 = pd.read_hdf(dataname[1])
                individuals_order = [individuals[idx] for idx in voting.values()]
                df_2d_view2 = auxfun_multianimal.reorder_individuals_in_df(
                    df_2d_view2, individuals_order
                )
                df_2d_view2.to_hdf(dataname[1], "tracks", format="table", mode="w")

            auxiliaryfunctions_3d.SaveMetadata3d(
                str(output_filename + "_meta.pickle"), metadata
            )

            if save_as_csv:
                df_3d.to_csv(str(output_filename + ".csv"))

            print("Triangulated data for video", video_list[i])
            print("Results are saved under: ", destfolder)
            # Reset destfolder to None so it can be recomputed for the next pair of videos
            if destfolder == str(Path(video).parents[0]):
                destfolder = None

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print("Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d")
Code Example #3
def create_labeled_video_3d(
    config,
    path,
    videofolder=None,
    start=0,
    end=None,
    trailpoints=0,
    videotype="avi",
    view=[-113, -270],
    xlim=[None, None],
    ylim=[None, None],
    zlim=[None, None],
    draw_skeleton=True,
):
    """
    Creates a video with views from the two cameras and the 3d reconstruction for a selected number of frames.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    path : list
        A list of strings containing the full paths to triangulated files for analysis, or a path to the directory where all the triangulated files are stored.

    videofolder: string
        Full path of the folder where the videos are stored. Use this if the videos are stored in a different location than the triangulation files. By default it is ``None``, and video files are looked for in the directory where the triangulation file is stored.

    start: int
        Integer specifying the start frame index. Default is set to 0.

    end: int
        Integer specifying the end frame index. Default is set to None, in which case all the frames of the video are used for creating the labeled video.

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.
        Only videos with this extension are analyzed. The default is ``avi``.

    view: list
        A list that sets the elevation angle in the z plane and the azimuthal angle in the x,y plane of the 3d view. Useful for rotating the axes of the 3d view.

    xlim: list
        A list of integers specifying the limits for xaxis of 3d view. By default it is set to [None,None], where the x limit is set by taking the minimum and maximum value of the x coordinates for all the bodyparts.

    ylim: list
        A list of integers specifying the limits for yaxis of 3d view. By default it is set to [None,None], where the y limit is set by taking the minimum and maximum value of the y coordinates for all the bodyparts.

    zlim: list
        A list of integers specifying the limits for zaxis of 3d view. By default it is set to [None,None], where the z limit is set by taking the minimum and maximum value of the z coordinates for all the bodyparts.

    draw_skeleton: bool
        If ``True``, adds a line connecting the body parts, making a skeleton, on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``True``

    Example
    -------
    Linux/MacOS
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos/3d.h5'],start=100, end=500)

    To create labeled videos for all the triangulated files in the folder
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500)

    To set the xlim, ylim, zlim and rotate the view of the 3d axis
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500,view=[30,90],xlim=[-12,12],ylim=[15,25],zlim=[20,30])

    """
    start_path = os.getcwd()

    # Read the config file and related variables
    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    markerSize = cfg_3d["dotsize"]
    alphaValue = cfg_3d["alphaValue"]
    cmap = cfg_3d["colormap"]
    bodyparts2connect = cfg_3d["skeleton"]
    skeleton_color = cfg_3d["skeleton_color"]
    scorer_3d = cfg_3d["scorername_3d"]

    # Flatten the list of bodyparts to connect
    bodyparts2plot = list(
        np.unique([val for sublist in bodyparts2connect for val in sublist]))
    color = plt.cm.get_cmap(cmap, len(bodyparts2plot))
    file_list = auxiliaryfunctions_3d.Get_list_of_triangulated_and_videoFiles(
        path, videotype, scorer_3d, cam_names, videofolder)
    print(file_list)
    if not file_list:
        raise Exception(
            "No corresponding video file(s) found for the specified triangulated file or folder. Did you specify the video file type? If videos are stored in a different location, please use the ``videofolder`` argument to specify their path."
        )

    for file in file_list:
        path_h5_file = Path(file[0]).parents[0]
        triangulate_file = file[0]
        # each entry of file_list is always sorted as [triangulated.h5, camera-1.videotype, camera-2.videotype]
        # name for output video
        file_name = str(Path(triangulate_file).stem)
        if os.path.isfile(os.path.join(path_h5_file, file_name + ".mpg")):
            print("Video already created...")
        else:
            string_to_remove = str(Path(triangulate_file).suffix)
            pickle_file = triangulate_file.replace(string_to_remove,
                                                   "_meta.pickle")
            metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)

            base_filename_cam1 = str(Path(file[1]).stem).split(videotype)[
                0]  # required for searching the filtered file
            base_filename_cam2 = str(Path(file[2]).stem).split(videotype)[
                0]  # required for searching the filtered file
            cam1_view_video = file[1]
            cam2_view_video = file[2]
            cam1_scorer = metadata_["scorer_name"][cam_names[0]]
            cam2_scorer = metadata_["scorer_name"][cam_names[1]]
            print("Creating 3D video from %s and %s using %s" % (
                Path(cam1_view_video).name,
                Path(cam2_view_video).name,
                Path(triangulate_file).name,
            ))

            # Read the video files and corresponding h5 files
            vid_cam1 = cv2.VideoCapture(cam1_view_video)
            vid_cam2 = cv2.VideoCapture(cam2_view_video)

            # Look for the filtered predictions file
            try:
                print("Looking for filtered predictions...")
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam1 + cam1_scorer +
                                "*filtered.h5"),
                        ))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam2 + cam2_scorer +
                                "*filtered.h5"),
                        ))[0])
                # print("Found filtered predictions, will be use these for triangulation.")
                print(
                    "Found the following filtered data: ",
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam1 + cam1_scorer +
                            "*filtered.h5"),
                    ),
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam2 + cam2_scorer +
                            "*filtered.h5"),
                    ),
                )
            except (FileNotFoundError, IndexError):  # glob returns [] when no filtered file exists
                print(
                    "No filtered predictions found, the unfiltered predictions will be used instead."
                )
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam1 + cam1_scorer +
                                "*.h5")))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam2 + cam2_scorer +
                                "*.h5")))[0])

            df_3d = pd.read_hdf(triangulate_file, "df_with_missing")
            plt.rcParams.update({"figure.max_open_warning": 0})

            if end is None:
                end = len(df_3d)  # All the frames
            frames = list(range(start, end))

            # Start plotting for every frame
            for k in tqdm(frames):
                output_folder, num_frames = plot2D(
                    cfg_3d,
                    k,
                    bodyparts2plot,
                    vid_cam1,
                    vid_cam2,
                    bodyparts2connect,
                    df_cam1,
                    df_cam2,
                    df_3d,
                    pcutoff,
                    markerSize,
                    alphaValue,
                    color,
                    path_h5_file,
                    file_name,
                    skeleton_color,
                    view,
                    draw_skeleton,
                    trailpoints,
                    xlim,
                    ylim,
                    zlim,
                )

            # Once all the frames are saved, then make a movie using ffmpeg.
            cwd = os.getcwd()
            os.chdir(str(output_folder))
            subprocess.call([
                "ffmpeg",
                "-start_number",
                str(start),
                "-framerate",
                str(30),
                "-i",
                str("img%0" + str(num_frames) + "d.png"),
                "-r",
                str(30),
                "-vb",
                "20M",
                os.path.join(output_folder, str("../" + file_name + ".mpg")),
            ])
            os.chdir(cwd)

    os.chdir(start_path)
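A usage sketch for this version. The paths are hypothetical; note that frames are rendered with matplotlib and then assembled into an .mpg with ffmpeg via subprocess, so ffmpeg must be available on the PATH.

import deeplabcut

config_3d = "/data/project1-3d/config.yaml"  # hypothetical 3D project config

# Render frames 100-499 of every triangulated file in the folder,
# rotating the 3D view and fixing the axis limits.
deeplabcut.create_labeled_video_3d(
    config_3d,
    ["/data/project1/videos"],
    start=100,
    end=500,
    view=[30, 90],
    xlim=[-12, 12],
    ylim=[15, 25],
    zlim=[20, 30],
)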
Code Example #4
File: plotting3D.py  Project: eaogorman/DeepLabCut
def create_labeled_video_3d(
        config,
        path,
        videofolder=None,
        start=0,
        end=None,
        trailpoints=0,
        videotype="avi",
        view=(-113, -270),
        xlim=None,
        ylim=None,
        zlim=None,
        draw_skeleton=True,
        color_by="bodypart",
        figsize=(20, 8),
        fps=30,
        dpi=300,
):
    """
    Creates a video with views from the two cameras and the 3d reconstruction for a selected number of frames.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    path : list
        A list of strings containing the full paths to triangulated files for analysis, or a path to the directory where all the triangulated files are stored.

    videofolder: string
        Full path of the folder where the videos are stored. Use this if the videos are stored in a different location than the triangulation files. By default it is ``None``, and video files are looked for in the directory where the triangulation file is stored.

    start: int
        Integer specifying the start frame index. Default is set to 0.

    end: int
        Integer specifying the end frame index. Default is set to None, in which case all the frames of the video are used for creating the labeled video.

    trailpoints: int
        Number of previous frames whose body parts are plotted in a frame (for displaying history). Default is set to 0.

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.
        Only videos with this extension are analyzed. The default is ``avi``.

    view: list
        A list that sets the elevation angle in the z plane and the azimuthal angle in the x,y plane of the 3d view. Useful for rotating the axes of the 3d view.

    xlim: list
        A list of integers specifying the limits for the x axis of the 3d view. By default it is ``None``, in which case the limits are computed from the interquartile range of the x coordinates of all the bodyparts.

    ylim: list
        A list of integers specifying the limits for the y axis of the 3d view. By default it is ``None``, in which case the limits are computed analogously from the y coordinates.

    zlim: list
        A list of integers specifying the limits for the z axis of the 3d view. By default it is ``None``, in which case the limits are computed analogously from the z coordinates.

    draw_skeleton: bool
        If ``True``, adds a line connecting the body parts, making a skeleton, on each frame. The body parts to be connected and the color of these connecting lines are specified in the config file. By default: ``True``

    color_by : string, optional (default='bodypart')
        Coloring rule. By default, each bodypart is colored differently.
        If set to 'individual', points belonging to a single individual are colored the same.

    figsize: tuple, optional
        Size of the output figure in inches. The default is ``(20, 8)``.

    fps: int, optional
        Frame rate of the output video. The default is 30.

    dpi: int, optional
        Resolution (dots per inch) used when saving the video. The default is 300.

    Example
    -------
    Linux/MacOS
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos/3d.h5'],start=100, end=500)

    To create labeled videos for all the triangulated files in the folder
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500)

    To set the xlim, ylim, zlim and rotate the view of the 3d axis
    >>> deeplabcut.create_labeled_video_3d(config,['/data/project1/videos'],start=100, end=500,view=[30,90],xlim=[-12,12],ylim=[15,25],zlim=[20,30])

    """
    start_path = os.getcwd()

    # Read the config file and related variables
    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    markerSize = cfg_3d["dotsize"]
    alphaValue = cfg_3d["alphaValue"]
    cmap = cfg_3d["colormap"]
    bodyparts2connect = cfg_3d["skeleton"]
    skeleton_color = cfg_3d["skeleton_color"]
    scorer_3d = cfg_3d["scorername_3d"]

    if color_by not in ("bodypart", "individual"):
        raise ValueError(f"Invalid color_by={color_by}")

    file_list = auxiliaryfunctions_3d.Get_list_of_triangulated_and_videoFiles(
        path, videotype, scorer_3d, cam_names, videofolder)
    print(file_list)
    if not file_list:
        raise Exception(
            "No corresponding video file(s) found for the specified triangulated file or folder. Did you specify the video file type? If videos are stored in a different location, please use the ``videofolder`` argument to specify their path."
        )

    for file in file_list:
        path_h5_file = Path(file[0]).parents[0]
        triangulate_file = file[0]
        # each entry of file_list is always sorted as [triangulated.h5, camera-1.videotype, camera-2.videotype]
        # name for output video
        file_name = str(Path(triangulate_file).stem)
        videooutname = os.path.join(path_h5_file, file_name + ".mp4")
        if os.path.isfile(videooutname):
            print("Video already created...")
        else:
            string_to_remove = str(Path(triangulate_file).suffix)
            pickle_file = triangulate_file.replace(string_to_remove,
                                                   "_meta.pickle")
            metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)

            base_filename_cam1 = str(Path(file[1]).stem).split(videotype)[
                0]  # required for searching the filtered file
            base_filename_cam2 = str(Path(file[2]).stem).split(videotype)[
                0]  # required for searching the filtered file
            cam1_view_video = file[1]
            cam2_view_video = file[2]
            cam1_scorer = metadata_["scorer_name"][cam_names[0]]
            cam2_scorer = metadata_["scorer_name"][cam_names[1]]
            print("Creating 3D video from %s and %s using %s" % (
                Path(cam1_view_video).name,
                Path(cam2_view_video).name,
                Path(triangulate_file).name,
            ))

            # Read the video files and corresponding h5 files
            vid_cam1 = VideoReader(cam1_view_video)
            vid_cam2 = VideoReader(cam2_view_video)

            # Look for the filtered predictions file
            try:
                print("Looking for filtered predictions...")
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam1 + cam1_scorer +
                                "*filtered.h5"),
                        ))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str("*" + base_filename_cam2 + cam2_scorer +
                                "*filtered.h5"),
                        ))[0])
                # print("Found filtered predictions, will be use these for triangulation.")
                print(
                    "Found the following filtered data: ",
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam1 + cam1_scorer +
                            "*filtered.h5"),
                    ),
                    os.path.join(
                        path_h5_file,
                        str("*" + base_filename_cam2 + cam2_scorer +
                            "*filtered.h5"),
                    ),
                )
            except (FileNotFoundError, IndexError):  # glob returns [] when no filtered file exists
                print(
                    "No filtered predictions found, the unfiltered predictions will be used instead."
                )
                df_cam1 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam1 + cam1_scorer +
                                "*.h5")))[0])
                df_cam2 = pd.read_hdf(
                    glob.glob(
                        os.path.join(
                            path_h5_file,
                            str(base_filename_cam2 + cam2_scorer +
                                "*.h5")))[0])

            df_3d = pd.read_hdf(triangulate_file)
            try:
                num_animals = df_3d.columns.get_level_values(
                    "individuals").unique().size
            except KeyError:
                num_animals = 1

            if end is None:
                end = len(df_3d)  # All the frames
            end = min(end, min(len(vid_cam1), len(vid_cam2)))
            frames = list(range(start, end))

            output_folder = Path(
                os.path.join(path_h5_file, "temp_" + file_name))
            output_folder.mkdir(parents=True, exist_ok=True)

            # Flatten the list of bodyparts to connect
            bodyparts2plot = list(
                np.unique(
                    [val for sublist in bodyparts2connect for val in sublist]))

            # Format data
            mask2d = df_cam1.columns.get_level_values('bodyparts').isin(
                bodyparts2plot)
            xy1 = df_cam1.loc[:, mask2d].to_numpy().reshape(
                (len(df_cam1), -1, 3))
            visible1 = xy1[..., 2] >= pcutoff
            xy1[~visible1] = np.nan
            xy2 = df_cam2.loc[:, mask2d].to_numpy().reshape(
                (len(df_cam2), -1, 3))
            visible2 = xy2[..., 2] >= pcutoff
            xy2[~visible2] = np.nan
            mask = df_3d.columns.get_level_values('bodyparts').isin(
                bodyparts2plot)
            xyz = df_3d.loc[:, mask].to_numpy().reshape((len(df_3d), -1, 3))
            xyz[~(visible1 & visible2)] = np.nan

            bpts = df_3d.columns.get_level_values('bodyparts')[mask][::3]
            links = make_labeled_video.get_segment_indices(
                bodyparts2connect,
                bpts,
            )
            ind_links = tuple(zip(*links))

            if color_by == "bodypart":
                color = plt.cm.get_cmap(cmap, len(bodyparts2plot))
                colors_ = color(range(len(bodyparts2plot)))
                colors = np.tile(colors_, (num_animals, 1))
            elif color_by == "individual":
                color = plt.cm.get_cmap(cmap, num_animals)
                colors_ = color(range(num_animals))
                colors = np.repeat(colors_, len(bodyparts2plot), axis=0)

            # Trick to force equal aspect ratio of 3D plots
            minmax = np.nanpercentile(xyz[frames], q=[25, 75], axis=(0, 1)).T
            minmax *= 1.1
            minmax_range = (minmax[:, 1] - minmax[:, 0]).max() / 2
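            # Give all three axes the same half-range around their midpoints so a
            # unit step spans the same on-screen distance along x, y, and z.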
            if xlim is None:
                mid_x = np.mean(minmax[0])
                xlim = mid_x - minmax_range, mid_x + minmax_range
            if ylim is None:
                mid_y = np.mean(minmax[1])
                ylim = mid_y - minmax_range, mid_y + minmax_range
            if zlim is None:
                mid_z = np.mean(minmax[2])
                zlim = mid_z - minmax_range, mid_z + minmax_range

            # Set up the matplotlib figure beforehand
            fig, axes1, axes2, axes3 = set_up_grid(figsize, xlim, ylim, zlim,
                                                   view)
            points_2d1 = axes1.scatter(
                *np.zeros((2, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            im1 = axes1.imshow(np.zeros((vid_cam1.height, vid_cam1.width)))
            points_2d2 = axes2.scatter(
                *np.zeros((2, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            im2 = axes2.imshow(np.zeros((vid_cam2.height, vid_cam2.width)))
            points_3d = axes3.scatter(
                *np.zeros((3, len(bodyparts2plot))),
                s=markerSize,
                alpha=alphaValue,
            )
            if draw_skeleton:
                # Set up skeleton LineCollections
                segs = np.zeros((2, len(ind_links), 2))
                coll1 = LineCollection(segs, colors=skeleton_color)
                coll2 = LineCollection(segs, colors=skeleton_color)
                axes1.add_collection(coll1)
                axes2.add_collection(coll2)
                segs = np.zeros((2, len(ind_links), 3))
                coll_3d = Line3DCollection(segs, colors=skeleton_color)
                axes3.add_collection(coll_3d)

            writer = FFMpegWriter(fps=fps)
            with writer.saving(fig, videooutname, dpi=dpi):
                for k in tqdm(frames):
                    vid_cam1.set_to_frame(k)
                    vid_cam2.set_to_frame(k)
                    frame_cam1 = vid_cam1.read_frame()
                    frame_cam2 = vid_cam2.read_frame()
                    if frame_cam1 is None or frame_cam2 is None:
                        raise IOError("A video frame is empty.")

                    im1.set_data(frame_cam1)
                    im2.set_data(frame_cam2)

                    sl = slice(max(0, k - trailpoints), k + 1)
                    coords3d = xyz[sl]
                    coords1 = xy1[sl, :, :2]
                    coords2 = xy2[sl, :, :2]
                    points_3d._offsets3d = coords3d.reshape((-1, 3)).T
                    points_3d.set_color(colors)
                    points_2d1.set_offsets(coords1.reshape((-1, 2)))
                    points_2d1.set_color(colors)
                    points_2d2.set_offsets(coords2.reshape((-1, 2)))
                    points_2d2.set_color(colors)
                    if draw_skeleton:
                        segs3d = xyz[k][tuple([ind_links])].swapaxes(0, 1)
                        coll_3d.set_segments(segs3d)
                        segs1 = xy1[k, :, :2][tuple([ind_links
                                                     ])].swapaxes(0, 1)
                        coll1.set_segments(segs1)
                        segs2 = xy2[k, :, :2][tuple([ind_links
                                                     ])].swapaxes(0, 1)
                        coll2.set_segments(segs2)

                    writer.grab_frame()
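A usage sketch for this newer variant. The paths are hypothetical; this version writes an .mp4 through matplotlib's FFMpegWriter (ffmpeg must still be installed) and can color points per individual for multi-animal projects.

import deeplabcut

config_3d = "/data/maproject-3d/config.yaml"  # hypothetical 3D project config

# Color points per individual rather than per bodypart, keep a short motion
# trail, and control the output frame rate and resolution directly.
deeplabcut.create_labeled_video_3d(
    config_3d,
    ["/data/maproject/videos"],
    color_by="individual",
    trailpoints=5,
    fps=30,
    dpi=200,
)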