Example #1
def undistort_points(config, dataframe, camera_pair):
    cfg_3d = auxiliaryfunctions.read_config(config)
    path_camera_matrix = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)[2]
    """
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    """
    # Load the per-camera 2D predictions
    dataframe_cam1 = pd.read_hdf(dataframe[0])
    dataframe_cam2 = pd.read_hdf(dataframe[1])
    path_stereo_file = os.path.join(path_camera_matrix, "stereo_params.pickle")
    stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
    dataFrame_cam1_undistort, dataFrame_cam2_undistort = _undistort_views(
        [(dataframe_cam1, dataframe_cam2)], stereo_file,
    )[0]

    return (
        dataFrame_cam1_undistort,
        dataFrame_cam2_undistort,
        stereo_file[camera_pair],
        path_stereo_file,
    )
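
# Illustrative usage sketch (not from the DeepLabCut source); the config path,
# .h5 prediction files, and pair key below are hypothetical placeholders.
df_cam1_u, df_cam2_u, pair_params, stereo_path = undistort_points(
    "/data/project1-3d/config.yaml",
    ["/data/videos/vid-camera-1DLC.h5", "/data/videos/vid-camera-2DLC.h5"],
    "camera-1-camera-2",  # pair key as stored in stereo_params.pickle
)
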
def create_video_from_pickled_tracks(
    video, pickle_file, destfolder="", output_name="", pcutoff=0.6
):
    if not destfolder:
        destfolder = os.path.splitext(video)[0]
    if not output_name:
        video_name, ext = os.path.splitext(os.path.split(video)[1])
        output_name = video_name + "DLClabeled" + ext
    tracks = auxiliaryfunctions.read_pickle(pickle_file)
    _create_video_from_tracks(video, tracks, destfolder, output_name, pcutoff)
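
# Illustrative usage sketch (not from the DeepLabCut source); paths are
# hypothetical placeholders for a video and its pickled tracks.
create_video_from_pickled_tracks(
    "/data/videos/vid1.mp4",
    "/data/videos/vid1DLC_tracks.pickle",
    destfolder="/data/videos/labeled",  # "" defaults to a folder named after the video
    output_name="vid1_labeled.mp4",     # "" defaults to <video>DLClabeled.<ext>
    pcutoff=0.6,
)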
Example #3
def undistort_points(config, dataframe, camera_pair):
    cfg_3d = auxiliaryfunctions.read_config(config)
    (
        img_path,
        path_corners,
        path_camera_matrix,
        path_undistort,
    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    """
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    """
    if True:
        # Load the per-camera 2D predictions
        dataframe_cam1 = pd.read_hdf(dataframe[0])
        dataframe_cam2 = pd.read_hdf(dataframe[1])
        scorer_cam1 = dataframe_cam1.columns.get_level_values(0)[0]
        scorer_cam2 = dataframe_cam2.columns.get_level_values(0)[0]
        path_stereo_file = os.path.join(path_camera_matrix,
                                        "stereo_params.pickle")
        stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
        mtx_l = stereo_file[camera_pair]["cameraMatrix1"]
        dist_l = stereo_file[camera_pair]["distCoeffs1"]

        mtx_r = stereo_file[camera_pair]["cameraMatrix2"]
        dist_r = stereo_file[camera_pair]["distCoeffs2"]

        R1 = stereo_file[camera_pair]["R1"]
        P1 = stereo_file[camera_pair]["P1"]

        R2 = stereo_file[camera_pair]["R2"]
        P2 = stereo_file[camera_pair]["P2"]

        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        (
            dataFrame_cam1_undistort,
            scorer_cam1,
            bodyparts,
        ) = auxiliaryfunctions_3d.create_empty_df(dataframe_cam1,
                                                  scorer_cam1,
                                                  flag="2d")
        (
            dataFrame_cam2_undistort,
            scorer_cam2,
            bodyparts,
        ) = auxiliaryfunctions_3d.create_empty_df(dataframe_cam2,
                                                  scorer_cam2,
                                                  flag="2d")

        for bpindex, bp in tqdm(enumerate(bodyparts)):
            points_cam1 = dataframe_cam1.xs(bp, level="bodyparts",
                                            axis=1).values[:, :2]
            points_cam1_remapped = cv2.undistortPoints(
                src=points_cam1.astype(np.float32),
                cameraMatrix=mtx_l,
                distCoeffs=dist_l,
                P=P1,
                R=R1,
            )
            dataFrame_cam1_undistort.loc(
                axis=1)[scorer_cam1, bp,
                        ["x", "y"]] = points_cam1_remapped.squeeze()
            dataFrame_cam1_undistort.loc(
                axis=1)[scorer_cam1, bp, "likelihood"] = dataframe_cam1.xs(
                    [bp, "likelihood"], level=["bodyparts",
                                               "coords"], axis=1).values

            # Undistort the points from the cam2 camera
            points_cam2 = dataframe_cam2.xs(bp, level="bodyparts",
                                            axis=1).values[:, :2]
            points_cam2_remapped = cv2.undistortPoints(
                src=points_cam2.astype(np.float32),
                cameraMatrix=mtx_r,
                distCoeffs=dist_r,
                P=P2,
                R=R2,
            )
            dataFrame_cam2_undistort.loc(
                axis=1)[scorer_cam2, bp,
                        ["x", "y"]] = points_cam2_remapped.squeeze()
            dataFrame_cam2_undistort.loc(
                axis=1)[scorer_cam2, bp, "likelihood"] = dataframe_cam2.xs(
                    [bp, "likelihood"], level=["bodyparts",
                                               "coords"], axis=1).values

        # Sort the undistorted dataframes
        dataFrame_cam1_undistort.sort_index(inplace=True)
        dataFrame_cam2_undistort.sort_index(inplace=True)

    return (
        dataFrame_cam1_undistort,
        dataFrame_cam2_undistort,
        stereo_file[camera_pair],
        path_stereo_file,
    )
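
# Illustrative sketch (not from the DeepLabCut source): the cv2.undistortPoints
# call used above, on synthetic data. With a rectification R and projection P
# supplied, OpenCV returns rectified pixel coordinates (not normalized ones).
import numpy as np
import cv2

K = np.array([[800.0, 0.0, 320.0],
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])               # synthetic camera matrix
dist = np.array([-0.2, 0.05, 0.0, 0.0, 0.0])  # synthetic distortion coefficients
pts = np.array([[[100.0, 120.0]], [[400.0, 300.0]]], dtype=np.float32)  # (N, 1, 2)

# Identity R and P=K reproduce plain undistortion in pixel coordinates,
# mirroring how R1/P1 (R2/P2) from stereo_params are passed above.
out = cv2.undistortPoints(src=pts, cameraMatrix=K, distCoeffs=dist,
                          R=np.eye(3), P=K)
print(out.squeeze())  # undistorted pixel coordinates, shape (N, 2)
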
Example #4
def triangulate(
    config,
    video_path,
    videotype="avi",
    filterpredictions=True,
    filtertype="median",
    gputouse=None,
    destfolder=None,
    save_as_csv=False,
):
    """
    This function triangulates the detected DLC-keypoints from the two camera views
    using the camera matrices (derived from calibration) to calculate 3D predictions.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    video_path : string/list of list
        Full path of the directory where videos are saved. If the user wants to analyze
        only a pair of videos, the user needs to pass them as a list of list of videos,
        i.e. [['video1-camera-1.avi','video1-camera-2.avi']]

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\n
        Only videos with this extension are analyzed. The default is ``avi``.

    filterpredictions: Bool, optional
        Filter the predictions with filter specified by "filtertype". If specified it
        should be either ``True`` or ``False``.

    filtertype: string
        Select the filter type: 'arima' or 'median' (currently supported).

    gputouse: int, optional. Natural number indicating the index of your GPU (see number in nvidia-smi).
        If you do not have a GPU, put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video)

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``

    Example
    -------
    Linux/MacOS
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'/data/project1/videos/')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['/data/project1/videos/video1-camera-1.avi','/data/project1/videos/video1-camera-2.avi'],['/data/project1/videos/video2-camera-1.avi','/data/project1/videos/video2-camera-2.avi']])


    Windows
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'C:\\yourusername\\rig-95\\Videos')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['C:\\yourusername\\rig-95\\Videos\\video1-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video1-camera-2.avi'],['C:\\yourusername\\rig-95\\Videos\\video2-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video2-camera-2.avi']])
    """
    from deeplabcut.pose_estimation_tensorflow import predict_videos
    from deeplabcut.post_processing import filtering

    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    scorer_3d = cfg_3d["scorername_3d"]

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str("config_file_" + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(
                str("It seems the file specified in the variable config_file_"
                    + str(cam)) +
                " does not exist. Please edit the config file with correct file path and retry."
            )

    # flag to check if the video_path variable is a string or a list of lists
    flag = False  # assumes that video path is a list
    if isinstance(video_path, str):
        flag = True
        video_list = auxiliaryfunctions_3d.get_camerawise_videos(
            video_path, cam_names, videotype=videotype)
    else:
        video_list = video_path

    if not video_list:
        print("No videos found in the specified video path:", video_path)
        print(
            "Please make sure that the video names contain the camera names as entered in the config file,"
        )
        print(
            "or that the videotype matches the videos in the path; I was looking for:",
            videotype,
        )

    print("List of pairs:", video_list)
    scorer_name = {}
    run_triangulate = False
    for i in range(len(video_list)):
        dataname = []
        for j in range(len(video_list[i])):  # looping over cameras
            if cam_names[j] in video_list[i][j]:
                print("Analyzing video %s using %s" %
                      (video_list[i][j], str("config_file_" + cam_names[j])))

                config_2d = snapshots[cam_names[j]]
                cfg = auxiliaryfunctions.read_config(config_2d)
                shuffle = cfg_3d[str("shuffle_" + cam_names[j])]
                trainingsetindex = cfg_3d[str("trainingsetindex_" +
                                              cam_names[j])]
                trainFraction = cfg["TrainingFraction"][trainingsetindex]
                if flag:
                    video = os.path.join(video_path, video_list[i][j])
                else:
                    video_path = str(Path(video_list[i][j]).parents[0])
                    video = os.path.join(video_path, video_list[i][j])

                if destfolder is None:
                    destfolder = str(Path(video).parents[0])

                vname = Path(video).stem
                prefix = str(vname).split(cam_names[j])[0]
                suffix = str(vname).split(cam_names[j])[-1]
                if prefix == "":
                    pass
                elif prefix[-1] == "_" or prefix[-1] == "-":
                    prefix = prefix[:-1]

                if suffix == "":
                    pass
                elif suffix[0] == "_" or suffix[0] == "-":
                    suffix = suffix[1:]

                if prefix == "":
                    output_file = os.path.join(destfolder, suffix)
                else:
                    if suffix == "":
                        output_file = os.path.join(destfolder, prefix)
                    else:
                        output_file = os.path.join(destfolder,
                                                   prefix + "_" + suffix)

                output_filename = os.path.join(
                    output_file + "_" + scorer_3d
                )  # Check if the videos are already analyzed for 3d
                if os.path.isfile(output_filename + ".h5"):
                    if save_as_csv and not os.path.exists(
                            output_filename + ".csv"):
                        # In case the user sets save_as_csv=True after triangulating
                        pd.read_hdf(output_filename + ".h5").to_csv(
                            str(output_filename + ".csv"))

                    print(
                        "Already analyzed...Checking the meta data for any change in the camera matrices and/or scorer names",
                        vname,
                    )
                    pickle_file = str(output_filename + "_meta.pickle")
                    metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(
                        pickle_file)
                    (
                        img_path,
                        path_corners,
                        path_camera_matrix,
                        path_undistort,
                    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
                    path_stereo_file = os.path.join(path_camera_matrix,
                                                    "stereo_params.pickle")
                    stereo_file = auxiliaryfunctions.read_pickle(
                        path_stereo_file)
                    cam_pair = str(cam_names[0] + "-" + cam_names[1])
                    is_video_analyzed = False  # tracks whether the video was already analyzed
                    # Check for the camera matrix
                    for k in metadata_["stereo_matrix"]:
                        if not np.all(metadata_["stereo_matrix"][k] ==
                                      stereo_file[cam_pair][k]):
                            run_triangulate = True

                    # Check for scorer names in the pickle file of 3d output
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations="unknown")

                    if (metadata_["scorer_name"][cam_names[j]] == DLCscorer
                        ):  # TODO: CHECK FOR BOTH?
                        is_video_analyzed = True
                    elif metadata_["scorer_name"][
                            cam_names[j]] == DLCscorerlegacy:
                        is_video_analyzed = True
                    else:
                        is_video_analyzed = False
                        run_triangulate = True

                    if is_video_analyzed:
                        print("This file is already analyzed!")
                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))
                        scorer_name[cam_names[j]] = DLCscorer
                    else:
                        # Analyze the video if the scorer name differs
                        DLCscorer = predict_videos.analyze_videos(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            gputouse=gputouse,
                            destfolder=destfolder,
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                        is_video_analyzed = False
                        run_triangulate = True
                        if filterpredictions:
                            filtering.filterpredictions(
                                config_2d,
                                [video],
                                videotype=videotype,
                                shuffle=shuffle,
                                trainingsetindex=trainingsetindex,
                                filtertype=filtertype,
                                destfolder=destfolder,
                            )

                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))

                else:  # need to do the whole jam.
                    DLCscorer = predict_videos.analyze_videos(
                        config_2d,
                        [video],
                        videotype=videotype,
                        shuffle=shuffle,
                        trainingsetindex=trainingsetindex,
                        gputouse=gputouse,
                        destfolder=destfolder,
                    )
                    scorer_name[cam_names[j]] = DLCscorer
                    run_triangulate = True
                    print(destfolder, vname, DLCscorer)
                    if filterpredictions:
                        filtering.filterpredictions(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            filtertype=filtertype,
                            destfolder=destfolder,
                        )
                    # Append the prediction file regardless of filtering so the
                    # pair can be undistorted and triangulated below
                    dataname.append(
                        os.path.join(destfolder,
                                     vname + DLCscorer + ".h5"))

        if run_triangulate:
            #        if len(dataname)>0:
            # undistort points for this pair
            print("Undistorting...")
            (
                dataFrame_camera1_undistort,
                dataFrame_camera2_undistort,
                stereomatrix,
                path_stereo_file,
            ) = undistort_points(config, dataname,
                                 str(cam_names[0] + "-" + cam_names[1]))
            if len(dataFrame_camera1_undistort) != len(
                    dataFrame_camera2_undistort):
                import warnings

                warnings.warn(
                    "The number of frames does not match between the two videos. Please make sure that your videos have the same number of frames, then retry! Excluding the extra frames from the longer video."
                )
                if len(dataFrame_camera1_undistort) > len(
                        dataFrame_camera2_undistort):
                    dataFrame_camera1_undistort = dataFrame_camera1_undistort[:len(
                        dataFrame_camera2_undistort)]
                if len(dataFrame_camera2_undistort) > len(
                        dataFrame_camera1_undistort):
                    dataFrame_camera2_undistort = dataFrame_camera2_undistort[:len(
                        dataFrame_camera1_undistort)]
            #                raise Exception("The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry!")
            X_final = []
            triangulate = []
            scorer_cam1 = dataFrame_camera1_undistort.columns.get_level_values(
                0)[0]
            scorer_cam2 = dataFrame_camera2_undistort.columns.get_level_values(
                0)[0]
            df_3d, scorer_3d, bodyparts = auxiliaryfunctions_3d.create_empty_df(
                dataFrame_camera1_undistort, scorer_3d, flag="3d")
            P1 = stereomatrix["P1"]
            P2 = stereomatrix["P2"]

            print("Computing the triangulation...")
            for bpindex, bp in enumerate(bodyparts):
                # Collect the likelihoods of this bodypart from both camera views
                likelihoods = np.array([
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["likelihood"].values[:],
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["likelihood"].values[:],
                ])
                likelihoods = likelihoods.T

                # Extract frames where the likelihood in either view is below the pcutoff
                low_likelihood_frames = np.any(likelihoods < pcutoff, axis=1)
                # low_likelihood_frames = np.all(likelihoods < pcutoff, axis=1)

                low_likelihood_frames = np.where(low_likelihood_frames)[0]
                points_cam1_undistort = np.array([
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["x"].values[:],
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["y"].values[:],
                ])
                points_cam1_undistort = points_cam1_undistort.T

                # For cam1: assign nan to the x and y values of a bodypart where the likelihood is below the pcutoff
                points_cam1_undistort[low_likelihood_frames] = np.nan, np.nan
                points_cam1_undistort = np.expand_dims(points_cam1_undistort,
                                                       axis=1)

                points_cam2_undistort = np.array([
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["x"].values[:],
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["y"].values[:],
                ])
                points_cam2_undistort = points_cam2_undistort.T

                # For cam2: assign nan to the x and y values of a bodypart where the likelihood is below the pcutoff
                points_cam2_undistort[low_likelihood_frames] = np.nan, np.nan
                points_cam2_undistort = np.expand_dims(points_cam2_undistort,
                                                       axis=1)

                X_l = auxiliaryfunctions_3d.triangulatePoints(
                    P1, P2, points_cam1_undistort, points_cam2_undistort)

                # ToDo: speed up func. below by saving in numpy.array
                X_final.append(X_l)
            triangulate.append(X_final)
            triangulate = np.asanyarray(triangulate)
            metadata = {}
            metadata["stereo_matrix"] = stereomatrix
            metadata["stereo_matrix_file"] = path_stereo_file
            metadata["scorer_name"] = {
                cam_names[0]: scorer_name[cam_names[0]],
                cam_names[1]: scorer_name[cam_names[1]],
            }

            # Fill the 3D dataframe with the triangulated x, y, z coordinates
            for bpindex, bp in enumerate(bodyparts):
                df_3d.iloc[:][scorer_3d, bp, "x"] = triangulate[0, bpindex,
                                                                0, :]
                df_3d.iloc[:][scorer_3d, bp, "y"] = triangulate[0, bpindex,
                                                                1, :]
                df_3d.iloc[:][scorer_3d, bp, "z"] = triangulate[0, bpindex,
                                                                2, :]

            df_3d.to_hdf(
                str(output_filename + ".h5"),
                "df_with_missing",
                format="table",
                mode="w",
            )
            auxiliaryfunctions_3d.SaveMetadata3d(
                str(output_filename + "_meta.pickle"), metadata)

            if save_as_csv:
                df_3d.to_csv(str(output_filename + ".csv"))

            print("Triangulated data for video", video_list[i])
            print("Results are saved under: ", destfolder)
            # Reset destfolder so it is recomputed for the next pair of videos
            if destfolder == str(Path(video).parents[0]):
                destfolder = None

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print(
            "Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d"
        )
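
# Illustrative sketch (not from the DeepLabCut source): the linear triangulation
# that auxiliaryfunctions_3d.triangulatePoints performs, shown here with OpenCV's
# cv2.triangulatePoints on a synthetic stereo rig. All numbers are made up.
import numpy as np
import cv2

K = np.array([[800.0, 0.0, 320.0],
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])
P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])                  # reference view
P2 = K @ np.hstack([np.eye(3), np.array([[-0.1], [0.0], [0.0]])])  # translated view

X = np.array([[0.05], [0.02], [1.0], [1.0]])  # a 3D point, homogeneous coords
x1 = (P1 @ X)[:2] / (P1 @ X)[2]               # its 2D projection in view 1
x2 = (P2 @ X)[:2] / (P2 @ X)[2]               # its 2D projection in view 2

X_h = cv2.triangulatePoints(P1, P2, x1, x2)   # homogeneous (4, 1) estimate
print(X_h[:3] / X_h[3])                       # recovers ~[0.05, 0.02, 1.0]
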
Example #5
def check_undistortion(config, cbrow=8, cbcol=6, plot=True):
    """
    This function undistorts the calibration images based on the camera matrices and stores them in the project folder (defined in the config file)
    to visually check whether the camera matrices are correct.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    cbrow : int
        Int specifying the number of rows in the calibration image.
    
    cbcol : int
        Int specifying the number of columns in the calibration image.

    plot : bool
        If this is set to True, the results of undistortion are saved as plots. The default is ``True``; if provided it must be either ``True`` or ``False``.

    Example
    --------
    Linux/MacOs/Windows
    >>> deeplabcut.check_undistortion(config, cbrow = 8,cbcol = 6)

    """

    # Read the config file
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    cfg_3d = auxiliaryfunctions.read_config(config)
    (
        img_path,
        path_corners,
        path_camera_matrix,
        path_undistort,
    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)

    # colormap = plt.get_cmap(cfg_3d['colormap'])
    markerSize = cfg_3d["dotsize"]
    alphaValue = cfg_3d["alphaValue"]
    markerType = cfg_3d["markerType"]
    markerColor = cfg_3d["markerColor"]
    cam_names = cfg_3d["camera_names"]

    images = glob.glob(os.path.join(img_path, "*.jpg"))

    # Sort the images
    images.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    """
    for fname in images:
        for cam in cam_names:
            if cam in fname:
                filename = Path(fname).stem
                ext = Path(fname).suffix
                img = cv2.imread(fname)
                gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    """
    camera_pair = [[cam_names[0], cam_names[1]]]
    stereo_params = auxiliaryfunctions.read_pickle(
        os.path.join(path_camera_matrix, "stereo_params.pickle"))

    for pair in camera_pair:
        map1_x, map1_y = cv2.initUndistortRectifyMap(
            stereo_params[pair[0] + "-" + pair[1]]["cameraMatrix1"],
            stereo_params[pair[0] + "-" + pair[1]]["distCoeffs1"],
            stereo_params[pair[0] + "-" + pair[1]]["R1"],
            stereo_params[pair[0] + "-" + pair[1]]["P1"],
            (stereo_params[pair[0] + "-" + pair[1]]["image_shape"][0]),
            cv2.CV_16SC2,
        )
        map2_x, map2_y = cv2.initUndistortRectifyMap(
            stereo_params[pair[0] + "-" + pair[1]]["cameraMatrix2"],
            stereo_params[pair[0] + "-" + pair[1]]["distCoeffs2"],
            stereo_params[pair[0] + "-" + pair[1]]["R2"],
            stereo_params[pair[0] + "-" + pair[1]]["P2"],
            (stereo_params[pair[0] + "-" + pair[1]]["image_shape"][1]),
            cv2.CV_16SC2,
        )
        cam1_undistort = []
        cam2_undistort = []

        for fname in images:
            if pair[0] in fname:
                filename = Path(fname).stem
                img1 = cv2.imread(fname)
                gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
                h, w = img1.shape[:2]
                _, corners1 = cv2.findChessboardCorners(
                    gray1, (cbcol, cbrow), None)
                corners_origin1 = cv2.cornerSubPix(gray1, corners1, (11, 11),
                                                   (-1, -1), criteria)

                # Remapping dataFrame_camera1_undistort
                im_remapped1 = cv2.remap(img1, map1_x, map1_y,
                                         cv2.INTER_LANCZOS4)
                imgpoints_proj_undistort = cv2.undistortPoints(
                    src=corners_origin1,
                    cameraMatrix=stereo_params[pair[0] + "-" +
                                               pair[1]]["cameraMatrix1"],
                    distCoeffs=stereo_params[pair[0] + "-" +
                                             pair[1]]["distCoeffs1"],
                    P=stereo_params[pair[0] + "-" + pair[1]]["P1"],
                    R=stereo_params[pair[0] + "-" + pair[1]]["R1"],
                )
                cam1_undistort.append(imgpoints_proj_undistort)
                cv2.imwrite(
                    os.path.join(str(path_undistort),
                                 filename + "_undistort.jpg"),
                    im_remapped1,
                )
                imgpoints_proj_undistort = []

            elif pair[1] in fname:
                filename = Path(fname).stem
                img2 = cv2.imread(fname)
                gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
                h, w = img2.shape[:2]
                _, corners2 = cv2.findChessboardCorners(
                    gray2, (cbcol, cbrow), None)
                corners_origin2 = cv2.cornerSubPix(gray2, corners2, (11, 11),
                                                   (-1, -1), criteria)

                # Remapping
                im_remapped2 = cv2.remap(img2, map2_x, map2_y,
                                         cv2.INTER_LANCZOS4)
                imgpoints_proj_undistort2 = cv2.undistortPoints(
                    src=corners_origin2,
                    cameraMatrix=stereo_params[pair[0] + "-" +
                                               pair[1]]["cameraMatrix2"],
                    distCoeffs=stereo_params[pair[0] + "-" +
                                             pair[1]]["distCoeffs2"],
                    P=stereo_params[pair[0] + "-" + pair[1]]["P2"],
                    R=stereo_params[pair[0] + "-" + pair[1]]["R2"],
                )
                cam2_undistort.append(imgpoints_proj_undistort2)
                cv2.imwrite(
                    os.path.join(str(path_undistort),
                                 filename + "_undistort.jpg"),
                    im_remapped2,
                )
                imgpoints_proj_undistort2 = []

        cam1_undistort = np.array(cam1_undistort)
        cam2_undistort = np.array(cam2_undistort)
        print("All images are undistorted and stored in %s" %
              str(path_undistort))
        print(
            "Use the function ``triangulate`` to undistort the dataframes and compute the triangulation"
        )

        if plot:
            f1, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
            f1.suptitle(
                str("Original Image: Views from " + pair[0] + " and " +
                    pair[1]),
                fontsize=25,
            )

            # Display images in RGB
            ax1.imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
            ax2.imshow(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))

            norm = mcolors.Normalize(vmin=0.0, vmax=cam1_undistort.shape[1])
            plt.savefig(os.path.join(str(path_undistort),
                                     "Original_Image.png"))

            # Plot the undistorted corner points
            f2, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
            f2.suptitle("Undistorted corner points on camera-1 and camera-2",
                        fontsize=25)
            ax1.imshow(cv2.cvtColor(im_remapped1, cv2.COLOR_BGR2RGB))
            ax2.imshow(cv2.cvtColor(im_remapped2, cv2.COLOR_BGR2RGB))
            for i in range(0, cam1_undistort.shape[1]):
                ax1.scatter(
                    [cam1_undistort[-1][i, 0, 0]],
                    [cam1_undistort[-1][i, 0, 1]],
                    marker=markerType,
                    s=markerSize,
                    color=markerColor,
                    alpha=alphaValue,
                )
                ax2.scatter(
                    [cam2_undistort[-1][i, 0, 0]],
                    [cam2_undistort[-1][i, 0, 1]],
                    marker=markerType,
                    s=markerSize,
                    color=markerColor,
                    alpha=alphaValue,
                )
            plt.savefig(
                os.path.join(str(path_undistort), "undistorted_points.png"))

            # Triangulate
            triangulate = auxiliaryfunctions_3d.compute_triangulation_calibration_images(
                stereo_params[pair[0] + "-" + pair[1]],
                cam1_undistort,
                cam2_undistort,
                path_undistort,
                cfg_3d,
                plot=True,
            )
            auxiliaryfunctions.write_pickle("triangulate.pickle", triangulate)
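
# Illustrative sketch (not from the DeepLabCut source): the map-building and
# remapping steps used above, reduced to a synthetic single-camera case so it
# runs without a calibrated rig. All values are made up.
import numpy as np
import cv2

K = np.array([[300.0, 0.0, 160.0],
              [0.0, 300.0, 120.0],
              [0.0, 0.0, 1.0]])
dist = np.array([-0.3, 0.1, 0.0, 0.0, 0.0])  # synthetic barrel distortion
img = np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8)  # stand-in image

# Same call pattern as above: build the fixed-point lookup maps once, then remap.
map_x, map_y = cv2.initUndistortRectifyMap(K, dist, np.eye(3), K,
                                           (320, 240), cv2.CV_16SC2)
undistorted = cv2.remap(img, map_x, map_y, cv2.INTER_LANCZOS4)
print(undistorted.shape)  # (240, 320, 3)
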
Example #6
    def retrieveData_and_computeEpLines(self, img, imNum):

        # load labeledPoints and fundamental Matrix

        if self.config3d is not None:
            cfg_3d = auxiliaryfunctions.read_config(self.config3d)
            cams = cfg_3d["camera_names"]
            path_camera_matrix = auxiliaryfunctions_3d.Foldernames3Dproject(
                cfg_3d)[2]
            path_stereo_file = os.path.join(path_camera_matrix,
                                            "stereo_params.pickle")
            stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)

            for cam in cams:
                if cam in img:
                    labelCam = cam
                    if self.sourceCam is None:
                        # default to the other camera in the pair
                        sourceCam = [
                            otherCam for otherCam in cams
                            if cam not in otherCam
                        ][0]
                    else:
                        sourceCam = self.sourceCam

            sourceCamIdx = np.where(np.array(cams) == sourceCam)[0][0]
            labelCamIdx = np.where(np.array(cams) == labelCam)[0][0]

            if sourceCamIdx < labelCamIdx:
                camera_pair = cams[sourceCamIdx] + "-" + cams[labelCamIdx]
                sourceCam_numInPair = 1
            else:
                camera_pair = cams[labelCamIdx] + "-" + cams[sourceCamIdx]
                sourceCam_numInPair = 2

            fundMat = stereo_file[camera_pair]["F"]
            sourceCam_path = os.path.split(img.replace(labelCam, sourceCam))[0]

            cfg = auxiliaryfunctions.read_config(self.config)
            scorer = cfg["scorer"]

            try:
                dataFrame = pd.read_hdf(
                    os.path.join(sourceCam_path,
                                 "CollectedData_" + scorer + ".h5"))
                dataFrame.sort_index(inplace=True)
            except IOError:
                print(
                    "source camera images have not yet been labeled, or you have opened this folder in the wrong mode!"
                )
                return None, None, None

            # Find offset terms for drawing epipolar Lines
            # Get crop params for camera being labeled
            foundEvent = 0
            eventSearch = re.compile(os.path.split(os.path.split(img)[0])[1])
            cropPattern = re.compile("[0-9]{1,4}")
            with open(self.config, "rt") as config:
                for line in config:
                    if foundEvent == 1:
                        crop_labelCam = np.int32(re.findall(cropPattern, line))
                        break
                    if eventSearch.search(line) is not None:
                        foundEvent = 1
            # Get crop params for other camera
            foundEvent = 0
            eventSearch = re.compile(os.path.split(sourceCam_path)[1])
            cropPattern = re.compile("[0-9]{1,4}")
            with open(self.config, "rt") as config:
                for line in config:
                    if foundEvent == 1:
                        crop_sourceCam = np.int32(re.findall(
                            cropPattern, line))
                        break
                    if eventSearch.search(line) is not None:
                        foundEvent = 1

            labelCam_offsets = [crop_labelCam[0], crop_labelCam[2]]
            sourceCam_offsets = [crop_sourceCam[0], crop_sourceCam[2]]

            sourceCam_pts = np.asarray(dataFrame, dtype=np.int32)
            sourceCam_pts = sourceCam_pts.reshape(
                (sourceCam_pts.shape[0], int(sourceCam_pts.shape[1] / 2), 2))
            sourceCam_pts = np.moveaxis(sourceCam_pts, [0, 1, 2], [1, 0, 2])
            sourceCam_pts[...,
                          0] = sourceCam_pts[..., 0] + sourceCam_offsets[0]
            sourceCam_pts[...,
                          1] = sourceCam_pts[..., 1] + sourceCam_offsets[1]

            sourcePts = sourceCam_pts[:, imNum, :]

            epLines_source2label = cv2.computeCorrespondEpilines(
                sourcePts, int(sourceCam_numInPair), fundMat)
            epLines_source2label = epLines_source2label.reshape(-1, 3)

            return epLines_source2label, sourcePts, labelCam_offsets

        else:
            return None, None, None
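
# Illustrative sketch (not from the DeepLabCut source): the epipolar-line step
# used above, in isolation. Each output row (a, b, c) describes the line
# a*x + b*y + c = 0 in the other view. F below is a synthetic fundamental matrix
# for a horizontally translated (rectified) stereo pair.
import numpy as np
import cv2

F = np.array([[0.0, 0.0, 0.0],
              [0.0, 0.0, -1.0],
              [0.0, 1.0, 0.0]])  # F for a pure translation along x
pts = np.array([[100.0, 50.0], [320.0, 240.0]],
               dtype=np.float32).reshape(-1, 1, 2)

lines = cv2.computeCorrespondEpilines(pts, 1, F).reshape(-1, 3)
print(lines)  # for this F, each epipolar line is y = const in the other view
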
Example #7
def triangulate(
    config,
    video_path,
    videotype="",
    filterpredictions=True,
    filtertype="median",
    gputouse=None,
    destfolder=None,
    save_as_csv=False,
    track_method="",
):
    """
    This function triangulates the detected DLC-keypoints from the two camera views
    using the camera matrices (derived from calibration) to calculate 3D predictions.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    video_path : string/list of list
        Full path of the directory where videos are saved. If the user wants to analyze
        only a pair of videos, the user needs to pass them as a list of list of videos,
        i.e. [['video1-camera-1.avi','video1-camera-2.avi']]

    videotype: string, optional
        Checks for the extension of the video in case the input is a directory.\n Only videos with this extension are analyzed.
        If left unspecified, videos with common extensions ('avi', 'mp4', 'mov', 'mpeg', 'mkv') are kept.


    filterpredictions: Bool, optional
        Filter the predictions with filter specified by "filtertype". If specified it
        should be either ``True`` or ``False``.

    filtertype: string
        Select the filter type: 'arima' or 'median' (currently supported).

    gputouse: int, optional. Natural number indicating the index of your GPU (see number in nvidia-smi).
        If you do not have a GPU, put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video)

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``

    track_method: string, optional
        Specifies the tracker used to generate the pose-estimation data.
        Empty by default (single-animal projects); for multi-animal projects,
        one of 'box', 'skeleton', or 'ellipse'.

    Example
    -------
    Linux/MacOS
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'/data/project1/videos/')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['/data/project1/videos/video1-camera-1.avi','/data/project1/videos/video1-camera-2.avi'],['/data/project1/videos/video2-camera-1.avi','/data/project1/videos/video2-camera-2.avi']])


    Windows
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'C:\\yourusername\\rig-95\\Videos')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['C:\\yourusername\\rig-95\\Videos\\video1-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video1-camera-2.avi'],['C:\\yourusername\\rig-95\\Videos\\video2-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video2-camera-2.avi']])
    """
    from deeplabcut.pose_estimation_tensorflow import predict_videos
    from deeplabcut.post_processing import filtering

    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    scorer_3d = cfg_3d["scorername_3d"]

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str("config_file_" + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(
                str(
                    "It seems the file specified in the variable config_file_"
                    + str(cam)
                )
                + " does not exist. Please edit the config file with correct file path and retry."
            )

    # flag to check if the video_path variable is a string or a list of lists
    flag = False  # assumes that video path is a list
    if isinstance(video_path, str):
        flag = True
        video_list = auxiliaryfunctions_3d.get_camerawise_videos(
            video_path, cam_names, videotype=videotype
        )
    else:
        video_list = video_path

    if not video_list:
        print("No videos found in the specified video path:", video_path)
        print(
            "Please make sure that the video names contain the camera names as entered in the config file,"
        )
        print(
            "or that the videotype matches the videos in the path; I was looking for:",
            videotype,
        )

    print("List of pairs:", video_list)
    scorer_name = {}
    run_triangulate = False
    for i in range(len(video_list)):
        dataname = []
        for j in range(len(video_list[i])):  # looping over cameras
            if cam_names[j] in video_list[i][j]:
                print(
                    "Analyzing video %s using %s"
                    % (video_list[i][j], str("config_file_" + cam_names[j]))
                )

                config_2d = snapshots[cam_names[j]]
                cfg = auxiliaryfunctions.read_config(config_2d)

                # Get track_method and do related checks
                track_method = auxfun_multianimal.get_track_method(
                    cfg, track_method=track_method
                )
                if len(cfg.get("multianimalbodyparts", [])) == 1 and track_method != "box":
                    warnings.warn(
                        "Switching to `box` tracker for single point tracking..."
                    )
                    track_method = "box"

                # Get track method suffix
                tr_method_suffix = TRACK_METHODS.get(track_method, "")

                shuffle = cfg_3d[str("shuffle_" + cam_names[j])]
                trainingsetindex = cfg_3d[str("trainingsetindex_" + cam_names[j])]
                trainFraction = cfg["TrainingFraction"][trainingsetindex]
                if flag:
                    video = os.path.join(video_path, video_list[i][j])
                else:
                    video_path = str(Path(video_list[i][j]).parents[0])
                    video = os.path.join(video_path, video_list[i][j])

                if destfolder is None:
                    destfolder = str(Path(video).parents[0])

                vname = Path(video).stem
                prefix = str(vname).split(cam_names[j])[0]
                suffix = str(vname).split(cam_names[j])[-1]
                if prefix == "":
                    pass
                elif prefix[-1] == "_" or prefix[-1] == "-":
                    prefix = prefix[:-1]

                if suffix == "":
                    pass
                elif suffix[0] == "_" or suffix[0] == "-":
                    suffix = suffix[1:]

                if prefix == "":
                    output_file = os.path.join(destfolder, suffix)
                else:
                    if suffix == "":
                        output_file = os.path.join(destfolder, prefix)
                    else:
                        output_file = os.path.join(destfolder, prefix + "_" + suffix)

                output_filename = os.path.join(
                    output_file + "_" + scorer_3d
                )  # Check if the videos are already analyzed for 3d
                if os.path.isfile(output_filename + ".h5"):
                    if save_as_csv and not os.path.exists(
                        output_filename + ".csv"
                    ):
                        # In case the user sets save_as_csv=True after triangulating
                        pd.read_hdf(output_filename + ".h5").to_csv(
                            str(output_filename + ".csv")
                        )

                    print(
                        "Already analyzed...Checking the meta data for any change in the camera matrices and/or scorer names",
                        vname,
                    )
                    pickle_file = str(output_filename + "_meta.pickle")
                    metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)
                    (
                        img_path,
                        path_corners,
                        path_camera_matrix,
                        path_undistort,
                        _,
                    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
                    path_stereo_file = os.path.join(
                        path_camera_matrix, "stereo_params.pickle"
                    )
                    stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
                    cam_pair = str(cam_names[0] + "-" + cam_names[1])
                    is_video_analyzed = False  # tracks whether the video was already analyzed
                    # Check for the camera matrix
                    for k in metadata_["stereo_matrix"]:
                        if not np.all(
                            metadata_["stereo_matrix"][k] == stereo_file[cam_pair][k]
                        ):
                            run_triangulate = True

                    # Check for scorer names in the pickle file of 3d output
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg, shuffle, trainFraction, trainingsiterations="unknown"
                    )

                    if (
                        metadata_["scorer_name"][cam_names[j]] == DLCscorer
                    ):  # TODO: CHECK FOR BOTH?
                        is_video_analyzed = True
                    elif metadata_["scorer_name"][cam_names[j]] == DLCscorerlegacy:
                        is_video_analyzed = True
                    else:
                        is_video_analyzed = False
                        run_triangulate = True

                    if is_video_analyzed:
                        print("This file is already analyzed!")
                        dataname.append(
                            os.path.join(
                                destfolder, vname + DLCscorer + tr_method_suffix + ".h5"
                            )
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                    else:
                        # Analyze the video if the scorer name differs
                        DLCscorer = predict_videos.analyze_videos(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            gputouse=gputouse,
                            destfolder=destfolder,
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                        is_video_analyzed = False
                        run_triangulate = True
                        suffix = tr_method_suffix
                        if filterpredictions:
                            filtering.filterpredictions(
                                config_2d,
                                [video],
                                videotype=videotype,
                                shuffle=shuffle,
                                trainingsetindex=trainingsetindex,
                                filtertype=filtertype,
                                destfolder=destfolder,
                            )
                            suffix += "_filtered"

                        dataname.append(
                            os.path.join(
                                destfolder, vname + DLCscorer + suffix + ".h5"
                            )
                        )

                else:  # need to do the whole jam.
                    DLCscorer = predict_videos.analyze_videos(
                        config_2d,
                        [video],
                        videotype=videotype,
                        shuffle=shuffle,
                        trainingsetindex=trainingsetindex,
                        gputouse=gputouse,
                        destfolder=destfolder,
                    )
                    scorer_name[cam_names[j]] = DLCscorer
                    run_triangulate = True
                    print(destfolder, vname, DLCscorer)
                    suffix = tr_method_suffix
                    if filterpredictions:
                        filtering.filterpredictions(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            filtertype=filtertype,
                            destfolder=destfolder,
                        )
                        suffix += "_filtered"
                    # Append the prediction file regardless of filtering so the
                    # pair can be undistorted and triangulated below
                    dataname.append(
                        os.path.join(
                            destfolder, vname + DLCscorer + suffix + ".h5"
                        )
                    )

        if run_triangulate:
            #        if len(dataname)>0:
            # undistort points for this pair
            print("Undistorting...")
            (
                dataFrame_camera1_undistort,
                dataFrame_camera2_undistort,
                stereomatrix,
                path_stereo_file,
            ) = undistort_points(
                config, dataname, str(cam_names[0] + "-" + cam_names[1])
            )
            if len(dataFrame_camera1_undistort) != len(dataFrame_camera2_undistort):
                import warnings

                warnings.warn(
                    "The number of frames does not match between the two videos. Please make sure that your videos have the same number of frames, then retry! Excluding the extra frames from the longer video."
                )
                if len(dataFrame_camera1_undistort) > len(dataFrame_camera2_undistort):
                    dataFrame_camera1_undistort = dataFrame_camera1_undistort[
                        : len(dataFrame_camera2_undistort)
                    ]
                if len(dataFrame_camera2_undistort) > len(dataFrame_camera1_undistort):
                    dataFrame_camera2_undistort = dataFrame_camera2_undistort[
                        : len(dataFrame_camera1_undistort)
                    ]
            #                raise Exception("The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry!")
            scorer_cam1 = dataFrame_camera1_undistort.columns.get_level_values(0)[0]
            scorer_cam2 = dataFrame_camera2_undistort.columns.get_level_values(0)[0]

            scorer_3d = scorer_cam1
            bodyparts = dataFrame_camera1_undistort.columns.get_level_values(
                "bodyparts"
            ).unique()

            P1 = stereomatrix["P1"]
            P2 = stereomatrix["P2"]
            F = stereomatrix["F"]

            print("Computing the triangulation...")

            num_frames = dataFrame_camera1_undistort.shape[0]
            ### Assign nan to [X,Y] of low likelihood predictions ###
            # Convert the data to a np array to easily mask out the low likelihood predictions
            data_cam1_tmp = dataFrame_camera1_undistort.to_numpy().reshape(
                (num_frames, -1, 3)
            )
            data_cam2_tmp = dataFrame_camera2_undistort.to_numpy().reshape(
                (num_frames, -1, 3)
            )
            # Assign [X,Y] = nan to low likelihood predictions
            data_cam1_tmp[data_cam1_tmp[..., 2] < pcutoff, :2] = np.nan
            data_cam2_tmp[data_cam2_tmp[..., 2] < pcutoff, :2] = np.nan

            # Reshape data back to original shape
            data_cam1_tmp = data_cam1_tmp.reshape(num_frames, -1)
            data_cam2_tmp = data_cam2_tmp.reshape(num_frames, -1)

            # put data back to the dataframes
            dataFrame_camera1_undistort[:] = data_cam1_tmp
            dataFrame_camera2_undistort[:] = data_cam2_tmp

            if cfg.get("multianimalproject"):
                # Check individuals are the same in both views
                individuals_view1 = (
                    dataFrame_camera1_undistort.columns.get_level_values("individuals")
                    .unique()
                    .to_list()
                )
                individuals_view2 = (
                    dataFrame_camera2_undistort.columns.get_level_values("individuals")
                    .unique()
                    .to_list()
                )
                if individuals_view1 != individuals_view2:
                    raise ValueError(
                        "The individuals do not match between the two DataFrames"
                    )

                # Cross-view match individuals
                _, voting = auxiliaryfunctions_3d.cross_view_match_dataframes(
                    dataFrame_camera1_undistort, dataFrame_camera2_undistort, F
                )
            else:
                # Create dummy variables for single-animal projects
                individuals_view1 = ["indie"]
                voting = {0: 0}

            # Cleaner variable (since inds view1 == inds view2)
            individuals = individuals_view1

            # Reshape: (num_frames, num_individuals, num_bodyparts, 2)
            all_points_cam1 = dataFrame_camera1_undistort.to_numpy().reshape(
                (num_frames, len(individuals), -1, 3)
            )[..., :2]
            all_points_cam2 = dataFrame_camera2_undistort.to_numpy().reshape(
                (num_frames, len(individuals), -1, 3)
            )[..., :2]

            # Triangulate data
            triangulate = []
            for i, _ in enumerate(individuals):
                # i is individual in view 1
                # voting[i] is the matched individual in view 2

                pts_indv_cam1 = all_points_cam1[:, i].reshape((-1, 2)).T
                pts_indv_cam2 = all_points_cam2[:, voting[i]].reshape((-1, 2)).T

                indv_points_3d = auxiliaryfunctions_3d.triangulatePoints(
                    P1, P2, pts_indv_cam1, pts_indv_cam2
                )

                indv_points_3d = indv_points_3d[:3].T.reshape((num_frames, -1, 3))

                triangulate.append(indv_points_3d)

            triangulate = np.asanyarray(triangulate)
            metadata = {}
            metadata["stereo_matrix"] = stereomatrix
            metadata["stereo_matrix_file"] = path_stereo_file
            metadata["scorer_name"] = {
                cam_names[0]: scorer_name[cam_names[0]],
                cam_names[1]: scorer_name[cam_names[1]],
            }

            # Create 3D DataFrame column and row indices
            axis_labels = ("x", "y", "z")
            if cfg.get("multianimalproject"):
                columns = pd.MultiIndex.from_product(
                    [[scorer_3d], individuals, bodyparts, axis_labels],
                    names=["scorer", "individuals", "bodyparts", "coords"],
                )

            else:
                columns = pd.MultiIndex.from_product(
                    [[scorer_3d], bodyparts, axis_labels],
                    names=["scorer", "bodyparts", "coords"],
                )

            inds = range(num_frames)

            # Swap num_animals with num_frames axes to ensure well-behaving reshape
            triangulate = triangulate.swapaxes(0, 1).reshape((num_frames, -1))

            # Fill up 3D dataframe
            df_3d = pd.DataFrame(triangulate, columns=columns, index=inds)

            df_3d.to_hdf(
                str(output_filename + ".h5"),
                "df_with_missing",
                format="table",
                mode="w",
            )

            # Reorder 2D dataframe in view 2 to match order of view 1
            if cfg.get("multianimalproject"):
                df_2d_view2 = pd.read_hdf(dataname[1])
                individuals_order = [individuals[i] for i in list(voting.values())]
                df_2d_view2 = auxfun_multianimal.reorder_individuals_in_df(df_2d_view2, individuals_order)
                df_2d_view2.to_hdf(dataname[1], "tracks", format="table", mode="w")

            auxiliaryfunctions_3d.SaveMetadata3d(
                str(output_filename + "_meta.pickle"), metadata
            )

            if save_as_csv:
                df_3d.to_csv(str(output_filename + ".csv"))

            print("Triangulated data for video", video_list[i])
            print("Results are saved under: ", destfolder)
            # have to make the dest folder none so that it can be updated for a new pair of videos
            if destfolder == str(Path(video).parents[0]):
                destfolder = None

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print("Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d")
Example #8
0
def undistort_points(config, dataframe, camera_pair, destfolder):
    cfg_3d = auxiliaryfunctions.read_config(config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(
        cfg_3d)
    ''' 
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    '''
    if True:
        # Load the 2d coordinates and likelihoods predicted for each camera
        dataframe_cam1 = pd.read_hdf(dataframe[0])
        dataframe_cam2 = pd.read_hdf(dataframe[1])
        scorer_cam1 = dataframe_cam1.columns.get_level_values(0)[0]
        scorer_cam2 = dataframe_cam2.columns.get_level_values(0)[0]
        path_stereo_file = os.path.join(path_camera_matrix,
                                        'stereo_params.pickle')
        stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
        mtx_l = stereo_file[camera_pair]['cameraMatrix1']
        dist_l = stereo_file[camera_pair]['distCoeffs1']

        mtx_r = stereo_file[camera_pair]['cameraMatrix2']
        dist_r = stereo_file[camera_pair]['distCoeffs2']

        R1 = stereo_file[camera_pair]['R1']
        P1 = stereo_file[camera_pair]['P1']

        R2 = stereo_file[camera_pair]['R2']
        P2 = stereo_file[camera_pair]['P2']

        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        dataFrame_cam1_undistort, scorer_cam1, bodyparts = auxiliaryfunctions_3d.create_empty_df(
            dataframe_cam1, scorer_cam1, flag='2d')
        dataFrame_cam2_undistort, scorer_cam2, bodyparts = auxiliaryfunctions_3d.create_empty_df(
            dataframe_cam2, scorer_cam2, flag='2d')

        for bp in tqdm(bodyparts):
            # Undistort the points from the cam1 camera
            points_cam1 = np.array([
                dataframe_cam1[scorer_cam1][bp]['x'].values,
                dataframe_cam1[scorer_cam1][bp]['y'].values,
            ]).T
            points_cam1 = np.expand_dims(points_cam1, axis=1)
            points_cam1_remapped = cv2.undistortPoints(src=points_cam1,
                                                       cameraMatrix=mtx_l,
                                                       distCoeffs=dist_l,
                                                       P=P1,
                                                       R=R1)

            dataFrame_cam1_undistort.loc[:, (scorer_cam1, bp, 'x')] = points_cam1_remapped[:, 0, 0]
            dataFrame_cam1_undistort.loc[:, (scorer_cam1, bp, 'y')] = points_cam1_remapped[:, 0, 1]
            dataFrame_cam1_undistort.loc[:, (scorer_cam1, bp, 'likelihood')] = \
                dataframe_cam1[scorer_cam1][bp]['likelihood'].values

            # Undistort the points from the cam2 camera
            points_cam2 = np.array([
                dataframe_cam2[scorer_cam2][bp]['x'].values,
                dataframe_cam2[scorer_cam2][bp]['y'].values,
            ]).T
            points_cam2 = np.expand_dims(points_cam2, axis=1)
            points_cam2_remapped = cv2.undistortPoints(src=points_cam2,
                                                       cameraMatrix=mtx_r,
                                                       distCoeffs=dist_r,
                                                       P=P2,
                                                       R=R2)

            dataFrame_cam2_undistort.loc[:, (scorer_cam2, bp, 'x')] = points_cam2_remapped[:, 0, 0]
            dataFrame_cam2_undistort.loc[:, (scorer_cam2, bp, 'y')] = points_cam2_remapped[:, 0, 1]
            dataFrame_cam2_undistort.loc[:, (scorer_cam2, bp, 'likelihood')] = \
                dataframe_cam2[scorer_cam2][bp]['likelihood'].values

        # Sort the undistorted dataframes (no intermediate saving; see the
        # commented-out block above)
        dataFrame_cam1_undistort.sort_index(inplace=True)
        dataFrame_cam2_undistort.sort_index(inplace=True)

    return (dataFrame_cam1_undistort, dataFrame_cam2_undistort,
            stereo_file[camera_pair], path_stereo_file)
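
To illustrate the cv2.undistortPoints call above in isolation, here is a self-contained sketch with synthetic intrinsics (all numbers are made up); passing R and P from stereo rectification, as the function does, yields rectified pixel coordinates rather than normalized ones:

import numpy as np
import cv2

mtx = np.array([[1000.0, 0.0, 640.0],
                [0.0, 1000.0, 360.0],
                [0.0, 0.0, 1.0]])
dist = np.array([-0.2, 0.1, 0.0, 0.0, 0.0])  # k1, k2, p1, p2, k3 (synthetic)

# Same (N, 1, 2) layout that points_cam1 is reshaped into above
pts = np.array([[100.0, 200.0], [640.0, 360.0]]).reshape(-1, 1, 2)
rectified = cv2.undistortPoints(src=pts, cameraMatrix=mtx, distCoeffs=dist,
                                P=mtx, R=np.eye(3))
print(rectified.squeeze())  # undistorted points, back in pixel coordinates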
Example #9
0
def triangulate(dlc_3d_config, video_cfg, origins, table_corners, tocchini, video_path, filterpredictions=True, destfolder=None,
                save_as_csv=False):

    cfg_3d = auxiliaryfunctions.read_config(dlc_3d_config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    cam_names = cfg_3d['camera_names']
    pcutoff = cfg_3d['pcutoff']
    scorer_3d = cfg_3d['scorername_3d']

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d['config_file_' + cam]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(
                "It seems the file specified in the variable config_file_" + cam +
                " does not exist. Please edit the config file with the correct file path and retry."
            )

    path_stereo_file = os.path.join(path_camera_matrix, 'stereo_params.pickle')
    stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)

    position=np.array([0.0,0.0,0.0])
    rotation=np.array([[1.0, 0.0, 0.0],[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    rotations=[np.linalg.inv(rotation)]
    translations=[np.matmul(-rotation, position)]
    projections=[]

    for cam_idx in range(1,len(cam_names)):
        pair_rotation=stereo_file[cam_names[0] + '-' + cam_names[cam_idx]]['R']
        pair_translation = stereo_file[cam_names[0] + '-' + cam_names[cam_idx]]['T']
        rotations.append(np.matmul(pair_rotation,rotations[0]))
        translations.append(np.matmul(pair_rotation, translations[0])+np.transpose(pair_translation))
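    # Chaining convention: camera 0 is the world frame, and each stereo pair's
    # (R, T) maps camera-0 coordinates into camera i's frame, so the extrinsics
    # of camera i are R_i = R @ R_0 and t_i = R @ t_0 + T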

    for cam_idx in range(len(cam_names)):
        path_camera_file = os.path.join(path_camera_matrix, '%s_intrinsic_params.pickle' % cam_names[cam_idx])
        intrinsic_file = auxiliaryfunctions.read_pickle(path_camera_file)

        projection=np.zeros((3,4))
        projection[:,0:3]=rotations[cam_idx]
        projection[:,3]=translations[cam_idx]

        projection=np.matmul(intrinsic_file[cam_names[cam_idx]]['mtx'], projection)
        projections.append(projection)
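    # Each entry of projections now maps homogeneous world coordinates to
    # pixel coordinates for its camera: x ~ P X with P = K [R | t]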

    for view in cam_names:
        origins[view]=np.array(origins[view])
        origins[view][0]=origins[view][0]+video_cfg['crop_limits'][view][0]
        origins[view][1]=origins[view][1]+video_cfg['crop_limits'][view][2]
    [origin, pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1}, origins, pcutoff, projections, reconMode='all')


    table_coords_3d=[]
    for corner_idx in range(len(table_corners['front'])):
        corner={}
        for view in cam_names:
            corner[view]=table_corners[view][corner_idx]
            corner[view] =np.array(corner[view])
            corner[view][0]=corner[view][0]+video_cfg['crop_limits'][view][0]
            corner[view][1]=corner[view][1]+video_cfg['crop_limits'][view][2]
        [coord,pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1},corner,pcutoff,projections, reconMode='all')
        table_coords_3d.append(coord)

    xy_norm=np.array([[0,0,1]])

    table_center=np.mean(np.array(table_coords_3d),axis=0)

    table_vec1=table_coords_3d[0]-table_center
    table_vec2=table_coords_3d[1]-table_center
    table_norm = unit_vector(np.cross(np.transpose(table_vec1), np.transpose(table_vec2)))

    rot_vec=unit_vector(np.cross(xy_norm, table_norm), axis=1)
    rot_angle=-np.arccos(np.abs(np.sum(xy_norm*table_norm))/np.sqrt(np.sum(table_norm**2)))

    rot_mat=rotation_matrix(rot_angle, np.transpose(rot_vec))
    rot_mat=np.matmul(rotation_matrix(np.pi, [1, 0, 0]), rot_mat)
    rot_mat=rot_mat[0:3,0:3]

    origin=np.matmul(rot_mat, origin)

    for idx, coord in enumerate(table_coords_3d):
        coord=np.matmul(rot_mat, coord)-origin
        table_coords_3d[idx]=coord

    tocchini_coords_3d=[]
    for tocchino_idx in range(len(tocchini['front'])):
        tocchino={}
        for view in cam_names:
            tocchino[view]=tocchini[view][tocchino_idx]
            tocchino[view]=np.array(tocchino[view])
            tocchino[view][0]=tocchino[view][0]+video_cfg['crop_limits'][view][0]
            tocchino[view][1]=tocchino[view][1]+video_cfg['crop_limits'][view][2]
        [coord,pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1},tocchino,pcutoff,projections, reconMode='all')
        tocchino_coord=np.matmul(rot_mat,coord)-origin
        tocchini_coords_3d.append(tocchino_coord)

    file_name_3d_scorer = []
    dataname = []
    for cam_name in cam_names:
        # Per-camera 2D DLC config (kept separate from the dlc_3d_config argument)
        cam_dlc_config = snapshots[cam_name]
        cfg = auxiliaryfunctions.read_config(cam_dlc_config)

        shuffle = cfg_3d['shuffle_' + cam_name]
        trainingsetindex = cfg_3d['trainingsetindex_' + cam_name]

        video = video_path[cam_name]
        vname = Path(video).stem


        trainFraction = cfg['TrainingFraction'][trainingsetindex]
        modelfolder = os.path.join(cfg["project_path"],
                                   str(auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)))
        path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
        dlc_cfg = load_config(str(path_test_config))
        Snapshots = np.array([fn.split('.')[0] for fn in os.listdir(os.path.join(modelfolder, 'train')) if "index" in fn])
        snapshotindex = cfg['snapshotindex']

        increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]

        dlc_cfg['init_weights'] = os.path.join(modelfolder, 'train', Snapshots[snapshotindex])
        trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1]
        DLCscorer = auxiliaryfunctions.GetScorerName(cfg, shuffle, trainFraction,
                                                     trainingsiterations=trainingsiterations)

        file_name_3d_scorer.append(DLCscorer)

        if filterpredictions:
            dataname.append(os.path.join(destfolder, cam_name, vname + DLCscorer + 'filtered.h5'))
        else:
            dataname.append(os.path.join(destfolder, cam_name, vname + DLCscorer + '.h5'))

    output_filename = os.path.join(destfolder, vname + '_' + scorer_3d)
    if os.path.isfile(output_filename + '.h5'):  # TODO: don't check twice and load the pickle file to check if the same snapshots + camera matrices were used.
        print("Already analyzed...", output_filename+'.h5')
    else:
        if len(dataname) > 0:
            df = pd.read_hdf(dataname[0])
            df_3d, scorer_3d, bodyparts = auxiliaryfunctions_3d.create_empty_df(df, scorer_3d, flag='3d')

            # Load each camera's 2D predictions once, instead of re-reading the
            # HDF file for every bodypart and frame
            dataframes_cam = {}
            scorers_cam = {}
            for cam_idx, cam_name in enumerate(cam_names):
                dataframes_cam[cam_name] = pd.read_hdf(dataname[cam_idx])
                scorers_cam[cam_name] = dataframes_cam[cam_name].columns.get_level_values(0)[0]

            for bpindex, bp in enumerate(bodyparts):
                bp_coords = np.zeros((3, len(df_3d)))
                for f_idx in range(len(df_3d)):
                    likelihoods = {}
                    coords = {}
                    for cam_name in cam_names:
                        dataframe_cam = dataframes_cam[cam_name]
                        scorer_cam = scorers_cam[cam_name]
                        likelihoods[cam_name] = dataframe_cam[scorer_cam][bp]['likelihood'].values[f_idx]
                        coords[cam_name] = np.array([dataframe_cam[scorer_cam][bp]['x'].values[f_idx],
                                                     dataframe_cam[scorer_cam][bp]['y'].values[f_idx]])
                        coords[cam_name][0] = coords[cam_name][0] + video_cfg['crop_limits'][cam_name][0]
                        coords[cam_name][1] = coords[cam_name][1] + video_cfg['crop_limits'][cam_name][2]
                    [coord, pairs_used] = locate(cam_names, likelihoods, coords, pcutoff, projections,
                                                 reconMode='bestpossible')

                    coord = np.matmul(rot_mat, coord) - origin
                    # Mark the frame as missing if fewer than 3 camera pairs contributed
                    if pairs_used < 3:
                        coord[:] = np.nan
                    bp_coords[:, f_idx] = np.squeeze(coord)
                df_3d.loc[:, (scorer_3d, bp, 'x')] = bp_coords[0, :]
                df_3d.loc[:, (scorer_3d, bp, 'y')] = bp_coords[1, :]
                df_3d.loc[:, (scorer_3d, bp, 'z')] = bp_coords[2, :]

            df_3d.to_hdf(str(output_filename + '.h5'), 'df_with_missing', format='table', mode='w')

            if save_as_csv:
                df_3d.to_csv(str(output_filename + '.csv'))

            print("Triangulated data for video", vname)
            print("Results are saved under: ", destfolder)

    return str(output_filename + '.h5'), table_coords_3d, tocchini_coords_3d
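
To make the projection geometry concrete, a small synthetic sketch of the same P = K [R | t] construction followed by a two-view triangulation; here cv2.triangulatePoints stands in for the locate() helper, which is defined elsewhere and not shown in this example:

import numpy as np
import cv2

K = np.array([[1000.0, 0.0, 640.0],
              [0.0, 1000.0, 360.0],
              [0.0, 0.0, 1.0]])
R1, t1 = np.eye(3), np.zeros(3)                    # camera 1 at the world origin
R2, t2 = np.eye(3), np.array([-100.0, 0.0, 0.0])   # camera 2 shifted 100 units along x

P1 = K @ np.hstack([R1, t1.reshape(3, 1)])         # P = K [R | t]
P2 = K @ np.hstack([R2, t2.reshape(3, 1)])

# One correspondence: the world point (0, 0, 1000) as seen by each camera
pt1 = np.array([[640.0], [360.0]])
pt2 = np.array([[540.0], [360.0]])
X_h = cv2.triangulatePoints(P1, P2, pt1, pt2)      # homogeneous 4x1 result
X = (X_h[:3] / X_h[3]).ravel()
print(X)  # approximately (0, 0, 1000)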
Example #10
0
def align_videos(subject, date):
    frame_buffer = 10

    base_video_path=os.path.join(cfg['preprocessed_data_dir'],subject, date,'video')
    with open(os.path.join(base_video_path,'config.json')) as json_file:
        vid_cfg=json.load(json_file)
    fnames=sorted(glob.glob(os.path.join(base_video_path, 'front', '%s*.avi' % date)))

    rois_checked={
        'front':False,
        'side':False,
        'top':False
    }
    crop_checked={
        'front':False,
        'side':False,
        'top':False
    }

    # For each file (filenames are same in each view directory)
    for fname in fnames:
        fname=os.path.split(fname)[-1]
        print('Processing %s' % fname)
        blue_onsets={}
        yellow_onsets={}
        blue_ts={}
        yellow_ts={}
        video_nframes={}

        # Whether or not to use LED for alignment
        led_based = True

        for view in cfg['camera_views']:
            video_path=os.path.join(base_video_path, view)
            clip = VideoFileClip(os.path.join(video_path, fname))
            n_frames_approx = int(np.ceil(clip.duration * clip.fps) + frame_buffer)
            n_frames = n_frames_approx
            clip.reader.initialize()

            # Initialize LED time series for this view
            blue_ts[view]=[]
            yellow_ts[view]=[]

            for index in range(n_frames_approx):
                image = img_as_ubyte(clip.reader.read_frame())

                # If not already set, show GUI to select blue LED ROI
                if not rois_checked[view]:
                    blue_led_roi_area=vid_cfg['blue_led_roi_areas'][view]
                    blue_cropped_img=image[blue_led_roi_area[2]:blue_led_roi_area[3],
                                     blue_led_roi_area[0]:blue_led_roi_area[1],:]
                    init_roi=None
                    if view in vid_cfg['blue_led_rois'] and vid_cfg['blue_led_rois'][view] is not None:
                        init_roi=vid_cfg['blue_led_rois'][view]
                        init_roi[0] = init_roi[0] - blue_led_roi_area[0]
                        init_roi[1] = init_roi[1] - blue_led_roi_area[0]
                        init_roi[2] = init_roi[2] - blue_led_roi_area[2]
                        init_roi[3] = init_roi[3] - blue_led_roi_area[2]
                    vid_cfg['blue_led_rois'][view] = select_crop_parameters.show(blue_cropped_img, 'Select blue LED ROI', init_coords=init_roi)
                    vid_cfg['blue_led_rois'][view][0] = vid_cfg['blue_led_rois'][view][0] + blue_led_roi_area[0]
                    vid_cfg['blue_led_rois'][view][1] = vid_cfg['blue_led_rois'][view][1] + blue_led_roi_area[0]
                    vid_cfg['blue_led_rois'][view][2] = vid_cfg['blue_led_rois'][view][2] + blue_led_roi_area[2]
                    vid_cfg['blue_led_rois'][view][3] = vid_cfg['blue_led_rois'][view][3] + blue_led_roi_area[2]

                    yellow_led_roi_area = vid_cfg['yellow_led_roi_areas'][view]
                    yellow_cropped_img = image[yellow_led_roi_area[2]:yellow_led_roi_area[3],
                                       yellow_led_roi_area[0]:yellow_led_roi_area[1], :]
                    init_roi = None
                    if view in vid_cfg['yellow_led_rois'] and vid_cfg['yellow_led_rois'][view] is not None:
                        init_roi = vid_cfg['yellow_led_rois'][view]
                        init_roi[0] = init_roi[0] - yellow_led_roi_area[0]
                        init_roi[1] = init_roi[1] - yellow_led_roi_area[0]
                        init_roi[2] = init_roi[2] - yellow_led_roi_area[2]
                        init_roi[3] = init_roi[3] - yellow_led_roi_area[2]
                    vid_cfg['yellow_led_rois'][view] = select_crop_parameters.show(yellow_cropped_img, 'Select yellow LED ROI', init_coords=init_roi)
                    vid_cfg['yellow_led_rois'][view][0] = vid_cfg['yellow_led_rois'][view][0] + yellow_led_roi_area[0]
                    vid_cfg['yellow_led_rois'][view][1] = vid_cfg['yellow_led_rois'][view][1] + yellow_led_roi_area[0]
                    vid_cfg['yellow_led_rois'][view][2] = vid_cfg['yellow_led_rois'][view][2] + yellow_led_roi_area[2]
                    vid_cfg['yellow_led_rois'][view][3] = vid_cfg['yellow_led_rois'][view][3] + yellow_led_roi_area[2]

                    rois_checked[view]=True
                    
                # The frame count is over-estimated by frame_buffer; detect the
                # true end of the video by looking for repeated identical frames
                if index == int(n_frames_approx - frame_buffer * 2):
                    last_image = image
                elif index > int(n_frames_approx - frame_buffer * 2):
                    if (image == last_image).all():
                        n_frames = index
                        break

                # Crop image around blue LED, get only blue channel
                blue_roi=vid_cfg['blue_led_rois'][view]
                blue_led_image = image[blue_roi[2]:blue_roi[3], blue_roi[0]:blue_roi[1], 2]
                # Add average of cropped image to blue LED timeseries
                blue_ts[view].append(np.mean(blue_led_image))

                # Crop image around yellow LED, average red and green channels
                yellow_roi=vid_cfg['yellow_led_rois'][view]
                yellow_led_image = np.mean(image[yellow_roi[2]:yellow_roi[3], yellow_roi[0]:yellow_roi[1], 0:2],axis=2)
                # Add average of cropped image to yellow LED timeseries
                yellow_ts[view].append(np.mean(yellow_led_image))

            blue_ts[view] = np.array(blue_ts[view])
            yellow_ts[view] = np.array(yellow_ts[view])

            # Normalize based on first 10 time steps
            if len(blue_ts[view])>10:
                blue_ts[view]=(blue_ts[view]-np.mean(blue_ts[view][0:10]))/np.mean(blue_ts[view][0:10])
            if len(yellow_ts[view])>10:
                yellow_ts[view]=(yellow_ts[view]-np.mean(yellow_ts[view][0:10]))/np.mean(yellow_ts[view][0:10])
            blue_ts[view]=blue_ts[view]/np.max(blue_ts[view])
            yellow_ts[view]=yellow_ts[view]/np.max(yellow_ts[view])
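            # The traces are now fractional change relative to the 10-frame
            # baseline, rescaled so that each trace peaks at 1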

            # Peak normalized blue and yellow LED intensities
            blue_peak = np.max(blue_ts[view])
            yellow_peak = np.max(yellow_ts[view])

            # If the LED traces never clearly rise (above 0.25 after the
            # 10-frame baseline), don't use the LEDs for alignment
            if len(blue_ts[view]) < 10 or np.max(blue_ts[view][10:]) < .25 or np.max(yellow_ts[view]) < .25:
                led_based = False
                print("Can't figure out LED onset - not using")
            # Otherwise, use the first time point where the normalized trace
            # exceeds 0.25 (skipping the 10-frame baseline for the blue LED)
            else:
                blue_onsets[view] = 10 + np.where(blue_ts[view][10:] >= 0.25)[0][0]
                yellow_onsets[view] = np.where(yellow_ts[view] >= 0.25)[0][0]

            video_nframes[view]=n_frames

        # Use the earliest blue LED onset across views as the alignment reference
        if len(blue_onsets) > 0:
            min_blue_onset = min(blue_onsets.values())

        # if fname=='15-05-2019_10-34-15_11.avi':
        #     plt.figure()
        #     for view in cfg['camera_views']:
        #         plt.plot(blue_ts[view], label='%s: blue' % view)
        #         plt.plot(yellow_ts[view], label='%s: yellow' % view)
        #     plt.legend()
        #     plt.show()

        # Compute trial duration based on each view
        trial_durations={}
        for view in cfg['camera_views']:
            if view in blue_onsets and view in yellow_onsets:
                # Trial duration (in ms)
                trial_duration=(yellow_onsets[view]-blue_onsets[view])*1.0/clip.fps*1000.0
                # there is an 850ms delay before blue LED comes on
                if trial_duration>0:
                    trial_duration=trial_duration+850.0
                trial_durations[view]=trial_duration
                print('%s: %.2fms' % (view, trial_duration))
        #assert(len(trial_durations)>0 and all(x == trial_durations[0] for x in trial_durations))

        start_frames_to_cut={}
        n_frames_after_cutting = {}
        # Cut frames to align videos and crop
        for idx,view in enumerate(cfg['camera_views']):

            # using LED to align
            if led_based:
                start_frames_to_cut[view]=blue_onsets[view]-min_blue_onset
            # otherwise - or if the LED-based cut looks implausible (>5 frames) -
            # use a standard number of frames to cut (order of video triggering is top, side, front)
            if not led_based or start_frames_to_cut[view] > 5:
                start_frames_to_cut[view] = 0
                if view=='front':
                    start_frames_to_cut[view]=2
                elif view=='side':
                    start_frames_to_cut[view]=1
            n_frames_after_cutting[view]=video_nframes[view]-start_frames_to_cut[view]
        new_nframes=min(n_frames_after_cutting.values())

        intrinsic_files = {}
        for view in cfg['camera_views']:
            dlc3d_cfg = os.path.join('/data/tool_learning/preprocessed_data/dlc_projects',
                                     'visual_grasp_3d-Jimmy-2019-08-19-3d', 'config.yaml')

            cfg_3d = auxiliaryfunctions.read_config(dlc3d_cfg)
            img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(
                cfg_3d)
            path_intrinsic_file = os.path.join(path_camera_matrix, '%s_intrinsic_params.pickle' % view)
            intrinsic_file = auxiliaryfunctions.read_pickle(path_intrinsic_file)
            intrinsic_files[view] = intrinsic_file[view]

        for idx, view in enumerate(cfg['camera_views']):
            camera_matrix = intrinsic_files[view]['mtx']
            distortion_coefficients = intrinsic_files[view]['dist']

            end_frames_to_cut=n_frames_after_cutting[view]-new_nframes
            print('cutting %d frames from beginning and %d frames from end of %s' % (start_frames_to_cut[view], end_frames_to_cut, view))

            # Cut frames from blue and yellow LED time series and onsets
            if end_frames_to_cut>0:
                blue_ts[view]=blue_ts[view][start_frames_to_cut[view]:-end_frames_to_cut]
                yellow_ts[view] = yellow_ts[view][start_frames_to_cut[view]:-end_frames_to_cut]
            else:
                blue_ts[view] = blue_ts[view][start_frames_to_cut[view]:]
                yellow_ts[view] = yellow_ts[view][start_frames_to_cut[view]:]
            if view in blue_onsets:
                blue_onsets[view]=blue_onsets[view]-start_frames_to_cut[view]
            if view in yellow_onsets:
                yellow_onsets[view]=yellow_onsets[view]-start_frames_to_cut[view]

            # Load video and cut frames from beginning
            video_path = os.path.join(base_video_path, view)
            clip = VideoFileClip(os.path.join(video_path, fname))

            # Crop limits based on view
            frames=[]
            n_frames_approx = int(np.ceil(clip.duration * clip.fps)+frame_buffer)
            for index in range(n_frames_approx):
                image = img_as_ubyte(clip.reader.read_frame())
                image = cv2.undistort(image, camera_matrix, distortion_coefficients)
                if index>=start_frames_to_cut[view]:
                    if not crop_checked[view]:
                        init_crop_lims = None
                        if view in vid_cfg['crop_limits'] and vid_cfg['crop_limits'][view] is not None:
                            init_crop_lims = vid_cfg['crop_limits'][view]
                        vid_cfg['crop_limits'][view] = select_crop_parameters.show(image, 'Select crop limits',
                                                                                   init_coords=init_crop_lims)
                        crop_checked[view]=True
                    # Crop image and save to video
                    crop_lims=vid_cfg['crop_limits'][view]
                    image=image[crop_lims[2]:crop_lims[3], crop_lims[0]:crop_lims[1], :]
                    frames.append(image)
                if len(frames)==new_nframes:
                    break

            clip.close()

            # Check that we have the right number of frames
            assert(len(frames)==new_nframes)

            # Create new video clip (cropped and aligned)
            video_path = os.path.join(base_video_path, view)
            new_clip = VideoProcessorCV(sname=os.path.join(video_path, fname), fps=clip.fps, codec='mp4v',
                                        sw=crop_lims[1] - crop_lims[0], sh=crop_lims[3] - crop_lims[2])
            for frame in frames:
                new_clip.save_frame(np.uint8(frame))
            new_clip.close()

        # Make everything JSON-serializable (numpy arrays and scalars are not)
        for view in cfg['camera_views']:
            blue_ts[view]=blue_ts[view].tolist()
            yellow_ts[view] = yellow_ts[view].tolist()
            if view in blue_onsets:
                blue_onsets[view]=int(blue_onsets[view])
            if view in yellow_onsets:
                yellow_onsets[view]=int(yellow_onsets[view])
            if view in trial_durations:
                trial_durations[view]=float(trial_durations[view])

        # Save video info to JSON
        data = {
            'blue_roi': vid_cfg['blue_led_rois'],
            'yellow_roi': vid_cfg['yellow_led_rois'],
            'blue_ts': blue_ts,
            'yellow_ts': yellow_ts,
            'blue_onset': blue_onsets,
            'yellow_onset': yellow_onsets,
            'trial_duration': trial_durations,
            'fname': fname
        }
        [base, ext] = os.path.splitext(fname)
        with open(os.path.join(base_video_path, '%s.json' % base), 'w') as outfile:
            json.dump(data, outfile)

        print('')

    with open(os.path.join(base_video_path,'config.json'),'w') as outfile:
        json.dump(vid_cfg, outfile)
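
As a closing illustration, a toy sketch of the LED-onset rule used above: normalize a trace against its first 10 frames, rescale it to peak at 1, and take the first post-baseline frame that crosses 0.25 (the trace here is synthetic):

import numpy as np

blue = np.concatenate([np.full(40, 10.0), np.full(60, 60.0)])  # LED turns on at frame 40
baseline = np.mean(blue[:10])
blue = (blue - baseline) / baseline  # fractional change over baseline
blue = blue / np.max(blue)           # rescale so the peak is 1

onset = 10 + np.where(blue[10:] >= 0.25)[0][0]
print(onset)  # 40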