Example 1
def undistort_points(config, dataframe, camera_pair):
    cfg_3d = auxiliaryfunctions.read_config(config)
    path_camera_matrix = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)[2]
    """
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    """
    # Read the detected 2d coordinates and likelihoods for each camera view
    dataframe_cam1 = pd.read_hdf(dataframe[0])
    dataframe_cam2 = pd.read_hdf(dataframe[1])
    path_stereo_file = os.path.join(path_camera_matrix, "stereo_params.pickle")
    stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
    dataFrame_cam1_undistort, dataFrame_cam2_undistort = _undistort_views(
        [(dataframe_cam1, dataframe_cam2)], stereo_file,
    )[0]

    return (
        dataFrame_cam1_undistort,
        dataFrame_cam2_undistort,
        stereo_file[camera_pair],
        path_stereo_file,
    )
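
A hypothetical call for orientation is shown below; the config path, the detection files, and the camera-pair key are placeholders, and the pair key must match an entry in stereo_params.pickle:

# Sketch only: all paths and the pair key below are assumptions.
df_cam1_undistort, df_cam2_undistort, stereo_pair_params, stereo_path = undistort_points(
    "/home/user/myproject-3d/config.yaml",
    ["/videos/vid-camera-1DLC.h5", "/videos/vid-camera-2DLC.h5"],
    "camera-1-camera-2",
)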
Example 2
def step3A_pre_process_project(config_3d, **kwargs):
    #print("step3A_pre_process_project",config_3d)
    cfg_3d = auxiliaryfunctions.read_config(config_3d)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)

    config = None
    with open(kwargs['config_file'], "r") as config_file:
        config = json.load(config_file)

    kwargs['target_path'] = img_path

    convert_folder(config, **kwargs)

    print("Images have been cropped and renamed")
Example 3
def undistort_points(config, dataframe, camera_pair):
    cfg_3d = auxiliaryfunctions.read_config(config)
    (
        img_path,
        path_corners,
        path_camera_matrix,
        path_undistort,
    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    """
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    """
    if True:
        # Read the detected 2d coordinates and likelihoods for each camera view
        dataframe_cam1 = pd.read_hdf(dataframe[0])
        dataframe_cam2 = pd.read_hdf(dataframe[1])
        scorer_cam1 = dataframe_cam1.columns.get_level_values(0)[0]
        scorer_cam2 = dataframe_cam2.columns.get_level_values(0)[0]
        path_stereo_file = os.path.join(path_camera_matrix,
                                        "stereo_params.pickle")
        stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
        mtx_l = stereo_file[camera_pair]["cameraMatrix1"]
        dist_l = stereo_file[camera_pair]["distCoeffs1"]

        mtx_r = stereo_file[camera_pair]["cameraMatrix2"]
        dist_r = stereo_file[camera_pair]["distCoeffs2"]

        R1 = stereo_file[camera_pair]["R1"]
        P1 = stereo_file[camera_pair]["P1"]

        R2 = stereo_file[camera_pair]["R2"]
        P2 = stereo_file[camera_pair]["P2"]

        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        (
            dataFrame_cam1_undistort,
            scorer_cam1,
            bodyparts,
        ) = auxiliaryfunctions_3d.create_empty_df(dataframe_cam1,
                                                  scorer_cam1,
                                                  flag="2d")
        (
            dataFrame_cam2_undistort,
            scorer_cam2,
            bodyparts,
        ) = auxiliaryfunctions_3d.create_empty_df(dataframe_cam2,
                                                  scorer_cam2,
                                                  flag="2d")

        for bpindex, bp in tqdm(enumerate(bodyparts)):
            points_cam1 = dataframe_cam1.xs(bp, level="bodyparts",
                                            axis=1).values[:, :2]
            points_cam1_remapped = cv2.undistortPoints(
                src=points_cam1.astype(np.float32),
                cameraMatrix=mtx_l,
                distCoeffs=dist_l,
                P=P1,
                R=R1,
            )
            dataFrame_cam1_undistort.loc(
                axis=1)[scorer_cam1, bp,
                        ["x", "y"]] = points_cam1_remapped.squeeze()
            dataFrame_cam1_undistort.loc(
                axis=1)[scorer_cam1, bp, "likelihood"] = dataframe_cam1.xs(
                    [bp, "likelihood"], level=["bodyparts",
                                               "coords"], axis=1).values

            # Undistorting the points from cam2 camera
            points_cam2 = dataframe_cam2.xs(bp, level="bodyparts",
                                            axis=1).values[:, :2]
            points_cam2_remapped = cv2.undistortPoints(
                src=points_cam2.astype(np.float32),
                cameraMatrix=mtx_r,
                distCoeffs=dist_r,
                P=P2,
                R=R2,
            )
            dataFrame_cam2_undistort.loc(
                axis=1)[scorer_cam2, bp,
                        ["x", "y"]] = points_cam2_remapped.squeeze()
            dataFrame_cam2_undistort.loc(
                axis=1)[scorer_cam2, bp, "likelihood"] = dataframe_cam2.xs(
                    [bp, "likelihood"], level=["bodyparts",
                                               "coords"], axis=1).values

        # Save the undistorted files
        dataFrame_cam1_undistort.sort_index(inplace=True)
        dataFrame_cam2_undistort.sort_index(inplace=True)

    return (
        dataFrame_cam1_undistort,
        dataFrame_cam2_undistort,
        stereo_file[camera_pair],
        path_stereo_file,
    )
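
For reference, the cv2.undistortPoints call at the heart of this function can be exercised in isolation. This is a minimal sketch with synthetic intrinsics; the real matrices come from stereo_params.pickle:

import numpy as np
import cv2

# Synthetic camera matrix and distortion coefficients (assumptions).
K = np.array([[1000.0, 0.0, 640.0],
              [0.0, 1000.0, 360.0],
              [0.0, 0.0, 1.0]])
dist = np.array([-0.2, 0.05, 0.0, 0.0, 0.0])  # k1, k2, p1, p2, k3
pts = np.array([[[100.0, 200.0]], [[640.0, 360.0]]], dtype=np.float32)
# With R = identity and P = K, the undistorted points are mapped back
# into pixel coordinates of the same camera.
undistorted = cv2.undistortPoints(pts, K, dist, R=np.eye(3), P=K)
print(undistorted.squeeze())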
Example 4
def triangulate(
    config,
    video_path,
    videotype="avi",
    filterpredictions=True,
    filtertype="median",
    gputouse=None,
    destfolder=None,
    save_as_csv=False,
):
    """
    This function triangulates the detected DLC-keypoints from the two camera views
    using the camera matrices (derived from calibration) to calculate 3D predictions.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    video_path : string/list of list
        Full path of the directory where videos are saved. If the user wants to analyze
        only a pair of videos, the user needs to pass them as a list of list of videos,
        i.e. [['video1-camera-1.avi','video1-camera-2.avi']]

    videotype: string, optional
        Checks for the extension of the videos in case the input is a directory.\n
        Only videos with this extension are analyzed. The default is ``avi``.

    filterpredictions: Bool, optional
        Filter the predictions with filter specified by "filtertype". If specified it
        should be either ``True`` or ``False``.

    filtertype: string
        Select which filter, 'arima' or 'median' filter (currently supported).

    gputouse: int, optional. Natural number indicating the number of your GPU (see number in nvidia-smi).
        If you do not have a GPU put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video)

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``

    Example
    -------
    Linux/MacOS
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'/data/project1/videos/')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['/data/project1/videos/video1-camera-1.avi','/data/project1/videos/video1-camera-2.avi'],['/data/project1/videos/video2-camera-1.avi','/data/project1/videos/video2-camera-2.avi']])


    Windows
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'C:\\yourusername\\rig-95\\Videos')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['C:\\yourusername\\rig-95\\Videos\\video1-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video1-camera-2.avi'],['C:\\yourusername\\rig-95\\Videos\\video2-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video2-camera-2.avi']])
    """
    from deeplabcut.pose_estimation_tensorflow import predict_videos
    from deeplabcut.post_processing import filtering

    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    scorer_3d = cfg_3d["scorername_3d"]

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str("config_file_" + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(
                str("It seems the file specified in the variable config_file_"
                    + str(cam)) +
                " does not exist. Please edit the config file with correct file path and retry."
            )

    # flag to check if the video_path variable is a string or a list of list
    flag = False  # assumes that video path is a list
    if isinstance(video_path, str) == True:
        flag = True
        video_list = auxiliaryfunctions_3d.get_camerawise_videos(
            video_path, cam_names, videotype=videotype)
    else:
        video_list = video_path

    if video_list == []:
        print("No videos found in the specified video path.", video_path)
        print(
            "Please make sure that the video names are specified with correct camera names as entered in the config file or"
        )
        print(
            "perhaps the videotype is distinct from the videos in the path, I was looking for:",
            videotype,
        )

    print("List of pairs:", video_list)
    scorer_name = {}
    run_triangulate = False
    for i in range(len(video_list)):
        dataname = []
        for j in range(len(video_list[i])):  # looping over cameras
            if cam_names[j] in video_list[i][j]:
                print("Analyzing video %s using %s" %
                      (video_list[i][j], str("config_file_" + cam_names[j])))

                config_2d = snapshots[cam_names[j]]
                cfg = auxiliaryfunctions.read_config(config_2d)
                shuffle = cfg_3d[str("shuffle_" + cam_names[j])]
                trainingsetindex = cfg_3d[str("trainingsetindex_" +
                                              cam_names[j])]
                trainFraction = cfg["TrainingFraction"][trainingsetindex]
                if flag == True:
                    video = os.path.join(video_path, video_list[i][j])
                else:
                    video_path = str(Path(video_list[i][j]).parents[0])
                    video = os.path.join(video_path, video_list[i][j])

                if destfolder is None:
                    destfolder = str(Path(video).parents[0])

                vname = Path(video).stem
                prefix = str(vname).split(cam_names[j])[0]
                suffix = str(vname).split(cam_names[j])[-1]
                if prefix == "":
                    pass
                elif prefix[-1] == "_" or prefix[-1] == "-":
                    prefix = prefix[:-1]

                if suffix == "":
                    pass
                elif suffix[0] == "_" or suffix[0] == "-":
                    suffix = suffix[1:]

                if prefix == "":
                    output_file = os.path.join(destfolder, suffix)
                else:
                    if suffix == "":
                        output_file = os.path.join(destfolder, prefix)
                    else:
                        output_file = os.path.join(destfolder,
                                                   prefix + "_" + suffix)

                output_filename = os.path.join(
                    output_file + "_" + scorer_3d
                )  # Check if the videos are already analyzed for 3d
                if os.path.isfile(output_filename + ".h5"):
                    if save_as_csv is True and not os.path.exists(
                            output_filename + ".csv"):
                        # In case the user sets save_as_csv=True after triangulating
                        pd.read_hdf(output_filename + ".h5").to_csv(
                            str(output_filename + ".csv"))

                    print(
                        "Already analyzed...Checking the meta data for any change in the camera matrices and/or scorer names",
                        vname,
                    )
                    pickle_file = str(output_filename + "_meta.pickle")
                    metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(
                        pickle_file)
                    (
                        img_path,
                        path_corners,
                        path_camera_matrix,
                        path_undistort,
                    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
                    path_stereo_file = os.path.join(path_camera_matrix,
                                                    "stereo_params.pickle")
                    stereo_file = auxiliaryfunctions.read_pickle(
                        path_stereo_file)
                    cam_pair = str(cam_names[0] + "-" + cam_names[1])
                    if_video_analyzed = False  # tracks whether the video was already analyzed
                    # Check for the camera matrix
                    for k in metadata_["stereo_matrix"].keys():
                        if np.all(metadata_["stereo_matrix"][k] ==
                                  stereo_file[cam_pair][k]):
                            pass
                        else:
                            run_triangulate = True

                    # Check for scorer names in the pickle file of 3d output
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations="unknown")

                    if (metadata_["scorer_name"][cam_names[j]] == DLCscorer
                        ):  # TODO: CHECK FOR BOTH?
                        if_video_analyzed = True
                    elif metadata_["scorer_name"][
                            cam_names[j]] == DLCscorerlegacy:
                        if_video_analyzed = True
                    else:
                        if_video_analyzed = False
                        run_triangulate = True

                    if if_video_analyzed:
                        print("This file is already analyzed!")
                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))
                        scorer_name[cam_names[j]] = DLCscorer
                    else:
                        # Analyze video if score name is different
                        DLCscorer = predict_videos.analyze_videos(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            gputouse=gputouse,
                            destfolder=destfolder,
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                        if_video_analyzed = False
                        run_triangulate = True
                        if filterpredictions:
                            filtering.filterpredictions(
                                config_2d,
                                [video],
                                videotype=videotype,
                                shuffle=shuffle,
                                trainingsetindex=trainingsetindex,
                                filtertype=filtertype,
                                destfolder=destfolder,
                            )

                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))

                else:  # need to do the whole jam.
                    DLCscorer = predict_videos.analyze_videos(
                        config_2d,
                        [video],
                        videotype=videotype,
                        shuffle=shuffle,
                        trainingsetindex=trainingsetindex,
                        gputouse=gputouse,
                        destfolder=destfolder,
                    )
                    scorer_name[cam_names[j]] = DLCscorer
                    run_triangulate = True
                    print(destfolder, vname, DLCscorer)
                    if filterpredictions:
                        filtering.filterpredictions(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            filtertype=filtertype,
                            destfolder=destfolder,
                        )
                        dataname.append(
                            os.path.join(destfolder,
                                         vname + DLCscorer + ".h5"))

        if run_triangulate:
            #        if len(dataname)>0:
            # undistort points for this pair
            print("Undistorting...")
            (
                dataFrame_camera1_undistort,
                dataFrame_camera2_undistort,
                stereomatrix,
                path_stereo_file,
            ) = undistort_points(config, dataname,
                                 str(cam_names[0] + "-" + cam_names[1]))
            if len(dataFrame_camera1_undistort) != len(
                    dataFrame_camera2_undistort):
                import warnings

                warnings.warn(
                    "The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry! Excluding the extra frames from the longer video."
                )
                if len(dataFrame_camera1_undistort) > len(
                        dataFrame_camera2_undistort):
                    dataFrame_camera1_undistort = dataFrame_camera1_undistort[:len(
                        dataFrame_camera2_undistort)]
                if len(dataFrame_camera2_undistort) > len(
                        dataFrame_camera1_undistort):
                    dataFrame_camera2_undistort = dataFrame_camera2_undistort[:len(
                        dataFrame_camera1_undistort)]
            #                raise Exception("The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry!")
            X_final = []
            triangulate = []
            scorer_cam1 = dataFrame_camera1_undistort.columns.get_level_values(
                0)[0]
            scorer_cam2 = dataFrame_camera2_undistort.columns.get_level_values(
                0)[0]
            df_3d, scorer_3d, bodyparts = auxiliaryfunctions_3d.create_empty_df(
                dataFrame_camera1_undistort, scorer_3d, flag="3d")
            P1 = stereomatrix["P1"]
            P2 = stereomatrix["P2"]

            print("Computing the triangulation...")
            for bpindex, bp in enumerate(bodyparts):
                # Extract the indices of frames where the likelihood of a bodypart is below the pcutoff in either camera view
                likelihoods = np.array([
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["likelihood"].values[:],
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["likelihood"].values[:],
                ])
                likelihoods = likelihoods.T

                # Flag frames where the likelihood in at least one view is below the pcutoff
                low_likelihood_frames = np.any(likelihoods < pcutoff, axis=1)
                # low_likelihood_frames = np.all(likelihoods < pcutoff, axis=1)

                low_likelihood_frames = np.where(
                    low_likelihood_frames == True)[0]
                points_cam1_undistort = np.array([
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["x"].values[:],
                    dataFrame_camera1_undistort[scorer_cam1][bp]
                    ["y"].values[:],
                ])
                points_cam1_undistort = points_cam1_undistort.T

                # For cam1: assign NaNs to the x and y values of a bodypart where the likelihood is below the pcutoff
                points_cam1_undistort[low_likelihood_frames] = np.nan, np.nan
                points_cam1_undistort = np.expand_dims(points_cam1_undistort,
                                                       axis=1)

                points_cam2_undistort = np.array([
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["x"].values[:],
                    dataFrame_camera2_undistort[scorer_cam2][bp]
                    ["y"].values[:],
                ])
                points_cam2_undistort = points_cam2_undistort.T

                # For cam2: assign NaNs to the x and y values of a bodypart where the likelihood is below the pcutoff
                points_cam2_undistort[low_likelihood_frames] = np.nan, np.nan
                points_cam2_undistort = np.expand_dims(points_cam2_undistort,
                                                       axis=1)

                X_l = auxiliaryfunctions_3d.triangulatePoints(
                    P1, P2, points_cam1_undistort, points_cam2_undistort)

                # ToDo: speed up func. below by saving in numpy.array
                X_final.append(X_l)
            triangulate.append(X_final)
            triangulate = np.asanyarray(triangulate)
            metadata = {}
            metadata["stereo_matrix"] = stereomatrix
            metadata["stereo_matrix_file"] = path_stereo_file
            metadata["scorer_name"] = {
                cam_names[0]: scorer_name[cam_names[0]],
                cam_names[1]: scorer_name[cam_names[1]],
            }

            # Fill the 3d dataframe with the triangulated x, y, z coordinates
            for bpindex, bp in enumerate(bodyparts):
                df_3d.iloc[:][scorer_3d, bp, "x"] = triangulate[0, bpindex,
                                                                0, :]
                df_3d.iloc[:][scorer_3d, bp, "y"] = triangulate[0, bpindex,
                                                                1, :]
                df_3d.iloc[:][scorer_3d, bp, "z"] = triangulate[0, bpindex,
                                                                2, :]

            df_3d.to_hdf(
                str(output_filename + ".h5"),
                "df_with_missing",
                format="table",
                mode="w",
            )
            auxiliaryfunctions_3d.SaveMetadata3d(
                str(output_filename + "_meta.pickle"), metadata)

            if save_as_csv:
                df_3d.to_csv(str(output_filename + ".csv"))

            print("Triangulated data for video", video_list[i])
            print("Results are saved under: ", destfolder)
            # have to make the dest folder none so that it can be updated for a new pair of videos
            if destfolder == str(Path(video).parents[0]):
                destfolder = None

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print(
            "Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d"
        )
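
The geometric core of this function is linear triangulation from two 3x4 projection matrices. A minimal sketch using OpenCV's built-in cv2.triangulatePoints (the code above uses auxiliaryfunctions_3d.triangulatePoints with P1/P2 read from stereo_params.pickle; the matrices below are synthetic):

import numpy as np
import cv2

# Synthetic projection matrices (assumptions): camera 1 at the origin,
# camera 2 translated along the x-axis.
P1 = np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])])
pts1 = np.array([[0.5], [0.2]])  # 2xN arrays of matched image points
pts2 = np.array([[0.3], [0.2]])
X_h = cv2.triangulatePoints(P1, P2, pts1, pts2)  # 4xN homogeneous output
X = X_h[:3] / X_h[3]  # normalize to obtain x, y, z
print(X.ravel())  # -> [2.5, 1.0, 5.0]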
Example 5
def calibrate_cameras(config, cbrow=8, cbcol=6, calibrate=False, alpha=0.4):
    """This function extracts the corners points from the calibration images, calibrates the camera and stores the calibration files in the project folder (defined in the config file).
    
    Make sure you have around 20-60 pairs of calibration images. The function should be used iteratively to select the right set of calibration images. 
    
    A pair of calibration image is considered "correct", if the corners are detected correctly in both the images. It may happen that during the first run of this function, 
    the extracted corners are incorrect or the order of detected corners does not align for the corresponding views (i.e. camera-1 and camera-2 images).
    
    In such a case, remove those pairs of images and re-run this function. Once the right number of calibration images are selected, 
    use the parameter ``calibrate=True`` to calibrate the cameras.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    cbrow : int
        Integer specifying the number of rows in the calibration image.
    
    cbcol : int
        Integer specifying the number of columns in the calibration image.

    calibrate : bool
        If this is set to True, the cameras are calibrated with the current set of calibration images. The default is ``False``.
        Set it to True only after checking the results of the corner detection and removing dysfunctional images!

    alpha: float
        Floating point number between 0 and 1 specifying the free scaling parameter. When alpha = 0, only valid pixels are retained in the rectified images,
        i.e. the rectified images are zoomed in. When alpha = 1, all the pixels from the original images are retained.
        For more details: https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
        
    Example
    --------
    Linux/MacOs/Windows
    >>> deeplabcut.calibrate_cameras(config)

    Once the right set of calibration images is selected,
    >>> deeplabcut.calibrate_cameras(config, calibrate=True)

    """
    # Termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((cbrow * cbcol, 3), np.float32)
    objp[:, :2] = np.mgrid[0:cbcol, 0:cbrow].T.reshape(-1, 2)

    # Read the config file
    cfg_3d = auxiliaryfunctions.read_config(config)
    (
        img_path,
        path_corners,
        path_camera_matrix,
        path_undistort,
    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)

    images = glob.glob(os.path.join(img_path, "*.jpg"))
    cam_names = cfg_3d["camera_names"]

    # update the variable snapshot* in config file according to the name of the cameras
    try:
        for i in range(len(cam_names)):
            cfg_3d[str("config_file_" + cam_names[i])] = cfg_3d.pop(
                str("config_file_camera-" + str(i + 1)))
        for i in range(len(cam_names)):
            cfg_3d[str("shuffle_" + cam_names[i])] = cfg_3d.pop(
                str("shuffle_camera-" + str(i + 1)))
    except:
        pass

    project_path = cfg_3d["project_path"]
    projconfigfile = os.path.join(str(project_path), "config.yaml")
    auxiliaryfunctions.write_config_3d(projconfigfile, cfg_3d)

    # Initialize the dictionary
    img_shape = {}
    objpoints = {}  # 3d point in real world space
    imgpoints = {}  # 2d points in image plane.
    dist_pickle = {}
    stereo_params = {}
    for cam in cam_names:
        objpoints.setdefault(cam, [])
        imgpoints.setdefault(cam, [])
        dist_pickle.setdefault(cam, [])

    # Sort the images.
    images.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    if len(images) == 0:
        raise Exception(
            "No calibration images found. Make sure the calibration images are saved as .jpg and with prefix as the camera name as specified in the config.yaml file."
        )

    for fname in images:
        for cam in cam_names:
            if cam in fname:
                filename = Path(fname).stem
                img = cv2.imread(fname)
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                # Find the chess board corners
                ret, corners = cv2.findChessboardCorners(
                    gray, (cbcol, cbrow), None
                )  #  (8,6) pattern (dimensions = common points of black squares)
                # If found, add object points, image points (after refining them)
                if ret == True:
                    img_shape[cam] = gray.shape[::-1]
                    objpoints[cam].append(objp)
                    corners = cv2.cornerSubPix(gray, corners, (11, 11),
                                               (-1, -1), criteria)
                    imgpoints[cam].append(corners)
                    # Draw the corners and store the images
                    img = cv2.drawChessboardCorners(img, (cbcol, cbrow),
                                                    corners, ret)
                    cv2.imwrite(
                        os.path.join(str(path_corners),
                                     filename + "_corner.jpg"), img)
                else:
                    print("Corners not found for the image %s" %
                          Path(fname).name)
    try:
        h, w = img.shape[:2]
    except:
        raise Exception(
            "It seems that the name of calibration images does not match with the camera names in the config file. Please make sure that the calibration images are named with camera names as specified in the config.yaml file."
        )

    # Perform calibration for each camera and store the matrices as a pickle file
    if calibrate == True:
        # Calibrating each camera
        for cam in cam_names:
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
                objpoints[cam], imgpoints[cam], img_shape[cam], None, None)

            # Save the camera calibration result for later use (we won't use rvecs / tvecs)
            dist_pickle[cam] = {
                "mtx": mtx,
                "dist": dist,
                "objpoints": objpoints[cam],
                "imgpoints": imgpoints[cam],
            }
            pickle.dump(
                dist_pickle,
                open(
                    os.path.join(path_camera_matrix,
                                 cam + "_intrinsic_params.pickle"),
                    "wb",
                ),
            )
            print(
                "Saving intrinsic camera calibration matrices for %s as a pickle file in %s"
                % (cam, os.path.join(path_camera_matrix)))

            # Compute mean re-projection errors for individual cameras
            mean_error = 0
            for i in range(len(objpoints[cam])):
                imgpoints_proj, _ = cv2.projectPoints(objpoints[cam][i],
                                                      rvecs[i], tvecs[i], mtx,
                                                      dist)
                error = cv2.norm(imgpoints[cam][i], imgpoints_proj,
                                 cv2.NORM_L2) / len(imgpoints_proj)
                mean_error += error
            print("Mean re-projection error for %s images: %.3f pixels " %
                  (cam, mean_error / len(objpoints[cam])))

        # Compute stereo calibration for each pair of cameras
        camera_pair = [[cam_names[0], cam_names[1]]]
        for pair in camera_pair:
            print("Computing stereo calibration for " % pair)
            (
                retval,
                cameraMatrix1,
                distCoeffs1,
                cameraMatrix2,
                distCoeffs2,
                R,
                T,
                E,
                F,
            ) = cv2.stereoCalibrate(
                objpoints[pair[0]],
                imgpoints[pair[0]],
                imgpoints[pair[1]],
                dist_pickle[pair[0]]["mtx"],
                dist_pickle[pair[0]]["dist"],
                dist_pickle[pair[1]]["mtx"],
                dist_pickle[pair[1]]["dist"],
                (w, h),  # OpenCV expects imageSize as (width, height)
                flags=cv2.CALIB_FIX_INTRINSIC,
            )

            # Stereo Rectification
            rectify_scale = (
                alpha
            )  # Free scaling parameter check this https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#fisheye-stereorectify
            R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
                cameraMatrix1,
                distCoeffs1,
                cameraMatrix2,
                distCoeffs2,
                (w, h),  # OpenCV expects imageSize as (width, height)
                R,
                T,
                alpha=rectify_scale,
            )

            stereo_params[pair[0] + "-" + pair[1]] = {
                "cameraMatrix1": cameraMatrix1,
                "cameraMatrix2": cameraMatrix2,
                "distCoeffs1": distCoeffs1,
                "distCoeffs2": distCoeffs2,
                "R": R,
                "T": T,
                "E": E,
                "F": F,
                "R1": R1,
                "R2": R2,
                "P1": P1,
                "P2": P2,
                "roi1": roi1,
                "roi2": roi2,
                "Q": Q,
                "image_shape": [img_shape[pair[0]], img_shape[pair[1]]],
            }

        print(
            "Saving the stereo parameters for every pair of cameras as a pickle file in %s"
            % str(os.path.join(path_camera_matrix)))

        auxiliaryfunctions.write_pickle(
            os.path.join(path_camera_matrix, "stereo_params.pickle"),
            stereo_params)
        print(
            "Camera calibration done! Use the function ``check_undistortion`` to check the calibration"
        )
    else:
        print(
            "Corners extracted! You may check the extracted corners in the directory %s and remove the pairs of images where the corners are incorrectly detected. If all the corners are detected correctly and in the right order, then re-run the same function with the flag ``calibrate=True`` to calibrate the cameras."
            % str(path_corners))
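
A hypothetical two-pass workflow (the config path is a placeholder): first extract and visually inspect the corners, then calibrate once the bad image pairs have been removed:

# Sketch only: the config path is an assumption.
calibrate_cameras("/home/user/myproject-3d/config.yaml", cbrow=8, cbcol=6)
# ... inspect the images in path_corners, delete bad pairs, then:
calibrate_cameras("/home/user/myproject-3d/config.yaml", cbrow=8, cbcol=6,
                  calibrate=True, alpha=0.9)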
Example 6
def check_undistortion(config, cbrow=8, cbcol=6, plot=True):
    """
    This function undistorts the calibration images based on the camera matrices and stores them in the project folder (defined in the config file)
    to visually check if the camera matrices are correct.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    cbrow : int
        Int specifying the number of rows in the calibration image.
    
    cbcol : int
        Int specifying the number of columns in the calibration image.

    plot : bool
        If this is set to True, the results of undistortion are saved as plots. The default is ``True``; if provided it must be either ``True`` or ``False``.

    Example
    --------
    Linux/MacOs/Windows
    >>> deeplabcut.check_undistortion(config, cbrow = 8,cbcol = 6)

    """

    # Read the config file
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    cfg_3d = auxiliaryfunctions.read_config(config)
    (
        img_path,
        path_corners,
        path_camera_matrix,
        path_undistort,
    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)

    # colormap = plt.get_cmap(cfg_3d['colormap'])
    markerSize = cfg_3d["dotsize"]
    alphaValue = cfg_3d["alphaValue"]
    markerType = cfg_3d["markerType"]
    markerColor = cfg_3d["markerColor"]
    cam_names = cfg_3d["camera_names"]

    images = glob.glob(os.path.join(img_path, "*.jpg"))

    # Sort the images
    images.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    """
    for fname in images:
        for cam in cam_names:
            if cam in fname:
                filename = Path(fname).stem
                ext = Path(fname).suffix
                img = cv2.imread(fname)
                gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    """
    camera_pair = [[cam_names[0], cam_names[1]]]
    stereo_params = auxiliaryfunctions.read_pickle(
        os.path.join(path_camera_matrix, "stereo_params.pickle"))

    for pair in camera_pair:
        map1_x, map1_y = cv2.initUndistortRectifyMap(
            stereo_params[pair[0] + "-" + pair[1]]["cameraMatrix1"],
            stereo_params[pair[0] + "-" + pair[1]]["distCoeffs1"],
            stereo_params[pair[0] + "-" + pair[1]]["R1"],
            stereo_params[pair[0] + "-" + pair[1]]["P1"],
            (stereo_params[pair[0] + "-" + pair[1]]["image_shape"][0]),
            cv2.CV_16SC2,
        )
        map2_x, map2_y = cv2.initUndistortRectifyMap(
            stereo_params[pair[0] + "-" + pair[1]]["cameraMatrix2"],
            stereo_params[pair[0] + "-" + pair[1]]["distCoeffs2"],
            stereo_params[pair[0] + "-" + pair[1]]["R2"],
            stereo_params[pair[0] + "-" + pair[1]]["P2"],
            (stereo_params[pair[0] + "-" + pair[1]]["image_shape"][1]),
            cv2.CV_16SC2,
        )
        cam1_undistort = []
        cam2_undistort = []

        for fname in images:
            if pair[0] in fname:
                filename = Path(fname).stem
                img1 = cv2.imread(fname)
                gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
                h, w = img1.shape[:2]
                _, corners1 = cv2.findChessboardCorners(
                    gray1, (cbcol, cbrow), None)
                corners_origin1 = cv2.cornerSubPix(gray1, corners1, (11, 11),
                                                   (-1, -1), criteria)

                # Remapping dataFrame_camera1_undistort
                im_remapped1 = cv2.remap(img1, map1_x, map1_y,
                                         cv2.INTER_LANCZOS4)
                imgpoints_proj_undistort = cv2.undistortPoints(
                    src=corners_origin1,
                    cameraMatrix=stereo_params[pair[0] + "-" +
                                               pair[1]]["cameraMatrix1"],
                    distCoeffs=stereo_params[pair[0] + "-" +
                                             pair[1]]["distCoeffs1"],
                    P=stereo_params[pair[0] + "-" + pair[1]]["P1"],
                    R=stereo_params[pair[0] + "-" + pair[1]]["R1"],
                )
                cam1_undistort.append(imgpoints_proj_undistort)
                cv2.imwrite(
                    os.path.join(str(path_undistort),
                                 filename + "_undistort.jpg"),
                    im_remapped1,
                )
                imgpoints_proj_undistort = []

            elif pair[1] in fname:
                filename = Path(fname).stem
                img2 = cv2.imread(fname)
                gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
                h, w = img2.shape[:2]
                _, corners2 = cv2.findChessboardCorners(
                    gray2, (cbcol, cbrow), None)
                corners_origin2 = cv2.cornerSubPix(gray2, corners2, (11, 11),
                                                   (-1, -1), criteria)

                # Remapping
                im_remapped2 = cv2.remap(img2, map2_x, map2_y,
                                         cv2.INTER_LANCZOS4)
                imgpoints_proj_undistort2 = cv2.undistortPoints(
                    src=corners_origin2,
                    cameraMatrix=stereo_params[pair[0] + "-" +
                                               pair[1]]["cameraMatrix2"],
                    distCoeffs=stereo_params[pair[0] + "-" +
                                             pair[1]]["distCoeffs2"],
                    P=stereo_params[pair[0] + "-" + pair[1]]["P2"],
                    R=stereo_params[pair[0] + "-" + pair[1]]["R2"],
                )
                cam2_undistort.append(imgpoints_proj_undistort2)
                cv2.imwrite(
                    os.path.join(str(path_undistort),
                                 filename + "_undistort.jpg"),
                    im_remapped2,
                )
                imgpoints_proj_undistort2 = []

        cam1_undistort = np.array(cam1_undistort)
        cam2_undistort = np.array(cam2_undistort)
        print("All images are undistorted and stored in %s" %
              str(path_undistort))
        print(
            "Use the function ``triangulate`` to undistort the dataframes and compute the triangulation"
        )

        if plot == True:
            f1, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
            f1.suptitle(
                str("Original Image: Views from " + pair[0] + " and " +
                    pair[1]),
                fontsize=25,
            )

            # Display images in RGB
            ax1.imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
            ax2.imshow(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))

            norm = mcolors.Normalize(vmin=0.0, vmax=cam1_undistort.shape[1])
            plt.savefig(os.path.join(str(path_undistort),
                                     "Original_Image.png"))

            # Plot the undistorted corner points
            f2, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
            f2.suptitle("Undistorted corner points on camera-1 and camera-2",
                        fontsize=25)
            ax1.imshow(cv2.cvtColor(im_remapped1, cv2.COLOR_BGR2RGB))
            ax2.imshow(cv2.cvtColor(im_remapped2, cv2.COLOR_BGR2RGB))
            for i in range(0, cam1_undistort.shape[1]):
                ax1.scatter(
                    [cam1_undistort[-1][i, 0, 0]],
                    [cam1_undistort[-1][i, 0, 1]],
                    marker=markerType,
                    s=markerSize,
                    color=markerColor,
                    alpha=alphaValue,
                )
                ax2.scatter(
                    [cam2_undistort[-1][i, 0, 0]],
                    [cam2_undistort[-1][i, 0, 1]],
                    marker=markerType,
                    s=markerSize,
                    color=markerColor,
                    alpha=alphaValue,
                )
            plt.savefig(
                os.path.join(str(path_undistort), "undistorted_points.png"))

            # Triangulate
            triangulate = auxiliaryfunctions_3d.compute_triangulation_calibration_images(
                stereo_params[pair[0] + "-" + pair[1]],
                cam1_undistort,
                cam2_undistort,
                path_undistort,
                cfg_3d,
                plot=True,
            )
            auxiliaryfunctions.write_pickle("triangulate.pickle", triangulate)
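
The undistortion applied to each image above is the standard two-step OpenCV pattern: compute the rectification maps once, then remap. A standalone sketch with synthetic parameters (the real ones come from stereo_params.pickle):

import numpy as np
import cv2

K = np.array([[1000.0, 0.0, 640.0],
              [0.0, 1000.0, 360.0],
              [0.0, 0.0, 1.0]])
dist = np.zeros(5)  # synthetic: zero distortion (assumption)
size = (1280, 720)  # (width, height)
# With R = identity and new camera matrix = K, the maps describe pure
# undistortion without rectification.
map_x, map_y = cv2.initUndistortRectifyMap(K, dist, np.eye(3), K, size,
                                           cv2.CV_16SC2)
img = np.zeros((720, 1280, 3), dtype=np.uint8)  # placeholder image
im_remapped = cv2.remap(img, map_x, map_y, cv2.INTER_LANCZOS4)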
Example 7
    def retrieveData_and_computeEpLines(self, img, imNum):

        # load labeledPoints and fundamental Matrix

        if self.config3d is not None:
            cfg_3d = auxiliaryfunctions.read_config(self.config3d)
            cams = cfg_3d["camera_names"]
            path_camera_matrix = auxiliaryfunctions_3d.Foldernames3Dproject(
                cfg_3d)[2]
            path_stereo_file = os.path.join(path_camera_matrix,
                                            "stereo_params.pickle")
            stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)

            for cam in cams:
                if cam in img:
                    labelCam = cam
                    if self.sourceCam is None:
                        sourceCam = [
                            otherCam for otherCam in cams
                            if cam not in otherCam
                        ][0]  # WHY?
                    else:
                        sourceCam = self.sourceCam

            sourceCamIdx = np.where(np.array(cams) == sourceCam)[0][0]
            labelCamIdx = np.where(np.array(cams) == labelCam)[0][0]

            if sourceCamIdx < labelCamIdx:
                camera_pair = cams[sourceCamIdx] + "-" + cams[labelCamIdx]
                sourceCam_numInPair = 1
            else:
                camera_pair = cams[labelCamIdx] + "-" + cams[sourceCamIdx]
                sourceCam_numInPair = 2

            fundMat = stereo_file[camera_pair]["F"]
            sourceCam_path = os.path.split(img.replace(labelCam, sourceCam))[0]

            cfg = auxiliaryfunctions.read_config(self.config)
            scorer = cfg["scorer"]

            try:
                dataFrame = pd.read_hdf(
                    os.path.join(sourceCam_path,
                                 "CollectedData_" + scorer + ".h5"))
                dataFrame.sort_index(inplace=True)
            except IOError:
                print(
                    "source camera images have not yet been labeled, or you have opened this folder in the wrong mode!"
                )
                return None, None, None

            # Find offset terms for drawing epipolar Lines
            # Get crop params for camera being labeled
            foundEvent = 0
            eventSearch = re.compile(os.path.split(os.path.split(img)[0])[1])
            cropPattern = re.compile("[0-9]{1,4}")
            with open(self.config, "rt") as config:
                for line in config:
                    if foundEvent == 1:
                        crop_labelCam = np.int32(re.findall(cropPattern, line))
                        break
                    if eventSearch.search(line) != None:
                        foundEvent = 1
            # Get crop params for other camera
            foundEvent = 0
            eventSearch = re.compile(os.path.split(sourceCam_path)[1])
            cropPattern = re.compile("[0-9]{1,4}")
            with open(self.config, "rt") as config:
                for line in config:
                    if foundEvent == 1:
                        crop_sourceCam = np.int32(re.findall(
                            cropPattern, line))
                        break
                    if eventSearch.search(line) != None:
                        foundEvent = 1

            labelCam_offsets = [crop_labelCam[0], crop_labelCam[2]]
            sourceCam_offsets = [crop_sourceCam[0], crop_sourceCam[2]]

            sourceCam_pts = np.asarray(dataFrame, dtype=np.int32)
            sourceCam_pts = sourceCam_pts.reshape(
                (sourceCam_pts.shape[0], int(sourceCam_pts.shape[1] / 2), 2))
            sourceCam_pts = np.moveaxis(sourceCam_pts, [0, 1, 2], [1, 0, 2])
            sourceCam_pts[...,
                          0] = sourceCam_pts[..., 0] + sourceCam_offsets[0]
            sourceCam_pts[...,
                          1] = sourceCam_pts[..., 1] + sourceCam_offsets[1]

            sourcePts = sourceCam_pts[:, imNum, :]

            epLines_source2label = cv2.computeCorrespondEpilines(
                sourcePts, int(sourceCam_numInPair), fundMat)
            epLines_source2label.reshape(-1, 3)

            return epLines_source2label, sourcePts, labelCam_offsets

        else:
            return None, None, None
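
cv2.computeCorrespondEpilines returns, for each point in one view, the coefficients (a, b, c) of the epipolar line a*x + b*y + c = 0 in the other view. A minimal sketch with a synthetic fundamental matrix (the method above reads F from stereo_params.pickle):

import numpy as np
import cv2

# Synthetic rank-2 fundamental matrix (assumption): the skew-symmetric
# form [t]_x of a purely translating, calibrated camera pair.
F = np.array([[0.0, -1e-4, 1e-2],
              [1e-4, 0.0, -2e-2],
              [-1e-2, 2e-2, 0.0]])
pts = np.array([[[100.0, 200.0]], [[300.0, 400.0]]], dtype=np.float32)
# whichImage=1: the input points lie in the first image of the pair.
lines = cv2.computeCorrespondEpilines(pts, 1, F).reshape(-1, 3)
print(lines)  # one (a, b, c) triple per input point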
Example 8
def triangulate(
    config,
    video_path,
    videotype="",
    filterpredictions=True,
    filtertype="median",
    gputouse=None,
    destfolder=None,
    save_as_csv=False,
    track_method="",
):
    """
    This function triangulates the detected DLC-keypoints from the two camera views
    using the camera matrices (derived from calibration) to calculate 3D predictions.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    video_path : string/list of list
        Full path of the directory where videos are saved. If the user wants to analyze
        only a pair of videos, the user needs to pass them as a list of list of videos,
        i.e. [['video1-camera-1.avi','video1-camera-2.avi']]

    videotype: string, optional
        Checks for the extension of the video in case the input to the video is a directory.\n Only videos with this extension are analyzed.
        If left unspecified, videos with common extensions ('avi', 'mp4', 'mov', 'mpeg', 'mkv') are kept.


    filterpredictions: Bool, optional
        Filter the predictions with filter specified by "filtertype". If specified it
        should be either ``True`` or ``False``.

    filtertype: string
        Select which filter, 'arima' or 'median' filter (currently supported).

    gputouse: int, optional. Natural number indicating the number of your GPU (see number in nvidia-smi).
        If you do not have a GPU put None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    destfolder: string, optional
        Specifies the destination folder for analysis data (default is the path of the video)

    save_as_csv: bool, optional
        Saves the predictions in a .csv file. The default is ``False``

    track_method: string, optional
        Specifies the tracker used to generate the data for multi-animal
        projects. Empty by default (single-animal projects); otherwise
        typically one of "box", "skeleton", or "ellipse".

    Example
    -------
    Linux/MacOS
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'/data/project1/videos/')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['/data/project1/videos/video1-camera-1.avi','/data/project1/videos/video1-camera-2.avi'],['/data/project1/videos/video2-camera-1.avi','/data/project1/videos/video2-camera-2.avi']])


    Windows
    To analyze all the videos in the directory:
    >>> deeplabcut.triangulate(config,'C:\\yourusername\\rig-95\\Videos')

    To analyze only a few pairs of videos:
    >>> deeplabcut.triangulate(config,[['C:\\yourusername\\rig-95\\Videos\\video1-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video1-camera-2.avi'],['C:\\yourusername\\rig-95\\Videos\\video2-camera-1.avi','C:\\yourusername\\rig-95\\Videos\\video2-camera-2.avi']])
    """
    from deeplabcut.pose_estimation_tensorflow import predict_videos
    from deeplabcut.post_processing import filtering

    cfg_3d = auxiliaryfunctions.read_config(config)
    cam_names = cfg_3d["camera_names"]
    pcutoff = cfg_3d["pcutoff"]
    scorer_3d = cfg_3d["scorername_3d"]

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str("config_file_" + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(
                str(
                    "It seems the file specified in the variable config_file_"
                    + str(cam)
                )
                + " does not exist. Please edit the config file with correct file path and retry."
            )

    # flag to check if the video_path variable is a string or a list of list
    flag = False  # assumes that video path is a list
    if isinstance(video_path, str) == True:
        flag = True
        video_list = auxiliaryfunctions_3d.get_camerawise_videos(
            video_path, cam_names, videotype=videotype
        )
    else:
        video_list = video_path

    if video_list == []:
        print("No videos found in the specified video path.", video_path)
        print(
            "Please make sure that the video names are specified with correct camera names as entered in the config file or"
        )
        print(
            "perhaps the videotype is distinct from the videos in the path, I was looking for:",
            videotype,
        )

    print("List of pairs:", video_list)
    scorer_name = {}
    run_triangulate = False
    for i in range(len(video_list)):
        dataname = []
        for j in range(len(video_list[i])):  # looping over cameras
            if cam_names[j] in video_list[i][j]:
                print(
                    "Analyzing video %s using %s"
                    % (video_list[i][j], str("config_file_" + cam_names[j]))
                )

                config_2d = snapshots[cam_names[j]]
                cfg = auxiliaryfunctions.read_config(config_2d)

                # Get track_method and do related checks
                track_method = auxfun_multianimal.get_track_method(
                    cfg, track_method=track_method
                )
                if len(cfg.get("multianimalbodyparts", [])) == 1 and track_method != "box":
                    warnings.warn(
                        "Switching to `box` tracker for single point tracking..."
                    )
                    track_method = "box"

                # Get track method suffix
                tr_method_suffix = TRACK_METHODS.get(track_method, "")

                shuffle = cfg_3d[str("shuffle_" + cam_names[j])]
                trainingsetindex = cfg_3d[str("trainingsetindex_" + cam_names[j])]
                trainFraction = cfg["TrainingFraction"][trainingsetindex]
                if flag == True:
                    video = os.path.join(video_path, video_list[i][j])
                else:
                    video_path = str(Path(video_list[i][j]).parents[0])
                    video = os.path.join(video_path, video_list[i][j])

                if destfolder is None:
                    destfolder = str(Path(video).parents[0])

                vname = Path(video).stem
                prefix = str(vname).split(cam_names[j])[0]
                suffix = str(vname).split(cam_names[j])[-1]
                if prefix == "":
                    pass
                elif prefix[-1] == "_" or prefix[-1] == "-":
                    prefix = prefix[:-1]

                if suffix == "":
                    pass
                elif suffix[0] == "_" or suffix[0] == "-":
                    suffix = suffix[1:]

                if prefix == "":
                    output_file = os.path.join(destfolder, suffix)
                else:
                    if suffix == "":
                        output_file = os.path.join(destfolder, prefix)
                    else:
                        output_file = os.path.join(destfolder, prefix + "_" + suffix)

                output_filename = os.path.join(
                    output_file + "_" + scorer_3d
                )  # Check if the videos are already analyzed for 3d
                if os.path.isfile(output_filename + ".h5"):
                    if save_as_csv is True and not os.path.exists(
                        output_filename + ".csv"
                    ):
                        # In case the user sets save_as_csv=True after triangulating
                        pd.read_hdf(output_filename + ".h5").to_csv(
                            str(output_filename + ".csv")
                        )

                    print(
                        "Already analyzed...Checking the meta data for any change in the camera matrices and/or scorer names",
                        vname,
                    )
                    pickle_file = str(output_filename + "_meta.pickle")
                    metadata_ = auxiliaryfunctions_3d.LoadMetadata3d(pickle_file)
                    (
                        img_path,
                        path_corners,
                        path_camera_matrix,
                        path_undistort,
                        _,
                    ) = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
                    path_stereo_file = os.path.join(
                        path_camera_matrix, "stereo_params.pickle"
                    )
                    stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
                    cam_pair = str(cam_names[0] + "-" + cam_names[1])
                    is_video_analyzed = False  # tracks whether the video was already analyzed
                    # Check for the camera matrix
                    for k in metadata_["stereo_matrix"].keys():
                        if np.all(
                            metadata_["stereo_matrix"][k] == stereo_file[cam_pair][k]
                        ):
                            pass
                        else:
                            run_triangulate = True

                    # Check for scorer names in the pickle file of 3d output
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg, shuffle, trainFraction, trainingsiterations="unknown"
                    )

                    if (
                        metadata_["scorer_name"][cam_names[j]] == DLCscorer
                    ):  # TODO: CHECK FOR BOTH?
                        is_video_analyzed = True
                    elif metadata_["scorer_name"][cam_names[j]] == DLCscorerlegacy:
                        is_video_analyzed = True
                    else:
                        is_video_analyzed = False
                        run_triangulate = True

                    if is_video_analyzed:
                        print("This file is already analyzed!")
                        dataname.append(
                            os.path.join(
                                destfolder, vname + DLCscorer + tr_method_suffix + ".h5"
                            )
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                    else:
                        # Analyze the video if the scorer name is different
                        DLCscorer = predict_videos.analyze_videos(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            gputouse=gputouse,
                            destfolder=destfolder,
                        )
                        scorer_name[cam_names[j]] = DLCscorer
                        is_video_analyzed = False
                        run_triangulate = True
                        suffix = tr_method_suffix
                        if filterpredictions:
                            filtering.filterpredictions(
                                config_2d,
                                [video],
                                videotype=videotype,
                                shuffle=shuffle,
                                trainingsetindex=trainingsetindex,
                                filtertype=filtertype,
                                destfolder=destfolder,
                            )
                            suffix += "_filtered"

                        dataname.append(
                            os.path.join(
                                destfolder, vname + DLCscorer + suffix + ".h5"
                            )
                        )

                else:  # need to do the whole jam.
                    DLCscorer = predict_videos.analyze_videos(
                        config_2d,
                        [video],
                        videotype=videotype,
                        shuffle=shuffle,
                        trainingsetindex=trainingsetindex,
                        gputouse=gputouse,
                        destfolder=destfolder,
                    )
                    scorer_name[cam_names[j]] = DLCscorer
                    run_triangulate = True
                    print(destfolder, vname, DLCscorer)
                    if filterpredictions:
                        filtering.filterpredictions(
                            config_2d,
                            [video],
                            videotype=videotype,
                            shuffle=shuffle,
                            trainingsetindex=trainingsetindex,
                            filtertype=filtertype,
                            destfolder=destfolder,
                        )
                        dataname.append(
                            os.path.join(
                                destfolder, vname + DLCscorer + tr_method_suffix + "_filtered.h5"
                            )
                        )

        if run_triangulate:
            #        if len(dataname)>0:
            # undistort points for this pair
            print("Undistorting...")
            (
                dataFrame_camera1_undistort,
                dataFrame_camera2_undistort,
                stereomatrix,
                path_stereo_file,
            ) = undistort_points(
                config, dataname, str(cam_names[0] + "-" + cam_names[1])
            )
            if len(dataFrame_camera1_undistort) != len(dataFrame_camera2_undistort):
                import warnings

                warnings.warn(
                    "The number of frames does not match between the two videos. Make sure your videos have the same number of frames, then retry! Excluding the extra frames from the longer video."
                )
                if len(dataFrame_camera1_undistort) > len(dataFrame_camera2_undistort):
                    dataFrame_camera1_undistort = dataFrame_camera1_undistort[
                        : len(dataFrame_camera2_undistort)
                    ]
                if len(dataFrame_camera2_undistort) > len(dataFrame_camera1_undistort):
                    dataFrame_camera2_undistort = dataFrame_camera2_undistort[
                        : len(dataFrame_camera1_undistort)
                    ]
            #                raise Exception("The number of frames do not match in the two videos. Please make sure that your videos have same number of frames and then retry!")
            scorer_cam1 = dataFrame_camera1_undistort.columns.get_level_values(0)[0]
            scorer_cam2 = dataFrame_camera2_undistort.columns.get_level_values(0)[0]

            scorer_3d = scorer_cam1
            bodyparts = dataFrame_camera1_undistort.columns.get_level_values(
                "bodyparts"
            ).unique()

            P1 = stereomatrix["P1"]
            P2 = stereomatrix["P2"]
            F = stereomatrix["F"]

            print("Computing the triangulation...")

            num_frames = dataFrame_camera1_undistort.shape[0]
            ### Assign nan to [X,Y] of low likelihood predictions ###
            # Convert the data to a np array to easily mask out the low likelihood predictions
            data_cam1_tmp = dataFrame_camera1_undistort.to_numpy().reshape(
                (num_frames, -1, 3)
            )
            data_cam2_tmp = dataFrame_camera2_undistort.to_numpy().reshape(
                (num_frames, -1, 3)
            )
            # Assign [X,Y] = nan to low likelihood predictions
            data_cam1_tmp[data_cam1_tmp[..., 2] < pcutoff, :2] = np.nan
            data_cam2_tmp[data_cam2_tmp[..., 2] < pcutoff, :2] = np.nan

            # Reshape data back to original shape
            data_cam1_tmp = data_cam1_tmp.reshape(num_frames, -1)
            data_cam2_tmp = data_cam2_tmp.reshape(num_frames, -1)

            # put data back to the dataframes
            dataFrame_camera1_undistort[:] = data_cam1_tmp
            dataFrame_camera2_undistort[:] = data_cam2_tmp

            if cfg.get("multianimalproject"):
                # Check individuals are the same in both views
                individuals_view1 = (
                    dataFrame_camera1_undistort.columns.get_level_values("individuals")
                    .unique()
                    .to_list()
                )
                individuals_view2 = (
                    dataFrame_camera2_undistort.columns.get_level_values("individuals")
                    .unique()
                    .to_list()
                )
                if individuals_view1 != individuals_view2:
                    raise ValueError(
                        "The individuals do not match between the two DataFrames"
                    )

                # Cross-view match individuals
                _, voting = auxiliaryfunctions_3d.cross_view_match_dataframes(
                    dataFrame_camera1_undistort, dataFrame_camera2_undistort, F
                )
            else:
                # Create dummy variables for the single-animal case
                individuals_view1 = ["indie"]
                voting = {0: 0}

            # Cleaner variable (since inds view1 == inds view2)
            individuals = individuals_view1

            # Reshape: (num_frames, num_individuals, num_bodyparts, 2)
            all_points_cam1 = dataFrame_camera1_undistort.to_numpy().reshape(
                (num_frames, len(individuals), -1, 3)
            )[..., :2]
            all_points_cam2 = dataFrame_camera2_undistort.to_numpy().reshape(
                (num_frames, len(individuals), -1, 3)
            )[..., :2]

            # Triangulate data
            triangulate = []
            for i, _ in enumerate(individuals):
                # i is individual in view 1
                # voting[i] is the matched individual in view 2

                pts_indv_cam1 = all_points_cam1[:, i].reshape((-1, 2)).T
                pts_indv_cam2 = all_points_cam2[:, voting[i]].reshape((-1, 2)).T

                indv_points_3d = auxiliaryfunctions_3d.triangulatePoints(
                    P1, P2, pts_indv_cam1, pts_indv_cam2
                )

                indv_points_3d = indv_points_3d[:3].T.reshape((num_frames, -1, 3))

                triangulate.append(indv_points_3d)

            triangulate = np.asanyarray(triangulate)
            metadata = {}
            metadata["stereo_matrix"] = stereomatrix
            metadata["stereo_matrix_file"] = path_stereo_file
            metadata["scorer_name"] = {
                cam_names[0]: scorer_name[cam_names[0]],
                cam_names[1]: scorer_name[cam_names[1]],
            }

            # Create 3D DataFrame column and row indices
            axis_labels = ("x", "y", "z")
            if cfg.get("multianimalproject"):
                columns = pd.MultiIndex.from_product(
                    [[scorer_3d], individuals, bodyparts, axis_labels],
                    names=["scorer", "individuals", "bodyparts", "coords"],
                )

            else:
                columns = pd.MultiIndex.from_product(
                    [[scorer_3d], bodyparts, axis_labels],
                    names=["scorer", "bodyparts", "coords"],
                )

            inds = range(num_frames)

            # Swap num_animals with num_frames axes to ensure well-behaving reshape
            triangulate = triangulate.swapaxes(0, 1).reshape((num_frames, -1))

            # Fill up 3D dataframe
            df_3d = pd.DataFrame(triangulate, columns=columns, index=inds)

            df_3d.to_hdf(
                str(output_filename + ".h5"),
                "df_with_missing",
                format="table",
                mode="w",
            )

            # Reorder 2D dataframe in view 2 to match order of view 1
            if cfg.get("multianimalproject"):
                df_2d_view2 = pd.read_hdf(dataname[1])
                individuals_order = [individuals[i] for i in list(voting.values())]
                df_2d_view2 = auxfun_multianimal.reorder_individuals_in_df(df_2d_view2, individuals_order)
                df_2d_view2.to_hdf(dataname[1], "tracks", format="table", mode="w",)

            auxiliaryfunctions_3d.SaveMetadata3d(
                str(output_filename + "_meta.pickle"), metadata
            )

            if save_as_csv:
                df_3d.to_csv(str(output_filename + ".csv"))

            print("Triangulated data for video", video_list[i])
            print("Results are saved under: ", destfolder)
            # Reset destfolder to None so it can be recomputed for the next pair of videos
            if destfolder == str(Path(video).parents[0]):
                destfolder = None

    if len(video_list) > 0:
        print("All videos were analyzed...")
        print("Now you can create 3D video(s) using deeplabcut.create_labeled_video_3d")
Example 9
def step3C_clean_missed_corners(config3d, fname_with_issue=None, **kwargs):
    cfg_3d = auxiliaryfunctions.read_config(config3d)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    cam_names = cfg_3d['camera_names']

    need_valid_again = False
    if fname_with_issue is None:
        fname_with_issue = glob.glob(os.path.join(img_path, '*.jpg'))
        need_valid_again = True

    bad_images = []

    issue_path = os.path.join(img_path, "../issues")

    if len(fname_with_issue) > 0:
        os.makedirs(issue_path, exist_ok=True)

    for fname in fname_with_issue:
        for cam in cam_names:
            if cam in fname:
                filename = Path(fname).stem
                corner_filename = os.path.join(str(path_corners), filename + '_corner.jpg')
                if need_valid_again:
                    if not os.path.exists(corner_filename):
                        if fname not in bad_images:
                            bad_images.append(fname)

                        for another_cam in cam_names:
                            if another_cam == cam:
                                continue
                            another_side = fname.replace(cam, another_cam)
                            if another_side not in bad_images:
                                bad_images.append(another_side)
                            #print("Adding pair {} and {} to bad_images".format(fname, another_side))
                else:
                    if fname not in bad_images:
                        bad_images.append(fname)

                    for another_cam in cam_names:
                        if another_cam == cam:
                            continue
                        another_side = fname.replace(cam, another_cam)
                        if another_side not in bad_images:
                            bad_images.append(another_side)
                        #print("Adding pair {} and {} to bad_images".format(fname, another_side))

    #if len(bad_images) > 0:
    #    print("start to remove files in total:{}".format(len(bad_images)))
    #else:
    #    print("Nothing to clean")

    for fname in bad_images:
        for cam in cam_names:
            if cam in fname:
                filename = Path(fname).stem
                corner_filename = os.path.join(str(path_corners), filename + '_corner.jpg')
                if os.path.exists(corner_filename):
                    # os.remove(corner_filename)
                    shutil.move(corner_filename, issue_path)
                    #print("Moved: {}".format(corner_filename))

        origin_img_file = os.path.join(img_path, fname)
        # os.remove(origin_img_file)
        if os.path.exists(origin_img_file):
            shutil.move(origin_img_file, issue_path)
            #print("Moved: {}".format(origin_img_file))
        else:
            #print("unexcepted file missing: {}".format(origin_img_file))
            print("")
Example 10
def step3D_calibrate_cameras(config, cbrow=8, cbcol=6, calibrate=False, alpha=0.4):

    # Termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # Prepare object points, like (0,0,0), (1,0,0), (2,0,0), ..., (cbcol-1, cbrow-1, 0)
    objp = np.zeros((cbrow * cbcol, 3), np.float32)
    objp[:, :2] = np.mgrid[0:cbcol, 0:cbrow].T.reshape(-1, 2)

    # Read the config file
    cfg_3d = auxiliaryfunctions.read_config(config)
    img_path,path_corners,path_camera_matrix,path_undistort=auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)

    images = glob.glob(os.path.join(img_path,'*.jpg'))
    cam_names = cfg_3d['camera_names']

    # # update the variable snapshot* in config file according to the name of the cameras
    # try:
    #     for i in range(len(cam_names)):
    #         cfg_3d[str('config_file_'+cam_names[i])] = cfg_3d.pop(str('config_file_camera-'+str(i+1)))
    #     for i in range(len(cam_names)):
    #         cfg_3d[str('shuffle_'+cam_names[i])] = cfg_3d.pop(str('shuffle_camera-'+str(i+1)))
    # except:
    #     pass

    project_path = cfg_3d['project_path']
    projconfigfile=os.path.join(str(project_path),'config.yaml')
    auxiliaryfunctions.write_config_3d(projconfigfile,cfg_3d)

    # Initialize the dictionary
    img_shape = {}
    objpoints = {} # 3d point in real world space
    imgpoints = {} # 2d points in image plane
    dist_pickle = {}
    stereo_params= {}
    for cam in cam_names:
        objpoints.setdefault(cam, [])
        imgpoints.setdefault(cam, [])
        dist_pickle.setdefault(cam, [])

    # Sort the images.
    images.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
    if len(images) == 0:
        raise Exception("No calibration images found. Make sure the calibration images are saved as .jpg files prefixed with the camera names specified in config.yaml.")

    fname_with_issue = []

    for fname in images:
        for cam in cam_names:
            if cam in fname:
                filename = Path(fname).stem
                # check whether the paired image from the other camera exists
                for pair_cam in cam_names:
                    if pair_cam==cam:
                        continue
                    pair_file =os.path.join(img_path, filename.replace(cam, pair_cam)+".jpg")
                    if not os.path.exists(pair_file):
                        #print("pair_file:", pair_file)
                        if fname not in fname_with_issue:
                            fname_with_issue.append(fname)
                            #print("{} doesn't have pair:{}".format(filename, Path(pair_file).stem))

                img = cv2.imread(fname)
                gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

                # Find the checker board corners
                ret, corners = cv2.findChessboardCorners(gray, (cbcol, cbrow), None)  # pattern dimensions = inner corners shared by the black squares
                # If found, add object points and image points (after refining them)
                if ret:
                    img_shape[cam] = gray.shape[::-1]
                    objpoints[cam].append(objp)
                    corners = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
                    if not valid_corners(corners):
                        #print("suspected incorrect corner for:{}".format(fname))
                        if fname not in fname_with_issue:
                            fname_with_issue.append(fname)

                    imgpoints[cam].append(corners)
                    # Draw corners and store the images
                    img = cv2.drawChessboardCorners(img, (cbcol,cbrow), corners,ret)
                    cv2.imwrite(os.path.join(str(path_corners),filename+'_corner.jpg'),img)
                else:
                    #print("Corners not found for the image %s" %Path(fname).name)
                    if fname not in fname_with_issue:
                        fname_with_issue.append(fname)
    try:
        h, w = img.shape[:2]
    except NameError:
        raise Exception("The names of the calibration images do not match the camera names in the config file.")

    # Perform calibration for each camera and store the matrices as a pickle file
    if calibrate:
        print("Starting to calibrate...")
        # Calibrating each camera
        for cam in cam_names:
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints[cam], imgpoints[cam], img_shape[cam],None,None)

            # Save the camera calibration result for later use (we won't use rvecs / tvecs)
            dist_pickle[cam] = {'mtx':mtx , 'dist':dist, 'objpoints':objpoints[cam] ,'imgpoints':imgpoints[cam] }
            pickle.dump( dist_pickle, open( os.path.join(path_camera_matrix,cam+'_intrinsic_params.pickle'), "wb" ) )
            print('Saving intrinsic camera calibration matrices for %s as a pickle file in %s'%(cam, os.path.join(path_camera_matrix)))

            # Compute mean re-projection errors for individual cameras
            mean_error = 0
            for i in range(len(objpoints[cam])):
                imgpoints_proj, _ = cv2.projectPoints(objpoints[cam][i], rvecs[i], tvecs[i], mtx, dist)
                error = cv2.norm(imgpoints[cam][i],imgpoints_proj, cv2.NORM_L2)/len(imgpoints_proj)
                mean_error += error
            print("Mean re-projection error for %s images: %.3f pixels " %(cam, mean_error/len(objpoints[cam])))

        # Compute stereo calibration for each pair of cameras
        camera_pair = [[cam_names[0], cam_names[1]]]
        for pair in camera_pair:
            print("Computing stereo calibration for " %pair)
            retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
                objpoints[pair[0]], imgpoints[pair[0]], imgpoints[pair[1]],
                dist_pickle[pair[0]]['mtx'], dist_pickle[pair[0]]['dist'],
                dist_pickle[pair[1]]['mtx'], dist_pickle[pair[1]]['dist'],
                (h, w), flags=cv2.CALIB_FIX_INTRINSIC)

            # Stereo Rectification
            rectify_scale = alpha # Free scaling parameter check this https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#fisheye-stereorectify
            R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (h, w), R, T, alpha = rectify_scale)

            stereo_params[pair[0]+'-'+pair[1]] = {"cameraMatrix1": cameraMatrix1,"cameraMatrix2": cameraMatrix2,"distCoeffs1": distCoeffs1,"distCoeffs2": distCoeffs2,"R":R,"T":T,"E":E,"F":F,
                         "R1":R1,
                         "R2":R2,
                         "P1":P1,
                         "P2":P2,
                         "roi1":roi1,
                         "roi2":roi2,
                         "Q":Q,
                         "image_shape":[img_shape[pair[0]],img_shape[pair[1]]]}

        print('Saving the stereo parameters for every pair of cameras as a pickle file in %s'%str(os.path.join(path_camera_matrix)))

        auxiliaryfunctions.write_pickle(os.path.join(path_camera_matrix,'stereo_params.pickle'),stereo_params)
        print("Camera calibration done!")
    else:
        print("Removing images where the corners are incorrectly detected.")

    return fname_with_issue
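
For a single image, the detection loop in step3D_calibrate_cameras reduces to two OpenCV calls: findChessboardCorners followed by cornerSubPix refinement. A standalone sketch, assuming a placeholder image path and the same default board size as above.

import cv2

cbrow, cbcol = 8, 6  # inner-corner counts, matching the function defaults
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

img = cv2.imread("camera-1_image042.jpg")  # placeholder path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

ret, corners = cv2.findChessboardCorners(gray, (cbcol, cbrow), None)
if ret:
    # Refine the detected corners to sub-pixel accuracy before calibration
    corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
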
Example 11
def undistort_points(config, dataframe, camera_pair, destfolder):
    cfg_3d = auxiliaryfunctions.read_config(config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(
        cfg_3d)
    ''' 
    path_undistort = destfolder
    filename_cam1 = Path(dataframe[0]).stem
    filename_cam2 = Path(dataframe[1]).stem

    #currently no interm. saving of this due to high speed.
    # check if the undistorted files are already present
    if os.path.exists(os.path.join(path_undistort,filename_cam1 + '_undistort.h5')) and os.path.exists(os.path.join(path_undistort,filename_cam2 + '_undistort.h5')):
        print("The undistorted files are already present at %s" % os.path.join(path_undistort,filename_cam1))
        dataFrame_cam1_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam1 + '_undistort.h5'))
        dataFrame_cam2_undistort = pd.read_hdf(os.path.join(path_undistort,filename_cam2 + '_undistort.h5'))
    else:
    '''
    if True:
        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        dataframe_cam1 = pd.read_hdf(dataframe[0])
        dataframe_cam2 = pd.read_hdf(dataframe[1])
        scorer_cam1 = dataframe_cam1.columns.get_level_values(0)[0]
        scorer_cam2 = dataframe_cam2.columns.get_level_values(0)[0]
        path_stereo_file = os.path.join(path_camera_matrix,
                                        'stereo_params.pickle')
        stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
        mtx_l = stereo_file[camera_pair]['cameraMatrix1']
        dist_l = stereo_file[camera_pair]['distCoeffs1']

        mtx_r = stereo_file[camera_pair]['cameraMatrix2']
        dist_r = stereo_file[camera_pair]['distCoeffs2']

        R1 = stereo_file[camera_pair]['R1']
        P1 = stereo_file[camera_pair]['P1']

        R2 = stereo_file[camera_pair]['R2']
        P2 = stereo_file[camera_pair]['P2']

        # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
        dataFrame_cam1_undistort, scorer_cam1, bodyparts = auxiliaryfunctions_3d.create_empty_df(
            dataframe_cam1, scorer_cam1, flag='2d')
        dataFrame_cam2_undistort, scorer_cam2, bodyparts = auxiliaryfunctions_3d.create_empty_df(
            dataframe_cam2, scorer_cam2, flag='2d')

        for bpindex, bp in tqdm(enumerate(bodyparts)):
            # Undistorting the points from cam1 camera
            points_cam1 = np.array([
                dataframe_cam1[scorer_cam1][bp]['x'].values[:],
                dataframe_cam1[scorer_cam1][bp]['y'].values[:]
            ])
            points_cam1 = points_cam1.T
            points_cam1 = np.expand_dims(points_cam1, axis=1)
            points_cam1_remapped = cv2.undistortPoints(src=points_cam1,
                                                       cameraMatrix=mtx_l,
                                                       distCoeffs=dist_l,
                                                       P=P1,
                                                       R=R1)

            dataFrame_cam1_undistort.iloc[:][scorer_cam1, bp, 'x'] = points_cam1_remapped[:, 0, 0]
            dataFrame_cam1_undistort.iloc[:][scorer_cam1, bp, 'y'] = points_cam1_remapped[:, 0, 1]
            dataFrame_cam1_undistort.iloc[:][scorer_cam1, bp, 'likelihood'] = dataframe_cam1[scorer_cam1][bp]['likelihood'].values[:]

            # Undistorting the points from cam2 camera
            points_cam2 = np.array([
                dataframe_cam2[scorer_cam2][bp]['x'].values[:],
                dataframe_cam2[scorer_cam2][bp]['y'].values[:]
            ])
            points_cam2 = points_cam2.T
            points_cam2 = np.expand_dims(points_cam2, axis=1)
            points_cam2_remapped = cv2.undistortPoints(src=points_cam2,
                                                       cameraMatrix=mtx_r,
                                                       distCoeffs=dist_r,
                                                       P=P2,
                                                       R=R2)

            dataFrame_cam2_undistort.iloc[:][scorer_cam2, bp, 'x'] = points_cam2_remapped[:, 0, 0]
            dataFrame_cam2_undistort.iloc[:][scorer_cam2, bp, 'y'] = points_cam2_remapped[:, 0, 1]
            dataFrame_cam2_undistort.iloc[:][scorer_cam2, bp, 'likelihood'] = dataframe_cam2[scorer_cam2][bp]['likelihood'].values[:]

        # Save the undistorted files
        dataFrame_cam1_undistort.sort_index(inplace=True)
        dataFrame_cam2_undistort.sort_index(inplace=True)

    return (dataFrame_cam1_undistort, dataFrame_cam2_undistort,
            stereo_file[camera_pair], path_stereo_file)
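
cv2.undistortPoints expects an (N, 1, 2) float array, which is why the code above transposes the per-bodypart x/y columns and adds a singleton axis before the call. A minimal sketch with dummy intrinsics (the matrix values are arbitrary); passing P=cameraMatrix keeps the output in pixel coordinates rather than normalized ones.

import cv2
import numpy as np

# Dummy pinhole intrinsics and zero distortion, for illustration only
mtx = np.array([[1000.0, 0.0, 320.0],
                [0.0, 1000.0, 240.0],
                [0.0, 0.0, 1.0]])
dist = np.zeros(5)

points = np.random.rand(10, 2) * 640     # fake pixel coordinates
points = np.expand_dims(points, axis=1)  # -> shape (10, 1, 2)

undistorted = cv2.undistortPoints(points, cameraMatrix=mtx, distCoeffs=dist, P=mtx)
print(undistorted.shape)  # (10, 1, 2)
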
Example 12
def triangulate_raw_2d_camera_coords(dlc3d_cfg,
                                     cam1_coords=None,
                                     cam2_coords=None,
                                     cam1_image=None,
                                     cam2_image=None,
                                     keys=None,
                                     undistort=True):
    """
    Augmented deeplabcut.triangulate() for DeepCage workflow

    This function triangulates user-defined coordinates from the two camera views using the camera matrices (derived from calibration) to calculate 3D predictions.
    Optionally, the user can define the coordinates from images.

    Used for changing basis operations.

    Note: cam1 is the first camera on the 'camera_names' list located in the project 'config.yaml' file; cam2 is the second camera on the same list

    Parameters
    ----------
    dlc3d_cfg : string
        Absolute path of the config.yaml file as a string.
    cam1_image : string; default None
        Absolute path of the image of camera 1 as a string.
    cam2_image : string; default None
        Absolute path of the image of camera 2 as a string.
    cam1_coords : numpy.array-like; default None
        List of vectors that are coordinates in the camera 1 image
    cam2_coords : numpy.array-like; default None
        List of vectors that are coordinates in the camera 2 image
    keys : list-like; default None
        List of names or dictionary keys that can be associated with the 3d-coordinate with the identical index

    Example
    -------
    To analyze a set of coordinates:
    >>> deeplabcut.triangulate_raw_2d_camera_coords(dlc3d_cfg, cam1_coords=((1, 2), (20, 50), ...), cam2_coords=((3, 5), (14, 2), ...) )

    Linux/MacOS
    To analyze a set of images in a directory:
    >>> deeplabcut.triangulate_raw_2d_camera_coords(dlc3d_cfg, cam1_image='/image_directory/cam1.png', cam2_image='/image_directory/cam2.png')

    Windows
    To analyze a set of images in a directory:
    >>> deeplabcut.triangulate_raw_2d_camera_coords(dlc3d_cfg, cam1_image='<drive_letter>:\\<image_directory>\\cam1.png', cam2_image='\\image_directory\\cam2.png')

    """
    # if ((cam1_coords is None and cam2_coords is None) and (cam1_image is None and cam2_image is None)) or (
    #     (cam1_coords is not None and cam2_coords is not None) and (cam1_image is not None and cam2_image is not None)):
    #     msg = 'Must include a set of camera images or 2d-coordinates'
    #     raise ValueError(msg)

    coords_defined = False
    if cam1_coords is not None and cam2_coords is not None:
        coords_defined = True

    if cam1_image is not None and cam2_image is not None:
        if coords_defined:
            msg = 'Must include either a set of camera images or 2d-coordinates, not both'
            raise ValueError(msg)
        cam1_coords = get_coord(cam1_image, n=-1)
        cam2_coords = get_coord(cam2_image, n=-1)

        if len(cam1_coords) != len(cam2_coords):
            msg = 'Each image must have the same number of selections'
            raise ValueError(msg)

    cam1_coords = np.array(cam1_coords, dtype=np.float64)
    cam2_coords = np.array(cam2_coords, dtype=np.float64)

    if cam1_coords.shape != cam2_coords.shape:
        msg = "Camera coordinate arrays have different dimensions"
        raise ValueError(msg)

    if not cam1_coords[0].shape == (1, 2):
        if cam1_coords[0].shape == (2, ):
            print(
                "Attempting to fix coordinate-array by np.expand_dims(<array>, axis=1)"
            )
            cam1_coords = np.expand_dims(cam1_coords, axis=1)
            cam2_coords = np.expand_dims(cam2_coords, axis=1)
        else:
            msg = "Coordinate-array has an invalid format"
            raise ValueError(msg)

    cfg_3d = read_config(dlc3d_cfg)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(
        cfg_3d)

    cam_names = cfg_3d['camera_names']
    camera_pair_key = cam_names[0] + '-' + cam_names[1]

    # Create an empty dataFrame to store the undistorted 2d coordinates and likelihood
    stereo_path = os.path.join(path_camera_matrix, 'stereo_params.pickle')
    with open(stereo_path, 'rb') as infile:
        stereo_file = pickle.load(infile)

    mtx_l = stereo_file[camera_pair_key]['cameraMatrix1']
    dist_l = stereo_file[camera_pair_key]['distCoeffs1']

    mtx_r = stereo_file[camera_pair_key]['cameraMatrix2']
    dist_r = stereo_file[camera_pair_key]['distCoeffs2']

    R1 = stereo_file[camera_pair_key]['R1']
    P1 = stereo_file[camera_pair_key]['P1']

    R2 = stereo_file[camera_pair_key]['R2']
    P2 = stereo_file[camera_pair_key]['P2']

    if undistort:
        cam1_coords = cv2.undistortPoints(src=cam1_coords,
                                          cameraMatrix=mtx_l,
                                          distCoeffs=dist_l,
                                          P=P1,
                                          R=R1)
        cam2_coords = cv2.undistortPoints(src=cam2_coords,
                                          cameraMatrix=mtx_r,
                                          distCoeffs=dist_r,
                                          P=P2,
                                          R=R2)

    homogenous_coords = auxiliaryfunctions_3d.triangulatePoints(
        P1, P2, cam1_coords, cam2_coords)
    triangulated_coords = np.array(
        (homogenous_coords[0], homogenous_coords[1], homogenous_coords[2])).T

    if keys is not None:
        return {
            label: coord
            for label, coord in zip(keys, triangulated_coords)
        }, triangulated_coords
    else:
        return triangulated_coords
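
The triangulation step returns homogeneous 4-vectors; Euclidean 3D points are recovered by dividing by the fourth (scale) row. A sketch of the same step using OpenCV's cv2.triangulatePoints, with dummy projection matrices and fabricated point correspondences.

import cv2
import numpy as np

# Dummy projection matrices: an identity camera and one translated along x
P1 = np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])])

pts1 = np.random.rand(2, 8)             # 2 x N points in view 1
pts2 = pts1 + np.array([[0.1], [0.0]])  # crude fake disparity in view 2

homogeneous = cv2.triangulatePoints(P1, P2, pts1, pts2)  # 4 x N
euclidean = (homogeneous[:3] / homogeneous[3]).T         # N x 3
print(euclidean.shape)
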
Example 13
def convert_doveeye_to_dlc(config, doveeye_calib_file):
    cfg_3d = auxiliaryfunctions.read_config(config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    cam_names = cfg_3d['camera_names']

    dist_pickle = {}
    stereo_params = {}
    for cam in cam_names:
        dist_pickle.setdefault(cam, [])

    doveye_calib=readYAMLFile(doveeye_calib_file)
    for cam in cam_names:
        cam_idx=cameras.index(cam)
        # Save the camera calibration result for later use (we won't use rvecs / tvecs)
        dist_pickle[cam] = {'mtx': doveye_calib['C'][cam_idx], 'dist': doveye_calib['D'][cam_idx], 'objpoints': [], 'imgpoints': []}
        pickle.dump(dist_pickle, open(os.path.join(path_camera_matrix, cam + '_intrinsic_params.pickle'), "wb"))

    for i in range(len(cam_names)):
        cam1_idx=cameras.index(cam_names[i])
        for j in range(i + 1, len(cam_names)):
            cam2_idx=cameras.index(cam_names[j])
            pair = [cam_names[i], cam_names[j]]
            pair_idx=-1
            for potential_pair_idx,potential_pair in enumerate(camera_pairs):
                if (potential_pair[0]==cam_names[i] and potential_pair[1]==cam_names[j]) or (potential_pair[0]==cam_names[j] and potential_pair[1]==cam_names[i]):
                    pair_idx=potential_pair_idx
                    break

            # Stereo Rectification
            rectify_scale = 0.4  # Free scaling parameter check this https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#fisheye-stereorectify
            R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(doveye_calib['C'][cam1_idx], doveye_calib['D'][cam1_idx],
                                                              doveye_calib['C'][cam2_idx], doveye_calib['D'][cam2_idx],
                                                              (1086, 2040),
                                                              doveye_calib['R'][pair_idx],
                                                              doveye_calib['T'][pair_idx],
                                                              alpha=rectify_scale)

            stereo_params[pair[0] + '-' + pair[1]] = {"cameraMatrix1": doveye_calib['C'][cam1_idx],
                                                      "cameraMatrix2": doveye_calib['C'][cam2_idx],
                                                      "distCoeffs1": doveye_calib['D'][cam1_idx],
                                                      "distCoeffs2": doveye_calib['D'][cam2_idx],
                                                      "R": doveye_calib['R'][pair_idx],
                                                      "T": doveye_calib['T'][pair_idx],
                                                      "E": [],
                                                      "F": doveye_calib['F'][pair_idx],
                                                      "R1": R1,
                                                      "R2": R2,
                                                      "P1": P1,
                                                      "P2": P2,
                                                      "roi1": roi1,
                                                      "roi2": roi2,
                                                      "Q": Q,
                                                      "image_shape": [(1086,2040),(1086,2040)]}

    print('Saving the stereo parameters for every pair of cameras as a pickle file in %s' % str(
        os.path.join(path_camera_matrix)))

    auxiliaryfunctions.write_pickle(os.path.join(path_camera_matrix, 'stereo_params.pickle'), stereo_params)
    print("Camera calibration done! Use the function ``check_undistortion`` to check the check the calibration")

    pass
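
Each of these calibration paths ultimately writes the same stereo_params.pickle layout: a dict keyed by 'cam1-cam2' pair names, with each value holding the intrinsics, extrinsics, and rectification matrices consumed by the undistortion and triangulation functions. A quick inspection sketch; the pickle path is a placeholder for the file under the project's camera_matrix folder.

import pickle

with open("stereo_params.pickle", "rb") as f:  # placeholder path
    stereo_params = pickle.load(f)

for pair, params in stereo_params.items():
    # e.g. 'camera-1-camera-2' -> cameraMatrix1, distCoeffs1, R, T, E, F, P1, P2, Q, ...
    print(pair, sorted(params.keys()))
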
Example 14
def triangulate(dlc_3d_config, video_cfg, origins, table_corners, tocchini, video_path, filterpredictions=True, destfolder=None,
                save_as_csv=False):

    cfg_3d = auxiliaryfunctions.read_config(dlc_3d_config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    cam_names = cfg_3d['camera_names']
    pcutoff = cfg_3d['pcutoff']
    scorer_3d = cfg_3d['scorername_3d']

    snapshots = {}
    for cam in cam_names:
        snapshots[cam] = cfg_3d[str('config_file_' + cam)]
        # Check if the config file exists
        if not os.path.exists(snapshots[cam]):
            raise Exception(str("It seems the file specified in the variable config_file_" + str(cam)) +
                            " does not exist. Please edit the config file with correct file path and retry.")

    path_stereo_file = os.path.join(path_camera_matrix, 'stereo_params.pickle')
    stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)

    position=np.array([0.0,0.0,0.0])
    rotation=np.array([[1.0, 0.0, 0.0],[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    rotations=[np.linalg.inv(rotation)]
    translations=[np.matmul(-rotation, position)]
    projections=[]

    for cam_idx in range(1,len(cam_names)):
        pair_rotation=stereo_file[cam_names[0] + '-' + cam_names[cam_idx]]['R']
        pair_translation = stereo_file[cam_names[0] + '-' + cam_names[cam_idx]]['T']
        rotations.append(np.matmul(pair_rotation,rotations[0]))
        translations.append(np.matmul(pair_rotation, translations[0])+np.transpose(pair_translation))

    for cam_idx in range(len(cam_names)):
        path_camera_file = os.path.join(path_camera_matrix, '%s_intrinsic_params.pickle' % cam_names[cam_idx])
        intrinsic_file = auxiliaryfunctions.read_pickle(path_camera_file)

        projection=np.zeros((3,4))
        projection[:,0:3]=rotations[cam_idx]
        projection[:,3]=translations[cam_idx]

        projection=np.matmul(intrinsic_file[cam_names[cam_idx]]['mtx'], projection)
        projections.append(projection)

    for view in cam_names:
        origins[view]=np.array(origins[view])
        origins[view][0]=origins[view][0]+video_cfg['crop_limits'][view][0]
        origins[view][1]=origins[view][1]+video_cfg['crop_limits'][view][2]
    [origin, pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1}, origins, pcutoff, projections, reconMode='all')


    table_coords_3d=[]
    for corner_idx in range(len(table_corners['front'])):
        corner={}
        for view in cam_names:
            corner[view]=table_corners[view][corner_idx]
            corner[view] =np.array(corner[view])
            corner[view][0]=corner[view][0]+video_cfg['crop_limits'][view][0]
            corner[view][1]=corner[view][1]+video_cfg['crop_limits'][view][2]
        [coord,pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1},corner,pcutoff,projections, reconMode='all')
        table_coords_3d.append(coord)

    xy_norm=np.array([[0,0,1]])

    table_center=np.mean(np.array(table_coords_3d),axis=0)

    table_vec1=table_coords_3d[0]-table_center
    table_vec2=table_coords_3d[1]-table_center
    table_norm = unit_vector(np.cross(np.transpose(table_vec1), np.transpose(table_vec2)))

    rot_vec=unit_vector(np.cross(xy_norm, table_norm), axis=1)
    rot_angle=-np.arccos(np.abs(np.sum(xy_norm*table_norm))/np.sqrt(np.sum(table_norm**2)))

    rot_mat=rotation_matrix(rot_angle, np.transpose(rot_vec))
    rot_mat=np.matmul(rotation_matrix(np.pi, [1, 0, 0]), rot_mat)
    rot_mat=rot_mat[0:3,0:3]

    origin=np.matmul(rot_mat, origin)

    for idx, coord in enumerate(table_coords_3d):
        coord=np.matmul(rot_mat, coord)-origin
        table_coords_3d[idx]=coord

    tocchini_coords_3d=[]
    for tocchino_idx in range(len(tocchini['front'])):
        tocchino={}
        for view in cam_names:
            tocchino[view]=tocchini[view][tocchino_idx]
            tocchino[view]=np.array(tocchino[view])
            tocchino[view][0]=tocchino[view][0]+video_cfg['crop_limits'][view][0]
            tocchino[view][1]=tocchino[view][1]+video_cfg['crop_limits'][view][2]
        [coord,pairs_used]=locate(cam_names,{'front':1,'side':1,'top':1},tocchino,pcutoff,projections, reconMode='all')
        tocchino_coord=np.matmul(rot_mat,coord)-origin
        tocchini_coords_3d.append(tocchino_coord)

    file_name_3d_scorer = []
    dataname = []
    for cam_name in cam_names:
        dlc_3d_config = snapshots[cam_name]
        cfg = auxiliaryfunctions.read_config(dlc_3d_config)

        shuffle = cfg_3d[str('shuffle_' + cam_name)]
        trainingsetindex = cfg_3d[str('trainingsetindex_' + cam_name)]

        video=video_path[cam_name]
        vname = Path(video).stem


        trainFraction = cfg['TrainingFraction'][trainingsetindex]
        modelfolder = os.path.join(cfg["project_path"],
                                   str(auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)))
        path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
        dlc_cfg = load_config(str(path_test_config))
        Snapshots = np.array([fn.split('.')[0] for fn in os.listdir(os.path.join(modelfolder, 'train')) if "index" in fn])
        snapshotindex = cfg['snapshotindex']

        increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]

        dlc_cfg['init_weights'] = os.path.join(modelfolder, 'train', Snapshots[snapshotindex])
        trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1]
        DLCscorer = auxiliaryfunctions.GetScorerName(cfg, shuffle, trainFraction,
                                                     trainingsiterations=trainingsiterations)

        file_name_3d_scorer.append(DLCscorer)

        if filterpredictions:
            dataname.append(os.path.join(destfolder, cam_name, vname + DLCscorer + 'filtered.h5'))
        else:
            dataname.append(os.path.join(destfolder, cam_name, vname + DLCscorer + '.h5'))

    output_filename = os.path.join(destfolder, vname + '_' + scorer_3d)
    if os.path.isfile(output_filename + '.h5'):  # TODO: don't check twice and load the pickle file to check if the same snapshots + camera matrices were used.
        print("Already analyzed...", output_filename+'.h5')
    else:
        if len(dataname) > 0:
            df = pd.read_hdf(dataname[0])
            df_3d, scorer_3d, bodyparts = auxiliaryfunctions_3d.create_empty_df(df, scorer_3d, flag='3d')

            for bpindex, bp in enumerate(bodyparts):
                bp_coords=np.zeros((3,len(df_3d)))
                for f_idx in range(len(df_3d)):
                    likelihoods={}
                    coords={}
                    for cam_idx,cam_name in enumerate(cam_names):
                        dataframe_cam = pd.read_hdf(dataname[cam_idx])
                        scorer_cam = dataframe_cam.columns.get_level_values(0)[0]
                        likelihoods[cam_name]=dataframe_cam[scorer_cam][bp]['likelihood'].values[f_idx]
                        coords[cam_name]=np.array([dataframe_cam[scorer_cam][bp]['x'].values[f_idx], dataframe_cam[scorer_cam][bp]['y'].values[f_idx]])
                        coords[cam_name][0]=coords[cam_name][0]+video_cfg['crop_limits'][cam_name][0]
                        coords[cam_name][1]=coords[cam_name][1]+video_cfg['crop_limits'][cam_name][2]
                    [coord, pairs_used] = locate(cam_names, likelihoods, coords, pcutoff, projections, reconMode='bestpossible')

                    coord=np.matmul(rot_mat, coord)-origin
                    if pairs_used < 3:
                        coord[0]=np.nan
                        coord[1]=np.nan
                        coord[2]=np.nan
                    bp_coords[:,f_idx]=np.squeeze(coord)
                df_3d.iloc[:][scorer_3d, bp, 'x'] = bp_coords[0,:]
                df_3d.iloc[:][scorer_3d, bp, 'y'] = bp_coords[1,:]
                df_3d.iloc[:][scorer_3d, bp, 'z'] = bp_coords[2,:]

            df_3d.to_hdf(str(output_filename + '.h5'), 'df_with_missing', format='table', mode='w')

            if save_as_csv:
                df_3d.to_csv(str(output_filename + '.csv'))

            print("Triangulated data for video", vname)
            print("Results are saved under: ", destfolder)

    return str(output_filename + '.h5'), table_coords_3d, tocchini_coords_3d
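
The projection matrices assembled above follow the standard pinhole composition P = K [R | t], mapping homogeneous world points to homogeneous pixel coordinates. A compact sketch with dummy intrinsics and pose.

import numpy as np

K = np.array([[1000.0, 0.0, 320.0],
              [0.0, 1000.0, 240.0],
              [0.0, 0.0, 1.0]])  # dummy intrinsic matrix
R = np.eye(3)                    # dummy rotation (camera aligned with world)
t = np.array([0.0, 0.0, -2.0])   # dummy translation

# P maps homogeneous world points (4-vectors) to homogeneous pixels (3-vectors)
P = K @ np.hstack([R, t.reshape(3, 1)])  # shape (3, 4)

X = np.array([0.5, -0.2, 3.0, 1.0])  # a world point in homogeneous form
x = P @ X
print(x[:2] / x[2])  # pixel coordinates
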
Example 15
def align_videos(subject, date):
    frame_buffer = 10

    base_video_path=os.path.join(cfg['preprocessed_data_dir'],subject, date,'video')
    with open(os.path.join(base_video_path,'config.json')) as json_file:
        vid_cfg=json.load(json_file)
    fnames=sorted(glob.glob(os.path.join(base_video_path, 'front', '%s*.avi' % date)))

    rois_checked={
        'front':False,
        'side':False,
        'top':False
    }
    crop_checked={
        'front':False,
        'side':False,
        'top':False
    }

    # For each file (filenames are same in each view directory)
    for fname in fnames:
        fname=os.path.split(fname)[-1]
        print('Processing %s' % fname)
        blue_onsets={}
        yellow_onsets={}
        blue_ts={}
        yellow_ts={}
        video_nframes={}

        # Whether or not to use LED for alignment
        led_based = True

        for view in cfg['camera_views']:
            video_path=os.path.join(base_video_path, view)
            clip = VideoFileClip(os.path.join(video_path, fname))
            n_frames_approx = int(np.ceil(clip.duration * clip.fps) + frame_buffer)
            n_frames = n_frames_approx
            clip.reader.initialize()

            # Initialize LED time series for this view
            blue_ts[view]=[]
            yellow_ts[view]=[]

            for index in range(n_frames_approx):
                image = img_as_ubyte(clip.reader.read_frame())

                # If not already set, show GUI to select blue LED ROI
                if not rois_checked[view]:
                    blue_led_roi_area=vid_cfg['blue_led_roi_areas'][view]
                    blue_cropped_img=image[blue_led_roi_area[2]:blue_led_roi_area[3],
                                     blue_led_roi_area[0]:blue_led_roi_area[1],:]
                    init_roi=None
                    if  view in vid_cfg['blue_led_rois'] and vid_cfg['blue_led_rois'][view] is not None:
                        init_roi=vid_cfg['blue_led_rois'][view]
                        init_roi[0] = init_roi[0] - blue_led_roi_area[0]
                        init_roi[1] = init_roi[1] - blue_led_roi_area[0]
                        init_roi[2] = init_roi[2] - blue_led_roi_area[2]
                        init_roi[3] = init_roi[3] - blue_led_roi_area[2]
                    vid_cfg['blue_led_rois'][view] = select_crop_parameters.show(blue_cropped_img, 'Select blue LED ROI', init_coords=init_roi)
                    vid_cfg['blue_led_rois'][view][0] = vid_cfg['blue_led_rois'][view][0] + blue_led_roi_area[0]
                    vid_cfg['blue_led_rois'][view][1] = vid_cfg['blue_led_rois'][view][1] + blue_led_roi_area[0]
                    vid_cfg['blue_led_rois'][view][2] = vid_cfg['blue_led_rois'][view][2] + blue_led_roi_area[2]
                    vid_cfg['blue_led_rois'][view][3] = vid_cfg['blue_led_rois'][view][3] + blue_led_roi_area[2]

                    yellow_led_roi_area = vid_cfg['yellow_led_roi_areas'][view]
                    yellow_cropped_img = image[yellow_led_roi_area[2]:yellow_led_roi_area[3],
                                       yellow_led_roi_area[0]:yellow_led_roi_area[1], :]
                    init_roi = None
                    if view in vid_cfg['yellow_led_rois'] and vid_cfg['yellow_led_rois'][view] is not None:
                        init_roi = vid_cfg['yellow_led_rois'][view]
                        init_roi[0] = init_roi[0] - yellow_led_roi_area[0]
                        init_roi[1] = init_roi[1] - yellow_led_roi_area[0]
                        init_roi[2] = init_roi[2] - yellow_led_roi_area[2]
                        init_roi[3] = init_roi[3] - yellow_led_roi_area[2]
                    vid_cfg['yellow_led_rois'][view] = select_crop_parameters.show(yellow_cropped_img, 'Select yellow LED ROI', init_coords=init_roi)
                    vid_cfg['yellow_led_rois'][view][0] = vid_cfg['yellow_led_rois'][view][0] + yellow_led_roi_area[0]
                    vid_cfg['yellow_led_rois'][view][1] = vid_cfg['yellow_led_rois'][view][1] + yellow_led_roi_area[0]
                    vid_cfg['yellow_led_rois'][view][2] = vid_cfg['yellow_led_rois'][view][2] + yellow_led_roi_area[2]
                    vid_cfg['yellow_led_rois'][view][3] = vid_cfg['yellow_led_rois'][view][3] + yellow_led_roi_area[2]

                    rois_checked[view]=True
                    
                if index == int(n_frames_approx - frame_buffer * 2):
                    last_image = image
                elif index > int(n_frames_approx - frame_buffer * 2):
                    if (image == last_image).all():
                        n_frames = index
                        break

                # Crop image around blue LED, get only blue channel
                blue_roi=vid_cfg['blue_led_rois'][view]
                blue_led_image = image[blue_roi[2]:blue_roi[3], blue_roi[0]:blue_roi[1], 2]
                # Add average of cropped image to blue LED timeseries
                blue_ts[view].append(np.mean(blue_led_image))

                # Crop image around yellow LED, average red and green channels
                yellow_roi=vid_cfg['yellow_led_rois'][view]
                yellow_led_image = np.mean(image[yellow_roi[2]:yellow_roi[3], yellow_roi[0]:yellow_roi[1], 0:2], axis=2)
                # Add average of cropped image to yellow LED timeseries
                yellow_ts[view].append(np.mean(yellow_led_image))

            blue_ts[view] = np.array(blue_ts[view])
            yellow_ts[view] = np.array(yellow_ts[view])

            # Normalize based on first 10 time steps
            if len(blue_ts[view])>10:
                blue_ts[view]=(blue_ts[view]-np.mean(blue_ts[view][0:10]))/np.mean(blue_ts[view][0:10])
            if len(yellow_ts[view])>10:
                yellow_ts[view]=(yellow_ts[view]-np.mean(yellow_ts[view][0:10]))/np.mean(yellow_ts[view][0:10])
            blue_ts[view]=blue_ts[view]/np.max(blue_ts[view])
            yellow_ts[view]=yellow_ts[view]/np.max(yellow_ts[view])

            # plt.figure()
            # plt.subplot(2,1,1)
            # plt.plot(video_blue_brightness)
            # plt.subplot(2, 1, 2)
            # plt.plot(video_yellow_brightness)
            # plt.show()

            # Get derivative of blue and yellow ts
            #blue_diff=np.diff(blue_ts[view])
            #yellow_diff=np.diff(yellow_ts[view])

            # Get peak blue and yellow LED change times
            #blue_peak=np.max(blue_diff)
            blue_peak = np.max(blue_ts[view])
            #yellow_peak=np.max(yellow_diff)
            yellow_peak = np.max(yellow_ts[view])

            # If neither normalized LED signal clearly exceeds threshold, don't use the LEDs for aligning
            #if blue_peak<.05 or yellow_peak<.05:
            if len(blue_ts[view])<10 or np.max(blue_ts[view][10:]) < .25 or np.max(yellow_ts[view])<.25:
                led_based=False
                print("Can't figure out LED onset - not using")
            # Otherwise, use the first time point after the 10-frame warm-up where the normalized signal exceeds 0.25
            else:
                #blue_onsets[view] = np.where(blue_diff >= 0.05)[0][0]
                #np.where(blue_ts[view] >= 0.25)[0][0]
                blue_onsets[view] = 10 + np.where(blue_ts[view][10:] >= 0.25)[0][0]
                #yellow_onsets[view] = np.where(yellow_diff >= 0.05)[0][0]
                yellow_onsets[view] = np.where(yellow_ts[view] >= 0.25)[0][0]

            video_nframes[view]=n_frames

        # Use first view where blue LED exceeds threshold as reference to align to
        if len(blue_onsets.values())>0:
            min_blue_onset=min(blue_onsets.values())

        # if fname=='15-05-2019_10-34-15_11.avi':
        #     plt.figure()
        #     for view in cfg['camera_views']:
        #         plt.plot(blue_ts[view], label='%s: blue' % view)
        #         plt.plot(yellow_ts[view], label='%s: yellow' % view)
        #     plt.legend()
        #     plt.show()

        # Compute trial duration based on each view
        trial_durations={}
        for view in cfg['camera_views']:
            if view in blue_onsets and view in yellow_onsets:
                # Trial duration (in ms)
                trial_duration=(yellow_onsets[view]-blue_onsets[view])*1.0/clip.fps*1000.0
                # there is an 850ms delay before blue LED comes on
                if trial_duration>0:
                    trial_duration=trial_duration+850.0
                trial_durations[view]=trial_duration
                print('%s: %.2fms' % (view, trial_duration))
        #assert(len(trial_durations)>0 and all(x == trial_durations[0] for x in trial_durations))

        start_frames_to_cut={}
        n_frames_after_cutting = {}
        # Cut frames to align videos and crop
        for idx,view in enumerate(cfg['camera_views']):

            # using LED to align
            if led_based:
                start_frames_to_cut[view]=blue_onsets[view]-min_blue_onset
            # otherwise - use standard # of frames to crop (order of video triggering is top, side, front)
            if not led_based or start_frames_to_cut[view]>5:
                start_frames_to_cut[view] = 0
                if view=='front':
                    start_frames_to_cut[view]=2
                elif view=='side':
                    start_frames_to_cut[view]=1
            n_frames_after_cutting[view]=video_nframes[view]-start_frames_to_cut[view]
        new_nframes=min(n_frames_after_cutting.values())

        intrinsic_files = {}
        for view in cfg['camera_views']:
            dlc3d_cfg = os.path.join('/data/tool_learning/preprocessed_data/dlc_projects',
                                     'visual_grasp_3d-Jimmy-2019-08-19-3d', 'config.yaml')

            cfg_3d = auxiliaryfunctions.read_config(dlc3d_cfg)
            img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(
                cfg_3d)
            path_intrinsic_file = os.path.join(path_camera_matrix, '%s_intrinsic_params.pickle' % view)
            intrinsic_file = auxiliaryfunctions.read_pickle(path_intrinsic_file)
            intrinsic_files[view] = intrinsic_file[view]

        for idx, view in enumerate(cfg['camera_views']):
            camera_matrix = intrinsic_files[view]['mtx']
            distortion_coefficients = intrinsic_files[view]['dist']

            end_frames_to_cut=n_frames_after_cutting[view]-new_nframes
            print('cutting %d frames from beginning and %d frames from end of %s' % (start_frames_to_cut[view], end_frames_to_cut, view))

            # Cut frames from blue and yellow LED time series and onsets
            if end_frames_to_cut>0:
                blue_ts[view]=blue_ts[view][start_frames_to_cut[view]:-end_frames_to_cut]
                yellow_ts[view] = yellow_ts[view][start_frames_to_cut[view]:-end_frames_to_cut]
            else:
                blue_ts[view] = blue_ts[view][start_frames_to_cut[view]:]
                yellow_ts[view] = yellow_ts[view][start_frames_to_cut[view]:]
            if view in blue_onsets:
                blue_onsets[view]=blue_onsets[view]-start_frames_to_cut[view]
            if view in yellow_onsets:
                yellow_onsets[view]=yellow_onsets[view]-start_frames_to_cut[view]

            # Load video and cut frames from beginning
            video_path = os.path.join(base_video_path, view)
            clip = VideoFileClip(os.path.join(video_path, fname))

            # Crop limits based on view
            frames=[]
            n_frames_approx = int(np.ceil(clip.duration * clip.fps)+frame_buffer)
            for index in range(n_frames_approx):
                image = img_as_ubyte(clip.reader.read_frame())
                image = cv2.undistort(image, camera_matrix, distortion_coefficients)
                if index>=start_frames_to_cut[view]:
                    if not crop_checked[view]:
                        init_crop_lims = None
                        if view in vid_cfg['crop_limits'] and vid_cfg['crop_limits'][view] is not None:
                            init_crop_lims = vid_cfg['crop_limits'][view]
                        vid_cfg['crop_limits'][view] = select_crop_parameters.show(image, 'Select crop limits',
                                                                                   init_coords=init_crop_lims)
                        crop_checked[view]=True
                    # Crop image and save to video
                    crop_lims=vid_cfg['crop_limits'][view]
                    image=image[crop_lims[2]:crop_lims[3], crop_lims[0]:crop_lims[1], :]
                    frames.append(image)
                if len(frames)==new_nframes:
                    break

            clip.close()

            # Check that have the right number of frames
            assert(len(frames)==new_nframes)

            # Create new video clip (cropped and aligned)
            video_path = os.path.join(base_video_path, view)
            new_clip = VideoProcessorCV(sname=os.path.join(video_path, fname), fps=clip.fps, codec='mp4v',
                                        sw=crop_lims[1] - crop_lims[0], sh=crop_lims[3] - crop_lims[2])
            for frame in frames:
                new_clip.save_frame(np.uint8(frame))
            new_clip.close()

        # Make everything hashable
        for view in cfg['camera_views']:
            blue_ts[view]=blue_ts[view].tolist()
            yellow_ts[view] = yellow_ts[view].tolist()
            if view in blue_onsets:
                blue_onsets[view]=int(blue_onsets[view])
            if view in yellow_onsets:
                yellow_onsets[view]=int(yellow_onsets[view])
            if view in trial_durations:
                trial_durations[view]=float(trial_durations[view])

        # Save video info to JSON
        data = {
            'blue_roi': vid_cfg['blue_led_rois'],
            'yellow_roi': vid_cfg['yellow_led_rois'],
            'blue_ts': blue_ts,
            'yellow_ts': yellow_ts,
            'blue_onset': blue_onsets,
            'yellow_onset': yellow_onsets,
            'trial_duration': trial_durations,
            'fname': fname
        }
        [base, ext] = os.path.splitext(fname)
        with open(os.path.join(base_video_path, '%s.json' % base), 'w') as outfile:
            json.dump(data, outfile)

        print('')

    with open(os.path.join(base_video_path,'config.json'),'w') as outfile:
        json.dump(vid_cfg, outfile)
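
The LED-based alignment above boils down to normalizing each brightness trace against its initial baseline and thresholding it. A self-contained sketch of that onset rule on a synthetic trace; the 10-frame warm-up and 0.25 threshold mirror the code above.

import numpy as np

# Synthetic LED brightness trace: dark for 40 frames, then lit
ts = np.concatenate([np.full(40, 10.0), np.full(60, 200.0)])
ts += np.random.rand(ts.size)

# Normalize against the first 10 frames, then scale to a 0-1 peak
baseline = np.mean(ts[:10])
ts = (ts - baseline) / baseline
ts = ts / np.max(ts)

# Onset = first frame after the 10-frame warm-up where the signal exceeds 0.25
onset = 10 + np.where(ts[10:] >= 0.25)[0][0]
print(onset)  # ~40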