Code Example #1
File: crossvalutils.py  Project: lillipu/DeepLabCut
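The listing relies on names that are imported at module level in crossvalutils.py and therefore do not appear here. A minimal sketch of the assumed imports (paths as in the DeepLabCut codebase; _parse_ground_truth_data and _find_closest_neighbors are private helpers assumed to be defined alongside this function):

# Assumed module-level imports, not shown in the original listing.
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.metrics.cluster import contingency_matrix

from deeplabcut.utils import auxfun_multianimal
from deeplabcut.pose_estimation_tensorflow.lib.inferenceutils import (
    Assembler,
    evaluate_assembly,
)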
def _benchmark_paf_graphs(
    config,
    inference_cfg,
    data,
    paf_inds,
    greedy=False,
    add_discarded=True,
    identity_only=False,
    calibration_file="",
    oks_sigma=0.1,
):
    n_multi = len(auxfun_multianimal.extractindividualsandbodyparts(config)[2])
    data_ = {"metadata": data.pop("metadata")}
    for k, v in data.items():
        data_[k] = v["prediction"]
    ass = Assembler(
        data_,
        max_n_individuals=inference_cfg["topktoretain"],
        n_multibodyparts=n_multi,
        greedy=greedy,
        pcutoff=inference_cfg.get("pcutoff", 0.1),
        min_affinity=inference_cfg.get("pafthreshold", 0.1),
        add_discarded=add_discarded,
        identity_only=identity_only,
    )
    if calibration_file:
        ass.calibrate(calibration_file)

    params = ass.metadata
    image_paths = params["imnames"]
    bodyparts = params["joint_names"]
    idx = (
        data[image_paths[0]]["groundtruth"][2]
        .unstack("coords")
        .reindex(bodyparts, level="bodyparts")
        .index
    )
    individuals = idx.get_level_values("individuals").unique()
    n_individuals = len(individuals)
    map_ = dict(zip(individuals, range(n_individuals)))

    # Form ground truth beforehand
    ground_truth = []
    for i, imname in enumerate(image_paths):
        temp = data[imname]["groundtruth"][2]
        ground_truth.append(temp.to_numpy().reshape((-1, 2)))
    ground_truth = np.stack(ground_truth)
    # Append a dummy confidence of 1 and reshape to
    # (n_frames, n_individuals, n_bodyparts, 3) for assembly evaluation.
    temp = np.ones((*ground_truth.shape[:2], 3))
    temp[..., :2] = ground_truth
    temp = temp.reshape((temp.shape[0], n_individuals, -1, 3))
    ass_true_dict = _parse_ground_truth_data(temp)
    ids = np.vectorize(map_.get)(idx.get_level_values("individuals").to_numpy())
    ground_truth = np.insert(ground_truth, 2, ids, axis=2)

    # Assemble animals on the full set of detections
    paf_inds = sorted(paf_inds, key=len)
    paf_graph = ass.graph
    n_graphs = len(paf_inds)
    all_scores = []
    all_metrics = []
    for j, paf in enumerate(paf_inds, start=1):
        print(f"Graph {j}|{n_graphs}")
        graph = [paf_graph[i] for i in paf]
        ass.paf_inds = paf
        ass.graph = graph
        ass.assemble()
        oks = evaluate_assembly(ass.assemblies, ass_true_dict, oks_sigma)
        all_metrics.append(oks)
        scores = np.full((len(image_paths), 2), np.nan)
        for i, imname in enumerate(tqdm(image_paths)):
            gt = ground_truth[i]
            gt = gt[~np.isnan(gt).any(axis=1)]
            # Only consider frames with 2+ animals
            if len(np.unique(gt[:, 2])) < 2:
                continue

            # Count the number of unassembled bodyparts
            n_dets = len(gt)
            animals = ass.assemblies.get(i)
            if animals is None:
                if n_dets:
                    scores[i, 0] = 1
            else:
                # Tag each animal's detections with its assembly index n.
                animals = [
                    np.c_[animal.data, np.ones(animal.data.shape[0]) * n]
                    for n, animal in enumerate(animals)
                ]
                hyp = np.concatenate(animals)
                hyp = hyp[~np.isnan(hyp).any(axis=1)]
                scores[i, 0] = (n_dets - hyp.shape[0]) / n_dets
                # Match ground-truth keypoints to their closest assembled
                # detections and compute the identity purity of the assignment.
                neighbors = _find_closest_neighbors(gt[:, :2], hyp[:, :2])
                valid = neighbors != -1
                id_gt = gt[valid, 2]
                id_hyp = hyp[neighbors[valid], -1]
                mat = contingency_matrix(id_gt, id_hyp)
                purity = mat.max(axis=0).sum() / mat.sum()
                scores[i, 1] = purity
        all_scores.append((scores, paf))

    dfs = []
    for score, inds in all_scores:
        df = pd.DataFrame(score, columns=["miss", "purity"])
        df["ngraph"] = len(inds)
        dfs.append(df)
    big_df = pd.concat(dfs)
    group = big_df.groupby("ngraph")
    return (all_scores, group.agg(["mean", "std"]).T, all_metrics)
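For orientation, a hedged usage sketch (not from the DeepLabCut sources): _benchmark_paf_graphs is an internal helper normally driven by DeepLabCut's cross-validation routines, and the pickle path, inference settings, and PAF index subsets below are hypothetical placeholders that only illustrate the expected argument shapes.

import pickle

# "evaluation_data.pickle" is a hypothetical stand-in for the evaluation data
# dict: a "metadata" entry plus, per image, "prediction" and "groundtruth".
with open("evaluation_data.pickle", "rb") as f:
    data = pickle.load(f)

# Hypothetical candidate edge subsets (indices into the full PAF graph).
paf_inds = [[0, 1], [0, 1, 2, 3]]
all_scores, summary, all_metrics = _benchmark_paf_graphs(
    config="/path/to/config.yaml",
    inference_cfg={"topktoretain": 3, "pcutoff": 0.1, "pafthreshold": 0.1},
    data=data,
    paf_inds=paf_inds,
)
print(summary)  # mean/std of the per-frame "miss" and "purity" scores, per graph size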
Code Example #2
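As in the first example, several names used below (os, Path, plt, np, trange, disk, vp, auxiliaryfunctions) come from module-level imports that the listing omits. A hedged reconstruction, assuming the paths used in the DeepLabCut codebase:

# Assumed module-level imports, not shown in the original listing.
import os
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
from skimage.draw import disk
from tqdm import trange

from deeplabcut.utils import auxiliaryfunctions
from deeplabcut.utils.video_processor import VideoProcessorCV as vp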
def create_video_with_all_detections(
    config,
    videos,
    shuffle=1,
    trainingsetindex=0,
    displayedbodyparts="all",
    destfolder=None,
    modelprefix="",
):
    """
    Create a video labeled with all the detections stored in a '*_full.pickle' file.

    Parameters
    ----------
    config : str
        Absolute path to the config.yaml file

    videos : list of str
        A list of strings containing the full paths to the videos for analysis, or a path to the
        directory where all the videos with the same extension are stored.

    shuffle : int, optional
        Number of shuffles of training dataset. Default is set to 1.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    displayedbodyparts: list of strings, optional
        This selects the body parts that are plotted in the video. Either ``all``, in which case all
        body parts from config.yaml are used, or a list of strings that are a subset of the full list.
        E.g. ['hand','Joystick'] for the demo Reaching-Mackenzie-2018-08-30/config.yaml to select only these two body parts.

    destfolder: string, optional
        Specifies the destination folder that was used for storing analysis data (default is the path of the video).

    modelprefix: str, optional
        Prefix of the model directory, for models stored outside the default project location. Defaults to an empty string.
    """
    from deeplabcut.pose_estimation_tensorflow.lib.inferenceutils import Assembler
    import pickle
    import re

    cfg = auxiliaryfunctions.read_config(config)
    trainFraction = cfg["TrainingFraction"][trainingsetindex]
    DLCscorername, _ = auxiliaryfunctions.GetScorerName(
        cfg, shuffle, trainFraction, modelprefix=modelprefix)

    for video in videos:
        videofolder = os.path.splitext(video)[0]

        if destfolder is None:
            outputname = "{}_full.mp4".format(videofolder + DLCscorername)
            full_pickle = os.path.join(videofolder + DLCscorername +
                                       "_full.pickle")
        else:
            auxiliaryfunctions.attempttomakefolder(destfolder)
            outputname = os.path.join(
                destfolder,
                str(Path(video).stem) + DLCscorername + "_full.mp4")
            full_pickle = os.path.join(
                destfolder,
                str(Path(video).stem) + DLCscorername + "_full.pickle")

        if not os.path.isfile(outputname):
            print("Creating labeled video for ", str(Path(video).stem))
            with open(full_pickle, "rb") as file:
                data = pickle.load(file)

            header = data.pop("metadata")
            all_jointnames = header["all_joints_names"]

            if displayedbodyparts == "all":
                numjoints = len(all_jointnames)
                bpts = range(numjoints)
            else:  # select only "displayedbodyparts"
                bpts = []
                for bptindex, bp in enumerate(all_jointnames):
                    if bp in displayedbodyparts:
                        bpts.append(bptindex)
                numjoints = len(bpts)

            frame_names = list(data)
            frames = [int(re.findall(r"\d+", name)[0]) for name in frame_names]
            colorclass = plt.cm.ScalarMappable(cmap=cfg["colormap"])
            C = colorclass.to_rgba(np.linspace(0, 1, numjoints))
            colors = (C[:, :3] * 255).astype(np.uint8)

            pcutoff = cfg["pcutoff"]
            dotsize = cfg["dotsize"]
            clip = vp(fname=video, sname=outputname, codec="mp4v")
            ny, nx = clip.height(), clip.width()

            for n in trange(clip.nframes):
                frame = clip.load_frame()
                try:
                    ind = frames.index(n)
                    dets = Assembler._flatten_detections(
                        data[frame_names[ind]])
                    for det in dets:
                        if det.label not in bpts or det.confidence < pcutoff:
                            continue
                        x, y = det.pos
                        rr, cc = disk((y, x), dotsize, shape=(ny, nx))
                        frame[rr, cc] = colors[bpts.index(det.label)]
                except ValueError:  # No data stored for that particular frame
                    print(n, "no data")
                try:
                    clip.save_frame(frame)
                except Exception:
                    print(n, "frame writing error.")
            clip.close()
        else:
            print("Detections already plotted, ", outputname)