    def get_batch(self):
        img_idx = np.random.choice(self.num_images,
                                   size=self.batch_size,
                                   replace=True)
        batch_images = []
        batch_joints = []
        joint_ids = []
        data_items = []
        for i in range(self.batch_size):
            data_item = self.data[img_idx[i]]

            data_items.append(data_item)
            im_file = data_item.im_path

            logging.debug("image %s", im_file)
            image = imread(os.path.join(self.cfg["project_path"], im_file),
                           mode="skimage")
            if self.has_gt:
                Joints = data_item.joints
                joint_id = [
                    Joints[person_id][:, 0].astype(int)
                    for person_id in Joints.keys()
                ]
                joint_points = np.concatenate(
                    [Joints[person_id][:, 1:3] for person_id in Joints.keys()])
                joint_ids.append(joint_id)
                batch_joints.append(np.array(joint_points))

            batch_images.append(image)
        return batch_images, joint_ids, batch_joints, data_items
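A minimal, self-contained sketch of the joint layout this variant assumes (the values below are made up for illustration): data_item.joints maps each person id to an array of (joint_id, x, y) rows, so the slicing above yields per-person integer id lists plus one stacked coordinate array.

import numpy as np

# Hypothetical stand-in for data_item.joints.
Joints = {
    0: np.array([[0, 10.0, 20.0], [2, 30.0, 40.0]]),
    1: np.array([[1, 15.0, 25.0]]),
}
joint_id = [Joints[pid][:, 0].astype(int) for pid in Joints]
joint_points = np.concatenate([Joints[pid][:, 1:3] for pid in Joints])
print(joint_id)      # [array([0, 2]), array([1])]
print(joint_points)  # all (x, y) rows stacked across persons

Example #2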
    def get_batch(self):
        img_idx = np.random.choice(self.num_images, size=self.batch_size, replace=True)
        batch_images = []
        batch_joints = []
        joint_ids = []
        inds_visible = []
        data_items = []
        for i in range(self.batch_size):
            data_item = self.data[img_idx[i]]

            data_items.append(data_item)
            im_file = data_item.im_path

            logging.debug("image %s", im_file)
            image = imread(
                os.path.join(self.cfg["project_path"], im_file), mode="skimage"
            )
            if self.has_gt:
                Joints = data_item.joints
                kpts = np.zeros((self._n_kpts * self._n_animals, 2))
                for j in range(self._n_animals):
                    for n, x, y in Joints.get(j, []):
                        kpts[j * self._n_kpts + int(n)] = x, y
                joint_id = [
                    Joints[person_id][:, 0].astype(int) for person_id in Joints.keys()
                ]
                joint_ids.append(joint_id)
                batch_joints.append(kpts)
                inds_visible.append(np.flatnonzero(np.all(kpts != 0, axis=1)))

            batch_images.append(image)
        return batch_images, joint_ids, batch_joints, inds_visible, data_items
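This variant flattens all keypoints into a single (n_animals * n_kpts, 2) buffer indexed by j * n_kpts + joint_id and treats any row left at (0, 0) as invisible. A toy, self-contained illustration (counts and coordinates are made up):

import numpy as np

n_kpts, n_animals = 3, 2
Joints = {0: [(0, 5.0, 6.0), (2, 7.0, 8.0)]}  # animal 1 has no labels
kpts = np.zeros((n_kpts * n_animals, 2))
for j in range(n_animals):
    for n, x, y in Joints.get(j, []):
        kpts[j * n_kpts + int(n)] = x, y
print(np.flatnonzero(np.all(kpts != 0, axis=1)))  # [0 2]

One quirk of this encoding: a genuinely labeled keypoint at exactly (0, 0) would also be counted as invisible.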
Example #3
    def get_batch(self):
        img_idx = np.random.choice(self.num_images, size=self.batch_size, replace=True)
        batch_images = []
        batch_joints = []
        joint_ids = []
        data_items = []
        # Scale is sampled only once to transform all of the images of a batch into the same size.
        scale = self.sample_scale()
        while True:
            idx = np.random.choice(self.num_images)
            size = self.data[idx].im_size
            target_size = np.ceil(size[1:3] * scale).astype(int)
            if self.is_valid_size(target_size[1] * target_size[0]):
                break

        stride = self.cfg["stride"]
        for i in range(self.batch_size):
            data_item = self.data[img_idx[i]]

            data_items.append(data_item)
            im_file = data_item.im_path

            logging.debug("image %s", im_file)
            image = imread(os.path.join(self.cfg["project_path"], im_file), mode="RGB")

            if self.has_gt:
                joints = np.copy(data_item.joints)
                joint_id = [person_joints[:, 0].astype(int) for person_joints in joints]
                joint_points = [person_joints[:, 1:3] for person_joints in joints]
                joint_ids.append(joint_id)
                batch_joints.append(np.array(joint_points)[0])
            batch_images.append(image)
        sm_size = np.ceil(target_size / (stride * 2)).astype(int) * 2
        assert len(batch_images) == self.batch_size
        return batch_images, joint_ids, batch_joints, data_items, sm_size, target_size
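The scoremap size computed above is the rescaled image size downsampled by the stride and rounded up to an even number of cells. A worked example with made-up numbers:

import numpy as np

stride = 8
target_size = np.array([403, 640])
sm_size = np.ceil(target_size / (stride * 2)).astype(int) * 2
print(sm_size)  # [52 80]: ceil(403 / 16) * 2 = 52, ceil(640 / 16) * 2 = 80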
Example #4
    def make_batch(self, data_item, scale, mirror):
        im_file = data_item.im_path
        logging.debug("image %s", im_file)
        logging.debug("mirror %r", mirror)
        image = imread(os.path.join(self.cfg["project_path"], im_file),
                       mode="skimage")

        if self.has_gt:
            joints = np.copy(data_item.joints)

        if self.cfg["crop"]:  # adapted cropping for DLC
            if np.random.rand() < self.cfg["cropratio"]:
                j = np.random.randint(np.shape(joints)[1])
                joints, image = crop_image(joints, image, joints[0, j, 1],
                                           joints[0, j, 2], self.cfg)

        img = imresize(image, scale) if scale != 1 else image
        scaled_img_size = np.array(img.shape[0:2])

        if mirror:
            img = np.fliplr(img)

        batch = {Batch.inputs: img}

        if self.has_gt:
            stride = self.cfg["stride"]
            if mirror:
                joints = [
                    self.mirror_joints(person_joints, self.symmetric_joints,
                                       image.shape[1])
                    for person_joints in joints
                ]
            sm_size = np.ceil(scaled_img_size / (stride * 2)).astype(int) * 2
            scaled_joints = [
                person_joints[:, 1:3] * scale for person_joints in joints
            ]
            joint_id = [
                person_joints[:, 0].astype(int) for person_joints in joints
            ]
            (
                part_score_targets,
                part_score_weights,
                locref_targets,
                locref_mask,
            ) = self.compute_target_part_scoremap(joint_id, scaled_joints,
                                                  data_item, sm_size, scale)

            batch.update({
                Batch.part_score_targets: part_score_targets,
                Batch.part_score_weights: part_score_weights,
                Batch.locref_targets: locref_targets,
                Batch.locref_mask: locref_mask,
            })

        batch = {key: data_to_input(data) for (key, data) in batch.items()}

        batch[Batch.data_item] = data_item

        return batch
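When mirror is set, the joints have to be flipped together with the image. A hedged sketch of what a mirror_joints helper of this kind typically does (the actual implementation may differ): reflect x about the image width and remap each joint id to its symmetric counterpart, e.g. left and right limbs.

import numpy as np

def mirror_joints_sketch(person_joints, symmetric_ids, image_width):
    # person_joints holds (joint_id, x, y) rows; symmetric_ids maps each
    # joint id to its mirror-image counterpart.
    mirrored = np.copy(person_joints)
    mirrored[:, 0] = [symmetric_ids[int(i)] for i in person_joints[:, 0]]
    mirrored[:, 1] = image_width - person_joints[:, 1] - 1
    return mirrored

joints = np.array([[0.0, 10.0, 20.0], [1.0, 30.0, 40.0]])
print(mirror_joints_sketch(joints, {0: 1, 1: 0}, image_width=100))

Example #5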
def test_read_image_shape_fast(tmp_path):
    path_rgb_image = os.path.join(TEST_DATA_DIR, "image.png")
    img = imread(path_rgb_image, mode="skimage")
    shape = img.shape
    assert read_image_shape_fast(path_rgb_image) == (shape[2], shape[0], shape[1])
    path_gray_image = str(tmp_path / "gray.png")
    io.imsave(path_gray_image, (color.rgb2gray(img) * 255).astype(np.uint8))  # rgb2gray returns floats in [0, 1]
    assert read_image_shape_fast(path_gray_image) == (1, shape[0], shape[1])
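The test above expects read_image_shape_fast to return (n_channels, height, width) without decoding the full image. A minimal sketch of such a helper, assuming a PIL-based implementation (the actual one may differ):

from PIL import Image

def read_image_shape_fast_sketch(path):
    # Image.open is lazy: it parses the header without decoding pixel data.
    with Image.open(path) as img:
        width, height = img.size
        n_channels = len(img.getbands())
    return n_channels, height, width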
Example #6
    def get_batch(self):
        img_idx = np.random.choice(self.num_images,
                                   size=self.batch_size,
                                   replace=True)
        batch_images = []
        batch_joints = []
        joint_ids = []
        data_items = []
        # Scale is sampled only once to transform all of the images of a batch into the same size.
        scale = self.sample_scale()

        found_valid = False
        n_tries = 10
        while n_tries > 0:  # up to 10 attempts to draw a valid size
            idx = np.random.choice(self.num_images)
            size = self.data[idx].im_size
            target_size = np.ceil(size[1:3] * scale).astype(int)
            if self.is_valid_size(target_size[1] * target_size[0]):
                found_valid = True
                break
            n_tries -= 1
        if not found_valid:
            if size[1] * size[2] > self.max_input_sizesquare:
                s = "large", "increasing `max_input_size`", "decreasing"
            else:
                s = "small", "decreasing `min_input_size`", "increasing"
            raise ValueError(f"Image size {size[1:3]} may be too {s[0]}. "
                             f"Consider {s[1]} and/or {s[2]} `global_scale` "
                             "in the train/pose_cfg.yaml.")

        stride = self.cfg["stride"]
        for i in range(self.batch_size):
            data_item = self.data[img_idx[i]]

            data_items.append(data_item)
            im_file = data_item.im_path

            logging.debug("image %s", im_file)
            image = imread(os.path.join(self.cfg["project_path"], im_file),
                           mode="skimage")

            if self.has_gt:
                joints = np.copy(data_item.joints)
                joint_id = [
                    person_joints[:, 0].astype(int) for person_joints in joints
                ]
                joint_points = [
                    person_joints[:, 1:3] for person_joints in joints
                ]
                joint_ids.append(joint_id)
                batch_joints.append(np.array(joint_points)[0])
            batch_images.append(image)
        sm_size = np.ceil(target_size / (stride * 2)).astype(int) * 2
        assert len(batch_images) == self.batch_size
        return batch_images, joint_ids, batch_joints, data_items, sm_size, target_size
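The retry loop above hinges on an is_valid_size predicate. A hedged sketch of the check this variant appears to assume (it receives the rescaled pixel count; the bound names follow max_input_sizesquare used above and are otherwise hypothetical):

def is_valid_size_sketch(n_pixels, min_input_sizesquare, max_input_sizesquare):
    # Accept the sampled scale only if the rescaled image area falls
    # within the configured bounds.
    return min_input_sizesquare <= n_pixels <= max_input_sizesquare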
Example #7
    def get_batch(self):
        img_idx = np.random.choice(self.num_images,
                                   size=self.batch_size,
                                   replace=True)
        batch_images = []
        batch_joints = []
        joint_ids = []
        data_items = []

        # The scale is re-drawn with each candidate image until a valid
        # rescaled size is found; that single scale and target size are then
        # shared by every image in the batch.
        while True:
            idx = np.random.choice(self.num_images)
            scale = self.get_scale()
            size = self.data[idx].im_size
            target_size = np.ceil(size[1:3] * scale).astype(int)
            if self.is_valid_size(target_size):
                break

        stride = self.cfg["stride"]
        for i in range(self.batch_size):
            data_item = self.data[img_idx[i]]

            data_items.append(data_item)
            im_file = data_item.im_path

            logging.debug("image %s", im_file)
            image = imread(os.path.join(self.cfg["project_path"], im_file),
                           mode="RGB")
            if self.has_gt:
                Joints = data_item.joints
                joint_id = [
                    Joints[person_id][:, 0].astype(int)
                    for person_id in Joints.keys()
                ]
                joint_points = np.concatenate(
                    [Joints[person_id][:, 1:3] for person_id in Joints.keys()])
                joint_ids.append(joint_id)
                batch_joints.append(np.array(joint_points))

            batch_images.append(image)

        smfactor = self.cfg.get("smfactor", 2)
        sm_size = np.ceil(target_size / (stride * smfactor)).astype(int) * smfactor

        if stride == 2:
            sm_size = np.ceil(target_size / 16).astype(int) * 8

        # assert len(batch_images) == self.batch_size
        return batch_images, joint_ids, batch_joints, data_items, sm_size, target_size
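A worked example (made-up numbers) of the scoremap sizing above, including the special case for a stride of 2:

import numpy as np

target_size = np.array([400, 640])
for stride, smfactor in [(8, 2), (2, 2)]:
    sm_size = np.ceil(target_size / (stride * smfactor)).astype(int) * smfactor
    if stride == 2:
        sm_size = np.ceil(target_size / 16).astype(int) * 8
    print(stride, sm_size)  # stride 8 -> [50 80]; stride 2 -> [200 320]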
Example #8
def evaluate_multianimal_full(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    plotting=False,
    show_errors=True,
    comparisonbodyparts="all",
    gputouse=None,
    modelprefix="",
):
    from deeplabcut.pose_estimation_tensorflow.core import (
        predict,
        predict_multianimal as predictma,
    )
    from deeplabcut.utils import (
        auxiliaryfunctions,
        auxfun_multianimal,
        auxfun_videos,
        conversioncode,
    )

    import tensorflow as tf

    if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
        del os.environ["TF_CUDNN_USE_AUTOTUNE"]  # was potentially set during training

    tf.compat.v1.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    if gputouse is not None:  # gpu selectinon
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    start_path = os.getcwd()

    if plotting is True:
        plotting = "bodypart"

    ##################################################
    # Load data...
    ##################################################
    cfg = auxiliaryfunctions.read_config(config)
    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        TrainingFractions = [cfg["TrainingFraction"][trainingsetindex]]

    # Loading human annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        )
    )
    conversioncode.guarantee_multiindex_rows(Data)

    # Get list of body parts to evaluate network for
    comparisonbodyparts = auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
        cfg, comparisonbodyparts
    )
    all_bpts = np.asarray(
        len(cfg["individuals"]) * cfg["multianimalbodyparts"] + cfg["uniquebodyparts"]
    )
    colors = visualization.get_cmap(len(comparisonbodyparts), name=cfg["colormap"])
    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        str(cfg["project_path"] + "/evaluation-results/")
    )
    for shuffle in Shuffles:
        for trainFraction in TrainingFractions:
            ##################################################
            # Load and setup CNN part detector
            ##################################################
            datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                trainingsetfolder, trainFraction, shuffle, cfg
            )
            modelfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetModelFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix
                    )
                ),
            )
            path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"

            # Load meta data
            (
                data,
                trainIndices,
                testIndices,
                trainFraction,
            ) = auxiliaryfunctions.LoadMetadata(
                os.path.join(cfg["project_path"], metadatafn)
            )

            try:
                dlc_cfg = load_config(str(path_test_config))
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It seems the model for shuffle %s and trainFraction %s does not exist."
                    % (shuffle, trainFraction)
                )

            pipeline = iaa.Sequential(random_order=False)
            pre_resize = dlc_cfg.get("pre_resize")
            if pre_resize:
                width, height = pre_resize
                pipeline.add(iaa.Resize({"height": height, "width": width}))

            # TODO: IMPLEMENT for different batch sizes?
            dlc_cfg["batch_size"] = 1  # due to differently sized images!!!

            stride = dlc_cfg["stride"]
            # Ignore best edges possibly defined during a prior evaluation
            _ = dlc_cfg.pop("paf_best", None)
            joints = dlc_cfg["all_joints_names"]

            # Create folder structure to store results.
            evaluationfolder = os.path.join(
                cfg["project_path"],
                str(
                    auxiliaryfunctions.GetEvaluationFolder(
                        trainFraction, shuffle, cfg, modelprefix=modelprefix
                    )
                ),
            )
            auxiliaryfunctions.attempttomakefolder(evaluationfolder, recursive=True)
            # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

            # Check which snapshots are available and sort them by # iterations
            Snapshots = np.array(
                [
                    fn.split(".")[0]
                    for fn in os.listdir(os.path.join(str(modelfolder), "train"))
                    if "index" in fn
                ]
            )
            if len(Snapshots) == 0:
                print(
                    "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                    % (shuffle, trainFraction)
                )
            else:
                increasing_indices = np.argsort(
                    [int(m.split("-")[1]) for m in Snapshots]
                )
                Snapshots = Snapshots[increasing_indices]

                if cfg["snapshotindex"] == -1:
                    snapindices = [-1]
                elif cfg["snapshotindex"] == "all":
                    snapindices = range(len(Snapshots))
                elif cfg["snapshotindex"] < len(Snapshots):
                    snapindices = [cfg["snapshotindex"]]
                else:
                    raise ValueError(
                        "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                    )  # printing alone would leave `snapindices` undefined below

                final_result = []
                ##################################################
                # Compute predictions over images
                ##################################################
                for snapindex in snapindices:
                    dlc_cfg["init_weights"] = os.path.join(
                        str(modelfolder), "train", Snapshots[snapindex]
                    )  # setting weights to corresponding snapshot.
                    trainingsiterations = (
                        dlc_cfg["init_weights"].split(os.sep)[-1]
                    ).split("-")[
                        -1
                    ]  # read how many training iterations that corresponds to.

                    # name for deeplabcut net (based on its parameters)
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations,
                        modelprefix=modelprefix,
                    )
                    print(
                        "Running ",
                        DLCscorer,
                        " with # of trainingiterations:",
                        trainingsiterations,
                    )
                    (
                        notanalyzed,
                        resultsfilename,
                        DLCscorer,
                    ) = auxiliaryfunctions.CheckifNotEvaluated(
                        str(evaluationfolder),
                        DLCscorer,
                        DLCscorerlegacy,
                        Snapshots[snapindex],
                    )

                    data_path = resultsfilename.split(".h5")[0] + "_full.pickle"

                    if plotting:
                        foldername = os.path.join(
                            str(evaluationfolder),
                            "LabeledImages_" + DLCscorer + "_" + Snapshots[snapindex],
                        )
                        auxiliaryfunctions.attempttomakefolder(foldername)
                        if plotting == "bodypart":
                            fig, ax = visualization.create_minimal_figure()

                    if os.path.isfile(data_path):
                        print("Model already evaluated.", resultsfilename)
                    else:

                        (sess, inputs, outputs,) = predict.setup_pose_prediction(
                            dlc_cfg
                        )

                        PredicteData = {}
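                        # Per-image, per-keypoint distances to ground truth and
                        # the matching prediction confidences, filled in below.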
                        dist = np.full((len(Data), len(all_bpts)), np.nan)
                        conf = np.full_like(dist, np.nan)
                        print("Network Evaluation underway...")
                        for imageindex, imagename in tqdm(enumerate(Data.index)):
                            image_path = os.path.join(cfg["project_path"], *imagename)
                            frame = auxfun_videos.imread(image_path, mode="skimage")

                            GT = Data.iloc[imageindex]
                            if not GT.any():
                                continue

                            # Pass the image and the keypoints through the resizer;
                            # this has no effect if no augmenters were added to it.
                            keypoints = [GT.to_numpy().reshape((-1, 2)).astype(float)]
                            frame_, keypoints = pipeline(
                                images=[frame], keypoints=keypoints
                            )
                            frame = frame_[0]
                            GT[:] = keypoints[0].flatten()

                            df = GT.unstack("coords").reindex(joints, level="bodyparts")

                            # FIXME Is having an empty array vs nan really that necessary?!
                            groundtruthidentity = list(
                                df.index.get_level_values("individuals")
                                .to_numpy()
                                .reshape((-1, 1))
                            )
                            groundtruthcoordinates = list(df.values[:, np.newaxis])
                            for i, coords in enumerate(groundtruthcoordinates):
                                if np.isnan(coords).any():
                                    groundtruthcoordinates[i] = np.empty(
                                        (0, 2), dtype=float
                                    )
                                    groundtruthidentity[i] = np.array([], dtype=str)

                            # Form 2D array of shape (n_rows, 4) where the last dimension
                            # is (sample_index, peak_y, peak_x, bpt_index) to slice the PAFs.
                            temp = df.reset_index(level="bodyparts").dropna()
                            temp["bodyparts"].replace(
                                dict(zip(joints, range(len(joints)))), inplace=True,
                            )
                            temp["sample"] = 0
                            peaks_gt = temp.loc[
                                :, ["sample", "y", "x", "bodyparts"]
                            ].to_numpy()
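                            # Map image-space peak coordinates onto the scoremap
                            # grid (the stride // 2 offset centers each cell).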
                            peaks_gt[:, 1:3] = (peaks_gt[:, 1:3] - stride // 2) / stride

                            pred = predictma.predict_batched_peaks_and_costs(
                                dlc_cfg,
                                np.expand_dims(frame, axis=0),
                                sess,
                                inputs,
                                outputs,
                                peaks_gt.astype(int),
                            )

                            if not pred:
                                continue
                            else:
                                pred = pred[0]

                            PredicteData[imagename] = {}
                            PredicteData[imagename]["index"] = imageindex
                            PredicteData[imagename]["prediction"] = pred
                            PredicteData[imagename]["groundtruth"] = [
                                groundtruthidentity,
                                groundtruthcoordinates,
                                GT,
                            ]

                            coords_pred = pred["coordinates"][0]
                            probs_pred = pred["confidence"]
                            for bpt, xy_gt in df.groupby(level="bodyparts"):
                                inds_gt = np.flatnonzero(
                                    np.all(~np.isnan(xy_gt), axis=1)
                                )
                                n_joint = joints.index(bpt)
                                xy = coords_pred[n_joint]
                                if inds_gt.size and xy.size:
                                    # Pick the predictions closest to ground truth,
                                    # rather than the ones the model is most confident in
                                    xy_gt_values = xy_gt.iloc[inds_gt].values
                                    neighbors = _find_closest_neighbors(
                                        xy_gt_values, xy, k=3
                                    )
                                    found = neighbors != -1
                                    min_dists = np.linalg.norm(
                                        xy_gt_values[found] - xy[neighbors[found]],
                                        axis=1,
                                    )
                                    inds = np.flatnonzero(all_bpts == bpt)
                                    sl = imageindex, inds[inds_gt[found]]
                                    dist[sl] = min_dists
                                    conf[sl] = probs_pred[n_joint][
                                        neighbors[found]
                                    ].squeeze()

                            if plotting == "bodypart":
                                temp_xy = GT.unstack("bodyparts")[joints].values
                                gt = temp_xy.reshape(
                                    (-1, 2, temp_xy.shape[1])
                                ).T.swapaxes(1, 2)
                                h, w, _ = np.shape(frame)
                                fig.set_size_inches(w / 100, h / 100)
                                ax.set_xlim(0, w)
                                ax.set_ylim(0, h)
                                ax.invert_yaxis()
                                ax = visualization.make_multianimal_labeled_image(
                                    frame,
                                    gt,
                                    coords_pred,
                                    probs_pred,
                                    colors,
                                    cfg["dotsize"],
                                    cfg["alphavalue"],
                                    cfg["pcutoff"],
                                    ax=ax,
                                )
                                visualization.save_labeled_frame(
                                    fig,
                                    image_path,
                                    foldername,
                                    imageindex in trainIndices,
                                )
                                visualization.erase_artists(ax)

                        sess.close()  # closes the current tf session

                        # Compute all distance statistics
                        df_dist = pd.DataFrame(dist, columns=df.index)
                        df_conf = pd.DataFrame(conf, columns=df.index)
                        df_joint = pd.concat(
                            [df_dist, df_conf],
                            keys=["rmse", "conf"],
                            names=["metrics"],
                            axis=1,
                        )
                        df_joint = df_joint.reorder_levels(
                            list(np.roll(df_joint.columns.names, -1)), axis=1
                        )
                        df_joint.sort_index(
                            axis=1,
                            level=["individuals", "bodyparts"],
                            ascending=[True, True],
                            inplace=True,
                        )
                        write_path = os.path.join(
                            evaluationfolder, f"dist_{trainingsiterations}.csv"
                        )
                        df_joint.to_csv(write_path)

                        # Calculate overall prediction error
                        error = df_joint.xs("rmse", level="metrics", axis=1)
                        mask = (
                            df_joint.xs("conf", level="metrics", axis=1)
                            >= cfg["pcutoff"]
                        )
                        error_masked = error[mask]
                        error_train = np.nanmean(error.iloc[trainIndices])
                        error_train_cut = np.nanmean(error_masked.iloc[trainIndices])
                        error_test = np.nanmean(error.iloc[testIndices])
                        error_test_cut = np.nanmean(error_masked.iloc[testIndices])
                        results = [
                            trainingsiterations,
                            int(100 * trainFraction),
                            shuffle,
                            np.round(error_train, 2),
                            np.round(error_test, 2),
                            cfg["pcutoff"],
                            np.round(error_train_cut, 2),
                            np.round(error_test_cut, 2),
                        ]
                        final_result.append(results)

                        if show_errors:
                            string = (
                                "Results for {} training iterations, training fraction of {}, and shuffle {}:\n"
                                "Train error: {} pixels. Test error: {} pixels.\n"
                                "With pcutoff of {}:\n"
                                "Train error: {} pixels. Test error: {} pixels."
                            )
                            print(string.format(*results))

                            print("##########################################")
                            print(
                                "Average Euclidean distance to GT per individual (in pixels; test-only)"
                            )
                            print(
                                error_masked.iloc[testIndices]
                                .groupby("individuals", axis=1)
                                .mean()
                                .mean()
                                .to_string()
                            )
                            print(
                                "Average Euclidean distance to GT per bodypart (in pixels; test-only)"
                            )
                            print(
                                error_masked.iloc[testIndices]
                                .groupby("bodyparts", axis=1)
                                .mean()
                                .mean()
                                .to_string()
                            )

                        PredicteData["metadata"] = {
                            "nms radius": dlc_cfg["nmsradius"],
                            "minimal confidence": dlc_cfg["minconfidence"],
                            "sigma": dlc_cfg.get("sigma", 1),
                            "PAFgraph": dlc_cfg["partaffinityfield_graph"],
                            "PAFinds": np.arange(
                                len(dlc_cfg["partaffinityfield_graph"])
                            ),
                            "all_joints": [
                                [i] for i in range(len(dlc_cfg["all_joints"]))
                            ],
                            "all_joints_names": [
                                dlc_cfg["all_joints_names"][i]
                                for i in range(len(dlc_cfg["all_joints"]))
                            ],
                            "stride": dlc_cfg.get("stride", 8),
                        }
                        print(
                            "Done and results stored for snapshot: ",
                            Snapshots[snapindex],
                        )

                        dictionary = {
                            "Scorer": DLCscorer,
                            "DLC-model-config file": dlc_cfg,
                            "trainIndices": trainIndices,
                            "testIndices": testIndices,
                            "trainFraction": trainFraction,
                        }
                        metadata = {"data": dictionary}
                        _ = auxfun_multianimal.SaveFullMultiAnimalData(
                            PredicteData, metadata, resultsfilename
                        )

                        tf.compat.v1.reset_default_graph()

                    n_multibpts = len(cfg["multianimalbodyparts"])
                    if n_multibpts == 1:
                        continue

                    # Skip data-driven skeleton selection unless
                    # the model was trained on the full graph.
                    max_n_edges = n_multibpts * (n_multibpts - 1) // 2
                    n_edges = len(dlc_cfg["partaffinityfield_graph"])
                    if n_edges == max_n_edges:
                        print("Selecting best skeleton...")
                        n_graphs = 10
                        paf_inds = None
                    else:
                        n_graphs = 1
                        paf_inds = [list(range(n_edges))]
                    (
                        results,
                        paf_scores,
                        best_assemblies,
                    ) = crossvalutils.cross_validate_paf_graphs(
                        config,
                        str(path_test_config).replace("pose_", "inference_"),
                        data_path,
                        data_path.replace("_full.", "_meta."),
                        n_graphs=n_graphs,
                        paf_inds=paf_inds,
                        oks_sigma=dlc_cfg.get("oks_sigma", 0.1),
                        margin=dlc_cfg.get("bbox_margin", 0),
                        symmetric_kpts=dlc_cfg.get("symmetric_kpts"),
                    )
                    if plotting == "individual":
                        assemblies, assemblies_unique, image_paths = best_assemblies
                        fig, ax = visualization.create_minimal_figure()
                        n_animals = len(cfg["individuals"])
                        if cfg["uniquebodyparts"]:
                            n_animals += 1
                        colors = visualization.get_cmap(n_animals, name=cfg["colormap"])
                        for k, v in tqdm(assemblies.items()):
                            imname = image_paths[k]
                            image_path = os.path.join(cfg["project_path"], *imname)
                            frame = auxfun_videos.imread(image_path, mode="skimage")

                            h, w, _ = np.shape(frame)
                            fig.set_size_inches(w / 100, h / 100)
                            ax.set_xlim(0, w)
                            ax.set_ylim(0, h)
                            ax.invert_yaxis()

                            gt = [
                                s.to_numpy().reshape((-1, 2))
                                for _, s in Data.loc[imname].groupby("individuals")
                            ]
                            coords_pred = []
                            coords_pred += [ass.xy for ass in v]
                            probs_pred = []
                            probs_pred += [ass.data[:, 2:3] for ass in v]
                            if assemblies_unique is not None:
                                unique = assemblies_unique.get(k, None)
                                if unique is not None:
                                    coords_pred.append(unique[:, :2])
                                    probs_pred.append(unique[:, 2:3])
                            while len(coords_pred) < len(gt):
                                coords_pred.append(np.full((1, 2), np.nan))
                                probs_pred.append(np.full((1, 2), np.nan))
                            ax = visualization.make_multianimal_labeled_image(
                                frame,
                                gt,
                                coords_pred,
                                probs_pred,
                                colors,
                                cfg["dotsize"],
                                cfg["alphavalue"],
                                cfg["pcutoff"],
                                ax=ax,
                            )
                            visualization.save_labeled_frame(
                                fig, image_path, foldername, k in trainIndices,
                            )
                            visualization.erase_artists(ax)

                    df = results[1].copy()
                    df.loc(axis=0)[("mAP_train", "mean")] = [
                        d[0]["mAP"] for d in results[2]
                    ]
                    df.loc(axis=0)[("mAR_train", "mean")] = [
                        d[0]["mAR"] for d in results[2]
                    ]
                    df.loc(axis=0)[("mAP_test", "mean")] = [
                        d[1]["mAP"] for d in results[2]
                    ]
                    df.loc(axis=0)[("mAR_test", "mean")] = [
                        d[1]["mAR"] for d in results[2]
                    ]
                    with open(data_path.replace("_full.", "_map."), "wb") as file:
                        pickle.dump((df, paf_scores), file)

                if len(final_result) > 0:  # Only append if results were calculated
                    make_results_file(final_result, evaluationfolder, DLCscorer)

    os.chdir(str(start_path))
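A typical invocation (the config path is hypothetical) mirrors the signature above:

evaluate_multianimal_full(
    "/analysis/project/reaching-task/config.yaml",
    Shuffles=[1],
    plotting="bodypart",
)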
Example #9
def evaluate_network(
    config,
    Shuffles=[1],
    trainingsetindex=0,
    plotting=False,
    show_errors=True,
    comparisonbodyparts="all",
    gputouse=None,
    rescale=False,
    modelprefix="",
):
    """

    Evaluates the network based on the saved models at different stages of training.
    The evaluation results are stored in .h5 and .csv files under the subdirectory 'evaluation-results'.
    Change the snapshotindex parameter in the config file to 'all' in order to evaluate all the saved models.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    Shuffles: list, optional
        List of integers specifying the shuffle indices of the training dataset. The default is [1]

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    plotting: bool or str, optional
        Plots the predictions on the train and test images.
        The default is ``False``; if provided, it must be either ``True``, ``False``, ``"bodypart"``, or ``"individual"``.
        Setting it to ``True`` defaults to ``"bodypart"`` for multi-animal projects.

    show_errors: bool, optional
        Display train and test errors. The default is ``True``.

    comparisonbodyparts: list of bodyparts, Default is "all".
        The average error will be computed for those body parts only (it has to be a subset of the body parts).

    gputouse: int, optional
        Natural number indicating the index of your GPU (see number in nvidia-smi). If you do not have a GPU, set it to None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    rescale: bool, default False
        Evaluate the model at the 'global_scale' variable (as set in the test/pose_config.yaml file for a particular project), i.e. every
        image will be resized according to that scale and the prediction will be compared to the resized ground truth. The error will be
        reported in pixels rescaled to the *original* size, i.e. for a [200,200] pixel image evaluated at global_scale=.5, the predictions
        are calculated on [100,100] pixel images, compared to 1/2*ground truth, and this error is then multiplied by 2. The evaluation
        images are also shown at the original size.

    Examples
    --------
    If you do not want to plot, just evaluate shuffle 1:
    >>> deeplabcut.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[1])

    If you want to plot and evaluate shuffles 0 and 1:
    >>> deeplabcut.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[0, 1], plotting=True)

    If you want to plot assemblies for a maDLC project:
    >>> deeplabcut.evaluate_network('/analysis/project/reaching-task/config.yaml', Shuffles=[1], plotting="individual")

    Note: this defaults to standard plotting for single-animal projects.

    """
    if plotting not in (True, False, "bodypart", "individual"):
        raise ValueError(f"Unknown value for `plotting`={plotting}")

    import os

    start_path = os.getcwd()
    from deeplabcut.utils import auxiliaryfunctions

    cfg = auxiliaryfunctions.read_config(config)

    if cfg.get("multianimalproject", False):
        from .evaluate_multianimal import evaluate_multianimal_full

        # TODO: Make this code not so redundant!
        evaluate_multianimal_full(
            config=config,
            Shuffles=Shuffles,
            trainingsetindex=trainingsetindex,
            plotting=plotting,
            comparisonbodyparts=comparisonbodyparts,
            gputouse=gputouse,
            modelprefix=modelprefix,
        )
    else:
        from deeplabcut.utils.auxfun_videos import imread, imresize
        from deeplabcut.pose_estimation_tensorflow.core import predict
        from deeplabcut.pose_estimation_tensorflow.config import load_config
        from deeplabcut.pose_estimation_tensorflow.datasets.utils import data_to_input
        from deeplabcut.utils import auxiliaryfunctions, conversioncode
        import tensorflow as tf

        # If a string was passed in, auto-convert to True for backward compatibility
        plotting = bool(plotting)

        if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
            del os.environ[
                "TF_CUDNN_USE_AUTOTUNE"]  # was potentially set during training

        tf.compat.v1.reset_default_graph()
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
        #    tf.logging.set_verbosity(tf.logging.WARN)

        start_path = os.getcwd()
        # Read file path for pose_config file. >> pass it on
        cfg = auxiliaryfunctions.read_config(config)
        if gputouse is not None:  # gpu selection
            os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

        if trainingsetindex == "all":
            TrainingFractions = cfg["TrainingFraction"]
        else:
            if (trainingsetindex < len(cfg["TrainingFraction"])
                    and trainingsetindex >= 0):
                TrainingFractions = [
                    cfg["TrainingFraction"][int(trainingsetindex)]
                ]
            else:
                raise Exception(
                    "Please check the trainingsetindex! ",
                    trainingsetindex,
                    " should be an integer from 0 .. ",
                    int(len(cfg["TrainingFraction"]) - 1),
                )

        # Loading human annotated data
        trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
        Data = pd.read_hdf(
            os.path.join(
                cfg["project_path"],
                str(trainingsetfolder),
                "CollectedData_" + cfg["scorer"] + ".h5",
            ))

        # Get list of body parts to evaluate network for
        comparisonbodyparts = (
            auxiliaryfunctions.IntersectionofBodyPartsandOnesGivenbyUser(
                cfg, comparisonbodyparts))
        # Make folder for evaluation
        auxiliaryfunctions.attempttomakefolder(
            str(cfg["project_path"] + "/evaluation-results/"))
        for shuffle in Shuffles:
            for trainFraction in TrainingFractions:
                ##################################################
                # Load and setup CNN part detector
                ##################################################
                datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
                    trainingsetfolder, trainFraction, shuffle, cfg)
                modelfolder = os.path.join(
                    cfg["project_path"],
                    str(
                        auxiliaryfunctions.GetModelFolder(
                            trainFraction,
                            shuffle,
                            cfg,
                            modelprefix=modelprefix)),
                )

                path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
                # Load meta data
                (
                    data,
                    trainIndices,
                    testIndices,
                    trainFraction,
                ) = auxiliaryfunctions.LoadMetadata(
                    os.path.join(cfg["project_path"], metadatafn))

                try:
                    dlc_cfg = load_config(str(path_test_config))
                except FileNotFoundError:
                    raise FileNotFoundError(
                        "It seems the model for shuffle %s and trainFraction %s does not exist."
                        % (shuffle, trainFraction))

                # change batch size, if it was edited during analysis!
                dlc_cfg["batch_size"] = 1  # in case this was edited for analysis.

                # Create folder structure to store results.
                evaluationfolder = os.path.join(
                    cfg["project_path"],
                    str(
                        auxiliaryfunctions.GetEvaluationFolder(
                            trainFraction,
                            shuffle,
                            cfg,
                            modelprefix=modelprefix)),
                )
                auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                                       recursive=True)
                # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

                # Check which snapshots are available and sort them by # iterations
                Snapshots = np.array([
                    fn.split(".")[0] for fn in os.listdir(
                        os.path.join(str(modelfolder), "train"))
                    if "index" in fn
                ])
                try:  # check if any were found
                    Snapshots[0]
                except IndexError:
                    raise FileNotFoundError(
                        "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                        % (shuffle, trainFraction))

                increasing_indices = np.argsort(
                    [int(m.split("-")[1]) for m in Snapshots])
                Snapshots = Snapshots[increasing_indices]

                if cfg["snapshotindex"] == -1:
                    snapindices = [-1]
                elif cfg["snapshotindex"] == "all":
                    snapindices = range(len(Snapshots))
                elif cfg["snapshotindex"] < len(Snapshots):
                    snapindices = [cfg["snapshotindex"]]
                else:
                    raise ValueError(
                        "Invalid choice, only -1 (last), any integer up to last, or all (as string)!"
                    )

                final_result = []

                ########################### RESCALING (to global scale)
                if rescale:
                    scale = dlc_cfg["global_scale"]
                    Data = (pd.read_hdf(
                        os.path.join(
                            cfg["project_path"],
                            str(trainingsetfolder),
                            "CollectedData_" + cfg["scorer"] + ".h5",
                        )) * scale)
                else:
                    scale = 1

                conversioncode.guarantee_multiindex_rows(Data)
                ##################################################
                # Compute predictions over images
                ##################################################
                for snapindex in snapindices:
                    dlc_cfg["init_weights"] = os.path.join(
                        str(modelfolder), "train", Snapshots[snapindex]
                    )  # setting weights to corresponding snapshot.
                    trainingsiterations = (
                        dlc_cfg["init_weights"].split(os.sep)[-1]
                    ).split(
                        "-"
                    )[-1]  # read how many training iterations that corresponds to.

                    # Name for deeplabcut net (based on its parameters)
                    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
                        cfg,
                        shuffle,
                        trainFraction,
                        trainingsiterations,
                        modelprefix=modelprefix,
                    )
                    print(
                        "Running ",
                        DLCscorer,
                        " with # of training iterations:",
                        trainingsiterations,
                    )
                    (
                        notanalyzed,
                        resultsfilename,
                        DLCscorer,
                    ) = auxiliaryfunctions.CheckifNotEvaluated(
                        str(evaluationfolder),
                        DLCscorer,
                        DLCscorerlegacy,
                        Snapshots[snapindex],
                    )
                    if notanalyzed:
                        # Specifying state of model (snapshot / training state)
                        sess, inputs, outputs = predict.setup_pose_prediction(
                            dlc_cfg)
                        Numimages = len(Data.index)
                        PredicteData = np.zeros(
                            (Numimages, 3 * len(dlc_cfg["all_joints_names"])))
                        print("Running evaluation ...")
                        for imageindex, imagename in tqdm(enumerate(
                                Data.index)):
                            image = imread(
                                os.path.join(cfg["project_path"], *imagename),
                                mode="skimage",
                            )
                            if scale != 1:
                                image = imresize(image, scale)

                            image_batch = data_to_input(image)
                            # Compute prediction with the CNN
                            outputs_np = sess.run(
                                outputs, feed_dict={inputs: image_batch})
                            scmap, locref = predict.extract_cnn_output(
                                outputs_np, dlc_cfg)

                            # Extract maximum scoring location from the heatmap, assume 1 person
                            pose = predict.argmax_pose_predict(
                                scmap, locref, dlc_cfg["stride"])
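                            # `pose` holds one (x, y, likelihood) triple per joint;
                            # flattened, it fills the 3 * n_joints columns per image.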
                            PredicteData[imageindex, :] = (
                                pose.flatten()
                            )  # NOTE: thereby cfg_test['all_joints_names'] should be in the same order as bodyparts!

                        sess.close()  # closes the current tf session

                        index = pd.MultiIndex.from_product(
                            [
                                [DLCscorer],
                                dlc_cfg["all_joints_names"],
                                ["x", "y", "likelihood"],
                            ],
                            names=["scorer", "bodyparts", "coords"],
                        )

                        # Saving results
                        DataMachine = pd.DataFrame(PredicteData,
                                                   columns=index,
                                                   index=Data.index)
                        DataMachine.to_hdf(resultsfilename, "df_with_missing")

                        print(
                            "Analysis is done and the results are stored (see evaluation-results) for snapshot: ",
                            Snapshots[snapindex],
                        )
                        DataCombined = pd.concat([Data.T, DataMachine.T],
                                                 axis=0,
                                                 sort=False).T

                        RMSE, RMSEpcutoff = pairwisedistances(
                            DataCombined,
                            cfg["scorer"],
                            DLCscorer,
                            cfg["pcutoff"],
                            comparisonbodyparts,
                        )
                        testerror = np.nanmean(
                            RMSE.iloc[testIndices].values.flatten())
                        trainerror = np.nanmean(
                            RMSE.iloc[trainIndices].values.flatten())
                        testerrorpcutoff = np.nanmean(
                            RMSEpcutoff.iloc[testIndices].values.flatten())
                        trainerrorpcutoff = np.nanmean(
                            RMSEpcutoff.iloc[trainIndices].values.flatten())
                        results = [
                            trainingsiterations,
                            int(100 * trainFraction),
                            shuffle,
                            np.round(trainerror, 2),
                            np.round(testerror, 2),
                            cfg["pcutoff"],
                            np.round(trainerrorpcutoff, 2),
                            np.round(testerrorpcutoff, 2),
                        ]
                        final_result.append(results)

                        if show_errors:
                            print(
                                "Results for",
                                trainingsiterations,
                                " training iterations:",
                                int(100 * trainFraction),
                                shuffle,
                                "train error:",
                                np.round(trainerror, 2),
                                "pixels. Test error:",
                                np.round(testerror, 2),
                                " pixels.",
                            )
                            print(
                                "With pcutoff of",
                                cfg["pcutoff"],
                                " train error:",
                                np.round(trainerrorpcutoff, 2),
                                "pixels. Test error:",
                                np.round(testerrorpcutoff, 2),
                                "pixels",
                            )
                            if scale != 1:
                                print(
                                    "The predictions have been calculated for rescaled images (and rescaled ground truth). Scale:",
                                    scale,
                                )
                            print(
                                "Thereby, the errors are given by the average distances between the labels by DLC and the scorer."
                            )

                        if plotting:
                            print("Plotting...")
                            foldername = os.path.join(
                                str(evaluationfolder),
                                "LabeledImages_" + DLCscorer + "_" +
                                Snapshots[snapindex],
                            )
                            auxiliaryfunctions.attempttomakefolder(foldername)
                            Plotting(
                                cfg,
                                comparisonbodyparts,
                                DLCscorer,
                                trainIndices,
                                DataCombined * 1.0 / scale,
                                foldername,
                            )  # Rescaling coordinates to have figure in original size!

                        tf.compat.v1.reset_default_graph()
                        # print(final_result)
                    else:
                        DataMachine = pd.read_hdf(resultsfilename)
                        conversioncode.guarantee_multiindex_rows(DataMachine)
                        if plotting:
                            DataCombined = pd.concat([Data.T, DataMachine.T],
                                                     axis=0,
                                                     sort=False).T
                            print(
                                "Plotting...(attention scale might be inconsistent in comparison to when data was analyzed; i.e. if you used rescale)"
                            )
                            foldername = os.path.join(
                                str(evaluationfolder),
                                "LabeledImages_" + DLCscorer + "_" +
                                Snapshots[snapindex],
                            )
                            auxiliaryfunctions.attempttomakefolder(foldername)
                            Plotting(
                                cfg,
                                comparisonbodyparts,
                                DLCscorer,
                                trainIndices,
                                DataCombined * 1.0 / scale,
                                foldername,
                            )

                if len(final_result) > 0:  # Only append if results were calculated
                    make_results_file(final_result, evaluationfolder, DLCscorer)
                    print(
                        "The network is evaluated and the results are stored in the subdirectory 'evaluation_results'."
                    )
                    print(
                        "Please check the results, then choose the best model (snapshot) for prediction. You can update the config.yaml file with the appropriate index for the 'snapshotindex'.\nUse the function 'analyze_video' to make predictions on new videos."
                    )
                    print(
                        "Otherwise, consider adding more labeled data and retraining the network (see DeepLabCut workflow Fig 2, Nath 2019)."
                    )

    # returning to initial folder
    os.chdir(str(start_path))
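
For orientation, a minimal sketch of how the final_result rows collected above could be tabulated with pandas; the column names are assumptions inferred from the values appended to each row, not the exact headers written by make_results_file:

import pandas as pd

# Assumed column names, matching the order of values appended to `results` above.
columns = [
    "Training iterations", "%Training dataset", "Shuffle",
    "Train error (px)", "Test error (px)", "p-cutoff used",
    "Train error with p-cutoff (px)", "Test error with p-cutoff (px)",
]
results_table = pd.DataFrame(final_result, columns=columns)
print(results_table.to_string(index=False))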
Example #10
def extract_maps(
    config,
    shuffle=0,
    trainingsetindex=0,
    gputouse=None,
    rescale=False,
    Indices=None,
    modelprefix="",
):
    """
    Extracts the scoremap, locref, partaffinityfields (if available).

    Returns a dictionary indexed by trainingsetfraction, snapshotindex, and imageindex.
    For those keys, each item contains: (image, scmap, locref, paf, bodypart names,
    partaffinity graph, imagename, True/False if this image was in the training set).

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    shuffle: integer
        Integer specifying the shuffle index of the training dataset. The default is 0.

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml). This
        variable can also be set to "all".

    rescale: bool, default False
        Evaluate the model at the 'global_scale' variable (as set in the test/pose_config.yaml file for a particular project). I.e. every
        image will be resized according to that scale and the prediction will be compared to the resized ground truth. The error will be
        reported in pixels at the *original* size. I.e. for a [200,200] pixel image evaluated at global_scale=.5, the predictions are
        calculated on [100,100] pixel images, compared to 1/2 * ground truth, and this error is then multiplied by 2! The evaluation
        images are also shown at the original size!

    Examples
    --------
    If you want to extract the data for images 0 and 103 (of the training set) for the model trained with shuffle 0:
    >>> deeplabcut.extract_maps(configfile, 0, Indices=[0, 103])

    """
    from deeplabcut.utils.auxfun_videos import imread, imresize
    from deeplabcut.pose_estimation_tensorflow.nnet import predict
    from deeplabcut.pose_estimation_tensorflow.nnet import (
        predict_multianimal as predictma,
    )
    from deeplabcut.pose_estimation_tensorflow.config import load_config
    from deeplabcut.pose_estimation_tensorflow.dataset.pose_dataset import data_to_input
    from deeplabcut.utils import auxiliaryfunctions
    from tqdm import tqdm
    import tensorflow as tf

    vers = (tf.__version__).split(".")
    if int(vers[0]) == 1 and int(vers[1]) > 12:
        TF = tf.compat.v1
    else:
        TF = tf

    import pandas as pd
    from pathlib import Path
    import numpy as np

    TF.reset_default_graph()
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  #
    #    tf.logging.set_verbosity(tf.logging.WARN)

    start_path = os.getcwd()
    # Read file path for pose_config file. >> pass it on
    cfg = auxiliaryfunctions.read_config(config)

    if gputouse is not None:  # gpu selection
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)

    if trainingsetindex == "all":
        TrainingFractions = cfg["TrainingFraction"]
    else:
        if 0 <= trainingsetindex < len(cfg["TrainingFraction"]):
            TrainingFractions = [cfg["TrainingFraction"][int(trainingsetindex)]]
        else:
            raise Exception(
                "Please check the trainingsetindex! %s should be an integer from 0 .. %s"
                % (trainingsetindex, int(len(cfg["TrainingFraction"]) - 1))
            )

    # Loading human annotated data
    trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
    Data = pd.read_hdf(
        os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        ),
        "df_with_missing",
    )

    # Make folder for evaluation
    auxiliaryfunctions.attempttomakefolder(
        os.path.join(cfg["project_path"], "evaluation-results"))

    Maps = {}
    for trainFraction in TrainingFractions:
        Maps[trainFraction] = {}
        ##################################################
        # Load and setup CNN part detector
        ##################################################
        datafn, metadatafn = auxiliaryfunctions.GetDataandMetaDataFilenames(
            trainingsetfolder, trainFraction, shuffle, cfg)

        modelfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetModelFolder(trainFraction,
                                                  shuffle,
                                                  cfg,
                                                  modelprefix=modelprefix)),
        )
        path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
        # Load meta data
        (
            data,
            trainIndices,
            testIndices,
            trainFraction,
        ) = auxiliaryfunctions.LoadMetadata(
            os.path.join(cfg["project_path"], metadatafn))
        try:
            dlc_cfg = load_config(str(path_test_config))
        except FileNotFoundError:
            raise FileNotFoundError(
                "It seems the model for shuffle %s and trainFraction %s does not exist."
                % (shuffle, trainFraction))

        # Change batch size to 1, in case it was edited during analysis.
        dlc_cfg["batch_size"] = 1

        # Create folder structure to store results.
        evaluationfolder = os.path.join(
            cfg["project_path"],
            str(
                auxiliaryfunctions.GetEvaluationFolder(
                    trainFraction, shuffle, cfg, modelprefix=modelprefix)),
        )
        auxiliaryfunctions.attempttomakefolder(evaluationfolder,
                                               recursive=True)
        # path_train_config = modelfolder / 'train' / 'pose_cfg.yaml'

        # Check which snapshots are available and sort them by # iterations
        Snapshots = np.array([
            fn.split(".")[0]
            for fn in os.listdir(os.path.join(str(modelfolder), "train"))
            if "index" in fn
        ])
        try:  # check if any were found
            Snapshots[0]
        except IndexError:
            raise FileNotFoundError(
                "Snapshots not found! It seems the dataset for shuffle %s and trainFraction %s is not trained.\nPlease train it before evaluating.\nUse the function 'train_network' to do so."
                % (shuffle, trainFraction))

        increasing_indices = np.argsort(
            [int(m.split("-")[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]

        if cfg["snapshotindex"] == -1:
            snapindices = [-1]
        elif cfg["snapshotindex"] == "all":
            snapindices = range(len(Snapshots))
        elif cfg["snapshotindex"] < len(Snapshots):
            snapindices = [cfg["snapshotindex"]]
        else:
            raise ValueError(
                "Invalid choice, only -1 (last), any integer up to last, or 'all' (as string)!"
            )

        ########################### RESCALING (to global scale)
        scale = dlc_cfg["global_scale"] if rescale else 1
        Data *= scale

        bptnames = [
            dlc_cfg["all_joints_names"][i]
            for i in range(len(dlc_cfg["all_joints"]))
        ]

        for snapindex in snapindices:
            dlc_cfg["init_weights"] = os.path.join(
                str(modelfolder), "train", Snapshots[snapindex]
            )  # setting weights to corresponding snapshot.
            trainingsiterations = (
                dlc_cfg["init_weights"].split(os.sep)[-1]
            ).split("-")[-1]  # read how many training iterations that corresponds to.

            # Name for deeplabcut net (based on its parameters)
            # DLCscorer,DLCscorerlegacy = auxiliaryfunctions.GetScorerName(cfg,shuffle,trainFraction,trainingsiterations)
            # notanalyzed, resultsfilename, DLCscorer=auxiliaryfunctions.CheckifNotEvaluated(str(evaluationfolder),DLCscorer,DLCscorerlegacy,Snapshots[snapindex])
            # print("Extracting maps for ", DLCscorer, " with # of trainingiterations:", trainingsiterations)
            # if notanalyzed: #this only applies to ask if h5 exists...

            # Specifying state of model (snapshot / training state)
            sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)
            Numimages = len(Data.index)
            PredictedData = np.zeros(
                (Numimages, 3 * len(dlc_cfg["all_joints_names"])))
            print("Analyzing data...")
            # Build (index, name) pairs afresh for each snapshot, so the iterator is
            # not exhausted when evaluating multiple snapshots, and so that imageindex
            # refers to the original position in Data (as testIndices expects).
            if Indices is None:
                images_to_analyze = list(enumerate(Data.index))
            else:
                images_to_analyze = [(j, Data.index[j]) for j in Indices]

            DATA = {}
            for imageindex, imagename in tqdm(images_to_analyze):
                image = imread(os.path.join(cfg["project_path"], imagename),
                               mode="RGB")
                if scale != 1:
                    image = imresize(image, scale)

                image_batch = data_to_input(image)
                # Compute prediction with the CNN
                outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})

                if cfg.get("multianimalproject", False):
                    scmap, locref, paf = predictma.extract_cnn_output(
                        outputs_np, dlc_cfg)
                    pagraph = dlc_cfg["partaffinityfield_graph"]
                else:
                    scmap, locref = predict.extract_cnn_output(
                        outputs_np, dlc_cfg)
                    paf = None
                    pagraph = []

                trainingframe = imageindex not in testIndices

                DATA[imageindex] = [
                    image,
                    scmap,
                    locref,
                    paf,
                    bptnames,
                    pagraph,
                    imagename,
                    trainingframe,
                ]
            Maps[trainFraction][Snapshots[snapindex]] = DATA
    os.chdir(str(start_path))
    return Maps
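
A minimal usage sketch for extract_maps; the config path is a placeholder, and the unpacking follows the per-image list built in DATA above:

maps = extract_maps("/path/to/project/config.yaml", shuffle=0, Indices=[0, 103])
for train_fraction, snapshots in maps.items():
    for snapshot, data in snapshots.items():
        for image_index, (image, scmap, locref, paf, bptnames,
                          pagraph, imagename, is_train) in data.items():
            print(train_fraction, snapshot, imagename,
                  "train" if is_train else "test",
                  "scoremap shape:", scmap.shape)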
Example #11
def GetPosesofFrames(cfg, dlc_cfg, sess, inputs, outputs, directory, framelist, nframes, batchsize, rgb):
    '''Batchwise prediction of pose for a frame list in a directory'''
    from deeplabcut.utils.auxfun_videos import imread

    print("Starting to extract posture")
    if rgb:
        im = imread(os.path.join(directory, framelist[0]), mode='RGB')
    else:
        im = imread(os.path.join(directory, framelist[0]))

    ny, nx, nc = np.shape(im)
    print("Overall # of frames: ", nframes, " found with (before cropping) frame dimensions: ", nx, ny)

    PredictedData = np.zeros((nframes, dlc_cfg['num_outputs'] * 3 * len(dlc_cfg['all_joints_names'])))
    batch_ind = 0  # keeps track of which image within a batch should be written to
    batch_num = 0  # keeps track of which batch you are at
    if cfg['cropping']:
        print("Cropping based on x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file." % (cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2']))
        nx, ny = cfg['x2'] - cfg['x1'], cfg['y2'] - cfg['y1']
        if nx <= 0 or ny <= 0:
            raise Exception('Please check the order of the cropping parameters!')
        if not (cfg['x1'] >= 0 and cfg['x2'] < int(np.shape(im)[1]) and cfg['y1'] >= 0 and cfg['y2'] < int(np.shape(im)[0])):
            raise Exception('Please check the boundaries of the cropping box!')

    pbar = tqdm(total=nframes)
    step = max(10, int(nframes / 100))

    if batchsize == 1:
        for counter, framename in enumerate(framelist):
            if rgb:
                im = imread(os.path.join(directory, framename), mode='RGB')
            else:
                im = imread(os.path.join(directory, framename))

            if counter % step == 0:
                pbar.update(step)

            if cfg['cropping']:
                frame = img_as_ubyte(im[cfg['y1']:cfg['y2'], cfg['x1']:cfg['x2'], :])
            else:
                frame = img_as_ubyte(im)

            pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
            PredictedData[counter, :] = pose.flatten()
    else:
        frames = np.empty((batchsize, ny, nx, 3), dtype='ubyte')  # holds all frames of one batch
        for counter, framename in enumerate(framelist):
            if rgb:
                im = imread(os.path.join(directory, framename), mode='RGB')
            else:
                im = imread(os.path.join(directory, framename))

            if counter % step == 0:
                pbar.update(step)

            if cfg['cropping']:
                frames[batch_ind] = img_as_ubyte(im[cfg['y1']:cfg['y2'], cfg['x1']:cfg['x2'], :])
            else:
                frames[batch_ind] = img_as_ubyte(im)

            if batch_ind == batchsize - 1:
                pose = predict.getposeNP(frames, dlc_cfg, sess, inputs, outputs)
                PredictedData[batch_num * batchsize:(batch_num + 1) * batchsize, :] = pose
                batch_ind = 0
                batch_num += 1
            else:
                batch_ind += 1

        if batch_ind > 0:  # take care of the last, not fully filled batch
            # process the whole batch (some frames might be left over from the previous batch!)
            pose = predict.getposeNP(frames, dlc_cfg, sess, inputs, outputs)
            PredictedData[batch_num * batchsize:batch_num * batchsize + batch_ind, :] = pose[:batch_ind, :]

    pbar.close()
    return PredictedData, nframes, nx, ny
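
A sketch of how the flat PredictedData array could be reshaped into per-joint (x, y, likelihood) triplets, assuming dlc_cfg['num_outputs'] == 1 and the usual DLC column order (names below are illustrative):

PredictedData, nframes, nx, ny = GetPosesofFrames(
    cfg, dlc_cfg, sess, inputs, outputs, "frames_dir", framelist,
    len(framelist), batchsize=8, rgb=True,
)
njoints = len(dlc_cfg["all_joints_names"])
poses = PredictedData.reshape(nframes, njoints, 3)  # one (x, y, likelihood) row per joint
x, y, likelihood = poses[..., 0], poses[..., 1], poses[..., 2]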
Augmentations.append([augtype, seq])

augtype = 'fog'
seq = iaa.Sequential([iaa.Fog()])
Augmentations.append([augtype, seq])

augtype = 'snow'
seq = iaa.Sequential([
    iaa.Snowflakes(flake_size=(.2, .5),
                   density=(0.005, 0.07),
                   speed=(0.01, 0.05))
])
Augmentations.append([augtype, seq])

for ind, imname in enumerate(Dataframe.index):
    image = imresize(imread(os.path.join('montblanc_images', imname)),
                     size=scale)
    ny, nx, nc = np.shape(image)

    kpts = []
    for i in individuals:
        for b in bodyparts:
            x, y = (Dataframe.iloc[ind][scorer][i][b]['x'],
                    Dataframe.iloc[ind][scorer][i][b]['y'])
            if np.isfinite(x) and np.isfinite(y):
                kpts.append(Keypoint(x=x * scale, y=y * scale))

    kps = KeypointsOnImage(kpts, shape=image.shape)

    cells = []
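
The fragment above stops before the augmenters are applied; a minimal sketch of that missing step, using imgaug's joint image/keypoint call (variable names follow the loop above):

for augtype, seq in Augmentations:
    image_aug, kps_aug = seq(image=image, keypoints=kps)
    # kps_aug.keypoints holds the transformed coordinates, aligned with kpts order
    cells.append(kps_aug.draw_on_image(image_aug))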
Example #13
    def make_batch(self, data_item, scale, mirror):
        im_file = data_item.im_path
        logging.debug("image %s", im_file)
        logging.debug("mirror %r", mirror)
        image = imread(os.path.join(self.cfg['project_path'], im_file), mode="RGB")

        if self.has_gt:
            joints = np.copy(data_item.joints)

        if self.cfg['crop']:  # adapted cropping for DLC
            if np.random.rand() < self.cfg['cropratio']:
                j = np.random.randint(np.shape(joints)[1])  # pick a random joint
                joints, image = CropImage(
                    joints, image, joints[0, j, 1], joints[0, j, 2], self.cfg
                )
                """
                print(joints)
                import matplotlib.pyplot as plt
                plt.clf()
                plt.imshow(image)
                plt.plot(joints[0,:,1],joints[0,:,2],'.')
                plt.savefig("abc"+str(np.random.randint(int(1e6)))+".png")
                """
            else:
                pass  # no cropping!

        img = imresize(image, scale) if scale != 1 else image
        scaled_img_size = arr(img.shape[0:2])
        if mirror:
            img = np.fliplr(img)

        batch = {Batch.inputs: img}

        if self.has_gt:
            stride = self.cfg['stride']

            if mirror:
                joints = [
                    self.mirror_joints(
                        person_joints, self.symmetric_joints, image.shape[1]
                    )
                    for person_joints in joints
                ]

            sm_size = np.ceil(scaled_img_size / (stride * 2)).astype(int) * 2

            scaled_joints = [person_joints[:, 1:3] * scale for person_joints in joints]

            joint_id = [person_joints[:, 0].astype(int) for person_joints in joints]
            (
                part_score_targets,
                part_score_weights,
                locref_targets,
                locref_mask,
            ) = self.compute_target_part_scoremap(
                joint_id, scaled_joints, data_item, sm_size, scale
            )

            batch.update(
                {
                    Batch.part_score_targets: part_score_targets,
                    Batch.part_score_weights: part_score_weights,
                    Batch.locref_targets: locref_targets,
                    Batch.locref_mask: locref_mask,
                }
            )

        batch = {key: data_to_input(data) for (key, data) in batch.items()}

        batch[Batch.data_item] = data_item

        return batch
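
For reference, data_to_input in DLC's pose_dataset module essentially adds a batch axis so a single image (or target map) can be fed to the network; a minimal sketch of the idea, not necessarily the exact implementation:

import numpy as np

def data_to_input_sketch(data):
    # Prepend a batch dimension of size 1 and cast to float for the TF placeholders.
    return np.expand_dims(data, axis=0).astype(float)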
    def make_batch(self, data_item, scale, mirror):

        im_file = data_item.im_path
        logging.debug("image %s", im_file)
        logging.debug("mirror %r", mirror)

        # print(im_file, os.getcwd())
        # print(self.cfg.project_path)
        image = imread(os.path.join(self.cfg.project_path, im_file),
                       mode="RGB")

        if self.has_gt:
            joints = np.copy(data_item.joints)

        if self.cfg.crop:  # adapted cropping for DLC
            if np.random.rand() < self.cfg.cropratio:
                # 1. get center of joints
                j = np.random.randint(
                    np.shape(joints)[1])  # pick a random joint
                # draw random crop dimensions & subtract joint points
                joints, image = CropImage(joints, image, joints[0, j, 1],
                                          joints[0, j, 2], self.cfg)

                # if self.has_gt:
                #    joints[0,:, 1] -= x0
                #    joints[0,:, 2] -= y0
                """
                print(joints)
                import matplotlib.pyplot as plt
                plt.clf()
                plt.imshow(image)
                plt.plot(joints[0,:,1],joints[0,:,2],'.')
                plt.savefig("abc"+str(np.random.randint(int(1e6)))+".png")
                """
            else:
                pass  # no cropping!

        # Charlie addition
        if not self.cfg.using_z_slices:
            img = imresize(image, scale) if scale != 1 else image
            scaled_img_size = arr(img.shape[0:2])
        else:
            img = imresize(image, scale) if scale < 1 else image
            scaled_img_size = arr(img.shape[0:3])

        if mirror:
            img = np.fliplr(img)

        batch = {Batch.inputs: img}

        if self.has_gt:
            stride = self.cfg.stride

            if mirror:
                joints = [
                    self.mirror_joints(person_joints, self.symmetric_joints,
                                       image.shape[1])
                    for person_joints in joints
                ]

            sm_size = np.ceil(scaled_img_size / (stride * 2)).astype(int) * 2

            scaled_joints = [
                person_joints[:, 1:3] * scale for person_joints in joints
            ]

            joint_id = [
                person_joints[:, 0].astype(int) for person_joints in joints
            ]
            (
                part_score_targets,
                part_score_weights,
                locref_targets,
                locref_mask,
            ) = self.compute_target_part_scoremap(joint_id, scaled_joints,
                                                  data_item, sm_size, scale)

            batch.update({
                Batch.part_score_targets: part_score_targets,
                Batch.part_score_weights: part_score_weights,
                Batch.locref_targets: locref_targets,
                Batch.locref_mask: locref_mask,
            })

        batch = {key: data_to_input(data) for (key, data) in batch.items()}

        batch[Batch.data_item] = data_item

        return batch
Augmentations.append([augtype, seq])

augtype = 'edgedetect'
seq = iaa.Sequential([iaa.EdgeDetect(alpha=(0.8, 1.0))])
Augmentations.append([augtype, seq])

augtype = 'flipud'
seq = iaa.Sequential([iaa.Flipud(1)])
Augmentations.append([augtype, seq])

augtype = 'fliplr'
seq = iaa.Sequential([iaa.Fliplr(1)])
Augmentations.append([augtype, seq])

for ind, imname in enumerate(Dataframe.index):
    image = imresize(imread(os.path.join(imfolder, imname)), size=scale)
    ny, nx, nc = np.shape(image)

    kpts = []
    for b in bodyparts:
        x, y = Dataframe.iloc[ind][scorer][b]['x'], Dataframe.iloc[ind][
            scorer][b]['y']
        if np.isfinite(x) and np.isfinite(y):
            kpts.append(Keypoint(x=x * scale, y=y * scale))

    kps = KeypointsOnImage(kpts, shape=image.shape)

    cells = []

    # image with keypoints before augmentation
    image_before = kps.draw_on_image(image,
    def make_batch(self, data_item, scale, mirror):
        im_file = data_item.im_path
        logging.debug("image %s", im_file)
        logging.debug("mirror %r", mirror)

        # print(im_file, os.getcwd())
        # print(self.cfg.project_path)
        vid_fname = os.path.join(self.cfg.project_path, im_file)
        image = imread(vid_fname, mode="RGB")
        # print("Full image filename: ", vid_fname)
        # print("Shape of read image: ", image.shape)

        if self.has_gt:
            joints = np.copy(data_item.joints)

        if self.cfg.crop:  # adapted cropping for DLC
            if np.random.rand() < self.cfg.cropratio:
                j = np.random.randint(np.shape(joints)[1])  # pick a random joint
                joints, image = CropImage(
                    joints, image, joints[0, j, 1], joints[0, j, 2], self.cfg
                )
                """
                print(joints)
                import matplotlib.pyplot as plt
                plt.clf()
                plt.imshow(image)
                plt.plot(joints[0,:,1],joints[0,:,2],'.')
                plt.savefig("abc"+str(np.random.randint(int(1e6)))+".png")
                """
            else:
                pass  # no cropping!

        # Charlie addition
        if not self.cfg['using_z_slices']:
            img = imresize(image, scale) if scale != 1 else image
            scaled_img_size = arr(img.shape[0:2])
        else:
            # img = imresize(image, scale) if scale < 1 else image
            # if scale != 1:
            #     zspan = range(image.shape[0])
            #     img = np.array([imresize(image[z,...], scale) for z in zspan])
            #     print(f"{img.shape}")
            # else:
            #     img = image
            img = image  # just ignore the scale
            scaled_img_size = arr(img.shape[:3])  # ignore the color channel
        if mirror:
            img = np.fliplr(img)

        batch = {Batch.inputs: img}

        if self.has_gt:
            stride = self.cfg.stride

            if mirror:
                joints = [
                    self.mirror_joints(
                        person_joints, self.symmetric_joints, image.shape[1]
                    )
                    for person_joints in joints
                ]

            # print("Input size: ", scaled_img_size)
            # print("Stride: ", stride)
            sm_size = np.ceil(scaled_img_size / (stride * 2)).astype(int) * 2
            if self.cfg.using_z_slices:
                sm_size[0] = scaled_img_size[0] # z should not be "strided"
            print(f"Resized to {sm_size} from {image.shape} using scale {scale}")

            if not self.cfg.using_z_slices:
                scaled_joints = [person_joints[:, 1:3] * scale for person_joints in joints]
            else:
                # print("Scale ", scale)
                scaled_joints = [person_joints[:, 1:4] * scale for person_joints in joints]
                # [print("Person joints ", person_joints) for person_joints in joints]

            joint_id = [person_joints[:, 0].astype(int) for person_joints in joints]
            if not self.cfg.using_z_slices:
                compute = self.compute_target_part_scoremap
            else:
                compute = self.compute_target_part_scoremap_slices
            (
                part_score_targets,
                part_score_weights,
                locref_targets,
                locref_mask,
            ) = compute(joint_id, scaled_joints, data_item, sm_size, scale)

            # print("part_score_targets: ", part_score_targets.shape)
            # print("locref_targets: ", locref_targets.shape)

            batch.update(
                {
                    Batch.part_score_targets: part_score_targets,
                    Batch.part_score_weights: part_score_weights,
                    Batch.locref_targets: locref_targets,
                    Batch.locref_mask: locref_mask,
                }
            )

        batch = {key: data_to_input(data) for (key, data) in batch.items()}

        batch[Batch.data_item] = data_item

        return batch