Example No. 1
        def log_op():

            PCK_THRESH = [0.01, 0.025, 0.05, 0.1, 0.125, 0.15, 0.175, 0.2, 0.25, 0.5]
            HM_THRESH = [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]  # defined but not used below
            if self.config["pck_alpha"] not in PCK_THRESH:
                PCK_THRESH.append(self.config["pck_alpha"])

            coords, _ = heatmaps_to_coords(predictions.clone(), thresh=self.config["hm"]["thresh"])
            pck = {t: percentage_correct_keypoints(kwargs["kps"], coords, t, self.config["pck"]["type"]) for t in
                   PCK_THRESH}

            gridded_outputs = np.expand_dims(sure_to_numpy(torchvision.utils.make_grid(
                predictions[0], nrow=10)), 0).transpose(1, 2, 3, 0)
            gridded_targets = np.expand_dims(sure_to_numpy(torchvision.utils.make_grid(
                sure_to_torch(kwargs["targets"])[0], nrow=10)), 0).transpose(1, 2, 3, 0)

            logs = {
                "images": {
                    # Image input not needed, because stick animal is printed on input image
                    # "image_input": adjust_support(torch2numpy(inputs).transpose(0, 2, 3, 1), "-1->1"),
                    "first_pred": adjust_support(gridded_outputs, "-1->1", "0->1"),
                    "first_targets": adjust_support(gridded_targets, "-1->1", "0->1"),
                    "outputs": adjust_support(heatmaps_to_image(torch2numpy(predictions)).transpose(0, 2, 3, 1),
                                              "-1->1", "0->1"),
                    "targets": adjust_support(heatmaps_to_image(kwargs["targets"]).transpose(0, 2, 3, 1), "-1->1"),
                    "inputs_with_stick": make_stickanimal(torch2numpy(inputs).transpose(0, 2, 3, 1), kwargs["kps"]),
                    "stickanimal": make_stickanimal(torch2numpy(inputs).transpose(0, 2, 3, 1), predictions.clone(),
                                                    thresh=self.config["hm"]["thresh"]),
                },
                "scalars": {
                    "loss": losses["total"],
                    "learning_rate": self.optimizer.state_dict()["param_groups"][0]["lr"],
                    f"PCK@{self.config['pck_alpha']}": np.around(pck[self.config['pck_alpha']][0], 5),
                },
                "figures": {
                    "Keypoint Mapping": plot_input_target_keypoints(torch2numpy(inputs).transpose(0, 2, 3, 1),
                                                                    # get BHWC
                                                                    torch2numpy(predictions),  # stay BCHW
                                                                    kwargs["kps"], coords),
                }
            }
            if self.config["losses"]["L2"]:
                logs["scalars"]["L2"] = losses["L2"]
            if self.config["losses"]["L1"]:
                logs["scalars"]["L1"] = losses["L1"]
            if self.config["losses"]["perceptual"]:
                logs["scalars"]["perceptual"] = losses["perceptual"]

            if self.config["pck"]["pck_multi"]:
                for key, val in pck.items():
                    # get mean value for pck at given threshold
                    logs["scalars"][f"PCK@{key}"] = np.around(val[0], 5)
                    for idx, part in enumerate(val[1]):
                        logs["scalars"][f"PCK@{key}_{self.dataset.get_idx_parts(idx)}"] = np.around(part, 5)
            return logs
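Each entry of the `pck` dict above is later indexed as `val[0]` (the mean) and `val[1]` (per-keypoint values), so `percentage_correct_keypoints` evidently returns such a pair. Below is a minimal sketch of a PCK metric with that return shape, assuming pixel-space keypoints normalized by the image diagonal; the repository's version also takes a normalization-type argument and may normalize differently (e.g. by bounding-box size).

import numpy as np

def pck_sketch(gt_kps, pred_kps, alpha, image_size):
    """PCK sketch: fraction of predictions within alpha * image diagonal of
    the ground truth. gt_kps, pred_kps: [B, n_joints, 2] arrays of (x, y).
    Returns (overall_mean, per_joint_means), matching how log_op indexes it."""
    diag = np.sqrt(image_size[0] ** 2 + image_size[1] ** 2)
    dists = np.linalg.norm(gt_kps - pred_kps, axis=-1)   # [B, n_joints]
    correct = dists <= alpha * diag                      # boolean hit mask
    return correct.mean(), correct.mean(axis=0)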
Example No. 2
 def log_op():
     is_train = self.get_split() == "train"  # computed but unused below
     logs = {
         "images": {
             "input": adjust_support(torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
             "outputs": adjust_support(torch2numpy(predictions).transpose(0, 2, 3, 1), "-1->1", "0->1"),
         },
         "scalars": {
             "loss": loss,
         },
     }
     return logs
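Every logged image goes through `torch2numpy` and edflow's `adjust_support`, which linearly rescales pixel values from one support (value range) to another; the string arguments name the target support and, optionally, the current one. A rough sketch of both helpers under those assumptions (the real `adjust_support` in edflow also infers the current support when it is omitted):

import numpy as np

def torch2numpy(tensor):
    # presumably: detach from the graph, move to CPU, convert to numpy
    return tensor.detach().cpu().numpy()

_SUPPORTS = {"0->1": (0.0, 1.0), "-1->1": (-1.0, 1.0), "0->255": (0.0, 255.0)}

def adjust_support_sketch(image, future_support, current_support):
    """Linearly map `image` from `current_support` to `future_support`."""
    lo_c, hi_c = _SUPPORTS[current_support]
    lo_f, hi_f = _SUPPORTS[future_support]
    unit = (image - lo_c) / (hi_c - lo_c)   # normalize to [0, 1]
    return unit * (hi_f - lo_f) + lo_f      # rescale to the target support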
Example No. 3
        def log_op():

            logs = {
                "images": {
                    "p0a0": adjust_support(torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "p0a1": adjust_support(torch2numpy(inputs1).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "p1a0": adjust_support(torch2numpy(inputs2).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "p1a1": adjust_support(torch2numpy(inputs3).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "disentanglement": adjust_support(torch2numpy(predictions).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "testing": adjust_support(torch2numpy(testing).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                },
                "scalars": {
                    "loss": loss,
                    "loss_dis": loss_dis,
                },
            }
            logs["scalars"]["lambda_"] = log["scalars"]["lambda_"]
            logs["scalars"]["gain"] = log["scalars"]["gain"]
            logs["scalars"]["active"] = log["scalars"]["active"]
            logs["scalars"]["kl_loss"] = log["scalars"]["kl_loss"]
            logs["scalars"]["nll_loss"] = log["scalars"]["nll_loss"]
            logs["scalars"]["rec_loss"] = log["scalars"]["rec_loss"]
            logs["scalars"]["mu"] = log["scalars"]["mu"]
            logs["scalars"]["eps"] = log["scalars"]["eps"]

            return logs
Example No. 4
        def log_op():
            from AnimalPose.utils.log_utils import plot_pred_figure
            from edflow.data.util import adjust_support

            logs = {
                "images": {
                    "image_input": adjust_support(torch2numpy(inputs).transpose(0, 2, 3, 1), "-1->1"),
                },
                "scalars": {
                    "loss": losses["batch"]["total"],
                    "accuracy": accuracy.cpu().numpy(),
                },
                "figures": {
                    "predictions": plot_pred_figure(inputs, preds.cpu().detach().numpy(), labels)
                }

            }
            if self.config["losses"]["CEL"]:
                logs["scalars"]["CrossEntropyLoss"] = losses["batch"]["CEL"]
            return logs
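All of these `log_op` functions are closures defined inside an edflow iterator's `step_op`, which lets edflow call them only on logging steps, so the (potentially expensive) grid and figure construction is deferred until actually needed. A schematic of that wiring following the edflow `TemplateIterator` convention; `compute_loss` and `self.optimizer` are hypothetical placeholders:

import torch
from edflow import TemplateIterator

class SketchIterator(TemplateIterator):
    def step_op(self, model, **kwargs):
        # forward pass for this batch
        inputs = torch.as_tensor(kwargs["inputs"])
        predictions = model(inputs)
        loss = compute_loss(predictions, kwargs)  # hypothetical helper

        def train_op():
            # backward pass; edflow runs this only while training
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()

        def log_op():
            # closes over `predictions` and `loss` from this very step
            return {"images": {}, "scalars": {"loss": loss.item()}}

        def eval_op():
            return {"outputs": predictions.detach().cpu().numpy()}

        return {"train_op": train_op, "log_op": log_op, "eval_op": eval_op}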
Example No. 5
        def log_op():
            logs = {
                "images": {
                    "image_input_0": adjust_support(torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "disentanglement": adjust_support(np.expand_dims(generate_samples(inputs0, model), 0), "-1->1",
                                                      "0->1"),
                    "pose_random": adjust_support(np.expand_dims(generate_samples(inputs0, model, pose_random=True), 0),
                                                  "-1->1",
                                                  "0->1"),
                    "appearance_random": adjust_support(
                        np.expand_dims(generate_samples(inputs0, model, appreance_random=True), 0), "-1->1",
                        "0->1"),

                    "outputs": adjust_support(torch2numpy(predictions).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                },
                "scalars": {
                    "loss": loss,
                },
            }
            logs["images"]["image_input_1_flipped"] = adjust_support(
                torch2numpy(inputs1_flipped).transpose(0, 2, 3, 1), "-1->1", "0->1")
            if self.get_global_step() >= self.config["LossConstrained"]["no_kl_for"] and \
                    self.config["LossConstrained"]["active"]:
                logs["scalars"]["lambda_"] = log["scalars"]["lambda_"]
                logs["scalars"]["gain"] = log["scalars"]["gain"]
                logs["scalars"]["active"] = log["scalars"]["active"]
                logs["scalars"]["kl_loss"] = log["scalars"]["kl_loss"]
                logs["scalars"]["nll_loss"] = log["scalars"]["nll_loss"]
                logs["scalars"]["rec_loss"] = log["scalars"]["rec_loss"]
                logs["scalars"]["mu"] = log["scalars"]["mu"]
                logs["scalars"]["eps"] = log["scalars"]["eps"]

            if self.encoder_2:
                logs["images"]["image_input_1"] = adjust_support(torch2numpy(inputs1).transpose(0, 2, 3, 1), "-1->1",
                                                                 "0->1")
                # image_input_1_flipped is already logged unconditionally above
                logs["images"]["pose_reconstruction"] = adjust_support(
                    torch2numpy(kl_test_preds).transpose(0, 2, 3, 1), "-1->1", "0->1")
                if retrieve(self.config, "classifier/active", default=False):
                    logs["scalars"]["accuracy"] = accuracy
                logs["scalars"]["hog"] = np.array(hog_values).mean()
                logs["scalars"]["hog_std"] = np.array(hog_values).std()
                logs["scalars"]["hist"] = np.array(hist_values).mean()
                logs["scalars"]["hist_std"] = np.array(hist_values).std()

            return logs
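Example No. 5 gates its extra scalars twice: the KL-related values appear only after `no_kl_for` global steps with the constrained loss active, and `accuracy` only when `classifier/active` is set. `retrieve` is edflow's nested-config lookup over '/'-separated key paths; a sketch of its observable behavior here (the real edflow function has more options, such as a configurable separator):

def retrieve_sketch(config, path, default=None):
    """Walk a nested dict along a '/'-separated key path, e.g.
    retrieve_sketch(config, "classifier/active", default=False)."""
    node = config
    for key in path.split("/"):
        try:
            node = node[key]
        except (KeyError, TypeError):
            return default
    return node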
Example No. 6
 def log_op():
     from edflow.data.util import adjust_support
     logs = {
         "images": {
             "inputs0":
             adjust_support(
                 torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
             "inputs1":
             adjust_support(
                 torch2numpy(inputs1).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
             "pred0":
             adjust_support(
                 torch2numpy(pred0).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
             "pred1":
             adjust_support(
                 torch2numpy(pred1).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
             "mixed_reconstruction":
             adjust_support(
                 torch2numpy(mixed_reconstruction).transpose(
                     0, 2, 3, 1), "-1->1", "0->1"),
             "input0_flipped":
             adjust_support(
                 torch2numpy(torch.flip(inputs0,
                                        [0])).transpose(0, 2, 3, 1),
                 "-1->1", "0->1"),
             "flip_test":
             adjust_support(
                 torch2numpy(flip_test).transpose(0, 2, 3, 1), "-1->1",
                 "0->1"),
         },
         "scalars": {
             "loss":
             loss_appearance + loss_pose + loss_mixed_reconstruction,
             "loss_appearance": loss_appearance,
             "loss_pose": loss_pose,
             "loss_mixed_reconstruction": loss_mixed_reconstruction,
         },
     }
     return logs
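Note that `input0_flipped` above is computed with `torch.flip(inputs0, [0])`, which flips along dimension 0, i.e. reverses the batch order rather than mirroring each image; mirroring a BCHW batch horizontally would flip dimension 3. A tiny demonstration of the difference:

import torch

x = torch.arange(4 * 3 * 2 * 2, dtype=torch.float32).reshape(4, 3, 2, 2)  # BCHW
batch_reversed = torch.flip(x, [0])  # sample order reversed, images untouched
mirrored = torch.flip(x, [3])        # each image mirrored along its width
assert torch.equal(batch_reversed[0], x[3])

So the "flip test" here pairs every sample with a different one from the same batch; whether that is the intended behavior depends on how the flip test is defined in this repository.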
Example No. 7
        def log_op():
            from edflow.data.util import adjust_support

            #PCK_THRESH = [0.01, 0.025, 0.05, 0.1, 0.125, 0.15, 0.175, 0.2, 0.25, 0.5]
            #if self.config['pck_alpha'] not in PCK_THRESH: PCK_THRESH.append(self.config["pck_alpha"])

            #coords, _ = heatmaps_to_coords(pose_predictions.clone(), thresh=self.config["hm"]["thresh"])
            #pck = {t: percentage_correct_keypoints(kwargs["kps"], coords, t, self.config["pck"]["type"]) for t in
            #       PCK_THRESH}

            logs = {
                "images": {
                    "inputs0": adjust_support(torch2numpy(inputs0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "inputs1": adjust_support(torch2numpy(inputs1).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "pred0": adjust_support(torch2numpy(pred0).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "pred1": adjust_support(torch2numpy(pred1).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "mixed_reconstruction": adjust_support(torch2numpy(mixed_reconstruction).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "cycle0": adjust_support(torch2numpy(pose_recon).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    "cycle1": adjust_support(torch2numpy(appearance_recon).transpose(0, 2, 3, 1), "-1->1", "0->1"),
                    #"targets": adjust_support(heatmaps_to_image(kwargs["targets0"]).transpose(0, 2, 3, 1), "-1->1"),
                    #"inputs_with_stick": make_stickanimal(torch2numpy(inputs).transpose(0, 2, 3, 1), kwargs["kps0"]),
                    #"stickanimal": make_stickanimal(torch2numpy(inputs).transpose(0, 2, 3, 1), pose_predictions.clone(),
                    #                                thresh=self.config["hm"]["thresh"]),
                    #"pred_hm": adjust_support(heatmaps_to_image(torch2numpy(pose_predictions)).transpose(0, 2, 3, 1),
                    #                          "-1->1", "0->1"),
                },
                "scalars": {
                    "loss": autoencoder_loss + cycle_loss,  # + pose_loss,
                    "loss_autoencoder": autoencoder_loss,
                    "recon_loss": cycle_loss,
                    #"pose_loss": pose_loss,
                    #f"PCK@{self.config['pck_alpha']}": np.around(pck[self.config['pck_alpha']][0], 5),
                },
            }

            # if self.config["pck"]["pck_multi"]:
            #     for key, val in pck.items():
            #         # get mean value for pck at given threshold
            #         logs["scalars"][f"PCK@{key}"] = np.around(val[0], 5)
            #         for idx, part in enumerate(val[1]):
            #             logs["scalars"][f"PCK@{key}_{self.dataset.get_idx_parts(idx)}"] = np.around(part, 5)

            # gridded_outputs = np.expand_dims(sure_to_numpy(torchvision.utils.make_grid(
            #    predictions[0], nrow=10)), 0).transpose(1, 2, 3, 0)
            # gridded_targets = np.expand_dims(sure_to_numpy(torchvision.utils.make_grid(
            #    sure_to_torch(kwargs["targets"])[0], nrow=10)), 0).transpose(1, 2, 3, 0)
            #
            # logs = {
            #     "images": {
            #         # Image input not needed, because stick animal is printed on input image
            #         # "image_input": adjust_support(torch2numpy(inputs).transpose(0, 2, 3, 1), "-1->1"),
            #         #"first_pred": adjust_support(gridded_outputs, "-1->1", "0->1"),
            #         #"first_targets": adjust_support(gridded_targets, "-1->1", "0->1"),
            #         "outputs": adjust_support(heatmaps_to_image(torch2numpy(predictions)).transpose(0, 2, 3, 1),
            #                                   "-1->1", "0->1"),

            #     },
            #     "scalars": {
            #         "loss": losses["total"],
            #         "learning_rate": self.optimizer.state_dict()["param_groups"][0]["lr"],
            #         f"PCK@{self.config['pck_alpha']}": np.around(pck[self.config['pck_alpha']][0], 5),
            #     },
            #     "figures": {
            #         "Keypoint Mapping": plot_input_target_keypoints(torch2numpy(inputs).transpose(0, 2, 3, 1),
            #                                                         # get BHWC
            #                                                         torch2numpy(predictions),  # stay BCHW
            #                                                         kwargs["kps"], coords),
            #     }
            # }
            # if self.config["losses"]["L2"]:
            #     logs["scalars"]["L2"] = losses["L2"]
            # if self.config["losses"]["L1"]:
            #     logs["scalars"]["L1"] = losses["L1"]
            # if self.config["losses"]["perceptual"]:
            #     logs["scalars"]["perceptual"] = losses["perceptual"]
            #

            return logs
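Example No. 7 logs the usual tensors of a two-branch pose/appearance disentanglement setup: plain reconstructions (`pred0`, `pred1`), a cross-combination (`mixed_reconstruction`), and cycle reconstructions (`cycle0`, `cycle1`). Here is how such tensors are typically produced, sketched with toy modules; the encoder/decoder structure is an assumption, not this repository's actual model:

import torch
import torch.nn as nn

# Toy stand-ins for the (unknown) pose encoder, appearance encoder, and decoder.
class TinyEnc(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Conv2d(3, 8, 3, padding=1)
    def forward(self, x):
        return self.net(x)

class TinyDec(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Conv2d(16, 3, 3, padding=1)
    def forward(self, z_pose, z_app):
        return self.net(torch.cat([z_pose, z_app], dim=1))

pose_enc, app_enc, decoder = TinyEnc(), TinyEnc(), TinyDec()
inputs0, inputs1 = torch.randn(2, 3, 32, 32), torch.randn(2, 3, 32, 32)

pred0 = decoder(pose_enc(inputs0), app_enc(inputs0))     # reconstruct inputs0
mixed = decoder(pose_enc(inputs0), app_enc(inputs1))     # pose of 0, appearance of 1
pose_recon = decoder(pose_enc(mixed), app_enc(inputs0))  # cycle back ("cycle0")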
Example No. 8
def make_stickanimal(image, predictions, thresh=0, draw_all_circles=True):
    """
    Args:
        image: batch of images [B, W, H, C]
        joints: joint array
        predictions: batch of prediction heatmaps [B, Joints, W, H]

    Returns:

    """
    image = adjust_support(np.copy(image), "0->255").astype(np.uint8)
    if predictions.shape[-1] != 2:
        # heatmaps were passed in: decode them to (x, y) keypoint coordinates
        coords, _ = heatmaps_to_coords(torch2numpy(predictions), thresh=thresh)
    else:
        # keypoint coordinates were passed in directly
        coords = predictions

    joints = [
        # Head
        [2, 0],  # Nose - L_Eye
        [2, 1],  # Nose - R_Eye
        [0, 3],  # L_Eye - L_EarBase
        [1, 4],  # R_Eye - R_EarBase
        [2, 8],  # Nose - Throat
        # Body
        [8, 9],  # Throat - L_F_Elbow
        [8, 5],  # Throat - R_F_Elbow
        [9, 10],  # L_F_Elbow - Withers
        [5, 10],  # R_F_Elbow - Withers
        # Front
        [9, 16],  # L_F_Elbow - L_F_Knee
        [16, 6],  # L_F_Knee - L_F_Paw
        [5, 17],  # R_F_Elbow - R_F_Knee
        [17, 7],  # R_F_Knee - R_F_Paw
        # Back
        [14, 18],  # L_B_Elbow - L_B_Knee
        [18, 12],  # L_B_Knee - L_B_Paw
        [15, 19],  # R_B_Elbow - R_B_Knee
        [19, 13],  # R_B_Knee - R_B_Paw
        [10, 11],  # Withers - TailBase
        [11, 15],  # TailBase - R_B_Elbow
        [11, 14],  # TailBase - L_B_Elbow
    ]
    # OpenCV draws in BGR channel order: (Blue, Green, Red)
    head = (255, 0, 0)  # blue
    body = (255, 255, 255)  # white
    front = (0, 255, 0)  # green
    back = (0, 0, 255)  # red

    # limb color, indexed by the pair's position in `joints`
    colordict = {
        # head pairs (0-4)
        0: head, 1: head, 2: head, 3: head, 4: head,
        # body pairs (5-8)
        5: body, 6: body, 7: body, 8: body,
        # front-leg pairs (9-12)
        9: front, 10: front, 11: front, 12: front,
        # back-leg pairs (13-19)
        13: back, 14: back, 15: back, 16: back, 17: back, 18: back, 19: back,
    }
    for idx, orig in enumerate(image):
        img = orig.copy()  # draw on a copy; equivalent to the zeros-then-assign idiom
        for idx_joints, pair in enumerate(joints):
            start = coords[idx][pair[0]]
            end = coords[idx][pair[1]]

            # skip this limb entirely if both endpoints are missing (reported as (0, 0))
            if np.isclose(start, [0, 0]).any() and np.isclose(end, [0, 0]).any():
                continue
            # mark each detected endpoint with a circle
            if not np.isclose(start, [0, 0]).any() and draw_all_circles:
                cv2.circle(img, (int(start[0]), int(start[1])), radius=1,
                           color=colordict[idx_joints], thickness=2, lineType=cv2.LINE_AA)
            if not np.isclose(end, [0, 0]).any() and draw_all_circles:
                cv2.circle(img, (int(end[0]), int(end[1])), radius=1,
                           color=colordict[idx_joints], thickness=2, lineType=cv2.LINE_AA)

            # draw the limb only if both endpoints were detected
            if not np.isclose(start[0], 0) and not np.isclose(end[0], 0):
                cv2.line(img, (int(start[0]), int(start[1])), (int(end[0]), int(end[1])),
                         color=colordict[idx_joints], thickness=1, lineType=cv2.LINE_AA)
        image[idx] = img

    return adjust_support(image, "-1->1")
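`make_stickanimal` relies on `heatmaps_to_coords` reporting undetected keypoints as (0, 0), which is why the drawing loop treats coordinates near zero as missing. A sketch consistent with that convention, assuming a plain argmax decoder with a confidence threshold (the repository's version is not shown here and may differ, e.g. with sub-pixel refinement):

import numpy as np

def heatmaps_to_coords_sketch(heatmaps, thresh=0.0):
    """heatmaps: [B, n_joints, H, W] array. Returns ([B, n_joints, 2] (x, y)
    coordinates, [B, n_joints] peak scores); joints whose peak confidence is
    at or below `thresh` are reported as (0, 0), i.e. "missing"."""
    b, j, h, w = heatmaps.shape
    flat = heatmaps.reshape(b, j, -1)
    idx = flat.argmax(axis=-1)                 # flattened peak index per joint
    scores = flat.max(axis=-1)
    coords = np.stack([idx % w, idx // w], axis=-1).astype(np.float64)
    coords[scores <= thresh] = 0.0             # zero out low-confidence joints
    return coords, scores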