Example #1
    def prepare_simclr_sample(self, sample: dict, augmenter: SampleAugmenter) -> dict:
        """Prepares sample according to SimCLR experiment.
        For each sample, two transformed versions of the image are returned.
        Note: Rotation and jitter are kept the same in both transformations.
        Args:
            sample (dict): Underlying data from dataloader class.
            augmenter (SampleAugmenter): Augmenter used to transform sample

        Returns:
            dict: sample containing 'transformed_image1' and 'transformed_image2'
        """
        joints25D, _ = convert_to_2_5D(sample["K"], sample["joints3D"])
        img1, _, _ = augmenter.transform_sample(sample["image"], joints25D.clone())

        # To keep rotation and jitter consistent between the two transformations.
        override_angle = augmenter.angle
        override_jitter = augmenter.jitter

        img2, _, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone(), override_angle, overrride_jitter
        )

        # Apply only the image-related transform.
        if self.transform:
            img1 = self.transform(img1)
            img2 = self.transform(img2)
        return {"transformed_image1": img1, "transformed_image2": img2}
Example #2
    def prepare_hybrid2_sample(self, sample: dict, augmenter: SampleAugmenter) -> dict:
        """Prepares sample according to the hybrid2 experiment: two transformed
        views of the image plus the augmentation parameters used for each view.

        Args:
            sample (dict): Underlying data from dataloader class.
            augmenter (SampleAugmenter): Augmenter used to transform sample

        Returns:
            dict: sample containing 'transformed_image1', 'transformed_image2' and
                per-view augmentation parameters suffixed with '_1' and '_2'.
        """
        joints25D, _ = convert_to_2_5D(sample["K"], sample["joints3D"])
        if augmenter.crop:
            override_jitter = None
        else:
            # Zero jitter is used when the cropping flag is off: the crop is
            # still triggered, but with no translation in the image.
            override_jitter = [0, 0]
        img1, joints1, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone(), None, override_jitter
        )
        param1 = self.get_random_augment_param(augmenter)

        img2, joints2, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone(), None, override_jitter
        )
        param2 = self.get_random_augment_param(augmenter)

        # Apply only the image-related transform.
        if self.transform:
            img1 = self.transform(img1)
            img2 = self.transform(img2)

        return {
            **{"transformed_image1": img1, "transformed_image2": img2},
            **{f"{k}_1": v for k, v in param1.items() if v is not None},
            **{f"{k}_2": v for k, v in param2.items() if v is not None},
        }
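`get_random_augment_param` is not defined in this excerpt. Judging by the `augmenter.angle` and `augmenter.jitter` reads in Example #1, a plausible sketch reads back whatever random parameters the augmenter sampled; the exact attribute and key set is an assumption:

    def get_random_augment_param(self, augmenter: SampleAugmenter) -> dict:
        # Hypothetical sketch: collect the parameters sampled by the last
        # transform_sample() call. Only 'angle' and 'jitter' are confirmed
        # attributes elsewhere in this excerpt.
        return {"rotation": augmenter.angle, "jitter": augmenter.jitter}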
Example #3
    def prepare_pairwise_ablative(
        self, sample: dict, augmenter: SampleAugmenter
    ) -> dict:
        """Prepares samples according to pairwise experiment, i.e. transforming the
        image and keeping track of the relative parameters. Augmentations are isolated.
        Args:
            sample (dict): Underlying data from dataloader class.
            augmenter (SampleAugmenter): Augmenter used to transform sample

        Returns:
            dict: sample containing following elements
                'transformed_image1'
                'transformed_image2'
                'joints1' (2.5D joints)
                'joints2' (2.5D joints)
                'rotation'
                'jitter' ...
        """
        joints25D, _ = convert_to_2_5D(sample["K"], sample["joints3D"])
        if augmenter.crop:
            override_jitter = None
        else:
            # Zero jitter is used when the cropping flag is off: the crop is
            # still triggered, but with no translation in the image.
            override_jitter = [0, 0]
        if augmenter.rotate:
            override_angle = None
        else:
            override_angle = None
            # override_angle = random.uniform(1, 360)
            # Uncomment the line above to apply the same random rotation to both views.
        img1, joints1, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone(), override_angle, override_jitter
        )
        param1 = self.get_random_augment_param(augmenter)

        img2, joints2, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone(), override_angle, override_jitter
        )
        param2 = self.get_random_augment_param(augmenter)

        # relative transform calculation.
        rel_param = self.get_relative_param(augmenter, param1, param2)

        # Apply only the image-related transform.
        if self.transform:
            img1 = self.transform(img1)
            img2 = self.transform(img2)

        return {
            **{
                "transformed_image1": img1,
                "transformed_image2": img2,
                "joints1": joints1,
                "joints2": joints2,
            },
            **rel_param,
        }
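`get_relative_param` is likewise not shown. A hedged sketch of a relative-parameter computation, assuming scalar angles and 2-vector jitters (boolean transforms such as blur or flip would reduce to an XOR):

    def get_relative_param(
        self, augmenter: SampleAugmenter, param1: dict, param2: dict
    ) -> dict:
        # Hypothetical sketch: express view 2's augmentation relative to view 1's.
        rel = {}
        if augmenter.rotate:
            rel["rotation"] = param2["rotation"] - param1["rotation"]
        if augmenter.crop:
            rel["jitter"] = [b - a for a, b in zip(param1["jitter"], param2["jitter"])]
        return rel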
Example #4
    def prepare_pairwise_sample(self, sample: dict, augmenter: SampleAugmenter) -> dict:
        """Prepares samples according to pairwise experiment, i.e. transforming the
        image and keepinf track of the relative parameters.
        Note: Gaussian blur and Flip are treated as boolean. Also it was decided not to
        use them for experiment.
        The effects of transformations are isolated.

        Args:
            sample (dict): Underlying data from dataloader class.
            augmenter (SampleAugmenter): Augmenter used to transform sample

        Returns:
            dict: sample containing following elements
                'transformed_image1'
                'transformed_image2'
                'joints1' (2.5D joints)
                'joints2' (2.5D joints)
                'rotation'
                'jitter' ...
        """
        joints25D, _ = convert_to_2_5D(sample["K"], sample["joints3D"])

        img1, joints1, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone()
        )
        param1 = self.get_random_augment_param(augmenter)

        img2, joints2, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone()
        )
        param2 = self.get_random_augment_param(augmenter)

        # relative transform calculation.
        rel_param = self.get_relative_param(augmenter, param1, param2)

        # Apply only the image-related transform.
        if self.transform:
            img1 = self.transform(img1)
            img2 = self.transform(img2)

        return {
            **{
                "transformed_image1": img1,
                "transformed_image2": img2,
                "joints1": joints1,
                "joints2": joints2,
            },
            **rel_param,
        }
Example #5
def process_data(
    sample: dict,
    bbox: torch.Tensor,
    augmenter: SampleAugmenter,
    transform: transforms.Compose,
    step=1,
) -> Dict[str, torch.Tensor]:
    """Processes the images according to augmenter and transfromer. The boundbox acts as proxy for 2D joints
    for efficienct cropping.

    Args:
        sample (dict): dictionary containing original image("image") and camera intrinsics (K)
        bbox (torch.Tensor): 21x3 dimensional bounding box
        augmenter (SampleAugmenter): Augmenter to augment sample, used for cropping and resizing
        transform (transforms.Compose): Transform to be performed on image, like conversion to tensor and
            normalization
        step (int, optional): variable for debugging the image processing step. Defaults to 1.

    Returns:
        Dict[str, torch.Tensor]: Dictionary containing the augmented "image", adapted camera intrinsics ("K") and
            "transformation_matrix"
    """
    image, _, transformation_matrix = augmenter.transform_sample(
        sample["image"], bbox)
    # plt.imshow(image)
    # plt.show()
    # plt.savefig(f"image_{step}.png")
    image = transform(image)
    transformation_matrix = torch.tensor(transformation_matrix).float()

    return {
        "image": image.view([1] + list(image.size())),
        "K": transformation_matrix @ sample["K"],
        "transformation_matrix": transformation_matrix,
    }
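The `transformation_matrix @ sample["K"]` update follows from the projection model: if p ≈ K·X in homogeneous pixel coordinates and the augmenter applies an affine map T in that pixel space, then T·p ≈ (T·K)·X, so T·K is the intrinsic matrix of the cropped and resized image. A small numeric check of this identity (the 3x3 homogeneous convention for T is an assumption about SampleAugmenter):

import torch

K = torch.tensor([[500.0, 0.0, 112.0],
                  [0.0, 500.0, 112.0],
                  [0.0, 0.0, 1.0]])
T = torch.tensor([[2.0, 0.0, -50.0],   # 2x zoom plus a crop offset
                  [0.0, 2.0, -50.0],
                  [0.0, 0.0, 1.0]])
X = torch.tensor([0.1, -0.2, 0.5])     # 3D point in the camera frame

p = K @ X
p = p / p[2]                           # pixel in the original image
p_new = (T @ K) @ X
p_new = p_new / p_new[2]               # pixel in the transformed image
assert torch.allclose(p_new, T @ p)    # identical point, moved by T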
Example #6
    def get_sample_augmenter(
        self, augmentation_params: edict, augmentation_flags: edict
    ) -> SampleAugmenter:
        """Returns a SampleAugmenter configured with the given parameters and flags."""
        return SampleAugmenter(
            augmentation_params=augmentation_params,
            augmentation_flags=augmentation_flags,
        )
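From inside the dataset class, a possible call site, reusing the edict config layout that `main` below reads from `TRAINING_CONFIG_PATH` (keys other than `resize`, `crop`, and `crop_box_jitter` are assumptions):

    # Hypothetical usage sketch.
    flags = edict({"resize": True, "crop": True, "rotate": False})
    params = edict({"crop_box_jitter": [0.0, 0.0]})
    augmenter = self.get_sample_augmenter(params, flags)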
Example #7
    def prepare_experiment4_pretraining(
        self, sample: dict, augmenter: SampleAugmenter
    ) -> dict:
        """Prepares samples for ablative studies on Simclr. This function isolates the
        effect of each transform. Make sure no other transformation is applied except
        the one you want to isolate. (Resize is allowed). Samples are not
        artificially increased by changing rotation and jitter for both samples.

        Args:
            sample (dict): Underlying data from dataloader class.
            augmenter (SampleAugmenter): Augmenter used to transform sample

        Returns:
            dict: sample containing 'transformed_image1' and 'transformed_image2'
        """

        joints25D, _ = convert_to_2_5D(sample["K"], sample["joints3D"])
        if augmenter.crop:
            override_jitter = None
        else:
            # Zero jitter is used when the cropping flag is off: the crop is
            # still triggered, but with no translation in the image.
            override_jitter = [0, 0]
        if augmenter.rotate:
            override_angle = None
        else:
            override_angle = None
            # override_angle = random.uniform(1, 360)
            # Uncomment the line above to apply the same random rotation to both views.

        img1, _, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone(), override_angle, override_jitter
        )
        img2, _, _ = augmenter.transform_sample(
            sample["image"], joints25D.clone(), override_angle, override_jitter
        )

        # Apply only the image-related transform.
        if self.transform:
            img1 = self.transform(img1)
            img2 = self.transform(img2)

        return {"transformed_image1": img1, "transformed_image2": img2}
Example #8
    def prepare_supervised_sample(
        self, sample: dict, augmenter: SampleAugmenter
    ) -> dict:
        """Prepares samples for supervised experiment with keypoints.

        Args:
            sample (dict): Underlying data from dataloader class.
            augmenter (SampleAugmenter): Augmenter used to transform sample

        Returns:
            dict: sample containing the following elements
                'image'
                'joints' (2.5D joints)
                'joints3D'
                'K'
                'scale'
                'joints3D_recreated'
                'joints_valid'
                'joints_raw'
                'T' (transformation matrix)
        """
        joints25D_raw, scale = convert_to_2_5D(sample["K"], sample["joints3D"])
        joints_raw = (
            sample["joints_raw"]
            if "joints_raw" in sample.keys()
            else sample["joints3D"].clone()
        )
        image, joints25D, transformation_matrix = augmenter.transform_sample(
            sample["image"], joints25D_raw
        )
        sample["K"] = torch.Tensor(transformation_matrix) @ sample["K"]
        if self.config.use_palm:
            sample["joints3D"] = self.move_wrist_to_palm(sample["joints3D"])
            joints25D, scale = convert_to_2_5D(sample["K"], sample["joints3D"])

        joints3D_recreated = convert_2_5D_to_3D(joints25D, scale, sample["K"])
        # This variable is for Procrustes analysis; only relevant when YouTube data is used.

        if self.config.use_palm:
            joints_raw = self.move_wrist_to_palm(joints_raw)

        if self.transform:
            image = self.transform(image)

        return {
            "image": image,
            "joints": joints25D,
            "joints3D": sample["joints3D"],
            "K": sample["K"],
            "scale": scale,
            "joints3D_recreated": joints3D_recreated,
            "joints_valid": sample["joints_valid"],
            "joints_raw": joints_raw,
            "T": torch.Tensor(transformation_matrix),
        }
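The `joints3D_recreated` entry doubles as a consistency check: `convert_to_2_5D` and `convert_2_5D_to_3D` are expected to be inverse operations given the recovered scale. A minimal round-trip sketch (the tolerance is an assumption):

joints25D, scale = convert_to_2_5D(sample["K"], sample["joints3D"])
joints3D_rec = convert_2_5D_to_3D(joints25D, scale, sample["K"])
assert torch.allclose(joints3D_rec, sample["joints3D"], atol=1e-4)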
def main():
    """
    Main eval loop: Iterates over all evaluation samples and saves the corresponding
    predictions as json and zip file. This is the format expected at
    https://competitions.codalab.org/competitions/21238#learn_the_details-overview
    """
    parser = argparse.ArgumentParser(
        description="Evaluation on Freihand eval set.")
    parser.add_argument("-key",
                        type=str,
                        help="Add comet key of experiment to restore.")
    parser.add_argument(
        "-resnet_size",
        type=str,
        help="Resnet sizes",
        choices=["18", "34", "50", "101", "152"],
        default="50",
    )
    parser.add_argument("--heatmap",
                        action="store_true",
                        help="Choose Resnet",
                        default=False)
    parser.add_argument(
        "--palm_trained",
        action="store_true",
        help="Use when palm is regressed during training.",
        default=False,
    )
    parser.add_argument(
        "-split",
        type=str,
        help="For debugging select val split",
        default="test",
        choices=["test", "val"],
    )
    parser.add_argument("-checkpoint",
                        type=str,
                        help="selectign checkpoint",
                        default="")
    args = parser.parse_args()
    model = load_model(args.key, args.resnet_size, args.heatmap,
                       args.checkpoint)
    if args.split == "val":
        print(
            "DEBUG MODE ACTIVATED.\nEvaluation pipeline is executed on the validation set"
        )
    train_param = edict(read_json(TRAINING_CONFIG_PATH))
    train_param.augmentation_flags.resize = True
    train_param.augmentation_flags.crop = True
    # train_param.augmentation_params.crop_margin = 1.5
    train_param.augmentation_params.crop_box_jitter = [0.0, 0.0]
    augmenter = SampleAugmenter(
        augmentation_flags=train_param.augmentation_flags,
        augmentation_params=train_param.augmentation_params,
    )
    # Normalization for BGR mode.
    # transform = transforms.Compose(
    #     [
    #         transforms.ToTensor(),
    #         transforms.Normalize(
    #             (0.485, 0.456, 0.406)[::-1], (0.229, 0.224, 0.225)[::-1]
    #         ),
    #     ]
    # )
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    data = F_DB(FREIHAND_DATA, split=args.split)
    xyz_pred = []
    debug_mean = []
    with torch.no_grad():
        for i in tqdm(range(len(data))):
            joints3d_normalized = normalize_joints(
                model_refined_inference(model, data[i], augmenter, transform,
                                        args.palm_trained))
            if args.split == "val":
                # DEBUG CODE:
                joints3d = joints3d_normalized * data.scale[data.indices[i] %
                                                            32560]
                debug_mean.append(
                    torch.mean(torch.abs(joints3d - data[i]["joints3D"])))
            else:
                joints3d = joints3d_normalized * data.scale[data.indices[i]]

            xyz_pred.append(JOINTS.ait_to_freihand(joints3d).tolist())

    if args.split == "val":
        # DEBUG CODE:
        print(
            f"MAE 3d\nMean: {np.mean(debug_mean)}\nMax: {np.max(debug_mean)}"
            f"\nMedian: {np.median(debug_mean)}")
        exit()

    verts = np.zeros((len(xyz_pred), 778, 3)).tolist()
    save_json([xyz_pred, verts], f"{args.key}_pred.json")
    subprocess.call(
        ["zip", "-j", f"{args.key}_pred.zip", f"{args.key}_pred.json"])
    subprocess.call(["rm", f"{args.key}_pred.json"])