Example #1
    def test_mixup_transform_single_label_missing_num_classes(self):
        alpha = 2.0
        mixup_transform = MixupTransform(alpha, None)
        sample = {
            "input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
            "target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
        }
        with self.assertRaises(Exception):
            mixup_transform(sample)
Example #2
    def test_mixup_transform_single_label(self):
        alpha = 2.0
        num_classes = 3
        mixup_transform = MixupTransform(alpha, num_classes)
        sample = {
            "input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
            "target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
        }
        sample_mixup = mixup_transform(sample)
        self.assertTrue(sample["input"].shape == sample_mixup["input"].shape)
        self.assertTrue(sample_mixup["target"].shape[0] == 4)
        self.assertTrue(sample_mixup["target"].shape[1] == 3)
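
The shape assertions above follow from the classic mixup formulation (Zhang et al., 2018): inputs are blended convexly with a Beta-sampled coefficient, and integer targets are expanded to one-hot vectors so they can be blended the same way, which is why the mixed target gains a class dimension. A minimal sketch of that idea, assuming the common flipped-batch pairing; this is illustrative, not MixupTransform's exact internals:

    import torch
    from torch.distributions.beta import Beta

    def mixup_sketch(x, y, alpha=2.0, num_classes=3):
        # Draw the mixing coefficient from Beta(alpha, alpha).
        lam = Beta(alpha, alpha).sample()
        # Pair each sample with its mirror in the flipped batch.
        mixed_x = lam * x + (1 - lam) * x.flip(0)
        # Integer targets become one-hot so they can be blended too,
        # turning an (N,) target into an (N, num_classes) target.
        y_one_hot = torch.nn.functional.one_hot(y.long(), num_classes).float()
        mixed_y = lam * y_one_hot + (1 - lam) * y_one_hot.flip(0)
        return mixed_x, mixed_y

On the sample above this yields a (4, 3, 224, 224) input and a (4, 3) target, matching the two shape assertions. It also explains Example #1: without num_classes the transform cannot build one-hot targets from integer labels, hence the expected exception.
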
Example #3
    def test_mixup_transform_single_label_multi_modal_batch(self):
        mixup_alpha = 2.0
        num_classes = 3
        mixup_transform = MixupTransform(mixup_alpha, num_classes)
        sample = {
            "input": {
                "video": torch.rand(4, 3, 4, 224, 224, dtype=torch.float32),
                "audio": torch.rand(4, 1, 40, 100, dtype=torch.float32),
            },
            "target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
        }
        mixup_transform(sample)
Example #4
    def test_mixup_transform_multi_label(self):
        alpha = 2.0
        mixup_transform = MixupTransform(alpha, None)
        sample = {
            "input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
            "target": torch.as_tensor(
                [[1, 0, 0, 0], [0, 1, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1]],
                dtype=torch.int32,
            ),
        }
        sample_mixup = mixup_transform(sample)
        self.assertTrue(sample["input"].shape == sample_mixup["input"].shape)
        self.assertTrue(sample["target"].shape == sample_mixup["target"].shape)
Example #5
    def test_mixup_transform_single_label_image_batch(self):
        mixup_alpha = 2.0
        num_classes = 3

        for mode in ["batch", "pair", "elem"]:
            mixup_transform = MixupTransform(mixup_alpha, num_classes, mode=mode)
            sample = {
                "input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
                "target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
            }
            sample_mixup = mixup_transform(sample)
            self.assertTrue(sample["input"].shape == sample_mixup["input"].shape)
            self.assertTrue(sample_mixup["target"].shape[0] == 4)
            self.assertTrue(sample_mixup["target"].shape[1] == 3)
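
The mode argument appears to follow the timm-style mixup convention, where "batch" draws a single lambda shared by the whole batch, "pair" draws one lambda per pair of samples, and "elem" draws one per element. A rough sketch of the sampling shapes under that assumption, not ClassyVision's exact code:

    import numpy as np

    def sample_lambdas(batch_size, alpha, mode):
        if mode == "batch":
            # One scalar lambda for the entire batch.
            return np.random.beta(alpha, alpha)
        if mode == "pair":
            # One lambda per mixed pair (every two samples share one).
            return np.random.beta(alpha, alpha, size=batch_size // 2)
        # "elem": an independent lambda for each sample in the batch.
        return np.random.beta(alpha, alpha, size=batch_size)
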
Example #6
    def test_mixup_transform_multi_label_multi_modal_batch(self):
        mixup_alpha = 2.0
        mixup_transform = MixupTransform(mixup_alpha, None)
        sample = {
            "input": {
                "video": torch.rand(4, 3, 4, 224, 224, dtype=torch.float32),
                "audio": torch.rand(4, 1, 40, 100, dtype=torch.float32),
            },
            "target": torch.as_tensor(
                [[1, 0, 0, 0], [0, 1, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1]],
                dtype=torch.int32,
            ),
        }
        mixup_transform(sample)
Example #7
    def test_cutmix_transform_single_label_image_batch(self):
        mixup_alpha = 0
        cutmix_alpha = 0.2
        num_classes = 3

        for mode in ["batch", "pair", "elem"]:
            for minmax in [None, (0.3, 0.7)]:
                cutmix_transform = MixupTransform(
                    mixup_alpha,
                    num_classes,
                    cutmix_alpha=cutmix_alpha,
                    mode=mode,
                    cutmix_minmax=minmax,
                )
                sample = {
                    "input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
                    "target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
                }
                sample_cutmix = cutmix_transform(sample)
                self.assertTrue(sample["input"].shape == sample_cutmix["input"].shape)
                self.assertTrue(sample_cutmix["target"].shape[0] == 4)
                self.assertTrue(sample_cutmix["target"].shape[1] == 3)
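
With mixup_alpha = 0 the transform runs pure cutmix, which pastes a rectangular region from one image into another instead of blending whole images. The standard CutMix box derivation (Yun et al., 2019) sizes the cut so its area fraction is 1 - lambda; when cutmix_minmax is given, timm-style implementations instead sample the box ratio uniformly from that range. A minimal sketch of the usual box computation, under those assumptions:

    import numpy as np

    def rand_bbox(height, width, lam):
        # Box side ratio such that the box covers (1 - lam) of the area.
        cut_ratio = np.sqrt(1.0 - lam)
        cut_h, cut_w = int(height * cut_ratio), int(width * cut_ratio)
        # Uniformly random center, with the box clipped to image bounds.
        cy, cx = np.random.randint(height), np.random.randint(width)
        y1, y2 = np.clip(cy - cut_h // 2, 0, height), np.clip(cy + cut_h // 2, 0, height)
        x1, x2 = np.clip(cx - cut_w // 2, 0, width), np.clip(cx + cut_w // 2, 0, width)
        return y1, y2, x1, x2
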
Example #8
    def test_mixup_cutmix_transform_single_label_image_batch(self):
        mixup_alpha = 0.3
        cutmix_alpha = 0.2
        num_classes = 3

        for mode in ["batch", "pair", "elem"]:
            cutmix_transform = MixupTransform(
                mixup_alpha,
                num_classes,
                cutmix_alpha=cutmix_alpha,
                switch_prob=0.5,
                mode=mode,
            )

            for _i in range(4):
                sample = {
                    "input": torch.rand(4, 3, 224, 224, dtype=torch.float32),
                    "target": torch.as_tensor([0, 1, 2, 2], dtype=torch.int32),
                }
                sample_cutmix = cutmix_transform(sample)
                self.assertTrue(sample["input"].shape == sample_cutmix["input"].shape)
                self.assertTrue(sample_cutmix["target"].shape[0] == 4)
                self.assertTrue(sample_cutmix["target"].shape[1] == 3)
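
When both alphas are non-zero, one augmentation is chosen per batch; switch_prob=0.5 means roughly half of the batches get cutmix and the rest get mixup, which is why the test loops over four fresh batches. A plausible sketch of that per-batch coin flip (a hypothetical helper, not the library's API):

    import numpy as np

    def pick_augmentation(switch_prob=0.5):
        # One coin flip per batch decides cutmix vs. mixup.
        return "cutmix" if np.random.rand() < switch_prob else "mixup"
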
Example #9
    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
        """Instantiates a ClassificationTask from a configuration.

        Args:
            config: A configuration for a ClassificationTask.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A ClassificationTask instance.
        """
        test_only = config.get("test_only", False)
        if not test_only:
            # TODO Make distinction between epochs and phases in optimizer clear
            train_phases_per_epoch = config["dataset"]["train"].get("phases_per_epoch", 1)

            optimizer_config = config["optimizer"]
            optimizer_config["num_epochs"] = config["num_epochs"] * train_phases_per_epoch
            optimizer = build_optimizer(optimizer_config)
            param_schedulers = build_optimizer_schedulers(optimizer_config)

        datasets = {}
        phase_types = ["train", "test"]
        for phase_type in phase_types:
            if phase_type in config["dataset"]:
                datasets[phase_type] = build_dataset(config["dataset"][phase_type])
        loss = build_loss(config["loss"])
        amp_args = config.get("amp_args")
        meters = build_meters(config.get("meters", {}))
        model = build_model(config["model"])

        mixup_transform = None
        if config.get("mixup") is not None:
            assert "alpha" in config[
                "mixup"], "key alpha is missing in mixup dict"
            mixup_transform = MixupTransform(
                config["mixup"]["alpha"], config["mixup"].get("num_classes"))

        # hooks config is optional
        hooks_config = config.get("hooks")
        hooks = []
        if hooks_config is not None:
            hooks = build_hooks(hooks_config)

        distributed_config = config.get("distributed", {})
        distributed_options = {
            "broadcast_buffers_mode": BroadcastBuffersMode[
                distributed_config.get("broadcast_buffers", "before_eval").upper()
            ],
            "batch_norm_sync_mode": BatchNormSyncMode[
                distributed_config.get("batch_norm_sync_mode", "disabled").upper()
            ],
            "batch_norm_sync_group_size": distributed_config.get(
                "batch_norm_sync_group_size", 0
            ),
            "find_unused_parameters": distributed_config.get("find_unused_parameters", True),
        }

        task = (
            cls()
            .set_num_epochs(config["num_epochs"])
            .set_test_phase_period(config.get("test_phase_period", 1))
            .set_loss(loss)
            .set_test_only(test_only)
            .set_model(model)
            .set_meters(meters)
            .set_amp_args(amp_args)
            .set_mixup_transform(mixup_transform)
            .set_distributed_options(**distributed_options)
            .set_hooks(hooks)
            .set_bn_weight_decay(config.get("bn_weight_decay", False))
        )

        if not test_only:
            task.set_optimizer(optimizer)
            task.set_optimizer_schedulers(param_schedulers)

        use_gpu = config.get("use_gpu")
        if use_gpu is not None:
            task.set_use_gpu(use_gpu)

        for phase_type in datasets:
            task.set_dataset(datasets[phase_type], phase_type)

        # NOTE: this is a private member and only meant to be used for
        # logging/debugging purposes. See __repr__ implementation
        task._config = config

        return task
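
For orientation, a config dict that would exercise this from_config path might look roughly like the following; every name value here is a placeholder for whatever is registered in your setup, not a tested ClassyVision configuration:

    config = {
        "num_epochs": 2,
        "dataset": {
            "train": {"name": "my_dataset", "phases_per_epoch": 1},  # placeholder
            "test": {"name": "my_dataset"},  # placeholder
        },
        "model": {"name": "my_model"},  # placeholder
        "loss": {"name": "my_loss"},  # placeholder
        "optimizer": {"name": "sgd"},  # placeholder
        "mixup": {"alpha": 0.2, "num_classes": 1000},  # optional
    }
    task = ClassificationTask.from_config(config)
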
Example #10
    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
        """Instantiates a ClassificationTask from a configuration.

        Args:
            config: A configuration for a ClassificationTask.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A ClassificationTask instance.
        """
        optimizer_config = config["optimizer"]

        # TODO Make distinction between epochs and phases in optimizer clear
        train_phases_per_epoch = config["dataset"]["train"].get("phases_per_epoch", 1)
        optimizer_config["num_epochs"] = config["num_epochs"] * train_phases_per_epoch

        datasets = {}
        phase_types = ["train", "test"]
        for phase_type in phase_types:
            datasets[phase_type] = build_dataset(config["dataset"][phase_type])
        loss = build_loss(config["loss"])
        test_only = config.get("test_only", False)
        amp_args = config.get("amp_args")
        meters = build_meters(config.get("meters", {}))
        model = build_model(config["model"])

        mixup_transform = None
        if config.get("mixup") is not None:
            assert "alpha" in config[
                "mixup"], "key alpha is missing in mixup dict"
            mixup_transform = MixupTransform(
                config["mixup"]["alpha"], config["mixup"].get("num_classes"))

        # hooks config is optional
        hooks_config = config.get("hooks")
        hooks = []
        if hooks_config is not None:
            hooks = build_hooks(hooks_config)

        optimizer = build_optimizer(optimizer_config)

        task = (
            cls()
            .set_num_epochs(config["num_epochs"])
            .set_test_phase_period(config.get("test_phase_period", 1))
            .set_loss(loss)
            .set_test_only(test_only)
            .set_model(model)
            .set_optimizer(optimizer)
            .set_meters(meters)
            .set_amp_args(amp_args)
            .set_mixup_transform(mixup_transform)
            .set_distributed_options(
                broadcast_buffers_mode=BroadcastBuffersMode[
                    config.get("broadcast_buffers", "disabled").upper()
                ],
                batch_norm_sync_mode=BatchNormSyncMode[
                    config.get("batch_norm_sync_mode", "disabled").upper()
                ],
                find_unused_parameters=config.get("find_unused_parameters", True),
            )
            .set_hooks(hooks)
        )

        use_gpu = config.get("use_gpu")
        if use_gpu is not None:
            task.set_use_gpu(use_gpu)

        for phase_type in phase_types:
            task.set_dataset(datasets[phase_type], phase_type)

        return task