def test_deep_copy(self):
    """A deep-copied loss must still compute the expected smoothed CE value."""
    crit = build_loss(
        {
            "name": "label_smoothing_cross_entropy",
            "ignore_index": -1,
            "smoothing_param": 0.5,
        }
    )
    self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))

    logits = torch.tensor([[0.0, 7.0, 0.0, 0.0, 2.0]])
    labels = torch.tensor([[0, 0, 0, 0, 1]])
    # Run one forward pass before copying so any lazily-created state exists.
    crit(logits, labels)

    cloned = copy.deepcopy(crit)
    self.assertAlmostEqual(cloned(logits, labels).item(), 5.07609558, places=5)
コード例 #2
0
    def _test_loss(self, config, output, target, expected_loss):
        """Build a torch.nn loss from ``config`` and verify it end to end."""
        crit = build_loss(config)

        # weights from the config should be applied verbatim
        self.assertAlmostEqual(crit.weight.numpy().tolist(), [1.0, 1.0])

        # forward pass should produce the expected scalar loss
        self.assertAlmostEqual(crit(output, target).item(), expected_loss)

        # a target equal to ignore_index must contribute zero loss
        if "ignore_index" in config:
            self.assertAlmostEqual(crit(output, torch.tensor([-1])).item(), 0.0)
コード例 #3
0
    def _build_task(self, num_epochs, skip_param_schedulers=False):
        """Assemble a ClassificationTask from the test config and return it."""
        config = self._get_config(skip_param_schedulers)
        config["optimizer"]["num_epochs"] = num_epochs

        # Rebind after every setter so behavior is identical whether the
        # fluent setters return self or a new task.
        task = ClassificationTask()
        task = task.set_num_epochs(num_epochs)
        task = task.set_loss(build_loss(config["loss"]))
        task = task.set_model(build_model(config["model"]))
        task = task.set_optimizer(build_optimizer(config["optimizer"]))

        for split in ["train", "test"]:
            task.set_dataset(build_dataset(config["dataset"][split]), split)

        self.assertTrue(task is not None)
        return task
コード例 #4
0
    def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
        """Instantiates a ClassificationTask from a configuration.

        Args:
            config: A configuration for a ClassificationTask.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A ClassificationTask instance.
        """
        opt_config = config["optimizer"]
        opt_config["num_epochs"] = config["num_epochs"]

        phase_types = ["train", "test"]
        datasets = {
            phase: build_dataset(config["dataset"][phase]) for phase in phase_types
        }

        model = build_model(config["model"])
        # put model in eval mode in case any hooks modify model states, it'll
        # be reset to train mode before training
        model.eval()

        # Rebind after every setter so behavior is identical whether the
        # fluent setters return self or a new task.
        task = cls()
        task = task.set_num_epochs(config["num_epochs"])
        task = task.set_test_phase_period(config.get("test_phase_period", 1))
        task = task.set_loss(build_loss(config["loss"]))
        task = task.set_test_only(config.get("test_only", False))
        task = task.set_model(model)
        task = task.set_optimizer(build_optimizer(opt_config))
        task = task.set_meters(build_meters(config.get("meters", {})))
        task = task.set_amp_opt_level(config.get("amp_opt_level"))
        task = task.set_distributed_options(
            BroadcastBuffersMode[config.get("broadcast_buffers", "DISABLED")]
        )

        for phase in phase_types:
            task.set_dataset(datasets[phase], phase)

        return task
コード例 #5
0
    def test_smoothing_all_ones_one_hot_targets(self):
        """All-ones target rows should smooth to a uniform distribution."""
        crit = build_loss(
            {
                "name": "label_smoothing_cross_entropy",
                "ignore_index": -1,
                "smoothing_param": 0.1,
            }
        )
        self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))

        targets = torch.tensor([[1, 1, 1, 1]])
        valid = crit.compute_valid_targets(targets, 4)
        self.assertTrue(torch.allclose(valid, torch.tensor([[1.0, 1.0, 1.0, 1.0]])))

        smoothed = crit.smooth_targets(valid, 4)
        self.assertTrue(
            torch.allclose(smoothed, torch.tensor([[0.25, 0.25, 0.25, 0.25]]))
        )
コード例 #6
0
    def test_get_state(self):
        """A manually-assembled task and a build_task task should both prepare."""
        config = get_test_task_config()

        # Rebind after every setter so behavior is identical whether the
        # fluent setters return self or a new task.
        task = ClassificationTask()
        task = task.set_num_epochs(1)
        task = task.set_loss(build_loss(config["loss"]))
        task = task.set_model(build_model(config["model"]))
        task = task.set_optimizer(build_optimizer(config["optimizer"]))
        for split in ["train", "test"]:
            task.set_dataset(build_dataset(config["dataset"][split]), split)

        task.prepare()

        # the config-driven builder should also produce a preparable task
        task = build_task(config)
        task.prepare()
コード例 #7
0
    def test_training(self):
        """Checks we can train a small MLP model."""
        config = get_test_mlp_task_config()
        task = (
            ClassificationTask()
            .set_num_epochs(10)
            .set_loss(build_loss(config["loss"]))
            .set_model(build_model(config["model"]))
            .set_optimizer(build_optimizer(config["optimizer"]))
            .set_meters([AccuracyMeter(topk=[1])])
            .set_hooks([LossLrMeterLoggingHook()])
        )
        for split in ["train", "test"]:
            task.set_dataset(build_dataset(config["dataset"][split]), split)

        self.assertTrue(task is not None)

        LocalTrainer().train(task)
        # the tiny MLP task should fit its data perfectly after 10 epochs
        self.assertAlmostEqual(task.meters[0].value["top_1"], 1.0)
コード例 #8
0
    def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
        """Instantiates a ClassificationTask from a configuration.

        Args:
            config: A configuration for a ClassificationTask.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A ClassificationTask instance.
        """
        optimizer_config = config["optimizer"]
        optimizer_config["num_epochs"] = config["num_epochs"]

        phase_types = ["train", "test"]
        datasets = {
            phase: build_dataset(config["dataset"][phase]) for phase in phase_types
        }

        # Config values are case-insensitive; .upper() maps them to enum names.
        broadcast_mode = BroadcastBuffersMode[
            config.get("broadcast_buffers", "disabled").upper()
        ]
        bn_sync_mode = BatchNormSyncMode[
            config.get("batch_norm_sync_mode", "disabled").upper()
        ]

        task = (
            cls()
            .set_num_epochs(config["num_epochs"])
            .set_test_phase_period(config.get("test_phase_period", 1))
            .set_loss(build_loss(config["loss"]))
            .set_test_only(config.get("test_only", False))
            .set_model(build_model(config["model"]))
            .set_optimizer(build_optimizer(optimizer_config))
            .set_meters(build_meters(config.get("meters", {})))
            .set_amp_args(config.get("amp_args"))
            .set_distributed_options(
                broadcast_buffers_mode=broadcast_mode,
                batch_norm_sync_mode=bn_sync_mode,
            )
        )
        for phase in phase_types:
            task.set_dataset(datasets[phase], phase)

        return task
コード例 #9
0
 def test_build_soft_target_cross_entropy(self):
     """build_loss should produce a SoftTargetCrossEntropyLoss with defaults set."""
     loss = build_loss(self._get_config())
     self.assertTrue(isinstance(loss, SoftTargetCrossEntropyLoss))
     self.assertEqual(loss._ignore_index, -1)
     self.assertEqual(loss._reduction, "mean")
コード例 #10
0
    def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
        """Instantiates a ClassificationTask from a configuration.

        Args:
            config: A configuration for a ClassificationTask.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A ClassificationTask instance.
        """
        test_only = config.get("test_only", False)
        if not test_only:
            # Only training runs build an optimizer and its schedulers;
            # presumably a test-only config may omit the "optimizer" section
            # entirely — confirm against callers.
            # TODO Make distinction between epochs and phases in optimizer clear
            train_phases_per_epoch = config["dataset"]["train"].get(
                "phases_per_epoch", 1)

            optimizer_config = config["optimizer"]
            # Scale the epoch count by train phases per epoch (mutates the
            # config dict in place before the builders read it).
            optimizer_config["num_epochs"] = (config["num_epochs"] *
                                              train_phases_per_epoch)
            optimizer = build_optimizer(optimizer_config)
            param_schedulers = build_optimizer_schedulers(optimizer_config)

        datasets = {}
        phase_types = ["train", "test"]
        for phase_type in phase_types:
            # Datasets are optional per phase; build only the ones listed.
            if phase_type in config["dataset"]:
                datasets[phase_type] = build_dataset(
                    config["dataset"][phase_type])
        loss = build_loss(config["loss"])
        amp_args = config.get("amp_args")
        meters = build_meters(config.get("meters", {}))
        model = build_model(config["model"])

        mixup_transform = None
        if config.get("mixup") is not None:
            assert "alpha" in config[
                "mixup"], "key alpha is missing in mixup dict"
            mixup_transform = MixupTransform(
                config["mixup"]["alpha"], config["mixup"].get("num_classes"))

        # hooks config is optional
        hooks_config = config.get("hooks")
        hooks = []
        if hooks_config is not None:
            hooks = build_hooks(hooks_config)

        distributed_config = config.get("distributed", {})
        # Config values are case-insensitive; .upper() maps them onto the
        # enum member names.
        distributed_options = {
            "broadcast_buffers_mode":
            BroadcastBuffersMode[distributed_config.get(
                "broadcast_buffers", "before_eval").upper()],
            "batch_norm_sync_mode":
            BatchNormSyncMode[distributed_config.get("batch_norm_sync_mode",
                                                     "disabled").upper()],
            "batch_norm_sync_group_size":
            distributed_config.get("batch_norm_sync_group_size", 0),
            "find_unused_parameters":
            distributed_config.get("find_unused_parameters", True),
        }

        task = (
            cls().set_num_epochs(config["num_epochs"]).set_test_phase_period(
                config.get(
                    "test_phase_period",
                    1)).set_loss(loss).set_test_only(test_only).set_model(
                        model).set_meters(meters).set_amp_args(amp_args).
            set_mixup_transform(mixup_transform).set_distributed_options(
                **distributed_options).set_hooks(hooks).set_bn_weight_decay(
                    config.get("bn_weight_decay", False)))

        if not test_only:
            # optimizer/param_schedulers only exist on the training path above
            task.set_optimizer(optimizer)
            task.set_optimizer_schedulers(param_schedulers)

        use_gpu = config.get("use_gpu")
        if use_gpu is not None:
            task.set_use_gpu(use_gpu)

        for phase_type in datasets:
            task.set_dataset(datasets[phase_type], phase_type)

        # NOTE: this is a private member and only meant to be used for
        # logging/debugging purposes. See __repr__ implementation
        task._config = config

        return task
コード例 #11
0
    def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
        """Instantiates a ClassificationTask from a configuration.

        Args:
            config: A configuration for a ClassificationTask.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A ClassificationTask instance.
        """
        optimizer_config = config["optimizer"]

        # TODO Make distinction between epochs and phases in optimizer clear
        phases_per_epoch = config["dataset"]["train"].get("phases_per_epoch", 1)
        optimizer_config["num_epochs"] = config["num_epochs"] * phases_per_epoch

        phase_types = ["train", "test"]
        datasets = {
            phase: build_dataset(config["dataset"][phase]) for phase in phase_types
        }

        mixup_transform = None
        mixup_config = config.get("mixup")
        if mixup_config is not None:
            assert "alpha" in mixup_config, "key alpha is missing in mixup dict"
            mixup_transform = MixupTransform(
                mixup_config["alpha"], mixup_config.get("num_classes")
            )

        # hooks config is optional
        hooks_config = config.get("hooks")
        hooks = build_hooks(hooks_config) if hooks_config is not None else []

        task = (
            cls()
            .set_num_epochs(config["num_epochs"])
            .set_test_phase_period(config.get("test_phase_period", 1))
            .set_loss(build_loss(config["loss"]))
            .set_test_only(config.get("test_only", False))
            .set_model(build_model(config["model"]))
            .set_optimizer(build_optimizer(optimizer_config))
            .set_meters(build_meters(config.get("meters", {})))
            .set_amp_args(config.get("amp_args"))
            .set_mixup_transform(mixup_transform)
            .set_distributed_options(
                # config values are case-insensitive; .upper() maps to enum names
                broadcast_buffers_mode=BroadcastBuffersMode[
                    config.get("broadcast_buffers", "disabled").upper()
                ],
                batch_norm_sync_mode=BatchNormSyncMode[
                    config.get("batch_norm_sync_mode", "disabled").upper()
                ],
                find_unused_parameters=config.get("find_unused_parameters", True),
            )
            .set_hooks(hooks)
        )

        use_gpu = config.get("use_gpu")
        if use_gpu is not None:
            task.set_use_gpu(use_gpu)

        for phase in phase_types:
            task.set_dataset(datasets[phase], phase)

        return task
コード例 #12
0
def main():
    """Build the UCF-101 datasets, model, loss, meters and optimizer, then
    run evaluation or training on the selected backend (GPU, MKLDNN, or CPU).
    """
    args = parser.parse_args()
    print(args)
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.cuda and args.mkldnn:
        # ``assert`` is stripped under ``python -O``; raise explicitly for
        # argument validation instead.
        raise RuntimeError(
            "Cannot use both the GPU and MKLDNN backends; please set only one."
        )

    if args.cuda:
        print("Using GPU backend to do this work.\n")
    elif args.mkldnn:
        print("Using MKLDNN backend to do this work.\n")
    else:
        print("Using native CPU backend to do this work.\n")

    # set it to the folder where video files are saved
    video_dir = args.video_dir + "/UCF-101"
    # set it to the folder where dataset splitting files are saved
    splits_dir = args.video_dir + "/ucfTrainTestlist"
    # set it to the file path for saving the metadata
    metadata_file = args.video_dir + "/metadata.pth"

    resnext3d_configs = model_config.ResNeXt3D_Config(
        video_dir, splits_dir, metadata_file, args.num_epochs
    )
    resnext3d_configs.setUp()

    datasets = {}
    dataset_train_configs = resnext3d_configs.dataset_configs["train"]
    dataset_test_configs = resnext3d_configs.dataset_configs["test"]
    dataset_train_configs["batchsize_per_replica"] = args.batch_size_train
    # For testing, batchsize per replica should be equal to clips_per_video
    dataset_test_configs["batchsize_per_replica"] = args.batch_size_eval
    dataset_test_configs["clips_per_video"] = args.batch_size_eval

    datasets["train"] = build_dataset(dataset_train_configs)
    datasets["test"] = build_dataset(dataset_test_configs)

    model = build_model(resnext3d_configs.model_configs)
    meters = build_meters(resnext3d_configs.meters_configs)
    loss = build_loss({"name": "CrossEntropyLoss"})
    optimizer = build_optimizer(resnext3d_configs.optimizer_configs)

    # there some ops are not supported by MKLDNN, so convert input to CPU tensor
    if args.mkldnn:
        heads_configs = resnext3d_configs.model_configs['heads'][0]
        mkldnn_head_fcl = MkldnnFullyConvolutionalLinear(
            heads_configs['in_plane'],
            heads_configs['num_classes'],
            heads_configs['activation_func'],
        )

        if args.evaluate:
            model = model.eval()
            model = mkldnn_utils.to_mkldnn(model)
            model._heads['pathway0-stage4-block2']['default_head'].head_fcl = \
                mkldnn_head_fcl.eval()
        else:
            # BUG FIX: the original assigned the undefined name
            # ``mkldnn_head_fc`` here, which raised NameError on the MKLDNN
            # training path; the variable built above is ``mkldnn_head_fcl``.
            model._heads['pathway0-stage4-block2']['default_head'].head_fcl = \
                mkldnn_head_fcl

    if args.evaluate:
        # NOTE(review): ``validata`` is defined elsewhere in this project;
        # the name looks like a typo for ``validate`` — confirm before renaming.
        validata(datasets, model, loss, meters, args)
        return

    train(datasets, model, loss, optimizer, meters, args)