def _test_create_evaluation_step(
    mock_torch_cuda_amp_module,
    model_device: Optional[str] = None,
    evaluator_device: Optional[str] = None,
    trace: bool = False,
    amp_mode: Optional[str] = None,  # was annotated `str` but defaults to None
):
    """Run a supervised evaluation step on a tiny zero-initialized Linear model.

    Verifies that the mocked ``torch.cuda.amp`` module is never entered during
    evaluation and that the ``output_transform`` callback is invoked.

    Args:
        mock_torch_cuda_amp_module: mock/fixture standing in for ``torch.cuda.amp``;
            asserted to be un-called after the run.
        model_device: device to move the model to before evaluation (skipped if falsy).
        evaluator_device: device passed to ``supervised_evaluation_step``; may be a
            string or a ``torch.device``.
        trace: if True, replace the model with a ``torch.jit.trace``-d version.
        amp_mode: AMP mode forwarded to ``_check_arg`` for validation.
    """
    output_transform_mock = MagicMock()
    model = Linear(1, 1)

    if model_device:
        model.to(model_device)
    # Zero weights/bias so the evaluation output is deterministic.
    model.weight.data.zero_()
    model.bias.data.zero_()

    if trace:
        example_input = torch.randn(1, 1)
        model = torch.jit.trace(model, example_input)

    # Normalize torch.device -> str so the "xla" substring test works either way.
    device_type = evaluator_device.type if isinstance(evaluator_device, torch.device) else evaluator_device
    on_tpu = "xla" in device_type if device_type is not None else False
    # Validates the (on_tpu, amp_mode, scaler) combination; scaler is irrelevant here.
    mode, _ = _check_arg(on_tpu, amp_mode, None)

    evaluate_step = supervised_evaluation_step(model, evaluator_device, output_transform=output_transform_mock)

    x = torch.tensor([[1.0], [2.0]])
    y = torch.tensor([[3.0], [5.0]])
    data = [(x, y)]

    evaluator = Engine(evaluate_step)
    evaluator.run(data)

    # Evaluation must not touch torch.cuda.amp, but must apply the output transform.
    assert not mock_torch_cuda_amp_module.called
    assert output_transform_mock.called
def _test_create_mocked_supervised_trainer(
    model_device: Optional[str] = None,
    trainer_device: Optional[str] = None,
    trace: bool = False,
    amp_mode: Optional[str] = None,  # was annotated `str` but defaults to None
    scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
):
    """Build a supervised trainer with all training-step factories mocked out and
    check that exactly the step implementation selected by ``_check_arg`` is used.

    Args:
        model_device: device to move the model to before training (skipped if falsy).
        trainer_device: device passed to ``create_supervised_trainer``.
        trace: if True, replace the model with a ``torch.jit.trace``-d version.
        amp_mode: AMP mode ("amp", "apex", ...) forwarded to the trainer factory.
        scaler: GradScaler flag/instance forwarded to the trainer factory.
    """
    # Patch every step factory so we can observe which one the trainer picks.
    with mock.patch("ignite.engine.supervised_training_step_amp") as training_step_amp_mock, mock.patch(
        "ignite.engine.supervised_training_step_apex"
    ) as training_step_apex_mock, mock.patch(
        "ignite.engine.supervised_training_step_tpu"
    ) as training_step_tpu_mock, mock.patch(
        "ignite.engine.supervised_training_step"
    ) as training_step_mock:
        model = Linear(1, 1)

        if model_device:
            model.to(model_device)
        # Zero weights/bias so the pre-run assertions below hold.
        model.weight.data.zero_()
        model.bias.data.zero_()
        optimizer = SGD(model.parameters(), 0.1)

        if trace:
            example_input = torch.randn(1, 1)
            model = torch.jit.trace(model, example_input)

        # apex initialization only makes sense when both sides live on CUDA.
        if amp_mode == "apex" and model_device == trainer_device == "cuda":
            from apex import amp

            model, optimizer = amp.initialize(model, optimizer, opt_level="O2")

        trainer = create_supervised_trainer(
            model,
            optimizer,
            mse_loss,
            device=trainer_device,
            output_transform=lambda x, y, y_pred, loss: (y_pred, loss.item()),
            amp_mode=amp_mode,
            scaler=scaler,
        )

        x = torch.tensor([[0.1], [0.2]])
        y = torch.tensor([[0.3], [0.5]])
        data = [(x, y)]

        assert model.weight.data[0, 0].item() == approx(0.0)
        assert model.bias.item() == approx(0.0)

        on_tpu = "xla" in trainer_device if trainer_device is not None else False
        mode, _ = _check_arg(on_tpu, amp_mode, scaler)

        # Only run when the device combination is valid (same device, or exactly
        # one side on CPU), then confirm the expected step factory was invoked.
        if model_device == trainer_device or ((model_device == "cpu") ^ (trainer_device == "cpu")):
            trainer.run(data)

            if mode == "amp":
                assert training_step_amp_mock.called
            elif mode == "apex":
                assert training_step_apex_mock.called
            elif mode == "tpu":
                assert training_step_tpu_mock.called
            else:
                assert training_step_mock.called
def _test_create_mocked_supervised_trainer(
    model_device: Optional[str] = None,
    trainer_device: Optional[str] = None,
    trace: bool = False,
    amp_mode: Optional[str] = None,  # was annotated `str` but defaults to None
    scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
):
    """Build a trainer via ``_default_create_supervised_trainer`` with all
    training-step factories mocked out, and check that exactly the step
    implementation selected by ``_check_arg`` is used.

    NOTE(review): this redefines the function of the same name above in this
    file (the later definition wins) — confirm the earlier copy is meant to be
    superseded.

    Args:
        model_device: device the model is created on (forwarded to the helper).
        trainer_device: device the trainer targets (forwarded to the helper).
        trace: if True, the helper traces the model with torch.jit.
        amp_mode: AMP mode ("amp", "apex", ...) forwarded to the helper.
        scaler: GradScaler flag/instance forwarded to the helper.
    """
    # Patch every step factory so we can observe which one the trainer picks.
    with mock.patch("ignite.engine.supervised_training_step_amp") as training_step_amp_mock, mock.patch(
        "ignite.engine.supervised_training_step_apex"
    ) as training_step_apex_mock, mock.patch(
        "ignite.engine.supervised_training_step_tpu"
    ) as training_step_tpu_mock, mock.patch(
        "ignite.engine.supervised_training_step"
    ) as training_step_mock:
        trainer, _ = _default_create_supervised_trainer(
            model_device=model_device,
            trainer_device=trainer_device,
            trace=trace,
            amp_mode=amp_mode,
            scaler=scaler,
        )

        x = torch.tensor([[0.1], [0.2]])
        y = torch.tensor([[0.3], [0.5]])
        data = [(x, y)]

        on_tpu = "xla" in trainer_device if trainer_device is not None else False
        mode, _ = _check_arg(on_tpu, amp_mode, scaler)

        # Only run when the device combination is valid (same device, or exactly
        # one side on CPU), then confirm the expected step factory was invoked.
        if model_device == trainer_device or ((model_device == "cpu") ^ (trainer_device == "cpu")):
            trainer.run(data)

            if mode == "amp":
                assert training_step_amp_mock.called
            elif mode == "apex":
                assert training_step_apex_mock.called
            elif mode == "tpu":
                assert training_step_tpu_mock.called
            else:
                assert training_step_mock.called