def __next__(self):
    """Return the next (input, target) pair with every contained tensor moved to self.device.

    The third element of the returned triple is the iterator's stored kwargs,
    passed through unchanged.
    """
    batch_input, batch_target = next(self.data_loader_iter)

    def _is_torch_tensor(obj):
        return isinstance(obj, torch.Tensor)

    move_to_device = partial(torch.Tensor.to, device=self.device)
    # objwalk traverses the nested structure and applies move_to_device to each tensor leaf.
    batch_input, batch_target = objwalk((batch_input, batch_target), _is_torch_tensor, move_to_device)
    return batch_input, batch_target, self.kwargs
def test_objwalk(objwalk_objects):
    """objwalk applied with member_fn(val=OBJWALK_REF_VAL) must transform the start object into the reference object."""
    start_obj, ref_obj = objwalk_objects[0], objwalk_objects[1]

    def _is_target_instance(obj):
        return isinstance(obj, ObjwalkTestClass)

    apply_fn = partial(ObjwalkTestClass.member_fn, val=OBJWALK_REF_VAL)
    walked = objwalk(start_obj, _is_target_instance, apply_fn)
    assert walked == ref_obj
def __next__(self):
    """Run one forward/backward pass on the next batch and return the collected gradients.

    Raises StopIteration once self._num_data_iter iterations have been consumed.
    The backward pass uses create_graph=True so that higher-order derivatives of
    the gradients remain available to the caller.
    """
    if self.num_iter >= self._num_data_iter:
        raise StopIteration
    self.num_iter += 1

    dataloader_output = next(self.data_loader_iter)
    # Relocate every tensor in the dataloader output onto the model's device.
    model_device = next(self._model.parameters()).device
    dataloader_output = objwalk(dataloader_output, is_tensor, partial(torch.Tensor.to, device=model_device))

    args, kwargs = self._data_loader.get_inputs(dataloader_output)
    self._model.zero_grad()
    outputs = self._model(*args, **kwargs)
    loss = self._criterion(outputs, self._data_loader.get_target(dataloader_output))
    loss.backward(create_graph=True)
    grads = self._parameter_handler.get_gradients()
    # Clear gradients again so the next iteration starts from a clean state.
    self._model.zero_grad()
    return grads
def wrap_nncf_model_inputs_with_objwalk(model_args, model_kwargs):
    """Wrap every tensor found in the positional and keyword argument structures with nncf_model_input.

    Returns the (args, kwargs) pair with tensor leaves replaced by their wrapped form.
    """
    wrapped_args = objwalk(model_args, is_tensor, nncf_model_input)
    wrapped_kwargs = objwalk(model_kwargs, is_tensor, nncf_model_input)
    return wrapped_args, wrapped_kwargs
def _infer_batch(self, args_kwargs_tuple, device):
    """Move all tensors in the (args, kwargs) tuple onto *device* and run one forward pass.

    The model output is intentionally discarded; only the side effects of the
    forward pass matter here.
    """
    walked = objwalk(args_kwargs_tuple, is_tensor, partial(torch.Tensor.to, device=device))
    positional, keyword = walked
    self.model(*positional, **keyword)