def __init__(
    self,
    model: _Model,
    loaders: "OrderedDict[str, DataLoader]",
    callbacks: "Union[OrderedDict[str, Callback], List[Callback]]" = None,
    logdir: str = None,
    stage: str = "train",
    criterion: _Criterion = None,
    optimizer: _Optimizer = None,
    scheduler: _Scheduler = None,
    num_epochs: int = 1,
    valid_loader: str = "valid",
    main_metric: str = "loss",
    minimize_metric: bool = True,
    verbose: bool = False,
    state_kwargs: Dict = None,
    checkpoint_data: Dict = None,
    distributed_params: Dict = None,
    monitoring_params: Dict = None,
    initial_seed: int = 42,
):
    """Store the full experiment configuration.

    All arguments are captured onto (mostly private) attributes; no
    computation happens here beyond normalizing ``callbacks`` through
    ``process_callback`` and replacing ``None`` dict arguments with
    fresh empty dicts.
    """
    # training components
    self._model = model
    self._criterion = criterion
    self._optimizer = optimizer
    self._scheduler = scheduler
    self._loaders = loaders
    # callbacks may arrive as a list or an OrderedDict; normalize once
    self._callbacks = process_callback(callbacks)

    # stage / loop configuration
    self._stage = stage
    self._num_epochs = num_epochs
    self._initial_seed = initial_seed

    # validation & metric selection
    self._valid_loader = valid_loader
    self._main_metric = main_metric
    self._minimize_metric = minimize_metric

    # logging / misc configuration
    self._logdir = logdir
    self._verbose = verbose

    # optional dict-valued settings: substitute empty dicts for None
    # (kept as ``x if x else {}`` so falsy inputs behave like the
    # original ``x or {}``)
    self._additional_state_kwargs = state_kwargs if state_kwargs else {}
    self.checkpoint_data = checkpoint_data if checkpoint_data else {}
    self._distributed_params = distributed_params if distributed_params else {}
    self._monitoring_params = monitoring_params if monitoring_params else {}
def __init__(self, *, device=None, model=None, criterion=None, optimizer: Optimizer = None, scheduler=None, logdir: str = None, stage: str = "infer", num_epochs: int = 1, main_metric: str = "loss", minimize_metric: bool = True, valid_loader: str = "valid", verbose: bool = False, checkpoint_data: Dict = None, batch_consistant_metrics: bool = True, **kwargs):
    """Initialize the runner state: components, counters, metrics, loggers.

    Extra ``**kwargs`` are attached verbatim as attributes before the
    instance is frozen via ``_freeze()``.

    NOTE(review): ``batch_consistant_metrics`` keeps its historical
    misspelling — it is part of the public keyword interface and of
    ``MetricManager``'s signature, so it cannot be renamed safely.
    """
    # @TODO: refactor
    # hack to prevent cycle imports
    from ..callbacks import (
        VerboseLogger, ConsoleLogger, TensorboardLogger,
        RaiseExceptionLogger,
    )

    # core components
    self.model = model
    self.criterion = criterion
    self.optimizer = optimizer
    self.scheduler = scheduler
    self.logdir = None if logdir is None else Path(logdir)

    # special info
    self.stage = stage
    self.device = device
    self.loader_name = None
    self.phase = None

    # data pipeline endpoints
    self.input = None
    self.output = None

    # counters
    self.loader_len = 0
    self.batch_size = 0
    self.step = 0
    self.epoch = 0
    self.stage_epoch = 0
    self.num_epochs = num_epochs

    # metrics & logging
    self.main_metric = main_metric
    self.minimize_metric = minimize_metric
    self.valid_loader = valid_loader
    self.metrics = MetricManager(
        valid_loader=valid_loader,
        main_metric=main_metric,
        minimize=minimize_metric,
        batch_consistant_metrics=batch_consistant_metrics)
    self.verbose = verbose

    # default loggers; insertion order is significant for callback order
    default_loggers = OrderedDict()
    if verbose:
        default_loggers["verbose"] = VerboseLogger()
    if not stage.startswith("infer"):
        default_loggers["console"] = ConsoleLogger()
        default_loggers["tensorboard"] = TensorboardLogger()
    default_loggers["exception"] = RaiseExceptionLogger()
    self.loggers = process_callback(default_loggers)
    self.timer = TimerManager()

    # base metrics: scalar for a single optimizer, per-key dict otherwise
    if isinstance(optimizer, Optimizer):
        self.lr = None
        self.momentum = None
    else:
        self.lr = defaultdict(lambda: None)
        self.momentum = defaultdict(lambda: None)
    self.loss = None

    # extra checkpoint data for saving in checkpoint files
    self.checkpoint_data = checkpoint_data if checkpoint_data else {}

    # other flags
    self.need_backward = False
    self.early_stop = False

    # attach any user-supplied extras as plain attributes
    for attr_name, attr_value in kwargs.items():
        setattr(self, attr_name, attr_value)

    # exception bookkeeping, then lock the attribute set
    self.exception = None
    self.need_reraise_exception = True
    self._freeze()