コード例 #1
    def __init__(self) -> None:
        """Initialize loop state: public optimization-progress tracking plus private run-time buffers."""
        super().__init__()
        # Tracks progress of the optimization steps across restarts.
        self.optim_progress: OptimizationProgress = OptimizationProgress()

        # Accumulated step outputs for the current run.
        self._outputs: _OUTPUTS_TYPE = {}
        self._skip_backward: bool = False
        # `()` is the idiomatic empty-tuple literal (equivalent to, but cheaper than, `tuple()`).
        self._optimizers: Tuple[Optimizer, ...] = ()
        self._indices: Tuple[int, ...] = ()
        # Hidden state carried between splits; presumably for truncated BPTT — TODO confirm.
        self._hiddens: Optional[Any] = None
コード例 #2
    def __init__(self) -> None:
        """Set up optimization-progress tracking and the loop's private run-time state."""
        super().__init__()
        # Public progress tracker for the optimization steps.
        self.optim_progress: OptimizationProgress = OptimizationProgress()

        # Private state, reset as the loop advances.
        self._outputs: _OUTPUTS_TYPE = {}
        self._hiddens: Optional[Any] = None
        self._skip_backward: bool = False
        self._batch_idx: int = 0
        self._optimizers: List[Optimizer] = []
        self._indices: List[int] = []
コード例 #3
    def __init__(self) -> None:
        """Initialize the loop's public outputs/progress trackers and private run-time state."""
        super().__init__()
        # TODO: use default dict here to simplify logic in loop
        self.outputs: _OUTPUTS_TYPE = []
        # Tracks progress of the optimization steps.
        self.optim_progress: OptimizationProgress = OptimizationProgress()

        self._skip_backward: bool = False
        self._batch_idx: int = 0
        self._optimizers: List[Optimizer] = []
        # Hidden state carried between splits; presumably for truncated BPTT — TODO confirm.
        self._hiddens: Optional[Any] = None
コード例 #4
    def __init__(self) -> None:
        """Initialize public loss/progress tracking and the loop's private state."""
        super().__init__()
        # State surfaced to other components.
        self.accumulated_loss: Optional[Tensor] = None
        self.batch_outputs: Optional[List[List[STEP_OUTPUT]]] = None
        self.running_loss: TensorRunningAccum = TensorRunningAccum(window_length=20)
        # the current split index when the batch gets split into chunks in truncated backprop through time
        self.split_idx: Optional[int] = None
        self.optim_progress = OptimizationProgress()

        # Private caches and flags.
        self._skip_backward: bool = False
        self._hiddens: Optional[Tensor] = None
        self._remaining_splits: Optional[List[Any]] = None
        self._optimizer_freq_cumsum: Optional[int] = None
        self._warning_cache: WarningCache = WarningCache()
コード例 #5
    def __init__(self) -> None:
        """Initialize batch/optimization progress tracking and private loop caches."""
        super().__init__()
        # Publicly visible loop state.
        self.accumulated_loss: Optional[Tensor] = None
        self.batch_outputs: Optional[List[List[STEP_OUTPUT]]] = None
        self.running_loss: TensorRunningAccum = TensorRunningAccum(window_length=20)
        self.batch_idx: int = 0
        # Index of the current split when a batch is chunked for truncated backprop through time.
        self.split_idx: Optional[int] = None
        self.progress = BatchProgress()
        self.optim_progress = OptimizationProgress()

        # Private caches and flags.
        self._skip_backward: bool = False
        self._hiddens: Optional[Tensor] = None
        self._remaining_splits: Optional[List[Any]] = None
        self._optimizer_freq_cumsum: Optional[int] = None
        self._warning_cache: WarningCache = WarningCache()