Example #1
 def __init__(self,
              name: Optional[str] = None,
              save_dir: Optional[str] = None,
              offline: bool = False,
              id: Optional[str] = None,
              anonymous: bool = False,
              version: Optional[str] = None,
              project: Optional[str] = None,
              log_model: bool = False,
              experiment=None,
              prefix: str = '',
              **kwargs):
     if wandb is None:
         raise ImportError(
             'You want to use `wandb` logger which is not installed yet,'  # pragma: no-cover
             ' install it with `pip install wandb`.')
     super().__init__()
     self._name = name
     self._save_dir = save_dir
     self._anonymous = 'allow' if anonymous else None
     self._id = version or id
     self._project = project
     self._experiment = experiment
     self._offline = offline
     self._log_model = log_model
     self._prefix = prefix
     self._kwargs = kwargs
     # logging multiple Trainer on a single W&B run (k-fold, resuming, etc)
     self._step_offset = 0
     self.warning_cache = WarningCache()
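Each of these snippets creates a WarningCache so that a warning raised inside a hot training loop is emitted only once per process. A minimal sketch of that pattern; the import path is an assumption and may differ between pytorch_lightning versions:

from pytorch_lightning.utilities.warning_utils import WarningCache  # newer versions expose it from pytorch_lightning.utilities.warnings

warning_cache = WarningCache()
for batch_idx in range(1000):
    # warn() remembers the message, so it is only printed on the first iteration
    warning_cache.warn('this batch produced no loss, skipping the optimizer step')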
Example #2
 def __init__(self, trainer):
     self.trainer = trainer
     self.testing = False
     self.outputs = []
     self.step_metrics = []
     self.predictions = None
     self.max_batches = None
     self.warning_cache = WarningCache()
Example #3
 def __init__(self, trainer):
     self.trainer = trainer
     self.early_stopping_accumulator = None
     self.checkpoint_accumulator = None
     self.accumulated_loss = None
     self.warning_cache = WarningCache()
     self._teardown_already_run = False
     self.running_loss = TensorRunningAccum(window_length=20)
Example #4
 def __init__(self, trainer):
     self.trainer = trainer
     self.outputs = []
     self.step_metrics = []
     self.predictions = None
     self.max_batches = None
     self.warning_cache = WarningCache()
     self.num_dataloaders = None
Example #5
 def __init__(self, trainer):
     self.trainer = trainer
     self.early_stopping_accumulator = None
     self.checkpoint_accumulator = None
     self.accumulated_loss = None
     self.warning_cache = WarningCache()
     self._teardown_already_run = False
     self.running_loss = TensorRunningAccum(window_length=20)
     self.automatic_optimization = True
     self._curr_step_result = None
     self._cur_grad_norm_dict = None
Example #6
    def __init__(self,
                 name: Optional[str] = None,
                 save_dir: Optional[str] = None,
                 offline: bool = False,
                 id: Optional[str] = None,
                 anonymous: bool = False,
                 version: Optional[str] = None,
                 project: Optional[str] = None,
                 log_model: bool = False,
                 experiment=None,
                 prefix: str = '',
                 **kwargs):
        if wandb is None:
            raise ImportError(
                'You want to use `wandb` logger which is not installed yet,'  # pragma: no-cover
                ' install it with `pip install wandb`.')

        if offline and log_model:
            raise MisconfigurationException(
                f'Providing log_model={log_model} and offline={offline} is an invalid configuration'
                ' since model checkpoints cannot be uploaded in offline mode.\n'
                'Hint: Set `offline=False` to log your model.')

        super().__init__()
        self._name = name
        self._save_dir = save_dir
        self._anonymous = 'allow' if anonymous else None
        self._id = version or id
        self._project = project
        self._experiment = experiment
        self._offline = offline
        self._log_model = log_model
        self._prefix = prefix
        self._kwargs = kwargs
        # logging multiple Trainer on a single W&B run (k-fold, resuming, etc)
        self._step_offset = 0
        self.warning_cache = WarningCache()
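Example #6 additionally rejects the incompatible offline=True / log_model=True combination. A minimal, hypothetical usage sketch for this constructor, assuming it belongs to pytorch_lightning.loggers.WandbLogger and that wandb is installed:

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger

# offline runs cannot upload checkpoints, so log_model is left at its default (False)
wandb_logger = WandbLogger(name='baseline', project='my-project', offline=True)
trainer = Trainer(logger=wandb_logger)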
def get_a_var(obj):  # pragma: no-cover
    if isinstance(obj, torch.Tensor):
        return obj

    if isinstance(obj, (list, tuple)):
        for result in map(get_a_var, obj):
            if isinstance(result, torch.Tensor):
                return result
    if isinstance(obj, dict):
        for result in map(get_a_var, obj.items()):
            if isinstance(result, torch.Tensor):
                return result
    return None
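
# Illustration (not part of the original module): get_a_var walks nested
# lists, tuples and dicts and returns the first torch.Tensor it finds, e.g.
#   >>> get_a_var({'loss': [3, torch.tensor(1.0)]})
#   tensor(1.)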


warning_cache = WarningCache()


class LightningDataParallel(DataParallel):
    """
    Override the forward call in Lightning so it routes to the training and validation steps respectively
    """

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)

        for t in chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError("module must have its parameters and buffers "
                                   "on device {} (device_ids[0]) but found one of "