Example 1
import operator

from pytorch_lightning.utilities.imports import _compare_version


def test_compare_version(monkeypatch):
    from pytorch_lightning.utilities.imports import torch

    monkeypatch.setattr(torch, "__version__", "1.8.9")
    assert not _compare_version("torch", operator.ge, "1.10.0")
    assert _compare_version("torch", operator.lt, "1.10.0")

    monkeypatch.setattr(torch, "__version__", "1.10.0.dev123")
    assert _compare_version("torch", operator.ge, "1.10.0.dev123")
    assert not _compare_version("torch", operator.ge, "1.10.0.dev124")

    assert _compare_version("torch",
                            operator.ge,
                            "1.10.0.dev123",
                            use_base_version=True)
    assert _compare_version("torch",
                            operator.ge,
                            "1.10.0.dev124",
                            use_base_version=True)

    monkeypatch.setattr(torch, "__version__",
                        "1.10.0a0+0aef44c")  # dev version before rc
    assert _compare_version("torch",
                            operator.ge,
                            "1.10.0.rc0",
                            use_base_version=True)
    assert not _compare_version("torch", operator.ge, "1.10.0.rc0")
    assert _compare_version("torch",
                            operator.ge,
                            "1.10.0",
                            use_base_version=True)
    assert not _compare_version("torch", operator.ge, "1.10.0")
Example 2
import operator
from typing import List, Optional, Union

from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.utilities import rank_zero_deprecation
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _compare_version

# Note: _check_data_type, _normalize_parse_gpu_input_to_list, _sanitize_gpu_ids and
# _get_all_available_gpus are helpers defined elsewhere in the same module.


def _normalize_parse_gpu_string_input(s: Union[int, str, List[int]]) -> Union[int, List[int]]:
    if not isinstance(s, str):
        return s
    if s == '-1':
        return -1
    if ',' in s:
        return [int(x.strip()) for x in s.split(',') if len(x) > 0]
    num_gpus = int(s.strip())
    if _compare_version("pytorch_lightning", operator.lt, "1.5"):
        rank_zero_deprecation(
            f"Parsing of the Trainer argument gpus='{s}' (string) will change in the future."
            " In the current version of Lightning, this will select"
            f" CUDA device with index {num_gpus}, but from v1.5 it will select gpus"
            f" {list(range(num_gpus))} (same as gpus={s} (int)).", )
        return [num_gpus]
    return num_gpus
def parse_gpu_ids(gpus: Optional[Union[int, str, List[int]]]) -> Optional[List[int]]:
    """
    Parses the GPU ids given in the format accepted by the
    :class:`~pytorch_lightning.trainer.Trainer`.

    Args:
        gpus: An int ``-1`` or string ``'-1'`` indicates that all available GPUs should be used.
            A list of ints or a string of comma-separated integers selects specific GPUs.
            An int ``0`` means that no GPUs should be used.
            Any int ``N > 0`` selects GPUs ``[0..N)``.

    Returns:
        A list of GPU ids to be used, or ``None`` if no GPUs were requested.

    Raises:
        MisconfigurationException:
            If GPUs were requested but none are available.
    """
    # Check that gpus param is None, Int, String or List
    _check_data_type(gpus)

    # Handle the case when no gpus are requested
    if gpus is None or (isinstance(gpus, int) and gpus == 0):
        return None

    if _compare_version("pytorch_lightning", operator.ge,
                        "1.5") and isinstance(gpus,
                                              str) and gpus.strip() == "0":
        # TODO: in v1.5 combine this with the above if statement
        return None

    # The user explicitly requested GPUs, so if some of the
    # requested GPUs are not available, an exception is raised.
    gpus = _normalize_parse_gpu_string_input(gpus)
    gpus = _normalize_parse_gpu_input_to_list(gpus)
    if not gpus:
        raise MisconfigurationException(
            "GPUs requested but none are available.")
    if (TorchElasticEnvironment.is_using_torchelastic() and len(gpus) != 1
            and len(_get_all_available_gpus()) == 1):
        # skip the sanity check under TorchElastic, which by default exposes one visible GPU per process
        return gpus
    return _sanitize_gpu_ids(gpus)
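
For orientation, a few illustrative calls under the pre-1.5 string-parsing behaviour exercised above (a hypothetical session, assuming a machine with 4 visible GPUs):

parse_gpu_ids(None)     # None  (no GPUs requested)
parse_gpu_ids(0)        # None
parse_gpu_ids(3)        # [0, 1, 2]  (GPUs [0..3))
parse_gpu_ids(-1)       # [0, 1, 2, 3]  (all available GPUs)
parse_gpu_ids([0, 2])   # [0, 2]
parse_gpu_ids("0, 2")   # [0, 2]  (comma-separated string)
parse_gpu_ids("3")      # [3] before v1.5 (with a deprecation warning); [0, 1, 2] from v1.5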
Example 5
import operator

from pytorch_lightning.utilities.imports import _compare_version, _TORCHTEXT_LEGACY

if _TORCHTEXT_LEGACY:
    if _compare_version("torchtext", operator.ge, "0.9.0"):
        from torchtext.legacy.data import Batch, Dataset, Example, Field, Iterator, LabelField
    else:
        from torchtext.data import Batch, Dataset, Example, Field, Iterator, LabelField
else:
    Batch = type(None)
    Dataset = type(None)
    Example = type(None)
    Field = type(None)
    Iterator = type(None)
    LabelField = type(None)
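
The type(None) placeholders keep module-level imports and isinstance checks working when legacy torchtext is unavailable: isinstance(x, type(None)) only matches None, so torchtext-specific code paths are skipped rather than crashing on import. An illustrative guard (the helper name is hypothetical):

def _is_torchtext_batch(obj) -> bool:
    # when legacy torchtext is missing, Batch is type(None) and the
    # short-circuit on _TORCHTEXT_LEGACY prevents a false positive for obj=None
    return _TORCHTEXT_LEGACY and isinstance(obj, Batch)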
Example 6
import operator

import pytest
import torch

import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import device_parser
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _compare_version
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.imports import Batch, Dataset, Example, Field, LabelField
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel

PL_VERSION_LT_1_5 = _compare_version("pytorch_lightning", operator.lt, "1.5")
PRETEND_N_OF_GPUS = 16


@RunIf(min_gpus=2)
def test_multi_gpu_none_backend(tmpdir):
    """Make sure when using multiple GPUs the user can't use `distributed_backend = None`."""
    tutils.set_random_master_port()
    trainer_options = dict(
        default_root_dir=tmpdir,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        limit_train_batches=0.2,
        limit_val_batches=0.2,
        gpus=2,
    )

    dm = ClassifDataModule()
    model = ClassificationModel()
    tpipes.run_model_test(trainer_options, model, dm)
import operator

import numpy as np
import pytest
import torch
import yaml

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.imports import _compare_version, _OMEGACONF_AVAILABLE
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf

if _OMEGACONF_AVAILABLE:
    from omegaconf import OmegaConf


@pytest.mark.skipif(_compare_version("tensorboard", operator.ge, "2.6.0"),
                    reason="cannot import EventAccumulator in >= 2.6.0")
def test_tensorboard_hparams_reload(tmpdir):
    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    class CustomModel(BoringModel):
        def __init__(self, b1=0.5, b2=0.999):
            super().__init__()
            self.save_hyperparameters()

    trainer = Trainer(max_steps=1, default_root_dir=tmpdir)
    model = CustomModel()
    assert trainer.log_dir == trainer.logger.log_dir
    trainer.fit(model)

    assert trainer.log_dir == trainer.logger.log_dir
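
The skipif guard exists because EventAccumulator can no longer be imported from that path in tensorboard >= 2.6.0. A hedged sketch of how the event files written during fit can be read back with it (illustrative, not the rest of the test):

event_acc = EventAccumulator(trainer.log_dir)
event_acc.Reload()  # parse the event files on disk
tags = event_acc.Tags()  # hparams/scalars recorded by the logger are now queryable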
Example 8
import operator
from typing import Any, Dict, Optional, Union
from weakref import ReferenceType

import torch.nn as nn

from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
from pytorch_lightning.utilities import _module_available, rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _compare_version
from pytorch_lightning.utilities.warnings import WarningCache

warning_cache = WarningCache()

_WANDB_AVAILABLE = _module_available("wandb")
_WANDB_GREATER_EQUAL_0_10_22 = _compare_version("wandb", operator.ge,
                                                "0.10.22")

try:
    import wandb
    from wandb.wandb_run import Run
except ImportError:
    # needed for test mocks; these tests should be updated
    wandb, Run = None, None


class WandbLogger(LightningLoggerBase):
    r"""
    Log using `Weights and Biases <https://docs.wandb.ai/integrations/lightning>`_.

    Install it with pip:

    .. code-block:: bash

        pip install wandb
    """
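
Version flags such as _WANDB_GREATER_EQUAL_0_10_22 are then consulted at call sites to pick a code path or warn the user. A hedged sketch of the pattern (the function name and warning text are illustrative, not the logger's actual API):

def _log_image_list(run, key, images):
    if not _WANDB_GREATER_EQUAL_0_10_22:
        # WarningCache ensures the message is emitted only once
        warning_cache.warn("Logging media is more reliable with wandb >= 0.10.22; consider upgrading.")
    run.log({key: [wandb.Image(img) for img in images]})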
Example 11
                      checkpoint_callback=False,
                      logger=False)
    with pytest.deprecated_call(
            match="is deprecated in v1.3 and will be removed in v1.5"):
        trainer.metrics_to_scalars({})


def test_v1_5_0_lighting_module_grad_norm(tmpdir):
    model = BoringModel()
    with pytest.deprecated_call(
            match="is deprecated in v1.3 and will be removed in v1.5"):
        model.grad_norm(2)


@pytest.mark.xfail(
    condition=_compare_version("pytorch_lightning", operator.ge, "1.5"),
    reason="parsing of string will change in v1.5",
)
@mock.patch('torch.cuda.device_count', return_value=4)
def test_v1_5_0_trainer_gpus_str_parsing(*_):
    # TODO: when removing this, make sure docs in docs/advanced/multi-gpu.rst reflect the new
    #   behavior regarding GPU selection. Ping @awaelchli if unsure.
    with pytest.deprecated_call(
            match=r"Parsing of the Trainer argument gpus='3' .* will change."):
        Trainer(gpus="3", accelerator="ddp_spawn")

    with pytest.deprecated_call(
            match=r"Parsing of the Trainer argument gpus='3' .* will change."):
        gpus = device_parser.parse_gpu_ids("3")
        assert gpus == [3]
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator

from pytorch_lightning.utilities import _module_available
from pytorch_lightning.utilities.imports import _compare_version

_BOLTS_AVAILABLE = _module_available("pl_bolts") and _compare_version(
    "pl_bolts", operator.ge, "0.4.0")
_BOLTS_GREATER_EQUAL_0_5_0 = _module_available(
    "pl_bolts") and _compare_version("pl_bolts", operator.ge, "0.5.0")
_WANDB_AVAILABLE = _module_available("wandb")
Example 13
import operator
from contextlib import contextmanager
from functools import partial
from itertools import chain
from types import ModuleType
from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Set, Type

import torch
from torch import nn, Tensor
from torch.nn import Module
from torch.nn.modules.container import ModuleDict, ModuleList, Sequential

import pytorch_lightning as pl
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _compare_version
from pytorch_lightning.utilities.rank_zero import rank_zero_warn

_TORCH_GREATER_EQUAL_1_10 = _compare_version("torch", operator.ge, "1.10.0")

if _TORCH_GREATER_EQUAL_1_10:
    from torch._C import _DisableTorchDispatch  # type: ignore[attr-defined]

    ####################################################################
    # BELOW: TAKEN FROM https://github.com/pytorch/pytorch/pull/66317. #
    # TODO: Remove once merged and released on PyTorch side            #
    ####################################################################

    @contextmanager
    def enable_python_mode(cls) -> Iterator[None]:
        if not hasattr(cls, "__torch_dispatch__"):
            raise ValueError("The class passed to enable_python_mode "
                             "must have a __torch_dispatch__ classmethod")
        if not isinstance(cls, type) or not issubclass(cls, (Tensor, )):