Example #1
def test_hooks_removed_after_summarize(max_depth):
    """Test that all hooks were properly removed after summary, even ones that were not run."""
    model = UnorderedModel()
    summary = ModelSummary(model, max_depth=max_depth)
    # hooks should be removed
    for _, layer in summary.summarize().items():
        handle = layer._hook_handle
        assert handle.id not in handle.hooks_dict_ref()
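The test above leans on scaffolding from the surrounding test suite: a parametrized `max_depth` argument and an `UnorderedModel` helper. A minimal sketch of that scaffolding, with an illustrative stand-in model and an import path assumed from PyTorch Lightning ~1.5:

# Minimal scaffolding sketch for the test above. UnorderedModel here is an
# illustrative stand-in, not the helper defined in the Lightning test suite,
# and the ModelSummary import path assumes PyTorch Lightning ~1.5.
import torch.nn as nn
from pytorch_lightning import LightningModule
from pytorch_lightning.utilities.model_summary import ModelSummary


class UnorderedModel(LightningModule):
    """Submodules registered in a different order than they are called."""

    def __init__(self):
        super().__init__()
        self.layer2 = nn.Linear(10, 2)
        self.layer1 = nn.Linear(3, 10)

    def forward(self, x):
        return self.layer2(self.layer1(x))


# max_depth arrives through a parametrize decorator on the test, e.g.:
# @pytest.mark.parametrize("max_depth", [-1, 1])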
Example #2
def test_max_depth_param(max_depth):
    """Test that only the modules up to the desired depth are shown."""
    model = DeepNestedModel()
    summary = ModelSummary(model, max_depth=max_depth)
    for lname in summary.layer_names:
        if max_depth >= 0:
            assert lname.count(".") < max_depth
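`DeepNestedModel` is another suite helper; for the `max_depth` check to be meaningful it only needs several levels of nested submodules. An illustrative sketch (not the suite's actual definition):

# Illustrative stand-in for DeepNestedModel: nested nn.Sequential containers
# produce dotted layer names such as "branch.1.1.0", which is what the
# lname.count(".") < max_depth assertion above exercises.
import torch.nn as nn
from pytorch_lightning import LightningModule


class DeepNestedModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.branch = nn.Sequential(
            nn.Linear(5, 5),
            nn.Sequential(nn.Linear(5, 5), nn.Sequential(nn.Linear(5, 5))),
        )
        self.head = nn.Linear(5, 2)

    def forward(self, x):
        return self.head(self.branch(x))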
Example #3
def test_v1_6_0_deprecated_model_summary_mode(tmpdir):
    model = BoringModel()
    with pytest.deprecated_call(match="Argument `mode` in `ModelSummary` is deprecated in v1.4"):
        ModelSummary(model, mode="top")

    with pytest.deprecated_call(match="Argument `mode` in `LightningModule.summarize` is deprecated in v1.4"):
        model.summarize(mode="top")
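For reference, the deprecated `mode` argument maps onto `max_depth`; assuming the usual mapping ("top" corresponds to `max_depth=1`, "full" to `max_depth=-1`) and reusing the names from the example above, the non-deprecated calls would look like:

# Non-deprecated equivalents of the deprecated calls above, assuming the
# "top" -> max_depth=1 and "full" -> max_depth=-1 mapping.
ModelSummary(model, max_depth=1)   # instead of ModelSummary(model, mode="top")
ModelSummary(model, max_depth=-1)  # instead of ModelSummary(model, mode="full")
model.summarize(max_depth=1)       # instead of model.summarize(mode="top")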
Example #4
def test_invalid_weights_summmary():
    """Test that invalid value for weights_summary raises an error."""
    model = LightningModule()

    with pytest.raises(
        MisconfigurationException, match="`weights_summary` can be None, .* got temp"
    ), pytest.deprecated_call(match=r"weights_summary=temp\)` is deprecated"):
        Trainer(weights_summary="temp")

    with pytest.raises(ValueError, match="`max_depth` can be .* got temp"):
        ModelSummary(model, max_depth="temp")
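To my understanding, `max_depth` accepts -1, 0, or a positive integer, and anything else raises the ValueError matched above. A short sketch of valid calls, reusing `model` and `ModelSummary` from the example above:

# Valid max_depth values; a non-integer such as the string "temp" above
# triggers the ValueError the test matches against.
ModelSummary(model, max_depth=-1)  # summarize every nesting level
ModelSummary(model, max_depth=0)   # no per-layer rows
ModelSummary(model, max_depth=2)   # layers up to two levels deep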
Example #5
def test_lazy_model_summary():
    """Test that the model summary can work with lazy layers."""
    lazy_model = LazyModel()
    summary = ModelSummary(lazy_model)

    with pytest.warns(
        UserWarning,
        match=r"A layer with UninitializedParameter was found. "
        r"Thus, the total number of parameters detected may be inaccurate.",
    ):
        if _TORCH_GREATER_EQUAL_1_9:
            assert summary.total_parameters == 0
            assert summary.trainable_parameters == 0
        else:
            # bug in 1.8: the bias of a LazyLinear layer is initialized!
            # https://github.com/pytorch/pytorch/issues/58350
            assert summary.total_parameters == 7
            assert summary.trainable_parameters == 7
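`LazyModel` is presumably built from `torch.nn.LazyLinear` layers, whose weights stay as `UninitializedParameter` objects until the first forward pass, which is what triggers the warning asserted above. An illustrative sketch, not the suite's actual helper:

# Illustrative stand-in for the LazyModel helper: LazyLinear parameters remain
# uninitialized (UninitializedParameter) until a first forward pass, so the
# summary warns and counts parameters as shown in the test above.
import torch.nn as nn
from pytorch_lightning import LightningModule


class LazyModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.layer1 = nn.LazyLinear(5)
        self.layer2 = nn.LazyLinear(2)

    def forward(self, x):
        return self.layer2(self.layer1(x))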
Example #6
def log_model_summary(self, model, max_depth=-1):
    model_str = str(ModelSummary(model=model, max_depth=max_depth))
    self.run[self._construct_path_with_prefix("model/summary")] = neptune.types.File.from_content(
        content=model_str, extension="txt"
    )
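Example #6 is a logger method: it renders the `ModelSummary` table to text and uploads it to the run as a file. A hedged usage sketch, assuming the method belongs to `pytorch_lightning.loggers.NeptuneLogger` (PL ~1.5) and with a placeholder project name:

# Hedged usage sketch; the logger class, its constructor arguments, and the
# project name are assumptions based on the PL ~1.5 Neptune integration.
from pytorch_lightning.loggers import NeptuneLogger

logger = NeptuneLogger(project="my-workspace/my-project")  # placeholder project
model = BoringModel()  # BoringModel from Example #3; any LightningModule works
logger.log_model_summary(model=model, max_depth=2)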