Example #1
    def push(self, model: nn.Module, pull_on_fail=True):
        """
        Try to push a model to the ordered server. If the push fails, the
        newest model is automatically pulled and its parameters are
        assigned to ``model``. Gradients will not be cleared.

        Args:
            model: Model to push.
            pull_on_fail: Pull the newest parameters if push failed.
        """
        if not hasattr(model, "pp_version"):
            model.pp_version = 0

        copied_model_params = deepcopy(model.state_dict())
        for k, v in copied_model_params.items():
            copied_model_params[k] = v.to("cpu")
        if not self.o_server.push(
                self.model_name, copied_model_params,
                version=model.pp_version + 1, prev_version=model.pp_version
        ):
            if pull_on_fail:
                result = self.o_server.pull(self.model_name)
                if result is None:  # pragma: no cover
                    raise RuntimeError("Pull failed, this should not happen.")
                st_dict, version = result
                prep_load_state_dict(model, st_dict)
                model.pp_version = version
            return False
        else:
            model.pp_version += 1
        return True
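The ``version``/``prev_version`` pair makes the push an optimistic-concurrency update: it only succeeds if the server is still at the version this worker last saw, otherwise the worker falls back to pulling. A minimal self-contained sketch of that behaviour, using an in-memory stand-in for the ordered server (``FakeOrderedServer`` is an illustration, not part of the library):

from copy import deepcopy

import torch.nn as nn


class FakeOrderedServer:
    """In-memory stand-in for the ordered server (illustration only)."""

    def __init__(self):
        self._store = {}  # model name -> (state_dict, version)

    def push(self, name, params, version, prev_version):
        _, cur_version = self._store.get(name, (None, 0))
        if cur_version != prev_version:
            return False  # someone pushed a newer version first
        self._store[name] = (params, version)
        return True

    def pull(self, name):
        return self._store.get(name)


server = FakeOrderedServer()
model_a = nn.Linear(4, 4)
model_b = nn.Linear(4, 4)

# Worker A pushes version 1 successfully.
assert server.push("m", deepcopy(model_a.state_dict()), 1, 0)
# Worker B still believes the server is at version 0, so its push is rejected.
assert not server.push("m", deepcopy(model_b.state_dict()), 1, 0)
# Worker B recovers the way push() above does: pull the newest state dict.
st_dict, version = server.pull("m")
model_b.load_state_dict(st_dict)
model_b.pp_version = version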
Example #2
import torch as t

# ``prep_load_state_dict`` is assumed to be imported from the library under
# test; ``gpu_device`` is presumably a custom pytest option registered in its
# conftest.
def test_prep_load_state_dict(pytestconfig):
    model = t.nn.Linear(100, 100)
    model2 = t.nn.Linear(100, 100).to(pytestconfig.getoption("gpu_device"))
    state_dict = model2.state_dict()
    prep_load_state_dict(model, state_dict)
    assert t.all(model.weight == model2.weight.to("cpu"))
    assert t.all(model.bias == model2.bias.to("cpu"))
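The test shows the intent of ``prep_load_state_dict``: parameters coming from another device are converted so they can be loaded into the target model. A rough, CPU-only re-implementation of that idea (``prep_load_state_dict_sketch`` is a hypothetical name, not the library function):

import torch as t
import torch.nn as nn


def prep_load_state_dict_sketch(model: nn.Module, state_dict: dict):
    # Move every incoming tensor onto the device of the matching
    # parameter/buffer, then load. The real helper may differ in detail.
    own_state = model.state_dict()
    converted = {
        k: (v.to(own_state[k].device) if k in own_state else v)
        for k, v in state_dict.items()
    }
    model.load_state_dict(converted)


model = nn.Linear(100, 100)
model2 = nn.Linear(100, 100)
prep_load_state_dict_sketch(model, model2.state_dict())
assert t.all(model.weight == model2.weight)
assert t.all(model.bias == model2.bias)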
Example #3
    def pull(self, model: nn.Module):
        """
        Pull the newest model.

        Args:
            model: Model to pull.
        """
        params = self.server.pull("_push_pull_grad_managed_model")
        prep_load_state_dict(model, params)
Example #4
    def pull(self, model: nn.Module):
        """
        Pull the newest model. Its gradients will be cleared.

        Args:
            model: Model to pull.
        """
        model.zero_grad()
        params = self.o_server.pull(self.model_name)
        if params is not None:
            # params could be None if the master reducer hasn't performed
            # a single reduction operation yet
            prep_load_state_dict(model, params[0])
Example #5
    def pull(self, model: nn.Module):
        """
        Pull the newest state dict of your model and update its parameters
        and ``pp_version``.

        Args:
            model: Model to pull.
        """
        result = self.server.pull(self.MODEL_NAME)
        if result is None:
            return
        st_dict, version = result
        if not hasattr(model, "pp_version") or model.pp_version < version:
            prep_load_state_dict(model, st_dict)
            model.pp_version = version
Example #6
    def pull(self, model: nn.Module):
        """
        Pull the newest state dict of your model and update its parameters
        and ``pp_version``. Gradients will not be cleared.

        Args:
            model: Model to pull.
        """
        result = self.o_server.pull(self.model_name)
        if result is None:  # pragma: no cover
            return False
        st_dict, version = result
        if not hasattr(model, "pp_version") or model.pp_version < version:
            prep_load_state_dict(model, st_dict)
            model.pp_version = version
        return True
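Examples #5 and #6 gate the reload on ``pp_version``, so a pull that returns an already-seen version leaves the model untouched. A small self-contained sketch of that guard (``maybe_pull`` is illustrative, not a library function):

import torch.nn as nn


def maybe_pull(model: nn.Module, result):
    # Only reload parameters when the server holds a strictly newer
    # version than the local copy, mirroring the guard above.
    if result is None:
        return False
    st_dict, version = result
    if not hasattr(model, "pp_version") or model.pp_version < version:
        model.load_state_dict(st_dict)
        model.pp_version = version
    return True


model = nn.Linear(4, 4)
fresh = nn.Linear(4, 4)

maybe_pull(model, (fresh.state_dict(), 1))  # loads, pp_version becomes 1
maybe_pull(model, (fresh.state_dict(), 1))  # same version: parameters untouched
assert model.pp_version == 1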