Example #1
    def step(self, context_id):
        """
        Performs a single optimization step.

        This will call :meth:`torch.optim.Optimizer.step` on each worker
        containing parameters to be optimized, and will block until all workers
        return. The provided ``context_id`` will be used to retrieve the
        corresponding :class:`~torch.distributed.autograd.context` that
        contains the gradients that should be applied to the parameters.

        Args:
            context_id: the autograd context id for which we should run the
                optimizer step.
        """
        dist_autograd._is_valid_context(context_id)

        # Functional optimizers use the TorchScript-compatible step helper;
        # otherwise fall back to the plain eager-mode optimizer step.
        if self.is_functional_optim:
            optimizer_step_func = _script_local_optimizer_step
        else:
            optimizer_step_func = _local_optimizer_step

        # Issue one asynchronous RPC per remote optimizer on its owning
        # worker, then block until every step call has returned.
        rpc_futs = []
        for optimizer in self.remote_optimizers:
            rpc_futs.append(
                rpc.rpc_async(
                    optimizer.owner(),
                    optimizer_step_func,
                    args=(optimizer, context_id),
                ))
        _wait_for_all(rpc_futs)
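
For context, ``step`` is normally driven from a training loop that runs inside a
distributed autograd context. The snippet below is a minimal sketch, assuming the
RPC framework is already initialized and that ``params_rref`` (a list of RRefs to
remote parameters), ``remote_forward``, ``loss_fn``, ``inputs`` and ``targets``
are hypothetical application-side objects:

    import torch.distributed.autograd as dist_autograd
    from torch import optim
    from torch.distributed.optim import DistributedOptimizer

    # params_rref, remote_forward, loss_fn, inputs and targets are
    # hypothetical placeholders for application code.
    dist_optim = DistributedOptimizer(optim.SGD, params_rref, lr=0.05)

    with dist_autograd.context() as context_id:
        loss = loss_fn(remote_forward(inputs), targets)
        # Accumulate gradients for this context on the workers that own them.
        dist_autograd.backward(context_id, [loss])
        # Triggers the step() shown above: one RPC per remote optimizer,
        # blocking until all workers have applied their updates.
        dist_optim.step(context_id)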
Example #2
    def step(self, context_id):
        """
        Performs a single optimization step.

        This will call :meth:`torch.optim.Optimizer.step` on each worker
        containing parameters to be optimized, and will block until all workers
        return. The provided ``context_id`` will be used to retrieve the
        corresponding :class:`~torch.distributed.autograd.context` that
        contains the gradients that should be applied to the parameters.

        Args:
            context_id: the autograd context id for which we should run the
                optimizer step.
        """
        dist_autograd._is_valid_context(context_id)
        rpc_futs = []
        for optim in self.remote_optimizers:
            rpc_futs.append(
                rpc.rpc_async(
                    optim.owner(),
                    _local_optimizer_step,
                    args=(optim, context_id),
                ))
        _wait_for_all(rpc_futs)
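
Both variants follow the same fan-out pattern: parameters are grouped by their
owning worker when the distributed optimizer is constructed, one local optimizer
is created per owner, and ``step`` issues a single RPC per owner (this second
variant always dispatches ``_local_optimizer_step``, with no functional/TorchScript
path). The helper below is an illustrative sketch of that grouping, not the
library's internals; ``params_rref`` is assumed to be a list of parameter RRefs:

    from collections import defaultdict

    def group_params_by_owner(params_rref):
        # Bucket parameter RRefs by the worker that owns them, so each owner
        # can be covered by one remote optimizer and one RPC per step.
        per_worker = defaultdict(list)
        for param_rref in params_rref:
            per_worker[param_rref.owner()].append(param_rref)
        return per_worker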