Example #1
def sync_params(module):
    """ Sync all parameters of module across all MPI processes. """
    if num_procs() == 1:
        return
    for p in module.parameters():
        # the NumPy view shares memory with the CPU tensor, so the
        # in-place broadcast updates the parameter directly
        p_numpy = p.data.numpy()
        broadcast(p_numpy)
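All of these snippets depend on a small set of MPI helpers (broadcast, num_procs, proc_id) defined elsewhere in their source repos. As a minimal sketch of what those helpers typically look like, assuming an mpi4py backend (the names match the snippets; the bodies are illustrative, not the originals):

from mpi4py import MPI

def proc_id():
    # rank of this process in the global communicator
    return MPI.COMM_WORLD.Get_rank()

def num_procs():
    # total number of MPI processes
    return MPI.COMM_WORLD.Get_size()

def broadcast(x, root=0):
    # in-place broadcast of a NumPy array from root to all processes
    MPI.COMM_WORLD.Bcast(x, root=root)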
Example #2
def sync_params(module):
    """ Sync all parameters of module across all MPI processes. """
    if num_procs() == 1:
        return
    for p in module.parameters():
        p_data = p.data.cpu()       # MPI needs a contiguous CPU buffer
        p_numpy = p_data.numpy()
        broadcast(p_numpy)          # fills p_numpy (and p_data) in place
        if p.device.type != 'cpu' and proc_id() != 0:
            # root already holds the source values; everyone else
            # copies the synced parameters back to the GPU
            p.data.copy_(p_data)
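A hypothetical usage of this GPU-aware variant, assuming the helpers sketched above and a CUDA device: every rank builds the same architecture with its own random init, then one call aligns them all with rank 0's weights.

import torch.nn as nn

model = nn.Linear(4, 2).to('cuda')   # each rank: same shape, different init
sync_params(model)                   # now all ranks hold rank 0's weights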
Example #3
import torch

def sync_all_params(params, root=0):
    if isinstance(params, dict):
        params = sorted(params.items())
    elif isinstance(params, list):
        # support both named_parameters() and regular parameters()
        params = [p if isinstance(p, tuple) else (None, p) for p in params]

    for _, p in params:
        data = p.data.numpy()
        broadcast(data, root)                # in-place sync from root
        p.data.copy_(torch.Tensor(data))     # write synced values back
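This variant accepts any of the common parameter containers; sorting a dict's items keeps the broadcast order identical across ranks. A hypothetical usage sketch (any one call is enough; all three are shown only to illustrate the accepted inputs):

import torch

model = torch.nn.Linear(4, 2)

sync_all_params(dict(model.named_parameters()))   # dict of name -> param
sync_all_params(list(model.named_parameters()))   # list of (name, param) tuples
sync_all_params(list(model.parameters()))         # plain parameter list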
Example #4
def _broadcast(x):
    # broadcast mutates x in place (x must be buffer-like, e.g. a NumPy array)
    broadcast(x)
    return x
Example #5
def _broadcast(x):
    # broadcasting the NumPy view syncs the CPU tensor x in place,
    # since .numpy() shares memory with the tensor
    broadcast(x.numpy())
    return x
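The last two helpers differ only in what x is: Example #4 expects a NumPy array, while Example #5 takes a CPU tensor and exploits the fact that .numpy() returns a view sharing the tensor's memory. A hypothetical round-trip, assuming the mpi4py-style helpers sketched earlier:

import torch

# rank 0 holds the source values; every other rank starts with zeros
t = torch.arange(3.0) if proc_id() == 0 else torch.zeros(3)
t = _broadcast(t)    # every rank now holds tensor([0., 1., 2.])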