Example #1
def sync_params(module):
    """ Sync all parameters of module across all MPI processes. """
    if num_procs() == 1:
        return
    for p in module.parameters():
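        # .numpy() on a CPU tensor is a view that shares the parameter's memory,
        # so the in-place MPI broadcast updates the parameter directly.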
        p_numpy = p.data.numpy()
        broadcast(p_numpy)
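These examples call num_procs(), proc_id(), and broadcast() without showing where they come from. A minimal sketch of what such helpers might look like on top of mpi4py (the module layout and exact signatures are assumptions, not part of the original examples):

from mpi4py import MPI

def num_procs():
    """Number of MPI processes in the communicator."""
    return MPI.COMM_WORLD.Get_size()

def proc_id():
    """Rank of this process (0 is the root)."""
    return MPI.COMM_WORLD.Get_rank()

def broadcast(x, root=0):
    """In-place broadcast of a numpy buffer from `root` to all processes."""
    MPI.COMM_WORLD.Bcast(x, root=root)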
Example #2
def sync_params(module):
    """ Sync all parameters of module across all MPI processes. """
    if num_procs() == 1:
        return
    for p in module.parameters():
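        # .cpu() returns the tensor itself for CPU parameters (shared memory)
        # but a separate copy for GPU parameters, hence the copy back below.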
        p_data = p.data.cpu()
        p_numpy = p_data.numpy()
        broadcast(p_numpy)
        if p.device.type != 'cpu' and proc_id() != 0:
            p.data.copy_(p_data)    # copy parameters back to GPU
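A hypothetical usage sketch (the network, sizes, and launch command are illustrative, not from the original): each rank constructs the same model independently, then sync_params overwrites every rank's weights with rank 0's so training starts from identical parameters.

import torch
import torch.nn as nn

# Illustrative launch: mpirun -np 4 python train.py
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = nn.Sequential(nn.Linear(8, 32), nn.Tanh(), nn.Linear(32, 2)).to(device)
sync_params(model)  # every rank now holds rank 0's initial weights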
Example #3
import torch

def sync_all_params(params, root=0):
    """ Broadcast parameters from the root MPI process to all processes. """
    if isinstance(params, dict):
        params = sorted(params.items())
    elif isinstance(params, list):
        # support both named_parameters() and regular parameters()
        params = [p if isinstance(p, tuple) else (None, p) for p in params]

    for _, p in params:
        data = p.data.numpy()
        # broadcast overwrites `data` in place with the root process's values
        broadcast(data, root)
        p.data.copy_(torch.Tensor(data))
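A brief usage sketch (the layer is illustrative) showing the two input forms the isinstance checks accept:

import torch.nn as nn

net = nn.Linear(4, 2)
# Both call forms reach the same broadcast loop:
sync_all_params(dict(net.named_parameters()))  # dict of name -> Parameter
sync_all_params(list(net.parameters()))        # plain list of Parameters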
Example #4
def _broadcast(x):
    broadcast(x)
    return x
Example #5
def _broadcast(x):
    broadcast(x.numpy())
    return x
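Examples #4 and #5 differ only in what they hand to broadcast: #4 passes x through as-is (appropriate when x is already a numpy buffer), while #5 broadcasts the numpy view of a CPU torch tensor, so the returned tensor comes back holding the root process's values. A hypothetical usage of the tensor variant (the variable name is illustrative):

import torch

# After _broadcast, every rank holds the root's values because the
# numpy view shares memory with the tensor.
obs_mean = torch.zeros(8)
obs_mean = _broadcast(obs_mean)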