def broadcast_parameters(params, root_rank=0):
    """
    Broadcasts the parameters from root rank to all other processes.
    Typical usage is to broadcast the `Module.get_params()` or the
    `Block.collect_params()`.

    Arguments:
        params: One of the following:
            - dict of parameters to broadcast
            - ParameterDict to broadcast
        root_rank: The rank of the process from which parameters will be
            broadcasted to all other processes.
    """
    global parameter_index

    tensors = []
    if isinstance(params, dict):
        tensors = [p for _, p in sorted(params.items())]
    elif isinstance(params, mx.gluon.parameter.ParameterDict):
        for _, p in sorted(params.items()):
            try:
                tensors.append(p.data())
            except mx.gluon.parameter.DeferredInitializationError:
                # Inject a wrapper method with a post-initialization broadcast
                # to handle parameters with deferred initialization. The
                # tensor is declared and broadcast inside the wrapper once the
                # parameter's data actually exists; calling p.data() here
                # would only raise DeferredInitializationError again.
                new_init = _append_broadcast_init(p, root_rank, parameter_index)
                parameter_index += 1
                p._init_impl = types.MethodType(new_init, p)
    else:
        raise ValueError('invalid params of type: %s' % type(params))

    # Run tensor initialization
    for i in range(len(tensors)):
        byteps_declare_tensor(tensors[i], "parameter_" + str(parameter_index))
        # Broadcast is implemented as push + pull in BytePS.
        # To broadcast: zero out all non-root tensors and disable push_pull
        # averaging, so every worker ends up with the root rank's values.
        if rank() != root_rank:
            tensors[i].__imul__(0)
        byteps_push_pull(tensors[i], version=0, priority=0,
                         name="parameter_" + str(parameter_index),
                         is_average=False)
        parameter_index += 1

    # Make sure tensors pushed to the MXNet engine get processed, so that all
    # workers are synced before training starts.
    for tensor in tensors:
        tensor.wait_to_read()
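
# Illustrative sketch only, not part of the library: broadcasting a Gluon
# Block's ParameterDict with the function above. `_example_broadcast_block_params`
# and `net` are hypothetical names; `byteps.mxnet.init()` is assumed to have
# set up the BytePS context.
def _example_broadcast_block_params():
    import mxnet as mx
    import byteps.mxnet as bps

    bps.init()
    net = mx.gluon.nn.Dense(10)
    net.initialize(mx.init.Xavier(), ctx=mx.cpu())
    # The Dense layer's input size is unknown until the first forward pass,
    # so its parameters stay deferred; broadcast_parameters installs the
    # post-initialization broadcast wrapper for them instead of failing.
    broadcast_parameters(net.collect_params(), root_rank=0)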
def _init_params(self):
    # Broadcast every parameter that is fully initialized; keep the ones that
    # still have deferred initialization and retry them on the next call.
    tensors = []
    for param in self._params_to_init:
        if param._deferred_init:
            tensors.append(param)
        else:
            param_arrays = param._check_and_get(param._data, list)
            idx = self._param2idx[param.name]
            # Broadcast is implemented as push + pull: zero out non-root
            # copies and disable averaging, so all workers receive the root
            # rank's values.
            if rank() != self.root_rank:
                param_arrays[0].__imul__(0)
            byteps_push_pull(param_arrays[0], version=0, priority=0,
                             name="parameter_" + str(idx), is_average=False)
    self._params_to_init = tensors
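
# Illustrative sketch only: the Gluon training path that relies on the method
# above. `_example_gluon_first_step` and `net` are hypothetical names; the
# broadcast_parameters docstring below confirms that DistributedTrainer
# broadcasts all parameters at the first training step.
def _example_gluon_first_step():
    import mxnet as mx
    import byteps.mxnet as bps

    bps.init()
    net = mx.gluon.nn.Dense(10)
    net.initialize(mx.init.Xavier(), ctx=mx.cpu())
    trainer = bps.DistributedTrainer(net.collect_params(), 'sgd',
                                     {'learning_rate': 0.01})

    data = mx.nd.ones((4, 8))
    with mx.autograd.record():
        loss = net(data).sum()
    loss.backward()
    # The first training step triggers _init_params() above, broadcasting the
    # now-initialized parameters from root_rank before any update is applied.
    trainer.step(4)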
def broadcast_parameters(params, root_rank=0):
    """
    Broadcasts the parameters from root rank to all other processes.
    Typical usage is to broadcast the `Module.get_params()`.

    Arguments:
        params: dict of parameters to broadcast
        root_rank: The rank of the process from which parameters will be
            broadcasted to all other processes.
    """
    global parameter_index

    if isinstance(params, dict):
        tensors = [p for _, p in sorted(params.items())]

        # Run tensor initialization
        for i in range(len(tensors)):
            byteps_declare_tensor(tensors[i], "parameter_" + str(parameter_index))
            # Broadcast is implemented as push + pull in BytePS.
            # To broadcast: zero out all non-root tensors and disable
            # push_pull averaging.
            if rank() != root_rank:
                tensors[i].__imul__(0)
            byteps_push_pull(tensors[i], version=0, priority=0,
                             name="parameter_" + str(parameter_index),
                             is_average=False)
            parameter_index += 1

        # Make sure tensors pushed to the MXNet engine get processed, so that
        # all workers are synced before training starts.
        for tensor in tensors:
            tensor.wait_to_read()

    elif isinstance(params, mx.gluon.parameter.ParameterDict):
        raise TypeError("For gluon users, you should not call this function. "
                        "DistributedTrainer will broadcast all parameters at "
                        "the first training step.")

    else:
        raise ValueError('Invalid params of type: %s' % type(params))
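
# Illustrative sketch only: the Module workflow the docstring above refers to.
# `_example_broadcast_module_params` and `mod` are hypothetical names.
def _example_broadcast_module_params(mod):
    # `mod` is assumed to be an mx.mod.Module that is already bound and
    # initialized.
    arg_params, aux_params = mod.get_params()
    broadcast_parameters(arg_params, root_rank=0)
    broadcast_parameters(aux_params, root_rank=0)
    # get_params() returns CPU copies, so write the synchronized values back
    # into the module's device arrays.
    mod.set_params(arg_params, aux_params)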