def _moments(self, inputs, reduction_axes, keep_dims):
    """Compute the mean and variance: it overrides the original _moments."""
    worker_mean, worker_variance = super(SyncBatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)

    if size() > 1:
        # Compute variance using: Var[X] = E[X^2] - E[X]^2.
        worker_square_of_mean = tf.math.square(worker_mean)
        worker_mean_of_square = worker_variance + worker_square_of_mean

        # Average stats across all workers: sum the stacked first and second
        # moments, then divide once by the worker count. (Summing here and
        # dividing in framework code avoids averaging twice.)
        worker_stack = tf.stack([worker_mean, worker_mean_of_square])
        group_stack = _allreduce(worker_stack, op=Sum)
        group_stack /= size()
        group_mean, group_mean_of_square = tf.unstack(group_stack)

        group_variance = group_mean_of_square - tf.math.square(group_mean)
        return (group_mean, group_variance)
    else:
        return (worker_mean, worker_variance)
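# A minimal, self-contained check (plain NumPy, no Horovod required) that the
# cross-worker reduction above recovers the global moments: per-worker first
# and second moments are averaged, and the group variance follows from
# Var[X] = E[X^2] - E[X]^2. This holds when every worker holds the same number
# of samples, as in batch-parallel training; the shard shapes are illustrative.
import numpy as np

shards = [np.random.randn(8, 4) for _ in range(3)]  # one array per "worker"
worker_means = [s.mean(axis=0) for s in shards]
worker_mean_sqs = [(s ** 2).mean(axis=0) for s in shards]

group_mean = np.mean(worker_means, axis=0)
group_mean_of_square = np.mean(worker_mean_sqs, axis=0)
group_variance = group_mean_of_square - group_mean ** 2

all_data = np.concatenate(shards, axis=0)
assert np.allclose(group_mean, all_data.mean(axis=0))
assert np.allclose(group_variance, all_data.var(axis=0))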
def allreduce(tensor, average=True, device_dense='', device_sparse='',
              compression=Compression.none):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average: If True, computes the average over all ranks.
                 Otherwise, computes the sum over all ranks.
        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_ALLREDUCE.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_ALLGATHER.
        compression: Compression algorithm used to reduce the amount of data
                     sent and received by each worker node. Defaults to not
                     using compression.

    Returns:
        A tensor of the same shape and type as `tensor`, summed across all
        processes.
    """
    if isinstance(tensor, tf.IndexedSlices):
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            horovod_size = tf.cast(size(), tensor.values.dtype)
            values = allgather(tensor.values)
            indices = allgather(tensor.indices)

            # To make this operation into an average, divide allgathered values by
            # the Horovod size.
            new_values = tf.div(values, horovod_size) if average else values
        return tf.IndexedSlices(new_values, indices,
                                dense_shape=tensor.dense_shape)
    else:
        with tf.device(device_dense):
            horovod_size = tf.cast(size(), dtype=tensor.dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            summed_tensor_compressed = _allreduce(tensor_compressed)
            summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
            new_tensor = (tf.div(summed_tensor, horovod_size)
                          if average else summed_tensor)
        return new_tensor
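# A minimal usage sketch for the allreduce wrapper above, assuming the usual
# public entry points (hvd.init(), hvd.allreduce, hvd.Compression.fp16). It
# averages a per-rank tensor with fp16 wire compression; the tensor contents
# are illustrative only.
import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
grad = tf.constant([1.0, 2.0, 3.0]) * float(hvd.rank() + 1)
avg_grad = hvd.allreduce(grad, compression=hvd.Compression.fp16)
# avg_grad now holds the element-wise mean of `grad` across all ranks.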
def allreduce(tensor, average_dense=True, average_sparse=True, use_allgatherv=False,
              device_dense='', device_sparse=''):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average_dense: If True, computes the average over all ranks for dense
                       tensors. Otherwise, computes the sum over all ranks.
        average_sparse: If True, computes the average over all ranks for sparse
                        tensors. Otherwise, computes the sum over all ranks.
        use_allgatherv: If True, gathers sparse values and indices with
                        allgatherv instead of allgather.
        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_ALLREDUCE.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_ALLGATHER.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.
    """
    if isinstance(tensor, tf.IndexedSlices):
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            if use_allgatherv:
                values = allgatherv(tensor.values)
                indices = allgatherv(tensor.indices)
            else:
                values = allgather(tensor.values)
                indices = allgather(tensor.indices)

            # To make this operation into an average, divide all gathered values by
            # the Horovod size.
            horovod_size = tf.cast(size(), tensor.values.dtype)
            new_values = tf.div(values, horovod_size) if average_sparse else values
        return tf.IndexedSlices(new_values, indices,
                                dense_shape=tensor.dense_shape)
    else:
        with tf.device(device_dense):
            horovod_size = tf.cast(size(), tensor.dtype)
            summed_tensor = _allreduce(tensor)
            new_tensor = (tf.div(summed_tensor, horovod_size)
                          if average_dense else summed_tensor)
        return new_tensor
def gradient(self, target, sources, output_gradients=None):
    gradients = super(self.__class__, self).gradient(target, sources,
                                                     output_gradients)
    if size() > 1 or os.environ.get('HOROVOD_ELASTIC') == '1':
        return self._allreduce_grads(gradients)
    else:
        return gradients
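# A minimal TF2 usage sketch for the tape override above, using the standard
# hvd.DistributedGradientTape wrapper; the model, loss, and inputs are
# illustrative placeholders.
import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
model = tf.keras.layers.Dense(1)
optimizer = tf.keras.optimizers.SGD(0.01 * hvd.size())

@tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(model(x) - y))
    tape = hvd.DistributedGradientTape(tape)  # gradient() now allreduces
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss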
def compute_gradients(self, *args, **kwargs):
    """Compute gradients of all trainable variables.

    See Optimizer.compute_gradients() for more info.

    In DistributedOptimizer, compute_gradients() is overridden to also
    allreduce the gradients before returning them.
    """
    gradients = self._optimizer.compute_gradients(*args, **kwargs)
    if size() > 1:
        averaged_gradients = []
        with tf.name_scope(self._name + "_Allreduce"):
            for grad, var in gradients:
                if grad is not None:
                    if self._sparse_as_dense and \
                            isinstance(grad, tf.IndexedSlices):
                        grad = tf.convert_to_tensor(grad)
                    avg_grad = allreduce(grad,
                                         device_dense=self._device_dense,
                                         device_sparse=self._device_sparse,
                                         compression=self._compression)
                    averaged_gradients.append((avg_grad, var))
                else:
                    averaged_gradients.append((None, var))
        return averaged_gradients
    else:
        return gradients
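# A minimal TF1 graph-mode sketch for the optimizer override above, assuming
# the standard hvd.DistributedOptimizer wrapper. sparse_as_dense=True converts
# IndexedSlices gradients (e.g. from embedding lookups) to dense tensors
# before the allreduce; the variable and loss are illustrative.
import tensorflow.compat.v1 as tf
import horovod.tensorflow as hvd

tf.disable_eager_execution()
hvd.init()
w = tf.get_variable('w', initializer=[1.0, 2.0])
loss = tf.reduce_sum(tf.square(w))
opt = hvd.DistributedOptimizer(tf.train.AdagradOptimizer(0.01 * hvd.size()),
                               sparse_as_dense=True,
                               compression=hvd.Compression.fp16)
train_op = opt.minimize(loss)  # minimize() calls compute_gradients() internally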
def gradient(self, target, sources, output_gradients=None):
    gradients = super(self.__class__, self).gradient(target, sources,
                                                     output_gradients)
    if size() > 1:
        return self._allreduce_grads(gradients)
    else:
        return gradients
def _grouped_allreduce_cond(tensors, *args, **kwargs):
    def allreduce_fn():
        return grouped_allreduce(tensors, *args, **kwargs)

    def id_fn():
        return tensors

    return tf.cond((size_op() > 1) if int(os.environ.get("HOROVOD_ELASTIC", 0))
                   else tf.convert_to_tensor(size() > 1),
                   allreduce_fn, id_fn)
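# Context for the guard above: under elastic training the worker count can
# change after the graph is traced, so the predicate must come from the
# runtime op size_op() rather than the Python-time constant size(). A
# standalone sketch of the same tf.cond pattern; the helper name and the
# doubled-tensor branch are hypothetical stand-ins for illustration only.
import tensorflow as tf

def reduce_if_needed(tensor, world_size_tensor):
    # world_size_tensor: a scalar int tensor produced at graph run time.
    # Both branches must return the same structure, hence the identity lambda.
    return tf.cond(world_size_tensor > 1,
                   lambda: tensor * 2.0,   # stand-in for the allreduce branch
                   lambda: tensor)         # identity when running alone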
def apply_gradients(self, grads_and_vars, **kwargs):
    """Apply gradients to provided variables.

    See Optimizer.apply_gradients() for more info.

    In DistributedOptimizer, apply_gradients() is overridden to also
    allreduce the gradients before applying them.
    """
    if size() > 1:
        grads, vars = zip(*grads_and_vars)
        avg_grads = self._allreduce_grads(grads)
        grads_and_vars = list(zip(avg_grads, vars))
    return self._optimizer.apply_gradients(grads_and_vars, **kwargs)
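# A minimal Keras usage sketch for the apply_gradients override above,
# assuming the standard horovod.tensorflow.keras wrapper; the model and
# training data are illustrative.
import tensorflow as tf
import horovod.tensorflow.keras as hvd

hvd.init()
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
opt = hvd.DistributedOptimizer(tf.keras.optimizers.SGD(0.01 * hvd.size()))
model.compile(loss='mse', optimizer=opt)
# model.fit(...) now averages gradients across ranks inside apply_gradients().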
def allreduce(tensor, average=True, device_dense='', device_sparse=''):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average: If True, computes the average over all ranks.
                 Otherwise, computes the sum over all ranks.
        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_ALLREDUCE.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_ALLGATHER.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.
    """
    if isinstance(tensor, tf.IndexedSlices):
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            horovod_size = tf.cast(size(), tensor.values.dtype)
            values = allgather(tensor.values)
            indices = allgather(tensor.indices)

            # To make this operation into an average, divide all gathered values by
            # the Horovod size.
            new_values = tf.div(values, horovod_size) if average else values
        return tf.IndexedSlices(new_values, indices,
                                dense_shape=tensor.dense_shape)
    else:
        with tf.device(device_dense):
            horovod_size = tf.cast(size(), tensor.dtype)
            summed_tensor = _allreduce(tensor)
            new_tensor = (tf.div(summed_tensor, horovod_size)
                          if average else summed_tensor)
        return new_tensor
def compute_gradients(self, *args, **kwargs):
    """Compute gradients of all trainable variables.

    See Optimizer.compute_gradients() for more info.

    In DistributedOptimizer, compute_gradients() is overridden to also
    allreduce the gradients before returning them.
    """
    gradients = self._optimizer.compute_gradients(*args, **kwargs)
    if size() > 1 or os.environ.get('HOROVOD_ELASTIC') == '1':
        grads, vars = zip(*gradients)
        avg_grads = self._allreduce_grads(grads)
        return list(zip(avg_grads, vars))
    else:
        return gradients
def compute_gradients(self, *args, **kwargs):
    """Compute gradients of all trainable variables.

    See Optimizer.compute_gradients() for more info.

    In DistributedOptimizer, compute_gradients() is overridden to also
    allreduce the gradients before returning them.
    """
    gradients = (super(DistributedOptimizer, self)
                 .compute_gradients(*args, **kwargs))
    if size() > 1:
        with tf.name_scope(self._name + "_Allreduce"):
            return [(allreduce(gradient,
                               device_dense=self._device_dense,
                               device_sparse=self._device_sparse), var)
                    for (gradient, var) in gradients]
    else:
        return gradients
def allgather_object(obj, session=None, name=None):
    """
    Serializes and allgathers an object from all other processes.

    Arguments:
        obj: An object capable of being serialized without losing any context.
        session: Session for TensorFlow v1 compatibility.
        name: Optional name to use during allgather, will default to the class
              type.

    Returns:
        The list of objects that were allgathered across all ranks.
    """
    if name is None:
        name = type(obj).__name__

    def load(byte_array):
        buf = io.BytesIO(byte_array.tobytes())
        return cloudpickle.load(buf)

    def to_numpy(v):
        if not _executing_eagerly():
            sess = session or ops.get_default_session()
            return sess.run(v)
        else:
            return v.numpy()

    b = io.BytesIO()
    cloudpickle.dump(obj, b)

    t = tf.convert_to_tensor(bytearray(b.getvalue()), dtype=tf.uint8)
    sz = tf.convert_to_tensor([t.shape[0]], dtype=tf.int32)

    sizes = to_numpy(allgather(sz, name=name + '.sz'))
    gathered = to_numpy(allgather(t, name=name + '.t'))

    def select(i):
        # Offsets are cumulative: rank i's payload starts after the payloads
        # of ranks 0..i-1.
        start = sum(sizes[:i])
        end = start + sizes[i]
        return gathered[start:end]

    return [load(select(i)) for i in range(size())]
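# A minimal usage sketch for allgather_object, assuming eager TF2 and the
# public horovod.tensorflow entry point. Each rank contributes a small dict;
# every rank receives the full list, ordered by rank. Values are illustrative.
import horovod.tensorflow as hvd

hvd.init()
infos = hvd.allgather_object({'rank': hvd.rank(), 'samples': 100 + hvd.rank()})
# On every rank: infos == [{'rank': 0, ...}, {'rank': 1, ...}, ...]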
def compute_gradients(self, *args, **kwargs):
    """Compute gradients of all trainable variables.

    See Optimizer.compute_gradients() for more info.

    In DistributedOptimizer, compute_gradients() is overridden to also
    allreduce the gradients before returning them.
    """
    gradients = self._optimizer.compute_gradients(*args, **kwargs)
    if size() > 1:
        self.grads, vars = zip(*gradients)
        if _executing_eagerly():
            allreduced_grads = self._allreduce_grads(self.grads)
        else:
            self._agg_helper.init_aggregation_vars(self.grads)
            allreduced_grads = self._agg_helper.compute_gradients(self.grads)
        return list(zip(allreduced_grads, vars))
    else:
        return gradients
def compute_gradients(self, *args, **kwargs):
    """Compute gradients of all trainable variables.

    See Optimizer.compute_gradients() for more info.

    In DistributedOptimizer, compute_gradients() is overridden to also
    allreduce the gradients before returning them.
    """
    gradients = (super(DistributedOptimizer, self)
                 .compute_gradients(*args, **kwargs))
    if size() > 1:
        averaged_gradients = []
        with tf.name_scope(self._name + "_Allreduce"):
            for grad, var in gradients:
                if grad is not None:
                    avg_grad = allreduce(grad,
                                         device_dense=self._device_dense,
                                         device_sparse=self._device_sparse)
                    averaged_gradients.append((avg_grad, var))
                else:
                    averaged_gradients.append((None, var))
        return averaged_gradients
    else:
        return gradients
def compute_gradients(self, *args, **kwargs):
    """Compute gradients of all trainable variables.

    See Optimizer.compute_gradients() for more info.

    In DistributedOptimizer, compute_gradients() is overridden to also
    allreduce the gradients before returning them.
    """
    gradients = (super(DistributedOptimizer, self).compute_gradients(*args, **kwargs))
    if size() > 1:
        averaged_gradients = []
        with tf.name_scope(self._name + "_Allreduce"):
            for grad, var in gradients:
                if grad is not None:
                    avg_grad = allreduce(grad,
                                         device_dense=self._device_dense,
                                         device_sparse=self._device_sparse)
                    averaged_gradients.append((avg_grad, var))
                else:
                    averaged_gradients.append((None, var))
        return averaged_gradients
    else:
        return gradients
def allreduce(tensor, average=None, device_dense='', device_sparse='',
              compression=Compression.none, op=None):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average: DEPRECATED, please use op instead.
        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_ALLREDUCE.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_ALLGATHER.
        compression: Compression algorithm used to reduce the amount of data
                     sent and received by each worker node. Defaults to not
                     using compression.
        op: The reduction operation to combine tensors across different ranks.
            Defaults to Average if None is given.

    Returns:
        A tensor of the same shape and type as `tensor`, summed across all
        processes.
    """
    op = handle_average_backwards_compatibility(op, average)
    # Averaging happens in framework code, so translate that to Sum for the
    # actual call.
    true_op = Sum if op == Average else op

    if isinstance(tensor, tf.IndexedSlices):
        # TODO: Need to fix this to actually call Adasum.
        if op == Adasum:
            raise NotImplementedError(
                'The Adasum reduction does not currently support sparse '
                'tensors. As a workaround please pass sparse_as_dense=True '
                'to DistributedOptimizer')
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            horovod_size = tf.cast(size(), tensor.values.dtype)
            values = allgather(tensor.values)
            indices = allgather(tensor.indices)

            # To make this operation into an average, divide allgathered values by
            # the Horovod size.
            new_values = (values / horovod_size) if op == Average else values
        return tf.IndexedSlices(new_values, indices,
                                dense_shape=tensor.dense_shape)
    else:
        with tf.device(device_dense):
            horovod_size = tf.cast(size(), dtype=tensor.dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            summed_tensor_compressed = _allreduce(tensor_compressed, op=true_op)
            summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
            if op == Adasum:
                if 'CPU' not in tensor.device and has_gpu:
                    if nccl_built():
                        if not is_homogeneous:
                            raise NotImplementedError(
                                'Running GPU Adasum on heterogeneous cluster '
                                'is not supported yet.')
                        elif not check_num_rank_power_of_2(int(size() / local_size())):
                            raise NotImplementedError(
                                'Running GPU Adasum with non-power of 2 nodes '
                                'is not supported yet.')
                        horovod_local_size = tf.cast(local_size(),
                                                     dtype=tensor.dtype)
                        new_tensor = summed_tensor / horovod_local_size
                    else:
                        warnings.warn(
                            'Adasum reduction does not currently support GPU '
                            'reduction using MPI. Tensors are copied to CPU '
                            'memory instead. To use Adasum for GPU reduction, '
                            'please compile Horovod with '
                            'HOROVOD_GPU_ALLREDUCE=NCCL.')
                        new_tensor = summed_tensor
                else:
                    if not check_num_rank_power_of_2(size()):
                        raise NotImplementedError(
                            'Running Adasum with non-power of 2 ranks is not '
                            'supported yet.')
                    new_tensor = summed_tensor
            else:
                new_tensor = ((summed_tensor / horovod_size)
                              if op == Average else summed_tensor)
        return new_tensor
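# A minimal usage sketch for selecting the reduction op above, assuming the
# public hvd.allreduce, hvd.Sum, and hvd.Adasum symbols. Tensor values are
# illustrative; Adasum is shown on a dense tensor only, since the sparse path
# raises NotImplementedError.
import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
t = tf.ones([4]) * float(hvd.rank() + 1)
summed = hvd.allreduce(t, op=hvd.Sum)      # element-wise sum across ranks
adasum = hvd.allreduce(t, op=hvd.Adasum)   # Adasum combining across ranks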
def allreduce(tensor, average=None, device_dense='', device_sparse='',
              compression=Compression.none, op=None,
              prescale_factor=1.0, postscale_factor=1.0,
              name=None):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average:
            .. warning:: .. deprecated:: 0.19.0

                Use `op` instead. Will be removed in v0.21.0.

        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_OPERATIONS.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_OPERATIONS.
        compression: Compression algorithm used to reduce the amount of data
                     sent and received by each worker node. Defaults to not
                     using compression.
        op: The reduction operation to combine tensors across different ranks.
            Defaults to Average if None is given.
        prescale_factor: Multiplicative factor to scale tensor before allreduce.
        postscale_factor: Multiplicative factor to scale tensor after allreduce.
        name: A name of the allreduce operation.

    Returns:
        A tensor of the same shape and type as `tensor`, summed across all
        processes.
    """
    op = handle_average_backwards_compatibility(op, average)

    if isinstance(tensor, tf.IndexedSlices):
        # TODO: Need to fix this to actually call Adasum.
        if op == Adasum:
            raise NotImplementedError(
                'The Adasum reduction does not currently support sparse '
                'tensors. As a workaround please pass sparse_as_dense=True '
                'to DistributedOptimizer')
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            horovod_size = tf.cast(
                size_op() if int(os.environ.get("HOROVOD_ELASTIC", 0)) else size(),
                dtype=tensor.values.dtype)
            values = allgather(tensor.values)
            indices = allgather(tensor.indices)

            # To make this operation into an average, divide allgathered values by
            # the Horovod size.
            new_values = (values / horovod_size) if op == Average else values
        return tf.IndexedSlices(new_values, indices,
                                dense_shape=tensor.dense_shape)
    else:
        average_in_framework = False
        if rocm_built():
            # For ROCm, perform averaging at framework level
            average_in_framework = op == Average or op == Adasum
            op = Sum if op == Average else op

        with tf.device(device_dense):
            horovod_size = tf.cast(
                size_op() if int(os.environ.get("HOROVOD_ELASTIC", 0)) else size(),
                dtype=tensor.dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            summed_tensor_compressed = _allreduce(tensor_compressed, op=op,
                                                  prescale_factor=prescale_factor,
                                                  postscale_factor=postscale_factor,
                                                  name=name)
            summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
            if op == Adasum:
                if 'CPU' not in tensor.device and gpu_available('tensorflow'):
                    if nccl_built():
                        if not is_homogeneous:
                            raise NotImplementedError(
                                'Running GPU Adasum on heterogeneous cluster '
                                'is not supported yet.')
                        elif not check_num_rank_power_of_2(int(size() / local_size())):
                            raise NotImplementedError(
                                'Running GPU Adasum with non-power of 2 nodes '
                                'is not supported yet.')
                        if rocm_built():
                            horovod_local_size = tf.cast(
                                local_size_op() if int(os.environ.get("HOROVOD_ELASTIC", 0))
                                else local_size(),
                                dtype=tensor.dtype)
                            new_tensor = summed_tensor / horovod_local_size
                        else:
                            new_tensor = summed_tensor
                    else:
                        warnings.warn(
                            'Adasum reduction does not currently support GPU '
                            'reduction using MPI. Tensors are copied to CPU '
                            'memory instead. To use Adasum for GPU reduction, '
                            'please compile Horovod with '
                            'HOROVOD_GPU_OPERATIONS=NCCL.')
                        new_tensor = summed_tensor
                else:
                    if not check_num_rank_power_of_2(size()):
                        raise NotImplementedError(
                            'Running Adasum with non-power of 2 ranks is not '
                            'supported yet.')
                    new_tensor = summed_tensor
            else:
                if rocm_built():
                    new_tensor = ((summed_tensor / horovod_size)
                                  if average_in_framework else summed_tensor)
                else:
                    new_tensor = summed_tensor
        return new_tensor
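# A minimal usage sketch for the prescale/postscale hooks above, assuming the
# public hvd.allreduce signature with those keywords. Pre-scaling by 1/N and
# requesting a Sum is numerically equivalent to an Average, which can help
# avoid overflow when reducing low-precision gradients; the tensor is
# illustrative.
import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
g = tf.random.normal([1024])
avg = hvd.allreduce(g, op=hvd.Sum, prescale_factor=1.0 / hvd.size())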
def grouped_allreduce(tensors, average=None, device_dense='', device_sparse='',
                      compression=Compression.none, op=None,
                      prescale_factor=1.0, postscale_factor=1.0):
    if not tensors:
        return tensors

    op = handle_average_backwards_compatibility(op, average)

    average_in_framework = False
    if rocm_built():
        # For ROCm, perform averaging at framework level
        average_in_framework = op == Average or op == Adasum
        op = Sum if op == Average else op

    if any(isinstance(t, tf.IndexedSlices) for t in tensors):
        # TODO: Need to fix this to actually call Adasum.
        if op == Adasum:
            raise NotImplementedError(
                'The Adasum reduction does not currently support sparse '
                'tensors. As a workaround please pass sparse_as_dense=True '
                'to DistributedOptimizer')
        with tf.device(device_sparse):
            new_values = []
            new_indices = []
            for tensor in tensors:
                # For IndexedSlices, do two allgathers instead of an allreduce.
                horovod_size = tf.cast(
                    size_op() if int(os.environ.get("HOROVOD_ELASTIC", 0)) else size(),
                    dtype=tensor.values.dtype)
                values = allgather(tensor.values)
                indices = allgather(tensor.indices)

                # To make this operation into an average, divide allgathered
                # values by the Horovod size. Keep each tensor's own indices so
                # the rebuilt IndexedSlices pair up correctly.
                new_indices.append(indices)
                new_values.append((values / horovod_size) if op == Average else values)
        return [tf.IndexedSlices(x, idx, dense_shape=t.dense_shape)
                for x, idx, t in zip(new_values, new_indices, tensors)]
    else:
        with tf.device(device_dense):
            tensors_compressed, ctxs = zip(
                *[compression.compress(tensor) for tensor in tensors])
            summed_tensors_compressed = _grouped_allreduce(
                tensors_compressed, op=op,
                prescale_factor=prescale_factor,
                postscale_factor=postscale_factor)
            summed_tensors = [compression.decompress(t, ctx)
                              for t, ctx in zip(summed_tensors_compressed, ctxs)]
            if op == Adasum:
                # All tensors in the group share a device placement.
                if 'CPU' not in tensors[0].device and gpu_available('tensorflow'):
                    if nccl_built():
                        if not is_homogeneous:
                            raise NotImplementedError(
                                'Running GPU Adasum on heterogeneous cluster '
                                'is not supported yet.')
                        elif not check_num_rank_power_of_2(int(size() / local_size())):
                            raise NotImplementedError(
                                'Running GPU Adasum with non-power of 2 nodes '
                                'is not supported yet.')
                        if rocm_built():
                            new_tensors = []
                            for tensor in summed_tensors:
                                horovod_local_size = tf.cast(
                                    local_size_op() if int(os.environ.get("HOROVOD_ELASTIC", 0))
                                    else local_size(),
                                    dtype=tensor.dtype)
                                new_tensors.append(tensor / horovod_local_size)
                        else:
                            new_tensors = summed_tensors
                    else:
                        warnings.warn(
                            'Adasum reduction does not currently support GPU '
                            'reduction using MPI. Tensors are copied to CPU '
                            'memory instead. To use Adasum for GPU reduction, '
                            'please compile Horovod with '
                            'HOROVOD_GPU_OPERATIONS=NCCL.')
                        new_tensors = summed_tensors
                else:
                    if not check_num_rank_power_of_2(size()):
                        raise NotImplementedError(
                            'Running Adasum with non-power of 2 ranks is not '
                            'supported yet.')
                    new_tensors = summed_tensors
            else:
                if rocm_built():
                    new_tensors = []
                    for tensor in summed_tensors:
                        horovod_size = tf.cast(
                            size_op() if int(os.environ.get("HOROVOD_ELASTIC", 0)) else size(),
                            dtype=tensor.dtype)
                        new_tensors.append((tensor / horovod_size)
                                           if average_in_framework else tensor)
                else:
                    new_tensors = summed_tensors
        return new_tensors
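# A minimal usage sketch for grouped allreduce, assuming the public
# hvd.grouped_allreduce entry point. Grouping fuses several small tensors into
# a single collective call, amortizing launch and network latency; the tensor
# shapes and values are illustrative.
import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
grads = [tf.ones([10]), tf.ones([20]), tf.ones([5])]
avg_grads = hvd.grouped_allreduce(grads, op=hvd.Average)
# avg_grads is a list of three tensors, each averaged across all ranks.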