Example 1
def _allreduce_async(tensor, output, name, op, prescale_factor,
                     postscale_factor, process_set: ProcessSet):
    # Set the divisor for reduced gradients to average when necessary
    if op == Average:
        if rocm_built():
            # For ROCm, perform averaging at framework level
            divisor = size()
            op = Sum
        else:
            divisor = 1

    elif op == Adasum:
        if process_set != global_process_set:
            raise NotImplementedError(
                "Adasum does not support non-global process sets yet.")
        if tensor.device.type != 'cpu' and gpu_available('torch'):
            if nccl_built():
                if not is_homogeneous():
                    raise NotImplementedError(
                        'Running GPU Adasum on heterogeneous cluster is not supported yet.'
                    )
                elif not num_rank_is_power_2(int(size() / local_size())):
                    raise NotImplementedError(
                        'Running GPU Adasum with non-power of 2 nodes is not supported yet.'
                    )
                if rocm_built():
                    # For ROCm, perform averaging at framework level
                    divisor = local_size()
                else:
                    divisor = 1
            else:
                warnings.warn(
                    'Adasum reduction does not currently support GPU reduction using MPI. Tensors are '
                    'copied to CPU memory instead. To use Adasum for GPU reduction, please compile Horovod '
                    'with HOROVOD_GPU_OPERATIONS=NCCL.')
                divisor = 1
        else:
            if not num_rank_is_power_2(size()):
                raise NotImplementedError(
                    'Running Adasum with non-power of 2 ranks is not supported yet.'
                )
            divisor = 1
    else:
        divisor = 1

    function = _check_function(_allreduce_function_factory, tensor)
    try:
        handle = getattr(mpi_lib, function)(
            tensor, output, divisor,
            name.encode() if name is not None else _NULL, op, prescale_factor,
            postscale_factor, process_set.process_set_id)
    except RuntimeError as e:
        raise HorovodInternalError(e)
    _handle_map[handle] = (tensor, output)
    return handle
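
_allreduce_async is an internal helper; user code reaches it through the public allreduce_async/synchronize pair, which defaults to the global process set. A minimal usage sketch, assuming a working Horovod build with PyTorch support (the tensor name 'grad' is illustrative):

import torch
import horovod.torch as hvd

hvd.init()
# Each rank contributes a different tensor; Average divides the sum by size().
grad = torch.ones(4) * hvd.rank()
handle = hvd.allreduce_async(grad, name='grad', op=hvd.Average)
averaged = hvd.synchronize(handle)  # blocks until the background reduction completes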
Example 2
def _allreduce_async(tensor, output, name, op):
    if tensor.dtype == torch.float16 and not _fp16_supported:
        raise NotImplementedError(
            'float16 allreduce is not supported for PyTorch version {} < 1.0.0'
            .format(torch.__version__))

    # Set the divisor for reduced gradients to average when necessary
    if op == Average:
        divisor = size()
    elif op == Adasum:
        if tensor.device.type != 'cpu' and gpu_available('torch'):
            if nccl_built():
                if not is_homogeneous():
                    raise NotImplementedError(
                        'Running GPU Adasum on heterogeneous cluster is not supported yet.'
                    )
                elif not num_rank_is_power_2(int(size() / local_size())):
                    raise NotImplementedError(
                        'Running GPU Adasum with non-power of 2 nodes is not supported yet.'
                    )
                divisor = local_size()
            else:
                warnings.warn(
                    'Adasum reduction does not currently support GPU reduction using MPI. Tensors are '
                    'copied to CPU memory instead. To use Adasum for GPU reduction, please compile Horovod '
                    'with HOROVOD_GPU_OPERATIONS=NCCL.')
                divisor = 1
        else:
            if not num_rank_is_power_2(size()):
                raise NotImplementedError(
                    'Running Adasum with non-power of 2 ranks is not supported yet.'
                )
            divisor = 1
    else:
        divisor = 1
    # Averaging happens in framework code, so translate that to Sum for the actual call
    true_op = Sum if op == Average else op

    function = _check_function(_allreduce_function_factory, tensor)
    try:
        handle = getattr(mpi_lib, function)(
            tensor, output, divisor,
            name.encode() if name is not None else _NULL, true_op)
    except RuntimeError as e:
        raise HorovodInternalError(e)
    _handle_map[handle] = (tensor, output)
    return handle
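
This older variant predates process sets and prescale/postscale factors, but the calling pattern is the same. A short sketch of the float16 path that the guard at the top protects, assuming Horovod was built against PyTorch >= 1.0.0:

import torch
import horovod.torch as hvd

hvd.init()
# On v2-API builds _fp16_supported is True and this reduces in half precision;
# on older builds the helper above raises NotImplementedError instead.
t = torch.ones(8, dtype=torch.float16)
reduced = hvd.allreduce(t, op=hvd.Average)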
Example 3
def allreduce(tensor,
              average=None,
              device_dense='',
              device_sparse='',
              compression=Compression.none,
              op=None,
              prescale_factor=1.0,
              postscale_factor=1.0,
              name=None):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average:
            .. warning:: .. deprecated:: 0.19.0

                Use `op` instead. Will be removed in v0.21.0.

        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_OPERATIONS.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_OPERATIONS.
        compression: Compression algorithm used to reduce the amount of data
                     sent and received by each worker node.  Defaults to not
                     using compression.
        op: The reduction operation to combine tensors across different ranks.
            Defaults to Average if None is given.
        prescale_factor: Multiplicative factor to scale tensor before allreduce.
        postscale_factor: Multiplicative factor to scale tensor after allreduce.
        name: A name of the allreduce operation.

    Returns:
        A tensor of the same shape and type as `tensor`, summed across all
        processes.
    """
    op = handle_average_backwards_compatibility(op, average)

    if isinstance(tensor, tf.IndexedSlices):
        # TODO: Need to fix this to actually call Adasum
        if op == Adasum:
            raise NotImplementedError(
                'The Adasum reduction does not currently support sparse tensors. As a '
                'workaround please pass sparse_as_dense=True to DistributedOptimizer'
            )
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            horovod_size = tf.cast(size_op() if int(
                os.environ.get("HOROVOD_ELASTIC", 0)) else size(),
                                   dtype=tensor.values.dtype)
            values = allgather(tensor.values)
            indices = allgather(tensor.indices)

            # To make this operation into an average, divide allgathered values by
            # the Horovod size.
            new_values = (values / horovod_size) if op == Average else values
        return tf.IndexedSlices(new_values,
                                indices,
                                dense_shape=tensor.dense_shape)
    else:
        average_in_framework = False
        if rocm_built():
            # For ROCm, perform averaging at framework level
            average_in_framework = op == Average or op == Adasum
            op = Sum if op == Average else op

        with tf.device(device_dense):
            horovod_size = tf.cast(size_op() if int(
                os.environ.get("HOROVOD_ELASTIC", 0)) else size(),
                                   dtype=tensor.dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            summed_tensor_compressed = _allreduce(
                tensor_compressed,
                op=op,
                prescale_factor=prescale_factor,
                postscale_factor=postscale_factor,
                name=name)
            summed_tensor = compression.decompress(summed_tensor_compressed,
                                                   ctx)
            if op == Adasum:
                if 'CPU' not in tensor.device and gpu_available('tensorflow'):
                    if nccl_built():
                        if not is_homogeneous():
                            raise NotImplementedError(
                                'Running GPU Adasum on heterogeneous cluster is not supported yet.'
                            )
                        elif not check_num_rank_power_of_2(
                                int(size() / local_size())):
                            raise NotImplementedError(
                                'Running GPU Adasum with non-power of 2 nodes is not supported yet.'
                            )
                        if rocm_built():
                            horovod_local_size = tf.cast(
                                local_size_op() if int(
                                    os.environ.get("HOROVOD_ELASTIC",
                                                   0)) else local_size(),
                                dtype=tensor.dtype)
                            new_tensor = summed_tensor / horovod_local_size
                        else:
                            new_tensor = summed_tensor
                    else:
                        warnings.warn(
                            'Adasum reduction does not currently support GPU reduction using MPI. Tensors '
                            'are copied to CPU memory instead. To use Adasum for GPU reduction, please '
                            'compile Horovod with HOROVOD_GPU_OPERATIONS=NCCL.'
                        )
                        new_tensor = summed_tensor
                else:
                    if not check_num_rank_power_of_2(size()):
                        raise NotImplementedError(
                            'Running Adasum with non-power of 2 ranks is not supported yet.'
                        )
                    new_tensor = summed_tensor
            else:
                if rocm_built():
                    new_tensor = (summed_tensor / horovod_size
                                  ) if average_in_framework else summed_tensor
                else:
                    new_tensor = summed_tensor
        return new_tensor
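
A hedged usage sketch of this public TensorFlow entry point, assuming hvd.init() has run on every rank; it exercises the op, compression, and prescale_factor parameters documented above (tensor values are illustrative):

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
t = tf.ones([2, 2]) * hvd.rank()
# Average across ranks while halving the on-the-wire payload with fp16 compression.
avg = hvd.allreduce(t, op=hvd.Average, compression=hvd.Compression.fp16)
# prescale_factor is applied before the reduction, so a Sum prescaled by
# 1/size() is numerically an average without a separate divide.
scaled = hvd.allreduce(t, op=hvd.Sum, prescale_factor=1.0 / hvd.size())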
Example 4
def grouped_allreduce(tensors,
                      average=None,
                      device_dense='',
                      device_sparse='',
                      compression=Compression.none,
                      op=None,
                      prescale_factor=1.0,
                      postscale_factor=1.0):
    if not tensors:
        return tensors

    op = handle_average_backwards_compatibility(op, average)

    average_in_framework = False
    if rocm_built():
        # For ROCm, perform averaging at framework level
        average_in_framework = op == Average or op == Adasum
        op = Sum if op == Average else op

    if any(isinstance(t, tf.IndexedSlices) for t in tensors):
        # TODO: Need to fix this to actually call Adasum
        if op == Adasum:
            raise NotImplementedError(
                'The Adasum reduction does not currently support sparse tensors. As a '
                'workaround please pass sparse_as_dense=True to DistributedOptimizer'
            )
        with tf.device(device_sparse):
            new_values = []
            new_indices = []
            for tensor in tensors:
                # For IndexedSlices, do two allgathers instead of an allreduce.
                horovod_size = tf.cast(size_op() if int(
                    os.environ.get("HOROVOD_ELASTIC", 0)) else size(),
                                       dtype=tensor.values.dtype)
                values = allgather(tensor.values)
                indices = allgather(tensor.indices)

                # To make this operation into an average, divide allgathered values by
                # the Horovod size. Append per-tensor results; using += here would
                # extend the list with tensor rows, and reusing the last loop's
                # indices would mismatch values and indices.
                new_values.append(
                    (values / horovod_size) if op == Average else values)
                new_indices.append(indices)
        return [
            tf.IndexedSlices(x, idx, dense_shape=t.dense_shape)
            for x, idx, t in zip(new_values, new_indices, tensors)
        ]
    else:
        with tf.device(device_dense):
            tensors_compressed, ctxs = zip(
                *[compression.compress(tensor) for tensor in tensors])
            summed_tensors_compressed = _grouped_allreduce(
                tensors_compressed,
                op=op,
                prescale_factor=prescale_factor,
                postscale_factor=postscale_factor)
            summed_tensors = [
                compression.decompress(t, ctx)
                for t, ctx in zip(summed_tensors_compressed, ctxs)
            ]
            if op == Adasum:
                # The comprehension variable `tensor` is out of scope here, so
                # inspect the first input tensor instead.
                if 'CPU' not in tensors[0].device and gpu_available('tensorflow'):
                    if nccl_built():
                        if not is_homogeneous():
                            raise NotImplementedError(
                                'Running GPU Adasum on heterogeneous cluster is not supported yet.'
                            )
                        elif not check_num_rank_power_of_2(
                                int(size() / local_size())):
                            raise NotImplementedError(
                                'Running GPU Adasum with non-power of 2 nodes is not supported yet.'
                            )
                        if rocm_built():
                            new_tensors = []
                            for tensor in summed_tensors:
                                horovod_local_size = tf.cast(
                                    local_size_op() if int(
                                        os.environ.get("HOROVOD_ELASTIC",
                                                       0)) else local_size(),
                                    dtype=tensor.dtype)
                                new_tensors.append(tensor / horovod_local_size)
                        else:
                            new_tensors = summed_tensors
                    else:
                        warnings.warn(
                            'Adasum reduction does not currently support GPU reduction using MPI. Tensors '
                            'are copied to CPU memory instead. To use Adasum for GPU reduction, please '
                            'compile Horovod with HOROVOD_GPU_OPERATIONS=NCCL.'
                        )
                        new_tensors = summed_tensors
                else:
                    if not check_num_rank_power_of_2(size()):
                        raise NotImplementedError(
                            'Running Adasum with non-power of 2 ranks is not supported yet.'
                        )
                    new_tensors = summed_tensors
            else:
                if rocm_built():
                    new_tensors = []
                    for tensor in summed_tensors:
                        horovod_size = tf.cast(size_op() if int(
                            os.environ.get("HOROVOD_ELASTIC", 0)) else size(),
                                               dtype=tensor.dtype)
                        new_tensors.append(
                            (tensor / horovod_size)
                            if average_in_framework else tensor)
                else:
                    new_tensors = summed_tensors
        return new_tensors
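
grouped_allreduce negotiates and reduces several tensors as one fused operation, amortizing per-call launch and coordination overhead. A minimal sketch under the same assumptions as above (the gradient list is illustrative):

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
grads = [tf.ones([4]), tf.ones([2, 2])]
# One fused negotiation/reduction instead of one allreduce per tensor.
avg_grads = hvd.grouped_allreduce(grads, op=hvd.Average)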
Example 5
from horovod.tensorflow.mpi_ops import allgather, broadcast, _allreduce
from horovod.tensorflow.mpi_ops import init, shutdown
from horovod.tensorflow.mpi_ops import size, local_size, rank, local_rank, is_homogeneous
from horovod.tensorflow.mpi_ops import mpi_threads_supported, mpi_enabled, mpi_built
from horovod.tensorflow.mpi_ops import gloo_enabled, gloo_built
from horovod.tensorflow.mpi_ops import nccl_built, ddl_built, mlsl_built
from horovod.tensorflow.mpi_ops import Average, Sum, Adasum
from horovod.tensorflow.mpi_ops import _check_has_gpu
from horovod.tensorflow.mpi_ops import handle_average_backwards_compatibility, check_num_rank_power_of_2

from horovod.tensorflow.util import _executing_eagerly, _make_subgraph, _cache
# Imports needed by the excerpt below (gpu_available, Compression)
from horovod.tensorflow.compression import Compression
from horovod.common.util import gpu_available

import tensorflow as tf
import warnings

has_gpu = gpu_available('tensorflow')


def allreduce(tensor,
              average=None,
              device_dense='',
              device_sparse='',
              compression=Compression.none,
              op=None):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.
Esempio n. 6
0
Adasum = _basics.Adasum

is_homogeneous = _basics.is_homogeneous

handle_average_backwards_compatibility = get_average_backwards_compatibility_fun(
    _basics)

# Schema: handle -> input, output
# We keep input in order to make sure it does not get garbage collected
# before the operation is finished.
_handle_map = {}

# Only support fp16 allreduce for PyTorch versions using v2 API.
_fp16_supported = _v2_api

_has_gpu = gpu_available('torch')


def _check_function(function_factory, tensor):
    function = function_factory(tensor)
    if not hasattr(mpi_lib, function):
        raise ValueError('Tensor type %s is not supported.' % tensor.type())
    if not tensor.is_contiguous():
        raise ValueError('Tensor is required to be contiguous.')
    return function


def _allreduce_function_factory(tensor):
    return 'horovod_torch_allreduce_async_' + tensor.type().replace('.', '_')
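
The factory derives the name of the C extension symbol from the tensor's type string, and _check_function verifies that the symbol exists and that the tensor is contiguous. A quick illustration of the name mangling alone, runnable with just PyTorch:

import torch

# torch.FloatTensor maps to 'horovod_torch_allreduce_async_torch_FloatTensor',
# which _check_function then looks up on the mpi_lib extension module.
t = torch.ones(3)
print('horovod_torch_allreduce_async_' + t.type().replace('.', '_'))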

Example 7
def allreduce(tensor, average=None, device_dense='', device_sparse='',
              compression=Compression.none, op=None):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average: DEPRECATED, please use op instead.
        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_ALLREDUCE.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_ALLGATHER.
        compression: Compression algorithm used to reduce the amount of data
                     sent and received by each worker node.  Defaults to not
                     using compression.
        op: The reduction operation to combine tensors across different ranks.
            Defaults to Average if None is given.

    Returns:
        A tensor of the same shape and type as `tensor`, summed across all
        processes.
    """
    op = handle_average_backwards_compatibility(op, average)
    # Averaging happens in framework code, so translate that to Sum for the actual call
    true_op = Sum if op == Average else op

    if isinstance(tensor, tf.IndexedSlices):
        # TODO: Need to fix this to actually call Adasum
        if op == Adasum:
            raise NotImplementedError('The Adasum reduction does not currently support sparse tensors. As a '
                                      'workaround please pass sparse_as_dense=True to DistributedOptimizer')
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            horovod_size = tf.cast(size(), tensor.values.dtype)
            values = allgather(tensor.values)
            indices = allgather(tensor.indices)

            # To make this operation into an average, divide allgathered values by
            # the Horovod size.
            new_values = (values / horovod_size) if op == Average else values
        return tf.IndexedSlices(new_values, indices,
                                dense_shape=tensor.dense_shape)
    else:
        with tf.device(device_dense):
            horovod_size = tf.cast(size(), dtype=tensor.dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            summed_tensor_compressed = _allreduce(tensor_compressed, op=true_op)
            summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
            if op == Adasum:
                if 'CPU' not in tensor.device and gpu_available('tensorflow'):
                    if nccl_built():
                        if not is_homogeneous():
                            raise NotImplementedError(
                                'Running GPU Adasum on heterogeneous cluster is not supported yet.')
                        elif not check_num_rank_power_of_2(int(size() / local_size())):
                            raise NotImplementedError(
                                'Running GPU Adasum with non-power of 2 nodes is not supported yet.')
                        horovod_local_size = tf.cast(local_size(), dtype=tensor.dtype)
                        new_tensor = summed_tensor / horovod_local_size
                    else:
                        warnings.warn('Adasum reduction does not currently support GPU reduction using MPI. Tensors '
                                      'are copied to CPU memory instead. To use Adasum for GPU reduction, please '
                                      'compile Horovod with HOROVOD_GPU_ALLREDUCE=NCCL.')
                        new_tensor = summed_tensor
                else:
                    if not check_num_rank_power_of_2(size()):
                        raise NotImplementedError('Running Adasum with non-power of 2 ranks is not supported yet.')
                    new_tensor = summed_tensor
            else:
                new_tensor = (summed_tensor / horovod_size) if op == Average else summed_tensor
        return new_tensor
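
For completeness, a sketch of the deprecated average= spelling that handle_average_backwards_compatibility translates into op, assuming an initialized Horovod TensorFlow setup:

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
t = tf.ones([3])
legacy = hvd.allreduce(t, average=True)     # deprecated; emits a warning
current = hvd.allreduce(t, op=hvd.Average)  # preferred equivalent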