Example 1
import collections
import os
import torch
from bluefog.common.util import check_extension
from bluefog.torch.optimizers import (
    CommunicationType,
    DistributedAdaptThenCombineOptimizer,
    DistributedAdaptWithCombineOptimizer,
    DistributedGradientAllreduceOptimizer,
    DistributedWinPutOptimizer,
    DistributedAllreduceOptimizer,
    DistributedNeighborAllreduceOptimizer,
    DistributedHierarchicalNeighborAllreduceOptimizer,
)

check_extension("bluefog.torch", __file__, "mpi_lib")

from bluefog.torch.mpi_ops import init, shutdown
from bluefog.torch.mpi_ops import size, local_size, rank, local_rank
from bluefog.torch.mpi_ops import machine_size, machine_rank
from bluefog.torch.mpi_ops import load_topology, set_topology
from bluefog.torch.mpi_ops import load_machine_topology, set_machine_topology
from bluefog.torch.mpi_ops import in_neighbor_ranks, out_neighbor_ranks
from bluefog.torch.mpi_ops import in_neighbor_machine_ranks, out_neighbor_machine_ranks
from bluefog.torch.mpi_ops import mpi_threads_supported
from bluefog.torch.mpi_ops import unified_mpi_window_model_supported
from bluefog.torch.mpi_ops import nccl_built, is_homogeneous
from bluefog.torch.mpi_ops import suspend, resume

from bluefog.torch.mpi_ops import allreduce, allreduce_nonblocking
from bluefog.torch.mpi_ops import allreduce_, allreduce_nonblocking_
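The snippet above is the import section of BlueFog's bluefog/torch/__init__.py, which exposes the PyTorch collectives. Below is a minimal usage sketch using only names visible in these imports, typically launched with several processes (e.g. via bfrun or mpirun); the name= strings are illustrative and the default averaging behavior of allreduce is an assumption, not something shown in the snippet.

import torch
import bluefog.torch as bf

bf.init()  # initialize MPI and BlueFog's default communication topology
print(f"rank {bf.rank()} of {bf.size()} (local rank {bf.local_rank()})")

# Each rank contributes its rank id; allreduce combines the values across all
# ranks (assumed to average by default, as is common for this style of API).
x = torch.ones(3) * bf.rank()
averaged = bf.allreduce(x, name="example.allreduce")

# The trailing-underscore variant reduces in place, overwriting x.
bf.allreduce_(x, name="example.allreduce_inplace")

bf.shutdown()
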
Example 2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import os
import torch
from bluefog.common.util import check_extension
from bluefog.torch.optimizers import (
    DistributedGradientAllreduceOptimizer, DistributedAllreduceOptimizer,
    DistributedNeighborAllreduceOptimizer,
    DistributedHierarchicalNeighborAllreduceOptimizer,
    DistributedWinPutOptimizer)

check_extension('bluefog.torch', __file__, 'mpi_lib')

from bluefog.torch.mpi_ops import init, shutdown
from bluefog.torch.mpi_ops import size, local_size, rank, local_rank
from bluefog.torch.mpi_ops import load_topology, set_topology
from bluefog.torch.mpi_ops import in_neighbor_ranks, out_neighbor_ranks
from bluefog.torch.mpi_ops import mpi_threads_supported
from bluefog.torch.mpi_ops import unified_mpi_window_model_supported
from bluefog.torch.mpi_ops import nccl_built, is_homogeneous

from bluefog.torch.mpi_ops import allreduce, allreduce_nonblocking
from bluefog.torch.mpi_ops import allreduce_, allreduce_nonblocking_
from bluefog.torch.mpi_ops import allgather, allgather_nonblocking
from bluefog.torch.mpi_ops import broadcast, broadcast_nonblocking
from bluefog.torch.mpi_ops import broadcast_, broadcast_nonblocking_
from bluefog.torch.mpi_ops import neighbor_allgather, neighbor_allgather_nonblocking
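This second snippet is an earlier revision of the same bluefog/torch/__init__.py, before the machine-topology and window-based optimizers were added. A sketch of the topology-aware calls visible in these imports follows; the root_rank and name keyword arguments follow the usual convention and are assumptions here.

import torch
import bluefog.torch as bf

bf.init()

# The active topology determines which ranks count as neighbors.
print(f"rank {bf.rank()}: in-neighbors {bf.in_neighbor_ranks()}, "
      f"out-neighbors {bf.out_neighbor_ranks()}")

# Broadcast rank 0's tensor to every process, then gather the tensors held by
# this rank's in-neighbors as defined by the topology.
x = torch.ones(2) * bf.rank()
x = bf.broadcast(x, root_rank=0, name="example.broadcast")
gathered = bf.neighbor_allgather(x, name="example.neighbor_allgather")

bf.shutdown()
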
Example 3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import os
import tensorflow as tf

from bluefog.common.util import check_extension
check_extension('bluefog.tensorflow', __file__, 'mpi_lib')

# pylint: disable = wrong-import-position
from bluefog.tensorflow.mpi_ops import init, shutdown
from bluefog.tensorflow.mpi_ops import size, local_size, rank, local_rank
from bluefog.tensorflow.mpi_ops import load_topology, set_topology
from bluefog.tensorflow.mpi_ops import in_neighbor_ranks, out_neighbor_ranks
from bluefog.tensorflow.mpi_ops import mpi_threads_supported
from bluefog.tensorflow.mpi_ops import unified_mpi_window_model_supported

from bluefog.tensorflow.mpi_ops import allreduce, broadcast, allgather

from bluefog.tensorflow.optimizers import broadcast_variables
from bluefog.tensorflow.optimizers import DistributedOptimizer
if hasattr(tf, 'GradientTape'):
    from bluefog.tensorflow.optimizers import DistributedGradientTape
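The TensorFlow entry point mirrors the PyTorch one. Below is a hedged sketch of the collective calls and broadcast_variables visible above; the root_rank keyword on broadcast_variables follows the usual convention and is an assumption here.

import tensorflow as tf
import bluefog.tensorflow as bf

bf.init()

# Each rank contributes its rank id; allreduce combines the values across ranks.
x = tf.ones([3]) * bf.rank()
reduced = bf.allreduce(x)

# Keep replicas consistent at startup: copy rank 0's variable values everywhere.
w = tf.Variable(tf.random.normal([2, 2]))
bf.broadcast_variables([w], root_rank=0)

bf.shutdown()

For training, the same module also exposes DistributedOptimizer and, when tf.GradientTape is available, DistributedGradientTape, which presumably wrap a standard optimizer or tape in the Horovod style; their exact signatures are not shown in this snippet.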