# Example no. 1
# 0
def test_idist_collective_ops_no_dist():
    """Exercise the collective-op checks without a distributed context.

    Runs every check on CPU unconditionally, then repeats the same
    checks on CUDA when more than one GPU is visible.
    """
    checks = (
        _test_distrib_all_reduce,
        _test_distrib_all_gather,
        _test_distrib_barrier,
        _test_distrib_broadcast,
    )

    for check in checks:
        check("cpu")

    # Only worth repeating on GPU when a multi-device setup is present.
    if torch.cuda.device_count() > 1:
        for check in checks:
            check("cuda")
# Example no. 2
# 0
def test_idist_broadcast_gloo(distributed_context_single_node_gloo):
    """Broadcast check under a single-node gloo context (CPU backend)."""
    _test_distrib_broadcast("cpu")
# Example no. 3
# 0
def test_idist_broadcast_nccl(distributed_context_single_node_nccl):
    """Broadcast check under a single-node nccl context.

    Targets the CUDA device matching this process's local rank, as
    reported by the distributed-context fixture.
    """
    # f-string for consistency with the sibling nccl broadcast test,
    # which formats the same device string this way.
    device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
    _test_distrib_broadcast(device)
# Example no. 4
# 0
def test_idist_broadcast_nccl(distributed_context_single_node_nccl):
    """Broadcast check under a single-node nccl context on this rank's GPU."""
    local_rank = distributed_context_single_node_nccl["local_rank"]
    _test_distrib_broadcast(f"cuda:{local_rank}")
# Example no. 5
# 0
def _test_idist_broadcast_xla_in_child_proc(index):
    """Run the broadcast check on the current idist-selected device.

    ``index`` is ignored here — presumably the child-process index
    passed in by the XLA spawner; confirm against the caller.
    """
    _test_distrib_broadcast(idist.device())
# Example no. 6
# 0
def test_idist_broadcast_xla():
    """Broadcast check on whatever device idist reports for this process."""
    _test_distrib_broadcast(idist.device())
# Example no. 7
# 0
def test_idist_broadcast_nccl(distributed_context_single_node_nccl):
    """Broadcast check under a single-node nccl context.

    Uses the device idist selects for this process rather than building
    the device string by hand.
    """
    current_device = idist.device()
    _test_distrib_broadcast(current_device)