Example #1
def _test_all_gather(self,
                     group,
                     group_id,
                     rank,
                     cuda=False,
                     rank_to_GPU=None,
                     dtype=torch.float,
                     qtype=None):
    for dest in group:
        # Each rank contributes a (dest + 1) x (dest + 1) tensor filled
        # with its own rank id; placeholders start out filled with -1.
        tensor = _build_tensor([dest + 1, dest + 1], rank, dtype=dtype)
        tensors = [
            _build_tensor([dest + 1, dest + 1], -1, dtype=dtype)
            for i in group
        ]
        expected_tensors = [
            _build_tensor([dest + 1, dest + 1], i, dtype=dtype)
            for i in group
        ]
        if cuda:
            tensor = tensor.cuda(rank_to_GPU[rank][0])
            tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
        # Complex tensors are sized via their real view; tensor_shapes is
        # computed here but not consumed later in this snippet.
        if tensors[0].dtype == torch.complex64:
            tensor_shapes = [torch.view_as_real(tensors[0]).shape]
        else:
            tensor_shapes = [tensors[0].shape]
        # Wrap dist.all_gather so inputs are quantized to qtype before
        # communication and dequantized afterwards.
        allgather = quant.auto_quantize(dist.all_gather,
                                        qtype,
                                        quant_loss=None)
        allgather(tensors, tensor, group=group_id, async_op=False)

        for t1, t2 in zip(tensors, expected_tensors):
            self.assertEqual(t1, t2)
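
The helper _build_tensor is referenced but not defined in these examples. A minimal sketch of what it would need to do, assuming it simply fills a tensor of the requested shape with the given value (the name and signature are inferred from the calls above, not from a shown definition):

import torch

def _build_tensor(size, value, dtype=torch.float):
    # Hypothetical stand-in: a tensor of shape `size` filled with `value`
    # (a rank id for real data, or -1 for the gather placeholders).
    return torch.empty(size, dtype=dtype).fill_(value)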
Example #2
def _test_all_to_all_single(self,
                            group,
                            group_id,
                            rank,
                            cuda=False,
                            rank_to_GPU=None,
                            dtype=torch.float,
                            qtype=DQuantType.FP16):
    if group_id is not None:
        size = len(group)
        # Rank r sends i + 1 rows to peer i and receives r + 1 rows
        # from each peer.
        in_splits = [i + 1 for i in group]
        out_splits = [rank + 1 for _ in group]
        in_tensor = torch.ones([sum(in_splits), size],
                               dtype=dtype) * rank
        out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)
        expected_tensor = torch.cat([
            torch.ones([rank + 1, size], dtype=dtype) * i
            for i in group
        ])
        if cuda:
            rank_to_GPU = rank_to_GPU[rank][0]
            in_tensor = in_tensor.cuda(rank_to_GPU)
            expected_tensor = expected_tensor.cuda(rank_to_GPU)
            out_tensor = out_tensor.cuda(rank_to_GPU)
        # Run the quantized collective on both the CPU and CUDA paths,
        # then check the exchanged rows against the expected values.
        quantize_alltoall_single = quant.auto_quantize(
            dist.all_to_all_single, qtype, quant_loss=None)
        quantize_alltoall_single(out_tensor,
                                 in_tensor,
                                 out_splits=out_splits,
                                 in_splits=in_splits,
                                 group=group_id)
        self.assertEqual(out_tensor, expected_tensor)
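
The split bookkeeping in this example can be checked by hand: sum(in_splits) and sum(out_splits) must equal the row counts of in_tensor and out_tensor. A quick sanity check for a hypothetical 4-rank group:

group = [0, 1, 2, 3]                          # hypothetical 4-rank group
for rank in group:
    in_splits = [i + 1 for i in group]        # [1, 2, 3, 4] rows sent out
    out_splits = [rank + 1 for _ in group]    # rows received from each peer
    assert sum(in_splits) == 10               # in_tensor has 10 rows on every rank
    assert sum(out_splits) == (rank + 1) * 4  # out_tensor has (rank + 1) * size rows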
Example #3
def _test_all_to_all(self,
                     group,
                     group_id,
                     rank,
                     cuda=False,
                     rank_to_GPU=None,
                     dtype=torch.float,
                     qtype=None):
    if group_id is not None:
        size = len(group)
        in_splits = [i + 1 for i in group]
        # One input tensor per peer, each filled with this rank's id.
        in_tensors = [
            torch.ones([in_splits[i], size], dtype=dtype) * rank
            for i, _ in enumerate(group)
        ]
        out_tensors = [
            torch.ones([(rank + 1), size], dtype=dtype) for _ in group
        ]
        # After the exchange, the i-th output tensor holds rank i's data.
        expected_tensors = [
            torch.ones([rank + 1, size], dtype=dtype) * i for i in group
        ]
        if cuda:
            in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]
            expected_tensors = [
                t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors
            ]
            out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]
        quantize_alltoall = quant.auto_quantize(dist.all_to_all,
                                                qtype,
                                                quant_loss=None)
        quantize_alltoall(out_tensors, in_tensors, group=group_id)
        for t1, t2 in zip(out_tensors, expected_tensors):
            self.assertEqual(t1, t2)
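
All three helpers follow the same pattern: wrap a torch.distributed collective with quant.auto_quantize and call the wrapper with the collective's usual arguments. A minimal standalone sketch of that pattern, assuming the import paths below (the snippets above do not show their imports) and a two-process gloo group:

import os
import torch
import torch.distributed as dist
import torch.distributed.algorithms._quantization.quantization as quant
from torch.distributed.algorithms._quantization.quantization import DQuantType

def run(rank, world_size):
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29500"
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    tensor = torch.full((2, 2), float(rank))
    gathered = [torch.empty(2, 2) for _ in range(world_size)]
    # Quantize inputs to FP16 for the wire, dequantize the gathered outputs.
    allgather = quant.auto_quantize(dist.all_gather, DQuantType.FP16, quant_loss=None)
    allgather(gathered, tensor, async_op=False)
    assert all(g[0, 0].item() == i for i, g in enumerate(gathered))
    dist.destroy_process_group()

if __name__ == "__main__":
    torch.multiprocessing.spawn(run, args=(2,), nprocs=2)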