def test_timeline_push_sum(self):
    # Use win_accumulate to simulate the push-sum algorithm (sync).
    outdegree = len(bf.out_neighbor_ranks())
    indegree = len(bf.in_neighbor_ranks())
    # We append the push-sum weight p as the last entry of the data.
    x = torch.Tensor(
        [bf.rank() / (indegree + 1), 1.0 / bf.size() / (indegree + 1)])
    # Remember: we do not create the buffer with 0.
    bf.win_create(x, name="x_buff")
    x = bf.win_update_then_collect(name="x_buff")

    for _ in range(10):
        bf.win_accumulate(
            x, name="x_buff",
            dst_weights={rank: 1.0 / (outdegree + 1)
                         for rank in bf.out_neighbor_ranks()},
            require_mutex=True)
        x.div_(1 + outdegree)
        x = bf.win_update_then_collect(name="x_buff")

    bf.barrier()
    # Do not forget to sync at the end!
    x = bf.win_update_then_collect(name="x_buff")

    file_name = f"{self.temp_file}{bf.rank()}.json"
    with open(file_name, 'r') as tf:
        timeline_text = tf.read()
        assert 'MPI_WIN_ACCUMULATE' in timeline_text, timeline_text
        assert 'ENQUEUE_WIN_ACCUMULATE' in timeline_text, timeline_text

    bf.win_free()
def test_asscoicated_with_p(self):
    size = bf.size()
    rank = bf.rank()
    if size <= 3:
        fname = inspect.currentframe().f_code.co_name
        warnings.warn(
            "Skip {} because it requires more than 3 nodes".format(fname))
        return
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if TEST_ON_GPU and not bf.nccl_built():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]

    bf.set_topology(topology_util.RingGraph(size))
    bf.turn_on_win_ops_with_associated_p()
    for dtype, send_rank in itertools.product(dtypes, range(size)):
        tensor = torch.FloatTensor([23]).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        window_name = "win_asscoicate_with_p_{}_{}".format(dtype, send_rank)
        bf.win_create(tensor, window_name)
        left_neighbor_rank = (send_rank - 1) % size
        right_neighbor_rank = (send_rank + 1) % size
        if rank == send_rank:
            bf.win_accumulate(tensor, name=window_name,
                              self_weight=0.5,
                              dst_weights={left_neighbor_rank: 0.5,
                                           right_neighbor_rank: 0.5})
        bf.barrier()
        bf.win_update_then_collect(name=window_name)
        associated_p = bf.win_associated_p(name=window_name)
        if rank == send_rank:
            assert associated_p == 0.5, (
                "associated_p for sender {} is wrong. Got {}".format(
                    rank, associated_p))
        elif (rank == left_neighbor_rank) or (rank == right_neighbor_rank):
            assert abs(associated_p - 1.5) < EPSILON, (
                "associated_p for receiving neighbor {} is wrong. Got {}".format(
                    rank, associated_p))
        else:
            assert associated_p == 1.0, (
                "associated_p for untouched node {} is wrong. Got {}".format(
                    rank, associated_p))
    bf.turn_off_win_ops_with_associated_p()
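# A minimal arithmetic sketch (not part of the test above) of the values it
# asserts, assuming each window starts with associated p == 1.0 and that
# win_accumulate scales the sender's own p by self_weight while adding
# dst_weight * p to each destination's p.
p_initial = 1.0
p_sender = 0.5 * p_initial                  # self_weight = 0.5 -> 0.5
p_neighbor = p_initial + 0.5 * p_initial    # receives 0.5 * p  -> 1.5
p_untouched = p_initial                     # no accumulate     -> 1.0
assert (p_sender, p_neighbor, p_untouched) == (0.5, 1.5, 1.0)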
def test_win_update_then_collect(self):
    size = bf.size()
    rank = bf.rank()
    if size <= 1:
        fname = inspect.currentframe().f_code.co_name
        warnings.warn("Skip {} due to size 1".format(fname))
        return
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if TEST_ON_GPU:
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
    indegree = int(np.ceil(np.log2(size)))
    expected_result = rank * (indegree + 1)

    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        tensor = torch.FloatTensor(*([DIM_SIZE] * dim)).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        window_name = "win_update_collect_{}_{}".format(dim, dtype)
        bf.win_create(tensor, window_name)

        # After the collect op, the neighbor buffers become zero, so the
        # second win_update_then_collect should produce the same value.
        for _ in range(2):
            collect_tensor = bf.win_update_then_collect(window_name)

            assert (list(collect_tensor.shape) == [DIM_SIZE] * dim), (
                "bf.win_update_then_collect produces wrong shape tensor.")
            assert (collect_tensor.data - expected_result).abs().max() < EPSILON, (
                "bf.win_update_then_collect produces wrong tensor value " +
                "[{0}-{1}]!={2} at rank {3}.".format(
                    collect_tensor.min(), collect_tensor.max(),
                    expected_result, rank))
def test_asscoicated_with_p_random_test(self):
    size = bf.size()
    rank = bf.rank()
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    # Currently, the NCCL version does not support the associated p yet.
    if TEST_ON_GPU and not bf.nccl_built():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
    dims = [1]
    bf.turn_on_win_ops_with_associated_p()
    for dtype, dim in itertools.product(dtypes, dims):
        tensor = torch.FloatTensor(*([23] * dim)).fill_(1)
        tensor = self.cast_and_place(tensor, dtype)
        window_name = "win_asscoicate_with_p_random_{}_{}".format(dim, dtype)
        bf.win_create(tensor, window_name, zero_init=True)
        for _ in range(10):
            random_weights = np.random.rand(len(bf.out_neighbor_ranks()) + 1)
            random_weights /= random_weights.sum()
            self_weight = random_weights[-1]
            dst_weights = {r: random_weights[i]
                           for i, r in enumerate(bf.out_neighbor_ranks())}
            bf.win_put(tensor, self_weight=self_weight,
                       dst_weights=dst_weights, name=window_name,
                       require_mutex=True)
            bf.win_update(name=window_name, require_mutex=True)
            bf.win_accumulate(tensor, name=window_name, require_mutex=True,
                              self_weight=self_weight, dst_weights=dst_weights)
            bf.win_update_then_collect(name=window_name)
        bf.barrier()
        bf.win_update_then_collect(name=window_name)
        associated_p = bf.win_associated_p(name=window_name)
        # Because the associated p always undergoes the same operations as the
        # tensor, the following assert should hold no matter in what order the
        # ops are executed.
        assert abs(associated_p - tensor.data[0]) < EPSILON
    bf.turn_off_win_ops_with_associated_p()
def synchronize(self):
    # Here, synchronize just makes sure the win_put ops issued in one
    # iteration have finished.
    with torch.no_grad():
        for p, handle in self._handles.items():
            _ = bf.win_wait(handle)
            name = self._parameter_names.get(p)
            self._pushsum_delay[p] = self._num_steps_per_communication
            extended_parameter = self._named_extension_parameters[name]
            extended_parameter.mul_(self.self_weight)
            # The last entry is the push-sum weight, and we want parameter / weight.
            extended_parameter = bf.win_update_then_collect(name=name)
            corrected_parameter = (
                extended_parameter[:-1] / extended_parameter[-1]).reshape(p.shape)
            # Update p to the average of the neighbors.
            p.set_(corrected_parameter)
    self._handles.clear()

    self._synchronized = True
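# A minimal sketch (independent of the optimizer above) of the push-sum
# de-biasing performed in synchronize(): the flattened extended parameter
# stores the model entries followed by the push-sum weight p, and the
# unbiased estimate is parameter / p. The numbers below are made up.
import torch

extended = torch.tensor([0.8, 1.2, 0.4])         # [parameter entries..., p]
parameter, p_weight = extended[:-1], extended[-1]
corrected = parameter / p_weight                  # de-biased estimate, roughly [2.0, 3.0]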
            sent_neighbor = bf.out_neighbor_ranks()[i % num_out_neighbors]
            dst_weights = {sent_neighbor: 0.5}
            self_weight = 0.5
        else:
            dst_weights = {rank: 1.0 / (outdegree + 1)
                           for rank in bf.out_neighbor_ranks()}
            self_weight = 1 / (1 + outdegree)

        bf.win_accumulate(x, name="x",
                          self_weight=self_weight,
                          dst_weights=dst_weights,
                          require_mutex=True)
        bf.win_update_then_collect(name="x")
        associated_p = bf.win_associated_p(name="x")
        mse.append(torch.norm(x / associated_p - x_bar, p=2) /
                   torch.norm(x_bar, p=2))

    # Do not forget to sync at the end!
    bf.barrier()
    bf.win_update_then_collect(name="x")
    associated_p = bf.win_associated_p(name="x")
    print(f"associated p at {bf.rank()} is {associated_p}")
    bf.turn_off_win_ops_with_associated_p()
    mse.append(torch.norm(x / associated_p - x_bar, p=2) /
               torch.norm(x_bar, p=2))
    bf.win_free(name="x")
else:
def push_diging(X, y, w_opt, loss, maxite=2000, alpha=1e-1, **kwargs):
    if loss == 'logistic_regression':
        rho = kwargs.get('rho', 1e-1)
    elif loss == 'linear_regression':
        rho = 0
    else:
        raise NotImplementedError(
            'Task not supported. This example only supports' +
            ' linear_regression and logistic_regression')

    outdegree = len(bf.out_neighbor_ranks())
    indegree = len(bf.in_neighbor_ranks())

    # We let w = col{u, y, v}, i.e., u, y, v = w[:n], w[n:2*n], w[2*n].
    # Instead of three directed_neighbor_allreduce operations for u, y,
    # and v respectively, we exploit one directed_neighbor_allreduce for
    # the combined vector w. This guarantees u, y, and v are transmitted
    # simultaneously and avoids any mismatch between them. Experiments
    # show directed_neighbor_allreduce(w) is crucial for the convergence of
    # push_diging.
    w = torch.zeros(2 * n + 1, 1).to(torch.double)
    x = torch.zeros(n, 1, dtype=torch.double, requires_grad=True)
    loss_step(X, y, x, tensor_name='w_buff', loss=loss, rho=rho)
    grad = x.grad.data.clone()
    w[n:2 * n] = grad
    x.grad.data.zero_()

    w[-1] = 1.0
    grad_prev = w[n:2 * n].clone()

    bf.win_create(w, name="w_buff", zero_init=True)

    mse = []
    for _ in range(maxite):
        bf.barrier()

        w[:n] = w[:n] - alpha * w[n:2 * n]
        bf.win_accumulate(
            w, name="w_buff",
            dst_weights={rank: 1.0 / (outdegree * 2)
                         for rank in bf.out_neighbor_ranks()},
            require_mutex=True)
        w.div_(2)
        bf.barrier()

        w = bf.win_update_then_collect(name="w_buff")

        x.data = w[:n] / w[-1]
        loss_step(X, y, x, tensor_name='w_buff', loss=loss, rho=rho)
        grad = x.grad.data.clone()
        x.grad.data.zero_()

        w[n:2 * n] += grad - grad_prev
        grad_prev = grad
        if bf.rank() == 0:
            mse.append(torch.norm(x.data - w_opt, p=2))

    bf.barrier()
    w = bf.win_update_then_collect(name="w_buff")
    x.data = w[:n] / w[-1]

    return x, mse
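# A hypothetical driver sketch for push_diging, not taken from the example
# file. It assumes the module-level globals `n` and `loss_step` used inside
# push_diging are defined, uses randomly generated local data, and picks the
# RingGraph topology only as a placeholder choice of directed graph.
if __name__ == "__main__":
    bf.init()
    bf.set_topology(topology_util.RingGraph(bf.size()))
    X = torch.randn(100, n, dtype=torch.double)      # local data block
    y = torch.rand(100, 1, dtype=torch.double)
    w_opt = torch.zeros(n, 1, dtype=torch.double)    # placeholder reference solution
    x_hat, mse = push_diging(X, y, w_opt, loss='linear_regression',
                             maxite=200, alpha=1e-2)
    if bf.rank() == 0:
        print([m.item() for m in mse[:5]])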