def reduce(self, k, phi, psi_n, psi_l, psi_r):
    """Reduce skeleton for distributed tree

    The parameters must respect these equalities (closure property):
    * k(l, b, r) = psi_n(l, phi(b), r)
    * psi_n(psi_n(value, l, y), b, r) = psi_n(value, psi_l(l,b,r), y)
    * psi_n(l, b, psi_n(value, r, y)) = psi_n(value, psi_r(l,b,r), y)

    Parameters
    ----------
    k : callable
        The function used to reduce a BTree into a single value
    phi : callable
        A function used to respect the closure property to allow
        partial computation
    psi_n : callable
        A function used to respect the closure property to make
        partial computation
    psi_l : callable
        A function used to respect the closure property to make
        partial computation on the left
    psi_r : callable
        A function used to respect the closure property to make
        partial computation on the right
    """
    # Lazy %-style logging args (idiom fix: the old code built every log
    # string eagerly with '+' even when the debug level was disabled).
    logger.debug('[START] PID[%s] reduce skeleton', PID)
    # Step 1 : Local Reduction — reduce each locally-held segment to a
    # single (tagged) top value.
    gt = Segment([None] * self.__nb_segs)
    local_index = self.__global_index[self.__start_index:
                                      self.__start_index + self.__nb_segs]
    for i, (start, offset) in enumerate(local_index):
        logger.debug('[START] PID[%s] reduce_local from %s to %s',
                     PID, start, start + offset)
        gt[i] = Segment(self.__content[start:start + offset]).reduce_local(
            k, phi, psi_l, psi_r)
        logger.debug('[END] PID[%s] reduce_local from %s to %s',
                     PID, start, start + offset)
    # Step 2 : Gather local Results on the root process.
    # `iproc` (not `i`) is the peer rank: the old code reused `i` as the
    # loop variable, shadowing the segment counter.
    if PID == 0:
        for iproc in range(1, NPROCS):
            logger.debug('[START] PID[%s] reception from %s', PID, iproc)
            gt.extend(COMM.recv(source=iproc, tag=TAG_COMM_REDUCE)['c'])
            logger.debug('[END] PID[%s] reception from %s', PID, iproc)
    else:
        logger.debug('[START] PID[%s] emission to %s', PID, 0)
        COMM.send({'c': gt}, dest=0, tag=TAG_COMM_REDUCE)
        logger.debug('[END] PID[%s] emission to %s', PID, 0)
    # Step 3 : Global Reduction — only the root holds the full `gt`,
    # so only the root computes (and returns) a non-None result.
    par.at_root(lambda: logger.debug('[START] PID[%s] reduce_global', PID))
    res = gt.reduce_global(psi_n) if PID == 0 else None
    par.at_root(lambda: logger.debug('[END] PID[%s] reduce_global', PID))
    logger.debug('[END] PID[%s] reduce skeleton', PID)
    return res
def __global_upwards_accumulation(psi_n, gt):
    """Run the global upward-accumulation phase on the gathered tops.

    Only the root process (PID == 0) performs any work; every other
    process returns None.

    Parameters
    ----------
    psi_n : callable
        The function combining a node with its accumulated children
        (closure-property helper of the uacc skeleton).
    gt
        Segment of the per-segment top values gathered on the root.

    Returns
    -------
    The globally accumulated segment on the root, None elsewhere.
    """
    gt2 = None
    if PID == 0:
        par.at_root(
            lambda: logger.debug('[START] PID[%s] uacc_global', PID))
        gt2 = gt.uacc_global(psi_n)
        # Replace each node entry by the pair of its children's accumulated
        # values, keeping the original tag — this is what the later local
        # update phase consumes.
        for i, _ in enumerate(gt2):
            if gt2[i].is_node():
                gt2[i] = TaggedValue((gt2.get_left(i).get_value(),
                                      gt2.get_right(i).get_value()),
                                     gt2[i].get_tag())
        par.at_root(lambda: logger.debug('[END] PID[%s] uacc_global', PID))
    return gt2
def reduce(self, k, phi, psi_n, psi_l, psi_r):
    """Reduce skeleton for distributed tree

    The parameters must respect these equalities (closure property):
    * k(l, b, r) = psi_n(l, phi(b), r)
    * psi_n(psi_n(value, l, y), b, r) = psi_n(value, psi_l(l,b,r), y)
    * psi_n(l, b, psi_n(value, r, y)) = psi_n(value, psi_r(l,b,r), y)

    Parameters
    ----------
    k : callable
        The function used to reduce a BTree into a single value
    phi : callable
        A function used to respect the closure property to allow
        partial computation
    psi_n : callable
        A function used to respect the closure property to make
        partial computation
    psi_l : callable
        A function used to respect the closure property to make
        partial computation on the left
    psi_r : callable
        A function used to respect the closure property to make
        partial computation on the right
    """
    logger.debug('[START] PID[%s] reduce skeleton', PID)
    # Step 1 : Local Reduction — collapse each locally-held segment
    # into a single top value.
    gt = Segment([None] * self.__nb_segs)
    i = 0
    for (start, offset) in \
            self.__global_index[self.__start_index:
                                self.__start_index + self.__nb_segs]:
        logger.debug('[START] PID[%s] reduce_local from %s to %s',
                     PID, start, start + offset)
        gt[i] = Segment(self.__content[start:start + offset]).reduce_local(
            k, phi, psi_l, psi_r)
        logger.debug('[END] PID[%s] reduce_local from %s to %s',
                     PID, start, start + offset)
        i = i + 1
    # Step 2 : Gather local Results
    # BUG FIX: the gather must use the reduce-specific communication tag.
    # TAG_COMM_UACC_1 belongs to the uacc skeleton; reusing it here risks
    # cross-skeleton message mix-ups between consecutive skeleton calls.
    self.__gather_local_result(gt, i, TAG_COMM_REDUCE)
    # Step 3 : Global Reduction — only the root holds the full `gt`.
    par.at_root(lambda: logger.debug('[START] PID[%s] reduce_global', PID))
    res = gt.reduce_global(psi_n) if PID == 0 else None
    par.at_root(lambda: logger.debug('[END] PID[%s] reduce_global', PID))
    logger.debug('[END] PID[%s] reduce skeleton', PID)
    return res
def _main():
    """Build three test trees on the root, broadcast them to every peer,
    then run the test battery on each shape."""
    if _PID == 0:
        # Root constructs the three tree shapes once and ships the whole
        # bundle to every other process.
        trees = {'b': balanced_btree(_rand_str, 15),
                 'i': ill_balanced_btree(_rand_str, 15),
                 'r': random_btree(_rand_str, 15)}
        for dst in range(1, _NPROCS):
            _COMM.send(trees, dest=dst, tag=1)
    else:
        trees = _COMM.recv(source=0, tag=1)
    par.barrier()
    # Same banner + _test sequence as before, driven by a table.
    for key, banner in (('b', "\nBALANCED\n"),
                        ('i', "\nILL BALANCED\n"),
                        ('r', "\nRANDOM\n")):
        par.at_root(lambda msg=banner: print(msg))
        _test(trees[key])
def _test(bin_tree):
    """Linearize *bin_tree*, distribute it as a PTree, and print the
    result of every available tree skeleton on it."""
    print(bin_tree)
    par.at_root(lambda: print("-----"))
    m_bridge_param = 3
    linear_tree = LTree.init_from_bt(bin_tree, m_bridge_param)
    parallel_tree = PTree(linear_tree)
    par.at_root(lambda: print(linear_tree))
    print(parallel_tree)
    # Each skeleton follows the same separator / compute / label / print
    # rhythm, so drive them from a table instead of repeating the block.
    for skeleton, label in ((prefix, "prefix"),
                            (size, "size"),
                            (size_by_node, "size_by_node"),
                            (ancestors, "ancestors")):
        par.at_root(lambda: print("-----"))
        outcome = skeleton(parallel_tree)
        par.at_root(lambda lbl=label: print(lbl + " result:"))
        print(outcome)
def dacc(self, gl, gr, c, phi_l, phi_r, psi_u, psi_d):
    """Downward accumulation skeleton for distributed tree

    The parameters must respect these equalities (closure property):
    * gl(c, b) = psi_d(c, phi_l(b))
    * gr(c, b) = psi_d(c, phi_r(b))
    * psi_d(psi_d(c, b), a) = psi_d(c, psi_u(b,a))

    Parameters
    ---------
    gl : callable
        The function used to make an accumulation to the left children
        in a binary tree
    gr : callable
        The function used to make an accumulation to the right children
        in a binary tree
    c
        Initial value of accumulation
    phi_l : callable
        A function used to respect the closure property to allow
        partial computation on the left
    phi_r : callable
        A function used to respect the closure property to allow
        partial computation on the right
    psi_d : callable
        A function used to respect the closure property to make
        partial downward accumulation
    psi_u : callable
        A function used to respect the closure property to make
        partial computation
    """
    logger.debug('[START] PID[%s] dAcc skeleton', PID)
    # Step 1 : Computing Local Intermediate Values for each local segment.
    gt = Segment([None] * self.__nb_segs)
    i = 0
    for (start, offset) in \
            self.__global_index[self.__start_index:
                                self.__start_index + self.__nb_segs]:
        seg = Segment(self.__content[start:start + offset])
        logger.debug('[START] PID[%s] dacc_path from %s to %s',
                     PID, start, start + offset)
        if seg.has_critical():
            gt[i] = seg.dacc_path(phi_l, phi_r, psi_u)
        else:
            # A segment without a critical node is summarized by its
            # first value, tagged as a leaf.
            gt[i] = TaggedValue(seg[0].get_value(), "L")
        logger.debug('[END] PID[%s] dacc_path from %s to %s',
                     PID, start, start + offset)
        i = i + 1
    # Step 2 : Gather Local Results on the root.
    if PID == 0:
        for iproc in range(1, NPROCS):
            # BUG FIX: these two logs previously printed `i` (the leftover
            # segment counter from Step 1) instead of `iproc`, the rank
            # actually being received from.
            logger.debug('[START] PID[%s] reception update from %s',
                         PID, iproc)
            gt.extend(COMM.recv(source=iproc, tag=TAG_COMM_DACC_1)['c'])
            logger.debug('[END] PID[%s] reception update from %s',
                         PID, iproc)
    else:
        logger.debug('[START] PID[%s] emission update to %s', PID, 0)
        COMM.send({'c': gt}, dest=0, tag=TAG_COMM_DACC_1)
        logger.debug('[END] PID[%s] emission update to %s', PID, 0)
    # Step 3 : Global Downward Accumulation (root only).
    par.at_root(lambda: logger.debug('[START] PID[%s] dacc_global', PID))
    gt2 = (gt.dacc_global(psi_d, c) if PID == 0 else None)
    par.at_root(lambda: logger.debug('[END] PID[%s] dacc_global', PID))
    # Step 4 : Distributing Global Result — root scatters each rank's
    # slice of gt2 according to self.__distribution.
    if PID == 0:
        start = 0
        for iproc in range(NPROCS):
            iproc_off = self.__distribution[iproc]
            if iproc != 0:
                logger.debug('[START] PID[%s] emission global to %s',
                             PID, iproc)
                COMM.send({'g': gt2[start: start + iproc_off]},
                          dest=iproc, tag=TAG_COMM_DACC_2)
                logger.debug('[END] PID[%s] emission global to %s',
                             PID, iproc)
            start = start + iproc_off
    else:
        logger.debug('[START] PID[%s] reception global from %s', PID, 0)
        gt2 = COMM.recv(source=0, tag=TAG_COMM_DACC_2)['g']
        logger.debug('[END] PID[%s] reception global from %s', PID, 0)
    # Step 5 : Local Downward Accumulation with each segment's global seed.
    content = SList([None] * self.__content.length())
    for i, (start, offset) in enumerate(
            self.__global_index[self.__start_index:
                                self.__start_index + self.__nb_segs]):
        logger.debug('[START] PID[%s] dacc_local from %s to %s',
                     PID, start, start + offset)
        content[start:start + offset] = \
            Segment(self.__content[start:start + offset]).dacc_local(
                gl, gr, gt2[i].get_value())
        logger.debug('[END] PID[%s] dacc_local from %s to %s',
                     PID, start, start + offset)
    res = PTree.init(self, content)
    logger.debug('[END] PID[%s] dAcc skeleton', PID)
    return res
def uacc(self, k, phi, psi_n, psi_l, psi_r):
    """Upward accumulation skeleton for distributed tree

    The parameters must respect these equalities (closure property):
    * k(l, b, r) = psi_n(l, phi(b), r)
    * psi_n(psi_n(value, l, y), b, r) = psi_n(value, psi_l(l,b,r), y)
    * psi_n(l, b, psi_n(value, r, y)) = psi_n(value, psi_r(l,b,r), y)

    Parameters
    ----------
    k : callable
        The function used to reduce a BTree into a single value
    phi : callable
        A function used to respect the closure property to allow
        partial computation
    psi_n : callable
        A function used to respect the closure property to make
        partial computation
    psi_l : callable
        A function used to respect the closure property to make
        partial computation on the left
    psi_r : callable
        A function used to respect the closure property to make
        partial computation on the right
    """
    logger.debug('[START] PID[%s] uAcc skeleton', PID)
    # The tree must have been distributed before accumulating.
    assert self.__distribution != []
    # Step 1 : Local Upwards Accumulation — each segment yields its top
    # value (into gt) and its partially accumulated body (into lt2).
    gt = Segment([None] * self.__nb_segs)
    lt2 = SList([None] * self.__nb_segs)
    i = 0
    for (start, offset) in \
            self.__global_index[self.__start_index:
                                self.__start_index + self.__nb_segs]:
        logger.debug('[START] PID[%s] uacc_local from %s to %s',
                     PID, start, start + offset)
        (top, res) = Segment(
            self.__content[start:start + offset]).uacc_local(
                k, phi, psi_l, psi_r)
        logger.debug('[END] PID[%s] uacc_local from %s to %s',
                     PID, start, start + offset)
        gt[i] = top
        lt2[i] = res
        i = i + 1
    # Step 2 : Gather local Results on the root.
    if PID == 0:
        for iproc in range(1, NPROCS):
            # BUG FIX: these two logs previously printed `i` (the leftover
            # segment counter from Step 1) instead of `iproc`, the rank
            # actually being received from.
            logger.debug('[START] PID[%s] reception local from %s',
                         PID, iproc)
            gt.extend(COMM.recv(source=iproc, tag=TAG_COMM_UACC_1)['c'])
            logger.debug('[END] PID[%s] reception local from %s',
                         PID, iproc)
    else:
        logger.debug('[START] PID[%s] emission local to %s', PID, 0)
        COMM.send({'c': gt}, dest=0, tag=TAG_COMM_UACC_1)
        logger.debug('[END] PID[%s] emission local to %s', PID, 0)
    # Step 3 : Global Upward Accumulation (root only); each node entry is
    # replaced by the pair of its children's accumulated values.
    gt2 = None
    if PID == 0:
        par.at_root(
            lambda: logger.debug('[START] PID[%s] uacc_global', PID))
        gt2 = gt.uacc_global(psi_n)
        for idx in range(len(gt2)):
            if gt2[idx].is_node():
                gt2[idx] = TaggedValue((gt2.get_left(idx).get_value(),
                                        gt2.get_right(idx).get_value()),
                                       gt2[idx].get_tag())
        par.at_root(lambda: logger.debug('[END] PID[%s] uacc_global', PID))
    # Step 4 : Distributing Global Result — root scatters each rank's
    # slice of gt2 according to self.__distribution.
    start = 0
    if PID == 0:
        for iproc in range(NPROCS):
            iproc_off = self.__distribution[iproc]
            if iproc != 0:
                logger.debug('[START] PID[%s] emission global to %s',
                             PID, iproc)
                COMM.send({'g': gt2[start: start + iproc_off]},
                          dest=iproc, tag=TAG_COMM_UACC_2)
                logger.debug('[END] PID[%s] emission global to %s',
                             PID, iproc)
            start = start + iproc_off
    else:
        logger.debug('[START] PID[%s] reception global from %s', PID, 0)
        gt2 = COMM.recv(source=0, tag=TAG_COMM_UACC_2)['g']
        logger.debug('[END] PID[%s] reception global from %s', PID, 0)
    # Step 5 : Local Updates — segments holding a critical node are
    # corrected with the children values computed globally.
    content = SList([None] * self.__content.length())
    for i, (start, offset) in enumerate(
            self.__global_index[self.__start_index:
                                self.__start_index + self.__nb_segs]):
        logger.debug('[START] PID[%s] uacc_update from %s to %s',
                     PID, start, start + offset)
        if gt[i].is_node():
            (lc, rc) = gt2[i].get_value()
            val = Segment(
                self.__content[start:start + offset]).uacc_update(
                    lt2[i], k, lc, rc)
        else:
            val = lt2[i]
        logger.debug('[END] PID[%s] uacc_update from %s to %s',
                     PID, start, start + offset)
        content[start:start + offset] = val
    res = PTree.init(self, content)
    logger.debug('[END] PID[%s] uAcc skeleton', PID)
    return res
def dacc(self, gl, gr, c, phi_l, phi_r, psi_u, psi_d):
    """Downward accumulation skeleton for distributed tree

    The parameters must respect these equalities (closure property):
    * gl(c, b) = psi_d(c, phi_l(b))
    * gr(c, b) = psi_d(c, phi_r(b))
    * psi_d(psi_d(c, b), a) = psi_d(c, psi_u(b,a))

    Parameters
    ---------
    gl : callable
        The function used to make an accumulation to the left children
        in a binary tree
    gr : callable
        The function used to make an accumulation to the right children
        in a binary tree
    c
        Initial value of accumulation
    phi_l : callable
        A function used to respect the closure property to allow
        partial computation on the left
    phi_r : callable
        A function used to respect the closure property to allow
        partial computation on the right
    psi_d : callable
        A function used to respect the closure property to make
        partial downward accumulation
    psi_u : callable
        A function used to respect the closure property to make
        partial computation
    """
    logger.debug('[START] PID[%s] dAcc skeleton', PID)
    # The (start, offset) pairs of the segments held by this process.
    local_index = self.__global_index[self.__start_index:
                                      self.__start_index + self.__nb_segs]
    # Step 1 : Computing Local Intermediate Values
    gt = Segment([None] * self.__nb_segs)
    for idx, (start, offset) in enumerate(local_index):
        segment = Segment(self.__content[start:start + offset])
        logger.debug('[START] PID[%s] dacc_path from %s to %s',
                     PID, start, start + offset)
        if segment.has_critical():
            gt[idx] = segment.dacc_path(phi_l, phi_r, psi_u)
        else:
            # No critical node: the segment is summarized by its first
            # value, tagged as a leaf.
            gt[idx] = TaggedValue(segment[0].get_value(), "L")
        logger.debug('[END] PID[%s] dacc_path from %s to %s',
                     PID, start, start + offset)
    # Step 2 : Gather Local Results
    self.__gather_local_result(gt, len(local_index), TAG_COMM_DACC_1)
    # Step 3 : Global Downward Accumulation (root only)
    par.at_root(lambda: logger.debug('[START] PID[%s] dacc_global', PID))
    gt2 = gt.dacc_global(psi_d, c) if PID == 0 else None
    par.at_root(lambda: logger.debug('[END] PID[%s] dacc_global', PID))
    # Step 4 : Distributing Global Result
    gt2 = self.__distribute_global_result(gt2, TAG_COMM_DACC_2)
    # Step 5 : Local Downward Accumulation with each segment's global seed
    content = SList([None] * self.__content.length())
    for idx, (start, offset) in enumerate(local_index):
        logger.debug('[START] PID[%s] dacc_local from %s to %s',
                     PID, start, start + offset)
        content[start:start + offset] = \
            Segment(self.__content[start:start + offset]).dacc_local(
                gl, gr, gt2[idx].get_value())
        logger.debug('[END] PID[%s] dacc_local from %s to %s',
                     PID, start, start + offset)
    logger.debug('[END] PID[%s] dAcc skeleton', PID)
    return PTree.init(self, content)