def exchange_reflections_by_alltoall_sliced(self, mpi_communicator, number_of_slices):
  '''Split each hkl chunk into N slices. This is needed to address the MPI alltoall memory problem.

  Each chunk in self.split_reflections is cut into number_of_slices slices and
  the data is exchanged in number_of_slices separate alltoall rounds, so that
  no single alltoall message has to carry a full-size chunk.

  Args:
    mpi_communicator: an mpi4py-style communicator providing alltoall().
    number_of_slices: how many slices to cut every chunk into; this is also
      the number of alltoall rounds performed.

  Returns:
    A flex.reflection_table holding every reflection chunk received by this
    rank, consolidated over all slice rounds.
  '''
  # The total reflection table this rank will have accumulated after running
  # all slices of alltoall.
  result_reflections = flex.reflection_table()

  # If self.split_reflections is [A, B, C, ...], build
  # [[A1, ..., An], [B1, ..., Bn], [C1, ..., Cn], ...], n == number_of_slices.
  list_of_sliced_reflection_chunks = [
    list(reflection_table_utils.get_next_reflection_table_slice(
      chunk, number_of_slices, self.reflection_table_stub))
    for chunk in self.split_reflections
  ]

  for slice_index in range(number_of_slices):
    # Collect the j-th slice of every chunk: [Aj, Bj, Cj, ...]
    reflection_chunks_for_alltoall = [
      sliced_chunks[slice_index] for sliced_chunks in list_of_sliced_reflection_chunks
    ]

    self.logger.log_step_time("ALL-TO-ALL")
    received_reflection_chunks = mpi_communicator.alltoall(reflection_chunks_for_alltoall)
    self.logger.log("After all-to-all received %d reflection chunks" % len(received_reflection_chunks))
    self.logger.log_step_time("ALL-TO-ALL", True)

    self.logger.log_step_time("CONSOLIDATE")
    self.logger.log("Consolidating reflection tables...")
    for chunk in received_reflection_chunks:
      result_reflections.extend(chunk)
    self.logger.log_step_time("CONSOLIDATE", True)

  return result_reflections
def get_reflections_from_alltoall_sliced(self, number_of_slices):
  '''Split each hkl chunk into N slices. This is needed to address the MPI alltoall memory problem.

  Each chunk in self.hkl_chunks is cut into number_of_slices slices and the
  data is exchanged in number_of_slices separate alltoall rounds, so that no
  single alltoall message has to carry a full-size chunk.

  Args:
    number_of_slices: how many slices to cut every chunk into; this is also
      the number of alltoall rounds performed.

  Returns:
    The reflection table produced by self.distribute_reflection_table(),
    extended with every hkl chunk received by this rank over all rounds.
  '''
  # The total reflection table this rank will have accumulated after all
  # slices of alltoall.
  result_reflections = self.distribute_reflection_table()

  # If self.hkl_chunks is [A, B, C, ...], build
  # [[A1, ..., An], [B1, ..., Bn], [C1, ..., Cn], ...], n == number_of_slices.
  list_of_sliced_hkl_chunks = [
    list(reflection_table_utils.get_next_reflection_table_slice(
      chunk, number_of_slices, self.distribute_reflection_table))
    for chunk in self.hkl_chunks
  ]

  self.logger.log("Ready for all-to-all...")
  self.logger.log("Memory usage: %d MB" % get_memory_usage())

  for slice_index in range(number_of_slices):
    # Collect the j-th slice of every chunk: [Aj, Bj, Cj, ...]
    hkl_chunks_for_alltoall = [
      sliced_chunks[slice_index] for sliced_chunks in list_of_sliced_hkl_chunks
    ]

    self.logger.log_step_time("ALL-TO-ALL")
    self.logger.log("Executing MPI all-to-all...")
    self.logger.log("Memory usage: %d MB" % get_memory_usage())
    # NOTE(review): uses a module-level `comm`, while the sibling method
    # exchange_reflections_by_alltoall_sliced takes the communicator as a
    # parameter — presumably both are the same world communicator; confirm
    # and consider unifying the two call styles.
    received_hkl_chunks = comm.alltoall(hkl_chunks_for_alltoall)
    self.logger.log("After all-to-all received %d hkl chunks" % len(received_hkl_chunks))
    self.logger.log_step_time("ALL-TO-ALL", True)

    self.logger.log_step_time("CONSOLIDATE")
    self.logger.log("Consolidating reflection tables...")
    for chunk in received_hkl_chunks:
      result_reflections.extend(chunk)
    self.logger.log_step_time("CONSOLIDATE", True)

  return result_reflections