def _createIterationMatrices(self, H, Q, A, number_eigenvectors):
    if Q is None or H is None:
        start_index = 1
        m = number_eigenvectors * 2 + 1

        q = self.randomVector(A.totalShape()[0])
        q /= norm(q)

        distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=m, n_columns=A.totalShape()[0])
        Q = ParallelMatrix(distribution_plan)
        H = np.zeros((m, m), dtype=np.complex128)

        if 0 in Q.localRows():
            Q.setRow(0, q)
    else:
        start_index = Q.totalShape()[0]
        m = start_index + number_eigenvectors + 1

        new_distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=m, n_columns=A.totalShape()[0])

        # if new_distribution_plan.totalShape()[0] <= Q.distributionPlan().totalShape()[0]:
        #     new_distribution_plan = Q.distributionPlan()

        Q = Q.enlargeTo(new_distribution_plan)
        H = self.resizeCopy(H, m, m)

    return H, Q, start_index, m

def _saveAutocorrelation(self, autocorrelation_function, number_modes, x_coordinates, y_coordinates, filename):
    new_coordinates_x = self.determineNewCoordinates(x_coordinates, self._hard_x_min, self._hard_x_max)
    new_coordinates_y = self.determineNewCoordinates(y_coordinates, self._hard_y_min, self._hard_y_max)

    twoform = autocorrelation_function.Twoform()
    eigenvalues = twoform.eigenvalues().copy()

    distribution_plan = DistributionPlan(mpi.COMM_WORLD,
                                         n_rows=number_modes,
                                         n_columns=len(new_coordinates_x) * len(new_coordinates_y))

    if distribution_plan.myRank() == 0:
        twoform_vectors = TwoformVectorsWavefronts(new_coordinates_x, new_coordinates_y, filename)
        diagonal_elements, tde = self._getDiagonalElements(eigenvalues, twoform_vectors, distribution_plan)

        new_twoform = Twoform(new_coordinates_x, new_coordinates_y, diagonal_elements, eigenvalues, twoform_vectors)
        autocorrelation_function._setTwoform(new_twoform)

        autocorrelation_function.info().set("propagation_seperator", self.seperator())
        autocorrelation_function.info().set("propagation", self.log())
        autocorrelation_function.save(filename)

    return autocorrelation_function

def __init__(self, N_e, sigma_matrix, weighted_fields, x_coordinates, y_coordinates, k, number_modes):
    log("Setting up autocorrelation operator")
    self._action = 0
    self._builder = AutocorrelationBuilder(N_e, sigma_matrix, weighted_fields, x_coordinates, y_coordinates, k,
                                           strategy=BuilderStrategyPython)
    self._distribution_plan = DistributionPlan(communicator=mpi.COMM_WORLD,
                                               n_columns=self.dimensionSize(),
                                               n_rows=self.dimensionSize())

    log("Setting up PETSc interface")
    self._petSc_operator = PetScOperator(self)
    self._number_modes = number_modes

    mpi.COMM_WORLD.barrier()

def testIndices(self):
    plan = DistributionPlan(mpi.COMM_WORLD, 10, 10)

    global_index = 5
    local_index = plan.globalToLocalIndex(global_index)
    result = plan.localToGlobalIndex(local_index)

    self.assertEqual(global_index, result)

def crateParallelMatrixFromLocal(local_matrix):
    n_rows = local_matrix.shape[1]
    n_columns = local_matrix.shape[0]

    plan = DistributionPlan(mpi.COMM_WORLD, n_columns=n_columns, n_rows=n_rows)

    parallel_matrix = ParallelMatrix(plan)
    parallel_matrix.broadcast(local_matrix.transpose(), root=0)

    return parallel_matrix

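# Illustrative usage sketch for crateParallelMatrixFromLocal (not part of the
# original sources). It assumes `import numpy as np`, `import mpi4py.MPI as mpi`
# and that the DistributionPlan/ParallelMatrix classes used throughout this repo
# are importable in scope. Note that the helper distributes the *columns* of the
# local array as rows of the parallel matrix, i.e. it stores the transpose.
def _example_crate_parallel_matrix_from_local():
    # The same local array is expected on every rank; the helper broadcasts the
    # transposed data from root 0 into the distributed storage.
    local_matrix = np.arange(12, dtype=np.complex128).reshape(3, 4)
    parallel_matrix = crateParallelMatrixFromLocal(local_matrix)

    # Each rank owns only a subset of the 4 distributed rows (of length 3).
    print(mpi.COMM_WORLD.Get_rank(), parallel_matrix.totalShape(), parallel_matrix.localRows())
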
def eigenfunctions(self, matrix, number_modes):
    import sys, slepc4py
    slepc4py.init(sys.argv)

    from petsc4py import PETSc
    from slepc4py import SLEPc

    E = SLEPc.EPS()
    E.create()

    E.setOperators(matrix.petScMatrix())
    E.setProblemType(SLEPc.EPS.ProblemType.HEP)
    # E.setType(SLEPc.EPS.Type.ARNOLDI)
    E.setFromOptions()
    E.setTolerances(tol=1e-9, max_it=200)
    E.setDimensions(nev=number_modes)
    E.solve()

    Print = PETSc.Sys.Print

    iterations = E.getIterationNumber()
    self.log("Number of iterations of the method: %d" % iterations)

    eps_type = E.getType()
    self.log("Solution method: %s" % eps_type)

    nev, ncv, mpd = E.getDimensions()
    self.log("Number of requested eigenvalues: %d" % nev)

    tol, maxit = E.getTolerances()
    self.log("Stopping condition: tol=%.4g, maxit=%d" % (tol, maxit))

    nconv = E.getConverged()
    self.log("Number of converged eigenpairs %d" % nconv)

    eigenvalues = np.zeros(nconv, dtype=np.complex128)

    result_vector = ParallelVector(matrix.distributionPlan())
    plan = DistributionPlan(mpi.COMM_WORLD, n_columns=matrix.totalShape()[1], n_rows=nconv)
    eigenvectors_parallel = ParallelMatrix(plan)

    # Create the results vectors
    vr, wr = matrix.petScMatrix().getVecs()
    vi, wi = matrix.petScMatrix().getVecs()

    for i in range(nconv):
        k = E.getEigenpair(i, vr, vi)

        result_vector.setCollective(vr.getArray())
        eigenvalues[i] = k

        if i in eigenvectors_parallel.localRows():
            eigenvectors_parallel.setRow(i, result_vector.fullData())

    return eigenvalues, eigenvectors_parallel

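# Standalone sketch of the SLEPc.EPS call sequence used in eigenfunctions(),
# applied to a small diagonal PETSc matrix (illustrative only; it assumes
# petsc4py and slepc4py are installed and does not depend on the comsyl wrappers).
def _example_slepc_hermitian_eigenproblem(n=8, nev=3):
    import sys, slepc4py
    slepc4py.init(sys.argv)
    from petsc4py import PETSc
    from slepc4py import SLEPc

    # Build the diagonal Hermitian test matrix diag(1, 2, ..., n).
    A = PETSc.Mat().create()
    A.setSizes([n, n])
    A.setUp()
    first, last = A.getOwnershipRange()
    for i in range(first, last):
        A.setValue(i, i, float(i + 1))
    A.assemble()

    # Same solver setup as above: Hermitian problem, nev requested eigenpairs.
    E = SLEPc.EPS()
    E.create()
    E.setOperators(A)
    E.setProblemType(SLEPc.EPS.ProblemType.HEP)
    E.setDimensions(nev=nev)
    E.solve()

    vr, vi = A.getVecs()
    return [E.getEigenpair(i, vr, vi) for i in range(E.getConverged())]
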
def __init__(self, twoform):
    self._parent = twoform

    vector = self._parent.vector(0)
    self._n_size = vector.size

    self._petsc_matrix = PETSc.Mat().create()
    self._petsc_matrix.setSizes([self._n_size, self._n_size])
    self._petsc_matrix.setUp()

    self._parallel_matrix = ParallelMatrixPETSc(self._petsc_matrix)

    plan = self._parallel_matrix.distributionPlan()
    self._parent._distribution_plan = plan

    self._vector_in = ParallelVector(plan)
    self._vector_out = ParallelVector(plan)

    self._distribution_plan = DistributionPlan(communicator=mpi.COMM_WORLD,
                                               n_columns=self.dimensionSize(),
                                               n_rows=self.dimensionSize())

def evaluateAllR_2_Fredholm_parallel_convolution(self, v_in, v_out):
    f = v_in.fullData().reshape(self._x_coordinates.shape[0], self._y_coordinates.shape[0])

    distribution_plan = DistributionPlan(communicator=mpi.COMM_WORLD,
                                         n_rows=self.numberFields(),
                                         n_columns=self.numberFields())
    local_rows = distribution_plan.localRows()

    res = np.zeros_like(self._field)

    for i_field in local_rows:
        self._setActiveField(i_field)

        scal_prod_action = self._convolution.convolve2D(f, self._field_reverse_conj)
        self._field_tmp[:, :] = scal_prod_action * self._rho

        res += self._convolution.convolve2D(self._field, self._field_tmp)

    res *= self._grid_area
    v_out.sumFullData(res.ravel())

import os
from socket import gethostname

import mpi4py.MPI as mpi

from comsyl.utils.Logger import logAll, log
from comsyl.parallel.utils import isMaster, barrier
from comsyl.parallel.DistributionPlan import DistributionPlan

if isMaster():
    print("Hello from master")

if isMaster():
    if not os.path.exists("tmp"):
        os.mkdir("tmp")

logAll("Using LogAll")
log("Using Log")

s_id = str(mpi.COMM_WORLD.Get_rank()) + "_" + gethostname()
print("s_id: ", s_id)
print("str(mpi.COMM_WORLD.Get_rank()): ", str(mpi.COMM_WORLD.Get_rank()))

number_modes = 1000
distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=number_modes, n_columns=1)
print(distribution_plan)

f = open("./tmp/TMP%s_in" % s_id, 'w')
f.write(">>>>>>>>>")
f.close()

def arnoldi_iteration(self, H, Q, A, number_eigenvectors):
    H, Q, start_index, m = self._createIterationMatrices(H, Q, A, number_eigenvectors)

    qt_distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=A.totalShape()[0], n_columns=m)
    q_distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=m, n_columns=A.totalShape()[0])

    parallel_vector = ParallelVector(qt_distribution_plan)
    parallel_vector._full_data[:] = Q.globalRow(start_index - 1)

    for k in range(start_index, m):
        A.dot(parallel_vector, parallel_vector)

        if k in Q.localRows():
            Q.setRow(k, parallel_vector.fullData())

        q_k = Q.globalRow(k)

        if k == m or True:
            for j in range(k):
                q_j = Q.cachedGlobalRow(j)
                H[j, k-1] = np.vdot(q_j, q_k)
                q_k -= H[j, k-1] * q_j
        # else:
        #     pv = ParallelVector(qt_distribution_plan)
        #     pv2 = ParallelVector(q_distribution_plan)
        #     pv._full_data[:] = q_k
        #     Q.dot(pv, pv2, complex_conjugate=True)
        #     H[:, k-1] = pv2.fullData()[:]
        #
        #     p = H[:, k-1]
        #     p[k:] = 0
        #     pv2._full_data[:] = p
        #     Q.dotForTransposed(pv2, pv)
        #
        #     q_k -= pv.fullData()
        #     H[k, k-1] = norm(q_k)

        Q.resetCacheGlobalRow()

        if k in Q.localRows():
            Q.setRow(k, q_k)

        row_data = Q.globalRow(k)
        H[k, k-1] = norm(row_data)
        norm_row_data = row_data / H[k, k-1]

        if k % 100 == 0 and Q.distributionPlan().myRank() == 0:
            print("Arnoldi iteration: %i/%i" % (k, m-1))
            sys.stdout.flush()

        # Is invariant null space
        if np.abs(H[k, k-1]) < 1e-100:
            break

        if k in Q.localRows():
            Q.setRow(k, norm_row_data)

        parallel_vector._full_data[:] = norm_row_data

    new_distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=k, n_columns=A.totalShape()[1])
    Q = Q.shrinkTo(new_distribution_plan)

    return H[0:k, 0:k], Q

def arnoldi(self, A, n=25, accuracy=1e-8, accuracy_projection=None):
    n = min(A.totalShape()[0], n)

    # H: Hessenberg matrix
    # Q: Schur basis
    H = None
    Q = None

    my_rank = A.distributionPlan().myRank()

    for i in range(5):
        H, Q = self.arnoldi_iteration(H, Q, A, n)

        r = np.linalg.eigh(H)
        eig_val = r[0][::-1]
        eig_vec = r[1].transpose()[::-1, :]
        eig_vec = eig_vec.transpose()

        schur_vec = np.zeros((A.totalShape()[0], n), dtype=np.complex128)

        n = min(H.shape[0], n)

        q_distribution_plan = Q.distributionPlan()
        qt_distribution_plan = DistributionPlan(mpi.COMM_WORLD,
                                                n_rows=Q.totalShape()[1],
                                                n_columns=Q.totalShape()[0])

        parallel_vector_in = ParallelVector(q_distribution_plan)
        parallel_vector_out = ParallelVector(qt_distribution_plan)

        for i in range(n):
            t = eig_vec[:, i]
            full_data = np.append(eig_vec[:, i], np.zeros(Q.totalShape()[0] - eig_vec[:, i].shape[0]))
            parallel_vector_in._full_data[:] = full_data
            Q.dotForTransposed(parallel_vector_in, parallel_vector_out)
            schur_vec[:, i] = parallel_vector_out.fullData()

        parallel_vector_out._full_data[:] = schur_vec[:, n-1]
        A.dot(parallel_vector_out)

        acc = scipy.linalg.norm(parallel_vector_out.fullData() / eig_val.max()
                                - (eig_val[n-1] / eig_val.max()) * schur_vec[:, n-1])
        acc2 = np.abs(H[-1, -2] / eig_val[n-2])

        if my_rank == 0:
            print("Accuracy last Schur/ritz vector for normalized matrix: %e" % acc)
            print("Accuracy projection vs smallest eigenvalue: %e" % acc2)

        if accuracy_projection is not None:
            if acc2 <= accuracy_projection and acc <= accuracy:
                if my_rank == 0:
                    print("Converged")
                    sys.stdout.flush()
                break
        else:
            if acc <= accuracy:
                if my_rank == 0:
                    print("Converged")
                    sys.stdout.flush()
                break

    return eig_val[0:n], schur_vec[:, 0:n]

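# Serial NumPy reference for the Arnoldi recurrence that arnoldi_iteration() and
# arnoldi() distribute over MPI (illustrative sketch, not part of the solver).
# It builds an orthonormal basis Q and the projected Hessenberg matrix H such that
# A @ Q[:, :m] ~= Q[:, :m+1] @ H[:m+1, :m]; the eigenvalues of H[:m, :m] (the Ritz
# values) approximate the dominant eigenvalues of A.
def _arnoldi_reference(A, m):
    import numpy as np

    n = A.shape[0]
    Q = np.zeros((n, m + 1), dtype=np.complex128)
    H = np.zeros((m + 1, m), dtype=np.complex128)

    # Random normalized start vector, as in _createIterationMatrices.
    q = np.random.rand(n) + 1j * np.random.rand(n)
    Q[:, 0] = q / np.linalg.norm(q)

    for k in range(m):
        q_k = A.dot(Q[:, k])

        # Gram-Schmidt orthogonalization against all previous basis vectors.
        for j in range(k + 1):
            H[j, k] = np.vdot(Q[:, j], q_k)
            q_k -= H[j, k] * Q[:, j]

        H[k + 1, k] = np.linalg.norm(q_k)
        if H[k + 1, k] < 1e-100:
            # Invariant subspace found; the basis cannot be extended further.
            break
        Q[:, k + 1] = q_k / H[k + 1, k]

    return H[:m, :m], Q[:, :m]
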
def testIndicesUndergo(self):
    plan = DistributionPlan(mpi.COMM_WORLD, 1, 1)
    print(plan.localRows())

def propagate(self, autocorrelation_function, filename, method='SRW', python_to_be_used="python"):
    source_filename = autocorrelation_function._io.fromFile()

    try:
        source_uid = autocorrelation_function.info().uid()
    except:
        source_uid = "None"

    autocorrelation_function.info().logStart()

    logAll("Propagating %s (%s)" % (source_filename, source_uid))

    if self._maximum_mode is None:
        number_modes = autocorrelation_function.numberModes()
    else:
        number_modes = self._maximum_mode

    if isMaster():
        if not os.path.exists("tmp"):
            os.mkdir("tmp")

    distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=number_modes, n_columns=1)

    n_rank = mpi.COMM_WORLD.Get_rank()

    x_coordinates = []
    y_coordinates = []

    for i_mode in distribution_plan.localRows():
        for i in range(1):
            logAll("%i doing mode index: %i/%i (max mode index: %i)"
                   % (n_rank, i_mode, max(distribution_plan.localRows()), number_modes - 1))
            if n_rank == 0:
                sys.stdout.flush()

            wavefront = autocorrelation_function.coherentModeAsWavefront(i_mode)
            # wavefront._e_field[np.abs(wavefront._e_field) < 0.000001] = 0.0

            if method == 'SRW':  # CHANGE THIS FOR WOFRY
                srw_wavefront = propagateWavefront(self.__srw_beamline,
                                                   wavefront,
                                                   autocorrelation_function.SRWWavefrontRx(),
                                                   autocorrelation_function.SRWWavefrontDRx(),
                                                   autocorrelation_function.SRWWavefrontRy(),
                                                   autocorrelation_function.SRWWavefrontDRy(),
                                                   1.0, 1.0, i_mode,
                                                   python_to_be_used=python_to_be_used)
            elif method == 'WOFRY':
                srw_wavefront = propagateWavefrontWofry(self.__srw_beamline, wavefront, i_mode,
                                                        python_to_be_used=python_to_be_used)
            else:
                raise Exception("Method not known: %s" % method)

            # norm_mode = trapez2D(np.abs(srw_wavefront.E_field_as_numpy()[0, :, :, 0])**2, 1, 1)**0.5
            # if norm_mode > 1e2 or np.isnan(norm_mode):
            #     print("TRY %i AFTER PROPAGATION:" % i, i_mode, norm_mode)
            #     sys.stdout.flush()
            # else:
            #     break
            # if i == 19:
            #     exit()

        # if np.any(norm_srw_wavefront > 10):
        #     exit()
        #
        # if np.any(norm_wavefront > 10):
        #     exit()

        adjusted_wavefront = self._adjustWavefrontSize(srw_wavefront)

        # norm_mode = trapez2D(np.abs(adjusted_wavefront.E_field_as_numpy()[0, :, :, 0])**2, 1, 1)**0.5
        # if norm_mode > 1e2 or np.isnan(norm_mode):
        #     print("TRY %i AFTER ADJUSTMENT:" % i, i_mode, norm_mode)
        #     sys.stdout.flush()
        #     exit()

        # writes a file for every wavefront
        TwoformVectorsWavefronts.pushWavefront(filename, adjusted_wavefront, index=i_mode)
        # print("Saving wavefront %i" % i_mode)

        x_coordinates.append(adjusted_wavefront.absolute_x_coordinates().copy())
        y_coordinates.append(adjusted_wavefront.absolute_y_coordinates().copy())

    mpi.COMM_WORLD.barrier()

    # replace the wavefronts by the propagated ones
    af = self._saveAutocorrelation(autocorrelation_function, number_modes, x_coordinates, y_coordinates, filename)

    # convert from one file per wavefront to one big array
    af.Twoform().convertToTwoformVectorsEigenvectors()

    af.info().setEndTime()

    filelist = glob.glob(filename + "*")
    for f in filelist:
        os.remove(f)

    return af

def evaluateAllR_2_Fredholm_parallel_direct(self, v_in, v_out):
    if not self._has_phases:
        self._setUpPhases()

    v = v_in.fullData().copy()

    H = np.zeros_like(self._field)
    tmp2 = np.zeros((self._field.shape[0] * self._field.shape[1]), dtype=np.complex128)
    result = np.zeros_like(self._field)
    tmp = np.zeros((self._field_y_coordinates.shape[0],
                    self._field.shape[0] * self._field.shape[1]), dtype=np.complex128)

    e_1 = np.zeros_like(self._field)
    e_2 = np.zeros_like(self._field)

    distribution_plan = DistributionPlan(communicator=mpi.COMM_WORLD,
                                         n_rows=len(self._field_x_coordinates),
                                         n_columns=len(self._field_y_coordinates))
    local_rows = distribution_plan.localRows()
    range_y_coordinates = tuple(range(len(self._field_y_coordinates)))

    for i_field in range(self.numberFields()):
        self._setActiveField(i_field)

        for i_r_x in local_rows:
            i_x_1 = self._coordinate_map_x[i_r_x]
            i_x_minus_1 = self._coordinate_map_minus_x[i_r_x]

            for i_r_y in range_y_coordinates:
                i_y_1 = self._coordinate_map_y[i_r_y]
                i_y_minus_1 = self._coordinate_map_minus_y[i_r_y]

                self.relativeShiftedCopy(i_x_1, i_y_1, self._field_conj, e_1)
                tmp[i_r_y, :] = e_1.ravel()
                tmp[i_r_y, :] *= self._rho_phase_exp_y[i_y_minus_1, :, :].ravel()

            tmp2[:] = v * self._rho_phase_exp_x[i_x_minus_1, :, :].ravel()
            H[i_r_x, :] = tmp.dot(tmp2)

        v_out.sumFullData(H.ravel())
        field_product = self._rho.flatten() * v_out.fullData()

        for i_r_x in local_rows:
            i_x_2 = self._coordinate_map_x[i_r_x]

            for i_r_y in range_y_coordinates:
                i_y_2 = self._coordinate_map_y[i_r_y]

                self.relativeShiftedCopy(i_x_2, i_y_2, self._field, e_2)
                tmp[i_r_y, :] = e_2.ravel()
                tmp[i_r_y, :] *= self._rho_phase_exp_y[i_y_2, :, :].ravel()

            tmp2[:] = field_product * self._rho_phase_exp_x[i_x_2, :, :].ravel()
            result[i_r_x, :] += tmp.dot(tmp2)

    # it is only dV to first power because the solver normalizes for one integration
    result *= self._grid_area
    v_out.sumFullData(result.ravel())

def __init__(self, x_coordinates, y_coordinates, intensity, eigenvalues_spatial, eigenvectors_parallel,
             phase_space_density, method):
    self._method = method

    if self._method not in ["accurate", "quick"]:
        raise Exception("Unknown divergence action %s" % self._method)

    communicator = mpi.COMM_WORLD

    n_vectors = eigenvalues_spatial.size

    self._intensity = intensity
    eigenvalues = eigenvalues_spatial
    self._number_modes = n_vectors

    self._x_coordinates = x_coordinates
    self._y_coordinates = y_coordinates

    self._petSc_operator = PetScOperatorDivergence(self)

    self._my_distribution_plan = DistributionPlan(communicator=communicator,
                                                  n_rows=n_vectors,
                                                  n_columns=self.dimensionSize())

    self._prepareEigenvectors(communicator, eigenvectors_parallel)

    self._my_eigenvalues = eigenvalues[self._my_distribution_plan.localRows()]
    self._my_eigenvectors_conjugated = self._my_eigenvectors.conj()
    self._my_eigenvectors_times_eigenvalues = self._my_eigenvectors
    self._my_eigenvectors = None

    for i_e, e in enumerate(self._my_eigenvalues):
        self._my_eigenvectors_times_eigenvalues[i_e, :, :] *= e

    self._phase_space_density = phase_space_density
    self._sigma_p_x = phase_space_density.divergencePartSigmaX()
    self._sigma_p_y = phase_space_density.divergencePartSigmaY()
    self._prefactor = phase_space_density.normalizationConstant()

    log("Divergence action sigma x/y: %e %e" % (self._sigma_p_x, self._sigma_p_y))

    x_coordinates_weights = x_coordinates[(x_coordinates > -5 * self._sigma_p_x) &
                                          (x_coordinates < 5 * self._sigma_p_x)]
    y_coordinates_weights = y_coordinates[(y_coordinates > -5 * self._sigma_p_y) &
                                          (y_coordinates < 5 * self._sigma_p_y)]

    log("Calculating phase space density xy")
    weight_function = np.zeros((x_coordinates_weights.shape[0], y_coordinates_weights.shape[0]),
                               dtype=np.complex128)
    for i_x, x in enumerate(x_coordinates_weights):
        for i_y, y in enumerate(y_coordinates_weights):
            weight_function[i_x, i_y] = phase_space_density.staticPart(np.array([x, y]))

    weight_function_horizontal = np.zeros((x_coordinates.shape[0]), dtype=np.complex128)
    weight_function_vertical = np.zeros((y_coordinates.shape[0]), dtype=np.complex128)

    log("Calculating phase space density x")
    for i_x, x in enumerate(x_coordinates_weights):
        weight_function_horizontal[i_x] = phase_space_density.staticPart(np.array([x, 0.0]))

    log("Calculating phase space density y")
    for i_y, y in enumerate(y_coordinates_weights):
        weight_function_vertical[i_y] = phase_space_density.staticPart(np.array([0.0, y]))

    # plot(x_coordinates, weight_function_horizontal)
    # plot(y_coordinates, weight_function_vertical)

    self._weight_function = weight_function
    self._weight_function_horizontal = weight_function_horizontal
    self._weight_function_vertical = weight_function_vertical

    self._i_action = 0
    self._convolution = Convolution()

def createDistributionPlan(rows=10, columns=10):
    return DistributionPlan(mpi.COMM_WORLD, n_rows=rows, n_columns=columns)

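# Illustrative usage sketch (not part of the original sources): every rank builds
# the same plan collectively and then works only on the rows it owns. It assumes
# `import mpi4py.MPI as mpi` and the DistributionPlan import used elsewhere here.
def _example_create_distribution_plan():
    plan = createDistributionPlan(rows=100, columns=1)
    # localRows() returns the disjoint subset of the 100 rows owned by this rank.
    print("rank %i owns rows %s" % (plan.myRank(), str(plan.localRows())))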