def _createParallelMatrix(self, f_gamma):
    """Build the parallel system matrix for the two-point kernel f_gamma.

    Delegates to the PETSc-based builder. The legacy ParallelMatrix
    construction that used to follow the return statement was unreachable
    dead code (it sat after an unconditional ``return``) and has been
    removed; use version control history to recover it if ever needed.

    :param f_gamma: kernel callable evaluated on product coordinates.
    :return: whatever self._createParallelMatrixPETSc(f_gamma) returns.
    """
    log("Building matrix")
    return self._createParallelMatrixPETSc(f_gamma)
def evaluateAllR_2_Fredholm_parallel_direct(self, v_in, v_out):
    """Apply the Fredholm operator directly (no convolution shortcut).

    Two passes per field: pass 1 builds the intermediate H = <shifted
    conj(field) * phase, v> and MPI-sums it into v_out; pass 2 multiplies
    by rho and projects back onto shifted fields, accumulating ``result``.
    Work is distributed over x-rows via a DistributionPlan.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; the nesting chosen here (both passes inside the field loop,
    final scaling outside) matches the data dependencies — confirm
    against the original file.
    """
    # Phase tables are built lazily on first use.
    if not self._has_phases:
        self._setUpPhases()
    v = v_in.fullData().copy()
    H = np.zeros_like(self._field)
    tmp2 = np.zeros((self._field.shape[0] * self._field.shape[1]),
                    dtype=np.complex128)
    result = np.zeros_like(self._field)
    # One row of `tmp` per y coordinate; each row is a flattened field.
    tmp = np.zeros(
        (self._field_y_coordinates.shape[0],
         self._field.shape[0] * self._field.shape[1]),
        dtype=np.complex128)
    e_1 = np.zeros_like(self._field)  # reusable buffer for shifted conj field
    e_2 = np.zeros_like(self._field)  # reusable buffer for shifted field
    distribution_plan = DistributionPlan(communicator=mpi.COMM_WORLD,
                                         n_rows=len(self._field_x_coordinates),
                                         n_columns=len(self._field_y_coordinates))
    local_rows = distribution_plan.localRows()
    range_y_coordinates = tuple(range(len(self._field_y_coordinates)))
    for i_field in range(self.numberFields()):
        self._setActiveField(i_field)
        # Pass 1: H[i_r_x, :] = sum over y of shifted conj(field) weighted rows.
        for i_r_x in local_rows:
            i_x_1 = self._coordinate_map_x[i_r_x]
            i_x_minus_1 = self._coordinate_map_minus_x[i_r_x]
            for i_r_y in range_y_coordinates:
                i_y_1 = self._coordinate_map_y[i_r_y]
                i_y_minus_1 = self._coordinate_map_minus_y[i_r_y]
                self.relativeShiftedCopy(i_x_1, i_y_1, self._field_conj, e_1)
                tmp[i_r_y, :] = e_1.ravel()
                tmp[i_r_y, :] *= self._rho_phase_exp_y[i_y_minus_1, :, :].ravel()
            tmp2[:] = v * self._rho_phase_exp_x[i_x_minus_1, :, :].ravel()
            H[i_r_x, :] = tmp.dot(tmp2)
        # MPI reduction: v_out now holds the globally summed intermediate.
        v_out.sumFullData(H.ravel())
        field_product = self._rho.flatten() * v_out.fullData()
        # Pass 2: project the rho-weighted intermediate back onto shifted fields.
        for i_r_x in local_rows:
            i_x_2 = self._coordinate_map_x[i_r_x]
            for i_r_y in range_y_coordinates:
                i_y_2 = self._coordinate_map_y[i_r_y]
                self.relativeShiftedCopy(i_x_2, i_y_2, self._field, e_2)
                tmp[i_r_y, :] = e_2.ravel()
                tmp[i_r_y, :] *= self._rho_phase_exp_y[i_y_2, :, :].ravel()
            tmp2[:] = field_product * self._rho_phase_exp_x[i_x_2, :, :].ravel()
            result[i_r_x, :] += tmp.dot(tmp2)
    # it is only dV to first power because the solver normalizes for one integration
    result *= self._grid_area
    v_out.sumFullData(result.ravel())
def evaluateAllR_2_Fredholm_parallel_convolution(self, v_in, v_out):
    """Apply the Fredholm operator via 2D convolutions.

    The input vector is reshaped onto the (x, y) grid, each locally
    owned field contributes two convolutions (scalar product with the
    reversed conjugate field, then re-expansion on the field), and the
    partial results are MPI-summed into v_out.
    """
    grid_vector = v_in.fullData().reshape(self._x_coordinates.shape[0],
                                          self._y_coordinates.shape[0])
    plan = DistributionPlan(communicator=mpi.COMM_WORLD,
                            n_rows=self.numberFields(),
                            n_columns=self.numberFields())
    accumulator = np.zeros_like(self._field)
    for field_index in plan.localRows():
        self._setActiveField(field_index)
        projection = self._convolution.convolve2D(grid_vector,
                                                  self._field_reverse_conj)
        self._field_tmp[:, :] = projection * self._rho
        accumulator += self._convolution.convolve2D(self._field,
                                                    self._field_tmp)
    # Single dV factor only: the solver normalizes for one integration.
    accumulator *= self._grid_area
    v_out.sumFullData(accumulator.ravel())
def evaluateAllR_2_Fredholm_parallel_convolution(self, v_in, v_out):
    """Apply the Fredholm operator using 2D convolutions.

    For each locally owned field (rows distributed via DistributionPlan):
    convolve the reshaped input with the reversed conjugate field, weight
    by rho, convolve back with the field, and accumulate. The local sum
    is then MPI-reduced into v_out.
    """
    f = v_in.fullData().reshape(self._x_coordinates.shape[0],
                                self._y_coordinates.shape[0])
    distribution_plan = DistributionPlan(communicator=mpi.COMM_WORLD,
                                         n_rows=self.numberFields(),
                                         n_columns=self.numberFields())
    local_rows = distribution_plan.localRows()
    res = np.zeros_like(self._field)
    for i_field in local_rows:
        self._setActiveField(i_field)
        scal_prod_action = self._convolution.convolve2D(
            f, self._field_reverse_conj)
        self._field_tmp[:, :] = scal_prod_action * self._rho
        res += self._convolution.convolve2D(self._field, self._field_tmp)
    # Only one dV factor: the solver normalizes for one integration.
    res *= self._grid_area
    v_out.sumFullData(res.ravel())
def testIndicesUndergo(self):
    """Smoke test: show the locally owned rows of a minimal 1x1 plan."""
    trivial_plan = DistributionPlan(mpi.COMM_WORLD, 1, 1)
    print(trivial_plan.localRows())
def propagate(self, autocorrelation_function, filename, method='SRW', python_to_be_used="python"):
    """Propagate every coherent mode through the stored beamline.

    Each MPI rank propagates its share of modes (distributed by
    DistributionPlan), writes one wavefront file per mode, then the
    per-mode files are merged into a single autocorrelation function
    and the temporary files are deleted.

    :param autocorrelation_function: source modes + metadata container.
    :param filename: base name for per-mode wavefront files and output.
    :param method: 'SRW' or 'WOFRY' propagation backend.
    :param python_to_be_used: interpreter forwarded to the propagators.
    :return: the new (propagated) autocorrelation function.
    """
    source_filename = autocorrelation_function._io.fromFile()
    try:
        source_uid = autocorrelation_function.info().uid()
    # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
    # consider narrowing to `except Exception`.
    except:
        source_uid = "None"
    autocorrelation_function.info().logStart()
    logAll("Propagating %s (%s)" % (source_filename, source_uid))
    # Honor an optional cap on the number of modes to propagate.
    if self._maximum_mode is None:
        number_modes = autocorrelation_function.numberModes()
    else:
        number_modes = self._maximum_mode
    # Only the master rank creates the scratch directory.
    if isMaster():
        if not os.path.exists("tmp"):
            os.mkdir("tmp")
    distribution_plan = DistributionPlan(mpi.COMM_WORLD,
                                         n_rows=number_modes,
                                         n_columns=1)
    n_rank = mpi.COMM_WORLD.Get_rank()
    x_coordinates = []
    y_coordinates = []
    for i_mode in distribution_plan.localRows():
        # NOTE(review): this single-iteration loop is a leftover of a
        # retry mechanism (see commented-out diagnostics below using `i`).
        for i in range(1):
            logAll("%i doing mode index: %i/%i (max mode index: %i)"
                   % (n_rank, i_mode, max(
                       distribution_plan.localRows()), number_modes - 1))
            if n_rank == 0:
                sys.stdout.flush()
            wavefront = autocorrelation_function.coherentModeAsWavefront(
                i_mode)
            #wavefront._e_field[np.abs(wavefront._e_field)<0.000001]=0.0
            if method == 'SRW':
                # CHANGE THIS FOR WOFRY
                srw_wavefront = propagateWavefront(
                    self.__srw_beamline,
                    wavefront,
                    autocorrelation_function.SRWWavefrontRx(),
                    autocorrelation_function.SRWWavefrontDRx(),
                    autocorrelation_function.SRWWavefrontRy(),
                    autocorrelation_function.SRWWavefrontDRy(),
                    1.0, 1.0, i_mode,
                    python_to_be_used=python_to_be_used)
            elif method == 'WOFRY':
                srw_wavefront = propagateWavefrontWofry(
                    self.__srw_beamline, wavefront, i_mode,
                    python_to_be_used=python_to_be_used)
            else:
                raise Exception("Method not known: %s" % method)
            # norm_mode = trapez2D( np.abs(srw_wavefront.E_field_as_numpy()[0,:,:,0])**2, 1, 1)**0.5
            # if norm_mode > 1e2 or np.isnan(norm_mode):
            #     print("TRY %i AFTER PROPAGATION:" % i, i_mode,norm_mode)
            #     sys.stdout.flush()
            #else:
            #    break
            #if i==19:
            #    exit()
            # if np.any(norm_srw_wavefront > 10):
            #    exit()
            #
            # if np.any(norm_wavefront > 10):
            #    exit()
            adjusted_wavefront = self._adjustWavefrontSize(srw_wavefront)
            # norm_mode = trapez2D( np.abs(adjusted_wavefront.E_field_as_numpy()[0,:,:,0])**2, 1, 1)**0.5
            # if norm_mode > 1e2 or np.isnan(norm_mode):
            #     print("TRY %i AFTER ADJUSTMENT:" % i, i_mode,norm_mode)
            #     sys.stdout.flush()
            #     exit()
            # writes a file for every wavefront
            TwoformVectorsWavefronts.pushWavefront(filename,
                                                   adjusted_wavefront,
                                                   index=i_mode)
            #print("Saving wavefront %i" % i_mode)
            x_coordinates.append(
                adjusted_wavefront.absolute_x_coordinates().copy())
            y_coordinates.append(
                adjusted_wavefront.absolute_y_coordinates().copy())
    # All ranks must finish writing before the merge.
    mpi.COMM_WORLD.barrier()
    # replace the wavefronts by the propagated ones
    af = self._saveAutocorrelation(autocorrelation_function, number_modes,
                                   x_coordinates, y_coordinates, filename)
    # convert from one file per wavefront to one big array
    af.Twoform().convertToTwoformVectorsEigenvectors()
    af.info().setEndTime()
    # Remove the per-mode scratch files now that they are merged.
    filelist = glob.glob(filename + "*")
    for f in filelist:
        os.remove(f)
    return af
def evaluateAllR_2_Fredholm_parallel_direct(self, v_in, v_out):
    """Direct (non-convolution) application of the Fredholm operator.

    Pass 1 builds an intermediate H from shifted conjugate fields and
    phase factors and MPI-sums it into v_out; pass 2 weights by rho and
    projects back onto shifted fields, accumulating into ``result``.
    x-rows are distributed across ranks by a DistributionPlan.

    NOTE(review): nesting reconstructed from a whitespace-mangled
    source; both passes are placed inside the field loop and the final
    scaling outside — confirm against the original file.
    """
    # Phase tables are built lazily on first call.
    if not self._has_phases:
        self._setUpPhases()
    v = v_in.fullData().copy()
    H = np.zeros_like(self._field)
    tmp2 = np.zeros((self._field.shape[0] * self._field.shape[1]),
                    dtype=np.complex128)
    result = np.zeros_like(self._field)
    # One row per y coordinate, each row a flattened field.
    tmp = np.zeros((self._field_y_coordinates.shape[0],
                    self._field.shape[0] * self._field.shape[1]),
                   dtype=np.complex128)
    e_1 = np.zeros_like(self._field)  # scratch: shifted conjugate field
    e_2 = np.zeros_like(self._field)  # scratch: shifted field
    distribution_plan = DistributionPlan(
        communicator=mpi.COMM_WORLD,
        n_rows=len(self._field_x_coordinates),
        n_columns=len(self._field_y_coordinates))
    local_rows = distribution_plan.localRows()
    range_y_coordinates = tuple(range(len(self._field_y_coordinates)))
    for i_field in range(self.numberFields()):
        self._setActiveField(i_field)
        # Pass 1: assemble H row by row from the conjugate field.
        for i_r_x in local_rows:
            i_x_1 = self._coordinate_map_x[i_r_x]
            i_x_minus_1 = self._coordinate_map_minus_x[i_r_x]
            for i_r_y in range_y_coordinates:
                i_y_1 = self._coordinate_map_y[i_r_y]
                i_y_minus_1 = self._coordinate_map_minus_y[i_r_y]
                self.relativeShiftedCopy(i_x_1, i_y_1, self._field_conj,
                                         e_1)
                tmp[i_r_y, :] = e_1.ravel()
                tmp[i_r_y, :] *= self._rho_phase_exp_y[
                    i_y_minus_1, :, :].ravel()
            tmp2[:] = v * self._rho_phase_exp_x[i_x_minus_1, :, :].ravel()
            H[i_r_x, :] = tmp.dot(tmp2)
        # Global reduction of the intermediate.
        v_out.sumFullData(H.ravel())
        field_product = self._rho.flatten() * v_out.fullData()
        # Pass 2: rho-weighted back-projection onto shifted fields.
        for i_r_x in local_rows:
            i_x_2 = self._coordinate_map_x[i_r_x]
            for i_r_y in range_y_coordinates:
                i_y_2 = self._coordinate_map_y[i_r_y]
                self.relativeShiftedCopy(i_x_2, i_y_2, self._field, e_2)
                tmp[i_r_y, :] = e_2.ravel()
                tmp[i_r_y, :] *= self._rho_phase_exp_y[i_y_2, :, :].ravel()
            tmp2[:] = field_product * self._rho_phase_exp_x[
                i_x_2, :, :].ravel()
            result[i_r_x, :] += tmp.dot(tmp2)
    # it is only dV to first power because the solver normalizes for one integration
    result *= self._grid_area
    v_out.sumFullData(result.ravel())
class DivergenceAction(object):
    """Linear operator applying the beam-divergence blur to coherent modes.

    Holds the locally owned eigenmodes (rows distributed over MPI ranks)
    and applies the phase-space divergence weight either by exact 2D
    convolution ("accurate") or by a separable Gaussian filter ("quick").
    """

    def __init__(self, x_coordinates, y_coordinates, intensity,
                 eigenvalues_spatial, eigenvectors_parallel,
                 phase_space_density, method):
        """Precompute weight functions and distribute eigenvectors.

        :param x_coordinates: 1D grid of x coordinates (numpy array).
        :param y_coordinates: 1D grid of y coordinates (numpy array).
        :param intensity: total intensity, returned by trace().
        :param eigenvalues_spatial: eigenvalues of the spatial modes.
        :param eigenvectors_parallel: distributed eigenvector matrix.
        :param phase_space_density: provides divergence sigmas and weights.
        :param method: "accurate" or "quick".
        :raises Exception: if method is not one of the supported names.
        """
        self._method = method
        if self._method not in ["accurate", "quick"]:
            raise Exception("Unknown divergence action %s" % self._method)

        communicator = mpi.COMM_WORLD
        n_vectors = eigenvalues_spatial.size

        self._intensity = intensity
        eigenvalues = eigenvalues_spatial
        self._number_modes = n_vectors
        self._x_coordinates = x_coordinates
        self._y_coordinates = y_coordinates
        self._petSc_operator = PetScOperatorDivergence(self)

        self._my_distribution_plan = DistributionPlan(
            communicator=communicator,
            n_rows=n_vectors,
            n_columns=self.dimensionSize())
        # BUGFIX: communicator(), parallelDot() and distributionPlan()
        # read self._distribution_plan, which was never assigned and
        # raised AttributeError. Alias it to the plan created above.
        self._distribution_plan = self._my_distribution_plan

        self._prepareEigenvectors(communicator, eigenvectors_parallel)

        self._my_eigenvalues = eigenvalues[
            self._my_distribution_plan.localRows()]
        self._my_eigenvectors_conjugated = self._my_eigenvectors.conj()
        # Reuse the eigenvector storage in place; scale each mode by its
        # eigenvalue and drop the unscaled reference.
        self._my_eigenvectors_times_eigenvalues = self._my_eigenvectors
        self._my_eigenvectors = None
        for i_e, e in enumerate(self._my_eigenvalues):
            self._my_eigenvectors_times_eigenvalues[i_e, :, :] *= e

        self._phase_space_density = phase_space_density
        self._sigma_p_x = phase_space_density.divergencePartSigmaX()
        self._sigma_p_y = phase_space_density.divergencePartSigmaY()
        self._prefactor = phase_space_density.normalizationConstant()
        log("Divergence action sigma x/y: %e %e"
            % (self._sigma_p_x, self._sigma_p_y))

        # Restrict the 2D weight to +-5 sigma where it is non-negligible.
        x_coordinates_weights = x_coordinates[
            (x_coordinates > -5 * self._sigma_p_x)
            & (x_coordinates < 5 * self._sigma_p_x)]
        y_coordinates_weights = y_coordinates[
            (y_coordinates > -5 * self._sigma_p_y)
            & (y_coordinates < 5 * self._sigma_p_y)]

        log("Calculating phase space density xy")
        weight_function = np.zeros(
            (x_coordinates_weights.shape[0],
             y_coordinates_weights.shape[0]),
            dtype=np.complex128)
        for i_x, x in enumerate(x_coordinates_weights):
            for i_y, y in enumerate(y_coordinates_weights):
                weight_function[i_x, i_y] = phase_space_density.staticPart(
                    np.array([x, y]))

        # 1D profiles are allocated on the FULL grids but filled only over
        # the windowed coordinates; the tail stays zero.
        # NOTE(review): the loops enumerate the windowed arrays, so values
        # land at the start of the full-size arrays rather than at the
        # matching grid positions — confirm this is intended.
        weight_function_horizontal = np.zeros((x_coordinates.shape[0]),
                                              dtype=np.complex128)
        weight_function_vertical = np.zeros((y_coordinates.shape[0]),
                                            dtype=np.complex128)
        log("Calculating phase space density x")
        for i_x, x in enumerate(x_coordinates_weights):
            weight_function_horizontal[i_x] = phase_space_density.staticPart(
                np.array([x, 0.0]))
        log("Calculating phase space density y")
        for i_y, y in enumerate(y_coordinates_weights):
            weight_function_vertical[i_y] = phase_space_density.staticPart(
                np.array([0.0, y]))

        #plot(x_coordinates, weight_function_horizontal)
        #plot(y_coordinates, weight_function_vertical)
        self._weight_function = weight_function
        self._weight_function_horizontal = weight_function_horizontal
        self._weight_function_vertical = weight_function_vertical
        self._i_action = 0
        self._convolution = Convolution()

    def _prepareEigenvectors(self, communicator, parallel_eigenvectors):
        """Reshape the locally owned eigenvector rows onto the (x, y) grid."""
        distribution_plan = self._my_distribution_plan
        self._my_eigenvectors = parallel_eigenvectors.localMatrix().reshape(
            len(distribution_plan.localRows()),
            len(self._x_coordinates),
            len(self._y_coordinates))

    def communicator(self):
        """Return the MPI communicator of the distribution plan."""
        return self._distribution_plan.communicator()

    def dimension_size(self):
        """Size of the (windowed) 2D weight function.

        NOTE(review): distinct from dimensionSize(), which is the full
        grid size; both are kept for backward compatibility.
        """
        return self._weight_function.size

    def parallelDot(self, v):
        """Broadcast v from rank 0, apply the operator, return full data."""
        v_in = ParallelVector(self._distribution_plan)
        v_in.broadcast(v, root=0)
        self.dot(v_in, v_in)
        return v_in.fullData()

    def parrallelLinearOperator(self):
        """Return self as the parallel linear operator.

        NOTE(review): method name misspelled ("parrallel") but kept for
        backward compatibility with existing callers.
        """
        return self

    def dot_accurate(self, v_in, v_out=None):
        """Apply the operator using exact 2D convolution with the weight.

        Accumulates sum_i lambda_i e_i * conv2D(conj(e_i) * v, w) over the
        locally owned modes and MPI-sums into v_out.
        """
        if v_out is None:
            v_out = v_in
        self._i_action += 1

        eigenvalues = self._my_eigenvalues
        eigenvectors_times_eigenvalues = \
            self._my_eigenvectors_times_eigenvalues
        c_eigenvectors = self._my_eigenvectors_conjugated

        v_r = v_in.fullData().reshape(
            (self._x_coordinates.shape[0],
             self._y_coordinates.shape[0])).copy()
        res = np.zeros_like(v_r)
        tmp = np.zeros_like(v_r)

        logProgress(self._number_modes, self._i_action,
                    "Divergence action[accurate]")
        for i in range(len(eigenvalues)):
            tmp[:, :] = c_eigenvectors[i, :, :] * v_r
            c = self._convolution.convolve2D(tmp, self._weight_function)
            tmp[:, :] = eigenvectors_times_eigenvalues[i, :, :] * c
            res[:, :] += tmp
        v_out.sumFullData(res.ravel())

    def dot_quick(self, v_in, v_out=None):
        """Apply the operator approximating the weight by a Gaussian filter.

        Filters real and imaginary parts separately (gaussian_filter is
        real-valued) with sigmas expressed in grid steps, then rescales by
        the Gaussian normalization and the density prefactor.
        """
        if v_out is None:
            v_out = v_in
        self._i_action += 1

        eigenvalues = self._my_eigenvalues
        eigenvectors_times_eigenvalues = \
            self._my_eigenvectors_times_eigenvalues
        c_eigenvectors = self._my_eigenvectors_conjugated

        v_r = v_in.fullData().reshape(
            (self._x_coordinates.shape[0],
             self._y_coordinates.shape[0])).copy()
        res = np.zeros_like(v_r)
        tmp = np.zeros_like(v_r)

        # Sigmas in units of grid spacing, as required by gaussian_filter.
        sigmas = [
            self._sigma_p_x / (self._x_coordinates[1]
                               - self._x_coordinates[0]),
            self._sigma_p_y / (self._y_coordinates[1]
                               - self._y_coordinates[0])
        ]
        logProgress(self._number_modes, self._i_action,
                    "Divergence action[quick]")
        for i in range(len(eigenvalues)):
            tmp[:, :] = c_eigenvectors[i, :, :] * v_r
            t_i = tmp.imag.copy()
            tmp[:, :] = gaussian_filter(tmp.real, sigmas)
            tmp[:, :] += 1j * gaussian_filter(t_i, sigmas)
            tmp[:, :] *= eigenvectors_times_eigenvalues[i, :, :]
            res[:, :] += tmp

        # no dV because solver normalizes for one integration
        normalization = 2 * np.pi * sigmas[0] * sigmas[1] * self._prefactor
        res *= normalization
        v_out.sumFullData(res.ravel())

    def dot(self, v_in, v_out=None):
        """Dispatch to the configured divergence method."""
        if self._method == "accurate":
            return self.dot_accurate(v_in, v_out)
        if self._method == "quick":
            return self.dot_quick(v_in, v_out)
        raise Exception("No suitable divergence method.")

    def apply(self, number_modes=None):
        """Diagonalize the operator and return the resulting twoform."""
        eigenmoder = Eigenmoder(self._x_coordinates, self._y_coordinates)
        if number_modes is None:
            number_modes = self._number_modes - 2
        twoform = eigenmoder.eigenmodes(self, number_modes)
        return twoform

    def trace(self):
        """Return the operator trace (the total intensity)."""
        return self._intensity

    def dimensionSize(self):
        """Return the full flattened grid size len(x) * len(y)."""
        return len(self._x_coordinates) * len(self._y_coordinates)

    def totalShape(self):
        """Return the (N, N) shape of the full operator matrix."""
        dimension_size = self.dimensionSize()
        shape = (dimension_size, dimension_size)
        return shape

    def distributionPlan(self):
        """Return the distribution plan of this operator."""
        return self._distribution_plan

    def releaseMemory(self):
        """No-op; kept for interface compatibility."""
        pass

    def petScMatrix(self):
        """Wrap this operator as a matrix-free PETSc Mat."""
        context = self._petSc_operator
        A = PETSc.Mat().createPython(
            [self.dimensionSize(), self.dimensionSize()], context)
        A.setUp()
        return A
def propagate(self, autocorrelation_function, filename, method='SRW', python_to_be_used="python"):
    """Propagate every coherent mode through the stored beamline.

    Modes are distributed across MPI ranks; each rank propagates its
    modes with the chosen backend, writes one wavefront file per mode,
    and after a barrier the per-mode files are merged into a single
    autocorrelation function and the scratch files are removed.

    Changes vs. the original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to ``except Exception``,
    and a dead single-iteration retry loop plus its stale commented-out
    diagnostics were removed — behavior is otherwise unchanged.

    :param autocorrelation_function: source modes + metadata container.
    :param filename: base name for per-mode wavefront files and output.
    :param method: 'SRW' or 'WOFRY' propagation backend.
    :param python_to_be_used: interpreter forwarded to the propagators.
    :return: the new (propagated) autocorrelation function.
    :raises Exception: if ``method`` is not a known backend.
    """
    source_filename = autocorrelation_function._io.fromFile()

    # The uid is informational only; fall back to "None" if unavailable.
    try:
        source_uid = autocorrelation_function.info().uid()
    except Exception:
        source_uid = "None"

    autocorrelation_function.info().logStart()
    logAll("Propagating %s (%s)" % (source_filename, source_uid))

    # Honor an optional cap on the number of modes to propagate.
    if self._maximum_mode is None:
        number_modes = autocorrelation_function.numberModes()
    else:
        number_modes = self._maximum_mode

    # Only the master rank creates the scratch directory.
    if isMaster():
        if not os.path.exists("tmp"):
            os.mkdir("tmp")

    distribution_plan = DistributionPlan(mpi.COMM_WORLD,
                                         n_rows=number_modes,
                                         n_columns=1)
    n_rank = mpi.COMM_WORLD.Get_rank()

    x_coordinates = []
    y_coordinates = []
    for i_mode in distribution_plan.localRows():
        logAll("%i doing mode index: %i/%i (max mode index: %i)"
               % (n_rank, i_mode, max(distribution_plan.localRows()),
                  number_modes - 1))
        if n_rank == 0:
            sys.stdout.flush()

        wavefront = autocorrelation_function.coherentModeAsWavefront(i_mode)

        if method == 'SRW':
            srw_wavefront = propagateWavefront(
                self.__srw_beamline,
                wavefront,
                autocorrelation_function.SRWWavefrontRx(),
                autocorrelation_function.SRWWavefrontDRx(),
                autocorrelation_function.SRWWavefrontRy(),
                autocorrelation_function.SRWWavefrontDRy(),
                1.0, 1.0, i_mode,
                python_to_be_used=python_to_be_used)
        elif method == 'WOFRY':
            srw_wavefront = propagateWavefrontWofry(
                self.__srw_beamline, wavefront, i_mode,
                python_to_be_used=python_to_be_used)
        else:
            raise Exception("Method not known: %s" % method)

        adjusted_wavefront = self._adjustWavefrontSize(srw_wavefront)

        # Writes a file for every wavefront.
        TwoformVectorsWavefronts.pushWavefront(filename,
                                               adjusted_wavefront,
                                               index=i_mode)
        x_coordinates.append(
            adjusted_wavefront.absolute_x_coordinates().copy())
        y_coordinates.append(
            adjusted_wavefront.absolute_y_coordinates().copy())

    # All ranks must finish writing before merging.
    mpi.COMM_WORLD.barrier()

    # Replace the wavefronts by the propagated ones.
    af = self._saveAutocorrelation(autocorrelation_function, number_modes,
                                   x_coordinates, y_coordinates, filename)

    # Convert from one file per wavefront to one big array.
    af.Twoform().convertToTwoformVectorsEigenvectors()
    af.info().setEndTime()

    # Remove the per-mode scratch files now that they are merged.
    filelist = glob.glob(filename + "*")
    for f in filelist:
        os.remove(f)

    return af