def compute_evaluate_wavepackets(iom, basis="eigen", blockid=0):
    """Evaluate an inhomogeneous Hagedorn wavepacket on a given grid for each timestep.

    :param iom: An ``IOManager`` instance providing the simulation data.
    :param basis: The basis in which the evaluation is done. Can be 'eigen' or 'canonical'.
    :param blockid: The data block from which the values are read.
    """
    parameters = iom.load_parameters()

    # Number of time steps we saved
    timesteps = iom.load_inhomogwavepacket_timegrid(blockid=blockid)
    nrtimesteps = timesteps.shape[0]

    # Prepare the potential for basis transformations
    Potential = PotentialFactory().create_potential(parameters)

    # Retrieve simulation data
    if iom.has_grid(blockid=blockid):
        grid = iom.load_grid(blockid=blockid)
    else:
        grid = iom.load_grid(blockid="global")

    params = iom.load_inhomogwavepacket_parameters(blockid=blockid)
    coeffs = iom.load_inhomogwavepacket_coefficients(blockid=blockid)

    # A data transformation needed by the API specification
    params = [ [ params[j][i,:] for j in xrange(parameters["ncomponents"]) ] for i in xrange(nrtimesteps) ]
    coeffs = [ [ coeffs[i,j,:] for j in xrange(parameters["ncomponents"]) ] for i in xrange(nrtimesteps) ]

    # We want to save wavefunctions, thus add a data slot to the data file
    iom.add_wavefunction(parameters, timeslots=nrtimesteps, blockid=blockid)

    # Hack to allow data blocks with a different basis size than the global one
    # TODO: Remove this once we have local parameter sets
    parameters.update_parameters({"basis_size": coeffs[0][0].shape[0]})

    HAWP = HagedornWavepacketInhomogeneous(parameters)
    HAWP.set_quadrature(None)

    WF = WaveFunction(parameters)
    WF.set_grid(grid)

    # Iterate over all timesteps
    for i, step in enumerate(timesteps):
        print(" Evaluating inhomogeneous wavepacket at timestep " + str(step))

        # Configure the wavepacket
        HAWP.set_parameters(params[i])
        HAWP.set_coefficients(coeffs[i])

        # Project to the eigenbasis if desired
        if basis == "eigen":
            HAWP.project_to_eigen(Potential)

        # Evaluate the wavepacket
        values = HAWP.evaluate_at(grid, prefactor=True)
        WF.set_values(values)

        # Save the wavefunction
        iom.save_wavefunction(WF.get_values(), timestep=step, blockid=blockid)
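A minimal usage sketch, assuming ``IOManager`` is importable from the ``WaveBlocks`` package as in the surrounding scripts; the filename ``simulation_results.hdf5`` is hypothetical:

from WaveBlocks import IOManager

iom = IOManager()
iom.open_file(filename="simulation_results.hdf5")

# Evaluate all inhomogeneous wavepackets in the eigenbasis and
# store the resulting wavefunctions back into the same file
compute_evaluate_wavepackets(iom, basis="eigen", blockid=0)

iom.finalize()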
def load_data(resultsdir, evaluation_times, which_norm="wf"):
    """This function assumes the filename specification:
    something_eps=..._dt=..._[h|f]_other_things.
    We group the simulations first by eps and then by dt.
    """
    iom_f = IOManager()
    iom_h = IOManager()

    # Group the data from different simulations according to epsilon
    ids = get_result_dirs(resultsdir)
    eps_groups = group_by(ids, "eps")

    # Data structures for the results
    epsdata = [ None for i in xrange(len(eps_groups)) ]
    axisdata = [ [] for i in xrange(len(eps_groups)) ]
    normdata = [ [ [] for i in xrange(len(eps_groups)) ] for t in xrange(len(evaluation_times)) ]

    # Loop over all simulations, grouped by same eps value
    for index, eps_group in enumerate(eps_groups):
        # Partition into fourier and hagedorn simulations
        dirs_f = gather_all(eps_group, "algorithm=fourier")
        dirs_h = gather_all(eps_group, "algorithm=hagedorn")

        if len(dirs_f) != len(dirs_h):
            raise ValueError("Found different numbers of fourier and hagedorn simulations!")

        # And sort by dt value
        dirs_f = sort_by(dirs_f, "dt")
        dirs_h = sort_by(dirs_h, "dt")

        # Loop over all simulations with the same eps value, sorted by size of dt
        for dir_f, dir_h in zip(dirs_f, dirs_h):
            print("Comparing simulation " + dir_h + " with " + dir_f)

            resultsfile_f = get_results_file(dir_f)
            iom_f.open_file(filename=resultsfile_f)

            resultsfile_h = get_results_file(dir_h)
            iom_h.open_file(filename=resultsfile_h)

            # Read the parameters
            parameters_f = iom_f.load_parameters()
            parameters_h = iom_h.load_parameters()

            # Scalar parameter of the x axis
            axisdata[index].append(parameters_f["dt"])

            # Get the data
            grid = iom_f.load_grid(blockid="global")

            WF = WaveFunction(parameters_f)
            WF.set_grid(grid)

            # Convert times to timesteps using the time manager
            tm = parameters_f.get_timemanager()

            # Loop over all evaluation times
            for i, time in enumerate(evaluation_times):
                print(" at time T: " + str(time))

                step = tm.compute_timestep(time)

                data_f = iom_f.load_wavefunction(timestep=step)
                data_h = iom_h.load_wavefunction(timestep=step)

                # Compute the norm || u_f - u_h || for all timesteps
                data_diff = data_f - data_h

                if which_norm == "wf":
                    WF.set_values([ data_diff[0,...] ])
                    no = WF.get_norm(summed=True)
                elif which_norm == "2":
                    no = norm(data_diff[0,...])
                elif which_norm == "max":
                    no = max(abs(data_diff[0,...]))

                # Append norm values to the global data structure
                normdata[i][index].append(no)

            # Scalar parameter of the different curves
            # We add this here because the simulation parameters are
            # already loaded but not yet overwritten by the next iteration.
            # Remember: we need only a single epsilon out of each eps_group.
            epsdata[index] = parameters_f["eps"]

    iom_f.finalize()
    iom_h.finalize()

    # Convert the lists to arrays
    epsdata = array(epsdata)
    axisdata = [ array(item) for item in axisdata ]

    return (evaluation_times, epsdata, axisdata, normdata)
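For illustration, a hedged sketch of how this function might be called; the directory layout below is made up to match the assumed filename specification, and the evaluation times are arbitrary:

# Hypothetical directory layout inside "results/":
#   simulation_eps=0.1_dt=0.01_f_serial/
#   simulation_eps=0.1_dt=0.01_h_serial/
#   simulation_eps=0.1_dt=0.001_f_serial/
#   ...

times, epsdata, axisdata, normdata = load_data("results", [0.5, 1.0, 2.0], which_norm="wf")

# normdata[t][e] holds one norm per dt value (see axisdata[e]) for
# evaluation time t and epsilon group e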
def read_data(iom_o, iom_s, gid, bid_ref=0):
    """Compute the norms of the difference between the original and the
    spawned wavefunctions for a series of timesteps.

    :param iom_o: An ``IOManager`` instance providing the reference simulation data.
    :param iom_s: An ``IOManager`` instance providing the spawning simulation data.
    :param gid: The group ID of the data block group containing the mother and the child packet.
    :param bid_ref: The block ID of the reference data. Default is data block '0'.
    """
    parameters_o = iom_o.load_parameters()
    parameters_s = iom_s.load_parameters()

    # Retrieve the block IDs of the mother and the spawned (child) packet
    bidm, bidc = iom_s.get_block_ids(groupid=gid)

    # Read the original data from the first block
    grid_o = iom_o.load_grid(blockid="global")
    timegrid_o = iom_o.load_wavefunction_timegrid(blockid=bid_ref)

    WF = WaveFunction(parameters_o)
    WF.set_grid(grid_o)

    norms_L2 = []
    norms_max = []

    for step in timegrid_o:
        print(" Timestep # " + str(step))

        # Retrieve the original reference data
        wave_o = iom_o.load_wavefunction(timestep=step, blockid=bid_ref)
        values_o = [ wave_o[j,...] for j in xrange(parameters_o["ncomponents"]) ]
        # Compute absolute values, assuming the data were stored in the eigenbasis
        values_o = [ sqrt(conj(item)*item) for item in values_o ]

        # Retrieve the spawn data for the mother and child packets
        values_s = []
        try:
            # Load the data of the original (mother) packet
            wave = iom_s.load_wavefunction(timestep=step, blockid=bidm)
            values_s.append([ wave[j,...] for j in xrange(parameters_s["ncomponents"]) ])

            # Load the data of the spawned (child) packet
            wave = iom_s.load_wavefunction(timestep=step, blockid=bidc)
            values_s.append([ wave[j,...] for j in xrange(parameters_s["ncomponents"]) ])

            have_spawn_data = True
        except ValueError:
            have_spawn_data = False

        if have_spawn_data is True:
            # Sum up the spawned parts
            values_sum = []
            for i in xrange(parameters_o["ncomponents"]):
                values_sum.append(sqrt(reduce(lambda x, y: x + y, [ conj(item[i])*item[i] for item in values_s ])))

            # Compute the difference to the original
            values_diff = [ item_o - item_s for item_o, item_s in zip(values_o, values_sum) ]
        else:
            # Return zeros if we did not spawn yet at this timestep
            values_diff = [ zeros(values_o[0].shape) for i in xrange(parameters_o["ncomponents"]) ]

        # Compute the L^2 norm
        WF.set_values(values_diff)
        curnorm_L2 = list(WF.get_norm())
        curnorm_L2.append(WF.get_norm(summed=True))

        # Compute the max norm
        curnorm_max = [ max(abs(item)) for item in values_diff ]
        curnorm_max.append(max(curnorm_max))

        print(" at time " + str(step*parameters_o["dt"]) + " the error in L^2 norm is " + str(curnorm_L2))

        norms_L2.append(curnorm_L2)
        norms_max.append(curnorm_max)

    return (timegrid_o*parameters_o["dt"], array(norms_L2), array(norms_max))
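A usage sketch for this comparison, again assuming ``IOManager`` comes from the ``WaveBlocks`` package; the two result filenames and the group ID are hypothetical:

from WaveBlocks import IOManager

iom_o = IOManager()
iom_o.open_file(filename="results_original.hdf5")

iom_s = IOManager()
iom_s.open_file(filename="results_spawned.hdf5")

# Compare spawn group 0 of the spawning run against reference block 0
times, norms_L2, norms_max = read_data(iom_o, iom_s, gid=0, bid_ref=0)

iom_s.finalize()
iom_o.finalize()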
def load_data(resultspath, which_norm="wf"):
    # Sort the data from different simulations
    ids = FT.get_result_dirs(resultspath)
    dirs_f = FT.gather_all(ids, "fourier")
    dirs_h = FT.gather_all(ids, "hagedorn")

    dirs_f = FT.sort_by(dirs_f, "eps")
    dirs_h = FT.sort_by(dirs_h, "eps")

    if len(dirs_f) != len(dirs_h):
        raise ValueError("Found different numbers of fourier and hagedorn simulations!")

    number_simulations = len(dirs_f)

    normdata = []
    axisdata = []

    iom_f = IOManager()
    iom_h = IOManager()

    # Loop over all simulations
    for dir_f, dir_h in zip(dirs_f, dirs_h):
        print("Comparing simulation " + dir_h + " with " + dir_f)

        # Load the simulation data files
        resultsfile_f = FT.get_results_file(dir_f)
        iom_f.open_file(filename=resultsfile_f)

        resultsfile_h = FT.get_results_file(dir_h)
        iom_h.open_file(filename=resultsfile_h)

        # Read the parameters
        parameters_f = iom_f.load_parameters()
        parameters_h = iom_h.load_parameters()

        number_components = parameters_f["ncomponents"]

        # Scalar parameter that discriminates the simulations
        axisdata.append(parameters_f["eps"])

        # Get the data
        grid = iom_f.load_grid(blockid="global")
        timesteps = iom_f.load_wavefunction_timegrid()
        data_f = iom_f.load_wavefunction()
        data_h = iom_h.load_wavefunction()

        # Compute the norm || u_f - u_h ||_L2 for all timesteps
        data_diff = data_f - data_h

        WF = WaveFunction(parameters_f)
        WF.set_grid(grid)

        norms = []

        for i, step in enumerate(timesteps):
            if which_norm == "wf":
                WF.set_values([ data_diff[i,0,:] ])
                no = WF.get_norm()
            elif which_norm == "2":
                no = norm(data_diff[i,0,:])
            elif which_norm == "max":
                no = max(abs(data_diff[i,0,:]))

            norms.append(no)

        # Append the norm values to the global data structure
        norms = array(norms)
        normdata.append(norms)

    iom_f.finalize()
    iom_h.finalize()

    return (axisdata, normdata, number_simulations, number_components)
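A short usage sketch for this variant; the results path is hypothetical, and the Euclidean norm is chosen so that each curve is a plain 1D array:

axisdata, normdata, nsims, ncomps = load_data("results", which_norm="2")

# One error curve over time per simulation, labelled by its eps value
for eps, norms in zip(axisdata, normdata):
    print("eps = " + str(eps) + ": largest error over time = " + str(norms.max()))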
def load_data(resultspath, which_norm="wf"):
    # Group the data from different simulations
    ids = FT.get_result_dirs(resultspath)
    ids = FT.group_by(ids, "eps")
    nsims = FT.get_number_simulations(resultspath)

    groupdata = []
    axisdata = [ [] for i in xrange(nsims) ]
    normdata = [ [] for i in xrange(nsims) ]

    iom_f = IOManager()
    iom_h = IOManager()

    for index, sims in enumerate(ids):
        # Sorting based on file names
        dirs_f = FT.gather_all(sims, "fourier")
        dirs_h = FT.gather_all(sims, "hagedorn")

        if len(dirs_f) != len(dirs_h):
            raise ValueError("Found different numbers of fourier and hagedorn simulations!")

        dirs_f = FT.sort_by(dirs_f, "eps", as_string=True)
        dirs_h = FT.sort_by(dirs_h, "eps", as_string=True)

        # Loop over all simulations
        for dir_f, dir_h in zip(dirs_f, dirs_h):
            print("Comparing simulation " + dir_h + " with " + dir_f)

            resultsfile_f = FT.get_results_file(dir_f)
            iom_f.open_file(filename=resultsfile_f)

            resultsfile_h = FT.get_results_file(dir_h)
            iom_h.open_file(filename=resultsfile_h)

            # Read the parameters
            parameters_f = iom_f.load_parameters()
            parameters_h = iom_h.load_parameters()

            grid = iom_f.load_grid(blockid="global")

            # Precalculate the eigenvectors for efficiency
            Potential = PotentialFactory().create_potential(parameters_f)
            eigenvectors = Potential.evaluate_eigenvectors_at(grid)

            # Get the data
            # Number of time steps we saved
            timesteps = iom_f.load_wavefunction_timegrid()

            # Scalar parameter that discriminates the simulations
            axisdata[index].append((parameters_f, timesteps))

            WF = WaveFunction(parameters_f)
            WF.set_grid(grid)

            norms = []

            for i, step in enumerate(timesteps):
                # Load the data that belong to the current timestep
                data_f = iom_f.load_wavefunction(timestep=step)
                data_h = iom_h.load_wavefunction(timestep=step)

                data_f = Potential.project_to_eigen(grid, data_f, eigenvectors)
                data_f = array(data_f)

                data_diff = data_f - data_h

                # Compute the norm || u_f - u_h ||
                if which_norm == "wf":
                    # Rearrange the data to fit the input of WF and hand it over
                    WF.set_values([ data_diff[n,:] for n in xrange(parameters_f.ncomponents) ])
                    curnorm = WF.get_norm()

                    # More than one component? If so, compute the overall norm too
                    if parameters_f.ncomponents > 1:
                        nosum = WF.get_norm(summed=True)
                        curnorm = list(curnorm) + [nosum]

                elif which_norm == "max":
                    curnorm = [ max(abs(data_diff[n,:])) for n in xrange(parameters_f.ncomponents) ]

                    # More than one component? If so, compute the overall norm too
                    if parameters_f.ncomponents > 1:
                        nosum = max(curnorm)
                        curnorm = list(curnorm) + [nosum]

                print(" at time " + str(step*parameters_f.dt) + " the error norm is " + str(curnorm))

                norms.append(curnorm)

            # Append the norm values to the global data structure
            norms = array(norms)
            normdata[index].append(norms)

        # Scalar parameter of the different curves
        # We add this here because the simulation parameters are
        # already loaded but not yet overwritten by the next iteration.
        # Remember: we need only a single dt value out of each group.
        groupdata.append(parameters_f.dt)

    iom_f.finalize()
    iom_h.finalize()

    return (groupdata, axisdata, normdata)
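And a sketch for this grouped variant; again the results path is hypothetical. Each entry of ``groupdata`` holds the dt value of one epsilon group, and ``normdata[index]`` collects one norm array per simulation in that group:

groupdata, axisdata, normdata = load_data("results", which_norm="max")

for dt, norms in zip(groupdata, normdata):
    print("dt = " + str(dt) + ": compared " + str(len(norms)) + " simulations")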