def timeloop(): global a, a_dump, drift_fac, i_dump, kick_fac, t, Δt # Do nothing if no dump times exist if len(a_dumps) == 0: return # Get the output filename patterns output_filenames = prepare_output_times() # Load initial conditions particles = load_particles(IC_file) # The number of time steps before Δt is updated Δt_update_freq = 10 # Initial cosmic time t, where a(t) = a_begin a = a_begin t = cosmic_time(a) # The time step size should be a # small fraction of the age of the universe. Δt = Δt_factor*t # Arrays containing the drift and kick factors ∫_t^(t + Δt/2)dt/a # and ∫_t^(t + Δt/2)dt/a**2. The two elements in each variable are # the first and second half of the factor for the entire time step. drift_fac = zeros(2, dtype=C2np['double']) kick_fac = zeros(2, dtype=C2np['double']) # Scalefactor at next dump and a corresponding index i_dump = 0 a_dump = a_dumps[i_dump] # Possible output at a == a_begin dump(particles, output_filenames) # The main time loop masterprint('Begin main time loop') timestep = -1 while i_dump < len(a_dumps): timestep += 1 # Print out message at beginning of each time step masterprint(terminal.bold('\nTime step {}'.format(timestep)) + '{:<14} {}' .format('\nScale factor:', significant_figures(a, 4, fmt='Unicode')) + '{:<14} {} Gyr'.format('\nCosmic time:', significant_figures(t/units.Gyr, 4, fmt='Unicode')) ) # Kick (first time is only half a kick, as kick_fac[1] == 0) do_kick_drift_integrals(0) particles.kick(kick_fac[0] + kick_fac[1]) if dump(particles, output_filenames, 'drift'): continue # Update Δt every Δt_update_freq time step if not (timestep % Δt_update_freq): # Let the positions catch up to the momenta particles.drift(drift_fac[0]) Δt = Δt_factor*t # Reset the second kick factor, # making the next operation a half kick. kick_fac[1] = 0 continue # Drift do_kick_drift_integrals(1) particles.drift(drift_fac[0] + drift_fac[1]) if dump(particles, output_filenames, 'kick'): continue
# This file has to be run in pure Python mode! # Include the concept_dir in the searched paths and get directory of this file import sys, os sys.path.append(os.environ['concept_dir']) this_dir = os.path.dirname(os.path.realpath(__file__)) # Imports from the CO𝘕CEPT code from commons import * from snapshot import load_particles # Read in the snapshot particles = load_particles(this_dir + '/snapshot') N = particles.N posx = particles.posx posy = particles.posy posz = particles.posz # Volume and linear size of cube the volume of a sphere with radius R_tophat V = 4*π/3*R_tophat**3 L = V**(1/3) # The number of complete L*L*L cubes within the box N_cubes_lin = int(boxsize//L) N_cubes = N_cubes_lin**3 # The number of particles in each of these cubes, if the snapshot is completely homogeneous N_in_cubes_homo = N*V/boxsize**3 # Count how many particles lie within each of the L*L*L cubes
# Include the concept_dir in the searched paths and get directory of this file import sys, os sys.path.append(os.environ['concept_dir']) this_dir = os.path.dirname(os.path.realpath(__file__)) # Imports from the CO𝘕CEPT code from commons import * from snapshot import load_particles # Determine the number of snapshots from the outputlist file N_snapshots = 1 # Read in data from the CO𝘕CEPT snapshots particles_cython = [] for i in (1, 2, 4): particles_cython.append(load_particles(this_dir + '/output/snapshot_cython_' + str(i), compare_params=False)) particles_python = [] for i in (1, 2, 4): particles_python.append(load_particles(this_dir + '/output/snapshot_python_' + str(i), compare_params=False)) # Using the particle order of the 0'th snapshot as the standard, find the corresponding # ID's in the snapshots and order these particles accoringly. N = particles_python[0].N D2 = zeros(N) ID = zeros(N, dtype='int') for i in range(N_snapshots): for j in range(3): x = particles_cython[j].posx y = particles_cython[j].posy z = particles_cython[j].posz x_procs = particles_python[j].posx
# Analysis script: compare CO𝘕CEPT snapshots from runs on 1, 2, 4 and
# 8 processes against the single-process result, for every output
# time listed in the outputlist file.  NOTE(review): the innermost
# matching loop is truncated at the end of this chunk.

# Include the concept_dir in the searched paths and get directory of this file
import sys, os
sys.path.append(os.environ['concept_dir'])
this_dir = os.path.dirname(os.path.realpath(__file__))

# Imports from the CO𝘕CEPT code
# (star import supplies e.g. np, zeros — defined in commons)
from commons import *
from snapshot import load_particles

# Determine the number of snapshots from the outputlist file
N_snapshots = np.loadtxt(this_dir + '/outputlist').size

# Read in data from the CO𝘕CEPT snapshots.
# particles[i][k] holds output time i run on (1, 2, 4, 8)[k] processes.
particles = []
for i in range(N_snapshots):
    fname = 'snapshot_a={:.2f}'.format(np.loadtxt(this_dir
                                                  + '/outputlist')[i])
    particles.append([load_particles(this_dir + '/output_' + str(j)
                                     + '/' + fname,
                                     compare_params=False)
                      for j in (1, 2, 4, 8)])

# Using the particle order of the 0'th snapshot as the standard, find
# the corresponding ID's in the snapshots and order these particles
# accordingly.
N = particles[0][0].N
D2 = zeros(N)
ID = zeros(N, dtype='int')
for i in range(N_snapshots):
    # Reference positions: the single-process run
    x = particles[i][0].posx
    y = particles[i][0].posy
    z = particles[i][0].posz
    # j indexes the multi-process runs (2, 4 and 8 processes)
    for j in (1, 2, 3):
        x_procs = particles[i][j].posx
        y_procs = particles[i][j].posy
        z_procs = particles[i][j].posz
        for l in range(N):