def resonance_mass_distributions(axes):
    """
    Verification of mass distributions for several resonance species.

    Grey dashed lines are the Breit-Wigner distributions with mass-dependent
    width, grey solid lines are the same distributions with momentum
    integrated out, and colored lines are histograms of the sampled masses.

    """
    with axes() as ax:
        T = .15

        for ID, name in [
                (213, r'$\rho(770)$'),
                (2214, r'$\Delta(1232)$'),
                (22212, r'$N(1535)$'),
        ]:
            info = frzout.species_dict[ID]
            m0 = info['mass']
            w0 = info['width']
            m_min, m_max = info['mass_range']
            sign = -1 if info['boson'] else 1

            def bw(m):
                # Breit-Wigner with mass-dependent width
                w = w0 * np.sqrt((m - m_min) / (m0 - m_min))
                return w / ((m - m0)**2 + w * w / 4)

            def f(p, m):
                # thermal momentum distribution
                return p * p / (np.exp(np.sqrt(p * p + m * m) / T) + sign)

            m = np.linspace(m_min, m_max, 200)

            ax.plot(m, bw(m) / integrate.quad(bw, m_min, m_max)[0],
                    **dashed_line)

            bwf = np.array([
                integrate.quad(lambda p: bw(m_) * f(p, m_), 0, 5)[0]
                for m_ in m
            ]) / integrate.dblquad(
                lambda m_, p: bw(m_) * f(p, m_),
                0, 5, lambda _: m_min, lambda _: m_max
            )[0]
            ax.plot(m, bwf, color=default_color)

            hrg = frzout.HRG(T, species=[ID], res_width=True)

            x = np.array([[1, 0, 0, 0]], dtype=float)
            sigma = np.array([[1e6 / hrg.density(), 0, 0, 0]])
            v = np.zeros((1, 3))
            surface = frzout.Surface(x, sigma, v)

            parts = frzout.sample(surface, hrg)

            # invariant mass from the sampled four-momenta
            m = np.sqrt(np.inner(parts['p']**2, [1, -1, -1, -1]))
            ax.hist(m, bins=64, density=True, histtype='step', label=name)

        ax.set_xlim(0, 2)
        ax.set_xlabel('Mass [GeV]')
        ax.set_ylabel('Probability')
        ax.set_yticklabels([])
        ax.legend(loc='upper left')
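# ---------------------------------------------------------------------------
# The test sections in this file refer to several module-level names that do
# not appear in this excerpt (imports, plot styles, constants, particle
# lists).  The definitions below are minimal sketches consistent with how
# those names are used; the actual values in the full test module may differ.
# ---------------------------------------------------------------------------

import warnings

import numpy as np
from scipy import integrate, special
import matplotlib.pyplot as plt

import frzout

# hbar*c in GeV fm (assumed unit convention: momenta in GeV, lengths in fm)
hbarc = 0.1973269788

# assumed styles for the grey reference lines in the plots
default_color = '.5'
dashed_line = dict(linestyle='dashed', color=default_color)
font_size = 10  # assumed; used for the heatmap annotations below

# assumed identified-particle list (ID, label) used by several sections
id_parts = [
    (211, r'$\pi$'),
    (321, r'$K$'),
    (2212, r'$p$'),
]


def minus_sign(s):
    # sketch of the undefined helper, inferred from its usage:
    # replace ASCII hyphens with proper minus signs for figure text
    return s.replace('-', '\u2212')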
def shear_and_bulk(axes):
    r"""
    Shear tensor with `\pi^{xx} = -\pi^{yy}` and all other components zero,
    and bulk pressure fixed at `\Pi = -0.1P_0`.  This is a very difficult
    test case, but the algorithm remains accurate for small to moderate
    viscous pressure.

    """
    hrg = frzout.HRG(.15, res_width=False)

    P0 = hrg.pressure()
    e0 = hrg.energy_density()

    x = np.array([[1., 0, 0, 0]])
    sigma = np.array([[1e6 / hrg.density(), 0, 0, 0]])
    v = np.zeros((1, 3))

    pi_frac = np.linspace(-.5, .5, 11)
    Pi_frac = -.1

    Tuv = np.array([
        sample_Tuv(
            frzout.Surface(
                x, sigma, v,
                pi=make_pi_dict(xx=i * P0, yy=-i * P0),
                Pi=np.array([Pi_frac * P0])
            ),
            hrg
        )
        for i in pi_frac
    ]).T

    P = Tuv.diagonal()[:, 1:].sum(axis=1) / 3

    with axes() as ax:
        ax.plot(pi_frac, (Tuv[1, 1] - P) / P0, label=r'$\pi_{xx}$')
        ax.plot(pi_frac, pi_frac, **dashed_line)

        ax.plot(pi_frac, (Tuv[2, 2] - P) / P0, label=r'$\pi_{yy}$')
        ax.plot(pi_frac, -pi_frac, **dashed_line)

        ax.plot(pi_frac, P / P0 - 1, label='Pressure')
        ax.axhline(Pi_frac, **dashed_line)

        ax.plot(pi_frac, Tuv[0, 0] / e0 - 1, label='Energy density')
        ax.axhline(0, **dashed_line)

        ax.set_xlim(pi_frac.min(), pi_frac.max())
        ax.set_ylim(pi_frac.min(), pi_frac.max())
        ax.set_xlabel(r'$\pi_{xx}/P_0,\ -\pi_{yy}/P_0$')
        ax.set_ylabel(
            r'$\pi_{ij}/P_0,\ \Delta P/P_0,\ \Delta\epsilon/\epsilon_0$')
        ax.legend(loc='upper center')
def shear_viscous_corrections(axes):
    r"""
    Verification that the desired shear tensor `\pi^{\mu\nu}` is reproduced.
    This test case checks that nonzero `\pi^{xy}` is reproduced without
    changing the equilibrium pressure or energy density.  The algorithm
    starts to break down at very large shear pressure.  The "shear and bulk"
    section has additional checks.

    """
    hrg = frzout.HRG(.15, res_width=False)

    P0 = hrg.pressure()
    e0 = hrg.energy_density()

    x = np.array([[1., 0, 0, 0]])
    sigma = np.array([[1e6 / hrg.density(), 0, 0, 0]])
    v = np.zeros((1, 3))

    pi_frac = np.linspace(-.5, .5, 11)

    Tuv = np.array([
        sample_Tuv(
            frzout.Surface(x, sigma, v, pi=make_pi_dict(xy=i * P0)),
            hrg
        )
        for i in pi_frac
    ]).T

    P = Tuv.diagonal()[:, 1:].sum(axis=1) / 3

    with axes() as ax:
        ax.plot(pi_frac, Tuv[1, 2] / P0, label=r'$\pi_{xy}$')
        ax.plot(pi_frac, pi_frac, **dashed_line)

        ax.plot(pi_frac, Tuv[1, 3] / P0, label=r'$\pi_{xz}$')
        ax.plot(pi_frac, P / P0 - 1, label='Pressure')
        ax.plot(pi_frac, Tuv[0, 0] / e0 - 1, label='Energy density')
        ax.axhline(0, **dashed_line)

        ax.set_xlim(pi_frac.min(), pi_frac.max())
        ax.set_ylim(pi_frac.min(), pi_frac.max())
        ax.set_xlabel(r'$\pi_{xy}/P_0$')
        ax.set_ylabel(
            r'$\pi_{ij}/P_0,\ \Delta P/P_0,\ \Delta\epsilon/\epsilon_0$')
        ax.legend(loc='upper left')
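# `make_pi_dict` and `sample_Tuv` are used by the shear/bulk sections above
# and by stress_energy_tensor() below, but are not defined in this excerpt.
# These are minimal sketches consistent with their usage: make_pi_dict wraps
# shear components into the dict-of-arrays form expected by frzout.Surface,
# and sample_Tuv estimates T^{uv} = (1/V) sum_i p_i^u p_i^v / E_i by summing
# over sampled particles.  The effective volume is not recoverable from the
# surface object here, so it is passed explicitly, defaulting to the
# 1e6/density used by the test surfaces above; the real helper may obtain it
# differently (stress_energy_tensor() uses 1e7/density, for example).

def make_pi_dict(**kwargs):
    # e.g. make_pi_dict(xy=0.1 * P0) -> {'xy': np.array([0.1 * P0])}
    return {k: np.atleast_1d(v) for k, v in kwargs.items()}


def sample_Tuv(surface, hrg, volume=None, nsamples=10):
    # sum p^u p^v / E over sampled particles, averaged over several samples
    if volume is None:
        volume = 1e6 / hrg.density()  # assumed; matches the surfaces above
    Tuv = np.zeros((4, 4))
    for _ in range(nsamples):
        p = frzout.sample(surface, hrg)['p']
        Tuv += np.einsum('au,av,a->uv', p, p, 1 / p[:, 0])
    return Tuv / (nsamples * volume)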
def equation_of_state(axes):
    """
    Comparison of thermodynamic quantities from phase-space integrals (grey
    dashed lines) to averages over sampled particles (solid colored lines).

    """
    with axes() as ax:
        volume = 1e6

        x = np.array([[1, 0, 0, 0]], dtype=float)
        sigma = np.array([[volume, 0, 0, 0]])
        v = np.zeros((1, 3))
        surface = frzout.Surface(x, sigma, v)

        def eos_quantities(T):
            hrg = frzout.HRG(T, res_width=False)
            parts = frzout.sample(surface, hrg)

            E = parts['p'][:, 0]
            psq = (parts['p'][:, 1:]**2).sum(axis=1)

            T3 = (T / hbarc)**3
            T4 = T * T3

            # (calculated, sampled) pairs
            return [
                (hrg.density() / T3, parts.size / volume / T3),
                (hrg.energy_density() / T4, E.sum() / volume / T4),
                (3 * hrg.pressure() / T4,
                 3 * (psq / (3 * E)).sum() / volume / T4),
            ]

        T = np.linspace(100, 180, 20) / 1000

        for quantity, label in zip(
                np.array([eos_quantities(t) for t in T]).transpose(1, 2, 0),
                ['$n/T^3$', r'$\epsilon/T^4$', '$3p/T^4$']
        ):
            ax.plot(T, quantity[1], label=label)
            ax.plot(T, quantity[0], **dashed_line)

        ax.set_xlabel('Temperature [GeV]')
        ax.legend(loc='upper left')
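# The "phase-space integrals" compared against above are the standard
# ideal-gas Bose/Fermi integrals, e.g. for the particle density
#
#     n = g / (2 pi^2 hbarc^3) * int_0^inf dp p^2 / (exp(E/T) +- 1).
#
# A single-species cross-check using scipy quadrature.  This is an
# independent sketch, not frzout's implementation (which also handles
# resonance widths and the full species list):

def check_density(ID, T):
    info = frzout.species_dict[ID]
    m, g = info['mass'], info['degen']
    sign = -1 if info['boson'] else 1
    # upper limit of 10 GeV is effectively infinity for T ~ 0.15 GeV
    n = g / (2 * np.pi**2 * hbarc**3) * integrate.quad(
        lambda p: p * p / (np.exp(np.sqrt(p*p + m*m) / T) + sign), 0, 10
    )[0]
    # should agree with frzout.HRG(T, species=[ID], res_width=False).density()
    return n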
    # (beginning of this structured dtype is not included in this excerpt)
    ('dN_dy', [(s, float_t) for (s, _) in species]),
    ('mean_pT', [(s, float_t) for (s, _) in species]),
    ('pT_fluct', [('N', int_t),
                  ('sum_pT', float_t),
                  ('sum_pTsq', float_t)]),
    ('flow', [('N', int_t), ('Qn', complex_t, 8)]),
])

for n, ic in enumerate(initial_conditions, start=1):
    if n == 1:
        results = run_single_event(ic)
        resultsdict = dict(
            zip(['x', 'sigma', 'v'], np.hsplit(results, [3, 6, 8])),
            pi=dict(zip(['xx', 'xy', 'yy'], results.T[11:14])),
            Pi=results.T[15]
        )
        resultspce = run_single_event(ic, pce=True)

        finalsurface = frzout.Surface(**resultsdict, ymax=2)
        finalsurfacepce = frzout.Surface(**resultspce, ymax=2)

        finalresults['initial_entropy'] = ic.sum() * grid_step**2
        finalresultspce['initial_entropy'] = ic.sum() * grid_step**2
    else:
        continue

minsamples, maxsamples = 10, 1000  # reasonable range for nsamples
minparts = 10**5  # min number of particles to sample
nparts = 0  # for tracking total number of sampled particles
npartspce = 0

hrg_kwargs = dict(species='urqmd', res_width=True)
hrg = frzout.HRG(0.150, **hrg_kwargs)
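# The results record above is a nested numpy structured array that is
# written directly to disk.  A minimal sketch of how such a dtype behaves
# (the field names and dtype strings here are illustrative only):

demo = np.zeros((), dtype=[
    ('dNch_deta', '<f8'),
    ('flow', [('N', '<i8'), ('Qn', '<c16', 8)]),
])
demo['flow']['N'] = 1234              # nested field access
demo['flow']['Qn'][1] = .05 + .02j    # e.g. the n=2 Q-vector slot
demo.tofile('results_demo.dat')       # flat binary, one record per event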
def run_single_event(ic, nb, event_number):
    """
    Run the initial condition event contained in HDF5 dataset object `ic`
    and save observables to `results`.

    """
    results.fill(0)
    results['initial_entropy'] = ic.sum() * grid_step**2
    results['Ncoll'] = nb.sum() * grid_step**2
    logging.info("Nb %d", results['Ncoll'])
    assert all(n == grid_n for n in ic.shape)

    logging.info(
        'free streaming initial condition for %.3f fm', args.tau_fs
    )
    fs = freestream.FreeStreamer(ic, grid_max, args.tau_fs)

    # run coarse event on large grid and determine max radius
    rmax = math.sqrt((
        run_hydro(ic, event_size=27, coarse=3)['x'][:, 1:3]**2
    ).sum(axis=1).max())
    logging.info('rmax = %.3f fm', rmax)

    # now run normal event with size set to the max radius
    # and create sampler surface object
    surface = frzout.Surface(**run_hydro(ic, event_size=rmax), ymax=2)
    logging.info('%d freeze-out cells', len(surface))

    # sample particles for UrQMD events
    logging.info('sampling surface with frzout')
    minsamples, maxsamples = 10, 400  # reasonable range for nsamples
    minparts = 10**5  # min number of particles to sample
    nparts = 0  # for tracking total number of sampled particles

    with open('particles_in.dat', 'w') as f:
        for nsamples in range(1, maxsamples + 1):
            parts = frzout.sample(surface, hrg)
            if parts.size == 0:
                continue
            nparts += parts.size
            print('#', parts.size, file=f)
            for p in parts:
                print(p['ID'], *p['x'], *p['p'], file=f)
            if nparts >= minparts and nsamples >= minsamples:
                break

    results['nsamples'] = nsamples
    logging.info('produced %d particles in %d samples', nparts, nsamples)

    if nparts == 0:
        raise StopEvent('no particles produced')

    # ========================= Heavy Flavor =========================
    # run Pythia + Lido
    prefix = os.environ.get('XDG_DATA_HOME')
    run_cmd(
        'hydro-couple',
        '-y {:s}/pythia-setting.txt'.format(prefix),
        '-i ./initial.hdf',
        '-j {:d}'.format(0 if args.nevents is None else event_number - 1),
        '--hydro ./JetData.h5',
        '-s {:s}/settings.xml'.format(prefix),
        '-t {:s}'.format(args.table_path),
        '-n {:d}'.format(args.NPythiaEvents),
        args.lido_args,
    )

    # hadronization
    hq = 'c'
    prefix = os.environ.get('XDG_DATA_HOME') + "/hvq-hadronization/"
    os.environ["ftn20"] = "{}-meson-frzout.dat".format(hq)
    os.environ["ftn30"] = prefix + "parameters_{}_hd.dat".format(hq)
    os.environ["ftn40"] = prefix + "recomb_{}_tot.dat".format(hq)
    os.environ["ftn50"] = prefix + "recomb_{}_BR1.dat".format(hq)
    logging.info(os.environ["ftn30"])
    subprocess.run(
        "hvq-hadronization",
        stdin=open("{}-quark-frzout.dat".format(hq))
    )

    # ================== Heavy + Soft --> UrQMD ==================
    run_cmd('convert_format {} particles_in.dat c-meson-frzout.dat'
            .format(nsamples))
    run_cmd('run_urqmd urqmd_input.dat particles_out.dat')

    # read final particle data
    ID, charge, fmass, px, py, pz, y, eta, pT0, y0, w, _ = (
        np.array(col, dtype=dtype) for (col, dtype) in zip(
            zip(*read_text_file('particles_out.dat')),
            (2 * [int] + 10 * [float])
        )
    )

    # pT, phi, and ID cuts
    pT = np.sqrt(px**2 + py**2)
    phi = np.arctan2(py, px)
    ET = np.sqrt(fmass**2 + pT**2)  # transverse energy
    charged = (charge != 0)
    abs_eta = np.fabs(eta)
    abs_ID = np.abs(ID)

    # It may be redundant to find b-hadrons at this stage since UrQMD has
    # not included them yet.
    heavy_pid = [pid for (_, pid) in species.get('heavy')]
    is_heavy = np.array([u in heavy_pid for u in abs_ID], dtype=bool)
    is_light = np.logical_not(is_heavy)

    # ==================== soft particles ====================
    results['dNch_deta'] = \
        np.count_nonzero(charged & (abs_eta < .5) & is_light) / nsamples

    ET_eta = .6
    results['dET_deta'] = ET[abs_eta < ET_eta].sum() / (2*ET_eta) / nsamples

    for s, pid in species.get('light'):
        cut = (abs_ID == pid) & (abs_eta < 0.5)
        N = np.count_nonzero(cut)
        results['dN_dy'][s] = N / nsamples
        results['mean_pT'][s] = (0. if N == 0 else pT[cut].mean())

    pT_alice = pT[charged & (abs_eta < .8) & (.15 < pT) & (pT < 2.)]
    results['pT_fluct']['N'] = pT_alice.size
    results['pT_fluct']['sum_pT'] = pT_alice.sum()
    results['pT_fluct']['sum_pTsq'] = np.inner(pT_alice, pT_alice)

    phi_alice = phi[charged & (abs_eta < .8) & (.2 < pT) & (pT < 5.)]
    results['Qn_soft']['M'] = phi_alice.size
    results['Qn_soft']['Qn'] = [
        np.exp(1j * n * phi_alice).sum()
        for n in range(1, results.dtype['Qn_soft']['Qn'].shape[0] + 1)
    ]

    # ==================== heavy flavors ====================
    for exp in ['ALICE', 'CMS']:
        # event-plane Q-vector from UrQMD events
        phi_light = phi[
            charged & is_light
            & (JEC[exp]['vn_ref']['ybins'][0] < eta)
            & (eta < JEC[exp]['vn_ref']['ybins'][1])
            & (JEC[exp]['vn_ref']['pTbins'][0] < pT)
            & (pT < JEC[exp]['vn_ref']['pTbins'][1])
        ]
        results['Qn_ref_' + exp]['M'] = phi_light.shape[0]
        results['Qn_ref_' + exp]['Qn'] = np.array([
            np.exp(1j * n * phi_light).sum() for n in range(1, 5)
        ])

        # for charmed hadrons, use info after UrQMD
        HF_dict = {
            'pid': abs_ID[is_heavy],
            'pT':  pT[is_heavy],
            'y':   y[is_heavy],
            'phi': phi[is_heavy],
            'w':   w[is_heavy],  # weights (normalized per unit area)
        }
        POI = [pid for (_, pid) in species.get('heavy')]
        flow = JLP.Qvector(HF_dict, JEC[exp]['vn_HF']['pTbins'],
                           JEC[exp]['vn_HF']['ybins'], POI, order=4)
        Yield = JLP.Yield(HF_dict, JEC[exp]['Raa']['pTbins'],
                          JEC[exp]['Raa']['ybins'], POI)
        for (s, pid) in species.get('heavy'):
            results['dX_dpT_dy_' + exp][s] = Yield[pid][:, 0]
            results['Qn_poi_' + exp][s]['M'] = flow[pid]['M'][:, 0]
            results['Qn_poi_' + exp][s]['Qn'] = flow[pid]['Qn'][:, 0, :]

    # ==================== full pT prediction ====================
    # use a high-precision Q-vector at the end of hydro: oversample to get
    # a high-precision event plane at freeze-out
    ophi_light = np.empty(0)
    nloop = 0
    while ophi_light.size < 10**6 and nloop < 100000:
        nloop += 1
        oE, opx, opy, opz = frzout.sample(surface, hrg)['p'].T
        oM, opT, oy, ophi = JLP.fourvec_to_curvelinear(opx, opy, opz, oE)
        ophi = ophi[(-2 < oy) & (oy < 2) & (0.2 < opT) & (opT < 5.0)]
        ophi_light = np.append(ophi_light, ophi)
    results['Qn_ref_pred']['M'] = ophi_light.shape[0]
    results['Qn_ref_pred']['Qn'] = np.array([
        np.exp(1j * n * ophi_light).sum() for n in range(1, 5)
    ])
    del ophi_light

    # for charmed hadrons, use info after UrQMD
    HF_dict = {
        'pid': abs_ID[is_heavy],
        'pT':  pT[is_heavy],
        'y':   y[is_heavy],
        'phi': phi[is_heavy],
        'w':   w[is_heavy],
    }
    POI = [pid for (_, pid) in species.get('heavy')]
    flow = JLP.Qvector(HF_dict, JEC['pred-pT'], [[-2, 2]], POI, order=4)
    Yield = JLP.Yield(HF_dict, JEC['pred-pT'], [[-1, 1]], POI)
    for (s, pid) in species.get('heavy'):
        results['dX_dpT_dy_pred'][s] = Yield[pid][:, 0]
        results['Qn_poi_pred'][s]['M'] = flow[pid]['M'][:, 0]
        results['Qn_poi_pred'][s]['Qn'] = flow[pid]['Qn'][:, 0]
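# `run_cmd`, `StopEvent`, and `read_text_file` are used throughout
# run_single_event() but are defined elsewhere in the event-runner script.
# Minimal sketches consistent with their usage here (the real helpers may
# log and handle errors differently):

import logging
import subprocess


class StopEvent(Exception):
    """Raise to end an event early (e.g. when no particles are produced)."""


def run_cmd(*args):
    """Join the args into one command line, run it, and check the exit code."""
    cmd = ' '.join(str(a) for a in args)
    logging.info('running command: %s', cmd)
    subprocess.run(cmd.split(), check=True)


def read_text_file(filename):
    """Yield the split lines of a whitespace-delimited text file,
    skipping comment lines."""
    with open(filename) as f:
        for line in f:
            if not line.startswith('#'):
                yield line.split()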
def _realistic_surface_observables():
    """
    Compute observables for the "realistic surface" test case.

    """
    with open('test_surface.dat', 'rb') as f:
        surface_data = np.array(
            [l.split() for l in f if not l.startswith(b'#')],
            dtype=float
        )

    # 0    1  2  3         4         5         6    7
    # tau  x  y  dsigma_t  dsigma_x  dsigma_y  v_x  v_y
    # 8     9     10    11    12    13    14    15
    # pitt  pitx  pity  pixx  pixy  piyy  pizz  Pi
    x, sigma, v, _ = np.hsplit(surface_data, [3, 6, 8])
    pixx, pixy, piyy = surface_data.T[11:14]
    Pi = surface_data.T[15]

    sigma4 = np.zeros((sigma.shape[0], 4))
    sigma4[:, :3] = sigma
    sigma4 *= x[:, :1]

    u_ = np.zeros((v.shape[0], 4))
    u_[:, 0] = 1
    u_[:, 1:3] = -v
    u_ /= np.sqrt(1 - np.square(v).sum(axis=1))[:, np.newaxis]

    vx, vy = v.T

    pi_uv = np.zeros((pixx.shape[0], 4, 4))
    pi_uv[:, 0, 0] = vx * vx * pixx + vy * vy * piyy + 2 * vx * vy * pixy
    pi_uv[:, 1, 1] = pixx
    pi_uv[:, 2, 2] = piyy
    pi_uv[:, 3, 3] = pi_uv[:, 0, 0] - pixx - piyy
    pi_uv[:, 0, 1] = pi_uv[:, 1, 0] = -(vx * pixx + vy * pixy)
    pi_uv[:, 0, 2] = pi_uv[:, 2, 0] = -(vx * pixy + vy * piyy)
    pi_uv[:, 1, 2] = pi_uv[:, 2, 1] = pixy

    pT_max = 4
    pT_bins = np.linspace(0, pT_max, 41)
    pT = (pT_bins[:-1] + pT_bins[1:]) / 2
    delta_pT = pT_max / (pT_bins.size - 1)

    phi = np.linspace(0, 2 * np.pi, 100, endpoint=False)

    eta, eta_weights = special.ps_roots(30)
    eta_max = 4
    eta *= eta_max
    eta_weights *= 2 * eta_max

    T = .145
    hrg = frzout.HRG(T, res_width=False)
    eta_over_tau = hrg.eta_over_tau()
    zeta_over_tau = hrg.zeta_over_tau()
    cs2 = hrg.cs2()

    the_vn = [2, 3, 4]

    def calc_obs(ID):
        m = frzout.species_dict[ID]['mass']
        degen = frzout.species_dict[ID]['degen']
        sign = -1 if frzout.species_dict[ID]['boson'] else 1

        pT_, phi_, eta_ = np.meshgrid(pT, phi, eta)
        mT_ = np.sqrt(m * m + pT_ * pT_)
        p = np.array([
            mT_ * np.cosh(eta_),
            pT_ * np.cos(phi_),
            pT_ * np.sin(phi_),
            mT_ * np.sinh(eta_)
        ]).T

        # ignore negative contributions
        psigma = np.inner(p, sigma4)
        psigma.clip(min=0, out=psigma)

        pu = np.inner(p, u_)

        with np.errstate(over='ignore'):
            f = 1 / (np.exp(pu / T) + sign)

        # bulk and shear viscous corrections
        df = f * (1 - sign * f) * (
            ((pu * pu - m * m) / (3 * pu) - cs2 * pu)
            / (zeta_over_tau * T) * Pi
            + np.einsum('ijku,ijkv,auv->ijka', p, p, pi_uv)
            / (2 * pu * T * eta_over_tau)
        )
        f += df

        # (phi, pT) distribution
        phi_pT_dist = (
            2 * degen
            * np.einsum('i,ijka,ijka->jk', eta_weights, psigma, f)
            / (2 * np.pi * hbarc)**3 / phi.size
        )
        pT_dist = phi_pT_dist.sum(axis=1)

        # navg, pT dist, qn(pT)
        return (
            2 * np.pi * delta_pT * np.inner(pT, pT_dist),
            pT_dist,
            [
                np.inner(np.exp(1j * n * phi), phi_pT_dist) / pT_dist
                for n in the_vn
            ]
        )

    obs_calc = [calc_obs(i) for i, _ in id_parts]

    surface = frzout.Surface(
        x, sigma, v,
        pi=dict(xx=pixx, yy=piyy, xy=pixy),
        Pi=Pi
    )

    ngroups = 1000
    N = 1000  # nsamples per group
    nsamples = ngroups * N  # need many samples for diff flow

    # too many to store all particles in memory -> accumulate observables
    obs_sampled = [
        (
            np.empty(nsamples, dtype=int),  # ID particle counts
            np.zeros_like(pT),  # pT distribution
            np.zeros((len(the_vn), pT.size)),  # diff flow
        )
        for _ in id_parts
    ]
    diff_flow_counts = [
        np.zeros_like(vn, dtype=int) for (_, _, vn) in obs_sampled
    ]

    from multiprocessing.pool import ThreadPool

    for k in range(ngroups):
        print('  group', k)

        # threading increases performance since sample() releases the GIL
        with ThreadPool() as pool:
            parts = pool.map(lambda _: frzout.sample(surface, hrg), range(N))

        # identified particle counts
        for (i, _), (counts, _, _) in zip(id_parts, obs_sampled):
            counts[k * N:(k + 1) * N] = [
                np.count_nonzero(np.abs(p['ID']) == i) for p in parts
            ]

        # merge all samples
        parts = np.concatenate(parts)
        abs_ID = np.abs(parts['ID'])
        for (i, _), (_, pT_dist, vn_arr), dflow_counts, (_, _, qn_list) \
                in zip(id_parts, obs_sampled, diff_flow_counts, obs_calc):
            parts_ = parts[abs_ID == i]
            px, py = parts_['p'].T[1:3]
            pT_ = np.sqrt(px * px + py * py)
            phi_ = np.arctan2(py, px)

            # pT distribution
            pT_dist += np.histogram(pT_, bins=pT_bins, weights=1 / pT_)[0]

            # differential flow
            for n, vn, dfc, qn in zip(the_vn, vn_arr, dflow_counts, qn_list):
                cosnphi = [
                    np.cos(n * phi_[np.fabs(pT_ - p) < .2] - npsi)
                    for (p, npsi) in zip(pT, np.arctan2(qn.imag, qn.real))
                ]
                vn += [c.sum() for c in cosnphi]
                dfc += [c.size for c in cosnphi]

    # normalize pT dists and diff flow
    for (_, pT_dist, vn), dflow_counts in zip(obs_sampled, diff_flow_counts):
        pT_dist /= 2 * np.pi * nsamples * delta_pT
        vn /= dflow_counts

    return pT, the_vn, obs_calc, obs_sampled
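# The differential-flow accumulation above implements the event-plane
# method: v_n(pT) = <cos n(phi - Psi_n)>, where the event-plane angle Psi_n
# in each pT bin is taken from the calculated Q-vector qn(pT).  A condensed,
# self-contained version of the same estimator (illustrative only):

def event_plane_vn(phi, n, qn):
    """Estimate v_n of the angles `phi` relative to the plane of Q-vector `qn`."""
    npsi = np.arctan2(qn.imag, qn.real)  # this is n * Psi_n
    return np.cos(n * phi - npsi).mean()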
def stress_energy_tensor(axes):
    r"""
    Verification that the sampling algorithm reproduces the complete
    stress-energy tensor `T^{\mu\nu}` from hydrodynamics, including any
    viscous corrections.

    For each of the following, the flow velocity and viscous pressures are
    chosen randomly, particles are sampled, and the effective stress-energy
    tensor is computed by summing over the sampled particles.  The sampled
    tensor is then compared to the expectation from hydro.

    In the heatmap cells, the first number is the sampled value for the
    given component of the tensor and the second number (in parentheses) is
    the expected value from hydro.  Cells are color-coded: grey indicates
    perfect agreement, red indicates that the sampled value is too large,
    and blue that it is too small.  Some disagreement is expected due to
    statistical fluctuations from the finite number of particles.

    Before each heatmap, the randomly chosen velocity and viscous pressures
    are listed.  The overall magnitude of shear pressure is quantified by
    the Lorentz scalar "pirel" =
    `\sqrt{\pi^{\mu\nu}\pi_{\mu\nu}/(e_0^2 + 3P_0^2)}`.

    """
    hrg = frzout.HRG(.15, res_width=False)
    P0 = hrg.pressure()
    e0 = hrg.energy_density()

    for _ in range(3):
        vmag = np.random.rand()
        cos_theta = np.random.uniform(-1, 1)
        sin_theta = np.sqrt(1 - cos_theta**2)
        phi = np.random.uniform(0, 2 * np.pi)

        vx = vmag * sin_theta * np.cos(phi)
        vy = vmag * sin_theta * np.sin(phi)
        vz = vmag * cos_theta

        pixx, piyy, pixy, pixz, piyz = np.random.uniform(-.2, .2, 5) * P0
        Pi = np.random.uniform(-.3, .3) * P0

        surface = frzout.Surface(
            np.array([[1., 0, 0, 0]]),
            np.array([[1e7 / hrg.density(), 0, 0, 0]]),
            np.array([[vx, vy, vz]]),
            pi={
                k[2:]: np.array([v])
                for k, v in locals().items() if k.startswith('pi')
            },
            Pi=np.array([Pi])
        )

        u = np.array([1, vx, vy, vz]) / np.sqrt(1 - vmag * vmag)

        # remaining shear components from orthogonality and tracelessness
        pitt = (vx * vx * pixx + vy * vy * piyy
                - vz * vz * (pixx + piyy)
                + 2 * vx * vy * pixy
                + 2 * vx * vz * pixz
                + 2 * vy * vz * piyz) / (1 - vz * vz)
        pizz = pitt - pixx - piyy
        pitx = vx * pixx + vy * pixy + vz * pixz
        pity = vx * pixy + vy * piyy + vz * piyz
        pitz = vx * pixz + vy * piyz + vz * pizz

        piuv = np.array([
            [pitt, pitx, pity, pitz],
            [pitx, pixx, pixy, pixz],
            [pity, pixy, piyy, piyz],
            [pitz, pixz, piyz, pizz],
        ])

        uu = np.outer(u, u)
        g = np.array([1, -1, -1, -1], dtype=float)
        Delta = np.diag(g) - uu

        Tuv_check = e0 * uu - (P0 + Pi) * Delta + piuv
        Tuv = u[0] * sample_Tuv(surface, hrg)

        Tmag = np.sqrt(e0 * e0 + 3 * P0 * P0)
        pimag = np.sqrt(np.einsum('uv,uv,u,v', piuv, piuv, g, g))
        diff = (Tuv - Tuv_check) / np.maximum(np.abs(Tuv_check), .1 * Tmag)
        tol = .05

        fmt = '{:.3f}'

        with axes(caption=minus_sign(', '.join([
                'v = (' + ', '.join(3 * [fmt]).format(vx, vy, vz) + ')',
                'pirel = ' + fmt.format(pimag / Tmag),
                'Pi/P0 = ' + fmt.format(Pi / P0),
        ]))) as ax:
            ax.figure.set_size_inches(4.2, 4.2)
            ax.figure.set_dpi(100)

            ax.imshow(diff, cmap=plt.cm.coolwarm, vmin=-tol, vmax=tol)

            for i, j in np.ndindex(*Tuv.shape):
                ax.text(i, j, minus_sign('\n'.join(
                    f.format(x[i, j]) for f, x in [
                        ('{:.4f}', Tuv),
                        ('({:.4f})', Tuv_check),
                    ])),
                    ha='center', va='center', fontsize=.75 * font_size)

            ax.grid(False)
            ax.xaxis.tick_top()

            for i in ['x', 'y']:
                getattr(ax, 'set_{}ticks'.format(i))(range(4))
                getattr(ax, 'set_{}ticklabels'.format(i))(
                    ['t', 'x', 'y', 'z'])
def bulk_viscous_corrections(axes):
    r"""
    Effect of bulk viscosity on thermodynamic quantities and momentum
    distributions.

    The total pressure is the sum of the equilibrium and bulk pressures:
    `P = P_0 + \Pi`.  In order to satisfy continuity of the stress-energy
    tensor, sampled particles must reproduce the total pressure without
    changing the equilibrium energy density; this is achieved by
    parametrically rescaling overall particle production and momentum
    depending on the bulk pressure.

    This works for negative bulk pressure all the way down to zero total
    pressure, but for positive bulk pressure, the necessary momentum scale
    factor diverges as the total pressure approaches twice the equilibrium
    pressure.  The momentum scale is therefore restricted to a reasonable
    maximum (3), which effectively limits the positive bulk pressure to
    around 70% of the equilibrium pressure, depending on the hadron gas
    temperature and composition.

    Most quantities are plotted as the relative change from their
    equilibrium values vs. the relative bulk pressure `\Pi/P_0`.  Colored
    lines are from samples and dashed lines are calculated.

    """
    T = .15
    hrg = frzout.HRG(T, res_width=False)

    volume = 1e6 / hrg.density()
    x = np.array([[1., 0, 0, 0]])
    sigma = np.array([[volume, 0, 0, 0]])
    v = np.zeros((1, 3))

    def sample_bulk(Pi):
        Pi = None if Pi == 0 else np.array([Pi])
        surface = frzout.Surface(x, sigma, v, Pi=Pi)
        parts = frzout.sample(surface, hrg)

        E = parts['p'][:, 0]
        psq = (parts['p'][:, 1:]**2).sum(axis=1)

        return (
            parts.size / volume,
            E.sum() / volume,
            (psq / (3 * E)).sum() / volume,
            np.sqrt(psq).mean()
        )

    n0 = hrg.density()
    e0 = hrg.energy_density()
    P0 = hrg.pressure()
    pavg0 = hrg.mean_momentum()

    Pi = np.linspace(-P0, P0, 31)
    Pi_min, Pi_max = hrg.Pi_lim()

    # ensure that the HRG is sampled at precisely the Pi limits
    for p in hrg.Pi_lim():
        Pi[np.abs(Pi - p).argmin()] = p

    Pi_frac = Pi / P0

    n, e, P, pavg = np.array([sample_bulk(x) for x in Pi]).T

    with axes(
            'Pressure and energy density',
            'Bulk pressure changes the effective pressure without changing '
            'the energy density.'
    ) as ax:
        ax.plot(Pi_frac, P / P0 - 1, label='Pressure ($P$)')
        ax.plot(Pi_frac, Pi_frac.clip(Pi_min / P0, Pi_max / P0),
                **dashed_line)

        ax.plot(Pi_frac, e / e0 - 1, label=r'Energy density ($\epsilon$)')
        ax.axhline(0, **dashed_line)

        ax.set_xlim(Pi_frac.min(), Pi_frac.max())
        ax.set_ylim(Pi_frac.min(), Pi_frac.max())
        ax.set_xlabel(r'$\Pi/P_0$')
        ax.set_ylabel(r'$\Delta P/P_0$, $\Delta\epsilon/\epsilon_0$')
        ax.legend(loc='upper left')

    nscale, pscale = np.array([hrg.bulk_scale_factors(p) for p in Pi]).T

    with axes(
            'Particle density and momentum',
            'The changes in particle density and mean momentum necessary '
            'to achieve the target pressure and energy density.'
    ) as ax:
        for y, y0, ycheck, label in [
                (n, n0, nscale, 'Density ($n$)'),
                (pavg, pavg0, pscale, 'Mean momentum ($p$)'),
        ]:
            ax.plot(Pi_frac, y / y0 - 1, label=label)
            ax.plot(Pi_frac, ycheck - 1, **dashed_line)

        ax.set_xlim(Pi_frac.min(), Pi_frac.max())
        ax.set_xlabel(r'$\Pi/P_0$')
        ax.set_ylabel(r'$\Delta n/n_0$, $\Delta p/p_0$')
        ax.legend(loc='upper left')

    def f(p, ID):
        m, boson, g = (
            frzout.species_dict[ID][k] for k in ['mass', 'boson', 'degen']
        )
        s = -1 if boson else 1
        return g / (np.exp(np.sqrt(m * m + p * p) / T) + s)

    def density(ID, pscale=1):
        return (4 * np.pi) / (2 * np.pi * hbarc)**3 * integrate.quad(
            lambda p: p * p * f(p / pscale, ID), 0, 10)[0]

    ID = 211
    n0 = density(ID)

    with axes(
            'Distribution functions',
            'Pion distribution functions `f(p)` for different bulk pressures. '
            'Colored histograms are samples; dashed lines are target '
            r'distributions with rescaled momentum, `f(\lambda p)`.'
    ) as ax:
        nbins = 50
        w = nbins * (2 * np.pi * hbarc)**3 / (2 * volume * 4 * np.pi)

        for k, Pi_frac in enumerate([0, -.1, -.3]):
            Pi = Pi_frac * P0
            parts = frzout.sample(
                frzout.Surface(x, sigma, v, Pi=np.array([Pi])), hrg)

            psq = (
                parts[np.abs(parts['ID']) == ID]['p'][:, 1:]**2
            ).sum(axis=1)
            pmag = np.sqrt(psq)

            offset = 10**(-k)

            ax.hist(
                pmag, bins=nbins,
                weights=w * offset / psq / pmag.ptp(),
                histtype='step', log=True,
                label=r'$\Pi = ' + (
                    '0' if Pi_frac == 0 and k == 0
                    else r'{}P_0\ (f \times 10^{{{:d}}})'.format(Pi_frac, -k)
                ) + '$'
            )

            p = np.linspace(0, pmag.max(), 200)
            nscale, pscale = hrg.bulk_scale_factors(Pi)
            n = density(ID, pscale)
            ax.plot(p, n0 / n * nscale * offset * f(p / pscale, ID),
                    **dashed_line)

        ax.set_xlim(0)
        ax.set_xlabel(r'$p\ \mathrm{[GeV]}$')
        ax.set_ylabel('$f(p)$')
        ax.legend(title='Pions only')
def moving_box(axes):
    """
    A single 3D volume element with randomly chosen flow velocity and normal
    vector (this randomness means the volume element is not necessarily
    realistic for a heavy-ion collision, but it is still numerically valid).

    Histograms of sampled (x, y, z) momenta are compared to distributions
    computed by numerically integrating the Cooper-Frye function.  Negative
    contributions are ignored.

    """
    T = .15
    x = np.array([[1, 0, 0, 0]], dtype=float)

    for ID, name in id_parts:
        info = frzout.species_dict[ID]
        m = info['mass']
        g = info['degen']
        sign = -1 if info['boson'] else 1

        hrg = frzout.HRG(T, species=[ID])

        v = np.atleast_2d([np.random.uniform(-i, i) for i in [.5, .5, .7]])
        gamma = 1 / np.sqrt(1 - (v * v).sum())
        ux, uy, uz = gamma * v.ravel()

        volume = 1e6 / hrg.density()
        sigma = np.random.uniform(-.5 * volume, 1.5 * volume, (1, 4))

        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore', 'total freeze-out volume is negative')
            surface = frzout.Surface(x, sigma, v)

        def make_parts():
            n = 0
            for _ in range(10):
                parts = frzout.sample(surface, hrg)
                yield parts
                n += parts.size
                if n > 1e6:
                    break

        parts = list(make_parts())
        nsamples = len(parts)
        parts = np.concatenate(parts)
        psamples = parts['p'].T[1:]

        # 3D lattice of momentum points
        P = [np.linspace(p.min() - .5, p.max() + .5, 101) for p in psamples]
        Px, Py, Pz = np.meshgrid(*P, indexing='ij')
        dp = [p.ptp() / (p.size - 1) for p in P]

        # evaluate Cooper-Frye function on lattice
        E = np.sqrt(m * m + Px * Px + Py * Py + Pz * Pz)
        st, sx, sy, sz = sigma.ravel()
        dN = (
            (E * st + Px * sx + Py * sy + Pz * sz) / E
            / (np.exp((E * gamma - Px * ux - Py * uy - Pz * uz) / T) + sign)
        )
        dN *= 2 * g / (2 * np.pi * hbarc)**3

        # ignore negative contributions
        dN.clip(min=0, out=dN)

        with axes(name.replace('$', '`') + ' momentum') as ax:
            ax.set_yscale('log')

            ax.annotate(''.join([
                r'$\sigma_\mu = (',
                ', '.join('{:.3f}'.format(i / volume) for i in sigma.flat),
                ')$\n',
                '$v = (',
                ', '.join('{:.3f}'.format(i) for i in v.flat),
                ')$',
            ]), (.03, .96), xycoords='axes fraction', ha='left', va='top')

            nbins = 50

            for i, (p, c) in enumerate(zip(psamples, ['x', 'y', 'z'])):
                ax.hist(
                    p, bins=nbins,
                    weights=np.full_like(p, nbins / p.ptp() / nsamples),
                    histtype='step', label='$p_{}$'.format(c)
                )
                j, k = set(range(3)) - {i}
                # evaluate dN/dp_i by integrating out axes (j, k)
                ax.plot(P[i], dp[j] * dp[k] * dN.sum(axis=(j, k)),
                        color=default_color)

            ax.set_xlabel(r'$p\ [\mathrm{GeV}]$')
            ax.set_ylabel(r'$dN/dp\ [\mathrm{GeV}^{-1}]$')
            ax.yaxis.get_major_locator().base(100)
            ax.legend()
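# Every test section receives an `axes` callable and uses it as a context
# manager, e.g. `with axes() as ax:`, `with axes(title, description) as ax:`,
# or `with axes(caption=...) as ax:`.  It is not defined in this excerpt;
# the sketch below would satisfy those call patterns by creating a figure,
# yielding its axes, and then rendering it.  The real factory presumably
# writes the figures and captions into a test report instead of show().

import contextlib


@contextlib.contextmanager
def make_axes(title=None, description=None, caption=None):
    fig, ax = plt.subplots()
    if title is not None:
        ax.set_title(title)
    try:
        yield ax
    finally:
        text = description or caption
        if text is not None:
            fig.text(.5, -.02, text, ha='center', va='top', wrap=True)
        plt.show()
        plt.close(fig)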
def main():
    collision_sys = 'PbPb5020'
    spectraFile = '%s/spectra/LHC5020-AA2ccbar.dat' % share

    # ==================== parse the config file ====================
    if len(sys.argv) == 3:
        config = parseConfig(sys.argv[1])
        jobID = sys.argv[2]
    else:
        config = {}
        jobID = 0

    # ==================== set up grid size variables ====================
    grid_step = 0.1
    grid_max = 15.05
    dtau = 0.25 * grid_step
    Nhalf = int(grid_max / grid_step)

    tau_fs = float(config.get('tau_fs'))
    xi_fs = float(config.get('xi_fs'))
    nevents = int(config.get('nevents'))

    # ==================== initial condition ====================
    proj = collision_sys[:2]
    targ = collision_sys[2:4]
    run_cmd('trento {} {}'.format(proj, targ), str(nevents),
            '--grid-step {} --grid-max {}'.format(grid_step, grid_max),
            '--output {}'.format('initial.hdf5'),
            config.get('trento_args', ''))

    run_qhat(config.get('qhat_args'))

    # set up sampler HRG object
    Tswitch = float(config.get('Tswitch'))
    hrg = frzout.HRG(Tswitch, species='urqmd', res_width=True)
    eswitch = hrg.energy_density()

    finitial = h5py.File('initial.hdf5', 'r')
    for (ievent, dset) in enumerate(finitial.values()):
        resultFile = 'result_{}-{}.hdf5'.format(jobID, ievent)
        fresult = h5py.File(resultFile, 'w')
        print('# event: ', ievent)

        ic = [dset['matter_density'].value, dset['Ncoll_density'].value]

        event_gp = fresult.create_group('initial')
        event_gp.attrs.create('initial_entropy', grid_step**2 * ic[0].sum())
        event_gp.attrs.create('N_coll', grid_step**2 * ic[1].sum())
        for (k, v) in list(
                finitial['event_{}'.format(ievent)].attrs.items()):
            event_gp.attrs.create(k, v)

        # ==================== free streaming ====================
        save_fs_history(ic[0], event_size=grid_max, grid_step=grid_step,
                        tau_fs=tau_fs, xi=xi_fs, steps=5,
                        grid_max=grid_max, coarse=2)

        fs = freestream.FreeStreamer(ic[0], grid_max, tau_fs)
        e = fs.energy_density()
        e_above = e[e > eswitch].sum()
        event_gp.attrs.create(
            'multi_factor', e.sum() / e_above if e_above > 0 else 1)
        e.tofile('ed.dat')

        # calculate the participant plane angle
        participant_plane_angle(e, int(grid_max))

        for i in [1, 2]:
            fs.flow_velocity(i).tofile('u{}.dat'.format(i))
        for ij in [(1, 1), (1, 2), (2, 2)]:
            fs.shear_tensor(*ij).tofile('pi{}{}.dat'.format(*ij))

        # ==================== vishnew hydro ====================
        run_cmd(
            'vishnew initialuread=1 iein=0',
            't0={} dt={} dxy={} nls={}'.format(
                tau_fs, dtau, grid_step, Nhalf),
            config.get('hydro_args', ''))

        # ==================== frzout sampler ====================
        surface_data = np.fromfile(
            'surface.dat', dtype='f8').reshape(-1, 16)
        if surface_data.size == 0:
            print("empty event")
            continue
        print('surface_data.size: ', surface_data.size)

        surface = frzout.Surface(**dict(
            zip(['x', 'sigma', 'v'], np.hsplit(surface_data, [3, 6, 8])),
            pi=dict(zip(['xx', 'xy', 'yy'], surface_data.T[11:14])),
            Pi=surface_data.T[15]
        ), ymax=3.)
        minsamples, maxsamples = 10, 100
        minparts = 30000
        nparts = 0  # for tracking total number of sampled particles

        # sample soft particles and write to file
        with open('particle_in.dat', 'w') as f:
            nsamples = 0
            while nsamples < maxsamples + 1:
                parts = frzout.sample(surface, hrg)
                if parts.size == 0:
                    continue
                nsamples += 1
                nparts += parts.size
                print("#", parts.size, file=f)
                for p in parts:
                    print(p['ID'], *itertools.chain(p['x'], p['p']), file=f)
                if nparts >= minparts and nsamples >= minsamples:
                    break

        event_gp.attrs.create('nsamples', nsamples, dtype=int)

        # ============== HQ initial position sampling ==============
        initial_TAA = ic[1]
        np.savetxt('initial_Ncoll_density.dat', initial_TAA)
        HQ_sample_conf = {
            'IC_file': 'initial_Ncoll_density.dat',
            'XY_file': 'initial_HQ.dat',
            'IC_Nx_max': initial_TAA.shape[0],
            'IC_Ny_max': initial_TAA.shape[1],
            'IC_dx': grid_step,
            'IC_dy': grid_step,
            'IC_tau0': 0,
            'N_sample': 60000,
            'N_scale': 0.05,
            'scale_flag': 0,
        }
        with open('HQ_sample.conf', 'w') as ftmp:
            for key, value in HQ_sample_conf.items():
                ftmp.write('{} = {}\n'.format(key, value))

        run_cmd('HQ_sample HQ_sample.conf')

        # ============ HQ evolution (pre-equilibrium stage) ============
        os.environ['ftn00'] = 'FreeStream.h5'
        os.environ['ftn10'] = '%s/dNg_over_dt_cD6.dat' % share
        print(os.environ['ftn10'])
        os.environ['ftn20'] = 'HQ_AAcY_preQ.dat'
        os.environ['ftn30'] = 'initial_HQ.dat'
        run_cmd('diffusion hq_input=3.0 initt={}'.format(tau_fs * xi_fs),
                config.get('diffusion_args', ''))

        # ============ HQ evolution (in-medium evolution) ============
        os.environ['ftn00'] = 'JetData.h5'
        os.environ['ftn10'] = '%s/dNg_over_dt_cD6.dat' % share
        os.environ['ftn20'] = 'HQ_AAcY.dat'
        os.environ['ftn30'] = 'HQ_AAcY_preQ.dat'
        run_cmd('diffusion hq_input=4.0 initt={}'.format(tau_fs),
                config.get('diffusion_args', ''))

        # ============== heavy quark hadronization ==============
        os.environ['ftn20'] = 'Dmeson_AAcY.dat'
        child1 = 'cat HQ_AAcY.dat'
        p1 = subprocess.Popen(child1.split(), stdout=subprocess.PIPE)
        p2 = subprocess.Popen('fragPLUSrecomb', stdin=p1.stdout)
        p1.stdout.close()
        output = p2.communicate()[0]

        # ============== heavy + soft UrQMD ==============
        run_cmd(
            'afterburner {} urqmd_final.dat particle_in.dat Dmeson_AAcY.dat'
            .format(nsamples))

        # ============== processing data ==============
        calculate_beforeUrQMD(spectraFile, 'Dmeson_AAcY.dat', resultFile,
                              'beforeUrQMD/Dmeson', 1.0, 'a')
        calculate_beforeUrQMD(spectraFile, 'HQ_AAcY.dat', resultFile,
                              'beforeUrQMD/HQ', 1.0, 'a')
        calculate_beforeUrQMD(spectraFile, 'HQ_AAcY_preQ.dat', resultFile,
                              'beforeUrQMD/HQ_preQ', 1.0, 'a')

        if nsamples != 0:
            calculate_afterUrQMD(spectraFile, 'urqmd_final.dat', resultFile,
                                 'afterUrQMD/Dmeson', 1.0, 'a')
            shutil.move('urqmd_final.dat',
                        'urqmd_final{}-{}.dat'.format(jobID, ievent))

        shutil.move('Dmeson_AAcY.dat',
                    'Dmeson_AAcY{}-{}.dat'.format(jobID, ievent))
        shutil.move('HQ_AAcY.dat',
                    'HQ_AAcY{}-{}.dat'.format(jobID, ievent))
        shutil.move('HQ_AAcY_preQ.dat',
                    'HQ_AAcY_preQ{}-{}.dat'.format(jobID, ievent))

    # After everything, save the initial profile (depending on how large the
    # file is, I may choose to skip this step).
    shutil.move('initial.hdf5', 'initial_{}.hdf5'.format(jobID))
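# `parseConfig` (called at the top of main()) is defined elsewhere in the
# script.  A minimal sketch, assuming a plain "key = value" config file with
# '#' comment lines; the real parser may support more syntax:

def parse_config_sketch(path):
    config = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith('#'):
                key, _, value = line.partition('=')
                config[key.strip()] = value.strip()
    return config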