Example 1
def sample_Tuv(surface, hrg):
    """
    Sample particles and compute the stress-energy tensor.

    """
    p = frzout.sample(surface, hrg)['p']
    return np.dot(p.T/p[:, 0], p) / surface.volume
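A minimal usage sketch (not from the source): sample a single large cell at rest and check `T^00` against the hadron gas energy density. The temperature and volume here are illustrative assumptions.

import numpy as np
import frzout

T = 0.15  # GeV, illustrative
hrg = frzout.HRG(T, res_width=False)

# one static cell with a large volume for good statistics
x = np.array([[1., 0, 0, 0]])
sigma = np.array([[1e6 / hrg.density(), 0, 0, 0]])
v = np.zeros((1, 3))
surface = frzout.Surface(x, sigma, v)

Tuv = sample_Tuv(surface, hrg)
# for a box at rest, T^00 ~ energy density and T^11 ~ pressure
print(Tuv[0, 0], hrg.energy_density())
print(Tuv[1, 1], hrg.pressure())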
Example 2
def resonance_mass_distributions(axes):
    """
    Verification of mass distributions for several resonance species.  Grey
    dashed lines are the Breit-Wigner distributions with mass-dependent width,
    grey solid lines are the same distributions with momentum integrated out,
    and colored lines are histograms of the sampled masses.

    """
    with axes() as ax:
        T = .15

        for ID, name in [
            (213, r'$\rho(770)$'),
            (2214, r'$\Delta(1232)$'),
            (22212, r'$N(1535)$'),
        ]:
            info = frzout.species_dict[ID]
            m0 = info['mass']
            w0 = info['width']
            m_min, m_max = info['mass_range']
            sign = -1 if info['boson'] else 1

            def bw(m):
                w = w0 * np.sqrt((m - m_min) / (m0 - m_min))
                return w / ((m - m0)**2 + w * w / 4)

            def f(p, m):
                return p * p / (np.exp(np.sqrt(p * p + m * m) / T) + sign)

            m = np.linspace(m_min, m_max, 200)

            ax.plot(m,
                    bw(m) / integrate.quad(bw, m_min, m_max)[0], **dashed_line)

            bwf = np.array([
                integrate.quad(lambda p: bw(m_) * f(p, m_), 0, 5)[0]
                for m_ in m
            ]) / integrate.dblquad(lambda m_, p: bw(m_) * f(p, m_), 0, 5,
                                   lambda _: m_min, lambda _: m_max)[0]

            ax.plot(m, bwf, color=default_color)

            hrg = frzout.HRG(T, species=[ID], res_width=True)

            x = np.array([[1, 0, 0, 0]], dtype=float)
            sigma = np.array([[1e6 / hrg.density(), 0, 0, 0]])
            v = np.zeros((1, 3))
            surface = frzout.Surface(x, sigma, v)

            parts = frzout.sample(surface, hrg)
            m = np.sqrt(np.inner(parts['p']**2, [1, -1, -1, -1]))

            ax.hist(m, bins=64, density=True, histtype='step', label=name)

        ax.set_xlim(0, 2)
        ax.set_xlabel('Mass [GeV]')
        ax.set_ylabel('Probability')
        ax.set_yticklabels([])

        ax.legend(loc='upper left')
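A hedged follow-up sketch (assuming the same frzout/scipy API as above): reduce the verification to one number by comparing the sampled mean mass of the rho(770) with the mean of the momentum-integrated distribution bw(m)*f(p, m).

import numpy as np
from scipy import integrate
import frzout

T = 0.15
ID = 213  # rho(770)
info = frzout.species_dict[ID]
m0, w0 = info['mass'], info['width']
m_min, m_max = info['mass_range']
sign = -1 if info['boson'] else 1

def bw(m):
    w = w0 * np.sqrt((m - m_min) / (m0 - m_min))
    return w / ((m - m0)**2 + w * w / 4)

def f(p, m):
    return p * p / (np.exp(np.sqrt(p * p + m * m) / T) + sign)

norm = integrate.dblquad(lambda m, p: bw(m) * f(p, m), 0, 5,
                         lambda _: m_min, lambda _: m_max)[0]
mean_calc = integrate.dblquad(lambda m, p: m * bw(m) * f(p, m), 0, 5,
                              lambda _: m_min, lambda _: m_max)[0] / norm

hrg = frzout.HRG(T, species=[ID], res_width=True)
surface = frzout.Surface(
    np.array([[1., 0, 0, 0]]),
    np.array([[1e6 / hrg.density(), 0, 0, 0]]),
    np.zeros((1, 3)),
)
p4 = frzout.sample(surface, hrg)['p']
mean_sampled = np.sqrt(np.inner(p4**2, [1, -1, -1, -1])).mean()
print(mean_calc, mean_sampled)  # expect approximate agreement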
Example 3
def make_parts():
    n = 0
    for _ in range(10):
        parts = frzout.sample(surface, hrg)
        yield parts
        n += parts.size
        if n > 1e6:
            break
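The generator caps total production at roughly a million particles. A hedged usage sketch, assuming `surface` and `hrg` are in scope as in the surrounding examples:

import numpy as np

# concatenate all samples into one structured particle array
parts = np.concatenate(list(make_parts()))
print(parts.size, parts.dtype.names)  # fields ('ID', 'x', 'p') per the other examples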
Example 4
    def sample_bulk(Pi):
        Pi = None if Pi == 0 else np.array([Pi])
        surface = frzout.Surface(x, sigma, v, Pi=Pi)
        parts = frzout.sample(surface, hrg)

        E = parts['p'][:, 0]
        psq = (parts['p'][:, 1:]**2).sum(axis=1)

        return (parts.size / volume, E.sum() / volume,
                (psq / (3 * E)).sum() / volume, np.sqrt(psq).mean())
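A hedged sketch of sweeping `sample_bulk` over bulk pressures (Example 10 below does this with full plots); `hrg` and the surface variables are assumed in scope as in the enclosing test.

import numpy as np

P0 = hrg.pressure()
e0 = hrg.energy_density()
for Pi_frac in np.linspace(-0.5, 0.2, 8):
    n, e, P, pavg = sample_bulk(Pi_frac * P0)
    # the energy density should stay near e0 while the pressure tracks P0 + Pi
    print('Pi/P0 = {:+.2f}: P/P0 = {:.3f}, e/e0 = {:.3f}'.format(Pi_frac, P / P0, e / e0))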
Example 5
    def sample_bulk(Pi):
        surface = frzout.Surface(x, sigma, v, Pi=np.array([Pi]))
        parts = frzout.sample(surface, hrg)

        E = parts['p'][:, 0]
        psq = (parts['p'][:, 1:]**2).sum(axis=1)

        return (
            E.sum()/volume,
            (psq/(3*E)).sum()/volume,
            [(p.size/volume/2, p.mean()) for p in (
                np.sqrt(psq[np.abs(parts['ID']) == i]) for (i, _) in id_parts
            )]
        )
Example 6
        def eos_quantities(T):
            hrg = frzout.HRG(T, res_width=False)
            parts = frzout.sample(surface, hrg)
            E = parts['p'][:, 0]
            psq = (parts['p'][:, 1:]**2).sum(axis=1)

            T3 = (T/hbarc)**3
            T4 = T * T3

            return [
                (hrg.density()/T3, parts.size/volume/T3),
                (hrg.energy_density()/T4, E.sum()/volume/T4),
                (3*hrg.pressure()/T4, 3*(psq/(3*E)).sum()/volume/T4),
            ]
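A hedged sketch of sweeping this check over temperature, assuming `surface`, `volume`, and `hbarc` are in scope as in the enclosing test:

import numpy as np

labels = ['n/T^3', 'e/T^4', '3P/T^4']
for T in np.linspace(0.140, 0.165, 6):
    for label, (calc, sampled) in zip(labels, eos_quantities(T)):
        print('T = {:.3f} GeV, {}: calc {:.3f}, sampled {:.3f}'.format(T, label, calc, sampled))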
Example 7
        finalresults['initial_entropy'] = ic.sum() * grid_step**2
        finalresultspce['initial_entropy'] = ic.sum() * grid_step**2
    else:
        continue
    

minsamples, maxsamples = 10, 1000  # reasonable range for nsamples
minparts = 10**5  # min number of particles to sample
nparts = 0  # for tracking total number of sampled particles
npartspce = 0

hrg_kwargs = dict(species='urqmd', res_width=True)
hrg = frzout.HRG(0.150, **hrg_kwargs)

for nsamples in range(1, maxsamples + 1):
    parts = frzout.sample(finalsurface, hrg)
    if parts.size == 0:
        continue
    nparts += parts.size
    # print('#', parts.size, file=f)
    for p in parts:
        # print(p['ID'], *p['x'], *p['p'], file=f)
        continue
    if nparts >= minparts and nsamples >= minsamples:
        break
    

for nsamplespce in range(1, maxsamples + 1):
    partspce = frzout.sample(finalsurfacepce, hrg)
    if partspce.size == 0:
        continue
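Both loops above repeat the same oversampling pattern; a standalone, hedged restatement (names are illustrative, not from the source):

import frzout

def oversample(surface, hrg, minparts=10**5, minsamples=10, maxsamples=1000):
    """Sample a surface repeatedly until enough particles accumulate."""
    samples = []
    nparts = 0
    for nsamples in range(1, maxsamples + 1):
        parts = frzout.sample(surface, hrg)
        if parts.size == 0:
            continue
        samples.append(parts)
        nparts += parts.size
        if nparts >= minparts and nsamples >= minsamples:
            break
    return samples, nsamples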
Example 8
	def run_single_event(ic, nb, event_number):
		"""
		Run the initial condition event contained in HDF5 dataset object `ic`
		and save observables to `results`.

		"""
		results.fill(0)
		results['initial_entropy'] = ic.sum() * grid_step**2
		results['Ncoll'] = nb.sum() * grid_step**2
		logging.info("Nb %d", results['Ncoll'])
		assert all(n == grid_n for n in ic.shape)

		logging.info(
			'free streaming initial condition for %.3f fm',
			args.tau_fs
		)
		fs = freestream.FreeStreamer(ic, grid_max, args.tau_fs)

		# run coarse event on large grid and determine max radius
		rmax = math.sqrt((
			run_hydro(ic, event_size=27, coarse=3)['x'][:, 1:3]**2
		).sum(axis=1).max())
		logging.info('rmax = %.3f fm', rmax)

		# now run normal event with size set to the max radius
		# and create sampler surface object
		surface = frzout.Surface(**run_hydro(ic, event_size=rmax), ymax=2)
		logging.info('%d freeze-out cells', len(surface))

		# sample particles for UrQMD events
		logging.info('sampling surface with frzout')
		minsamples, maxsamples = 10, 400  # reasonable range for nsamples
		minparts = 10**5  # min number of particles to sample
		nparts = 0  # for tracking total number of sampled particles
		with open('particles_in.dat', 'w') as f:
			for nsamples in range(1, maxsamples + 1):
				parts = frzout.sample(surface, hrg)
				if parts.size == 0:
					continue
				nparts += parts.size
				print('#', parts.size, file=f)
				for p in parts:
					print(p['ID'], *p['x'], *p['p'], file=f)
				if nparts >= minparts and nsamples >= minsamples:
					break

		results['nsamples'] = nsamples
		logging.info('produced %d particles in %d samples', nparts, nsamples)

		if nparts == 0:
			raise StopEvent('no particles produced')

		# ==================Heavy Flavor===========================
		# Run Pythia+Lido
		prefix = os.environ.get('XDG_DATA_HOME')
		run_cmd(
			'hydro-couple',
			'-y {:s}/pythia-setting.txt'.format(prefix),
			'-i ./initial.hdf',
			'-j {:d}'.format(0 if args.nevents is None else event_number - 1),
			'--hydro ./JetData.h5',
			'-s {:s}/settings.xml'.format(prefix),
			'-t {:s}'.format(args.table_path),
			'-n {:d}'.format(args.NPythiaEvents),
			args.lido_args,
		)

		# hadronization
		hq = 'c'
		prefix = os.environ.get('XDG_DATA_HOME')+"/hvq-hadronization/"
		os.environ["ftn20"] = "{}-meson-frzout.dat".format(hq)
		os.environ["ftn30"] = prefix+"parameters_{}_hd.dat".format(hq)
		os.environ["ftn40"] = prefix+"recomb_{}_tot.dat".format(hq)
		os.environ["ftn50"] = prefix+"recomb_{}_BR1.dat".format(hq)
		logging.info(os.environ["ftn30"])
		with open("{}-quark-frzout.dat".format(hq)) as fin:
			subprocess.run("hvq-hadronization", stdin=fin)

		# ==================Heavy + Soft --> UrQMD===========================
		run_cmd('convert_format {} particles_in.dat c-meson-frzout.dat'.format(nsamples))
		run_cmd('run_urqmd urqmd_input.dat particles_out.dat')	

		# read final particle data
		ID, charge, fmass, px, py, pz, y, eta, pT0, y0, w, _ = (
			np.array(col, dtype=dtype) for (col, dtype) in
			zip(
				zip(*read_text_file('particles_out.dat')),
				(2*[int] + 10*[float])
			)
		)
		# pT, phi, and id cut
		pT = np.sqrt(px**2+py**2)
		phi = np.arctan2(py, px)
		ET = np.sqrt(fmass**2 + pT**2)  # transverse energy
		charged = (charge != 0)
		abs_eta = np.fabs(eta)
		abs_ID = np.abs(ID)
		# It may be redundant to find b-hadrons at this stage since UrQMD has
		# not included them yet
		heavy_pid = [pid for (_, pid) in species.get('heavy')]
		is_heavy = np.array([u in heavy_pid for u in abs_ID], dtype=bool)
		is_light = np.logical_not(is_heavy)

		#============for soft particles======================
		results['dNch_deta'] = np.count_nonzero(charged & (abs_eta<.5) & is_light) / nsamples
		ET_eta = .6
		results['dET_deta'] = ET[abs_eta < ET_eta].sum() / (2*ET_eta) / nsamples


		for s, pid in species.get('light'):
			cut = (abs_ID == pid) & (abs_eta < 0.5)
			N = np.count_nonzero(cut)
			results['dN_dy'][s] = N / nsamples
			results['mean_pT'][s] = (0. if N == 0 else pT[cut].mean())

		pT_alice = pT[charged & (abs_eta < .8) & (.15 < pT) & (pT < 2.)]
		results['pT_fluct']['N'] = pT_alice.size
		results['pT_fluct']['sum_pT'] = pT_alice.sum()
		results['pT_fluct']['sum_pTsq'] = np.inner(pT_alice, pT_alice)

		phi_alice = phi[charged & (abs_eta < .8) & (.2 < pT) & (pT < 5.)]
		results['Qn_soft']['M'] = phi_alice.size
		results['Qn_soft']['Qn'] = [np.exp(1j*n*phi_alice).sum()
				for n in range(1, results.dtype['Qn_soft']['Qn'].shape[0] + 1)]

		#============for heavy flavors=======================
		for exp in ['ALICE', 'CMS']:
			#=========Event plane Q-vector from UrQMD events======================
			phi_light = phi[charged & is_light
				& (JEC[exp]['vn_ref']['ybins'][0] < eta)
				& (eta < JEC[exp]['vn_ref']['ybins'][1])
				& (JEC[exp]['vn_ref']['pTbins'][0] < pT)
				& (pT < JEC[exp]['vn_ref']['pTbins'][1])]
			results['Qn_ref_'+exp]['M'] = phi_light.shape[0]
			results['Qn_ref_'+exp]['Qn'] = np.array([np.exp(1j*n*phi_light).sum() 
											for n in range(1, 5)])
			#===========For heavy particles======================
			# For charmed hadrons, use info after urqmd
			HF_dict = {
				'pid': abs_ID[is_heavy],
				'pT': pT[is_heavy],
				'y': y[is_heavy],
				'phi': phi[is_heavy],
				'w': w[is_heavy],  # weights, normalized per unit area
			}
			POI = [pid for (_, pid) in species.get('heavy')]
			flow = JLP.Qvector(HF_dict, JEC[exp]['vn_HF']['pTbins'],
								JEC[exp]['vn_HF']['ybins'], POI, order=4)
			Yield = JLP.Yield(HF_dict, JEC[exp]['Raa']['pTbins'],
								JEC[exp]['Raa']['ybins'], POI)
			for (s, pid) in species.get('heavy'):
				results['dX_dpT_dy_'+exp][s] = Yield[pid][:,0]
				results['Qn_poi_'+exp][s]['M'] = flow[pid]['M'][:,0]
				results['Qn_poi_'+exp][s]['Qn'] = flow[pid]['Qn'][:,0,:]

		# For full pT prediction
		#=========Use high precision Q-vector at the end of hydro==============
		# oversample to get a high-precision event plane at freeze-out
		ophi_light = np.empty(0)
		nloop = 0
		while ophi_light.size < 10**6 and nloop < 100000:
			nloop += 1
			oE, opx, opy, opz = frzout.sample(surface, hrg)['p'].T
			oM, opT, oy, ophi = JLP.fourvec_to_curvelinear(opx, opy, opz, oE)
			ophi = ophi[(-2 < oy) & (oy < 2) & (0.2 < opT) & (opT < 5.0)]
			ophi_light = np.append(ophi_light, ophi)
		results['Qn_ref_pred']['M'] = ophi_light.shape[0]
		results['Qn_ref_pred']['Qn'] = np.array([np.exp(1j*n*ophi_light).sum() 
							 for n in range(1, 5)])
		del ophi_light
		#===========For heavy particles======================
		# For charmed hadrons, use info after urqmd
		HF_dict = {
			'pid': abs_ID[is_heavy],
			'pT': pT[is_heavy],
			'y': y[is_heavy],
			'phi': phi[is_heavy],
			'w': w[is_heavy],
		}
		POI = [pid for (_, pid) in species.get('heavy')]
		flow = JLP.Qvector(HF_dict, JEC['pred-pT'], [[-2,2]], POI, order=4)
		Yield = JLP.Yield(HF_dict, JEC['pred-pT'], [[-1,1]], POI)
		for (s, pid) in species.get('heavy'):
			results['dX_dpT_dy_pred'][s] = Yield[pid][:,0]
			results['Qn_poi_pred'][s]['M'] = flow[pid]['M'][:,0]
			results['Qn_poi_pred'][s]['Qn'] = flow[pid]['Qn'][:,0]
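The script stores per-event Q-vectors Qn = sum over particles of exp(i*n*phi), plus multiplicities M, rather than flow coefficients. A hedged sketch of the standard two-particle-cumulant post-processing step (not part of the script above):

import numpy as np

def vn2(Qn, M):
    """Two-particle cumulant flow estimate from per-event Q-vectors.

    <2> = (|Qn|^2 - M) / (M*(M - 1)) per event; vn{2} is the square root
    of the multiplicity-weighted average over events.
    """
    Qn = np.asarray(Qn)
    M = np.asarray(M, dtype=float)
    two = (np.abs(Qn)**2 - M) / (M * (M - 1))
    w = M * (M - 1)
    return np.sqrt(np.sum(w * two) / np.sum(w))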
Example 9
def _realistic_surface_observables():
    """
    Compute observables for the "realistic surface" test case.

    """
    with open('test_surface.dat', 'rb') as f:
        surface_data = np.array(
            [l.split() for l in f if not l.startswith(b'#')], dtype=float)

    # 0    1  2  3         4         5         6    7
    # tau  x  y  dsigma_t  dsigma_x  dsigma_y  v_x  v_y
    # 8     9     10    11    12    13    14    15
    # pitt  pitx  pity  pixx  pixy  piyy  pizz  Pi
    x, sigma, v, _ = np.hsplit(surface_data, [3, 6, 8])
    pixx, pixy, piyy = surface_data.T[11:14]
    Pi = surface_data.T[15]

    sigma4 = np.zeros((sigma.shape[0], 4))
    sigma4[:, :3] = sigma
    sigma4 *= x[:, :1]

    u_ = np.zeros((v.shape[0], 4))
    u_[:, 0] = 1
    u_[:, 1:3] = -v
    u_ /= np.sqrt(1 - np.square(v).sum(axis=1))[:, np.newaxis]

    vx, vy = v.T
    pi_uv = np.zeros((pixx.shape[0], 4, 4))
    pi_uv[:, 0, 0] = vx * vx * pixx + vy * vy * piyy + 2 * vx * vy * pixy
    pi_uv[:, 1, 1] = pixx
    pi_uv[:, 2, 2] = piyy
    pi_uv[:, 3, 3] = pi_uv[:, 0, 0] - pixx - piyy
    pi_uv[:, 0, 1] = pi_uv[:, 1, 0] = -(vx * pixx + vy * pixy)
    pi_uv[:, 0, 2] = pi_uv[:, 2, 0] = -(vx * pixy + vy * piyy)
    pi_uv[:, 1, 2] = pi_uv[:, 2, 1] = pixy

    pT_max = 4
    pT_bins = np.linspace(0, pT_max, 41)
    pT = (pT_bins[:-1] + pT_bins[1:]) / 2
    delta_pT = pT_max / (pT_bins.size - 1)

    phi = np.linspace(0, 2 * np.pi, 100, endpoint=False)

    eta, eta_weights = special.ps_roots(30)
    eta_max = 4
    eta *= eta_max
    eta_weights *= 2 * eta_max

    T = .145
    hrg = frzout.HRG(T, res_width=False)
    eta_over_tau = hrg.eta_over_tau()
    zeta_over_tau = hrg.zeta_over_tau()
    cs2 = hrg.cs2()

    the_vn = [2, 3, 4]

    def calc_obs(ID):
        m = frzout.species_dict[ID]['mass']
        degen = frzout.species_dict[ID]['degen']
        sign = -1 if frzout.species_dict[ID]['boson'] else 1

        pT_, phi_, eta_ = np.meshgrid(pT, phi, eta)
        mT_ = np.sqrt(m * m + pT_ * pT_)
        p = np.array([
            mT_ * np.cosh(eta_), pT_ * np.cos(phi_), pT_ * np.sin(phi_),
            mT_ * np.sinh(eta_)
        ]).T

        # ignore negative contributions
        psigma = np.inner(p, sigma4)
        psigma.clip(min=0, out=psigma)

        pu = np.inner(p, u_)
        with np.errstate(over='ignore'):
            f = 1 / (np.exp(pu / T) + sign)

        df = f * (1 - sign * f) * (
            ((pu * pu - m * m) /
             (3 * pu) - cs2 * pu) / (zeta_over_tau * T) * Pi +
            np.einsum('ijku,ijkv,auv->ijka', p, p, pi_uv) /
            (2 * pu * T * eta_over_tau))
        f += df

        # (phi, pT) distribution
        phi_pT_dist = (2 * degen *
                       np.einsum('i,ijka,ijka->jk', eta_weights, psigma, f) /
                       (2 * np.pi * hbarc)**3 / phi.size)
        pT_dist = phi_pT_dist.sum(axis=1)

        # navg, pT dist, qn(pT)
        return (2 * np.pi * delta_pT * np.inner(pT, pT_dist), pT_dist, [
            np.inner(np.exp(1j * n * phi), phi_pT_dist) / pT_dist
            for n in the_vn
        ])

    obs_calc = [calc_obs(i) for i, _ in id_parts]

    surface = frzout.Surface(x,
                             sigma,
                             v,
                             pi=dict(xx=pixx, yy=piyy, xy=pixy),
                             Pi=Pi)

    ngroups = 1000
    N = 1000  # nsamples per group
    nsamples = ngroups * N

    # need many samples for diff flow
    # too many to store all particles in memory -> accumulate observables
    obs_sampled = [
        (
            np.empty(nsamples, dtype=int),  # ID particle counts
            np.zeros_like(pT),  # pT distribution
            np.zeros((len(the_vn), pT.size)),  # diff flow
        ) for _ in id_parts
    ]

    diff_flow_counts = [
        np.zeros_like(vn, dtype=int) for (_, _, vn) in obs_sampled
    ]

    from multiprocessing.pool import ThreadPool

    for k in range(ngroups):
        print('      group', k)
        # threading increases performance since sample() releases the GIL
        with ThreadPool() as pool:
            parts = pool.map(lambda _: frzout.sample(surface, hrg), range(N))
        # identified particle counts
        for (i, _), (counts, _, _) in zip(id_parts, obs_sampled):
            counts[k * N:(k + 1) * N] = [
                np.count_nonzero(np.abs(p['ID']) == i) for p in parts
            ]
        # merge all samples
        parts = np.concatenate(parts)
        abs_ID = np.abs(parts['ID'])
        for (i, _), (_, pT_dist, vn_arr), dflow_counts, (_, _, qn_list) in zip(
                id_parts, obs_sampled, diff_flow_counts, obs_calc):
            parts_ = parts[abs_ID == i]
            px, py = parts_['p'].T[1:3]
            pT_ = np.sqrt(px * px + py * py)
            phi_ = np.arctan2(py, px)
            # pT distribution
            pT_dist += np.histogram(pT_, bins=pT_bins, weights=1 / pT_)[0]
            # differential flow
            for n, vn, dfc, qn in zip(the_vn, vn_arr, dflow_counts, qn_list):
                cosnphi = [
                    np.cos(n * phi_[np.fabs(pT_ - p) < .2] - npsi)
                    for (p, npsi) in zip(pT, np.arctan2(qn.imag, qn.real))
                ]
                vn += [c.sum() for c in cosnphi]
                dfc += [c.size for c in cosnphi]

    # normalize pT dists and diff flow
    for (_, pT_dist, vn), dflow_counts in zip(obs_sampled, diff_flow_counts):
        pT_dist /= 2 * np.pi * nsamples * delta_pT
        vn /= dflow_counts

    return pT, the_vn, obs_calc, obs_sampled
Example 10
def bulk_viscous_corrections(axes):
    """
    Effect of bulk viscosity on thermodynamic quantities and momentum
    distributions.

    The total pressure is the sum of the equilibrium and bulk pressures:
    `P = P_0 + \Pi`.  In order to satisfy continuity of the stress-energy
    tensor, sampled particles must reproduce the total pressure without
    changing the equilibrium energy density; this is achieved by parametrically
    rescaling overall particle production and momentum depending on the bulk
    pressure.  This works for negative bulk pressure all the way down to zero
    total pressure, but for positive bulk pressure, the necessary momentum
    scale factor diverges as the total pressure approaches twice the
    equilibrium pressure.  The momentum scale is therefore restricted to a
    reasonable maximum (3) which effectively limits the positive bulk pressure
    to around 70% of the equilibrium pressure, depending on the hadron gas
    temperature and composition.

    Most quantities are plotted as the relative change to their equilibrium
    values vs. the relative bulk pressure `\Pi/P_0`.  Colored lines are from
    samples and dashed lines are calculated.

    """
    T = .15
    hrg = frzout.HRG(T, res_width=False)

    volume = 1e6 / hrg.density()
    x = np.array([[1., 0, 0, 0]])
    sigma = np.array([[volume, 0, 0, 0]])
    v = np.zeros((1, 3))

    def sample_bulk(Pi):
        Pi = None if Pi == 0 else np.array([Pi])
        surface = frzout.Surface(x, sigma, v, Pi=Pi)
        parts = frzout.sample(surface, hrg)

        E = parts['p'][:, 0]
        psq = (parts['p'][:, 1:]**2).sum(axis=1)

        return (parts.size / volume, E.sum() / volume,
                (psq / (3 * E)).sum() / volume, np.sqrt(psq).mean())

    n0 = hrg.density()
    e0 = hrg.energy_density()
    P0 = hrg.pressure()
    pavg0 = hrg.mean_momentum()

    Pi = np.linspace(-P0, P0, 31)
    Pi_min, Pi_max = hrg.Pi_lim()
    # ensure that the HRG is sampled at precisely the Pi limits
    for p in hrg.Pi_lim():
        Pi[np.abs(Pi - p).argmin()] = p
    Pi_frac = Pi / P0

    n, e, P, pavg = np.array([sample_bulk(x) for x in Pi]).T

    with axes(
            'Pressure and energy density',
            'Bulk pressure changes the effective pressure without changing '
            'the energy density.') as ax:
        ax.plot(Pi_frac, P / P0 - 1, label='Pressure ($P$)')
        ax.plot(Pi_frac, Pi_frac.clip(Pi_min / P0, Pi_max / P0), **dashed_line)

        ax.plot(Pi_frac, e / e0 - 1, label='Energy density ($\epsilon$)')
        ax.axhline(0, **dashed_line)

        ax.set_xlim(Pi_frac.min(), Pi_frac.max())
        ax.set_ylim(Pi_frac.min(), Pi_frac.max())

        ax.set_xlabel('$\Pi/P_0$')
        ax.set_ylabel('$\Delta P/P_0$, $\Delta\epsilon/\epsilon_0$')
        ax.legend(loc='upper left')

    nscale, pscale = np.array([hrg.bulk_scale_factors(p) for p in Pi]).T

    with axes(
            'Particle density and momentum',
            'The changes in particle density and mean momentum necessary '
            'to achieve the target pressure and energy density.') as ax:
        for y, y0, ycheck, label in [
            (n, n0, nscale, 'Density ($n$)'),
            (pavg, pavg0, pscale, 'Mean momentum ($p$)'),
        ]:
            ax.plot(Pi_frac, y / y0 - 1, label=label)
            ax.plot(Pi_frac, ycheck - 1, **dashed_line)

        ax.set_xlim(Pi_frac.min(), Pi_frac.max())

        ax.set_xlabel('$\Pi/P_0$')
        ax.set_ylabel(r'$\Delta n/n_0$, $\Delta p/p_0$')
        ax.legend(loc='upper left')

    def f(p, ID):
        m, boson, g = (frzout.species_dict[ID][k]
                       for k in ['mass', 'boson', 'degen'])
        s = -1 if boson else 1
        return g / (np.exp(np.sqrt(m * m + p * p) / T) + s)

    def density(ID, pscale=1):
        return (4 * np.pi) / (2 * np.pi * hbarc)**3 * integrate.quad(
            lambda p: p * p * f(p / pscale, ID), 0, 10)[0]

    ID = 211
    n0 = density(ID)

    with axes(
            'Distribution functions',
            'Pion distribution functions `f(p)` for different bulk pressures. '
            'Colored histograms are samples; dashed lines are target '
            'distributions with rescaled momentum, `f(\lambda p)`.') as ax:
        nbins = 50
        w = nbins * (2 * np.pi * hbarc)**3 / (2 * volume * 4 * np.pi)

        for k, Pi_frac in enumerate([0, -.1, -.3]):
            Pi = Pi_frac * P0
            parts = frzout.sample(
                frzout.Surface(x, sigma, v, Pi=np.array([Pi])), hrg)
            psq = (parts[np.abs(parts['ID']) == ID]['p'][:, 1:]**2).sum(axis=1)
            pmag = np.sqrt(psq)
            offset = 10**(-k)
            ax.hist(pmag,
                    bins=nbins,
                    weights=w * offset / psq / pmag.ptp(),
                    histtype='step',
                    log=True,
                    label='$\Pi = ' +
                    ('0' if Pi_frac == 0 and k == 0 else
                     r'{}P_0\ (f \times 10^{{{:d}}})'.format(Pi_frac, -k)) +
                    '$')

            p = np.linspace(0, pmag.max(), 200)
            nscale, pscale = hrg.bulk_scale_factors(Pi)
            n = density(ID, pscale)
            ax.plot(p, n0 / n * nscale * offset * f(p / pscale, ID),
                    **dashed_line)

        ax.set_xlim(0)
        ax.set_xlabel('$p\ \mathrm{[GeV]}$')
        ax.set_ylabel('$f(p)$')
        ax.legend(title='Pions only')
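The parametric rescaling described in the docstring is exposed through `hrg.bulk_scale_factors`, used above. A minimal standalone sketch printing the two factors:

import frzout

hrg = frzout.HRG(0.15, res_width=False)
P0 = hrg.pressure()
for Pi_frac in (-0.3, -0.1, 0.0, 0.1):
    nscale, pscale = hrg.bulk_scale_factors(Pi_frac * P0)
    print('Pi/P0 = {:+.1f}: nscale = {:.3f}, pscale = {:.3f}'.format(Pi_frac, nscale, pscale))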
Example 11
def bulk_viscous_corrections(axes):
    """
    Effect of bulk viscosity on thermodynamic quantities and momentum
    distributions.

    The total pressure is the sum of the equilibrium and bulk pressures:
    `P = P_0 + \Pi`.

    Most quantities are plotted as the relative change to their equilibrium
    values vs. the relative bulk pressure `\Pi/P_0`.  Colored lines are from
    samples and dashed lines are calculated.

    The algorithm restricts bulk pressure to a range (roughly -0.5 to +0.2 of
    the ideal pressure, depending on the hadron gas temperature and
    composition) so that no particle densities ever go negative.  In realistic
    events the bulk pressure is easily within this range.

    """
    T = .15
    hrg = frzout.HRG(T, res_width=False)

    volume = 2e6/hrg.density()
    x = np.array([[1., 0, 0, 0]])
    sigma = np.array([[volume, 0, 0, 0]])
    v = np.zeros((1, 3))

    def sample_bulk(Pi):
        surface = frzout.Surface(x, sigma, v, Pi=np.array([Pi]))
        parts = frzout.sample(surface, hrg)

        E = parts['p'][:, 0]
        psq = (parts['p'][:, 1:]**2).sum(axis=1)

        return (
            E.sum()/volume,
            (psq/(3*E)).sum()/volume,
            [(p.size/volume/2, p.mean()) for p in (
                np.sqrt(psq[np.abs(parts['ID']) == i]) for (i, _) in id_parts
            )]
        )

    P0 = hrg.pressure()
    e0 = hrg.energy_density()

    Pi_frac = np.linspace(-.6, .3, 21)
    Pi = Pi_frac * P0
    Pi_min, Pi_max = hrg.Pi_lim()

    e, P, id_parts_samples = (
        np.array(i) for i in zip(*[sample_bulk(x) for x in Pi])
    )

    with axes(
            'Pressure and energy density',
            'Bulk pressure changes the effective pressure without changing '
            'the energy density.'
    ) as ax:
        ax.plot(Pi_frac, P/P0 - 1, label='Pressure')
        ax.plot(Pi_frac, Pi_frac.clip(Pi_min/P0, Pi_max/P0), **dashed_line)

        ax.plot(Pi_frac, e/e0 - 1, label='Energy density')
        ax.axhline(0, **dashed_line)

        ax.set_xlim(Pi_frac.min(), Pi_frac.max())
        ax.set_ylim(Pi_frac.min(), Pi_frac.max())

        ax.set_xlabel('$\Pi/P_0$')
        ax.set_ylabel('$\Delta P/P_0,\ \Delta\epsilon/\epsilon_0$')
        ax.legend(loc='upper left')

    density, pavg = id_parts_samples.T

    zeta_over_tau = hrg.zeta_over_tau()
    cs2 = hrg.cs2()

    def f(p, ID, Pi=0):
        Pi = min(max(Pi, Pi_min), Pi_max)
        m, boson, g = (
            frzout.species_dict[ID][k] for k in ['mass', 'boson', 'degen']
        )
        s = -1 if boson else 1
        E = np.sqrt(m*m + p*p)
        f0 = 1/(np.exp(E/T) + s)
        df = Pi/(T*zeta_over_tau)*(p*p/(3*E) - cs2*E)*f0*(1 - s*f0)
        return g*(f0 + df)

    def int_f(ID, Pi=0, inner=lambda p: 1):
        return (4*np.pi)/(2*np.pi*hbarc)**3 * integrate.quad(
            lambda p: p*p*inner(p)*f(p, ID, Pi), 0, 10
        )[0]

    def calc_density(ID, Pi=0):
        return int_f(ID, Pi)

    def calc_pavg(ID, Pi=0):
        return int_f(ID, Pi, inner=lambda p: p) / calc_density(ID, Pi)

    with axes(
            'Particle densities',
            'Changes in density are proportional to bulk pressure '
            'by construction.'
    ) as ax:
        for n, (i, label) in zip(density, id_parts):
            n0 = calc_density(i)
            ncalc = np.array([calc_density(i, Pi_) for Pi_ in Pi])
            ax.plot(Pi_frac, n/n0 - 1, label=label)
            ax.plot(Pi_frac, ncalc/n0 - 1, **dashed_line)

        ax.set_xlim(Pi_frac.min(), Pi_frac.max())

        ax.set_xlabel('$\Pi/P_0$')
        ax.set_ylabel('$\Delta n/n_0$')
        ax.legend(loc='lower right')

    with axes('Average momenta') as ax:
        for p, (i, label) in zip(pavg, id_parts):
            p0 = calc_pavg(i)
            pcalc = np.array([calc_pavg(i, Pi_) for Pi_ in Pi])
            ax.plot(Pi_frac, p/p0 - 1, label=label)
            ax.plot(Pi_frac, pcalc/p0 - 1, **dashed_line)

        ax.set_xlim(Pi_frac.min(), Pi_frac.max())

        ax.set_xlabel('$\Pi/P_0$')
        ax.set_ylabel(r'$\Delta\langle p \rangle/\langle p \rangle_0$')
        ax.legend(loc='upper left')

    ID = 211
    n0 = calc_density(ID)
    pavg0 = calc_pavg(ID)

    with axes(
            'Distribution functions',
            'Pion distribution functions `f(p)` for different bulk pressures. '
            'Colored histograms are samples, solid lines are `f_0 + \delta f` '
            '(which goes negative for large momentum and bulk pressure), '
            'and dashed lines are the actual target distributions with '
            'rescaled momentum, `f_0(\lambda p)`.'
    ) as ax:
        nbins = 50
        w = nbins*(2*np.pi*hbarc)**3/(2*volume*4*np.pi)

        for k, Pi_frac in enumerate([0, -.1, -.3]):
            Pi = Pi_frac*P0
            parts = frzout.sample(
                frzout.Surface(x, sigma, v, Pi=np.array([Pi])),
                hrg
            )
            psq = (parts[np.abs(parts['ID']) == ID]['p'][:, 1:]**2).sum(axis=1)
            pmag = np.sqrt(psq)
            scale = 10**(-k)
            ax.hist(
                pmag, bins=nbins, weights=w*scale/psq/pmag.ptp(),
                histtype='step', log=True,
                label='$\Pi = ' + (
                    '0' if Pi_frac == 0 and k == 0 else
                    r'{}P_0\ (f \times 10^{{{:d}}})'.format(Pi_frac, -k)
                ) + '$'
            )

            p = np.linspace(0, pmag.max(), 200)
            ax.plot(p, scale*f(p, ID, Pi), color=default_color)

            n = calc_density(ID, Pi)
            pavg = calc_pavg(ID, Pi)
            ax.plot(p, n0/n*scale*f(p*pavg0/pavg, ID), **dashed_line)

        ax.set_xlabel('$p\ \mathrm{[GeV]}$')
        ax.set_ylabel('$f(p)$')
        ax.legend()
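The bulk-pressure range quoted in the docstring comes from `hrg.Pi_lim()`, used above; a quick check of the limits relative to the ideal pressure:

import frzout

hrg = frzout.HRG(0.15, res_width=False)
Pi_min, Pi_max = hrg.Pi_lim()
P0 = hrg.pressure()
print(Pi_min / P0, Pi_max / P0)  # per the docstring, roughly -0.5 and +0.2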
Example 12
def main():
    collision_sys = 'PbPb5020'
    spectraFile = '%s/spectra/LHC5020-AA2ccbar.dat' % share

    # ==== parse the config file ============================================
    if len(sys.argv) == 3:
        config = parseConfig(sys.argv[1])
        jobID = sys.argv[2]
    else:
        config = {}
        jobID = 0

    # ====== set up grid size variables ======================================
    grid_step = 0.1
    grid_max = 15.05
    dtau = 0.25 * grid_step
    Nhalf = int(grid_max / grid_step)

    tau_fs = float(config.get('tau_fs'))
    xi_fs = float(config.get('xi_fs'))
    nevents = int(config.get('nevents'))

    # ========== initial condition ============================================
    proj = collision_sys[:2]
    targ = collision_sys[2:4]

    run_cmd('trento {} {}'.format(proj, targ), str(nevents),
            '--grid-step {} --grid-max {}'.format(grid_step, grid_max),
            '--output {}'.format('initial.hdf5'),
            config.get('trento_args', ''))

    run_qhat(config.get('qhat_args'))
    # set up sampler HRG object
    Tswitch = float(config.get('Tswitch'))
    hrg = frzout.HRG(Tswitch, species='urqmd', res_width=True)
    eswitch = hrg.energy_density()

    finitial = h5py.File('initial.hdf5', 'r')

    for (ievent, dset) in enumerate(finitial.values()):
        resultFile = 'result_{}-{}.hdf5'.format(jobID, ievent)
        fresult = h5py.File(resultFile, 'w')
        print('# event: ', ievent)
        ic = [dset['matter_density'][()], dset['Ncoll_density'][()]]
        event_gp = fresult.create_group('initial')
        event_gp.attrs.create('initial_entropy', grid_step**2 * ic[0].sum())
        event_gp.attrs.create('N_coll', grid_step**2 * ic[1].sum())
        for (k, v) in list(finitial['event_{}'.format(ievent)].attrs.items()):
            event_gp.attrs.create(k, v)

        # =============== Freestreaming ===========================================
        save_fs_history(ic[0],
                        event_size=grid_max,
                        grid_step=grid_step,
                        tau_fs=tau_fs,
                        xi=xi_fs,
                        steps=5,
                        grid_max=grid_max,
                        coarse=2)
        fs = freestream.FreeStreamer(ic[0], grid_max, tau_fs)
        e = fs.energy_density()
        e_above = e[e > eswitch].sum()
        event_gp.attrs.create('multi_factor',
                              e.sum() / e_above if e_above > 0 else 1)
        e.tofile('ed.dat')

        # calculate the participant plane angle
        participant_plane_angle(e, int(grid_max))

        for i in [1, 2]:
            fs.flow_velocity(i).tofile('u{}.dat'.format(i))
        for ij in [(1, 1), (1, 2), (2, 2)]:
            fs.shear_tensor(*ij).tofile('pi{}{}.dat'.format(*ij))

        # ============== vishnew hydro ===========================================
        run_cmd(
            'vishnew initialuread=1 iein=0',
            't0={} dt={} dxy={} nls={}'.format(tau_fs, dtau, grid_step, Nhalf),
            config.get('hydro_args', ''))

        # ============= frzout sampler =========================================
        surface_data = np.fromfile('surface.dat', dtype='f8').reshape(-1, 16)
        if surface_data.size == 0:
            print("empty event")
            continue
        print('surface_data.size: ', surface_data.size)

        surface = frzout.Surface(**dict(
            zip(['x', 'sigma', 'v'], np.hsplit(surface_data, [3, 6, 8])),
            pi=dict(zip(['xx', 'xy', 'yy'], surface_data.T[11:14])),
            Pi=surface_data.T[15]),
                                 ymax=3.)

        minsamples, maxsamples = 10, 100
        minparts = 30000
        nparts = 0  # for tracking total number of sampled particles

        # sample soft particles and write to file
        with open('particle_in.dat', 'w') as f:
            nsamples = 0
            while nsamples < maxsamples + 1:
                parts = frzout.sample(surface, hrg)
                if parts.size == 0:
                    continue
                else:
                    nsamples += 1
                    nparts += parts.size
                    print("#", parts.size, file=f)
                    for p in parts:
                        print(p['ID'],
                              *itertools.chain(p['x'], p['p']),
                              file=f)

                    if nparts >= minparts and nsamples >= minsamples:
                        break

        event_gp.attrs.create('nsamples', nsamples, dtype=int)

        # =============== HQ initial position sampling ===========================
        initial_TAA = ic[1]
        np.savetxt('initial_Ncoll_density.dat', initial_TAA)
        HQ_sample_conf = {
            'IC_file': 'initial_Ncoll_density.dat',
            'XY_file': 'initial_HQ.dat',
            'IC_Nx_max': initial_TAA.shape[0],
            'IC_Ny_max': initial_TAA.shape[1],
            'IC_dx': grid_step,
            'IC_dy': grid_step,
            'IC_tau0': 0,
            'N_sample': 60000,
            'N_scale': 0.05,
            'scale_flag': 0,
        }

        with open('HQ_sample.conf', 'w') as ftmp:
            for key, value in HQ_sample_conf.items():
                ftmp.write(' = '.join([str(key), str(value)]) + '\n')

        run_cmd('HQ_sample HQ_sample.conf')

        # ================ HQ evolution (pre-equilibrium stages) =================
        os.environ['ftn00'] = 'FreeStream.h5'
        os.environ['ftn10'] = '%s/dNg_over_dt_cD6.dat' % share
        print(os.environ['ftn10'])
        os.environ['ftn20'] = 'HQ_AAcY_preQ.dat'
        os.environ['ftn30'] = 'initial_HQ.dat'
        run_cmd('diffusion hq_input=3.0 initt={}'.format(tau_fs * xi_fs),
                config.get('diffusion_args', ''))

        # ================ HQ evolution (in medium evolution) ====================
        os.environ['ftn00'] = 'JetData.h5'
        os.environ['ftn10'] = '%s/dNg_over_dt_cD6.dat' % share
        os.environ['ftn20'] = 'HQ_AAcY.dat'
        os.environ['ftn30'] = 'HQ_AAcY_preQ.dat'
        run_cmd('diffusion hq_input=4.0 initt={}'.format(tau_fs),
                config.get('diffusion_args', ''))

        # ============== Heavy quark hadronization ==============================
        os.environ['ftn20'] = 'Dmeson_AAcY.dat'
        child1 = 'cat HQ_AAcY.dat'
        p1 = subprocess.Popen(child1.split(), stdout=subprocess.PIPE)
        p2 = subprocess.Popen('fragPLUSrecomb', stdin=p1.stdout)
        p1.stdout.close()
        output = p2.communicate()[0]

        # ============ Heavy + soft UrQMD =================================
        run_cmd(
            'afterburner {} urqmd_final.dat particle_in.dat Dmeson_AAcY.dat'.
            format(nsamples))

        # =========== processing data ====================================
        calculate_beforeUrQMD(spectraFile, 'Dmeson_AAcY.dat', resultFile,
                              'beforeUrQMD/Dmeson', 1.0, 'a')
        calculate_beforeUrQMD(spectraFile, 'HQ_AAcY.dat', resultFile,
                              'beforeUrQMD/HQ', 1.0, 'a')
        calculate_beforeUrQMD(spectraFile, 'HQ_AAcY_preQ.dat', resultFile,
                              'beforeUrQMD/HQ_preQ', 1.0, 'a')
        if nsamples != 0:
            calculate_afterUrQMD(spectraFile, 'urqmd_final.dat', resultFile,
                                 'afterUrQMD/Dmeson', 1.0, 'a')

        shutil.move('urqmd_final.dat',
                    'urqmd_final{}-{}.dat'.format(jobID, ievent))
        shutil.move('Dmeson_AAcY.dat',
                    'Dmeson_AAcY{}-{}.dat'.format(jobID, ievent))
        shutil.move('HQ_AAcY.dat', 'HQ_AAcY{}-{}.dat'.format(jobID, ievent))
        shutil.move('HQ_AAcY_preQ.dat',
                    'HQ_AAcY_preQ{}-{}.dat'.format(jobID, ievent))

    #=== after everything, save the initial profile (depending on how large it is, I may choose to skip this step)
    shutil.move('initial.hdf5', 'initial_{}.hdf5'.format(jobID))
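`run_cmd` is called throughout but not defined in this excerpt; a hedged sketch of what such a helper might look like (an assumption for illustration, not the project's actual implementation):

import logging
import subprocess

def run_cmd(*args):
    """Join the arguments into one command line, log it, and run it."""
    cmd = ' '.join(str(a) for a in args)
    logging.info('running command: %s', cmd)
    subprocess.run(cmd.split(), check=True)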
Example 13
def _realistic_surface_observables():
    """
    Compute observables for the "realistic surface" test case.

    """
    with open('test_surface.dat', 'rb') as f:
        surface_data = np.array(
            [l.split() for l in f if not l.startswith(b'#')],
            dtype=float
        )

    # 0    1  2  3         4         5         6    7
    # tau  x  y  dsigma_t  dsigma_x  dsigma_y  v_x  v_y
    # 8     9     10    11    12    13    14    15
    # pitt  pitx  pity  pixx  pixy  piyy  pizz  Pi
    x, sigma, v, _ = np.hsplit(surface_data, [3, 6, 8])
    pixx, pixy, piyy = surface_data.T[11:14]
    Pi = surface_data.T[15]

    sigma_ = np.zeros((sigma.shape[0], 4))
    sigma_[:, :3] = sigma
    sigma_[:, 1:] *= -1
    sigma_ *= x[:, :1]

    u_ = np.zeros((v.shape[0], 4))
    u_[:, 0] = 1
    u_[:, 1:3] = -v
    u_ /= np.sqrt(1 - np.square(v).sum(axis=1))[:, np.newaxis]

    vx, vy = v.T
    pi_uv = np.zeros((pixx.shape[0], 4, 4))
    pi_uv[:, 0, 0] = vx*vx*pixx + vy*vy*piyy + 2*vx*vy*pixy
    pi_uv[:, 1, 1] = pixx
    pi_uv[:, 2, 2] = piyy
    pi_uv[:, 3, 3] = pi_uv[:, 0, 0] - pixx - piyy
    pi_uv[:, 0, 1] = pi_uv[:, 1, 0] = -(vx*pixx + vy*pixy)
    pi_uv[:, 0, 2] = pi_uv[:, 2, 0] = -(vx*pixy + vy*piyy)
    pi_uv[:, 1, 2] = pi_uv[:, 2, 1] = pixy

    pT_max = 4
    pT_bins = np.linspace(0, pT_max, 41)
    pT = (pT_bins[:-1] + pT_bins[1:])/2
    delta_pT = pT_max/(pT_bins.size - 1)

    phi = np.linspace(0, 2*np.pi, 100, endpoint=False)

    eta, eta_weights = special.ps_roots(30)
    eta_max = 4
    eta *= eta_max
    eta_weights *= 2*eta_max

    T = .145
    hrg = frzout.HRG(T, res_width=False)
    eta_over_tau = hrg.eta_over_tau()
    zeta_over_tau = hrg.zeta_over_tau()
    cs2 = hrg.cs2()

    the_vn = [2, 3, 4]

    def calc_obs(ID):
        m = frzout.species_dict[ID]['mass']
        degen = frzout.species_dict[ID]['degen']
        sign = -1 if frzout.species_dict[ID]['boson'] else 1

        pT_, phi_, eta_ = np.meshgrid(pT, phi, eta)
        mT_ = np.sqrt(m*m + pT_*pT_)
        p = np.array([
            mT_*np.cosh(eta_),
            pT_*np.cos(phi_),
            pT_*np.sin(phi_),
            mT_*np.sinh(eta_)
        ]).T

        # ignore negative contributions
        psigma = np.inner(p, sigma_)
        psigma.clip(min=0, out=psigma)

        pu = np.inner(p, u_)
        with np.errstate(over='ignore'):
            f = 1/(np.exp(pu/T) + sign)

        df = f*(1 - sign*f) * (
            ((pu*pu - m*m)/(3*pu) - cs2*pu)/(zeta_over_tau*T)*Pi +
            np.einsum('ijku,ijkv,auv->ijka', p, p, pi_uv)/(2*pu*T*eta_over_tau)
        )
        f += df

        # (phi, pT) distribution
        phi_pT_dist = (
            2*degen *
            np.einsum('i,ijka,ijka->jk', eta_weights, psigma, f) /
            (2*np.pi*hbarc)**3 / phi.size
        )
        pT_dist = phi_pT_dist.sum(axis=1)

        # navg, pT dist, qn(pT)
        return (
            2*np.pi*delta_pT * np.inner(pT, pT_dist),
            pT_dist,
            [np.inner(np.exp(1j*n*phi), phi_pT_dist)/pT_dist for n in the_vn]
        )

    obs_calc = [calc_obs(i) for i, _ in id_parts]

    surface = frzout.Surface(
        x, sigma, v,
        pi=dict(xx=pixx, yy=piyy, xy=pixy),
        Pi=Pi
    )

    ngroups = 1000
    N = 1000  # nsamples per group
    nsamples = ngroups*N

    # need many samples for diff flow
    # too many to store all particles in memory -> accumulate observables
    obs_sampled = [(
        np.empty(nsamples, dtype=int),  # ID particle counts
        np.zeros_like(pT),  # pT distribution
        np.zeros((len(the_vn), pT.size)),  # diff flow
    ) for _ in id_parts]

    diff_flow_counts = [np.zeros_like(vn, dtype=int)
                        for (_, _, vn) in obs_sampled]

    from multiprocessing.pool import ThreadPool

    for k in range(ngroups):
        print('      group', k)
        # threading increases performance since sample() releases the GIL
        with ThreadPool() as pool:
            parts = pool.map(lambda _: frzout.sample(surface, hrg), range(N))
        # identified particle counts
        for (i, _), (counts, _, _) in zip(id_parts, obs_sampled):
            counts[k*N:(k+1)*N] = [
                np.count_nonzero(np.abs(p['ID']) == i) for p in parts
            ]
        # merge all samples
        parts = np.concatenate(parts)
        abs_ID = np.abs(parts['ID'])
        for (i, _), (_, pT_dist, vn_arr), dflow_counts, (_, _, qn_list) in zip(
                id_parts, obs_sampled, diff_flow_counts, obs_calc
        ):
            parts_ = parts[abs_ID == i]
            px, py = parts_['p'].T[1:3]
            pT_ = np.sqrt(px*px + py*py)
            phi_ = np.arctan2(py, px)
            # pT distribution
            pT_dist += np.histogram(pT_, bins=pT_bins, weights=1/pT_)[0]
            # differential flow
            for n, vn, dfc, qn in zip(the_vn, vn_arr, dflow_counts, qn_list):
                cosnphi = [
                    np.cos(n*phi_[np.fabs(pT_ - p) < .2] - npsi)
                    for (p, npsi) in zip(pT, np.arctan2(qn.imag, qn.real))
                ]
                vn += [c.sum() for c in cosnphi]
                dfc += [c.size for c in cosnphi]

    # normalize pT dists and diff flow
    for (_, pT_dist, vn), dflow_counts in zip(obs_sampled, diff_flow_counts):
        pT_dist /= 2*np.pi*nsamples*delta_pT
        vn /= dflow_counts

    return pT, the_vn, obs_calc, obs_sampled
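A hedged sketch of how the returned observables might be compared (an illustration only: it assumes matplotlib and the module-level `id_parts` list, and the full computation above is expensive):

import matplotlib.pyplot as plt

pT, the_vn, obs_calc, obs_sampled = _realistic_surface_observables()

for (navg, pT_dist_calc, _), (counts, pT_dist_samp, _) in zip(obs_calc, obs_sampled):
    plt.plot(pT, pT_dist_calc, ls='--', color='0.5')  # calculated
    plt.plot(pT, pT_dist_samp)  # sampled
    print('navg: calc {:.4g}, sampled {:.4g}'.format(navg, counts.mean()))

plt.yscale('log')
plt.xlabel('pT [GeV]')
plt.ylabel('pT spectrum (per sample)')
plt.show()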