def det_deriv(ti_te, mi_me, bi, kperp_rhoi, w_bar):
    """
    Evaluate the derivative of Det M w.r.t. omega_bar,
    where omega_bar = w / k_par / v_A.
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    xi_i = w_bar / np.sqrt(bi)
    xi_e = w_bar * np.sqrt(ti_te / mi_me / bi)
    Z_i = zp(xi_i)
    Z_e = zp(xi_e)
    Gamma_1i = i0e(alpha_i) - i1e(alpha_i)
    Gamma_1e = i0e(alpha_e) - i1e(alpha_e)
    Gamma_0i = i0e(alpha_i)
    Gamma_0e = i0e(alpha_e)
    G_i = 1 / np.sqrt(bi) * ((1 - 2 * xi_i**2) * Z_i - 2 * xi_i)
    G_e = 1 / np.sqrt(bi) * ((1 - 2 * xi_e**2) * Z_e - 2 * xi_e) \
        * np.sqrt(ti_te / mi_me)
    # Ap is dA / d omega_bar; likewise Cp, Dp for C and D
    Ap = Gamma_0i * G_i + ti_te * Gamma_0e * G_e
    Cp = Gamma_1i * G_i - Gamma_1e * G_e
    Dp = 2 * (Gamma_1i * G_i + 1 / ti_te * Gamma_1e * G_e)
    a = A(ti_te, mi_me, bi, kperp_rhoi, w_bar)
    c = C(ti_te, mi_me, bi, kperp_rhoi, w_bar)
    d = D(ti_te, mi_me, bi, kperp_rhoi, w_bar)
    res = Ap * (d - 2 / bi) + a * Dp - 2 * c * Cp
    return res
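# det_deriv is naturally paired with Newton-Raphson root finding on the
# dispersion relation Det M(omega_bar) = 0. The determinant itself does
# not appear in this file; the form below is a sketch *inferred* from
# the derivative structure of det_deriv (Ap*(d - 2/bi) + a*Dp - 2*c*Cp
# is the omega_bar-derivative of a*(d - 2/bi) - c**2). It assumes the
# same external A, C, D helpers that det_deriv already calls.
def det_M(ti_te, mi_me, bi, kperp_rhoi, w_bar):
    # hypothetical companion to det_deriv: Det M = A*(D - 2/bi) - C^2
    a = A(ti_te, mi_me, bi, kperp_rhoi, w_bar)
    c = C(ti_te, mi_me, bi, kperp_rhoi, w_bar)
    d = D(ti_te, mi_me, bi, kperp_rhoi, w_bar)
    return a * (d - 2 / bi) - c**2

# Usage sketch (scipy.optimize.newton accepts an analytic fprime):
#   from scipy.optimize import newton
#   w_root = newton(lambda w: det_M(ti_te, mi_me, bi, kperp_rhoi, w),
#                   x0=1.0 + 0.0j,
#                   fprime=lambda w: det_deriv(ti_te, mi_me, bi,
#                                              kperp_rhoi, w))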
def F(ti_te, mi_me, kperp_rhoi):
    """
    F = sum_s (2 * Ts / Ti) * Gamma_1s
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    Gamma_1i = i0e(alpha_i) - i1e(alpha_i)
    Gamma_1e = i0e(alpha_e) - i1e(alpha_e)
    return 2 * Gamma_1i - 2 * Gamma_1e / ti_te
def E(ti_te, mi_me, kperp_rhoi):
    """
    E = sum_s (qs / qi) * Gamma_1s
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    Gamma_1i = i0e(alpha_i) - i1e(alpha_i)
    Gamma_1e = i0e(alpha_e) - i1e(alpha_e)
    return Gamma_1i - Gamma_1e
def C(ti_te, mi_me, bi, kperp_rhoi, w_bar):
    """
    C = sum_s (qi / qs) * Gamma_1s * xi_s * Z_s
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    xi_i = w_bar / np.sqrt(bi)
    xi_e = w_bar * np.sqrt(ti_te / mi_me / bi)
    Z_i = zp(xi_i)
    Z_e = zp(xi_e)
    Gamma_1i = i0e(alpha_i) - i1e(alpha_i)
    Gamma_1e = i0e(alpha_e) - i1e(alpha_e)
    res = Gamma_1i * xi_i * Z_i - Gamma_1e * xi_e * Z_e
    return res
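# The zp called by det_deriv and C above is not defined in this file.
# From how it is combined ((1 - 2*xi**2)*Z - 2*xi is d/dxi of xi*Z(xi)
# for the standard Z), it is presumably the plasma dispersion function
# Z(xi) = i*sqrt(pi)*w(xi), with w the Faddeeva function. A minimal
# sketch under that assumption, using scipy.special.wofz:
def zp_sketch(xi):
    import numpy as np
    from scipy.special import wofz
    # Z(xi) = i*sqrt(pi) * exp(-xi^2) * erfc(-i*xi) = i*sqrt(pi)*wofz(xi)
    return 1j * np.sqrt(np.pi) * wofz(xi)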
def kappa_to_stddev(kappa):
    '''
    Convert von Mises kappa to wrapped-Gaussian standard deviation:
    std = sqrt(-2 * ln(I_1(kappa) / I_0(kappa)))
    '''
    # alternative (circular standard deviation based on 1 - R):
    # return 1.0 - spsp.i1(kappa)/spsp.i0(kappa)
    return np.sqrt(-2. * np.log(spsp.i1e(kappa) / spsp.i0e(kappa)))
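# Why the exponentially scaled Bessels above (and throughout this
# file): the exp(-x) scaling cancels in the ratio, so
# i1e(x)/i0e(x) == I1(x)/I0(x) exactly, while the unscaled i0, i1
# overflow for large arguments. A quick check:
def _check_scaled_bessel_ratio(x=750.0):
    import numpy as np
    from scipy.special import i0, i1, i0e, i1e
    with np.errstate(over='ignore', invalid='ignore'):
        naive = i1(x) / i0(x)    # nan: numerator and denominator overflow
    scaled = i1e(x) / i0e(x)     # finite, close to 1 - 1/(2x) for large x
    return naive, scaled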
def gen_data(dtypes, shapes):
    inputs = []
    for dtype, shape in zip(dtypes, shapes):
        data = random_gaussian(shape, miu=3.75).astype(dtype)
        inputs.append(data)
    # expected result computed from the last generated input
    expect = sp.i1e(data)
    output = np.full(expect.shape, np.nan, dtypes[0])
    return expect, inputs, output
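# random_gaussian is an external helper here; judging by its use, it
# generates a Gaussian random array with mean `miu`. A hypothetical
# minimal stand-in:
def random_gaussian_sketch(shape, miu=0.0, sigma=1.0):
    import numpy as np
    # normal(loc, scale, size) draws a Gaussian sample of the given shape
    return np.random.normal(loc=miu, scale=sigma, size=shape)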
def test_besseli_larger(self, dtype):
    x = np.random.uniform(1., 20., size=int(1e4)).astype(dtype)
    try:
        from scipy import special  # pylint: disable=g-import-not-at-top
        self.assertAllClose(
            special.i0e(x), self.evaluate(special_math_ops.bessel_i0e(x)))
        self.assertAllClose(
            special.i1e(x), self.evaluate(special_math_ops.bessel_i1e(x)))
    except ImportError as e:
        tf_logging.warn('Cannot test special functions: %s' % str(e))
def stddev_to_kappa_single(stddev):
    '''
    Convert stddev to kappa.
    No closed form exists, so minimise the squared mismatch numerically.
    '''
    errfunc = lambda kappa, stddev: (np.exp(-0.5 * stddev**2.) -
                                     spsp.i1e(kappa) / spsp.i0e(kappa))**2.
    kappa_init = 1.0
    kappa_opt = spopt.fmin(errfunc, kappa_init, args=(stddev,), disp=False)
    return np.abs(kappa_opt[0])
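# Round-trip sanity check for the two conversions above. Since
# kappa_to_stddev gives std = sqrt(-2*ln(I1/I0)), we have
# exp(-0.5*std^2) = I1(kappa)/I0(kappa), which is exactly the residual
# that stddev_to_kappa_single drives to zero, so the inversion should
# be near-exact (a sketch, assuming the same spsp/spopt aliases used
# in this file):
def _check_kappa_roundtrip(kappa=2.5):
    import numpy as np
    stddev = kappa_to_stddev(kappa)
    kappa_back = stddev_to_kappa_single(stddev)
    # the simplex search should recover kappa to a few decimal places
    return np.isclose(kappa, kappa_back, rtol=1e-3)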
def prob_EMCCD(self, S, E):
    # get EM gain
    M = self.configs.detector_emgain
    a = 1.00 / M
    if S > 0:
        prob = numpy.sqrt(a * E / S) \
            * numpy.exp(-a * S - E + 2 * numpy.sqrt(a * E * S)) \
            * i1e(2 * numpy.sqrt(a * E * S))
    else:
        prob = numpy.exp(-E)
    return prob
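# The branch on S above is scalar-only. A hypothetical vectorised
# variant for array-valued signals, using np.where (a sketch, not part
# of the original class; M is the EM gain passed in explicitly):
def prob_EMCCD_vec(S, E, M):
    import numpy as np
    from scipy.special import i1e
    a = 1.0 / M
    S = np.asarray(S, dtype=float)
    safe_S = np.where(S > 0, S, 1.0)  # dummy values for the S <= 0 slots
    arg = 2 * np.sqrt(a * E * safe_S)
    pos = np.sqrt(a * E / safe_S) \
        * np.exp(-a * S - E + arg) * i1e(arg)
    # S <= 0 falls back to the zero-count probability exp(-E)
    return np.where(S > 0, pos, np.exp(-E))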
def forward(ctx, v, z):
    assert isinstance(v, Number), 'v must be a scalar'
    ctx.save_for_backward(z)
    ctx.v = v
    z_cpu = z.data.cpu().numpy()
    if np.isclose(v, 0):
        output = special.i0e(z_cpu, dtype=z_cpu.dtype)
    elif np.isclose(v, 1):
        output = special.i1e(z_cpu, dtype=z_cpu.dtype)
    else:  # v > 0
        output = special.ive(v, z_cpu, dtype=z_cpu.dtype)
    return torch.Tensor(output).to(z.device)
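# The matching backward pass (not shown here) needs d/dz ive(v, z).
# A standard identity, derived from I_v'(z) = I_{v-1}(z) - (v/z)*I_v(z)
# and the exp(-z) scaling, is
#     d/dz ive(v, z) = ive(v-1, z) - ive(v, z) * (v + z) / z.
# A quick numerical check of that identity with scipy only:
def _check_ive_grad(v=1.5):
    import numpy as np
    from scipy import special
    z = np.linspace(0.5, 5.0, 16)
    analytic = special.ive(v - 1, z) - special.ive(v, z) * (v + z) / z
    h = 1e-6
    numeric = (special.ive(v, z + h) - special.ive(v, z - h)) / (2 * h)
    return np.allclose(analytic, numeric, atol=1e-6)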
def w_stuff(fp, ref_distance, ref_dis_x, ref_dis_y):
    if fp > 0:
        mask = np.zeros_like(ref_distance, dtype=bool)
        mask[np.tril_indices_from(mask, k=-1)] = True
        rij = ref_distance[mask]
        rij_x = ref_dis_x[mask]
        rij_y = ref_dis_y[mask]
        Wij = W(fp, rij)
        Wij_rij_x = 2 * np.sum(Wij * rij_x)
        Wij_rij_y = 2 * np.sum(Wij * rij_y)
        Wij_rij_norm = np.sqrt(Wij_rij_x * Wij_rij_x
                               + Wij_rij_y * Wij_rij_y)
        # scaled Bessel ratio: i1e/i0e == I1/I0, safe for large norms
        fac = i1e(Wij_rij_norm) / i0e(Wij_rij_norm) / Wij_rij_norm
        Wr2 = 2.0 * np.sum(Wij * rij * rij)
        print('factor=%g Wij_rij_x=%s Wij_rij_y=%s Wij_rij_norm=%s'
              % (fac, Wij_rij_x, Wij_rij_y, Wij_rij_norm))
        Wr2_full = fac * Wr2 / mask.shape[0] / (mask.shape[1] - 1) / 2
        Wr2 = Wr2 / 2 / mask.shape[0] / (mask.shape[1] - 1) / 2
    else:
        Wr2 = 0
        Wr2_full = 0
    return Wr2, Wr2_full
def calculate_items(snaps, min_neigh=4, cutoff=1.5, MAXnb=100, nbins=2000,
                    nbinsq=50, Pe=10, rho_0=0.60, CG_flag=False):
    """
    snaps is an n-dimensional list which holds all the data from a dump
    file. Each item in the list is a snapshot dict of the per-atom
    properties measured in the simulation.
    """
    import fortran_tools as ft

    outer_vec = []
    inner_vec = []
    corr, corr_b, corr_in, count, lindemann = {}, {}, {}, {}, {}
    MSD, Q, Q2, g6t, g6t_re, g6t_im = {}, {}, {}, {}, {}, {}

    ts = len(snaps)

    for t1, snap1 in enumerate(snaps):
        # for each snapshot in the dump file data
        box = snap1['box']
        ref_coords = snap1['ucoords']
        mus = snap1['mus']
        if CG_flag:
            # if time-averaged velocities are computed at each time step
            # (to reduce noise of the per-atom velocity at each timestep)
            vs = snap1['CG_vs']
        else:
            vs = np.column_stack((snap1['vx'], snap1['vy']))

        tmp_list = ft.distance_matrix(ref_coords, snap1['c_psi6[1]'],
                                      snap1['c_psi6[2]'], snap1['mus'],
                                      snap1['local_density'], box, cutoff,
                                      1, rho_0, MAXnb, nbins, nbinsq,
                                      len(ref_coords), 2)
        # distance matrix between particle pairs
        ref_distance, ref_dis_x, ref_dis_y = tmp_list[:3]
        # number of neighbours for all particles
        ref_num_nb, ref_list_nb = tmp_list[3:5]
        # correlation functions and structure functions
        g, g6, g6re, g6im, sq = tmp_list[5:10]
        g_ori, g_dp, g_dp_tr, g_pr, s_pr = tmp_list[10:]

        # load array in which each atom is labelled as a member of a
        # cluster, with those atoms having label 0 being members of the
        # largest cluster. Size of this array is the number of atoms in
        # the simulation.
        cl_i = snap1['new_c_c1']
        # load array of cluster sizes, going from largest to smallest.
        # Size of this array is N_clusters.
        cl_s = np.array(sorted(snap1['cluster_sizes'].values(),
                               reverse=True))
        Nc = snap1['N_clusters']
        min_cluster_size = 2

        # compute angular momentum and linear momentum of clusters
        cluster_AngM, cluster_LinM = ft.get_cluster_omega(
            ref_coords, vs, box, cl_i, cl_s, len(ref_coords), 2, Nc)
        # compute root-mean-squared angular momentum of clusters
        RMS_AngMom2 = np.nanmean(np.where(cl_s >= min_cluster_size,
                                          np.multiply(cluster_AngM,
                                                      cluster_AngM),
                                          np.nan))
        RMS_AngMom = np.sqrt(RMS_AngMom2)
        # compute root-mean-squared linear momentum of clusters
        RMS_LinMom2 = np.nanmean(np.where(cl_s >= min_cluster_size,
                                          np.multiply(cluster_LinM,
                                                      cluster_LinM),
                                          np.nan))
        RMS_LinMom = np.sqrt(RMS_LinMom2)
        # compute mean cluster size
        cluster_size = np.nanmean(np.where(cl_s >= min_cluster_size,
                                           cl_s, np.nan))
        # compute (normalised) mean polarisation
        polarisation = np.linalg.norm(np.mean(mus, axis=0))
        print(f"cluster info: RMS_L, RMS_M, size, "
              f"no_clusters(>={min_cluster_size}) "
              "no_clusters")
        print(RMS_AngMom, RMS_LinMom, cluster_size,
              np.count_nonzero(cl_s >= min_cluster_size), len(cl_s))

        if Pe > 0:
            mask = np.zeros_like(ref_distance, dtype=bool)
            mask[np.tril_indices_from(mask, k=-1)] = True
            rij = ref_distance[mask]
            rij_x = ref_dis_x[mask]
            rij_y = ref_dis_y[mask]
            Wij = W(Pe, rij)
            Wij_rij_x = 2 * np.sum(Wij * rij_x)
            Wij_rij_y = 2 * np.sum(Wij * rij_y)
            Wij_rij_norm = np.sqrt(Wij_rij_x * Wij_rij_x
                                   + Wij_rij_y * Wij_rij_y)
            fac = i1e(Wij_rij_norm) / i0e(Wij_rij_norm) / Wij_rij_norm
            Wr2 = 2.0 * np.sum(Wij * rij * rij)
            print(f"factor={fac} Wij_rij_x={Wij_rij_x} "
                  f"Wij_rij_y={Wij_rij_y} Wij_rij_norm={Wij_rij_norm}")
            Wr2_full = fac * Wr2 / mask.shape[0] / (mask.shape[1] - 1) / 2
            Wr2 = Wr2 / 2 / mask.shape[0] / (mask.shape[1] - 1) / 2
        else:
            Wr2 = 0
            Wr2_full = 0

        # ITEM: ATOMS id type x y xu yu mux muy fx fy tqz v_psi6
        # c_psi6[1] c_psi6[2] f_cg[1] f_cg[2] f_cg[3] f_cg[4] f_cg[5]
        # f_cg[6] f_cg[7] c_c1
        # running averages accumulated below: <psi6_re>, <psi6_re^2>,
        # <psi6_im>, <psi6_im^2>, <mux>, <mux^2>, <muy>, <muy^2>
        if t1 == 0:
            # beginning of time averages
            p6re = np.mean(snap1['c_psi6[1]'])
            p6im = np.mean(snap1['c_psi6[2]'])
            p6 = np.absolute(complex(p6re, p6im))
            sum_psi6 = p6
            sum_psi62 = p6 * p6
            sum_psi6_cmplx = complex(p6re, p6im)
            sum_mux = np.mean(snap1['mux'])
            sum_mux2 = np.mean(np.array(snap1['mux'])**2)
            sum_muy = np.mean(snap1['muy'])
            sum_muy2 = np.mean(np.array(snap1['muy'])**2)
            theta = np.arctan2(snap1['muy'], snap1['mux'])
            sum_theta = np.mean(theta)
            sum_theta2 = np.mean(theta**2)
            nematic = 2. * np.cos(theta)**2 - 1.
            sum_nematic = np.mean(nematic)
            sum_nematic2 = np.mean(nematic**2)
            sum_RMS_AngMom = RMS_AngMom
            sum_RMS_AngMom2 = RMS_AngMom * RMS_AngMom
            sum_RMS_LinMom = RMS_LinMom
            sum_RMS_LinMom2 = RMS_LinMom * RMS_LinMom
            sum_cluster_size = cluster_size
            sum_polarisation = polarisation
            sum_g = np.matrix(g)
            sum_g6 = np.matrix(g6)
            sum_g6re = np.matrix(g6re)
            sum_g6im = np.matrix(g6im)
            sum_sq = np.array(sq)
            sum_g_ori = np.array(g_ori)
            sum_g_dp = np.array(g_dp)
            sum_g_dp_tr = np.array(g_dp_tr)
            sum_g_pr = np.array(g_pr)
            sum_Wr2 = Wr2
            sum_Wr2_full = Wr2_full
            sum_pij_rij = s_pr
            g_cnt = 1
        else:
            # add to time averages
            p6re = np.mean(snap1['c_psi6[1]'])
            p6im = np.mean(snap1['c_psi6[2]'])
            p6 = np.absolute(complex(p6re, p6im))
            sum_psi6 += p6
            sum_psi62 += p6 * p6
            sum_psi6_cmplx += complex(p6re, p6im)
            sum_mux += np.mean(snap1['mux'])
            sum_mux2 += np.mean(np.array(snap1['mux'])**2)
            sum_muy += np.mean(snap1['muy'])
            sum_muy2 += np.mean(np.array(snap1['muy'])**2)
            theta = np.arctan2(snap1['muy'], snap1['mux'])
            sum_theta += np.mean(theta)
            sum_theta2 += np.mean(theta**2)
            nematic = 2. * np.cos(theta)**2 - 1.
            sum_nematic += np.mean(nematic)
            sum_nematic2 += np.mean(nematic**2)
            sum_RMS_AngMom += RMS_AngMom
            sum_RMS_AngMom2 += RMS_AngMom * RMS_AngMom
            sum_RMS_LinMom += RMS_LinMom
            sum_RMS_LinMom2 += RMS_LinMom * RMS_LinMom
            sum_cluster_size += cluster_size
            sum_polarisation += polarisation
            sum_g += np.matrix(g)
            sum_g6 += np.matrix(g6)
            sum_g6re += np.matrix(g6re)
            sum_g6im += np.matrix(g6im)
            sum_sq += np.array(sq)
            sum_g_ori += np.array(g_ori)
            sum_g_dp += np.array(g_dp)
            sum_g_dp_tr += np.array(g_dp_tr)
            sum_g_pr += np.array(g_pr)
            sum_Wr2 += Wr2
            sum_Wr2_full += Wr2_full
            sum_pij_rij += s_pr
            g_cnt += 1

        # mask for distance matrix
        nb = np.logical_and(ref_distance < cutoff, ref_distance > 0)
        # count number of neighbours each particle has
        num_neighs_ref = ref_num_nb
        # determine whether particles are surrounded by other particles
        inner_particles_ref = num_neighs_ref > min_neigh
        # or not
        outer_particles_ref = np.logical_not(inner_particles_ref)
        # double count number of pairs which have more than min_neigh
        # neighbours
        norm_in = np.sum(np.multiply(nb[inner_particles_ref]
                                     [:, inner_particles_ref],
                                     nb[inner_particles_ref]
                                     [:, inner_particles_ref])) + 0.0
        # double count number of pairs which have fewer than min_neigh
        # neighbours
        norm_b = np.sum(np.multiply(nb[outer_particles_ref]
                                    [:, outer_particles_ref],
                                    nb[outer_particles_ref]
                                    [:, outer_particles_ref])) + 0.0
        # double count number of pairs which have neighbours
        norm_all = np.sum(np.multiply(nb, nb)) + 0.0
        outer_n = outer_particles_ref.sum()
        inner_n = inner_particles_ref.sum()
        print('boundary/inner = %s/%s' % (outer_n, inner_n))

        inner, outer = get_inner_outer(snap1)
        inner_vec.extend(inner)
        outer_vec.extend(outer)

        for t2 in range(t1 + 1, ts):
            # for a snapshot at a later time
            snap2 = snaps[t2]
            t = snap2['step'] - snap1['step']
            coords = snap2['ucoords']  # [:30]
            tmp_list = ft.distance_matrix(coords, snap2['c_psi6[1]'],
                                          snap2['c_psi6[2]'], snap2['mus'],
                                          snap2['local_density'], box,
                                          cutoff, 0, rho_0, MAXnb, nbins,
                                          nbinsq, len(coords), 2)
            # distance matrix between particle pairs
            distance, distance_x, distance_y = tmp_list[:3]
            # number of neighbours for all particles
            num_nb, list_nb = tmp_list[3:5]
            # correlation functions and structure functions are not
            # calculated here (flag 0), so the rest of tmp_list is unused:
            # g, g6, g6re, g6im, sq = tmp_list[5:10]
            # g_ori, g_dp, g_dp_tr, g_pr, s_pr = tmp_list[10:]

            # mask for distance matrix
            nb1 = np.logical_and(distance < cutoff, distance > 0)
            out = '%s ' % t
            count[t] = count.get(t, 0) + 1
            # compute number of neighbours which are still neighbours
            # at a later time
            c = np.sum(np.multiply(nb, nb1))
            c /= norm_all
            corr[t] = corr.get(t, 0) + c
            out += '%s ' % c
            # compute number of neighbours with more than min_neigh
            # neighbours that are still neighbours at a later time
            c = np.sum(np.multiply(nb[inner_particles_ref]
                                   [:, inner_particles_ref],
                                   nb1[inner_particles_ref]
                                   [:, inner_particles_ref]))
            c /= norm_in
            corr_in[t] = corr_in.get(t, 0) + c
            out += '%s ' % c
            # compute number of neighbours with fewer than min_neigh
            # neighbours that are still neighbours at a later time
            c = np.sum(np.multiply(nb[outer_particles_ref]
                                   [:, outer_particles_ref],
                                   nb1[outer_particles_ref]
                                   [:, outer_particles_ref]))
            c /= norm_b
            corr_b[t] = corr_b.get(t, 0) + c
            out += '%s ' % c

            d = coords - ref_coords
            Dsq = [((u - d[j - 1])**2).sum()
                   for i, u in enumerate(d)
                   for j in ref_list_nb[i, 0:ref_num_nb[i]]]
            if len(Dsq) > 0:
                lindemann[t] = lindemann.get(t, 0) + np.mean(Dsq)

            # MSD of clusters
            cl_i = snap1['new_c_c1']
            cl_s = sorted(snap1['cluster_sizes'].values(), reverse=True)
            Nc = snap1['N_clusters']
            com_MSD = ft.get_cluster_msd(ref_coords, coords, box, cl_i,
                                         cl_s, len(coords), 2, Nc)
            MSD[t] = MSD.get(t, 0) + com_MSD

            # compute overlap autocorrelation function and psi6
            # autocorrelation function
            tmplist = ft.get_overlap(ref_coords, coords,
                                     snap1['c_psi6[1]'],
                                     snap1['c_psi6[2]'],
                                     snap2['c_psi6[1]'],
                                     snap2['c_psi6[2]'],
                                     box, len(coords), 2)
            overlap, g6t_abs, g6_re, g6_im = tmplist
            Q[t] = Q.get(t, 0) + overlap
            Q2[t] = Q2.get(t, 0) + overlap * overlap
            g6t_re[t] = g6t_re.get(t, 0) + g6_re
            g6t_im[t] = g6t_im.get(t, 0) + g6_im
            g6t[t] = g6t.get(t, 0) + g6t_abs
            print(f"{out} , {lindemann.get(t, 0)} , {MSD[t]} , {Q[t]} , "
                  f"{g6t_re[t]} , {g6t_im[t]}")

    ret_t = [corr, corr_b, corr_in, count, lindemann, MSD, Q, Q2, g6t,
             g6t_re, g6t_im]
    ret_o = [g_cnt, sum_psi6, sum_psi62, sum_psi6_cmplx, sum_mux,
             sum_mux2, sum_muy, sum_muy2, sum_theta, sum_theta2,
             sum_nematic, sum_nematic2, sum_g, sum_g6, sum_g6re,
             sum_g6im, sum_sq, sum_g_ori, sum_g_dp, sum_g_dp_tr,
             sum_g_pr, sum_Wr2, sum_Wr2_full, sum_pij_rij,
             sum_RMS_LinMom, sum_RMS_LinMom2, sum_RMS_AngMom,
             sum_RMS_AngMom2, sum_cluster_size, sum_polarisation,
             inner_vec, outer_vec]
    return ret_t, ret_o
def test_i1e(self):
    x = torch.rand(100, 100) * 10 - 5
    gt = sps.i1e(x.numpy())
    assert np.allclose(i1e_cc(x).numpy(), gt)
    assert np.allclose(i1e_cuda(x.cuda()).cpu().numpy(), gt)