import numpy as np
from scipy.special import binom, erf

# Vlm, operation, CAO_basis and abstract_source are assumed to be provided by
# the project's own modules; their import lines are not part of this section.


class q_particle(abstract_source):
    """Implementation of a point-charge (particle) source."""

    def __init__(self, x, q):
        super().__init__(x)
        self.q = q  # charge

    def M_expansion_to_box(self, box, p):
        # a point charge carries only the monopole moment q
        self.Mlm = Vlm(p)
        self.Mlm.setlm(0, 0, self.q)
        super().M_expansion_to_box(box)

    def near_field_interaction(self, other, scale_factor):
        # bare 1/R Coulomb kernel; the charges themselves are not included
        return 1 / (operation.distance_cal(self.x, other.x) * scale_factor)
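

# Minimal usage sketch (hypothetical values, not part of the module's API);
# it assumes abstract_source.__init__ stores the center x and that
# operation.distance_cal returns the Euclidean distance between two centers.
def _demo_point_charge_near_field():
    p1 = q_particle(np.array([0.0, 0.0, 0.0]), 1.0)
    p2 = q_particle(np.array([0.0, 0.0, 2.0]), -1.0)
    # returns the bare 1/R kernel (0.5 for R = 2); the charges q are not
    # folded into this value
    return p1.near_field_interaction(p2, scale_factor=1.0)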


class gs_q_dist(abstract_source):
    """Implementation of a spherical Gaussian charge distribution
    rho(r) = k * exp(-a |r - x|^2)."""

    def __init__(self, x, a, k):
        super().__init__(x)
        self.a = a  # exponential coefficient
        self.k = k  # pre-factor

    def M_expansion_to_box(self, box, p):
        # only the monopole is non-zero: the total charge k * (pi / a)^(3/2)
        self.Mlm = Vlm(p)
        self.Mlm.setlm(0, 0, self.k * np.power(np.pi / self.a, 3 / 2))
        super().M_expansion_to_box(box)

    def near_field_interaction(self, other, scale_factor):
        pre_factor = np.power(np.pi, 3) * self.k * other.k / (
            np.power(self.a * other.a, 3 / 2)
            * operation.distance_cal(self.x, other.x) * scale_factor)
        t_sqrt = np.sqrt(self.a * other.a / (self.a + other.a)) \
            * operation.distance_cal(self.x, other.x) * scale_factor
        return pre_factor * erf(t_sqrt)
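

# Illustrative, self-contained check of the closed-form energy evaluated by
# gs_q_dist.near_field_interaction: two spherical Gaussians
# rho_i(r) = k_i * exp(-a_i |r - x_i|^2) a distance R apart interact as
# pi^3 k1 k2 / ((a1 a2)^(3/2) R) * erf(sqrt(a1 a2 / (a1 + a2)) * R).
# The numbers are hypothetical and scale_factor is taken as 1.
def _demo_gaussian_near_field():
    a1, k1 = 0.8, 1.0
    a2, k2 = 1.3, 1.0
    R = 1.5
    return np.pi**3 * k1 * k2 / ((a1 * a2)**1.5 * R) \
        * erf(np.sqrt(a1 * a2 / (a1 + a2)) * R)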


class shell_pair:
    """Product charge distribution of two contracted Cartesian Gaussian basis
    functions mu and nu."""

    def __init__(self, mu, nu):
        if not (type(mu) == CAO_basis and type(nu) == CAO_basis):
            raise Exception("Must input contracted atomic basis")
        if not mu.basis_type == nu.basis_type:
            raise Exception("Must input two bases of the same type")
        self.mu = mu
        self.nu = nu
        self.d = np.outer(mu.d, nu.d)    # products of contraction coefficients
        self.a_k = np.outer(mu.a, nu.a)  # products of primitive exponents
        self.X_p = np.zeros(shape=(len(mu.a), len(nu.a), 3), dtype=np.float64)
        self.a_p = np.zeros(shape=self.a_k.shape, dtype=np.float64)
        for i in range(0, len(mu.a)):
            for j in range(0, len(nu.a)):
                # Gaussian product theorem: combined exponent and center
                self.a_p[i][j] = mu.a[i] + nu.a[j]
                self.X_p[i][j] = (mu.a[i] * mu.x + nu.a[j] * nu.x) / self.a_p[i][j]
        self.a_k /= self.a_p             # reduced exponents a_mu a_nu / (a_mu + a_nu)
        self.Mlm_array = None
        self.Mlm = None

    def M_expansion_to_box(self, box, p):
        self.Mlm_init(p)
        # flatten the (n_mu, n_nu) grid of primitive-pair expansions, shift
        # each one from its center X_p to the box center, and accumulate
        self.Mlm_array = np.reshape(self.Mlm_array, np.prod(self.Mlm_array.shape))
        self.X_p = np.reshape(self.X_p, (len(self.Mlm_array), 3))
        self.Mlm = Vlm(p)
        for i in range(0, len(self.Mlm_array)):
            self.Mlm.added_to_self(
                operation.M2M(self.Mlm_array[i], self.X_p[i] - box.x))
        box.added_to_Mlm(self.Mlm)

    def Mlm_init(self, p):
        self.Mlm_array = np.ndarray(shape=self.a_k.shape, dtype=Vlm)
        dis_sq = sum((self.mu.x - self.nu.x)**2)
        for i in range(0, len(self.mu.a)):
            for j in range(0, len(self.nu.a)):
                self.Mlm_array[i][j] = self.Mlm_matrix_elememt_gen(
                    self.X_p[i][j], self.a_p[i][j], p)
                # Gaussian product pre-factor exp(-a_k |X_mu - X_nu|^2) times
                # the contraction coefficients
                scale_factor = np.exp(-self.a_k[i][j] * dis_sq) * self.d[i][j]
                self.Mlm_array[i][j].scale(scale_factor)

    def Mlm_matrix_elememt_gen(self, X_p, a_p, p):
        # Expand the Cartesian polynomial part of the primitive pair about the
        # combined center X_p: C_t[t] is the coefficient of (x - X_p)**t in
        # (x - X_mu)**l_mu * (x - X_nu)**l_nu, and C_u, C_v likewise for y, z.
        pow_max = self.mu.pow + self.nu.pow

        C_t = np.zeros(pow_max[0] + 1)
        for t in range(0, pow_max[0] + 1):
            for i in range(max(0, t - self.nu.pow[0]), min(self.mu.pow[0], t) + 1):
                C_t[t] += binom(self.mu.pow[0], i) \
                    * np.power(X_p[0] - self.mu.x[0], self.mu.pow[0] - i) \
                    * binom(self.nu.pow[0], t - i) \
                    * np.power(X_p[0] - self.nu.x[0], self.nu.pow[0] - t + i)

        C_u = np.zeros(pow_max[1] + 1)
        for u in range(0, pow_max[1] + 1):
            for i in range(max(0, u - self.nu.pow[1]), min(self.mu.pow[1], u) + 1):
                C_u[u] += binom(self.mu.pow[1], i) \
                    * np.power(X_p[1] - self.mu.x[1], self.mu.pow[1] - i) \
                    * binom(self.nu.pow[1], u - i) \
                    * np.power(X_p[1] - self.nu.x[1], self.nu.pow[1] - u + i)

        C_v = np.zeros(pow_max[2] + 1)
        for v in range(0, pow_max[2] + 1):
            for i in range(max(0, v - self.nu.pow[2]), min(self.mu.pow[2], v) + 1):
                C_v[v] += binom(self.mu.pow[2], i) \
                    * np.power(X_p[2] - self.mu.x[2], self.mu.pow[2] - i) \
                    * binom(self.nu.pow[2], v - i) \
                    * np.power(X_p[2] - self.nu.x[2], self.nu.pow[2] - v + i)

        # every term C_t C_u C_v (x-X_p)^t (y-X_p)^u (z-X_p)^v exp(-a_p r_p^2)
        # is a generalized Gaussian distribution; sum their multipole expansions
        Mlm = Vlm(p)
        for t in range(0, len(C_t)):
            for u in range(0, len(C_u)):
                for v in range(0, len(C_v)):
                    Mlm_G_tuv = ggq_dist(X_p, a_p, C_t[t] * C_u[u] * C_v[v],
                                         [t, u, v])
                    Mlm.added_to_self(Mlm_G_tuv.Mlm_init(p))
        return Mlm
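

# Illustrative check (hypothetical numbers) of the one-dimensional binomial
# expansion used in Mlm_matrix_elememt_gen: C[t] is the coefficient of
# (x - X_p)**t in (x - A)**la * (x - B)**lb, which can be cross-checked by a
# direct polynomial multiplication in the shifted variable y = x - X_p.
def _demo_pair_polynomial_coefficients():
    la, lb = 2, 1
    A, B, X_p = 0.3, -0.7, 0.1
    C = np.zeros(la + lb + 1)
    for t in range(la + lb + 1):
        for i in range(max(0, t - lb), min(la, t) + 1):
            C[t] += binom(la, i) * (X_p - A)**(la - i) \
                * binom(lb, t - i) * (X_p - B)**(lb - t + i)
    # reference: (y + (X_p - A))**la * (y + (X_p - B))**lb expanded in y
    poly = np.polynomial.polynomial
    ref = poly.polymul(poly.polypow([X_p - A, 1.0], la),
                       poly.polypow([X_p - B, 1.0], lb))
    return np.allclose(C, ref)  # True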


class ggq_dist(abstract_source):
    """Implementation of a generalized Gaussian charge distribution:
    a Cartesian monomial with powers [t, u, v] about the center x, times
    exp(-a |r - x|^2), scaled by the pre-factor k."""

    def __init__(self, x, a, k, pow):
        super().__init__(x)
        self.a = a      # exponential coefficient
        self.k = k      # pre-factor
        self.pow = pow  # 3D Cartesian powers [t, u, v]

    def M_expansion_to_box(self, box, p):
        self.Mlm_init(p)
        super().M_expansion_to_box(box)

    def near_field_interaction(self, other, scale_factor):
        print("call direct evaluation")

    def Mlm_init(self, p):
        # Tabulated multipole moments for Cartesian powers of total degree <= 2;
        # higher powers fall through the table and leave the expansion at zero.
        # The table assumes t >= u, so for t < u the x and y powers are swapped
        # here and the moments are restored afterwards via conjugation and the
        # factor (-i)^m.
        self.Mlm = Vlm(p)
        v = self.pow[2]
        if self.pow[0] < self.pow[1]:
            t = self.pow[1]
            u = self.pow[0]
        else:
            t = self.pow[0]
            u = self.pow[1]
        factor1 = np.power(np.pi / self.a, 3 / 2)
        factor2 = 1 / (2 * self.a)
        if t == 0 and u == 0 and v == 0:
            self.Mlm.setlm(0, 0, self.k * factor1)
        elif t == 1 and u == 0 and v == 0:
            w11 = -self.k * factor2 * factor1 / 2
            self.Mlm.setlm(1, 1, w11)
        elif t == 0 and u == 0 and v == 1:
            w10 = self.k * factor2 * factor1
            self.Mlm.setlm(1, 0, w10)
        elif t == 2 and u == 0 and v == 0:
            w00 = self.k * factor2 * factor1
            w20 = -w00 * factor2 / 2
            w22 = -w20 / 2
            self.Mlm.setlm(0, 0, w00)
            self.Mlm.setlm(2, 0, w20)
            self.Mlm.setlm(2, 2, w22)
        elif t == 1 and u == 1 and v == 0:
            w22 = -self.k * (factor2**2) * factor1 * 1j / 4
            self.Mlm.setlm(2, 2, w22)
        elif t == 1 and u == 0 and v == 1:
            w21 = -self.k * (factor2**2) * factor1 / 2
            self.Mlm.setlm(2, 1, w21)
        elif t == 0 and u == 0 and v == 2:
            w00 = self.k * factor2 * factor1
            w20 = w00 * factor2
            self.Mlm.setlm(0, 0, w00)
            self.Mlm.setlm(2, 0, w20)
        if self.pow[0] < self.pow[1]:
            for l in range(0, min(3, self.Mlm.degree + 1)):
                for m in range(0, l + 1):
                    self.Mlm.setlm(
                        l, m,
                        self.Mlm.getlm(l, m).conjugate() * np.power(-1j, m))
        # fill the m < 0 entries using M_{l,-m} = conj(M_{l,m})
        for l in range(0, min(3, self.Mlm.degree + 1)):
            for m in range(-l, 0):
                self.Mlm.setlm(l, m, self.Mlm.getlm(l, -m).conjugate())
        return self.Mlm