def __init__(self, vacuum_wavelength, particle_list, layer_system, k_parallel='default', resolution=None):
    z_list = [particle.position[2] for particle in particle_list]
    is_list = [layer_system.layer_number(z) for z in z_list]
    assert is_list.count(is_list[0]) == len(is_list)  # all particles in same layer?

    SystemMatrix.__init__(self, particle_list)

    self.l_max = max([particle.l_max for particle in particle_list])
    self.m_max = max([particle.m_max for particle in particle_list])
    self.blocksize = fldex.blocksize(self.l_max, self.m_max)
    self.resolution = resolution

    # pre-compute the coupling lookup tables, tabulated over the in-plane distance rho and
    # over the sum and difference of the particle z-coordinates
    lkup = coup.volumetric_coupling_lookup_table(vacuum_wavelength=vacuum_wavelength,
                                                 particle_list=particle_list,
                                                 layer_system=layer_system,
                                                 k_parallel=k_parallel,
                                                 resolution=resolution)
    self.lookup_table_plus, self.lookup_table_minus = lkup[0], lkup[1]
    self.rho_array, self.sum_z_array, self.diff_z_array = lkup[2], lkup[3], lkup[4]

def __init__(self, particle_list):
    self.particle_list = particle_list
    blocksizes = [fldex.blocksize(particle.l_max, particle.m_max)
                  for particle in self.particle_list]
    self.shape = (sum(blocksizes), sum(blocksizes))

def test_multi2single_stlm():
    idcs = []
    lmax = 5
    mmax = 5
    count = 0
    for tau in range(2):
        for l in range(1, lmax + 1):
            for m in range(-l, l + 1):
                idcs.append(fldex.multi_to_single_index(tau=tau, l=l, m=m, l_max=lmax, m_max=mmax))
                count += 1
    assert idcs == list(range(len(idcs)))
    ind_num = fldex.blocksize(lmax, mmax)
    assert count == ind_num

    idcs = []
    lmax = 6
    mmax = 3
    count = 0
    for tau in range(2):
        for l in range(1, lmax + 1):
            mlim = min(l, mmax)
            for m in range(-mlim, mlim + 1):
                idcs.append(fldex.multi_to_single_index(tau=tau, l=l, m=m, l_max=lmax, m_max=mmax))
                count += 1
    assert idcs == list(range(len(idcs)))
    ind_num = fldex.blocksize(lmax, mmax)
    assert count == ind_num

def index_block(self, i):
    """
    Args:
        i (int):    particle number

    Returns:
        indices that correspond to the coefficients for that particle
    """
    blocksizes = [fldex.blocksize(particle.l_max, particle.m_max)
                  for particle in self.particle_list[:(i + 1)]]
    return range(sum(blocksizes[:i]), sum(blocksizes))

def t_matrix_sphere(k_medium, k_particle, radius, l_max, m_max):
    """T-matrix of a spherical scattering object.

    Args:
        k_medium (float or complex):    Wavenumber in surrounding medium (inverse length unit)
        k_particle (float or complex):  Wavenumber inside sphere (inverse length unit)
        radius (float):                 Radius of sphere (length unit)
        l_max (int):                    Maximal multipole degree
        m_max (int):                    Maximal multipole order

    Returns:
        T-matrix as ndarray
    """
    t = np.zeros((fldex.blocksize(l_max, m_max), fldex.blocksize(l_max, m_max)), dtype=complex)
    for tau in range(2):
        for m in range(-m_max, m_max + 1):
            for l in range(max(1, abs(m)), l_max + 1):
                n = fldex.multi_to_single_index(tau, l, m, l_max, m_max)
                t[n, n] = mie_coefficient(tau, l, k_medium, k_particle, radius)
    return t

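# Usage sketch (not from the original source): computes the T-matrix of a glass sphere in
# vacuum. The wavelength, refractive indices, radius and the helper name below are assumed
# example values; t_matrix_sphere, mie_coefficient and fldex are taken from the surrounding
# module.
def _example_t_matrix_sphere():
    import numpy as np
    wavelength = 550.0                            # vacuum wavelength (length unit)
    k_medium = 2 * np.pi * 1.0 / wavelength       # surrounding medium, n = 1.0 (assumed)
    k_particle = 2 * np.pi * 1.52 / wavelength    # sphere material, n = 1.52 (assumed)
    t = t_matrix_sphere(k_medium, k_particle, radius=100.0, l_max=3, m_max=3)
    # for a sphere, the T-matrix is diagonal in the (tau, l, m) basis
    assert np.allclose(t, np.diag(np.diag(t)))
    return t
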
def index(self, i, tau, l, m):
    r"""
    Args:
        i (int):    particle number
        tau (int):  spherical polarization index
        l (int):    multipole degree
        m (int):    multipole order

    Returns:
        Position in a system vector that corresponds to the :math:`(\tau, l, m)` coefficient
        of the i-th particle.
    """
    blocksizes = [fldex.blocksize(particle.l_max, particle.m_max)
                  for particle in self.particle_list[:i]]
    return sum(blocksizes) + fldex.multi_to_single_index(tau, l, m, self.particle_list[i].l_max,
                                                         self.particle_list[i].m_max)

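# Indexing sketch (not from the original source): shows how SystemMatrix.index and
# SystemMatrix.index_block address a system vector. The minimal _DummyParticle stand-in and
# the helper name are hypothetical placeholders; in practice one would pass actual SMUTHI
# particle objects.
def _example_system_matrix_indexing():
    class _DummyParticle:
        def __init__(self, l_max, m_max):
            self.l_max, self.m_max = l_max, m_max

    system_matrix = SystemMatrix([_DummyParticle(2, 2), _DummyParticle(3, 3)])
    # particle 0 occupies the first blocksize(2, 2) entries; particle 1 starts right after it
    assert list(system_matrix.index_block(1))[0] == fldex.blocksize(2, 2)
    # (tau=0, l=1, m=-1) is the first coefficient within the block of particle 1
    assert system_matrix.index(1, 0, 1, -1) == fldex.blocksize(2, 2)
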
def __init__(self, vacuum_wavelength, particle_list, layer_system, k_parallel='default', resolution=None):
    z_list = [particle.position[2] for particle in particle_list]
    assert z_list.count(z_list[0]) == len(z_list)  # all particles at the same z-position?

    SystemMatrix.__init__(self, particle_list)

    self.l_max = max([particle.l_max for particle in particle_list])
    self.m_max = max([particle.m_max for particle in particle_list])
    self.blocksize = fldex.blocksize(self.l_max, self.m_max)
    self.resolution = resolution

    # pre-compute the coupling lookup table, tabulated over the in-plane particle distance
    self.lookup_table, self.radial_distance_array = coup.radial_coupling_lookup_table(
        vacuum_wavelength=vacuum_wavelength, particle_list=particle_list, layer_system=layer_system,
        k_parallel=k_parallel, resolution=resolution)

def rotate_t_matrix(T, l_max, m_max, euler_angles, wdsympy=False):
    """T-matrix of a rotated particle.

    Args:
        T (numpy.array):        T-matrix
        l_max (int):            Maximal multipole degree
        m_max (int):            Maximal multipole order
        euler_angles (list):    Euler angles [alpha, beta, gamma] of the rotated particle
                                (zy'z''-convention), in radians
        wdsympy (bool):         If True, use sympy to evaluate the Wigner-D rotation matrices

    Returns:
        rotated T-matrix (numpy.array)
    """
    if euler_angles == [0, 0, 0]:
        return T
    else:
        blocksize = fldex.blocksize(l_max, m_max)
        T_mat_rot = np.zeros([blocksize, blocksize], dtype=complex)

        # Doicu, Light Scattering by Systems of Particles, p. 70 (1.115)
        rot_mat_1 = fldex.block_rotation_matrix_D_svwf(l_max, m_max, -euler_angles[2], -euler_angles[1],
                                                       -euler_angles[0], wdsympy)
        rot_mat_2 = fldex.block_rotation_matrix_D_svwf(l_max, m_max, euler_angles[0], euler_angles[1],
                                                       euler_angles[2], wdsympy)
        T_mat_rot = np.dot(np.dot(np.transpose(rot_mat_1), T), np.transpose(rot_mat_2))

        # Mishchenko, Scattering, Absorption and Emission of Light by Small Particles, p. 120 (5.29)
        # T_rot_matrix = np.dot(np.dot(fldex.rotation_matrix_D(l_max, alpha, beta, gamma), T),
        #                       fldex.rotation_matrix_D(l_max, -gamma, -beta, -alpha))

        return T_mat_rot

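# Rotation sanity check (sketch, not from the original source): a sphere T-matrix is diagonal
# and degenerate in m, so rotating it by arbitrary Euler angles should reproduce it up to
# numerical accuracy. The wavenumbers, radius, angles and the helper name below are assumed
# example values; t_matrix_sphere is assumed to be in scope alongside rotate_t_matrix.
def _example_rotate_t_matrix():
    import numpy as np
    t = t_matrix_sphere(k_medium=2 * np.pi / 550.0, k_particle=2 * np.pi * 1.52 / 550.0,
                        radius=100.0, l_max=3, m_max=3)
    t_rot = rotate_t_matrix(t, l_max=3, m_max=3, euler_angles=[0.3, 0.2, 0.1])
    print('max deviation after rotation:', abs(t_rot - t).max())
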
def taxsym_read_tmatrix(filename, l_max, m_max):
    """Export TAXSYM.f90 output to SMUTHI T-matrix.

    .. todo:: feedback to adapt particle m_max to nfmds m_max

    Args:
        filename (str): Name of the file containing the T-matrix output of TAXSYM.f90
        l_max (int):    Maximal multipole degree
        m_max (int):    Maximal multipole order

    Returns:
        T-matrix as numpy.ndarray
    """
    # read the NFM-DS info file to determine the expansion order n_rank and the number of
    # azimuthal modes m_rank
    with open(smuthi.nfmds.nfmds_folder + '/TMATFILES/Info' + filename, 'r') as info_file:
        info_file_lines = info_file.readlines()

    assert 'The scatterer is an axisymmetric particle' in ' '.join(info_file_lines)

    for line in info_file_lines:
        if line.split()[0:4] == ['-', 'maximum', 'expansion', 'order,']:
            n_rank = int(line.split()[-1][0:-1])
        if line.split()[0:5] == ['-', 'number', 'of', 'azimuthal', 'modes,']:
            m_rank = int(line.split()[-1][0:-1])

    # parse the complex T-matrix entries row by row
    with open(smuthi.nfmds.nfmds_folder + '/TMATFILES/' + filename, 'r') as tmat_file:
        tmat_lines = tmat_file.readlines()

    t_nfmds = [[]]
    column_index = 0
    for line in tmat_lines[3:]:
        split_line = line.split()
        for i_entry in range(int(len(split_line) / 2)):
            if column_index == 2 * n_rank:
                t_nfmds.append([])
                column_index = 0
            t_nfmds[-1].append(complex(split_line[2 * i_entry]) + 1j * complex(split_line[2 * i_entry + 1]))
            column_index += 1

    # map the NFM-DS indices to the SMUTHI (tau, l, m) ordering
    t_matrix = np.zeros((fldex.blocksize(l_max, m_max), fldex.blocksize(l_max, m_max)), dtype=complex)

    for m in range(-m_max, m_max + 1):
        n_max_nfmds = n_rank - max(1, abs(m)) + 1
        for tau1 in range(2):
            for l1 in range(max(1, abs(m)), l_max + 1):
                n1 = fldex.multi_to_single_index(tau=tau1, l=l1, m=m, l_max=l_max, m_max=m_max)
                l1_nfmds = l1 - max(1, abs(m))
                n1_nfmds = 2 * n_rank * abs(m) + tau1 * n_max_nfmds + l1_nfmds
                for tau2 in range(2):
                    for l2 in range(max(1, abs(m)), l_max + 1):
                        n2 = fldex.multi_to_single_index(tau=tau2, l=l2, m=m, l_max=l_max, m_max=m_max)
                        l2_nfmds = l2 - max(1, abs(m))
                        n2_nfmds = tau2 * n_max_nfmds + l2_nfmds
                        if abs(m) <= m_rank:
                            if m >= 0:
                                t_matrix[n1, n2] = t_nfmds[n1_nfmds][n2_nfmds]
                            else:
                                t_matrix[n1, n2] = t_nfmds[n1_nfmds][n2_nfmds] * (-1) ** (tau1 + tau2)

    return t_matrix

def __init__(self, vacuum_wavelength, particle_list, layer_system, k_parallel='default', resolution=None,
             cuda_blocksize=None, interpolator_kind='linear'):

    if cuda_blocksize is None:
        cuda_blocksize = cu.default_blocksize

    CouplingMatrixRadialLookup.__init__(self, vacuum_wavelength, particle_list, layer_system, k_parallel,
                                        resolution)

    sys.stdout.write('Prepare CUDA kernel and device lookup data ... ')
    sys.stdout.flush()
    start_time = time.time()

    if interpolator_kind == 'linear':
        coupling_source = cu.linear_radial_lookup_source % (self.blocksize, self.shape[0],
                                                            self.radial_distance_array.min(), resolution)
    elif interpolator_kind == 'cubic':
        coupling_source = cu.cubic_radial_lookup_source % (self.blocksize, self.shape[0],
                                                           self.radial_distance_array.min(), resolution)

    coupling_function = SourceModule(coupling_source).get_function("coupling_kernel")

    n_lookup_array = np.zeros(self.shape[0], dtype=np.uint32)
    m_particle_array = np.zeros(self.shape[0], dtype=np.float32)
    x_array = np.zeros(self.shape[0], dtype=np.float32)
    y_array = np.zeros(self.shape[0], dtype=np.float32)

    i_particle = 0
    for i, particle in enumerate(particle_list):
        for m in range(-particle.m_max, particle.m_max + 1):
            for l in range(max(1, abs(m)), particle.l_max + 1):
                for tau in range(2):
                    # idx = self.index(i, tau, l, m)
                    i_taulm = fldex.multi_to_single_index(tau, l, m, particle.l_max, particle.m_max)
                    idx = i_particle + i_taulm

                    n_lookup_array[idx] = fldex.multi_to_single_index(tau, l, m, self.l_max, self.m_max)
                    m_particle_array[idx] = m

                    # scale the x and y position to the lookup resolution:
                    x_array[idx] = particle.position[0]
                    y_array[idx] = particle.position[1]
        i_particle += fldex.blocksize(particle.l_max, particle.m_max)

    # lookup as numpy array in required shape
    re_lookup = self.lookup_table.real.astype(np.float32)
    im_lookup = self.lookup_table.imag.astype(np.float32)

    # transfer data to gpu
    n_lookup_array_d = gpuarray.to_gpu(n_lookup_array)
    m_particle_array_d = gpuarray.to_gpu(m_particle_array)
    x_array_d = gpuarray.to_gpu(x_array)
    y_array_d = gpuarray.to_gpu(y_array)
    re_lookup_d = gpuarray.to_gpu(re_lookup)
    im_lookup_d = gpuarray.to_gpu(im_lookup)

    sys.stdout.write('done | elapsed: ' + str(int(time.time() - start_time)) + 's\n')
    sys.stdout.flush()

    cuda_gridsize = (self.shape[0] + cuda_blocksize - 1) // cuda_blocksize

    def matvec(in_vec):
        re_in_vec_d = gpuarray.to_gpu(np.float32(in_vec.real))
        im_in_vec_d = gpuarray.to_gpu(np.float32(in_vec.imag))
        re_result_d = gpuarray.zeros(in_vec.shape, dtype=np.float32)
        im_result_d = gpuarray.zeros(in_vec.shape, dtype=np.float32)
        coupling_function(n_lookup_array_d.gpudata, m_particle_array_d.gpudata, x_array_d.gpudata,
                          y_array_d.gpudata, re_lookup_d.gpudata, im_lookup_d.gpudata,
                          re_in_vec_d.gpudata, im_in_vec_d.gpudata, re_result_d.gpudata,
                          im_result_d.gpudata, block=(cuda_blocksize, 1, 1), grid=(cuda_gridsize, 1))
        return re_result_d.get() + 1j * im_result_d.get()

    self.linear_operator = scipy.sparse.linalg.LinearOperator(shape=self.shape, matvec=matvec, dtype=complex)

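# Design note (sketch, not from the original source): the coupling matrix above is never
# assembled explicitly; it is exposed as a scipy LinearOperator whose matvec launches the CUDA
# kernel, so any matrix-free iterative solver can consume it. The toy diagonal operator and the
# helper name below are illustrative only and use plain numpy/scipy without CUDA.
def _example_linear_operator_solve():
    import numpy as np
    import scipy.sparse.linalg

    diagonal = np.linspace(1.0, 2.0, 10)
    operator = scipy.sparse.linalg.LinearOperator(shape=(10, 10),
                                                  matvec=lambda v: diagonal * v,
                                                  dtype=complex)
    rhs = np.ones(10, dtype=complex)
    solution, info = scipy.sparse.linalg.gmres(operator, rhs)
    assert info == 0  # 0 means the iteration converged
    print('residual norm:', np.linalg.norm(operator.matvec(solution) - rhs))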