def test_3d_4b():
    """Alfven operator."""
    x, y, z = symbols('x y z')

    u = IndexedBase('u')
    v = IndexedBase('v')

    bx = Constant('bx')
    by = Constant('by')
    bz = Constant('bz')
    b = Tuple(bx, by, bz)

    c0, c1, c2 = symbols('c0 c1 c2')

    a = Lambda((x, y, z, v, u),
               (c0 * Dot(u, v)
                - c1 * Div(u) * Div(v)
                + c2 * Dot(Curl(Cross(b, u)), Curl(Cross(b, v)))))
    print('> input := {0}'.format(a))

    # ...
    expr = construct_weak_form(a, dim=DIM, is_block=True, verbose=True)
    print('> weak form := {0}'.format(expr))
    # ...

    print('')
def test_3d_4b():
    """Alfven operator."""
    x, y, z = symbols('x y z')

    u = IndexedBase('u')
    v = IndexedBase('v')

    bx = Constant('bx')
    by = Constant('by')
    bz = Constant('bz')
    b = Tuple(bx, by, bz)

    c0, c1, c2 = symbols('c0 c1 c2')

    a = Lambda((x, y, z, v, u),
               (c0 * Dot(u, v)
                - c1 * Div(u) * Div(v)
                + c2 * Dot(Curl(Cross(b, u)), Curl(Cross(b, v)))))
    print('> input := {0}'.format(a))

    expr = gelatize(a, dim=DIM)
    print('> gelatized := {0}'.format(expr))

    expr, info = initialize_weak_form(expr, dim=DIM)
    print('> temp form :=')
    # for a nice printing, we print the dictionary entries one by one
    for key, value in list(expr.items()):
        print('\t\t', key, '\t', value)

    expr = normalize_weak_from(expr)
    print('> normal form := {0}'.format(expr))

    print('')
def test_3d_scalar_2():
    print('============== test_3d_scalar_2 ================')

    # ... define the weak formulation
    x, y, z = symbols('x y z')

    u = Symbol('u')
    v = Symbol('v')

    alpha = Constant('alpha')
    nu = Constant('nu')

    a = Lambda((x, v, u), alpha * Dot(Grad(u), Grad(v)) + nu * u * v)
    # ...

    # ... create a finite element space
    p1 = 2
    p2 = 2
    p3 = 2
    ne1 = 2
    ne2 = 2
    ne3 = 2
    # ...

    print('> Grid :: [{},{},{}]'.format(ne1, ne2, ne3))
    print('> Degree :: [{},{},{}]'.format(p1, p2, p3))

    grid_1 = linspace(0., 1., ne1 + 1)
    grid_2 = linspace(0., 1., ne2 + 1)
    grid_3 = linspace(0., 1., ne3 + 1)

    V1 = SplineSpace(p1, grid=grid_1)
    V2 = SplineSpace(p2, grid=grid_2)
    V3 = SplineSpace(p3, grid=grid_3)

    V = TensorFemSpace(V1, V2, V3)
    # ...

    # ...
    kernel_py = compile_kernel('kernel_scalar_2', a, V,
                               d_constants={'nu': 0.1},
                               d_args={'alpha': 'double'},
                               backend='python')

    kernel_f90 = compile_kernel('kernel_scalar_2', a, V,
                                d_constants={'nu': 0.1},
                                d_args={'alpha': 'double'},
                                backend='fortran')

    M_py = assemble_matrix(V, kernel_py, args={'alpha': 2.0})
    M_f90 = assemble_matrix(V, kernel_f90, args={'alpha': 2.0})
    # ...

    assert_identical_coo(M_py, M_f90)
def test_2d_block_3():
    print('============== test_2d_block_3 ================')

    x, y = symbols('x y')

    u = Symbol('u')
    v = Symbol('v')

    epsilon = Constant('epsilon')

    Laplace = lambda v, u: Dot(Grad(v), Grad(u))
    Mass = lambda v, u: v * u

    u1, u2, p = symbols('u1 u2 p')
    v1, v2, q = symbols('v1 v2 q')

    a = Lambda((x, y, v1, v2, q, u1, u2, p),
               Laplace(v1, u1) - dx(v1) * p
               + Laplace(v2, u2) - dy(v2) * p
               + q * (dx(u1) + dy(u2))
               + epsilon * Mass(q, p))
    print('> input := {0}'.format(a))

    # ... create a finite element space
    p1 = 2
    p2 = 2
    ne1 = 8
    ne2 = 8

    print('> Grid :: [{ne1},{ne2}]'.format(ne1=ne1, ne2=ne2))
    print('> Degree :: [{p1},{p2}]'.format(p1=p1, p2=p2))

    grid_1 = linspace(0., 1., ne1 + 1)
    grid_2 = linspace(0., 1., ne2 + 1)

    V1 = SplineSpace(p1, grid=grid_1)
    V2 = SplineSpace(p2, grid=grid_2)

    V = TensorFemSpace(V1, V2)
    V = VectorFemSpace(V, V, V)
    # ...

    # ...
    kernel_py = compile_kernel('kernel_block_3', a, V,
                               d_args={'epsilon': 'double'},
                               backend='python')

    kernel_f90 = compile_kernel('kernel_block_3', a, V,
                                d_args={'epsilon': 'double'},
                                backend='fortran')

    M_py = assemble_matrix(V, kernel_py, args={'epsilon': 1.e-3})
    M_f90 = assemble_matrix(V, kernel_f90, args={'epsilon': 1.e-3})
    # ...

    assert_identical_coo(M_py, M_f90)

    print('')
def test_1d_scalar_2():
    print('============== test_1d_scalar_2 ================')

    # ... define the weak formulation
    x = Symbol('x')

    u = Symbol('u')
    v = Symbol('v')

    alpha = Constant('alpha')
    nu = Constant('nu')

    a = Lambda((x, v, u), alpha * Dot(Grad(u), Grad(v)) + nu * u * v)
    # ...

    # ... create a finite element space
    p = 3
    ne = 64

    print('> Grid :: {ne}'.format(ne=ne))
    print('> Degree :: {p}'.format(p=p))

    grid = linspace(0., 1., ne + 1)

    V = SplineSpace(p, grid=grid)
    # ...

    # ...
    kernel_py = compile_kernel('kernel_scalar_2', a, V,
                               d_constants={'nu': 0.1},
                               d_args={'alpha': 'double'},
                               backend='python')

    kernel_f90 = compile_kernel('kernel_scalar_2', a, V,
                                d_constants={'nu': 0.1},
                                d_args={'alpha': 'double'},
                                backend='fortran')

    M_py = assemble_matrix(V, kernel_py, args={'alpha': 2.0})
    M_f90 = assemble_matrix(V, kernel_f90, args={'alpha': 2.0})
    # ...

    assert_identical_coo(M_py, M_f90)
def test_2d_4():
    x, y = symbols('x y')

    u = Symbol('u')
    v = Symbol('v')

    bx = Constant('bx')
    by = Constant('by')
    b = Tuple(bx, by)

    a = Lambda((x, y, v, u), 0.2 * u * v + Dot(b, Grad(v)) * u)
    print('> input := {0}'.format(a))

    # ...
    expr = construct_weak_form(a, dim=DIM, is_block=False)
    print('> weak form := {0}'.format(expr))
    # ...

    print('')
def test_2d_4():
    x, y = symbols('x y')

    u = Symbol('u')
    v = Symbol('v')

    bx = Constant('bx')
    by = Constant('by')
    b = Tuple(bx, by)

    a = Lambda((x, y, v, u), 0.2 * u * v + Dot(b, Grad(v)) * u)
    print('> input := {0}'.format(a))

    expr = gelatize(a, dim=DIM)
    print('> gelatized := {0}'.format(expr))

    expr, info = initialize_weak_form(expr, dim=DIM)
    print('> temp form := {0}'.format(expr))

    expr = normalize_weak_from(expr)
    print('> normal form := {0}'.format(expr))

    print('')
def test_1d_2():
    x = Symbol('x')

    u = Symbol('u')
    v = Symbol('v')

    b = Constant('b')

    a = Lambda((x, v, u), Dot(Grad(b * u), Grad(v)) + u * v)
    print('> input := {0}'.format(a))

    # ...
    expr = construct_weak_form(a, dim=DIM)
    print('> weak form := {0}'.format(expr))
    # ...

    print('')
def test_1d_scalar_2():
    print('============== test_1d_scalar_2 ================')

    x = Symbol('x')

    u = Symbol('u')
    v = Symbol('v')

    b = Constant('b')

    a = Lambda((x, v, u), Dot(Grad(b * u), Grad(v)) + u * v)
    print('> input := {0}'.format(a))

    # ... create a finite element space
    p = 3
    ne = 64

    print('> Grid :: {ne}'.format(ne=ne))
    print('> Degree :: {p}'.format(p=p))

    grid = linspace(0., 1., ne + 1)

    V = SplineSpace(p, grid=grid)
    # ...

    # ... create a glt symbol from a string without evaluation
    expr = glt_symbol(a, space=V)
    print('> glt symbol := {0}'.format(expr))
    # ...

    # ...
    symbol_f90 = compile_symbol('symbol_scalar_2', a, V,
                                d_constants={'b': 0.1},
                                backend='fortran')
    # ...

    # ... example of symbol evaluation
    t1 = linspace(-pi, pi, ne + 1)
    x1 = linspace(0., 1., ne + 1)

    e = zeros(ne + 1)
    symbol_f90(x1, t1, e)
    # ...

    print('')
def test_1d_2():
    x, y = symbols('x y')

    u = Symbol('u')
    v = Symbol('v')

    # b = Function('b')
    b = Constant('b')

    a = Lambda((x, y, v, u), Dot(Grad(b * u), Grad(v)) + u * v)
    print('> input := {0}'.format(a))

    expr = gelatize(a, dim=DIM)
    print('> gelatized := {0}'.format(expr))

    expr = normalize_weak_from(expr)
    print('> normal form := {0}'.format(expr))

    print('')
def test_2d_scalar_2():
    print('============== test_2d_scalar_2 ================')

    x, y = symbols('x y')

    u = Symbol('u')
    v = Symbol('v')

    c = Constant('c')
    b0 = Constant('b0')
    b1 = Constant('b1')
    b = Tuple(b0, b1)

    a = Lambda((x, y, v, u),
               c * u * v + Dot(b, Grad(v)) * u + Dot(b, Grad(u)) * v)
    print('> input := {0}'.format(a))

    # ... create a finite element space
    p1 = 2
    p2 = 2
    ne1 = 8
    ne2 = 8

    print('> Grid :: [{ne1},{ne2}]'.format(ne1=ne1, ne2=ne2))
    print('> Degree :: [{p1},{p2}]'.format(p1=p1, p2=p2))

    grid_1 = linspace(0., 1., ne1 + 1)
    grid_2 = linspace(0., 1., ne2 + 1)

    V1 = SplineSpace(p1, grid=grid_1)
    V2 = SplineSpace(p2, grid=grid_2)

    V = TensorFemSpace(V1, V2)
    # ...

    # ... create a glt symbol from a string without evaluation
    expr = glt_symbol(a, space=V)
    print('> glt symbol := {0}'.format(expr))
    # ...

    # ...
    symbol_f90 = compile_symbol('symbol_scalar_2', a, V,
                                d_constants={'b0': 0.1, 'b1': 1., 'c': 0.2},
                                backend='fortran')
    # ...

    # ... example of symbol evaluation
    t1 = linspace(-pi, pi, ne1 + 1)
    t2 = linspace(-pi, pi, ne2 + 1)
    x1 = linspace(0., 1., ne1 + 1)
    x2 = linspace(0., 1., ne2 + 1)

    e = zeros((ne1 + 1, ne2 + 1), order='F')
    symbol_f90(x1, x2, t1, t2, e)
    # ...

    print('')
def test_2d_block_2():
    print('============== test_2d_block_2 ================')

    x, y = symbols('x y')

    u = Symbol('u')
    v = Symbol('v')

    epsilon = Constant('epsilon')

    Laplace = lambda v, u: Dot(Grad(v), Grad(u))
    Mass = lambda v, u: v * u

    u1, u2, p = symbols('u1 u2 p')
    v1, v2, q = symbols('v1 v2 q')

    a = Lambda((x, y, v1, v2, q, u1, u2, p),
               Laplace(v1, u1) - dx(v1) * p
               + Laplace(v2, u2) - dy(v2) * p
               + q * (dx(u1) + dy(u2))
               + epsilon * Mass(q, p))
    print('> input := {0}'.format(a))

    # ... create a finite element space
    p1 = 2
    p2 = 2
    ne1 = 8
    ne2 = 8

    print('> Grid :: [{ne1},{ne2}]'.format(ne1=ne1, ne2=ne2))
    print('> Degree :: [{p1},{p2}]'.format(p1=p1, p2=p2))

    grid_1 = linspace(0., 1., ne1 + 1)
    grid_2 = linspace(0., 1., ne2 + 1)

    V1 = SplineSpace(p1, grid=grid_1)
    V2 = SplineSpace(p2, grid=grid_2)

    V = TensorFemSpace(V1, V2)
    V = VectorFemSpace(V, V, V)
    # ...

    # ... create a glt symbol from a string without evaluation
    expr = glt_symbol(a, space=V)
    print('> glt symbol := {0}'.format(expr))
    # ...

    # TODO not working yet => need complex numbers
    # # ...
    # symbol_f90 = compile_symbol('symbol_block_2', a, V,
    #                             d_constants={'epsilon': 0.1},
    #                             backend='fortran')
    # # ...

    # # ... example of symbol evaluation
    # t1 = linspace(-pi, pi, ne1+1)
    # t2 = linspace(-pi, pi, ne2+1)
    # x1 = linspace(0., 1., ne1+1)
    # x2 = linspace(0., 1., ne2+1)
    #
    # e = zeros((2, 2, ne1+1, ne2+1), order='F')
    # symbol_f90(x1, x2, t1, t2, e)
    # # ...

    print('')
def test_3d_block_4():
    """Alfven operator."""
    print('============== test_3d_block_4 ================')

    x, y, z = symbols('x y z')

    u = IndexedBase('u')
    v = IndexedBase('v')

    bx = Constant('bx')
    by = Constant('by')
    bz = Constant('bz')
    b = Tuple(bx, by, bz)

    c0 = Constant('c0')
    c1 = Constant('c1')
    c2 = Constant('c2')

    a = Lambda((x, y, z, v, u),
               (c0 * Dot(u, v)
                + c1 * Div(u) * Div(v)
                + c2 * Dot(Curl(Cross(b, u)), Curl(Cross(b, v)))))
    print('> input := {0}'.format(a))

    # ... create a finite element space
    p1 = 2
    p2 = 2
    p3 = 2
    ne1 = 2
    ne2 = 2
    ne3 = 2
    # ...

    print('> Grid :: [{},{},{}]'.format(ne1, ne2, ne3))
    print('> Degree :: [{},{},{}]'.format(p1, p2, p3))

    grid_1 = linspace(0., 1., ne1 + 1)
    grid_2 = linspace(0., 1., ne2 + 1)
    grid_3 = linspace(0., 1., ne3 + 1)

    V1 = SplineSpace(p1, grid=grid_1)
    V2 = SplineSpace(p2, grid=grid_2)
    V3 = SplineSpace(p3, grid=grid_3)

    Vx = TensorFemSpace(V1, V2, V3)
    Vy = TensorFemSpace(V1, V2, V3)
    Vz = TensorFemSpace(V1, V2, V3)

    V = VectorFemSpace(Vx, Vy, Vz)
    # ...

    # ... create a glt symbol from a string without evaluation
    expr = glt_symbol(a, space=V)
    print('> glt symbol := {0}'.format(expr))
    # ...

    # ...
    symbol_f90 = compile_symbol('symbol_block_4', a, V,
                                d_constants={'bx': 0.1, 'by': 1., 'bz': 0.2,
                                             'c0': 0.1, 'c1': 1., 'c2': 1.},
                                backend='fortran')
    # ...

    # ... example of symbol evaluation
    t1 = linspace(-pi, pi, ne1 + 1)
    t2 = linspace(-pi, pi, ne2 + 1)
    t3 = linspace(-pi, pi, ne3 + 1)
    x1 = linspace(0., 1., ne1 + 1)
    x2 = linspace(0., 1., ne2 + 1)
    x3 = linspace(0., 1., ne3 + 1)

    e = zeros((3, 3, ne1 + 1, ne2 + 1, ne3 + 1), order='F')
    symbol_f90(x1, x2, x3, t1, t2, t3, e)
    # ...

    print('')
def compile_kernel(name, expr, V,
                   namespace=globals(),
                   verbose=False,
                   d_constants={},
                   d_args={},
                   context=None,
                   backend='python',
                   export_pyfile=True):
    """Returns a kernel from a Lambda expression on a Finite Elements space."""

    from spl.fem.vector import VectorFemSpace
    from spl.fem.splines import SplineSpace
    from spl.fem.tensor import TensorFemSpace

    # ... parametric dimension
    dim = V.pdim
    # ...

    # ... number of partial derivatives
    # TODO must be computed from the weak form, then we re-initialize the
    #      space
    if isinstance(V, SplineSpace):
        nderiv = V.nderiv
    elif isinstance(V, TensorFemSpace):
        nderiv = max(W.nderiv for W in V.spaces)
    elif isinstance(V, VectorFemSpace):
        nds = []
        for W in V.spaces:
            if isinstance(W, SplineSpace):
                nderiv = W.nderiv
            elif isinstance(W, TensorFemSpace):
                nderiv = max(X.nderiv for X in W.spaces)
            nds.append(nderiv)
        nderiv = max(nds)
    # ...

    # ...
    if verbose:
        print('> input := {0}'.format(expr))
    # ...

    # ...
    fields = [i for i in expr.free_symbols if isinstance(i, Field)]
    if verbose:
        print('> Fields = ', fields)
    # ...

    # ...
    expr = construct_weak_form(expr, dim=dim,
                               is_block=isinstance(V, VectorFemSpace))
    if verbose:
        print('> weak form := {0}'.format(expr))
    # ...

    # ... constants
    #     for each argument, we check that it is a Numeric Native Python type,
    #     i.e. that it has a given value (1, 1.0, etc.)
    if d_constants:
        for k, a in list(d_constants.items()):
            if not isinstance(a, Number):
                raise TypeError('Expecting a Python Numeric object')

        # update the weak formulation using the given arguments
        _d = {}
        for k, v in list(d_constants.items()):
            if isinstance(k, str):
                _d[Constant(k)] = v
            else:
                _d[k] = v

        expr = expr.subs(_d)

    args = ''
    dtypes = ''
    if d_args:
        # ... additional arguments
        #     for each argument, we compute its datatype (needed for Pyccel)
        for k, a in list(d_args.items()):
            # the argument must be a string that specifies its type
            if not isinstance(a, str):
                raise TypeError('Expecting a string')

            if a not in ['int', 'double', 'complex']:
                raise TypeError('Wrong type for {} :: {}'.format(k, a))

        # we convert the dictionaries to OrderedDict, to avoid wrong ordering
        d_args = OrderedDict(sorted(list(d_args.items())))

        names = []
        dtypes = []
        for n, d in list(d_args.items()):
            names.append(n)
            dtypes.append(d)

        args = ', '.join('{}'.format(a) for a in names)
        dtypes = ', '.join('{}'.format(a) for a in dtypes)

        args = ', {}'.format(args)
        dtypes = ', {}'.format(dtypes)

    # TODO check what are the free_symbols of expr,
    #      to make sure the final code will compile;
    #      the remaining free symbols must be the trial/test basis functions
    #      and the coordinates
    # ...

    # ...
    if isinstance(V, VectorFemSpace) and not V.is_block:
        raise NotImplementedError('We only treat the case of a block space, '
                                  'for which all components are identical.')
    # ...

    # ...
    pattern = 'scalar'
    if isinstance(V, VectorFemSpace):
        if V.is_block:
            pattern = 'block'
        else:
            raise NotImplementedError('We only treat the case of a block '
                                      'space, for which all components are '
                                      'identical.')
    # ...

    # ...
    template_str = 'template_{dim}d_{pattern}'.format(dim=dim, pattern=pattern)
    try:
        template = eval(template_str)
    except:
        raise ValueError('Could not find the corresponding template '
                         '{}'.format(template_str))
    # ...

    # ... indentation (def function body)
    tab = ' ' * 4
    # ...

    # ... field coeffs
    if fields:
        field_coeffs = OrderedDict()
        for f in fields:
            coeffs = 'coeff_{}'.format(f.name)
            field_coeffs[str(f.name)] = coeffs

        ls = [v for v in list(field_coeffs.values())]
        field_coeffs_str = ', '.join(i for i in ls)
        # add ',' for kernel signature
        field_coeffs_str = ', {}'.format(field_coeffs_str)

        eval_field_str = print_eval_field(expr, V.pdim, fields,
                                          verbose=verbose)

        # ...
        if dim == 1:
            e_pattern = '{field}{deriv} = {field}{deriv}_values[g1]'
        elif dim == 2:
            e_pattern = '{field}{deriv} = {field}{deriv}_values[g1,g2]'
        elif dim == 3:
            e_pattern = '{field}{deriv} = {field}{deriv}_values[g1,g2,g3]'
        else:
            raise NotImplementedError('only 1d, 2d and 3d are available')

        field_values = OrderedDict()
        free_names = [str(f.name) for f in expr.free_symbols]
        for f in fields:
            ls = []
            if f.name in free_names:
                ls.append(f.name)
            for deriv in BASIS_PREFIX:
                f_d = '{field}_{deriv}'.format(field=f.name, deriv=deriv)
                if f_d in free_names:
                    ls.append(f_d)

            field_values[f.name] = ls

        tab_base = tab
        # ... update indentation to be inside the loop
        for i in range(0, 3 * dim):
            tab += ' ' * 4

        lines = []
        for k, fs in list(field_values.items()):
            coeff = field_coeffs[k]
            for f in fs:
                ls = f.split('_')
                if len(ls) == 1:
                    deriv = ''
                else:
                    deriv = '_{}'.format(ls[-1])
                line = e_pattern.format(field=k, deriv=deriv)
                line = tab + line

                lines.append(line)

        field_value_str = '\n'.join(line for line in lines)
        tab = tab_base
        # ...

        # ...
        field_types = []
        slices = ','.join(':' for i in range(0, dim))
        for v in list(field_coeffs.values()):
            field_types.append('double [{slices}]'.format(slices=slices))

        field_types_str = ', '.join(i for i in field_types)
        field_types_str = ', {}'.format(field_types_str)
        # ...
    else:
        field_coeffs_str = ''
        eval_field_str = ''
        field_value_str = ''
        field_types_str = ''
    # ...

    # ... compute indentation
    tab_base = tab
    for i in range(0, 3 * dim):
        tab += ' ' * 4
    # ...

    # ... print test functions
    d_test_basis = construct_test_functions(nderiv, dim)
    test_names = [i.name for i in expr.free_symbols if is_test_function(i)]
    test_names.sort()

    lines = []
    for a in test_names:
        if a == 'Ni':
            basis = ' * '.join(d_test_basis[k, 0] for k in range(1, dim + 1))
            line = 'Ni = {basis}'.format(basis=basis)
        else:
            deriv = a.split('_')[-1]
            nx = _count_letter(deriv, 'x')
            ny = _count_letter(deriv, 'y')
            nz = _count_letter(deriv, 'z')

            basis = ' * '.join(d_test_basis[k, d]
                               for k, d in zip(range(1, dim + 1),
                                               [nx, ny, nz]))
            line = 'Ni_{deriv} = {basis}'.format(deriv=deriv, basis=basis)

        lines.append(tab + line)

    test_function_str = '\n'.join(l for l in lines)
    # ...

    # ... print trial functions
    d_trial_basis = construct_trial_functions(nderiv, dim)
    trial_names = [i.name for i in expr.free_symbols if is_trial_function(i)]
    trial_names.sort()

    lines = []
    for a in trial_names:
        if a == 'Nj':
            basis = ' * '.join(d_trial_basis[k, 0] for k in range(1, dim + 1))
            line = 'Nj = {basis}'.format(basis=basis)
        else:
            deriv = a.split('_')[-1]
            nx = _count_letter(deriv, 'x')
            ny = _count_letter(deriv, 'y')
            nz = _count_letter(deriv, 'z')

            basis = ' * '.join(d_trial_basis[k, d]
                               for k, d in zip(range(1, dim + 1),
                                               [nx, ny, nz]))
            line = 'Nj_{deriv} = {basis}'.format(deriv=deriv, basis=basis)

        lines.append(tab + line)

    trial_function_str = '\n'.join(l for l in lines)
    # ...

    # ...
    tab = tab_base
    # ...

    # ...
    if isinstance(V, VectorFemSpace):
        if V.is_block:
            n_components = len(V.spaces)

            # ... - initializing element matrices
            #     - define arguments
            lines = []
            mat_args = []
            slices = ','.join(':' for i in range(0, 2 * dim))
            for i in range(0, n_components):
                for j in range(0, n_components):
                    mat = 'mat_{i}{j}'.format(i=i, j=j)
                    mat_args.append(mat)

                    line = '{mat}[{slices}] = 0.0'.format(mat=mat,
                                                          slices=slices)
                    line = tab + line

                    lines.append(line)

            mat_args_str = ', '.join(mat for mat in mat_args)
            mat_init_str = '\n'.join(line for line in lines)
            # ...

            # ... update indentation to be inside the loop
            for i in range(0, 2 * dim):
                tab += ' ' * 4
            tab_base = tab
            # ...

            # ... initializing accumulation variables
            lines = []
            for i in range(0, n_components):
                for j in range(0, n_components):
                    line = 'v_{i}{j} = 0.0'.format(i=i, j=j)
                    line = tab + line

                    lines.append(line)

            accum_init_str = '\n'.join(line for line in lines)
            # ...

            # ... update indentation
            for i in range(0, dim):
                tab += ' ' * 4
            # ...

            # ... accumulation contributions
            lines = []
            for i in range(0, n_components):
                for j in range(0, n_components):
                    line = 'v_{i}{j} += ({__WEAK_FORM__}) * wvol'

                    e = _convert_int_to_float(expr[i, j].evalf())
                    # we call evalf to avoid having fortran doing the
                    # evaluation of rational division

                    line = line.format(i=i, j=j, __WEAK_FORM__=e)
                    line = tab + line

                    lines.append(line)

            accum_str = '\n'.join(line for line in lines)
            # ...

            # ... assign accumulated values to element matrix
            if dim == 1:
                e_pattern = 'mat_{i}{j}[il_1, p1 + jl_1 - il_1] = v_{i}{j}'
            elif dim == 2:
                e_pattern = 'mat_{i}{j}[il_1, il_2, p1 + jl_1 - il_1, p2 + jl_2 - il_2] = v_{i}{j}'
            elif dim == 3:
                e_pattern = 'mat_{i}{j}[il_1, il_2, il_3, p1 + jl_1 - il_1, p2 + jl_2 - il_2, p3 + jl_3 - il_3] = v_{i}{j}'
            else:
                raise NotImplementedError('only 1d, 2d and 3d are available')

            tab = tab_base
            lines = []
            for i in range(0, n_components):
                for j in range(0, n_components):
                    line = e_pattern.format(i=i, j=j)
                    line = tab + line

                    lines.append(line)

            accum_assign_str = '\n'.join(line for line in lines)
            # ...

            code = template.format(__KERNEL_NAME__=name,
                                   __MAT_ARGS__=mat_args_str,
                                   __FIELD_COEFFS__=field_coeffs_str,
                                   __FIELD_EVALUATION__=eval_field_str,
                                   __MAT_INIT__=mat_init_str,
                                   __ACCUM_INIT__=accum_init_str,
                                   __FIELD_VALUE__=field_value_str,
                                   __TEST_FUNCTION__=test_function_str,
                                   __TRIAL_FUNCTION__=trial_function_str,
                                   __ACCUM__=accum_str,
                                   __ACCUM_ASSIGN__=accum_assign_str,
                                   __ARGS__=args)
        else:
            raise NotImplementedError('We only treat the case of a block '
                                      'space, for which all components are '
                                      'identical.')
    else:
        e = _convert_int_to_float(expr.evalf())
        # we call evalf to avoid having fortran doing the evaluation of
        # rational division

        code = template.format(__KERNEL_NAME__=name,
                               __FIELD_COEFFS__=field_coeffs_str,
                               __FIELD_EVALUATION__=eval_field_str,
                               __FIELD_VALUE__=field_value_str,
                               __TEST_FUNCTION__=test_function_str,
                               __TRIAL_FUNCTION__=trial_function_str,
                               __WEAK_FORM__=e,
                               __ARGS__=args)
    # ...

    # print('--------------')
    # print(code)
    # print('--------------')

    # ...
    if context:
        from pyccel.epyccel import ContextPyccel

        if isinstance(context, ContextPyccel):
            context = [context]
        elif isinstance(context, (list, tuple)):
            for i in context:
                assert isinstance(i, ContextPyccel)
        else:
            raise TypeError('Expecting a ContextPyccel or list/tuple of '
                            'ContextPyccel')

        # append functions to the namespace
        for c in context:
            for k, v in list(c.functions.items()):
                namespace[k] = v[0]
    # ...

    # ...
    exec(code, namespace)
    kernel = namespace[name]
    # ...

    # ... export the python code of the module
    if export_pyfile:
        write_code(name, code, ext='py', folder='.pyccel')
    # ...

    # ...
    if backend == 'fortran':
        # try:
        # import epyccel function
        from pyccel.epyccel import epyccel

        # ... define a header to specify the arguments types for the kernel
        try:
            template = eval('template_header_{dim}d_{pattern}'.format(
                dim=dim, pattern=pattern))
        except:
            raise ValueError('Could not find the corresponding template')
        # ...

        # ...
        if isinstance(V, VectorFemSpace):
            if V.is_block:
                # ... declare element matrices dtypes
                mat_types = []
                for i in range(0, n_components):
                    for j in range(0, n_components):
                        if dim == 1:
                            mat_types.append('double [:,:]')
                        elif dim == 2:
                            mat_types.append('double [:,:,:,:]')
                        elif dim == 3:
                            mat_types.append('double [:,:,:,:,:,:]')
                        else:
                            raise NotImplementedError('only 1d, 2d and 3d '
                                                      'are available')

                mat_types_str = ', '.join(mat for mat in mat_types)
                # ...

                header = template.format(__KERNEL_NAME__=name,
                                         __MAT_TYPES__=mat_types_str,
                                         __FIELD_TYPES__=field_types_str,
                                         __TYPES__=dtypes)
            else:
                raise NotImplementedError('We only treat the case of a block '
                                          'space, for which all components '
                                          'are identical.')
        else:
            header = template.format(__KERNEL_NAME__=name,
                                     __FIELD_TYPES__=field_types_str,
                                     __TYPES__=dtypes)
        # ...

        # compile the kernel
        kernel = epyccel(code, header, name=name, context=context)
        # except:
        #     print('> COULD NOT CONVERT KERNEL TO FORTRAN')
        #     print('  THE PYTHON BACKEND WILL BE USED')
    # ...

    return kernel
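

# Usage sketch (illustrative only): how compile_kernel is typically called,
# mirroring test_1d_scalar_2 above.  It assumes the same imports/namespace as
# the tests (Symbol, Constant, Dot, Grad, Lambda, linspace, SplineSpace,
# assemble_matrix); the kernel name '_example_kernel' is hypothetical.
def _example_compile_kernel_usage():
    x = Symbol('x')
    u = Symbol('u')
    v = Symbol('v')

    alpha = Constant('alpha')
    nu = Constant('nu')

    # weak form: alpha * stiffness term + nu * mass term
    a = Lambda((x, v, u), alpha * Dot(Grad(u), Grad(v)) + nu * u * v)

    # cubic splines on a uniform grid of 64 elements
    V = SplineSpace(3, grid=linspace(0., 1., 64 + 1))

    # 'nu' is frozen at compile time, 'alpha' stays a runtime argument
    kernel = compile_kernel('_example_kernel', a, V,
                            d_constants={'nu': 0.1},
                            d_args={'alpha': 'double'},
                            backend='python')

    # assemble the matrix, passing the runtime value of 'alpha'
    return assemble_matrix(V, kernel, args={'alpha': 2.0})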
def compile_symbol(name, expr, V,
                   namespace=globals(),
                   verbose=False,
                   d_constants={},
                   d_args={},
                   context=None,
                   backend='python',
                   export_pyfile=True):
    """Returns a lambdified function for the GLT symbol."""

    from spl.fem.vector import VectorFemSpace

    # ... parametric dimension
    dim = V.pdim
    # ...

    # ...
    if verbose:
        print('> input := {0}'.format(expr))
    # ...

    # ...
    fields = [i for i in expr.free_symbols if isinstance(i, Field)]
    if verbose:
        print('> Fields = ', fields)
    # ...

    # ...
    expr = glt_symbol(expr, space=V, evaluate=True)
    if verbose:
        print('> weak form := {0}'.format(expr))
    # ...

    # ... constants
    #     for each argument, we check that it is a Numeric Native Python type,
    #     i.e. that it has a given value (1, 1.0, etc.)
    if d_constants:
        for k, a in list(d_constants.items()):
            if not isinstance(a, Number):
                raise TypeError('Expecting a Python Numeric object')

        # update the glt symbol using the given arguments
        _d = {}
        for k, v in list(d_constants.items()):
            if isinstance(k, str):
                _d[Constant(k)] = v
            else:
                _d[k] = v

        expr = expr.subs(_d)

    # print(expr)
    # import sys; sys.exit(0)

    args = ''
    dtypes = ''
    if d_args:
        # ... additional arguments
        #     for each argument, we compute its datatype (needed for Pyccel)
        for k, a in list(d_args.items()):
            # the argument must be a string that specifies its type
            if not isinstance(a, str):
                raise TypeError('Expecting a string')

            if a not in ['int', 'double', 'complex']:
                raise TypeError('Wrong type for {} :: {}'.format(k, a))

        # we convert the dictionaries to OrderedDict, to avoid wrong ordering
        d_args = OrderedDict(sorted(list(d_args.items())))

        names = []
        dtypes = []
        for n, d in list(d_args.items()):
            names.append(n)
            dtypes.append(d)

        args = ', '.join('{}'.format(a) for a in names)
        dtypes = ', '.join('{}'.format(a) for a in dtypes)

        args = ', {}'.format(args)
        dtypes = ', {}'.format(dtypes)

    # TODO check what are the free_symbols of expr,
    #      to make sure the final code will compile;
    #      the remaining free symbols must be the trial/test basis functions
    #      and the coordinates
    # ...

    # ...
    if isinstance(V, VectorFemSpace) and not V.is_block:
        raise NotImplementedError('We only treat the case of a block space, '
                                  'for which all components are identical.')
    # ...

    # ...
    pattern = 'scalar'
    if isinstance(V, VectorFemSpace):
        if V.is_block:
            pattern = 'block'
        else:
            raise NotImplementedError('We only treat the case of a block '
                                      'space, for which all components are '
                                      'identical.')
    # ...

    # ...
    template_str = 'symbol_{dim}d_{pattern}'.format(dim=dim, pattern=pattern)
    try:
        template = eval(template_str)
    except:
        raise ValueError('Could not find the corresponding template '
                         '{}'.format(template_str))
    # ...

    # ...
    if fields:
        raise NotImplementedError('TODO')
    else:
        field_coeffs_str = ''
        eval_field_str = ''
        field_value_str = ''
        field_types_str = ''
    # ...

    # ...
    if isinstance(V, VectorFemSpace):
        if V.is_block:
            n_components = len(V.spaces)

            # ... indentation (def function body)
            tab = ' ' * 4
            # ...

            # ... update indentation to be inside the loop
            for i in range(0, dim):
                tab += ' ' * 4
            tab_base = tab
            # ...

            # ...
            lines = []
            indices = ','.join('i{}'.format(i) for i in range(1, dim + 1))
            for i in range(0, n_components):
                for j in range(0, n_components):
                    s_ij = 'symbol[{i},{j},{indices}]'.format(i=i, j=j,
                                                              indices=indices)

                    e_ij = _convert_int_to_float(expr.expr[i, j])
                    # we call evalf to avoid having fortran doing the
                    # evaluation of rational division

                    line = '{s_ij} = {e_ij}'.format(s_ij=s_ij,
                                                    e_ij=e_ij.evalf())
                    line = tab + line

                    lines.append(line)

            symbol_expr = '\n'.join(line for line in lines)
            # ...

            code = template.format(__SYMBOL_NAME__=name,
                                   __SYMBOL_EXPR__=symbol_expr,
                                   __FIELD_COEFFS__=field_coeffs_str,
                                   __FIELD_EVALUATION__=eval_field_str,
                                   __FIELD_VALUE__=field_value_str,
                                   __ARGS__=args)
        else:
            raise NotImplementedError('TODO')
    else:
        # we call evalf to avoid having fortran doing the evaluation of
        # rational division
        e = _convert_int_to_float(expr.expr)

        code = template.format(__SYMBOL_NAME__=name,
                               __SYMBOL_EXPR__=e.evalf(),
                               __FIELD_COEFFS__=field_coeffs_str,
                               __FIELD_EVALUATION__=eval_field_str,
                               __FIELD_VALUE__=field_value_str,
                               __ARGS__=args)
    # ...

    # ... export the python code of the module
    if export_pyfile:
        write_code(name, code, ext='py', folder='.pyccel')
    # ...

    # ...
    if context:
        from pyccel.epyccel import ContextPyccel

        if isinstance(context, ContextPyccel):
            context = [context]
        elif isinstance(context, (list, tuple)):
            for i in context:
                assert isinstance(i, ContextPyccel)
        else:
            raise TypeError('Expecting a ContextPyccel or list/tuple of '
                            'ContextPyccel')

        # append functions to the namespace
        for c in context:
            for k, v in list(c.functions.items()):
                namespace[k] = v[0]
    # ...

    # print(code)
    # import sys; sys.exit(0)

    # ...
    exec(code, namespace)
    kernel = namespace[name]
    # ...

    # ...
    if backend == 'fortran':
        # try:
        # import epyccel function
        from pyccel.epyccel import epyccel

        # ... define a header to specify the arguments types for the kernel
        template_str = 'symbol_header_{dim}d_{pattern}'.format(dim=dim,
                                                               pattern=pattern)
        try:
            template = eval(template_str)
        except:
            raise ValueError('Could not find the corresponding template '
                             '{}'.format(template_str))
        # ...

        # ...
        header = template.format(__SYMBOL_NAME__=name,
                                 __FIELD_TYPES__=field_types_str,
                                 __TYPES__=dtypes)
        # ...

        # compile the kernel
        kernel = epyccel(code, header, name=name, context=context)
        # except:
        #     print('> COULD NOT CONVERT KERNEL TO FORTRAN')
        #     print('  THE PYTHON BACKEND WILL BE USED')
    # ...

    return kernel
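

# Usage sketch (illustrative only): how compile_symbol is typically called,
# mirroring test_1d_scalar_2 in the GLT tests above.  It assumes the same
# imports/namespace as those tests (Symbol, Constant, Dot, Grad, Lambda,
# linspace, zeros, pi, SplineSpace); the symbol name '_example_symbol' is
# hypothetical.
def _example_compile_symbol_usage():
    x = Symbol('x')
    u = Symbol('u')
    v = Symbol('v')

    b = Constant('b')

    a = Lambda((x, v, u), Dot(Grad(b * u), Grad(v)) + u * v)

    ne = 64
    V = SplineSpace(3, grid=linspace(0., 1., ne + 1))

    # 'b' is frozen at compile time; the Fortran backend goes through pyccel
    symbol = compile_symbol('_example_symbol', a, V,
                            d_constants={'b': 0.1},
                            backend='fortran')

    # evaluate the symbol on the space points x1 and frequencies t1,
    # writing the result in place into e
    t1 = linspace(-pi, pi, ne + 1)
    x1 = linspace(0., 1., ne + 1)
    e = zeros(ne + 1)
    symbol(x1, t1, e)

    return e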