def test_simple_2d_check_raw_assignments():
    """Check the Jacobian of a single raw assignment on simple 2D fields."""
    z, y, x = ps.fields("z, y, x: [2d]")

    assignments = [ps.Assignment(z[0, 0], x[0, 0] * sp.log(x[0, 0] * y[0, 0]))]

    jacobian = pystencils_autodiff.get_jacobian_of_assignments(
        assignments, [x[0, 0], y[0, 0]])

    # One output expression differentiated w.r.t. two inputs -> 1x2 matrix.
    assert jacobian.shape == (1, 2)
    assert repr(jacobian) == 'Matrix([[log(x_C*y_C) + 1, x_C/y_C]])'

    # Backward assignments must be derivable in every supported diff mode.
    for mode in DiffModes:
        pystencils_autodiff.create_backward_assignments(
            assignments, diff_mode=mode)
def test_module_printing_parameter():
    """Print generated autodiff modules for every backend on CPU and GPU."""
    module_name = "Ololol"

    for target in ('cpu', 'gpu'):
        z, y, x = pystencils.fields("z, y, x: [20,40]")
        a = sympy.Symbol('a')

        forward = pystencils.AssignmentCollection(
            {z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])})
        backward = create_backward_assignments(forward)

        forward_ast = pystencils.create_kernel(forward, target)
        forward_ast.function_name = 'forward'
        backward_ast = pystencils.create_kernel(backward, target)
        backward_ast.function_name = 'backward'

        print(TorchModule(module_name, [forward_ast, backward_ast]))

        print(TensorflowModule(module_name, {forward_ast: backward_ast}))

        # Pybind modules are CPU-only; exercise both the list form and the
        # single-AST convenience form.
        if target == 'cpu':
            print(PybindModule(module_name, [forward_ast, backward_ast]))
            print(PybindModule(module_name, forward_ast))
    def __call__(self, pdf_field: pystencils_autodiff.AdjointField,
                 direction_symbol, *args, **kwargs):
        """Derive the backward (adjoint) assignments of the wrapped forward
        boundary condition for ``pdf_field``.

        Args:
            pdf_field: adjoint PDF field. As a heuristic, a plain field whose
                name starts with ``'diff'`` is wrapped into an
                :class:`pystencils_autodiff.AdjointField` of the field named
                after the prefix is stripped.
            direction_symbol: lattice direction, forwarded to the forward
                boundary condition.
            *args, **kwargs: forwarded verbatim to the forward condition.

        Returns:
            The backward assignments produced by
            ``pystencils_autodiff.create_backward_assignments``.
        """
        # apply heuristics: 'diff<name>' is treated as the adjoint of '<name>'
        if pdf_field.name.startswith('diff'):
            forward_field = pystencils.Field.new_field_with_different_name(
                pdf_field, pdf_field.name[len('diff'):])
            pdf_field = pystencils_autodiff.AdjointField(forward_field)

        assert isinstance(pdf_field, pystencils_autodiff.AdjointField), \
            f'{pdf_field} should be a pystencils_autodiff.AdjointField to use AdjointBoundaryCondition'

        forward_field = pdf_field.corresponding_forward_field
        forward_assignments = self._forward_condition(forward_field,
                                                      direction_symbol, *args,
                                                      **kwargs)

        backward_assignments = pystencils_autodiff.create_backward_assignments(
            forward_assignments,
            diff_fields_prefix=pdf_field.name_prefix,
            time_constant_fields=self._time_constant_fields,
            constant_fields=self._constant_fields)
        # Fix: corrected the grammar of the assertion message.
        assert backward_assignments.all_assignments, (
            "Must have at least one read field in the forward boundary "
            "condition to obtain a meaningful adjoint boundary condition")

        return backward_assignments
# Esempio n. 4 ("Example no. 4" — listing separator left over from extraction)
# 0
def test_tensorflow_jit_cpu():
    """JIT-compile a CPU Tensorflow module and verify both kernels are exposed."""
    pytest.importorskip('tensorflow')

    module_name = "Ololol"
    target = 'cpu'

    z, y, x = pystencils.fields("z, y, x: [20,40]")
    a = sympy.Symbol('a')

    forward = pystencils.AssignmentCollection(
        {z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])})
    backward = create_backward_assignments(forward)

    forward_ast = pystencils.create_kernel(forward, target)
    forward_ast.function_name = 'forward_jit'
    backward_ast = pystencils.create_kernel(backward, target)
    backward_ast.function_name = 'backward_jit'
    module = TensorflowModule(module_name, [forward_ast, backward_ast])

    # Compile via the low-level helper ...
    lib = pystencils_autodiff.tensorflow_jit.compile_sources_and_load(
        [str(module)])
    assert 'call_forward_jit' in dir(lib)
    assert 'call_backward_jit' in dir(lib)

    # ... and via the module's own compile() convenience wrapper.
    lib = module.compile()
    assert 'call_forward_jit' in dir(lib)
    assert 'call_backward_jit' in dir(lib)
def test_native_tensorflow_compilation_cpu():
    """Natively compile a TensorflowModule for CPU and check exposed symbols."""
    # Fix: the importorskip return value was bound to an unused local `tf`.
    pytest.importorskip('tensorflow')

    module_name = "Ololol"
    target = 'cpu'

    z, y, x = pystencils.fields("z, y, x: [20,40]")
    a = sympy.Symbol('a')

    forward_assignments = pystencils.AssignmentCollection({
        z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])
    })
    backward_assignments = create_backward_assignments(forward_assignments)

    forward_ast = pystencils.create_kernel(forward_assignments, target)
    forward_ast.function_name = 'forward'
    backward_ast = pystencils.create_kernel(backward_assignments, target)
    backward_ast.function_name = 'backward'
    module = TensorflowModule(module_name, [forward_ast, backward_ast])
    print(module)

    # (Removed commented-out manual c++ invocation; module.compile() covers it.)
    lib = module.compile()
    assert 'call_forward' in dir(lib)
    assert 'call_backward' in dir(lib)
def test_pybind11_compilation_cpu(with_python_bindings):
    """Build a Pybind module on CPU, with or without Python bindings."""
    pytest.importorskip('pybind11')
    pytest.importorskip('cppimport')

    module_name = "Olololsada"
    target = 'cpu'

    z, y, x = pystencils.fields("z, y, x: [20,40]")
    a = sympy.Symbol('a')

    forward = pystencils.AssignmentCollection(
        {z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])})
    backward = create_backward_assignments(forward)

    forward_ast = pystencils.create_kernel(forward, target)
    forward_ast.function_name = 'forward'
    backward_ast = pystencils.create_kernel(backward, target)
    backward_ast.function_name = 'backward'
    module = PybindModule(module_name, [forward_ast, backward_ast],
                          with_python_bindings=with_python_bindings)
    print(module)

    # Compilation is only meaningful when bindings are generated.
    if with_python_bindings:
        extension = module.compile()
        assert extension is not None
        assert 'call_forward' in dir(extension)
        assert 'call_backward' in dir(extension)
def test_torch_native_compilation_cpu():
    """Natively compile a TorchModule for CPU and check exposed symbols."""
    # Fix: skip (not error) when torch is missing, consistent with the
    # tensorflow/pybind11 tests in this file.
    pytest.importorskip('torch')
    from torch.utils.cpp_extension import load

    module_name = "Ololol"
    target = 'cpu'

    z, y, x = pystencils.fields("z, y, x: [20,40]")
    a = sympy.Symbol('a')

    forward_assignments = pystencils.AssignmentCollection(
        {z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])})
    backward_assignments = create_backward_assignments(forward_assignments)

    forward_ast = pystencils.create_kernel(forward_assignments, target)
    forward_ast.function_name = 'forward'
    backward_ast = pystencils.create_kernel(backward_assignments, target)
    backward_ast.function_name = 'backward'
    module = TorchModule(module_name, [forward_ast, backward_ast])
    print(module)

    # Compile via torch's own extension loader ...
    temp_file = write_cached_content(str(module), '.cpp')
    torch_extension = load(module_name, [temp_file])
    assert torch_extension is not None
    assert 'call_forward' in dir(torch_extension)
    assert 'call_backward' in dir(torch_extension)

    # ... and via the module's compile() convenience wrapper.
    torch_extension = module.compile()
    assert torch_extension is not None
    assert 'call_forward' in dir(torch_extension)
    assert 'call_backward' in dir(torch_extension)
def test_reproducability():
    """Generated code must be byte-identical across repeated generation runs,
    even after sympy's cache is cleared between runs."""
    from sympy.core.cache import clear_cache

    reference_output = None
    for _ in range(10):
        module_name = "Ololol"
        target = 'cpu'

        z, y, x = pystencils.fields("z, y, x: [20,40]")
        a = sympy.Symbol('a')

        forward_assignments = pystencils.AssignmentCollection(
            {z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])})
        backward_assignments = create_backward_assignments(forward_assignments)

        forward_ast = pystencils.create_kernel(forward_assignments, target)
        forward_ast.function_name = 'forward'
        backward_ast = pystencils.create_kernel(backward_assignments, target)
        backward_ast.function_name = 'backward'
        new_output = str(TorchModule(module_name, [forward_ast, backward_ast]))
        TorchModule(module_name, [forward_ast, backward_ast]).compile()

        clear_cache()

        # Fix: compare against the None sentinel explicitly instead of
        # relying on truthiness (an empty first output would never be stored).
        if reference_output is None:
            reference_output = new_output

        assert reference_output == new_output
def test_simple_2d_check_assignment_collection():
    """Jacobian and backward assignments for an AssignmentCollection input."""
    z, y, x = ps.fields("z, y, x: [2d]")

    collection = ps.AssignmentCollection(
        [ps.Assignment(z[0, 0], x[0, 0] * sp.log(x[0, 0] * y[0, 0]))], [])

    jacobian = pystencils_autodiff.get_jacobian_of_assignments(
        collection, [x[0, 0], y[0, 0]])

    # Jacobian shape is (#bound symbols) x (#free symbols).
    expected_shape = (len(collection.bound_symbols),
                      len(collection.free_symbols))
    assert jacobian.shape == expected_shape
    print(repr(jacobian))
    assert repr(jacobian) == 'Matrix([[log(x_C*y_C) + 1, x_C/y_C]])'

    for mode in DiffModes:
        pystencils_autodiff.create_backward_assignments(
            collection, diff_mode=mode)
        # Differentiating the backward assignments again must also work.
        pystencils_autodiff.create_backward_assignments(
            pystencils_autodiff.create_backward_assignments(collection),
            diff_mode=mode)

    # Both diff modes must agree on this example.
    transposed = pystencils_autodiff.create_backward_assignments(
        collection, diff_mode=DiffModes.TRANSPOSED)
    tf_mad = pystencils_autodiff.create_backward_assignments(
        collection, diff_mode=DiffModes.TF_MAD)
    assert transposed == tf_mad
def test_native_tensorflow_compilation_gpu():
    """Natively compile a TensorflowModule for GPU and check exposed symbols."""
    # Fix: the importorskip return value was bound to an unused local `tf`.
    pytest.importorskip('tensorflow')

    module_name = "Ololol"
    target = 'gpu'

    z, y, x = pystencils.fields("z, y, x: [20,40]")
    a = sympy.Symbol('a')

    forward_assignments = pystencils.AssignmentCollection({
        z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])
    })
    backward_assignments = create_backward_assignments(forward_assignments)

    forward_ast = pystencils.create_kernel(forward_assignments, target)
    forward_ast.function_name = 'forward2'
    backward_ast = pystencils.create_kernel(backward_assignments, target)
    backward_ast.function_name = 'backward2'
    module = TensorflowModule(module_name, [forward_ast, backward_ast])
    print(str(module))

    # (Removed commented-out manual nvcc/c++ invocation; module.compile()
    # performs the equivalent build.)
    lib = module.compile()

    assert 'call_forward2' in dir(lib)
    assert 'call_backward2' in dir(lib)
# Esempio n. 11 ("Example no. 11" — listing separator left over from extraction)
# 0
def test_tfmad_stencil():
    """Backward assignments for a 2nd-order discretized 2D derivative stencil."""
    f, out = ps.fields("f, out: double[2D]")

    # d/dx - d/dy, discretized with a 2nd-order scheme on a unit grid.
    continuous = ps.fd.Diff(f, 0) - ps.fd.Diff(f, 1)
    discretized = ps.fd.Discretization2ndOrder(dx=1)(continuous)

    collection = ps.AssignmentCollection(
        [ps.Assignment(out.center(), discretized)], [])
    print('Forward')
    print(collection)

    print('Backward')
    print(pystencils_autodiff.create_backward_assignments(
        collection, diff_mode='transposed-forward'))
# Esempio n. 12 ("Example no. 12" — listing separator left over from extraction)
# 0
def test_tensorflow_jit_gpu():
    """JIT-compile a GPU Tensorflow module and verify both kernels are exposed."""
    pytest.importorskip('tensorflow')

    module_name = "Ololols"
    target = 'gpu'

    z, y, x = pystencils.fields("z, y, x: [20,40]")
    a = sympy.Symbol('a')

    forward = pystencils.AssignmentCollection(
        {z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])})
    backward = create_backward_assignments(forward)

    forward_ast = pystencils.create_kernel(forward, target)
    # Kernel names must differ from the CPU tests' names.
    forward_ast.function_name = 'forward_jit_gpu'
    backward_ast = pystencils.create_kernel(backward, target)
    backward_ast.function_name = 'backward_jit_gpu'
    module = TensorflowModule(module_name, [forward_ast, backward_ast])

    # Compile via the low-level helper (sources go in the CUDA slot) ...
    lib = pystencils_autodiff.tensorflow_jit.compile_sources_and_load(
        [], [str(module)])
    assert 'call_forward_jit_gpu' in dir(lib)
    assert 'call_backward_jit_gpu' in dir(lib)

    # ... and via a freshly constructed module's compile() wrapper.
    module = TensorflowModule(module_name, [forward_ast, backward_ast])
    lib = module.compile()
    assert 'call_forward_jit_gpu' in dir(lib)
    assert 'call_backward_jit_gpu' in dir(lib)

    # compile_only=True should just emit the object file and return its path.
    file_name = pystencils_autodiff.tensorflow_jit.compile_sources_and_load(
        [], [str(module)], compile_only=True)
    print(file_name)
    assert exists(file_name)