Code example #1
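All examples below assume roughly the following imports. This is a sketch: `get_curl` and `compute_gradient_error_without_border` are helper functions from the surrounding test module and are not shown here, and the class methods in examples #1, #8 and #11 additionally rely on lbmpy types (e.g. LbmCollisionRule, LatticeBoltzmannBoundaryHandling) and class-internal helpers.

import numpy as np
import pytest
import sympy

import pystencils as ps
import pystencils.field
import pystencils_autodiff

This first example is apparently a method of a lattice-Boltzmann scheme class: it wraps the macroscopic-value setter kernel in pystencils_autodiff.AutoDiffOp and binds the given framework tensors to the density, velocity and (optional) force fields.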
    def create_macroscopic_setter_op(self,
                                     velocity_input_tensor,
                                     density_input_tensor,
                                     force_input_tensor=None,
                                     backend='tensorflow',
                                     **kernel_compilation_kwargs):
        rho_field = self._data_handling.fields[self.density_data_name]
        vel_field = self._data_handling.fields[self.velocity_data_name]

        if self.force_data_name:
            assert force_input_tensor is not None, "must set force_input_tensor if using a force model"
            force_field = self._data_handling.fields[self.force_data_name]
        else:
            force_field = None

        _, setter_eqs = self._create_macroscopic_setter_and_getter_equations()

        op = pystencils_autodiff.AutoDiffOp(
            setter_eqs, self.name + "_SetMacroscopicValues",
            **kernel_compilation_kwargs)

        # Bind framework tensors to the pystencils fields; include the force
        # field only when a force model is in use.
        input_tensors = {
            rho_field: density_input_tensor,
            vel_field: velocity_input_tensor,
        }
        if force_field is not None:
            input_tensors[force_field] = force_input_tensor

        return op.create_tensorflow_op(input_tensors, backend=backend)
Code example #2
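A gradient check for the 'torch' backend: a second-order finite-difference discretization of a first-order derivative expression is compiled into an AutoDiffOp, and torch.autograd.gradcheck verifies the generated backward pass.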
def test_tfmad_gradient_check_torch():
    torch = pytest.importorskip('torch')

    a, b, out = ps.fields("a, b, out: float[5,7]")

    cont = 2 * ps.fd.Diff(a, 0) - 1.5 * ps.fd.Diff(a, 1) \
        - ps.fd.Diff(b, 0) + 3 * ps.fd.Diff(b, 1)
    discretize = ps.fd.Discretization2ndOrder(dx=1)
    discretization = discretize(cont) + 1.2 * a.center

    assignment = ps.Assignment(out.center(), discretization)
    assignment_collection = ps.AssignmentCollection([assignment], [])
    print('Forward')
    print(assignment_collection)

    print('Backward')
    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
                                               diff_mode='transposed-forward')
    backward = auto_diff.backward_assignments
    print(backward)
    print('Forward input fields (to check order)')
    print(auto_diff.forward_input_fields)

    a_tensor = torch.zeros(*a.shape, dtype=torch.float64, requires_grad=True)
    b_tensor = torch.zeros(*b.shape, dtype=torch.float64, requires_grad=True)

    function = auto_diff.create_tensorflow_op({a: a_tensor, b: b_tensor},
                                              backend='torch')

    torch.autograd.gradcheck(function.apply, [a_tensor, b_tensor])
Code example #3
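The same kind of stencil with boundary_handling='valid', run through the native TensorFlow backend; the numeric gradient check uses a helper that can skip border pixels.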
def test_valid_boundary_handling_tensorflow_native():
    pytest.importorskip('tensorflow')
    import tensorflow as tf

    a, b, out = ps.fields("a, b, out: double[10,11]")
    print(a.shape)

    cont = 2 * ps.fd.Diff(a, 0) - 1.5 * ps.fd.Diff(a, 1) - ps.fd.Diff(
        b, 0) + 3 * ps.fd.Diff(b, 1)
    discretize = ps.fd.Discretization2ndOrder(dx=1)
    discretization = discretize(cont)

    assignment = ps.Assignment(out.center(), discretization + 0.1 * b[1, 0])

    assignment_collection = ps.AssignmentCollection([assignment], [])

    print('Forward')
    print(assignment_collection)

    print('Backward')
    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
                                               boundary_handling='valid')
    backward = auto_diff.backward_assignments
    print(backward)
    print('Forward input fields (to check order)')
    print(auto_diff.forward_input_fields)

    a_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
    b_tensor = tf.Variable(np.zeros(b.shape, b.dtype.numpy_dtype))

    op = auto_diff.create_tensorflow_op(use_cuda=False,
                                        backend='tensorflow_native')
    out_tensor = op(a=a_tensor, b=b_tensor)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        sess.run(out_tensor)

        gradient_error = compute_gradient_error_without_border(
            [a_tensor, b_tensor], [a.shape, b.shape],
            out_tensor,
            out.shape,
            num_border_pixels=0,
            ndim=2,
            debug=False)
        print('error: %s' % gradient_error.max_error)
        print('avg error: %s' % gradient_error.avg_error)

        assert any(e < 1e-4 for e in gradient_error.values())
Code example #4
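A gradient check for the native Torch backend ('torch_native'), parametrized over stencil offsets and CUDA execution.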
# with_offsets and with_cuda are assumed to be supplied via pytest parametrization
@pytest.mark.parametrize('with_offsets', (False, True))
@pytest.mark.parametrize('with_cuda', (False, True))
def test_tfmad_gradient_check_torch_native(with_offsets, with_cuda):
    torch = pytest.importorskip('torch')

    a, b, out = ps.fields("a, b, out: float64[5,7]")

    if with_offsets:
        cont = 2 * ps.fd.Diff(a, 0) - 1.5 * ps.fd.Diff(a, 1) - ps.fd.Diff(
            b, 0) + 3 * ps.fd.Diff(b, 1)
        discretize = ps.fd.Discretization2ndOrder(dx=1)
        discretization = discretize(cont)

        assignment = ps.Assignment(out.center(),
                                   discretization + 1.2 * a.center())
    else:
        assignment = ps.Assignment(out.center(),
                                   1.2 * a.center + 0.1 * b.center)

    assignment_collection = ps.AssignmentCollection([assignment], [])
    print('Forward')
    print(assignment_collection)

    print('Backward')
    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
                                               boundary_handling='zeros',
                                               diff_mode='transposed-forward')
    backward = auto_diff.backward_assignments
    print(backward)
    print('Forward input fields (to check order)')
    print(auto_diff.forward_input_fields)

    a_tensor = torch.zeros(*a.shape, dtype=torch.float64,
                           requires_grad=True).contiguous()
    b_tensor = torch.zeros(*b.shape, dtype=torch.float64,
                           requires_grad=True).contiguous()

    if with_cuda:
        a_tensor = a_tensor.cuda()
        b_tensor = b_tensor.cuda()

    function = auto_diff.create_tensorflow_op(use_cuda=with_cuda,
                                              backend='torch_native')

    field_to_tensor = {a: a_tensor, b: b_tensor}  # avoid shadowing the builtin `dict`
    torch.autograd.gradcheck(
        function.apply,
        tuple(field_to_tensor[f] for f in auto_diff.forward_input_fields),
        atol=1e-4,
        raise_exception=True)
Code example #5
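A gradient check for the native TensorFlow backend using tf.test.compute_gradient to compare theoretical and numerical Jacobians.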
# with_offsets and with_cuda are assumed to be supplied via pytest parametrization
@pytest.mark.parametrize('with_offsets', (False, True))
@pytest.mark.parametrize('with_cuda', (False, True))
def test_tfmad_gradient_check_tensorflow_native(with_offsets, with_cuda):
    pytest.importorskip('tensorflow')
    import tensorflow as tf

    a, b, out = ps.fields("a, b, out: double[21,13]", layout='fzyx')
    print(a.shape)

    if with_offsets:
        cont = 2 * ps.fd.Diff(a, 0) - 1.5 * ps.fd.Diff(a, 1) - ps.fd.Diff(
            b, 0) + 3 * ps.fd.Diff(b, 1)
        discretize = ps.fd.Discretization2ndOrder(dx=1)
        discretization = discretize(cont)

        assignment = ps.Assignment(out.center(),
                                   discretization + 0.1 * b[1, 0])

    else:
        assignment = ps.Assignment(out.center(),
                                   1.2 * a.center + 0.1 * b.center)

    assignment_collection = ps.AssignmentCollection([assignment], [])

    print('Forward')
    print(assignment_collection)

    print('Backward')
    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
                                               boundary_handling='zeros')
    backward = auto_diff.backward_assignments
    print(backward)
    print('Forward input fields (to check order)')
    print(auto_diff.forward_input_fields)

    a_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
    b_tensor = tf.Variable(np.zeros(b.shape, b.dtype.numpy_dtype))
    op = auto_diff.create_tensorflow_op(use_cuda=with_cuda,
                                        backend='tensorflow_native')

    theoretical, numerical = tf.test.compute_gradient(op, [a_tensor, b_tensor],
                                                      delta=0.001)
    assert np.allclose(theoretical[0], numerical[0])
    assert np.allclose(theoretical[1], numerical[1])
Code example #6
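Derivation of forward and backward assignments for a curl operator with a vector-valued (two-component) output field; only the assignments are printed, no numerical check is performed.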
def test_tfmad_two_outputs():

    domain_shape = (20, 30)
    vector_shape = domain_shape + (2, )

    curl_input_for_u = ps.Field.create_fixed_size(field_name='curl_input',
                                                  shape=domain_shape,
                                                  index_dimensions=0)
    u_field = ps.Field.create_fixed_size(field_name='curl',
                                         shape=vector_shape,
                                         index_dimensions=1)

    curl_op = pystencils_autodiff.AutoDiffOp(get_curl(curl_input_for_u,
                                                      u_field),
                                             diff_mode="transposed-forward")

    print('Forward')
    print(curl_op.forward_assignments)

    print('Backward')
    print(curl_op.backward_assignments)
Code example #7
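A numeric gradient check for the default TensorFlow backend, ignoring a two-pixel border around the domain.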
def test_tfmad_gradient_check():
    tf = pytest.importorskip('tensorflow')

    a, b, out = ps.fields("a, b, out: double[5,6]")
    print(a.shape)

    cont = ps.fd.Diff(a, 0) - ps.fd.Diff(a, 1) - ps.fd.Diff(b, 0) + ps.fd.Diff(
        b, 1)
    discretize = ps.fd.Discretization2ndOrder(dx=1)
    discretization = discretize(cont)

    assignment = ps.Assignment(out.center(), discretization)
    assignment_collection = ps.AssignmentCollection([assignment], [])
    print('Forward')
    print(assignment_collection)

    print('Backward')
    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
                                               diff_mode='transposed-forward')
    backward = auto_diff.backward_assignments
    print(backward)
    print('Forward input fields (to check order)')
    print(auto_diff.forward_input_fields)

    a_tensor = tf.Variable(np.zeros(a.shape, a.dtype.numpy_dtype))
    b_tensor = tf.Variable(np.zeros(b.shape, b.dtype.numpy_dtype))
    out_tensor = auto_diff.create_tensorflow_op({a: a_tensor, b: b_tensor})

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())

        gradient_error = compute_gradient_error_without_border(
            [a_tensor, b_tensor], [a.shape, b.shape],
            out_tensor,
            out.shape,
            num_border_pixels=2,
            ndim=2)
        print('error: %s' % gradient_error.max_error)

        assert any(e < 1e-4 for e in gradient_error.values())
Code example #8
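The counterpart of code example #1: it wraps the macroscopic-value getter kernel in a differentiable op.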
    def create_macroscopic_getter_op(self,
                                     pdf_input_tensor,
                                     force_field_tensor=None,
                                     backend='tensorflow',
                                     **kernel_compilation_kwargs):
        """Creates a Tensorflow Op or a operation for another framework

        Arguments:
            pdf_input_tensor {[type]} -- [description]

        Keyword Arguments:
            force_field_tensor {[type]} -- [description] (default: {None})
            backend {str} -- [description] (default: {'tensorflow'})

        Returns:
            [type] -- [description]
        """

        pdf_field = self._data_handling.fields[self._pdf_arr_name]

        if self.force_data_name:
            assert force_field_tensor is not None, "must set force_field_tensor if using a force model"
            force_field = self._data_handling.fields[self.force_data_name]
        else:
            force_field = None

        getter_eqs, _ = self._create_macroscopic_setter_and_getter_equations()

        op = pystencils_autodiff.AutoDiffOp(
            getter_eqs, self.name + "_GetMacroscopicValues",
            **kernel_compilation_kwargs)

        # Include the force field only when a force model is in use.
        input_tensors = {pdf_field: pdf_input_tensor}
        if force_field is not None:
            input_tensors[force_field] = force_field_tensor

        return op.create_tensorflow_op(input_tensors, backend=backend)
Code example #9
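Prints the forward and (transposed-forward) backward assignments of a stencil that reads from two input fields; no numerical check is performed.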
def test_tfmad_two_stencils():

    a, b, out = ps.fields("a, b, out: double[2D]")

    cont = ps.fd.Diff(a, 0) - ps.fd.Diff(a, 1) - \
        ps.fd.Diff(b, 0) + ps.fd.Diff(b, 1)
    discretize = ps.fd.Discretization2ndOrder(dx=1)
    discretization = discretize(cont)

    assignment = ps.Assignment(out.center(), discretization)
    assignment_collection = ps.AssignmentCollection([assignment], [])
    print('Forward')
    print(assignment_collection)

    print('Backward')
    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
                                               diff_mode='transposed-forward')
    backward = auto_diff.backward_assignments
    print(backward)
    print('Forward input fields (to check order)')
    print(auto_diff.forward_input_fields)

    print(auto_diff)
Code example #10
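A gradient check for an op with three output fields, one of which depends nonlinearly (via exp) on an offset access.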
# with_cuda is assumed to be supplied via pytest parametrization
@pytest.mark.parametrize('with_cuda', (False, True))
def test_tfmad_gradient_check_two_outputs(with_cuda):
    torch = pytest.importorskip('torch')

    a, b, out1, out2, out3 = ps.fields(
        "a, b, out1, out2, out3: float64[21,13]")

    assignment_collection = ps.AssignmentCollection({
        out1.center:
        a.center + b.center,
        out2.center:
        a.center - b.center,
        out3.center:
        sympy.exp(b[-1, 0])
    })
    print('Forward')
    print(assignment_collection)

    print('Backward')
    auto_diff = pystencils_autodiff.AutoDiffOp(assignment_collection,
                                               boundary_handling='zeros',
                                               diff_mode='transposed-forward')
    print(auto_diff.backward_fields)
    backward = auto_diff.backward_assignments
    print(backward)
    print('Forward input fields (to check order)')
    print(auto_diff.forward_input_fields)

    a_tensor = torch.zeros(*a.shape, dtype=torch.float64,
                           requires_grad=True).contiguous()
    b_tensor = torch.zeros(*b.shape, dtype=torch.float64,
                           requires_grad=True).contiguous()
    out1_tensor = torch.zeros(*out1.shape,
                              dtype=torch.float64,
                              requires_grad=True).contiguous()
    out2_tensor = torch.zeros(*out2.shape,
                              dtype=torch.float64,
                              requires_grad=True).contiguous()
    out3_tensor = torch.zeros(*out3.shape,
                              dtype=torch.float64,
                              requires_grad=True).contiguous()

    if with_cuda:
        a_tensor = a_tensor.cuda()
        b_tensor = b_tensor.cuda()
        out1_tensor = out1_tensor.cuda()
        out2_tensor = out2_tensor.cuda()
        out3_tensor = out3_tensor.cuda()

    function = auto_diff.create_tensorflow_op(use_cuda=with_cuda,
                                              backend='torch_native')

    field_to_tensor = {  # avoid shadowing the builtin `dict`
        a: a_tensor,
        b: b_tensor,
        out1: out1_tensor,
        out2: out2_tensor,
        out3: out3_tensor,
    }
    torch.autograd.gradcheck(
        function.apply,
        tuple(field_to_tensor[f] for f in auto_diff.forward_input_fields),
        atol=1e-4,
        raise_exception=True)
Code example #11
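Constructor of an adjoint-capable lattice-Boltzmann step (apparently subclassing an lbmpy step class): it derives the field layout from the strides of the source PDF field, builds forward and backward kernels through AutoDiffOp, allocates adjoint arrays, and sets up a boundary handling for the backward sweep.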
    def __init__(self,
                 update_rule: LbmCollisionRule,
                 src_pdf_field: Union[Field, str] = '',
                 tmp_pdf_field: Union[Field, str] = '',
                 time_constant_fields=None,
                 *args,
                 constant_fields=None,
                 optimization=None,
                 additional_fields=None,
                 **method_parameters):
        # Avoid mutable default arguments; `optimization` is mutated below.
        time_constant_fields = time_constant_fields or []
        constant_fields = constant_fields or []
        additional_fields = additional_fields or []
        optimization = dict(optimization) if optimization else {}

        self._adjoint_boundary_conditions = dict()

        src_pdf_field, tmp_pdf_field = _guess_src_dst_field_from_update_rule(
            update_rule, src_pdf_field, tmp_pdf_field)
        self.pdf_field = src_pdf_field
        self.temporary_field = tmp_pdf_field

        # Field.layout is just spatial, so recover the full layout from the strides
        try:
            real_layout = pystencils.field.get_layout_from_strides(
                src_pdf_field.strides)
            ndim = len(src_pdf_field.shape)

            for layout_string in ('fzyx', 'tensorflow', 'zyxf', 'c'):
                if pystencils.field.layout_string_to_tuple(
                        layout_string, dim=ndim) == real_layout:
                    optimization['field_layout'] = layout_string
                    break
            else:
                raise NotImplementedError(
                    "Cannot recognize layout of src_pdf_field")
        except Exception:
            pass

        optimization['double_precision'] = (
            src_pdf_field.dtype.numpy_dtype == np.float64)

        super().__init__(*args,
                         update_rule=update_rule,
                         optimization=optimization,
                         pdf_arr_name=src_pdf_field.name,
                         tmp_arr_name=tmp_pdf_field.name,
                         **method_parameters)

        self._target = 'gpu' if self._gpu else 'cpu'
        self._forward_assignments = update_rule
        # TODO: optimization parameters, e.g. target
        self._autodiff = pystencils_autodiff.AutoDiffOp(
            self._forward_assignments,
            "LBM",
            ghost_layers=self._data_handling.default_ghost_layers,
            target=self._target,
            data_type="double"
            if src_pdf_field.dtype.numpy_dtype == np.float64 else "float",
            cpu_openmp=True,
            time_constant_fields=time_constant_fields,
            constant_fields=constant_fields)

        self._lbmKernels = [
            self._autodiff.get_forward_kernel(is_gpu=self._gpu)
        ]
        self._backwardLbmKernels = [
            self._autodiff.get_backward_kernel(is_gpu=self._gpu)
        ]

        for field in set(additional_fields).union(set(time_constant_fields)):
            if field.name not in self.data_handling.cpu_arrays:
                self._data_handling.add_array(field.name,
                                              field.values_per_cell,
                                              layout=field.layout,
                                              dtype=field.dtype.numpy_dtype,
                                              gpu=self._gpu)
            adjoint_field = pystencils_autodiff.AdjointField(field)
            if adjoint_field.name not in self.data_handling.cpu_arrays:
                self._data_handling.add_array(adjoint_field.name,
                                              field.values_per_cell,
                                              layout=field.layout,
                                              dtype=field.dtype.numpy_dtype,
                                              gpu=self._gpu)

        for forward, backward in self._autodiff.backward_fields_map.items():
            if backward.name not in self._data_handling.cpu_arrays:
                self._data_handling.add_array_like(backward.name,
                                                   forward.name,
                                                   gpu=self._gpu)

        self._backward_boundary_handling = LatticeBoltzmannBoundaryHandling(
            self.method,
            self._data_handling,
            self._backward_tmp_array_name,
            name="backward_boundary_handling",
            flag_interface=None,  # TODO
            target=self._target,
            openmp=True)