Example #1
    def call_parameters(self, arr_shape):
        substitution_dict = {
            sym: value
            for sym, value in zip(self._symbolic_shape, arr_shape)
            if sym is not None
        }

        widths = [
            end - start for start, end in zip(
                _get_start_from_slice(self._iterationSlice),
                _get_end_from_slice(self._iterationSlice, arr_shape))
        ]
        widths = sp.Matrix(widths).subs(substitution_dict)
        extend_bs = (1, ) * (3 - len(self._block_size))
        block_size = self._block_size + extend_bs
        if not self._compile_time_block_size:
            assert len(block_size) == 3
            adapted_block_size = []
            for i in range(len(widths)):
                factor = div_floor(prod(block_size[:i]),
                                   prod(adapted_block_size))
                adapted_block_size.append(
                    sp.Min(block_size[i] * factor, widths[i]))
            block_size = tuple(adapted_block_size) + extend_bs

        block_size = tuple(
            sp.Min(bs, max_bs)
            for bs, max_bs in zip(block_size, self._maximum_block_size))
        grid = tuple(
            div_ceil(length, block_size)
            for length, block_size in zip(widths, block_size))
        extend_gr = (1, ) * (3 - len(grid))

        return {'block': block_size, 'grid': grid + extend_gr}
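The snippet above leans on three small helpers from its host project (pystencils): prod, div_floor and div_ceil. A minimal sketch of plain-integer equivalents, assuming the real versions behave like these (they additionally handle sympy expressions):

import operator
from functools import reduce

def prod(seq):
    # product of a sequence; 1 for the empty sequence
    return reduce(operator.mul, seq, 1)

def div_floor(a, b):
    # floor division
    return a // b

def div_ceil(a, b):
    # ceiling division expressed through floor division
    return -(-a // b)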
Example #2
    def cylinder_grasp_affordance(self, gripper, obj_input):
        frame = obj_input.get_frame()
        shape = obj_input.get_dimensions()
        cylinder_z = frame.col(2)
        cylinder_pos = pos_of(frame)

        gripper_x = gripper.frame.col(0)
        gripper_z = gripper.frame.col(2)
        gripper_pos = pos_of(gripper.frame)
        c_to_g = gripper_pos - cylinder_pos

        zz_align = sp.Abs(gripper_z.dot(cylinder_z))
        xz_align = gripper_x.dot(cylinder_z)
        dist_z = cylinder_z.dot(c_to_g)
        border_z = (shape[2] - gripper.height) * 0.5
        cap_dist_normalized_signed = dist_z / border_z
        cap_dist_normalized = sp.Abs(cap_dist_normalized_signed)

        cap_top_grasp = 1 - sp.Max(-xz_align * sp.Min(cap_dist_normalized_signed, 1), 0)
        cap_bottom_grasp = 1 - sp.Min(xz_align * sp.Max(cap_dist_normalized_signed, -1), 0)

        dist_z_center_normalized = sp.Max(1 - cap_dist_normalized, 0)
        dist_ax = sp.sqrt(frame.col(0).dot(c_to_g) ** 2 + frame.col(1).dot(c_to_g) ** 2)

        center_grasp = (1 - dist_z_center_normalized - dist_ax) * zz_align

        return sp.Max(center_grasp, cap_top_grasp, cap_bottom_grasp) * obj_input.get_class_probability()
Example #3
def calc_set_union(set_a, set_b):
    if isinstance(set_a, subsets.Indices) or isinstance(set_b, subsets.Indices):
        raise NotImplementedError('Set union with indices is not implemented.')
    if not (isinstance(set_a, subsets.Range) and isinstance(set_b, subsets.Range)):
        raise TypeError('Can only compute the union of ranges.')
    if len(set_a) != len(set_b):
        raise ValueError('Range dimensions do not match')
    union = []
    for range_a, range_b in zip(set_a, set_b):
        r_union = []
        for i in range(3):
            if isinstance(range_a[i], SymExpr):
                a_exact = range_a[i].expr
                a_approx = range_a[i].approx
            else:
                a_exact = range_a[i]
                a_approx = range_a[i]
            if isinstance(range_b[i], SymExpr):
                b_exact = range_b[i].expr
                b_approx = range_b[i].approx
            else:
                b_exact = range_b[i]
                b_approx = range_b[i]
            if i in {0, 2}:
                r_union.append(SymExpr(sympy.Min(a_exact, b_exact), sympy.Min(a_approx, b_approx)))
            else:
                r_union.append(SymExpr(sympy.Max(a_exact, b_exact), sympy.Max(a_approx, b_approx)))
        union.append(r_union)
        # union.append([
        #     sympy.Min(range_a[0], range_b[0]),
        #     sympy.Max(range_a[1], range_b[1]),
        #     sympy.Min(range_a[2], range_b[2]),
        # ])
    return subsets.Range(union)
Example #4
    def _execute_controller(self):

        rate = rospy.Rate(10)

        P, I = sp.var('p i')
        min_linear_vel = sp.var("minLinearVel")
        min_angular_vel = sp.var("minAngularVel")

        distErr, distVerticalErr, vertDiff, lastLinearOut = sp.var(
            'distErr distVerticalErr vertDiff lastLinearOut')
        anglErr, lastAngularOut, anglDiff = sp.var(
            'anglErr lastAngularOut anglDiff')

        self.linear_eq = sp.Piecewise(
            (sp.Max(P * distErr + I * lastLinearOut, min_linear_vel),
             sp.And(sp.Abs(anglErr) < anglDiff, distErr >= 0)),
            (sp.Min(P * distErr + I * lastLinearOut, -min_linear_vel),
             sp.And(sp.Abs(anglErr) < anglDiff, distErr < 0)),
            (0, sp.Abs(anglErr) >= anglDiff))

        self.linear_vertical_eq = sp.Piecewise(
            (sp.Max(P * distVerticalErr + I * lastLinearOut,
                    min_linear_vel), distVerticalErr > vertDiff), (0, True))

        angular_eq_temp = P * anglErr + I * lastAngularOut

        self.angular_eq = sp.Piecewise(
            (sp.Min(angular_eq_temp, -min_angular_vel), angular_eq_temp < 0),
            (sp.Max(angular_eq_temp, min_angular_vel), angular_eq_temp >= 0))

        constants = {
            P: self.current_config["p"],
            I: self.current_config["i"],
            min_linear_vel: self.current_config["min_linear_vel"],
            min_angular_vel: self.current_config["min_angular_vel"],
            anglDiff: self.current_config["angular_diff"],
            vertDiff: self.current_config["vertical_diff"]
        }

        variables = [
            distErr, distVerticalErr, lastLinearOut, anglErr, lastAngularOut
        ]

        self.linear_eq = self.linear_eq.subs(constants)
        self.linear_vertical_eq = self.linear_vertical_eq.subs(constants)
        self.angular_eq = self.angular_eq.subs(constants)

        while not rospy.is_shutdown():

            if self._goal is not None and self._enabled:
                self._command_to(self._goal, variables)

            self._reach_publisher.publish(Bool(self._reach))

            rate.sleep()
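The Piecewise equations above clamp the controller output away from zero so the robot never crawls below its minimum speed. A self-contained sketch of the same Max/Min clamping pattern (err, the 0.5 gain and the 0.1 minimum speed are illustrative values, not taken from the controller):

import sympy as sp

err = sp.Symbol('err')
out = sp.Piecewise(
    (sp.Max(0.5 * err, 0.1), err >= 0),   # forward: at least the minimum speed
    (sp.Min(0.5 * err, -0.1), True))      # backward: at most minus the minimum speed

print(out.subs(err, 0.05))  # 0.1 (clamped up to the minimum)
print(out.subs(err, -2.0))  # -1.0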
Example #5
    def f(self, yvec, params):
        import sympy as sp
        return NumSysLin.f(self, [
            m * yi for m, yi in zip(
                self.max_concs(params, min_=lambda x: sp.Min(*x),
                               dtype=object), yvec)
        ], params)
Example #6
def detect_reduction_type(wcr_str, openmp=False):
    """ Inspects a lambda function and tries to determine if it's one of the 
        built-in reductions that frameworks such as MPI can provide.

        :param wcr_str: A Python string representation of the lambda function.
        :param openmp: Detect additional OpenMP reduction types.
        :return: dtypes.ReductionType if detected, dtypes.ReductionType.Custom
                 if not detected, or None if no reduction is found.
    """
    if wcr_str == '' or wcr_str is None:
        return None

    # Get lambda function from string
    wcr = eval(wcr_str)
    wcr_ast = ast.parse(wcr_str).body[0].value.body

    # Run function through symbolic math engine
    a = sympy.Symbol('a')
    b = sympy.Symbol('b')
    try:
        result = wcr(a, b)
    except (TypeError, AttributeError,
            NameError):  # e.g., "Cannot determine truth value of relational"
        result = None

    # Check resulting value
    if result == sympy.Max(a, b) or (isinstance(wcr_ast, ast.Call)
                                     and isinstance(wcr_ast.func, ast.Name)
                                     and wcr_ast.func.id == 'max'):
        return dtypes.ReductionType.Max
    elif result == sympy.Min(a, b) or (isinstance(wcr_ast, ast.Call)
                                       and isinstance(wcr_ast.func, ast.Name)
                                       and wcr_ast.func.id == 'min'):
        return dtypes.ReductionType.Min
    elif result == a + b:
        return dtypes.ReductionType.Sum
    elif result == a * b:
        return dtypes.ReductionType.Product
    elif result == a & b:
        return dtypes.ReductionType.Bitwise_And
    elif result == a | b:
        return dtypes.ReductionType.Bitwise_Or
    elif result == a ^ b:
        return dtypes.ReductionType.Bitwise_Xor
    elif isinstance(wcr_ast, ast.BoolOp) and isinstance(wcr_ast.op, ast.And):
        return dtypes.ReductionType.Logical_And
    elif isinstance(wcr_ast, ast.BoolOp) and isinstance(wcr_ast.op, ast.Or):
        return dtypes.ReductionType.Logical_Or
    elif (isinstance(wcr_ast, ast.Compare)
          and isinstance(wcr_ast.ops[0], ast.NotEq)):
        return dtypes.ReductionType.Logical_Xor
    elif result == b:
        return dtypes.ReductionType.Exchange
    # OpenMP extensions
    elif openmp and result == a - b:
        return dtypes.ReductionType.Sub
    elif openmp and result == a / b:
        return dtypes.ReductionType.Div

    return dtypes.ReductionType.Custom
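A standalone illustration of the probing idea above, without the dace dependency: evaluate the lambda on sympy symbols and compare the result against known reduction patterns.

import sympy

a, b = sympy.Symbol('a'), sympy.Symbol('b')

wcr = eval('lambda a, b: a + b')
print(wcr(a, b) == a + b)            # True  -> would be detected as Sum

wcr = eval('lambda a, b: a * b')
print(wcr(a, b) == sympy.Max(a, b))  # False -> falls through to the next check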
Example #7
def test_kappa_expressions():
    Monomer('A', ['site'], {'site': ['u']})
    Parameter('two', 2)
    Parameter('kr', 0.1)
    Parameter('num_A', 1000)
    Expression('kf', 1e-5 / two)
    Expression('test_sqrt', -1 + sympy.sqrt(1 + two))
    Expression('test_pi', sympy.pi)
    Expression('test_e', sympy.E)
    Expression('test_log', sympy.log(two))
    Expression('test_exp', sympy.exp(two))
    Expression('test_sin', sympy.sin(two))
    Expression('test_cos', sympy.cos(two))
    Expression('test_tan', sympy.tan(two))
    Expression('test_max', sympy.Max(two, kr, 2.0))
    Expression('test_min', sympy.Min(two, kr, 2.0))
    Expression('test_mod', sympy.Mod(10, two))
    Expression('test_piecewise',
               sympy.Piecewise((0.0, two < 400.0), (1.0, True)))
    Initial(A(site=('u')), num_A)
    Rule('dimerize_fwd',
         A(site='u') + A(site='u') >> A(site=('u', 1)) % A(site=('u', 1)), kf)
    Rule('dimerize_rev',
         A(site=('u', 1)) % A(site=('u', 1)) >> A(site='u') + A(site='u'), kr)
    # We need an arbitrary observable here to get a Kappa output file
    Observable('A_obs', A())
    # Accommodates Expression in kappa simulation
    run_simulation(model, time=0)

    Rule('degrade_dimer', A(site=('u', ANY)) >> None, kr)
    Observable('dimer', A(site=('u', ANY)))
    # Accommodates site with explicit state and arbitrary bond
    run_simulation(model, time=0, seed=_KAPPA_SEED)
Example #8
def transform_set(x, expr, sympy_set):
    """ Transform a sympy_set by an expression

    >>> x = sympy.Symbol('x')
    >>> domain = sympy.Interval(-sympy.pi / 4, -sympy.pi / 6, False, True) | sympy.Interval(sympy.pi / 6, sympy.pi / 4, True, False)
    >>> transform_set(x, -2 * x, domain)
    [-pi/2, -pi/3) U (pi/3, pi/2]
    """

    if isinstance(sympy_set, sympy.Union):
        # unpack: Union takes its members as separate arguments
        return sympy.Union(
            *[transform_set(x, expr, arg) for arg in sympy_set.args])
    if isinstance(sympy_set, sympy.Intersection):
        return sympy.Intersection(
            *[transform_set(x, expr, arg) for arg in sympy_set.args])

    f = sympy.Lambda(x, expr)
    if isinstance(sympy_set, sympy.Interval):
        left, right = f(sympy_set.left), f(sympy_set.right)

        if left < right:
            new_left_open = sympy_set.left_open
            new_right_open = sympy_set.right_open
        else:
            new_left_open = sympy_set.right_open
            new_right_open = sympy_set.left_open

        return sympy.Interval(sympy.Min(left, right), sympy.Max(left, right),
                              new_left_open, new_right_open)

    if isinstance(sympy_set, sympy.FiniteSet):
        # unpack: FiniteSet takes its elements as separate arguments
        return sympy.FiniteSet(*map(f, sympy_set))
Example #9
    def compile(self):
        #code.interact(local=locals())

        # define the render function
        dist = float('inf')
        for face in self.faces:
            dist = sp.Min(dist, face.intersect(None, {}))

        # print the render function
        codegen(('distance', dist),
                'C',
                to_files=True,
                prefix='dr_cube/distance',
                header=True,
                empty=True)

        # print the derivatives
        for var in self.get_syms():
            deriv = sp.diff(dist, var)
            name = 'diff_{0}'.format(var)
            deriv = deriv.replace(sp.Heaviside, simp_heaviside)
            codegen((name, deriv),
                    'C',
                    to_files=True,
                    prefix='dr_cube/' + name,
                    header=True,
                    empty=True)
Example #10
    def _create_strided_range(self, sdfg: SDFG, state: SDFGState,
                              map_entry: nodes.MapEntry):
        map_exit = state.exit_node(map_entry)
        dim_idx = self.dim_idx
        new_dim_prefix = self.new_dim_prefix
        tile_size = self.tile_size
        divides_evenly = self.divides_evenly
        tile_stride = self.tile_stride
        if tile_stride == 0:
            tile_stride = tile_size
        if tile_stride != tile_size:
            raise NotImplementedError

        # Retrieve parameter and range of dimension to be strip-mined.
        target_dim = map_entry.map.params[dim_idx]
        td_from, td_to, td_step = map_entry.map.range[dim_idx]
        new_dim = self._find_new_dim(sdfg, state, map_entry, new_dim_prefix,
                                     target_dim)
        new_dim_range = (td_from, td_to, tile_size)
        new_map = nodes.Map(map_entry.map.label, [new_dim],
                            subsets.Range([new_dim_range]))

        dimsym = dace.symbolic.pystr_to_symbolic(new_dim)
        td_from_new = dimsym
        if divides_evenly:
            td_to_new = dimsym + tile_size - 1
        else:
            if isinstance(td_to, dace.symbolic.SymExpr):
                td_to = td_to.expr
            td_to_new = dace.symbolic.SymExpr(
                sympy.Min(dimsym + tile_size - 1, td_to),
                dimsym + tile_size - 1)
        td_step_new = td_step

        return new_dim, new_map, (td_from_new, td_to_new, td_step_new)
Example #11
def calc_set_union(set_a, set_b):
    if isinstance(set_a, subsets.Indices) or isinstance(
            set_b, subsets.Indices):
        raise NotImplementedError('Set union with indices is not implemented.')
    if not (isinstance(set_a, subsets.Range)
            and isinstance(set_b, subsets.Range)):
        raise TypeError('Can only compute the union of ranges.')
    if len(set_a) != len(set_b):
        raise ValueError('Range dimensions do not match')
    union = []
    for range_a, range_b in zip(set_a, set_b):
        union.append([
            sympy.Min(range_a[0], range_b[0]),
            sympy.Max(range_a[1], range_b[1]),
            sympy.Min(range_a[2], range_b[2]),
        ])
    return subsets.Range(union)
Example #12
    def cie_optical_depth_correction(self):
        ciefudge = 1.0

        mdensity = sympy.Symbol('mdensity')
        tau = (mdensity / 1.96e16)**2.0
        tau = sympy.Max(tau, 1e-5)
        ciefudge = sympy.Min((1.0 - sympy.exp(-tau)) / tau, 1.0)
        return ciefudge
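A quick numeric check of the expression above (a standalone sketch; the Max clamp keeps tau away from zero so the ratio stays well defined, and the Min cap keeps the fudge factor at or below one):

import sympy

mdensity = sympy.Symbol('mdensity')
tau = sympy.Max((mdensity / 1.96e16)**2.0, 1e-5)
ciefudge = sympy.Min((1.0 - sympy.exp(-tau)) / tau, 1.0)

print(ciefudge.subs(mdensity, 1e12).evalf())  # ~1.0 (optically thin)
print(ciefudge.subs(mdensity, 1e16).evalf())  # ~0.88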
Example #13
def sort(fs):
    fs = list(fs)
    for i in range(len(fs) - 1, -1, -1):
        for j in range(i, -1, -1):
            Fi = fs[i]
            Fj = fs[j]
            fs[j] = sympy.Min(Fi, Fj)
            fs[i] = sympy.Max(Fi, Fj)
    return fs
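The nested loops form a compare-exchange network, so the result is the input sequence in ascending symbolic order. Usage sketch:

import sympy

x = sympy.Symbol('x')
print(sort([3, x]))  # [Min(3, x), Max(3, x)]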
Example #14
def simplify_ext(expr):
    """
    An extended version of simplification with expression fixes for sympy.
    :param expr: A sympy expression.
    :return: Simplified version of the expression.
    """
    a = sympy.Wild('a')
    b = sympy.Wild('b')
    c = sympy.Wild('c')

    # Push expressions into both sides of min/max.
    # Example: Min(N, 4) + 1 => Min(N + 1, 5)
    dic = expr.match(sympy.Min(a, b) + c)
    if dic:
        return sympy.Min(dic[a] + dic[c], dic[b] + dic[c])
    dic = expr.match(sympy.Max(a, b) + c)
    if dic:
        return sympy.Max(dic[a] + dic[c], dic[b] + dic[c])
    return expr
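Usage sketch for the rewrite rule above:

import sympy

N = sympy.Symbol('N')
print(simplify_ext(sympy.Min(N, 4) + 1))  # Min(N + 1, 5), argument order may vary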
Example #15
    def _infer_binary_ops(self, node):
        funcs = {
            'Add': lambda l: l[0] + l[1],
            'Div': lambda l: l[0] // l[1],  # integer div in sympy
            'Max': lambda l: sympy.Max(l[0], l[1]),
            'Min': lambda l: sympy.Min(l[0], l[1]),
            'Mul': lambda l: l[0] * l[1],
            'Sub': lambda l: l[0] - l[1]
        }
        assert node.op_type in funcs
        self._compute_on_sympy_data(node, funcs[node.op_type])
Example #16
    def intersect(self, other):
        minCorner = [
            sp.Max(self.minCorner[d], other.minCorner[d])
            for d in range(self.dim)
        ]
        maxCorner = [
            sp.Max(minCorner[d],
                   sp.Min(self.maxCorner[d], other.maxCorner[d]))
            for d in range(self.dim)
        ]
        return AABB(minCorner, maxCorner)
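The method belongs to an AABB class that is not shown here. A runnable sketch with a hypothetical minimal container; note how the outer sp.Max clamps empty overlaps so maxCorner never drops below minCorner:

import sympy as sp

class AABB:
    # hypothetical minimal axis-aligned bounding box
    def __init__(self, minCorner, maxCorner):
        self.minCorner = list(minCorner)
        self.maxCorner = list(maxCorner)
        self.dim = len(self.minCorner)

    def intersect(self, other):
        minCorner = [sp.Max(self.minCorner[d], other.minCorner[d])
                     for d in range(self.dim)]
        maxCorner = [sp.Max(minCorner[d],
                            sp.Min(self.maxCorner[d], other.maxCorner[d]))
                     for d in range(self.dim)]
        return AABB(minCorner, maxCorner)

box = AABB([0, 0], [4, 4]).intersect(AABB([2, 1], [6, 3]))
print(box.minCorner, box.maxCorner)  # [2, 1] [4, 3]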
Example #17
def widen_bound(op: IComparisonOp[Any], old: sym.Number, new: sym.Number) -> sym.Number:
    if op == operator.le:
        mx = sym.Max(old, new)  # type: ignore
        assert isinstance(mx, sym.Number)
        return mx
    elif op == operator.ge:
        mn = sym.Min(old, new)  # type: ignore
        assert isinstance(mn, sym.Number)
        return mn
    else:
        raise Exception("unsupported operator")
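Usage sketch: widening an upper bound (op is <=) keeps the larger constant, widening a lower bound keeps the smaller one.

import operator
import sympy as sym

print(widen_bound(operator.le, sym.Integer(3), sym.Integer(5)))  # 5
print(widen_bound(operator.ge, sym.Integer(3), sym.Integer(5)))  # 3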
Example #18
def overapproximate(expr):
    """
    Takes a sympy expression and returns its maximal possible value
    in specific cases.
    """
    if isinstance(expr, list):
        return [overapproximate(elem) for elem in expr]
    if isinstance(expr, SymExpr):
        if expr.expr != expr.approx:
            return expr.approx
        else:
            return overapproximate(expr.expr)
    if not isinstance(expr, sympy.Basic):
        return expr
    a = sympy.Wild('a')
    b = sympy.Wild('b')
    c = sympy.Wild('c')

    # If Min(x, N-y), return the non-symbolic of the two components
    match = expr.match(sympy.Min(a, b) + c)
    if match is not None and len(match) == 3:
        # First, construct the min expression with "c" inline
        newexpr = sympy.Min(match[a] + match[c], match[b] + match[c])
        # Match again
        match = newexpr.match(sympy.Min(a, b))
        if match is not None and len(match) == 2:
            if issymbolic(match[a]) and not issymbolic(match[b]):
                return match[b]
            if issymbolic(match[b]) and not issymbolic(match[a]):
                return match[a]

    # If ceiling(k * ((N - 1) // k)) + k, return N
    a = sympy.Wild('a', properties=[lambda k: k.is_Symbol or k.is_Integer])
    b = sympy.Wild('b', properties=[lambda k: k.is_Symbol or k.is_Integer])
    int_floor = sympy.Function('int_floor')
    match = expr.match(sympy.ceiling(b * int_floor(a - 1, b)) + b)
    if match is not None and len(match) == 2:
        return match[a]

    return expr
Example #19
    def _create_from_tile_numbers(self, sdfg: SDFG, state: SDFGState,
                                  map_entry: nodes.MapEntry):
        map_exit = state.exit_node(map_entry)

        # Retrieve transformation properties.
        dim_idx = self.dim_idx
        new_dim_prefix = self.new_dim_prefix
        divides_evenly = self.divides_evenly
        number_of_tiles = self.tile_size
        tile_stride = self.tile_stride

        number_of_tiles = dace.symbolic.pystr_to_symbolic(number_of_tiles)

        # Retrieve parameter and range of dimension to be strip-mined.
        target_dim = map_entry.map.params[dim_idx]
        td_from, td_to, td_step = map_entry.map.range[dim_idx]
        tile_size = map_entry.map.range.size_exact()[dim_idx] / number_of_tiles

        if tile_stride == 0:
            tile_stride = tile_size
        if tile_stride != tile_size:
            raise NotImplementedError

        new_dim = self._find_new_dim(sdfg, state, map_entry, new_dim_prefix,
                                     target_dim)
        new_dim_range = (td_from, number_of_tiles - 1, 1)
        new_map = nodes.Map(map_entry.map.label, [new_dim],
                            subsets.Range([new_dim_range]))

        dimsym = dace.symbolic.pystr_to_symbolic(new_dim)
        td_from_new = dimsym * tile_size
        if divides_evenly:
            td_to_new = (dimsym + 1) * tile_size - 1
        else:
            if isinstance(td_to, dace.symbolic.SymExpr):
                td_to = td_to.expr
            td_to_new = dace.symbolic.SymExpr(
                sympy.Min((dimsym + 1) * tile_size - 1, td_to),
                (dimsym + 1) * tile_size - 1)
        td_step_new = td_step
        return new_dim, new_map, (td_from_new, td_to_new, td_step_new)
Example #20
def optimize(g):
    """
    Find parameters by optimizing over the given table of estimated inequalities
    """

    # Build the objective
    g['Terms'] = g.apply(lambda row: sp.Min(row.g, 0)**2, axis=1)
    objective = g.Terms.sum()
    variables = list(objective.atoms(sp.Symbol))

    # Turn the objective into a function
    def function(values):
        z = zip(variables, values)
        return float(objective.subs(z))

    # Create the initial guess
    initial_guess = np.ones(len(variables))

    # Optimize!
    return [
        opt.minimize(function, initial_guess, method='nelder-mead'), variables
    ]
Example #21
def test_bng_printer():
    # Constants
    assert _bng_print(sympy.pi) == '_pi'
    assert _bng_print(sympy.E) == '_e'

    x, y = sympy.symbols('x y')

    # Binary functions
    assert _bng_print(sympy.sympify('x & y')) == 'x && y'
    assert _bng_print(sympy.sympify('x | y')) == 'x || y'

    # Trig functions
    assert _bng_print(sympy.sin(x)) == 'sin(x)'
    assert _bng_print(sympy.cos(x)) == 'cos(x)'
    assert _bng_print(sympy.tan(x)) == 'tan(x)'
    assert _bng_print(sympy.asin(x)) == 'asin(x)'
    assert _bng_print(sympy.acos(x)) == 'acos(x)'
    assert _bng_print(sympy.atan(x)) == 'atan(x)'
    assert _bng_print(sympy.sinh(x)) == 'sinh(x)'
    assert _bng_print(sympy.cosh(x)) == 'cosh(x)'
    assert _bng_print(sympy.tanh(x)) == 'tanh(x)'
    assert _bng_print(sympy.asinh(x)) == 'asinh(x)'
    assert _bng_print(sympy.acosh(x)) == 'acosh(x)'
    assert _bng_print(sympy.atanh(x)) == 'atanh(x)'

    # Logs and powers
    assert _bng_print(sympy.log(x)) == 'ln(x)'
    assert _bng_print(sympy.exp(x)) == 'exp(x)'
    assert _bng_print(sympy.sqrt(x)) == 'sqrt(x)'

    # Rounding
    assert _bng_print(sympy.Abs(x)) == 'abs(x)'
    assert _bng_print(sympy.floor(x)) == 'rint(x - 0.5)'
    assert _bng_print(sympy.ceiling(x)) == '(rint(x + 1) - 1)'

    # Min/max
    assert _bng_print(sympy.Min(x, y)) == 'min(x, y)'
    assert _bng_print(sympy.Max(x, y)) == 'max(x, y)'
Example #22
    def __init__(self, epsilon, debug=False):
        self.debug = debug
        self.epsilon = epsilon

        self.A = 'A'
        self.A_bar = 'A_bar'
        self.B = 'B'
        self.B_bar = 'B_bar'

        self.operators = [self.A]
        self.operators_bar = [self.A_bar]

        x_1, x_2, z_1, z_2 = sp.symbols('x_1 x_2 z_1 z_2')
        eps_inv = 1. / epsilon

        I_1_left_integrand = z_1 * z_2 * (1 - epsilon * (z_2 - z_1))**2
        I_1_right_integrand = z_1 * z_2 * (1 - epsilon * (z_1 - z_2))**2

        I_1_1_right = sp.integrate(I_1_right_integrand, (z_2, 0, z_1))
        I_1_1_left = sp.integrate(I_1_left_integrand, (z_2, z_1, x_2))
        I_1_1 = sp.integrate(I_1_1_left + I_1_1_right, (z_1, 0, x_1))

        I_1_2_right = sp.integrate(I_1_right_integrand,
                                   (z_2, 0, z_1 - eps_inv))
        I_1_2 = sp.integrate(I_1_2_right, (z_1, eps_inv, x_1))

        I_1_3_left = sp.integrate(I_1_left_integrand, (z_1, 0, z_2 - eps_inv))
        I_1_3 = sp.integrate(I_1_3_left,
                             (z_2, eps_inv, sp.Min(x_1 + eps_inv, x_2)))

        I_1_4_left = sp.integrate(I_1_left_integrand, (z_1, 0, x_1))
        I_1_4 = sp.integrate(I_1_4_left, (z_2, x_1 + eps_inv, x_2))

        self.I_1_1 = self.functionise_sympy([x_1, x_2], I_1_1)
        self.I_1_2 = self.functionise_sympy([x_1, x_2], I_1_2)
        self.I_1_3 = self.functionise_sympy([x_1, x_2], I_1_3)
        self.I_1_4 = self.functionise_sympy([x_1, x_2], I_1_4)

        I_2_left_integrand = z_1 * (z_2 - 1) * (1 - epsilon * (z_2 - z_1))**2

        I_2_left_c1 = sp.integrate(I_2_left_integrand,
                                   (z_2, x_2, z_1 + eps_inv))
        I_2_c1 = sp.integrate(I_2_left_c1,
                              (z_1, sp.Max(x_2 - eps_inv, 0.), x_1))

        I_2_left_c2 = sp.integrate(I_2_left_integrand,
                                   (z_1, z_2 - eps_inv, x_1))
        I_2_c2 = sp.integrate(I_2_left_c2,
                              (z_2, x_2, sp.Min(x_1 + eps_inv, 1.)))

        self.I_2_c1 = self.functionise_sympy([x_1, x_2], I_2_c1)
        self.I_2_c2 = self.functionise_sympy([x_1, x_2], I_2_c2)

        I_3_right_integrand = (z_1 - 1) * z_2 * (1 - epsilon * (z_1 - z_2))**2
        I_3_left_integrand = (z_1 - 1) * z_2 * (1 - epsilon * (z_2 - z_1))**2

        # assuming x_2 + eps_inv < 1...
        I_3_1_c1_right = sp.integrate(I_3_right_integrand,
                                      (z_2, z_1 - eps_inv, z_1))
        I_3_1_c1_left = sp.integrate(I_3_left_integrand,
                                     (z_2, z_1, z_1 + eps_inv))
        I_3_1_c1 = sp.integrate(I_3_1_c1_right + I_3_1_c1_left,
                                (z_1, x_1, x_2 - eps_inv))
        # sp.Max(x_1, eps_inv)
        I_3_2_c1_right = sp.integrate(I_3_right_integrand,
                                      (z_2, z_1 - eps_inv, z_1))
        I_3_2_c1_left = sp.integrate(I_3_left_integrand, (z_2, z_1, x_2))
        I_3_2_c1 = sp.integrate(I_3_2_c1_right + I_3_2_c1_left,
                                (z_1, sp.Max(x_2 - eps_inv, x_1), x_2))

        I_3_3_c1_right = sp.integrate(I_3_right_integrand,
                                      (z_2, z_1 - eps_inv, x_2))
        I_3_3_c1 = sp.integrate(I_3_3_c1_right,
                                (z_1, x_2, sp.Min(x_2 + eps_inv, 1.)))

        I_3_4_c1_right = sp.integrate(I_3_right_integrand,
                                      (z_2, z_1 - eps_inv, 0))
        I_3_4_c1 = sp.integrate(I_3_4_c1_right, (z_1, x_1, eps_inv))

        self.I_3_1_c1 = self.functionise_sympy([x_1, x_2], I_3_1_c1)
        self.I_3_2_c1 = self.functionise_sympy([x_1, x_2], I_3_2_c1)
        self.I_3_3_c1 = self.functionise_sympy([x_1, x_2], I_3_3_c1)
        self.I_3_4 = self.functionise_sympy([x_1, x_2], I_3_4_c1)

        I_4_right_integrand = (z_1 - 1) * (z_2 - 1) * (1 - epsilon *
                                                       (z_1 - z_2))**2
        I_4_left_integrand = (z_1 - 1) * (z_2 - 1) * (1 - epsilon *
                                                      (z_2 - z_1))**2

        I_4_1_left = sp.integrate(I_4_left_integrand, (z_1, x_1, z_2))
        I_4_1_right = sp.integrate(I_4_right_integrand, (z_1, z_2, 1))
        I_4_1 = sp.integrate(I_4_1_right + I_4_1_left, (z_2, x_2, 1))

        I_4_2_right = sp.integrate(I_4_right_integrand,
                                   (z_2, x_2, z_1 - eps_inv))
        I_4_2 = sp.integrate(I_4_2_right, (z_1, x_2 + eps_inv, 1))

        I_4_3_left = sp.integrate(
            I_4_left_integrand,
            (z_1, sp.Max(x_2 - eps_inv, x_1), z_2 - eps_inv))
        I_4_3 = sp.integrate(I_4_3_left, (z_2, sp.Max(x_2, x_1 + eps_inv), 1))

        I_4_4_left = sp.integrate(I_4_left_integrand, (z_2, x_2, 1))
        I_4_4 = sp.integrate(I_4_4_left, (z_1, x_1, x_2 - eps_inv))

        self.I_4_1 = self.functionise_sympy([x_1, x_2], I_4_1)
        self.I_4_2 = self.functionise_sympy([x_1, x_2], I_4_2)
        self.I_4_3 = self.functionise_sympy([x_1, x_2], I_4_3)
        self.I_4_4 = self.functionise_sympy([x_1, x_2], I_4_4)

        # x' < x - eps_inv
        A_k_I_1a = sp.integrate(
            x_2 * (z_2 - 1) * (1 - epsilon * (x_1 - z_2))**2,
            (z_2, sp.Max(x_1 - eps_inv, 0), x_1))
        A_k_I_1b = sp.integrate(
            x_2 * (z_2 - 1) * (1 - epsilon * (z_2 - x_1))**2,
            (z_2, x_1, sp.Min(x_1 + eps_inv, 1)))

        # x' > x + eps_inv
        A_k_I_2a = sp.integrate(
            (x_2 - 1) * z_2 * (1 - epsilon * (x_1 - z_2))**2,
            (z_2, sp.Max(x_1 - eps_inv, 0), x_1))
        A_k_I_2b = sp.integrate(
            (x_2 - 1) * z_2 * (1 - epsilon * (z_2 - x_1))**2,
            (z_2, x_1, sp.Min(x_1 + eps_inv, 1)))

        # x' < x, x' > x - eps_inv
        A_k_I_4a = sp.integrate(
            (x_2 - 1) * z_2 * (1 - epsilon * (x_1 - z_2))**2,
            (z_2, sp.Max(x_1 - eps_inv, 0), x_2))
        A_k_I_4b = sp.integrate(
            x_2 * (z_2 - 1) * (1 - epsilon * (x_1 - z_2))**2, (z_2, x_2, x_1))
        A_k_I_4c = A_k_I_1b

        # x' > x, x' < x + eps_inv
        A_k_I_5a = A_k_I_2a
        A_k_I_5b = sp.integrate(
            (x_2 - 1) * z_2 * (1 - epsilon * (z_2 - x_1))**2, (z_2, x_1, x_2))
        A_k_I_5c = sp.integrate(
            x_2 * (z_2 - 1) * (1 - epsilon * (z_2 - x_1))**2,
            (z_2, x_2, sp.Min(x_1 + eps_inv, 1)))

        self.A_k_I_1 = self.functionise_sympy([x_1, x_2], A_k_I_1a + A_k_I_1b)
        self.A_k_I_2 = self.functionise_sympy([x_1, x_2], A_k_I_2a + A_k_I_2b)
        self.A_k_I_3 = self.functionise_sympy([x_1, x_2], A_k_I_2a + A_k_I_1b)
        self.A_k_I_4 = self.functionise_sympy([x_1, x_2],
                                              A_k_I_4a + A_k_I_4b + A_k_I_4c)
        self.A_k_I_5 = self.functionise_sympy([x_1, x_2],
                                              A_k_I_5a + A_k_I_5b + A_k_I_5c)
Example #23
    def _print_Min(self, expr):
        """ Adapted from sympy/printing/cxxcode.py """
        if len(expr.args) == 1:
            return self._print(expr.args[0])
        return "[min] {} {}".format(self._print(expr.args[0]),
                                    self._print(sympy.Min(*expr.args[1:])))
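The method expands an n-ary Min into nested pairwise prints. A runnable sketch using a hypothetical StrPrinter subclass (the "[min]" token is kept from the snippet above; a real backend would emit its target language's min syntax):

import sympy
from sympy.printing.str import StrPrinter

class MinPrinter(StrPrinter):
    def _print_Min(self, expr):
        if len(expr.args) == 1:
            return self._print(expr.args[0])
        return "[min] {} {}".format(self._print(expr.args[0]),
                                    self._print(sympy.Min(*expr.args[1:])))

x, y, z = sympy.symbols('x y z')
print(MinPrinter().doprint(sympy.Min(x, y, z)))  # [min] x [min] y z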
Example #24
def forward_projection(volume: pystencils.Field,
                       projection: pystencils.Field,
                       projection_matrix,
                       step_size=1,
                       cubic_bspline_interpolation=False,
                       add_to_projector=False,
                       central_ray_point=None):
    # is_projection_stack = projection.spatial_dimensions == volume.spatial_dimensions

    interpolation_mode = 'cubic_spline' if cubic_bspline_interpolation else 'linear'
    volume_texture = pystencils.interpolation_astnodes.Interpolator(
        volume, interpolation_mode)
    ndim = volume.spatial_dimensions
    projection_matrix = pystencils_reco.ProjectiveMatrix(projection_matrix)

    t = pystencils_reco.typed_symbols('_parametrization', 'float32')
    texture_coordinates = sympy.Matrix(
        pystencils_reco.typed_symbols(f'_t:{ndim}', 'float32'))
    u = projection.physical_coordinates_staggered
    x = volume.index_to_physical(texture_coordinates)

    is_perspective = projection_matrix.matrix.cols == ndim + 1

    if is_perspective:
        eqn = projection_matrix @ sympy.Matrix([*x, 1]) - sympy.Matrix(
            [*(t * u), t])
    else:
        # this also works for perspective/cone beam projection (but may lead to unstable parametrization)
        eqn = projection_matrix @ x - u
    ray_equations = sympy.solve(eqn, texture_coordinates, rational=False)

    if not is_perspective:
        t = [t for t in texture_coordinates
             if t not in ray_equations.keys()][0]
        assert len(
            ray_equations.keys()
        ) == ndim - 1, "projection_matrix does not appear to define a projection"
    ray_equations = sympy.Matrix(
        [ray_equations[s] if s != t else t for s in texture_coordinates])

    projection_vector = sympy.diff(ray_equations, t)
    projection_vector_norm = projection_vector.norm()
    projection_vector /= projection_vector_norm

    conditions = pystencils_reco._geometry.coordinate_in_field_conditions(
        volume, ray_equations)

    if not central_ray_point:
        central_ray_point = [0] * projection.spatial_dimensions
    central_ray = projection_vector.subs({
        i: j
        for i, j in zip(pystencils.x_vector(projection.spatial_dimensions),
                        central_ray_point)
    })

    intersection_candidates = []
    for i in range(ndim):
        solution_min = sympy.solve(ray_equations[i], t, rational=False)
        solution_max = sympy.solve(ray_equations[i] - volume.spatial_shape[i],
                                   t,
                                   rational=False)
        intersection_candidates.extend(solution_min + solution_max)

    intersection_point1 = sympy.Piecewise(
        *[(f, sympy.And(*conditions).subs({t: f}))
          for f in intersection_candidates], (-0, True))
    intersection_point2 = sympy.Piecewise(
        *[(f, sympy.And(*conditions).subs({t: f}))
          for f in reversed(intersection_candidates)], (-0, True))
    assert intersection_point1 != intersection_point2, \
        "The intersections are unconditionally equal, reconstruction volume is not in detector FOV!"

    # perform an integer set analysis here?
    # space = isl.Space.create_from_names(isl.DEFAULT_CONTEXT, set=[str(t) for t in texture_coordinates])
    # ray_set = isl.BasicSet.universe(space)
    # for i, t in enumerate(texture_coordinates):
    #    # dafaq?
    #    ray_set.add_constraint(isl.Constraint.ineq_from_names(space, {str(texture_coordinates): 1}))
    #    ray_set.add_constraint(isl.Constraint.ineq_from_names(space,
    #                                                        # {1: -volume.shape[i],
    # str(texture_coordinates): -1}))
    #    ray_set.add_constraint(isl.Constraint.eq_from_name(space, ray_equations[i].subs({ #TODO

    min_t = sympy.Min(intersection_point1, intersection_point2)
    max_t = sympy.Max(intersection_point1, intersection_point2)
    # parametrization_dim = list(ray_equations).index(t)
    # min_t = 0
    # max_t = volume.spatial_shape[parametrization_dim]

    line_integral, num_steps, min_t_tmp, max_t_tmp, intensity_weighting, step = pystencils.data_types.typed_symbols(
        'line_integral, num_steps, min_t_tmp, max_t_tmp, intensity_weighting, step',
        'float32')
    i = pystencils.data_types.TypedSymbol('i', 'int32')
    num_steps = pystencils.data_types.TypedSymbol('num_steps', 'int32')

    # step = step_size / projection_vector_norm
    # tex_coord = ray_equations.subs({t: min_t_tmp + i * step})
    tex_coord = ray_equations.subs({t: min_t_tmp}) + projection_vector * i

    if callable(volume.coordinate_transform):
        intensity_weighting_sym = projection_vector.dot(central_ray)**2
    else:
        intensity_weighting_sym = projection_vector.dot(central_ray)**2

    assignments = {
        min_t_tmp:
        min_t,
        max_t_tmp:
        max_t,
        num_steps:
        sympy.ceiling(
            (max_t_tmp - min_t_tmp) / (step_size / projection_vector_norm)),
        line_integral:
        sympy.Sum(volume_texture.at(tex_coord), (i, 0, num_steps)),
        intensity_weighting:
        intensity_weighting_sym,
        projection.center():
        (line_integral * step_size * intensity_weighting) +
        (projection.center() if add_to_projector else 0)
        # projection.center(): (max_t_tmp - min_t_tmp) / step # Uncomment to get path length
    }

    # def create_autodiff(self, constant_fields=None):
    # backward_assignments = backward_projection(AdjointField(projections),
    # AdjointField(volume),
    # projection_matrix,
    # 1)
    # self._autodiff = pystencils.autodiff.AutoDiffOp(
    # assignments, "op", constant_fields=constant_fields, backward_assignments=backward_assignments)

    # assignments._create_autodiff = types.MethodType(create_autodiff, assignments)

    return assignments
Example #25
def or_(truth0, truth1):
    exp = sympy.Min(truth0.expression, truth1.expression)
    sig = sympy.Max(truth0.sigma, truth1.sigma)
    return Truth(exp, sig)
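The Truth container is not shown here. A sketch with a hypothetical namedtuple, illustrating that the combined expression takes the pointwise minimum while the uncertainty takes the maximum:

import sympy
from collections import namedtuple

Truth = namedtuple('Truth', ['expression', 'sigma'])  # hypothetical container

x, y = sympy.symbols('x y')
t = or_(Truth(x, 0.1), Truth(y, 0.2))
print(t.expression, t.sigma)  # Min(x, y) 0.2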
Example #26
    def _infer_Min(self, node):
        self._compute_on_sympy_data(node, lambda l: sympy.Min(l[0], l[1]))
Example #27
def diffusion_reaction(fluctuations: bool):
    # parameters
    L = (32, 32)
    stencil_factor = np.sqrt(1 / (1 + np.sqrt(2)))

    dh = ps.create_data_handling(domain_size=L,
                                 periodicity=True,
                                 default_target=ps.Target.CPU)

    species = 2
    n_fields = []
    j_fields = []
    r_flux_fields = []
    for i in range(species):
        n_fields.append(dh.add_array(f'n_{i}', values_per_cell=1))
        j_fields.append(
            dh.add_array(f'j_{i}',
                         values_per_cell=3**dh.dim // 2,
                         field_type=ps.FieldType.STAGGERED_FLUX))
        r_flux_fields.append(dh.add_array(f'r_{i}', values_per_cell=1))
    velocity_field = dh.add_array('v', values_per_cell=dh.dim)

    D = 0.00666
    time = 1000
    r_order = [2.0, 0.0]
    r_rate_const = 0.00001
    r_coefs = [-2, 1]

    def grad(f):
        return sp.Matrix([ps.fd.diff(f, i) for i in range(dh.dim)])

    flux_eq = -D * grad(n_fields[0])
    fvm_eq = ps.fd.FVM1stOrder(n_fields[0], flux=flux_eq)
    vof_adv = ps.fd.VOF(j_fields[0], velocity_field, n_fields[0])
    continuity_assignments = fvm_eq.discrete_continuity(j_fields[0])
    # merge calculation of advection and diffusion terms
    flux = []
    for adv, div in zip(vof_adv, fvm_eq.discrete_flux(j_fields[0])):
        assert adv.lhs == div.lhs
        flux.append(ps.Assignment(adv.lhs, adv.rhs + div.rhs))
    flux = ps.AssignmentCollection(flux)

    if (fluctuations):
        rng_symbol_gen = random_symbol(flux.subexpressions, dim=dh.dim)
        for i in range(len(flux.main_assignments)):
            n = j_fields[0].staggered_stencil[i]
            assert flux.main_assignments[i].lhs == j_fields[
                0].staggered_access(n)

            # calculate mean density
            dens = (n_fields[0].neighbor_vector(n) +
                    n_fields[0].center_vector)[0] / 2
            # multiply by a smoothed Heaviside function so that the fluctuation does not get bigger than the density
            dens *= sp.Max(
                0,
                sp.Min(1.0, n_fields[0].neighbor_vector(n)[0]) *
                sp.Min(1.0, n_fields[0].center_vector[0]))

            # length of the vector
            length = sp.sqrt(len(j_fields[0].staggered_stencil[i]))

            # amplitude of the random fluctuations
            fluct = sp.sqrt(2 * dens * D) * sp.sqrt(
                1 / length) * stencil_factor
            # add fluctuations
            fluct *= 2 * (next(rng_symbol_gen) - 0.5) * sp.sqrt(3)

            flux.main_assignments[i] = ps.Assignment(
                flux.main_assignments[i].lhs,
                flux.main_assignments[i].rhs + fluct)

        # Add the folding to the flux, so that the random numbers persist through the ghost layers.
        fold = {
            ps.astnodes.LoopOverCoordinate.get_loop_counter_symbol(i):
            ps.astnodes.LoopOverCoordinate.get_loop_counter_symbol(i) % L[i]
            for i in range(len(L))
        }
        flux.subs(fold)

    r_flux = ps.AssignmentCollection(
        [ps.Assignment(j_fields[i].center, 0) for i in range(species)])
    reaction = r_rate_const
    for i in range(species):
        reaction *= sp.Pow(n_fields[i].center, r_order[i])
    if (fluctuations):
        rng_symbol_gen = random_symbol(r_flux.subexpressions, dim=dh.dim)
        reaction_fluctuations = sp.sqrt(
            sp.Abs(reaction)) * 2 * (next(rng_symbol_gen) - 0.5) * sp.sqrt(3)
        reaction_fluctuations *= sp.Min(1, sp.Abs(reaction**2))
    else:
        reaction_fluctuations = 0.0
    for i in range(species):
        r_flux.main_assignments[i] = ps.Assignment(
            r_flux_fields[i].center,
            (reaction + reaction_fluctuations) * r_coefs[i])

    continuity_assignments.append(
        ps.Assignment(n_fields[0].center,
                      n_fields[0].center + r_flux_fields[0].center))

    flux_kernel = ps.create_staggered_kernel(flux).compile()
    reaction_kernel = ps.create_kernel(r_flux).compile()

    pde_kernel = ps.create_kernel(continuity_assignments).compile()

    sync_conc = dh.synchronization_function(
        [n_fields[0].name, n_fields[1].name])

    def f(t, r, n0, fac, fluctuations):
        """Calculates the amount of product created after a certain time of a reaction with form xA -> B

        Args:
            t: Time of the reaction
            r: Reaction rate constant
            n0: Initial density of the reactant A
            fac: Reaction order of A (this in most cases equals the stoichiometric coefficient x)
            fluctuations: Boolean indicating whether fluctuations were included during the reaction.
        """
        if fluctuations:
            return 1 / fac * (n0 + n0 / (n0 - (n0 + 1) * np.exp(fac * r * t)))
        return 1 / fac * (n0 - (1 / (fac * r * t + (1 / n0))))

    def run(density_init: float, velocity: np.ndarray, time: int):
        for i in range(species):
            dh.fill(n_fields[i].name,
                    np.nan,
                    ghost_layers=True,
                    inner_ghost_layers=True)
            dh.fill(j_fields[i].name,
                    0.0,
                    ghost_layers=True,
                    inner_ghost_layers=True)
            dh.fill(r_flux_fields[i].name,
                    0.0,
                    ghost_layers=True,
                    inner_ghost_layers=True)

        # set initial values for velocity and density
        for i in range(dh.dim):
            dh.fill(velocity_field.name,
                    velocity[i],
                    i,
                    ghost_layers=True,
                    inner_ghost_layers=True)
        dh.fill(n_fields[0].name, density_init)
        dh.fill(n_fields[1].name, 0.0)

        measurement_interval = 10
        data = []

        sync_conc()
        for i in range(time):
            if i % measurement_interval == 0:
                data.append([
                    i,
                    dh.gather_array(n_fields[1].name).mean(),
                    dh.gather_array(n_fields[0].name).mean()
                ])
            dh.run_kernel(reaction_kernel, seed=41, time_step=i)
            for s_idx in range(species):
                flux_kernel(n_0=dh.cpu_arrays[n_fields[s_idx].name],
                            j_0=dh.cpu_arrays[j_fields[s_idx].name],
                            v=dh.cpu_arrays[velocity_field.name],
                            seed=42 + s_idx,
                            time_step=i)
                pde_kernel(n_0=dh.cpu_arrays[n_fields[s_idx].name],
                           j_0=dh.cpu_arrays[j_fields[s_idx].name],
                           r_0=dh.cpu_arrays[r_flux_fields[s_idx].name])
            sync_conc()

        data = np.array(data).transpose()
        x = data[0]
        analytical_value = f(x, r_rate_const, density_init, abs(r_coefs[0]),
                             fluctuations)

        # test mass conservation
        np.testing.assert_almost_equal(
            dh.gather_array(n_fields[0].name).mean() +
            2 * dh.gather_array(n_fields[1].name).mean(), density_init)

        r_tol = 2e-3
        if fluctuations:
            r_tol = 3e-2
        np.testing.assert_allclose(data[1], analytical_value, rtol=r_tol)

    return lambda density_init, v: run(density_init, np.array(v), time)
Example #28
def advection_diffusion_fluctuations(dim: int):
    # parameters
    if dim == 2:
        L = (32, 32)
        stencil_factor = np.sqrt(1 / (1 + np.sqrt(2)))
    elif dim == 3:
        L = (16, 16, 16)
        stencil_factor = np.sqrt(1 /
                                 (1 + 2 * np.sqrt(2) + 4.0 / 3.0 * np.sqrt(3)))

    dh = ps.create_data_handling(domain_size=L,
                                 periodicity=True,
                                 default_target=ps.Target.CPU)

    n_field = dh.add_array('n', values_per_cell=1)
    j_field = dh.add_array('j',
                           values_per_cell=3**dim // 2,
                           field_type=ps.FieldType.STAGGERED_FLUX)
    velocity_field = dh.add_array('v', values_per_cell=dim)

    D = 0.00666
    time = 10000

    def grad(f):
        return sp.Matrix([ps.fd.diff(f, i) for i in range(dim)])

    flux_eq = -D * grad(n_field)
    fvm_eq = ps.fd.FVM1stOrder(n_field, flux=flux_eq)

    vof_adv = ps.fd.VOF(j_field, velocity_field, n_field)

    # merge calculation of advection and diffusion terms
    flux = []
    for adv, div in zip(vof_adv, fvm_eq.discrete_flux(j_field)):
        assert adv.lhs == div.lhs
        flux.append(ps.Assignment(adv.lhs, adv.rhs + div.rhs))
    flux = ps.AssignmentCollection(flux)

    rng_symbol_gen = random_symbol(flux.subexpressions, dim=dh.dim)
    for i in range(len(flux.main_assignments)):
        n = j_field.staggered_stencil[i]
        assert flux.main_assignments[i].lhs == j_field.staggered_access(n)

        # calculate mean density
        dens = (n_field.neighbor_vector(n) + n_field.center_vector)[0] / 2
        # multiply by a smoothed Heaviside function so that the fluctuation does not get bigger than the density
        dens *= sp.Max(
            0,
            sp.Min(1.0,
                   n_field.neighbor_vector(n)[0]) *
            sp.Min(1.0, n_field.center_vector[0]))

        # length of the vector
        length = sp.sqrt(len(j_field.staggered_stencil[i]))

        # amplitude of the random fluctuations
        fluct = sp.sqrt(2 * dens * D) * sp.sqrt(1 / length) * stencil_factor
        # add fluctuations
        fluct *= 2 * (next(rng_symbol_gen) - 0.5) * sp.sqrt(3)

        flux.main_assignments[i] = ps.Assignment(
            flux.main_assignments[i].lhs, flux.main_assignments[i].rhs + fluct)

    # Add the folding to the flux, so that the random numbers persist through the ghost layers.
    fold = {
        ps.astnodes.LoopOverCoordinate.get_loop_counter_symbol(i):
        ps.astnodes.LoopOverCoordinate.get_loop_counter_symbol(i) % L[i]
        for i in range(len(L))
    }
    flux.subs(fold)

    flux_kernel = ps.create_staggered_kernel(flux).compile()

    pde_kernel = ps.create_kernel(
        fvm_eq.discrete_continuity(j_field)).compile()

    sync_conc = dh.synchronization_function([n_field.name])

    # analytical density distribution calculation
    def P(rho, density_init):
        res = []
        for r in rho:
            res.append(
                np.power(density_init, r) * np.exp(-density_init) /
                np.math.gamma(r + 1))
        return np.array(res)

    def run(density_init: float, velocity: np.ndarray, time: int):
        dh.fill(n_field.name,
                np.nan,
                ghost_layers=True,
                inner_ghost_layers=True)
        dh.fill(j_field.name,
                np.nan,
                ghost_layers=True,
                inner_ghost_layers=True)

        # set initial values for velocity and density
        for i in range(dim):
            dh.fill(velocity_field.name,
                    velocity[i],
                    i,
                    ghost_layers=True,
                    inner_ghost_layers=True)
        dh.fill(n_field.name, density_init)

        measurement_interval = 10
        warm_up = 1000
        data = []

        sync_conc()
        for i in range(warm_up):
            dh.run_kernel(flux_kernel, seed=42, time_step=i)
            dh.run_kernel(pde_kernel)
            sync_conc()

        for i in range(time):
            dh.run_kernel(flux_kernel, seed=42, time_step=i + warm_up)
            dh.run_kernel(pde_kernel)
            sync_conc()
            if i % measurement_interval == 0:
                data = np.append(data,
                                 dh.gather_array(n_field.name).ravel(), 0)

        # test mass conservation
        np.testing.assert_almost_equal(
            dh.gather_array(n_field.name).mean(), density_init)

        n_bins = 50

        density_value, bins = np.histogram(data, density=True, bins=n_bins)
        bins_mean = bins[:-1] + (bins[1:] - bins[:-1]) / 2
        analytical_value = P(bins_mean, density_init)
        print(density_value - analytical_value)
        np.testing.assert_allclose(density_value, analytical_value, atol=2e-3)

    return lambda density_init, v: run(density_init, np.array(v), time)
Example #29
    def __init__(self, epsilon):
        x, y = sp.symbols('x y')
        z = sp.Symbol('z')
        eps_inv = 1. / epsilon

        def phi_L(m, n):
            return (1 - epsilon * (m - n))**4 * (4 * epsilon * (m - n) + 1)

        def phi_R(m, n):
            return phi_L(n, m)

        I1_1 = sp.integrate(
            phi_L(x, z) * phi_L(y, z), (z, sp.Max(y - eps_inv, 0), x))
        I1_2 = sp.integrate(
            phi_R(x, z) * phi_L(y, z),
            (z, sp.Max(x, y - eps_inv), sp.Min(y, x + eps_inv)))
        I1_3 = sp.integrate(
            phi_R(x, z) * phi_R(y, z), (z, y, sp.Min(x + eps_inv, 1)))

        self.I1_1 = self.functionise_sympy([x, y], I1_1)
        self.I1_2 = self.functionise_sympy([x, y], I1_2)
        self.I1_3 = self.functionise_sympy([x, y], I1_3)

        self.epsilon = epsilon
        self.eps_inv = eps_inv

        def lap_phi_L(m, n):
            return -20 * epsilon**2 * (1 - epsilon *
                                       (m - n))**3 + 60 * epsilon**3 * (
                                           m - n) * (1 - epsilon * (m - n))**2

        def lap_phi_R(m, n):
            return lap_phi_L(n, m)

        I2_1 = sp.integrate(
            lap_phi_L(x, z) * phi_L(y, z), (z, sp.Max(y - eps_inv, 0), x))
        I2_2 = sp.integrate(
            lap_phi_R(x, z) * phi_L(y, z),
            (z, sp.Max(x, y - eps_inv), sp.Min(y, x + eps_inv)))
        I2_3 = sp.integrate(
            lap_phi_R(x, z) * phi_R(y, z), (z, y, sp.Min(x + eps_inv, 1)))

        I3_1 = sp.integrate(
            phi_L(x, z) * lap_phi_L(y, z), (z, sp.Max(y - eps_inv, 0), x))
        I3_2 = sp.integrate(
            phi_R(x, z) * lap_phi_L(y, z),
            (z, sp.Max(x, y - eps_inv), sp.Min(y, x + eps_inv)))
        I3_3 = sp.integrate(
            phi_R(x, z) * lap_phi_R(y, z), (z, y, sp.Min(x + eps_inv, 1)))

        self.I2_1 = self.functionise_sympy([x, y], I2_1)
        self.I2_2 = self.functionise_sympy([x, y], I2_2)
        self.I2_3 = self.functionise_sympy([x, y], I2_3)

        self.I3_1 = self.functionise_sympy([x, y], I3_1)
        self.I3_2 = self.functionise_sympy([x, y], I3_2)
        self.I3_3 = self.functionise_sympy([x, y], I3_3)

        I4_1 = sp.integrate(
            lap_phi_L(x, z) * lap_phi_L(y, z), (z, sp.Max(y - eps_inv, 0), x))
        I4_2 = sp.integrate(
            lap_phi_R(x, z) * lap_phi_L(y, z),
            (z, sp.Max(x, y - eps_inv), sp.Min(y, x + eps_inv)))
        I4_3 = sp.integrate(
            lap_phi_R(x, z) * lap_phi_R(y, z), (z, y, sp.Min(x + eps_inv, 1)))
        self.I4_1 = self.functionise_sympy([x, y], I4_1)
        self.I4_2 = self.functionise_sympy([x, y], I4_2)
        self.I4_3 = self.functionise_sympy([x, y], I4_3)

        self.A = 'A'
        self.A_bar = 'A_bar'
        self.B = 'B'
        self.B_bar = 'B_bar'

        self.operators = [self.A, self.B]
        self.operators_bar = [self.A_bar, self.B_bar]
Example #30
    def _infer_Slice(self, node):
        if get_opset(self.out_mp_) <= 9:
            axes = get_attribute(node, 'axes')
            starts = get_attribute(node, 'starts')
            ends = get_attribute(node, 'ends')
            steps = [1] * len(axes)
        else:
            starts = self._get_value(node, 1)
            ends = self._get_value(node, 2)
            assert starts is not None and ends is not None
            axes = self._try_get_value(node, 3)
            steps = self._try_get_value(node, 4)
            if axes is None:
                axes = list(range(0, len(starts)))
            if steps is None:
                steps = [1] * len(starts)

        new_shape = self._get_sympy_shape(node, 0)
        for i, s, e, t in zip(axes, starts, ends, steps):
            # TODO: handle step
            assert t == 1
            idx = handle_negative_axis(i, len(new_shape))
            if is_literal(e):
                if e >= int(2**31 - 1):  # max value of int32
                    e = new_shape[i]
                elif is_literal(new_shape[i]):
                    e = min(e, new_shape[i])
                else:
                    if e > 0:
                        e = sympy.Min(e, new_shape[i])
                    else:
                        e = new_shape[i] + e
            else:
                if is_literal(new_shape[i]):
                    e = sympy.Min(e, new_shape[i])
                else:
                    try:
                        if e >= new_shape[i]:
                            e = new_shape[i]
                    except Exception:
                        print(
                            'Unable to determine if {} <= {}, treat as equal'.
                            format(e, new_shape[i]))
                        e = new_shape[i]

            if is_literal(s) and int(s) < 0:
                s = new_shape[i] + s

            new_shape[idx] = e - s

        vi = self.known_vi_[node.output[0]]
        vi.CopyFrom(
            helper.make_tensor_value_info(
                node.output[0], vi.type.tensor_type.elem_type,
                get_shape_from_sympy_shape(new_shape)))
        if node.input[0] in self.sympy_data_:
            assert [0] == axes
            assert len(starts) == 1
            assert len(ends) == 1
            self.sympy_data_[node.output[0]] = self.sympy_data_[
                node.input[0]][starts[0]:ends[0]]
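A standalone sketch of the end-clamping rule above: a positive literal end index is clamped against a symbolic dimension with sympy.Min, which resolves once the dimension becomes concrete.

import sympy

N = sympy.Symbol('N')  # symbolic dimension size
e = sympy.Min(7, N)    # clamp a literal slice end against it
print(e)               # Min(7, N)
print(e.subs(N, 5))    # 5
print(e.subs(N, 100))  # 7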