Example #1
def RGS_enum(m):
    """
    RGS_enum computes the total number of restricted growth strings
    possible for a superset of size m.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_enum
    >>> from sympy.combinatorics.partitions import Partition
    >>> RGS_enum(4)
    15
    >>> RGS_enum(5)
    52
    >>> RGS_enum(6)
    203

    We can check that the enumeration is correct by actually generating
    the partitions. Here, the 15 partitions of 4 items are generated:

    >>> a = Partition(list(range(4)))
    >>> s = set()
    >>> for i in range(20):
    ...     s.add(a)
    ...     a += 1
    ...
    >>> assert len(s) == 15

    """
    if m < 1:
        return 0
    elif m == 1:
        return 1
    else:
        return bell(m)
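
RGS_enum simply returns the Bell number for m >= 1. As a quick sanity check, independent of the doctest above and assuming a recent SymPy, the same count can be reproduced by unranking every restricted growth string directly:

from sympy.combinatorics.partitions import RGS_enum, RGS_unrank

m = 4
total = RGS_enum(m)                                   # 15 for m = 4
# Unrank each RGS of a 4-element superset; they should all be distinct.
strings = {tuple(RGS_unrank(rank, m)) for rank in range(total)}
assert len(strings) == total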
Example #2
def RGS_enum(m):
    """
    RGS_enum computes the total number of restricted growth strings
    possible for a superset of size m.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_enum
    >>> from sympy.combinatorics.partitions import Partition
    >>> RGS_enum(4)
    15
    >>> RGS_enum(5)
    52
    >>> RGS_enum(6)
    203

    We can check that the enumeration is correct by actually generating
    the partitions. Here, the 15 partitions of 4 items are generated:

    >>> a = Partition(list(range(4)))
    >>> s = set()
    >>> for i in range(20):
    ...     s.add(a)
    ...     a += 1
    ...
    >>> assert len(s) == 15

    """
    if m < 1:
        return 0
    elif m == 1:
        return 1
    else:
        return bell(m)
Example #3
    def coeff_bell(self, n):
        r"""
        self.coeff_bell(n) returns a sequence of Bell polynomials of the second kind.
        Note that ``n`` should be an integer.

        Bell polynomials of the second kind (sometimes called "partial" Bell
        polynomials or incomplete Bell polynomials) are defined as

        .. math:: B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
                \sum_{j_1+j_2+j_3+\dotsb=k \atop j_1+2j_2+3j_3+\dotsb=n}
                    \frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
                    \left(\frac{x_1}{1!} \right)^{j_1}
                    \left(\frac{x_2}{2!} \right)^{j_2} \dotsb
                    \left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.

        * ``bell(n, k, (x1, x2, ...))`` gives Bell polynomials of the second kind,
          `B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.

        See Also
        ========

        sympy.functions.combinatorial.numbers.bell

        """

        inner_coeffs = [bell(n, j, tuple(self.bell_coeff_seq[:n-j+1])) for j in range(1, n+1)]

        k = Dummy('k')
        return sequence(tuple(inner_coeffs), (k, 1, oo))
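
The `bell(n, j, ...)` call in `inner_coeffs` evaluates partial Bell polynomials of the second kind. A small standalone sketch (not part of this class, assuming only SymPy) showing what such a call returns for B_{4,2}:

from sympy import bell, symbols

x1, x2, x3 = symbols('x1:4')
# B_{4,2}(x1, x2, x3): 4 elements split into 2 blocks, either as 2+2 or 3+1.
print(bell(4, 2, (x1, x2, x3)))   # 3*x2**2 + 4*x1*x3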
Example #4
def test_requires_partial():
    x, y, z, t, nu = symbols('x y z t nu')
    n = symbols('n', integer=True)

    f = x * y
    assert requires_partial(Derivative(f, x)) is True
    assert requires_partial(Derivative(f, y)) is True

    ## integrating out one of the variables
    assert requires_partial(
        Derivative(Integral(exp(-x * y),
                            (x, 0, oo)), y, evaluate=False)) is False

    ## bessel function with smooth parameter
    f = besselj(nu, x)
    assert requires_partial(Derivative(f, x)) is True
    assert requires_partial(Derivative(f, nu)) is True

    ## bessel function with integer parameter
    f = besselj(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # this is not really valid (differentiating with respect to an integer)
    # but there's no reason to use the partial derivative symbol there. make
    # sure we don't throw an exception here, though
    assert requires_partial(Derivative(f, n)) is False

    ## bell polynomial
    f = bell(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # again, invalid
    assert requires_partial(Derivative(f, n)) is False

    ## legendre polynomial
    f = legendre(0, x)
    assert requires_partial(Derivative(f, x)) is False

    f = legendre(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # again, invalid
    assert requires_partial(Derivative(f, n)) is False

    f = x**n
    assert requires_partial(Derivative(f, x)) is False

    assert requires_partial(
        Derivative(
            Integral((x * y)**n * exp(-x * y),
                     (x, 0, oo)), y, evaluate=False)) is False

    # parametric equation
    f = (exp(t), cos(t))
    g = sum(f)
    assert requires_partial(Derivative(g, t)) is False

    # function of unspecified variables
    f = symbols('f', cls=Function)
    assert requires_partial(Derivative(f, x)) is False
    assert requires_partial(Derivative(f, x, y)) is True
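
For context, `requires_partial` comes from `sympy.printing.conventions` and is what the printers consult when deciding between an ordinary and a partial derivative symbol. A minimal interactive sketch of the behaviour the asserts above pin down (assuming a recent SymPy):

from sympy import Derivative, Function, pprint, symbols

x, y = symbols('x y')
f = Function('f')
pprint(Derivative(f(x), x))      # one free variable: printed with an ordinary d
pprint(Derivative(f(x, y), x))   # several free variables: printed with a partial symbol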
Example #5
def test_requires_partial():
    x, y, z, t, nu = symbols('x y z t nu')
    n = symbols('n', integer=True)

    f = x * y
    assert requires_partial(Derivative(f, x)) is True
    assert requires_partial(Derivative(f, y)) is True

    ## integrating out one of the variables
    assert requires_partial(Derivative(Integral(exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False

    ## bessel function with smooth parameter
    f = besselj(nu, x)
    assert requires_partial(Derivative(f, x)) is True
    assert requires_partial(Derivative(f, nu)) is True

    ## bessel function with integer parameter
    f = besselj(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # this is not really valid (differentiating with respect to an integer)
    # but there's no reason to use the partial derivative symbol there. make
    # sure we don't throw an exception here, though
    assert requires_partial(Derivative(f, n)) is False

    ## bell polynomial
    f = bell(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # again, invalid
    assert requires_partial(Derivative(f, n)) is False

    ## legendre polynomial
    f = legendre(0, x)
    assert requires_partial(Derivative(f, x)) is False

    f = legendre(n, x)
    assert requires_partial(Derivative(f, x)) is False
    # again, invalid
    assert requires_partial(Derivative(f, n)) is False

    f = x ** n
    assert requires_partial(Derivative(f, x)) is False

    assert requires_partial(Derivative(Integral((x*y) ** n * exp(-x * y), (x, 0, oo)), y, evaluate=False)) is False

    # parametric equation
    f = (exp(t), cos(t))
    g = sum(f)
    assert requires_partial(Derivative(g, t)) is False

    f = symbols('f', cls=Function)
    assert requires_partial(Derivative(f(x), x)) is False
    assert requires_partial(Derivative(f(x), y)) is False
    assert requires_partial(Derivative(f(x, y), x)) is True
    assert requires_partial(Derivative(f(x, y), y)) is True
    assert requires_partial(Derivative(f(x, y), z)) is True
    assert requires_partial(Derivative(f(x, y), x, y)) is True
Example #6
def test_sympy__functions__combinatorial__numbers__bell():
    from sympy.functions.combinatorial.numbers import bell
    assert _test_args(bell(x, y))
Example #7
def test_sympy__functions__combinatorial__numbers__bell():
    from sympy.functions.combinatorial.numbers import bell
    assert _test_args(bell(x, y))
Example #8
    def interpolate(self,
                    points,
                    values,
                    use_log=False,
                    nu_x=0,
                    nu_y=0,
                    nu_z=0,
                    method="cubic"):
        r"""Interpolate function value at a given point.

        Only implemented in three-dimensions.

        Parameters
        ----------
        points : np.ndarray, shape (M, 3)
            The 3D Cartesian coordinates of :math:`M` points in :math:`\mathbb{R}^3` for which
            the interpolant (i.e., the interpolated function) is evaluated.
        values : np.ndarray, shape (N,)
            Function values at each of the :math:`N` grid points.
        use_log : bool, optional
            If True, the logarithm of the function values is interpolated.
            Can only be used for interpolating derivatives when the derivative is not a
            mixed derivative.
        nu_x : int, optional
            If zero, then the function in x-direction is interpolated.
            If greater than zero, then the "nu_x"th-order derivative in the x-direction is
            interpolated.
        nu_y : int, optional
            If zero, then the function in y-direction is interpolated.
            If greater than zero, then the "nu_y"th-order derivative in the y-direction is
            interpolated.
        nu_z : int, optional
            If zero, then the function in z-direction is interpolated.
            If greater than zero, then the "nu_z"th-order derivative in the z-direction is
            interpolated.
        method : str, optional
            The method of interpolation to perform. Supported are "cubic" (most accurate but
            computationally expensive), "linear", or "nearest" (least accurate but cheap
            computationally). The last two methods use SciPy's RegularGridInterpolator function.

        Returns
        -------
        np.ndarray :
            The interpolated function values (or values of its derivatives) at the :math:`M` points.

        """
        if method not in ["cubic", "linear", "nearest"]:
            raise ValueError(
                f"Argument method should be either cubic, linear, or nearest , got {method}"
            )
        if self.ndim != 3:
            raise NotImplementedError(
                f"Interpolation only works for three dimension, got ndim={self.ndim}"
            )
        if values.shape[0] != np.prod(self.shape):
            raise ValueError(
                f"Number of function values {values.shape[0]} does not match number of "
                f"grid points {np.prod(self.shape)}.")

        if use_log:
            values = np.log(values)

        # Use SciPy's RegularGridInterpolator when "linear" or "nearest" is requested.
        if method in ["linear", "nearest"]:
            x, y, z = self.get_points_along_axes()
            values = values.reshape(self.shape)
            interpolate = RegularGridInterpolator((x, y, z),
                                                  values,
                                                  method=method)
            return interpolate(points)

        # Interpolate the Z-Axis.
        def z_spline(z, x_index, y_index, nu_z=nu_z):
            # x_index, y_index are assumed to lie on the grid while z need not.
            # Get the smallest and largest index selecting function values on this z-slice.
            # The `1` and `self.shape[2] - 2` exclude the boundary points.
            small_index = self.coordinates_to_index((x_index, y_index, 1))
            large_index = self.coordinates_to_index(
                (x_index, y_index, self.shape[2] - 2))
            val = CubicSpline(
                self.points[small_index:large_index, 2],
                values[small_index:large_index],
            )(z, nu_z)
            return val

        # Interpolate the Y-Axis from a list of interpolated points on the z-axis.
        def y_splines(y, x_index, z, nu_y=nu_y):
            # The `1` and `self.shape[1] - 2` exclude the boundary points.
            # Assumes x_index lies on the grid while y, z may not.
            val = CubicSpline(
                self.points[np.arange(1, self.shape[1] - 2) * self.shape[2],
                            1],
                [
                    z_spline(z, x_index, y_index, nu_z)
                    for y_index in range(1, self.shape[1] - 2)
                ],
            )(y, nu_y)
            # Vectorizing over the z- and y-axes computes the interpolation for every
            #      (y, z) pair even though only the diagonal is needed. This is faster
            #      than running a list comprehension over each point in y, but uses
            #      more memory.
            return np.diag(val)

        # Interpolate the point (x, y, z) from a list of interpolated points on x,y-axis.
        def x_spline(x, y, z, nu_x):
            val = CubicSpline(
                self.points[np.arange(1, self.shape[0] - 2) * self.shape[1] *
                            self.shape[2], 0],
                [
                    y_splines(y, x_index, z, nu_y)
                    for x_index in range(1, self.shape[0] - 2)
                ],
            )(x, nu_x)
            # Vectorizing over the x-axis computes the interpolation for every
            #      (x, (y, z)) pair even though only the diagonal is needed. This is
            #      faster than running a list comprehension/for loop over each point,
            #      but uses more memory.
            return np.diag(val)

        if use_log:
            # All derivatives require the interpolation of f at (x,y,z)
            interpolated = np.exp(
                self.interpolate(points,
                                 values,
                                 use_log=False,
                                 nu_x=0,
                                 nu_y=0,
                                 nu_z=0))
            # Check whether the derivative is taken in only one direction.
            one_var_deriv = sum([nu_x == 0, nu_y == 0, nu_z == 0]) == 2

            # Return the derivative of f = exp(log(f)).
            if (nu_x, nu_y, nu_z) == (0, 0, 0):
                return interpolated
            elif one_var_deriv:
                # Taking the k-th derivative with respect to a single variable (x, y or z):
                # interpolate d^k ln(f) / d(deriv_var)^k for all k from 1 to deriv_var.
                if nu_x > 0:
                    derivs = [
                        self.interpolate(points,
                                         values,
                                         use_log=False,
                                         nu_x=i,
                                         nu_y=0,
                                         nu_z=0) for i in range(1, nu_x + 1)
                    ]
                    deriv_var = nu_x
                elif nu_y > 0:
                    derivs = [
                        self.interpolate(points,
                                         values,
                                         use_log=False,
                                         nu_x=0,
                                         nu_y=i,
                                         nu_z=0) for i in range(1, nu_y + 1)
                    ]
                    deriv_var = nu_y
                else:
                    derivs = [
                        self.interpolate(points,
                                         values,
                                         use_log=False,
                                         nu_x=0,
                                         nu_y=0,
                                         nu_z=i) for i in range(1, nu_z + 1)
                    ]
                    deriv_var = nu_z
                # Sympy symbols and dictionary of symbols pointing to the derivative values
                sympy_symbols = symbols("x:" + str(deriv_var))
                symbol_values = {
                    "x" + str(i): float(derivs[i])
                    for i in range(0, deriv_var)
                }
                return interpolated * float(
                    sum([
                        bell(deriv_var, i,
                             sympy_symbols).evalf(subs=symbol_values)
                        for i in range(1, deriv_var + 1)
                    ]))
            else:
                raise NotImplementedError(
                    "Taking mixed derivative while applying the logarithm is not supported."
                )
        # Normal interpolation without logarithm.
        interpolated = x_spline(points[:, 0], points[:, 1], points[:, 2], nu_x)
        return interpolated
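
The `use_log` branch above relies on the Faa di Bruno identity d^k/dx^k exp(g) = exp(g) * sum_{i=1}^{k} B_{k,i}(g', g'', ..., g^{(k-i+1)}), which is exactly what the sum over `bell(deriv_var, i, sympy_symbols)` evaluates. A standalone sketch, assuming only SymPy, that checks the identity symbolically for k = 3:

from sympy import Function, bell, diff, exp, simplify, symbols

x = symbols('x')
g = Function('g')(x)
k = 3
derivs = [diff(g, x, i) for i in range(1, k + 1)]      # g', g'', g'''
rhs = exp(g) * sum(bell(k, i, tuple(derivs[:k - i + 1])) for i in range(1, k + 1))
assert simplify(diff(exp(g), x, k) - rhs) == 0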
Example #9
def compute_cmbns_perfect_cj(nb_i, nb_o):
    '''
    Computes the number of combinations
    for a perfect coinjoin with nb_i inputs and nb_o outputs.

    A perfect coinjoin is defined as a transaction for which:
      - all inputs have the same amount
      - all outputs have the same amount
      - no fees are paid (equivalent to the same fee being paid by each input)
      - nb_i % nb_o == 0, if nb_i >= nb_o
        or
        nb_o % nb_i == 0, if nb_o >= nb_i

    Returns the number of combinations

    Parameters:
        nb_i = number of inputs
        nb_o = number of outputs

    Notes:
    Since all inputs have the same amount
    we can use exponential Bell polynomials to retrieve
    the number and structure of partitions for the set of inputs.

    Since all outputs have the same amount
    we can use a direct computation of combinations of k outputs among n.
    '''
    # Reverses inputs & outputs if nb_i > nb_o
    # (required for the use of EBP)
    if nb_i > nb_o:
        nb_i, nb_o = nb_o, nb_i

    # Checks structure of perfect coinjoin tx
    # (we must have an integer ratio between nb_o and nb_i)
    if nb_o % nb_i != 0:
        return None

    # Checks if we can use precomputed values
    if (nb_i <= 1) or (nb_o <= 1):
        return 1
    elif (nb_i <= 20) and (nb_o <= 60):
        return NB_CMBN_PRFCT_CJ[(nb_i, nb_o)]

    # Initializes the total number of combinations for the tx
    nb_cmbn = 0

    # Computes the ratio between #outputs and #inputs
    ratio_o_i = float(nb_o) / nb_i

    # Iterates over partitionings of inputs into k_i parts
    for k_i in range(1, nb_i + 1):
        parts_k_i = bell(nb_i, k_i, symbols('x:%d' % (nb_i + 1))[1:])

        # Splits the Bell polynomial in its basic components
        dict_coeffs = parts_k_i.as_coefficients_dict()

        for monomial, coef_monomial in dict_coeffs.items():
            nb_cmbn_monomial = coef_monomial

            # Splits the monomial in its basic blocks
            # and extracts the exponent for each block
            dict_exp = monomial.as_powers_dict()

            # Initializes number of remaining free outputs
            # for computation of output combinations
            nb_free_o = nb_o

            for block, block_exp in dict_exp.items():
                block_nb_i = int(str(block)[1:])
                block_nb_o = int(ratio_o_i * block_nb_i)
                for _ in range(1, block_exp + 1):
                    nb_cmbn_monomial *= nC(symbols('x:%d' % nb_free_o),
                                           block_nb_o)
                    nb_free_o -= block_nb_o

            nb_cmbn += nb_cmbn_monomial

    return nb_cmbn
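
The loop above decomposes each Bell polynomial into monomials whose exponents describe one way of partitioning the inputs. A small standalone illustration of that decomposition, assuming SymPy, for 4 inputs split into 2 parts:

from sympy import bell, symbols

# bell(4, 2, (x1, x2, x3, x4)) == 3*x2**2 + 4*x1*x3: either two blocks of size 2
# (x2**2, coefficient 3) or one block of size 3 plus a singleton (x1*x3, coefficient 4).
parts = bell(4, 2, symbols('x:5')[1:])
for monomial, coeff in parts.as_coefficients_dict().items():
    print(coeff, dict(monomial.as_powers_dict()))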