Example #1
 def test_threshold(self):
     # Regression test.  An early version of `pascal` returned an
     # array of type np.uint64 for n=35, but that data type is too small
     # to hold p[-1, -1].  The second assert_equal below would fail
     # because p[-1, -1] overflowed.
     p = pascal(34)
     assert_equal(2 * p.item(-1, -2), p.item(-1, -1), err_msg="n = 34")
     p = pascal(35)
     assert_equal(2 * p.item(-1, -2), p.item(-1, -1), err_msg="n = 35")
Example #2
 def test_threshold(self):
     # Regression test.  An early version of `pascal` returned an
     # array of type np.uint64 for n=35, but that data type is too small
     # to hold p[-1, -1].  The second assert_equal below would fail
     # because p[-1, -1] overflowed.
     p = pascal(34)
     assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34")
     p = pascal(35)
     assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 35")
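For context, a small sketch (standard library only) of why n = 35 is the threshold in the regression tests above: p[-1, -1] of pascal(35) is C(68, 34), which does not fit in an unsigned 64-bit integer.

from math import comb

print(comb(68, 34) > 2**64 - 1)   # True: C(68, 34) is roughly 2.8e19
print(2**64 - 1)                  # 18446744073709551615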
Example #3
 def _sample_kernel_pascal_triangle(self):
     """Sample kernel weights using Pascal's Triangle approximation.
 """
     from scipy.linalg import pascal
     kernel = pascal(2 * self.k + 1, kind='lower')[-1]
     kernel = kernel / np.sum(kernel)
     return kernel
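For illustration, a small sketch of the kernel this produces outside the class (with a hypothetical k = 2): the last row of the lower Pascal matrix holds the binomial coefficients C(2k, i), which after normalization form a binomial filter that approximates a Gaussian smoothing kernel.

import numpy as np
from scipy.linalg import pascal

k = 2  # hypothetical half-width
kernel = pascal(2 * k + 1, kind='lower')[-1]
kernel = kernel / np.sum(kernel)
print(kernel)   # [0.0625 0.25 0.375 0.25 0.0625]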
Example #4
def find_bezier_trajectory(coordinates, points):
    n = len(coordinates)

    # binomial coefficients C(n-1, i) from the last row of the lower Pascal matrix
    pascal_coord = pascal(n, kind='lower')[-1]
    t = np.linspace(0, 1, points)

    bezier_x = np.zeros(points)
    bezier_y = np.zeros(points)

    for i in range(n):
        k = (t**(n - 1 - i))
        l = (1 - t)**i
        bezier_x += np.multiply(
            l, k) * pascal_coord[i] * coordinates[n - 1 - i][0]
        bezier_y += np.multiply(
            l, k) * pascal_coord[i] * coordinates[n - 1 - i][1]
    bezier_xd = []
    bezier_yd = []
    for i in range(len(bezier_x)):
        bezier_xd.append(int(bezier_x[i]))
        bezier_yd.append(int(bezier_y[i]))

    bezier_coordinates = np.transpose([bezier_xd, bezier_yd])
    # print(bezier_coordinates)
    return bezier_coordinates
Example #5
def mxbern(t, deg):
    elements = np.size(t)

    if type(t) == np.ndarray:
        n = len(t)
        m = elements / n
        if m != 1:
            raise ValueError('Input t must be a column vector')
        elif min(t) < 0 or max(t) > 1.0:
            raise ValueError('Input nodes t must be within [0, 1]')

        ct = 1.0 - t
        B = np.zeros([n, deg + 1])

        for i in range(0, deg + 1):
            B[:, i] = (t**i) * (ct**(deg - i))

        if deg < 23:
            lower = (np.cumprod(np.arange(deg, 0.9, -1)) /
                     np.cumprod(np.arange(1, deg + 0.1, 1)))
            full = lower.tolist()
            full.insert(0, 1.0)
            B = np.dot(B, np.diag(full))
        else:
            B = np.dot(B, np.diag(np.diag(np.fliplr(pascal(deg + 1)))))

    elif type(t) == float:
        ct = 1.0 - t
        B = np.zeros(deg + 1)

        for i in range(0, deg + 1):
            B[i] = (t**i) * (ct**(deg - i))

        if deg < 23:
            lower = (np.cumprod(np.arange(deg, 0.9, -1)) /
                     np.cumprod(np.arange(1, deg + 0.1, 1)))
            full = lower.tolist()
            full.insert(0, 1.0)
            B = np.dot(B, np.diag(full))
        else:
            B = np.dot(B, np.diag(np.diag(np.fliplr(pascal(deg + 1)))))

    else:
        raise ValueError('Wrong data type: Only numpy.ndarray or float is '
                         'accepted')

    return B
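A quick sanity check for mxbern (a sketch, assuming numpy is imported as np, scipy.linalg.pascal is imported, and mxbern is defined as above): the Bernstein basis functions form a partition of unity, so every row of the returned matrix should sum to 1.

t = np.linspace(0.0, 1.0, 5)
B = mxbern(t, 3)
print(B.shape)          # (5, 4)
print(B.sum(axis=1))    # approximately 1.0 everywhere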
Example #6
File: _array.py  Project: coax-dev/coax
def diff_transform_matrix(num_frames, dtype='float32'):
    r"""
    A helper function that implements discrete differentiation for stacked
    state observations.

    Let's say we have a feature vector :math:`X` consisting of four stacked
    frames, i.e. the shape would be: ``[batch_size, height, width, 4]``.

    The corresponding diff-transform matrix with ``num_frames=4`` is a
    :math:`4\times 4` matrix given by:

    .. math::

        M_\text{diff}^{(4)}\ =\ \begin{pmatrix}
            -1 &  0 &  0 & 0 \\
             3 &  1 &  0 & 0 \\
            -3 & -2 & -1 & 0 \\
             1 &  1 &  1 & 1
        \end{pmatrix}

    such that the diff-transformed feature vector is readily computed as:

    .. math::

        X_\text{diff}\ =\ X\, M_\text{diff}^{(4)}

    The diff-transformation preserves the shape, but it reorganizes the frames
    in such a way that they look more like canonical variables. You can think
    of :math:`X_\text{diff}` as the stacked variables :math:`x`,
    :math:`\dot{x}`, :math:`\ddot{x}`, etc. (in reverse order). These
    represent the position, velocity, acceleration, etc. of pixels in a single
    frame.

    Parameters
    ----------
    num_frames : positive int

        The number of stacked frames in the original :math:`X`.

    dtype : dtype, optional

        The output data type.

    Returns
    -------
    M : 2d-Tensor, shape: [num_frames, num_frames]

        A square matrix that is intended to be applied on the right of the
        frame-stacked observation, e.g. ``X_diff = K.dot(X_orig, M)``, where we
        assume that the frames are stacked in ``axis=-1`` of ``X_orig``, in
        chronological order.

    """
    assert isinstance(num_frames, int) and num_frames >= 1
    s = jnp.diag(jnp.power(-1, jnp.arange(num_frames)))  # alternating sign
    m = s.dot(pascal(num_frames, kind='upper'))[::-1, ::-1]
    return m.astype(dtype)
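For reference, a NumPy-only sketch of the same construction (the library itself uses jax.numpy; the helper name below is hypothetical), which reproduces the 4 x 4 matrix shown in the docstring:

import numpy as np
from scipy.linalg import pascal

def diff_transform_matrix_np(num_frames, dtype='float32'):
    s = np.diag((-1.0) ** np.arange(num_frames))                  # alternating sign
    m = s.dot(pascal(num_frames, kind='upper'))[::-1, ::-1]
    return m.astype(dtype)

print(diff_transform_matrix_np(4, dtype='int64'))
# [[-1  0  0  0]
#  [ 3  1  0  0]
#  [-3 -2 -1  0]
#  [ 1  1  1  1]]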
Example #7
 def check_invpascal(n, kind, exact):
     ip = invpascal(n, kind=kind, exact=exact)
     p = pascal(n, kind=kind, exact=exact)
     # Matrix-multiply ip and p, and check that we get the identity matrix.
     # We can't use the simple expression e = ip.dot(p), because when
     # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is
     # np.int64. The product of those dtypes is np.float64, which loses
     # precision when n is greater than 18.  Instead we'll cast both to
     # object arrays, and then multiply.
     e = ip.astype(object).dot(p.astype(object))
     assert_array_equal(e, eye(n), err_msg="n=%d  kind=%r exact=%r" % (n, kind, exact))
Example #8
def main():
    count = 0
    pascals_triangle = pascal(101, kind="lower")

    for n in range(101):
        for r in range(n+1):
            # binomial(n, r) is just pascals_triangle[n, r]
            if pascals_triangle[n, r] > 1000000:
                count += 1

    answer = count
    print(answer)
Example #9
 def check_invpascal(n, kind, exact):
     ip = invpascal(n, kind=kind, exact=exact)
     p = pascal(n, kind=kind, exact=exact)
     # Matrix-multiply ip and p, and check that we get the identity matrix.
     # We can't use the simple expression e = ip.dot(p), because when
     # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is
     # np.int64. The product of those dtypes is np.float64, which loses
     # precision when n is greater than 18.  Instead we'll cast both to
     # object arrays, and then multiply.
     e = ip.astype(object).dot(p.astype(object))
     assert_array_equal(e, eye(n), err_msg="n=%d  kind=%r exact=%r" %
                                           (n, kind, exact))
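A small sketch illustrating the dtype issue mentioned in the comment above: multiplying a uint64 array by an int64 array promotes to float64 (there is no common integer dtype), and float64's 53-bit mantissa cannot hold large exact integers.

import numpy as np

a = np.array([2**60 + 1], dtype=np.uint64)
b = np.array([3], dtype=np.int64)
prod = a * b
print(prod.dtype)                       # float64
print(int(prod[0]), 3 * (2**60 + 1))    # the float64 result differs from the exact product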
Example #10
 def check_case(self, n, sym, low):
     assert_array_equal(pascal(n), sym)
     assert_array_equal(pascal(n, kind='lower'), low)
     assert_array_equal(pascal(n, kind='upper'), low.T)
     assert_array_almost_equal(pascal(n, exact=False), sym)
     assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low)
     assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T)
Example #11
 def check_case(self, n, sym, low):
     assert_array_equal(pascal(n), sym)
     assert_array_equal(pascal(n, kind='lower'), low)
     assert_array_equal(pascal(n, kind='upper'), low.T)
     assert_array_almost_equal(pascal(n, exact=False), sym)
     assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low)
     assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T)
Example #12
def search_pascal_multiples_fast1(row_limit):
    # create pascal array with library
    ptriangle = np.array(pascal(row_limit))

    # filter out the outer two rows and columns
    ptriangle = np.delete(ptriangle, [0,1], axis=0)
    ptriangle = np.delete(ptriangle, [0,1], axis=1)

    # uniques that occur more than once
    unique_num, counts = np.unique(ptriangle, return_counts=True)

    mask = np.where(counts > 3, True, False)

    return list(unique_num[mask])
Example #13
def p_k0k_vec_old(t, k, k0, ak, ck, gamma=4, N=30):
    # Compute P(I(t)=k|I(0)=k0,theta) by Crawford method
    # Inverse Laplace transform of continued fraction representation
    # Euler's transform
    A = gamma * np.log(10)
    idx = np.arange(0, N)
    s = (A + 2 * idx * np.pi * 1j) / 2 / t
    val = np.real(f_vec(s, int(k), int(k0), ak, ck))
    col = np.ones((N, N), dtype="int") * np.arange(N)
    lig = np.ones((N, N), dtype="int") * np.arange(N)[:, np.newaxis]
    temp = np.triu(np.choose(col - lig, val, mode="clip")) * (-1)**(col + lig)
    temp *= pascal(N, kind='upper') / 2**np.arange(1, N + 1)
    res = np.exp(A / 2) / t * (np.sum(temp) - val[0] / 2)
    return np.maximum(res, threshold)
Example #14
def basic_D(n, h):
    # pascal matrix
    P = pascal(n, kind='lower')

    # exponent matrix
    L = np.tril(np.ones(shape=(n, n)))
    R = np.tril(matrix_power(L, 2) - 1)

    # H matrix, H's raised to corresponding powers
    H = np.tril(np.power(h, R))

    # Create D poly shift matrix with offset h
    D = (H * P)

    return D.T
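A brief usage sketch (an interpretation, assuming numpy as np, numpy.linalg.matrix_power and scipy.linalg.pascal are imported as in the standalone script further below): the returned matrix maps the coefficients of p(x), ordered from x^0 upward, to the coefficients of p(x + h).

a = np.array([0.0, 0.0, 1.0])   # p(x) = x**2
print(basic_D(3, 2) @ a)        # [4. 4. 1.]  i.e. p(x + 2) = 4 + 4*x + x**2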
Example #15
def search_pascal_multiples_readable(row_limit):
    # create pascal array with library pascal from scipy.linalg subpackage
    pascal_array = np.array(pascal(row_limit))

    # filter out the outer two rows and columns
    pascal_array = np.delete(pascal_array, [0, 1], axis=0)
    pascal_array = np.delete(pascal_array, [0, 1], axis=1)

    # store the unique numbers from the sliced array and their counts
    unique_num, counts = np.unique(pascal_array, return_counts=True)

    # create a mask, where the number of counts is greater than 3
    mask = np.where(counts > 3, True, False)

    return list(
        unique_num[mask]
    )  # return the array of unique numbers, whose count is greater than 3 as a list
Example #16
def find_bezier_lane(coordinates, points):
    n = len(coordinates)

    pascal_coord = pascal(n, kind='lower')[-1]
    t = np.linspace(0, 1, points)

    bezier_x = np.zeros(points)
    bezier_y = np.zeros(points)

    for i in range(n):
        k = (t**(n - 1 - i))
        l = (1 - t)**i
        bezier_x += np.multiply(
            l, k) * pascal_coord[i] * coordinates[n - 1 - i][0]
        bezier_y += np.multiply(
            l, k) * pascal_coord[i] * coordinates[n - 1 - i][2]

    return bezier_x, bezier_y
Example #17
def poly_translation_matrix(n, h, kind='lower'):

    # pascal matrix
    P = pascal(n, kind='lower')

    # exponent matrix
    L = np.tril(np.ones(shape=(n, n)))
    R = np.tril(matrix_power(L, 2) - 1)

    # H matrix, H's raised to corresponding powers
    H = np.tril(np.power(-h, R))

    # Create S poly shift matrix with offset h
    Lh = (H * P)

    if kind == 'lower':
        return Lh

    # return upper triangular
    Uh = Lh.T
    return Uh
Example #18
def poly_translation_matrix(n, delta_x, kind='lower'):

    # pascal matrix
    P = pascal(n, kind='lower')

    # exponent matrix
    L = np.tril(np.ones(shape=(n,n)))
    R = np.tril(matrix_power(L, 2)-1)

    # D matrix, delta_x's raised to corresponding powers
    D = np.tril(np.power(-delta_x,R)) 

    # Create S poly shift matrix with offset delta_x
    Ld = (D * P) 

    if kind == 'lower':
        return Ld

    # return upper triangular
    Ud = Ld.T
    return Ud
Example #19
def poly_shift_matrix(order, h):
    # h -- horizontal shift amount

    # n+1 coefficients to solve for -- (n+1) x (n+1) shift matrix
    n = order + 1

    # pascal matrix
    P = pascal(n, kind='upper')

    # create H
    H = np.eye(n)
    _h = 1
    for k in range(n):
        for i in range(n - k):
            H[i, i + k] = _h
        _h *= h

    # create shift matrix - flip for numpy convention
    S = np.flip(H * P)

    # return
    return S
Example #20
 def __compCoeffsBernstein(self, n):
     if (n == 4):
         tC = np.array([[1, -4, 6, -4, 1], [0, 4, -12, 12, -4],
                        [0, 0, 6, -12, 6], [0, 0, 0, 4, -4],
                        [0, 0, 0, 0, 1]])
     elif (n == 3):
         tC = np.array([[1, -3, 3, -1], [0, 3, -6, 3], [0, 0, 3, -3],
                        [0, 0, 0, 1]])
     elif (n == 5):
         tC = np.array([[1, -5, 10, -10, 5, -1], [0, 5, -20, 30, -20, 5],
                        [0, 0, 10, -30, 30, -10], [0, 0, 0, 10, -20, 10],
                        [0, 0, 0, 0, 5, -5], [0, 0, 0, 0, 0, 1]])
     elif (n == 2):
         tC = np.array([[1, -2, 1], [0, 2, -2], [0, 0, 1]])
     else:
         # Create Bernstein matrix for power basis for order n using the
         # Pascal matrix.
         c = (-1)**(1 + np.arange(n + 1))
         B1 = pascal(n + 1, kind='upper') * c[:, np.newaxis]
         endCol = B1[:, -1]
         tC = B1 * np.tile(endCol, (n + 1, 1))
     return tC
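As a quick consistency check (a sketch, assuming numpy as np and scipy.linalg.pascal), the Pascal-matrix fallback branch reproduces the hard-coded n = 3 matrix:

import numpy as np
from scipy.linalg import pascal

n = 3
c = (-1) ** (1 + np.arange(n + 1))                  # alternating signs
B1 = pascal(n + 1, kind='upper') * c[:, np.newaxis]
tC = B1 * np.tile(B1[:, -1], (n + 1, 1))
print(tC)
# [[ 1. -3.  3. -1.]
#  [ 0.  3. -6.  3.]
#  [ 0.  0.  3. -3.]
#  [ 0.  0.  0.  1.]]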
Example #21
def bezier_plot(coordinate_left, coordinate_right, img, points):
    # LEFT
    h, w = img.shape
    n = len(coordinate_left)
    pascal_coord = pascal(n, kind='lower')[-1]
    t = np.linspace(0, 1, points)
    p_x_left = np.zeros(points)
    p_y_left = np.zeros(points)
    p_x_right = np.zeros(points)
    p_y_right = np.zeros(points)
    # print(coordinate_left)
    for i in range(n):
        k = (t**(n - 1 - i))
        l = (1 - t)**i
        p_x_left += np.multiply(
            l, k) * pascal_coord[i] * coordinate_left[n - 1 - i][0]
        p_y_left += np.multiply(
            l, k) * pascal_coord[i] * coordinate_left[n - 1 - i][1]
        p_x_right += np.multiply(
            l, k) * pascal_coord[i] * coordinate_right[n - 1 - i][0]
        p_y_right += np.multiply(
            l, k) * pascal_coord[i] * coordinate_right[n - 1 - i][1]

    mask = np.zeros((h, w))
    # print(p_x_left,p_y_left)
    for i in range(len(p_x_left) - 1):
        mask = cv2.line(mask, (int(p_x_left[i]), int(p_y_left[i])),
                        (int(p_x_left[i + 1]), int(p_y_left[i + 1])),
                        (255, 255, 255))

    for i in range(len(p_x_right) - 1):
        mask = cv2.line(mask, (int(p_x_right[i]), int(p_y_right[i])),
                        (int(p_x_right[i + 1]), int(p_y_right[i + 1])),
                        (255, 255, 255))

    cv2.imshow("mkas", mask)
Example #22
inv_h_mat = sclinalg.invhilbert(3)
print('Hilbert matrix = \n', h_mat)
print('Inverse Hilbert matrix = \n', inv_h_mat)
print('||H * H^(-1) - I3|| = ', sclinalg.norm(h_mat @ inv_h_mat - I3))
print('cond(H) = ', np.linalg.cond(h_mat))
print('||H||_2 * ||H^(-1)||_2 = ',
      np.linalg.norm(h_mat) * np.linalg.norm(inv_h_mat))
print('cond(H, 1) = ', np.linalg.cond(h_mat, 1))
print('||H||_1 * ||H^(-1)||_1 = ',
      np.linalg.norm(h_mat, 1) * np.linalg.norm(inv_h_mat, 1))
print('cond(H, np.inf) = ', np.linalg.cond(h_mat, np.inf))
print('||H||_inf * ||H^(-1)||_inf = ',
      np.linalg.norm(h_mat, np.inf) * np.linalg.norm(inv_h_mat, np.inf))

# det(A)
p_mat = sclinalg.pascal(3)
print('Pascal matrix = \n', p_mat)
print('det(P) = |P| = ', sclinalg.det(p_mat))

# Companion matrix and eigenvalues
# p(x) = x^3 + x^2 + x + 1 = 0
poly_coef = [1, 1, 1, 1]
mat_c = sclinalg.companion(poly_coef)
print('Companion matrix = \n', mat_c)
eigval, ev = sclinalg.eig(mat_c)
ev = ev.T
print('Eigenvalues = ', eigval)
print('Eigenvectors = ', ev)
# C * v = lambda * v ?
for i in range(0, eigval.size):
    print('|| C * v - lambda[', i, '] * v||_2 = ',
          sclinalg.norm(mat_c @ ev[i] - eigval[i] * ev[i]))
Example #23
File: linalg.py  Project: zsh-974/scipy
 def time_pascal(self, size):
     sl.pascal(size)
Example #24
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb  3 19:46:38 2019

@author: xsxsz
"""

import numpy as np
import scipy.linalg as linalg

a = np.mat(np.ones([3, 3]))
b = np.mat(np.ones([4, 3]))
c = np.mat(np.ones([3, 4]))
d = linalg.block_diag(a, b, c)
print(d)
print('----------')
e = linalg.pascal(6)
print(e)
print('----------')
f_value = np.array([0.3, 0.1, 0.4, 0.2])
s_value = np.array([0.2, 0.8, 0.7])
f = linalg.leslie(f_value, s_value)
print(f)
print('----------')
Example #25
 def test_big(self):
     p = pascal(50)
     assert_equal(p[-1, -1], comb(98, 49, exact=True))
Example #26
def locmle(z, xlim = None, Jmle = 35, d = 0., s = 1., ep = 1/100000., sw = 0, Cov_in = None):
	"""Uses z-values in [-xlim,xlim] to find mles for p0, del0, sig0 .

	Jmle is the number of iterations, beginning at (del0, sig0) = (d, s).
	sw = 1 returns the correlation matrix.
	z can be a numpy/scipy array or an ordinary Python array.
	Note that this function returns pandas Series."""
	N = len(z)
	if xlim is None:
		if N > 500000:
			b = 1
		else:
			b = 4.3 * np.exp(-0.26*np.log10(N))
		xlim = np.array([np.median(z), b*(np.percentile(z, 75)-np.percentile(z, 25))/(2*stats.norm.ppf(.75))])
	aorig = xlim[0] - xlim[1]
	borig = xlim[0] + xlim[1]
	z0 = np.array([el for el in z if el >= aorig and el <= borig])
	N0 = len(z0)
	Y = np.array([np.mean(z0), np.mean(np.power(z0, 2))])
	that = float(N0) / N
	# find MLE estimates
	for j in range(Jmle):
		bet = np.array([d/(s*s), -1/(2*s*s)])
		aa = (aorig - float(d)) / s
		bb = (borig - float(d)) / s
		H0 = stats.norm.cdf(bb) - stats.norm.cdf(aa)
		fa = stats.norm.pdf(aa)
		fb = stats.norm.pdf(bb)
		H1 = fa - fb
		H2 = H0 + aa * fa - bb * fb
		H3 = (2 + aa*aa) * fa - (2 + bb*bb) * fb
		H4 = 3 * H0 + (3 * aa + np.power(aa, 3)) * fa - (3 * bb + np.power(bb, 3)) * fb
		H = np.array([H0, H1, H2, H3, H4])
		r = float(d) / s
		I = pascal(5, kind = 'lower', exact = False)
		u1hold = np.power(s, range(5))
		u1 = np.matrix([u1hold for k in range(5)])
		II = np.power(r, np.matrix([[max(k-i, 0) for i in range(5)] for k in range(5)]))
		I = np.multiply(np.multiply(I, II), u1.transpose())
		E = np.array(I * np.matrix(H).transpose()).transpose()[0]/H0
		mu = np.array([E[1], E[2]])
		V = np.matrix([[E[2] - E[1]*E[1], E[3] - E[1] * E[2]],[E[3] - E[1] * E[2], E[4] - E[2]*E[2]]])
		addbet = np.linalg.solve(V, (Y - mu).transpose()).transpose()/(1+1./((j+1)*(j+1)))
		bett = bet + addbet
		if bett[1] > 0:
			bett = bet + .1 * addbet
		if pd.isnull(bett[1]) or bett[1] >= 0:
			break
		d = -bett[0]/(2 * bett[1])
		s = 1 / np.sqrt(-2. * bett[1])
		if np.sqrt(sum(np.array(np.power(bett - bet, 2)))) < ep:
			break
	if pd.isnull(bett[1]) or bett[1] >= 0:
		mle = np.array([np.nan for k in range(6)])
		Cov_lfdr = np.nan
		if pd.isnull(bett[1]):
			Cov_out = np.nan
		Cor = np.matrix([[np.nan]*3]*3)
	else:
		aa = (aorig - d) / s
		bb = (borig - d) / s
		H0 = stats.norm.cdf(bb) - stats.norm.cdf(aa)
		p0 = that / H0
		# sd calcs
		J = s*s * np.matrix([[1, 2 * d],[0, s]])
		JV = J * np.linalg.inv(V)
		JVJ = JV * J.transpose()
		mat = np.zeros((3,3))
		mat[1:,1:] = JVJ/N0
		mat[0,0] = (p0 * H0 * (1 - p0 * H0)) / N
		h = np.array([H1/H0, (H2 - H0)/H0])
		matt = np.eye(3)
		matt[0,:] = np.array([1/H0] + (-(p0/s) * h).tolist())
		matt = np.matrix(matt)
		C = matt * (mat * matt.transpose())
		mle = np.array([p0, d, s] + np.sqrt(np.diagonal(C)).tolist())
		if sw == 1:
			sd = mle[3:]
			Co = C/np.outer(sd, sd)
			Cor = Co[:,[1,2,0]][[1,2,0]]
			# switch to pandas dataframe for labeling
			Cor = pd.DataFrame(Cor, index=['d', 's','p0'], columns=['d','s','p0'])
		if Cov_in is not None:
			i0 = [i for i,x in enumerate(Cov_in['x']) if x > aa and x < bb]
			Cov_out = loccov(N, N0, p0, d, s, Cov_in['x'], Cov_in['X'], Cov_in['f'], JV, Y, i0, H, h, Cov_in['sw'])
	#label with pandas Series
	mle = pd.Series(mle[[1,2,0,4,5,3]], index=['del0', 'sig0', 'p0', 'sd_del0', 'sd_sig0', 'sd_p0'])
	out = {}
	out['mle'] = mle
	if sw == 1:
		out['Cor'] = Cor
	if Cov_in is not None:
		if Cov_in['sw'] == 2:
			out['pds_'] = Cov_out
		elif Cov_in['sw'] == 3:
			out['Ilfdr'] = Cov_out
		else:
			out['Cov_lfdr'] = Cov_out
	if sw == 1 or Cov_in is not None:
		return pd.Series(out)
	return mle
Example #27
#
# Create Pascal matrix
#

import numpy as np
from numpy.linalg import matrix_power
from scipy.linalg import pascal

h = 2
n = 5

# pascal matrix
P = pascal(n, kind='lower')
#print(P)

# exponent matrix
L = np.tril(np.ones(shape=(n, n)))
R = np.tril(matrix_power(L, 2) - 1)
print(R)

# H matrix, H's raised to corresponding powers
H = np.tril(np.power(h, R))
print(H)

# A
A = H * P
print(A)

# coefficients: a0, a1, ..., an are multiplied row-wise
# powers of x: x^0, x^1, ..., x^n are multiplied column-wise
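Continuing the script above with a concrete (hypothetical) coefficient vector: multiplying the row vector a = (a0, ..., a4) by A reproduces the shift-by-h behaviour described in the comments.

a = np.array([0.0, 0.0, 1.0, 0.0, 0.0])   # p(x) = x**2
print(a @ A)                              # [4. 4. 1. 0. 0.]  i.e. p(x + 2) = 4 + 4*x + x**2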
Example #28
def bezier_plot(coordinate_left, coordinate_right, mask_image, points, og_img):
    # LEFT
    h, w = mask_image.shape
    n = len(coordinate_left)
    pascal_coord = pascal(n, kind='lower')[-1]
    t = np.linspace(0, 1, points)
    p_x_left = np.zeros(points)
    p_y_left = np.zeros(points)
    p_x_right = np.zeros(points)
    p_y_right = np.zeros(points)
    # print(coordinate_left)
    for i in range(n):
        k = (t**(n - 1 - i))
        l = (1 - t)**i
        p_x_left += np.multiply(
            l, k) * pascal_coord[i] * coordinate_left[n - 1 - i][0]
        p_y_left += np.multiply(
            l, k) * pascal_coord[i] * coordinate_left[n - 1 - i][1]
        p_x_right += np.multiply(
            l, k) * pascal_coord[i] * coordinate_right[n - 1 - i][0]
        p_y_right += np.multiply(
            l, k) * pascal_coord[i] * coordinate_right[n - 1 - i][1]
    h1 = h + int(h / 2)
    mask1 = np.zeros((h, w))
    top_coord_left = np.zeros((len(p_y_left), 2))
    top_coord_right = np.zeros((len(p_y_right), 2))
    # print(p_x_left,p_y_left)
    print("\n")
    x_prev, z_prev = 0, 0
    top_coord_left[len(p_x_left) -
                   1][0], top_coord_left[len(p_x_left) - 1][1] = cam_world(
                       int(p_x_left[len(p_x_left) - 1]),
                       int(p_y_left[len(p_x_left) - 1]), mask1)
    top_coord_right[len(p_x_right) -
                    1][0], top_coord_right[len(p_x_right) - 1][1] = cam_world(
                        int(p_x_right[len(p_x_right) - 1]),
                        int(p_y_right[len(p_x_right) - 1]), mask1)
    center = np.array([(top_coord_left[-1][0] + top_coord_right[-1][0]) / 2,
                       (top_coord_left[-1][1] + top_coord_right[-1][1]) / 2])
    check = 0
    turn = "Straight"
    left_cut = [0, 0]
    right_cut = [0, 0]
    for i in range(len(p_x_left) - 1):
        # top_coord_left[i][0],top_coord_left[i][1]=cam_world(int(p_x_left[i]),int(p_y_left[i]),mask1)
        top_coord_left[i][0], top_coord_left[i][1] = cam_tp(
            int(p_x_left[i]), int(p_y_left[i]), mask1)
        if i > 0 and (top_coord_left[i][0] -
                      center[0]) * (top_coord_left[i - 1][0] - center[0]) < 0:
            if check == 0:
                left_cut = (center[0],
                            (top_coord_left[i][1] + top_coord_left[i - 1][1]) /
                            2)
            check = 1
        # if i>0:
        #     mask2=cv2.line(mask2,(int(x_new*20+w/2),int(800-z_new*20)),(int(x_prev*20+w/2),int(800-z_prev*20)),(255,255,255))
        mask1 = cv2.line(mask1, (int(p_x_left[i]), int(p_y_left[i])),
                         (int(p_x_left[i + 1]), int(p_y_left[i + 1])),
                         (255, 255, 255))

    # top_coord_left[i+1][0],top_coord_left[i+1][1]=cam_world(int(p_x_left[i+1]),int(p_y_left[i+1]),mask1)
    check = 0

    for i in range(len(p_x_right) - 1):
        # top_coord_right[i][0],top_coord_right[i][1]=cam_world(int(p_x_right[i]),int(p_y_right[i]),mask1)
        top_coord_right[i][0], top_coord_right[i][1] = cam_tp(
            int(p_x_right[i]), int(p_y_right[i]), mask1)
        if i > 0 and (top_coord_right[i][0] -
                      center[0]) * (top_coord_right[i - 1][0] - center[0]) < 0:
            if check == 0:
                right_cut = (
                    center[0],
                    (top_coord_right[i][1] + top_coord_right[i - 1][1]) / 2)
            check = 1
        # if i>0:
        #     mask2=cv2.line(mask2,(int(x_new*20+w/2),int(800-z_new*20)),(int(x_prev*20+w/2),int(800-z_prev*20)),(255,255,255))
        mask1 = cv2.line(mask1, (int(p_x_right[i]), int(p_y_right[i])),
                         (int(p_x_right[i + 1]), int(p_y_right[i + 1])),
                         (255, 255, 255))
    abra, kadabra = cam_tp1(p_x_right, p_y_right, mask1)
    print(abra, kadabra)
    print(top_coord_right)
    if right_cut[1] > left_cut[1]:
        turn = "Left"
    elif right_cut[1] < left_cut[1]:
        turn = "Right"

    mask1 = cv2.putText(mask1, turn, (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 255, 255))

    # print(left_cut,right_cut)
    # print()
    # print(top_coord_right)

    # print(center)
    # print(top_coord_left[0][0],top_coord_left[-1][0])
    # print(top_coord_right[0][0],top_coord_right[-1][0])
    print(turn)
    mask1 = cv2.resize(mask1, (int((w + 2) / 2), int((h + 2) / 2)))
    cv2.imshow("mask1", mask1)
    if turn == "Right":
        # print("bezier"+str(1))
        return (1)
    elif turn == "Left":
        # print("bezier"+str(-1))
        return (-1)
    else:
        # print("bezier"+str(0))
        return (0)
Example #29
 def pascals_triangle(self):
     return la.pascal(self.num_aux_dm_indices + 1
                      if self.num_aux_dm_indices > self.truncation_level
                      else self.truncation_level + 1)
Example #30
 def test_big(self):
     p = pascal(50)
     assert p[-1, -1] == comb(98, 49, exact=True)
Example #31
 def test_big(self):
     p = pascal(50)
     assert_equal(p[-1, -1], comb(98, 49, exact=True))
Example #32
    def __init__(self, r, r_min, r_max, c, r_0=0.0, s=1.0, reduced=False):
        # remove zero high-order terms
        c = np.array(np.trim_zeros(c, 'b'), float)
        # if all coefficients are zero
        if len(c) == 0:
            # then both func and abel are also zero everywhere
            self.func = np.zeros_like(r)
            self.abel = self.func
            return
        # polynomial degree
        K = len(c) - 1

        if reduced:
            # rescale r to [0, 1] (to avoid FP overflow)
            r = r / r_max
            r_0 /= r_max
            s /= r_max
            abel_scale = r_max
            r_min /= r_max
            r_max = 1.0

        if s != 1.0:
            # apply stretch
            S = np.cumprod([1.0] + [1.0 / s] * K)  # powers of 1/s
            c *= S
        if r_0 != 0.0:
            # apply shift
            P = pascal(1 + K, 'upper', False)  # binomial coefficients
            rk = np.cumprod([1.0] + [-float(r_0)] * K)  # powers of -r_0
            T = toeplitz([1.0] + [0.0] * K, rk)  # upper-diag. (-r_0)^{l - k}
            c = (P * T).dot(c)

        # whether even and odd powers are present
        even = np.any(c[::2])
        odd = np.any(c[1::2])

        # index limits
        n = r.shape[0]
        i_min = np.searchsorted(r, r_min)
        i_max = np.searchsorted(r, r_max)

        # Calculate all necessary variables within [0, r_max]

        # x, x^2
        x = r[:i_max]
        x2 = x * x

        # integration limits y = sqrt(r^2 - x^2) or 0
        def sqrt0(x): return np.sqrt(x, np.zeros_like(x), where=x > 0)
        y_up = sqrt0(r_max * r_max - x2)
        y_lo = sqrt0(r_min * r_min - x2)

        # y r^k |_lo^up
        # (actually only even are needed for "even", and only odd for "odd")
        Dyr = np.outer(np.cumprod([1.0] + [r_max] * K), y_up) - \
              np.outer(np.cumprod([1.0] + [r_min] * K), y_lo)

        # ln(r + y) |_lo^up, only for odd k
        if odd:
            # ln x for x > 0, otherwise 0
            def ln0(x): return np.log(x, np.zeros_like(x), where=x > 0)
            Dlnry = ln0(r_max + y_up) - \
                    ln0(np.maximum(r_min, x) + y_lo)

        # One-sided Abel integral \int_lo^up r^k dy.
        def a(k):
            odd_k = k % 2
            # max. x power
            K = k - odd_k  # (k - 1 for odd k)
            # generate coefficients for all x^m r^{k - m} terms
            # (only even indices are actually used;
            #  for odd k, C[K] is also used for x^{k+1} ln(r + y))
            C = [0] * (K + 1)
            C[0] = 1 / (k + 1)
            for m in range(k, 1, -2):
                C[k - m + 2] = C[k - m] * m / (m - 1)
            # sum all terms using Horner's method in x
            a = C[K] * Dyr[k - K]
            if odd_k:
                a += C[K] * x2 * Dlnry
            for m in range(K - 2, -1, -2):
                a = a * x2 + C[m] * Dyr[k - m]
            return a

        # Generate the polynomial function
        func = np.zeros(n)
        span = slice(i_min, i_max)
        # (using Horner's method)
        func[span] = c[K]
        for k in range(K - 1, -1, -1):
            func[span] = func[span] * x[span] + c[k]
        self.func = func

        # Generate its Abel transform
        abel = np.zeros(n)
        span = slice(0, i_max)
        if reduced:
            c *= abel_scale
        for k in range(K + 1):
            if c[k]:
                abel[span] += c[k] * 2 * a(k)
        self.abel = abel
Example #33
 def pascals_triangle(self):
     return la.pascal(self.num_aux_dm_indices+1 if self.num_aux_dm_indices > self.truncation_level else self.truncation_level+1)
Example #34
        def cossin(self):
            r"""
            Radial distributions of
            :math:`\cos^n \theta \cdot \sin^m \theta` terms
            (*n* + *m* = **order**, and *n* + *m* = **order** − 1 for odd
            orders, with *m* always even).

            For **order** = 0:

                :math:`\cos^0 \theta` is the total intensity.

            For **order** = 1:

                :math:`\cos^0 \theta` is the total intensity,

                :math:`\cos^1 \theta` is the antisymmetric component.

            For **order** = 2

                :math:`\sin^2 \theta` corresponds to “perpendicular” (⟂)
                transitions,

                :math:`\cos^2 \theta` corresponds to “parallel” (∥)
                transitions.

            For **order** = 4

                :math:`\sin^4 \theta` corresponds to ⟂,⟂,

                :math:`\cos^2 \theta \cdot \sin^2 \theta` corresponds
                to ∥,⟂ and ⟂,∥,

                :math:`\cos^4 \theta` corresponds to ∥,∥.

            And so on.

            Notice that higher orders can represent lower orders as well:

               :math:`\sin^2 \theta + \cos^2 \theta= \cos^0 \theta
               \quad` (⟂ + ∥ = 1),

               :math:`\sin^4 \theta + \cos^2 \theta \cdot \sin^2 \theta
               = \sin^2 \theta \quad` (⟂,⟂ + ∥,⟂ = ⟂,⟂ + ⟂,∥ = ⟂),

               :math:`\cos^2 \theta \cdot \sin^2 \theta + \cos^4 \theta
               = \cos^2 \theta \quad` (∥,⟂ + ∥,∥ =  ⟂,∥ + ∥,∥ = ∥),

               and so forth.

            Returns
            -------
            cosnsinm : (# terms) × (rmax + 1) numpy array
                radial dependences of the :math:`\cos^n \theta \cdot \sin^m
                \theta` terms, ordered from lower to higher :math:`\cos \theta`
                powers
            """
            # conversion matrix (cos^k → cos^n sin^m) for even k
            CS = np.flip(pascal(1 + self.order // 2, 'upper'))
            # apply to all radii
            if self.odd:
                cs = np.empty_like(self.cn)
                # even powers
                cs[::2] = CS.dot(self.cn[::2])
                # odd powers
                if self.order % 2 == 0:  # even orders have
                    CS = CS[1:, 1:]  # one less odd term
                cs[1::2] = CS.dot(self.cn[1::2])
            else:
                cs = CS.dot(self.cn)
            return cs
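For illustration, a minimal sketch of the order = 2 case (using numpy and scipy.linalg.pascal, with made-up radial values for the cos^0 and cos^2 terms): the flipped upper Pascal matrix converts cos-power coefficients into sin^2/cos^2 components, consistent with c0 + c2*cos^2(theta) = c0*sin^2(theta) + (c0 + c2)*cos^2(theta).

import numpy as np
from scipy.linalg import pascal

order = 2
CS = np.flip(pascal(1 + order // 2, 'upper'))   # [[1, 0], [1, 1]]
cn = np.array([3.0, 1.0])                       # hypothetical cos^0 and cos^2 radial values
print(CS.dot(cn))                               # [3. 4.]: the sin^2 and cos^2 components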
Example #35
    def __init__(self, r, cos, r_min, r_max, c, r_0=0.0, s=1.0):
        if r.shape != cos.shape:
            raise ValueError('Shapes of r and cos arrays must be equal.')

        # trim negative r limits
        if r_max <= 0:
            # both func and abel must be zero everywhere
            self.func = np.zeros_like(r)
            self.abel = np.zeros_like(r)
            return
        if r_min < 0:
            r_min = 0

        c = np.array(c, dtype=float)  # convert / make copy
        if np.ndim(c) != 2:
            raise ValueError('Coefficients array c must be 2-dimensional.')
        # highest cos power with non-zero coefficient
        N = c.nonzero()[1].max(initial=-1)
        if N < 0:  # all coefficients are zero
            # so both func and abel are also zero everywhere
            self.func = np.zeros_like(r)
            self.abel = np.zeros_like(r)
            return
        # for each cos power: highest r power with non-zero coefficient
        M = [a.nonzero()[0].max(initial=-1) for a in c.T]

        if s != 1.0:
            # apply stretch
            S = np.cumprod([1.0] + [1.0 / s] * max(M))  # powers of 1/s
            c *= np.array([S]).T
        if r_0 != 0.0:
            # apply shift
            m = max(M)
            P = pascal(1 + m, 'upper', False)  # binomial coefficients
            rm = np.cumprod([1.0] + [-float(r_0)] * m)  # powers of -r_0
            T = toeplitz([1.0] + [0.0] * m, rm)  # upper-diag. (-r_0)^{i - j}
            c = (P * T).dot(c)

        rfull, cosfull = r, cos  # (r and cos will be limited below)

        # Generate the polynomial function
        self.func = np.zeros_like(rfull)
        # limit calculations to relevant domain (outside it func = 0)
        dom = (r_min <= rfull) & (rfull < r_max)
        r = rfull[dom]
        cos = cosfull[dom]

        # sum all non-zero terms using Horner's method
        for n in range(N, -1, -1):
            if n < N:
                self.func[dom] *= cos
            if M[n] < 0:
                continue
            p = np.full_like(r, c[M[n], n])
            for m in range(M[n] - 1, -1, -1):
                p *= r
                if c[m, n]:
                    p += c[m, n]
            self.func[dom] += p

        # Generate its Abel transform
        self.abel = np.zeros_like(rfull)
        # relevant domain (outside it abel = 0)
        # (excluding r = 0 to avoid singularities, see below)
        dom = (0 < rfull) & (rfull < r_max)
        r = rfull[dom]
        cos = cosfull[dom]
        # values at lower and upper integration limits
        rho = [np.maximum(r, r_min),
               r_max]  # = max(r, r_max) within domain
        z = [np.sqrt(rho[0]**2 - r**2),
             np.sqrt(rho[1]**2 - r**2)]
        f = [np.minimum(r / r_min, 1.0) if r_min else 1.0,
             r / r_max]  # = min(r/r_max, 1) within domain

        # antiderivatives (recursive and used several times, thus cached)
        @cache
        def F(k, lim):  # lim: 0 = lower limit, 1 = upper limit
            if k < 0:
                return (z[lim] * f[lim]**k - k * F(k + 2, lim)) / (1 - k)
            if k == 0:
                return z[lim]
            if k == 1:
                return r * np.log(z[lim] + rho[lim])
            if k == 2:
                return r * np.arccos(f[lim])
            if k == 3:  # (using explicit expression for higher efficiency)
                return z[lim] * f[lim]
            # k > 3:  (in principle, k > 2)
            k -= 2
            return (z[lim] * f[lim]**k + (k - 1) * F(k, lim)) / k

        # sum all non-zero terms using Horner's method
        for n in range(N, -1, -1):
            if n < N:
                self.abel[dom] *= cos
            if M[n] < 0:
                continue
            p = c[M[n], n] * 2 * (F(n - M[n], 1) - F(n - M[n], 0))
            for m in range(M[n] - 1, -1, -1):
                p *= r
                if c[m, n]:
                    p += c[m, n] * 2 * (F(n - m, 1) - F(n - m, 0))
            self.abel[dom] += p
        # value at r = 0 (excluded above), nonzero only for n = 0
        dom = np.where(rfull == 0)
        for m in range(M[0] + 1):
            k = m + 1
            self.abel[dom] += c[m, 0] * 2 * (r_max**k - r_min**k) / k

        # help garbage collector to release cache memory
        F = None
Example #36
    def __init__(self, r, r_min, r_max, c, r_0=0.0, s=1.0, reduced=False):
        n = r.shape[0]

        # trim negative r limits
        if r_max <= 0:
            # both func and abel must be zero everywhere
            self.func = np.zeros(n)
            self.abel = np.zeros(n)
            return
        if r_min < 0:
            r_min = 0

        # remove zero high-order terms
        c = np.array(np.trim_zeros(c, 'b'), float)
        # if all coefficients are zero
        if len(c) == 0:
            # then both func and abel are also zero everywhere
            self.func = np.zeros(n)
            self.abel = np.zeros(n)
            return
        # polynomial degree
        K = len(c) - 1

        if reduced:
            # rescale r to [0, 1] (to avoid FP overflow)
            r = r / r_max
            r_0 /= r_max
            s /= r_max
            abel_scale = r_max
            r_min /= r_max
            r_max = 1.0

        if s != 1.0:
            # apply stretch
            S = np.cumprod([1.0] + [1.0 / s] * K)  # powers of 1/s
            c *= S
        if r_0 != 0.0:
            # apply shift
            P = pascal(1 + K, 'upper', False)  # binomial coefficients
            rk = np.cumprod([1.0] + [-float(r_0)] * K)  # powers of -r_0
            T = toeplitz([1.0] + [0.0] * K, rk)  # upper-diag. (-r_0)^{l - k}
            c = (P * T).dot(c)

        # whether even and odd powers are present
        even = np.any(c[::2])
        odd = np.any(c[1::2])

        # index limits
        i_min = np.searchsorted(r, r_min)
        i_max = np.searchsorted(r, r_max)

        # Calculate all necessary variables within [0, r_max]

        # x, x^2
        x = r[:i_max]
        x2 = x * x

        # integration limits y = sqrt(r^2 - x^2) or 0
        def sqrt0(x): return np.sqrt(x, np.zeros_like(x), where=x > 0)
        y_up = sqrt0(r_max * r_max - x2)
        y_lo = sqrt0(r_min * r_min - x2)

        # y r^k |_lo^up
        # (actually only even are needed for "even", and only odd for "odd")
        Dyr = np.outer(np.cumprod([1.0] + [r_max] * K), y_up) - \
              np.outer(np.cumprod([1.0] + [r_min] * K), y_lo)

        # ln(r + y) |_lo^up, only for odd k
        if odd:
            # ln x for x > 0, otherwise 0
            def ln0(x): return np.log(x, np.zeros_like(x), where=x > 0)
            Dlnry = ln0(r_max + y_up) - \
                    ln0(np.maximum(r_min, x) + y_lo)

        # One-sided Abel integral \int_lo^up r^k dy.
        def a(k):
            odd_k = k % 2
            # max. x power
            K = k - odd_k  # (k - 1 for odd k)
            # generate coefficients for all x^m r^{k - m} terms
            # (only even indices are actually used;
            #  for odd k, C[K] is also used for x^{k+1} ln(r + y))
            C = [0] * (K + 1)
            C[0] = 1 / (k + 1)
            for m in range(k, 1, -2):
                C[k - m + 2] = C[k - m] * m / (m - 1)
            # sum all terms using Horner's method in x
            a = C[K] * Dyr[k - K]
            if odd_k:
                a += C[K] * x2 * Dlnry
            for m in range(K - 2, -1, -2):
                a = a * x2 + C[m] * Dyr[k - m]
            return a

        # Generate the polynomial function
        func = np.zeros(n)
        span = slice(i_min, i_max)
        # (using Horner's method)
        func[span] = c[K]
        for k in range(K - 1, -1, -1):
            func[span] = func[span] * x[span] + c[k]
        self.func = func

        # Generate its Abel transform
        abel = np.zeros(n)
        span = slice(0, i_max)
        if reduced:
            c *= abel_scale
        for k in range(K + 1):
            if c[k]:
                abel[span] += c[k] * 2 * a(k)
        self.abel = abel
Example #37
def bezier_plot(coordinate_left,coordinate_right,mask_image,points,og_img):
    # LEFT
    h, w = mask_image.shape
    n=len(coordinate_left)
    pascal_coord=pascal(n,kind='lower')[-1]
    t=np.linspace(0,1,points)
    p_x_left=np.zeros(points)
    p_y_left=np.zeros(points)
    p_x_right=np.zeros(points)
    p_y_right=np.zeros(points)
    # print(coordinate_left)
    for i in range(n):
        k=(t**(n-1-i))
        l=(1-t)**i
        p_x_left+=np.multiply(l,k)*pascal_coord[i]*coordinate_left[n-1-i][0]
        p_y_left+=np.multiply(l,k)*pascal_coord[i]*coordinate_left[n-1-i][1]
        p_x_right+=np.multiply(l,k)*pascal_coord[i]*coordinate_right[n-1-i][0]
        p_y_right+=np.multiply(l,k)*pascal_coord[i]*coordinate_right[n-1-i][1]

    

    bottom_left=[p_x_left[p_y_left==max(p_y_left)],p_y_left[p_y_left==max(p_y_left)]]
    bottom_right=[p_x_right[p_y_right==max(p_y_right)],p_y_right[p_y_right==max(p_y_right)]]
    bottom_left=[bottom_left[0][0],bottom_left[1][0]]
    bottom_right=[bottom_right[0][0],bottom_right[1][0]]
    
    bottom_centre=[int((bottom_left[0]+bottom_right[0])/2),int((bottom_left[1]+bottom_right[1])/2)]
    search_point=bottom_centre.copy()
    search_point[1]=search_point[1]-150

    mask1=np.zeros((h,w))
    for i in range(len(p_x_left)-1):
        mask1=cv2.circle(mask1,(int(p_x_left[i]),int(p_y_left[i])),1,(255,255,255),5)
        mask1=cv2.line(mask1,(int(p_x_left[i]),int(p_y_left[i])),(int(p_x_left[i+1]),int(p_y_left[i+1])),(255,255,255))
    
    for i in range(len(p_x_right)-1):
        mask1=cv2.circle(mask1,(int(p_x_right[i]),int(p_y_right[i])),1,(255,255,255),5)
        mask1=cv2.line(mask1,(int(p_x_right[i]),int(p_y_right[i])),(int(p_x_right[i+1]),int(p_y_right[i+1])),(255,255,255))
    
    left_sort=np.argsort(p_x_left)
    p_x_left=p_x_left[left_sort]
    p_y_left=p_y_left[left_sort]

    right_sort=np.argsort(p_x_right)
    p_x_right=p_x_right[right_sort]
    p_y_right=p_y_right[right_sort]

    mask1=cv2.line(mask1,tuple(bottom_centre),tuple(search_point),(255,255,255))
    steer_val=0
    if search_point[0]<max(p_x_left):
        index=left_interval(search_point,p_x_left)
        if index:
            print(p_y_left[index],p_y_left[index-1])
            if p_y_left[index]>search_point[1] or p_y_left[index-1]>search_point[1]:
                print(p_x_left[index],p_y_left[index])
                print(index)
                steer_val=1
    else:
        mask1=cv2.putText(mask1,"No intersection",(50,50),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255))

    if search_point[0]>max(p_x_right):
        index=right_interval(search_point,p_x_right)
        if index:
            if p_y_right[index]>search_point[1] or p_y_right[index+1]>search_point[1]:
                print(p_x_right[index],p_y_right[index])
                print(index)
                steer_val=-1        
    else:
        mask1=cv2.putText(mask1,"No intersection",(50,60),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255))

    mask1=cv2.circle(mask1,tuple(bottom_centre),1,(255,255,255),5)
    mask1=cv2.circle(mask1,tuple(search_point),1,(255,255,255),5)
    cv2.imshow("bezier_op",mask1)
    return steer_val
Example #38
def subsets0(nsamp,n,p,*args):
    # optional positional arguments: (ncomb, msg)
    arug=args

    if len(arug)<1:
        ncomb=bc.bc0(n,p)
    else:
        ncomb=arug[0]

    if len(arug)<2:
        msg=1
    else:
        msg=arug[1]

    seq=np.arange(1,n+1)

    ######################################################    
    ## Combinatorial part to extract the subsamples
    # Key combinatorial variables used:
    # C = matrix containing the indexes of the subsets (subsamples) of size p
    # nselected = size(C,1), the number of all selected subsamples.
    # nselected = number of combinations on which the procedure is run.
    # rndsi = vector of nselected indexes randomly chosen between 1 e ncomb.
    Tcomb = 5e+7
    T2comb = 1e+8
    print("ncomb: "+str(ncomb)+" Tcomb: "+str(Tcomb))
   
    
    if (nsamp==0 or ncomb <= Tcomb):
        if nsamp==0:
            if (ncomb > 100000 and msg==1):
                print('Warning: you have specified to extract all subsets (ncomb='+str(ncomb)+')')
                print('The problem is combinatorial and the run may be very lengthy')
            nselected = ncomb
        else:
            nselected = nsamp

        # If nsamp = 0 matrix C contains the indexes of all possible subsets
        print("+++++++++++++++++++++++++++++++++++++**************************")
        
        C=cs.combsFS0(seq,p)
        #print(C)
    
        # If nsamp is > 0 just select randomly ncomb rows from matrix C
        if nsamp>0:
            print("ncomb: "+str(ncomb))
            print("nsamp: "+str(nsamp))
            # Extract without replacement nsamp elements from ncomb
            rndsi=rs.randsampleFS0(ncomb,nsamp,2)
            #print(rndsi)
            print("gooooooooooooooooooooooooooooooooooz")
            C = C[rndsi-1,:] ###########################################
            #print(C.shape)
            #print(C)
        #end
    else:

        print(" the codes have not been checked yet, they might be funny")

        
        if (nsamp > 100000 and msg==1):
            print('Warning: you have specified to extract more than 100000 subsets')
            print('To iterate so many times may be lengthy')
        nselected = nsamp
        
        usepascal=0

        if (ncomb>Tcomb and ncomb<T2comb):
        
            # Extract without replacement nsamp elements from ncomb
            rndsi=rs.randsampleFS0(ncomb,nsamp,2)
        
            if platform.system()=='Windows':
                print("yoooohoooo")


                stat = MEMORYSTATUSEX()
                ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
                print(stat.ullAvailPhys)



                
                #[~,sys]=memory
                bytesavailable=stat.ullAvailPhys
                if bytesavailable > 2*8*n**2:
                    pascalM=pascal(n)
                else:
                    pascalM=pascal(n)
                usepascal=1
            

        if n < 2**15:
            C=np.zeros((nselected,p),'int16')
        else:
            C=np.zeros((nselected,p),'int32')


        print(rndsi)

        for i in range(1,nselected+1):
            
            if (ncomb>Tcomb and ncomb<T2comb):
            
                if usepascal:
                    #print(" here n is: "+str(n))
                    #print(" here p is: "+str(p))
                    #print(" here rndsi[i] is: "+str(rndsi[i]))
                    #print(" here pascalM is: "+str(pascalM)) 
                    s, calls=lx.lexunrank0(n,p,rndsi[i-1],pascalM)
                    #print("sssssssssssssssssss")
                    #print(s)
                else:
                    s, calls=lx.lexunrank0(n,p,rndsi[i-1])

            else:
                
                s, calls=rs.randsampleFS0(n,p)

            C[i-1,:]=s
        


    
    return C, nselected
Example #39
 def time_pascal(self, size):
     sl.pascal(size)