def leaves_symbolic():

    n_min = 0
    n_max = 5

    t = np.zeros(n_max + 1)
    c = np.zeros(n_max + 1)

    for n in xrange(n_min, n_max + 1):
        t[n] = special.binom(2 * n, n) / (n + 1)
        c[n] = 0 if n < 1 else special.binom(2 * n - 2, n - 1)

    print "Calculated from solutions:"
    for n in xrange(n_min, n_max + 1):
        print "t[{0}] = {1}, c[{2}] = {3}".format(n, t[n], n, c[n])

    z = sp.symbols("z")
    t_of_z = 0
    c_of_z = 0
    for n in xrange(n_min, n_max + 1):
        t_of_z += t[n] * z ** n
        c_of_z += c[n] * z ** n
    print "T(z):"
    sp.pprint(t_of_z)
    print "C(z):"
    sp.pprint(c_of_z)
    print "z + 2 z T(z) C(z):"
    sp.pprint(sp.expand(sp.simplify(z + 2 * z * t_of_z * c_of_z)))
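A hedged sanity check, not part of the original snippet: the closed form used above for t[n] is the Catalan number binom(2n, n)/(n + 1), so it should satisfy the Catalan convolution recurrence C_{n+1} = sum_i C_i * C_{n-i}.

import numpy as np
from scipy import special

def catalan_recurrence_check(n_max=5):
    t = [special.binom(2 * n, n) / (n + 1) for n in range(n_max + 1)]
    for n in range(n_max):
        # convolution form of the Catalan recurrence
        assert np.isclose(t[n + 1], sum(t[i] * t[n - i] for i in range(n + 1)))
    return t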
Example #2
def bin_bals():
	mili_p_s = 1000
	s_p_m = 60
	m_p_h = 60
	h_p_d = 24
	d_p_y  = 365

	bins = 10 *s_p_m * m_p_h * h_p_d *d_p_y
	rates = [1,5,10,15,20,25,30,35,40,45,50,55,60]
	collisions_10= [(special.binom(mph * h_p_d*d_p_y,2) *1.0/ bins) for mph in rates]
	bins = 100 *s_p_m * m_p_h * h_p_d *d_p_y
	collisions_100= [(special.binom(mph * h_p_d*d_p_y,2) *1.0/ bins) for mph in rates]
	bins = 1000 *s_p_m * m_p_h * h_p_d *d_p_y
	collisions_1000= [(special.binom(mph * h_p_d*d_p_y,2) *1.0/ bins) for mph in rates]

	bins = 1 *s_p_m * m_p_h * h_p_d *d_p_y
	collisions_1= [(special.binom(mph * h_p_d*d_p_y,2) *1.0/ bins) for mph in rates]

	plt.plot(rates,collisions_1,'ko',label='1  second')
	plt.plot(rates,collisions_10,'bo',label='1/10 of a second')
	plt.plot(rates,collisions_100,'ro',label='1/100 of a second')
	plt.plot(rates,collisions_1000,'yo',label='1/1000 of a second')
	plt.gca().legend(loc='upper left',shadow=True)

	plt.show()
	for i in xrange(len(rates)):
		print "%s : %s"%(rates[i],collisions[i])
Example #3
def restricted_composition(n,k,h,p):
	result = 0
	#Formula C
	if isinstance(h, int) and isinstance(p, int) and p != n and h != 1:
		for j in range(0,k+1):
			result = result + summation_term_c32(k,j,n,h,p)
	#Formula 3.2
	elif isinstance(h, list) and isinstance(p, list):
		diff = [j-i for i,j in zip(h,p)]
		result = special.binom(n-sum(h)+k-1,k-1)
		for j in range(1,k+1):
			result = result + summation_term_32(n,k,j,sum(h),diff)
	#Formula A
	elif isinstance(h,list) and isinstance(p,int) and p==n:
		nom = 0
		for i in h:
			nom += i
		result = special.binom(n+k-1-nom,k-1)
	#Formula B
	elif isinstance(p,list) and h==1:
		result = special.binom(n-1,k-1)
		for j in range(1,k+1):
			result = result + summation_term_b3(n,k,j,p)
	#Formula D
	elif isinstance(h,int) and p==n:
		result = special.binom(n - k*(h - 1) - 1, k - 1)

	#Formula E
	elif h==1 and isinstance(p,int):
		for j in range(0,k+1):
			result += math.pow(-1, j)*special.binom(k, j)*special.binom(n - j*p - 1, k - 1)

	print result
Example #4
    def _contingency(self):
        """ Compute TP, FP, TN, and FN
		for any pair of points.
	"""
        tp_fp, tp = 0.0, 0.0
        tn_fn, fn = 0.0, 0.0
        for i, v1 in enumerate(self.ysize[:, :]):
            c1, n1 = v1
            imemb1 = self._get_members(c1)
            gsub1 = self.g[imemb1]
            if n1 >= 1:
                tp_fp += sp.binom(n1, 2)
                for j in np.unique(gsub1):
                    size_ij = np.sum(gsub1 == j)
                    if size_ij >= 2:
                        tp += sp.binom(size_ij, 2)
                    size_j = np.sum(self.g == j)
                    fn += size_ij * (size_j - size_ij)
                    # print("%d %d %d   %d\t\t%d"%(j, size_j, size_ij, size_ij*(size_j-size_ij), fn))

            if i < self.ysize.shape[0] - 1:
                tn_fn += n1 * np.sum(self.ysize[i + 1 :, 1])

        self.tp = tp
        self.fp = tp_fp - tp

        fn = fn / 2
        self.tn = tn_fn - fn
        self.fn = fn

        # sys.stderr.write("TP: %.0f  FP: %.0f \t TN: %.0f  FN: %.0f\n"%(self.tp,self.fp,self.tn,self.fn))
        return None
Example #5
    def expected_number_of_transactions_in_first_n_periods(self, n):
        """
        Expected number of transactions occurring across first n transaction opportunities. Used by Fader
        and Hardie to assess in-sample fit.

        Pr(X(n) = x|alpha, beta, gamma, delta)

        See (7) in Fader & Hardie 2010.

        Parameters:
            n: scalar, number of transaction opportunities

        Returns: DataFrame of predicted values, indexed by x

        """

        params = self._unload_params('alpha', 'beta', 'gamma', 'delta')
        alpha, beta, gamma, delta = params

        x_counts = self.data.groupby('frequency')['n_custs'].sum()
        x = asarray(x_counts.index)

        p1 = special.binom(n, x) * exp(special.betaln(alpha + x, beta + n - x) - special.betaln(alpha, beta) +
         special.betaln(gamma, delta + n) - special.betaln(gamma, delta))

        for j in np.arange(x.shape[0]):
            i = np.arange(x[j], n)
            p2 = np.sum(special.binom(i, x[j]) *
                        exp(special.betaln(alpha + x[j], beta + i - x[j]) - special.betaln(alpha, beta) +
                            special.betaln(gamma +1, delta + i) - special.betaln(gamma, delta)))
            p1[j] += p2

        idx = pd.Index(x, name='frequency')
        return DataFrame(p1 * x_counts.sum(), index=idx, columns=['model'])
Example #6
def mcnf_kernel(R, d, c, norm=True):
	n, m = R.size
		
	x_choose_d = {n : binom(n, d)}
	nCd = x_choose_d[n]
	
	X = R.T*R
	
	if c == d == 1:
		K = X
	else:
		K = co.matrix(0.0, (m, m))
		for i in range(m):
			#if (i+1) % 100 == 0:
			#	print "%d/%d" %(i+1,m)
			
			for j in range(i, m):
					
				xii = n - int(X[i,i])
				if xii not in x_choose_d:
					x_choose_d[xii] = binom(xii, d)
				
				xjj = n - int(X[j,j])
				if xjj not in x_choose_d:
					x_choose_d[xjj] = binom(xjj, d)
				
				xij = n - int(X[i,i]) - int(X[j,j]) + int(X[i,j])
				if xij not in x_choose_d:
					x_choose_d[xij] = binom(xij, d)
				
				r = nCd - x_choose_d[xii] - x_choose_d[xjj] + x_choose_d[xij]
				K[i,j] = K[j,i] = binom(r, c)
	
	return force_normalize(K) if norm else K
Example #7
def satisfy_campi_garatti_condition_raw(dimension, sample_size, drop_size,
                                 chance_constraint_rhs, confidence):
    return (spsp.binom(drop_size + dimension - 1, drop_size) *
            np.sum([spsp.binom(sample_size, i) *
                    chance_constraint_rhs**i *
                    (1. - chance_constraint_rhs)**(sample_size - i)
                    for i in range(drop_size + dimension - 1)])) < confidence
Example #8
def boxspline_gen(x1,x2,n,r1,r2):
    print(x1[5,5], x2[5,5])

    det = r1[0]*r2[1]-r1[1]*r2[0]
    x1 = -abs(x1); x2 = abs(x2);
    u = 1./det*( r2[1]*x1 - r2[0]*x2)
    v = 1./det*(-r1[1]*x1 + r1[0]*x2)

    ind = where(v>0)
    v[ind] = -v[ind]
    u[ind]=u[ind]+v[ind]

    ind = where(v>u/2)
    v[ind]=u[ind]-v[ind]

    print(u[5,5], v[5,5])

    val=np.zeros(np.shape(x1))
    for K in range(-n,n+1) :#mceil(u.max())):
        for L in range(-n,n+1) :#mceil(v.max())) :
            for i in range(0,min(n+K, n+L)+1):
                coeff=(-1.)**(K+L+i)*binom(n,i-K)*binom(n,i-L)*binom(n,i)
                for d in range (0,n) :
                    aux=abs(2./sqrt(3.)*(r1[1]*u+r2[1]*v)+K-L)
                    aux2=(2.*(r1[0]*u+r2[0]*v)-K-L-aux)/2
                    aux2[where(aux2<0)]=0
                    val = val + coeff*binom(n-1+d,d)/factorial(2*n-1+d)/factorial(n-1-d)\
                        * aux**(n-1-d)\
                        * aux2**(2*n-1+d)


    print(val[4:7,4:7])
    return val
Example #9
def cic_decim_bit_truncation(in_len, out_len, decim, cic_order, cic_delay, stage):
    """Returns how many bits to truncate from a stage in a CIC decmiator."""
    # Use the nomenclature from Hogenauer
    N = cic_order
    M = cic_delay
    R = decim
    Bin = in_len
    Bout = out_len

    # First define the CIC impulse response function
    ha = lambda j, k: sum([( (-1)**l * binom(N, l) * binom(N - j + k - R * M * l, k - R * M * l)) for l in range(0, int(math.floor(k / (R * M)) + 1))])
    hb = lambda j, k: (-1)**k * binom(2 * N + 1 - j, k)
    def h(j, k):
        if j in range(1, N + 1):
            return ha(j, k)
        else:
            return hb(j, k)

    # This is the variance error gain at stage j
    def F(j):
        if j in range(1, 2 * N + 1):
            return math.sqrt(sum([h(j, k) ** 2 for k in range(0, (R * M - 1) * N + j - 1)]))
        else:
            return 1.0
    
    # Find the maximum number of bits that would be truncated
    Bmax = cic_decim_max_bits(in_len, decim, cic_order, cic_delay)
    B2N1 = Bmax - Bout + 1
    # And find its variance
    sigma2N1 = math.sqrt((1/12.) * ((2 ** B2N1) ** 2))
    sigmaT2N1 = math.sqrt((sigma2N1 ** 2) * (F(2 * N + 1) ** 2))

    # Finally, compute how many bits to discard in this stage to keep the
    # overall error below the final stage's truncation
    return int(math.floor(-math.log(F(stage), 2) + math.log(sigmaT2N1, 2) + .5 * math.log(6./N, 2)))
Example #10
def summation_term_c32(k,j,n,t,w):
	temp1 = math.pow(-1,j)*special.binom(k,j)
	nom = n-k*(t-1)+j*(t-w-1)-1
	denom = k-1 
	if nom < denom:
		return 0
	result = temp1*special.binom(nom,denom)
	return result
Example #11
def B3Poly(i,n):
    """Restituisce il polinomio di Bernstein (i,n), implementato con
    numpy.polynomial e usando l'espressione in serie di potenze"""
    outPoly = P.Polynomial([0])
    for k in range(i,n+1):
        nextTerm = k*[0]
        nextTerm.append((-1)**(k-i) * binom(n,i) * binom(n-i,k-i))
        outPoly += nextTerm
    return outPoly
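A hedged check, assuming the corrected coefficient above and the same imports as the snippet (numpy.polynomial as P, scipy's binom): Bernstein polynomials of a fixed degree form a partition of unity, so summing B3Poly(i, n) over i and evaluating anywhere in [0, 1] should give 1.

import numpy as np

def check_partition_of_unity(n=4, t=0.3):
    # sum of all degree-n Bernstein polynomials evaluated at t
    total = sum(B3Poly(i, n)(t) for i in range(n + 1))
    assert np.isclose(total, 1.0)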
Example #12
def ApproximatedProbability (N, A, pattern, t):
	k = len(pattern)
	n = N - (t*k)
	print k,n
	denominator = A**N
	numerator = special.binom(n+t,t)*A**n
	print special.binom(n+t,t),A**n
	print 'Result:',numerator,'/',denominator,'=',
	return numerator / denominator
Example #13
def M(j, x):
    '''Probability Weighted Moments.

    x must be sorted.

    See [1] and eq. (32) in [2]
    '''
    n = len(x)
    return sum([x[i-1] * binom(i-1, j)/binom(n-1, j)
                for i in range(j+1, n+1)])/n
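A hedged usage sketch, not from the original module (it assumes scipy's binom is in scope, as the function above does): with the inclusive upper bound in the sum, the zeroth probability weighted moment M(0, x) reduces to the ordinary sample mean.

import numpy as np
x = np.sort(np.random.default_rng(0).normal(size=50))
assert np.isclose(M(0, x), x.mean())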
Example #14
def clebgor(j1, j2, j, m1, m2, m):
    """
    Parameters:    j1, j2, j: the angular momenta input
                   m1, m2, m: the z components of angular momenta input
    Returns:       The numerical value of the Clebsch-Gordan coeffcient.
    Remarks:       Note that in the sum none of the binomial coeffcients
                   can have negative values.  Thus, zmin is there to make
                   sure that the sums have a cut-off.
    """
    zmin = int(min([j1 - m1, j2 + m2]))
    J = j1 + j2 + j
    return (int(m1 + m2 == m) *
            int(np.abs(j1 - j2) <= j <= (j1 + j2)) *
            int(np.abs(m1) <= j1) *
            int(np.abs(m2) <= j2) *
            int(np.abs(m) <= j) *
            int((j1 + m1) >= 0.0) *
            int((j2 + m2) >= 0.0) *
            int((j + m) >= 0.0) *
            int(J >= 0) *
            np.sqrt(binom(2 * j1, J - 2 * j) *
                    binom(2 * j2, J - 2 * j) /
                    (binom(J + 1, J - 2 * j) *
                    binom(2 * j1, j1 - m1) *
                    binom(2 * j2, j2 - m2) *
                    binom(2 * j, j - m))) *
            np.sum([(-1) ** z * binom(J - 2 * j, z) *
                    binom(J - 2 * j2, j1 - m1 - z) *
                    binom(J - 2 * j1, j2 + m2 - z) for z in range(zmin + 1)]))
Example #15
    def solve_for_cs(D, T, deg):
        """Takes an input series of values (D) and uses them to solve Ax = b
        D = values
        T = spacing between D

        """

        # Create Q
        #   am = [1, 4, 6, 4, 1] (binomial array for deg 4)
        am = np.array([[binom(deg, i) for i in range(deg + 1)]])

        # cm = matrix of 1's and -1's (based on index, e.g. for even deg: 0,0 = 1; 0,1 = -1;, 0,2 = 1, etc)
        cm = (1 - 2*np.mod(np.sum(np.indices((deg+1, deg+1)), axis=0), 2*np.ones((deg+1, deg+1))))

        # Fix for odd deg (the array needs to be flipped... -1, 1 instead of 1, -1)
        if deg % 2:
            cm *= -1

        # A is square binomial matrix scaled by matrix of 1's and -1s (scalar of two from differentiation)
        Q = 2*am*am.T*cm

        # Create R
        #   R is matrix of exponents for (t-1) [[8, 7, 6, 5, 4], [7, 6, 5, 4, 3], ...[4, 3, 2, 1, 0]]
        R = np.flip(np.arange(deg+1) + np.arange(deg+1)[:,None])

        # Create S
        #   S is matrix of exponents for t [[0, 1, 2, 3, 4], [1, 2, 3, 4, 5], ...[4, 5, 6, 7, 8]]
        S = np.arange(deg+1) + np.arange(deg+1)[:,None]

        # Matrix A
        A = -1 * np.array(
            [np.sum(q * (T-1)**r * T**s) for q, r, s in zip(Q.ravel(), R.ravel(), S.ravel())]
        ).reshape((deg+1, deg+1))

        #   am is 1 row matrix of binomial coefficients
        am = np.array([binom(deg, i) for i in range(deg + 1)])

        # bm is matrix of 1's and -1's (based on index)
        bm = (-1 + 2*np.mod(np.sum(np.indices((1, deg+1)), axis=0).ravel(), 2 * np.ones((deg+1))))

        # U is binomial coefficients scaled by matrix of 1's and -1's (scalar of two from differentiation)
        U = 2*am*bm

        V = np.arange(deg, -1, -1)
        W = np.arange(deg + 1)

        # Vector b
        b = np.array(
            [np.sum(u * (T-1)**v * D*T**w) for u, v, w in zip(U.ravel(), V.ravel(), W.ravel())]
        ).reshape((deg+1, 1))

        # Solve Ax = b
        return solve(A, b).ravel()
Example #16
 def MinNumPoints(self):
     """
     Returns the minimum number of points still required.
     """
     from scipy.special import binom
     m = self.num_vars
     n = 1
     r = binom(m + n, n)
     while r < self.num_points:
         n += 1
         r = binom(m + n, n)
     self.degree = n
     return int(r - self.num_points)
def calcprob(d, n, t, h):
    """
    Return the probability for `h` hits in a roll of `n` fair, identical
    `d`-sided exploding dice, where every die equal or higher to `t` is
    deemed a hit.

    Taken from: http://math.stackexchange.com/a/1649514/11949
    """
    factor = (t-1)**n/d**(n+h)
    probsum = 0.0
    for k in range(0, max([h,n])+1 ):
        probsum += binom(n,k) * binom(n+h-k-1, h-k) * (d*(d-t)/(t-1))**k
    return factor * probsum
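A hedged usage sketch, not from the cited answer: compare calcprob against a quick Monte Carlo simulation of exploding dice (assuming a die explodes only on its maximum face, which is what the closed form above implies); the printed columns should roughly agree. Floats are passed for d and t to avoid integer division under Python 2.

import random

def simulate_hits(d, n, t, trials=20000, seed=0):
    rng = random.Random(seed)
    freq = {}
    for _ in range(trials):
        hits, pending = 0, n
        while pending:
            rolls = [rng.randint(1, d) for _ in range(pending)]
            hits += sum(r >= t for r in rolls)
            pending = sum(r == d for r in rolls)  # assume only the maximum face explodes
        freq[hits] = freq.get(hits, 0) + 1
    return {h: c / float(trials) for h, c in freq.items()}

sim = simulate_hits(6, 3, 5)
for h in range(1, 7):
    print(h, calcprob(6.0, 3, 5.0, h), sim.get(h, 0.0))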
Example #18
def chords_per_finger_count(buttons, rockers, fingers):
    # print "How many ways are there to put {fingers} fingers on {buttons} buttons and {rockers} rockers?".format(fingers = fingers, buttons = buttons, rockers = rockers)
    count = 0
    for fingers_on_buttons in range(fingers+1):
        fingers_on_rockers = fingers - fingers_on_buttons;
        if fingers_on_rockers <= rockers and fingers_on_buttons <= buttons:
            button_combinations = binom(buttons, fingers_on_buttons)
            # print "There are {} ways to put {} fingers on {} buttons".format(button_combinations, fingers_on_buttons, buttons)
            rocker_combinations =binom(rockers, fingers_on_rockers) * 2 ** (fingers_on_rockers)
            # print "There are {} ways to put {} fingers on {} rockers".format(rocker_combinations, fingers_on_rockers, rockers)
            count += button_combinations * rocker_combinations
            # print "If {fingers_on_buttons} are on the buttons and {fingers_on_rockers} are on the rockers, then there are {new_possibilities} ".format(fingers_on_buttons = fingers_on_buttons, fingers_on_rockers = fingers_on_rockers, new_possibilities = button_combinations * rocker_combinations)
    return count
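Hedged spot checks, not from the original source, assuming the corrected bound above and scipy's binom in scope: one finger on two plain buttons allows 2 chords, and one finger on a single two-way rocker also allows 2.

assert chords_per_finger_count(buttons=2, rockers=0, fingers=1) == 2
assert chords_per_finger_count(buttons=0, rockers=1, fingers=1) == 2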
def main():

    # number of balls in a bag
    N    = 450
    # Blue of them are blue, the rest are red
    Blue = 70
    
    # we are pulling balls out at random
    # the probability of successfully pulling the ball out is mu
    mu = 0.01

    # if we repeat the drawing experiment very many times, 
    # what is the expected number of blue balls we'll manage to pull out?
    expected = 0
    for blue  in range (Blue+1):
        for n  in range (blue, N+1): # n is the number of balls drawn
            expected += special.binom(Blue, blue)*special.binom(N-Blue, n-blue)*math.pow(mu,n)*math.pow(1-mu, N-n)*blue
    print "expected, calculated: ", expected

    expectedMIT = 0
    Red = N-Blue
    
    for  blue  in range (Blue+1):
        expectedMIT += blue*beta_binomial (blue, Blue, mu*Red, Red)
    print "expectedMIT, calculated: ", expectedMIT
   
    # now try the same thing through a simulation
    bag = []
    for i in range(N): 
        bag.append("r")

    for i in range(Blue):
        done = False
        while not done:
            rand_pos = random.randint(0,N-1)
            if bag[rand_pos] == "r":
                bag[rand_pos] = "b"
                done = True

    avg = 0.0
    # say we do 1000 experiments
    number_of_exps = 1000
    for exp in range(number_of_exps):
        count = 0
        for i  in range(N):
            if random.random() < mu:
                # we have chosen this position ("pulled out this ball")
                if bag[i] == "b": count +=1
        avg += count
    avg /= number_of_exps
    print "avg in the experiment: ", avg
Example #20
def spin_q_function(rho, theta, phi):
    """Husimi Q-function for spins.

    Parameters
    ----------

    state : qobj
        A state vector or density matrix for a spin-j quantum system.

    theta : array_like
        theta-coordinates at which to calculate the Q function.

    phi : array_like
        phi-coordinates at which to calculate the Q function.

    Returns
    -------

    Q, THETA, PHI : 2d-array
        Values representing the spin Q function at the values specified
        by THETA and PHI.

    """

    if rho.type == 'bra':
        rho = rho.dag()

    if rho.type == 'ket':
        rho = ket2dm(rho)

    J = rho.shape[0]
    j = (J - 1) / 2

    THETA, PHI = meshgrid(theta, phi)

    Q = np.zeros_like(THETA, dtype=complex)

    for m1 in arange(-j, j+1):

        Q += binom(2*j, j+m1) * cos(THETA/2) ** (2*(j-m1)) * sin(THETA/2) ** (2*(j+m1)) * \
            rho.data[int(j-m1), int(j-m1)]

        for m2 in arange(m1+1, j+1):

            Q += (sqrt(binom(2*j, j+m1)) * sqrt(binom(2*j, j+m2)) *
                  cos(THETA/2) ** (2*j-m1-m2) * sin(THETA/2) ** (2*j+m1+m2)) * \
                (exp(1j * (m2-m1) * PHI) * rho.data[int(j-m1), int(j-m2)] +
                 exp(1j * (m1-m2) * PHI) * rho.data[int(j-m2), int(j-m1)])

    return Q.real, THETA, PHI
Example #21
File: e.py  Project: dulte/FYS2160
def numberOfMacroStates(NA,NB,q):

    numMacrostates = np.zeros(q+1)
    probability = np.zeros_like(numMacrostates)

    for i in range(q+1):
        qa = i
        qb = q-qa

        numMacrostates[i] = binom(qa + NA -1,qa)*binom(qb + NB -1,qb)

    probability = numMacrostates/np.sum(numMacrostates)

    return numMacrostates, probability
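A hedged consistency check, not from the original project (it assumes binom and numpy are in scope, as the function above does): by the Vandermonde convolution, the macrostate multiplicities of the two coupled Einstein solids should sum to the total multiplicity binom(q + NA + NB - 1, q), and the probabilities should sum to 1.

import numpy as np
from scipy.special import binom

num, prob = numberOfMacroStates(NA=3, NB=4, q=6)
assert np.isclose(num.sum(), binom(6 + 3 + 4 - 1, 6))
assert np.isclose(prob.sum(), 1.0)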
Example #22
def holo_diff_first_order(x,y,N):
    from numpy import zeros
    M = (N-1)/2
    m = (N-3)/2
    out = zeros(x.shape[0])
    L=len(x)

    for k in np.arange(1,M+1,1):
        zaehler = y[M+k:L-M+k] - y[M-k:L-M-k]
        nenner  = x[M+k:L-M+k] - x[M-k:L-M-k]
        nenner[ np.where(nenner == 0) ] = 1e-3    #to avoid trouble when x is constant
        c       = (binom(2*m , m -k +1) - binom(2*m,m-k-1)) / 2**(2*m+1)
        out[M:-M] = out[M:-M] + 2 * k * c * zaehler/nenner 
    return out
Example #23
def mSel(a, mu, s, k, m):
    hyperg = hyp1f1(k + mu, m + 2.0 * mu, s)
    ret = binom(m, k)
    ret /= zSel(a, mu, s)
    ret *= math.exp(gammaln(k + mu) + gammaln(m - k + mu) - gammaln(m + 2.0 * mu))
    ret *= (1.0 - math.exp(-(1.0 - a) * s) * hyperg)
    return ret
Example #24
    def pdf(self):
        r"""
        Generate the vector of probabilities for the Beta-binomial
        (n, a, b) distribution.

        The Beta-binomial distribution takes the form

        .. math::
            p(k \,|\, n, a, b) =
            {n \choose k} \frac{B(k + a, n - k + b)}{B(a, b)},
            \qquad k = 0, \ldots, n,

        where :math:`B` is the beta function.

        Parameters
        ----------
        n : scalar(int)
            First parameter to the Beta-binomial distribution
        a : scalar(float)
            Second parameter to the Beta-binomial distribution
        b : scalar(float)
            Third parameter to the Beta-binomial distribution

        Returns
        -------
        probs: array_like(float)
            Vector of probabilities over k

        """
        n, a, b = self.n, self.a, self.b
        k = np.arange(n + 1)
        probs = binom(n, k) * beta(k + a, n - k + b) / beta(a, b)
        return probs
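A standalone, hedged sketch of the same Beta-binomial pmf, assuming the binom and beta used by the method above are scipy.special.binom and scipy.special.beta; the probabilities over k = 0..n should sum to 1.

import numpy as np
from scipy.special import binom, beta

def beta_binomial_pdf(n, a, b):
    # p(k | n, a, b) = C(n, k) * B(k + a, n - k + b) / B(a, b)
    k = np.arange(n + 1)
    return binom(n, k) * beta(k + a, n - k + b) / beta(a, b)

assert np.isclose(beta_binomial_pdf(10, 2.0, 3.0).sum(), 1.0)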
def loss_function(params, func=simulate_dynamics, time_scale=1, df=func_data):
    """Calculate the loss function.

    Parameters
    ----------
    params : array-like, parameters that determine starting state of population
    func : model begin fit to the data
    time_scale : int (optional), the number of discrete time steps per year
    number : int, (optional), the number of discretized states and actions
             default is one "generation" of the dynamics per year
    df : data to use, default is from func_data

    Returns
    ----------
    negLL : float, negative log likelihood to be minimized
    """
    # Simulate the dynamics from 1100 to 1500
    X_sol, Y_sol, prior = func(params, n_years=401, time_scale=time_scale)
    # Get p(m_2) over time
    m2_sol = np.asarray([prior.dot(line)[0,0] for line in X_sol[:,1::2]])
    # Append solution trajectory to data frame
    # Use approximate value when trajectory reaches boundary
    df['p'] = np.minimum(m2_sol.ravel(), np.repeat(.9999999, len(m2_sol)))
    # Add binomial coefficient
    df['binom'] = binom(df.ones + df.zeros, df.ones)
    # Calculate log-likelihood for
    df['LL'] = np.log(df.binom) + (df.ones * np.log(df.p)) + (df.zeros * np.log(1 - df.p))
    # Only use years that have tokens
    df = df[df['has.tokens'] == 1]
    # Calculate log likelihood given m2_sol
    LL = np.sum(df['LL'])
    negLL = -1*LL
    # Minimizing negative log-likelihood is equivalent to maximizing log-likelihood
    return negLL
Example #26
def coverage_probability(full_amount, cover_amount, number_of_data):
    if cover_amount == full_amount:
        return 0.0, 0.0  # since the full range is already covered, the coverage probability would be 100%
        # but as this indicates that no significant rule can be determined, 100% is treated as 0%

    p = float(0)
    for k in range(int(full_amount - 1)):
        p += ((-1) ** k) * special.binom(full_amount, full_amount - k) * ((
                                                                              full_amount - k) / full_amount) ** number_of_data

    pc = float(0)
    for k in range(int(cover_amount - 1)):
        pc += ((-1) ** k) * special.binom(cover_amount, cover_amount - k) * ((
                                                                                 cover_amount - k) / cover_amount) ** number_of_data

    return p, pc
def herald_on_N(N,Mmax,t,tt,s,nmax=20):
    """Claculates the density matrix for a state heralded on N photons in arm a
    up to Mmax photons and after going losses t and tt in arms a and b and where the squeezing parameters
    are in the list s"""
    m=len(s)
    if m==1:
        return herald_on_Nsm(N,0,Mmax,t,tt,s[0],nmax=nmax)
    else:
        total_number=special.binom(N+m-1,N)
        combinations=comb(N,m)
        #print total_number
        ci=np.empty((m,N+1,Mmax+1))
        for i in range(m):
            for n in range(N+1):
                ci[i,n]=herald_on_Nsm(n,0,Mmax,t,tt,s[i],nmax)
            #print len(ci[i])
        dmindices=itertools.product(range(Mmax+1),repeat=m)
        dm=np.zeros([Mmax+1 for i in range(m)])
        for i in dmindices:
            ssum=0.0
            for j in combinations:
                prod=1.0
                for k in range(m):
                    prod*=ci[k,j[k],i[k]]
                ssum+=prod
            dm[i]=ssum
        return dm
Example #28
    def getBetaSampleShift(self, y, N, j):
        """
        Construct beta sample using shifting procedure.

        Parameters
        ----------  
        y : array
            Values from which to estimate noise
        N : int
            Last order of the Taylor series taken into account
        j : int
            Jump parameter (>0)

        Returns
        -------
        Beta sample : array
            An array holding all available beta values.
        """
        # Calculate the required coefficients (a_k)
        self.ak = self.get_ak(N)

        # Required chunk length to obtain realization
        # of beta = (N+2) + (N-1)*(j-1) = (N+1)*j + 1
        cl = (N + 1) * j + 1

        b = np.zeros(len(y) - cl + 1)

        for i in smo.range(len(y) - cl + 1):
            # Realizations of the beta sample
            b[i] = np.sum(y[i:i + cl:j] * self.ak)

        # Normalize to obtain width of sigma_0
        b /= np.sqrt(ss.binom(2 * N + 2, N + 1))
        return b
def fourWay_ew_collisionRate(b,k,c,phi):
    '''
    compute the rate of (b;k[1],...,k[len(k)];s)-collisions
    since s = b -sum(k), it does not need to be an argument
    it is assumed that all entries in the vector k are integers greater
    than 0, and that len(k) < 4
    '''
#    k = [x for x in k if x>1]
#    K = sum(k)
#    if all([i==1 for i in k]) or K > b or K < 2 :
#        return 0
#    else:
#        r = len(k)
#        K = sum(k)
#        return sum([binom(b-K,l)*lambda_ew_collisionRate(b,K,c,phi)*np.prod(range(4,4-(r+l),-1))/4.0**(K+l) for l in range(0,4-r+1)])
#        
#        P_k = multinomial(K,k)/(4.0**K) * multinomial(len(k)-k.count(0),[k.count(i) for i in range(1,K+1)])
#        print P_k,multinomial(K,k),(4.0**K),multinomial(len(k),[k.count(i) for i in range(1,K+1)])
        # The last factor in the above, counts the number of different ways
        # to arrange the numbers k[1],...k[len(k)]
#        return P_k * lambda_ew_collisionRate(b,K,c,phi)

    k = [x for x in k if x>1] #remove all 1 and 0 entries from k
    K = sum(k) #Total number of affected blocks
#    print b,k,K
    if all([i==1 for i in k]) or K > b or K < 2 :
        return 0
    else:
        r = len(k)
        s = b-K
        l_max = min(4-r,s)
        return sum([(binom(s,l) * lambda_ew_collisionRate(b,K+l,c,phi))*(fallingFactorial(4,l+r)/(4.0**(K+l))) for l in range(0,l_max+1)])
Example #30
def monotone_disjunctive_kernel(X,T=None,d=2):
    X, T = check_X_T(X, T)
    L = np.dot(X,T.T)
    n = X.shape[1]

    XX = np.dot(X.sum(axis=1).reshape(X.shape[0],1), np.ones((1,T.shape[0])))
    TT = np.dot(T.sum(axis=1).reshape(T.shape[0],1), np.ones((1,X.shape[0])))
    N_x = n - XX
    N_t = n - TT
    N_xz = N_x - TT.T + L

    N_d = binom(n, d)
    N_x = binom(N_x,d)
    N_t = binom(N_t,d)
    N_xz = binom(N_xz,d)
    return (N_d - N_x - N_t.T + N_xz)
Example #31
    def explain(self, incoming_instance, **kwargs):
        # convert incoming input to a standardized iml object
        instance = convert_to_instance(incoming_instance)
        match_instance_to_data(instance, self.data)

        # find the feature groups we will test. If a feature does not change from its
        # current value then we know it doesn't impact the model
        self.varyingInds = self.varying_groups(instance.x)
        self.varyingFeatureGroups = [
            self.data.groups[i] for i in self.varyingInds
        ]
        self.M = len(self.varyingFeatureGroups)

        # find f(x)
        if self.keep_index:
            model_out = self.model.f(instance.convert_to_df())
        else:
            model_out = self.model.f(instance.x)
        if isinstance(model_out, (pd.DataFrame, pd.Series)):
            model_out = model_out.values
        self.fx = model_out[0]

        if not self.vector_out:
            self.fx = np.array([self.fx])

        # if no features vary then no feature has an effect
        if self.M == 0:
            phi = np.zeros((len(self.data.groups), self.D))
            phi_var = np.zeros((len(self.data.groups), self.D))

        # if only one feature varies then it has all the effect
        elif self.M == 1:
            phi = np.zeros((len(self.data.groups), self.D))
            phi_var = np.zeros((len(self.data.groups), self.D))
            diff = self.link.f(self.fx) - self.link.f(self.fnull)
            for d in range(self.D):
                phi[self.varyingInds[0], d] = diff[d]

        # if more than one feature varies then we have to do real work
        else:
            self.l1_reg = kwargs.get("l1_reg", "auto")

            # pick a reasonable number of samples if the user didn't specify how many they wanted
            self.nsamples = kwargs.get("nsamples", "auto")
            if self.nsamples == "auto":
                self.nsamples = 2 * self.M + 2**11

            # if we have enough samples to enumerate all subsets then ignore the unneeded samples
            self.max_samples = 2**30
            if self.M <= 30:
                self.max_samples = 2**self.M - 2
                if self.nsamples > self.max_samples:
                    self.nsamples = self.max_samples

            # reserve space for some of our computations
            self.allocate()

            # weight the different subset sizes
            num_subset_sizes = int(np.ceil((self.M - 1) / 2.0))
            num_paired_subset_sizes = int(np.floor((self.M - 1) / 2.0))
            weight_vector = np.array([(self.M - 1.0) / (i * (self.M - i))
                                      for i in range(1, num_subset_sizes + 1)])
            weight_vector[:num_paired_subset_sizes] *= 2
            weight_vector /= np.sum(weight_vector)
            log.debug("weight_vector = {0}".format(weight_vector))
            log.debug("num_subset_sizes = {0}".format(num_subset_sizes))
            log.debug("num_paired_subset_sizes = {0}".format(
                num_paired_subset_sizes))
            log.debug("M = {0}".format(self.M))

            # fill out all the subset sizes we can completely enumerate
            # given nsamples*remaining_weight_vector[subset_size]
            num_full_subsets = 0
            num_samples_left = self.nsamples
            group_inds = np.arange(self.M, dtype='int64')
            mask = np.zeros(self.M)
            remaining_weight_vector = copy.copy(weight_vector)
            for subset_size in range(1, num_subset_sizes + 1):

                # determine how many subsets (and their complements) are of the current size
                nsubsets = binom(self.M, subset_size)
                if subset_size <= num_paired_subset_sizes: nsubsets *= 2
                log.debug("subset_size = {0}".format(subset_size))
                log.debug("nsubsets = {0}".format(nsubsets))
                log.debug(
                    "self.nsamples*weight_vector[subset_size-1] = {0}".format(
                        num_samples_left *
                        remaining_weight_vector[subset_size - 1]))
                log.debug(
                    "self.nsamples*weight_vector[subset_size-1/nsubsets = {0}".
                    format(num_samples_left *
                           remaining_weight_vector[subset_size - 1] /
                           nsubsets))

                # see if we have enough samples to enumerate all subsets of this size
                if num_samples_left * remaining_weight_vector[
                        subset_size - 1] / nsubsets >= 1.0 - 1e-8:
                    num_full_subsets += 1
                    num_samples_left -= nsubsets

                    # rescale what's left of the remaining weight vector to sum to 1
                    if remaining_weight_vector[subset_size - 1] < 1.0:
                        remaining_weight_vector /= (
                            1 - remaining_weight_vector[subset_size - 1])

                    # add all the samples of the current subset size
                    w = weight_vector[subset_size - 1] / binom(
                        self.M, subset_size)
                    if subset_size <= num_paired_subset_sizes: w /= 2.0
                    for inds in itertools.combinations(group_inds,
                                                       subset_size):
                        mask[:] = 0.0
                        mask[np.array(inds, dtype='int64')] = 1.0
                        self.addsample(instance.x, mask, w)
                        if subset_size <= num_paired_subset_sizes:
                            mask[:] = np.abs(mask - 1)
                            self.addsample(instance.x, mask, w)
                else:
                    break
            log.info("num_full_subsets = {0}".format(num_full_subsets))

            # add random samples from what is left of the subset space
            samples_left = self.nsamples - self.nsamplesAdded
            log.debug("samples_left = {0}".format(samples_left))
            if num_full_subsets != num_subset_sizes:
                weight_left = np.sum(weight_vector[num_full_subsets:])
                rand_sample_weight = weight_left / samples_left
                log.info("weight_left = {0}".format(weight_left))
                log.info("rand_sample_weight = {0}".format(rand_sample_weight))
                remaining_weight_vector = weight_vector[num_full_subsets:]
                remaining_weight_vector /= np.sum(remaining_weight_vector)
                log.info("remaining_weight_vector = {0}".format(
                    remaining_weight_vector))
                log.info("num_paired_subset_sizes = {0}".format(
                    num_paired_subset_sizes))
                ind_set = np.arange(len(remaining_weight_vector))
                while samples_left > 0:
                    mask[:] = 0.0
                    np.random.shuffle(group_inds)
                    ind = np.random.choice(ind_set,
                                           1,
                                           p=remaining_weight_vector)[0]
                    mask[group_inds[:ind + num_full_subsets + 1]] = 1.0
                    samples_left -= 1
                    self.addsample(instance.x, mask, rand_sample_weight)

                    # add the complement sample
                    if samples_left > 0:
                        mask -= 1.0
                        mask[:] = np.abs(mask)
                        self.addsample(instance.x, mask, rand_sample_weight)
                        samples_left -= 1

            # execute the model on the synthetic samples we have created
            self.run()

            # solve then expand the feature importance (Shapley value) vector to contain the non-varying features
            phi = np.zeros((len(self.data.groups), self.D))
            phi_var = np.zeros((len(self.data.groups), self.D))
            for d in range(self.D):
                vphi, vphi_var = self.solve(self.nsamples / self.max_samples,
                                            d)
                phi[self.varyingInds, d] = vphi
                phi_var[self.varyingInds, d] = vphi_var

        if not self.vector_out:
            phi = np.squeeze(phi, axis=1)
            phi_var = np.squeeze(phi_var, axis=1)

        return phi
Example #32
 def check(N,K):
     S = range(N)
     assert binom(N,K) == length(sample(S, K, ordered=0, replace=0))
     #A01 = sample(S, K, ordered=0, replace=1)   # ???
     assert binom(N,K) * factorial(K) == length(sample(S, K, ordered=1, replace=0))
     assert N**K == length(sample(S, K, ordered=1, replace=1))
Example #33
 def test_nchoosek(self):
     assert nchoosek(3, 2) == binom(3, 2)
Example #34
    def explain(self, incoming_instance, **kwargs):
        # convert incoming input to a standardized iml object
        instance = convert_to_instance(incoming_instance)
        match_instance_to_data(instance, self.data)

        # find the feature groups we will test. If a feature does not change from its
        # current value then we know it doesn't impact the model
        self.varyingInds = self.varying_groups(instance.x)
        if self.data.groups is None:
            self.varyingFeatureGroups = np.array([i for i in self.varyingInds])
            self.M = self.varyingFeatureGroups.shape[0]
        else:
            self.varyingFeatureGroups = [self.data.groups[i] for i in self.varyingInds]
            self.M = len(self.varyingFeatureGroups)
            groups = self.data.groups
            # convert to numpy array as it is much faster if not jagged array (all groups of same length)
            if self.varyingFeatureGroups and all(len(groups[i]) == len(groups[0]) for i in self.varyingInds):
                self.varyingFeatureGroups = np.array(self.varyingFeatureGroups)
                # further performance optimization in case each group has a single value
                if self.varyingFeatureGroups.shape[1] == 1:
                    self.varyingFeatureGroups = self.varyingFeatureGroups.flatten()

        # find f(x)
        if self.keep_index:
            model_out = self.model.f(instance.convert_to_df())
        else:
            model_out = self.model.f(instance.x)
        if isinstance(model_out, (pd.DataFrame, pd.Series)):
            model_out = model_out.values
        self.fx = model_out[0]

        if not self.vector_out:
            self.fx = np.array([self.fx])

        # if no features vary then no feature has an effect
        if self.M == 0:
            phi = np.zeros((self.data.groups_size, self.D))
            phi_var = np.zeros((self.data.groups_size, self.D))

        # if only one feature varies then it has all the effect
        elif self.M == 1:
            phi = np.zeros((self.data.groups_size, self.D))
            phi_var = np.zeros((self.data.groups_size, self.D))
            diff = self.link.f(self.fx) - self.link.f(self.fnull)
            for d in range(self.D):
                phi[self.varyingInds[0],d] = diff[d]

        # if more than one feature varies then we have to do real work
        else:
            self.l1_reg = kwargs.get("l1_reg", "auto")

            # pick a reasonable number of samples if the user didn't specify how many they wanted
            self.nsamples = kwargs.get("nsamples", "auto")
            if self.nsamples == "auto":
                self.nsamples = 2 * self.M + 2**11

            # if we have enough samples to enumerate all subsets then ignore the unneeded samples
            self.max_samples = 2 ** 30
            if self.M <= 30:
                self.max_samples = 2 ** self.M - 2
                if self.nsamples > self.max_samples:
                    self.nsamples = self.max_samples

            # reserve space for some of our computations
            self.allocate()

            # weight the different subset sizes
            num_subset_sizes = int(np.ceil((self.M - 1) / 2.0))
            num_paired_subset_sizes = int(np.floor((self.M - 1) / 2.0))
            weight_vector = np.array([(self.M - 1.0) / (i * (self.M - i)) for i in range(1, num_subset_sizes + 1)])
            weight_vector[:num_paired_subset_sizes] *= 2
            weight_vector /= np.sum(weight_vector)
            log.debug("weight_vector = {0}".format(weight_vector))
            log.debug("num_subset_sizes = {0}".format(num_subset_sizes))
            log.debug("num_paired_subset_sizes = {0}".format(num_paired_subset_sizes))
            log.debug("M = {0}".format(self.M))

            # fill out all the subset sizes we can completely enumerate
            # given nsamples*remaining_weight_vector[subset_size]
            num_full_subsets = 0
            num_samples_left = self.nsamples
            group_inds = np.arange(self.M, dtype='int64')
            mask = np.zeros(self.M)
            remaining_weight_vector = copy.copy(weight_vector)
            for subset_size in range(1, num_subset_sizes + 1):

                # determine how many subsets (and their complements) are of the current size
                nsubsets = binom(self.M, subset_size)
                if subset_size <= num_paired_subset_sizes: nsubsets *= 2
                log.debug("subset_size = {0}".format(subset_size))
                log.debug("nsubsets = {0}".format(nsubsets))
                log.debug("self.nsamples*weight_vector[subset_size-1] = {0}".format(
                    num_samples_left * remaining_weight_vector[subset_size - 1]))
                log.debug("self.nsamples*weight_vector[subset_size-1]/nsubsets = {0}".format(
                    num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets))

                # see if we have enough samples to enumerate all subsets of this size
                if num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets >= 1.0 - 1e-8:
                    num_full_subsets += 1
                    num_samples_left -= nsubsets

                    # rescale what's left of the remaining weight vector to sum to 1
                    if remaining_weight_vector[subset_size - 1] < 1.0:
                        remaining_weight_vector /= (1 - remaining_weight_vector[subset_size - 1])

                    # add all the samples of the current subset size
                    w = weight_vector[subset_size - 1] / binom(self.M, subset_size)
                    if subset_size <= num_paired_subset_sizes: w /= 2.0
                    for inds in itertools.combinations(group_inds, subset_size):
                        mask[:] = 0.0
                        mask[np.array(inds, dtype='int64')] = 1.0
                        self.addsample(instance.x, mask, w)
                        if subset_size <= num_paired_subset_sizes:
                            mask[:] = np.abs(mask - 1)
                            self.addsample(instance.x, mask, w)
                else:
                    break
            log.info("num_full_subsets = {0}".format(num_full_subsets))

            # add random samples from what is left of the subset space
            nfixed_samples = self.nsamplesAdded
            samples_left = self.nsamples - self.nsamplesAdded
            log.debug("samples_left = {0}".format(samples_left))
            if num_full_subsets != num_subset_sizes:
                remaining_weight_vector = copy.copy(weight_vector)
                remaining_weight_vector[:num_paired_subset_sizes] /= 2 # because we draw two samples each below
                remaining_weight_vector = remaining_weight_vector[num_full_subsets:]
                remaining_weight_vector /= np.sum(remaining_weight_vector)
                log.info("remaining_weight_vector = {0}".format(remaining_weight_vector))
                log.info("num_paired_subset_sizes = {0}".format(num_paired_subset_sizes))
                ind_set = np.random.choice(len(remaining_weight_vector), 4 * samples_left, p=remaining_weight_vector)
                ind_set_pos = 0
                used_masks = {}
                while samples_left > 0 and ind_set_pos < len(ind_set):
                    mask.fill(0.0)
                    ind = ind_set[ind_set_pos] # we call np.random.choice once to save time and then just read it here
                    ind_set_pos += 1
                    subset_size = ind + num_full_subsets + 1
                    mask[np.random.permutation(self.M)[:subset_size]] = 1.0

                    # only add the sample if we have not seen it before, otherwise just
                    # increment a previous sample's weight
                    mask_tuple = tuple(mask)
                    new_sample = False
                    if mask_tuple not in used_masks:
                        new_sample = True
                        used_masks[mask_tuple] = self.nsamplesAdded
                        samples_left -= 1
                        self.addsample(instance.x, mask, 1.0)
                    else:
                        self.kernelWeights[used_masks[mask_tuple]] += 1.0

                    # add the complement sample
                    if samples_left > 0 and subset_size <= num_paired_subset_sizes:
                        mask[:] = np.abs(mask - 1)

                        # only add the sample if we have not seen it before, otherwise just
                        # increment a previous sample's weight
                        if new_sample:
                            samples_left -= 1
                            self.addsample(instance.x, mask, 1.0)
                        else:
                            # we know the complement sample is the next one after the original sample, so + 1
                            self.kernelWeights[used_masks[mask_tuple] + 1] += 1.0

                # normalize the kernel weights for the random samples to equal the weight left after
                # the fixed enumerated samples have been already counted
                weight_left = np.sum(weight_vector[num_full_subsets:])
                log.info("weight_left = {0}".format(weight_left))
                self.kernelWeights[nfixed_samples:] *= weight_left / self.kernelWeights[nfixed_samples:].sum()

            # execute the model on the synthetic samples we have created
            self.run()

            # solve then expand the feature importance (Shapley value) vector to contain the non-varying features
            phi = np.zeros((self.data.groups_size, self.D))
            phi_var = np.zeros((self.data.groups_size, self.D))
            for d in range(self.D):
                vphi, vphi_var = self.solve(self.nsamples / self.max_samples, d)
                phi[self.varyingInds, d] = vphi
                phi_var[self.varyingInds, d] = vphi_var

        if not self.vector_out:
            phi = np.squeeze(phi, axis=1)
            phi_var = np.squeeze(phi_var, axis=1)

        return phi
Example #35
def P(L, n, s):
    return binom(n - 1, L - 1) * ((1 / s)**(L - 1)) * ((1 - (1 / s))**(n - L))
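A hedged check, not in the original snippet: P(L, n, s) is a binomial pmf in L - 1 over n - 1 trials with success probability 1/s, so it should sum to 1 over L = 1..n.

import numpy as np
assert np.isclose(sum(P(L, 20, 4) for L in range(1, 21)), 1.0)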
Example #36
    def smolyak_sparse_grid(self, polynom, level):
        """
        SMOLYAK_SPARSE_GRID builds a sparse grid for multi-dimensional integration.
        It is based on Clenshaw-Curtis points and weights with growth rule: 2**(l-1) + 1.

        Parameters
        ----------
        polynom (PolyChaos) :
            PolyChaos instance
        level (int) :
            level of the integration

        Results
        -------
        x_points (ndarray) :
            2D-ndarray with all integration points (with repetition)
        eps_points (ndarray) :
            2D-ndarray with all integration points (with repetition) in the suitable range
            for the orthogonal polynomials
        weights (ndarray) :
            2D-ndarray with weights to be used with points (with repetition)
        unique_points (ndarray) :
            2D-ndarray with non-repeated integration points


        AUTHOR: Luca Giaccone ([email protected])
        DATE: 24.12.2018
        HISTORY:
        """

        # min and max level
        o_min = max([level + 1, polynom.dim])
        o_max = level + polynom.dim

        # get multi-index for all level
        comb = np.empty((0, polynom.dim), dtype=int)
        for k in range(o_min, o_max + 1):
            multi_index, _ = self.index_with_sum(polynom.dim, k)
            comb = np.append(comb, multi_index, axis=0)

        # initialize final array
        x_points = np.empty((0, polynom.dim))
        eps_points = np.empty((0, polynom.dim))
        weights = np.empty((0, ))

        # define integration points and weights
        for lev in comb:
            local_x = []
            local_eps = []
            local_w = []
            coeff = (-1)**(level + polynom.dim - np.sum(lev)) * binom(
                polynom.dim - 1, level + polynom.dim - np.sum(lev))

            # cycle on integration variables
            for l, k, p in zip(lev, polynom.distrib, polynom.param):
                # set integration type depending on distrib
                if k.upper() == 'U':
                    kind = 'CC'
                elif k.upper() == 'N':
                    kind = 'GH'

                # get number of integration points
                n = self.growth_rule(l, kind=kind)
                # get gauss points and weight
                x0, w0 = quad_coeff(rule=n, kind=kind)

                # change of variable
                if (k.upper() == 'U'):
                    eps = np.copy(x0)
                    w = np.copy(w0) * 0.5
                    if p != [-1, 1]:
                        x = (p[1] - p[0]) / 2 * x0 + (p[1] + p[0]) / 2
                    else:
                        x = np.copy(x0)

                elif (k.upper() == 'N'):
                    eps = np.sqrt(2) * 1 * x0 + 0
                    x = eps * p[1] + p[0]
                    w = w0 * (1 / np.sqrt(np.pi))

                # store local points
                local_x.append(x)
                local_eps.append(eps)
                local_w.append(w)

            # update final array
            x_points = np.concatenate(
                (x_points,
                 np.concatenate(columnize(*np.meshgrid(*local_x)), axis=1)))
            eps_points = np.concatenate(
                (eps_points,
                 np.concatenate(columnize(*np.meshgrid(*local_eps)), axis=1)))
            weights = np.concatenate((weights, coeff * np.prod(np.concatenate(
                columnize(*np.meshgrid(*local_w)), axis=1),
                                                               axis=1)))

        # get unique points
        unique_x_points, inverse_index = np.unique(x_points,
                                                   axis=0,
                                                   return_inverse=True)

        return x_points, eps_points, weights, unique_x_points, inverse_index
Example #37
import numpy as np
from scipy.special import binom

n = 25

mat = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
I = np.identity(3, dtype=float)

val = np.zeros((3, 3))

var = I
for k in range(n + 1):
    val += binom(n, k) * var
    var = np.dot(var, mat)

print(val)
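A hedged check, not in the original snippet: the loop above accumulates the binomial expansion of (I + mat)**n term by term, so val should match a direct matrix power.

assert np.allclose(val, np.linalg.matrix_power(I + mat, n))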
Example #38
def get_multinomial_coefficient(params):
    if len(params) == 1:
        return 1
    if params[-1] == 0:
        return get_multinomial_coefficient(params[:-1])
    return int(binom(sum(params), params[-1])) * get_multinomial_coefficient(params[:-1])
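A hedged usage sketch, assuming scipy's binom is in scope as the snippet implies: the multinomial coefficient of (2, 1, 1) is 4!/(2! 1! 1!) = 12.

from math import factorial
parts = [2, 1, 1]
assert get_multinomial_coefficient(parts) == factorial(4) // (factorial(2) * 1 * 1) == 12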
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib

from scipy.special import binom

m = 30
p = lambda x,k: binom(m,k)*(x**k)*((1-x)**(m-k))
g = lambda x,n: np.sum([p(x,k) for k in range(n+1)])
g(0.1,0)

xrange = np.arange(0,1,0.01)
prior = lambda x: 10 if (x<0.6 and x>0.4) else 1
S = np.sum([prior(x) for x in xrange])
priormemo = {x: prior(x)/S for x in xrange}
prior = lambda x: priormemo[x]
N = lambda k: np.sum([p(x,k)*prior(x) for x in xrange])
Nmemo = {k:N(k) for k in range(m+1)}
N = lambda k: Nmemo[k]
N(2)
pp = lambda x,k: (p(x,k)*prior(x))/N(k)

cumpp = lambda i,k: np.sum([pp(y,k) for y in xrange[:i]])
cumpp2 = lambda i,k: np.sum([pp(y,k) for y in xrange[i+1:]])

d = 0.05  # hypothetical upper-tail threshold, assumed here; not defined elsewhere in the snippet
fbayes = lambda k: np.min([x for i,x in enumerate(xrange) if cumpp2(i,k)<d])
fbayesmemo = {k:fbayes(k) for k in range(m+1)}
fbayes = lambda k: fbayesmemo[k]

plt.scatter(range(m),list(map(fbayes,range(m))))
Example #40
 def get_rate_matrix(self, states):
     print('Computing rate matrix')
     #print(self.species)
     #dstates = states[self.species[0]]
     #for i_specie in range(1,len(self.species)):
     #    specie = self.species[i_specie]
     #    print(dstates)
     #    print(states[specie])
     #    print(np.array(np.meshgrid(dstates,states[specie])).T.reshape(-1,2))
     #    exit()
     indep_species = deepcopy(self.species)
     for conservation_law in self.conservation_laws:
         for specie in conservation_law.keys():
             if specie != 'total':
                 indep_species.remove(specie)
                 break
         else:
             print('ERROR')
     enum_states = np.array(
         np.meshgrid(*[states[specie]
                       for specie in indep_species])).T.reshape(
                           -1, len(indep_species))
     print('Number of enumerated states: ', enum_states.shape)
     enum_states_full = np.nan * np.ones(
         (enum_states.shape[0], len(self.species)))
     for i_specie, specie in enumerate(self.species):
         if specie in indep_species:
             i_indep_species = indep_species.index(specie)
             enum_states_full[:, i_specie] = enum_states[:, i_indep_species]
         else:
             for conservation_law in self.conservation_laws:
                 if specie in conservation_law.keys():
                     enum_states_full[:,
                                      i_specie] = conservation_law['total']
                     for specie_j in conservation_law.keys():
                         if (specie_j != 'total') and (specie_j != specie):
                             j_indep_species = indep_species.index(specie_j)
                             enum_states_full[:,
                                              i_specie] -= enum_states[:,
                                                                       j_indep_species]
     print('Enumerated states: ', enum_states_full)
     K = np.zeros((enum_states_full.shape[0], enum_states_full.shape[0]))
     for i in range(enum_states_full.shape[0]):
         state_i = enum_states_full[i, :]
         print('Searching connection for state {0:d}'.format(i))
         for j in range(enum_states_full.shape[0]):
             state_j = enum_states_full[j, :]
             diff = (state_j - state_i).astype(int)
             for i_reaction in range(self.A.shape[0]):
                 if np.all(self.A[i_reaction, :] == diff):
                     number_of_each_reagents = np.power(
                         state_i, self.P[i_reaction, :])
                     #print('\tadding component {0:d} {1:d}'.format(i,j))
                     for ind_col in np.where(self.P[i_reaction, :] > 1):
                         number_of_each_reagents[ind_col] = special.binom(
                             state_i[ind_col], self.P[i_reaction, ind_col])
                     number_of_reagents = np.prod(number_of_each_reagents)
                     #print('species ',self.species)
                     #print('state_i ',state_i)
                     #print('state_j ',state_j)
                     #print('P ',self.P[i_reaction,:])
                     #print('A ',self.A[i_reaction,:])
                     #print('number_of_each_reagents ',number_of_each_reagents)
                     #print('number_of_reagents ',number_of_reagents)
                     K[j, i] += number_of_reagents * self.rates[i_reaction]
     for i in range(enum_states_full.shape[0]):
         K[i, i] = -np.sum(K[:, i])
     print('Rate matrix: ', K)
     T = sp.linalg.expm(K)
     print('Transition matrix: ', T)
     print(np.sum(T, axis=0))
     eigv = np.linalg.eigvals(T)
     inds = np.argsort(np.abs(eigv))[-1::-1]
     print('eigvs = ', eigv[inds])
     output = ''
     for i in inds:
         eig = eigv[i]
         output += '\tEigenvalue = {0:f}\n'.format(eig)
         output += '\tTimescale = {0:f}\n'.format(-1.0 /
                                                  np.log(np.abs(eig)))
         if eig.imag != 0:
             r, phi = cmath.polar(eig)
             output += '\t\tperiod = {0:f}\n'.format(2 * np.pi /
                                                     np.abs(phi))
     print(output)
     exit()
Example #41
    def get_propensity(self, state=None):
        """
        Example:
            Species :
                M          =          2
                P          =          3
            Transition matrix :
                                  ->  M               M ->             M -> M + P             P ->
                M                  1                   -1                  0                   0
                P                  0                   0                   1                   -1
                rate            1.000000            2.000000            3.000000            4.0000
                state
                    [2
                     3]
                P
                    [[0 0]
                     [1 0]
                     [1 0]
                     [0 1]
                    ]
            number_of_each_reagent
                [[1 1]	First reaction -> M: No reagent
                 [2 1]  Second reaction M -> : M is the reagent
                 [2 1]  Third reaction M -> M + P:  M is the reagent
                 [1 3]] Fourth reaction P ->: P is the reagent
            number_of_reagents
                [1  First reaction -> M: No reagent
                 2  Second reaction M -> : M is the reagent, so 2 molecules
                 2  Third reaction M -> M + P:  M is the reagent, so 2 molecules
                 3] Fourth reaction P ->: P is the reagent so 3 molecules
            number_of_reagents * self.rates
                [1	This is rate_1(1) * 1 (that's why we need a 1 in number_of_reagents for a constant rate reaction)
                 4      This is rate_2(2) * M(2)
                 6      This is rate_3(3) * M(2)
                 12]    This is rate_4(4) * P(3)

        """
        if state is None:
            state = self.state
        number_of_each_reagents = np.power(state, self.P)
        # print 'state: ',state
        # print 'self.P: ',self.P
        # print 'number_of_each_reagents(before correction): ',number_of_each_reagents
        inds_row, inds_col = np.where(self.P > 1)
        # This correction is needed for reactions that use more than one molecule of the same kind
        for ind_row, ind_col in zip(inds_row, inds_col):
            # self.P[ind_row,ind_col] = Number of molecules needed for the reaction
            # state[ind_col] = Number of molecules that are present
            # e.g. A + 3*B -> C
            # with number_of_each_reagents = np.power(state,self.P) the contribution of B is B**3
            # here we correct to binomial_coefficient(B,3)
            number_of_each_reagents[ind_row, ind_col] = special.binom(
                state[ind_col], self.P[ind_row, ind_col])
        # print 'number_of_each_reagents(after correction): ',number_of_each_reagents
        number_of_reagents = np.prod(number_of_each_reagents, axis=1)
        # print 'number_of_reagents: ',number_of_reagents
        propensities = number_of_reagents * self.rates
        for i_reaction, rate_patch in self.rate_patches.items():
            input_values = [
                state[self.species.index(specie)]
                for specie in inspect.getargspec(rate_patch).args
            ]
            propensities[i_reaction] *= rate_patch(*input_values)
        # print 'propensities: ',propensities
        return propensities
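The worked example in the docstring can be reproduced with a few lines of plain numpy (an illustrative sketch, independent of the class; the binomial correction is not needed here because no reaction consumes more than one molecule of a species):

import numpy as np

state = np.array([2, 3])                        # [M, P]
P = np.array([[0, 0], [1, 0], [1, 0], [0, 1]])  # reagents per reaction
rates = np.array([1.0, 2.0, 3.0, 4.0])
number_of_reagents = np.prod(np.power(state, P), axis=1)  # [1, 2, 2, 3]
print(number_of_reagents * rates)                         # [ 1.  4.  6. 12.]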
Ejemplo n.º 42
0
def spin_q_function(rho, theta, phi):
    r"""The Husimi Q function for spins is defined as ``Q(theta, phi) =
    SCS.dag() * rho * SCS`` for the spin coherent state ``SCS = spin_coherent(
    j, theta, phi)`` where j is the spin length.
    The implementation here is more efficient as it doesn't
    generate all of the SCS at theta and phi (see references).

    The spin Q function is normalized such that integrating it over the
    surface of the sphere gives one:

    .. math:: \frac{2 j + 1}{4 \pi}\int_\phi \int_\theta
              Q(\theta, \phi) \sin(\theta) d\theta d\phi = 1

    Parameters
    ----------
    rho : qobj
        A state vector or density matrix for a spin-j quantum system.
    theta : array_like
        Polar (colatitude) angle at which to calculate the Husimi-Q function.
    phi : array_like
        Azimuthal angle at which to calculate the Husimi-Q function.

    Returns
    -------
    Q, THETA, PHI : 2d-array
        Values representing the spin Husimi Q function at the values specified
        by THETA and PHI.

    References
    ----------
    [1] Lee Loh, Y., & Kim, M. (2015). American J. of Phys., 83(1), 30–35.
    https://doi.org/10.1119/1.4898595

    """

    if rho.type == 'bra':
        rho = rho.dag()

    if rho.type == 'ket':
        rho = ket2dm(rho)

    J = rho.shape[0]
    j = (J - 1) / 2

    THETA, PHI = meshgrid(theta, phi)

    Q = np.zeros_like(THETA, dtype=complex)

    for m1 in arange(-j, j + 1):
        Q += binom(2 * j, j + m1) * cos(THETA / 2) ** (2 * (j + m1)) * \
             sin(THETA / 2) ** (2 * (j - m1)) * \
             rho.data[int(j - m1), int(j - m1)]

        for m2 in arange(m1 + 1, j + 1):
            Q += (sqrt(binom(2 * j, j + m1)) * sqrt(binom(2 * j, j + m2)) *
                  cos(THETA / 2) ** (2 * j + m1 + m2) *
                  sin(THETA / 2) ** (2 * j - m1 - m2)) * \
             (exp(1j * (m1 - m2) * PHI) * rho.data[int(j - m1), int(j - m2)] +
              exp(1j * (m2 - m1) * PHI) * rho.data[int(j - m2), int(j - m1)])

    return Q.real, THETA, PHI
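A possible usage sketch, assuming a QuTiP-style environment that provides spin_coherent (the state and angular grid below are illustrative):

import numpy as np
from qutip import spin_coherent

theta = np.linspace(0, np.pi, 91)
phi = np.linspace(0, 2 * np.pi, 181)
psi = spin_coherent(1, np.pi / 4, np.pi / 3)       # spin-1 coherent state
Q, THETA, PHI = spin_q_function(psi, theta, phi)   # Q is largest near the coherent-state direction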
Ejemplo n.º 43
0
    def make_ops(self):

        def diag(bands,locs):
            return sparse.dia_matrix((bands,locs),shape=(len(bands[0]),len(bands[0])))

        a,b, N = self.consts['a'],self.consts['b'], self.max_degree+1
        n = np.arange(0,N)
        na = n+a
        nb = n+b
        nab = n+a+b
        nnab = 2*n+a+b

        # (1-z) <a,b| = <a-1,b| A-
        if a > 0:
            if a+b==0:
                middle = na/(2*n+1)
                lower  = (nb+1)/(2*n+1)
                middle[0]  = 2*a
            else:
                middle = 2*na*nab/(nnab*(nnab+1))
                lower  = 2*(n+1)*(nb+1)/((nnab+1)*(nnab+2))
            self.op['A-'] = diag([-np.sqrt(lower),np.sqrt(middle)],[-1,0])

        # <a,b| = <a+1,b| A+
        if a+b == 0 or a+b == -1:
            middle = (na+1)/(2*n+1)
            upper  = nb/(2*nab+1)
            middle[0], upper[0] = (1+a)*(1-(a+b)), 0
        else:
            middle = 2*(na+1)*(nab+1)/((nnab+1)*(nnab+2))
            upper  = 2*n*nb/(nnab*(nnab+1))
        self.op['A+'] = diag([np.sqrt(middle),-np.sqrt(upper)],[0,+1])

        # (1+z) <a,b| = <a,b-1| B-
        if b > 0:
            if a+b == 0:
                middle = nb/(2*n+1)
                lower  = (na+1)/(2*n+1)
                middle[0] = 2*b
            else:
                middle = 2*nb*nab/(nnab*(nnab+1))
                lower  = 2*(n+1)*(na+1)/((nnab+1)*(nnab+2))
            self.op['B-'] = diag([np.sqrt(lower),np.sqrt(middle)],[-1,0])

        # <a,b| = <a,b+1| B+
        if a+b == 0 or a+b == -1:
            middle = (nb+1)/(2*n+1)
            upper  = na/(2*nab+1)
            middle[0], upper[0] = (1+b)*(1-(a+b)), 0
        else:
            middle = 2*(nb+1)*(nab+1)/((nnab+1)*(nnab+2))
            upper  = 2*n*na/(nnab*(nnab+1))
        self.op['B+'] = diag([np.sqrt(middle),np.sqrt(upper)],[0,+1])

        # ( a - (1-z)*d/dz ) <a,b| = <a-1,b+1| C-
        if a > 0:
            self.op['C-'] = diag([np.sqrt(na*(nb+1))],[0])

        # ( b + (1+z)*d/dz ) <a,b| = <a+1,b-1| C+
        if b > 0:
            self.op['C+'] = diag([np.sqrt((na+1)*nb)],[0])

        # ( a(1+z) - b(1-z) - (1-z^2)*d/dz ) <a,b| = <a-1,b-1| D-
        if a > 0 and b > 0:
            self.op['D-'] = diag([np.sqrt((n+1)*nab)],[-1])

        # d/dz <a,b| = <a+1,b+1| D+
        self.op['D+'] = diag([np.sqrt(n*(nab+1))],[+1])

        # z <a,b| = <a,b| J
        self.op['J'] = 0.5*(self.pull('B+',self.op['B+']) - self.pull('A+',self.op['A+']))

        self.op['z=+1'] = np.sqrt(nnab+1)*np.sqrt(fun.binom(na,a))*np.sqrt(fun.binom(nab,a))
        self.op['z=-1'] = ((-1)**n)*np.sqrt(nnab+1)*np.sqrt(fun.binom(nb,b))*np.sqrt(fun.binom(nab,b))
        if a+b==-1:
            self.op['z=+1'][0] = np.sqrt(np.sin(np.pi*np.abs(a))/np.pi)
            self.op['z=-1'][0] = np.sqrt(np.sin(np.pi*np.abs(b))/np.pi)
Ejemplo n.º 44
0
def sterm(n, k, j):
    """
    Numerically evaluates one summand of the explicit (inclusion-exclusion) formula for the Stirling numbers of the second kind.
    """
    return int((-1)**(k - j) * scis.binom(k, j) * j**n)
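Summing sterm over j and dividing by k! gives the Stirling numbers of the second kind; an illustrative check (not part of the original snippet):

from math import factorial

def stirling2(n, k):
    # S(n, k) = (1/k!) * sum_{j=0}^{k} (-1)**(k-j) * C(k, j) * j**n
    return sum(sterm(n, k, j) for j in range(k + 1)) // factorial(k)

print(stirling2(5, 2), stirling2(6, 3))  # 15 90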
Ejemplo n.º 45
0
 def __init__(cls, *args, **kwargs):
     cls._ORDER = 2
     cls._binom_coeffs = [[sp.binom(n, k) for k in range(cls._ORDER + 1)]
                          for n in range(cls._ORDER + 1)]
Ejemplo n.º 46
0
cost_mat_inv_z8 = cp.asarray(loadmat(paths+'matrices/cost_mat_inv_z8.mat')['inv_z'])

cost_mat_inv_x9 = cp.asarray(loadmat(paths+'matrices/cost_mat_inv_x9.mat')['inv_x'])
cost_mat_inv_y9 = cp.asarray(loadmat(paths+'matrices/cost_mat_inv_y9.mat')['inv_y'])
cost_mat_inv_z9 = cp.asarray(loadmat(paths+'matrices/cost_mat_inv_z9.mat')['inv_z'])

Aeq_x = cp.asarray(loadmat(paths+'matrices/Aeq_x.mat')['Aeq_x'])
Aeq_y = cp.asarray(loadmat(paths+'matrices/Aeq_y.mat')['Aeq_y'])
Aeq_z = cp.asarray(loadmat(paths+'matrices/Aeq_z.mat')['Aeq_z'])

Afc = cp.asarray(loadmat(paths+'matrices/Afc.mat')['Afc'])

rho = cp.asarray(loadmat(paths+'matrices/rho.mat')['rho_init'][0])
print(rho)

ncomb = int(binom(nbot,2))

x_init, y_init, z_init, x_fin, y_fin, z_fin = init_final_pos()
vx_init = cp.zeros(nbot)
vy_init = cp.zeros(nbot)
vz_init = cp.zeros(nbot)
ax_init = cp.zeros(nbot)
ay_init = cp.zeros(nbot)
az_init = cp.zeros(nbot)

vx_fin = cp.zeros(nbot)
vy_fin = cp.zeros(nbot)
vz_fin = cp.zeros(nbot)
ax_fin = cp.zeros(nbot)
ay_fin = cp.zeros(nbot)
az_fin = cp.zeros(nbot)
Ejemplo n.º 47
0
def f_xtheta(x, theta, k, m):
    #return(2*(m-k+1)*(1-1.0/4**k*sum([binom(k,i)*3*(3*x)**i/(3+8*theta*i) for i in range(k+1)])))
    return ((1 - 1.0 / 4**k * sum([
        binom(k, i) * 3 * (3 * x)**i / (3 + 8 * theta * i)
        for i in range(k + 1)
    ])))
Ejemplo n.º 48
0
def get_exponent_matrix(spatial_dimension: int, poly_degree: int, lp_degree: Union[float, int]) -> np.ndarray:
    """ creates an array of "multi-indices" symmetric in each dimension

    pre allocate the right amount of memory
    then starting from the 0 vector, lexicographically "count up" and add all valid exponent vectors

    NOTE: this has only been tested for up to dim 4 and up to deg 5.
        be aware that the preallocated array might be too small
        (<- will raise an IndexError by accessing the array out of bounds)!

    :param spatial_dimension: the dimensionality m
    :param poly_degree: the highest exponent which should occur in the exponent matrix
    :param lp_degree: the value for p of the l_p-norm
    :return: an array of shape (x, m) with all possible exponent vectors v with: l_p-norm(v) <= poly_degree
        sorted lexicographically.
    """
    n = poly_degree
    m = spatial_dimension
    p = lp_degree
    p = float(p)  # float dtype expected

    max_nr_exp = (n + 1) ** m
    if p <= 0.0:
        raise ValueError('value of p must be larger than 0.')
    # for some special cases it is known how many distinct exponent vectors exist:
    if p == 1.0:
        nr_expected_exponents = binom(m + n, n)
    elif p == np.inf or m == 1 or n == 1:
        # in dimension 1 all lp-degrees are equal!
        nr_expected_exponents = max_nr_exp
    else:
        # for values p << 2 the estimation based on the hypersphere volume tends to underestimate
        # the required amount of space! -> clip values for estimation.
        # TODO find more precise upper bound.
        # -> increases the volume in order to make sure there truly is enough memory being allocated
        p_estimation = max(p, 1.7)
        vol_lp_sphere = lp_hypersphere_vol(m, radius=1.0, p=p_estimation)
        vol_hypercube = 2 ** m
        vol_fraction = vol_lp_sphere / vol_hypercube
        nr_expected_exponents = max_nr_exp * vol_fraction

    nr_expected_exponents = int(nr_expected_exponents)
    if nr_expected_exponents > 1e6:
        raise ValueError(f'trying to generate exponent matrix with {nr_expected_exponents} entries.')
    exponents = np.empty((nr_expected_exponents, m), dtype=INT_DTYPE)
    nr_filled_exp = fill_exp_matrix(exponents, p, n)

    # NOTE: validity checked by tests:
    # if nr_expected_exponents == nr_filled_exp and lp_degree != np.inf:
    #     raise ValueError('potentially not enough memory has been allocated to fit all valid exponent vectors! '
    #                      'check increasing the `incr_factor`')
    # just use relevant part:
    out = exponents[:nr_filled_exp, :]
    #  NOTE: since tiny_array is a view onto huge_array, so long as a reference to tiny_array exists the full
    #  big memory allocation will remain. creating an independent copy however will take up resources!
    #  cf. http://numpy-discussion.10968.n7.nabble.com/Numpy-s-policy-for-releasing-memory-td1533.html
    #  NOTE: will be slow for very large arrays, since everything needs to be copied!
    if nr_filled_exp * 2 < nr_expected_exponents:
        warn(f'more than double the required memory has been allocated ({nr_filled_exp} filled '
             f'<< {nr_expected_exponents} allocated). inefficient!')
        out = out.copy()  # independent copy
        del exponents  # free up memory
    return out
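The p == 1 branch uses the standard count of multi-indices with l1-norm at most n; a small brute-force check of that identity (illustrative only):

from itertools import product
from scipy.special import binom

m, n = 3, 4
count = sum(1 for v in product(range(n + 1), repeat=m) if sum(v) <= n)
assert count == int(binom(m + n, n))  # 35 exponent vectors for m = 3, n = 4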
Ejemplo n.º 49
0
""" Advent of Code Day 10 """

import numpy as np
import itertools
from scipy.special import binom

adapters = np.loadtxt('./input_day10.txt', dtype=np.int64)
adapters = np.concatenate(([0], np.sort(adapters), [np.max(adapters) + 3]))
diffs = np.diff(adapters)

# Part I
count1 = (diffs == 1).sum()
count3 = (diffs == 3).sum()
print(count1 * count3)

# Part II
# Assuming there are no voltage diffs of 2 in the sorted adapter list.
# For each island of N<=5 adapters with voltage diff = 1, we can drop groups
# of 0, 1 or 2 adapters (if island size allows).
nperms = 1
for diff, group in itertools.groupby(diffs):
    if diff == 1:
        group = list(group)
        nfree = len(group) - 1
        fac = np.sum([binom(nfree, i) for i in [0, 1, 2]])
        if nfree > 3:  # no cases like this in the input...
            raise ValueError('Invalid formula')
        nperms *= fac

print(int(nperms))
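A brute-force check of the dropping rule used in Part II (illustrative, not required by the puzzle): for an island with nfree interior adapters, count the subsets that can be removed without creating a gap larger than 3 and compare with the binomial sum.

from itertools import combinations

for nfree in range(4):
    chain = list(range(nfree + 2))    # two fixed endpoints plus nfree droppable adapters, all diffs 1
    interior = chain[1:-1]
    valid = 0
    for r in range(nfree + 1):
        for drop in combinations(interior, r):
            kept = [x for x in chain if x not in drop]
            if max(np.diff(kept)) <= 3:
                valid += 1
    assert valid == sum(binom(nfree, i) for i in [0, 1, 2])  # 1, 2, 4, 7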
Ejemplo n.º 50
0
def delannoy(m, n):
    s = 0
    for k in range(min(m, n) + 1):
        s += binom(m, k) * binom(n, k) * 2**k
    return s
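A quick sanity check: the central Delannoy numbers D(i, i) produced by this formula begin 1, 3, 13, 63, 321.

print([int(delannoy(i, i)) for i in range(5)])  # [1, 3, 13, 63, 321]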
Ejemplo n.º 51
0
def minNforHDIpower(genPriorMean,
                    genPriorN,
                    HDImaxwid=None,
                    nullVal=None,
                    ROPE=None,
                    desiredPower=0.8,
                    audPriorMean=0.5,
                    audPriorN=2,
                    HDImass=0.95,
                    initSampSize=1,
                    verbose=True):
    import sys
    import numpy as np
    from HDIofICDF import HDIofICDF, beta
    from scipy.special import binom, betaln

    if HDImaxwid != None and nullVal != None:
        sys.exit('One and only one of HDImaxwid and nullVal must be specified')
    if ROPE == None:
        ROPE = [nullVal, nullVal]

# Convert prior mean and N to a, b parameter values of beta distribution.
    genPriorA = genPriorMean * genPriorN
    genPriorB = (1.0 - genPriorMean) * genPriorN
    audPriorA = audPriorMean * audPriorN
    audPriorB = (1.0 - audPriorMean) * audPriorN
    # Initialize loop for incrementing sampleSize
    sampleSize = initSampSize
    # Increment sampleSize until desired power is achieved.
    while True:
        zvec = np.arange(0,
                         sampleSize + 1)  # All possible z values for N flips.
        # Compute probability of each z value for data-generating prior.
        pzvec = np.exp(
            np.log(binom(sampleSize, zvec)) +
            betaln(zvec + genPriorA, sampleSize - zvec + genPriorB) -
            betaln(genPriorA, genPriorB))
        # For each z value, compute HDI. hdiMat is min, max of HDI for each z.
        hdiMat = np.zeros((len(zvec), 2))
        for zIdx in range(0, len(zvec)):
            z = zvec[zIdx]
            # Determine the limits of the highest density interval
            # hdp is a function from PyMC package and takes a sample vector as
            # input, not a function.
            hdiMat[zIdx] = HDIofICDF(beta,
                                     credMass=HDImass,
                                     a=(z + audPriorA),
                                     b=(sampleSize - z + audPriorB))
        if HDImaxwid != None:
            hdiWid = hdiMat[:, 1] - hdiMat[:, 0]
            powerHDI = np.sum(pzvec[hdiWid < HDImaxwid])
        if nullVal != None:
            powerHDI = np.sum(pzvec[(hdiMat[:, 0] > ROPE[1]) |
                                    (hdiMat[:, 1] < ROPE[0])])
        if verbose:
            print(" For sample size = %s\npower = %s\n" %
                  (sampleSize, powerHDI))

        if powerHDI > desiredPower:
            break
        else:
            sampleSize += 1
    return sampleSize
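An illustrative call (the prior, ROPE, and power values below are hypothetical, chosen only to show the argument pattern, and the HDIofICDF helper imported inside the function must be available): find the smallest sample size for which the 95% HDI excludes a small ROPE around 0.5 with 80% power.

n = minNforHDIpower(genPriorMean=0.75, genPriorN=2000,
                    nullVal=0.5, ROPE=[0.48, 0.52],
                    desiredPower=0.8, HDImass=0.95,
                    initSampSize=20, verbose=False)
print(n)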
Ejemplo n.º 52
0
if __name__ == '__main__':

    def print_header(text):
        print('\n\033[1m\033[31m\033[4m{}\033[0m'.format(text))

    # Original solutions
    print_header('Original solutions')
    print(list(recur(nuts, [], None)))

    # Approximate distribution of solutions.
    #
    # We use Hoeffding's inequality to be 0.95 confident that we are 0.01
    # within true probabilities. That is n >= 1/(2*epsilon^2) * log(2/alpha) ~=
    # 18444.
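    # (Spelled out: 1/(2*0.01**2) * log(2/0.05) = 5000 * ln(40) ≈ 18444.4.)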
    trials = 18444
    all_nuts = {Nut(perm) for perm in permutations(range(1, 7))}
    counts = Counter()

    for _ in range(trials):
        nuts = set(sample(all_nuts, 7))
        n_sols = sum(1 for _ in recur(nuts, [], None))
        counts[n_sols] += 1

    print_header('Distribution of number of solutions')
    total = binom(len(all_nuts), 7)
    for k, v in counts.most_common():
        f = v / trials
        print('{:<4}{:<10.2e}{:<8.1%}{}'.format(k, f * total, f,
                                                '=' * int(f * 40)))
Ejemplo n.º 53
0
def fBinomial(x, n, p):
    k = np.around(x)
    return sp.binom(n, k) * p**k * (1. - p)**(n - k)
Ejemplo n.º 54
0
def AlgebraicCoalescentJCExpectedKmerDistance(x, theta, k):
    #return(2*(m-k+1)*(1-1.0/4**k*sum([binom(k,i)*3*(3*x)**i/(3+8*theta*i) for i in range(k+1)])))
    return ((1 - 1.0 / 4**k * sum([
        binom(k, i) * 3 * (3 * x)**i / (3 + 8 * theta * i)
        for i in range(k + 1)
    ])))
Ejemplo n.º 55
0
def hockey_stick_pmf_unstable(n, k):
    pmf = np.zeros(shape=(n, ), dtype=float)
    for idx in range(n):
        i = idx + 1
        pmf[idx] = binom(i - 1, k - 1) / binom(n, k)
    return pmf
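By the hockey-stick identity sum_{i=k}^{n} C(i-1, k-1) = C(n, k), the returned vector sums to one; a quick illustrative check:

pmf = hockey_stick_pmf_unstable(20, 3)
assert abs(pmf.sum() - 1.0) < 1e-12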
Ejemplo n.º 56
0
def out_deg_distro(n, k, d, vd, c, m, alpha):
    z = compute_z(d, vd, c, m, alpha)
    return binom(n - 1, k) * z**k * (1 - z)**(n - k - 1)
Ejemplo n.º 57
0
def binomial(n, p, towhere):
    probability = 0
    for indx in range(towhere + 1):
        probability += spc.binom(n, indx) * p**indx * (1 - p)**(n - indx)
    return probability
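For example, the probability of at most 5 successes in 10 trials with p = 0.5 is 638/1024:

print(binomial(10, 0.5, 5))  # 0.623046875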
Ejemplo n.º 58
0
def catalan(n):
    return int(binom(2*n, n)) // (n+1)
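The first few values, as a quick check:

print([catalan(n) for n in range(6)])  # [1, 1, 2, 5, 14, 42]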
Ejemplo n.º 59
0
def grunspan_prob(n, q):
    p = 1 - q
    prob = 1.0
    for k in range(n):
        prob -= (p**n * q**k - q**n * p**k) * binom(k + n - 1, k)
    return prob
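A quick sanity check (illustrative): with q = 0.5 every term in the sum vanishes, so the catch-up probability is exactly 1, while a minority attacker gets a probability strictly between 0 and 1.

assert abs(grunspan_prob(6, 0.5) - 1.0) < 1e-12
assert 0.0 < grunspan_prob(6, 0.3) < 1.0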
Ejemplo n.º 60
0
def _point_probability(k, n, l, p: float = 0.5):  # noqa:E741
    return binom(n - l, k) * p**k * (1 - p)**(n - k - l)