Code example #1
def AVG(n, pi, maxIters=float('inf'), tol=TOL):
    '''
	INPUT:
		n :: Integer
			# number of bins
		pi :: NPArray<Float>
			# optimal stationary distribution
			# CAN'T HAVE ANY ZERO ENTRIES!
	OUTPUT:
		NPArray<NPArray<Float>>
			# P^(I) via AVG Method
	'''
    L = lambda mat: np.linalg.norm(np.dot(mat, pi) - pi)
    M = lambda mat: np.linalg.norm(corrEv(mat) - pi)
    if n < 2:
        print "n must be >= 2"
        return None
    elif n == 2:
        output = brS(2, pi)
        return output, L(output), M(output)
    N = lambda x: normize(np.random.random([x, x]))
    output = N(n)  # P^(I)
    p = patch(n)
    Z = (n - 1)**2  # number of 2x2 patches to average over
    A = np.zeros([n, n])
    for _ in range(Z):
        cp = next(p)  # current patch
        Ai = np.zeros([n, n])
        # Ai = np.identity(n)
        s = sum(pi[cp[0]])
        Ai[cp[0][0]:(cp[0][1] + 1),
           cp[1][0]:(cp[1][1] + 1)] = brS(2, pi[cp[0]] / s)
        A += Ai
    output = A / Z
    return output, L(output), M(output)
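A minimal usage sketch for AVG, assuming numpy is imported as np and the helpers it calls (TOL, brS, patch, normize, corrEv) are available from the rest of the codebase; the pi values are made up.

import numpy as np

# Hypothetical example: 3 bins, strictly positive stationary distribution.
pi = np.array([0.2, 0.3, 0.5])
P, fixed_point_err, eigvec_err = AVG(3, pi)
print(P)                 # the averaged P^(I)
print(fixed_point_err)   # ||np.dot(P, pi) - pi||, the L metric computed inside AVG
print(eigvec_err)        # ||corrEv(P) - pi||, the M metric computed inside AVG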
Code example #2
def GI2(n, pi, maxIters=float('inf'), tol=TOL):
    '''
	INPUT:
		n :: Integer
			# number of bins
		pi :: NPArray<Float>
			# optimal stationary distribution
			# CAN'T HAVE ANY ZERO ENTRIES!
	OUTPUT:
		NPArray<NPArray<Float>>
			# P^(I) via Gibbs Sampling-inspired Method
	'''
    output = np.zeros([n, n])  # P^(I)
    for col in xrange(n):
        output[:, col] = np.transpose(genStat(n))
    ev = corrEv(output)
    indices = range(n)
    while not listMatch(np.dot(output, ev), pi, tol=tol) and maxIters > 0:  # s1, loop
        # s2, isolate
        alterRow = np.random.choice(indices, size=[2], replace=False).astype(int)
        alterCol = np.random.choice(indices, size=[2], replace=False).astype(int)
        alterRow = np.array([min(alterRow), max(alterRow)])  # sort in order of lowest to highest
        alterCol = np.array([min(alterCol), max(alterCol)])  # sort in order of lowest to highest
        subpi = np.zeros(2)
        subpi[0] = pi[alterRow[0]]
        subpi[1] = pi[alterRow[1]]
        # s3b, note how much space was formerly taken up
        resMass_mat = (output[alterRow[0]][alterCol[0]] + output[alterRow[1]][alterCol[0]],
                       output[alterRow[0]][alterCol[1]] + output[alterRow[1]][alterCol[1]])
        resMass_pi = sum(subpi)
        # s3, normalize
        subpi /= sum(subpi)
        # s4, optimize extracted 2-equation system
        submat = brS(n, subpi)  # !!! Use bS, rS, brS methods. !!!
        # s5a, denormalize
        submat[:, 0] *= resMass_mat[0]
        submat[:, 1] *= resMass_mat[1]
        subpi *= resMass_pi
        # s5, substitute in new values renormalized to Q
        output[alterRow[0]][alterCol[0]] = submat[0][0]
        output[alterRow[1]][alterCol[0]] = submat[1][0]
        output[alterRow[0]][alterCol[1]] = submat[0][1]
        output[alterRow[1]][alterCol[1]] = submat[1][1]
        ev = corrEv(output)
        maxIters -= 1
    return output
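As with AVG, a hedged usage sketch for GI2; it assumes the same kind of helpers (genStat, corrEv, listMatch, brS, TOL) exist elsewhere in the codebase, and it caps maxIters because the stopping test may never be met exactly.

import numpy as np

pi = np.array([0.1, 0.2, 0.3, 0.4])    # made-up target distribution with no zero entries
P = GI2(4, pi, maxIters=50000)         # cap the loop; the default runs until listMatch succeeds
print(np.linalg.norm(corrEv(P) - pi))  # distance of P's stationary vector from the target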
Code example #3
def rS(n, pi, iters=float('inf'), tol=TOL):
	'''
	INPUT:
		n :: Integer
			# number of bins
		pi :: NPArray<Float>
			# optimal stationary distribution
	OUTPUT:
		NPArray<NPArray<Float>>
			# P^(I) via Random Search Method
			# only works in 2x2 case
			# BECAUSE IT'S 2x2 CASE I CAN USE 2x2 P^(M) SERIES FORMULA
	'''
	output = np.zeros([2,2]) # P^(I)
	for col in xrange(2):
		output[:,col] = np.transpose(genStat(2))
	ev = corrEv(output)
	b1 = 0.0 if pi[0] >= pi[1] else 1.0-(pi[0]/pi[1])
	b2 = 1.0
	output[1][1] = np.average([b1,b2])
	output = resMat(output)
	while not listMatch(np.dot(output, ev), pi, tol=tol) and iters > 0: # s1, loop
		output[1][1] = (b2-b1)*np.random.random()+b1
		output[0][0] = p22p1(output[1][1], pi) # calculate p_11
		output = resMat(output) # calculate p_12, p_21
		ev = corrEv(output)
		iters -= 1
	return output
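In the 2x2 case the stationary condition has a closed form, which is presumably what the p22p1 helper encodes: for a column-stochastic [[p11, p12], [p21, p22]], P·pi = pi forces (1 - p11)·pi[0] = (1 - p22)·pi[1]. A minimal sketch of that relation (the name p22_to_p11 is mine, not from the codebase):

def p22_to_p11(p22, pi):
    # Solve (1 - p11) * pi[0] = (1 - p22) * pi[1] for p11.
    return 1.0 - (1.0 - p22) * pi[1] / pi[0]

This also explains the lower bound b1 above: keeping p11 non-negative requires p22 >= 1 - pi[0]/pi[1] whenever pi[0] < pi[1], and 0 otherwise.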
Code example #4
def patchR(n):
    '''
	Generates random 2x2 patch
	'''
    output = np.zeros([2, 2], dtype=int)  # [row_indices, col_indices]; keep indices as integers
    for i in range(2):
        output[i] = np.random.choice(range(n), size=[2],
                                     replace=False).astype(int)
    return output
Code example #5
def Series(N, n, indMat):
    '''
	INPUT:
		N :: Integer
			# number of people
		n :: Integer
			# number of bins
		indMat :: List<List<Float>>
			# individual matrix
	OUTPUT:
		List<List<Float>>
			# mass matrix according to my generalized series formula (without Java speedup)
	'''
    sl = wc_count(N, n)  # side length
    print 'Progress:'
    output = np.zeros([sl, sl])
    i = 0  # current row index
    for Ti in weak_compositions(N, n):  # rows of mass matrix (s^(2))
        j = 0  # current column index
        for Tj in weak_compositions(N, n):  # columns of mass matrix (s^(1))

            b1 = 0  # block 1
            for scwc in p_wc(np.array(Tj), n, np.array(Ti)):  # loop over successively-constrained wcs
                b2 = 1  # block 2
                for d_idx, donor in enumerate(Tj):
                    if donor > 0:
                        b2 *= mn(donor, scwc[d_idx])
                        for t in xrange(n):  # block 3
                            b2 *= indMat[t][d_idx]**scwc[d_idx][t]
                b1 += b2

            output[i][j] = b1
            j += 1
        i += 1
        print 100.0 * float(i) / float(sl), '%'  # progress output
    return output
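Series iterates over weak_compositions(N, n), which is assumed from the rest of the codebase. For reference, a self-contained stand-in that enumerates every length-n tuple of non-negative integers summing to N (there are C(N+n-1, n-1) of them, matching wc_count):

def weak_compositions_sketch(N, n):
    # Illustrative stand-in for the assumed weak_compositions helper.
    if n == 1:
        yield (N,)
        return
    for first in range(N + 1):
        for rest in weak_compositions_sketch(N - first, n - 1):
            yield (first,) + rest

# e.g. list(weak_compositions_sketch(2, 2)) -> [(0, 2), (1, 1), (2, 0)]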
Code example #6
def CMAES(n, pi, tol=10.0 * TOL):
    # build x0
    x0 = np.zeros(n**2)
    for i in range(n):
        nxt = np.random.uniform(0, 1, n)
        x0[i * n:i * n + n] = nxt / sum(nxt)
    # build sigma0
    sigma0 = .25**n  # ~1/4 of search domain width => try with .25 and (.25)**n
    f = cma.fitness_functions.FitnessFunctions()
    preFit = lambda x: corrEig(x, pi, n, tol)
    fitnessFunction = lambda x: f.fun_as_arg(x, preFit)
    output = cma.fmin(fitnessFunction, x0, sigma0)
    print output
    print '\n'
    return devectorize(output[5], n)  # incumbent solution found at this entry in output array
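A hedged usage sketch; it assumes the cma package is installed and that corrEig, devectorize, and TOL come from the rest of the codebase.

import numpy as np

pi = np.array([0.2, 0.3, 0.5])  # made-up target distribution
P = CMAES(3, pi)                # cma.fmin prints its own progress log as it runs
print(P)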
Code example #7
def p_wc(ls, n, initial_maxs, initial_output=None, currIdx=0):
    '''
	INPUT:
		ls :: NPArray<Integer>
		n :: Integer
			# len(ls)
		initial_maxs :: NPArray<Integer>
			# should have length len(ls)
	OUTPUT:
		generated List<NPArray<Integer>>
			# a successively bounded wc list as generated
	'''
    if initial_output is None:  # avoid sharing a mutable default list across calls
        initial_output = []
    n0s = np.zeros(n)
    if currIdx == n or len(ls) == 0 or listMatch(initial_maxs, n0s):
        yield initial_output
    else:
        for bwc in bounded_wcs(ls[currIdx], n, n0s, initial_maxs):
            next_wc = np.array(bwc).astype(int)
            i_o = deepcopy(initial_output)
            i_o.append(next_wc)
            for x in p_wc(ls, n, initial_maxs - next_wc, i_o, currIdx + 1):
                yield x
Code example #8
def NRS(n, pi, iterCols=float('inf'), iterInCol=10000, tol=TOL):
	'''
	INPUT:
		n :: Integer
			# number of bins
		pi :: NPArray<Float>
			# optimal stationary distribution
	OUTPUT:
		NPArray<NPArray<Float>>
			# P^(I) via Naive Random Search Method
	'''
	# initialization and normalization
	if iterCols is None:
		iterCols = n*10000
	output = np.random.random((n,n))
	for col in xrange(n):
		output[:,col] /= sum(output[:,col])
	out = np.linalg.eig(output)
	n0s = np.zeros(n)
	totalIts = iterCols; totalItsIn = iterInCol
	while (not abs(out[0][0] - 1.0) < tol or not listMatch(out[1][:,0], pi)) and iterCols!=0:
		for col in xrange(n):
			p_old = n0s
			iterInCol = totalItsIn
			temp = output.copy()  # work on a copy so rejected trial columns do not overwrite output
			while not listMatch(output[:,col], p_old) and iterInCol!=0:
				next_try = np.random.random((n,))
				next_try /= sum(next_try) # Line 5 in LaTeX
				temp[:,col] = next_try
				out = np.linalg.eig(temp)
				if np.linalg.norm(out[1][:,0] - pi)**2 < np.linalg.norm(p_old - pi)**2:
					output = temp.copy()
					p_old = output[:,col]
				iterInCol -= 1
		out = np.linalg.eig(output)
		iterCols -= 1
	return output
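A hedged usage sketch for NRS. Because np.linalg.eig does not sort its eigenvalues, the check below picks out the eigenvalue closest to 1 explicitly rather than relying on index 0 as the loop condition does; the pi values and iteration caps are made up.

import numpy as np

pi = np.array([0.25, 0.25, 0.5])            # made-up target distribution
P = NRS(3, pi, iterCols=50, iterInCol=200)  # small caps for a quick run
vals, vecs = np.linalg.eig(P)
k = np.argmin(np.abs(vals - 1.0))           # eigenvalue nearest 1
v = np.real(vecs[:, k])
v /= v.sum()                                # rescale the eigenvector to a probability vector
print(np.linalg.norm(v - pi))               # how close the search got to the target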
Code example #9
def VSEA(N, n, indMC):  # non-unique, identity-less individuals
    '''
	INPUT:
		N :: Integer
			# number of people
		n :: Integer
			# number of bins
		indMC :: NPArray<NPArray<Float>>
			# individual matrix
	OUTPUT:
		List<List<Float>>
			# mass matrix
	'''
    #
    # Block A
    #
    sl = int(comb(N + n - 1, n - 1))  # side length
    output = np.zeros([sl, sl])
    vectList = buildVectList(n, indMC)  # lacks 0 vectors
    zeroVect = np.zeros(n)
    #
    # Block B
    #
    i = 0  # current row index
    for Ti in weak_compositions(N, n):  # rows of mass matrix
        print 'Progress:', 100.0 * float(sl * i) / float(sl**2), '%'
        j = 0  # current column index
        for Tj in weak_compositions(N, n):  # columns of mass matrix
            #
            # Block C
            #
            diff = np.array(Tj) - np.array(Ti)  # Prospective state transition: Tj -> Ti
            vectList_copy = deepcopy(vectList)
            # add 0 vectors to vectList
            max_0s_per_bin = []
            for bn in xrange(n):
                max_0s_per_bin.append(min(Tj[bn], Ti[bn]))
                if Tj[bn] > 0 and Ti[bn] > 0:
                    vectList_copy.append((np.zeros(n), indMC[bn][bn], bn))
            #
            # Block D
            #
            # get every way of allotting N tokens across all vectors in vectList
            lvlc = len(vectList_copy)
            transition_prob_terms = []
            for wc in weak_compositions(N, lvlc):  # N = number of tokens, lvlc = number of vectors available
                sum_vect = np.zeros(n)
                breakIt = False
                static_in_bin = np.zeros(n)  # number of people staying in their bin
                possibly_correct_vects = []  # :: List<Tuple< vector, prob, number_of_times_used, idx0 >>; idx0 only for 0 vects, gives the corresponding diagonal index of that 0 vect
                for vectTup_idx in xrange(lvlc):
                    if wc[vectTup_idx] > 0:  #check if vector is used
                        # check if too many of the same zero vector has been invoked
                        if listMatch(vectList_copy[vectTup_idx][0], zeroVect):  # when we encounter a zero vector
                            static_in_bin[vectList_copy[vectTup_idx][2]] += wc[vectTup_idx]
                            if max_0s_per_bin[vectList_copy[vectTup_idx][2]] < wc[vectTup_idx]:
                                breakIt = True  # too many of the same zero vector has been invoked
                                break
                        sum_vect += float(wc[vectTup_idx]) * vectList_copy[vectTup_idx][0]
                        possibly_correct_vects.append((vectList_copy[vectTup_idx][0],
                                                       vectList_copy[vectTup_idx][1],
                                                       wc[vectTup_idx]))
                if breakIt:  # skip to next wc
                    continue
                #
                # Block E
                #
                if listMatch(diff, sum_vect):  # check if current weak composition wc of vectors equals diff
                    # coefficient determination
                    leaving = np.zeros(n)
                    entering = {}
                    prod = 1
                    for pre_pvs in possibly_correct_vects:
                        pvs = np.ndarray.tolist(pre_pvs[0])
                        if not listMatch(pvs, zeroVect):  # when we DON'T encounter a zero vector
                            for bi in xrange(n):
                                if pvs[bi] == -1:
                                    leaving[bi] += pre_pvs[2]
                                    if bi not in entering:
                                        entering[bi] = np.zeros(n)
                                    entering[bi][pvs.index(1)] += pre_pvs[2]
                                    break  # jump to next pre_pvs
                    # check if too many people are leaving a bin <-- REALLY HACKY! >:(
                    for bi in xrange(n):
                        if leaving[bi] + static_in_bin[bi] > Ti[bi]:
                            breakIt = True
                            break
                    if breakIt:  # skip to next wc
                        continue
                    #
                    # Block F
                    #
                    prod *= allotCombos(Ti, leaving, entering)  # get coefficient
                    # get P^I probabilities that correspond to selected basis vectors
                    #   get exponents for all probabilities that correspond to the
                    #   quantities of each of their respective basis vector
                    for pvs in possibly_correct_vects:
                        prod *= pvs[1]**pvs[2]
                    transition_prob_terms.append(prod)  # this is just one term that contributes to one entry in the mass matrix
            output[j][i] = sum(transition_prob_terms)  # add element to mass matrix
            j += 1
        i += 1
    return output
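Since Series (example #5) and VSEA both build the mass matrix from the same individual matrix, a tiny instance makes a useful cross-check. This sketch assumes both functions and their helpers are importable and the indMC values are made up; note the two snippets fill (row, column) in opposite orders, so one result may be the transpose of the other.

import numpy as np

indMC = np.array([[0.9, 0.2],
                  [0.1, 0.8]])               # made-up column-stochastic individual matrix
M_series = np.array(Series(2, 2, indMC))
M_vsea = np.array(VSEA(2, 2, indMC))
print(np.max(np.abs(M_series - M_vsea)))     # ~0 if the indexing conventions agree
print(np.max(np.abs(M_series - M_vsea.T)))   # ~0 if one is the transpose of the other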