Example #1
    def initialize(self, K):
        """Accepts K number of topics in document.
            Initializes all of the hidden variable arrays now that it knows dimensions
            of topics, vocabulary, etc.
        """
        assert self.documents is not None

        # require more documents than topics
        # so that the problem is not singular
        assert self.D > K

        self.K = K

        D = self.D
        W = self.W

        # "it suffices to fix alpha to uniform 1/K"
        # start alpha close to uniform so that the topics are more evenly distributed
        # good for small datasets
        self.alpha = np.ones((K,)) * (3.0 / K)

        # Initialize the variational distribution q(beta|lambda)
        self.beta = topiclib.initialize_beta(K, W)

        document_Nds = self.num_words_per(self.documents)
        self.phi = [(np.ones((document_Nds[d], K))*(1.0/K)) for d in xrange(D)]

        self.gamma = np.ones((D, K)) * (1.0 / K)
        graphlib.initialize_random(self.gamma)

        self.is_initialized = True
Example #2
 def __init__(self, neuralLayerNum, numNodes, nextNeuralLayerNumNodes):
     
     if nextNeuralLayerNumNodes is not None:
         self.numNodes = numNodes+1
         self.isOutputLayer = False
         self.transMat = np.ones([self.numNodes, nextNeuralLayerNumNodes+1], dtype=float)
         
         self.rpropDelta = DEL_INIT * np.ones([self.numNodes, nextNeuralLayerNumNodes+1], dtype=float)
         self.rpropDelEDelW = np.ones([self.numNodes, nextNeuralLayerNumNodes+1], dtype=float)
         self.rpropEdgesDelta = np.zeros([self.numNodes, nextNeuralLayerNumNodes+1], dtype=float)
     else:
         #Output layer
         self.numNodes = numNodes
         self.isOutputLayer = True
         self.transMat = None
         
         self.rpropDelta = None
         self.rpropDelEDelW = None
         self.rpropEdgesDelta = None
     
     self.neuralLayerNum = neuralLayerNum
     self.nodesInput = np.zeros([self.numNodes], dtype=float)
     self.nodesOutput = np.zeros([self.numNodes], dtype=float)
     self.nodesDelta = np.zeros([self.numNodes], dtype=float)
     
     #set the bias node output to 1. and it never changes
     if not self.isOutputLayer:
         self.nodesOutput[-1] = 1.
Example #3
File: rewi.py Project: sveitser/of
    def update_link(self, i):
        n = self.n
        neis = self.graph.neighbors(i)
        degs = np.array(self.graph.degree().values())
        candidates = np.ones(n, dtype=int)
        candidates[i] = 0
        #candidates[neis] = 0
        candidates[neis] = [0] * len(neis) # pypy
        oi = self.opinions[i]

        if self.types[i] == 0:
            candidates[self.opinions != oi] = 0
        else:
            candidates[self.opinions == oi] = 0

        #candidates = np.nonzero(candidates)[0]
        candidates = [k for k in range(n) if candidates[k] == 1] # pypy
        degs = degs[candidates]
        
        if candidates:

            #newnei = candidates[randint(len(candidates))]
            newnei = candidates[randweight(degs/float(sum(degs)))]

            if neis:
                oldnei = neis[randint(len(neis))]

                # if oldnei would become isolated don't do anything
                if len(list(self.graph.edge[oldnei].keys())) <= 1:
                    return
                                
                self.graph.add_edge(i, newnei)
                self.graph.remove_edge(i, oldnei)
Example #4
def lda_recalculate_log_beta(text, log_beta, log_phi):
    """
    update topics: β_{k,w} (new) ∝ Σ_d Σ_n 1(w_{d,n} = w) · φ^d_{n,k}

    Accepts log beta matrix (KxW) and 
        log phi, a D-length list of (N x K) matrices.
    """
    (K,W) = log_beta.shape
    D = len(log_phi)

    # todo: jperla: should use -inf or a different really small number?!
    log_beta[:,:] = np.ones(log_beta.shape) * float('-1000')
    
    if isinstance(text[0], np.ndarray):
        for d in xrange(D):
            for n,word in enumerate(text[d]):
                for k in xrange(K):
                    log_beta[k,word] = np.logaddexp(log_beta[k,word], log_phi[d][n][k])
    else:
        for d in xrange(D):
            for n,word,count in iterwords(text[d]):
                for k in xrange(K):
                    log_beta[k,word] = np.logaddexp(log_beta[k,word], log_phi[d][n][k])
    graphlib.log_row_normalize(log_beta)
    return log_beta
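A note on the log-space bookkeeping above: np.logaddexp(a, b) computes log(exp(a) + exp(b)) in a numerically stable way, so the triple loop accumulates the docstring's sum without ever leaving log space, and -1000 plays the role of log 0 because exp(-1000) underflows to exactly 0.0. A minimal illustration (not part of the original file):

acc = -1000.0                        # stands in for log(0)
for lp in [np.log(0.2), np.log(0.5)]:
    acc = np.logaddexp(acc, lp)      # accumulate probabilities in log space
print(np.exp(acc))                   # -> 0.7, up to floating point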
Example #5
File: tsp.py Project: M4573R/algo2
def solve(data,size):
    max_size=2**size
    A = np.ones([max_size,size],float)*sys.float_info.max
    A[0,0] = 0
    for m in range(2,size+1):
        subsets = it.combinations(range(1,size),m-1)

        for s in subsets:
            s = (0,)+s
            v = get_value(s)
            for j in s:
                if 0 == j: continue
                vmin = sys.float_info.max
                v_tmp = get_value_without_v(s,j)
                for k in s:
                    if k == j: continue

                    v_cand = A[v_tmp,k] + get_dist(data,k,j)
                    if v_cand < vmin:
                        vmin = v_cand
                A[v,j] = vmin

    rst = sys.float_info.max
    v = get_value(range(size))
    for j in range(size):
        v_cand = A[v,j] + get_dist(data,j,0)
        if v_cand < rst:
            rst = v_cand

    return rst
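The helpers get_value, get_value_without_v, and get_dist live elsewhere in tsp.py and are not shown here. Given that A has 2**size rows and A[0,0] = 0 is the base case, a plausible (purely hypothetical) reading is that a vertex subset is encoded as a bitmask over the non-start vertices:

def get_value(s):
    # hypothetical sketch: encode the subset s (which always contains the
    # start vertex 0) as a bitmask over vertices 1..size-1
    return sum(1 << (v - 1) for v in s if v != 0)

def get_value_without_v(s, j):
    # hypothetical sketch: the same bitmask with vertex j removed
    return get_value(s) & ~(1 << (j - 1))

Under that reading, solve() is the Held-Karp dynamic program: A[v, j] is the cost of the cheapest path that starts at vertex 0, visits exactly the subset encoded by v, and ends at j.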
Example #6
 def test_sub_where(self):
     from numpypy import where, ones, zeros, array
     a = array([1, 2, 3, 0, -3])
     v = a.view(self.NoNew)
     b = where(array(v) > 0, ones(5), zeros(5))
     assert (b == [1, 1, 1, 0, 0]).all()
     # where returns an ndarray regardless of the subtype of v
     assert not isinstance(b, self.NoNew)
Example #7
def calculate_EZZT_from_small_log_phis(phi1, phi2):
    """
        Accepts a big phi matrix (like ((Nd+Nc) x (K+J))
        Calculates E[ZdZdT].
        Returns the final matrix ((K+J) x (K+J)).

        (Also, E[ZdZdT] = (1/N2)(ΣNΣm!=nφd,nφd,mT  +  ΣNdiag{φd,n})
    """
    Nd,K = phi1.shape
    Nc,J = phi2.shape
    (Ndc, KJ) = (Nd+Nc, K+J)
    inner_sum = np.zeros((KJ, KJ))

    p1 = np.matrix(phi1)
    p2 = np.matrix(phi2)

    for i in xrange(K):
        for j in xrange(K):
            m = logdotexp(np.matrix(p1[:,i]), np.matrix(p1[:,j]).T)
            m += np.diag(np.ones(Nd) * -1000)
            inner_sum[i,j] = logsumexp(m.flatten())

    for i in xrange(J):
        for j in xrange(J):
            m = logdotexp(np.matrix(p2[:,i]), np.matrix(p2[:,j]).T)
            m += np.diag(np.ones(Nc) * -1000)
            inner_sum[K+i,K+j] = logsumexp(m.flatten())

    for i in xrange(K):
        for j in xrange(J):
            m = logdotexp(np.matrix(p1[:,i]), np.matrix(p2[:,j]).T)
            inner_sum[i,K+j] = logsumexp(m.flatten())

    for i in xrange(J):
        for j in xrange(K):
            m = logdotexp(np.matrix(p2[:,i]), np.matrix(p1[:,j]).T)
            inner_sum[K+i,j] = logsumexp(m.flatten())

    big_phi_sum = np.concatenate((logsumexp(phi1, axis=0),
                                  logsumexp(phi2, axis=0)), axis=0)
    ensure(big_phi_sum.shape == (KJ,))
    for i in xrange(KJ):
        inner_sum[i,i] = logsumexp([inner_sum[i,i], big_phi_sum[i]])

    inner_sum -= np.log(Ndc * Ndc)
    return inner_sum
Example #8
 def test_ones(self):
     from numpypy import ones
     a = ones(3)
     assert len(a) == 3
     assert a[0] == 1
     raises(IndexError, "a[3]")
     a[2] = 4
     assert a[2] == 4
Example #9
def initialize_uniform(matrix):
    """Accepts a matrix with a defined shape.
        Initializes it to be uniform probability on each row.
        Each row on the last dimension sums to 1.
        Returns the original matrix, modified in place.
    """
    nrows, ncols = matrix.shape
    matrix[:, :] = 1.0 / ncols
    return matrix
Example #10
    def test_vstack(self):
        import numpypy as np

        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        c = np.vstack((a, b))
        assert np.array_equal(c, [[1, 2, 3],
                                  [2, 3, 4]])

        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        c = np.vstack((a, b))
        assert np.array_equal(c, [[1],
                                  [2],
                                  [3],
                                  [2],
                                  [3],
                                  [4]])

        for shape1, shape2 in [[(2, 1), (3, 1)],
                               [(2, 4), [3, 4]]]:
            a, b = np.ones(shape1), np.ones(shape2)
            assert np.all(np.vstack((a, b)) ==
                          np.ones((a.shape[0] + b.shape[0],
                                   a.shape[1])))

        #skip("https://bugs.pypy.org/issue1394")
        for shape1, shape2 in [[(3, 2, 4), (7, 2, 4)],
                               [(0, 2, 7), (10, 2, 7)],
                               [(0, 2, 7), (0, 2, 7)]]:
            a, b = np.ones(shape1), np.ones(shape2)
            assert np.all(np.vstack((a, b)) ==
                          np.ones((a.shape[0] + b.shape[0],
                                   a.shape[1],
                                   a.shape[2])))
Example #11
    def test_hstack(self):
        import numpypy as np
        a = np.array((1, 2, 3))
        b = np.array((2, 3, 4))
        c = np.hstack((a, b))
        assert np.array_equal(c, [1, 2, 3, 2, 3, 4])

        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        c = np.hstack((a, b))
        assert np.array_equal(c, [[1, 2],
                                  [2, 3],
                                  [3, 4]])

        for shape1, shape2 in [[(1, 2), (1, 3)],
                               [(4, 2), (4, 3)]]:
            a, b = np.ones(shape1), np.ones(shape2)
            assert np.all(np.hstack((a, b)) ==
                          np.ones((a.shape[0],
                                   a.shape[1] + b.shape[1])))

        #skip("https://bugs.pypy.org/issue1394")
        for shape1, shape2 in [[(2, 3, 4), (2, 7, 4)],
                               [(1, 4, 7), (1, 10, 7)],
                               [(1, 4, 7), (1, 0, 7)],
                               [(1, 0, 7), (1, 0, 7)]]:
            a, b = np.ones(shape1), np.ones(shape2)
            assert np.all(np.hstack((a, b)) ==
                          np.ones((a.shape[0],
                                   a.shape[1] + b.shape[1],
                                   a.shape[2])))
Example #12
    def test_dstack(self):
        import numpypy as np
        a = np.array((1, 2, 3))
        b = np.array((2, 3, 4))
        c = np.dstack((a, b))
        assert np.array_equal(c, [[[1, 2], [2, 3], [3, 4]]])

        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        c = np.dstack((a, b))
        assert np.array_equal(c, [[[1, 2]], [[2, 3]], [[3, 4]]])

        #skip("https://bugs.pypy.org/issue1394")
        for shape1, shape2 in [[(4, 2, 3), (4, 2, 7)],
                               [(7, 2, 0), (7, 2, 10)],
                               [(7, 2, 0), (7, 2, 0)]]:
            a, b = np.ones(shape1), np.ones(shape2)
            assert np.all(np.dstack((a, b)) ==
                          np.ones((a.shape[0],
                                   a.shape[1],
                                   a.shape[2] + b.shape[2])))

        for shape1, shape2 in [[(4, 2, 3, 5), (4, 2, 7, 5)],
                               [(7, 2, 0, 5), (7, 2, 10, 5)],
                               [(7, 2, 0, 5), (7, 2, 0, 5)]]:
            a, b = np.ones(shape1), np.ones(shape2)
            assert np.all(np.dstack((a, b)) ==
                          np.ones((a.shape[0],
                                   a.shape[1],
                                   a.shape[2] + b.shape[2],
                                   a.shape[3])))
Example #15
    def test_dstack(self):
        import numpypy as np
        a = np.array((1, 2, 3))
        b = np.array((2, 3, 4))
        c = np.dstack((a, b))
        assert np.array_equal(c, [[[1, 2], [2, 3], [3, 4]]])

        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        c = np.dstack((a, b))
        assert np.array_equal(c, [[[1, 2]], [[2, 3]], [[3, 4]]])

        #skip("https://bugs.pypy.org/issue1394")
        for shape1, shape2 in [[(4, 2, 3), (4, 2, 7)], [(7, 2, 0), (7, 2, 10)],
                               [(7, 2, 0), (7, 2, 0)]]:
            a, b = np.ones(shape1), np.ones(shape2)
            assert np.all(
                np.dstack((a, b)) == np.ones((a.shape[0], a.shape[1],
                                              a.shape[2] + b.shape[2])))

        for shape1, shape2 in [[(4, 2, 3, 5), (4, 2, 7, 5)],
                               [(7, 2, 0, 5), (7, 2, 10, 5)],
                               [(7, 2, 0, 5), (7, 2, 0, 5)]]:
            a, b = np.ones(shape1), np.ones(shape2)
            assert np.all(
                np.dstack((a, b)) == np.ones((a.shape[0], a.shape[1],
                                              a.shape[2] + b.shape[2],
                                              a.shape[3])))
Example #16
def calculate_big_log_phi(phi1, phi2):
    """ Pretends that two separate sets of D phi matrices (Nd x K) each
        are one big phi matrix.
        This is needed for the latent model.
        The trick is to make a big matrix with four quadrants.
        The top left quadrant has the phi1 matrix, the bottom right has phi2.
        The remaining two quadrants are filled with zeros.
    """
    (n1, k1) = phi1.shape
    (n2, k2) = phi2.shape
    big_phi = np.ones((n1 + n2, k1 + k2)) * float('-1000')
    big_phi[0:n1,0:k1] = phi1
    big_phi[n1:n1+n2,k1:k1+k2] = phi2
    return big_phi
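A quick shape check of the quadrant layout, assuming numpy is imported as np as in the snippet (illustrative values only):

phi1 = np.zeros((2, 3))                  # n1 x k1 log phis
phi2 = np.zeros((4, 5))                  # n2 x k2 log phis
big = calculate_big_log_phi(phi1, phi2)
assert big.shape == (6, 8)               # (n1+n2) x (k1+k2)
assert (big[:2, :3] == phi1).all()       # top-left quadrant
assert (big[2:, 3:] == phi2).all()       # bottom-right quadrant
assert (big[:2, 3:] == -1000).all()      # off-diagonal filler: log-space zero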
Example #17
 def __init__(self, neuralLayerNum, numNodes, nextNeuralLayerNumNodes):
     
     self.numNodes = numNodes
     self.neuralLayerNum = neuralLayerNum
     self.nodesInput = np.zeros([self.numNodes], dtype=float)
     self.nodesOutput = np.zeros([self.numNodes], dtype=float)
     self.nodesDelta = np.zeros([self.numNodes], dtype=float)
     
     if nextNeuralLayerNumNodes is not None:
         self.isOutputLayer = False
         self.transMat = np.ones([self.numNodes, nextNeuralLayerNumNodes], dtype=float)
     else:
         self.isOutputLayer = True
         self.transMat = None
Example #18
def dijkstra(graph,source):
    n = len(graph.vertices)
    sp = np.ones([n],'float')*sys.float_info.max
    sp[source]=0
    visited = np.zeros([n],'bool')
    all_visited=False
    while (not all_visited):
        next_id = get_next(sp,visited,n)
        if next_id >= 0:
            v = graph.vertices[next_id]
            for e in v.out_edges:
                w = e.tail
                sp[w.id] = min(sp[w.id],sp[next_id]+e.cost)
            visited[next_id]=True
        else:
            all_visited = True
    return sp
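get_next is not shown in this snippet; presumably it returns the index of the cheapest unvisited vertex, or a negative value once every reachable vertex has been settled. A minimal sketch under that assumption:

def get_next(sp, visited, n):
    # hypothetical helper: pick the unvisited vertex with the smallest
    # tentative distance; return -1 when none remains
    best = -1
    best_dist = sys.float_info.max
    for i in range(n):
        if not visited[i] and sp[i] < best_dist:
            best = i
            best_dist = sp[i]
    return best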
Example #19
def bellman_ford(graph,source):
    n = len(graph.vertices)
    A = np.ones([n+1,n],'float')*sys.float_info.max
    A[0,source] = 0
    for i in range(1,n+1):
        for v in graph.vertices:
            a1 = A[i-1,v.id]
            a2 = sys.float_info.max
            for e in v.in_edges:
                a3 = A[i-1,e.head.id] + e.cost
                a2 = a3 if a3 < a2 else a2
            A[i,v.id] = min(a1,a2)

    has_neg_cycles = False
    for i in range(n):
        if A[n-1,i] != A[n,i]:
            has_neg_cycles = True
            break

    return A[n-1] if not has_neg_cycles else []
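In the notation of the code, A[i, v] is the cost of the cheapest path from the source to v that uses at most i edges, filled in with the standard Bellman-Ford recurrence:

    A[0, \text{source}] = 0, \qquad A[0, v] = +\infty \ \text{for}\ v \neq \text{source}
    A[i, v] = \min\bigl( A[i-1, v],\ \min_{(u, v) \in E} ( A[i-1, u] + c_{uv} ) \bigr)

A negative cycle exists exactly when A[n, v] \neq A[n-1, v] for some v, in which case the function returns an empty result.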
Example #20
 def test_multidim_ones(self):
     from numpypy import ones
     a = ones((1, 2, 3))
     assert a[0, 1, 2] == 1.0
Example #21
    def initialize(self, Ku, Ks, Kb):
        """Accepts K number of topics in document.
            Initializes all of the hidden variable arrays now that it knows dimensions
            of topics, vocabulary, etc.
        """
        assert self.documents is not None
        assert Ku is not None
        assert Ks is not None
        assert Kb is not None

        K = Ku + Ks + Kb

        # require more documents than topics
        # so that the problem is not singular
        assert self.D > K

        self.K = K
        self.Ku = Ku
        self.Ks = Ks
        self.Kb = Kb

        self.Kc = self.Ku + self.Ks
        self.Kl = self.Ks + self.Kb

        W = self.W

        # Initialize the variational distribution q(beta|lambda)
        self.beta = topiclib.initialize_beta(K, W)

        # "it suffices to fix alpha to uniform 1/K"
        # initialize alpha close to uniform so that the topics are more evenly distributed
        # good for small datasets
        self.alphaU = np.ones((Ku,)) * (1.0 / Ku)
        self.alphaS = np.ones((Ks,)) * (1.0 / Ks)
        self.alphaB = np.ones((Kb,)) * (1.0 / Kb)

        # todo: not using this yet
        #self.alphaD = ...
        
        def uniform_phi(Nds, size):
            D = len(Nds)
            return [(np.ones((Nds[d], size)) * (1.0 / size)) for d in xrange(D)]

        document_Nds = self.num_words_per(self.documents)
        self.phiD = uniform_phi(document_Nds, self.Ku)
        comment_Nds = self.num_words_per(self.comments)
        self.phiC = uniform_phi(comment_Nds, self.Kc)
        labeled_Nds = self.num_words_per(self.labeled)
        self.phiL = uniform_phi(labeled_Nds, self.Kl)
        background_Nds = self.num_words_per(self.background)
        self.phiB = uniform_phi(background_Nds, self.Kb)

        self.num_document_words = sum(document_Nds)
        self.num_comment_words = sum(comment_Nds)
        self.num_labeled_words = sum(labeled_Nds)
        self.num_background_words = sum(background_Nds)

        biggest = float(max(self.num_document_words, self.num_comment_words,
                      self.num_labeled_words, self.num_background_words))
        self.document_multiplier = biggest / self.num_document_words
        self.comment_multiplier = biggest / self.num_comment_words
        self.labeled_multiplier = biggest / self.num_labeled_words
        self.background_multiplier = biggest / self.num_background_words

        self.gammaD = np.ones((self.D, self.Ku)) * (1.0 / self.Ku)
        self.gammaC = np.ones((self.D, self.Kc)) * (1.0 / self.Kc)
        self.gammaL = np.ones((self.L, self.Kl)) * (1.0 / self.Kl)
        self.gammaB = np.ones((self.B, self.Kb)) * (1.0 / self.Kb)
        graphlib.initialize_random(self.gammaD)
        graphlib.initialize_random(self.gammaC)
        graphlib.initialize_random(self.gammaL)
        graphlib.initialize_random(self.gammaB)

        self.eta = graphlib.random_normal(0, 2.0, (Ks,))
        self.sigma_squared = 0.5

        print 'eta start: {0}'.format(self.eta)

        self.is_initialized = True
Example #22
 def test_where(self):
     from numpypy import where, ones, zeros, array
     a = [1, 2, 3, 0, -3]
     a = where(array(a) > 0, ones(5), zeros(5))
     assert (a == [1, 1, 1, 0, 0]).all()
Example #23
 def test_where_invalidates(self):
     from numpypy import where, ones, zeros, array
     a = array([1, 2, 3, 0, -3])
     b = where(a > 0, ones(5), zeros(5))
     a[0] = 0
     assert (b == [1, 1, 1, 0, 0]).all()
Example #24
 def test_where_differing_dtypes(self):
     from numpypy import array, ones, zeros, where
     a = [1, 2, 3, 0, -3]
     a = where(array(a) > 0, ones(5, dtype=int), zeros(5, dtype=float))
     assert (a == [1, 1, 1, 0, 0]).all()
Example #25
 def uniform_phi(Nds, size):
     D = len(Nds)
     return [(np.ones((Nds[d], size)) * (1.0 / size)) for d in xrange(D)]
Example #26
	def __run(score_task_knapsack, m_tasks, tasks, mac):

		n = len(tasks)

		Tu = []
		Td = range(0, n)
		Pu = numpy.zeros(2, float)
		Z  = 0
		X  = numpy.zeros(n, int)
		Tc = []
		B  = numpy.ones(2, float)

		P  = numpy.zeros((n, 2), float)
		G  = numpy.zeros(n, float)

		U  = numpy.zeros(n, float)

		for x in range(0, n):
			P[x][0] = m_tasks[tasks[x]].CPU_usage / mac.free_CPU()
			P[x][1] = m_tasks[tasks[x]].mem_usage / mac.free_mem()

		keep_going = True

		cnt = math.sqrt(2)
		#w_cpu = 0.6
		#w_mem = 1 - w_cpu

		#print "MAC = %d, ntasks = %d" % (mac.machine_ID, n)

		while keep_going :
	
			# step 2
			del Tc
			Tc = []
			
			for i in Td:
				if P[i][0] <= (1. - Pu[0]) and P[i][1] <= (1. - Pu[1]):
					Tc.append(i)

			#print "2"

			# step 3
			# terminate if Tc = empty
			if len(Tc) == 0:
				keep_going = False
			else:

				# step 4
				# (a)
				if (numpy.dot(Pu, Pu) == 0.):
					for i in Tc:
						d    = sum(P[i])
						G[i] = (score_task_knapsack(m_tasks[tasks[i]], mac) * cnt)/d
				# (b)
				else:
					mod_Pu = math.sqrt(numpy.dot(Pu, Pu))
					E      = numpy.array(Pu * (1./mod_Pu))
				
					for i in Tc:
						d    = numpy.dot(P[i], E)
						G[i] = score_task_knapsack(m_tasks[tasks[i]], mac) / d

				#print "4"

				# step 5
				v_max = -1
				i_max = 0
				for i in Tc:
					if G[i] > v_max:
						v_max = G[i]
						i_max = i


				#print "5"
				# step 6
				Tu.append(i_max)
				Td.remove(i_max)
				Pu = Pu + P[i_max]
				Z  = Z + m_tasks[tasks[i_max]].CPU_usage


		#print "(%f, %f)" % (mac.capacity_CPU, mac.capacity_memory)
		#print Pu

		return Tu
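In step 4 each remaining candidate task is scored per unit of projected resource demand: when nothing has been packed yet (Pu = 0) the demand is measured against the diagonal direction, hence the \sqrt{2} factor, and otherwise along the direction of the resources already in use:

    G_i = \text{score}_i \cdot \sqrt{2} / (P_{i,\mathrm{cpu}} + P_{i,\mathrm{mem}}) \quad \text{if } P_u = 0
    G_i = \text{score}_i / (P_i \cdot P_u / \lVert P_u \rVert) \quad \text{otherwise}

Steps 5 and 6 then greedily pack the candidate with the largest G_i and update the used-resource vector Pu.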
Example #27
 def test_ones_long(self):
     from numpypy import ones, longlong
     a = ones(10, dtype=long)
     for i in range(10):
         assert isinstance(a[i], longlong)
         assert a[1] == 1
Example #28
    def test_ones_bool(self):
        from numpypy import ones, True_

        a = ones(10, dtype=bool)
        for i in range(10):
            assert a[i] is True_
Example #29
def add_bias(A):
    return np.hstack(( np.ones((A.shape[0],1)), A )) # Add 1 as bias.
Example #30
def add_bias(A):
    return np.hstack((np.ones((A.shape[0], 1)), A))  # Add 1 as bias.
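Both add_bias versions prepend a column of ones so that one learned weight per output can act as the bias term. A tiny usage check (assuming numpy is imported as np, as in the snippets):

A = np.zeros((3, 2))
B = add_bias(A)          # shape (3, 3); first column is all ones
assert B.shape == (3, 3) and (B[:, 0] == 1).all()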
Example #31
 def test_broadcast_setslice(self):
     from numpypy import zeros, ones
     a = zeros((100, 100))
     b = ones(100)
     a[:, :] = b
     assert a[13, 15] == 1
Example #32
 def test_broadcast_call2(self):
     from numpypy import zeros, ones
     a = zeros((4, 1, 5))
     b = ones((4, 3, 5))
     b[:] = (a + a)
     assert (b == zeros((4, 3, 5))).all()
Example #33
import numpy
import time

def pyloop(a, b, c):
    N = len(a)
    assert N == len(b) == len(c)
    res = numpy.zeros(N)
    for i in range(N):
        res[i] = a[i] + b[i] * c[i]
    return res


def c_loop(a, b, c):
    return numpy.add(a, numpy.multiply(b, c))


a = numpy.zeros(10000000)
b = numpy.ones(10000000)
c = numpy.ones(10000000)

x = time.clock()
res1 = pyloop(a, b, c)
y = time.clock()
print 'pyloop: %.4f secs' % (y - x)

x = time.clock()
res2 = c_loop(a, b, c)
y = time.clock()
print 'c_loop: %.4f secs' % (y - x)

assert (res1 == res2).all()