Example #1
# Imports assumed by this snippet (in particular, 'ev' is taken to be numexpr.evaluate):
import numpy as np
from numpy import sum, mean, sqrt, outer  # numpy's sum/mean/sqrt/outer are used unqualified below
from numexpr import evaluate as ev


def entropy_conf(PP):

    N = PP.shape[0]
    connectivity = sum(PP, 1)
    avg_conn = mean(connectivity)
    #Lagrangians
    z = connectivity / (sqrt(avg_conn * N))
    old_z = z
    loops = 10000
    precision = 1e-5

    for idx in range(loops):
        zT = z[:, np.newaxis]
        D = ev("(zT * z) + 1.")
        UD = ev("z/D")
        del D
        for i in range(N):
            UD[i, i] = 0.
        z = connectivity / sum(UD, 1)
        rz = ev("abs(1.-z/old_z)")
        rz[np.isinf(rz)] = 0.
        rz[np.isnan(rz)] = 0.

        if max(rz) < precision:
            break
        old_z = z
    z2 = outer(z, z)
    for i in range(N):
        z2[i, i] = 0.
    P = z2 / (z2 + 1)
    Q = 1. - P
    S = -ev("sum(log(P**P) + log(Q**Q) )") / 2.
    print "number of loops ", idx + 1
    return S
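A minimal usage sketch on a toy symmetric 0/1 adjacency matrix, assuming the imports listed above:

A = np.array([[0., 1., 1., 0.],
              [1., 0., 1., 1.],
              [1., 1., 0., 0.],
              [0., 1., 0., 0.]])
# entropy of the maximum-entropy (configuration-model) ensemble fitted to A's degree sequence
print(entropy_conf(A))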
Example #2
def giulia_config_entropy(edgelist):
    # Assumed imports: timeit, networkx as nx, plus the numpy/numexpr names from Example #1.

    start = timeit.default_timer()

    g = nx.read_edgelist(edgelist, delimiter=',')
    nodes = list(g.nodes())
    nodes.sort()

    PP = np.array(nx.to_numpy_array(g, nodelist=nodes))  # to_numpy_matrix was removed in networkx 3.x

    N = PP.shape[0]
    connectivity = sum(PP, 1)
    avg_conn = mean(connectivity)
    #Lagrangians
    z = connectivity / (sqrt(avg_conn * N))
    old_z = z
    loops = 10000
    precision = 1

    for idx in range(loops):
        zT = z[:, np.newaxis]
        D = ev("(zT * z) + 1.")
        UD = ev("z/D")
        del D
        for i in range(N):
            UD[i, i] = 0.
        z = connectivity / sum(UD, 1)
        rz = ev("abs(1.-z/old_z)")
        rz[np.isinf(rz)] = 0.
        rz[np.isnan(rz)] = 0.

        if max(rz) < precision:
            break
        old_z = z
    z2 = outer(z, z)
    for i in range(N):
        z2[i, i] = 0.
    P = z2 / (z2 + 1)
    Q = 1. - P
    S = -ev("sum(log(P**P) + log(Q**Q) )") / 2.
    # print "number of loops ", idx+1
    stop = timeit.default_timer()


    output = dict()
    output['num_nodes'] = N
    output['num_edges'] = len(g.edges())
    output['giulia_config_entropy'] = S
    output['runtime'] = stop-start
    output['edgelist'] = edgelist

    return output
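A minimal usage sketch with a tiny, hypothetical edge-list file in the comma-separated format read above:

with open('toy_edges.csv', 'w') as f:
    f.write("a,b\nb,c\na,c\nc,d\n")

result = giulia_config_entropy('toy_edges.csv')
print(result['num_nodes'], result['num_edges'], result['giulia_config_entropy'])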
Example #3
    def getGramMatrix(self, A, B, K2=None, w=None):
        # RBF Gram matrix with per-dimension (squared) bandwidths self.bw2, using the
        # expansion ||a - b||^2_Q = aQa' + bQb' - 2 aQb'. Assumed helpers: asmatrix and
        # diagflat from numpy, mul = numpy.multiply, ev = numexpr.evaluate.
        Q = asmatrix(diagflat(1.0 / self.bw2))

        AQ = A * Q
        K = mul(AQ, A).sum(1) + mul(B * Q, B).sum(1).T
        K -= 2.0 * AQ * B.T

        if K2 is not None:
            K = w * K + (1 - w) * K2

        K = ev('exp(-0.5 * K)')

        return asmatrix(K)
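The same computation as a self-contained sketch on plain arrays (the rbf_gram name and bw2 argument are illustrative; ev is assumed to be numexpr.evaluate):

import numpy as np
from numexpr import evaluate as ev

def rbf_gram(A, B, bw2):
    """RBF Gram matrix between the rows of A (n x d) and B (m x d)."""
    Q = np.diag(1.0 / bw2)
    AQ = A @ Q
    # squared weighted distances via aQa' + bQb' - 2 aQb'
    K = (AQ * A).sum(1)[:, None] + (B @ Q * B).sum(1)[None, :] - 2.0 * AQ @ B.T
    return ev('exp(-0.5 * K)')

A = np.random.rand(5, 3)
B = np.random.rand(4, 3)
print(rbf_gram(A, B, bw2=np.ones(3)).shape)  # (5, 4)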
Example #4
    def _dualFunction(self, params):
        # Dual objective of a REPS-style update: params packs the value-function weights
        # theta followed by the temperature eta. Assumed numpy names: asmatrix, zeros,
        # finfo, double, log, mul = numpy.multiply; ev = numexpr.evaluate.
        theta = asmatrix(params[0:self.numFeatures]).T
        eta = params[-1]
        epsilon = self.epsilonAction

        V = self.PHI_S * theta
        VHat = self.PHI_HAT * theta

        advantage = self.Q - V
        maxAdvantage = advantage.max()
        QNorm = self.Q - maxAdvantage
        advantage = (QNorm - V) / eta

        g = 0
        gD = zeros((self.numFeatures + 1,))

        if advantage.max() > 500:
            g = 1e30 - eta
            gD[-2] = -1
            return g, gD

        expAdvantage = ev('exp(advantage)')
        sumExpAdvantage = expAdvantage.sum()

        realmin = finfo(double).tiny
        if sumExpAdvantage < realmin:
            sumExpAdvantage = realmin

        gLogPart = (1.0 / self.numSamples) * sumExpAdvantage

        g += eta * log(gLogPart) + VHat + maxAdvantage
        g += eta * epsilon + self.alphaL2ThetaPunishment * (theta.T * theta)

        # gradient
        if (eta * sumExpAdvantage) == 0:
            gDEta = 1e100
        else:
            gDEta = epsilon + log(gLogPart) - \
                    mul(expAdvantage, QNorm - V).sum() / (eta * sumExpAdvantage)
        gD[-1] = gDEta

        gDTheta = self.PHI_HAT + mul(-self.PHI_S, expAdvantage).sum(0) / \
            sumExpAdvantage + 2 * self.alphaL2ThetaPunishment * theta.T
        gD[0:self.numFeatures] = gDTheta

        return g, 0.5 * gD
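Subtracting maxAdvantage before exponentiating is the usual log-sum-exp stabilisation; a minimal sketch of the identity it relies on, with hypothetical numbers:

import numpy as np

adv = np.array([1000.0, 999.0, 998.0])   # hypothetical advantages
eta = 2.0

# naive eta * log(mean(exp(adv / eta))) overflows; shifting by the max does not change the value
m = adv.max()
stable = eta * (m / eta + np.log(np.mean(np.exp((adv - m) / eta))))
print(stable)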
Example #5
def _get_fee_value(transaction, fee_expression):
    fee_expression = fee_expression.replace('FEE', str(transaction.grille.fee))
    return str(float(ev(fee_expression)))
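A minimal illustration of the substitution-then-evaluate step with a hypothetical template (ev is assumed to be numexpr.evaluate, as in the other examples; 'FEE * 100' stands in for a real fee expression):

from numexpr import evaluate as ev

fee_expression = 'FEE * 100'
fee_expression = fee_expression.replace('FEE', str(0.025))
print(str(float(ev(fee_expression))))  # '2.5'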
Example #6
def _calculate_real_revenue_of_each_participant_of_transaction(json_expression, fee_value):
    for e, val in json_expression.items():
        json_expression.update({e: '{0}{1}'.format(val[:1], ev('{0}*{1}'.format(val[1:], fee_value)))})
    return json_expression
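A hypothetical illustration: each value keeps its leading sign/operator character and its numeric part is scaled by the fee (ev again assumed to be numexpr.evaluate):

shares = {'seller': '+0.75', 'provider': '-0.25'}   # hypothetical participant shares
print(_calculate_real_revenue_of_each_participant_of_transaction(shares, 4.0))
# -> {'seller': '+3.0', 'provider': '-1.0'}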
Example #7
    def _computeWeightingFromThetaAndEta(self, theta, eta):
        # Softmax weights over samples, stabilised by subtracting the maximum advantage
        # before exponentiating (max and ev assumed to be numpy.max and numexpr.evaluate).
        advantage = self.Q - self.PHI_S * theta
        maxAdvantage = max(advantage)

        w = ev('exp((advantage - maxAdvantage) / eta)')
        return w / w.sum()
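The same weighting as a self-contained sketch on a plain array (hypothetical inputs; ev assumed to be numexpr.evaluate):

import numpy as np
from numexpr import evaluate as ev

advantage = np.array([10.0, 12.0, 9.0])   # hypothetical per-sample advantages
eta = 0.5
maxAdvantage = advantage.max()

w = ev('exp((advantage - maxAdvantage) / eta)')
w = w / w.sum()
print(w)  # the shift by maxAdvantage cancels in the normalisation; it only prevents overflow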
Example #8
def giulia_spatial_entropy(edgelist, nodelist):
    # Assumed imports: time, math, networkx as nx, numpy as np, pandas as pd,
    # tqdm (from tqdm import tqdm), and numexpr.evaluate as ev.

    start = time.time()

    g = nx.read_edgelist(edgelist, delimiter=',')
    nodes = list(g.nodes())
    nodes.sort()

    PP = np.array(nx.to_numpy_array(g, nodelist=nodes))  # to_numpy_matrix was removed in networkx 3.x

    n = len(nodes)
    print("I'm here...Number:", str(n))
    distance = np.zeros((n, n))

    ens_id_expr_df = pd.read_csv(nodelist, names=['ens_id', 'expr_val'])
    ens_id_expr_df = ens_id_expr_df.set_index('ens_id')

    ens_id_expr_map = ens_id_expr_df.to_dict()['expr_val']

    for row in tqdm(range(n)):  # first tqdm
        for col in range(n):
            #try:
            x = ens_id_expr_map[nodes[row]]
            y = ens_id_expr_map[nodes[col]]
            distance[row][col] = math.fabs(x - y)

            #except KeyError:
            #distance[row][col] = 0

    Nbin = int(math.sqrt(n) + 1)

    print("before linear binning...")
    #linear binning
    mi, ma = np.min(distance[distance > 0]), np.max(distance)
    limiti = np.linspace(mi, ma, Nbin + 1)
    limiti[-1] += 0.1 * ma
    limiti[0] -= 0.1 * mi

    b = np.searchsorted(limiti, distance) - 1

    print("before massimo...")
    massimo = np.max(b) + 1

    # BC gives how many links fall in each bin
    BC = np.array([np.sum(b[PP > 0] == i) / 2 for i in range(massimo)])

    N = PP.shape[0]

    connectivity = np.sum(PP, 1)
    avg_conn = np.mean(connectivity)

    print("before lagragian...")
    #Lagrangians
    z = connectivity / (math.sqrt(avg_conn * N))
    w = BC / (avg_conn * N)

    old_z = z
    old_w = w

    loops = 5  # CHANGE to 10000 again

    precision = 1E-5
    #precision = 1E-3 # CHANGED NOW!

    for idx in tqdm(range(loops)):  # second tqdm
        bigW = w.take(b)

        for i in range(N):
            bigW[i, i] = 0.

        U = ev("bigW * z")
        UT = U.T
        D = ev("(UT * z) + 1.")

        UD = ev("U/D")

        del D, U, UT

        for i in range(N):
            UD[i, i] = 0.

        z = connectivity / np.sum(UD, 1)

        zt = z[:].reshape(N, 1)
        D2 = ev("(z*zt) / ( bigW *(z*zt) + 1.)")

        B2 = np.array(
            [np.sum(np.where(b == i, D2, 0.)) for i in range(len(w))]) / 2.

        print("And calculating B2 AND D2 done!!!!! inside for loop out of 5")

        w = np.where((BC != 0) & (B2 != 0), BC / B2, 0)
        rz = ev("abs(1.-z/old_z)")
        rw = ev("abs(1.-w/old_w)")
        rz[np.isinf(rz)] = 0.
        rw[np.isinf(rw)] = 0.
        rz[np.isnan(rz)] = 0.
        rw[np.isnan(rw)] = 0.

        if max(rz) < precision and max(rw) < precision:
            break

        old_z = z
        old_w = w

    print("JUST OUT OF THE BIG FORR LOOP...!")

    bigW = w.take(b)
    for i in range(N):
        bigW[i, i] = 0.

    z2 = bigW * np.outer(z, z)
    P = z2 / (z2 + 1)
    Q = 1. - P

    print("And calculations done!!!!!")
    S = -1 * (np.sum(np.log(P**P) + np.log(Q**Q))) / 2.
    #    S = -1*(np.sum((P*np.log(P)) + (Q*np.log(Q) )))/2.
    print("Done S  ....!!!!!")
    stop = time.time()

    output = dict()
    output['num_nodes'] = n
    output['num_edges'] = len(g.edges())
    output['giulia_spatial_entropy'] = S
    output['runtime'] = stop - start
    output['nodelist'] = nodelist
    output['edgelist'] = edgelist

    return output
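A minimal usage sketch with tiny, hypothetical input files matching the formats read above (a comma-separated edge list and a node,expression-value table):

with open('toy_edges.csv', 'w') as f:
    f.write("g1,g2\ng2,g3\ng1,g3\n")
with open('toy_nodes.csv', 'w') as f:
    f.write("g1,0.1\ng2,0.5\ng3,0.9\n")

result = giulia_spatial_entropy('toy_edges.csv', 'toy_nodes.csv')
print(result['num_nodes'], result['giulia_spatial_entropy'])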
Example #9
def giulia_spatial_entropy(edgelist, nodelist):
    # Variant of Example #8 that uses numexpr throughout. Assumed imports: timeit, math,
    # networkx as nx, pandas as pd, plus the numpy/numexpr names from Example #1
    # (including linspace, searchsorted and array).

    start = timeit.default_timer()

    g = nx.read_edgelist(edgelist, delimiter=',')
    nodes = list(g.nodes())
    nodes.sort()

    PP = np.array(nx.to_numpy_array(g, nodelist=nodes))  # to_numpy_matrix was removed in networkx 3.x

    n = len(nodes)
    distance = np.zeros((n, n))

    ens_id_expr_df = pd.read_csv(nodelist, names=['ens_id', 'expr_val'])
    ens_id_expr_df = ens_id_expr_df.set_index('ens_id')
    ens_id_expr_map = ens_id_expr_df.to_dict()['expr_val']

    for row in range(n):
        for col in range(n):
            x = ens_id_expr_map[nodes[row]]
            y = ens_id_expr_map[nodes[col]]
            distance[row][col] = math.fabs(x-y)

    Nbin = int(math.sqrt(n)+1)

    #linear binning
    mi, ma = np.min(distance[distance > 0]), np.max(distance)
    limiti = linspace(mi, ma, Nbin + 1)
    limiti[-1] += 0.1 * ma
    limiti[0] -= 0.1 * mi

    b = searchsorted(limiti, distance) - 1
    massimo = np.max(b) + 1
    # BC (links per distance bin) as an array, so it can be scaled and compared element-wise below
    BC = array([sum(b[PP > 0] == i) / 2 for i in range(massimo)])

    N = PP.shape[0]
    connectivity = sum(PP, 1)
    avg_conn = mean(connectivity)

    #Lagrangians
    z = connectivity / (sqrt(avg_conn * N))
    w = BC / (avg_conn * N)

    old_z = z
    old_w = w

    loops = 10000
    precision = 1


    for idx in range(loops):
        bigW = w.take(b)

        for i in range(N):
            bigW[i, i] = 0.

        U = ev("bigW * z")
        UT = U.T
        D = ev("(UT * z) + 1.")

        UD = ev("U/D")

        del D, U, UT

        for i in range(N):
            UD[i, i] = 0.

        z = connectivity / sum(UD, 1)

        zt = z[:].reshape(N, 1)
        D2 = ev("(z*zt) / ( bigW *(z*zt) + 1.)")

        # pass local_dict explicitly so numexpr can resolve names inside the comprehension
        B2 = array([ev("sum(where(b==i,D2,0.))",
                       local_dict={'b': b, 'i': i, 'D2': D2})
                    for i in range(len(w))]) / 2.

        w = ev("where( (BC!=0) & (B2!=0), BC/B2, 0 )")
        rz = ev("abs(1.-z/old_z)")
        rw = ev("abs(1.-w/old_w)")
        rz[np.isinf(rz)] = 0.
        rw[np.isinf(rw)] = 0.
        rz[np.isnan(rz)] = 0.
        rw[np.isnan(rw)] = 0.

        if max(rz) < precision and max(rw) < precision:
            break

        old_z = z
        old_w = w


    bigW = w.take(b)
    for i in range(N):
        bigW[i, i] = 0.

    z2 = bigW * outer(z, z)
    P = z2 / (z2 + 1)
    Q = 1. - P
    S = -ev("sum(log(P**P) + log(Q**Q) )") / 2.
    # print "number of loops ", idx+1


    stop = timeit.default_timer()


    output = dict()
    output['num_nodes'] = n
    output['num_edges'] = len(g.edges())
    output['giulia_spatial_entropy'] = S
    output['runtime'] = stop-start
    output['nodelist'] = nodelist
    output['edgelist'] = edgelist

    return output
Example #10
def entropy_dist(PP, distance, Nbin):
    # Assumed imports: numpy as np plus linspace, searchsorted, array, sum, mean, sqrt,
    # outer from numpy, and numexpr.evaluate as ev, as in Example #1.

    #linear binning
    mi, ma = np.min(distance[distance > 0]), np.max(distance)
    limiti = linspace(mi, ma, Nbin + 1)
    limiti[-1] += 0.1 * ma
    limiti[0] -= 0.1 * mi

    b = searchsorted(limiti, distance) - 1
    massimo = np.max(b) + 1
    # BC (links per distance bin) as an array, so it can be scaled and compared element-wise below
    BC = array([sum(b[PP > 0] == i) / 2 for i in range(massimo)])

    N = PP.shape[0]
    connectivity = sum(PP, 1)
    avg_conn = mean(connectivity)

    #Lagrangians
    z = connectivity / (sqrt(avg_conn * N))
    w = BC / (avg_conn * N)

    old_z = z
    old_w = w

    loops = 10000
    precision = 1e-5

    for idx in range(loops):
        bigW = w.take(b)

        for i in range(N):
            bigW[i, i] = 0.

        U = ev("bigW * z")
        UT = U.T
        D = ev("(UT * z) + 1.")

        UD = ev("U/D")

        del D, U, UT

        for i in range(N):
            UD[i, i] = 0.

        z = connectivity / sum(UD, 1)

        zt = z[:].reshape(N, 1)
        D2 = ev("(z*zt) / ( bigW *(z*zt) + 1.)")

        B2 = array([ev("sum(where(b==i,D2,0.))") for i in range(len(w))]) / 2.

        w = ev("where( (BC!=0) & (B2!=0),BC/B2,0 )")
        rz = ev("abs(1.-z/old_z)")
        rw = ev("abs(1.-w/old_w)")
        rz[np.isinf(rz)] = 0.
        rw[np.isinf(rw)] = 0.
        rz[np.isnan(rz)] = 0.
        rw[np.isnan(rw)] = 0.

        if max(rz) < precision and max(rw) < precision:
            break

        old_z = z
        old_w = w

    bigW = w.take(b)
    for i in range(N):
        bigW[i, i] = 0.

    z2 = bigW * outer(z, z)
    P = z2 / (z2 + 1)
    Q = 1. - P
    S = -ev("sum(log(P**P) + log(Q**Q) )") / 2.
    print "number of loops ", idx + 1

    return S
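A minimal usage sketch on a toy triangle graph with hypothetical 1-D node coordinates, assuming the imports listed above:

PP = np.array([[0., 1., 1.],
               [1., 0., 1.],
               [1., 1., 0.]])
pos = np.array([0.0, 0.4, 1.0])                   # hypothetical coordinates
distance = np.abs(pos[:, None] - pos[None, :])    # pairwise distances
print(entropy_dist(PP, distance, Nbin=2))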