Example #1
def alpha(t):
    # forward probabilities (alpha recursion of the HMM forward-backward algorithm)
    if t == 1:
        return self.pInitial() * np.array([self.pEmission(z, X[0]) for z in self.states])

    evidence = np.array([self.pEmission(z, X[t-1]) for z in self.states])
    trans = np.array(M(t).T * np.matrix(alpha(t-1)).T)[..., 0]
    return evidence * trans
Example #2
def grad(func, para, args):
    # central-difference approximation of the gradient of func at para
    epi = 0.000001
    grad_array = array([0.0] * para.shape[0])
    for i in range(para.shape[0]):
        para_for = array(para, dtype=float)
        para_for[i] = para[i] + epi
        para_back = array(para, dtype=float)
        para_back[i] = para[i] - epi
        fx1 = func(para_for, *args)
        fx2 = func(para_back, *args)
        grad_array[i] = (fx1 - fx2) / (2 * epi)
    return grad_array
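A minimal check of the finite-difference gradient above, assuming numpy's array is imported as the excerpt implies; the quadratic test function and its expected gradient are illustrative only.

from numpy import array

def quadratic(p, w):
    # illustrative test function: f(p) = sum(w * p^2), gradient 2 * w * p
    return (w * p ** 2).sum()

g = grad(quadratic, array([1.0, 2.0]), args=(3.0,))
print(g)  # approximately [6.0, 12.0]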
Example #3
    def generate(self, T):
        # sample a length-T sequence of hidden states Z and observations X
        assert T >= 1
        try:
            Z = [self.sampleInitial()]
            X = [self.sampleEmision(Z[0])]

            for t in xrange(1, T):
                Z.append(self.sampleTransition(Z[t-1], X[t-1]))
                X.append(self.sampleEmision(Z[t]))

            return np.array(Z), np.array(X)
        except ZeroDivisionError:
            return self.generate(T)
Example #4
def stoch_block_parameterized(blocks, p_cc, p_in):
    pMatrix = np.zeros((len(blocks), len(blocks)))
    comms = {}
    k = 0
    for i in range(len(blocks)):
        for j in range(blocks[i]):
            comms[k] = i
            k += 1
    for i in range(len(pMatrix)):
        for j in range(len(pMatrix)):
            if i == j:
                pMatrix[i][j] = p_in
            else:
                pMatrix[i][j] = p_cc
    G = nx.generators.stochastic_block_model(blocks, pMatrix)
    A = np.array(nx.to_numpy_matrix(G))

    def parameterized(cc_weight):
        B = deepcopy(A)
        for i in range(len(A)):
            for j in range(len(A)):
                if comms[i] != comms[j] and A[i][j] != 0:
                    B[i][j], B[j][i] = cc_weight, cc_weight
        return B

    return A, parameterized
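A hedged usage sketch: the block sizes and edge probabilities below are made-up values, and the function assumes numpy as np, networkx as nx, and deepcopy are already imported, as in the excerpt.

# three blocks of 10 nodes; dense within blocks, sparse between them
A, weighted = stoch_block_parameterized([10, 10, 10], p_cc=0.02, p_in=0.3)

# re-weight all cross-community edges to 0.5, leaving within-community edges at 1
B = weighted(0.5)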
Example #5
def linear(x, fc, alpha=None, beta=None):

    Y = x[:]

    if alpha is None or beta is None:

        initial_values = array([0.3, 0.1])
        boundaries = [(0, 1), (0, 1)]
        type = 'linear'

        parameters = min_opt_rmse(RMSE,
                                  para=initial_values,
                                  args=(Y, type),
                                  bounds=boundaries,
                                  approx_grad=True)
        alpha, beta = parameters

    a = [Y[0]]
    b = [Y[1] - Y[0]]
    y = [a[0] + b[0]]
    rmse = 0

    for i in range(len(Y) + fc):

        if i == len(Y):
            Y.append(a[-1] + b[-1])

        a.append(alpha * Y[i] + (1 - alpha) * (a[i] + b[i]))
        b.append(beta * (a[i + 1] - a[i]) + (1 - beta) * b[i])
        y.append(a[i + 1] + b[i + 1])

    rmse = sqrt(
        sum([(m - n)**2 for m, n in zip(Y[:-fc], y[:-fc - 1])]) / len(Y[:-fc]))

    return Y[-fc:], alpha, beta, rmse
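A small usage sketch for the Holt-style linear smoother above. Passing alpha and beta explicitly sidesteps the min_opt_rmse/RMSE dependency defined elsewhere in this module; the series and smoothing constants are illustrative only.

series = [30.0, 21.0, 29.0, 31.0, 40.0, 48.0, 53.0, 47.0, 37.0, 39.0]
forecast, alpha, beta, rmse = linear(list(series), fc=3, alpha=0.5, beta=0.3)
print(forecast)  # the 3 extrapolated values appended to the series
print(rmse)      # in-sample root-mean-square error of the one-step fits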
Example #6
def min_opt_rmse(func,
                 para,
                 args,
                 bounds=None,
                 maxiter=10000,
                 step=0.0001,
                 approx_grad=True):
    # simple projected gradient descent on func, using the finite-difference grad() above
    for iter in range(maxiter):
        grad_array = grad(func, para, args)
        if iter == 0:
            print(grad_array)
        if bounds is None:
            para = para - grad_array * step
        else:
            for i in range(grad_array.shape[0]):
                temp = para[i] - grad_array[i] * step
                if temp > bounds[i][1]:
                    para[i] = bounds[i][1]
                elif temp < bounds[i][0]:
                    para[i] = bounds[i][0]
                else:
                    para[i] = temp

    return para
Example #7
    def sampleEmision(self,z):
        x = []

        for i in range(self.num_polls):
            alpha = self.b[i]*z
            x.append(np.random.dirichlet(alpha))        

        x = np.array(x)
        return x
Example #8
def small_world_parameterized(N_tot, k, p, getLatticeInfo=False):
    A_0 = np.array(
        nx.to_numpy_matrix(nx.generators.watts_strogatz_graph(N_tot, k, 0)))
    A = np.array(
        nx.to_numpy_matrix(nx.generators.watts_strogatz_graph(N_tot, k, p)))
    non_lattice_edges = []
    for i in range(N_tot - 1):
        for j in range(i + 1, N_tot):
            if A[i][j] != A_0[i][j] and A[i][j] == 1:
                non_lattice_edges.append((i, j))

    def parameterized(nl_weight):
        B = deepcopy(A)
        for e in non_lattice_edges:
            B[e[0]][e[1]], B[e[1]][e[0]] = nl_weight, nl_weight
        return B

    if getLatticeInfo:
        return A, parameterized, non_lattice_edges
    return A, parameterized
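A usage sketch with illustrative parameters: a 100-node Watts-Strogatz ring with degree 4 and rewiring probability 0.1, whose rewired (non-lattice) shortcut edges are then down-weighted.

A, weighted = small_world_parameterized(100, 4, 0.1)
B = weighted(0.25)  # shortcut edges carry weight 0.25, lattice edges keep weight 1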
Example #9
    def logLiklihood(self, X, new_params):

        # likelihood of initial state
        l = np.log(self.pInitial())
        p = self.pState(X,0)
        initial = sum(p*l)

        # likelihood of transitions
        transition = 0
        for t in xrange(1, len(X)):
            for j in xrange(self.bins):
                l = np.log(self.pTransition(j, X[t-1]))
                p = self.pStatePair(j, X, t)
                transition += sum(p*l)

        # likelihood of observations
        emission = 0
        for t, x in enumerate(X):
            l = np.array([np.log(self.pEmission(z, X[t])) for z in self.states])
            p = self.pState(X,t)
            emission += sum(p*l)

        return initial + transition + emission
Example #10
def multiplicative(x, m, fc, alpha=None, beta=None, gamma=None):

    Y = x[:]

    if alpha is None or beta is None or gamma is None:

        initial_values = array([0.5, 0.5, 0.5])
        boundaries = [(0, 1), (0, 1), (0, 1)]
        type = 'multiplicative'

        parameters = min_opt_rmse(RMSE,
                                  para=initial_values,
                                  args=(Y, type, m),
                                  bounds=boundaries,
                                  approx_grad=True)
        alpha, beta, gamma = parameters

    a = [sum(Y[0:m]) / float(m)]
    b = [(sum(Y[m:2 * m]) - sum(Y[0:m])) / m**2]
    s = [Y[i] / a[0] for i in range(m)]
    y = [(a[0] + b[0]) * s[0]]
    rmse = 0

    for i in range(len(Y) + fc):

        if i == len(Y):
            Y.append((a[-1] + b[-1]) * s[-m])

        a.append(alpha * (Y[i] / s[i]) + (1 - alpha) * (a[i] + b[i]))
        b.append(beta * (a[i + 1] - a[i]) + (1 - beta) * b[i])
        s.append(gamma * (Y[i] / (a[i] + b[i])) + (1 - gamma) * s[i])
        y.append((a[i + 1] + b[i + 1]) * s[i + 1])

    rmse = sqrt(
        sum([(m - n)**2 for m, n in zip(Y[:-fc], y[:-fc - 1])]) / len(Y[:-fc]))

    return Y[-fc:], alpha, beta, gamma, rmse
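As with linear() above, supplying the smoothing constants avoids the optimizer dependency; the seasonal series below (period m=4) and the constants are illustrative only.

series = [10.0, 20.0, 30.0, 40.0, 12.0, 24.0, 36.0, 48.0, 14.0, 28.0, 42.0, 56.0]
forecast, alpha, beta, gamma, rmse = multiplicative(
    list(series), m=4, fc=4, alpha=0.4, beta=0.1, gamma=0.3)
print(forecast)  # one forecast per step of the next season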
Example #11
def core_periphery_analysis(network0):
    network0 /= np.sum(network0)
    C, Q_core = bct.core_periphery_dir(network0)
    per_nodes = []
    for i in range(len(C)):
        if C[i] == 0:
            per_nodes.append(i)
    G = nx.from_numpy_matrix(network0)
    G_per = G.subgraph(per_nodes)
    per_network = np.array(nx.to_numpy_matrix(G_per))
    M_per, Q_comm_per = bct.community_louvain(per_network)
    print(Q_comm_per, "Q")
    # print(M_per, Q_comm_per)
    per_comm_assignments = {}
    for i in range(len(per_nodes)):
        per_comm_assignments[per_nodes[i]] = M_per[i]
    classifications = [
        [], [], []
    ]  # index 0 means periphery-periphery edge, 1 means periphery-core, 2 means core-core
    for i in range(len(network0) - 1):
        for j in range(i + 1, len(network0)):
            if network0[i][j] > 0:
                classifications[C[i] + C[j]].append((i, j))
    return classifications, per_comm_assignments, G_per, M_per
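A hedged usage sketch, assuming the bctpy package is importable as bct and networkx as nx, as the function body itself requires; the example network is arbitrary.

import numpy as np
import networkx as nx

W = np.array(nx.to_numpy_matrix(nx.karate_club_graph()), dtype=float)
classifications, per_comms, G_per, M_per = core_periphery_analysis(W)
print(len(classifications[0]), "periphery-periphery edges")
print(len(classifications[1]), "periphery-core edges")
print(len(classifications[2]), "core-core edges")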
Example #12
def beta(t):
    # backward probabilities (beta recursion of the forward-backward algorithm)
    if t == len(X):
        return np.ones(self.bins)
    evidence = np.array([self.pEmission(z, X[t-1]) for z in self.states])
    return np.array(M(t) * np.matrix(evidence * beta(t+1)).T)[..., 0]
Example #13
def get_hierarchical_modular(n, modules, edges, p, alpha, getCommInfo=False):
    pairings = {}
    assignments = np.zeros(n, dtype=int)
    cross_module_edges = []
    weights = np.array([(1 + i)**-alpha for i in range(n)])
    dists = []
    module_dist = np.zeros(modules)
    for i in range(modules):
        pairings[i] = []
    A = np.zeros((n, n))
    for i in range(n):
        randomModule = seeded_rng.randint(0, modules)
        pairings[randomModule].append(i)
        assignments[i] = randomModule
    for j in range(modules):
        dist = np.array([weights[i] for i in pairings[j]])
        module_dist[j] = np.sum(dist)
        dist /= np.sum(dist)
        dists.append(dist)
    module_dist /= np.sum(module_dist)

    # nodesPerMod = n // modules
    # for i in range(modules):
    #     for j in range(nodesPerMod):
    #         pairings[i].append(nodesPerMod * i + j)
    #         assignments[nodesPerMod *i + j] = i
    # for i in range(modules - 1):
    #     if len(pairings[i]) < 3 or len(pairings[i+1]) < 3:
    #         return None, None
    #     e0, e1 = seeded_rng.choice(pairings[i], 1), seeded_rng.choice(pairings[i+1], 1)
    #     A[e0, e1], A[e1, e0] = 1, 1
    #     cross_module_edges.append((e0, e1))
    def add_modular_edge():
        randomComm = seeded_rng.choice(modules, p=module_dist)
        while len(pairings[randomComm]) < 2:
            randomComm = seeded_rng.choice(modules, p=module_dist)
        selection = seeded_rng.choice(pairings[randomComm],
                                      2,
                                      replace=False,
                                      p=dists[randomComm])
        while A[selection[0], selection[1]] != 0:
            randomComm = seeded_rng.choice(modules, p=module_dist)
            while len(pairings[randomComm]) < 2:
                randomComm = seeded_rng.choice(modules, p=module_dist)
            selection = seeded_rng.choice(pairings[randomComm],
                                          2,
                                          replace=False,
                                          p=dists[randomComm])
        A[selection[0], selection[1]] += 1
        A[selection[1], selection[0]] += 1

    def add_between_edge():
        randomComm, randomComm2, e0, e1 = 0, 0, 0, 0
        while randomComm == randomComm2 or A[e0, e1] != 0:
            randomComm, randomComm2 = seeded_rng.choice(
                modules, p=module_dist), seeded_rng.choice(modules,
                                                           p=module_dist)
            e0 = seeded_rng.choice(pairings[randomComm],
                                   1,
                                   replace=False,
                                   p=dists[randomComm])
            e1 = seeded_rng.choice(pairings[randomComm2],
                                   1,
                                   replace=False,
                                   p=dists[randomComm2])
        A[e0, e1] += 1
        A[e1, e0] += 1
        cross_module_edges.append((e0, e1))

    inModuleEdges = int(round(edges * p))
    betweenEdges = edges - inModuleEdges
    # betweenEdges = edges - inModuleEdges - modules + 1
    # if betweenEdges < 0:
    #     print("NEGATIVE")
    for i in range(inModuleEdges):
        add_modular_edge()
    for i in range(betweenEdges):
        add_between_edge()

    def parameterized(cc_weight):
        B = deepcopy(A)
        for e in cross_module_edges:
            B[e[0], e[1]], B[e[1], e[0]] = cc_weight, cc_weight
        return B

    if getCommInfo:
        return A, parameterized, pairings, assignments
    else:
        return A, parameterized
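An illustrative call, assuming seeded_rng is a numpy RandomState (as the choice/randint calls imply) along with numpy and deepcopy being imported; the node, module, and edge counts and the within-module fraction p are made up.

import numpy as np
seeded_rng = np.random.RandomState(0)

# 60 nodes, 4 modules, 150 edges, 80% of them within modules, degree-weight exponent 1.5
A, weighted, pairings, assignments = get_hierarchical_modular(
    60, 4, 150, 0.8, 1.5, getCommInfo=True)
B = weighted(0.5)  # down-weight the cross-module edges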
Example #14
def get_degrees(A):
    return np.array([sum(A[i]) for i in range(len(A))])
Example #15
                   handletextpad=0,
                   borderpad=0,
                   loc='center left',
                   bbox_to_anchor=(1, 0.5))
    plt.tight_layout()
    network0 = networks_orig[textbook_index]
    network_opt = networks[textbook_index][beta_index]
    #get_diff_stats(network0, network_opt)

    data = []
    for i in range(10):
        print(i)
        data_curr = get_diff_stats(networks_orig[i], networks[i][14])
        data.extend(data_curr)
    data.sort()
    data = np.array(data)

    bin_size = 500
    new_pairs = []
    stdev_pairs = []
    j = 0
    # while data[j][0] == 0:
    #     j += 1
    while j < len(data):
        count, x, y = 0, [], []
        while count < bin_size and j + count < len(data):
            x.append(data[j + count][0])
            y.append(data[j + count][1])
            count += 1
        if count < bin_size:
            break
Example #16
def get_lattice_graph(dim):
    return np.array(nx.to_numpy_matrix(nx.grid_graph(dim, periodic=True)))
Example #17
    plt.legend(frameon=False,
               prop={'size': 12},
               labelspacing=.2,
               handletextpad=0.0,
               borderpad=0)

    plt.figure(1, figsize=(5.5, 4.5))
    plt.rcParams.update({'font.size': 16})
    plt.xlabel('Fraction of edges within communities')
    plt.ylabel('KL Divergence Ratio')
    plt.xlim([0.18, .98])
    plt.ylim([0.4, 1.03])

    beta, param_vals, opts, scores_orig, scores = process(0, name)
    plt.scatter(param_vals,
                np.array(scores) / np.array(scores_orig),
                label=r"$\beta = 10^{-3}$",
                s=20,
                color=gradient[0])
    plt.plot(param_vals,
             np.array(scores) / np.array(scores_orig),
             color=gradient[0],
             linewidth=.8)

    for i in range(1, 6):
        beta, param_vals, opts, scores_orig, scores = process(i, name)
        plt.scatter(param_vals,
                    np.array(scores) / np.array(scores_orig),
                    label=r"$\beta = $" + str(beta)[0:4],
                    s=20,
                    color=gradient[i])
Example #18
def get_automorphisms(A):
    IG = ig.Graph.Adjacency(A.tolist())
    return np.transpose(np.array(IG.get_automorphisms_vf2()))
Example #19
        para_for = array(para, dtype=float)
        para_for[i] = para[i] + epi
        para_back = array(para, dtype=float)
        para_back[i] = para[i] - epi
        fx1 = func(para_for, *args)
        fx2 = func(para_back, *args)
        grad_array[i] = (fx1 - fx2) / (2 * epi)
    return grad_array


def square(para, *args):
    # simple quadratic test function: sum(args[0] * (para - 2)^2) + 4, minimised at para = 2
    pingfang = (para - 2) * (para - 2)
    return (pingfang * args[0]).sum() + 4


if __name__ == '__main__':
    # minimise square() subject to 0 <= para <= 1.5; a float start value is needed
    # so the in-place updates in min_opt_rmse are not truncated to integers
    para = min_opt_rmse(square, array([1.0]), (1, 2), bounds=[(0, 1.5)])
    print(para)
Example #20
def get_regular_graph(N, d):
    return np.array(nx.to_numpy_matrix(nx.random_regular_graph(d, N)))
Example #21
def drawImageObjectSizes(
        path_image,
        pixel_density=(100, 100)  # (x,y) [px/m]
):

    # import the necessary packages
    from scipy.spatial import distance as dist
    from imutils import perspective
    from imutils import contours
    import numpy as np
    import argparse
    import imutils
    import cv2
    import time
    import matplotlib.pyplot as pl  # 'pl' is used below to display the annotated image

    def midpoint(ptA, ptB):
        return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

#    # construct the argument parse and parse the arguments
#    ap = argparse.ArgumentParser()
#    ap.add_argument("-i", "--image", required=True,
#       help="path to the input image")
#    ap.add_argument("-w", "--width", type=float, required=True,
#       help="width of the left-most object in the image (in inches)")
#    args = vars(ap.parse_args())

    # load the image, convert it to grayscale, and blur it slightly

    image = cv2.imread(path_image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)

    # perform edge detection, then perform a dilation + erosion to
    # close gaps in between object edges
    edged = cv2.Canny(gray, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    # find contours in the edge map
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # sort the contours from left-to-right and initialize the
    # 'pixels per metric' calibration variable
    (cnts, _) = contours.sort_contours(cnts)
    #    pixel_density = None

    # loop over the contours individually
    for c in cnts:
        # if the contour is not sufficiently large, ignore it
        if cv2.contourArea(c) < 100:
            continue

        # compute the rotated bounding box of the contour
        orig = image.copy()
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")

        # order the points in the contour such that they appear
        # in top-left, top-right, bottom-right, and bottom-left
        # order, then draw the outline of the rotated bounding
        # box
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)

        # loop over the original points and draw them
        for (x, y) in box:
            cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

        # unpack the ordered bounding box, then compute the midpoint
        # between the top-left and top-right coordinates, followed by
        # the midpoint between bottom-left and bottom-right coordinates
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)

        # compute the midpoint between the top-left and bottom-left points,
        # followed by the midpoint between the top-right and bottom-right
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        # draw the midpoints on the image
        cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
        cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

        # draw lines between the midpoints
        cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                 (255, 0, 255), 2)
        cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                 (255, 0, 255), 2)

        # compute the Euclidean distance between the midpoints
        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

        # if the pixels per metric has not been initialized, then
        # compute it as the ratio of pixels to supplied metric
        # (in this case, inches)
        #       if pixel_density is None:
        #          pixel_density = dB / args["width"]

        # compute the size of the object
        dimA = dA / pixel_density[0]
        dimB = dB / pixel_density[1]

        # draw the object sizes on the image (in metres, since pixel_density is in px/m)
        cv2.putText(orig, "{:.1f}m".format(dimA),
                    (int(tltrX - 15), int(tltrY - 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
        cv2.putText(orig, "{:.1f}m".format(dimB),
                    (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.65, (255, 255, 255), 2)

        # show the output image
        fn = pl.figure()
        ax = fn.add_subplot(111)
        ax.imshow(orig)
        pl.show()


#       time.sleep(15)
#       cv2.imshow("Image", orig)
#       cv2.waitKey(0)
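An illustrative call; the image path is a placeholder and the pixel density (pixels per metre along x and y) would have to be measured for the actual camera setup.

# display each detected object annotated with its estimated dimensions
drawImageObjectSizes("parts_photo.jpg", pixel_density=(2000, 2000))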