Example #1
def kirkoff_inverse_sampler(graph):
    '''Grows an edge set by proposing a uniformly random edge and accepting it
    with probability proportional to 1 / Pr[edge is in a uniform spanning tree];
    accepted edges are contracted, rejected non-bridge edges are deleted.
    Assumes choice/uniform from the random module, networkx as nx, numpy as np,
    and a log_number_trees helper (see the sketch after this function).'''
    size = len(graph) - 1
    T = []
    while len(T) <= size - 1:
        print("T currently", len(T))
        #Normalizing constant: sum over all edges of 1 / Pr[edge in a uniform spanning tree].
        total_inverse = 0
        for edge in list(graph.edges()):
            H = nx.contracted_edge(graph, edge)
            p = np.exp(log_number_trees(H)) / np.exp(log_number_trees(graph))
            #Replace this with a computation of effective resistance!
            #You also don't need to compute this so many times!
            total_inverse += 1/p
        edge = choice(list(graph.edges()))
        H = nx.contracted_edge(graph, edge)
        #q = Pr[edge in a uniform spanning tree]; accept with probability (1/q), normalized.
        q = np.exp(log_number_trees(H)) / np.exp(log_number_trees(graph))
        p = (1 / q) / total_inverse
        print(p, q, total_inverse)
        c = uniform(0, 1)
        if c <= p:
            T.append(edge)
            graph = nx.contracted_edge(graph, edge, self_loops=False)
        else:
            #Never delete a bridge (q is essentially 1), or the graph would disconnect.
            if q < .999999:
                graph.remove_edge(edge[0], edge[1])
    tree = nx.Graph()
    tree.add_edges_from(T)
    edge = T[0]
    return (tree, edge)
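The log_number_trees helper used throughout these examples is not shown in the listing. By the matrix-tree theorem the number of spanning trees equals any cofactor of the graph Laplacian, so a minimal sketch of such a helper could look like the following; the boolean second argument is assumed to toggle edge weights, mirroring the log_number_trees(connector_graph, True) call in Example #4.

import networkx as nx
import numpy as np

def log_number_trees(G, weight=False):
    #Matrix-tree theorem: drop one row and column of the Laplacian, take the log-determinant.
    L = nx.laplacian_matrix(G, weight="weight" if weight else None).toarray()
    sign, logdet = np.linalg.slogdet(L[1:, 1:])
    return logdet

#Sanity check against Cayley's formula: K_5 has 5**3 = 125 spanning trees.
print(np.exp(log_number_trees(nx.complete_graph(5))))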
def compare(shape_1, shape_2, num = 7):
    '''compares the number of spanning trees in the two shapes under
    sequential refinement'''
    spanning_trees = []
    for i in range(num):
        sp_1 = log_number_trees(shape_1)
        sp_2 = log_number_trees(shape_2)
        spanning_trees.append([sp_1, sp_2])
        print(len(list(shape_1.nodes())), len(list(shape_2.nodes())))
        shape_1 = refinement(shape_1)
        shape_2 = refinement(shape_2)
    return spanning_trees
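A hypothetical usage sketch for compare. The refinement routine it calls is defined elsewhere in this codebase and is not shown here; the edge-subdividing helper below is only a stand-in so the call runs end to end, not the author's refinement.

import networkx as nx

def subdivide_all_edges(G):
    #Replace every edge u-v with a path u - (u, v) - v (hypothetical stand-in for refinement).
    H = nx.Graph()
    for u, v in G.edges():
        H.add_edge(u, (u, v))
        H.add_edge((u, v), v)
    return H

refinement = subdivide_all_edges
print(compare(nx.grid_graph([4, 4]), nx.grid_graph([2, 8]), num = 3))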
Example #3
def make_partition_list(graph, number_samples = 100, tree_algorithm = random_spanning_tree_wilson, equi = True):
    '''Draws number_samples random spanning trees of graph and cuts one edge of
    each to produce a list of two-block partition_class objects with their
    likelihoods set.'''
    #Note -- currently this is configured only for 2 partitions
    total_number_trees_edges_pairs = np.exp(log_number_trees(graph))*(len(graph.nodes()) - 1)

    uniform_trees = []
    for i in range(number_samples):
        uniform_trees.append(tree_algorithm(graph))

    partitions = []
    for tree in uniform_trees:
        if equi == -1:
            #equi == -1: cut a uniformly random tree edge.
            e = random.choice(list(tree.edges()))
            blocks = remove_edges_map(graph, tree, [e])
            new_partition = partition_class(graph, blocks, tree, e, total_number_trees_edges_pairs)
            new_partition.set_likelihood()
            partitions.append(new_partition)
        else:
            #Otherwise cut an edge that splits the tree into two nearly equal blocks.
            out = almost_equi_split(tree, 2, .1)
            if out is not None:
                e = out[0]
                blocks = remove_edges_map(graph, tree, [e])
                new_partition = partition_class(graph, blocks, tree, e, total_number_trees_edges_pairs)
                new_partition.set_likelihood()
                partitions.append(new_partition)
    return partitions
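make_partition_list defaults to random_spanning_tree_wilson, which is not part of this listing. For context, a minimal sketch of Wilson's algorithm (loop-erased random walks from each remaining node to the growing tree) is given below; the codebase's own version may differ in details and signature.

import networkx as nx
from random import choice

def random_spanning_tree_wilson_sketch(graph):
    nodes = list(graph.nodes())
    in_tree = {nodes[0]}
    tree = nx.Graph()
    tree.add_node(nodes[0])
    for start in nodes[1:]:
        if start in in_tree:
            continue
        #Random walk until the tree is hit, remembering the last exit from each node;
        #following those pointers afterwards yields the loop-erased path.
        successor = {}
        u = start
        while u not in in_tree:
            successor[u] = choice(list(graph.neighbors(u)))
            u = successor[u]
        u = start
        while u not in in_tree:
            in_tree.add(u)
            tree.add_edge(u, successor[u])
            u = successor[u]
    return tree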
Example #4
def set_likelihood(self):
    '''Sets self.likelihood: the product of the spanning tree counts of the
    blocks, times the spanning tree count of the edge-weighted connector graph,
    divided by the total number of (spanning tree, edge) pairs. For a two-block
    partition {A, B} this is T(A)*T(B)*cut(A, B) / (T(G)*(n - 1)), i.e. the
    probability that a uniformly chosen (spanning tree, edge) pair of G induces
    this partition.'''
    total = 0
    for block in self.partition:
        total += log_number_trees(block)
    #One node per block; edge weights count the cut edges between blocks.
    connector_graph = nx.Graph()
    connector_graph.add_nodes_from(self.partition)
    for subgraph_1 in self.partition:
        for subgraph_2 in self.partition:
            if subgraph_1 != subgraph_2:
                cutedges = cut_edges(self.graph, subgraph_1, subgraph_2)
                if cutedges != []:
                    connector_graph.add_edge(subgraph_1, subgraph_2, weight = len(cutedges))
    cut_weight = log_number_trees(connector_graph, True)
    total += cut_weight
    self.likelihood = np.exp(total) / self.total_number_trees_edges_pairs
    self.connector_graph = connector_graph
Example #5
def test():
    number_trees = 10
    graph = nx.grid_graph([10,10])
    
    #For 10x10 grid, estimated sample space size using 30000 trees is:
    sample_size = 3.14529733509e+12

    total_number_trees_edges_pairs = np.exp(log_number_trees(graph))*(len(graph.nodes()) - 1)

    ongoing_partition_list = []
    for i in range(10):
        partition_list = make_partition_list(graph, number_trees)
        for partition in partition_list:
            partition.estimated_sample_space_size = sample_size
        ongoing_partition_list += partition_list
        #cut_total = integrate(partition_list, cut_size)
        #size_total = integrate(partition_list, constant_function)
        #likelihood = integrate(partition_list, likelihood_function)
        #print(total_number_trees_edges_pairs / size_total)
        #This computes the average likelihood T_A T_B cut(A,B)
        print(integrate(partition_list, total_variation))
        #print(cut_total/ size_total)
        #print(cut_total, size_total)
        
    print(estimate_number_partitions(graph, ongoing_partition_list))
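estimate_number_partitions is not shown in this listing. Each sampled partition arrives with probability equal to its likelihood (its share of all (spanning tree, edge) pairs), so the importance-sampling identity E[1 / likelihood] = number of reachable partitions suggests an estimator along these lines. This is only a sketch: it ignores the equi-split filtering in make_partition_list, and the real function's signature (it also takes the graph) evidently differs.

import numpy as np

def estimate_number_partitions_sketch(partition_list):
    #Average inverse likelihood over the sampled partitions.
    return np.mean([1.0 / p.likelihood for p in partition_list])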
Example #6
def kirkoff_sampler(graph):
    '''Builds a spanning tree by repeatedly choosing a uniformly random edge and
    accepting it with probability T(G/e) / T(G), the fraction of spanning trees
    that contain it; accepted edges are contracted, rejected edges are deleted.'''
    size = len(graph) - 1
    T = []
    while len(T) <= size - 1:
        edge = choice(list(graph.edges()))
        H = nx.contracted_edge(graph, edge)
        p = np.exp(log_number_trees(H)) / np.exp(log_number_trees(graph))
        print(p)
        c = uniform(0, 1)
        if c <= p:
            T.append(edge)
            graph = nx.contracted_edge(graph, edge, self_loops=False)
        else:
            graph.remove_edge(edge[0], edge[1])
    tree = nx.Graph()
    tree.add_edges_from(T)
    return tree
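The TODO comments in Example #1 ask for the tree-count ratio to be replaced by an effective resistance computation. For an unweighted graph, the probability that an edge lies in a uniform spanning tree equals the effective resistance between its endpoints, so the ratio exp(log_number_trees(H)) / exp(log_number_trees(graph)) above could instead be computed as in this sketch (recent NetworkX versions expose effective resistance as resistance_distance):

import networkx as nx

def edge_in_tree_probability(graph, edge):
    #Pr[edge in a uniform spanning tree] of an unweighted graph.
    return nx.resistance_distance(graph, edge[0], edge[1])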
def visualize_partition_with_likelihoods(graph, partition, color_likelihood = False):
    '''Draws the partitioned graph twice: the top panel colored by district
    index, the bottom panel colored by the log number of spanning trees of
    each district.'''
    for i in range(len(partition)):
        for vertex in partition[i].nodes():
            graph.nodes[vertex]["district"] = i
            graph.nodes[vertex]["pos"] = vertex
    for edge in graph.edges():
        graph.edges[edge]["tree"] = 0
        
    color_map_likelihood =  { i : log_number_trees(partition[i]) for i in range(len(partition))}
    color_map = {i : i for i in range(len(partition) + 2)}
    node_colors = [color_map[graph.nodes[vertex]["district"] ] for vertex in graph.nodes()]
    node_colors_likelihood = [color_map_likelihood[graph.nodes[vertex]["district"] ] for vertex in graph.nodes()]
    edge_colors = [graph.edges[edge]["tree"] for edge in graph.edges()]
    plt.subplot(211)
    nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), cmap=plt.get_cmap('jet'), node_color=node_colors, edge_color=edge_colors, node_size = 10)
    plt.subplot(212)
    nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), cmap=plt.get_cmap('Blues'), node_color=node_colors_likelihood, edge_color=edge_colors, node_size = 10)
    
    plt.show()
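A hypothetical usage sketch for the visualization: split a grid into two halves by hand and draw them. It assumes a log_number_trees helper and matplotlib are available; nx.grid_graph labels nodes with coordinate tuples, which is what the "pos" attribute above relies on.

import networkx as nx

g = nx.grid_graph([10, 10])
left = g.subgraph([v for v in g.nodes() if v[0] < 5])
right = g.subgraph([v for v in g.nodes() if v[0] >= 5])
visualize_partition_with_likelihoods(g, [left, right])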
Example #8
def cut_size(partition):
    #Spanning-tree count of the weighted connector graph; for a two-block partition this is the number of cut edges between the blocks.
    return np.exp(log_number_trees(partition.connector_graph, True))
Example #9
#        rectangle_trees.append(log_number_trees(rectangle_graph))
#        square_trees.append(log_number_trees(square_graph))
#    print(np.mean(rectangle_trees), np.std(rectangle_trees))
#    print(np.mean(square_trees), np.std(square_trees))
#    print("difference:", np.mean(rectangle_trees) - np.mean(square_trees), "total std:", np.std(rectangle_trees) + np.std(square_trees))

square_data = {}
repititions = 20
sample_size_set = [100, 1000, 10000]
for number_samples in sample_size_set:
    square_trees = []
    rectangle_trees = []
    for i in range(repititions):
        sample_rectangle = make_rectangle(number_samples, 1)
        rectangle_graph = points_to_delaunay_graph(sample_rectangle)
        rectangle_trees.append(log_number_trees(rectangle_graph))
    square_data[number_samples] = [[
        np.mean(rectangle_trees),
        np.std(rectangle_trees)
    ]]

data = {}
repititions = 20
sample_size_set = [100, 1000, 10000]
skew_size_set = [10, 100]
for skews in skew_size_set:
    data[skews] = {}
    for number_samples in sample_size_set:
        square_trees = []
        rectangle_trees = []
        for i in range(repititions):