def closeness_neighbors(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
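  """
  Choose 'seed_num' nodes by repeatedly sampling a random neighbor of the
  SURROUND_TOP nodes with the highest closeness centrality, skipping nodes
  that were already picked and the central nodes themselves.

  Parameters:
    seed_num: Number of nodes to choose.
    graph: NetworkX graph object (used directly if given).
    graph_json_filename: Filename where the adjacency list lives as JSON.
    graph_json_str: Graph as an adjacency list string in JSON.

  Return: List of the chosen nodes.
  """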
  if graph_json_filename is None and graph_json_str is None and graph is None:
    return []

  G = None
  if graph is not None:
    G = graph
  elif graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  clse_cent = nx.get_node_attributes(G, "centrality")
  if len(clse_cent) == 0:
    clse_cent = nx.closeness_centrality(G)
    nx.set_node_attributes(G, "centrality", clse_cent)
    print "closeness neighbors"

  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(SURROUND_TOP)
  nodes = map(lambda (x, y): x, clse_cent)

  current_seed = 0
  rtn = []
  while current_seed < seed_num:
    current_node = nodes[current_seed % len(nodes)]
    current_neighbors = G.neighbors(current_node)
    rtn += random.sample(set(current_neighbors) - set(rtn) - set(nodes), 1)
    current_seed += 1

  return rtn
Example #2
def high_degrees_fast(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
  """
  Find the high-degree nodes of the given graph by sorting on the adjacency
  list lengths and slicing.

  Parameters:
    seed_num: Number of nodes to choose.
    graph: NetworkX graph object (used directly if given).
    graph_json_filename: Filename where the adjacency list lives as JSON.
    graph_json_str: Graph as an adjacency list string in JSON.

  Return: List of 'seed_num' highest degree nodes.
  """
  if graph_json_filename is None and graph_json_str is None and graph is None:
    return []

  G = None
  if graph is not None:
    G = graph
  elif graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  clse_cent = nx.get_node_attributes(G, "centrality")
  if len(clse_cent) == 0:
    clse_cent = nx.degree_centrality(G)
    nx.set_node_attributes(G, "centrality", clse_cent)
    print "hi high-degree"

  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(seed_num)

  return map(lambda (x, y): x, clse_cent)
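# A minimal usage sketch for high_degrees_fast (assumptions: this module's
# own imports are in scope and a networkx 1.x / Python 2 environment is used,
# matching the print statements and nx.set_node_attributes call order above;
# the toy graph and seed count are illustrative only).
def _demo_high_degrees_fast():
  toy_graph = nx.barabasi_albert_graph(100, 2)  # hypothetical test graph
  seeds = high_degrees_fast(5, graph=toy_graph)
  print(seeds)  # the 5 nodes with the highest degree centrality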
def closeness_component(seed_num, graph_json_filename=None, graph_json_str=None):
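  """
  Spread 'seed_num' nodes over the connected components that hold more than
  10% of the graph's nodes, allocating seeds to each component in proportion
  to its size and picking the nodes with the highest closeness centrality
  inside each component.

  Parameters:
    seed_num: Number of nodes to choose.
    graph_json_filename: Filename where the adjacency list lives as JSON.
    graph_json_str: Graph as an adjacency list string in JSON.

  Return: List of the chosen nodes.
  """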
  if graph_json_filename is None and graph_json_str is None:
    return []

  G = None
  if graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  components = list(nx.connected_components(G))
  components = filter(lambda x: len(x) > 0.1 * len(G), components)
  total_size = sum(map(lambda x: len(x), components))
  total_nodes = 0
  rtn = []
  for comp in components[1:]:
    num_nodes = int(float(len(comp)) / total_size * seed_num)
    component = G.subgraph(list(comp))
    clse_cent = nx.closeness_centrality(component)
    collector = collections.Counter(clse_cent)
    clse_cent = collector.most_common(num_nodes)
    rtn += map(lambda (x, y): x, clse_cent)
    total_nodes += num_nodes

  num_nodes = seed_num - total_nodes
  component = G.subgraph(list(components[0]))
  clse_cent = nx.closeness_centrality(component)
  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(num_nodes)
  rtn += map(lambda (x, y): x, clse_cent)
  return rtn
Example #4
 def __init__(self,
              path_to_pb,
              files,
              scale=0.5,
              to_line=False,
              prune_method='simple',
              outdir=None,
              out_suffix=None,
              gpu_device='0',
              test_orientation=True):
     """
      Parameters:
          path_to_pb: path to the trained TensorFlow pb file
          files: list of image file paths
          scale: scale factor; ARU-Net typically works well on low scales,
              which speeds up inference considerably
          to_line: approximate the line height and write a line map
          prune_method: options=['simple'] TODO: extend
          outdir: output folder
          out_suffix: suffix to append to the filename (also decides the
              file extension; defaults to png if nothing is set)
          gpu_device: device number as string
          test_orientation: test for the orientation. Note: cannot detect
              flips / 180-degree rotations
     """
     self.graph = load_graph(path_to_pb)
     self.files = files
     self.to_line = to_line
     self.scale = scale
     self.prune_method = prune_method
     self.outdir = outdir
     self.out_suffix = out_suffix
     self.gpu_device = gpu_device
     self.test_orientation = test_orientation
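 # A minimal usage sketch for the class this __init__ belongs to. The class
 # name `Inference` below is hypothetical (the snippet only shows the
 # constructor) and the file paths are placeholders:
 #
 #   predictor = Inference('models/aru_net.pb',
 #                         ['page_001.png', 'page_002.png'],
 #                         scale=0.5, to_line=True, outdir='out/')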
def question1_plot():
    """
    plot for question 1
    """
    network_graph = load_graph(NETWORK_URL)
    n = 1239
    p = 0.004
    m = 3
    er_graph = er(n, p)
    upa_graph = upa(n, m)

    graphs = [network_graph, er_graph, upa_graph]
    attack_orders = [random_order(graph) for graph in graphs]

    resiliences = [compute_resilience(graph, attack_order) for
                   graph, attack_order in zip(graphs, attack_orders)]

    removed_num = range(n + 1)
    for resil in resiliences:
        plt.plot(removed_num, resil)
    legend_text = ['Computer Network', 'ER Graph, p = %.3f' % (p),
                   'UPA Graph, m = %d' % (m)]
    plt.legend(legend_text, loc="upper right")
    plt.xlabel('Number of nodes removed')
    plt.ylabel('Size of the largest connected component')
    plt.title('Graph resiliences')
    plt.show()
Example #6
 def __init__(self, network):
     self.graph_path = network
     self.graph = load_graph(network)
     self.free_flow_speed = 100
     self.congestion_speed = 20
     self.init_travel_time()
     self.traveller_state_dict = {}
     self.car_logger = CarLogger()
     self.planner_logger = PlannerLogger()
     self.event_logger = EventLogger()
Example #7
 def __init__(self, network):
     self.graph = load_graph(network)
     self.events = [
         "road_works_start", "road_works_start", "road_works_start",
         "road_works_start", "road_works_end"
     ]
     # self.events = ["road_works_start", "road_works_start", "road_works_start", "road_works_start",
     #               "road_works_end", "spawn_random_agents", "spawn_random"]
     self.event_factors = [round(i * 0.1, 2) for i in range(2, 10)]
     self.event_times = [i for i in range(3, 10)]
     self.num_agents = [i for i in range(1, 6)]
     self.agent_counter = 1
     self.construction_sites = set()
def eigenvector_centrality(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
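  """
  Choose the 'seed_num' nodes with the highest eigenvector centrality,
  reusing a cached 'centrality' node attribute when one is present.

  Parameters:
    seed_num: Number of nodes to choose.
    graph: NetworkX graph object (used directly if given).
    graph_json_filename: Filename where the adjacency list lives as JSON.
    graph_json_str: Graph as an adjacency list string in JSON.

  Return: List of the chosen nodes.
  """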
  if graph_json_filename is None and graph_json_str is None and graph is None:
    return []

  G = None
  if graph is not None:
    G = graph
  elif graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  clse_cent = nx.get_node_attributes(G, "centrality")
  if len(clse_cent) == 0:
    clse_cent = nx.eigenvector_centrality(G)
    nx.set_node_attributes(G, "centrality", clse_cent)
    print "hi eigen-vector"

  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(seed_num)

  return map(lambda (x, y): x, clse_cent)
def cut_control(seed_num, graph_json_filename=None, graph_json_str=None):
  """
  Randomly choose 'seed_num' cut vertices from the given graph. If there are
  fewer cut vertices than 'seed_num', also choose vertices adjacent to the
  cut vertices.
  
  Parameters:
    seed_num: Number of nodes to choose.
    graph_json_filename: Filename where the adjacency list lives as JSON.
    graph_json_str: Graph as an adjacency list string in JSON.
    
  Return: List of the chosen nodes.
  """
  
  if graph_json_filename is None and graph_json_str is None:
    return []
  
  G = None
  if graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)
  iters_since_change = 0
  cut_vertices = list(set(nx.articulation_points(G)))
  while (len(cut_vertices) < seed_num):
    rand_node = cut_vertices[rand.randint(0, len(cut_vertices) - 1)]
    adj_node_lst = G.neighbors(rand_node)
    rand_adj_node = adj_node_lst[rand.randint(0, len(adj_node_lst) - 1)]
    if (rand_adj_node not in cut_vertices):
      iters_since_change = 0
      cut_vertices.append(rand_adj_node)
    else:
      iters_since_change = iters_since_change + 1
    if (iters_since_change > 10):
      break
  
  return cut_vertices[0:seed_num]
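# A minimal usage sketch for cut_control, assuming util.load_graph accepts a
# JSON adjacency list of the form {"node": ["neighbor", ...]} (the exact JSON
# format expected by util is an assumption); the tiny graph is illustrative.
def _demo_cut_control():
  toy_json = '{"0": ["1", "2"], "1": ["0"], "2": ["0", "3"], "3": ["2"]}'
  seeds = cut_control(2, graph_json_str=toy_json)
  print(seeds)  # "0" and "2" are the articulation points of this small tree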
Example #10
def plot_titles_graph():
    graph = util.load_graph(cfg.paths['titles-refs-graph'])
    hist = util.load_csv_hist(cfg.paths['titles-refs-hist'])
    fig, ax, mapping = plot_graph(graph,
                                  hist,
                                  relabel=RELABEL_TITLES,
                                  max_n_nodes=MAX_N_TITLE_NODES)

    fig.set_size_inches(get_savefig_size(MAX_N_TITLE_NODES), forward=False)
    fig.savefig(cfg.paths['titles-graph-plot'], dpi=333)
    print('saved titles graph plot to "{}"'.format(
        cfg.paths['titles-graph-plot']))
    if mapping is not None:
        util.save_json(cfg.paths['titles-graph-plot-mapping'], mapping)
        print('saved titles graph plot mapping to "{}"'.format(
            cfg.paths['titles-graph-plot-mapping']))
Example #11
def plot_authors_graph():
    graph = util.load_graph(cfg.paths['authors-refs-graph'])
    hist = get_def_dict(util.load_csv_hist(cfg.paths['authors-refs-hist']),
                        int)
    fig, ax, mapping = plot_graph(graph,
                                  hist,
                                  relabel=RELABEL_AUTHORS,
                                  max_n_nodes=MAX_N_AUTHOR_NODES)

    fig.set_size_inches(get_savefig_size(MAX_N_AUTHOR_NODES), forward=False)
    fig.savefig(cfg.paths['authors-graph-plot'], dpi=333)
    print('saved authors graph plot to "{}"'.format(
        cfg.paths['authors-graph-plot']))
    if mapping is not None:
        util.save_json(cfg.paths['authors-graph-plot-mapping'], mapping)
        print('saved authors graph plot mapping to "{}"'.format(
            cfg.paths['authors-graph-plot-mapping']))
Example #12
    def __init__(self, args: argparse.Namespace) -> None:
        """Creates a SampleStack object.

        Parameters
        ----------
        args : argparse.Namespace
            the command-line arguments provided by the user
        """
        # Load graph
        # Create stack of samples
        # Use List as stack
        self.t_load_start = timeit.default_timer()
        self.full_graph, self.true_block_assignment = load_graph(args)
        self.t_load_end = timeit.default_timer()
        self.stack = list()  # type: List[Tuple[Graph, Sample]]
        self.create_sample_stack(args)
        self.t_sample_end = timeit.default_timer()
Example #13
    def __init__(self, network, num_routes, agent_type, start_node=None, destination_node=None, max_speed=100):
        self.num_routes = num_routes
        self.route_count = 0
        self.agent_type = agent_type
        self.graph = load_graph(network)
        self.init_state = True
        self.max_speed = max_speed
        self.current_speed = max_speed
        self.travelled_edge_distance = 0
        self.current_edge_distance = 0
        self.next_node = None
        self.final_edge = False

        if start_node is not None and destination_node is not None:
            self.current_node = start_node
            self.destination_node = destination_node
        else:
            self.current_node = self.choose_start_node()
            self.destination_node = self.choose_destination_node()

        if self.agent_type == "local":
            self.route = self.get_route()
            self.node_count = 0
            self.set_next_node()
def sccf_helper(seed_num, graph=None, graph_json_filename=None, graph_json_str=None):
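  """
  Recursively bisect the largest connected component with the Fiedler vector
  until roughly 2**max_depth clusters remain, then collect the top
  closeness-centrality nodes of each cluster together with their neighbors.

  Parameters:
    seed_num: Number of nodes to choose (controls the number of clusters).
    graph: NetworkX graph object (used directly if given).
    graph_json_filename: Filename where the adjacency list lives as JSON.
    graph_json_str: Graph as an adjacency list string in JSON.

  Return: Tuple of (candidate_nodes, candidate_neighbors).
  """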

  # parse the graph
  G = None
  if graph is not None:
    G = graph
  elif graph_json_filename is not None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  # initialize queue for subgraphs
  # try to get about 2 nodes in each cluster
  node_per_cluster = 2
  max_depth = int(math.ceil(np.log2(seed_num / node_per_cluster))) + 1

  cluster_queue = Queue.Queue()
  cluster_queue.put(G)

  # divide graph into 2**max_depth clusters
  while (cluster_queue.qsize() < 2**max_depth ):
    G_curr = cluster_queue.get()
    # work only on the largest connected component
    G_curr_c = max(nx.connected_component_subgraphs(G_curr), key=len)
    if (G_curr_c.size() < 2 * node_per_cluster):
      # put it back if cluster is too small
      cluster_queue.put(G_curr_c)
      continue
    # get fiedler vector
    fiedler_vector = nx.fiedler_vector(G_curr_c, normalized=True, tol=1e-01)
    node_list_sub_1 = []
    node_list_sub_2 = []
    node_list = G_curr_c.nodes()
    # split positive and negative entries of the fiedler vector
    for i in range(len(fiedler_vector)):
      if (fiedler_vector[i] >= 0):
        node_list_sub_1.append(node_list[i])
      else:
        node_list_sub_2.append(node_list[i])
    # separate the graph into two subgraphs
    if (len(node_list_sub_1) >= node_per_cluster):
      # ignore clusters too small
      G_sub_1 = G_curr_c.subgraph(node_list_sub_1)
      cluster_queue.put(G_sub_1)
    if (len(node_list_sub_2) >= node_per_cluster):
      # ignore clusters too small
      G_sub_2 = G_curr_c.subgraph(node_list_sub_2)
      cluster_queue.put(G_sub_2)

  # get node_per_cluster highest degree nodes from each cluster
  candidate_nodes = []
  candidate_neighbors = {}
  while not (cluster_queue.empty()):
    G_curr = cluster_queue.get()
    # measure used to pick nodes within clusters
    degree_dict = nx.closeness_centrality(G_curr)
    node_keys = sorted(degree_dict, key=degree_dict.get, 
                                    reverse=True)[:node_per_cluster]
    for i in node_keys:
      # append i and a neighbor of i
      if (i not in candidate_nodes):
        candidate_nodes.append(i)
        candidate_neighbors[i] = list(nx.all_neighbors(G, i))
      
  # return candidate nodes and neighbors
  return candidate_nodes, candidate_neighbors
Example #15
 def __init__(self, path_to_pb, scale=0.33, mode='L'):
     self.graph = load_graph(path_to_pb)
     self.scale = scale
     self.mode = mode
Example #16
def tf_classify():
    # TODO: python -m scripts.label_image     --graph=tf_files/retrained_graph.pb      --image=test/aurelia.jpeg
    import socket

    print("In tf_classify handler from {}".format(socket.getfqdn()))

    file_name = "models/mobilenet/example/3475870145_685a19116d.jpg"
    file_name = "https://www.eopugetsound.org/sites/default/files/styles/magazinewidth_592px/public/topical_article/images/moon_jellyfish.jpg?itok=Esreg6zX"

    # Get payload
    payload = request.get_json(silent=True, force=True)

    if payload is None:
        if request.get_data() is not None:
            payload = json.loads(request.get_data())

    if payload is not None:
        if payload.get("nlp").get("entities").get("url"):
            file_name = payload.get("nlp").get("entities").get("url")[0].get(
                "raw")

            # Load model file
            model_file = "models/mobilenet/retrained_graph.pb"
            label_file = "models/mobilenet/retrained_labels.txt"
            input_height = 224
            input_width = 224
            input_mean = 128
            input_std = 128
            input_layer = "input"
            output_layer = "final_result"

            graph = util.load_graph(model_file)
            t = util.read_tensor_from_image_file(file_name,
                                                 input_height=input_height,
                                                 input_width=input_width,
                                                 input_mean=input_mean,
                                                 input_std=input_std)

            input_name = "import/" + input_layer
            output_name = "import/" + output_layer
            input_operation = graph.get_operation_by_name(input_name)
            output_operation = graph.get_operation_by_name(output_name)

            with tf.Session(graph=graph) as sess:
                start = time.time()
                results = sess.run(output_operation.outputs[0],
                                   {input_operation.outputs[0]: t})
                end = time.time()

            results = np.squeeze(results)

            top_k = results.argsort()[-5:][::-1]
            labels = util.load_labels(label_file)

            print('\nEvaluation time (1-image): {:.3f}s\n'.format(end - start))
            template = "{} (score={:0.5f})"

            print(top_k)

            for i in top_k:
                print(template.format(labels[i], results[i]))

            # Fall back to a hedged reply when the top score is low
            if results[top_k[0]] < 0.1:
                response = ("I really don't know, my best guess is that this "
                            "looks like a " + labels[top_k[0]])
            else:
                response = 'I think this is a ' + labels[top_k[0]]

            return jsonify(
                status=200,
                replies=[{
                    'type': 'text',
                    'content': response
                }],
                conversation={'memory': {
                    'plankton': labels[top_k[0]]
                }})
Example #17
else:
    adj_matrix = np.random.choice([0, 1], size=(n, n),
                                  p=[0.8, 0.2]).astype(np.float64)
    np.save(source[:-4], adj_matrix)

e = np.ones(adj_matrix.shape[0])
adj_matrix = add_random(adj_matrix, e=e)
A = scale_rows(adj_matrix)
r = page_rank(A, e=np.ones(adj_matrix.shape[0]))

print("\nresult for small test. r:\n", np.round(r, 4))

print("checking SNAP...")
for filename in ["p2p-Gnutella08.txt", "Wiki-Vote.txt"]:
    print(filename)
    adjacency_matrix = load_graph(filename)
    n = adjacency_matrix.shape[0]

    # different values of d are equivalent to changing values in e
    for e_val in [1, 0.5, 0]:
        e = np.full(n, fill_value=e_val, dtype=np.float32)
        print(f"d*e values = [{e_val}/{adjacency_matrix.shape[0]}, ...]")
        adj_matrix = add_random(adjacency_matrix, e=e)
        A = scale_rows(adj_matrix)
        r = page_rank(A, e=e, delta=1e-8)
        print(f"r min ={np.min(r)}, r max = {np.max(r)}\n")

    print("e[i]=0 except e[0]=1")
    e = np.zeros(n, dtype=np.float32)
    e[0] = 1
Example #18
import random
import matplotlib.pyplot as plt
import statistics
import time
from util import load_graph
from PopGenerator import PopGenerator
from GA import GA


random.seed()

instance = './data/kroa100.tsp'
graph = load_graph(instance)
m = 2
max_iters = 1000
experiments_count = 3

evaluators_conf = (GA.get_min_by_fitness, GA.get_min_by_penalty,
                   GA.get_min_by_DEB, GA.get_min_by_hierarchy)
breaks_generator_conf = (PopGenerator.generate_breaks_rules, PopGenerator.generate_breaks_fully_random,
                         PopGenerator.generate_breaks_fully_random, PopGenerator.generate_breaks_fully_random)
names_conf = ('Constrained generation', 'Penalization function',
              "Deb's rules", "Stochastic hierarchy")


print(instance+' '+str(m))
for i_conf in range(0, len(evaluators_conf)):
    print(names_conf[i_conf]+'\n')
    experiments_list = []
    evaluator = evaluators_conf[i_conf]
    generator = PopGenerator(graph, breaks_generator_conf[i_conf])
Example #19
def spectrum_cluster(seed_num, graph_json_filename=None, graph_json_str=None):
  """
  Identifies clusters in the network using the Laplacian spectrum, and loops
  over each cluster to pick the node with the largest degree until 'seed_num'
  nodes are chosen.

  Parameters:
    seed_num: Number of nodes to choose.
    graph_json_filename: Filename where the adjacency list lives as JSON.
    graph_json_str: Graph as an adjacency list string in JSON.
    
  Return: List of the chosen nodes.
  """

  # parse the graph
  G = None
  if graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  # initialize queue for subgraphs
  # try to get about 2 nodes in each cluster
  node_per_cluster = 2
  max_depth = int(math.ceil(np.log2(seed_num / node_per_cluster))) + 1

  cluster_queue = Queue.Queue()
  cluster_queue.put(G)

  # divide graph into 2**max_depth clusters
  while (cluster_queue.qsize() < 2**max_depth ):
    G_curr = cluster_queue.get()
    # work only on the largest connected component
    G_curr_c = max(nx.connected_component_subgraphs(G_curr), key=len)
    if (G_curr_c.size() < 2 * node_per_cluster):
      # put it back if cluster is too small
      cluster_queue.put(G_curr_c)
      continue
    # get fiedler vector
    fiedler_vector = nx.fiedler_vector(G_curr_c, normalized=True, tol=1e-04)
    node_list_sub_1 = []
    node_list_sub_2 = []
    node_list = G_curr_c.nodes()
    # split positive and negative entries of the fiedler vector
    for i in range(len(fiedler_vector)):
      if (fiedler_vector[i] >= 0):
        node_list_sub_1.append(node_list[i])
      else:
        node_list_sub_2.append(node_list[i])
    # separate the graph into two subgraphs
    if (len(node_list_sub_1) >= node_per_cluster):
      # ignore clusters too small
      G_sub_1 = G_curr_c.subgraph(node_list_sub_1)
      cluster_queue.put(G_sub_1)
    if (len(node_list_sub_2) >= node_per_cluster):
      # ignore clusters too small
      G_sub_2 = G_curr_c.subgraph(node_list_sub_2)
      cluster_queue.put(G_sub_2)

  # get node_per_cluster highest degree nodes from each cluster
  candidate_nodes = []
  while not (cluster_queue.empty()):
    G_curr = cluster_queue.get()
    # measure used to pick nodes within clusters
    degree_dict = nx.degree(G_curr)
    node_keys = sorted(degree_dict, key=degree_dict.get, 
                                    reverse=True)[:node_per_cluster]
    for i in node_keys:
      candidate_nodes.append(i)
  # randomly pick seed_num nodes from candidate_nodes
  rtn = list(np.random.choice(candidate_nodes, replace=False, size=seed_num))
  return rtn
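# A minimal usage sketch for spectrum_cluster. Assumptions: util.load_graph
# accepts a JSON adjacency-list string as built below, and the networkx 1.x /
# Python 2 APIs used above (Queue, connected_component_subgraphs, dict-style
# nx.degree) are available, along with scipy for nx.fiedler_vector.
def _demo_spectrum_cluster():
  import json  # standard library; only used to build the toy input string
  ring = {str(i): [str((i - 1) % 40), str((i + 1) % 40)] for i in range(40)}
  seeds = spectrum_cluster(4, graph_json_str=json.dumps(ring))
  print(seeds)  # 4 nodes drawn from the spectral clusters of a 40-node ring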
Example #20
    # -----------------------------
    x_test, y_test = util.get_data("test")

    # np.random.seed(0)
    # sample_index = np.random.randint(len(y_test), size=sample_size)
    sample_index = np.arange(len(y_test))

    x_test_sample = x_test[sample_index]
    y_test_sample = y_test[sample_index]
    # print("x_sample:{}, y_sample:{}".format(x_test_sample.shape, y_test_sample.shape))
    init_end_time = time.time()
    print("loading data takes {:6.4f} ms".format(
        (init_end_time - init_time) * 1000))
    print("predicting cases:")

    graph = util.load_graph(frozen_model)
    if not __debug__:
        print('\n'.join(map(str, [op.name for op in graph.get_operations()])))
    session = tf.Session(graph=graph)
    begin_time = time.time()
    X, Y = util.get_tensor_by_op_name(graph, ["input", "label"])
    output, accuracy, cost = util.get_tensor_by_op_name(
        graph, ["output", "accuracy", "cost"])

    label_prob = session.run(output,
                             feed_dict={
                                 X: x_test_sample,
                                 Y: y_test_sample
                             })
    end_time = time.time()
Example #21
        else:
            # edges between clusters
            nx.draw_networkx_edges(G,
                                   pos,
                                   edgelist=edgelist,
                                   width=3,
                                   alpha=0.8,
                                   edge_color=colors[index])

    # draw the node labels
    nx.draw_networkx_labels(G, pos, labels, font_size=12)

    plt.axis('off')
    plt.show()


if __name__ == '__main__':
    # load the network and visualize it
    G = util.load_graph("network/test.txt")
    pos = nx.spring_layout(G)
    nx.draw(G, pos, with_labels=True, font_weight='bold')
    plt.show()

    # GN (Girvan-Newman) algorithm
    algo = GN(G)
    partition = algo.execute()
    print(partition)

    # visualize the result
    showCommunity(algo._G_cloned, partition, pos)
Example #22

# End of parse_arguments()

if __name__ == "__main__":
    args = parse_arguments()
    t_start = timeit.default_timer()

    if args.sample_type != "none":
        samplestack = SampleStack(args)
        sampled_graph, sampled_graph_partition, vertex_mapping, block_mapping, evaluation = samplestack.unstack(
            args)
        full_graph, full_graph_partition, evaluation = samplestack.extrapolate_sample_partition(
            sampled_graph_partition, vertex_mapping, args, evaluation)
    else:
        graph, true_block_assignment = load_graph(args)
        t_load = timeit.default_timer()
        t_sample = timeit.default_timer()
        print("Performing stochastic block partitioning")
        evaluation = Evaluation(args, graph)
        # Please refer to the graph-tool documentation under graph-tool.inference for details on the input parameters
        partition = minimize_blockmodel_dl(graph,
                                           shrink_args={'parallel': True},
                                           verbose=args.verbose,
                                           mcmc_equilibrate_args={
                                               'verbose': args.verbose,
                                               'epsilon': 1e-4
                                           })
        t_partition = timeit.default_timer()

    t_end = timeit.default_timer()