Example #1
def test_random_algorithm_4_3():
    filename = "instances/1.in"
    G = create_graph(filename)
    solution = random_algorithm_4(G)
    print("Penalty: " + str(solution[1]))
    print("Cycles: " + solution[0])
    print(validate_formatted_solution(solution[0]))
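These tests all lean on a create_graph(filename) helper that the listing never shows. A minimal sketch of what it presumably does, assuming a networkx graph and an instance format of one weighted edge per line; both assumptions are illustrative only:

import networkx as nx

def create_graph(filename):
    # Hypothetical reader: assumes each line of the .in file is "u v weight".
    G = nx.Graph()
    with open(filename) as f:
        for raw in f:
            parts = raw.split()
            if len(parts) == 3:
                u, v, w = int(parts[0]), int(parts[1]), float(parts[2])
                G.add_edge(u, v, weight=w)
    return G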
Example #3
def execute_random4(index, write_file):
	filename = "instances/" + str(index) + ".in"
	G = create_graph(filename)
	solution = random_algorithm_4(G)
	formatted_solution = [index, "Random4", solution[1], solution[0]]
	list_solutions = [formatted_solution,]
	add_solutions(list_solutions, write_file)
Example #4
def execute_greedy(index):
    filename = "instances/" + str(index) + ".in"
    G = create_graph(filename)
    solution = greedy_algorithm(G)
    formatted_solution = [index, "Greedy", solution[1], solution[0]]
    list_solutions = [formatted_solution]
    add_solutions(list_solutions)
Example #5
def execute(index):
    filename = "instances/" + str(index) + ".in"
    G = create_graph(filename)
    solution = algorithm(G)
    formatted_solution = [index, "Random", solution[1], solution[0]]
    print(solution[0])
    list_solutions = [formatted_solution,]
    add_solutions(list_solutions)
Example #6
def execute_greedy(index):
    filename = "instances/" + str(index) + ".in"
    G = create_graph(filename)
    solution = greedy_algorithm(G)
    formatted_solution = [index, "Greedy", solution[1], solution[0]]
    list_solutions = [formatted_solution]
    outfile = "SOLUTIONS RECORDS - T.txt"
    add_solutions(list_solutions, outfile)
Example #7
def test_comprehensive_solution_validation1(line):
	# `line` is assumed to be one solution record, e.g. "<instance>: ... | ... | <cycles>"
	tokens = line.split("|")
	number_tokens = line.split(":")
	instance_number = int(number_tokens[0])
	solution = tokens[2]
	filename = "instances/" + str(instance_number) + ".in"
	G = create_graph(filename)
	print(comprehensive_solution_validation(solution, G))
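A hedged driver for the test above; the records file name is borrowed from Example #37 and may not match the real layout:

# Hypothetical usage: read one record line and validate it.
with open("Condensed Solution Records") as f:
    line = f.readline()
test_comprehensive_solution_validation1(line)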
Example #12
def detailed_compare(search_fs,
                     edges=10,
                     nodes=10,
                     iterations=100,
                     plot=False):
    results = {}
    sums = {}
    for search_f in search_fs:
        results[search_f.__name__] = []
        sums[search_f.__name__] = 0
    for i in range(0, iterations):
        graph = create_graph(edges, nodes)
        for search_f in search_fs:
            res = search_f(graph)
            results[search_f.__name__].append(res)
            sums[search_f.__name__] += res
    plt.figure(figsize=(8, 6))
    ax = plt.subplot(111)
    if plot:
        for search_f in search_fs:
            line, = ax.plot(range(1, iterations + 1),
                            results[search_f.__name__])
            line.set_label(search_f.__name__)
        ax.set_ylim(bottom=0)
        plt.title('{0} nodes/{1} edges'.format(nodes, edges))
    else:
        averages = []
        _averages = {}
        for search_f in search_fs:
            name = search_f.__name__
            averages.append(sums[name] / iterations)
            print(name, averages[-1])
            _averages[name] = averages[-1]
            plt.bar([name], averages[-1], zorder=10)
        plt.grid(True, which='both', axis='y', zorder=0, alpha=1)
        plt.grid(which='minor', alpha=.3)
        major_ticks = np.arange(0, max(averages) + 1, 5)
        minor_ticks = np.arange(0, max(averages) + 1, 1)
        ax.set_yticks(major_ticks)
        ax.set_yticks(minor_ticks, minor=True)
        plt.legend(list(
            map(lambda f: f.__name__ + ' ' + str(_averages[f.__name__]),
                search_fs)),
                   loc='upper center',
                   bbox_to_anchor=(0.5, -0.05))
        plt.tick_params(axis='x', bottom=False, labelbottom=False)
        plt.title('Average for {0} nodes/{1} edges over {2} iterations\n'.format(
            nodes, edges, iterations))
        plt.suptitle('\n\n(lower is better)', fontsize=8)
    plt.show()
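detailed_compare only needs callables that take a graph and return a number, so it can be smoke-tested with stand-ins; the two dummy searches below are illustrative, not part of the project:

import random

def dummy_search_a(graph):
    # stand-in search: returns a fake matching cost
    return random.randint(5, 15)

def dummy_search_b(graph):
    return random.randint(5, 15)

detailed_compare([dummy_search_a, dummy_search_b], edges=12, nodes=10,
                 iterations=30, plot=True)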
Example #13
def test(search_f, edges=10, nodes=10, draw=True):
    print('Creating graph')
    graph = create_graph(edges, nodes)

    print('Testing', search_f.__name__)
    start = time.time()
    result = search_f(graph)
    end = time.time()
    print('{} with {} nodes and {} edges took {:.3f}s'.format(
        search_f.__name__, nodes, edges, end - start))
    print('Final result', result)

    if draw:
        draw_graph(graph, result[0])
Example #14
def execute_random2(index, graphs):
	"""
	Executes random_algorithm_2 and returns a formatted solution.
	*** NO LONGER WRITES TO SOLUTION FILE ***
	"""
	filename = "instances/" + str(index) + ".in"
	if index not in graphs:
		G = create_graph(filename)
		graphs[index] = G
	else:
		G = graphs[index]
	solution = random_algorithm_2(G)
	formatted_solution = [index, "Random", solution[1], solution[0]]
	return formatted_solution, graphs
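Since execute_random2 threads the graphs cache through its return value, a caller can reuse parsed instances across runs; a hedged sketch (instance numbers are arbitrary):

graphs = {}
solutions = []
for index in range(1, 4):  # arbitrary instance numbers for illustration
    formatted_solution, graphs = execute_random2(index, graphs)
    solutions.append(formatted_solution)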
Example #15
def compare(search_f_1, search_f_2, edges=10, nodes=10, iterations=100):
    print('Comparing {0} and {1}'.format(search_f_1.__name__,
                                         search_f_2.__name__))
    results = {search_f_1.__name__: 0, search_f_2.__name__: 0, 'equal': 0}
    for i in range(0, iterations):
        graph = create_graph(edges, nodes)
        res_1 = search_f_1(graph)
        res_2 = search_f_2(graph)

        if res_1 < res_2:
            results[search_f_1.__name__] += 1
        elif res_2 < res_1:
            results[search_f_2.__name__] += 1
        else:
            results['equal'] += 1
    print(results)
Example #16
def sketch_graph(args):
    data_dim = 20
    lo_dim = 5

    g1 = utils.create_graph(data_dim, 'random_regular')

    # args.n_epochs = 300  (parameters like this can be set here or on the command line)
    args.Lx = utils.graph_to_lap(g1)
    args.m = len(args.Lx)
    args.n = lo_dim
    # sketch graphs of lo_dim.
    # Returns optimization loss, transport plan P, and Laplacian of sketched graph
    loss, P, Ly = graph.graph_dist(args, plot=False)
    print('sketched graph Laplacian {}'.format(Ly))
    #can convert Ly to a networkx graph with utils.lap_to_graph(Ly)
    return loss, P, Ly
Example #17
def compare(f1, f2, edges=10, nodes=10, iterations=100):
    print('Comparing {} and {}'.format(f1.__name__, f2.__name__))
    results = {f1.__name__: 0, f2.__name__: 0, 'equal': 0}
    for i in range(0, iterations):
        print(i)
        graph = create_graph(edges, nodes)
        res_1 = f1(graph)[1]
        res_2 = f2(graph)[1]

        if res_1 > res_2:
            results[f1.__name__] += 1
        elif res_2 > res_1:
            results[f2.__name__] += 1
        else:
            results['equal'] += 1
    print(results)
Example #18
def create_graphs_view(n_, args, fname, n_graphs, low=None, save=True):
    """
    classify graphs
    """
    labels = []
    graphs = []
    #create random node counts for each graph
    low = max(int(n_ // 1.4), 25) if low is None else low
    n_l = np.random.randint(low, high=n_ + 1, size=n_graphs * 15)
    labels.extend([0] * n_graphs)
    for i in range(n_graphs):
        params = {'n_blocks': 2}
        n = n_l[0 * n_graphs + i]
        graphs.append(utils.create_graph(n, 'block', params=params))
    labels.extend([1] * n_graphs)
    for i in range(n_graphs):
        #sketching dist ~5e2
        n = n_l[1 * n_graphs + i]
        graphs.append(utils.create_graph(n, 'random_regular'))
    labels.extend([2] * n_graphs)
    #confused with 1?
    for i in range(n_graphs):
        n = n_l[2 * n_graphs + i]
        graphs.append(utils.create_graph(n, 'strogatz'))
    labels.extend([3] * n_graphs)
    for i in range(n_graphs):
        params = {'prob': .2}
        graphs.append(utils.create_graph(n, 'binomial', params=params))  # note: n carries over from the strogatz loop above

    labels.extend([4] * n_graphs)
    for i in range(n_graphs):
        n = n_l[4 * n_graphs + i]
        graphs.append(utils.create_graph(n, 'barabasi'))
    labels.extend([7] * n_graphs)
    for i in range(n_graphs):
        graphs.append(utils.create_graph(n, 'powerlaw_tree'))  # note: n carries over from the barabasi loop above
    labels.extend([8] * n_graphs)
    for i in range(n_graphs):
        #all close to
        #ideal lr .05, can learn but large loss
        params = {'n_cliques': max(1, n // 7), 'clique_sz': 7}
        graphs.append(utils.create_graph(n // 7 * 7, 'caveman', params=params))
    if save:
        with open(fname, 'wb') as f:
            pickle.dump({'graphs': graphs, 'labels': np.array(labels)}, f)
    #save graphs
    return graphs, np.array(labels)
Example #19
def plot_results_by_iter(function, edges=10, nodes=10, iterations=100):
    results = []
    graph = create_graph(edges, nodes)
    for i in range(0, iterations):
        print(i)
        results.append(function(graph, iterations=i)[1])

    plt.figure(figsize=(12, 6))
    ax = plt.subplot(111)
    ax.plot(range(1, iterations + 1), results)
    plt.grid(True, which='both', axis='y', zorder=0, alpha=1)
    major_ticks = np.arange(min(results) - 2, max(results) + 2, 1)
    ax.set_yticks(major_ticks)
    ax.set_label(function.__name__)
    plt.title("{} - Results by iterations \n{} nodes / {} edges".format(
        function.__name__, nodes, edges))

    plt.show()
Example #20
def compare_iterations(search_f, edges=10, nodes=10, iterations=100):
    results = []
    graph = create_graph(edges, nodes)
    for i in range(1, iterations + 1):
        results.append(search_f(graph, iterations=i))
    print(results)
    plt.figure(figsize=(12, 6))
    ax = plt.subplot(111)
    ax.plot(results)
    plt.grid(True, which='both', axis='y', zorder=0, alpha=1)
    major_ticks = np.arange(min(results) - 1, max(results) + 1, 1)
    ax.set_yticks(major_ticks)
    plt.title(search_f.__name__ + ' - Results by number of iterations\n\n')
    plt.suptitle(
        '\n\n{0} nodes/{1} edges up to {2} iterations\n(lower is better)'.format(
            nodes, edges, iterations),
        fontsize=8)
    plt.show()
Example #21
def main():

    simulation_runs = 5
    num_ants = 5
    q = 10000
    nc_max = 100

    aco = ACO(simulation_runs=simulation_runs,
              num_ants=num_ants,
              pheromone_quantity=q,
              num_colonies=nc_max)
    tsp = TSP()

    graph_nodes_list = [9, 10, 11]
    alpha_list = [1, 3, 7]
    beta_list = [1, 3, 7]
    ro_list = [0.0, 0.5, 0.9, 1.0]
    table_data = []

    for graph_nodes in graph_nodes_list:
        cost_graph = create_graph(graph_nodes)
        # print_graph(cost_graph)
        total_tsp_cost, tsp_solution = tsp.run(cost_graph)
        for alpha in alpha_list:
            for beta in beta_list:
                for ro in ro_list:
                    print(
                        "graph_nodes = {3}, alpha = {0}, beta = {1}, ro = {2}".
                        format(alpha, beta, ro, graph_nodes))
                    solution, total_aco_cost = aco.run(cost_graph, alpha, beta,
                                                       ro)
                    error_value = abs(total_tsp_cost - total_aco_cost)
                    table_data += [[
                        str(graph_nodes),
                        str(alpha),
                        str(beta),
                        str(ro),
                        str(total_aco_cost),
                        str(total_tsp_cost),
                        str(error_value)
                    ]]

    print_table_data(table_data)
Example #22
def get_stats():
    iterations, max_nodes = 10000, 64
    stats = {}

    for _ in range(iterations):
        graph = create_graph(max_nodes)
        number_of_nodes = len(graph) - 1

        actual_apsp = get_apsp(graph)
        computed_apsp, communication_rounds = compute_apsp(graph)
        assert actual_apsp == computed_apsp

        ratio = round(communication_rounds / number_of_nodes, 1)
        if ratio in stats: stats[ratio] += 1
        else: stats[ratio] = 1

    for key in stats:
        stats[key] = round(stats[key] / iterations, 6)

    return stats
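A hedged usage note: the returned dict maps a rounds-to-nodes ratio to its observed frequency, so printing it sorted gives the distribution:

stats = get_stats()
for ratio in sorted(stats):
    print(ratio, stats[ratio])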
Example #23
def test(search_fs, edges=10, nodes=10, draw=True):
    print('Creating graph')
    graph = create_graph(edges, nodes)

    for search_f in search_fs:
        print()
        print('Testing', search_f.__name__)
        start = time.time()

        result = search_f(graph)

        end = time.time()
        print('{3} with {0} nodes and {1} edges took {2:.3f}s'.format(
            nodes, edges, end - start, search_f.__name__))
        print('Final result', result)

        if draw:
            draw_graph(graph)

        for (v, w) in graph.edges:
            graph[v][w]['in_matching'] = False
Example #24
def avg_results(functions, edges=10, nodes=10, iterations=100):
    results = {}
    sums = {}
    for f in functions:
        results[f.__name__] = []
        sums[f.__name__] = 0
    for i in range(0, iterations):
        print(i)
        graph = create_graph(edges, nodes)
        for f in functions:
            res = f(graph)[1]
            results[f.__name__].append(res)
            sums[f.__name__] += res
    plt.figure(figsize=(8, 6))
    ax = plt.subplot(111)
    averages = []
    _averages = {}
    for search_f in functions:
        name = search_f.__name__
        averages.append(sums[name] / iterations)
        print(name, averages[-1])
        _averages[name] = averages[-1]
        plt.bar([name], averages[-1])

    plt.grid(True, which='both', axis='y', zorder=0, alpha=1)
    plt.grid(which='minor', alpha=.3)
    major_ticks = np.arange(0, max(averages) + 1, 5)
    minor_ticks = np.arange(0, max(averages) + 1, 1)
    ax.set_yticks(major_ticks)
    ax.set_yticks(minor_ticks, minor=True)
    plt.legend(list(
        map(lambda f: f.__name__ + ' ' + str(_averages[f.__name__]),
            functions)),
               loc='upper center',
               bbox_to_anchor=(0.5, 0),
               ncol=2)
    plt.tick_params(axis='x', bottom=False, labelbottom=False)
    plt.title('Average for {} nodes and {} edges in {} iterations\n'.format(
        nodes, edges, iterations))
    plt.show()
Example #25
    def _init_curbs(self, option='nearest'):
        """
        initialize the curbs and how they find their neighbors on the defined network
        """

        road_network = utils.create_graph(self.net_xml)

        # create agent curbs
        curb_ids = []
        root = ET.parse(self.add_xml).getroot()
        for child in root.iter('additional'):
            for kid in child.iter('parkingArea'):
                curb_ids.append(kid.get('id'))
        
        curbs = {}
        for curb_id in curb_ids:
            curbs[curb_id] = curbside.SmartCurbside(1, self.add_xml, self.net_xml, curb_id, 
                                                    ['passenger', 'delivery'], road_network)
        for curb in curbs.values():
            curb.find_neighborhood(road_network, curbs, option)
            # curb.dlv_cap = curb.tot_cap
        return curbs, curb_ids
Example #26
def create_graphs(n_, args, fname, n_graphs, low=None, save=True):
    """
    Create graphs
    """
    labels = []
    graphs = []
    #create random node counts for each graph
    low = max(int(n_ // 1.4), 25) if low is None else low
    n_l = np.random.randint(low, high=n_ + 1, size=n_graphs * 15)
    labels.extend([0] * n_graphs)
    for i in range(n_graphs):
        params = {'n_blocks': 2}
        n = n_l[0 * n_graphs + i]
        graphs.append(utils.create_graph(n, 'block', params=params))
    labels.extend([1] * n_graphs)
    for i in range(n_graphs):
        #sketching dist ~5e2
        n = n_l[1 * n_graphs + i]
        graphs.append(utils.create_graph(n, 'random_regular'))

    labels.extend([4] * n_graphs)
    for i in range(n_graphs):
        n = n_l[4 * n_graphs + i]
        graphs.append(utils.create_graph(n, 'barabasi'))
    labels.extend([5] * n_graphs)
    for i in range(n_graphs):
        params = {'n_blocks': 3}
        n = n_  #n_l[5*n_graphs + i]
        graphs.append(utils.create_graph(n, 'block', params=params))
    labels.extend([6] * n_graphs)
    for i in range(n_graphs):
        params = {'n_blocks': 4}
        n = n_  #n_l[6*n_graphs + i]
        #2 and 6 confused
        graphs.append(utils.create_graph(n, 'block', params=params))
    labels.extend([9] * n_graphs)
    for i in range(n_graphs):
        params = {'radius': .2}  #, 'clique_sz':7}
        n = n_l[9 * n_graphs + i]
        graphs.append(utils.create_graph(n, 'random_geometric', params=params))

    if save:
        with open(fname, 'wb') as f:
            pickle.dump({'graphs': graphs, 'labels': np.array(labels)}, f)
    #save graphs
    return graphs, np.array(labels)
Example #27
def perm_mi(args):
    '''
    Remove edges, permute, align, then measure MI.
    '''
    args.n_epochs = 1000
    params = {'n_blocks': 4}
    use_given_graph = False
    if use_given_graph:
        g = torch.load('mi_g_.pt')
    else:
        seed = 0 if args.fix_seed else None
        g = utils.create_graph(40, gtype='block', params=params, seed=seed)
        #torch.save(g, 'mi_g.pt')
    orig_cls = []
    for i in range(4):
        orig_cls.extend([i for _ in range(10)])
    orig_cls = np.array(orig_cls)
    Lg = utils.graph_to_lap(g)
    args.Lx = Lg.clone()
    args.m = len(Lg)

    #remove edges and permute
    n_remove = args.n_remove  #150
    rand_seed = 0 if args.fix_seed else None
    Lg_removed = utils.remove_edges(Lg, n_remove=n_remove, seed=rand_seed)
    Lg_perm, perm = utils.permute_nodes(Lg_removed.numpy(), seed=rand_seed)

    inv_perm = np.empty(args.m, perm.dtype)
    inv_perm[perm] = np.arange(args.m)

    Ly = torch.from_numpy(Lg_perm)
    args.n = len(Ly)
    #8 st_n_samples worked best, 5 sinkhorn iter, 1 as tau
    #align
    time0 = time.time()
    loss, P, Ly_ = graph.graph_dist(args, plot=False, Ly=Ly, take_ly_exp=False)
    dur_ot = time.time() - time0

    orig_idx = P.argmax(-1).cpu().numpy()
    perm_mx = False
    if perm_mx:
        P_max = P.max(-1, keepdim=True)[0]
        P[P < P_max - .1] = 0
        P[P > 0] = 1

    new_cls = orig_cls[perm][orig_idx].reshape(-1)
    mi = utils.normalizedMI(orig_cls, new_cls)
    #return mi
    Lx = args.Lx
    time0 = time.time()
    x_reg, y_reg, (P_st, loss_st) = st.find_permutation(Ly.cpu().numpy(),
                                                        Lx.cpu().numpy(),
                                                        args.st_it,
                                                        args.st_tau,
                                                        args.st_n_samples,
                                                        args.st_epochs,
                                                        args.st_lr,
                                                        loss_type='w',
                                                        alpha=0,
                                                        ones=True,
                                                        graphs=True)
    dur_st = time.time() - time0
    orig_idx = P_st.argmax(-1)

    new_cls_st = orig_cls[perm][orig_idx].reshape(-1)
    mi_st = utils.normalizedMI(orig_cls, new_cls_st)
    #print('{} COPT {} GOT {} dur ot {} dur st {}'.format(n_remove, mi, mi_st, dur_ot, dur_st))
    print('{} {} {} {} {}'.format(n_remove, mi, mi_st, dur_ot, dur_st))
    return mi
Example #28
import utils

if __name__ == '__main__':
    nodes, edges = utils.read_input_file('input.txt')
    utils.verify_edge_numbers(nodes, edges)
    graph = utils.create_graph(nodes, edges)
    utils.save_graph_to_file(graph, "graph.png")
    utils.can_be_two_cliques(graph)
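The listing only shows the driver; a plausible core for can_be_two_cliques (hypothetical, not the repo's code) uses the classical reduction: a node set splits into two cliques exactly when the complement graph is bipartite.

import networkx as nx

def can_be_two_cliques(graph):
    # Two cliques cover the nodes iff the complement graph is bipartite.
    complement = nx.complement(graph)
    result = nx.is_bipartite(complement)
    print("Can be split into two cliques:", result)
    return result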
Example #29
        default=100,
        type=float)

    ap.add_argument("--node_distance",
                    help="Distance between nodes on the graph, default: 140.",
                    default=140)

    ap.add_argument(
        "--reactants",
        nargs='+',
        help="Reduce graph particles only to those defined here, default: None, "
        "meaning - left edges of all nodes selected.",
        default=None)

    args = ap.parse_args()

    species_kdiff, reactions = neurord_parse_reaction_file(
        filename=args.reaction_file)

    reactions = reaction_filter(reactions,
                                reactants_left=args.reactants,
                                percent_biggest_edges_to_left=args.left_edges)
    graph = create_graph(reactions=reactions,
                         reactants=args.reactants,
                         node_distance=args.node_distance)

    name = '_'.join(args.reactants) if args.reactants else 'all_reactions'
    os.makedirs(args.result_folder, exist_ok=True)
    graph.show('%s/%s_%s_percent.html' %
               (args.result_folder, name, args.left_edges))
Example #30

def random_walk(G):
    """
	Return the page rank of the graph G using random walk

	:param dict G: the graph
	"""
    nodes = list(G.nodes())
    K = 1000000
    curr = random.choice(nodes)
    visit = {curr: 1}
    out_edges = G.out_edges(curr)
    for _ in range(K):
        if len(out_edges) == 0:
            curr = random.choice(nodes)
        else:
            curr = random.choice(list(out_edges))[1]
        out_edges = G.out_edges(curr)
        visit[curr] = visit.get(curr, 0) + 1
    return visit


if __name__ == '__main__':
    PATH = 'graph.json'
    g = create_graph(PATH)

    rw = random_walk(g)
    TOP_N = 10  # the original sliced with K, which is local to random_walk; a constant is assumed here
    for node, visit in sorted(rw.items(), key=lambda x: x[1],
                              reverse=True)[:TOP_N]:
        print(node, visit)
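Visit counts approximate the stationary distribution, so normalizing them yields PageRank-style scores; a short follow-up sketch:

# Normalize raw visit counts into probabilities (sums to 1).
total = sum(rw.values())
ranks = {node: count / total for node, count in rw.items()}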
Example #31
def test_build_randomized_graph2():
    filename = "instances/1.in"
    G = create_graph(filename)
    rand_G = build_randomized_graph(G)
    print(rand_G.nodes())
    print(rand_G.edges())
Example #32
		new_child['total_children'] = total_children
		new_child['total_children_score'] = total_children_score
		new_child['immediate_children'] = immediate_children
		new_child['immediate_children_score'] = immediate_children_score
		new_graph['children'].append(new_child)
	return new_graph


if __name__ == '__main__':
	remake_folder('singles')
	remake_folder('convos')
	subs = get_post_groups('../comments_by_posts')
	for (subdir, sub) in subs:
		remake_folder('singles/' + sub)
		link_groups = os.listdir(subdir)
		for link_group in link_groups:
			with open(subdir + '/' + link_group, 'r') as f:
				comments = [json.loads(line) for line in f.readlines()]
				comments = sorted(comments, key=lambda comment: comment['link_id'])
				singles = []
				convos = []
				for link, g in itertools.groupby(comments, key=lambda comment: comment['link_id']):
					graph = create_graph(g)
					singles.append(make_singles(graph))
					convo_graph = make_convos(graph)
					if len(convo_graph['children']) != 0:
						convos.append(convo_graph)  # the original appended make_convo, which is undefined here
				# NOTE: the original was truncated at "with open"; the path below is an assumption
				with open('convos/' + sub + '/' + link_group, 'w') as out:
					json.dump(convos, out)
			break
		break
Example #33
                                                       seed=seed_)
        if i % 100 == 0:
            with open(
                    "supervised_data/batch_seed_" + str(i) + "_" +
                    str(i + 100) + ".pkl", 'rb') as handle:
                optimal_tour_dic = pickle.load(handle)

        optimal_tours = np.array(optimal_tour_dic[seed_])
        feed = {
            actor.input_: input_batch,
            actor.optimal_tour: optimal_tours,
            actor.temperature: np.array([temperature])
        }  # get feed dict   actor.predictions: real_lengths

        if actor.version == 'graph':
            graph_struct = create_graph(dist_batch)
            # graph_struct = create_MST_graph(dist_batch)
            feed[actor.graph_structure] = graph_struct

        _, loss_first, loss2_first, reward_first = sess.run(
            [actor.trn_op1, actor.loss, actor.loss_2, actor.reward],
            feed_dict=feed)

        feed[actor.optimal_tour] = optimal_tours[:, ::-1]

        _, summary, \
        v, loss, loss2, reward, \
        logits1, next_sampled, indices, \
        entropy, log_probs, = sess.run([actor.trn_op1, actor.merged,
                                        actor.v, actor.loss, actor.loss_2, actor.reward,
                                        actor.logits1, actor.idx, actor.encoded_ref,
                                        actor.entropy, actor.log_probs],  # assumed completion; the source truncates here
                                       feed_dict=feed)
Example #34
import matplotlib.pyplot as plt
from utils import create_graph
import networkx as nx

filename = "instances/12.in"
G = create_graph(filename)
nx.draw(G)
plt.show()
Example #36
    paper_year_dict = {}
    logging.info('Parsing Year from dataset')

    for file in os.listdir(args.dataset):
        if file.startswith(('P', 'RM')):
            paper_year_dict = utils.parse_year(args.dataset + file,
                                               paper_year_dict)

    logging.info('Serialising Paper- Year Dictionary')
    utils.dump_file(args.dumps, 'paper_year_dict', paper_year_dict)

    global_citation_graph = ''
    logging.info('Parsing Dataset')

    global_citation_graph = utils.create_graph(args.graph_path,
                                               paper_year_dict)
    logging.info('Serialising Global Citation Graph')
    utils.dump_file(args.dumps, 'global_citation_graph_full',
                    global_citation_graph)

    logging.info('Removing Cycles')
    global_citation_graph = utils.remove_cycles(global_citation_graph)
    logging.info('Removed Cycles')
    logging.info('Serialising Decyclised Graph')
    utils.dump_file(args.dumps, 'global_citation_graph_full_decyclised',
                    global_citation_graph)
    logging.info('Creating IDTs ...')

    #global_citation_graph = utils.get_pickle_dump("../dumps", "G_without_cycles")

    IDT_Dict = utils.IDT_init(global_citation_graph)
Example #37
from utils import create_graph, find_total_penalty

#file_a = "CURRENT BEST SOLUTIONS"
#file_b = "SOLUTION_RECORDS_TONY.txt"
#combine(file_a, file_b)
#write_condensed_solutions("COMBINED SOLUTIONS")
"""G = create_graph("instances/12.in")
contains = False
edge_to_check = (119, 57)
for edge in G.edges():
	if edge_to_check == edge:
		contains = True
print(contains) 
optimal_sol_list = do_not_test_set("COMBINED SOLUTIONS")
out_file = open("Sub-Optimal Instances", "w")
read_file = open("Condensed Solution Records", "r")
read_data = read_file.readlines()
for element in range(1, 493):
	if element not in optimal_sol_list:
		line_data = read_data[element - 1]
		pre_penalty = line_data.split(":")
		penalty = pre_penalty[1].strip()
		out_file.write(str(element) + ": " + penalty + "\n") """

G = create_graph("instances/8.in")	
penalty = find_total_penalty(G)
print(penalty)
#CG = construct_cluster_graph(G)
#print_cluster_graph_cycles(CG)