예제 #1
0
def test_relabel_toposort():
    """Relabel nodes in place with a mapping that overlaps the existing labels.

    The mappings {0: 1, 1: 2, ...} and {0: -1, 1: 0, ...} collide with
    current node names, so relabel_nodes(copy=False) must apply them in a
    topologically sorted order to avoid clobbering nodes mid-relabel.
    """
    # Reference graph is K4 — renamed from the original, misleading 'K5'.
    K4 = nx.complete_graph(4)

    # Shift all labels up by one: {0: 1, 1: 2, 2: 3, 3: 4}.
    G = nx.complete_graph(4)
    G = nx.relabel_nodes(G, {i: i + 1 for i in range(4)}, copy=False)
    assert nx.is_isomorphic(K4, G)

    # Shift all labels down by one: {0: -1, 1: 0, 2: 1, 3: 2}.
    G = nx.complete_graph(4)
    G = nx.relabel_nodes(G, {i: i - 1 for i in range(4)}, copy=False)
    assert nx.is_isomorphic(K4, G)
예제 #2
0
    def edge_bags_contains_kn(self, bags, n):
        """Return True if any bag contains a monochromatic K_n.

        Parameters
        ----------
        bags : dict
            Maps a color (int) to the list of edges assigned that color.
        n : int
            Order of the complete graph to search for.

        For each bag, build the graph spanned by that bag's edges and test
        whether K_n occurs in it via subgraph isomorphism.
        (Dead commented-out combination-enumeration code removed.)
        """
        Kn = nx.complete_graph(n)
        for color in bags:
            # Graph spanned by this single color class; add_edges_from
            # creates the endpoints implicitly, so no explicit add_node pass
            # is needed.
            G = nx.Graph()
            G.add_edges_from(bags[color])

            # GraphMatcher(G, Kn): does Kn appear as a subgraph of G?
            GM = isomorphism.GraphMatcher(G, Kn)
            if GM.subgraph_is_isomorphic():
                return True
        return False  # no bag yielded a monochromatic Kn
예제 #3
0
def testK8C5():
	"""Build the graph join K8 + C5 and write a DIMACS CNF for the (3,5) arrowing reduction.

	Every node of a 5-cycle (labels shifted past K8's) is connected to every
	node of K8, the cycle's own edges are copied over, and the resulting
	graph is reduced to SAT and written to 'nenov_arrow_3_5_k5c5.cnf'.
	"""
	K8 = nx.complete_graph(8)
	C1 = nx.cycle_graph(5)

	# list(...) instead of the original nodes()[:] — networkx >= 2.0 returns
	# a non-sliceable NodeView, so slicing raised TypeError there.
	oldNodes = list(K8.nodes())
	index = len(oldNodes)

	# Join: connect each shifted C5 node to every original K8 node.
	for v in C1.nodes():
		K8.add_node(v + index)
		for old in oldNodes:
			K8.add_edge(old, v + index)
	# Copy C5's own edges, shifted by the same offset.
	for e in C1.edges():
		K8.add_edge(e[0] + index, e[1] + index)

	print(index)
	print(K8.nodes())
	print(K8.edges())
	print(len(K8.edges()))

	# Reduce to CNF and emit in DIMACS format.
	r = reducer()
	numVars, cnf = r.reduce35(K8)
	header, clauses = makeDimacsCNF(numVars, cnf)
	print(header)
	print(clauses)
	# 'with' guarantees the handle is closed; the original leaked it.
	with open('nenov_arrow_3_5_k5c5.cnf', 'w') as outFile:
		outFile.write(header + "\n")
		for c in clauses:
			for l in c:
				outFile.write(str(l) + " ")
			outFile.write("0 \n")
예제 #4
0
def main():
	"""Run the arrowing checks from the presentation: K_5 -> (3,3), K_8 -> (3,4), K_8 -> (4,3)."""
	# TODO: read in all three params from command line
	# TODO: first must be a graph (adj matrix), second can be graphs or

	# (order of the complete graph, s, t) cases, checked in sequence.
	# Disabled case kept for reference: (6, 3, 3) — "Checking K_6 -> (3,3)".
	cases = [(5, 3, 3), (8, 3, 4), (8, 4, 3)]
	for order, s, t in cases:
		print("Checking K_{} -> ({},{})".format(order, s, t))
		print(folkman(nx.complete_graph(order), s, t))
예제 #5
0
    def iterative_edge_split_avoid_kn(self, b, n):
        """Greedily split self.graph's edges into at most b bags, none containing K_n.

        Parameters
        ----------
        b : int
            Number of available colors (bags). The original docstring said
            "n bags", but the loop clearly tries colors 0..b-1.
        n : int
            Order of the forbidden monochromatic clique.

        Returns
        -------
        dict mapping color -> list of edges, or None when some edge cannot
        be placed in any of the b bags without creating a monochromatic K_n.
        (Dead commented-out add_node code removed; add_edges_from creates
        endpoints implicitly.)
        """
        bags = {}
        Kn = nx.complete_graph(n)
        for e in self.graph.edges():
            colors = 0
            # Try colors 0..b-1 until one accepts the edge.
            while colors != b:
                color = colors
                if color not in bags:
                    bags[color] = []

                # Tentatively add e to this color class and check whether
                # the class now contains a monochromatic K_n.
                G = nx.Graph()
                G.add_edges_from(bags[color])
                G.add_edges_from([e])

                GM = isomorphism.GraphMatcher(G, Kn)
                if not GM.subgraph_is_isomorphic():
                    # Safe: commit e to this bag and move to the next edge.
                    bags[color].append(e)
                    break
                colors = colors + 1
            if colors == b:
                # Every color produced a monochromatic K_n; no valid split.
                return None
        return bags
from nxsim import NetworkSimulation
from churnsim.uk.ac.bristol.rechurn.modes.p2p.bittorrent.randompeerselection import randompeerfailure
from networkx import nx
import string
import random
from matplotlib import pyplot as plt

# Simulation setup: sweep over durations, one fresh complete overlay per run.
# NOTE(review): this snippet looks truncated — `time` is never used in the
# visible loop body and `seednode` is assigned but not consumed here; the
# rest of the loop presumably follows in the full script.
deletesizes = []
numberofnodes = 200
times = [i for i in range(100, 600, 50)]
#piecesizes=[i for i in range(100,700,50)]
piecesizes = [i for i in range(100, 600, 50)]
for time in times:
    # Fully-connected overlay of `numberofnodes` peers.
    G = nx.complete_graph(numberofnodes)

    # Per-peer state records, indexed by peer id.
    nodes = [dict() for x in range(numberofnodes)]

    keys = range(numberofnodes)

    for i in keys:
        nodes[i]["pieces"] = []
        nodes[i]["id"] = 0
        nodes[i]["peerid"] = i
        nodes[i]["downloadlist"] = []
        nodes[i]["uploadlist"] = []
        nodes[i]["peerarrivaltime"] = 0
        nodes[i]["downloadspeed"] = 0
        nodes[i]["uploadspeed"] = 0

    # NOTE(review): random.randint is inclusive on BOTH ends, so this can
    # return numberofnodes — an out-of-range index for `nodes`. Likely
    # should be random.randrange(numberofnodes); confirm against the
    # full script before changing.
    seednode = random.randint(0, numberofnodes)
예제 #7
0
def play_many_games_semisupervised(num_agents,
                                   num_episodes,
                                   inner_speech,
                                   learning_rate,
                                   punishment_weight,
                                   punishment_talk,
                                   agents_memory,
                                   replay,
                                   beta_1,
                                   p_peek,
                                   num_choose_act,
                                   network_type,
                                   verbose=False):
    '''Run the two-role speaker/listener game for many episodes.

    Each episode samples a speaker and one of its graph neighbors as
    listener, plays one round, stores transitions (plus occasional
    "peeked" imitation transitions with probability p_peek), and trains
    both agents by experience replay.

    network_type selects the agent topology: 0 random (gnm), 1 fully
    connected, 2 small-world, 3 ring. Any other value raises ValueError
    (the original silently fell through and later crashed with NameError).

    Returns [talks, acts, scores, samples], each indexed per agent
    (samples is per episode).

    NOTE(review): inner_speech is currently unused — kept for interface
    compatibility with callers.
    '''

    # Parameters fixed throughout simulations.
    # (Removed the original's no-op self-assignments of punishment_weight
    # and num_agents.)
    num_choices = num_choose_act
    num_talking_symbols = num_choose_act
    winning_reward = 1
    mean_sample = 100
    n_type = network_type

    # Initialize environment and agents
    env = Two_Roles_Game_Many_Agents(num_choices, winning_reward, mean_sample,
                                     num_talking_symbols, num_agents,
                                     punishment_talk, punishment_weight)

    agents = []
    scores = []
    talks = []
    acts = []
    samples = []

    m = 20  # edge count for the random (gnm) topology
    for i in range(num_agents):
        x = DQNAgent_student_teacher(num_talking_symbols, num_choices, beta_1,
                                     agents_memory)
        x.learning_rate = learning_rate
        agents.append(x)
        talks.append([])
        acts.append([])
        scores.append([])

    # Communication topology between agents.
    if n_type == 0:  #random
        G = nx.gnm_random_graph(num_agents, m)
    elif n_type == 1:  #fully connected
        G = nx.complete_graph(num_agents)
    elif n_type == 2:  #small-world
        G = nx.connected_watts_strogatz_graph(num_agents, 2, 0.2)
    elif n_type == 3:  #ring
        G = nx.connected_watts_strogatz_graph(num_agents, 2, 0)
    else:
        # Fail fast instead of hitting a NameError on G below.
        raise ValueError("unknown network_type: {}".format(network_type))

    # Iterate the game
    episodes = num_episodes

    for e in range(episodes):

        # Selecting agents to play in the episode
        my_sample1 = random.sample(range(num_agents), 1)[0]
        my_sample2 = random.choice(list(G.neighbors(my_sample1)))
        my_sample = [my_sample1, my_sample2]

        agent1 = agents[my_sample[0]]  # agent1 is always the speaker
        agent2 = agents[my_sample[1]]  # agent2 is always the listener

        # Initialize the scores
        score1 = 0
        score2 = 0

        state1, state2, _, _ = env.step(
            0, 0, 0, my_sample)  # initialize the environment

        # agent 1 talks
        action1 = agent1.act(state1)

        # update environment based on agent1's speech and actions
        state1, state2, reward1, reward2 = env.step(action1[0], 0, 0,
                                                    my_sample)
        score1 += reward1

        # Agent 2 acts based on agent 1's message
        action2 = agent2.act(state2)

        # Update the environment and compute coordination rewards
        next_state1, next_state2, reward1, reward2 = env.step(
            action1[1], action2[1], 1, my_sample)

        score1 += reward1
        score2 += reward2

        # Save the transition to memory
        agents[my_sample[1]].remember(state2, action2, reward2, next_state2)

        # Semi-supervised updates: with probability p_peek, peek at the
        # other agent's action and store it as if it had won (mimicking).
        if random.random() < p_peek:
            agents[my_sample[1]].remember(state2, action1, winning_reward,
                                          next_state2)

        agents[my_sample[0]].remember(state1, action1, reward1, next_state1)
        if random.random() < p_peek:
            agents[my_sample[0]].remember(state1, action2, winning_reward,
                                          next_state1)

        # Monitor progress
        if e % 100 == 0 and verbose:
            print("episode: {}/{}, score1: {}, score2: {}".format(
                e, episodes, score1, score2))

        # Train agents only once both have enough stored transitions.
        if len(agent1.memory) >= replay and len(agent2.memory) >= replay:
            agents[my_sample[0]].replay(replay)
            agents[my_sample[1]].replay(replay)
        else:
            print("replay more than memory")

        # Save data for later analysis
        talks[my_sample[0]].append(action1[0])
        talks[my_sample[1]].append(-1)  #didn't talk
        acts[my_sample[0]].append(action1[1])
        acts[my_sample[1]].append(action2[1])
        scores[my_sample[0]].append(score1)
        scores[my_sample[1]].append(score2)
        samples.append(my_sample)

    return [talks, acts, scores, samples]