Example 1
def run_result(g: nx.Graph, action1, action2, seed_set):
    """Split the shared candidate seed set between the two players according to
    their allocations (ties broken uniformly at random), run one diffusion, and
    return how many nodes each player activated."""
    seed_set1 = []
    seed_set2 = []

    for i in range(len(seed_set)):
        if (action1[i] > action2[i]) and (action1[i] != 0):
            seed_set1.append(seed_set[i])
        elif (action2[i] > action1[i]) and (action2[i] != 0):
            seed_set2.append(seed_set[i])
        elif (action1[i] == action2[i]) and (action1[i] != 0):
            r = np.random.uniform(0, 1)
            if r < 0.5:
                seed_set1.append(seed_set[i])
            else:
                seed_set2.append(seed_set[i])

    for node in seed_set1:
        tools.activate(g, node, 1)
    for node in seed_set2:
        tools.activate(g, node, 2)

    tools.diffuse(g, seed_set1, seed_set2)
    activated1_number = len(g.graph["1"])
    activated2_number = len(g.graph["2"])
    return activated1_number, activated2_number
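
For context, a minimal usage sketch; the loader call, the candidate seeds, and the allocation vectors below are illustrative assumptions, only run_result itself comes from the example:

g = tools.load_graph(data_set)      # assumed loader, as used in the later examples
seed_set = [0, 5, 9]                # hypothetical candidate seed nodes
action1 = np.array([3, 0, 1])       # player 1's allocation over the candidates
action2 = np.array([1, 2, 1])       # player 2's allocation over the candidates
n1, n2 = run_result(g, action1, action2, seed_set)
print(n1 - n2)                      # activation margin in favor of player 1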
Example 2
def diffusion_payoff(s1, s2):
    """Monte Carlo estimate of the payoff margin: reload the graph each run,
    diffuse from seed sets s1 and s2, and record |activated by 1| - |activated by 2|."""
    iterations = 5000
    r = np.zeros(iterations)
    for i in range(iterations):
        g = tools.load_graph(data_set)
        tools.diffuse(g, s1, s2)
        r[i] = len(g.graph["1"]) - len(g.graph["2"])
    return r
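
A possible way to consume this estimator, assuming two seed sets are already defined (as in Example 8 below):

margins = diffusion_payoff(seed_set1, seed_set2)
print("mean:", np.mean(margins), "std:", np.std(margins))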
Example 3
def competition(data_set, nodes):
    """Repeatedly sample one allocation vector per player (the budget k is taken
    from module scope), build the seed sets, diffuse, and record the activation margin."""
    iterations = 1000
    r = np.zeros(iterations)
    for i in range(iterations):
        a1 = ca.action_v_each(k, 2, nodes)
        a2 = ca.action_v_each(k, 1, nodes)
        s1, s2 = get_seeds(a1, a2)
        g = tools.load_graph(data_set)
        tools.diffuse(g, s1, s2)
        r[i] = len(g.graph["1"]) - len(g.graph["2"])
    return r
Example 4
def get_features_values_by_competition(graph):
    """Collect 7-dimensional feature vectors by simulating competitions in which
    both players pick one of three seeding heuristics uniformly at random."""
    iterations = run.feature_iterations
    features = np.zeros((iterations * 1600, 7))  # preallocated; trimmed to `counter` rows on return
    counter = 0
    for iteration in range(iterations):
        g = graph.copy()

        while len(g.graph['free']) > 1:
            feature = tools.get_feature(g)
            features[counter][:] = feature
            counter += 1

            # first player action
            action_choice1 = np.random.randint(3)
            if action_choice1 == 0:
                seed1 = actions.action_degree(g)
            elif action_choice1 == 1:
                seed1 = actions.action_weight(g)
            elif action_choice1 == 2:
                seed1 = actions.action_blocking(g, 1)

            # illegal action
            if seed1 == -1:
                print("Illegal action")
                action_choice1 = 0
                seed1 = actions.action_degree(g)
            tools.activate_node(g, seed1, 1)

            # second player action
            action_choice2 = np.random.randint(3)
            if action_choice2 == 0:
                seed2 = actions.action_degree(g)
            elif action_choice2 == 1:
                seed2 = actions.action_weight(g)
            elif action_choice2 == 2:
                seed2 = actions.action_blocking(g, 2)

            # illegal action
            if seed2 == -1:
                print("Illegal action")
                action_choice2 = 0
                seed2 = actions.action_degree(g)
            tools.activate_node(g, seed2, 2)

            a1, a2 = tools.diffuse(g)
            for n in a1:
                tools.activate_node(g, n, 1)
            for n in a2:
                tools.activate_node(g, n, 2)

    return features[0:counter, :]
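
A plausible follow-up, sketched under the assumption that feature_lmh_ranges (consumed by train in Example 6) holds per-feature low/medium/high cut points derived from these samples; the tertile choice is hypothetical:

samples = get_features_values_by_competition(graph)
feature_lmh_ranges = np.percentile(samples, [100 / 3, 200 / 3], axis=0)  # shape (2, 7): two cut points per feature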
Example 5
def competition(a1, a2):
    """Single competition: build the seed sets from the two action vectors, diffuse
    once, and return the activation margin for player 1."""
    s1, s2 = get_seeds(a1, a2)
    g = tools.load_graph(data_set)
    tools.diffuse(g, s1, s2)
    return len(g.graph["1"]) - len(g.graph["2"])
Example 6
def train(graph, feature_lmh_ranges):
    """Tabular Q-learning for player 1 against a fixed heuristic opponent: a
    2187-state (3**7) Q-table over three seeding actions."""
    alpha = 0.5   # learning rate
    eps = 0.8     # exploration probability (epsilon-greedy)
    gamma = 0.98  # discount factor
    d = 0.998     # per-round decay applied to the learning rate

    opponent_action = run.opponent

    q_table = np.zeros((2187, 3))

    for iteration in tqdm(range(run.train_n_rounds)):
        g = graph.copy()
        state = tools.get_state(g, feature_lmh_ranges)

        for t in range(21):
            # first player action
            if np.random.random() < eps:
                action_choice1 = np.random.randint(3)
            else:
                action_choice1 = np.argmax(q_table[state, :])

            if action_choice1 == 0:
                seed1 = actions.action_degree(g)
            elif action_choice1 == 1:
                seed1 = actions.action_weight(g)
            elif action_choice1 == 2:
                seed1 = actions.action_blocking(g, 1)
            # elif action_choice1 == 3:
            #     seed1 = actions.action_last(g)
            # elif action_choice1 == 4:
            #     seed1 = actions.action_min_degree(g)

            # illegal action
            if seed1 == -1:
                print("Illegal action")
                action_choice1 = 0
                seed1 = actions.action_degree(g)

            tools.activate_node(g, seed1, 1)

            # second player action
            action_choice2 = opponent_action
            if action_choice2 == 0:
                seed2 = actions.action_degree(g)
            elif action_choice2 == 1:
                seed2 = actions.action_weight(g)
            elif action_choice2 == 2:
                seed2 = actions.action_blocking(g, 2)

            # illegal action
            if seed2 == -1:
                print("Illegal action")
                action_choice2 = 0
                seed2 = actions.action_degree(g)
            tools.activate_node(g, seed2, 2)

            a1, a2 = tools.diffuse(g)
            for n in a1:
                tools.activate_node(g, n, 1)
            for n in a2:
                tools.activate_node(g, n, 2)

            next_state = tools.get_state(g, feature_lmh_ranges)

            # delayed reward
            r = 0
            if t % 6 == 0:
                r = len(g.graph['1']) - len(g.graph['2'])

            # immediate reward
            # r = len(g.graph['1']) - len(g.graph['2'])

            q_table[state, action_choice1] = (
                1 - alpha) * q_table[state, action_choice1] + alpha * (
                    r + gamma * max(q_table[next_state, :]))
            state = next_state

        alpha = d * alpha                    # decay the learning rate
        eps = 0.9 - (0.6 * iteration / 500)  # linear exploration decay (pure exploitation once eps <= 0)

    return q_table
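
For reference, the 2187 rows equal 3**7, which matches the 7-column feature vectors of Example 4 discretized into low/medium/high levels; a minimal sketch of such an encoding (tools.get_state may differ in detail):

def encode_state(levels):
    # levels: seven integers in {0, 1, 2}, one low/medium/high level per feature.
    state = 0
    for lvl in levels:
        state = state * 3 + lvl
    return state  # index in [0, 3**7 - 1] = [0, 2186]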
Example 7
def compete(graph: nx.Graph, q_table):
    """Play the learned policy (greedy in q_table, random among the five heuristics
    when a state's row is all zero) against the degree heuristic until fewer than
    two free nodes remain."""
    opponent_action = 0  # opponent always plays the degree heuristic

    counter = 0
    max_iteration = 20000000
    un_seen = 0          # states whose Q-row is all zero

    while len(graph.graph['free']) > 1 and counter < max_iteration:
        state = tools.get_state(graph)

        # first player action
        action_choice1 = np.argmax(q_table[state, :])
        if np.sum(q_table[state, :]) == 0:
            un_seen += 1
            action_choice1 = np.random.randint(5)

        if action_choice1 == 0:
            seed1 = actions.action_degree(graph)
        elif action_choice1 == 1:
            seed1 = actions.action_weight(graph)
        elif action_choice1 == 2:
            seed1 = actions.action_blocking(graph, 1)
        elif action_choice1 == 3:
            seed1 = actions.action_last(graph)
        elif action_choice1 == 4:
            seed1 = actions.action_min_degree(graph)

        # illegal action
        if seed1 == -1:
            print("illegal 1")
            print(action_choice1)
            action_choice1 = 0
            seed1 = actions.action_degree(graph)

        tools.activate_node(graph, seed1, 1)

        # second player action
        action_choice2 = opponent_action
        if action_choice2 == 0:
            seed2 = actions.action_degree(graph)
        elif action_choice2 == 1:
            seed2 = actions.action_weight(graph)
        elif action_choice2 == 2:
            seed2 = actions.action_blocking(graph, 2)

        # illegal action
        if seed2 == -1:
            print("illegal 2")
            print(action_choice2)
            action_choice2 = 0
            seed2 = actions.action_degree(graph)
        tools.activate_node(graph, seed2, 2)

        a1, a2 = tools.diffuse(graph)
        for n in a1:
            tools.activate_node(graph, n, 1)
        for n in a2:
            tools.activate_node(graph, n, 2)

        counter += 1

    score = len(graph.graph['1']) - len(graph.graph['2'])
    print("counter: " + str(counter))
    print("unseen: " + str(un_seen))
    print("score: " + str(score))

    result = {"counter": counter, "un_seen": un_seen, "score": score}
    return result
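
Putting the pieces together, a hypothetical end-to-end run; the wiring below is an assumption, the repository's own scripts may differ:

graph = tools.load_graph(data_set)
ranges = np.percentile(get_features_values_by_competition(graph), [100 / 3, 200 / 3], axis=0)
q_table = train(graph, ranges)
result = compete(tools.load_graph(data_set), q_table)
print(result)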
Example 8
# Evaluate two precomputed seed sets of size k by repeated diffusion and append
# summary statistics to result_diffusion.txt.
result_folder = "../results/motivations/" + data_set + "/"

k = 10
seed_set1 = np.loadtxt(result_folder + "k" + str(k) + "seed1.txt")
seed_set2 = np.loadtxt(result_folder + "k" + str(k) + "seed2.txt")

iterations = 5000
r1 = np.zeros(iterations)
r2 = np.zeros(iterations)
r = np.zeros(iterations)

start_time = time.time()

for i in range(iterations):
    g = tools.load_graph(data_set)
    tools.diffuse(g, seed_set1, seed_set2)

    n1 = len(g.graph["1"])
    n2 = len(g.graph["2"])
    r1[i] = n1
    r2[i] = n2
    r[i] = n1 - n2

end_time = time.time()

file = open(result_folder + "result_diffusion.txt", "a")
file.write("k: " + str(k) + "\n")
file.write("run time: " + str(end_time - start_time) + "\n")
file.write("seed1: " + str(seed_set1) + "\n")
file.write("seed2: " + str(seed_set2) + "\n")
file.write("diffusion-r1    mean: " + str(np.average(r1)) + "   std:  " +