Example No. 1
def solve_mcn(G,
              Omega,
              Phi,
              Lambda,
              J=[],
              Omega_max=0,
              Phi_max=0,
              Lambda_max=0,
              exact=False,
              list_experts=[],
              exact_protection=False):
    """Solve the mcn instance given with the chosen method:
     - either the exact one and we apply the procedure described in
       https://cerc-datascience.polymtl.ca/wp-content/uploads/2017/11/Technical-Report_DS4DM-2017-012.pdf
    - either we use the trained list of neural networks.
      If this method is chosen, the budget_max are required as well as
      the list of experts

    Parameters:
    ----------
    G: networkx DiGraph,
    Omega, Phi, Lambda: int,
                        respectively the vaccination, attack and protection budgets
    J: list,
       nodes that are already attacked
    exact: bool,
           whether to apply the exact algorithm or not
    list_experts: list of pytorch neural nets,
                  loaded list of experts
    exact_protection: bool,
                      whether to use the exact algorithm for the protection phase
    Returns:
    -------
    value: int,
           value of the additional saved nodes
    D, I, P: lists,
             respectively the lists of vaccinated, attacked and protected nodes"""

    if exact:
        player = get_player(Omega, Phi, Lambda)
        # if it is the vaccinator's turn, solve the full problem exactly
        if player == 0:
            value, D, I, P = solve_mcn_exact(G, Omega, Phi, Lambda)
            return (value, D, I, P)
        # if it is the attacker's turn, solve the attack-protection subproblem
        elif player == 1:
            I, _, P, value = AP(G, Phi, Lambda, target=1, J=J)
            return (value, [], I, P)
        # if it is the protector's turn, solve the protection subproblem
        elif player == 2:
            value, _, P = solve_defender(J, G, Lambda)
            return (value, [], [], P)
    else:
        return solve_mcn_heuristic(list_experts,
                                   G,
                                   Omega,
                                   Phi,
                                   Lambda,
                                   Omega_max,
                                   Phi_max,
                                   Lambda_max,
                                   J=J,
                                   exact_protection=exact_protection)
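A minimal usage sketch of the exact mode, assuming solve_mcn and its dependencies are importable from the repository (the graph and budgets below are only illustrative):

import networkx as nx

# small directed instance with a budget of 1 for each player
G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0), (1, 4)])
value, D, I, P = solve_mcn(G, Omega=1, Phi=1, Lambda=1, exact=True)
print('saved value:', value)
print('vaccinated:', D, 'attacked:', I, 'protected:', P)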
Example No. 2
    def __init__(self, list_instances):
        """Initialize all the variables of the environment given the starting state.

        Parameters:
        ----------
        list_instances: list of Instance objects"""

        self.batch_instance = list_instances
        self.mappings = []
        self.batch_instance_torch = None
        self.list_instance_torch = None
        self.batch_size = len(list_instances)
        self.rewards = [0] * self.batch_size
        self.Omega = list_instances[0].Omega
        self.Phi = list_instances[0].Phi
        self.Lambda = list_instances[0].Lambda
        self.Budget = self.Omega + self.Phi + self.Lambda
        self.player = get_player(self.Omega, self.Phi, self.Lambda)
        self.batch_torch()
        self.compute_mappings()
        self.update_budgets()
        self.next_player = get_player(self.next_Omega, self.next_Phi,
                                      self.next_Lambda)
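A sketch of how a batch environment might be created, assuming the Instance constructor used in Example No. 6, i.e. Instance(G, Omega, Phi, Lambda, J, value); the graphs and budgets are illustrative:

import networkx as nx

# every instance of a batch shares the same budgets (Omega, Phi, Lambda)
graphs = [nx.erdos_renyi_graph(8, 0.3) for _ in range(4)]
instances = [Instance(G, 1, 2, 1, [], 0) for G in graphs]
env = Environment(instances)
print(env.player, env.Budget)  # player to move and total remaining budget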
Example No. 3
    def compute_current_situation(self):
        """Compute the variables describing the next state of the environment:
        next graphs, budgets, player, afterstates and their tensor versions."""

        self.next_G = [self.list_G_nx[action] for action in self.actions]
        self.next_J = [self.list_J[action] for action in self.actions]
        self.next_instance_torch = [
            self.list_instance_torch[action] for action in self.actions
        ]
        self.next_n_free = [n_free - 1 for n_free in self.n_free]
        self.next_Budget = self.Budget - 1
        self.update_budgets()
        self.next_player = get_player(self.next_Omega, self.next_Phi,
                                      self.next_Lambda)
        self.compute_all_possible_afterstates()
        self.next_n_nodes = [len(G) for G in self.next_list_G_nx]
        self.compute_next_state_tensors()
Example No. 4
    def __init__(self, list_instances):
        """Initialize all the variables of the environment given the starting state.

        Parameters:
        ----------
        list_instances: list of Instance objects"""

        self.batch_size = len(list_instances)
        self.Omega = list_instances[0].Omega
        self.Phi = list_instances[0].Phi
        self.Lambda = list_instances[0].Lambda
        self.Budget = self.Omega + self.Phi + self.Lambda
        self.n_free = [
            len(instance.G) + 1 - len(instance.J)
            for instance in list_instances
        ]
        self.list_G_nx = [instance.G for instance in list_instances]
        self.list_instance_torch = [None] * self.batch_size
        self.list_J = [instance.J for instance in list_instances]
        self.actions = list(range(self.batch_size))
        self.player = get_player(self.Omega, self.Phi, self.Lambda)
Example No. 5
    def step(self, actions):
        """Perform the chosen action in every instance of the batch and
        update the variables of the environment accordingly.

        Parameters:
        ----------
        actions: list,
                 one action index per instance of the batch"""

        next_instances = []
        rewards = [0] * self.batch_size
        for k in range(self.batch_size):
            action_k = int(actions[k])
            instance_k = self.batch_instance[k]
            J_k = instance_k.J.copy()
            G_k = instance_k.G.copy()
            node_k = self.mappings[k][action_k]
            # if it is the attacker's turn, attack the chosen node
            if self.player == 1:
                J_k += [node_k]
            # otherwise the defender removes it from the graph
            # (vaccination or protection)
            else:
                G_k, mapping = new_graph(G_k, node_k)
                J_k = [mapping[j] for j in J_k]
            # if we are not at the end of the episode
            # we need to compute the new states
            if self.next_player != 3:
                new_instance = Instance(G_k, self.next_Omega, self.next_Phi,
                                        self.next_Lambda, J_k, 0)
                next_instances.append(new_instance)
            # else, we need to compute the reward of taking the action
            else:
                rewards[k] = compute_saved_nodes(G_k, J_k)
        # update the variables of the environment
        self.rewards = rewards
        self.batch_instance = next_instances
        self.Budget -= 1
        self.Omega = self.next_Omega
        self.Phi = self.next_Phi
        self.Lambda = self.next_Lambda
        self.player = self.next_player
        if self.next_player != 3:
            self.batch_torch()
            self.compute_mappings()
            self.update_budgets()
            self.next_player = get_player(self.next_Omega, self.next_Phi,
                                          self.next_Lambda)
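The step method is meant to be driven by the loop shown in Example No. 6: compute_current_situation builds the afterstates, an expert scores them, and step advances the whole batch with one action index per instance. A condensed sketch of that interaction; the expert is replaced here by a random choice, which assumes self.mappings[k] is a dict keyed by the valid action indices of instance k (in the repository the action comes from take_action_deterministic):

import random

env = Environment(instances)  # as built in the previous sketch
while env.Budget >= 1:
    env.compute_current_situation()
    # one action index per instance; mappings[k] maps an action index
    # to a node of graph k
    actions = [random.choice(list(env.mappings[k]))
               for k in range(env.batch_size)]
    env.step(actions)
# after the final step, env.rewards holds the number of saved nodes per instance
print(env.rewards)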
Example No. 6
def solve_mcn_heuristic(list_experts,
                        G,
                        Omega,
                        Phi,
                        Lambda,
                        Omega_max,
                        Phi_max,
                        Lambda_max,
                        J=[],
                        exact_protection=False):
    """Given the list of target nets, an instance of the MCN problem and the maximum budgets
    allowed, solves the MCN problem using the list of experts"""

    player = get_player(Omega, Phi, Lambda)
    # if it is the protector's turn and we are to use the exact protection algorithm
    if player == 2 and exact_protection:
        value, _, P = solve_defender(J, G, Lambda)
        return value, [], [], P
    else:
        # Initialize the environment
        instance = Instance(G, Omega, Phi, Lambda, J, 0)
        env = Environment([instance])
        # list of actions for the episode
        actions_episode = []
        val_actions = 0

        while env.Budget >= 1:

            # if only the last attack move remains and exact protection is used,
            # solve the last attack and the protection phase exactly
            if env.Budget == Lambda + 1 and exact_protection:
                J_att = env.list_J[env.actions[0]]
                G_att = env.list_G_nx[env.actions[0]]
                I, _, P, value = AP(G_att, 1, Lambda, target=1, J=J_att)
                actions_episode += I + P
                break

            env.compute_current_situation()
            target_net = get_target_net(
                list_experts,
                env.next_Omega,
                env.next_Phi,
                env.next_Lambda,
                Omega_max,
                Phi_max,
                Lambda_max,
            )
            # Take an action
            action, targets, value = take_action_deterministic(
                target_net,
                env.player,
                env.next_player,
                env.next_rewards,
                env.next_list_G_torch,
                env.free_nodes_weights,
                n_nodes=env.next_n_nodes_tensor,
                Omegas=env.next_Omega_tensor,
                Phis=env.next_Phi_tensor,
                Lambdas=env.next_Lambda_tensor,
                Omegas_norm=env.next_Omega_norm,
                Phis_norm=env.next_Phi_norm,
                Lambdas_norm=env.next_Lambda_norm,
                J=env.next_J_tensor,
            )
            # save the action to the memory of actions
            actions_episode.append(action)
            # Update the environment
            env.step([action])
            val_actions += np.sum(env.action_values)

        D, I, P = original_names_actions_episode(actions_episode, J, Phi,
                                                 Lambda, exact_protection)
        value += val_actions

        return (value, D, I, P)
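A usage sketch of the heuristic mode, assuming list_experts has already been loaded (the loading utility is not shown in these examples) and that the maximum budgets match the ones the experts were trained with; the graph and budgets are illustrative:

import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
value, D, I, P = solve_mcn_heuristic(list_experts, G,
                                     Omega=1, Phi=1, Lambda=1,
                                     Omega_max=1, Phi_max=3, Lambda_max=3)
print(value, D, I, P)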
Example No. 7
def compute_node_values(G,
                        J,
                        Omega,
                        Phi,
                        Lambda,
                        exact=True,
                        Omega_max=None,
                        Phi_max=None,
                        Lambda_max=None,
                        list_experts=None):
    """Compute the value of each node of the graph given the budgets and already attacked nodes."""

    value_nodes = dict()
    weights = graph_weights(G)
    # the graph is weighted if its nodes carry a 'weight' attribute
    is_weighted = len(nx.get_node_attributes(G, 'weight').values()) != 0
    # the graph is considered directed if some edge lacks its reverse counterpart
    is_directed = False in [(v, u) in G.edges() for (u, v) in G.edges()]
    # for every node possible
    for k in G.nodes():
        # if the node is already attacked
        if k in J:
            # its value is null
            value_nodes[k] = 0
        else:
            G1 = G.copy()
            # get the player whose turn it is to play
            player = get_player(Omega, Phi, Lambda)
            # if it is the defender's turn
            if player == 0 or player == 2:
                # remove the node from the graph
                next_G, mapping = new_graph(G1, k)
                next_J = [mapping[node] for node in J]
                reward = weights[k]
            # if it is the attacker's turn
            else:
                # attack the node
                next_J = J + [k]
                next_G = G1
                reward = 0
            # compute the next budgets
            next_Omega = Omega
            next_Phi = Phi
            next_Lambda = Lambda
            if player == 0:
                next_Omega = Omega - 1
            elif player == 1:
                next_Phi = Phi - 1
            elif player == 2:
                next_Lambda = Lambda - 1

            if exact:
                # compute the value of the afterstate
                value, D, I, P = solve_mcn(next_G,
                                           next_Omega,
                                           next_Phi,
                                           next_Lambda,
                                           J=next_J,
                                           exact=True)
                # the value of the node is: reward + value of the afterstate
                value_nodes[k] = int(reward + value)
            else:
                # format the instance so that it can be read by our neural network
                instance = Instance(next_G, next_Omega, next_Phi, next_Lambda,
                                    next_J, 0)
                instance_torch = instance_to_torch(instance)
                batch_torch = Batch.from_data_list([instance_torch.G_torch
                                                    ]).to(device)
                # get the right expert
                target_net = get_target_net(list_experts, next_Omega, next_Phi,
                                            next_Lambda, Omega_max, Phi_max,
                                            Lambda_max)
                # compute the value of the afterstate
                value_approx = float(
                    target_net(batch_torch, instance_torch.n_nodes,
                               instance_torch.Omegas, instance_torch.Phis,
                               instance_torch.Lambdas,
                               instance_torch.Omegas_norm,
                               instance_torch.Phis_norm,
                               instance_torch.Lambdas_norm, instance_torch.J))
                if is_weighted:
                    value_nodes[k] = round(reward + value_approx, 1)
                else:
                    value_nodes[k] = round(Omega + Lambda + value_approx, 1)

    # plot the values
    nx.draw_spring(G,
                   with_labels=True,
                   node_size=600,
                   node_color=np.array(list(value_nodes.values())),
                   cmap='viridis_r',
                   alpha=1.0,
                   edge_color='gray',
                   arrows=is_directed,
                   width=3,
                   labels=value_nodes,
                   font_size=12,
                   font_color='white')
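A sketch of how the node values might be computed and displayed; nx.draw_spring only draws onto the current matplotlib figure, so a call such as plt.show() is needed to actually display it. The graph, budgets and attacked node below are illustrative:

import networkx as nx
import matplotlib.pyplot as plt

G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 1), (0, 4)])
# exact node values, with node 4 already attacked
compute_node_values(G, J=[4], Omega=1, Phi=1, Lambda=1, exact=True)
plt.show()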