Example #1
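All of the examples below are standalone function definitions from the difpy package. A minimal set of imports they rely on (names inferred from the code; dp is assumed to be the difpy package itself, providing the draw_graph helper) could look like this:

import random
import statistics as st

import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from sklearn.preprocessing import MinMaxScaler

import difpy as dp  # assumed: provides dp.draw_graph used in the examples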
def add_state_random(G, pos, initiation_perc, show_attr=True, draw_graph=True):
    """ Add state variable values to the graph's nodes.
    
    State is the variable which describes the state of a node - whether it 
    is aware of some information or not. 
    
    Parameters
    ----------
    G : graph
        A networkx graph object.
        
    pos : dictionary with 2-element ndarrays as values
       Contains positions of nodes in the graph chart. Pos is used 
       to draw the graph after a simulation step. 
        
    initiation_perc : float
       Fraction of nodes randomly set as aware (e.g. 0.1 for 10%).
    
    show_attr : bool, optional
        Show list of weights and other generated attributes of nodes.
    
    draw_graph : bool, optional
        Draw graph.


    Returns
    -------
    G : graph
        A networkx graph object.


    """

    #===================#
    # Random initiation #
    #===================#

    # Add 'unaware' state for all nodes
    nx.set_node_attributes(G, 'unaware', 'state')  # (G, values, name)

    # Compute number of nodes
    N = G.number_of_nodes()
    # Sample ids of agents to set as initially aware
    infected_agents_id = random.sample(population=range(0, N),
                                       k=int(N * initiation_perc))
    # Set those nodes as aware
    for v in infected_agents_id:
        G.nodes[v]['state'] = 'aware'

    #========================#
    # Show nodes' attributes #
    #========================#

    if show_attr == True:
        print("Node attributes:")
        for (u, v) in G.nodes.data():
            print(u, v)

    #============#
    # Draw graph #
    #============#

    if draw_graph == True:
        fig_01, ax_01 = plt.subplots()  # enable to plot one by one
        # in separate windows
        dp.draw_graph(G=G, pos=pos)

    return G
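A minimal usage sketch (hypothetical, not taken from the difpy docs), assuming the imports listed above; the function mutates G in place:

G_demo = nx.watts_strogatz_graph(n=20, k=4, p=0.1)
pos_demo = nx.spring_layout(G_demo)
add_state_random(G_demo, pos_demo, initiation_perc=0.1,
                 show_attr=False, draw_graph=False)
print(nx.get_node_attributes(G_demo, 'state'))  # e.g. {0: 'unaware', 1: 'aware', ...}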
Example #2
def add_feature(
        G,
        pos,
        feature=None,
        feature_type=None,
        scaling=True,
        decimals=6,
        show_attr=True,  # show node weights and attributes
        show_weights=True,
        draw_graph=False):
    """ Add feature to the graph.
    
    Function dedicated to adding an existing feature to the graph, 
    with optional feature scaling.

    
    Parameters
    ----------
    
    G : graph
        A networkx graph object.
    
    pos : dictionary with 2-element ndarrays as values
       Contains positions of nodes in the graph chart. Pos is used 
       to draw the graph after a simulation step. 

    feature : ndarray
       ndarray of shape (<number of nodes/edges>, 1).

    feature_type : string
        Levels: "weights", "receptiveness", "extraversion", "engagement",
                "state", or custom ones which may be used for measuring
                feature importance in information propagation during
                modelling.
        
    scaling : bool, optional
        Scale weights to (0,1] range.
    
    decimals : integer, optional
        Number of decimal digits used when rounding weights.
        
    show_attr : bool, optional
        Show list of weights and other generated attributes of nodes.
    
    show_weights : bool, optional
        Show list of edge weights.
    
    draw_graph : bool, optional
        Draw graph.


    Returns
    -------
    G : graph
        A networkx graph object.

    """

    # Values may need scaling, so we cannot add them to the graph directly -
    # only after scaling and after replacing zeros with 0.000001
    # for computation purposes

    # Only for numeric variables
    if scaling == True:

        # Scale feature values to [0,1] range
        scaler = MinMaxScaler()
        scaler.fit(feature)
        feature = scaler.transform(feature)
        feature = np.round(feature, decimals)
        # eliminate zeros for computation purposes
        for (x, y), i in np.ndenumerate(feature):
            if i == 0:
                feature[x, y] = 0.000001

    #======================#
    # Add weights to graph #
    #======================#
    # Weights - are probabilities of contact between nodes of given social
    # network.

    if feature_type == "weights":

        # Add weights to the graph
        for i, (u, v) in enumerate(G.edges()):
            G[u][v]['weight'] = feature[i, 0]

    #====================================#
    # Set node attribute - receptiveness #
    #====================================#

    # Receptiveness - general parameter of each node, expressing how
    # receptive the actor generally is in the context of the given social network.

    if feature_type == "receptiveness":

        # Add receptiveness parameter to nodes
        for v in G.nodes():
            G.nodes[v]['receptiveness'] = feature[v, 0]

    #===================================#
    # Set node attribute - extraversion #
    #===================================#

    # Extraversion is the agent's eagerness to express itself to other agents.

    if feature_type == "extraversion":

        # Add extraversion parameter to nodes
        for v in G.nodes():
            G.nodes[v]['extraversion'] = feature[v, 0]

    #=================================#
    # Set node attribute - engagement #
    #=================================#

    # Engagement - engagement with the information-related topic; the
    # strength of the experiences connected with the topic, i.e. how
    # objectively relevant the information is for the actor.

    if feature_type == "engagement":

        # Add engagement parameter to nodes
        for v in G.nodes():
            G.nodes[v]['engagement'] = feature[v, 0]

    #============================#
    # Set node attribute - state #
    #============================#

    # "State" Variable levels:
    # * Unaware - is actor who did not internalized the information and
    #   is not able to pass it down.
    # * Aware - is the actor who internalized the information and is able
    #   to pass it down.

    if feature_type == "state":

        # Add state parameter to nodes
        for v in G.nodes():
            G.nodes[v]['state'] = feature[v, 0]

    #=======================================#
    # Set node attribute - custom parameter #
    #=======================================#

    if feature_type not in [
            "weights", "receptiveness", "extraversion", "engagement", "state"
    ]:

        # Add parameter to nodes
        for v in G.nodes():
            G.nodes[v][feature_type] = feature[v, 0]

    #========================#
    # Show nodes' attributes #
    #========================#

    if show_attr == True:
        print('\n' + "Nodes' attributes:" + '\n')
        for (u, v) in G.nodes.data():
            print(u, v)

    #=====================#
    # Show edges' weights #
    #=====================#

    if show_weights == True:

        # Show weights
        print('\n' + "Sorted weights:" + '\n')
        for i, (u, v, wt) in enumerate(
                sorted(G.edges.data('weight'), key=lambda x: x[2])):
            print(i, wt)

    #============#
    # Draw graph #
    #============#

    if draw_graph == True:
        dp.draw_graph(G=G, pos=pos)

    return G
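A minimal usage sketch (hypothetical), assuming the imports above; here an exponential sample is attached as edge weights through the scaling path described in the comments:

G_demo = nx.watts_strogatz_graph(n=20, k=4, p=0.1)
pos_demo = nx.spring_layout(G_demo)
# One raw value per edge, shaped (number_of_edges, 1) as required by the docstring
raw_weights = np.random.exponential(scale=0.1,
                                    size=G_demo.number_of_edges()).reshape(-1, 1)
G_demo = add_feature(G_demo, pos_demo, feature=raw_weights,
                     feature_type="weights", scaling=True,
                     show_attr=False, show_weights=False, draw_graph=False)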
Example #3
def graph_stats(G, pos, draw_degree=True, show_attr=True, draw_graph=True):
    """ 
    Function for checking basic graph statistics, node attributes and
    weights.
    
    Parameters
    ----------
    
    G : graph
        A networkx graph object.
        
    pos : dictionary with 2-element ndarrays as values
       Contains positions of nodes in the graph chart. Pos is used 
       to draw the graph after a simulation step. 

    show_attr : bool
        Show nodes attributes and weights.
            
    draw_degree : bool
        Draw nodes degree distribution.
        
    draw_graph : bool
        Draw graph.
        
    Returns
    -------
    
    dict_stat : dictionary
        A dictionary with graph statistics.
       
    """

    #================================#
    # Compute basic graph statistics #
    #================================#

    nodes = len(G.nodes())

    edges = len(G.edges())

    mean_degree = st.mean([v for k, v in nx.degree(G)])

    avg_clustering_coef = nx.average_clustering(G,
                                                nodes=None,
                                                weight=None,
                                                count_zeros=True)

    avg_clustering_coef = round(avg_clustering_coef, 4)
    # https://en.wikipedia.org/wiki/Clustering_coefficient

    # https://networkx.github.io/documentation/stable/reference/
    # algorithms/generated/networkx.algorithms.cluster.average_
    # clustering.html#networkx.algorithms.cluster.average_clustering

    # average of local clustering coefficients (for each node)

    transitivity = nx.transitivity(G)  # fraction of all possible triangles
    transitivity = round(transitivity, 4)

    dict_stat = {
        'nodes': nodes,
        'edges': edges,
        'mean node degree': mean_degree,
        'average clustering coefficient': avg_clustering_coef,
        'transitivity': transitivity
    }

    print('\n' + "General information:" + '\n')
    for k, v in dict_stat.items():
        print(k, ': ', v)

    #========================#
    # Show nodes' attributes #
    #========================#

    if show_attr == True:
        print('\n' + "Node attributes:" + '\n')
        for (u, v) in G.nodes.data():
            print(u, v)

        print('\n' + "Sorted weights:" + '\n')
        #global wages_list
        #wages_list = []
        for i, (u, v, wt) in enumerate(
                sorted(G.edges.data('weight'), key=lambda x: x[2])):
            print(i, wt)
            #wages_list.append((i, wt))

    #==========================#
    # Degree distribution plot #
    #==========================#

    if draw_degree == True:
        # degree distribution
        degree_distribution = sorted([v for k, v in nx.degree(G)],
                                     reverse=True)
        x = range(len(degree_distribution))

        fig_01, ax_01 = plt.subplots()  # enable to plot one by one
        plt.scatter(x, degree_distribution, marker='o', c='blue', alpha=0.5)
        plt.ylabel('Node degree')
        plt.xlabel('Node number')
        plt.suptitle('Nodes degree distribution', fontsize=16)

    #============#
    # Draw graph #
    #============#

    if draw_graph == True:
        fig_01, ax_01 = plt.subplots()  # enable to plot one by one
        # in separate windows
        dp.draw_graph(G=G, pos=pos)

    return dict_stat
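A minimal usage sketch (hypothetical); G and pos are assumed to come from graph_init (Example #4 below), so edges already carry 'weight' attributes:

G_demo, pos_demo = graph_init(n=26, k=5, rewire_prob=0.1, initiation_perc=0.1,
                              show_attr=False, draw_graph=False)
stats = graph_stats(G_demo, pos_demo, draw_degree=False,
                    show_attr=False, draw_graph=False)
print(stats['mean node degree'], stats['transitivity'])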
Example #4
def graph_init(
    n=26,  # number of nodes
    k=5,  # number of neighbours of a single node before rewiring edges
    rewire_prob=0.1,  # probability of rewiring an edge
    initiation_perc=0.1,  # fraction of randomly informed nodes
    show_attr=True,  # show node weights and attributes
    draw_graph=True):  # draw the graph
    """ Graph initialization with watts_strogatz_graph() function. 

    Create a graph with weights added as edge attributes, and the 
    following node attributes: extraversion, receptiveness, engagement.
    
    The graph is ready for performing a simulation with the difpy package.
    
    
    Parameters
    ----------

    n : integer
       Number of nodes in the graph.

    k : integer
        Number of neighbours of a single node before rewiring edges.
    
    rewire_prob : float
        Probability of rewiring an edge to a random place.

    initiation_perc : float
       Fraction of nodes randomly set as aware (e.g. 0.1 for 10%).
    
    show_attr : bool, optional
        Show list of weights and other generated attributes of nodes.
    
    draw_graph : bool, optional
        Draw graph.


    Returns
    -------
    G : graph
        A networkx graph object.

    pos : dictionary with 2-element ndarrays as values
       Contains positions of nodes in the graph chart. Pos is used 
       to draw the graph after a simulation step. 
       
    """

    #==============#
    # Create graph #
    #==============#

    # Create basic watts-strogatz graph
    G = nx.watts_strogatz_graph(n=n, k=k, p=rewire_prob, seed=None)
    # Compute a position of graph elements
    pos = nx.spring_layout(G)

    #======================#
    # Add weights to graph #
    #======================#
    # Weights - are probabilities of contact between nodes of given social
    # network.
    # Weights are randomly sampled from exponential distribution.

    # Values have to be scaled, so we cannot add them to the graph directly -
    # only after generation, scaling, and replacing zeros with 0.000001
    # for computation purposes

    # Create ndarray of weights
    weights = np.round(
        np.random.exponential(scale=0.1, size=G.number_of_edges()),
        6).reshape(G.number_of_edges(), 1)

    # Scale weights to [0,1] range
    scaler = MinMaxScaler()
    scaler.fit(weights)
    scaled_weights = scaler.transform(weights)
    scaled_weights = np.round(scaled_weights, 6)
    # eliminate zeros for computation purposes
    for (x, y), i in np.ndenumerate(scaled_weights):
        if i == 0:
            scaled_weights[x, y] = 0.000001

    # Add weights to the graph
    for i, (u, v) in enumerate(G.edges()):
        G[u][v]['weight'] = scaled_weights[i, 0]

    #============================#
    # Set node attribute - state #
    #============================#

    # "State" Variable levels:
    # * Unaware - is actor who did not internalized the information and
    #   is not able to pass it down. Initially, all nodes are
    #   in state: Unaware.
    # * Aware - is the actor who internalized the information and is able
    #   to pass it down.

    nx.set_node_attributes(G, 'unaware', 'state')  # (G, values, name)

    #====================================#
    # Set node attribute - receptiveness #
    #====================================#

    # Receptiveness - general parameter of each node, expressing how
    # receptive the actor generally is in the context of the given social network.
    # Receptiveness is randomly sampled from a normal distribution.

    # Create ndarray of receptiveness (one value per node)
    receptiveness = np.round(np.random.normal(size=G.number_of_nodes()),
                             6).reshape(G.number_of_nodes(), 1)

    # Scale receptiveness to [0,1] range
    scaler = MinMaxScaler()
    scaler.fit(receptiveness)
    scaled_receptiveness = scaler.transform(receptiveness)
    scaled_receptiveness = np.round(scaled_receptiveness, 6)
    # eliminate zeros for computation purposes
    for (x, y), i in np.ndenumerate(scaled_receptiveness):
        if i == 0:
            scaled_receptiveness[x, y] = 0.000001

    # Add receptiveness parameter to nodes
    for v in G.nodes():
        G.nodes[v]['receptiveness'] = scaled_receptiveness[v, 0]

    #===================================#
    # Set node attribute - extraversion #
    #===================================#

    # Extraversion is the agent's eagerness to express itself to other agents.
    # Extraversion is randomly sampled from a normal distribution.

    # Create ndarray of extraversion (one value per node)
    extraversion = np.round(np.random.normal(size=G.number_of_nodes()),
                            6).reshape(G.number_of_nodes(), 1)

    # Scale extraversion to [0,1] range
    scaler = MinMaxScaler()
    scaler.fit(extraversion)
    scaled_extraversion = scaler.transform(extraversion)
    scaled_extraversion = np.round(scaled_extraversion, 6)
    # eliminate zeros for computation purposes
    for (x, y), i in np.ndenumerate(scaled_extraversion):
        if i == 0:
            scaled_extraversion[x, y] = 0.000001

    # Add extraversion parameter to nodes
    for v in G.nodes():
        G.nodes[v]['extraversion'] = scaled_extraversion[v, 0]

    #=================================#
    # Set node attribute - engagement #
    #=================================#

    # Engagement - engagement with the information-related topic; the
    # strength of the experiences connected with the topic, i.e. how
    # objectively relevant the information is for the actor.
    # Engagement is randomly sampled from an exponential distribution.

    # Create ndarray of engagement (one value per node)
    engagement = np.round(np.random.exponential(size=G.number_of_nodes()),
                          6).reshape(G.number_of_nodes(), 1)

    # Scale engagement to [0,1] range
    scaler = MinMaxScaler()
    scaler.fit(engagement)
    scaled_engagement = scaler.transform(engagement)
    scaled_engagement = np.round(scaled_engagement, 6)
    # eliminate zeros for computation purposes
    for (x, y), i in np.ndenumerate(scaled_engagement):
        if i == 0:
            scaled_engagement[x, y] = 0.000001

    # Add engagement parameter to nodes
    for v in G.nodes():
        G.nodes[v]['engagement'] = scaled_engagement[v, 0]

    #===================#
    # Random initiation #
    #===================#

    # Compute number of nodes
    N = G.number_of_nodes()
    # Sample ids of agents to set as initially aware
    infected_agents_id = random.sample(population=range(0, N),
                                       k=int(N * initiation_perc))
    # Set those nodes as aware
    for v in infected_agents_id:
        G.nodes[v]['state'] = 'aware'

    #=======================#
    # Show nodes attributes #
    #=======================#

    if show_attr == True:
        print("Node attributes:")
        for (u, v) in G.nodes.data():
            print(u, v)

        # Check what the scaled weights look like
        x = list(range(len(scaled_weights)))
        scaled_weights = np.sort(scaled_weights, axis=0)
        # show numbered values
        dict_0 = dict(zip(x, scaled_weights))
        print("Wages:")
        for u, v in dict_0.items():
            print(u, v)

    #============#
    # Draw graph #
    #============#

    if draw_graph == True:
        dp.draw_graph(G=G, pos=pos)
    # draw_colored_graph_2
    return G, pos
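A minimal usage sketch (hypothetical), assuming the imports above; it builds a ready-to-simulate graph and inspects the generated attributes:

G_demo, pos_demo = graph_init(n=26, k=5, rewire_prob=0.1, initiation_perc=0.1,
                              show_attr=False, draw_graph=False)
print(G_demo.nodes[0])                   # state, receptiveness, extraversion, engagement
print(list(G_demo.edges(data=True))[0])  # (u, v, {'weight': ...})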
Example #5
def simulation_step(
        G,  # NetworkX graph
        pos=None,
        kernel='weights',
        engagement_enforcement=1.00,
        custom_kernel=None,
        WERE_multiplier=10,
        oblivion=False,
        draw=False,
        show_attr=False):
    """ Perform one simulation step of information diffusion 
        in a graph G.
    
    
    Parameters
    ----------

    G : graph
        A networkx graph object.
        
        To use the default WERE information propagation kernel, nodes of G 
        need to have extraversion, receptiveness, and engagement parameters.
        
    pos : dictionary with 2-element ndarrays as values
       Contains positions of nodes in the graph chart. Pos is used 
       to draw the graph after a simulation step.
    
    kernel : string
        Levels: "weights", "WERE", "custom"
        
        * weights - the probability of information propagation equals 
            the bond value (edge weight) between actors
        * WERE - the probability of information propagation is given by the 
            weights-extraversion-receptiveness-engagement equation
        * custom - the probability of information propagation is computed 
            with a custom function
            
    engagement_enforcement : float
        Reinforcement of agent engagement by a multiplier. 
        If engagement_enforcement == 1, no reinforcement occurs.
        
        1) An agent reinforces its engagement after oblivion 
            (later it is easier for this agent to internalize the
            information again, at least with the WERE kernel).
        2) Agent A reinforces its engagement during an information diffusion
            step, when another agent B tries to pass the information to 
            agent A, but agent A is already aware.
            
    custom_kernel : function
        Function which computes the probability of information propagation
        for each node in a simulation step.
    
    WERE_multiplier : float, optional
        Multiplier used for scaling the WERE kernel outcome.
    
    oblivion : bool, optional
        Option which enables agents' information oblivion (forgetting). 
        
    draw : bool, optional
        Draw graph.

    show_attr : bool, optional
        Show nodes' attributes after the simulation step.


    Returns
    -------
    G : graph
        A modified networkx graph object is returned after simulation.


    """

    for n in G.nodes():

        #=================#
        # Oblivion option #
        #=================#

        #  Oblivion and increasing engagement

        if oblivion == True:

            if G.nodes[n]['state'] == 'aware':

                # Calculate oblivion probability for the node
                # (more aware neighbours - lower oblivion probability)
                # oblivion_prob is random uniform, and depends on
                # what fraction of the neighbours are aware

                aware = [
                    d['state'] for i, d in G.nodes.data()
                    if i in list(G.neighbors(n))
                ].count('aware')
                # Unaware neighbours number
                unaware = len(list(G.neighbors(n))) - aware

                # Oblivion factor (fraction of unaware neighbours)
                oblivion_factor = (unaware + 0.0001) / ((aware + 0.0001) +
                                                        (unaware + 0.0001))

                # random factor
                random_factor = np.random.uniform(0, 1)

                # probability that the actor will forget the information and will not be able to pass it on
                oblivion_prob = oblivion_factor * random_factor

                # Attempt to oblivion
                if np.random.uniform(0, 1) < oblivion_prob:
                    G.nodes[n]['state'] = 'unaware'

                    # increasing of engagement after oblivion
                    G.nodes[n]['engagement'] = np.round(
                        min(1,
                            G.nodes[n]['engagement'] * engagement_enforcement),
                        6)

        #========#
        # Kernel #
        #========#
        # If the node is still aware, it disseminates the information

        if G.nodes[n]['state'] == 'aware':

            for neighbour in G.neighbors(n):

                if G.nodes[neighbour]['state'] == 'unaware':

                    #================#
                    # Weights kernel #
                    #================#

                    if kernel == 'weights':
                        prob_of_internalization = G[n][neighbour]['weight']

                    #=============#
                    # WERE kernel #
                    #=============#
                    # Weights-extraversion-receptiveness-engagement
                    # kernel

                    if kernel == 'WERE':

                        # calculate prob_of_internalization
                        prob_of_internalization =  G[n][neighbour]['weight'] \
                        * G.nodes[neighbour]['receptiveness'] \
                        * G.nodes[neighbour]['engagement'] \
                        * G.nodes[n]['extraversion'] \
                        * WERE_multiplier

                    #===============#
                    # Custom kernel #
                    #===============#

                    if kernel == 'custom':
                        prob_of_internalization = custom_kernel(n, neighbour)

                    #============================#
                    # Attempt to internalization #
                    #============================#

                    if np.random.uniform(0, 1) < prob_of_internalization:
                        G.nodes[neighbour]['state'] = 'aware'

                #===================#
                # Engagement rising #
                #===================#
                # if the neighbour is already aware, its engagement in the
                # information topic is reinforced
                else:
                    G.nodes[neighbour]['engagement'] = \
                    np.round(G.nodes[neighbour]['engagement'] * \
                             engagement_enforcement, 6)
                    # reinforcing already informed actors

    #=======================#
    # Show nodes attributes #
    #=======================#

    # Show nodes attributes
    if show_attr == True:
        for (u, v) in G.nodes.data():
            print(u, v)

    #============#
    # Draw graph #
    #============#

    if draw == True:
        fig_01, ax_01 = plt.subplots()  # enable to plot one by one
        # in separate windows
        dp.draw_graph(G, pos)

    return G
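A minimal usage sketch (hypothetical), assuming graph_init from Example #4 and the imports above; demo_kernel is an illustrative custom kernel, not part of difpy:

G_demo, pos_demo = graph_init(n=26, k=5, rewire_prob=0.1, initiation_perc=0.1,
                              show_attr=False, draw_graph=False)

# Illustrative custom kernel: edge weight damped by the receiver's engagement.
def demo_kernel(n, neighbour):
    return G_demo[n][neighbour]['weight'] * G_demo.nodes[neighbour]['engagement']

for step in range(10):
    simulation_step(G_demo, pos=pos_demo, kernel='custom',
                    custom_kernel=demo_kernel, oblivion=True,
                    engagement_enforcement=1.01, draw=False, show_attr=False)

aware_nodes = [i for i, d in G_demo.nodes.data() if d['state'] == 'aware']
print(len(aware_nodes), "aware nodes after 10 steps")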