Example #1
def hk(s, op_eps, max_rounds, eps=1e-6, plot=False, conv_stop=True,
       save=False):
    '''Simulates the model of Hegselmann-Krause.

    This model does not require an adjacency matrix. Connections between
    nodes are determined by the proximity of their opinions.

    Args:
        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        op_eps (double): ε parameter of the model (confidence bound)

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        for i in range(N):
            # The node chooses only those with a close enough opinion
            friends_i = np.abs(z_prev - z_prev[i]) <= op_eps
            z[i] = np.mean(z_prev[friends_i])
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Hegselmann-Krause converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t+1, :], 'Hegselmann-Krause', dcolor=True)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'hk' + timeStr
        saveModelData(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, s=s, op_eps=op_eps,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
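
A minimal usage sketch for hk. The module name `models` in the import is a placeholder assumption; adjust it to wherever these functions actually live.

import numpy as np
from models import hk  # hypothetical module path

rng = np.random.default_rng(seed=42)
s0 = rng.uniform(0.0, 1.0, size=50)             # 50 agents, random initial opinions in [0, 1]
opinions = hk(s0, op_eps=0.2, max_rounds=1000)  # confidence bound ε = 0.2
print(opinions.shape)                           # (rounds_run, 50)
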
Example #2
def friedkinJohnsen(A, s, max_rounds, eps=1e-6, plot=False, conv_stop=True,
                    save=False):
    '''Simulates the Friedkin-Johnsen (Kleinberg) Model.

    Runs a maximum of max_rounds rounds of the Friedkin-Johnsen model. If the
    model converges sooner, the function returns. The stubbornness matrix of
    the model is extracted from the diagonal of matrix A.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    B = np.diag(np.diag(A))  # Stubbornness matrix of the model
    A_model = A - B  # Adjacency matrix of the model

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = z

    for t in trange(1, max_rounds):
        z = np.dot(A_model, z) + np.dot(B, s)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Friedkin-Johnsen converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t+1, :], 'Friedkin-Johnsen')

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'fj' + timeStr
        saveModelData(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
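
A minimal usage sketch for friedkinJohnsen. The module name `models` and the row-stochastic construction of A below are assumptions.

import numpy as np
from models import friedkinJohnsen  # hypothetical module path

rng = np.random.default_rng(seed=1)
N = 30
W = rng.uniform(size=(N, N))
A = W / W.sum(axis=1, keepdims=True)  # row-stochastic; its diagonal acts as the stubbornness
s0 = rng.uniform(0.0, 1.0, size=N)    # intrinsic beliefs
opinions = friedkinJohnsen(A, s0, max_rounds=1000)
print(opinions[-1])                   # opinions at the last simulated round
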
Example #3
def deGroot(A, s, max_rounds, eps=1e-6, plot=False, conv_stop=True,
            save=False):
    '''Simulates the DeGroot Model.

    Runs a maximum of max_rounds rounds of the DeGroot model. If the model
    converges sooner, the function returns.

    Args:
        A (NxN numpy array): Adjacency matrix

        s (1xN numpy array): Initial opinions vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        z = np.dot(A, z)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('DeGroot converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t+1, :], 'DeGroot')

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'dg' + timeStr
        saveModelData(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
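
A minimal usage sketch for deGroot. The module name `models` is an assumption; A is built row-stochastic so each update is a weighted averaging step.

import numpy as np
from models import deGroot  # hypothetical module path

rng = np.random.default_rng(seed=2)
N = 20
W = rng.uniform(size=(N, N))
A = W / W.sum(axis=1, keepdims=True)  # row-stochastic influence matrix
s0 = rng.uniform(0.0, 1.0, size=N)
opinions = deGroot(A, s0, max_rounds=500)
print(opinions.shape)                 # (rounds_run, N)
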
Example #4
def friedkinJohnsen(A,
                    s,
                    max_rounds,
                    eps=1e-6,
                    plot=False,
                    conv_stop=True,
                    save=False):
    '''Simulates the Friedkin-Johnsen (Kleinberg) Model.

    Runs a maximum of max_rounds rounds of the Friedkin-Johnsen model. If the
    model converges sooner, the function returns. The stubbornness matrix of
    the model is extracted from the diagonal of matrix A.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    B = np.diag(np.diag(A))  # Stubbornness matrix of the model
    A_model = A - B  # Adjacency matrix of the model

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = z

    for t in trange(1, max_rounds):
        z = np.dot(A_model, z) + np.dot(B, s)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Friedkin-Johnsen converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t + 1, :], 'Friedkin-Johnsen')

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'fj' + timeStr
        saveModelData(simid,
                      N=N,
                      max_rounds=max_rounds,
                      eps=eps,
                      rounds_run=t + 1,
                      A=A,
                      s=s,
                      opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
Example #5
def kNN_dynamic(A,
                s,
                K,
                max_rounds,
                eps=1e-6,
                plot=False,
                conv_stop=True,
                save=False):
    '''Simulates the dynamic K-Nearest Neighbors Model.

    In this model, each node chooses its K nearest neighbors when averaging
    its opinion. The adjacency matrix changes between rounds depending on the
    current opinions.

    Args:
        A (NxN numpy array): Adjacency matrix of the underlying social network

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        K (int): The number of nearest neighbors to listen to

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    # All nodes must listen to themselves for the averaging to work
    A_model = A + np.eye(N)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        Q = np.zeros((N, N))
        # TODO: Verify that this contains the original paths of A
        A_squared = np.dot(A_model, A_model)
        for i in range(N):
            # Find 2-neighbors in the underlying social network
            neighbor2_i = A_squared[i, :] > 0
            # Sort all nodes by opinion distance from node i
            sorted_dist = np.argsort(abs(z_prev - z_prev[i]))
            # Reorder the logical neighbor2_i array accordingly
            neighbor2_i = neighbor2_i[sorted_dist]
            # Keep only the 2-neighbors, sorted by opinion distance
            friends_i = sorted_dist[neighbor2_i]
            # If there are fewer than K friends, numpy returns
            # the whole array (< K elements)
            k_nearest = friends_i[0:K]
            Q[i, k_nearest] = 1 / k_nearest.size
            z[i] = np.mean(z_prev[k_nearest])
        A_model = Q.copy()
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('K-Nearest Neighbors (dynamic) converged after {t} '
                  'rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t + 1, :], 'K-NN Dynamic', dcolor=True)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'kNNd' + timeStr
        saveModelData(simid,
                      N=N,
                      max_rounds=max_rounds,
                      eps=eps,
                      rounds_run=t + 1,
                      A=A,
                      s=s,
                      K=K,
                      opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
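
A minimal usage sketch for kNN_dynamic. The module name `models` and the random 0/1 adjacency matrix are assumptions.

import numpy as np
from models import kNN_dynamic  # hypothetical module path

rng = np.random.default_rng(seed=3)
N = 40
A = (rng.uniform(size=(N, N)) < 0.2).astype(float)  # random 0/1 adjacency, ~20% density
np.fill_diagonal(A, 0)                              # the model adds the self-loops itself
s0 = rng.uniform(0.0, 1.0, size=N)
opinions = kNN_dynamic(A, s0, K=5, max_rounds=500)  # each node listens to its 5 nearest neighbors
print(opinions.shape)
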
Example #6
def hk_local(A,
             s,
             op_eps,
             max_rounds,
             eps=1e-6,
             plot=False,
             conv_stop=True,
             save=False):
    '''Simulates the model of Hegselmann-Krause with an Adjacency Matrix.

    Contrary to the standard Hegselmann-Krause Model, here we make use of
    an adjacency matrix that represents an underlying social structure
    independent of the opinions held by the members of the society.

    Args:
        A (NxN numpy array): Adjacency matrix of the underlying social network

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        op_eps (double): ε parameter of the model (confidence bound)

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    # All nodes must listen to themselves for the averaging to work
    A_model = A + np.eye(N)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        for i in range(N):
            # Neighbors in the underlying social network
            neighbor_i = A_model[i, :] > 0
            opinion_close = np.abs(z_prev - z_prev[i]) <= op_eps
            # The node listens only to those who are connected to it in the
            # underlying network and whose opinions are close enough to
            # its own
            friends_i = np.logical_and(neighbor_i, opinion_close)
            z[i] = np.mean(z_prev[friends_i])
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Hegselmann-Krause (Local Knowledge) converged after {t} '
                  'rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t + 1, :], 'Hegselmann-Krause', dcolor=True)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'hkloc' + timeStr
        saveModelData(simid,
                      N=N,
                      max_rounds=max_rounds,
                      eps=eps,
                      rounds_run=t + 1,
                      A=A,
                      s=s,
                      op_eps=op_eps,
                      opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
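
A minimal usage sketch for hk_local. The module name `models` and the random 0/1 social network are assumptions.

import numpy as np
from models import hk_local  # hypothetical module path

rng = np.random.default_rng(seed=4)
N = 40
A = (rng.uniform(size=(N, N)) < 0.3).astype(float)  # random 0/1 social network, ~30% density
np.fill_diagonal(A, 0)
s0 = rng.uniform(0.0, 1.0, size=N)
opinions = hk_local(A, s0, op_eps=0.2, max_rounds=1000)
print(opinions.shape)
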
Example #7
def hk(s,
       op_eps,
       max_rounds,
       eps=1e-6,
       plot=False,
       conv_stop=True,
       save=False):
    '''Simulates the model of Hegselmann-Krause.

    This model does not require an adjacency matrix. Connections between
    nodes are determined by the proximity of their opinions.

    Args:
        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        op_eps (double): ε parameter of the model (confidence bound)

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        for i in range(N):
            # The node chooses only those with a close enough opinion
            friends_i = np.abs(z_prev - z_prev[i]) <= op_eps
            z[i] = np.mean(z_prev[friends_i])
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Hegselmann-Krause converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t + 1, :], 'Hegselmann-Krause', dcolor=True)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'hk' + timeStr
        saveModelData(simid,
                      N=N,
                      max_rounds=max_rounds,
                      eps=eps,
                      rounds_run=t + 1,
                      s=s,
                      op_eps=op_eps,
                      opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
Example #8
def ga(A,
       B,
       s,
       max_rounds,
       eps=1e-6,
       plot=False,
       conv_stop=True,
       save=False,
       **kwargs):
    '''Simulates the Generalized Asymmetric Coevolutionary Game.

    Runs a maximum of max_rounds rounds of the Generalized Asymmetric
    Coevolutionary Game. The interaction weights are recomputed in every round
    by the dynamic_weights function, based on the underlying adjacency matrix
    A and the current opinions, while B holds the stubbornness of each node.
    If the model converges sooner, the function returns.

    Args:
        A (NxN numpy array): Adjacency matrix

        B (NxN numpy array): The stubbornness of each node

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

        **kwargs: Arguments c, eps, and p for the dynamic_weights function
        (eps and p need to be specified only if c='pow') (default: c='linear')

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    # Check if c function was specified
    if kwargs:
        c = kwargs['c']
        # Extra parameters for pow function
        eps_c = kwargs.get('eps', 0.1)
        p_c = kwargs.get('p', 2)
    else:
        # Otherwise use linear as default
        c = 'linear'
        eps_c = None
        p_c = None

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        Q = dynamic_weights(A, s, z, c, eps_c, p_c) + B
        Q = rowStochastic(Q)
        B_temp = np.diag(np.diag(Q))
        Q = Q - B_temp
        z = np.dot(Q, z) + np.dot(B_temp, s)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('G-A converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t + 1, :], 'Generalized Asymmetric Game', dcolor=True)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'ga' + timeStr
        saveModelData(simid,
                      N=N,
                      max_rounds=max_rounds,
                      eps=eps,
                      rounds_run=t + 1,
                      A=A,
                      s=s,
                      B=B,
                      c=c,
                      eps_c=eps_c,
                      p_c=p_c,
                      opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
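
A minimal usage sketch for ga. The module name `models`, the row-stochastic A, and the diagonal stubbornness matrix B are assumptions; the call relies on the default c='linear' weighting.

import numpy as np
from models import ga  # hypothetical module path

rng = np.random.default_rng(seed=5)
N = 30
W = rng.uniform(size=(N, N))
A = W / W.sum(axis=1, keepdims=True)        # row-stochastic adjacency matrix
B = np.diag(rng.uniform(0.1, 0.5, size=N))  # diagonal stubbornness matrix
s0 = rng.uniform(0.0, 1.0, size=N)
opinions = ga(A, B, s0, max_rounds=500)     # uses the default c='linear'
print(opinions.shape)
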
Example #9
def deGroot(A,
            s,
            max_rounds,
            eps=1e-6,
            plot=False,
            conv_stop=True,
            save=False):
    '''Simulates the DeGroot Model.

    Runs a maximum of max_rounds rounds of the DeGroot model. If the model
    converges sooner, the function returns.

    Args:
        A (NxN numpy array): Adjacency matrix

        s (1xN numpy array): Initial opinions vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        z = np.dot(A, z)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('DeGroot converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t + 1, :], 'DeGroot')

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'dg' + timeStr
        saveModelData(simid,
                      N=N,
                      max_rounds=max_rounds,
                      eps=eps,
                      rounds_run=t + 1,
                      A=A,
                      s=s,
                      opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
Example #10
def meetFriend(A,
               s,
               max_rounds,
               eps=1e-6,
               plot=False,
               conv_stop=True,
               save=False):
    '''Simulates the "Meeting a Friend" model.

    Runs a maximum of max_rounds rounds of the "Meeting a Friend" model. If
    the model converges sooner, the function returns. The diagonal of matrix A
    acts as the stubbornness: whenever a node picks itself, it averages in its
    intrinsic belief instead of another node's current opinion.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    # Cannot allow zero rows because rchoice() will fail
    if np.size(np.nonzero(A.sum(axis=1))) != N:
        raise ValueError("Matrix A has one or more zero rows")

    for t in trange(1, max_rounds):
        # Update the opinion for each node
        for i in range(N):
            r_i = rchoice(A[i, :])
            if r_i == i:
                op = s[i]
            else:
                op = z_prev[r_i]
            z[i] = (op + t * z_prev[i]) / (t + 1)
        # Copy so that this round's updates do not alias into z_prev
        z_prev = z.copy()
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Meet a Friend converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t + 1, :], 'Meet a Friend')

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'mf' + timeStr
        saveModelData(simid,
                      N=N,
                      max_rounds=max_rounds,
                      eps=eps,
                      rounds_run=t + 1,
                      A=A,
                      s=s,
                      opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
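
A minimal usage sketch for meetFriend. The module name `models` and the strictly positive random matrix are assumptions; A must contain no zero rows, and each row is treated as weights over whom to meet.

import numpy as np
from models import meetFriend  # hypothetical module path

rng = np.random.default_rng(seed=6)
N = 30
W = rng.uniform(0.1, 1.0, size=(N, N))  # strictly positive, so no zero rows
A = W / W.sum(axis=1, keepdims=True)    # each row sums to 1
s0 = rng.uniform(0.0, 1.0, size=N)
opinions = meetFriend(A, s0, max_rounds=2000)
print(opinions[-1])                     # opinions at the last simulated round
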
Example #11
def kNN_dynamic(A, s, K, max_rounds, eps=1e-6, plot=False, conv_stop=True,
                save=False):
    '''Simulates the dynamic K-Nearest Neighbors Model.

    In this model, each node chooses its K nearest neighbors when averaging
    its opinion. The adjacency matrix changes between rounds depending on the
    current opinions.

    Args:
        A (NxN numpy array): Adjacency matrix of the underlying social network

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        K (int): The number of nearest neighbors to listen to

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    # All nodes must listen to themselves for the averaging to work
    A_model = A + np.eye(N)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        Q = np.zeros((N, N))
        # TODO: Verify that this contains the original paths of A
        A_squared = np.dot(A_model, A_model)
        for i in range(N):
            # Find 2-neighbors in the underlying social network
            neighbor2_i = A_squared[i, :] > 0
            # Sort all nodes by opinion distance from node i
            sorted_dist = np.argsort(abs(z_prev - z_prev[i]))
            # Reorder the logical neighbor2_i array accordingly
            neighbor2_i = neighbor2_i[sorted_dist]
            # Keep only the 2-neighbors, sorted by opinion distance
            friends_i = sorted_dist[neighbor2_i]
            # If there are fewer than K friends, numpy returns
            # the whole array (< K elements)
            k_nearest = friends_i[0:K]
            Q[i, k_nearest] = 1/k_nearest.size
            z[i] = np.mean(z_prev[k_nearest])
        A_model = Q.copy()
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('K-Nearest Neighbors (dynamic) converged after {t} '
                  'rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t+1, :], 'K-NN Dynamic', dcolor=True)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'kNNd' + timeStr
        saveModelData(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s, K=K,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
Example #12
def hk_local(A, s, op_eps, max_rounds, eps=1e-6, plot=False, conv_stop=True,
             save=False):
    '''Simulates the model of Hegselmann-Krause with an Adjacency Matrix.

    Contrary to the standard Hegselmann-Krause Model, here we make use of
    an adjacency matrix that represents an underlying social structure
    independent of the opinions held by the members of the society.

    Args:
        A (NxN numpy array): Adjacency matrix of the underlying social network

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        op_eps (double): ε parameter of the model (confidence bound)

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    # All nodes must listen to themselves for the averaging to work
    A_model = A + np.eye(N)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        for i in range(N):
            # Neighbors in the underlying social network
            neighbor_i = A_model[i, :] > 0
            opinion_close = np.abs(z_prev - z_prev[i]) <= op_eps
            # The node listens only to those who are connected to it in the
            # underlying network and whose opinions are close enough to
            # its own
            friends_i = np.logical_and(neighbor_i, opinion_close)
            z[i] = np.mean(z_prev[friends_i])
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Hegselmann-Krause (Local Knowledge) converged after {t} '
                  'rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t+1, :], 'Hegselmann-Krause', dcolor=True)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'hkloc' + timeStr
        saveModelData(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s, op_eps=op_eps,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
Example #13
def ga(A, B, s, max_rounds, eps=1e-6, plot=False, conv_stop=True, save=False,
       **kwargs):
    '''Simulates the Generalized Asymmetric Coevolutionary Game.

    Runs a maximum of max_rounds rounds of the Generalized Asymmetric
    Coevolutionary Game. The interaction weights are recomputed in every round
    by the dynamic_weights function, based on the underlying adjacency matrix
    A and the current opinions, while B holds the stubbornness of each node.
    If the model converges sooner, the function returns.

    Args:
        A (NxN numpy array): Adjacency matrix

        B (NxN numpy array): The stubbornness of each node

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

        **kwargs: Arguments c, eps, and p for the dynamic_weights function
        (eps and p need to be specified only if c='pow') (default: c='linear')

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    # Check if c function was specified
    if kwargs:
        c = kwargs['c']
        # Extra parameters for pow function
        eps_c = kwargs.get('eps', 0.1)
        p_c = kwargs.get('p', 2)
    else:
        # Otherwise use linear as default
        c = 'linear'
        eps_c = None
        p_c = None

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        Q = dynamic_weights(A, s, z, c, eps_c, p_c) + B
        Q = rowStochastic(Q)
        B_temp = np.diag(np.diag(Q))
        Q = Q - B_temp
        z = np.dot(Q, z) + np.dot(B_temp, s)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('G-A converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t+1, :], 'Generalized Asymmetric Game', dcolor=True)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'ga' + timeStr
        saveModelData(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s, B=B, c=c, eps_c=eps_c,
                      p_c=p_c, opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
Example #14
def meetFriend(A, s, max_rounds, eps=1e-6, plot=False, conv_stop=True,
               save=False):
    '''Simulates the "Meeting a Friend" model.

    Runs a maximum of max_rounds rounds of the "Meeting a Friend" model. If
    the model converges sooner, the function returns. The diagonal of matrix A
    acts as the stubbornness: whenever a node picks itself, it averages in its
    intrinsic belief instead of another node's current opinion.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        plot (bool): Plot preference (default: False)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN numpy array of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    # Cannot allow zero rows because rchoice() will fail
    if np.size(np.nonzero(A.sum(axis=1))) != N:
        raise ValueError("Matrix A has one or more zero rows")

    for t in trange(1, max_rounds):
        # Update the opinion for each node
        for i in range(N):
            r_i = rchoice(A[i, :])
            if r_i == i:
                op = s[i]
            else:
                op = z_prev[r_i]
            z[i] = (op + t*z_prev[i]) / (t+1)
        # Copy so that this round's updates do not alias into z_prev
        z_prev = z.copy()
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Meet a Friend converged after {t} rounds'.format(t=t))
            break

    if plot:
        plotOpinions(opinions[0:t+1, :], 'Meet a Friend')

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'mf' + timeStr
        saveModelData(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]