Example #1
    def test_set_states(self):
        g = nx.random_geometric_graph(10, 0.1)
        m = Model(g)
        m.set_states(['1', '2'])
        # every node state should initialise to zero
        self.assertTrue((np.zeros((10, 2)) == m.node_states).all())
        self.assertEqual(m.state_names, ['1', '2'])
        self.assertEqual(m.state_map, {'1': 0, '2': 1})
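The test snippets in this listing are excerpts without their surrounding harness. A minimal sketch of one, assuming a unittest.TestCase subclass (the class name is hypothetical; the imports are the ones these examples rely on):

import unittest

import networkx as nx
import numpy as np

from dynsimf.models.Model import Model


class ModelTest(unittest.TestCase):  # hypothetical class name
    def test_set_states(self):
        g = nx.random_geometric_graph(10, 0.1)
        m = Model(g)
        m.set_states(['1', '2'])
        # every node state should initialise to zero
        self.assertTrue((np.zeros((10, 2)) == m.node_states).all())


if __name__ == '__main__':
    unittest.main()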
Example #2
    def test_properties(self):
        np.random.seed(1337)
        random.seed(1337)

        g = nx.random_geometric_graph(50, 0.3)
        model = Model(g)

        def node_amount(G):
            return len(G.nodes())

        prop1 = PropertyFunction('1', nx.average_clustering, 1,
                                 {'G': model.graph})
        prop2 = PropertyFunction('2', node_amount, 1, {'G': model.graph})

        model.add_property_function(prop1)
        model.add_property_function(prop2)

        model.simulate(3)
        out = model.get_properties()

        self.assertDictEqual(
            out, {
                '1':
                [0.6747703316560499, 0.6747703316560499, 0.6747703316560499],
                '2': [50, 50, 50]
            })
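Both property functions use an interval of 1, so three simulated iterations produce three recorded values per property; the clustering coefficient is identical across iterations because the underlying graph does not change during this simulation.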
Example #3
    def test_assign_array(self):
        # Network definition
        g = nx.random_geometric_graph(10, 0.1)
        m = Model(g)

        initial_state = {
            'x': 0,
        }

        def update_x():
            return {'x': np.arange(10)}

        # Model definition
        m.set_states(['x'])
        m.add_update(update_x)
        m.set_initial_state(initial_state)

        output = m.simulate(1)
        self.assertEqual(list(output['states'][0]), list(np.zeros((10, 1))))
        self.assertEqual(list(output['states'][1]), list(np.arange(10)))
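Note that simulate(1) yields two entries in output['states']: index 0 holds the initial all-zero state set via initial_state, and index 1 holds the state after the single update that assigned np.arange(10).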
Example #4
from dynsimf.models.Model import Model
from dynsimf.models.tools.SA import SensitivityAnalysis
from dynsimf.models.tools.SA import SAConfiguration

import networkx as nx
import numpy as np

import matplotlib.pyplot as plt
from SALib.test_functions import Ishigami

__author__ = "Mathijs Maijer"
__email__ = "*****@*****.**"

if __name__ == "__main__":
    g = nx.random_geometric_graph(1, 1)
    model = Model(g)

    constants = {'x1': 0, 'x2': 0, 'x3': 0}

    initial_state = {'ishigami': 0}

    def update(constants):
        ishigami_params = np.array([list(constants.values())])
        return {'ishigami': Ishigami.evaluate(ishigami_params)}

    # Model definition
    model.constants = constants
    model.set_states(['ishigami'])
    model.add_update(update, {'constants': model.constants})

    cfg = SAConfiguration({
Example #5
class HIOM(Example):

    def __init__(self):
        n = 400

        g = nx.watts_strogatz_graph(n, 2, 0.02)

        cfg = {
            'utility': False,
        }
        self.model = Model(g, ModelConfiguration(cfg))

        constants = {
            'dt': 0.01,
            'A_min': -0.5,
            'A_star': 1,
            's_O': 0.01,
            's_I': 0,
            'd_A': 0,
            'p': 1,
            'r_min': 0,
            't_O': np.inf,
            'N': n
        }

        def initial_I(constants):
            return np.random.normal(0, 0.3, constants['N'])

        def initial_O(constants):
            return np.random.normal(0, 0.2, constants['N'])

        initial_state = {
            'I': initial_I,
            'O': initial_O,
            'A': 1
        }

        def update_I_A(nodes, constants):
            node = nodes[0]
            nb = np.random.choice(self.model.get_neighbors(node))
            if abs(self.model.get_node_state(node, 'O') - self.model.get_node_state(nb, 'O')) > constants['t_O']:
                return {'I': self.model.get_node_state(node, 'I')}
            else:
                # Update information
                r = constants['r_min'] + (1 - constants['r_min']) / (1 + np.exp(-1 * constants['p'] * (self.model.get_node_state(node, 'O') - self.model.get_node_state(nb, 'O'))))
                inf = r * self.model.get_node_state(node, 'I') + (1-r) * self.model.get_node_state(nb, 'I') + np.random.normal(0, constants['s_I'])

                # Update attention
                node_A = self.model.get_node_state(node, 'A') + constants['d_A'] * (2 * constants['A_star'] - self.model.get_node_state(node, 'A'))
                nb_A = self.model.get_node_state(nb, 'A') + constants['d_A'] * (2 * constants['A_star'] - self.model.get_node_state(nb, 'A'))
                return {'I': [inf], 'A': {node: node_A, nb: nb_A}}

        def update_A(constants):
            return {'A': self.model.get_state('A') - 2 * constants['d_A'] * self.model.get_state('A')/constants['N']}

        def update_O(constants):
            noise = np.random.normal(0, constants['s_O'], constants['N'])
            x = self.model.get_state('O') - constants['dt'] * (self.model.get_state('O')**3 - (self.model.get_state('A') + constants['A_min']) * self.model.get_state('O') - self.model.get_state('I')) + noise
            return {'O': x}

        def shrink_I():
            return {'I': self.model.get_state('I') * 0.999}

        def shrink_A():
            return {'A': self.model.get_state('A') * 0.999}

        def sample_attention_weighted(graph):
            probs = []
            A = self.model.get_state('A')
            factor = 1.0/sum(A)
            for a in A:
                probs.append(a * factor)
            return np.random.choice(graph.nodes, size=1, replace=False, p=probs)

        # Model definition
        self.model.constants = constants
        self.model.set_states(['I', 'A', 'O'])

        update_cfg = UpdateConfiguration({
            'arguments': {'constants': self.model.constants},
            'get_nodes': True
        })
        up_I_A = Update(update_I_A, update_cfg)
        s_I = Update(shrink_I)
        s_A = Update(shrink_A)

        self.model.add_scheme(Scheme(sample_attention_weighted, {'args': {'graph': self.model.graph}, 'updates': [up_I_A]}))
        self.model.add_scheme(Scheme(lambda graph: graph.nodes, {'args': {'graph': self.model.graph}, 'lower_bound': 5000, 'updates': [s_I]}))
        self.model.add_scheme(Scheme(lambda graph: graph.nodes, {'args': {'graph': self.model.graph}, 'lower_bound': 10000, 'updates': [s_A]}))
        self.model.add_update(update_A, {'constants': self.model.constants})
        self.model.add_update(update_O, {'constants': self.model.constants})

        self.model.set_initial_state(initial_state, {'constants': self.model.constants})

    def simulate(self, n):
        return self.model.simulate(n)

    def visualize(self, iterations):
        visualization_config = {
            'layout': 'fr',
            'plot_interval': 100,
            'plot_variable': 'O',
            'variable_limits': {
                'A': [0, 1],
                'O': [-1, 1],
                'I': [-1, 1]
            },
            'cmin': -1,
            'cmax': 1,
            'color_scale': 'RdBu',
            'show_plot': True,
            # 'plot_output': '../animations/HIOM.gif',
            'plot_title': 'HIERARCHICAL ISING OPINION MODEL',
        }

        self.model.configure_visualization(visualization_config, iterations)
        self.model.visualize('animation')
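A hedged usage sketch for this class; the iteration count and the assumption that visualize accepts simulate's return value directly are mine, not taken from the repository:

if __name__ == '__main__':
    hiom = HIOM()
    output = hiom.simulate(15000)  # assumed iteration count
    # assumes the visualization step accepts the structure returned by simulate
    hiom.visualize(output)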
Example #7
import networkx as nx

from dynsimf.models.Model import Model
from dynsimf.models.components.PropertyFunction import PropertyFunction

if __name__ == '__main__':

    g = nx.random_geometric_graph(50, 0.3)
    model = Model(g)

    def node_amount(G):
        return len(G.nodes())

    prop1 = PropertyFunction('1', nx.average_clustering, 2, {'G': model.graph})
    prop2 = PropertyFunction('2', node_amount, 2, {'G': model.graph})

    model.add_property_function(prop1)
    model.add_property_function(prop2)

    model.simulate(7)
    out = model.get_properties()
    print(out)
Example #8
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt

from dynsimf.models.Model import Model
from dynsimf.models.Model import ModelConfiguration

if __name__ == "__main__":

    # Network definition
    g = nx.random_geometric_graph(250, 0.125)
    cfg = {
        'utility': False,
    }
    model = Model(g, ModelConfiguration(cfg))

    constants = {
        'q': 0.8,
        'b': 0.5,
        'd': 0.2,
        'h': 0.2,
        'k': 0.25,
        'S+': 0.5,
    }
    constants['p'] = 2*constants['d']

    def initial_v(constants):
        return np.minimum(1, np.maximum(0, model.get_state('C') - model.get_state('S') - model.get_state('E')))

    def initial_a(constants):
Example #9
    p_between = .001
    rewiring = .02

    sizes = list(map(int, [n / clusters] * clusters))
    pm = np.ones((10, 10)) * p_between
    np.fill_diagonal(pm, p_within)
    g = nx.stochastic_block_model(sizes, pm)

    cfg = {
        'utility': False,
        'adjacency_memory_config': \
            MemoryConfiguration(MemoryConfigurationType.ADJACENCY, {
                'memory_size': 0
        }),
    }
    model = Model(g, ModelConfiguration(cfg))

    constants = {
        'link_addition_p': 0.15,
        'link_removal_p': 0.1,
        'N': n,
        'sd_noise_information': .005,
        'persuasion': 2,
        'r_min': 0.1,
        's_O': .01,
        'maxwell_convention': False,
        'attention_star': 1,
        'min_attention': -.5,  # to include continuous change in O as function of K
        'delta_attention': 0.1,
        'decay_attention': 0.1 / (1 * (n ** 2)),  # exponentiation, not bitwise XOR
Example #10
File: HIOM.py Project: Tensaiz/DyNSimF
import networkx as nx
import numpy as np

from dynsimf.models.Model import Model
from dynsimf.models.Model import ModelConfiguration
from dynsimf.models.components.Scheme import Scheme
from dynsimf.models.components.Update import Update
from dynsimf.models.components.Update import UpdateConfiguration
from dynsimf.models.Example import Example

if __name__ == "__main__":

    n = 400

    g = nx.watts_strogatz_graph(n, 2, 0.02)

    cfg = {
        'utility': False,
    }
    model = Model(g, ModelConfiguration(cfg))

    constants = {
        'dt': 0.01,
        'A_min': -0.5,
        'A_star': 1,
        's_O': 0.01,
        's_I': 0,
        'd_A': 0,
        'p': 1,
        'r_min': 0,
        't_O': np.inf,
        'N': n
    }

    def initial_I(constants):
Example #11
import networkx as nx
import numpy as np

from dynsimf.models.Model import Model
from dynsimf.models.Model import ModelConfiguration
from dynsimf.models.components.Update import Update
from dynsimf.models.components.Update import UpdateType
from dynsimf.models.components.Update import UpdateConfiguration
from dynsimf.models.components.Scheme import Scheme

if __name__ == "__main__":

    g = nx.erdos_renyi_graph(n=10, p=0.1)
    model = Model(g, ModelConfiguration())

    # Define schemes
    def sample_state_weighted(graph):
        probs = []
        status_1 = model.get_state('status_1')
        factor = 1.0 / sum(status_1)
        for s in status_1:
            probs.append(s * factor)
        return np.random.choice(graph.nodes, size=1, replace=False, p=probs)

    initial_state = {
        'status_1': 0.1,
        'status_2': 0.1,
    }

    model.set_states(['status_1', 'status_2'])
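The snippet stops after set_states; a hedged continuation that wires the sampling scheme the same way the HIOM example (Example #5) does. The update function, its increment, and the iteration count are hypothetical:

    def update_status_1(nodes):
        # hypothetical update: nudge the sampled nodes' status_1 upward
        return {'status_1': model.get_state('status_1')[nodes] + 0.1}

    update_cfg = UpdateConfiguration({'get_nodes': True})
    scheme_update = Update(update_status_1, update_cfg)

    model.add_scheme(Scheme(sample_state_weighted,
                            {'args': {'graph': model.graph}, 'updates': [scheme_update]}))
    model.set_initial_state(initial_state)
    output = model.simulate(100)  # hypothetical iteration count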
Example #12
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt

from dynsimf.models.Model import Model
from dynsimf.models.Model import ModelConfiguration

if __name__ == "__main__":
    # Network definition
    n = 1000
    g = nx.random_geometric_graph(n, 0.05)

    model = Model(g)

    constants = {
        'n': n,
        'beta': 0.4,
        'gamma': 0.04,
        'init_infected': 3
    }

    def initial_infected(constants):
        state = np.zeros(constants['n'])

        sampled_nodes = np.random.choice(np.arange(constants['n']), constants['init_infected'], replace=False)
        state[sampled_nodes] = 1
        return state

    initial_state = {
        'state': initial_infected
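The excerpt is cut off inside initial_state. As an illustration only, a hypothetical SIR-style node update written with the API calls used elsewhere in this listing (get_state, get_adjacency); it is not the repository's actual update:

    def update_sir(constants):
        # assumed encoding: 0 = susceptible, 1 = infected, 2 = recovered
        state = model.get_state('state').copy()
        adjacency = model.get_adjacency()
        infected_neighbors = np.matmul(adjacency, (state == 1).astype(float))
        p_infection = 1 - (1 - constants['beta']) ** infected_neighbors
        new_infected = (state == 0) & (np.random.random(constants['n']) < p_infection)
        new_recovered = (state == 1) & (np.random.random(constants['n']) < constants['gamma'])
        state[new_infected] = 1
        state[new_recovered] = 2
        return {'state': state}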
Example #13
    def test_model_constants(self):
        g = nx.random_geometric_graph(10, 0.1)
        m = Model(g)
        d = {1: 2}
        m.constants = d
        self.assertEqual(m.constants, d)
Example #14
    def test_model_init(self):
        g = nx.random_geometric_graph(10, 0.1)
        m = Model(g)
        self.assertTrue(isinstance(m, Model))
Example #16
File: run_sa.py Project: Tensaiz/DyNSimF
from dynsimf.models.Model import Model
from dynsimf.models.tools.SA import SensitivityAnalysis
from dynsimf.models.tools.SA import SAConfiguration

import networkx as nx
from networkx.algorithms import average_clustering
import numpy as np

__author__ = "Mathijs Maijer"
__email__ = "*****@*****.**"


if __name__ == "__main__":
    g = nx.random_geometric_graph(200, 0.125)
    model = Model(g)

    constants = {
        'q': 0.8,
        'b': 0.5,
        'd': 0.2,
        'h': 0.2,
        'k': 0.25,
        'S+': 0.5,
    }
    constants['p'] = 2*constants['d']

    def initial_v(constants):
        return np.minimum(1, np.maximum(0, model.get_state('C') - model.get_state('S') - model.get_state('E')))

    def initial_a(constants):
        return constants['q'] * model.get_state('V') + (np.random.poisson(model.get_state('lambda'))/7)
Example #17
class CravingSelfControl(Example):
    def __init__(self):
        # Network definition
        self.g = nx.random_geometric_graph(250, 0.125)
        cfg = {
            'utility': False,
        }
        self.model = Model(self.g, ModelConfiguration(cfg))

        constants = {
            'q': 0.8,
            'b': 0.5,
            'd': 0.2,
            'h': 0.2,
            'k': 0.25,
            'S+': 0.5,
        }
        constants['p'] = 2 * constants['d']

        def initial_v(constants):
            return np.minimum(
                1,
                np.maximum(
                    0,
                    self.model.get_state('C') - self.model.get_state('S') -
                    self.model.get_state('E')))

        def initial_a(constants):
            return constants['q'] * self.model.get_state('V') + (
                np.random.poisson(self.model.get_state('lambda')) / 7)

        initial_state = {
            'C': 0,
            'S': constants['S+'],
            'E': 1,
            'V': initial_v,
            'lambda': 0.5,
            'A': initial_a
        }

        def update_C(constants):
            c = self.model.get_state(
                'C') + constants['b'] * self.model.get_state('A') * np.minimum(
                    1, 1 - self.model.get_state('C')
                ) - constants['d'] * self.model.get_state('C')
            return {'C': c}

        def update_S(constants):
            return {
                'S':
                self.model.get_state('S') + constants['p'] *
                np.maximum(0, constants['S+'] - self.model.get_state('S')) -
                constants['h'] * self.model.get_state('C') -
                constants['k'] * self.model.get_state('A')
            }

        # Naive manner
        # def update_E(constants):
        #     # return {'E': self.model.get_state('E') - 0.015}
        #     e = np.zeros(len(self.model.nodes))
        #     for i, node in enumerate(self.model.nodes):
        #         neighbor_addiction = 0
        #         for neighbor in self.model.get_neighbors(node):
        #             neighbor_addiction += self.model.get_node_state(neighbor, 'A')
        #         e[i] = neighbor_addiction / 50
        #     return {'E': np.maximum(-1.5, self.model.get_state('E') - e)} # Custom calculation

        # Less naive
        # def update_E(constants):
        #     e = np.zeros(len(self.model.nodes))
        #     adj = self.model.get_adjacency()
        #     for i in range(len(self.model.nodes)):
        #         neighbors = adj[i].nonzero()
        #         e[i] = np.sum(self.model.get_nodes_state(neighbors, 'A')) / 50
        #     return {'E': np.maximum(-1.5, self.model.get_state('E') - e)} # Custom calculation

        def update_E(constants):
            adj = self.model.get_adjacency()
            summed = np.matmul(adj, self.model.get_nodes_states())
            e = summed[:, self.model.get_state_index('A')] / 50
            return {'E': np.maximum(-1.5, self.model.get_state('E') - e)}

        def update_V(constants):
            return {
                'V':
                np.minimum(
                    1,
                    np.maximum(
                        0,
                        self.model.get_state('C') - self.model.get_state('S') -
                        self.model.get_state('E')))
            }

        def update_lambda(constants):
            return {'lambda': self.model.get_state('lambda') + 0.01}

        def update_A(constants):
            return {
                'A':
                constants['q'] * self.model.get_state('V') + np.minimum(
                    (np.random.poisson(self.model.get_state('lambda')) / 7),
                    constants['q'] * (1 - self.model.get_state('V')))
            }

        # Model definition
        self.model.constants = constants
        self.model.set_states(['C', 'S', 'E', 'V', 'lambda', 'A'])
        self.model.add_update(update_C, {'constants': self.model.constants})
        self.model.add_update(update_S, {'constants': self.model.constants})
        self.model.add_update(update_E, {'constants': self.model.constants})
        self.model.add_update(update_V, {'constants': self.model.constants})
        self.model.add_update(update_lambda,
                              {'constants': self.model.constants})
        self.model.add_update(update_A, {'constants': self.model.constants})
        self.model.set_initial_state(initial_state,
                                     {'constants': self.model.constants})

    def simulate(self, n):
        return self.model.simulate(n)

    def plot_paper(self, iterations):
        A = [np.mean(it[:, 5]) for it in iterations]
        C = [np.mean(it[:, 0]) for it in iterations]

        E = [np.mean(it[:, 2]) for it in iterations]
        lmd = [np.mean(it[:, 4]) for it in iterations]

        S = [np.mean(it[:, 1]) for it in iterations]
        V = [np.mean(it[:, 3]) for it in iterations]

        x = np.arange(0, len(iterations))
        plt.figure()

        plt.subplot(221)
        plt.plot(x, E, label='E')
        plt.plot(x, lmd, label='lambda')
        plt.legend()

        plt.subplot(222)
        plt.plot(x, A, label='A')
        plt.plot(x, C, label='C')
        plt.legend()

        plt.subplot(223)
        plt.plot(x, S, label='S')
        plt.plot(x, V, label='V')
        plt.legend()

        plt.show()

    def visualize(self, iterations):
        visualization_config = {
            'plot_interval': 2,
            'initial_positions': nx.get_node_attributes(self.g, 'pos'),
            'plot_variable': 'A',
            'color_scale': 'Reds',
            'variable_limits': {
                'A': [0, 0.8],
                'lambda': [0.5, 1.5],
                'C': [-1, 1],
                'V': [-1, 1],
                'E': [-1, 1],
                'S': [-1, 1]
            },
            'show_plot': True,
            # 'plot_output': './animations/c_vs_s.gif',
            'plot_title': 'Self control vs craving simulation',
        }

        self.model.configure_visualization(visualization_config, iterations)
        self.model.visualize('animation')
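A hedged run-and-plot sketch; the iteration count and the conversion of simulate's output into the per-iteration state arrays that plot_paper indexes are assumptions based on the output structure seen in Example #3:

if __name__ == '__main__':
    example = CravingSelfControl()
    output = example.simulate(500)  # assumed iteration count
    # assumes output['states'] maps iteration index -> node-state array
    example.plot_paper(list(output['states'].values()))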
Example #18
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt

from dynsimf.models.Model import Model
from dynsimf.models.Model import ModelConfiguration

if __name__ == "__main__":
    # Network definition
    n = 1
    g = nx.random_geometric_graph(n, 1)
    model = Model(g)

    init_infected = 3
    initial_state = {
        'S': 1000 - init_infected,
        'I': init_infected,
        'R': 0,
    }

    constants = {
        'N': 1000,
        'beta': 0.4,
        'gamma': 0.04,
        'dt': 0.01,
    }

    def deriv(S, I, constants):
        N = constants['N']
        beta = constants['beta']
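The deriv function is cut off here. A sketch of how it could be completed with the textbook SIR derivatives (the repository's exact form may differ):

    def deriv_sketch(S, I, constants):
        # standard SIR rates; the dt step from constants would be applied in the update
        N = constants['N']
        beta = constants['beta']
        gamma = constants['gamma']
        dSdt = -beta * S * I / N
        dIdt = beta * S * I / N - gamma * I
        dRdt = gamma * I
        return dSdt, dIdt, dRdt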
Example #19
if __name__ == "__main__":
    # Network definition
    n_nodes = 50
    g = nx.random_geometric_graph(n_nodes, 0.2)

    cfg = {
        'adjacency_memory_config': \
            MemoryConfiguration(MemoryConfigurationType.ADJACENCY, {
                'memory_size': 0
            }),
        'edge_values_memory_config': \
            MemoryConfiguration(MemoryConfigurationType.EDGE_VALUES, {
                'memory_size': 0
            })
    }
    model = Model(g, ModelConfiguration(cfg))

    constants = {
        'q': 0.8,
        'b': 0.5,
        'd': 0.2,
        'h': 0.2,
        'k': 0.25,
        'S+': 0.5,
        'P': np.random.random_sample(n_nodes)
    }
    constants['p'] = 2 * constants['d']

    def initial_v(constants):
        return np.minimum(
            1,