Example #1
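These snippets come from epipack's test suite and are shown without their module-level imports; the test methods belong to a unittest.TestCase (hence `self`). Running them stand-alone assumes roughly the following imports (reconstructed from the calls used, not part of the source):

    import numpy as np
    import matplotlib.pyplot as pl
    from time import time
    from scipy.interpolate import interp1d
    from scipy.stats import poisson, entropy
    from epipack import EpiModel, StochasticEpiModel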
    def test_inference_of_temporal_dependence(self, plot=False):

        data = np.array([
            (1.0, 2.00),
            (10000.0, 2.00),
            (10001.0, -2.00),
        ])
        times, rates = data[:, 0], data[:, 1]
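        # piecewise-linear interpolation of the sampled rates; outside the
        # sampled range [1, 10001] interp1d raises a ValueError, so the
        # rate cannot be probed at arbitrary times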
        f = interp1d(times, rates, kind='linear')

        def infection_rate(t, y):
            return f(t)

        S, I = list("SI")
        N = 100
        rec = 1
        model = EpiModel([S, I], N)

        # first, initialize the time to t0 = 1, so
        # column sum tests do not fail
        model.set_initial_conditions({S: 99, I: 1}, initial_time=1)

        # Here, the automatic check cannot evaluate the rate's time
        # dependence (the interpolant only covers a finite time range),
        # so the user is warned that there were errors during the
        # time-dependence evaluation.
        self.assertWarns(
            UserWarning,
            model.set_processes,
            [
                (S, I, infection_rate, I, I),
                (I, infection_rate, S),
            ],
        )

        assert not model.rates_have_explicit_time_dependence
        assert model.rates_have_functional_dependence

        # this should warn the user that the rates are functionally
        # dependent, but that no temporal dependence could be inferred;
        # so if they know that there is a time dependence, they have to
        # state that explicitly
        self.assertWarns(UserWarning, model.simulate, tmax=2)
        model.set_initial_conditions({S: 99, I: 1}, initial_time=1)

        # here, the time dependence is given explicitly and so
        # the warning will not be shown
        model.simulate(tmax=2, rates_have_explicit_time_dependence=True)
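By contrast, a rate function that is defined for arbitrary times (like the sinusoidal rate in Example #6 below) should let the automatic check succeed, so no warning is expected. A minimal sketch under that assumption, reusing the names from the test above:

def R0(t, y=None):
    return 4 + np.cos(t)

model = EpiModel([S, I], N)
model.set_processes([
    (S, I, R0, I, I),
    (I, rec, S),
])
# the rate evaluates everywhere, so the time dependence can be inferred
assert model.rates_have_explicit_time_dependence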
Example #2
    def test_stochastic_well_mixed(self):

        S, E, I, R = list("SEIR")

        N = 75000
        tmax = 100
        model = EpiModel([S, E, I, R], N)
        model.set_processes([
            (S, I, 2, E, I),
            (I, 1, R),
            (E, 1, I),
        ])
        model.set_initial_conditions({S: N - 100, I: 100})

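        # deterministic baseline: integrate the mean-field ODEs on a fine grid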
        tt = np.linspace(0, tmax, 10000)
        result_int = model.integrate(tt)

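        # stochastic simulation of the same well-mixed model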
        t, result_sim = model.simulate(tmax,
                                       sampling_dt=1,
                                       return_compartments=[S, R])

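        # rebuild the same dynamics as an agent-based StochasticEpiModel;
        # link transmission processes name the infecting node first, i.e.
        # (I, S, rate, I, E) reads I + S -> I + E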
        model = StochasticEpiModel([S, E, I, R], N)
        model.set_link_transmission_processes([
            (I, S, 2, I, E),
        ])
        model.set_node_transition_processes([
            (I, 1, R),
            (E, 1, I),
        ])
        model.set_random_initial_conditions({S: N - 100, I: 100})

        t, result_sim2 = model.simulate(tmax,
                                        sampling_dt=1,
                                        return_compartments=[S, R])

        # the stochastic final sizes must agree with both the ODE
        # integration and the well-mixed simulation to within 5%
        for c, res in result_sim2.items():
            assert np.abs(1 - res[-1] / result_int[c][-1]) < 0.05
            assert np.abs(1 - res[-1] / result_sim[c][-1]) < 0.05
Example #3
    def test_stochastic_fission(self):

        A, B, C = list("ABC")

        N = 10
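        # fusion shrinks the population over time, so let epipack rescale
        # two-body reaction rates by the current population size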
        epi = EpiModel([A, B, C],
                       N,
                       correct_for_dynamical_population_size=True)
        epi.add_fusion_processes([
            (A, B, 1.0, C),
        ])
        epi.set_initial_conditions({A: 5, B: 5})

        t, res = epi.simulate(1e9)

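        # each of the five A's must eventually fuse with one of the five
        # B's (A + B -> C), so the run ends with exactly 5 C's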
        assert res[C][-1] == 5
Example #4
    def test_birth_stochastics(self):

        A, B, C = list("ABC")

        epi = EpiModel([A, B, C],
                       10,
                       correct_for_dynamical_population_size=True)
        epi.set_initial_conditions({A: 5, B: 5})

        epi.set_processes([
            (None, 1, A),
            (A, 1, B),
            (B, 1, None),
        ],
                          allow_nonzero_column_sums=True)

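        # constant influx into A (rate 1) with per-capita steps A -> B and
        # B -> out: in steady state both A and B are Poisson(1) distributed
        # (each stage behaves like an M/M/infinity queue with unit rates)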
        t, res = epi.simulate(200, sampling_dt=0.05)

        # discard the transient (t <= 10) and pool the A and B counts
        vals = np.concatenate([res[A][t > 10], res[B][t > 10]])
        rv = poisson(vals.mean())
        measured, bins = np.histogram(vals,
                                      bins=np.arange(10) - 0.5,
                                      density=True)
        theory = [rv.pmf(i) for i in range(len(bins) - 1) if measured[i] > 0]
        experi = [measured[i] for i in range(len(bins) - 1) if measured[i] > 0]
        # make sure the Kullback-Leibler divergence between measurement
        # and the fitted Poisson distribution is below some threshold
        assert entropy(theory, experi) < 1e-2
        assert np.median(res[A]) == 1
Example #5
# setup as in Example #2 above (the snippet starts mid-script,
# so these definitions are reconstructed from that example)
S, E, I, R = list("SEIR")
N = 75000
tmax = 100

model = EpiModel([S, E, I, R], N)
model.set_processes([
        ( S, I, 2, E, I ),
        ( I, 1, R),
        ( E, 1, I),
    ])
model.set_initial_conditions({S: N-100, I: 100})

tt = np.linspace(0,tmax,10000)
result_int = model.integrate(tt)

for c, res in result_int.items():
    pl.plot(tt, res)


start = time()
t, result_sim = model.simulate(tmax,sampling_dt=1)
end = time()

print("numeric model needed", end-start, "s")

for c, res in result_sim.items():
    pl.plot(t, res, '--')

model = StochasticEpiModel([S,E,I,R],N)
model.set_link_transmission_processes([
        ( I, S, 2, I, E ),
    ])
model.set_node_transition_processes([
        ( I, 1, R),
        ( E, 1, I),
    ])
model.set_random_initial_conditions({S: N-100, I: 100})

# run and plot the stochastic counterpart (reconstructed, mirroring Example #2)
start = time()
t, result_sim2 = model.simulate(tmax, sampling_dt=1)
end = time()

print("stochastic model needed", end-start, "s")

for c, res in result_sim2.items():
    pl.plot(t, res, ':')

pl.show()
Example #6
    def test_temporal_gillespie_repeated_simulation(self, plot=False):

        scl = 40

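        # sinusoidally modulated infection rate; the (t, y) signature lets
        # epipack evaluate the rate at trial times and infer its explicit
        # time dependence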
        def R0(t, y=None):
            return 4 + np.cos(t * scl)

        S, I = list("SI")
        N = 100
        rec = 1
        model = EpiModel([S, I], N)
        model.set_processes([
            (S, I, R0, I, I),
            (I, rec, S),
        ])
        I0 = 1
        S0 = N - I0
        model.set_initial_conditions({
            S: S0,
            I: I0,
        })

        taus = []
        N_sample = 10000
        if plot:
            from tqdm import tqdm
        else:
            tqdm = lambda x: x
        tt = np.linspace(0, 1, 100)
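        # for each sample, restart from the initial state and advance the
        # simulation in small increments (adopt_final_state=True keeps the
        # state between calls); the first change in the infected count
        # yields one sample of the first-event time tau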
        for sample in tqdm(range(N_sample)):
            tau = None
            model.set_initial_conditions({
                S: S0,
                I: I0,
            })
            for _t in tt[1:]:
                time, result = model.simulate(_t, adopt_final_state=True)
                #print(time, result['I'])
                if result['I'][-1] != I0:
                    tau = time[1]
                    break
            #print()
            if tau is not None:
                taus.append(tau)

        # integrated infection rate: int_0^t R0(s) ds = 4t + sin(scl*t)/scl
        int_R0 = lambda t: 4 * t + np.sin(t * scl) / scl
        # integrated total event rate Lambda(t), where the total rate is
        # lambda(t) = R0(t)*S0*I0/N + rec*I0 (infection plus recovery)
        Lambda = lambda t: int_R0(t) * S0 * I0 / N + I0 * rec * t
        # first-event-time density: pdf(t) = lambda(t) * exp(-Lambda(t))
        pdf = lambda t: (R0(t) * S0 * I0 / N + I0 * rec) * np.exp(-Lambda(t))
        measured, bins = np.histogram(taus, bins=100, density=True)
        theory = [
            np.exp(-Lambda(bins[i - 1])) - np.exp(-Lambda(bins[i]))
            for i in range(1, len(bins)) if measured[i - 1] > 0
        ]
        experi = [
            measured[i - 1] for i in range(1, len(bins)) if measured[i - 1] > 0
        ]
        # make sure the Kullback-Leibler divergence between the sampled
        # and the theoretical first-event-time distribution is small
        if plot:
            import matplotlib.pyplot as pl
            pl.figure()
            pl.hist(taus, bins=100, density=True)
            tt = np.linspace(0, 1, 100)
            pl.plot(tt, pdf(tt))
            pl.yscale('log')
            pl.figure()
            pl.hist(taus, bins=100, density=True)
            tt = np.linspace(0, 1, 100)
            pl.plot(tt, pdf(tt))
            pl.show()
        assert entropy(theory, experi) < 0.01