Example No. 1
import pickle
from collections import defaultdict  # needed if the commented-out initialization below is used
from itertools import product

import numpy as np

# SI, SIR, SIS, faculty_graph, and school_metadata are defined elsewhere in the project.
def run_trials(si_trials=2, sir_trials=2, sis_trials=2):
    ps = np.linspace(0, 1, 11)
    rs = np.linspace(0, 1, 5, endpoint=False)

    #results = {"size": {}, "length": {}}
    #for p in ps:
    #    results["size"][p] = defaultdict(list)
    #    results["length"][p] = defaultdict(list)
    results = pickle.load(open("cache/HIS_SI.p", "rb"))
    for trial in range(si_trials):
        print("Trial progress: {}".format(trial / float(si_trials)))
        for p in ps:
            print(p)
            for node in school_metadata.keys():
                epi = SI(faculty_graph.copy(), p=p)
                epi.infect_node(node)
                epi.simulate()
                results["size"][p][node].append(epi.size)
                results["length"][p][node].append(epi.length)
    pickle.dump(results, open("cache/HIS_SI.p", 'wb'))
    results.clear()
    print("SI done")

    #results = {"size": {}, "length": {}}
    #for p, r in product(ps, rs):
    #    results["size"][p, r] = defaultdict(list)
    #    results["length"][p, r] = defaultdict(list)
    results = pickle.load(open("cache/HIS_SIR.p", "rb"))
    for trial in range(sir_trials):
        print("Trial progress: {}".format(trial / float(sir_trials)))
        for p, r in product(ps, rs):
            print((p, r))
            for node in school_metadata.keys():
                epi = SIR(faculty_graph.copy(), p=p, r=r)
                epi.infect_node(node)
                epi.simulate()
                results["size"][p, r][node].append(epi.size)
                results["length"][p, r][node].append(epi.length)
    pickle.dump(results, open("cache/HIS_SIR.p", 'wb'))
    results.clear()
    print("SIR done")

    #results = {"size": {}, "length": {}}
    #for p, r in product(ps, rs):
    #    results["size"][p, r] = defaultdict(list)
    #    results["length"][p, r] = defaultdict(list)
    results = pickle.load(open("cache/HIS_SIS.p", "rb"))
    for trial in range(sis_trials):
        print("Trial progress: {}".format(trial / float(sis_trials)))
        for p, r in product(ps, rs):
            print((p, r))
            for node in school_metadata.keys():
                epi = SIS(faculty_graph.copy(), p=p, r=r)
                epi.infect_node(node)
                epi.simulate()
                results["size"][p, r][node].append(epi.size)
                results["length"][p, r][node].append(epi.length)
    pickle.dump(results, open("cache/HIS_SIS.p", 'wb'))
    results.clear()
    print("SIS done")
Example No. 2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection on older Matplotlib

# SIS (and SI, if used) and bi_planted_partition are defined elsewhere in the project.
def main():
    n_networks = 100

    ps = np.linspace(0, 1, 25, endpoint=False)
    rs = np.linspace(0, 1, 25, endpoint=False)

    results_p = []
    results_r = []
    results_s = []
    results_l = []
    for r in rs:
        for p in ps:
            print(r, p)
            samples_l = []
            samples_s = []
            for i in range(n_networks):
                #print('-- NEW EPI --')
                g = bi_planted_partition(1000, 8, 0)
                epi = SIS(g, p=p, r=r)
                #epi = SI(g, p=p)

                epi.infect_random_node()
                epi.simulate()

                samples_l.append(epi.length)
                samples_s.append(epi.size)

            results_r.append(r)
            results_p.append(p)
            results_s.append(np.average(samples_s))
            results_l.append(np.average(samples_l))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')  # fig.gca(projection=...) is removed in newer Matplotlib
    X, Y = results_r, results_p
    Z = results_l
    ax.scatter(X, Y, Z)

    ax.set_xlabel('Recovery Probability')
    ax.set_ylabel('Transmission Probability')
    ax.set_zlabel('Epidemic Length')

    plt.show()
    # TODO: Figure out the right way to save these plots
    plt.clf()

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')  # fig.gca(projection=...) is removed in newer Matplotlib
    X, Y = results_r, results_p
    Z = results_s
    ax.scatter(X, Y, Z)

    ax.set_xlabel('Recovery Probability')
    ax.set_ylabel('Transmission Probability')
    ax.set_zlabel('Epidemic Size')

    plt.show()
    # TODO: Figure out the right way to save these plots
    plt.clf()
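
To address the "save these plots" TODO, the show/clf blocks inside main() could instead keep a handle to the figure and write it to disk. A sketch for the epidemic-length plot follows; the file name is a placeholder, not from the original code:

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(results_r, results_p, results_l)
ax.set_xlabel('Recovery Probability')
ax.set_ylabel('Transmission Probability')
ax.set_zlabel('Epidemic Length')
fig.savefig('sis_length_3d.png', dpi=200, bbox_inches='tight')  # placeholder file name
plt.close(fig)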
Example No. 3
import pickle

import numpy as np
import matplotlib.pyplot as plt

# SI, SIR, SIS, bi_planted_partition, faculty_graph, and school_metadata are defined elsewhere in the project.
def main():
    # example
    n_networks = 500
    x = np.linspace(0, 1, 50, endpoint=False)
    results_b_length = []
    results_b_size = []
    for p in x:
        print(p)
        samples_length = []
        samples_size = []
        for i in range(n_networks):
            #print('-- NEW EPI --')
            g = bi_planted_partition(1000, 8, 0)
            epi = SIS(g, p=p, r=0.04)
            #epi = SI(g, p=p)
            epi.infect_random_node()
            epi.simulate()
            samples_length.append(epi.length)
            samples_size.append(epi.size)
        results_b_length.append((p, np.average(samples_length)))
        results_b_size.append((p, np.average(samples_size)))

    fig = plt.figure()
    ax = plt.gca()
    ax.scatter(*zip(*results_b_length))
    plt.xlabel('Transmission probability')
    plt.ylabel('Epidemic length')
    plt.axhline(y=np.log(1000), linewidth=1, color='red')
    plt.show()
    #plt.savefig('length.png')
    plt.clf()

    fig = plt.figure()
    ax = plt.gca()
    ax.scatter(*zip(*results_b_size))
    plt.xlabel('Transmission probability')
    plt.ylabel('Epidemic size')
    plt.show()


def run_trials_graph_with_random_hops(si_trials=2, sir_trials=2, sis_trials=2):
    pjumps = np.linspace(0, 1, 11)

    # vary jump probability
    #results = {"size": {}, "length": {}}
    #for p in pjumps:
    #    results["size"][p] = defaultdict(list)
    #    results["length"][p] = defaultdict(list)
    results = pickle.load(
        open("cache/random_hops/jump_probability/BUSI_SI.p", "rb"))
    for trial in range(si_trials):
        print("Trial progress: {}".format(trial / float(si_trials)))
        for p in pjumps:
            print(p)
            for node in school_metadata.keys():
                epi = SI(faculty_graph.copy(),
                         p=0.1,
                         random_jump_p=p,
                         is_random_jump=True)
                epi.infect_node(node)
                epi.simulate()
                results["size"][p][node].append(epi.size)
                results["length"][p][node].append(epi.length)
    pickle.dump(results,
                open("cache/random_hops/jump_probability/BUSI_SI.p", 'wb'))
    results.clear()

    print("SI done")

    #results = {"size": {}, "length": {}}
    #for p in pjumps:
    #    results["size"][p] = defaultdict(list)
    #    results["length"][p] = defaultdict(list)
    results = pickle.load(
        open("cache/random_hops/jump_probability/BUSI_SIR.p", "rb"))
    for trial in range(sir_trials):
        print("Trial progress: {}".format(trial / float(sir_trials)))
        for p in pjumps:
            print(p)
            for node in school_metadata.keys():
                epi = SIR(faculty_graph.copy(),
                          p=0.1,
                          r=0.2,
                          random_jump_p=p,
                          is_random_jump=True)
                epi.infect_node(node)
                epi.simulate()
                results["size"][p][node].append(epi.size)
                results["length"][p][node].append(epi.length)
    pickle.dump(results,
                open("cache/random_hops/jump_probability/BUSI_SIR.p", 'wb'))
    results.clear()

    print("SIR done")

    #results = {"size": {}, "length": {}}
    #for p in pjumps:
    #    results["size"][p] = defaultdict(list)
    #    results["length"][p] = defaultdict(list)
    results = pickle.load(
        open("cache/random_hops/jump_probability/BUSI_SIS.p", "rb"))
    for trial in range(sis_trials):
        print("Trial progress: {}".format(trial / float(sis_trials)))
        for p in pjumps:
            print(p)
            for node in school_metadata.keys():
                epi = SIS(faculty_graph.copy(),
                          p=0.1,
                          r=0.2,
                          random_jump_p=p,
                          is_random_jump=True)
                epi.infect_node(node)
                epi.simulate()
                results["size"][p][node].append(epi.size)
                results["length"][p][node].append(epi.length)
    pickle.dump(results,
                open("cache/random_hops/jump_probability/BUSI_SIS.p", 'wb'))
    results.clear()

    print("SIS done")