Example #1
import sciris as sc  # needed for sc.profile() below


def test_profile():
    ''' Test profiling functions '''

    def slow_fn():
        n = 10000
        int_list = []
        int_dict = {}
        for i in range(n):
            int_list.append(i)
            int_dict[i] = i
        return

    class Foo:
        def __init__(self):
            self.a = 0
            return

        def outer(self):
            for i in range(100):
                self.inner()
            return

        def inner(self):
            for i in range(1000):
                self.a += 1
            return

    foo = Foo()
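    # sc.profile() executes the function passed as run= under a line profiler and
    # reports per-line timings for each of the functions passed via follow=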
    sc.profile(run=foo.outer, follow=[foo.outer, foo.inner])
    sc.profile(slow_fn)
    return foo
Example #2
import os
import sys
import time

import sciris as sc


def run_benchmark(n, test_index_list, out_dir, nruns=1, base_seed=0):
    """
    loop over list of n and output perf profile for each n to test_n.txt
    """
    test_dict = {}

    for j in range(nruns):
        for indx in test_index_list:
            print(f'running {j} {indx}')
            to_profile = to_profile_dict[indx]
            file_name = f'test_{to_profile}_{n}.txt'
            file_path = os.path.join(out_dir, file_name)
            saved_stdout = sys.stdout
            # stop records the wall-clock time of this run, used later to compute the
            # average time of execution; sc.toc() prints the elapsed time
            stop = 0.0
            with open(file_path, 'w') as f:
                sys.stdout = f
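                # sc.profile() prints its report to stdout, so redirecting stdout here
                # captures the per-line profile in the output file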
                start = sc.tic()
                sc.profile(run=make_pop,
                           follow=func_options[to_profile],
                           n=int(n))
                sc.toc()
                stop = time.time() - start
            sys.stdout = saved_stdout  # the with-block has already closed the file
            if j == 0:
                test_dict[indx] = stop
            else:
                test_dict[indx] += stop
    return test_dict
Example #3
import sciris as sc  # needed for sc.profile() and sc.mprofile() below


def test_profile():
    sc.heading('Test profiling functions')

    def slow_fn():
        n = 10000
        int_list = []
        int_dict = {}
        for i in range(n):
            int_list.append(i)
            int_dict[i] = i
        return

    def big_fn():
        n = 1000
        int_list = []
        int_dict = {}
        for i in range(n):
            int_list.append([i] * n)
            int_dict[i] = [i] * n
        return

    class Foo:
        def __init__(self):
            self.a = 0
            return

        def outer(self):
            for i in range(100):
                self.inner()
            return

        def inner(self):
            for i in range(1000):
                self.a += 1
            return

    foo = Foo()
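    # sc.mprofile() is the memory counterpart of sc.profile(): it reports per-line
    # memory usage rather than per-line execution time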
    try:
        sc.mprofile(big_fn)  # NB, cannot re-profile the same function at the same time
    except TypeError as E:  # This happens when re-running this script
        print(f'Unable to re-profile memory function; this is usually not cause for concern ({E})')
    sc.profile(run=foo.outer, follow=[foo.outer, foo.inner])
    lp = sc.profile(slow_fn)

    return lp
Example #4
'''
Test multisim plotting options
'''

import covasim as cv
import numpy as np
import sciris as sc

n = 100
betas = np.linspace(0.005, 0.030, n)
sims = []
for beta in betas:
    sim = cv.Sim(pop_size=1000, beta=beta, datafile='../example_data.csv')
    sims.append(sim)
msim = cv.MultiSim(sims)
msim.run(reseed=True)

# Demonstrate indices
msim.plot(inds=[10, 20, 30])

# Demonstrate and time lots of lines
sc.tic()
msim.plot()
sc.toc()

# Run with profiling
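# follow= accepts either a single function or a list of functions; here
# cv.plotting.plot_sim is line-profiled while msim.plot() runs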
sc.profile(run=msim.plot, follow=cv.plotting.plot_sim)
Example #5
'''
Benchmark the simulation
'''

import sciris as sc
import covasim as cv
from test_baselines import make_sim

sim = make_sim(use_defaults=False, do_plot=False)  # Use the same sim as from the regression/benchmarking tests
to_profile = 'plot_tidy'  # Must be one of the options listed below

func_options = {
    'make_contacts': cv.make_random_contacts,
    'make_randpop': cv.make_randpop,
    'person': cv.Person.__init__,
    'make_people': cv.make_people,
    'init_people': sim.init_people,
    'initialize': sim.initialize,
    'run': sim.run,
    'step': sim.step,
    'infect': cv.People.infect,
    'plot': cv.plotting.plot_sim,
    'plot_tidy': cv.plotting.tidy_up,
}

if not to_profile.startswith('plot'):
    sc.profile(run=sim.run, follow=func_options[to_profile])
else:
    sim.run()
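    # The sim must be run before its results can be plotted, so sim.run() is called
    # first and then sim.plot() is profiled with the chosen plotting function followed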
    sc.profile(sim.plot, follow=func_options[to_profile])
Example #6
import sciris as sc  # needed imports (not shown in the original snippet)
import synthpops as sp

to_profile = 'sample_n_contact_ages'  # Must be one of the options listed below

func_options = {
    'make_popdict': sp.make_popdict,
    'make_contacts': sp.make_contacts,
    'sample_n_contact_ages': sp.sample_n_contact_ages,
}
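# follow= need not be the same function as run=: any function called directly or
# indirectly by the profiled call can be line-profiled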


def make_contacts():

    # Copied from test_contacts.py
    weights_dic = {'H': 4.11, 'S': 11.41, 'W': 8.07, 'R': 2.79}
    # Increase the general community weight, since the calibrated weight of 2.79 doesn't
    # include contacts from the general community whom you don't know but are near
    weights_dic['R'] = 7
    n = 10000

    kwargs = dict(weights_dic=weights_dic,
                  use_social_layers=True,
                  directed=False,
                  use_student_weights=True)  # Crashes if False

    popdict = sp.make_popdict(n=n)
    contacts = sp.make_contacts(popdict, **kwargs)

    return contacts


sc.profile(run=make_contacts, follow=func_options[to_profile])
Example #7
import sciris as sc  # needed imports (not shown in the original snippet)
import synthpops as sp

to_profile = 'assign_rest_of_workers'  # Must be one of the options listed below

func_options = {
    'make_population': sp.make_population,
    'trim_contacts': sp.trim_contacts,  # This is where most of the time goes for loading a population
    'generate_synthetic_population': sp.generate_synthetic_population,  # This is where most of the time goes for generating a population
    'generate_all_households': sp.contact_networks.generate_all_households,
    'generate_larger_households': sp.contact_networks.generate_larger_households,
    'assign_rest_of_workers': sp.contact_networks.assign_rest_of_workers,
    'make_popdict': sp.make_popdict,
    'make_contacts': sp.make_contacts,
    'sample_n_contact_ages': sp.sample_n_contact_ages,
}


def make_pop():
    n = [10000, 10001][1]  # Use either a pre-generated population, or one that has to be made from scratch
    max_contacts = {'S': 20, 'W': 10}
    population = sp.make_population(n=n, max_contacts=max_contacts)
    return population


sc.tic()
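# sc.tic()/sc.toc() brackets the profiled call with a simple wall-clock timer,
# giving the total runtime in addition to the per-line profile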
sc.profile(run=make_pop, follow=func_options[to_profile])
sc.toc()
Example #8
        sim['interventions'] += [sm]

    sim.run(keep_people=debug)

    stats = evaluate_sim(sim)
    print(stats)

    if debug:
        sim.plot(to_plot='overview')
        #t = sim.make_transtree()
    else:
        sim.plot()

    #sim.save('test.sim')
    #cv.savefig('sim.png')

    return sim


if __name__ == '__main__':

    to_profile = 'stats_update'

    func_options = dict(
        step = cv.Sim.step,
        school_update = cvsch.School.update,
        stats_update = cvsch.SchoolStats.update,
        )

    sc.profile(run=benchmark_schools, follow=func_options[to_profile])
Example #9
    max_contacts=None,
    generate=True,
    with_industry_code=0,
    with_facilities=0,
    with_non_teaching_staff=1,
    use_two_group_reduction=1,
    with_school_types=1,
    average_LTCF_degree=20,
    ltcf_staff_age_min=20,
    ltcf_staff_age_max=60,
    school_mixing_type='age_and_class_clustered',
    average_class_size=20,
    inter_grade_mixing=0.1,
    teacher_age_min=25,
    teacher_age_max=75,
    staff_age_min=20,
    staff_age_max=75,
    average_student_teacher_ratio=20,
    average_teacher_teacher_degree=3,
    average_student_all_staff_ratio=15,
    average_additional_staff_degree=20,
)


def run():
    pop = sp.make_population(**pars)
    return pop


sc.profile(run=run, follow=func_options[to_profile])
Example #10
                    quar_test=10.0)
ct = cv.contact_tracing(start_day=sd, trace_probs=1, trace_time=0)

# Run with all options for both testing interventions
if not benchmark:
    for test_type in ['num', 'prob']:
        sims = []
        for quar_policy in ['end', 'start', 'both', 'daily']:
            ti = sc.dcp(t[test_type])
            ti.quar_policy = quar_policy
            sim = cv.Sim(pars,
                         interventions=[ti, sc.dcp(ct)],
                         label=f'Policy: {quar_policy}')
            sims.append(sim)

        # Run the sims
        msim = cv.MultiSim(sims)
        msim.run()
        msim.plot(to_plot='overview',
                  fig_args={'figsize': (35, 20)},
                  interval=120)

# Do benchmarking
else:
    sim = cv.Sim(pars, interventions=[t.prob, ct])
    sim.initialize()
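    # Profiling the same run twice with different follow= targets drills down the call
    # stack: first the intervention's apply() method, then sim.people.trace beneath it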
    sc.profile(run=sim.run, follow=ct.apply)  # 99% of time is in sim.people.trace
    sc.profile(run=sim.run, follow=sim.people.trace)  # 97% of time is in np.isin(self.contacts[lkey][k1], inds)
Example #11
# Test profiling functions
if 'profile' in torun:

    def slow_fn():
        n = 10000
        int_list = []
        int_dict = {}
        for i in range(n):
            int_list.append(i)
            int_dict[i] = i
        return

    class Foo:
        def __init__(self):
            self.a = 0
            return

        def outer(self):
            for i in range(100):
                self.inner()
            return

        def inner(self):
            for i in range(1000):
                self.a += 1
            return

    foo = Foo()
    sc.profile(run=foo.outer, follow=[foo.outer, foo.inner])
    sc.profile(slow_fn)