Example #1
def get_default_prognoses(by_age=True):
    '''
    Return the default parameter values for prognoses

    Args:
        by_age (bool): whether or not to use age-specific values

    Returns:
        prog_pars (dict): the dictionary of prognosis probabilities

    '''
    if not by_age:
        prog_pars = sc.objdict(
            symp_prob   = 0.75,
            severe_prob = 0.12,
            crit_prob   = 0.25,
            death_prob  = 0.50,
        )
    else:
        prog_pars = sc.objdict(
            age_cutoffs  = np.array([10,      20,      30,      40,      50,      60,      70,      80,      120]),     # Age cutoffs
            symp_probs   = np.array([0.50,    0.55,    0.60,    0.65,    0.70,    0.75,    0.80,    0.85,    0.90]),    # Overall probability of developing symptoms
            severe_probs = np.array([0.00100, 0.00100, 0.01100, 0.03400, 0.04300, 0.08200, 0.11800, 0.16600, 0.18400]), # Overall probability of developing severe symptoms (https://www.medrxiv.org/content/10.1101/2020.03.09.20033357v1.full.pdf)
            crit_probs   = np.array([0.00004, 0.00011, 0.00050, 0.00123, 0.00214, 0.00800, 0.02750, 0.06000, 0.10333]), # Overall probability of developing critical symptoms (derived from https://www.cdc.gov/mmwr/volumes/69/wr/mm6912e2.htm)
            death_probs  = np.array([0.00002, 0.00006, 0.00030, 0.00080, 0.00150, 0.00600, 0.02200, 0.05100, 0.09300]), # Overall probability of dying (https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf)
        )
    return prog_pars
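
A minimal usage sketch (not part of the original example; it assumes numpy is imported as np and that age_cutoffs act as upper bin edges, which is an assumption about the data layout):
import numpy as np

progs = get_default_prognoses(by_age=True)
ind = np.searchsorted(progs.age_cutoffs, 57, side='right')  # a 57-year-old falls in the 50-60 bracket (index 5)
death_prob = progs.death_probs[ind]                         # 0.006 for that bracket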
Example #2
def get_bounds():
    ''' Set parameter starting points and bounds.
    NB: uses an ORDERED dictionary => specify bounds IN ORDER! '''

    pdict = sc.objdict()

    pdict['pop_infected'] = dict(best=20000, lb=1000, ub=50000)
    pdict['beta'] = dict(best=.005, lb=0.001, ub=0.01)

    # X[2] = test_rate
    if UseTestRate == 'srch':
        pdict['test_rate'] = dict(best=3.3e-4, lb=2e-6, ub=3e-3)

    if LogTransform:
        # NB: only ['pop_infected','beta'] LOG transformed
        for param in ParamLogTransformed:

            for key in ['best', 'lb', 'ub']:
                pdict[param][key] = math.log(pdict[param][key])

    # Convert from dicts to arrays
    pars = sc.objdict()
    for key in ['best', 'lb', 'ub']:
        pars[key] = np.array([v[key] for v in pdict.values()])

    return pars, pdict.keys()
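
A hypothetical caller for the function above; UseTestRate, LogTransform, and ParamLogTransformed are module-level settings assumed to be defined elsewhere in the original script:
import math
import numpy as np

pars, parkeys = get_bounds()
x = np.random.uniform(pars.lb, pars.ub)   # e.g. sample a trial point inside the bounds
trial = dict(zip(parkeys, x))
if LogTransform:
    for param in ParamLogTransformed:     # undo the log transform before passing values to the sim
        trial[param] = math.exp(trial[param])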
Example #3
    def __init__(self,
                 sim=None,
                 metapars=None,
                 scenarios=None,
                 basepars=None,
                 filename=None):

        # For this object, metapars are the foundation
        default_pars = make_metapars()  # Start with default pars
        super().__init__(
            default_pars)  # Initialize and set the parameters as attributes

        # Handle filename
        self.created = sc.now()
        if filename is None:
            datestr = sc.getdate(obj=self.created,
                                 dateformat='%Y-%b-%d_%H.%M.%S')
            filename = f'covasim_scenarios_{datestr}.scens'
        self.filename = filename

        # Handle scenarios -- by default, create a baseline scenario
        if scenarios is None:
            scenarios = sc.dcp(default_scenario)
        self.scenarios = scenarios

        # Handle metapars
        if metapars is None:
            metapars = {}
        self.metapars = metapars
        self.update_pars(self.metapars)

        # Create the simulation and handle basepars
        if sim is None:
            sim = cvsim.Sim()
        self.base_sim = sim
        if basepars is None:
            basepars = {}
        self.basepars = basepars
        self.base_sim.update_pars(self.basepars)
        self.base_sim.validate_pars()
        self.base_sim.init_results()

        # Copy quantities from the base sim to the main object
        self.npts = self.base_sim.npts
        self.tvec = self.base_sim.tvec
        self.reskeys = self.base_sim.reskeys

        # Create the results object; order is: results key, scenario, best/low/high
        self.sims = sc.objdict()
        self.allres = sc.objdict()
        for reskey in self.reskeys:
            self.allres[reskey] = sc.objdict()
            for scenkey in scenarios.keys():
                self.allres[reskey][scenkey] = sc.objdict()
                for nblh in ['name', 'best', 'low', 'high']:
                    self.allres[reskey][scenkey][nblh] = None  # This will get populated below
        return
Example #4
def plot_schools(pop):
    ''' Not a formal test, but a sanity check for school distributions '''
    keys = ['pk', 'es', 'ms', 'hs'] # Exclude universities for this analysis
    ppl_keys = ['all', 'students', 'teachers', 'staff']
    xpeople = np.arange(len(ppl_keys)) # X axis for people
    school_types_by_ind = {}
    for key,vals in pop.school_types.items():
        for val in vals:
            if key in keys:
                school_types_by_ind[val] = key

    results = {}
    for sc_id,sc_type in school_types_by_ind.items():
        thisres = sc.objdict()
        sc_inds = (pop.school_id == sc_id)
        thisres.all = cv.true(sc_inds)
        thisres.students = cv.true(np.array(pop.student_flag) * sc_inds)
        thisres.teachers = cv.true(np.array(pop.teacher_flag) * sc_inds)
        thisres.staff    = cv.true(np.array(pop.staff_flag) * sc_inds)
        results[sc_id] = thisres

    # Do plotting
    fig = pl.figure(figsize=figsize, dpi=dpi)
    pl.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, hspace=0.5, wspace=0.5)
    n_schools = len(results)
    n_cols = len(ppl_keys) + 1
    count = 0
    for sc_id in results.keys():
        count += 1
        school_type = school_types_by_ind[sc_id]
        ax = pl.subplot(n_schools, n_cols, count)
        thisres = results[sc_id]
        thisres.people_counts = [len(thisres[k]) for k in ppl_keys]
        ax.bar(xpeople, thisres.people_counts)
        ax.set_xticks(xpeople)
        ax.set_xticklabels(ppl_keys)
        title = f'School ID {sc_id}, school type {school_type}, total size: {len(thisres.all)}'
        ax.set_title(title)

        thisres.ages = sc.objdict()
        for key in ppl_keys:
            count += 1
            ax = pl.subplot(n_schools, n_cols, count)
            thisres.ages[key] = pop.age[thisres[key]]
            pl.hist(thisres.ages[key])
            ax.set_title(f'Ages for {key} in school {sc_id} ({school_type})')

    if do_maximize:
        cv.maximize(fig=fig)

    if to_json:
        sc.savejson(outfile, results, indent=2)

    return results
Example #5
    def make_detailed(self, people, reset=False):
        ''' Construct a detailed transmission tree, with additional information for each person '''
        if self.detailed is None or reset:

            # Reset to look like the line list, but with more detail
            self.detailed = [None] * len(self)

            for transdict in self.linelist:

                if transdict is not None:

                    # Pull out key quantities
                    ddict = sc.objdict(
                        sc.dcp(transdict))  # For "detailed dictionary"
                    source = ddict.source
                    target = ddict.target
                    ddict.s = sc.objdict()  # Source
                    ddict.t = sc.objdict()  # Target

                    # If the source is available (e.g. not a seed infection), loop over both it and the target
                    if source is not None:
                        stdict = {'s': source, 't': target}
                    else:
                        stdict = {'t': target}

                    # Pull out each of the attributes relevant to transmission
                    attrs = [
                        'age', 'date_symptomatic', 'date_tested',
                        'date_diagnosed', 'date_quarantined', 'date_severe',
                        'date_critical', 'date_known_contact'
                    ]
                    for st, stind in stdict.items():
                        for attr in attrs:
                            ddict[st][attr] = people[attr][stind]
                    if source is not None:
                        for attr in attrs:
                            if attr.startswith('date_'):
                                is_attr = attr.replace('date_', 'is_')  # Convert date to a boolean, e.g. date_diagnosed -> is_diagnosed
                                ddict.s[is_attr] = ddict.s[attr] <= ddict['date']  # These don't make sense for people just infected (targets), only sources

                        ddict.s.is_asymp = np.isnan(people.date_symptomatic[source])
                        ddict.s.is_presymp = ~ddict.s.is_asymp and ~ddict.s.is_symptomatic  # Not asymptomatic and not currently symptomatic
                    ddict.t['is_quarantined'] = ddict.t['date_quarantined'] <= ddict['date']  # This is the only target date that it makes sense to define since it can happen before infection

                    self.detailed[target] = ddict

        return
Example #6
def test_decays(do_plot=False):
    sc.heading('Testing decay parameters...')

    n = 300
    x = pl.arange(n)

    pars = sc.objdict(
        nab_decay = dict(
            func = cv.immunity.nab_decay,
            length = n,
            decay_rate1 = 0.05,
            decay_time1= 100,
            decay_rate2 = 0.002,
        ),

        exp_decay = dict(
            func = cv.immunity.exp_decay,
            length = n,
            init_val = 0.8,
            half_life= 100,
            delay = 20,
        ),

        linear_decay = dict(
            func = cv.immunity.linear_decay,
            length = n,
            init_val = 0.8,
            slope = 0.01,
        ),

        linear_growth = dict(
            func = cv.immunity.linear_growth,
            length = n,
            slope = 0.01,
        ),
    )

    # Calculate all the delays
    res = sc.objdict()
    for key,par in pars.items():
        func = par.pop('func')
        res[key] = func(**par)

    if do_plot:
        pl.figure(figsize=(12,8))
        for key,y in res.items():
            pl.semilogy(x, y, label=key, lw=3, alpha=0.7)
        pl.legend()
        pl.show()

    res.x = x

    return res
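
For reference, the curve that exp_decay is tested against here can be sketched as follows; this is an assumption about its half-life semantics, not covasim's actual implementation:
import numpy as np

def sketch_exp_decay(length, init_val, half_life, delay=0):
    t = np.arange(length, dtype=float)
    y = init_val * 0.5 ** ((t - delay) / half_life)  # halve every half_life days after the delay
    y[t < delay] = init_val                          # flat during the delay period
    return y

approx = sketch_exp_decay(length=300, init_val=0.8, half_life=100, delay=20)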
Example #7
def get_bounds():
    ''' Set parameter starting points and bounds '''
    pdict = sc.objdict(
        beta=dict(best=0.00522, lb=0.003, ub=0.008),
        pop_infected=dict(best=4500, lb=1000, ub=10000),
    )

    # Convert from dicts to arrays
    pars = sc.objdict()
    for key in ['best', 'lb', 'ub']:
        pars[key] = np.array([v[key] for v in pdict.values()])

    return pars, pdict.keys()
Example #8
def get_bounds():
    ''' Set parameter starting points and bounds '''
    pdict = sc.objdict(
        beta         = dict(best=0.00593, lb=0.0059, ub=0.006),
        pop_infected = dict(best=1500,  lb=1000,   ub=1600),
        s_prob_may = dict(best=0.0171,  lb=0.016,   ub=0.019),
        s_prob_june = dict(best=0.0171,  lb=0.016,   ub=0.019),
    )

    # Convert from dicts to arrays
    pars = sc.objdict()
    for key in ['best', 'lb', 'ub']:
        pars[key] = np.array([v[key] for v in pdict.values()])

    return pars, pdict.keys()
Example #9
    def init_results(self):
        '''
        Initialize the results structure, e.g.:
            self.beds.aac.baseline.best = time series
        '''
        self.reskeys = ['aac', 'icu', 'total']
        self.reslabels = ['Adult acute beds', 'ICU beds', 'Total beds']
        self.beds = sc.objdict()
        for reskey in self.reskeys:
            self.beds[reskey] = sc.objdict()
            for scenkey in self.scenkeys:
                self.beds[reskey][scenkey] = sc.objdict()
                for blh in self.blh:
                    self.beds[reskey][scenkey][blh] = pl.zeros(self.npts)
        return
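
The dotted access in the docstring works because sc.objdict supports attribute access on nested entries; a standalone sketch (names are illustrative, not from the original class):
import numpy as np
import sciris as sc

beds = sc.objdict()
beds['aac'] = sc.objdict(baseline=sc.objdict(best=np.zeros(5)))
beds.aac.baseline.best[2] = 10.0  # key access and attribute access are interchangeable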
Example #10
def test_vaccine_1variant_scen(do_plot=False, do_show=True, do_save=False):
    sc.heading('Run a basic sim with 1 variant, pfizer vaccine')

    # Define baseline parameters
    n_runs = 3
    base_sim = cv.Sim(use_waning=True, pars=base_pars)

    # Vaccinate 75+, then 65+, then 50+, then 18+ on days 20, 40, 60, 80
    base_sim.vxsubtarg = sc.objdict()
    base_sim.vxsubtarg.age = [75, 65, 50, 18]
    base_sim.vxsubtarg.prob = [.05, .05, .05, .05]
    base_sim.vxsubtarg.days = subtarg_days = [20, 40, 60, 80]
    pfizer = cv.vaccinate(days=subtarg_days,
                          vaccine='pfizer',
                          subtarget=vacc_subtarg)

    # Define the scenarios

    scenarios = {
        'baseline': {
            'name': 'No Vaccine',
            'pars': {}
        },
        'pfizer': {
            'name': 'Pfizer starting on day 20',
            'pars': {
                'interventions': [pfizer],
            }
        },
    }

    metapars = {'n_runs': n_runs}
    scens = cv.Scenarios(sim=base_sim, metapars=metapars, scenarios=scenarios)
    scens.run()

    to_plot = sc.objdict({
        'New infections': ['new_infections'],
        'Cumulative infections': ['cum_infections'],
        'New reinfections': ['new_reinfections'],
        # 'Cumulative reinfections': ['cum_reinfections'],
    })
    if do_plot:
        scens.plot(do_save=do_save,
                   do_show=do_show,
                   fig_path='results/test_basic_vaccination.png',
                   to_plot=to_plot)

    return scens
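
The vacc_subtarg helper referenced above is defined elsewhere in the original test file; a sketch of the kind of subtargeting callable cv.vaccinate accepts (returning target indices and per-person probabilities), using the vxsubtarg attributes set on the sim, might look like this:
import numpy as np

def vacc_subtarg_sketch(sim):
    ''' Illustrative only: vaccinate progressively younger age groups on each scheduled day '''
    ind = sim.vxsubtarg.days.index(sim.t) if sim.t in sim.vxsubtarg.days else 0
    age_cutoff = sim.vxsubtarg.age[ind]
    prob = sim.vxsubtarg.prob[ind]
    inds = np.where(sim.people.age >= age_cutoff)[0]
    return dict(inds=inds, vals=prob * np.ones(len(inds)))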
Example #11
def test_employment_age_distribution(do_show, do_save, create_sample_pop_e2e,
                                     get_fig_dir_by_module):
    sp.logger.info(
        "Test employment age distribution vs the employment_rates_by_age.dat")

    plotting_kwargs = sc.objdict(do_show=do_show,
                                 do_save=do_save,
                                 figdir=get_fig_dir_by_module)
    actual_employment_age_count = create_sample_pop_e2e.count_employment_by_age()
    total_employee = sum(actual_employment_age_count.values())
    expected_employment_age_dist = sp.norm_dic(
        sp.get_employment_rates(**create_sample_pop_e2e.loc_pars))

    expected_employment_age_count = {
        i: round(expected_employment_age_dist[i] * total_employee)
        for i in expected_employment_age_dist
    }

    # generate list of ages based on the actual count
    generated_actual = sum([[i] * actual_employment_age_count[i]
                            for i in actual_employment_age_count], [])
    generated_expected = sum([[i] * expected_employment_age_count[i]
                              for i in expected_employment_age_count], [])
    # run statistical tests for employment by age distribution
    # TODO: Need to refine the data for fair comparison
    sp.statistic_test(expected=generated_expected,
                      actual=generated_actual,
                      test=st.kstest,
                      verbose=True)
    # plot employment rates by age
    create_sample_pop_e2e.plot_employment_rates_by_age(**plotting_kwargs)
Example #12
def test_work_size_distribution(do_show, do_save, create_sample_pop_e2e,
                                get_fig_dir_by_module):
    sp.logger.info(
        "Test workplace size distribution vs the work_size_count.dat")

    plotting_kwargs = sc.objdict(do_show=do_show,
                                 do_save=do_save,
                                 figdir=get_fig_dir_by_module)

    workplace_brackets_index = sp.get_index_by_brackets(
        sp.get_workplace_size_brackets(**create_sample_pop_e2e.loc_pars))

    actual_workplace_sizes = create_sample_pop_e2e.count_workplace_sizes()
    # count the workplaces by size bracket

    actual_count = {k: 0 for k in set(workplace_brackets_index.values())}
    for i in workplace_brackets_index:
        actual_count[workplace_brackets_index[i]] += actual_workplace_sizes.get(i, 0)

    expected_distr = sp.norm_dic(
        sp.get_workplace_size_distr_by_brackets(
            **create_sample_pop_e2e.loc_pars))

    # calculate expected count by using actual number of workplaces
    expected_count = {
        k: expected_distr[k] * sum(actual_count.values())
        for k in expected_distr
    }
    # perform statistical check
    sp.statistic_test([expected_count[i] for i in sorted(expected_count)],
                      [actual_count[i] for i in sorted(actual_count)])

    create_sample_pop_e2e.plot_workplace_sizes(**plotting_kwargs)
Example #13
def test_beta_edges(do_plot=False, do_show=True, do_save=False, fig_path=None):

    pars = dict(
        pop_size=1000,
        pop_infected=20,
        pop_type='hybrid',
    )

    start_day = 25  # Day to start the intervention
    end_day = 40  # Day to end the intervention
    change = 0.3  # Amount of change

    sims = sc.objdict()
    sims.b = cv.Sim(pars)  # Beta intervention
    sims.e = cv.Sim(pars)  # Edges intervention

    beta_interv = cv.change_beta(days=[start_day, end_day],
                                 changes=[change, 1.0])
    edge_interv = cv.clip_edges(start_day=start_day,
                                end_day=end_day,
                                change=change,
                                verbose=True)
    sims.b.update_pars(interventions=beta_interv)
    sims.e.update_pars(interventions=edge_interv)

    for sim in sims.values():
        sim.run()
        if do_plot:
            sim.plot(do_save=do_save, do_show=do_show, fig_path=fig_path)
            sim.plot_result('r_eff')

    return sims
Example #14
File: sim.py Project: willf/covasim
    def finalize(self, verbose=None):
        ''' Compute final results, likelihood, etc. '''

        # Scale the results
        for reskey in self.reskeys:
            if self.results[reskey].scale == 'dynamic':
                self.results[reskey].values *= self.rescale_vec
            elif self.results[reskey].scale == 'static':
                self.results[reskey].values *= self['pop_scale']

        # Calculate cumulative results
        for key in cvd.result_flows.keys():
            self.results[f'cum_{key}'].values = np.cumsum(self.results[f'new_{key}'].values)
        self.results['cum_infections'].values += self['pop_infected'] * self.rescale_vec[0]  # Include initially infected people

        # Perform calculations on results
        self.compute_doubling()
        self.compute_r_eff()
        self.likelihood()

        # Convert results to an odict/objdict to allow e.g. sim.results.diagnoses
        # self.people = sc.odict({str(p):person for p,person in enumerate(self.people)}) # Convert to an odict for a better repr
        self.results = sc.objdict(self.results)
        self.results_ready = True

        return
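
A toy numeric illustration of the cumulative-results step above (the values are invented):
import numpy as np

new_infections = np.array([5, 3, 2, 4])
rescale_vec    = np.ones(4)   # no dynamic rescaling in this toy case
pop_infected   = 10           # initially infected people
cum_infections = np.cumsum(new_infections) + pop_infected * rescale_vec[0]
# -> array([15., 18., 20., 24.]): the seed infections sit on top of the running sum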
Example #15
def test_plot_schools_sizes_without_types(do_show=False, do_save=False):
    """Test that without school types, all schools are put together in one group."""
    sp.logger.info(
        "Creating schools where school types are not specified. Test school size distribution plotting method without school types. Note: For small population sizes, the expected and generated size distributions may not match very well given that the model is stochastic and demographics are based on much larger populations."
    )
    pars.with_school_types = False  # need to rerun the population
    pop = sp.Pop(**pars)
    kwargs = sc.objdict(sc.mergedicts(pars, pop.loc_pars))
    kwargs.datadir = sp.settings.datadir
    kwargs.do_show = do_show
    kwargs.do_save = do_save
    kwargs.screen_width_factor = 0.30
    kwargs.screen_height_factor = 0.20
    kwargs.width = 5
    kwargs.height = 3.2
    kwargs.figname = f"test_all_school_size_distributions_{kwargs.location}_pop"
    fig, ax = pop.plot_school_sizes(**kwargs)

    enrollment_by_school_type = pop.count_enrollment_by_school_type()
    school_types = list(enrollment_by_school_type.keys())

    assert school_types[0] is None and len(school_types) == 1, \
        f"Check 3 failed. School types created: {school_types}."

    return fig, ax, pop
Example #16
    def finalize(self, verbose=None):
        ''' Compute final results '''

        # Scale the results
        for reskey in self.result_keys():
            if self.results[reskey].scale == 'dynamic':
                self.results[reskey].values *= self.rescale_vec
            elif self.results[reskey].scale == 'static':
                self.results[reskey].values *= self['pop_scale']

        # Calculate cumulative results
        for key in cvd.result_flows.keys():
            self.results[f'cum_{key}'].values[:] = np.cumsum(self.results[f'new_{key}'].values)
        self.results['cum_infections'].values += self['pop_infected'] * self.rescale_vec[0]  # Include initially infected people

        # Final settings
        self.t -= 1  # During the run, this keeps track of the next step; restore this to be the final day of the sim
        self.results_ready = True  # Set this first so self.summary() knows to print the results
        self.initialized = False  # To enable re-running

        # Perform calculations on results
        self.compute_results(verbose=verbose)  # Calculate the rest of the results
        self.results = sc.objdict(self.results)  # Convert results to an odict/objdict to allow e.g. sim.results.diagnoses

        return
Example #17
    def finalize(self, verbose=None, restore_pars=True):
        ''' Compute final results '''

        if self.results_ready:
            # Because the results are rescaled in-place, finalizing the sim cannot be run more than once or
            # otherwise the scale factor will be applied multiple times
            raise Exception('Simulation has already been finalized')

        # Scale the results
        for reskey in self.result_keys():
            if self.results[reskey].scale: # Scale the result dynamically
                self.results[reskey].values *= self.rescale_vec

        # Calculate cumulative results
        for key in cvd.result_flows.keys():
            self.results[f'cum_{key}'][:] = np.cumsum(self.results[f'new_{key}'][:])
        self.results['cum_infections'].values += self['pop_infected']*self.rescale_vec[0] # Include initially infected people

        # Final settings
        self.results_ready = True # Set this first so self.summary() knows to print the results
        self.t -= 1 # During the run, this keeps track of the next step; restore this to be the final day of the sim

        # Perform calculations on results
        self.compute_results(verbose=verbose) # Calculate the rest of the results
        self.results = sc.objdict(self.results) # Convert results to an odict/objdict to allow e.g. sim.results.diagnoses

        if restore_pars and self._orig_pars:
            preserved = ['analyzers', 'interventions']
            orig_pars_keys = list(self._orig_pars.keys()) # Get a list of keys so we can iterate over them
            for key in orig_pars_keys:
                if key not in preserved:
                    self.pars[key] = self._orig_pars.pop(key) # Restore everything except for the analyzers and interventions

        return
Example #18
def test_workplace_contact_distribution(do_show, do_save,
                                        create_sample_pop_e2e,
                                        get_fig_dir_by_module):
    # calculate the workplace contacts count and plot
    sp.logger.info(
        "Test workplace contact distribution: workers in a workplace with size <= max_contacts must have "
        "contacts equal to worksize-1; for workers in a workplace with size > max_contacts, the distribution "
        "should be closer to a Poisson distribution with mean = max_contacts")
    plotting_kwargs = sc.objdict(do_show=do_show,
                                 do_save=do_save,
                                 figdir=get_fig_dir_by_module)
    contacts, contacts_by_id = cn.get_contact_counts_by_layer(
        create_sample_pop_e2e.popdict, layer="w", with_layer_ids=1)
    plotting_kwargs.append(
        "title_prefix",
        f"Total Workers = {len(contacts.get('wpid').get('all'))}")
    plotting_kwargs.append("figname", f"workers_contact_count")
    sp.plot_contact_counts(contacts, **plotting_kwargs)
    plotting_kwargs.remove("title_prefix")
    plotting_kwargs.remove("figname")

    # check workplace with worksize <= max_contacts
    max_contacts = create_sample_pop_e2e.max_contacts['W']
    upperbound = st.poisson.interval(alpha=0.95, mu=max_contacts)[1]
    group_size_contacts = {
        f'all_worksize_contacts size > {max_contacts//2}': [],  # capture size > max_contacts//2
        f'large_worksize_contacts size > {upperbound}': [],  # capture size > upperbound
        f'medium_large_worksize_contacts size between {max_contacts}, {upperbound}': [],  # capture size between max_contacts and upperbound
        f'small_medium_worksize_contacts size between {max_contacts//2}, {max_contacts}': [],  # capture size between max_contacts//2 and max_contacts
    }
    for k, v in contacts_by_id.items():
        if len(v) <= max_contacts // 2:
            assert len([i for i in v if i != len(v) - 1]) == 0, \
                f"Failed, not all contacts in workplace {k} are equal to {len(v) - 1}: {v}"
        else:
            if len(v) > upperbound:
                group_size_contacts[f'large_worksize_contacts size > {upperbound}'] += v
            elif len(v) >= max_contacts:
                group_size_contacts[f'medium_large_worksize_contacts size between {max_contacts}, {upperbound}'] += v
            else:
                group_size_contacts[f'small_medium_worksize_contacts size between {max_contacts//2}, {max_contacts}'] += v
            group_size_contacts[f'all_worksize_contacts size > {max_contacts//2}'] += v

    file_pattern = re.compile(r'([\s><=])')
    for i in group_size_contacts:
        plotting_kwargs["title_prefix"] = i
        plotting_kwargs["figname"] = file_pattern.sub("_", i)
        sp.check_truncated_poisson(testdata=group_size_contacts[i],
                                   mu=max_contacts,
                                   lowerbound=max_contacts // 2,
                                   skipcheck=("small" in i),  # only skip the strict check for the small-worksize group
                                   **plotting_kwargs)
Example #19
def test_doubling_time():

    sim = cv.Sim(pop_size=1000)
    sim.run(verbose=0)

    d = sc.objdict()

    # Test doubling time
    d.t1 = cv.get_doubling_time(
        sim, interval=[3, sim['n_days'] + 10],
        verbose=2)  # should reset end date to sim['n_days']
    d.t2 = cv.get_doubling_time(sim, start_day=3, end_day=sim['n_days'])
    d.t3 = cv.get_doubling_time(sim,
                                interval=[3, sim['n_days']],
                                exp_approx=True)
    d.t4 = cv.get_doubling_time(sim,
                                start_day=3,
                                end_day=sim['n_days'],
                                moving_window=4)  # should return array
    d.t5 = cv.get_doubling_time(
        sim,
        series=np.power(1.03, range(100)),
        interval=[3, 30],
        moving_window=3)  # Should be a series with values = 23.44977..
    d.t6 = cv.get_doubling_time(
        sim, start_day=9, end_day=20, moving_window=1, series="cum_infections"
    )  # Should recast window to 2 then return a series with 100s in it
    with pytest.raises(ValueError):
        d.t7 = cv.get_doubling_time(
            sim, start_day=3, end_day=20, moving_window=4,
            series="cum_deaths")  # Should fail, no growth in deaths

    print('NOTE: this test prints some warnings; these are intended.')
    return d
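
The exponential approximation exercised by d.t5 reduces to simple arithmetic; a sketch of the underlying calculation (not covasim's exact code):
import numpy as np

series = np.power(1.03, range(100))  # 3% daily growth, as used for d.t5
start, end = 3, 30
ratio = series[end] / series[start]
doubling_time = (end - start) * np.log(2) / np.log(ratio)  # ~23.45 days, matching the comment above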
Example #20
def savejson(study):
    dbname = 'calibrated_parameters_UK'

    sc.heading('Making results structure...')
    results = []
    failed_trials = []
    for trial in study.trials:
        data = {'index':trial.number, 'mismatch': trial.value}
        for key,val in trial.params.items():
            data[key] = val
        if data['mismatch'] is None:
            failed_trials.append(data['index'])
        else:
            results.append(data)
    print(f'Processed {len(study.trials)} trials; {len(failed_trials)} failed')

    sc.heading('Making data structure...')
    keys = ['index', 'mismatch'] + pkeys
    data = sc.objdict().make(keys=keys, vals=[])
    for i,r in enumerate(results):
        for key in keys:
            data[key].append(r[key])
    df = pd.DataFrame.from_dict(data)

    order = np.argsort(df['mismatch'])
    json = []
    for o in order:
        row = df.iloc[o,:].to_dict()
        rowdict = dict(index=row.pop('index'), mismatch=row.pop('mismatch'), pars={})
        for key,val in row.items():
            rowdict['pars'][key] = val
        json.append(rowdict)
    sc.savejson(f'{dbname}.json', json, indent=2)

    return
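
A hypothetical caller, assuming the calibration uses optuna and that the module-level pkeys lists the calibrated parameter names (as the function expects); the study name and storage path are illustrative:
import optuna

study = optuna.load_study(study_name='covasim_uk_calibration',
                          storage='sqlite:///covasim_uk_calibration.db')
savejson(study)  # writes calibrated_parameters_UK.json, sorted by mismatch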
Example #21
def get_default_colors():
    '''
    Specify plot colors -- used in sim.py.

    NB, includes duplicates since stocks and flows are named differently.
    '''
    c = sc.objdict()
    c.susceptible = '#4d771e'
    c.exposed = '#c78f65'
    c.exposed_by_strain = '#c75649'
    c.infectious = '#e45226'
    c.infectious_by_strain = c.infectious
    c.infections = '#b62413'
    c.reinfections = '#732e26'
    c.infections_by_strain = '#b62413'
    c.tests = '#aaa8ff'
    c.diagnoses = '#5f5cd2'
    c.diagnosed = c.diagnoses
    c.quarantined = '#5c399c'
    c.vaccinations = c.quarantined  # TODO: new color
    c.vaccinated = c.quarantined
    c.recoveries = '#9e1149'
    c.recovered = c.recoveries
    c.symptomatic = '#c1ad71'
    c.severe = '#c1981d'
    c.critical = '#b86113'
    c.deaths = '#000000'
    c.dead = c.deaths
    c.default = '#000000'
    c.pop_nabs = '#32733d'
    c.pop_protection = '#9e1149'
    c.pop_symp_protection = '#b86113'
    return c
Example #22
def get_bounds():
    ''' Set parameter starting points and bounds '''
    pdict = sc.objdict(
        pop_infected=dict(best=1000, lb=500, ub=2000),
        beta=dict(best=0.016, lb=0.012, ub=0.018),
        symp_test=dict(best=30, lb=20, ub=40),
        beta_change1=dict(best=0.7, lb=0.5, ub=0.9),
        beta_change2=dict(best=0.3, lb=0.2, ub=0.5),
    )

    # Convert from dicts to arrays
    pars = sc.objdict()
    for key in ['best', 'lb', 'ub']:
        pars[key] = np.array([v[key] for v in pdict.values()])

    return pars, pdict.keys()
Example #23
def test_separate_school_types_for_seattle_metro(do_show=False, do_save=False):
    """
    Notes:
        By default, when no location is given and use_default is set to True,
        data pulled in will be for seattle metro and school type data will
        default to previous seattle metro data with pre-k and elementary kept
        separate.
    """
    sp.logger.info(
        "Creating schools where pre-k and elementary schools are separate and school sizes are the same for all school types. Note: For small population sizes, the expected and generated size distributions may not match very well given that the model is stochastic and demographics are based on much larger populations."
    )
    test_pars = sc.dcp(pars)
    test_pars.location = None  # seattle_metro results with school size distribution the same for all types
    test_pars.state_location = None  # no state information; will call default data for seattle metro but create figure without location information in title
    test_pars.country_location = None  # no country information; will call default data for seattle metro but create figure without location information in title
    pop = sp.Pop(**test_pars)
    kwargs = sc.objdict(sc.dcp(test_pars))
    kwargs.do_show = do_show
    kwargs.do_save = do_save
    fig, ax = pop.plot_school_sizes(**kwargs)

    enrollment_by_school_type = pop.count_enrollment_by_school_type(
        **test_pars)
    school_types = enrollment_by_school_type.keys()

    assert ('pk' in school_types) and (
        'es' in school_types
    ), 'Check failed. pk and es school type are not separately created.'
    print('Check passed.')

    return fig, ax, pop, school_types
Example #24
    def finalize(self, verbose=None):
        ''' Compute final results, likelihood, etc. '''

        # Scale the results
        for reskey in self.result_keys():
            if self.results[reskey].scale == 'dynamic':
                self.results[reskey].values *= self.rescale_vec
            elif self.results[reskey].scale == 'static':
                self.results[reskey].values *= self['pop_scale']

        # Calculate cumulative results
        for key in cvd.result_flows.keys():
            self.results[f'cum_{key}'].values[:] = np.cumsum(self.results[f'new_{key}'].values)
        self.results['cum_infections'].values += self['pop_infected'] * self.rescale_vec[0]  # Include initially infected people

        # Perform calculations on results
        self.compute_results()

        # Convert results to an odict/objdict to allow e.g. sim.results.diagnoses
        self.results = sc.objdict(self.results)
        self.results_ready = True
        self.initialized = False  # To enable re-running

        return
Example #25
def reset_ticks(ax, sim=None, date_args=None, start_day=None):
    ''' Set the tick marks, using dates by default '''

    # Handle options
    date_args = sc.objdict(date_args)  # Ensure it's not a regular dict
    if start_day is None and sim is not None:
        start_day = sim['start_day']

    # Handle start and end days
    xmin, xmax = ax.get_xlim()
    if date_args.start_day:
        xmin = float(sc.day(date_args.start_day,
                            start_day=start_day))  # Keep original type (float)
    if date_args.end_day:
        xmax = float(sc.day(date_args.end_day, start_day=start_day))
    ax.set_xlim([xmin, xmax])

    # Set the x-axis intervals
    if date_args.interval:
        ax.set_xticks(np.arange(xmin, xmax + 1, date_args.interval))

    # Set xticks as dates
    if date_args.as_dates:

        date_formatter(start_day=start_day,
                       dateformat=date_args.dateformat,
                       ax=ax)
        if not date_args.interval:
            ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))

    # Handle rotation
    if date_args.rotation:
        ax.tick_params(axis='x', labelrotation=date_args.rotation)

    return
Example #26
def run_sim(args):
    sim = covid_abm.Sim()
    sim.pars['r_contact'] = args.r
    sim.pars['incub'] = args.incub
    loglike = sim.likelihood(verbose=0)
    output = sc.objdict({'i': args.i, 'j': args.j, 'loglike': loglike})
    return output
Example #27
def test_pop_options(doplot=False): # If being run via pytest, turn off plotting
    sc.heading('Basic populations tests')

    # Define population choices and betas
    popchoices = {'random':0.015, 'hybrid':0.015, 'synthpops':0.020}

    basepars = {
        'pop_size': 5000,
        'pop_infected': 10,
        'n_days': 90,
        }

    sims = sc.objdict()
    for popchoice,beta in popchoices.items():
        sc.heading(f'Running {popchoice}')
        sims[popchoice] = cv.Sim()
        sims[popchoice].update_pars(basepars)
        sims[popchoice]['pop_type'] = popchoice
        sims[popchoice]['beta'] = beta
        sims[popchoice].run()

    if doplot:
        for key,sim in sims.items():
            sim.plot()
            try:
                pl.gcf().axes[0].set_title(f'Counts: {key}')
            except:
                pass

    return sims
Example #28
    def default_plotting_kwargs(self):
        """Define default plotting kwargs to be used in plotting methods."""
        default_kwargs = sc.objdict()
        default_kwargs.fontfamily = 'Roboto Condensed'
        default_kwargs.fontstyle = 'normal'
        default_kwargs.fontvariant = 'normal'
        default_kwargs.fontweight = 400
        default_kwargs.fontsize = 8
        default_kwargs.format = 'png'
        default_kwargs.rotation = 0
        default_kwargs.subplot_height = 5
        default_kwargs.subplot_width = 8
        default_kwargs.hspace = 0.4
        default_kwargs.wspace = 0.3
        default_kwargs.nrows = 1
        default_kwargs.ncols = 1
        default_kwargs.height = default_kwargs.nrows * default_kwargs.subplot_height
        default_kwargs.width = default_kwargs.ncols * default_kwargs.subplot_width
        default_kwargs.show = 1
        default_kwargs.cmap = 'cmr.freeze_r'
        default_kwargs.markersize = 6
        default_kwargs.display_dpi = int(
            os.getenv('SYNTHPOPS_DPI', plt.rcParams['figure.dpi']))
        default_kwargs.save_dpi = 300
        default_kwargs.screen_width = 1366
        default_kwargs.screen_height = 768
        default_kwargs.screen_height_factor = 0.85
        default_kwargs.screen_width_factor = 0.3
        default_kwargs.do_show = False
        default_kwargs.do_save = False
        default_kwargs.figdir = None

        return default_kwargs
Example #29
def make_safegraph(sim):
    ''' Create interventions representing SafeGraph data '''

    # Load the data
    fn = safegraph_file
    df = pd.read_csv(fn)
    week = df['week']
    s = df['p.tot.schools'].values
    w = df['p.tot.no.schools'].values
    c = sc.dcp(w)  # Not different enough to warrant different values

    # Do processing
    days = sim.day(week.values.tolist())
    last_day = days[-1] + 1
    i_days = np.arange(days[0], last_day)
    s = np.interp(i_days, days, s)
    w = np.interp(i_days, days, w)
    c = np.interp(i_days, days, c)
    days = i_days

    # Create interventions
    interventions = [
        cv.clip_edges(days=days, changes=s, layers='s', label='clip_s'),
        cv.clip_edges(days=days, changes=w, layers='w', label='clip_w'),
        cv.clip_edges(days=days, changes=c, layers='c', label='clip_c'),
    ]
    sim.intervention_info.ce = sc.objdict({'days': days, 'changes': w})

    return interventions
Example #30
def autolabel(ax, rects, h_offset=0, v_offset=0.3, **kwargs):
    """
    Attach a text label above each bar in *rects*, displaying its height.

    Args:
        ax                 : Matplotlib.axes object
        rects              : Matplotlib.container.BarContainer
        h_offset (float)   : Horizontal offset of the text, in points
        v_offset (float)   : Vertical offset of the text, in points
        **fontsize (float) : Default fontsize

    Returns:
        None.

    Set the annotation according to the input parameters
    """
    method_defaults = dict(fontsize=10)  # in case kwargs does not have fontsize, add it
    kwargs = sc.mergedicts(method_defaults, kwargs)  # let kwargs override method defaults
    kwargs = sc.objdict(kwargs)
    for rect in rects:
        height = rect.get_height()
        text = ax.annotate('{}'.format(round(height, 3)),
                           xy=(rect.get_x() + rect.get_width() / 2, height),
                           xytext=(h_offset, v_offset),
                           textcoords="offset points",
                           ha='center',
                           va='bottom')
        text.set_fontsize(kwargs.fontsize)
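
A usage sketch for autolabel; the data are made up, and the function assumes sciris is imported as sc in its own module:
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
rects = ax.bar(['pk', 'es', 'ms'], [1.234, 2.5, 0.75])  # a BarContainer, as autolabel expects
autolabel(ax, rects, fontsize=8)                        # annotate each bar with its rounded height
plt.show()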