Example No. 1

    def check_result(self, actual_file, expected_file=None, prefix="test"):
        if not os.path.isfile(actual_file):
            raise FileNotFoundError(actual_file)
        if expected_file is None:
            expected_file = os.path.join(os.path.dirname(__file__), 'expected', os.path.basename(actual_file))
        if not os.path.isfile(expected_file):
            raise FileNotFoundError(expected_file)
        expected = self.cast_uid_toINT(sc.loadjson(expected_file))
        actual = self.cast_uid_toINT(sc.loadjson(actual_file))
        #self.check_similarity(actual, expected)

        # generate the figures for comparison
        for code in ['H', 'W', 'S']:
            for kind in ['density', 'frequency']:
                fig = plot_age_mixing_matrices.test_plot_generated_contact_matrix(setting_code=code,
                                                                                  population=expected,
                                                                                  title_prefix="Baseline_",
                                                                                  density_or_frequency=kind)
                #fig.show()
                fig.savefig(os.path.join(self.figDir, f"{prefix}_{code}_{kind}_expected.png"))
                fig = plot_age_mixing_matrices.test_plot_generated_contact_matrix(setting_code=code,
                                                                                  population=actual,
                                                                                  title_prefix="Actual_",
                                                                                  density_or_frequency=kind)
                #fig.show()
                fig.savefig(os.path.join(self.figDir, f"{prefix}_{code}_{kind}_actual.png"))
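
The helper cast_uid_toINT is defined elsewhere in the test class. A minimal sketch of what such a converter typically does, based on how the loaded JSON is used here (an assumption, not the project's actual implementation):

    def cast_uid_toINT(self, pop):
        # JSON object keys are always strings, so person UIDs load back as
        # "0", "1", ...; cast them to int so lookups and comparisons against
        # the in-memory population behave as expected. (Hypothetical sketch.)
        return {int(uid): person for uid, person in pop.items()}
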
Example No. 2

 def check_result(self,
                  actual_folder,
                  expected_folder=None,
                  test_prefix="test",
                  decimal=3):
     passed = True
     checked = False
     failed_cases = []
     if not os.path.exists(actual_folder):
         raise FileNotFoundError(actual_folder)
     if expected_folder is None:
         expected_folder = os.path.join(self.expectedDir, test_prefix)
     if not os.path.exists(expected_folder):
         raise FileNotFoundError(
             f"{expected_folder} does not exist, use cls.generateBaseline = True to generate them"
         )
     for f in os.listdir(expected_folder):
         print(f"\n{f}")
         if f.endswith(".csv"):
             checked = True
             expected_data = np.loadtxt(os.path.join(expected_folder, f),
                                        delimiter=",")
             actual_data = np.loadtxt(os.path.join(actual_folder, f),
                                      delimiter=",")
             if (np.round(expected_data,
                          decimal) == np.round(actual_data, decimal)).all():
                 print("values unchanged, passed")
             else:
                 passed = False
                 failed_cases.append(
                     os.path.basename(f).replace(".csv", "*"))
                 print("result has been changed in these indexes:\n",
                       np.where(expected_data != actual_data)[0])
         elif f.endswith(".json"):
             expected_data = sc.loadjson(os.path.join(expected_folder, f))
             actual_data = sc.loadjson(os.path.join(actual_folder, f))
             if expected_data == actual_data:
                 print("values unchanged, passed")
             else:
                 passed = False
                 failed_cases.append(
                     os.path.basename(f).replace(".json", "*"))
                 diff = set(expected_data.items()).symmetric_difference(
                     actual_data.items())
                 print("result has been changed in:\n", diff)
         else:
             print("ignored.\n")
     return passed & checked, failed_cases
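
A caveat on the CSV comparison above: rounding both arrays and testing equality can report a failure for values that differ by far less than the tolerance, if they straddle a rounding boundary. np.allclose is the usual alternative; a standalone demonstration (not part of the original test):

import numpy as np

a = np.array([0.12349999])
b = np.array([0.12350001])  # Differs from a by only 2e-8

print((np.round(a, 3) == np.round(b, 3)).all())  # False: rounds to 0.123 vs 0.124
print(np.allclose(a, b, atol=1e-3))              # True: within the intended tolerance
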
Example No. 3

def git_info(filename=None, check=False, old_info=None, die=False, verbose=True, **kwargs):
    '''
    Get current git information and optionally write it to disk.

    Args:
        filename (str): name of the file to write to or read from
        check (bool): whether or not to compare two git versions
        old_info (dict): dictionary of information to check against
        die (bool): whether or not to raise an exception if the check fails
        verbose (bool): whether to print the mismatch if the check fails
        kwargs (dict): passed to sc.savejson() (if check=False) or sc.loadjson() (if check=True)

    **Examples**::

        cv.git_info('covasim_version.json') # Writes to disk
        cv.git_info('covasim_version.json', check=True) # Checks that current version matches saved file
    '''
    info = sc.gitinfo(__file__)
    output = None # Ensure output is defined when check=True, so the return below doesn't fail
    if not check: # Just get information
        if filename is not None:
            output = sc.savejson(filename, info, **kwargs)
        else:
            output = info
    else:
        if filename is not None:
            old_info = sc.loadjson(filename, **kwargs)
        string = ''
        if info != old_info:
            string = f'Git information differs: {info} vs. {old_info}'
            if die:
                raise ValueError(string)
            elif verbose:
                print(string)
    return output
Example No. 4

def upload_pars(fname):
    parameters = sc.loadjson(fname)
    if not isinstance(parameters, dict):
        raise TypeError(f'Uploaded file was a {type(parameters)} object rather than a dict')
    if 'sim_pars' not in parameters or 'epi_pars' not in parameters:
        raise KeyError(f'Parameters file must have keys "sim_pars" and "epi_pars", not {parameters.keys()}')
    return parameters
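
A minimal round trip showing the structure upload_pars expects (the filename and values here are made up for illustration):

import sciris as sc

pars = {'sim_pars': {'pop_size': 20000}, 'epi_pars': {'beta': 0.015}}
sc.savejson('example_pars.json', pars) # Hypothetical filename
loaded = upload_pars('example_pars.json')
assert loaded['epi_pars']['beta'] == 0.015
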
Example No. 5

def test_calib():

    entry = sc.loadjson(calibfile)[0]
    params = sc.dcp(entry['pars'])
    params['rand_seed'] = int(entry['index'])

    scen = generate_scenarios()['all_remote']
    testing = generate_testing()['None']
    #testing[0]['delay'] = 0
    for stype, spec in scen.items():
        if spec is not None:
            spec['testing'] = testing
    scen['testing'] = testing
    scen['es']['verbose'] = scen['ms']['verbose'] = scen['hs']['verbose'] = debug

    sim = cs.create_sim(params, pop_size=pop_size, folder=folder)

    sm = cvsch.schools_manager(scen)
    sim['interventions'] += [sm]

    sim.run(keep_people=debug)

    stats = evaluate_sim(sim)
    print(stats)

    if debug:
        sim.plot(to_plot='overview')
        #t = sim.make_transtree()
    else:
        sim.plot()

    cv.savefig('sim.png')

    return sim
Example No. 6

def git_info(filename=None, check=False, comments=None, old_info=None, die=False, indent=2, verbose=True, frame=2, **kwargs):
    '''
    Get current git information and optionally write it to disk. Simplest usage
    is cv.git_info(__file__)

    Args:
        filename  (str): name of the file to write to or read from
        check    (bool): whether or not to compare two git versions
        comments (dict): additional comments to include in the file
        old_info (dict): dictionary of information to check against
        die      (bool): whether or not to raise an exception if the check fails
        indent    (int): how many indents to use when writing the file to disk
        verbose  (bool): detail to print
        frame     (int): how many frames back to look for caller info
        kwargs   (dict): passed to sc.loadjson() (if check=True) or sc.savejson() (if check=False)

    **Examples**::

        cv.git_info() # Return information
        cv.git_info(__file__) # Writes to disk
        cv.git_info('covasim_version.gitinfo') # Writes to disk
        cv.git_info('covasim_version.gitinfo', check=True) # Checks that current version matches saved file
    '''

    # Handle the case where __file__ is supplied as the argument
    if isinstance(filename, str) and filename.endswith('.py'):
        filename = filename.replace('.py', '.gitinfo')

    # Get git info
    calling_file = sc.makefilepath(sc.getcaller(frame=frame, tostring=False)['filename'])
    cv_info = {'version':cvv.__version__}
    cv_info.update(sc.gitinfo(__file__, verbose=False))
    caller_info = sc.gitinfo(calling_file, verbose=False)
    caller_info['filename'] = calling_file
    info = {'covasim':cv_info, 'called_by':caller_info}
    if comments:
        info['comments'] = comments

    # Just get information and optionally write to disk
    if not check:
        if filename is not None:
            output = sc.savejson(filename, info, indent=indent, **kwargs)
        else:
            output = info
        return output

    # Check if versions match, and optionally raise an error
    else:
        if filename is not None:
            old_info = sc.loadjson(filename, **kwargs)
        string = ''
        old_cv_info = old_info['covasim'] if 'covasim' in old_info else old_info
        if cv_info != old_cv_info: # pragma: no cover
            string = f'Git information differs: {cv_info} vs. {old_cv_info}'
            if die:
                raise ValueError(string)
            elif verbose:
                print(string)
        return
Example No. 7

def create_sim_from_calibrated_pars(filename):
    '''Wrapper around create_sim that reads the parameters from a file'''
    pars_calib = sc.loadjson(filename)
    # Take care to use the same order that create_sim expects!
    pars = [
        pars_calib["pop_infected"], pars_calib["beta"], pars_calib["beta_day"],
        pars_calib["beta_change"], pars_calib["symp_test"]
    ]
    return create_sim(pars)
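
Since create_sim takes the values positionally, the ordering lives implicitly in this list. One way to make it explicit is to keep the expected order in a named constant; a sketch assuming the same five parameter names:

CALIB_PAR_ORDER = ('pop_infected', 'beta', 'beta_day', 'beta_change', 'symp_test') # Hypothetical constant

def create_sim_from_calibrated_pars(filename):
    '''Variant of the wrapper above, with the ordering kept in one place'''
    pars_calib = sc.loadjson(filename)
    pars = [pars_calib[name] for name in CALIB_PAR_ORDER]
    return create_sim(pars)
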
Example No. 8

def test_baseline():
    ''' Compare the current default sim against the saved baseline '''

    # Load existing baseline
    filepath = sc.makefilepath(filename=baseline_filename,
                               folder=sc.thisdir(__file__))
    baseline = sc.loadjson(filepath)
    old = baseline[baseline_key]

    # Calculate new baseline
    sim = cv.Sim(verbose=0)
    sim.run()
    new = sim.summary

    # Compare keys
    errormsg = ''
    old_keys = set(old.keys())
    new_keys = set(new.keys())
    if old_keys != new_keys:
        errormsg = "Keys don't match!\n"
        missing = old_keys - new_keys
        extra = new_keys - old_keys
        if missing:
            errormsg += f'  Missing old keys: {missing}\n'
        if extra:
            errormsg += f'  Extra new keys: {extra}\n'

    mismatches = {}
    for key in old_keys.union(new_keys):
        old_val = old[key] if key in old else 'not present'
        new_val = new[key] if key in new else 'not present'
        if old_val != new_val:
            mismatches[key] = {'old': old_val, 'new': new_val}

    if len(mismatches):
        errormsg += '\nThe following values have changed between old and new!\n'
        errormsg += 'Please rerun "tests/unittests/update_baseline" if this is intentional.\n'
        errormsg += 'Mismatches:\n'
        space = ' ' * 17
        for mkey, mval in mismatches.items():
            errormsg += f'  {mkey}:\n'
            errormsg += f'{space}old = {mval["old"]}\n'
            errormsg += f'{space}new = {mval["new"]}\n'

    # Raise an error if mismatches were found
    if errormsg:
        prefix = '\nThe following values have changed between the previous baseline and now!\n'
        prefix += 'If this is intentional, please rerun "update_baseline" and commit.\n\n'
        err = prefix + errormsg
        raise ValueError(err)
    else:
        print('Baseline matches')

    return new
Example No. 9

def get_version_pars(version, verbose=True):
    '''
    Function for loading parameters from the specified version.

    Args:
        version (str): the version to load parameters from

    Returns:
        Dictionary of parameters from that version
    '''

    # Define mappings for available sets of parameters -- note that this must be manually updated from the changelog
    match_map = {
        '0.30.4': ['0.30.4'],
        '0.31.0': ['0.31.0'],
        '0.32.0': ['0.32.0'],
        '1.0.0': ['1.0.0'],
        '1.0.1': [f'1.0.{i}' for i in range(1, 4)],
        '1.1.0': ['1.1.0'],
        '1.1.1': [f'1.1.{i}' for i in range(1, 3)],
        '1.1.3': [f'1.1.{i}' for i in range(3, 8)],
        '1.2.0': [f'1.2.{i}' for i in range(4)],
        '1.3.0': [f'1.3.{i}' for i in range(6)],
        '1.4.0': [f'1.4.{i}' for i in range(9)],
        '1.5.0': [f'1.5.{i}' for i in range(4)] + [f'1.6.{i}' for i in range(2)] + [f'1.7.{i}' for i in range(7)],
        '2.0.0': [f'2.0.{i}' for i in range(5)] + ['2.1.0'],
        '2.1.1': [f'2.1.{i}' for i in range(1, 3)],
        '3.0.0': ['3.0.0'],
    }

    # Find and check the match
    match = None
    for ver, verlist in match_map.items():
        if version in verlist:
            match = ver
            break
    if match is None:  # pragma: no cover
        options = '\n'.join(sum(match_map.values(), []))
        errormsg = f'Could not find version "{version}" among options:\n{options}'
        raise ValueError(errormsg)

    # Load the parameters
    filename = f'pars_v{match}.json'
    regression_folder = sc.thisdir(__file__, 'regression')
    pars = sc.loadjson(filename=filename, folder=regression_folder)
    if verbose:
        print(f'Loaded parameters from {match}')

    return pars
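
The linear scan over match_map can equivalently be written as a single dictionary lookup by inverting the map once; a sketch, not the original code:

# Invert {parameter version: [covasim versions]} into a direct lookup table
reverse_map = {v: parver for parver, verlist in match_map.items() for v in verlist}
match = reverse_map.get(version) # None if the version is unknown
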
Example No. 10

def longway():
    pop_size = 2.25e5
    calibfile = os.path.join(
        folder,
        'pars_cases_begin=75_cases_end=75_re=1.0_prevalence=0.002_yield=0.024_tests=225_pop_size=225000.json'
    )
    par_list = sc.loadjson(calibfile)[par_inds[0]:par_inds[1]]
    scen = generate_scenarios()['as_normal']

    for stype, cfg in scen.items():
        if cfg:
            cfg['start_day'] = '2020-09-07'  # Move school start earlier

    # Configure and run the sims
    sims = []
    for eidx, entry in enumerate(par_list):
        par = sc.dcp(entry['pars'])
        par['rand_seed'] = int(entry['index'])

        # Clunky, but check that the population exists
        pop_seed = par['rand_seed'] % 5
        popfile = os.path.join(folder, 'inputs', f'kc_synthpops_clustered_{int(pop_size)}_withstaff_seed{pop_seed}.ppl')
        if not os.path.exists(popfile):
            print(f'Population file {popfile} not found, recreating...')
            cvsch.make_population(pop_size=pop_size,
                                  rand_seed=par['rand_seed'],
                                  max_pop_seeds=5,
                                  popfile=popfile,
                                  do_save=True)

        par['pop_infected'] = 0  # Do NOT seed infections
        par['beta_layer'] = dict(
            h=0.0, s=0.0, w=0.0, c=0.0, l=0.0
        )  # Turn off transmission in other layers, looking for in-school R0
        sim = cs.create_sim(par, pop_size=pop_size, folder=folder)

        delay = sim.day('2020-09-16')  # Pick a Monday
        sim['interventions'] += [
            cvsch.schools_manager(scen),
            seed_schools(delay=delay, n_infections=1, choose_students=False)
        ]
        sims.append(sim)

    msim = cv.MultiSim(sims)
    msim.run(keep_people=True)

    return msim
Example No. 11

def test_baseline():
    ''' Compare the current default sim against the saved baseline '''

    # Load existing baseline
    baseline = sc.loadjson(baseline_filename)
    old = baseline['summary']

    # Calculate new baseline
    new = make_sim()
    new.run()

    # Compute the comparison
    cv.diff_sims(old, new, die=True)

    return new
Example No. 12

    def load(self, filename, verbose=True, **kwargs):
        '''
        Load current settings from a JSON file.

        Args:
            filename (str): file to load
            verbose (bool): whether to print a confirmation message
            kwargs (dict): passed to ``sc.loadjson()``
        '''
        json = sc.loadjson(filename=filename, **kwargs)
        current = self.to_dict()
        new = {k: v
               for k, v in json.items()
               if v != current[k]}  # Don't reset keys that haven't changed
        self.set(**new)
        if verbose: print(f'Settings loaded from {filename}')
        return
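
The dictionary comprehension does the real work here: only keys whose loaded values differ from the current settings are re-applied. The same idiom in isolation, with made-up settings:

current = {'verbose': 1, 'dpi': 100, 'font_size': 10}
loaded  = {'verbose': 1, 'dpi': 300, 'font_size': 10}
new = {k: v for k, v in loaded.items() if v != current[k]}
assert new == {'dpi': 300} # Only the changed key gets passed on
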
Example No. 13

def get_version_pars(version, verbose=True):
    '''
    Function for loading parameters from the specified version.

    Parameters will be loaded for Covasim 'as at' the requested version i.e. the
    most recent set of parameters that is <= the requested version. Available
    parameter values are stored in the regression folder. If parameters are available
    for versions 1.3, and 1.4, then this function will return the following

    - If parameters for version '1.3' are requested, parameters will be returned from '1.3'
    - If parameters for version '1.3.5' are requested, parameters will be returned from '1.3', since
      Covasim at version 1.3.5 would have been using the parameters defined at version 1.3.
    - If parameters for version '1.4' are requested, parameters will be returned from '1.4'

    Args:
        version (str): the version to load parameters from

    Returns:
        Dictionary of parameters from that version
    '''

    # Construct a sorted list of available parameters based on the files in the regression folder
    regression_folder = sc.thisdir(__file__, 'regression', aspath=True)
    available_versions = [
        x.stem.replace('pars_v', '') for x in regression_folder.iterdir()
        if x.suffix == '.json'
    ]
    available_versions = sorted(available_versions, key=LooseVersion)

    # Find the highest parameter version that is <= the requested version
    version_comparison = [
        sc.compareversions(version, v) >= 0 for v in available_versions
    ]
    try:
        target_version = available_versions[sc.findlast(version_comparison)]
    except IndexError:
        errormsg = f"Could not find a parameter version that was less than or equal to '{version}'. Available versions are {available_versions}"
        raise ValueError(errormsg)

    # Load the parameters
    pars = sc.loadjson(filename=regression_folder / f'pars_v{target_version}.json')
    if verbose:
        print(f'Loaded parameters from {target_version}')

    return pars
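
The selection rule (the most recent parameter set at or below the requested version) can be exercised in isolation; a self-contained sketch using the same LooseVersion ordering, with hypothetical version strings:

from distutils.version import LooseVersion

available = sorted(['1.3.0', '1.4.0', '2.0.0'], key=LooseVersion)

def pars_version_for(requested):
    # Highest available version that is <= the requested one
    candidates = [v for v in available if LooseVersion(v) <= LooseVersion(requested)]
    if not candidates:
        raise ValueError(f'No parameter version at or below {requested}')
    return candidates[-1]

assert pars_version_for('1.3.5') == '1.3.0' # Matches the docstring's example
assert pars_version_for('1.4.0') == '1.4.0'
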
Example No. 14

def test_benchmark(do_save=do_save):
    ''' Compare benchmark performance '''

    print('Running benchmark...')
    previous = sc.loadjson(benchmark_filename)

    # Create the sim
    sim = cv.Sim(verbose=0)

    # Time initialization
    t0 = sc.tic()
    sim.initialize()
    t_init = sc.toc(t0, output=True)

    # Time running
    t0 = sc.tic()
    sim.run()
    t_run = sc.toc(t0, output=True)

    # Construct json
    n_decimals = 3
    json = {
        'time': {
            'initialize': round(t_init, n_decimals),
            'run': round(t_run, n_decimals),
        },
        'parameters': {
            'pop_size': sim['pop_size'],
            'pop_type': sim['pop_type'],
            'n_days': sim['n_days'],
        },
    }

    print('Previous benchmark:')
    sc.pp(previous)

    print('\nNew benchmark:')
    sc.pp(json)

    if do_save:
        sc.savejson(filename=benchmark_filename, obj=json, indent=2)

    print('Done.')

    return json
Example No. 15

def generate_pars(res, incs):
    pars = []
    for re in res:
        for inc in incs:
            rel_trans = False
            if rel_trans:
                jsonfile = f'optimization_school_reopening_re_{re}_cases_{inc}_{int(pop_size)}_reltrans.json'
            else:
                jsonfile = f'optimization_school_reopening_re_{re}_cases_{inc}_{int(pop_size)}.json'
            json = sc.loadjson(jsonfile)

            for entry in json[:n_seeds]:
                p = entry['pars']
                p['rand_seed'] = int(entry['index'])
                # These are not model parameters, but useful to have later
                p['re'] = re
                p['inc'] = inc
                pars.append(p)

    return pars
Example No. 16

def get_version_pars(version, verbose=True):
    '''
    Function for loading parameters from the specified version.

    Args:
        version (str): the version to load parameters from

    Returns:
        Dictionary of parameters from that version
    '''
    regression_folder = sc.thisdir(__file__, 'regression')
    pattern = 'pars_v*.json'
    requested = pattern.replace('*', version)
    filepaths = sc.getfilelist(regression_folder, pattern=pattern)
    files = [os.path.basename(f) for f in filepaths]
    if requested in files:  # If there's an exact match
        match = requested
    else:  # No match, find the nearest matching file
        withmatch = files + [requested]  # Add this version
        withmatch.sort()  # Sort the files
        index = withmatch.index(requested)
        if index > 0:
            match = withmatch[index - 1]  # Get latest earlier version -- note that this assumes versions are in alphabetical order, which they currently are!
        else:
            filestr = '\n'.join(files)
            errormsg = f'Could not find version {version} among options:\n{filestr}'
            raise ValueError(errormsg)

    # Load the parameters
    pars = sc.loadjson(filename=match, folder=regression_folder)
    if verbose:
        if match == requested:
            print(f'Loaded parameters from {match}')
        else:
            print(
                f'No exact match for parameters "{version}" found; using "{match}" instead'
            )

    return pars
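
As the inline comment concedes, this variant relies on filenames sorting in version order, which breaks once any version component reaches two digits; a quick demonstration (not from the original):

files = ['pars_v1.2.0.json', 'pars_v1.10.0.json']
print(sorted(files))
# ['pars_v1.10.0.json', 'pars_v1.2.0.json'] -- '1.10' sorts before '1.2' alphabetically
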
Example No. 17

def benchmark_schools():

    entry = sc.loadjson(calibfile)[0]
    params = sc.dcp(entry['pars'])
    params['rand_seed'] = int(entry['index'])

    scen = generate_scenarios()['with_countermeasures']
    testing = generate_testing()['Antigen every 2w, PCR f/u']
    #testing[0]['delay'] = 0
    for stype, spec in scen.items():
        if spec is not None:
            spec['testing'] = testing
    scen['testing'] = testing
    scen['es']['verbose'] = scen['ms']['verbose'] = scen['hs']['verbose'] = debug

    sim = cs.create_sim(params, pop_size=pop_size, folder=folder, verbose=0.1)

    if use_intervention:
        sm = cvsch.schools_manager(scen)
        sim['interventions'] += [sm]

    sim.run(keep_people=debug)

    stats = evaluate_sim(sim)
    print(stats)

    if debug:
        sim.plot(to_plot='overview')
        #t = sim.make_transtree()
    else:
        sim.plot()

    #sim.save('test.sim')
    #cv.savefig('sim.png')

    return sim
Example No. 18

bypass      = True # Whether to use a small population size
do_run      = True # Whether to rerun instead of load saved run
keep_people = False # Whether to keep people when running
parallelize = True # If running, whether to parallelize
do_save     = True # If rerunning, whether to save sims
do_plot     = True # Whether to plot results

rand_seed = 2346 # Overwrite the default random seed
folder = '../testing_in_schools/v20201019'
bypass_popfile = 'explore_scenarios_small.ppl'
sims_file = 'explore_scenarios.sims'
pop_size = int(20e3) if bypass else int(2.25e5)
calibfile = os.path.join(folder, 'pars_cases_begin=75_cases_end=75_re=1.0_prevalence=0.002_yield=0.024_tests=225_pop_size=225000.json')

try:
    entry = sc.loadjson(calibfile)[1]
except Exception as E:
    entry = {
        "index": 376.0,
        "mismatch": 0.03221581045452142,
        "pars": {
            "pop_infected": 242.11186358945181,
            "change_beta": 0.5313884845187986,
            "symp_prob": 0.08250498122080606
        }
    }
    print(f'Warning: could not load calibration file "{calibfile}" due to "{str(E)}", using hard-coded parameters')
params = sc.dcp(entry['pars'])
if rand_seed is None:
    params['rand_seed'] = int(entry['index'])
else:
    params['rand_seed'] = rand_seed # Use the override specified above
Example No. 19

def test_baseline():
    ''' Compare the current default sim against the saved baseline '''

    # Load existing baseline
    baseline = sc.loadjson(baseline_filename)
    old = baseline[baseline_key]

    # Calculate new baseline
    sim = cv.Sim(verbose=0)
    sim.run()
    new = sim.summary

    # Compare keys
    errormsg = ''
    old_keys = set(old.keys())
    new_keys = set(new.keys())
    if old_keys != new_keys:
        errormsg = "Keys don't match!\n"
        missing = old_keys - new_keys
        extra   = new_keys - old_keys
        if missing:
            errormsg += f'  Missing old keys: {missing}\n'
        if extra:
            errormsg += f'  Extra new keys: {extra}\n'

    mismatches = {}
    union = old_keys.union(new_keys)
    for key in new.keys(): # To ensure order
        if key in union:
            old_val = old[key] if key in old else 'not present'
            new_val = new[key] if key in new else 'not present'
            if old_val != new_val:
                mismatches[key] = {'old': old_val, 'new': new_val}

    if len(mismatches):
        errormsg += '\nThe following values have changed from the previous baseline!\n'
        errormsg += 'If this is intentional, please rerun "tests/update_baseline" and commit.\n'
        errormsg += 'Mismatches:\n'
        df = pd.DataFrame.from_dict(mismatches).transpose()
        diff   = []
        ratio  = []
        change = []
        small_change = 1e-3 # Define a small change, e.g. a rounding error
        for mdict in mismatches.values():
            old_val = mdict['old'] # Local names, so the old/new summary dicts above aren't clobbered
            new_val = mdict['new']
            if sc.isnumber(new_val) and sc.isnumber(old_val) and old_val > 0:
                this_diff  = new_val - old_val
                this_ratio = new_val/old_val
                abs_ratio  = max(this_ratio, 1.0/this_ratio)

                # Set the character to use
                if abs_ratio < 1 + small_change: # Within rounding error (abs_ratio is always >= 1)
                    change_char = '≈'
                elif new_val > old_val:
                    change_char = '↑'
                elif new_val < old_val:
                    change_char = '↓'
                else:
                    errormsg = f'Could not determine relationship between old={old_val} and new={new_val}'
                    raise ValueError(errormsg)

                # Set how many repeats it should have
                repeats = 1
                if abs_ratio >= 1.1:
                    repeats = 2
                if abs_ratio >= 2:
                    repeats = 3
                if abs_ratio >= 10:
                    repeats = 4

                this_change = change_char*repeats
            else:
                this_diff   = np.nan
                this_ratio  = np.nan
                this_change = 'N/A'

            diff.append(this_diff)
            ratio.append(this_ratio)
            change.append(this_change)

        df['diff']   = diff
        df['ratio']  = ratio
        for col in ['old', 'new', 'diff', 'ratio']:
            df[col] = df[col].round(decimals=3)
        df['change'] = change
        errormsg += str(df)

    # Raise an error if mismatches were found
    if errormsg:
        raise ValueError(errormsg)
    else:
        print('Baseline matches')

    return new
Example No. 20

def test_benchmark(do_save=do_save):
    ''' Compare benchmark performance '''

    print('Running benchmark...')
    previous = sc.loadjson(benchmark_filename)

    repeats = 5
    t_inits = []
    t_runs  = []

    def normalize_performance():
        ''' Normalize performance across CPUs -- simple Numpy calculation '''
        t_bls = []
        bl_repeats = 5
        n_outer = 10
        n_inner = 1e6
        for r in range(bl_repeats):
            t0 = sc.tic()
            for i in range(n_outer):
                a = np.random.random(int(n_inner))
                b = np.random.random(int(n_inner))
                a*b
            t_bl = sc.toc(t0, output=True)
            t_bls.append(t_bl)
        t_bl = min(t_bls)
        reference = 0.112 # Benchmarked on an Intel i9-8950HK CPU @ 2.90GHz
        ratio = reference/t_bl
        return ratio


    # Test CPU performance before the run
    r1 = normalize_performance()

    # Do the actual benchmarking
    for r in range(repeats):

        # Create the sim
        sim = cv.Sim(verbose=0)

        # Time initialization
        t0 = sc.tic()
        sim.initialize()
        t_init = sc.toc(t0, output=True)

        # Time running
        t0 = sc.tic()
        sim.run()
        t_run = sc.toc(t0, output=True)

        # Store results
        t_inits.append(t_init)
        t_runs.append(t_run)

    # Test CPU performance after the run
    r2 = normalize_performance()
    ratio = (r1+r2)/2
    t_init = min(t_inits)*ratio
    t_run  = min(t_runs)*ratio

    # Construct json
    n_decimals = 3
    json = {'time': {
                'initialize': round(t_init, n_decimals),
                'run':        round(t_run,  n_decimals),
                },
            'parameters': {
                'pop_size': sim['pop_size'],
                'pop_type': sim['pop_type'],
                'n_days':   sim['n_days'],
                },
            'cpu_performance': ratio,
            }

    print('Previous benchmark:')
    sc.pp(previous)

    print('\nNew benchmark:')
    sc.pp(json)

    if do_save:
        sc.savejson(filename=benchmark_filename, obj=json, indent=2)

    print('Done.')

    return json
Example No. 21

            's': 0.8
        },
        trace_time=5.0,
    )
    res = ['0.9', '1.1']
    if rel_trans or beta_layer:
        cases = ['50']
    else:
        cases = ['20', '50', '110']
    for re in res:
        for case in cases:
            if rel_trans:
                jsonfile = f'optimization_school_reopening_re_{re}_cases_{case}_reltrans.json'
            else:
                jsonfile = f'optimization_school_reopening_re_{re}_cases_{case}.json'
            json = sc.loadjson(jsonfile)
            msims = []
            es_with_a_case = []
            ms_with_a_case = []
            hs_with_a_case = []
            for i, scen in enumerate(schools_reopening_scenarios):
                analysis_name = f'{scen}_{case}_cases_re_{re}'
                if rel_trans:
                    analysis_name = analysis_name + '_under10_0.5trans'

                if beta_layer:
                    analysis_name = analysis_name + '_3xschool_beta_layer'

                all_sims = []
                for j in range(n_seeds):
                    entry = json[j]
Example No. 22

 def validate(get_regression_dir,
              decimal=3,
              generate=False,
              force_report=False):
     if generate:
         print("skip validation")
         return
     passed = True
     checked = False
     failed_cases = []
     expected_folder, actual_folder = get_regression_dir
     if not os.path.exists(actual_folder):
         raise FileNotFoundError(actual_folder)
     if not os.path.exists(expected_folder):
         raise AssertionError(
             f"{expected_folder} does not exist, use regenerate = True to generate them"
         )
     expected_files = [
         f for f in os.listdir(expected_folder)
         if Path(f).suffix in [".csv", ".json", ".txt"]
     ]
     if len(expected_files) == 0:
         raise AssertionError(
             f"no files to validate in {expected_folder}, use regenerate = True to generate them"
         )
     #loop over all valid baseline files for comparison
     for f in expected_files:
         print(f"\n{f}")
         checked = True
         if f.endswith(".csv"):
             expected_data = np.loadtxt(os.path.join(expected_folder, f),
                                        delimiter=",")
             actual_data = np.loadtxt(os.path.join(actual_folder, f),
                                      delimiter=",")
             if (np.round(expected_data,
                          decimal) == np.round(actual_data, decimal)).all():
                 print("values unchanged, passed")
             else:
                 passed = False
                 failed_cases.append(
                     os.path.basename(f).replace(".csv", "*"))
                 print("result has been changed in these indexes:\n",
                       np.where(expected_data != actual_data)[0])
         elif f.endswith(".json"):
             expected_data = sc.loadjson(os.path.join(expected_folder, f))
             actual_data = sc.loadjson(os.path.join(actual_folder, f))
             if expected_data == actual_data:
                 print("values unchanged, passed")
             else:
                 passed = False
                 failed_cases.append(
                     os.path.basename(f).replace(".json", "*"))
         elif f.endswith(".txt"):
             result = check_files_diff(os.path.join(expected_folder, f),
                                       os.path.join(actual_folder, f))
             if not result:
                 failed_cases.append(
                     os.path.basename(f).replace(".txt", "*"))
             passed = passed and result # Don't let a later pass overwrite an earlier failure
         else:
             print("ignored.\n")
     if not (passed & checked) or force_report:
         # generate test reports if failed
         pdf_path = generate_reports(get_regression_dir=get_regression_dir,
                                     failedcases=failed_cases,
                                     forced=force_report)
     assert passed & checked, f"regression test failed! Please review report at: {pdf_path}"
Example No. 23

        # What if cohorting doesn't work all that well due to bussing, after-school care, recess/lunch, or friends?
        # --> Add a % of the old school network back in.  It's _more_ transmission, so would need to balance?  Match R0 (analytical?)
        'broken_bubbles': broken_bubbles,

        # How much might it help if some (30%?) of children choose remote learning?
        # Easy enough to have in School some uids persistently at home!
        #'remote_students',
    }

    # Select a subset, if desired:
    sensitivity = {
        k: v
        for k, v in sensitivity.items() if k in ['broken_bubbles']
    }

    par_list = sc.loadjson(calibfile)[par_inds[0]:par_inds[1]]
    par_list_ch_eq_sus = sc.loadjson(calibfile_ch_eq_sus)[par_inds[0]:par_inds[1]]
    par_list_alt_symp = sc.loadjson(calibfile_alt_symp)[par_inds[0]:par_inds[1]]

    sims = []
    msims = []
    tot = len(scenarios) * len(testing) * len(par_list) * len(sensitivity)
    proc = 0

    # Save time by pre-generating the base simulations

    for senskey, builder in sensitivity.items():
        ch_eq_sus = False
        alt_symp = False
Example No. 24

    popkeys = list(pop.keys())
    stridekeys = [popkeys[i] for i in range(0, len(pop), stride)]
    subpop = {k: pop[k] for k in stridekeys}

    if do_save:
        sc.savejson(f'pop_v{sp.version.__version__}.json', subpop, indent=2)

    print('\n\n')
    pps = pars["n"] / elapsed
    print(f'Total time: {elapsed:0.3f} s for {pars["n"]} people ({pps:0.0f} people/second)')

    #%% Plotting
    fig, axes = pl.subplots(2, 3, figsize=(32, 18))
    expected = sc.loadjson('expected/pop_2001_seed1001.json')
    expected = {int(key): val for key, val in expected.items()}
    actual = pop
    for c, code in enumerate(['H', 'W', 'S']):
        args = dict(setting_code=code, density_or_frequency='density')
        fig = plotmatrix(population=expected,
                         title_prefix="Expected ",
                         fig=fig,
                         ax=axes[0, c],
                         **args)
        fig = plotmatrix(population=actual,
                         title_prefix="Actual",
                         fig=fig,
                         ax=axes[1, c],
                         **args)
Example No. 25

        'screening': [baseline, no_screening],

        # trace_prob was 75%
        # --> Remove tracing
        'tracing': [baseline, no_tracing],

        'NPI_screening': [baseline, no_NPI_reduction, no_screening],
        'NPI_tracing': [baseline, no_NPI_reduction, no_tracing],
        'screening_tracing': [baseline, no_screening, no_tracing],
        'NPI_screening_tracing': [baseline, no_NPI_reduction, no_screening, no_tracing],
    }

    # Select a subset, if desired:
    #sensitivity = {k:v for k,v in sensitivity.items() if k in ['baseline']}

    par_list = sc.loadjson(calibfile)[par_inds[0]:par_inds[1]]

    sims = []
    msims = []
    tot = len(scenarios) * len(testing) * len(par_list) * len(sensitivity)
    proc = 0

    # Save time by pre-generating the base simulations
    base_sims = []
    for eidx, entry in enumerate(par_list):
        par = sc.dcp(entry['pars'])
        par['rand_seed'] = int(entry['index'])
        base_sim = cs.create_sim(par, pop_size=pop_size, folder=folder)
        base_sim.dynamic_par = par
        base_sims.append(base_sim)
Example No. 26

        pl.text(mar23,
                labely,
                'Stay-at-home',
                color=color,
                alpha=0.9,
                style='italic')
    return


#%% Handle Fig. 1E

# Process data
data = sc.objdict()
dfs = sc.objdict()
for k, fn in datafiles.items():
    data[k] = sc.loadjson(fn)
    flat_data = []
    for entry in data[k]:
        flat = {}
        for k2 in ['index', 'mismatch']:
            flat[k2] = entry[k2]
        for k3, val3 in entry['pars'].items():
            flat[k3] = val3
            if k3 == 'beta':
                flat[k3] *= 100 * 3  # Household transmission probability, as a percentage
            elif k3.startswith('bc'):
                flat[k3] = 100 * (1 - flat[k3])  # Convert relative beta to a percentage beta reduction
        flat_data.append(flat)