def version_info():
    ''' Print the SynthPops version, data folder, and git information. '''
    print(f'Loading SynthPops v{spv.__version__} ({spv.__versiondate__}) from {thisdir}')
    print(f'Data folder: {datadir}')
    print('Git information:')
    sc.pp(spv.__gitinfo__)
    return
def log_err(message, ex):
    '''
    Compile error messages to send to the frontend.

    Args:
        message (str): human-readable description of what failed
        ex (Exception): the exception that was raised

    Returns:
        dict: with keys "message" (the description) and "exception" (the
        formatted traceback as a single string)
    '''
    tex = traceback.TracebackException.from_exception(ex)
    # BUG FIX: the original called
    # traceback.format_exception(tex.exc_type, tex, tex.exc_traceback), but
    # TracebackException has no 'exc_traceback' attribute and is not itself an
    # exception instance, so that call raised instead of formatting. The
    # supported API is TracebackException.format(), which yields the formatted
    # traceback lines.
    output = {
        "message": message,
        "exception": ''.join(tex.format())
    }
    sc.pp(output)
    return output
def show_locations(location=None, output=False):
    '''
    Print a list of available locations.

    Args:
        location (str): if provided, only check if this location is in the list
        output (bool): whether to return the list (else print)

    **Examples**::

        cv.data.show_locations() # Print a list of valid locations
        cv.data.show_locations('lithuania') # Check if Lithuania is a valid location
        cv.data.show_locations('Viet-Nam') # Check if Viet-Nam is a valid location
    '''
    country_json = sc.dcp(cad.data)
    state_json = sc.dcp(sad.data)
    aliases = get_country_aliases()
    age_data = sc.mergedicts(state_json, country_json, aliases)  # Countries will overwrite states, e.g. Georgia
    household_data = sc.dcp(hsd.data)

    loclist = sc.objdict()
    loclist.age_distributions = sorted(age_data.keys())  # sorted() accepts any iterable; no need for list()
    loclist.household_size_distributions = sorted(household_data.keys())

    if location is not None:
        # Case-insensitive membership check against each dataset
        loc = location.lower()
        age_available = loc in [v.lower() for v in loclist.age_distributions]
        hh_available = loc in [v.lower() for v in loclist.household_size_distributions]
        # Dead stores removed: the original initialized age_sugg/hh_sugg to ''
        # and immediately overwrote them on the next two lines
        age_sugg = '' if age_available else f'(closest match: {sc.suggest(location, loclist.age_distributions)})'
        hh_sugg = '' if hh_available else f'(closest match: {sc.suggest(location, loclist.household_size_distributions)})'
        print(f'For location "{location}":')
        print(f' Population age distribution is available: {age_available} {age_sugg}')
        print(f' Household size distribution is available: {hh_available} {hh_sugg}')
        return

    if output:
        return loclist
    else:
        print(f'There are {len(loclist.age_distributions)} age distributions and {len(loclist.household_size_distributions)} household size distributions.')
        print('\nList of available locations (case insensitive):\n')
        sc.pp(loclist)
        return
def load_pars(index):
    ''' Load the parameters from JSON '''
    trial = json[index]  # Pull out the trial entry from the loaded calibration JSON
    mismatch = trial['mismatch']
    pars = sc.objdict(trial['pars'])
    pars.rand_seed = int(trial['index'])  # Use the trial index as the random seed
    print(f'Loading parameters from trial {index}, mismatch {mismatch}...')
    sc.pp(pars)
    return pars
def test_printing():
    ''' Test printing functions '''
    obj = sc.prettyobj()
    obj.data = sc.vectocolor(10)
    print('sc.pr():')
    sc.pr(obj)
    print('sc.pp():')
    sc.pp(obj.data)
    # Capture the pretty-printed representation instead of printing it
    return sc.pp(obj.data, doprint=False)
def test_data():
    ''' Check that data loads, and that a missing file raises FileNotFoundError. '''
    sc.heading('Data loading')
    data = covid_abm.load_data()
    sc.pp(data)

    # Check that it is looking for the right file
    with pytest.raises(FileNotFoundError):
        covid_abm.load_data(filename='file_not_found.csv')

    return data
def test_data():
    ''' Check that the example data loads, and that a missing file raises FileNotFoundError. '''
    sc.heading('Data loading')
    datafile = os.path.join(sc.thisdir(__file__), 'example_data.csv')
    data = cv.load_data(datafile)
    sc.pp(data)

    # Check that it is looking for the right file
    with pytest.raises(FileNotFoundError):
        cv.load_data(datafile='file_not_found.csv')

    return data
def jsonify_projects(username, verbose=False):
    """ Return project jsons for all projects the user has to the client. """
    projects = []  # Renamed local (original used 'json', shadowing the stdlib module name)
    for project_key in get_user(username).projects:
        try:
            project_json = jsonify_project(project_key)
        except Exception as E:
            # Best effort: report the failure in place of the project rather than aborting
            project_json = {'project': {'name': 'Project load failed: %s' % str(E)}}
        projects.append(project_json)
    output = {'projects': projects}
    if verbose:
        sc.pp(output)
    return output
def test_printing():
    ''' Exercise the printing utilities and return the pretty-printed string. '''
    sc.heading('test_printing()')
    obj = sc.prettyobj()
    obj.data = sc.vectocolor(10)
    print('sc.pr():')
    sc.pr(obj)
    print('sc.pp():')
    sc.pp(obj.data)
    string = sc.pp(obj.data, doprint=False)  # Capture instead of printing
    print('sc.printdata():')
    sc.printdata(obj.data)
    return string
def version_info():
    '''
    Print the SynthPops version, data folder, and (if available) git information.

    Git information is best-effort: if the repository metadata cannot be read,
    it is silently skipped.
    '''
    print(f'Loading SynthPops v{spv.__version__} ({spv.__versiondate__}) from {spd.settings.thisdir}')
    print(f'Data folder: {spd.settings.datadir}')
    try:
        gitinfo = sc.gitinfo(__file__)
        print('Git information:')
        sc.pp(gitinfo)
    except Exception:  # Narrowed from a bare except, which also swallowed KeyboardInterrupt/SystemExit
        pass  # Don't worry if git info isn't available
    return
def jsonify_interventions(project_id, intervkey=None, verbose=False):
    ''' Return the intervention set for a project as JSON. '''
    proj = load_project(project_id)  # Get the Project object
    intervset = proj.interv(key=intervkey)  # Get the intervention set that matches the key

    # Return an empty list if no data is present
    if intervset.data is None:
        return {'interventions': []}

    # Gather the list for all of the diseases, but only those with defined names (leaving out parsedbc)
    interv_data = intervset.jsonify(cols=intervset.colnames.values(), header=False)
    if verbose:
        sc.pp(interv_data)
    return {'interventions': interv_data}
def jsonify_project(project_id, verbose=False):
    """ Return the project json, given the Project UID. """
    proj = load_project(project_id)  # Load the project record matching the UID passed in
    project_dict = {
        'id': str(proj.uid),
        'name': proj.name,
        'username': proj.webapp.username,
        'hasData': len(proj.burdensets) > 0 and len(proj.intervsets) > 0,  # Both datasets must be present
        'creationTime': sc.getdate(proj.created),
        'updatedTime': sc.getdate(proj.modified),
    }
    json = {'project': project_dict}
    if verbose:
        sc.pp(json)
    return json
def test_benchmark(do_save=do_save):
    '''
    Compare benchmark performance.

    Times sim initialization and running separately, prints the previous and
    new timings, and optionally saves the new timings back to the benchmark
    file (controlled by the module-level do_save default).
    '''
    print('Running benchmark...')
    previous = sc.loadjson(benchmark_filename)  # Previous benchmark results, for comparison

    # Create the sim
    sim = cv.Sim(verbose=0)

    # Time initialization
    t0 = sc.tic()
    sim.initialize()
    t_init = sc.toc(t0, output=True)

    # Time running (initialization time excluded, since it was done above)
    t0 = sc.tic()
    sim.run()
    t_run = sc.toc(t0, output=True)

    # Construct json: timings plus the parameters they were measured under
    n_decimals = 3
    json = {
        'time': {
            'initialize': round(t_init, n_decimals),
            'run': round(t_run, n_decimals),
        },
        'parameters': {
            'pop_size': sim['pop_size'],
            'pop_type': sim['pop_type'],
            'n_days': sim['n_days'],
        },
    }

    print('Previous benchmark:')
    sc.pp(previous)
    print('\nNew benchmark:')
    sc.pp(json)

    if do_save:
        sc.savejson(filename=benchmark_filename, obj=json, indent=2)

    print('Done.')
    return json
def disp(self):
    ''' Detailed representation: print every option with its pretty-printed value. '''
    keylen = 14  # Maximum key length -- "numba_parallel"
    lines = ['Covasim options (see also cv.options.help()):\n']
    for key, val in self.items():
        keystr = sc.colorize(f' {key:>{keylen}s}: ', fg='cyan', output=True)
        valstr = sc.indent(n=keylen + 4, text=sc.pp(val, output=True), width=None)
        lines.append(f'{keystr}{valstr}')
    print(''.join(lines))
    return
def check_task(task_id, verbose=False):
    ''' Look up a task record by ID and return its JSON plus elapsed pending/execution times. '''
    match_taskrec = datastore.loadtask(task_id)  # Find a matching task record (if any) to the task_id

    # Check to see if the task exists, and if not, return an error
    if match_taskrec is None:
        errormsg = {'error': 'No task found for specified task ID (%s)' % task_id}
        if verbose:
            print(errormsg)
        return errormsg

    # Update the elapsed times
    if match_taskrec.pending_time is None:
        # Still pending: measure elapsed queue time; nothing has executed yet
        pending_time = (sc.now() - match_taskrec.queue_time).total_seconds()
        execution_time = 0
    else:
        # No longer pending: use the recorded pending time
        pending_time = match_taskrec.pending_time
        if match_taskrec.execution_time is None:
            # Still executing: measure elapsed time since the start
            execution_time = (sc.now() - match_taskrec.start_time).total_seconds()
        else:
            # Finished executing: use the recorded execution time
            execution_time = match_taskrec.execution_time

    taskrec_dict = match_taskrec.jsonify()  # Create the return dict from the record
    taskrec_dict['pendingTime'] = pending_time
    taskrec_dict['executionTime'] = execution_time
    if verbose:
        sc.pp(taskrec_dict)
    return taskrec_dict  # Return the record information and elapsed times
def jsonify_packages(project_id, packagekey, verbose=False):
    ''' Return the results for one package set of a project as JSON. '''
    proj = load_project(project_id)  # Get the Project object
    packageset = proj.package(key=packagekey)  # Get the package set that matches packageset_numindex

    # Return an empty list if no data is present
    if packageset.data is None:
        return {'results': []}

    # Gather the list for all of the diseases
    result_cols = ['fixed', 'shortname', 'total_dalys', 'icer', 'spend',
                   'opt_spend', 'dalys_averted', 'opt_dalys_averted']
    results = packageset.jsonify(cols=result_cols, header=False)

    output = {
        'results': results,
        'budget': packageset.budget,
        'frpwt': packageset.frpwt,
        'equitywt': packageset.equitywt,
    }
    if verbose:
        sc.pp(output)
    return output
def test_webapp():
    ''' Check that the webapp runs with defaults, and that invalid parameters raise an error. '''
    sc.heading('Testing webapp')

    # Run with default parameters; any error here is a genuine failure
    pars = cw.get_defaults(die=True)
    output = cw.run_sim(sim_pars=pars['sim_pars'], epi_pars=pars['epi_pars'], die=True)
    if output['errs']:
        errormsg = 'Webapp encountered an error:\n' + sc.pp(str(output['errs']), doprint=False)
        raise Exception(errormsg)

    # Run with invalid parameters; an error here is expected
    output2 = cw.run_sim(sim_pars='invalid', epi_pars='invalid')
    if not output2['errs']:
        raise Exception('Invalid parameters failed to raise an error')
    else:
        errormsg = 'Raising an error:\n'
        errormsg += sc.pp(output2['errs'], doprint=False)
        errormsg += '\n\nError message above successfully raised, all is well\n'
        print(errormsg)

    return output
staff_age_max=75, average_student_teacher_ratio=20, average_teacher_teacher_degree=3, average_student_all_staff_ratio=15, average_additional_staff_degree=20, ) if __name__ == '__main__': T = sc.tic() pop = sp.make_population(**pars) elapsed = sc.toc(T, output=True) for person in [6, 66, 666]: print(f'\n\nPerson {person}') sc.pp(pop[person]) print('\n\n') print(sc.gitinfo(sp.__file__)) print(sp.version.__version__) popkeys = list(pop.keys()) stridekeys = [popkeys[i] for i in range(0, len(pop), stride)] subpop = {k: pop[k] for k in stridekeys} if do_save: sc.savejson(f'pop_v{sp.version.__version__}.json', subpop, indent=2) print('\n\n') pps = pars["n"] / elapsed print( f'Total time: {elapsed:0.3f} s for {pars["n"]} people ({pps:0.0f} people/second)'
def test_parameters():
    ''' Create and display the default model parameters. '''
    sc.heading('Model parameters')
    parameters = cv.make_pars()
    sc.pp(parameters)
    return parameters
print('xlrd output: %s' % celltest) print('openpyxl output: %s' % celltest2) if check('Blobject'): blob = sc.Blobject(files.excel) f = blob.tofile() wb = openpyxl.load_workbook(f) ws = wb.active ws['B7'] = 'Hi! ' wb.save(f) blob.load(f) blob.tofile(output=False) data = sc.loadspreadsheet(fileobj=blob.bytes) print(blob) sc.pp(data) # Test spreadsheet saving if check('Spreadsheet'): S = sc.Spreadsheet(files.excel) S.writecells(cells=['A6','B7','C8','D9'], vals=['This','is','a','test']) # Method 1 S.writecells(cells=[pl.array([7,1])+i for i in range(4)], vals=['And','so','is','this']) # Method 2 newdata = (pl.rand(3,3)*100).round() S.writecells(startrow=14, startcol=1, vals=newdata, verbose=True) # Method 3 S.save() data = S.readcells(header=False) print(S) sc.pp(data)
def test_legacy():
    '''
    Preserved for completeness, but too fragile to be used in automated unit
    testing due to reliance on openpyxl (which is not a required Sciris
    dependency).

    Exercises spreadsheet save/load, Blobject round-tripping, object/text
    save/load, file listing, and zip saving; tidies up its files at the end.
    '''

    # Define filenames
    filedir = 'files' + os.sep
    files = sc.prettyobj()
    files.excel = filedir + 'test.xlsx'
    files.binary = filedir + 'test.obj'
    files.text = filedir + 'text.txt'
    files.zip = filedir + 'test.zip'
    tidyup = True  # Whether to delete the generated files at the end

    # Define the test data
    nrows = 15
    ncols = 3
    testdata = pl.zeros((nrows + 1, ncols), dtype=object)  # Includes header row
    testdata[0, :] = ['A', 'B', 'C']  # Create header
    testdata[1:, :] = pl.rand(nrows, ncols)  # Create data

    # Test spreadsheet writing, and create the file for later
    formats = {
        'header': {'bold': True, 'bg_color': '#3c7d3e', 'color': '#ffffff'},
        'plain': {},
        'big': {'bg_color': '#ffcccc'}
    }
    formatdata = pl.zeros((nrows + 1, ncols), dtype=object)  # Format data needs to be the same size
    formatdata[1:, :] = 'plain'  # Format data
    formatdata[1:, :][testdata[1:, :] > 0.7] = 'big'  # Find "big" numbers and format them differently
    formatdata[0, :] = 'header'  # Format header
    sc.savespreadsheet(filename=files.excel, data=testdata, formats=formats, formatdata=formatdata)

    # Test loading
    sc.heading('Loading spreadsheet')
    data = sc.loadspreadsheet(files.excel)
    print(data)

    # Optional: read individual cells from a sample databook, if it exists
    excel_path = filedir + 'exampledata.xlsx'
    if os.path.exists(excel_path):
        sc.heading('Reading cells')
        wb = sc.Spreadsheet(filename=excel_path)  # Load a sample databook to try pulling cells from
        celltest = wb.readcells(method='xlrd',
                                sheetname='Baseline year population inputs',
                                cells=[[46, 2], [47, 2]])  # Grab cells using xlrd
        celltest2 = wb.readcells(method='openpyexcel',
                                 wbargs={'data_only': True},
                                 sheetname='Baseline year population inputs',
                                 cells=[[46, 2], [47, 2]])  # Grab cells using openpyexcel. You have to set wbargs={'data_only': True} to pull out cached values instead of formula strings
        print('xlrd output: %s' % celltest)
        print('openpyxl output: %s' % celltest2)
    else:
        print(f'{excel_path} not found, skipping...')

    # Round-trip the spreadsheet through a Blobject (in-memory binary blob)
    sc.heading('Loading a blobject')
    blob = sc.Blobject(files.excel)
    f = blob.tofile()
    wb = openpyexcel.load_workbook(f)
    ws = wb.active
    ws['B7'] = 'Hi! '
    wb.save(f)
    blob.load(f)
    blob.tofile(output=False)
    data = sc.loadspreadsheet(fileobj=blob.bytes)
    print(blob)
    sc.pp(data)

    # Test spreadsheet saving
    sc.heading('Using a Spreadsheet')
    S = sc.Spreadsheet(files.excel)
    S.writecells(cells=['A6', 'B7', 'C8', 'D9'], vals=['This', 'is', 'a', 'test'])  # Method 1
    S.writecells(cells=[pl.array([7, 1]) + i for i in range(4)], vals=['And', 'so', 'is', 'this'])  # Method 2
    newdata = (pl.rand(3, 3) * 100).round()
    S.writecells(startrow=14, startcol=1, vals=newdata, verbose=True)  # Method 3
    S.save()
    data = S.readcells(header=False)
    print(S)
    sc.pp(data)

    # Binary object save/load round trip
    sc.heading('Saveobj/loadobj')
    sc.saveobj(files.binary, testdata)
    obj = sc.loadobj(files.binary)
    print(obj)

    # Text save/load round trip
    sc.heading('Savetext/loadtext')
    sc.savetext(files.text, testdata)
    obj = sc.loadtext(files.text)
    print(obj)

    sc.heading('Get files')
    print('Files in current folder:')
    sc.pp(sc.getfilelist())

    sc.heading('Save zip')
    sc.savezip(files.zip, [files.text, files.excel])

    '''
    Check that loading an object with a non-existent class works. The file
    deadclass.obj was created with:

    deadclass.py:
    -------------------------------------------------
    class DeadClass():
        def __init__(self, x):
            self.x = x
    -------------------------------------------------

    then:
    -------------------------------------------------
    import deadclass as dc
    import sciris as sc
    deadclass = dc.DeadClass(238473)
    sc.saveobj('deadclass.obj', deadclass)
    -------------------------------------------------
    '''
    dead_path = filedir + 'deadclass.obj'
    if os.path.exists(dead_path):
        sc.heading('Intentionally loading corrupted file')
        obj = sc.loadobj(dead_path)
        print('Loading corrupted object succeeded, x=%s' % obj.x)
    else:
        print(f'{dead_path} not found, skipping...')

    # Tidy up
    if tidyup:
        sc.blank()
        sc.heading('Tidying up')
        for fn in [files.excel, files.binary, files.text, files.zip]:
            try:
                os.remove(fn)
                print('Removed %s' % fn)
            except:
                pass  # Best-effort cleanup: ignore files that could not be removed

    print('Done, all fileio tests succeeded')
    return S
def test_benchmark(do_save=do_save):
    '''
    Compare benchmark performance.

    Repeats initialization/run timings several times (taking the minimum to
    reduce noise), normalizes by a CPU-speed baseline so results are comparable
    across machines, and optionally saves the result back to the benchmark file.
    '''
    print('Running benchmark...')
    previous = sc.loadjson(benchmark_filename)  # Previous benchmark results, for comparison

    repeats = 5  # Number of timing repeats; the minimum over repeats is used
    t_inits = []
    t_runs = []

    def normalize_performance():
        ''' Normalize performance across CPUs -- simple Numpy calculation '''
        t_bls = []
        bl_repeats = 5
        n_outer = 10
        n_inner = 1e6
        for r in range(bl_repeats):
            t0 = sc.tic()
            for i in range(n_outer):
                a = np.random.random(int(n_inner))
                b = np.random.random(int(n_inner))
                a*b  # Elementwise multiply: the timed baseline workload
            t_bl = sc.toc(t0, output=True)
            t_bls.append(t_bl)
        t_bl = min(t_bls)  # Best (least-contended) baseline time
        reference = 0.112  # Benchmarked on an Intel i9-8950HK CPU @ 2.90GHz
        ratio = reference/t_bl  # >1 means this machine is faster than the reference
        return ratio

    # Test CPU performance before the run
    r1 = normalize_performance()

    # Do the actual benchmarking
    for r in range(repeats):
        # Create the sim
        sim = cv.Sim(verbose=0)

        # Time initialization
        t0 = sc.tic()
        sim.initialize()
        t_init = sc.toc(t0, output=True)

        # Time running
        t0 = sc.tic()
        sim.run()
        t_run = sc.toc(t0, output=True)

        # Store results
        t_inits.append(t_init)
        t_runs.append(t_run)

    # Test CPU performance after the run (averaged with the before-run ratio
    # to account for CPU speed changing during the benchmark)
    r2 = normalize_performance()
    ratio = (r1+r2)/2
    t_init = min(t_inits)*ratio
    t_run = min(t_runs)*ratio

    # Construct json: normalized timings, the parameters they were measured
    # under, and the CPU ratio used for normalization
    n_decimals = 3
    json = {'time': {
                'initialize': round(t_init, n_decimals),
                'run': round(t_run, n_decimals),
                },
            'parameters': {
                'pop_size': sim['pop_size'],
                'pop_type': sim['pop_type'],
                'n_days': sim['n_days'],
                },
            'cpu_performance': ratio,
            }

    print('Previous benchmark:')
    sc.pp(previous)
    print('\nNew benchmark:')
    sc.pp(json)

    if do_save:
        sc.savejson(filename=benchmark_filename, obj=json, indent=2)

    print('Done.')

    return json
sc.colorize('blue', string) return None # Launch app T = sc.tic() app = main.make_app() user = sw.make_default_users(app)[0] proj_id = sc.uuid(tostring=True) # These can all be the same proj = demoproj(proj_id, user.username) ########################################################################### ### Run the tests ########################################################################### string = 'Starting tests for proj = %s' % proj_id heading(string, 'big') if 'project_io' in torun: heading('Running project_io', 'big') uid = rpcs.save_new_project(proj, user.username) P = rpcs.load_project_record(uid) print(P) if 'intervset_io' in torun: heading('Running intervset_io', 'big') output = rpcs.get_project_interv_set_intervs(proj_id) sc.pp(output) sc.toc(T) print('Done.')
def __repr__(self):
    ''' Brief representation '''
    parts = [
        sc.objectid(self),
        'Covasim options (see also cv.options.disp()):\n',
        sc.pp(self.to_dict(), output=True),  # Pretty-printed options, captured as a string
    ]
    return ''.join(parts)
print('Goodbye world') sc.colorize('reset') # Colorize all output in between bluearray = sc.colorize(color='blue', string=str(range(5)), output=True) print("c'est bleu: " + bluearray) sc.colorize('magenta') # Now type in magenta for a while print('this is magenta') sc.colorize('reset') # Stop typing in magenta # Test printing functions if 'printing' in torun: example = sc.prettyobj() example.data = sc.vectocolor(10) print('sc.pr():') sc.pr(example) print('sc.pp():') sc.pp(example.data) string = sc.pp(example.data, doprint=False) # Test profiling functions if 'profile' in torun: def slow_fn(): n = 10000 int_list = [] int_dict = {} for i in range(n): int_list.append(i) int_dict[i] = i return class Foo:
import create_sim as cs # Settings index = 0 do_plot = 1 do_save = 1 verbose = 1 sg = 1 # Use the calibration with SafeGraph data jsonfile = f'../outputs/opt_merged_sep20_sg{sg}.json' pplfile = '../inputs/kc_big_seed0.ppl' simfile = 'fig2.sim' print(f'Original parameters for index {index}:') json = sc.loadjson(jsonfile) entry = json[index] sc.pp(entry) pars = entry['pars'] pars['rand_seed'] = int(entry['index']) + 0 # To run with a different seed # Adjust for the larger population pars['pop_size'] = 2.25e6 # Reset for the full population pars[ 'beta'] *= 0.99 # Adjust the calibration slightly for the larger population pars['verbose'] = verbose print('Loading population file...') with sc.Timer(): people = cv.load(pplfile) print('Creating sim...') with sc.Timer():
def help(self, detailed=False, output=False):
    '''
    Print information about options.

    Args:
        detailed (bool): whether to print out full help
        output (bool): whether to return a list of the options

    **Example**::

        cv.options.help(detailed=True)
    '''

    # If not detailed, just print the docstring for cv.options
    if not detailed:
        print(self.__doc__)
        return

    n = 15  # Size of indent

    def _indented(value):
        ''' Pretty-print a value, indented to line up with the labels '''
        return sc.indent(n=n, width=None, text=sc.pp(value, output=True)).rstrip()

    # Build one entry per option with its current value, default, env var, and description
    optdict = sc.objdict()
    for key in self.orig_options.keys():
        entry = sc.objdict()
        entry.key = key
        entry.current = _indented(self[key])
        entry.default = _indented(self.orig_options[key])
        if key.startswith('rc'):
            entry.variable = 'No environment variable'
        else:
            entry.variable = f'COVASIM_{key.upper()}'  # NB, hard-coded above!
        entry.desc = sc.indent(n=n, text=self.optdesc[key])
        optdict[key] = entry

    # Convert to a dataframe for nice printing
    print('Covasim global options ("Environment" = name of corresponding environment variable):')
    for k, key, entry in optdict.enumitems():
        sc.heading(f'{k}. {key}', spaces=0, spacesafter=0)
        changestr = '' if entry.current == entry.default else ' (modified)'
        print(f' Key: {key}')
        print(f' Current: {entry.current}{changestr}')
        print(f' Default: {entry.default}')
        print(f' Environment: {entry.variable}')
        print(f' Description: {entry.desc}')

    sc.heading('Methods:', spacesafter=0)
    print('''
    cv.options(key=value) -- set key to value
    cv.options[key] -- get or set key
    cv.options.set() -- set option(s)
    cv.options.get_default() -- get default setting(s)
    cv.options.load() -- load settings from file
    cv.options.save() -- save settings to file
    cv.options.to_dict() -- convert to dictionary
    cv.options.style() -- create style context for plotting
    ''')

    if output:
        return optdict
    else:
        return