def test_argument_list_dm_commands():
    """Dashes inside each --dm-commands token are translated back to spaces."""
    arg_list = ArgumentList([DmCommands()])
    parsed = arg_list.parse_args(
        ['--dm-commands', 'init', 'fit-fixed', 'set-scale_var']
    )
    assert type(parsed.dm_commands) == list
    expected = ['init', 'fit fixed', 'set scale_var']
    for got, want in zip(parsed.dm_commands, expected):
        assert got == want
from cascade_at.executor.args.arg_utils import ArgumentList
from cascade_at.executor.args.args import ModelVersionID, BoolArg, ListArg, StrArg, LogLevel
from cascade_at.context.model_context import Context
from cascade_at.core.log import get_loggers, LEVELS
from cascade_at.dismod.api.dismod_io import DismodIO

LOG = get_loggers(__name__)

# Command-line interface for pulling covariate-multiplier (mulcov)
# statistics out of dismod databases.
ARG_LIST = ArgumentList([
    ModelVersionID(),
    ListArg('--locations', help='The locations to pull mulcov statistics from',
            type=int, required=True),
    ListArg('--sexes', help='The sexes to pull mulcov statistics from',
            type=int, required=True),
    StrArg('--outfile-name', help='Filepath where mulcov statistics will be saved',
           required=False, default='mulcov_stats'),
    # NOTE(review): the adjacent literals concatenate without a space, so the
    # help text reads "...ratherthan..." — confirm and add a space.
    BoolArg('--sample', help='If true, the results will be pulled from the sample table rather'
                             'than the fit_var table'),
    BoolArg('--mean', help='Whether or not to compute the mean'),
    BoolArg('--std', help='Whether or not to compute the standard deviation'),
    ListArg('--quantile', help='Quantiles to compute', type=float),
    LogLevel()
])


def common_covariate_names(dbs):
    # Intersection of covariate names shared by every database in `dbs`;
    # each element exposes a `covariate` table with a `c_covariate_name` column.
    return set.intersection(
        *map(set, [d.covariate.c_covariate_name.tolist() for d in dbs])
    )


# NOTE(review): this definition continues past this chunk — body not visible here.
def get_mulcovs(dbs, covs, table='fit_var'):
    """
LOG = get_loggers(__name__)

# Command-line interface for building/filling one dismod database, including
# options for pulling the prior from a parent database.
ARG_LIST = ArgumentList([
    ModelVersionID(),
    ParentLocationID(),
    SexID(),
    DmCommands(),
    DmOptions(),
    BoolArg('--fill', help='whether or not to fill the dismod database with data'),
    BoolArg(
        '--prior-samples',
        help='whether or not the prior came from samples or just a mean fit'),
    IntArg(
        '--prior-parent',
        help='the location ID of the parent database to grab the prior for'),
    IntArg('--prior-sex', help='the sex ID of the parent database to grab prior for'),
    IntArg('--prior-mulcov',
           help='the model version id where mulcov stats is passed in',
           required=False),
    BoolArg('--save-fit', help='whether or not to save the fit'),
    BoolArg('--save-prior', help='whether or not to save the prior'),
    LogLevel(),
    StrArg('--test-dir', help='if set, will save files to the directory specified')
])


# NOTE(review): class body continues past this chunk.
class DismodDBError(CascadeATError):
LOG = get_loggers(__name__)

# Command-line interface for the predict step: which child locations and sexes
# to predict for, which grid/table to predict from, and how to save results.
ARG_LIST = ArgumentList([
    ModelVersionID(),
    ParentLocationID(),
    SexID(),
    NSim(),
    NPool(),
    ListArg('--child-locations', help='child locations to make predictions for',
            type=int, required=False),
    ListArg('--child-sexes', help='sexes to make predictions for',
            type=int, required=False),
    BoolArg(
        '--prior-grid',
        help='whether to predict on the prior grid or the regular avgint grid'
    ),
    BoolArg(
        '--save-fit',
        help='whether to save the results of the predict sample as the fit'),
    BoolArg('--save-final', help='whether to save results as final'),
    BoolArg(
        '--sample',
        help='whether to predict from the sample table or the fit_var table'),
    LogLevel()
])
from cascade_at.core.log import get_loggers, LEVELS
from cascade_at.dismod.api.dismod_io import DismodIO
from cascade_at.dismod.process.process_behavior import check_sample_asymptotic, SampleAsymptoticError
from cascade_at.dismod.api.multithreading import _DismodThread, dmdismod_in_parallel
from cascade_at.dismod.api.run_dismod import run_dismod_commands
from cascade_at.executor import ExecutorError

LOG = get_loggers(__name__)

# Command-line interface for the sample/simulate step.
ARG_LIST = ArgumentList([
    ModelVersionID(),
    ParentLocationID(),
    SexID(),
    NSim(),
    NPool(),
    StrArg('--fit-type',
           help='what type of fit to simulate for, fit fixed or both',
           default='both'),
    BoolArg('--asymptotic',
            help='whether or not to do asymptotic statistics or fit-refit'),
    LogLevel()
])


class SampleError(ExecutorError):
    """Raised when there are issues with sample simulate."""
    pass


# NOTE(review): this definition continues past this chunk — body not visible here.
def simulate(path: Union[str, Path], n_sim: int):
    """
import logging
import sys

from cascade_at.executor.args.arg_utils import ArgumentList
from cascade_at.executor.args.args import ModelVersionID, LogLevel, BoolArg
from cascade_at.context.model_context import Context
from cascade_at.core.log import get_loggers, LEVELS
from cascade_at.saver.results_handler import ResultsHandler

LOG = get_loggers(__name__)

# Command-line interface for the upload step: choose which artifact types
# (final results, fits, priors) to upload.
ARG_LIST = ArgumentList([
    ModelVersionID(),
    BoolArg('--final', help='whether or not to upload final results'),
    BoolArg('--fit', help='whether or not to upload model fits'),
    BoolArg('--prior', help='whether or not to upload model priors'),
    LogLevel()
])


# NOTE(review): docstring and body continue past this chunk.
def upload_prior(context: Context, rh: ResultsHandler) -> None:
    """
    Uploads the saved priors to the epi database in the table
    epi.model_prior.

    Parameters
    ----------
    rh
        a Results Handler object
    context
        A context object
# from typing import Optional from cascade_at.executor.args.arg_utils import ArgumentList from cascade_at.core.log import get_loggers, LEVELS from cascade_at.executor.args.args import ModelVersionID, BoolArg, LogLevel, StrArg LOG = get_loggers(__name__) ARG_LIST = ArgumentList([ ModelVersionID(), BoolArg('--make', help='whether or not to make the file structure for the cascade'), BoolArg('--configure', help='whether or not to configure for the IHME cluster'), LogLevel(), StrArg('--json-file', help='for testing, pass a json file directly by filepath' 'instead of referencing a model version ID.'), StrArg('--test-dir', help='if set, will save files to the directory specified.' 'Invalidated if --configure is set.') ]) class CovariateReference: def __init__(self, inputs): self.inputs = inputs self.cov_ids = { c.covariate_id: c.name for c in inputs.covariate_specs.covariate_specs if c.study_country == 'country'
from cascade_at.executor.args.args import ModelVersionID, BoolArg, LogLevel, NSim, StrArg
from cascade_at.context.model_context import Context
from cascade_at.core.log import get_loggers, LEVELS
from cascade_at.jobmon.workflow import jobmon_workflow_from_cascade_command
from cascade_at.settings.settings import settings_from_model_version_id
from cascade_at.inputs.locations import LocationDAG

LOG = get_loggers(__name__)

# Command-line interface for running the whole cascade, either through jobmon
# or as a plain sequence of command-line tasks.
ARG_LIST = ArgumentList([
    ModelVersionID(),
    # NOTE(review): the adjacent literals concatenate without a space
    # ("...cascadeor just...") — confirm and add one.
    BoolArg('--jobmon', help='whether or not to use jobmon to run the cascade'
                             'or just run as a sequence of command line tasks'),
    BoolArg('--make', help='whether or not to make the file structure for the cascade'),
    NSim(),
    StrArg('--addl-workflow-args',
           help='additional info to append to workflow args, to re-do models',
           required=False),
    # NOTE(review): --skip-configure has no help text, unlike its siblings.
    BoolArg('--skip-configure'),
    LogLevel()
])


# NOTE(review): docstring and body continue past this chunk.
def run(model_version_id: int, jobmon: bool = True, make: bool = True, n_sim: int = 10,
        addl_workflow_args: Optional[str] = None, skip_configure: bool = False) -> None:
    """
import logging
import os
import sys

from cascade_at.executor.args.arg_utils import ArgumentList
from cascade_at.executor.args.args import ModelVersionID, LogLevel
from cascade_at.context.model_context import Context
from cascade_at.core.log import get_loggers, LEVELS

LOG = get_loggers(__name__)

# Command-line interface: only a model version ID and a log level.
ARG_LIST = ArgumentList([ModelVersionID(), LogLevel()])


def cleanup(model_version_id: int) -> None:
    """
    Delete all database (.db) files attached to a model version.

    Parameters
    ----------
    model_version_id
        The model version ID to delete databases for
    """
    context = Context(model_version_id=model_version_id)
    for root, _dirs, files in os.walk(context.database_dir):
        for f in files:
            if f.endswith(".db"):
                # os.walk already yields the full directory path in `root`;
                # the previous `context.database_dir / root / f` only worked
                # because pathlib discards the left operand when the right
                # side is absolute, and would break for a relative
                # database_dir.
                file = os.path.join(root, f)
                LOG.info(f"Deleting {file}.")
                os.remove(file)
import logging
import sys
from typing import List

from cascade_at.executor.args.arg_utils import ArgumentList
from cascade_at.executor.args.args import StrArg, DmCommands, LogLevel
from cascade_at.core.log import get_loggers, LEVELS
from cascade_at.dismod.api.run_dismod import run_dismod

LOG = get_loggers(__name__)

# Command-line interface: a database file plus the dismod commands to run on it.
ARG_LIST = ArgumentList([
    StrArg('--file', help='Which database file to execute commands on'),
    DmCommands(),
    LogLevel()
])


def run_dmdismod(file, dm_commands: List[str]) -> None:
    """
    Runs commands on a dismod file.

    Parameters
    ----------
    file
        Filepath to a database
    dm_commands
        List of commands that dismod_at understands
    """
    # Commands are executed sequentially, in the order given.
    for c in dm_commands:
        # NOTE(review): `process` appears unused here — presumably the
        # function continues past this chunk with exit-status checking;
        # confirm against the full file.
        process = run_dismod(dm_file=file, command=c)
def test_argument_list_dm_options():
    """A key=value=type triple is parsed into a typed dict entry."""
    arg_list = ArgumentList([DmOptions()])
    parsed = arg_list.parse_args(['--dm-options', 'foo=1=int'])
    assert type(parsed.dm_options) == dict
    assert parsed.dm_options['foo'] == 1
def test_argument_list_lists():
    """A ListArg with type=int collects whitespace-separated values as ints."""
    arg_list = ArgumentList([ListArg('--foo', type=int)])
    parsed = arg_list.parse_args(['--foo', '1', '2', '3'])
    assert type(parsed.foo) == list
    assert parsed.foo == [1, 2, 3]
def test_argument_list():
    """An IntArg parses its value and a BoolArg acts as a flag."""
    arg_list = ArgumentList([IntArg('--foo'), BoolArg('--bar')])
    parsed = arg_list.parse_args(['--foo', '1', '--bar'])
    assert parsed.foo == 1
    assert parsed.bar
def test_argument_list_task_args():
    """ModelVersionID is classified as a task arg; plain IntArg as a node arg."""
    arg_list = ArgumentList([IntArg('--foo'), ModelVersionID()])
    assert arg_list.task_args == ['model_version_id']
    assert arg_list.node_args == ['foo']
def test_argument_list_template():
    """Template placeholders use underscores in place of dashes."""
    arg_list = ArgumentList([IntArg('--foo-bar'), BoolArg('--bar')])
    assert arg_list.template == '{foo_bar} {bar}'
from cascade_at.settings.settings import settings_from_model_version_id
from cascade_at.settings.settings import load_settings

LOG = get_loggers(__name__)

# Command-line interface for running the cascade, with testing hooks
# (--json-file, --test-dir) in addition to the jobmon/make options.
ARG_LIST = ArgumentList([
    ModelVersionID(),
    # NOTE(review): the adjacent literals concatenate without a space
    # ("...cascadeor just...") — confirm and add one.
    BoolArg('--jobmon', help='whether or not to use jobmon to run the cascade'
                             'or just run as a sequence of command line tasks'),
    BoolArg('--make', help='whether or not to make the file structure for the cascade'),
    NSim(),
    NPool(),
    StrArg('--addl-workflow-args',
           help='additional info to append to workflow args, to re-do models',
           required=False),
    BoolArg('--skip-configure',
            help='Disable building the inputs.p and settings.json files.'),
    StrArg('--json-file', help='for testing, pass a json file directly by filepath',
           required=False),
    StrArg('--test-dir', help='if set, specifies where files directory is.',
           required=False),
    LogLevel()
])


# NOTE(review): signature and body continue past this chunk.
def run(model_version_id: int, jobmon: bool = True, make: bool = True,