class WardInfo: """This class holds metadata about a ward, e.g. its name(s), any ID code(s), any information about the region or authority it is in etc. """ #: Name of the ward name: str = "" #: Any alternative names of the ward alternate_names: _List[str] = _field(default_factory=list) #: Official ID code of the ward code: str = "" #: Any alternative ID codes of the ward alternate_codes: _List[str] = _field(default_factory=list) #: The name of the local authority it is in authority: str = "" #: The ID of the local authority it is in authority_code: str = "" #: The name of the region it is in region: str = "" #: The ID of the region it is in region_code: str = ""
class SrcData:
    """Container for the values returned from pyheaders' API.

    Bundles the parsed scope together with the discovered macros.
    """
    scope: cpp.Scope = _field(default_factory=cpp.Scope)
    macros: _Dict[_Text, _Text] = _field(default_factory=dict)

    def update(self, other):
        """Merge the scope and macros of another SrcData into this one."""
        # pylint: disable=no-member
        self.scope.update(other.scope)
        self.macros.update(other.macros)

    def __iter__(self):
        # Iterable protocol so instances can be unpacked as
        # ``scope, macros = src_data``.
        yield self.scope
        yield self.macros
def field(env_type: EnvType, is_public: bool = True, default: Any = MISSING,
          env_name: Optional[str] = ''):
    """Declare a dataclass field carrying environment-related metadata.

    The env_type, visibility flag and env_name are stashed in the
    field's metadata mapping for later inspection; 'default' is
    forwarded unchanged to dataclasses.field.
    """
    meta = {
        "is_public": is_public,
        "env_type": env_type,
        "env_name": env_name,
    }
    return _field(default=default, metadata=meta)
class Symbol:
    """A named symbol (apparently protobuf-style, given RpcField) plus
    its fields.

    `addOrRemove` records whether the symbol is being added or removed;
    the camelCase name is kept as-is because it is part of the public
    interface.
    """
    name: str
    typ: str
    addOrRemove: str
    #: Mapping from field name to its (plain or RPC) field definition
    fields: Dict[str, Union[Field, RpcField]] = _field(default_factory=dict)

    def __str__(self):
        # str(...) instead of the dated "'%s' % (...)" formatting;
        # the result is identical.
        return str(self.name)
class File(_JSONConvertable): name: _Path # Path relative to torrent folder size: int downloaded: int priority: Priority _unknown_1: int = _field(repr=False) pieces: int _unknown_2: bool = _field(repr=False) _unknown_3: int = _field(repr=False) _unknown_4: int = _field(repr=False) _unknown_5: int = _field(repr=False) _unknown_6: int = _field(repr=False) _unknown_7: int = _field(repr=False) _JSON_CONVERSION_TABLE = {"priority": Priority, "name": _Path}
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
          hash=None, compare=True, metadata=None, **kwargs):
    """Wrapper around :func:`dataclasses.field` that folds any extra
    keyword arguments into the field's metadata mapping.

    Parameters mirror dataclasses.field; any **kwargs are merged into
    'metadata' (kwargs win on key collisions).
    """
    # BUG FIX: the original did ``metadata = metadata or {}`` followed by
    # ``metadata.update(kwargs)``, mutating the caller's dict when one was
    # supplied. Copy before merging so declaring a field has no side
    # effects on caller-owned data.
    merged = dict(metadata) if metadata else {}
    merged.update(kwargs)
    return _field(default=default, default_factory=default_factory,
                  init=init, repr=repr, hash=hash, compare=compare,
                  metadata=merged)
class Torrent(_JSONConvertable):
    """A torrent and its transfer statistics.

    Field order matches the positional order of values in the JSON array
    consumed by from_json — do not reorder fields. The _unknown_* fields
    are unidentified positions in that array, hidden from repr.
    Note: 'precent_progress' is a (sic) misspelling kept because the
    attribute name is part of the public interface.
    """
    hash: str
    status: Status
    name: str
    size: int  # bytes
    precent_progress: int  # per mil
    downloaded: int  # bytes
    uploaded: int  # bytes
    ratio: int  # per mil
    upload_speed: int  # bytes per second
    download_speed: int  # bytes per second
    eta: int  # seconds
    label: str
    peers_connected: int
    peers_in_swarm: int
    seeds_connected: int
    seeds_in_swarm: int
    availability: int  # in 1/65536ths
    torrent_queue_order: int
    remaining: int  # bytes
    _unknown_1: str = _field(repr=False)
    _unknown_2: str = _field(repr=False)
    status_message: str
    _unknown_3: str = _field(repr=False)
    added_on: _datetime
    completed_on: _Union[None, _datetime]
    _unknown_4: str = _field(repr=False)
    path: _Path  # Location of torrent folder
    _unknown_5: int = _field(repr=False)
    _unknown_6: str = _field(repr=False)

    # Raw JSON values for these keys are converted with the mapped
    # callables before construction (completed_on may be 0/None when the
    # torrent has not finished, hence the guarded lambda).
    _JSON_CONVERSION_TABLE = {
        "status": Status,
        "added_on": _datetime.fromtimestamp,
        "completed_on": lambda x: _datetime.fromtimestamp(x) if x else None,
        "path": _Path
    }

    @_cachedproperty
    @classmethod
    def _FIELD_INDEX_TABLE(cls):
        """Map each dataclass field name to its positional index (cached)."""
        return {field.name: index
                for index, field in enumerate(_fields(cls))}

    @classmethod
    def from_json(cls, json):
        """Build a Torrent from a positional JSON array of field values.

        Entries named in _JSON_CONVERSION_TABLE are converted in place
        before the (positional) constructor call.
        """
        attributes = list(json)
        indicies_table = cls._FIELD_INDEX_TABLE
        for key, func in cls._JSON_CONVERSION_TABLE.items():
            index = indicies_table[key]
            attributes[index] = func(attributes[index])
        return cls(*attributes)
class Demographics:
    """This class holds metadata about all of the demographics
    being modelled
    """
    #: The list of individual Demographic objects, one for each
    #: demographic being modelled
    demographics: _List[Demographic] = _field(default_factory=list)

    #: The random seed to used when using any random number generator
    #: to resolve decisions needed when allocating individuals to
    #: demographics. This is set here so that the Demographics
    #: are uniquely determined and reproducible across runs
    random_seed: int = None

    #: The interaction matrix between demographics. This should
    #: be a list of lists that shows how demographic 'i' affects
    #: demographic 'j'
    interaction_matrix: _List[_List[int]] = None

    #: Map from index to names of demographics - enables lookup by name
    _names: _Dict[str, int] = _field(default_factory=dict)

    # Provenance metadata, filled in by Demographics.load
    _name: str = None
    _version: str = None
    _authors: str = None
    _contacts: str = None
    _references: str = None
    _filename: str = None
    _repository: str = None
    _repository_version: str = None
    _repository_branch: str = None

    def __str__(self):
        """Multi-line summary of the provenance metadata plus the
        string form of every demographic in the set.
        """
        d = "\n ".join([str(x) for x in self.demographics])
        return f"Demographics {self._name}\n" \
               f"loaded from {self._filename}\n" \
               f"version: {self._version}\n" \
               f"author(s): {self._authors}\n" \
               f"contact(s): {self._contacts}\n" \
               f"references(s): {self._references}\n" \
               f"repository: {self._repository}\n" \
               f"repository_branch: {self._repository_branch}\n" \
               f"repository_version: {self._repository_version}\n" \
               f"demographics = [\n {d}\n]"

    def __len__(self):
        # The length of the set is the number of demographics modelled
        return len(self.demographics)

    def __eq__(self, other):
        """Two Demographics are equal when they hold equal demographics
        under the same names at the same indexes (provenance metadata
        is deliberately not compared).
        """
        if not isinstance(other, Demographics):
            return False
        elif len(self) != len(other):
            return False
        else:
            for name, index in self._names.items():
                if other._names.get(name, None) != index:
                    return False

                if self.demographics[index] != other.demographics[index]:
                    return False

            return True

    def __getitem__(self, item):
        """Look a demographic up by name (str) or by index."""
        if isinstance(item, str):
            # Lookup by name
            return self.demographics[self.get_index(item)]
        else:
            # Lookup by index
            return self.demographics[item]

    def copy(self):
        """Return a copy of this demographics object that should
        allow a safe reset between runs. This deepcopies things
        that may change, while shallow copying things that won't
        """
        from copy import copy, deepcopy
        demographics = copy(self)
        demographics.interaction_matrix = deepcopy(self.interaction_matrix)
        demographics.demographics = copy(self.demographics)
        return demographics

    def add(self, demographic: Demographic):
        """Add a demographic to the set to be modelled"""
        if demographic.name is None:
            raise ValueError(
                f"You can only add named demographics to the set.")

        if demographic.name in self._names:
            raise ValueError(f"There is already a demographic called "
                             f"{demographic.name} in this set. Please rename "
                             f"and try again.")

        # the new demographic is appended, so its index is the new
        # last position
        self.demographics.append(demographic)
        self._names[demographic.name] = len(self.demographics) - 1

    def get_name(self, item):
        """Return the name of the demographic at 'item'"""
        return self.demographics[self.get_index(item)].name

    def get_index(self, item):
        """Return the index of the passed item, which may be given as a
        name (str), an index (int or int-like string) or a Demographic
        to search for by equality. Raises KeyError if not found.
        """
        # Accept numeric strings as indexes, e.g. "2" -> 2
        try:
            item = int(item)
        except Exception:
            pass

        if isinstance(item, str):
            try:
                return self._names[item]
            except Exception:
                pass
        elif isinstance(item, int):
            try:
                if self.demographics[item] is not None:
                    return item
            except Exception:
                pass
        elif isinstance(item, Demographic):
            for i, d in enumerate(self.demographics):
                if item == d:
                    return i

        # haven't found the item
        raise KeyError(f"There is no demographic is this set that "
                       f"matches {item}. Available names are "
                       f"{self._names}. Available indexes are "
                       f"0 -> {len(self._names)}")

    @staticmethod
    def load(name: str = None, repository: str = None,
             folder: str = _default_folder_name,
             filename: str = None):
        """Load the parameters for the specified set of demographics.
        This will look for a file called f"{name}.json"
        in the directory f"{repository}/{folder}/{name}.json"

        By default this will load nothing.

        Alternatively you can provide the full path to the
        json file via the "filename" argument

        Parameters
        ----------
        name: str
            The name of the demographics to load. This is the name that
            will be searched for in the METAWARDSDATA diseases directory
        repository: str
            The location of the cloned METAWARDSDATA repository
        folder: str
            The name of the folder within the METAWARDSDATA repository
            that contains the diseases
        filename: str
            The name of the file to load the disease from - this directly
            loads this file without searching through the METAWARDSDATA
            repository

        Returns
        -------
        demographics: Demographics
            The constructed and validated demographics
        """
        repository_version = None
        repository_branch = None

        # 'name' may itself be a path to the json file
        if filename is None:
            import os
            if os.path.exists(name):
                filename = name
            elif os.path.exists(f"{name}.json"):
                filename = f"{name}.json"

        import os

        # otherwise resolve against the METAWARDSDATA repository and
        # record its version/branch as provenance
        if filename is None:
            if repository is None:
                repository = os.getenv("METAWARDSDATA")
                if repository is None:
                    repository = _default_demographics_path

            filename = os.path.join(repository, folder, f"{name}.json")

            from ._parameters import get_repository_version
            v = get_repository_version(repository)
            repository = v["repository"]
            repository_version = v["version"]
            repository_branch = v["branch"]

        json_file = filename

        try:
            with open(json_file, "r") as FILE:
                import json
                data = json.load(FILE)
        except Exception as e:
            from .utils._console import Console
            Console.error(f"""
Could not find the demographics file {json_file}. "Either it does
not exist or was corrupted. Error was {e.__class__} {e}.
To download the disease data follow the instructions at
[https://metawards.org/model_data](https://metawards.org/model_data).""")
            raise FileNotFoundError(f"Could not find or read {json_file}: "
                                    f"{e.__class__} {e}")

        # diseases referenced by the file are resolved relative to the
        # directory that contains it
        json_dir = os.path.split(os.path.abspath(json_file))[0]

        demographics = data.get("demographics", [])
        work_ratios = data.get("work_ratios", [])
        play_ratios = data.get("play_ratios", [])
        random_seed = data.get("random_seed", None)
        diseases = data.get("diseases", None)

        if diseases is None:
            diseases = len(demographics) * [None]
        else:
            from ._disease import Disease
            diseases = [
                Disease.load(x, folder=json_dir)
                if x is not None else None
                for x in diseases
            ]

        # all per-demographic lists must be the same length
        if (len(demographics) != len(work_ratios) or
                len(demographics) != len(play_ratios) or
                len(demographics) != len(diseases)):
            raise ValueError(
                f"The number of work_ratios ({len(work_ratios)}) must "
                f"equal to number of play_ratios "
                f"({len(play_ratios)}) which must equal the number "
                f"of diseases ({len(diseases)}) which must equal "
                f"the number of demographics ({len(demographics)})")

        demos = Demographics(random_seed=random_seed,
                             _name=name,
                             _authors=data.get("author(s)", "unknown"),
                             _contacts=data.get("contact(s)", "unknown"),
                             _references=data.get("reference(s)", "none"),
                             _filename=json_file,
                             _repository=repository,
                             _repository_branch=repository_branch,
                             _repository_version=repository_version)

        for i in range(0, len(demographics)):
            demographic = Demographic(name=demographics[i],
                                      work_ratio=_get_value(work_ratios[i]),
                                      play_ratio=_get_value(play_ratios[i]),
                                      disease=diseases[i])
            demos.add(demographic)

        return demos

    def specialise(self, network: Network, profiler=None,
                   nthreads: int = 1):
        """Build the set of networks that will model this set
        of demographics applied to the passed Network.

        Parameters
        ----------
        network: Network
            The overall population model - this contains the base
            parameters, wards, work and play links that define
            the model outbreak
        profiler: Profiler
            Profiler used to profile the specialisation
        nthreads: int
            Number of threads over which to parallelise the work

        Returns
        -------
        networks: Networks
            The set of Networks that represent the model run over the
            full set of different demographics
        """
        if len(self) == 0:
            # nothing to specialise - run on the single base network
            return network
        else:
            from ._networks import Networks
            return Networks.build(network=network, demographics=self,
                                  profiler=profiler, nthreads=nthreads)
class ProtoFile:
    """A named file together with the symbols it declares."""
    name: str
    #: Mapping from symbol name to its Symbol definition
    symbols: Dict[str, Symbol] = _field(default_factory=dict)
class Parameters:
    """The full set of Parameters that are used to control the
    model outbreak over a Network. The combination of a Network
    and a Parameters defines the model outbreak.

    Load the Parameters using the Parameters.load function, and
    then add extra data using the various "set" and "add" functions,
    e.g.

    Examples
    --------
    >>> params = Parameters.load("march29")
    >>> params.set_disease("ncov")
    >>> params.set_input_files("2011Data")
    >>> params.add_seeds("ExtraSeedsBrighton.dat")
    """
    #: The set of input files that define the model Network
    input_files: InputFiles = None

    #: The set of parameters that define the disease
    disease_params: Disease = None

    #: The set of files that contain additional seeds that
    #: seed the outbreak during the model run
    additional_seeds: _List[str] = None

    #: The fraction of day considered "day" for work, e.g. 0.7 * 24 hours
    length_day: float = 0.7

    static_play_at_home: float = 0.0
    dyn_play_at_home: float = 0.0

    #: The cutoff distance in km beyond which workers or players cannot move
    dyn_dist_cutoff: float = 10000000.0

    play_to_work: float = 0.0
    work_to_play: float = 0.0

    #: proportion of daily imports if there are additional infections
    daily_imports: float = 0.0

    #: The index of the seeding ward if there are daily imports
    ward_seed_index: int = None

    #: The number of initial infections if there are daily imports
    initial_inf: int = 5

    #: how to treat the * state (stage 0). This should be a string
    #: describing the method. Currently "R", "E" and "disable" are
    #: supported. Not needed if the mapping is specified explicitly
    #: in the disease
    stage_0: str = "R"

    #: Seasonality parameter
    UV: float = 0.0

    #: Date when transmission should be at a maximum
    UV_max: _date = None

    #: The global scale_uv. This is combined with the population and
    #: per-ward level scale_uvs to give a single value
    scale_uv: float = 1.0

    #: The global background force of infection (FOI). This is combined
    #: with the per-ward level bg_foi to give a single value
    bg_foi: float = 0.0

    #: User parameters
    user_params: _Dict[str, float] = _field(default_factory=dict)

    #: All of the VariableSet adjustments that have been applied
    #: to these parameters
    adjustments: _List[VariableSet] = None

    # Provenance metadata, recorded when the parameters are loaded
    _name: str = None
    _version: str = None
    _authors: str = None
    _contacts: str = None
    _references: str = None
    _filename: str = None
    _repository: str = None
    _repository_version: str = None
    _repository_branch: str = None
    _repository_dir: str = None

    #: The parameters for demographic sub-networks. If this is None then
    #: the parameters are the same as the overall parameters
    _subparams = None

    def __str__(self):
        """Return a multi-line summary of the non-None parameters."""
        parts = []

        for key, value in [("Parameters", self._name),
                           ("loaded_from", self._filename),
                           ("repository", self._repository),
                           ("repository_branch", self._repository_branch),
                           ("repository_version", self._repository_version),
                           ("length_day", self.length_day),
                           ("initial_inf", self.initial_inf),
                           ("static_play_at_home", self.static_play_at_home),
                           ("dyn_play_at_home", self.dyn_play_at_home),
                           # BUG FIX: this row previously printed
                           # self.dyn_play_at_home (copy-paste error)
                           ("dyn_dist_cutoff", self.dyn_dist_cutoff),
                           ("play_to_work", self.play_to_work),
                           ("work_to_play", self.work_to_play),
                           ("daily_imports", self.daily_imports),
                           ("UV", self.UV),
                           ("UV_max", self.UV_max),
                           ("scale_uv", self.scale_uv),
                           ("bg_foi", self.bg_foi),
                           ("stage_0", self.stage_0)]:
            if value is not None:
                parts.append(f"* {key}: {value}")

        return "\n".join(parts)

    @staticmethod
    def default():
        """Return the default set of parameters"""
        try:
            (repository, v) = Parameters.get_repository()
            repository_dir = repository
            repository = v["repository"]
            repository_branch = v["branch"]
            repository_version = v["version"]
            return Parameters(_repository=repository,
                              _repository_dir=repository_dir,
                              _repository_branch=repository_branch,
                              _repository_version=repository_version)
        except Exception:
            # best-effort: fall back to a bare Parameters if the
            # repository metadata cannot be found
            pass

        return Parameters()

    @staticmethod
    def get_repository(repository: str = None):
        """Return the repository location and version information
        for the passed repository

        Parameters
        ----------
        repository: str
            Location on the filesystem of the repository. If this
            is None then it will be searched for using first
            the environment variable METAWARDSDATA, then
            $HOME/GitHub/MetaWardsData, then ./METAWARDSDATA

        Returns
        -------
        (repository, version): tuple
            A tuple of the location on disk of the repository,
            plus the version information (git ID etc)
        """
        return get_repository(repository)

    @staticmethod
    def load(parameters: str = "march29",
             repository: str = None,
             folder: str = _default_folder_name,
             filename: str = None):
        """This will return a Parameters object containing all of the
        parameters loaded from the parameters found in file
        f"{repository}/{folder}/{parameters}.json"

        By default this will load the march29 parameters from
        $HOME/GitHub/model_data/2011Data/parameters/march29.json

        Alternatively, you can provide the exact path to the
        filename via the 'filename' argument

        Parameters
        ----------
        parameters: str
            The name of the parameters to load. This is the name that
            will be searched for in the METAWARDSDATA parameters directory
        repository: str
            The location of the cloned METAWARDSDATA repository
        folder: str
            The name of the folder within the METAWARDSDATA repository
            that contains the parameters
        filename: str
            The name of the file to load the parameters from - this directly
            loads this file without searching through the METAWARDSDATA
            repository

        Returns
        -------
        params: Parameters
            The constructed and validated parameters
        """
        repository_version = None
        repository_branch = None
        repository_dir = None

        # resolve against the METAWARDSDATA repository and record its
        # version/branch as provenance
        if filename is None:
            import os
            (repository, v) = Parameters.get_repository(repository)
            filename = os.path.join(repository, folder,
                                    f"{parameters}.json")
            repository_dir = repository
            repository = v["repository"]
            repository_branch = v["branch"]
            repository_version = v["version"]

        json_file = filename

        try:
            with open(json_file, "r") as FILE:
                import json
                data = json.load(FILE)
        except Exception as e:
            from .utils._console import Console
            # (stray quote removed from this user-facing message)
            Console.error(f"""
Could not find the parameters file {json_file}. Either it does not
exist or was corrupted. Error was {e.__class__} {e}.
Please see https://metawards.org/model_data for instructions on how
to download and set the model data.""")
            raise FileNotFoundError(f"Could not find or read {json_file}: "
                                    f"{e.__class__} {e}")

        par = Parameters(
            length_day=data.get("length_day", 0.7),
            initial_inf=data.get("initial_inf", 0),
            static_play_at_home=data.get("static_play_at_home", 0.0),
            dyn_play_at_home=data.get("dyn_play_at_home", 0.0),
            dyn_dist_cutoff=data.get("dyn_dist_cutoff", 10000000.0),
            play_to_work=data.get("play_to_work", 0.0),
            work_to_play=data.get("work_to_play", 0.0),
            daily_imports=data.get("daily_imports", 0),
            UV=data.get("UV", 0.0),
            UV_max=data.get("UV_max", None),
            scale_uv=data.get("scale_uv", 1.0),
            bg_foi=data.get("bg_foi", 0.0),
            _name=data.get("name", parameters),
            _authors=data.get("author(s)", "unknown"),
            _version=data.get("version", "unknown"),
            _contacts=data.get("contact(s)", "unknown"),
            _references=data.get("reference(s)", "none"),
            _filename=json_file,
            _repository=repository,
            _repository_dir=repository_dir,
            _repository_branch=repository_branch,
            _repository_version=repository_version)

        return par

    def __getitem__(self, demographic: str):
        """Return the parameters that should be used for the
        demographic subnetwork called 'demographic'. If these
        have not been set specifically then the parameters
        for the overall network are used
        """
        if demographic == "overall":
            return self

        if self._subparams is None:
            self._subparams = {}

        if demographic not in self._subparams:
            # lazily create a copy of the overall parameters for this
            # demographic (with no nested sub-parameters of its own)
            from copy import deepcopy
            self._subparams[demographic] = deepcopy(self)
            self._subparams[demographic]._subparams = {}

        return self._subparams[demographic]

    def copy(self, include_subparams: bool = False):
        """Return a safe copy of these parameters, which does not
        include any subnetwork parameters if 'include_subparams' is False
        """
        from copy import deepcopy
        params = deepcopy(self)

        if not include_subparams:
            params._subparams = None

        return params

    def specialised_demographics(self) -> _List[str]:
        """Return the names of demographics that have specialised
        parameters that are different to those of the overall
        network
        """
        if self._subparams is None:
            return []
        else:
            return list(self._subparams.keys())

    def add_seeds(self, filename: str):
        """Add an 'additional seeds' file that can be used to
        seed wards with new infections at different times and
        locations. Several additional_seed files can be added

        Parameters
        ----------
        filename: str
            Name of the file containing the additional seeds
        """
        # resolve the filename to the GitHub repo if possible...
        if self.additional_seeds is None:
            self.additional_seeds = []

        import os

        if not os.path.exists(filename):
            if self._repository_dir is not None:
                f = os.path.join(self._repository_dir,
                                 "extra_seeds", filename)
                if os.path.exists(f):
                    filename = f

        self.additional_seeds.append(filename)

    def set_input_files(self, input_files: InputFiles):
        """Set the input files that are used to initialise the
        simulation

        Parameters
        ----------
        input_files: InputFiles
            The set of input files that will be used to load the Network.
            If a string is passed then the InputFiles will be loaded
            based on that string.
        """
        if isinstance(input_files, InputFiles):
            from copy import deepcopy
            self.input_files = deepcopy(input_files)
            return

        self.input_files = InputFiles.load(input_files,
                                           repository=self._repository_dir)

    def set_disease(self, disease: Disease, silent: bool = True):
        """Set the disease that will be modelled

        Parameters:
          disease: The disease to be modelled. If a string is passed
          then the disease will be loaded using that string
          silent: Whether or not to suppress printing out the disease
        """
        if isinstance(disease, str):
            disease = Disease.load(disease,
                                   repository=self._repository_dir)

        if not silent:
            from .utils._console import Console
            Console.print(disease, markdown=True)

        from copy import deepcopy
        self.disease_params = deepcopy(disease)

    def set_variables(self, variables: VariableSet):
        """This function sets the adjustable variable values to those
        specified in 'variables' in A COPY OF THIS PARAMETERS OBJECT.
        This returns the copy. It does not change this object

        Parameters
        ----------
        variables: VariableSet
            The variables that will be adjusted before the model run.
            This adjusts the parameters and returns them in a deep copy

        Returns
        -------
        params: Parameters
            A copy of this set of parameters with the variables adjusted
        """
        from copy import deepcopy
        params = deepcopy(self)

        if isinstance(variables, dict):
            variables = VariableSet(variables)

        variables.adjust(params)

        return params

    @staticmethod
    def read_variables(filename: str, line_numbers: _List[int]):
        """Read in extra variable parameters from the specified line number(s)
        of the specified file, returning the list
        of the dictionaries of variables that have been
        read. You can then apply those variable parameters
        using the 'set_variables' function

        Parameters
        ----------
        filename: str
            The file from which to read the adjustable variables
        line_numbers: List[int]
            All of the line numbers from which to read. If this is
            None then all lines will be read.

        Returns
        -------
        variables: VariableSets
            The VariableSets containing all of the adjustable variables
        """
        return VariableSets.read(filename, line_numbers)
class Simple:
    """The monochrome 'Simple' theme: every style resolves to plain
    white text and spinners are drawn as a sliding ASCII arrow.
    """
    #: Cache of spinner animation frames, keyed by console width
    frames: _Dict[int, _List[str]] = _field(default_factory=dict)

    def should_highlight(self):
        """Syntax highlighting is disabled in this theme."""
        return False

    def highlighter(self):
        """No highlighter is supplied for this theme."""
        return None

    def should_markup(self):
        """Rich markup is disabled in this theme."""
        return False

    def panel_box(self, style):
        """Return the box style used to frame panels."""
        from rich import box as _box
        return _box.SQUARE

    def padding_style(self, style):
        """Padding carries no style in this theme."""
        return ""

    def text(self, style):
        """All text renders white, whatever style is requested."""
        return "white"

    def error(self):
        """Colour used for errors."""
        return "white"

    def warning(self):
        """Colour used for warnings."""
        return "white"

    def info(self):
        """Colour used for informational messages."""
        return "white"

    def error_text(self):
        """Text style for errors: bold in the error colour."""
        return f"bold {self.error()}"

    def warning_text(self):
        """Text style for warnings: bold in the warning colour."""
        return f"bold {self.warning()}"

    def info_text(self):
        """Text style for info: bold in the info colour."""
        return f"bold {self.info()}"

    def spinner_success(self, spinner):
        """Mark the passed spinner as having finished successfully."""
        spinner.ok("Success")

    def spinner_failure(self, spinner):
        """Mark the passed spinner as having failed."""
        spinner.fail("Failure")

    def rule(self, style):
        """Colour used for horizontal rules."""
        return "white"

    def panel(self, style):
        """Colour used for panel borders."""
        return "white"

    def get_frames(self, width: int = 80):
        """Return the frames used to animate a spinner in a console
        of specified width

        This returns the list of frames plus the timeout between
        the list
        """
        cached = self.frames.get(width)
        if cached is not None:
            return cached

        # an arrow that slides from the left edge to the right edge,
        # one column per frame, each frame exactly 'width' wide
        arrow_frames = ["-" * step + ">" + " " * (width - step - 1)
                        for step in range(width)]

        self.frames[width] = (arrow_frames, 50)
        return self.frames[width]
class Demographics: """This class holds metadata about all of the demographics being modelled """ #: The list of individual Demographic objects, one for each #: demographic being modelled demographics: _List[Demographic] = _field(default_factory=list) #: The random seed to used when using any random number generator #: to resolve decisions needed when allocating individuals to #: demographics. This is set here so that the Demographics #: are uniquely determined and reproducible across runs random_seed: int = None #: The interaction matrix between demographics. This should #: be a list of lists that shows how demographic 'i' affects #: demographic 'j' interaction_matrix: _List[_List[int]] = None #: Map from index to names of demographics - enables lookup by name _names: _Dict[str, int] = _field(default_factory=dict) _name: str = None _version: str = None _authors: str = None _contacts: str = None _references: str = None _filename: str = None _repository: str = None _repository_version: str = None _repository_branch: str = None def __str__(self): d = "\n ".join([str(x) for x in self.demographics]) return f"[\n {d}\n]" def __repr__(self): return self.__str__() def __len__(self): return len(self.demographics) def __eq__(self, other): if not isinstance(other, Demographics): return False elif len(self) != len(other): return False else: for name, index in self._names.items(): if other._names.get(name, None) != index: return False if self.demographics[index] != other.demographics[index]: return False return True def __getitem__(self, item): if isinstance(item, str): # Lookup by name return self.demographics[self.get_index(item)] else: # Lookup by index return self.demographics[item] def copy(self): """Return a copy of this demographics object that should allow a safe reset between runs. 
This deepcopies things that may change, while shallow copying things that won't """ from copy import copy, deepcopy demographics = copy(self) demographics.interaction_matrix = deepcopy(self.interaction_matrix) demographics.demographics = copy(self.demographics) return demographics def __add__(self, other: Demographic): from copy import deepcopy r = deepcopy(self) r.add(other) return r def __radd__(self, other: Demographic): r = Demographics() r.add(other) for d in self.demographics: r.add(d) return r def add(self, demographic: Demographic): """Add a demographic to the set to be modelled""" if demographic.name is None: raise ValueError( f"You can only add named demographics to the set.") if demographic.name in self._names: raise ValueError(f"There is already a demographic called " f"{demographic.name} in this set. Please rename " f"and try again.") from copy import deepcopy self.demographics.append(deepcopy(demographic)) self._names[demographic.name] = len(self.demographics) - 1 def get_name(self, item): """Return the name of the demographic at 'item'""" return self.demographics[self.get_index(item)].name def get_index(self, item): """Return the index of the passed item""" try: item = int(item) except Exception: pass if isinstance(item, str): try: return self._names[item] except Exception: pass elif isinstance(item, int): try: if self.demographics[item] is not None: return item except Exception: pass elif isinstance(item, Demographic): for i, d in enumerate(self.demographics): if item == d: return i # haven't found the item raise KeyError(f"There is no demographic is this set that " f"matches {item}. Available names are " f"{self._names}. 
Available indexes are " f"0 -> {len(self._names)}") def uses_named_network(self): """Return whether or not at least one of these demographics specifies the use of a named network model """ for demographic in self.demographics: if demographic.network is not None: return True return False def is_multi_network(self): """Return whether or not these demographics need to use multiple custom networks (e.g. refer to different network models) """ if len(self) <= 1: return False else: first_network = self.demographics[0].network for demographic in self.demographics[1:]: if first_network != demographic.network: return True return False @staticmethod def load(name: str = None, repository: str = None, folder: str = _default_folder_name, filename: str = None): """Load the parameters for the specified set of demographics. This will look for a file called f"{name}.json" in the directory f"{repository}/{folder}/{name}.json" By default this will load nothing. Alternatively you can provide the full path to the json file via the "filename" argument Parameters ---------- name: str The name of the demographics to load. 
This is the name that will be searched for in the METAWARDSDATA diseases directory repository: str The location of the cloned METAWARDSDATA repository folder: str The name of the folder within the METAWARDSDATA repository that contains the diseases filename: str The name of the file to load the disease from - this directly loads this file without searching through the METAWARDSDATA repository Returns ------- demographics: Demographics The constructed and validated demographics """ repository_version = None repository_branch = None if filename is None: import os if os.path.exists(name): filename = name elif os.path.exists(f"{name}.json"): filename = f"{name}.json" import os if filename is None: if repository is None: repository = os.getenv("METAWARDSDATA") if repository is None: repository = _default_demographics_path filename = os.path.join(repository, folder, f"{name}.json") from ._parameters import get_repository_version v = get_repository_version(repository) repository = v["repository"] repository_version = v["version"] repository_branch = v["branch"] json_file = filename try: demographics = Demographics.from_json(json_file) except Exception as e: from .utils._console import Console Console.error(f""" Could not find the demographics file {json_file}. "Either it does not exist or was corrupted. Error was {e.__class__} {e}. 
To download the disease data follow the instructions at [https://metawards.org/model_data](https://metawards.org/model_data).""")
            raise FileNotFoundError(f"Could not find or read {json_file}: "
                                    f"{e.__class__} {e}")

        # record the provenance of the loaded demographics
        demographics._name = name
        demographics._filename = json_file
        demographics._repository = repository
        demographics._repository_branch = repository_branch
        demographics._repository_version = repository_version

        return demographics

    def to_data(self):
        """Return a data dictionary for this object that can
           be serialised to json
        """
        data = {}

        if self.demographics is None:
            return data

        # these are the values that are omitted from the output
        # when every demographic uses the default
        default = [1.0] * len(self.demographics)
        all_none = [None] * len(self.demographics)

        def _get_filename(x):
            # Resolve 'x' to an absolute filename (or pass it through
            # unchanged if it is a plain string that is not a local path).
            if x is None:
                return None
            elif isinstance(x, str):
                import os
                if os.path.exists(x):
                    from pathlib import Path
                    return str(Path(x).expanduser().absolute())
                else:
                    return x
            else:
                # assumes x is an object with a '_filename' attribute
                # (e.g. Disease or InputFiles) - resolve recursively
                if x._filename is None:
                    raise IOError(f"Cannot locate file for {x}")
                return _get_filename(x._filename)

        demographics = [str(x.name) for x in self.demographics]
        work_ratios = [float(x.work_ratio) for x in self.demographics]
        play_ratios = [float(x.play_ratio) for x in self.demographics]
        diseases = [_get_filename(x.disease) for x in self.demographics]
        networks = [_get_filename(x.network) for x in self.demographics]
        adjustments = [x.adjustment for x in self.demographics]

        data["demographics"] = demographics

        # only serialise values that differ from the defaults
        if work_ratios != default:
            data["work_ratios"] = work_ratios

        if play_ratios != default:
            data["play_ratios"] = play_ratios

        if self.random_seed is not None:
            data["random_seed"] = int(self.random_seed)

        if diseases != all_none:
            data["diseases"] = diseases

        if networks != all_none:
            data["networks"] = networks

        if adjustments != all_none:
            data["adjustments"] = [x.to_data() if x is not None else None
                                   for x in adjustments]

        return data

    def to_json(self, filename: str = None, indent: int = None,
                auto_bzip: bool = True) -> str:
        """Serialise the Demographics to JSON. This will write to a file
           if filename is set, otherwise it will return a JSON string.

           Parameters
           ----------
           filename: str
             The name of the file to write the JSON to. The absolute
             path to the written file will be returned. If filename is None
             then this will serialise to a JSON string which will be
             returned.
           indent: int
             The number of spaces of indent to use when writing the json
           auto_bzip: bool
             Whether or not to automatically bzip2 the written json file

           Returns
           -------
           str
             Returns either the absolute path to the written file, or
             the json-serialised string
        """
        import json

        if indent is not None:
            indent = int(indent)

        if filename is None:
            return json.dumps(self.to_data(), indent=indent)
        else:
            from pathlib import Path
            filename = str(Path(filename).expanduser().resolve().absolute())

            if auto_bzip:
                if not filename.endswith(".bz2"):
                    filename += ".bz2"

                import bz2
                with bz2.open(filename, "wt") as FILE:
                    try:
                        json.dump(self.to_data(), FILE, indent=indent)
                    except Exception:
                        # remove the partially-written file on failure
                        import os
                        FILE.close()
                        os.unlink(filename)
                        raise
            else:
                with open(filename, "w") as FILE:
                    try:
                        json.dump(self.to_data(), FILE, indent=indent)
                    except Exception:
                        # remove the partially-written file on failure
                        import os
                        FILE.close()
                        os.unlink(filename)
                        raise

            return filename

    @staticmethod
    def from_data(data, json_dir=None) -> Demographics:
        """Construct and return a Demographics object constructed
           from a (json-deserialised) data dictionary
        """
        demographics = data.get("demographics", [])
        work_ratios = data.get("work_ratios", [1.0] * len(demographics))
        play_ratios = data.get("play_ratios", [1.0] * len(demographics))
        random_seed = data.get("random_seed", None)
        diseases = data.get("diseases", None)
        networks = data.get("networks", None)
        adjustments = data.get("adjustments", None)

        if diseases is None:
            diseases = len(demographics) * [None]
        else:
            from ._disease import Disease
            diseases = [Disease.load(x, folder=json_dir)
                        if x is not None else None
                        for x in diseases]

        if networks is None:
            networks = len(demographics) * [None]
        else:
            from ._inputfiles import InputFiles
            networks = [InputFiles.load(x, folder=json_dir)
                        if x is not None else None
                        for x in networks]

        if adjustments is None:
            adjustments = len(demographics) * [None]
        else:
            from ._variableset import VariableSet
            adjustments = [VariableSet.from_data(x)
                           if x is not None else None
                           for x in adjustments]

        # every per-demographic list must have one entry per demographic
        if (len(demographics) != len(work_ratios) or
                len(demographics) != len(play_ratios) or
                len(demographics) != len(diseases) or
                len(demographics) != len(networks) or
                len(adjustments) != len(networks)):
            raise ValueError(
                f"The number of work_ratios ({len(work_ratios)}) must "
                f"equal to number of play_ratios "
                f"({len(play_ratios)}) which must equal the number "
                f"of diseases ({len(diseases)}) which must equal "
                f"the number of demographics ({len(demographics)}), "
                f"which must equal the number of networks ({len(networks)}).")

        demos = Demographics(random_seed=random_seed,
                             _authors=data.get("author(s)", None),
                             _contacts=data.get("contact(s)", None),
                             _references=data.get("reference(s)", None))

        for i in range(0, len(demographics)):
            demographic = Demographic(name=demographics[i],
                                      work_ratio=_get_value(work_ratios[i]),
                                      play_ratio=_get_value(play_ratios[i]),
                                      disease=diseases[i],
                                      network=networks[i],
                                      adjustment=adjustments[i])
            demos.add(demographic)

        return demos

    @staticmethod
    def from_json(s: str):
        """Construct and return Demographics loaded from the passed
           json file (or from a raw JSON string)
        """
        import os
        import json

        json_dir = None

        if os.path.exists(s):
            json_dir = os.path.split(os.path.abspath(s))[0]

            # the file may or may not be bzip2-compressed - try
            # compressed first, then fall back to plain text
            try:
                import bz2
                with bz2.open(s, "rt") as FILE:
                    data = json.load(FILE)
            except Exception:
                data = None

            if data is None:
                with open(s, "rt") as FILE:
                    data = json.load(FILE)
        else:
            # not a file - try to parse the string itself as JSON
            try:
                data = json.loads(s)
            except Exception:
                data = None

            if data is None:
                from .utils._console import Console
                Console.error(
                    f"Unable to load Demographics from '{s}'. Check that "
                    f"this is valid JSON or that the file exists.")
                raise IOError(f"Cannot load Demographics from '{s}'")

        return Demographics.from_data(data, json_dir=json_dir)

    def build(self, params: Parameters,
              population: Population = None,
              max_nodes: int = 16384,
              max_links: int = 4194304,
              nthreads: int = 1,
              profiler: Profiler = None) -> _Union[Network, Networks]:
        """Build the set of networks described by these demographics
           and the passed parameters

           Parameters
           ----------
           params: Parameters
             Parameters used to help build the model networks
           max_nodes: int
             Initial guess for the maximum number of nodes(wards)
           max_links: int
             Initial guess for the maximum number of links between wards
           profiler: Profiler
             Profiler used to profile the specialisation
           nthreads: int
             Number of threads over which to parallelise the work

           Returns
           -------
           Network or Networks
             The set of Networks that represent the model run over the
             full set of different demographics(or Network if there is
             just a single demographic)
        """
        from .utils._console import Console

        # no demographics - just build the plain network
        if len(self) == 0:
            return Network.build(params=params, population=population,
                                 max_nodes=max_nodes, max_links=max_links,
                                 nthreads=nthreads, profiler=profiler)

        # a single demographic - build one network, adjusted and scaled
        # for that demographic
        if len(self) == 1:
            demographic = self[0]

            if demographic.adjustment is not None:
                demographic.adjustment.adjust(params)

            if demographic.disease is not None:
                params.disease_params = demographic.disease

            if demographic.network is not None:
                params.input_files = demographic.network

            network = Network.build(params=params, population=population,
                                    max_nodes=max_nodes, max_links=max_links,
                                    nthreads=nthreads, profiler=profiler)

            if demographic.work_ratio != 1.0 or demographic.play_ratio != 1.0:
                network.scale_susceptibles(work_ratio=demographic.work_ratio,
                                           play_ratio=demographic.play_ratio)

            network.name = demographic.name

            return network

        if not self.uses_named_network():
            # build a single network that is then specialised
            network = Network.build(params=params, population=population,
                                    max_nodes=max_nodes, max_links=max_links,
                                    nthreads=nthreads, profiler=profiler)

            Console.rule("Specialising into demographics")
            return self.specialise(network=network,
                                   profiler=profiler,
                                   nthreads=nthreads)

        # need to load each network separately, and then merge
        wards = {}          # input_files -> Wards data
        shared_wards = {}   # input_files -> list of demographic indexes

        from ._wards import Wards
        from copy import deepcopy

        for i, demographic in enumerate(self.demographics):
            if demographic.network is None:
                input_files = params.input_files
            else:
                input_files = demographic.network

            if input_files not in shared_wards:
                if input_files.is_wards_data:
                    wards[input_files] = Wards.from_json(
                        input_files.wards_data)
                else:
                    network_params = deepcopy(params)
                    network_params.input_files = input_files
                    network = Network.build(params=network_params,
                                            population=population,
                                            max_nodes=max_nodes,
                                            max_links=max_links,
                                            nthreads=nthreads,
                                            profiler=profiler)
                    wards[input_files] = network.to_wards()

                shared_wards[input_files] = [i]
            else:
                shared_wards[input_files].append(i)

        wardss = [None] * len(self)
        input_files = [None] * len(self)

        for key, value in shared_wards.items():
            if len(value) > 1:
                # this is a combined network - need to divide the population
                # between multiple demographics. First create the network
                # and then use specialise to divide the population
                # between the demographics
                w = wards[key]
                network = Network.from_wards(w, params=params,
                                             nthreads=nthreads)

                ds = Demographics(demographics=[
                    deepcopy(self.demographics[x]) for x in value])

                for d in ds:
                    d.network = None

                network = ds.specialise(network=network,
                                        nthreads=nthreads)

                for i, idx in enumerate(value):
                    wardss[idx] = network.subnets[i].to_wards(
                        nthreads=nthreads)
                    input_files[idx] = key
            else:
                i = value[0]
                demographic = self.demographics[i]
                w = wards[key]

                if demographic.work_ratio != 1.0 or \
                        demographic.play_ratio != 1.0:
                    w = w.scale(work_ratio=demographic.work_ratio,
                                play_ratio=demographic.play_ratio)

                wardss[i] = w
                input_files[i] = key

        total_pop = worker_pop = player_pop = 0

        for wards in wardss:
            total_pop += wards.population()
            worker_pop += wards.num_workers()
            player_pop += wards.num_players()

        # harmonise so that all Wards objects share the same nodes
        overall, wardss = Wards.harmonise(wardss)

        # harmonising must not change the total populations
        assert overall.population() == total_pop
        assert overall.num_workers() == worker_pop
        assert overall.num_players() == player_pop

        overall = Network.from_wards(overall, params=params,
                                     nthreads=nthreads)

        subnets = [None] * len(self)

        total_pop = worker_pop = player_pop = 0

        for i, demographic in enumerate(self.demographics):
            subparams = deepcopy(params)
            subparams.input_files = input_files[i]

            if demographic.adjustment is not None:
                demographic.adjustment.adjust(subparams)

            subnets[i] = Network.from_wards(wardss[i],
                                            params=subparams,
                                            nthreads=nthreads)
            subnets[i].name = demographic.name

            total_pop += subnets[i].population
            worker_pop += subnets[i].work_population
            player_pop += subnets[i].play_population

        # the sub-networks must together hold the whole population
        assert total_pop == overall.population
        assert worker_pop == overall.work_population
        assert player_pop == overall.play_population

        from ._networks import Networks

        networks = Networks()
        networks.overall = overall
        networks.subnets = subnets
        networks.demographics = deepcopy(self)

        return networks

    def specialise(self, network: Network, profiler: Profiler = None,
                   nthreads: int = 1):
        """Build the set of networks that will model this set of
           demographics applied to the passed Network.

           Parameters
           ----------
           network: Network
             The overall population model - this contains the base
             parameters, wards, work and play links that define
             the model outbreak
           profiler: Profiler
             Profiler used to profile the specialisation
           nthreads: int
             Number of threads over which to parallelise the work

           Returns
           -------
           networks: Networks
             The set of Networks that represent the model run over the
             full set of different demographics
        """
        if len(self) == 0:
            return network
        else:
            from ._networks import Networks
            return Networks.build(network=network, demographics=self,
                                  profiler=profiler, nthreads=nthreads)
class Module(AST):
    # An ASDL-style "module" AST node: a named module holding a list of
    # type definitions (presumably produced by an ASDL parser - confirm
    # against the grammar that builds these nodes).
    name: string
    body: typing.List[Type] = _field(default_factory=list)
class WardInfos:
    """Simple class that holds a list of WardInfo objects, and
       provides useful search functions over that list. This prevents
       me from cluttering up the interface of Network
    """
    #: The list of WardInfo objects, one for each ward in order
    wards: _List[WardInfo] = _field(default_factory=list)

    #: The index used to speed up lookup of wards
    _index: _Dict[WardInfo, int] = None

    def __len__(self):
        """Return the number of ward slots (including empty None slots)."""
        return len(self.wards)

    def __getitem__(self, index: int) -> WardInfo:
        """Return the WardInfo at 'index' (may be None for empty slots)."""
        return self.wards[index]

    def __setitem__(self, i: int, info: WardInfo) -> None:
        """Set the ith WardInfo equal to 'info'."""
        if info is not None:
            if not isinstance(info, WardInfo):
                raise TypeError(
                    f"Setting item at index {i} to not a WardInfo {info} "
                    f"is not allowed")

        if i >= len(self.wards):
            # grow the list with empty (None) slots up to index i
            self.wards += [None] * (i - len(self.wards) + 1)
            self.wards[i] = info

            # a brand-new slot can be added to the index cheaply
            if info is not None and self._index is not None:
                self._index[info] = i

            return
        elif i < 0:
            # support negative (from-the-end) indexing
            i = len(self.wards) + i

            if i < 0:
                raise IndexError(f"Invalid index")

        if self.wards[i] == info:
            # nothing to do
            return
        elif self.wards[i] is not None:
            # overwriting an existing ward invalidates the whole index,
            # as the old entry cannot be removed cheaply
            self._index = None
            self.wards[i] = info
            return
        else:
            self.wards[i] = info

            if self._index is not None:
                # keep the index pointing at the first matching entry
                index = self._index.get(info, None)

                if index is None or index > i:
                    self._index[info] = i

            return

    def reindex(self):
        """Rebuild the WardInfo index. You must call this function
           after you have modified the list of WardInfo objects,
           as otherwise this will fall out of date. Note that this
           will be automatically called the first time you use
           the "contains" or "index" functions
        """
        self._index = {}

        for i, ward in enumerate(self.wards):
            if ward is not None:
                if not isinstance(ward, WardInfo):
                    raise TypeError(
                        f"Item at index {i} is not a WardInfo! {ward}")

                # only the first occurrence of a ward is indexed
                if ward not in self._index:
                    self._index[ward] = i

    def __contains__(self, info: WardInfo) -> bool:
        """Return whether or not this contains the passed WardInfo"""
        if self._index is None:
            self.reindex()

        return info in self._index

    def contains(self, info: WardInfo) -> bool:
        """Return whether or not this contains the passed WardInfo"""
        return self.__contains__(info)

    def index(self, info: WardInfo) -> int:
        """Return the index of the passed 'info' object if it is in
           this list. If not, then a ValueError exception
           is raised. Note that only the first matching WardInfo
           will be returned
        """
        if self._index is None:
            self.reindex()

        i = self._index.get(info, None)

        if i is None:
            raise ValueError(f"Missing ward! {info}")
        else:
            return i

    def _find_ward(self, name: str, match: bool, include_alternates: bool):
        """Internal function that flexibly finds a ward by name"""
        import re

        if not isinstance(name, re.Pattern):
            search = re.compile(name, re.IGNORECASE)
        else:
            search = name

        # 'match' anchors the pattern at the start of the string;
        # otherwise the pattern may occur anywhere
        if match:
            search = search.match
        else:
            search = search.search

        matches = []

        for i, ward in enumerate(self.wards):
            if ward is None:
                continue

            is_match = False

            if search(ward.name):
                is_match = True
            elif search(ward.code):
                is_match = True
            elif include_alternates:
                for alternate in ward.alternate_names:
                    if search(alternate):
                        is_match = True
                        break

                if not is_match:
                    for alternate in ward.alternate_codes:
                        if search(alternate):
                            is_match = True
                            break

            if is_match:
                matches.append(i)

        return matches

    def _find_authority(self, name: str, match: bool):
        """Internal function that flexibly finds a ward by authority"""
        import re

        if not isinstance(name, re.Pattern):
            search = re.compile(name, re.IGNORECASE)
        else:
            search = name

        if match:
            search = search.match
        else:
            search = search.search

        matches = []

        for i, ward in enumerate(self.wards):
            if ward is None:
                continue

            is_match = False

            if search(ward.authority):
                is_match = True
            elif search(ward.authority_code):
                is_match = True

            if is_match:
                matches.append(i)

        return matches

    def _find_region(self, name: str, match: bool):
        """Internal function that flexibly finds a ward by region"""
        import re

        if not isinstance(name, re.Pattern):
            search = re.compile(name, re.IGNORECASE)
        else:
            search = name

        if match:
            search = search.match
        else:
            search = search.search

        matches = []

        for i, ward in enumerate(self.wards):
            if ward is None:
                continue

            is_match = False

            if search(ward.region):
                is_match = True
            elif search(ward.region_code):
                is_match = True

            if is_match:
                matches.append(i)

        return matches

    def _intersect(self, list1, list2):
        """Return the intersection of two lists"""
        return [value for value in list1 if value in list2]

    def find(self, name: str = None,
             authority: str = None, region: str = None,
             match: bool = False, match_authority_and_region: bool = False,
             include_alternates: bool = True):
        """Generic search function that will search using any or all
           of the terms provided. This returns a list of indices of
           wards that match the search

           Parameters
           ----------
           name: str or regexp
             Name or code of the ward to search. You can also include
             the authority and region by separating using "/", e.g.
             "Clifton/Bristol".
           authority: str or regexp
             Name or code of the authority to search
           region: str or regexp
             Name or code of the region to search
           match: bool(False)
             Use a regular expression match for the ward rather than
             a search. This forces the match to be at the start
             of the string
           match_authority_and_region: bool(False)
             Use a regular expression match for the authority and region
             rather than a search. This forces the match to be at the
             start of the string
           include_alternates: bool(True)
             Whether or not to include alternative names and codes when
             searching for the ward
        """
        wards = None

        if name is not None:
            # "ward/authority[/region...]" syntax splits the search terms
            parts = name.split("/")

            if len(parts) == 1:
                wards = self._find_ward(name, match=match,
                                        include_alternates=include_alternates)
            else:
                wards = self._find_ward(name=parts[0].strip(),
                                        match=match,
                                        include_alternates=include_alternates)
                authority = parts[1].strip()

                if len(parts) > 2:
                    region = "/".join(parts[2:]).strip()

            if len(wards) == 0:
                return wards

        if authority is not None:
            authorities = self._find_authority(
                authority, match=match_authority_and_region)

            if len(authorities) == 0:
                return authorities

            if wards is None:
                wards = authorities
            else:
                wards = self._intersect(wards, authorities)
                wards.sort()

            if len(wards) == 0:
                return wards

        if region is not None:
            regions = self._find_region(region,
                                        match=match_authority_and_region)

            if len(regions) == 0:
                return regions

            if wards is None:
                wards = regions
            else:
                wards = self._intersect(wards, regions)
                wards.sort()

        if wards is None:
            # we have not searched for anything, so return everything
            # (starting at 1 - presumably ward 0 is a reserved null
            # ward; TODO confirm)
            return list(range(1, len(self.wards)))
        else:
            return wards
class WardInfo:
    """This class holds metadata about a ward, e.g. its name(s),
       any ID code(s), any information about the region or
       authority it is in etc.
    """
    #: Name of the ward
    name: str = ""

    #: Any alternative names of the ward
    alternate_names: _List[str] = _field(default_factory=list)

    #: Official ID code of the ward
    code: str = ""

    #: Any alternative ID codes of the ward
    alternate_codes: _List[str] = _field(default_factory=list)

    #: The name of the local authority it is in
    authority: str = ""

    #: The ID of the local authority it is in
    authority_code: str = ""

    #: The name of the region it is in
    region: str = ""

    #: The ID of the region it is in
    region_code: str = ""

    def __hash__(self):
        # hash on the primary identifying fields only, so that wards
        # that differ only in alternates/codes hash equally
        return f"{self.name} | {self.authority} | {self.region}".__hash__()

    def is_null(self):
        """Return whether or not this is a null (default) WardInfo"""
        return self == WardInfo()

    def summary(self):
        """Return a summary string that identifies this WardInfo"""
        s = []

        # prefer the primary name/code, falling back to alternates
        if len(self.name) > 0:
            s.append(self.name)
        elif len(self.alternate_names) > 0:
            s.append(self.alternate_names[0])
        elif len(self.code) > 0:
            s.append(self.code)
        elif len(self.alternate_codes) > 0:
            s.append(self.alternate_codes[0])

        if len(self.authority) > 0:
            s.append(self.authority)
        elif len(self.authority_code) > 0:
            s.append(self.authority_code)

        if len(self.region) > 0:
            s.append(self.region)
        elif len(self.region_code) > 0:
            # BUGFIX: was 's.appened(...)', which raised AttributeError
            # whenever only the region code was set
            s.append(self.region_code)

        return "/".join(s)

    def to_data(self):
        """Return a dictionary that contains all of this data, in
           a format that can be serialised to JSON. Empty fields
           are omitted from the output.
        """
        data = {}

        if self.name is not None and len(self.name) > 0:
            data["name"] = str(self.name)

        if self.alternate_names is not None and len(self.alternate_names) > 0:
            data["alternate_names"] = [str(x) for x in self.alternate_names]

        if self.code is not None and len(self.code) > 0:
            data["code"] = str(self.code)

        if self.alternate_codes is not None and len(self.alternate_codes) > 0:
            data["alternate_codes"] = [str(x) for x in self.alternate_codes]

        if self.authority is not None and len(self.authority) > 0:
            data["authority"] = str(self.authority)

        if self.authority_code is not None and len(self.authority_code) > 0:
            data["authority_code"] = str(self.authority_code)

        if self.region is not None and len(self.region) > 0:
            data["region"] = str(self.region)

        if self.region_code is not None and len(self.region_code) > 0:
            data["region_code"] = str(self.region_code)

        return data

    @staticmethod
    def from_data(data):
        """Construct from the passed dictionary, which has, e.g. been
           deserialised from JSON. Missing keys become empty strings
           or empty lists.
        """
        if data is None or len(data) == 0:
            return WardInfo()

        info = WardInfo()

        info.name = str(data.get("name", ""))
        info.alternate_names = [str(x) for x in
                                data.get("alternate_names", [])]
        info.code = str(data.get("code", ""))
        info.alternate_codes = [str(x) for x in
                                data.get("alternate_codes", [])]
        info.authority = str(data.get("authority", ""))
        info.authority_code = str(data.get("authority_code", ""))
        info.region = str(data.get("region", ""))
        info.region_code = str(data.get("region_code", ""))

        return info
class Field(AST):
    # A single field declaration: its type name ('kind'), its field
    # name, and an optional qualifier (presumably sequence/optional
    # markers - confirm against the field_qualifier definition).
    kind: string
    name: string
    qualifier: typing.Optional[field_qualifier] = _field(default=None)
class Constructor(AST):
    # A named constructor (alternative) of a sum type, holding zero
    # or more fields.
    name: string
    fields: typing.List[field] = _field(default_factory=list)
class Product(type):
    # A product type: a plain tuple of fields with no constructor name,
    # plus shared attributes. NOTE(review): the base 'type' here is
    # presumably a project-defined AST base class that shadows the
    # builtin - confirm.
    fields: typing.List[field] = _field(default_factory=list)
    attributes: typing.List[field] = _field(default_factory=list)
class Sum(type):
    # A sum type: one or more alternative constructors, plus attributes
    # shared by all constructors. NOTE(review): the base 'type' is
    # presumably a project-defined AST base class - confirm.
    types: typing.List[Constructor] = _field(default_factory=list)
    attributes: typing.List[field] = _field(default_factory=list)
class _CacheObj:
    # Simple in-memory cache container.
    # 'label' maps a label name to an int (presumably a usage count or
    # id - confirm against the caller).
    label: _Dict[str, int] = _field(default_factory=dict)
    # 'torrents' maps a string key (presumably the torrent hash/id -
    # confirm) to its Torrent object.
    torrents: _Dict[str, Torrent] = _field(default_factory=dict)
class SpringFlowers:
    """This is the colourful 'SpringFlowers' theme"""
    #: Cache of spinner animations, keyed by console width
    frames: _Dict[int, _List[str]] = _field(default_factory=dict)

    #: Colours cycled through for "alternate" panels
    panel_colors: _List[str] = _field(default_factory=list)

    #: Position in the alternate-panel colour cycle
    panel_color_count = 0

    def should_highlight(self):
        """This theme does not use syntax highlighting"""
        return False

    def highlighter(self):
        """No highlighter is provided by this theme"""
        return None

    def should_markup(self):
        """This theme does not use markup"""
        return False

    def text(self, style):
        """Return the text colour for the passed style"""
        colours = {"warning": "magenta",
                   "error": "red",
                   "info": "cyan"}
        return colours.get(style, "white")

    def error(self):
        """Colour used for errors"""
        return "red"

    def warning(self):
        """Colour used for warnings"""
        return "magenta"

    def info(self):
        """Colour used for information"""
        return "cyan"

    def spinner_success(self, spinner):
        """Mark the passed spinner as having succeeded"""
        marker = "✔" if Console.supports_emojis() else "Success"
        spinner.green.ok(marker)

    def spinner_failure(self, spinner):
        """Mark the passed spinner as having failed"""
        marker = "✘" if Console.supports_emojis() else "Failed"
        spinner.red.fail(marker)

    def rule(self, style):
        """Return the colour used for a rule of the passed style"""
        if style is None:
            return "green"

        colours = {"finish": "magenta",
                   "error": self.error(),
                   "warning": self.warning(),
                   "info": self.info(),
                   "iteration": "cyan"}
        return colours.get(style, "cyan")

    def panel_box(self, style):
        """Return the rich box style used for panels"""
        from rich import box as _box
        boxes = {"header": _box.HEAVY_EDGE,
                 "command": _box.MINIMAL_HEAVY_HEAD}
        return boxes.get(style, _box.SQUARE)

    def padding_style(self, style):
        """Return the style used to pad panels"""
        if style == "header":
            return "on #220077"

        if style == "command":
            return "bold white on #222222"

        # fall back to the panel style without advancing the
        # alternate-colour cycle
        return self.panel(style, advance=False)

    def panel(self, style, advance=True):
        """Return the background style used for panels. For the
           'alternate' style this cycles through panel_colors,
           advancing the cycle unless 'advance' is False.
        """
        if style is None:
            return "on black"

        if style == "command":
            return "white on #222222"

        if style == "alternate":
            if not self.panel_colors:
                # lazily initialise the colour cycle
                self.panel_colors = ["blue", "cyan"]
                self.panel_color_count = 0

            colour = self.panel_colors[self.panel_color_count]

            if advance:
                self.panel_color_count = \
                    (self.panel_color_count + 1) % len(self.panel_colors)

            return f"on {colour}"

        if style == "header":
            return "on #0000FF"

        return "on black"

    def get_frames(self, width: int = 80):
        """Return the frames used to animate a spinner in a console
           of specified width

           This returns the list of frames plus the timeout between
           the list
        """
        cached = self.frames.get(width)

        if cached is not None:
            return cached

        if Console.supports_emojis():
            bar = "👉 👉 👉 😷 😷 😷 👌 👍 👏 👏 👏 👏 👏 "
        else:
            bar = "-> -> -> #WearAMask :-) :-) :-) "

        # growing prefixes of the bar, starting from the empty string
        animation = [""]
        animation.extend(bar[0:i] for i in range(1, len(bar)))

        self.frames[width] = (animation, 50)

        return self.frames[width]
class Networks:
    """This is a combination of Network objects which together represent
       an entire diverse population. Each individual Network is used to
       model the disease outbreak within a single demographic of the
       population. Multiple demographics are modelled by combining
       multiple networks. Special merge functions enable joint
       FOIs to be calculated, through which an outbreak in one
       network can cross-infect a demographic in another network.

       The Networks can be very independent, and don't necessarily
       need to have the same links. However, it is assumed (and checked)
       that each network will have the same nodes.
    """

    #: The overall Network, which contains a combination of all of the
    #: sub-networks. This is used for summary analysis and also as
    #: a means of merging and distributing data between sub-networks
    overall: Network = None

    #: The list of Networks, one for each demographic, ordered in the
    #: same order as the "Demographics" object. This is empty if
    #: only a single demographic is modelled
    subnets: _List[Network] = _field(default_factory=list)

    #: Metadata about each of the demographics being modelled. This is
    #: None if only a single demographic is modelled
    demographics: Demographics = None

    @property
    def params(self) -> Parameters:
        """The overall parameters that are then specialised for the
           different demographics. Note that this returns a copy,
           so changing this will not change any parameters in the
           networks

           (BUGFIX: the return annotation was previously '-> int',
           but this returns a Parameters copy, or None)
        """
        if self.overall is not None:
            # return the parameters for all of the demographics
            params = self.overall.params.copy()

            params._subparams = {}

            for subnet in self.subnets:
                params._subparams[subnet.name] = subnet.params

            return params
        else:
            return None

    def num_demographics(self) -> int:
        """Return the number of demographics"""
        return len(self.subnets)

    def assert_sane(self, profiler=None):
        """Assert that the networks is sane. This checks that the
           networks and all of the demographic sub-networks are
           laid out correctly in memory and that they don't have
           anything unexpected. Checking here will prevent us from
           having to check every time the networks are accessed

           (BUGFIX: 'profiler' was previously annotated ': None' with
           no default, so 'assert_sane()' raised a TypeError - it now
           defaults to None like the other methods of this class)
        """
        if self.overall:
            self.overall.assert_sane(profiler=profiler)

        for subnet in self.subnets:
            subnet.assert_sane(profiler=profiler)

        # SHOULD ASSERT HERE THAT THE POPULATIONS OF ALL OF THE SUBNETS
        # IN EACH WARD SUM UP TO THE POPULATION IN THE OVERALL NETWORK
        # WARDS

    @staticmethod
    def build(network: Network, demographics: Demographics,
              profiler=None, nthreads: int = 1):
        """Build the set of networks that will model the passed
           demographics based on the overall population model
           in the passed network

           Parameters
           ----------
           network: Network
             The overall population model - this contains the base
             parameters, wards, work and play links that define
             the model outbreak
           demographics: Demographics
             Information about each of the demographics to be modelled.
             Note that the sum of the "work" and "play" populations
             across all demographics must be 1.0 in all wards in
             the model
           profiler: Profiler
             Optional profiler used to profile this build
           nthreads: int
             Number of threads over which to distribute the work

           Returns
           -------
           networks: Networks
             The set of Networks that represent the model run over the
             full set of different demographics
        """
        if not isinstance(network, Network):
            raise TypeError(f"You can only specialise a Network")

        if demographics is None or len(demographics) < 2:
            raise ValueError(f"You can only create a Networks object "
                             f"with a valid Demographics that contains "
                             f"more than one demographic")

        if demographics.uses_named_network():
            raise ValueError(
                f"You cannot specialise an existing network with demographics "
                f"that specify named networks - instead you need to call "
                f"demographics.build(...)")

        if profiler is None:
            from .utils._profiler import NullProfiler
            profiler = NullProfiler()

        p = profiler.start("specialise")

        subnets = []

        # specialise the network for each demographic
        for i in range(0, len(demographics)):
            p = p.start(f"demographic_{i}")
            subnets.append(network.specialise(demographic=demographics[i],
                                              profiler=p,
                                              nthreads=nthreads))
            p = p.stop()

        p = p.start("distribute_remainders")
        from .utils._scale_susceptibles import distribute_remainders
        distribute_remainders(network=network, subnets=subnets,
                              demographics=demographics,
                              profiler=p, nthreads=nthreads,
                              random_seed=demographics.random_seed)

        # we have changed the population, so need to recalculate the
        # denominators again...
        for subnet in subnets:
            subnet.reset_everything(nthreads=nthreads, profiler=p)
            subnet.rescale_play_matrix(nthreads=nthreads, profiler=p)
            subnet.move_from_play_to_work(nthreads=nthreads, profiler=p)

        p = p.stop()

        total_pop = network.population
        sum_pop = 0

        from .utils._console import Console
        Console.print(f"Specialising network - population: {total_pop}, "
                      f"workers: {network.work_population}, "
                      f"players: {network.play_population}")

        for i, subnet in enumerate(subnets):
            pop = subnet.population
            sum_pop += pop
            Console.print(f"  {demographics[i].name} - population: {pop}, "
                          f"workers: {subnet.work_population}, "
                          f"players: {subnet.play_population}")

            # each subnet's workers + players must account for its
            # whole population
            if subnet.work_population + subnet.play_population != pop:
                Console.error(
                    f"Disagreement in subnet population. Should be "
                    f"{pop} but is instead "
                    f"{subnet.work_population+subnet.play_population}.")
                raise AssertionError("Disagreement in subnet population.")

        if total_pop != sum_pop:
            raise AssertionError(
                f"The sum of the population of the demographic "
                f"sub-networks ({sum_pop}) does not equal the population "
                f"of the total network ({total_pop}). This is a bug!")

        result = Networks()
        result.overall = network
        result.subnets = subnets
        result.demographics = demographics

        p = p.stop()

        return result

    def copy(self):
        """Return a copy of this Networks. Use this to hold a copy of
           the networks that you can use to reset between runs
        """
        from copy import copy
        networks = copy(self)

        networks.overall = self.overall.copy()

        subnets = []

        for subnet in self.subnets:
            subnets.append(subnet.copy())

        networks.subnets = subnets
        networks.demographics = self.demographics.copy()

        return networks

    def aggregate(self, profiler=None, nthreads: int = 1):
        """Aggregate all of the sub-network population infection data
           so that this is available in the overall network
        """
        from .utils._aggregate import aggregate_networks
        aggregate_networks(network=self, profiler=profiler,
                           nthreads=nthreads)

    def run(self, population: Population, output_dir: OutputFiles,
            seed: int = None, nsteps: int = None,
            nthreads: int = None, iterator=None, extractor=None,
            mover=None, mixer=None, profiler=None) -> Population:
        """Run the model simulation for the passed population.
           The random number seed is given in 'seed'. If this
           is None, then a random seed is used.

           All output files are written to 'output_dir'

           The simulation will continue until the infection has
           died out or until 'nsteps' has passed (keep as 'None'
           to prevent exiting early).

           Parameters
           ----------
           population: Population
             The initial population at the start of the model outbreak.
             This is also used to set start date and day of the model
             outbreak
           output_dir: OutputFiles
             The directory to write all of the output into
           seed: int
             The random number seed used for this model run. If this is
             None then a very random random number seed will be used
           nsteps: int
             The maximum number of steps to run in the outbreak.
             If None then run until the outbreak has finished
           profiler: Profiler
             The profiler to use - a new one is created if one isn't
             passed
           nthreads: int
             Number of threads over which to parallelise this model run
           iterator: function
             Function that is called at each iteration to get the
             functions that are used to advance the model
           extractor: function
             Function that is called at each iteration to get the
             functions that are used to extract data for analysis
             or writing to files
           mixer: function
             Function that is called to mix the data calculated for
             each of the sub-networks for the different demographics
             and merge it together so that this is shared
           mover: function
             Function that is called to move the population between
             different demographics

           Returns
           -------
           population: Population
             The final population at the end of the run
        """
        # Create the random number generator
        from .utils._ran_binomial import seed_ran_binomial, ran_binomial

        if seed == 0:
            # this is a special mode that a developer can use to force
            # all jobs to use the same random number seed (15324) that
            # is used for comparing outputs. This should NEVER be used
            # for production code
            from .utils._console import Console
            Console.warning("Using special mode to fix all random number "
                            "seeds to 15324. DO NOT USE IN PRODUCTION!!!")
            rng = seed_ran_binomial(seed=15324)
        else:
            rng = seed_ran_binomial(seed=seed)

        # Print the first five random numbers so that we can
        # compare to other codes/runs, and be sure that we are
        # generating the same random sequence
        randnums = []
        for i in range(0, 5):
            randnums.append(str(ran_binomial(rng, 0.5, 100)))

        from .utils._console import Console
        Console.print(
            f"* First five random numbers equal **{'**, **'.join(randnums)}",
            markdown=True)
        randnums = None

        if nthreads is None:
            from .utils._parallel import get_available_num_threads
            nthreads = get_available_num_threads()

        from .utils._parallel import create_thread_generators
        rngs = create_thread_generators(rng, nthreads)

        # Create space to hold the results of the simulation
        infections = self.initialise_infections()

        Console.rule("Running the model")
        from .utils import run_model
        population = run_model(network=self,
                               population=population,
                               infections=infections,
                               rngs=rngs, output_dir=output_dir,
                               nsteps=nsteps,
                               nthreads=nthreads,
                               profiler=profiler,
                               iterator=iterator, extractor=extractor,
                               mixer=mixer, mover=mover)
        return population

    def reset_everything(self, nthreads: int = 1, profiler=None):
        """Resets the networks ready for a new run of the model"""
        if self.overall:
            self.overall.reset_everything(nthreads=nthreads,
                                          profiler=profiler)

        for subnet in self.subnets:
            subnet.reset_everything(nthreads=nthreads, profiler=profiler)

    def update(self, params: Parameters, demographics=None,
               population=None, nthreads: int = 1, profiler=None):
        """Update this network with a new set of parameters
           (and optionally demographics).

           This is used to update the parameters for the network
           for a new run. The network will be reset
           and ready for a new run.

           Parameters
           ----------
           params: Parameters
             The new parameters with which to update this Network
           demographics: Demographics
             The new demographics with which to update this Network.
             Note that this will return a Network object that contains
             the specilisation of this Network
           nthreads: int
             Number of threads over which to parallelise this update
           profiler: Profiler
             The profiler used to profile this update

           Returns
           -------
           network: Network or Networks
             Either this Network after it has been updated, or the
             resulting Networks from specialising this Network using
             Demographics
        """
        if profiler is None:
            from .utils import NullProfiler
            profiler = NullProfiler()

        p = profiler.start("overall.update")
        self.overall.update(params, profiler=p)
        p = p.stop()

        if demographics is not None:
            if demographics != self.demographics:
                from .utils._worker import must_rebuild_network
                if must_rebuild_network(network=self, params=self.params,
                                        demographics=demographics):
                    networks = demographics.build(params=self.params,
                                                  population=population,
                                                  nthreads=nthreads,
                                                  profiler=p)
                else:
                    # we have a change in demographics, so need to
                    # re-specialise
                    networks = demographics.specialise(network=self.overall,
                                                       profiler=p,
                                                       nthreads=nthreads)

                p.stop()
                return networks

        for i in range(0, len(self.demographics)):
            demographic = self.demographics[i]

            p = p.start(f"{demographic.name}.update")

            # use demographic-specific parameters where they exist
            if demographic.name in params.specialised_demographics():
                subnet_params = params[demographic.name]
            else:
                subnet_params = params

            if demographic.adjustment:
                subnet_params = subnet_params.set_variables(
                    demographic.adjustment)

            self.subnets[i].update(subnet_params, profiler=p)
            p = p.stop()

    def initialise_infections(self, nthreads: int = 1):
        """Initialise and return the space that will be used
           to track infections
        """
        from ._infections import Infections
        return Infections.build(network=self)

    def rescale_play_matrix(self, nthreads: int = 1, profiler=None):
        """Rescale the play matrix

           (BUGFIX: this previously called 'reset_everything' on the
           overall network - a copy-paste slip from the method above -
           rather than rescaling its play matrix)
        """
        if self.overall:
            self.overall.rescale_play_matrix(nthreads=nthreads,
                                             profiler=profiler)

        for subnet in self.subnets:
            subnet.rescale_play_matrix(nthreads=nthreads, profiler=profiler)

    def move_from_play_to_work(self, nthreads: int = 1, profiler=None):
        """Move the population from play to work"""
        if self.overall:
            self.overall.move_from_play_to_work(nthreads=nthreads,
                                                profiler=profiler)

        for subnet in self.subnets:
            subnet.move_from_play_to_work(nthreads=nthreads,
                                          profiler=profiler)
class WardInfos:
    """Simple class that holds a list of WardInfo objects,
       and provides useful search functions over that list.

       This prevents me from cluttering up the interface of Network
    """
    #: The list of WardInfo objects, one for each ward in order.
    #: Index 0 is assumed to be a None placeholder (the null ward) -
    #: TODO confirm against the code that builds this list
    wards: _List[WardInfo] = _field(default_factory=list)

    def __len__(self):
        """Return the number of entries in the ward list
           (including any None placeholder entries)
        """
        return len(self.wards)

    def __getitem__(self, index):
        """Return the WardInfo stored at the passed index"""
        return self.wards[index]

    @staticmethod
    def _build_searcher(name, match: bool):
        """Internal helper that turns 'name' (either a string or a
           pre-compiled regular expression) into a callable that is
           used to test strings.

           Strings are compiled into case-insensitive regular
           expressions. If 'match' is True then the returned callable
           requires the pattern to match at the start of the tested
           string, otherwise any substring may match.
        """
        import re

        if not isinstance(name, re.Pattern):
            name = re.compile(name, re.IGNORECASE)

        return name.match if match else name.search

    def _find_ward(self, name: str, match: bool, include_alternates: bool):
        """Internal function that flexibly finds a ward by name.

           Returns the indices of all wards whose name or code
           (and, if 'include_alternates' is True, any alternative
           name or code) matches the passed name or pattern.
        """
        search = self._build_searcher(name, match)

        matches = []

        for i, ward in enumerate(self.wards):
            if ward is None:
                # skip placeholder entries (e.g. the null ward 0)
                continue

            is_match = bool(search(ward.name)) or bool(search(ward.code))

            if not is_match and include_alternates:
                for alternate in ward.alternate_names:
                    if search(alternate):
                        is_match = True
                        break

                if not is_match:
                    for alternate in ward.alternate_codes:
                        if search(alternate):
                            is_match = True
                            break

            if is_match:
                matches.append(i)

        return matches

    def _find_authority(self, name: str, match: bool):
        """Internal function that flexibly finds a ward by the name
           or ID code of its local authority. Returns the indices
           of all matching wards.
        """
        search = self._build_searcher(name, match)

        return [i for i, ward in enumerate(self.wards)
                if ward is not None and
                (search(ward.authority) or search(ward.authority_code))]

    def _find_region(self, name: str, match: bool):
        """Internal function that flexibly finds a ward by the name
           or ID code of its region. Returns the indices of all
           matching wards.
        """
        search = self._build_searcher(name, match)

        return [i for i, ward in enumerate(self.wards)
                if ward is not None and
                (search(ward.region) or search(ward.region_code))]

    def _intersect(self, list1, list2):
        """Return the intersection of two lists, preserving the
           order of 'list1'
        """
        return [value for value in list1 if value in list2]

    def find(self, name: str = None,
             authority: str = None,
             region: str = None,
             match: bool = False,
             match_authority_and_region: bool = False,
             include_alternates: bool = True):
        """Generic search function that will search using any or all
           of the terms provided. This returns a list of indices
           of wards that match the search

           Parameters
           ----------
           name: str or regexp
             Name or code of the ward to search
           authority: str or regexp
             Name or code of the authority to search
           region: str or regexp
             Name or code of the region to search
           match: bool (False)
             Use a regular expression match for the ward rather
             than a search. This forces the match to be at the
             start of the string
           match_authority_and_region: bool (False)
             Use a regular expression match for the authority and
             region rather than a search. This forces the match
             to be at the start of the string
           include_alternates: bool (True)
             Whether or not to include alternative names and codes
             when searching for the ward

           Returns
           -------
           wards: List[int]
             The list of indices of wards that match the search.
             If no search terms were given then every ward index
             (except the null ward at index 0) is returned.
        """
        wards = None

        if name is not None:
            wards = self._find_ward(name, match=match,
                                    include_alternates=include_alternates)

            if len(wards) == 0:
                return wards

        if authority is not None:
            authorities = self._find_authority(
                authority, match=match_authority_and_region)

            if len(authorities) == 0:
                return authorities

            if wards is None:
                wards = authorities
            else:
                # intersect then re-sort, as _intersect preserves
                # the order of the first list only
                wards = self._intersect(wards, authorities)
                wards.sort()

            if len(wards) == 0:
                return wards

        if region is not None:
            regions = self._find_region(region,
                                        match=match_authority_and_region)

            if len(regions) == 0:
                return regions

            if wards is None:
                wards = regions
            else:
                wards = self._intersect(wards, regions)
                wards.sort()

        if wards is None:
            # we have not searched for anything, so return everything
            # (skipping index 0, which is assumed to be the null ward)
            return list(range(1, len(self.wards)))
        else:
            return wards