class StereotypeVote(models.Model):
    """
    Similar to a regular vote, but it is cast by a stereotype (persona)
    instead of a user. It forms a m2m relationship between Stereotypes
    and Comments.
    """

    author = models.ForeignKey(
        "Stereotype", related_name="votes", on_delete=models.CASCADE
    )
    comment = models.ForeignKey(
        "ej_conversations.Comment",
        verbose_name=_("Comment"),
        related_name="stereotype_votes",
        on_delete=models.CASCADE,
    )
    choice = EnumField(Choice, _("Choice"))
    stereotype = alias("author")
    objects = StereotypeVoteQuerySet.as_manager()

    class Meta:
        unique_together = [("author", "comment")]

    def __str__(self):
        return f"StereotypeVote({self.author}, value={self.choice})"

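# A minimal, self-contained sketch of what the alias("author") declaration
# above provides: a descriptor that forwards reads and writes to another
# attribute, so vote.stereotype and vote.author refer to the same value.
# AliasDescriptor and FakeVote are illustrative stand-ins, not the actual
# boogie/Django implementation.
class AliasDescriptor:
    def __init__(self, target: str):
        self.target = target

    def __get__(self, obj, owner=None):
        if obj is None:
            return self
        return getattr(obj, self.target)

    def __set__(self, obj, value):
        setattr(obj, self.target, value)


class FakeVote:
    stereotype = AliasDescriptor("author")

    def __init__(self, author):
        self.author = author


vote = FakeVote(author="persona-1")
assert vote.stereotype == "persona-1"
vote.stereotype = "persona-2"
assert vote.author == "persona-2"
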
class BinaryOpMixin(ExprNode):
    """
    A binary operator (ex.: <expr> + <expr>).
    """

    tag: BinaryOp
    lhs: Expr
    rhs: Expr
    op = alias("_tag")
    operators = BinaryOp
    precedence_level = property(lambda self: self.op.precedence_level)

    class Meta:
        abstract = True
        command = "{lhs} {op} {rhs}"
        sexpr_skip = ()
        sexpr_unary_op_class = None

    @classmethod
    def _meta_sexpr_symbol_map(cls) -> dict:
        """
        Create a dictionary mapping symbols to the corresponding constructors
        for the given class.
        """
        skip = cls._meta.sexpr_skip
        unary_class: Type[UnaryOpMixin] = cls._meta.sexpr_unary_op_class
        symbol_map = {}
        unary_ops = set()

        if unary_class:
            ops = unary_class._meta.annotations["tag"]
            unary_ops.update(op.value for op in ops)

        for op in cls._meta.annotations["tag"]:
            symbol_map[op] = binary = binary_operator_sexpr(cls, op)
            if op.value in skip:
                pass
            elif op.value in unary_ops:
                fn = flexible_operator_sexpr(cls, unary_class, op.value)
                symbol_map[op.value] = fn
            else:
                symbol_map[op.value] = binary
        return symbol_map

    @classmethod
    def _meta_finalize(cls):
        super()._meta_finalize()
        cls.operators = cls._meta.annotations["tag"]

    def __init__(self, op, lhs, rhs, **kwargs):
        if isinstance(op, str):
            op = self._meta.annotations["tag"].from_name(op)
        super().__init__(op, lhs, rhs, **kwargs)

class UnaryOpMixin(ExprNode):
    """
    Unary operator (ex.: +<expr>).
    """

    tag: Op
    expr: Expr
    op = alias("_tag")

    class Meta:
        abstract = True
        command = "{op} {expr}"
        sexpr_skip = ()
        sexpr_binary_op_class = None

    @classmethod
    def _meta_sexpr_symbol_map(cls) -> dict:
        """
        Create a dictionary mapping symbols to the corresponding constructors
        for the given class.
        """
        skip = cls._meta.sexpr_skip
        binary_class: Type[BinaryOpMixin] = cls._meta.sexpr_binary_op_class
        symbol_map = {}
        binary_ops = set()

        if binary_class:
            ops = binary_class._meta.annotations["tag"]
            binary_ops.update(op.value for op in ops)

        for op in cls._meta.annotations["tag"]:
            symbol_map[op] = unary = unary_operator_sexpr(cls, op)
            if op.value in skip:
                pass
            elif op.value in binary_ops:
                fn = flexible_operator_sexpr(binary_class, cls, op.value)
                symbol_map[op.value] = fn
            else:
                symbol_map[op.value] = unary
        return symbol_map

    def __init__(self, op, expr, **kwargs):
        if isinstance(op, str):
            # The annotations map is keyed by the declared "tag" field, as in
            # BinaryOpMixin.__init__.
            op = self._meta.annotations["tag"].from_name(op)
        super().__init__(op, expr, **kwargs)

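# Hypothetical sketch of how the symbol maps built by the two mixins above
# could be consumed. For a symbol shared by unary and binary operators
# (e.g. "-"), flexible_operator_sexpr presumably returns a constructor that
# dispatches on arity; the stand-in below mimics that assumed behavior with
# simplified node types (UnaryNode/BinaryNode are not the real AST classes).
from dataclasses import dataclass


@dataclass
class UnaryNode:
    op: str
    expr: object


@dataclass
class BinaryNode:
    op: str
    lhs: object
    rhs: object


def make_flexible_constructor(binary_cls, unary_cls, symbol):
    def constructor(*args):
        if len(args) == 1:
            return unary_cls(symbol, args[0])
        if len(args) == 2:
            return binary_cls(symbol, args[0], args[1])
        raise TypeError(f"{symbol!r} expects 1 or 2 operands, got {len(args)}")

    return constructor


symbol_map = {"-": make_flexible_constructor(BinaryNode, UnaryNode, "-")}
assert symbol_map["-"]("x") == UnaryNode("-", "x")
assert symbol_map["-"]("x", "y") == BinaryNode("-", "x", "y")
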
class DiseaseParams(WrappedParams):
    """
    A wrapper for disease params.
    """

    gamma: float = sk.property(1 / _.infectious_period)
    sigma: float = sk.property(1 / _.incubation_period)

    Qs: float = sk.alias("prob_symptoms")
    Qsv: float = sk.alias("prob_severe")
    Qcr: float = sk.alias("prob_critical")

    CFR: float = sk.alias("case_fatality_ratio")
    IFR: float = sk.alias("infection_fatality_ratio")
    HFR: float = sk.alias("hospital_fatality_ratio")
    ICUFR: float = sk.alias("icu_fatality_ratio")

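# Plain-Python sketch of the relations encoded by the sidekick expressions
# above: gamma and sigma are the inverses of the corresponding periods, and
# the upper-case names read through to the underlying probabilities. The
# numeric values below are illustrative only, not disease data.
from dataclasses import dataclass


@dataclass
class DiseaseParamsSketch:
    infectious_period: float
    incubation_period: float
    prob_symptoms: float

    @property
    def gamma(self) -> float:
        return 1 / self.infectious_period

    @property
    def sigma(self) -> float:
        return 1 / self.incubation_period

    @property
    def Qs(self) -> float:
        return self.prob_symptoms


params = DiseaseParamsSketch(infectious_period=4.0, incubation_period=5.0, prob_symptoms=0.14)
assert params.gamma == 0.25 and params.sigma == 0.2 and params.Qs == 0.14
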
class StereotypeVote(models.Model):
    """
    Similar to a regular vote, but it is cast by a stereotype instead of a
    user. It forms a m2m relationship between Stereotypes and Comments.
    """

    author = models.ForeignKey(
        'Stereotype',
        related_name='votes',
        on_delete=models.CASCADE,
    )
    comment = models.ForeignKey(
        'ej_conversations.Comment',
        related_name='stereotype_votes',
        on_delete=models.CASCADE,
    )
    choice = EnumField(Choice)
    stereotype = alias('author')
    objects = BoogieManager()

    def __str__(self):
        return f'StereotypeVote({self.author}, value={self.choice})'

class Inplace(StmtNode):
    """
    Complex assignment statement with inplace operator (e.g., x += 1).
    """

    tag: InplaceOpEnum
    lhs: Expr
    rhs: Expr
    op = alias("tag")

    @classmethod
    def _meta_sexpr_symbol_map(cls) -> dict:
        e = to_expr
        fn = lambda op, x, y: cls(op, e(x), e(y))
        sexprs = {op: partial(fn, op) for op in InplaceOpEnum}
        sexprs.update({op.value: partial(fn, op) for op in InplaceOpEnum})
        return sexprs

    def tokens(self, ctx):
        yield ctx.start_line()
        yield from self.lhs.tokens(ctx)
        yield f" {self.tag.value} "
        yield from self.rhs.tokens(ctx)

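# Standalone sketch of the symbol map produced by Inplace._meta_sexpr_symbol_map:
# both the enum member and its string value (e.g. "+=") map to a constructor
# that coerces the operands and builds the node. InplaceOpSketch, its values
# and the to_expr placeholder below are simplified assumptions, not the real
# library types.
from enum import Enum
from functools import partial


class InplaceOpSketch(Enum):
    ADD = "+="
    SUB = "-="


def to_expr(x):
    # Placeholder for the real expression coercion.
    return x


class InplaceSketch:
    def __init__(self, op, lhs, rhs):
        self.op, self.lhs, self.rhs = op, lhs, rhs

    @classmethod
    def symbol_map(cls) -> dict:
        fn = lambda op, x, y: cls(op, to_expr(x), to_expr(y))
        sexprs = {op: partial(fn, op) for op in InplaceOpSketch}
        sexprs.update({op.value: partial(fn, op) for op in InplaceOpSketch})
        return sexprs


node = InplaceSketch.symbol_map()["+="]("x", 1)
assert node.op is InplaceOpSketch.ADD and node.lhs == "x" and node.rhs == 1
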
class CrudeFR(ClinicalObserverModel):
    """
    Model in which infected can become hospitalized and suffer a constant
    hospitalization fatality rate.

    Attributes:
        prob_severe (float, alias: Qsv):
            Probability that regular cases become severe (cases that require
            hospitalization).
        prob_critical (float, alias: Qcr):
            Probability that regular cases become critical (require ICU).
        severe_period:
            Average duration of hospitalizations.
        critical_period:
            Average duration of ICU treatment.
        hospital_fatality_ratio (float, alias: HFR):
            Fraction of deaths among patients that require hospitalization.
        icu_fatality_ratio (float, alias: ICUFR):
            Fraction of deaths among patients that require ICU treatment.
    """

    params = clinical.DEFAULT

    # Primary parameters
    prob_severe: float = param_property(default=0.0)
    prob_critical: float = param_property(default=0.0)
    severe_period: float = param_property(default=0.0)
    critical_period: float = param_property(default=0.0)

    # Aliases
    Qsv: float = param_alias("prob_severe")
    Qcr: float = param_alias("prob_critical")

    # Properties
    hospital_fatality_ratio = sk.property(_.CFR / _.Qsv)
    HFR = sk.alias("hospital_fatality_ratio")

    icu_fatality_ratio = sk.property(_.CFR / _.Qcr)
    ICUFR = sk.alias("icu_fatality_ratio")

    prob_aggravate_to_icu = sk.property(_.Qcr / _.Qsv)

    # Cumulative series
    def get_data_deaths(self, idx):
        return self["cases", idx] * self.CFR

    def get_data_severe(self, idx):
        data = self["severe_cases"]
        K = max(self.K, 0)
        data = delayed_with_discharge(data, 0, self.severe_period, K, positive=True)
        return sliced(data, idx)

    def get_data_severe_cases(self, idx):
        return self["cases", idx] * self.Qsv

    def get_data_critical_cases(self, idx):
        return self["cases", idx] * self.Qcr

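# Worked example of the CrudeFR fatality properties with illustrative numbers:
# hospital_fatality_ratio is CFR / Qsv and icu_fatality_ratio is CFR / Qcr.
# With a 2% case fatality ratio, 5% severe cases and 2% critical cases, the
# model implies HFR = 0.4 and ICUFR = 1.0 (every critical case dies), which
# shows how sensitive this crude model is to the chosen probabilities.
CFR = 0.02   # case fatality ratio (assumed value)
Qsv = 0.05   # probability that a case becomes severe (assumed value)
Qcr = 0.02   # probability that a case becomes critical (assumed value)

HFR = CFR / Qsv
ICUFR = CFR / Qcr
assert abs(HFR - 0.4) < 1e-12 and abs(ICUFR - 1.0) < 1e-12
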
class ClinicalModel(Model, ABC):
    """
    Base class for clinical models that track the infection curve and model
    the clinical history of patients.
    """

    class Meta:
        model_name = "Clinical"
        data_aliases = {"H": "hospitalized", "D": "deaths"}
        plot_columns = ("hospitalized_cases", "hospitalized", "deaths")

    # Delegates (population parameters)
    parent_model = sk.alias("infection_model")
    population = sk.delegate_to("infection_model")
    K = sk.delegate_to("infection_model")
    R0 = sk.delegate_to("infection_model")
    age_distribution = sk.delegate_to("infection_model")
    age_pyramid = sk.delegate_to("infection_model")

    # Properties and aliases
    case_fatality_ratio = param_property(default=0.0)
    infection_fatality_ratio = param_property(default=lambda _: _.CFR)
    CFR = param_alias("case_fatality_ratio")
    IFR = param_alias("infection_fatality_ratio")

    @property
    def empirical_CFR(self):
        return self["empirical_CFR:final"]

    @property
    def empirical_IFR(self):
        return self["empirical_IFR:final"]

    @property
    def clinical_model(self):
        return self

    def __init__(self, infection_model, *args, **kwargs):
        self.infection_model = infection_model

        for k in ("disease", "disease_params", "region"):
            if k not in kwargs:
                kwargs[k] = getattr(infection_model, k)

        kwargs.setdefault("name", infection_model.name)
        super().__init__(*args, **kwargs)

    def __getattr__(self, item):
        try:
            return getattr(self.infection_model, item)
        except AttributeError:
            name = type(self).__name__
            raise AttributeError(f'"{name}" object has no "{item}" attribute')

    def copy(self, **kwargs):
        kwargs["infection_model"] = self.infection_model.copy()
        return super().copy(**kwargs)

    #
    # Data accessors
    #
    def get_column(self, name, idx):
        name = self.meta.data_aliases.get(name, name)
        try:
            return super().get_column(name, idx)
        except ValueError:
            return self.infection_model.get_column(name, idx)

    # Basic columns
    def get_data_population(self, idx):
        """
        Total population minus deaths.
        """
        return self.infection_model["population", idx] - self["deaths", idx]

    def get_data_infectious(self, idx):
        """
        Infectious population according to the infection model.

        This is usually the starting point of all clinical models.
        """
        return self.infection_model["infectious", idx]

    def get_data_cases(self, idx):
        """
        Cumulative curve of cases.

        A case is typically defined as an individual who got infected AND
        developed recognizable clinical symptoms.
        """
        return self.infection_model["cases", idx]

    def get_data_infected(self, idx):
        """
        Cumulative curve of infected individuals.

        Infected individuals might not develop clinical symptoms. They may
        never develop symptoms (asymptomatic) or develop them at a future
        time.
        """
        try:
            return self.infection_model["infected", idx]
        except KeyError:
            return self["cases", idx]

    # Derived methods
    def get_data_empirical_CFR(self, idx):
        """
        Empirical CFR computed as current deaths over cases.
        """
        return (self["deaths", idx] / self["cases", idx]).fillna(0.0)

    def get_data_empirical_IFR(self, idx):
        """
        Empirical IFR computed as current deaths over infected.
        """
        return (self["deaths", idx] / self["infected", idx]).fillna(0.0)

    # Abstract interface
    def get_data_death_rate(self, idx):
        """
        Daily number of deaths.
        """
        return self["deaths", idx].diff().fillna(0)

    def get_data_deaths(self, idx):
        """
        Cumulative curve of deaths.
        """
        raise NotImplementedError("must be implemented in sub-classes")

    def get_data_severe(self, idx):
        """
        Current number of severe cases.

        Severe cases usually require hospitalization, but have a low death
        risk.
        """
        raise NotImplementedError("must be implemented in sub-classes")

    def get_data_severe_cases(self, idx):
        """
        Cumulative number of severe cases.

        Severe cases usually require hospitalization, but have a low death
        risk.
        """
        raise NotImplementedError("must be implemented in sub-classes")

    def get_data_critical(self, idx):
        """
        Current number of critical cases.

        Critical cases require intensive care and are at a high risk of
        death.
        """
        raise NotImplementedError("must be implemented in sub-classes")

    def get_data_critical_cases(self, idx):
        """
        Cumulative number of critical cases.

        Critical cases require intensive care and are at a high risk of
        death.
        """
        raise NotImplementedError("must be implemented in sub-classes")

    def get_data_hospitalized(self, idx):
        """
        Cases currently occupying a hospital bed.

        In an ideal world, this would be equal to the number of severe cases.
        The default implementation assumes that.
        """
        return self["severe", idx]

    def get_data_hospitalized_cases(self, idx):
        """
        Cumulative number of hospitalizations.

        The default implementation assumes this is equal to the number of
        severe cases.
        """
        return self["severe_cases", idx]

    def get_data_icu(self, idx):
        """
        Number of ICU patients.

        In an ideal world, this would be equal to the number of critical
        cases. The default implementation assumes that.
        """
        return self["critical", idx]

    def get_data_icu_cases(self, idx):
        """
        Cumulative number of ICU patients.

        The default implementation assumes this is equal to the number of
        critical cases.
        """
        return self["critical_cases", idx]

    def get_data_ppe(self, idx):
        """
        Requirement of personal protective equipment, estimated from the
        cumulative sums of (severe patients x day) and (ICU patients x day).
        """
        severe = self["severe"]
        critical = self["critical"]
        severe_day = cumtrapz(severe, severe.index, initial=0)
        critical_day = cumtrapz(critical, critical.index, initial=0)
        out = self.disease.recommended_ppe(
            pd.Series(severe_day, index=severe.index),
            pd.Series(critical_day, index=critical.index),
        )
        if idx is None:
            return out
        return out[idx]

    #
    # Other functions
    #
    def plot(self, components=None, *, ax=None, logy=False, show=False, **kwargs):
        if components is None:
            self.infection_model.plot(**kwargs)
            components = self.meta.plot_columns
        super().plot(components, show=show, **kwargs)

class QuerySet(models.QuerySet):
    """
    Boogie's drop-in replacement for Django's query sets.

    It extends the query set API with a Pydata-inspired interface to select
    data.
    """

    # Manager utils
    id_dict = LazyMethod("manager_utils:ManagerUtilsMixin.id_dict")
    bulk_upsert = LazyMethod("manager_utils:ManagerUtilsMixin.bulk_upsert")
    sync = LazyMethod("manager_utils:ManagerUtilsMixin.sync")
    upsert = LazyMethod("manager_utils:ManagerUtilsMixin.upsert")
    get_or_none = LazyMethod("manager_utils:ManagerUtilsMixin.get_or_none")
    single = LazyMethod("manager_utils:ManagerUtilsMixin.single")

    # Bulk update from manager utils is very limited, so we use the
    # implementation in the django-bulk-update package.
    bulk_update = LazyMethod("bulk_update.manager:BulkUpdateManager.bulk_update")

    # Properties
    index = property(lambda self: Index(self))
    _selected_column_names = None

    # Class methods
    def as_manager(cls):
        # Overridden to use the Boogie manager instead of the default manager.
        from .manager import Manager

        manager = Manager.from_queryset(cls)()
        manager._built_with_as_manager = True
        return manager

    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)

    new = alias("model")

    def __getitem__(self, item):
        # 1D indexing is delegated to Django. We extend it with some idioms
        # that preserve backwards compatibility.
        #
        # We keep Django's behavior in all of the following cases (a, b >= 0):
        #   * qs[a]
        #   * qs[a:b]
        #
        # Small extensions:
        #   * qs[-a]   -> fetch from the last element
        #   * qs[0:-b] -> specify an index from first to last
        #
        # Larger extensions:
        #   * qs[set]  -> fetch elements in the given pk set
        #   * qs[list] -> fetch the specified elements by pk, in order
        return get_queryset_item(item, self)

    def __setitem__(self, item, value):
        # Similarly to __getitem__, we dispatch to the 1d and 2d indexing
        # functions.
        if not isinstance(item, tuple):
            return setitem_1d(self, item, value)
        try:
            row, col = item
        except IndexError:
            raise TypeError("only 1d or 2d indexing is allowed")
        return setitem_2d(self, row, col, value)

    def __getattr__(self, attr):
        value = get_queryset_attribute(self, attr)
        if value is NotImplemented:
            raise AttributeError(attr)
        return value

    def update_item(self, pk, **kwargs):
        """
        Updates a single item in the queryset.
        """
        return self.filter(pk=pk).update(**kwargs)

    update_item.alters_data = True

    #
    # Enhanced API
    #
    def select_columns(self, *fields):
        """
        Similar to .values_list(*fields).
        """
        return select_columns(self, list(fields))

    def annotate_verbose(self, **fields):
        """
        Annotate given fields with their verbose versions.
        """
        raise NotImplementedError

    def auto_annotate_verbose(self, *fields):
        raise NotImplementedError

    def values(self, *fields, verbose=False, **expressions):
        if verbose:
            return self.auto_annotate_verbose(fields)
        qs = super().values(*fields, **expressions)
        fields = fields + tuple(expressions.keys())
        return self._mark_column_names(fields, qs)

    def values_list(self, *fields, verbose=False, **kwargs):
        if verbose:
            return self.auto_annotate_verbose(fields)
        qs = super().values_list(*fields, **kwargs)
        return self._mark_column_names(fields, qs)

    def _filter_or_exclude(self, negate, *args, **kwargs):
        if args and getattr(args[0], "comparable_expression", False):
            expr, *args = args
            name = f"filter_{id(expr)}"
            kwargs[name] = True
            qs = self.annotate(**{name: expr})
            # noinspection PyProtectedMember
            return qs._filter_or_exclude(negate, *args, **kwargs)
        return super()._filter_or_exclude(negate, *args, **kwargs)

    def _mark_column_names(self, columns, qs=None):
        qs = self if qs is None else qs
        qs._selected_column_names = columns
        return qs

    #
    # Pandas data frame and numpy array APIs
    #
    def dataframe(self, *fields, index=None, verbose=False) -> "pd.DataFrame":  # noqa: F821
        """
        Convert query set to a Pandas data frame.

        If fields are given, it uses similar semantics to .values_list();
        otherwise, it uses the selected fields or the complete set of fields.

        Args:
            index:
                Name of the index column (defaults to the primary key).
            verbose (bool):
                If given, prints foreign keys and choices using human-readable
                names.
        """
        if not fields:
            if self._selected_column_names:
                fields = self._selected_column_names
            else:
                fields = [
                    f.name
                    for f in self.model._meta.fields
                    if index is None and not f.primary_key
                ]
        elif len(fields) == 1 and isinstance(fields[0], collections.Mapping):
            field_map = fields[0]
            df = self.dataframe(*field_map.values(), index=index, verbose=verbose)
            df.columns = field_map.keys()
            return df

        # Build data frame
        if index is None:
            index = self.model._meta.pk.name
        data = list(self.values_list(index, *fields, verbose=verbose))
        df = pd.DataFrame(data, columns=["__index__", *fields])
        df.index = df.pop("__index__")
        df.index.name = index
        return df

    def pivot_table(
        self, index, columns, values, verbose=False, dropna=False, fill_value=None
    ):
        """
        Creates a pivot table from this queryset.

        Args:
            index:
                Field used to define the pivot table indexes (row names).
            columns:
                Field used to populate the different columns.
            values:
                Field used to fill the table with values.
            dropna (bool):
                If True, exclude columns whose entries are all NaN.
            fill_value:
                Value to replace missing values with.
            verbose (bool):
                If given, prints foreign keys and choices using human-readable
                names.
        """
        df = self.dataframe(index, columns, values, verbose=verbose)
        if df.shape[0] == 0:
            dtype = float if fill_value is None else type(fill_value)
            df = pd.DataFrame(dtype=dtype, index=pd.Index([], dtype=int))
            df.index.name = index
            return df
        return df.pivot_table(
            index=index,
            columns=columns,
            values=values,
            dropna=dropna,
            fill_value=fill_value,
        )

    def update_from_dataframe(self, dataframe, batch_size=None, in_bulk=True):
        """
        Persist data frame data to the database. The data frame index must
        correspond to primary keys of existing objects.

        Args:
            dataframe:
                A pandas data frame.
            in_bulk (bool):
                If True (default), save values in bulk.
            batch_size:
                If saving in bulk, defines the size of each batch that touches
                the database. This avoids creating very long SQL commands that
                can halt the database for a perceptible amount of time.
        """
        objects = []
        fields = dataframe.columns
        add_object = objects.append
        new_object = self.model

        for pk, row in zip(dataframe.index, dataframe.to_dict("records")):
            row.setdefault("pk", pk)
            add_object(new_object(**row))

        if in_bulk:
            self.bulk_update(objects, update_fields=list(fields), batch_size=batch_size)
        else:
            for obj in objects:
                obj.save()

    def extend_dataframe(self, df, *fields, verbose=False) -> "pd.DataFrame":
        """
        Returns a copy of the data frame that includes columns computed from
        the given fields.
        """
        extra = (
            self.filter(pk__in=set(df.index))
            .distinct()
            .dataframe(*fields, verbose=verbose)
        )
        extra.index.name = df.index.name
        new = pd.DataFrame(df)
        for k, v in extra.items():
            new[k] = v
        return new

    #
    # Selecting parts of the dataframe
    #
    def head(self, n=5):
        """
        Select the first n rows in the query set.
        """
        return self[:n]

    def tail(self, n=5):
        """
        Select the last n rows in the query set.
        """
        return self[-n:]

    #
    # Transformations
    #
    def map(self, func, *args, **kwargs):
        """
        Map a function over each element returned by the queryset.
        """
        if args or kwargs:
            orig = func
            func = lambda x: orig(x, *args, **kwargs)

        clone = self.all()
        iter_cls = MapIterable.as_iterable_class(func, clone._iterable_class)
        clone._iterable_class = iter_cls
        return clone

    def annotate_attr(self, **kwargs):
        """
        Like Django's builtin annotate(), but instead of operating at the SQL
        level, it annotates the resulting Python objects.
        """

        def annotator(obj):
            for k, v in kwargs.items():
                if callable(v):
                    v = v(obj)
                setattr(obj, k, v)
            return obj

        return self.map(annotator)
