async def async_configure_service(gateway: DeconzGateway, data: MappingProxyType) -> None:
    """Set attribute of device in deCONZ.

    Entity is used to resolve to a device path (e.g. '/lights/1').
    Field is a string representing either a full path
    (e.g. '/lights/1/state') when entity is not specified, or a subpath
    (e.g. '/state') when used together with entity.
    Data is a json object with what data you want to alter
    e.g. data={'on': true}.

    {
        "field": "/lights/1/state",
        "data": {"on": true}
    }

    See Dresden Elektroniks REST API documentation for details:
    http://dresden-elektronik.github.io/deconz-rest-doc/rest/
    """
    field = data.get(SERVICE_FIELD, "")
    entity_id = data.get(SERVICE_ENTITY)
    payload = data[SERVICE_DATA]

    if entity_id:
        # Resolve the entity to its deCONZ device path; bail out when the
        # entity is unknown to this gateway.
        if entity_id not in gateway.deconz_ids:
            LOGGER.error("Could not find the entity %s", entity_id)
            return
        field = gateway.deconz_ids[entity_id] + field

    await gateway.api.request("put", field, json=payload)
def main():
    """Demonstrate that MappingProxyType is a live, read-only view."""
    backing = {1: 'AA'}
    view = MappingProxyType(backing)
    print(view[1])
    print(view.get(2, 'NULL'))
    # Changes to the backing dict are immediately visible via the proxy.
    backing[2] = 'BB'
    print(view.get(2, 'NULL'))
class RewriteVisitor(IdentityVisitor):
    """Visitor that substitutes expression/type variables and resolves values.

    Instances are immutable: the ``with_*``/``without_*`` helpers return a
    fresh visitor with the extended bindings instead of mutating this one.
    """

    def __init__(self, store: 'PackageStore', always_expand: bool = False,
                 val_blacklist: 'Optional[Collection[ValName]]' = None,
                 expr_args: 'Optional[Mapping[str, Expr]]' = None,
                 type_args: 'Optional[Mapping[str, Type]]' = None):
        self.store = store
        self.always_expand = always_expand
        if val_blacklist is None:
            self.val_blacklist = ()
        else:
            self.val_blacklist = frozenset(val_blacklist)
        # Read-only views so the bindings cannot be mutated after creation.
        self.expr_args = MappingProxyType(expr_args or {})  # type: Mapping[str, Expr]
        self.type_args = MappingProxyType(type_args or {})  # type: Mapping[str, Type]

    def with_expr_var(self, var: str, value: 'Expr') -> 'RewriteVisitor':
        # Single-variable convenience wrapper around with_expr_vars.
        return self.with_expr_vars({var: value})

    def with_expr_vars(self, var_values: 'Mapping[str, Expr]') -> 'RewriteVisitor':
        merged = dict(self.expr_args)
        merged.update(var_values)
        return type(self)(self.store, self.always_expand, self.val_blacklist,
                          merged, self.type_args)

    def with_type_vars(self, type_values: 'Mapping[str, Type]') -> 'RewriteVisitor':
        merged = dict(self.type_args)
        merged.update(type_values)
        return type(self)(self.store, self.always_expand, self.val_blacklist,
                          self.expr_args, merged)

    def without_val(self, val: 'ValName') -> 'RewriteVisitor':
        blocked = {*self.val_blacklist, val}
        return type(self)(self.store, self.always_expand, blocked,
                          self.expr_args, self.type_args)

    def resolve_val(self, val: 'ValName') -> 'Optional[Expr]':
        # Blacklisted values deliberately resolve to nothing.
        if val in self.val_blacklist:
            return None
        return self.store.resolve_value_reference(val)

    def resolve_type_arg(self, type_arg: str) -> 'Optional[Type]':
        return self.type_args.get(type_arg)

    def visit_expr_var(self, var: str) -> 'Expr':
        replacement = self.expr_args.get(var)
        if replacement is None:
            return Expr(var=var)
        return replacement

    def visit_type_var(self, var: 'Type.Var') -> 'Type':
        base_type = self.resolve_type_arg(var.var)
        if var.args:
            raise ValueError(
                'higher kinded types are not yet currently supported')
        return base_type if base_type is not None else Type(var=var)
def log_extra(context: MappingProxyType, **extra_fields):
    """Build a logging ``extra`` dict from the request context.

    Ad-hoc keyword fields override the context-derived ones.
    """
    infos = {
        'request_uuid': context.get(CTX_REQUEST_ID),
        'user': context.get(CTX_ADMIN),
        'testing': str(context.get(CTX_TESTING) or False),
        'url': context.get(CTX_REQUEST_URL),
        'roles': context.get(CTX_ROLES),
    }
    infos.update(extra_fields)
    return {'extra': infos}
class Existentialist():
    """
    Describes the requirement of the existance of an argument.
    {
        "default": <object>,
        "required": <bool>,
    }
    """

    def __init__(self, defdict):
        # Read-only view so the definition cannot be mutated after parsing.
        self._defdict = MappingProxyType(defdict)
        self.keyword = defdict.get('keyword')
        self.required = bool(defdict.get('required'))
        self.default = defdict.get('default')
        # Compare against None (not truthiness) so falsy defaults such as
        # 0, '' or False are also reported as conflicting with 'required'.
        # Also fixes the missing space in the original message
        # ("making itineffective").
        if self.default is not None and self.required:
            logging.warning(("A default value is set for parameter '%s' "
                             "while 'required' is set to True, making it "
                             "ineffective."), self.keyword)

    def inquire(self, obj):
        """Return *obj*, or the default when *obj* is None.

        Raises OptionError when the argument is required but missing.
        """
        if obj is None:
            if self.required:
                raise OptionError(
                    missing=self.keyword,
                    keyword=None,  # empty this field
                    alias=self._defdict.get('alias'))
            obj = self.default
        return obj
def _add_extra_blanks_due_to_next_statement(
    blank_lines: FormattedLines,
    next_statement_name: str,
    surrounding_empty_lines_table: MappingProxyType,
) -> FormattedLines:
    """Ensure the configured number of empty lines precede the next statement.

    ``surrounding_empty_lines_table`` maps a statement name to the number of
    empty lines that must surround it; names absent from the table force
    nothing and ``blank_lines`` is returned unchanged.
    """
    # assumption: there is no sequence of empty lines longer than 2 (in blank lines)
    forced_blanks_num = surrounding_empty_lines_table.get(next_statement_name)
    if forced_blanks_num is None:
        return blank_lines
    first_empty_line_ix_from_end = _find_first_empty_line_ix_from_end(blank_lines)
    # Count the trailing empty lines already present (at most two are
    # checked, per the assumption above) so only the shortfall is injected.
    empty_lines_already_in_place = 1 if first_empty_line_ix_from_end > -1 else 0
    empty_lines_already_in_place += (
        1
        if first_empty_line_ix_from_end > 0
        and blank_lines[first_empty_line_ix_from_end - 1][1] == ""
        else 0
    )
    lines_to_inject = forced_blanks_num
    lines_to_inject -= empty_lines_already_in_place
    empty_line = [(None, "")]  # type: FormattedLines
    if first_empty_line_ix_from_end == -1:
        # No trailing empty run: prepend all forced blanks.
        return lines_to_inject * empty_line + blank_lines
    # Inject the missing blanks just before the existing trailing empty run.
    return (
        blank_lines[:first_empty_line_ix_from_end]
        + lines_to_inject * empty_line
        + blank_lines[first_empty_line_ix_from_end:]
    )
def log_extra(context: MappingProxyType, **extra_fields):
    """Build a logging ``extra`` dict from the request context.

    Ad-hoc keyword fields override the context-derived ones.
    """
    # Look the admin up once; it may legitimately be absent.
    admin = context.get(CTX_ADMIN)
    admin_login = admin.login if admin else None
    infos = {
        'request_uuid': context.get(CTX_REQUEST_ID),
        'admin': admin_login,
        'testing': str(context.get(CTX_TESTING) or False),
    }
    infos.update(extra_fields)
    return {'extra': infos}
def _generate_settings_xml(runcfg: MappingProxyType, settingsfile: Path,
                           forcerebuildscenarios=False):
    '''
    Generate SUMO's settings configuration file.

    :param runcfg: Run configuration
    :param settingsfile: Destination to write settings file
    :param forcerebuildscenarios: Rebuild scenarios, even if they already
        exist for current run
    '''
    # Keep an existing file unless a rebuild is forced.
    if Path(settingsfile).exists() and not forcerebuildscenarios:
        return

    root = etree.Element('viewsettings')
    etree.SubElement(root, 'viewport',
                     attrib={'x': '0', 'y': '0', 'zoom': '100'})
    gui_delay = str(runcfg.get('sumo').get('gui-delay'))
    etree.SubElement(root, 'delay', attrib={'value': gui_delay})

    with open(settingsfile, 'w') as f_configxml:
        f_configxml.write(
            defusedxml.lxml.tostring(root, pretty_print=True,
                                     encoding='unicode'))
class Validator():
    """
    Describes the requirement of the existance of an argument.
    {
        "enum": <container>,
        "max": <int>
    }
    """

    def __init__(self, defdict):
        # Read-only view so the definition cannot be mutated after parsing.
        self._defdict = MappingProxyType(defdict)
        self.keyword = defdict.get('keyword')
        self.strict = defdict.get('strict', True)
        self.enum = defdict.get('enum', ())
        self.max = defdict.get('max')
        assert isinstance(self.enum, abc.Container)
        assert isinstance(self.max, (int, type(None)))

    def validate(self, obj):
        """Validate *obj* against the enum and max constraints.

        Raises OptionError on violation; returns *obj* unchanged otherwise.
        """
        if self.enum and not self._in_enum(obj):
            raise OptionError(keyword=self.keyword,
                              allowed=self.enum,
                              alias=self._defdict.get('alias'))
        # Compare against None explicitly: the previous truthiness test
        # silently skipped the check when max == 0.
        if self.max is not None:
            if isinstance(obj, (list, tuple, set)):
                self._check_list_max(obj)
            elif isinstance(obj, (int, float, complex)):
                self._check_num_max(obj)
        return obj

    def _in_enum(self, value):
        # Containers are validated element-wise (recursively).
        if isinstance(value, (list, tuple, set)):
            return all(self._in_enum(val) for val in value)
        return value in self.enum

    def _check_list_max(self, container):
        if len(container) > self.max:
            raise OptionError(keyword=self.keyword,
                              max=self.max,
                              size=len(container))

    def _check_num_max(self, num):
        # bool is an int subclass; True/False are never range-limited.
        if isinstance(num, bool):
            return
        if num > self.max:
            raise OptionError(keyword=self.keyword, max=self.max, num=num)
def manual_run_all(
    controller: IUController, data: MappingProxyType, time: datetime
) -> None:
    """Trigger a manual run on the zones selected in *data*.

    When *data* carries no zone list, every zone of the controller runs.
    """
    selected = data.get(CONF_ZONES, None)
    for zone in controller.zones:
        # Zone selections in the service data are 1-based.
        if selected is None or zone.zone_index + 1 in selected:
            zone.service_manual_run(data, time)
def _add_extra_blanks_due_to_previous_statement(
    blank_lines: FormattedLines,
    previous_statement_name: str,
    surrounding_empty_lines_table: MappingProxyType,
) -> FormattedLines:
    """Prepend the empty lines required after the previous statement.

    Names absent from ``surrounding_empty_lines_table`` force nothing and
    ``blank_lines`` is returned unchanged.
    """
    # assumption: there is no sequence of empty lines longer than 1 (in blank lines)
    forced_blanks_num = surrounding_empty_lines_table.get(previous_statement_name)
    if forced_blanks_num is None:
        return blank_lines
    # One leading empty line may already be present; inject only the shortfall.
    already_blank = bool(blank_lines) and blank_lines[0][1] == ""
    lines_to_prepend = forced_blanks_num - (1 if already_blank else 0)
    empty_line = [(None, "")]  # type: FormattedLines
    return lines_to_prepend * empty_line + blank_lines
def make_crud_args(args: argparse.Namespace,
                   presets: MappingProxyType = MappingProxyType({})):
    """Merge a named access preset with CLI-supplied S3 options.

    Recognised, non-None CLI options override the preset values.

    :param args: parsed CLI namespace; ``args.access_preset`` selects a preset
    :param presets: read-only mapping of preset name -> option dict
    :return: a new dict of options (inputs are never mutated)
    """
    # Copy the preset before updating: the original code updated the preset
    # dict in place, leaking CLI overrides into later calls that share the
    # same presets mapping.
    res = dict(presets.get(args.access_preset, {}))
    relevant_cli_arguments = {
        k: v for k, v in vars(args).items()
        if k in ('aws_access_key_id', 'bucket_name', 'endpoint_url',
                 'aws_secret_access_key') and v is not None
    }
    res.update(relevant_cli_arguments)
    return res
def format(self, record: logging.LogRecord) -> str:
    """Render *record* as a JSON log line.

    Well-known attributes are renamed/coerced via ``FIELD_MAPPING``; every
    other public, non-None record attribute is copied verbatim; positional
    ``args`` are expanded into ``argument_<i>`` entries. The result is
    serialized with ``self.dumps``.
    """
    # Read-only snapshot of the record's attribute dict.
    record_dict = MappingProxyType(record.__dict__)
    data = dict(errno=0 if not record.exc_info else 255, )  # type: t.Dict[str, t.Any]
    for key, value in self.FIELD_MAPPING.items():
        mapping, field_type = value
        v = record_dict.get(key)
        # Coerce to the declared field type before emitting.
        if not isinstance(v, field_type):
            v = field_type(v)
        data[mapping] = v
    for key in record_dict:
        if key in data:
            continue
        elif key in self.FIELD_MAPPING:
            # Already emitted above under its mapped name.
            continue
        elif key[0] == "_":
            # Skip private attributes.
            continue
        record_value = record_dict[key]  # type: t.Any
        if record_value is None:
            continue
        data[key] = record_value
    # Positional message arguments become individual indexed fields.
    args = data.pop("args", [])  # type: t.List[t.Any]
    for idx, item in enumerate(args):
        data["argument_%d" % idx] = str(item)
    payload = {
        "@fields": data,
        "msg": self.formatMessage(record),
        "level": self.LEVELS[record.levelno],
    }  # type: JSONObjType
    if self.datefmt:
        payload["@timestamp"] = self.formatTime(record, self.datefmt)
    if record.exc_info:
        payload["stackTrace"] = self.formatException(record.exc_info)
    return self.dumps(payload)  # type: ignore
def make_crud_args(args: argparse.Namespace,
                   presets: MappingProxyType = MappingProxyType({})):
    """Merge a named access preset with CLI-supplied S3 options.

    Recognised, non-None CLI options override the preset values.

    :param args: parsed CLI namespace; ``args.access_preset`` selects a preset
    :param presets: read-only mapping of preset name -> option dict
    :return: a new dict of options (inputs are never mutated)
    """
    # Copy the preset before updating: updating presets.get(...)'s return
    # value in place mutates the dict stored inside the presets mapping,
    # leaking CLI overrides into later calls.
    res = dict(presets.get(args.access_preset, {}))
    relevant_cli_arguments = {
        k: v
        for k, v in vars(args).items()
        if k in ('aws_access_key_id', 'bucket_name', 'endpoint_url',
                 'aws_secret_access_key') and v is not None
    }
    res.update(relevant_cli_arguments)
    return res
def format(self, record):
    """Render *record* as a JSON log line (ujson backend)."""
    # Read-only snapshot of the record's attribute dict.
    record_dict = MappingProxyType(record.__dict__)
    data = dict(errno=0 if not record.exc_info else 255)
    for key, value in self.FIELD_MAPPING.items():
        mapping, field_type = value
        v = record_dict.get(key)
        # Coerce to the declared field type before emitting.
        if not isinstance(v, field_type):
            v = field_type(v)
        data[mapping] = v
    # NOTE(review): unlike the sibling formatter, keys present in
    # FIELD_MAPPING are NOT skipped here, so a mapped-and-renamed field may
    # also be copied under its original name — confirm this is intended.
    for key in record_dict:
        if key in data:
            continue
        elif key[0] == "_":
            # Skip private attributes.
            continue
        value = record_dict[key]
        if value is None:
            continue
        data[key] = value
    # Positional message arguments become individual indexed fields.
    for idx, item in enumerate(data.pop('args', [])):
        data['argument_%d' % idx] = str(item)
    payload = {
        '@fields': data,
        'msg': record.getMessage(),
        'level': self.LEVELS[record.levelno]
    }
    if record.exc_info:
        payload['stackTrace'] = "\n".join(
            traceback.format_exception(*record.exc_info))
    json_string = ujson.dumps(
        payload,
        ensure_ascii=False,
        escape_forward_slashes=False,
    )
    return json_string
def format(self, record: logging.LogRecord):
    """Render *record* as a JSON log line (fast_json backend)."""
    # Read-only snapshot of the record's attribute dict.
    record_dict = MappingProxyType(record.__dict__)
    data = dict(errno=0 if not record.exc_info else 255)
    for key, value in self.FIELD_MAPPING.items():
        mapping, field_type = value
        v = record_dict.get(key)
        # Coerce to the declared field type before emitting.
        if not isinstance(v, field_type):
            v = field_type(v)
        data[mapping] = v
    for key in record_dict:
        # Skip already-emitted and private attributes.
        if key in data or key[0] == "_":
            continue
        value = record_dict[key]
        if value is None:
            continue
        data[key] = value
    # Positional message arguments become individual indexed fields.
    for idx, item in enumerate(data.pop('args', [])):
        data['argument_%d' % idx] = str(item)
    payload = {
        'fields': data,
        'msg': record.getMessage(),
        'level': self.LEVELS[record.levelno],
    }
    if isinstance(record.msg, dict):
        # Dict messages are emitted as structured payloads instead of text.
        data['message_raw'] = ''
        payload['msg'] = record.msg
    if self.datefmt:
        payload['timestamp'] = self.formatTime(record, self.datefmt)
    if record.exc_info:
        payload['stackTrace'] = "\n".join(
            traceback.format_exception(*record.exc_info))
    return fast_json.dumps(payload, ensure_ascii=False)
class InstanceAttrsProxy:
    """Mapping-like view over a fixed attrs dict, falling back to attributes
    of a wrapped instance for missing keys."""

    def __init__(self, attrs, instance):
        # Read-only view: the proxy never mutates the attrs dict.
        self.attrs = MappingProxyType(attrs)
        self.instance = instance

    def __getitem__(self, item):
        try:
            return self.attrs[item]
        except KeyError:
            # Fall back to the instance attribute of the same name.
            return getattr(self.instance, item)

    def get(self, k, default=None):
        """Return attrs[k], else the instance attribute, else *default*.

        Bug fix: the original called ``self.attrs.get(k)``, which never
        raises KeyError, so the instance fallback and *default* were
        unreachable and missing keys always yielded None.
        """
        try:
            return self.attrs[k]
        except KeyError:
            return getattr(self.instance, k, default)

    def keys(self):
        return self.attrs.keys()
def load(self, data: MappingProxyType):
    """Read the adjustment configuration"""
    # First matching key wins; percentage values are plain numbers while the
    # time-based methods are washed into clean timedeltas. CONF_RESET clears
    # any previous adjustment.
    for key in (CONF_ACTUAL, CONF_PERCENTAGE, CONF_INCREASE,
                CONF_DECREASE, CONF_RESET):
        if key not in data:
            continue
        if key == CONF_RESET:
            self._method = None
            self._time_adjustment = None
        elif key == CONF_PERCENTAGE:
            self._method = key
            self._time_adjustment = data.get(key)
        else:
            self._method = key
            self._time_adjustment = wash_td(data.get(key))
        break
    self._minimum = wash_td(data.get(CONF_MINIMUM, None))
    if self._minimum is not None:
        # Set floor
        self._minimum = max(self._minimum, granularity_time())
    self._maximum = wash_td(data.get(CONF_MAXIMUM, None))
    return self
class State(object):
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    """

    __slots__ = [
        'entity_id', 'state', 'attributes', 'last_changed', 'last_updated'
    ]

    # pylint: disable=too-many-arguments
    def __init__(self, entity_id, state, attributes=None, last_changed=None,
                 last_updated=None):
        """Initialize a new state."""
        if not valid_entity_id(entity_id):
            raise InvalidEntityFormatError(
                ("Invalid entity id encountered: {}. "
                 "Format should be <domain>.<object_id>").format(entity_id))

        self.entity_id = entity_id.lower()
        self.state = str(state)
        # Read-only view: State objects are shared and must stay immutable.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = dt_util.strip_microseconds(last_updated or
                                                       dt_util.utcnow())

        # Strip microsecond from last_changed else we cannot guarantee
        # state == State.from_dict(state.as_dict())
        # This behavior occurs because to_dict uses datetime_to_str
        # which does not preserve microseconds
        self.last_changed = dt_util.strip_microseconds(last_changed or
                                                       self.last_updated)

    @property
    def domain(self):
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self):
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self):
        """Name of this state."""
        return (self.attributes.get(ATTR_FRIENDLY_NAME) or
                self.object_id.replace('_', ' '))

    def as_dict(self):
        """Return a dict representation of the State.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {
            'entity_id': self.entity_id,
            'state': self.state,
            'attributes': dict(self.attributes),
            'last_changed': dt_util.datetime_to_str(self.last_changed),
            'last_updated': dt_util.datetime_to_str(self.last_updated)
        }

    @classmethod
    def from_dict(cls, json_dict):
        """Initialize a state from a dict.

        Ensures: state == State.from_json_dict(state.to_json_dict())
        """
        if not (json_dict and 'entity_id' in json_dict and
                'state' in json_dict):
            return None

        last_changed = json_dict.get('last_changed')

        if last_changed:
            last_changed = dt_util.str_to_datetime(last_changed)

        last_updated = json_dict.get('last_updated')

        if last_updated:
            last_updated = dt_util.str_to_datetime(last_updated)

        return cls(json_dict['entity_id'], json_dict['state'],
                   json_dict.get('attributes'), last_changed, last_updated)

    def __eq__(self, other):
        """Return the comparison of the state."""
        return (self.__class__ == other.__class__ and
                self.entity_id == other.entity_id and
                self.state == other.state and
                self.attributes == other.attributes)

    def __repr__(self):
        """Return the representation of the states."""
        attr = "; {}".format(util.repr_helper(self.attributes)) \
            if self.attributes else ""

        return "<state {}={}{} @ {}>".format(
            self.entity_id, self.state, attr,
            dt_util.datetime_to_local_str(self.last_changed))
class State(object):
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    """

    __slots__ = ['entity_id', 'state', 'attributes',
                 'last_changed', 'last_updated']

    def __init__(self, entity_id, state, attributes=None, last_changed=None,
                 last_updated=None):
        """Initialize a new state."""
        if not valid_entity_id(entity_id):
            raise InvalidEntityFormatError((
                "Invalid entity id encountered: {}. "
                "Format should be <domain>.<object_id>").format(entity_id))

        self.entity_id = entity_id.lower()
        self.state = str(state)
        # Read-only view: State objects are shared and must stay immutable.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated or dt_util.utcnow()
        self.last_changed = last_changed or self.last_updated

    @property
    def domain(self):
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self):
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self):
        """Name of this state."""
        return (
            self.attributes.get(ATTR_FRIENDLY_NAME) or
            self.object_id.replace('_', ' '))

    def as_dict(self):
        """Return a dict representation of the State.

        Async friendly.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {'entity_id': self.entity_id,
                'state': self.state,
                'attributes': dict(self.attributes),
                'last_changed': self.last_changed,
                'last_updated': self.last_updated}

    @classmethod
    def from_dict(cls, json_dict):
        """Initialize a state from a dict.

        Async friendly.

        Ensures: state == State.from_json_dict(state.to_json_dict())
        """
        if not (json_dict and 'entity_id' in json_dict and
                'state' in json_dict):
            return None

        last_changed = json_dict.get('last_changed')

        # Timestamps may arrive as ISO strings (e.g. from JSON) or as
        # already-parsed datetimes.
        if isinstance(last_changed, str):
            last_changed = dt_util.parse_datetime(last_changed)

        last_updated = json_dict.get('last_updated')

        if isinstance(last_updated, str):
            last_updated = dt_util.parse_datetime(last_updated)

        return cls(json_dict['entity_id'], json_dict['state'],
                   json_dict.get('attributes'), last_changed, last_updated)

    def __eq__(self, other):
        """Return the comparison of the state."""
        return (self.__class__ == other.__class__ and
                self.entity_id == other.entity_id and
                self.state == other.state and
                self.attributes == other.attributes)

    def __repr__(self):
        """Return the representation of the states."""
        attr = "; {}".format(util.repr_helper(self.attributes)) \
            if self.attributes else ""

        return "<state {}={}{} @ {}>".format(
            self.entity_id, self.state, attr,
            dt_util.as_local(self.last_changed).isoformat())
class State:
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    context: Context in which it was created
    """

    __slots__ = [
        'entity_id', 'state', 'attributes', 'last_changed', 'last_updated',
        'context'
    ]

    def __init__(self, entity_id: str, state: Any,
                 attributes: Optional[Dict] = None,
                 last_changed: Optional[datetime.datetime] = None,
                 last_updated: Optional[datetime.datetime] = None,
                 context: Optional[Context] = None) -> None:
        """Initialize a new state."""
        state = str(state)

        if not valid_entity_id(entity_id):
            raise InvalidEntityFormatError(
                ("Invalid entity id encountered: {}. "
                 "Format should be <domain>.<object_id>").format(entity_id))

        if not valid_state(state):
            raise InvalidStateError(
                ("Invalid state encountered for entity id: {}. "
                 "State max length is 255 characters.").format(entity_id))

        self.entity_id = entity_id.lower()
        self.state = state
        # Read-only view: State objects are shared and must stay immutable.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated or dt_util.utcnow()
        self.last_changed = last_changed or self.last_updated
        self.context = context or Context()

    @property
    def domain(self) -> str:
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self) -> str:
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self) -> str:
        """Name of this state."""
        return (self.attributes.get(ATTR_FRIENDLY_NAME) or
                self.object_id.replace('_', ' '))

    def as_dict(self) -> Dict:
        """Return a dict representation of the State.

        Async friendly.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {
            'entity_id': self.entity_id,
            'state': self.state,
            'attributes': dict(self.attributes),
            'last_changed': self.last_changed,
            'last_updated': self.last_updated,
            'context': self.context.as_dict()
        }

    @classmethod
    def from_dict(cls, json_dict: Dict) -> Any:
        """Initialize a state from a dict.

        Async friendly.

        Ensures: state == State.from_json_dict(state.to_json_dict())
        """
        if not (json_dict and 'entity_id' in json_dict and
                'state' in json_dict):
            return None

        last_changed = json_dict.get('last_changed')

        # Timestamps may arrive as ISO strings (e.g. from JSON) or as
        # already-parsed datetimes.
        if isinstance(last_changed, str):
            last_changed = dt_util.parse_datetime(last_changed)

        last_updated = json_dict.get('last_updated')

        if isinstance(last_updated, str):
            last_updated = dt_util.parse_datetime(last_updated)

        context = json_dict.get('context')
        if context:
            context = Context(**context)

        return cls(json_dict['entity_id'], json_dict['state'],
                   json_dict.get('attributes'),
                   last_changed, last_updated, context)

    def __eq__(self, other: Any) -> bool:
        """Return the comparison of the state."""
        return (self.__class__ == other.__class__ and  # type: ignore
                self.entity_id == other.entity_id and
                self.state == other.state and
                self.attributes == other.attributes and
                self.context == other.context)

    def __repr__(self) -> str:
        """Return the representation of the states."""
        attrs = "; {}".format(util.repr_helper(self.attributes)) \
            if self.attributes else ""

        return "<state {}={}{} @ {}>".format(
            self.entity_id, self.state, attrs,
            dt_util.as_local(self.last_changed).isoformat())
class DRV(object): """ A discrete random variable. A DRV has one or more :dfn:`possible values` (or just :dfn:`values`), which can be any type. Each possible value has an associated :dfn:`probability`, which is a real number between 0 and 1. It is strongly recommended that the probabilities add up to exactly 1. This might be difficult to achieve with :obj:`float` probabilities, and so this class does not enforce that restriction, and makes it possible to sample a variable even if the total is not 1. The exact distribution of the samples in that case is not specified, only that it will attempt to follow the probabilities given. Loosely: if the total is too low then one value's probability is rounded up. If the total is too high, then one probability is rounded down, and/or one or more values is ignored. These adjustments apply only to sampling: the original probabilities are still reported by :func:`to_dict()` etc. Because :code:`==` is overridden to return a DRV (not a boolean), DRV objects are not hashable and cannot be used in a set or as a dictionary key, even though the objects are immutable. This means you cannot have a DRV as a "possible value" of another DRV. DRV also resists being considered in boolean context, so for example you cannot in general test whether or not a DRV appears in a list:: >>> from omnidice.dice import d3, d6 >>> d3 in [d3, d6] True >>> d6 in [d3, d6] Traceback (most recent call last): File "<stdin>", line 1, in <module> File "omnidice/drv.py", line 452, in __bool__ raise ValueError('The truth value of a random variable is ambiguous') ValueError: The truth value of a random variable is ambiguous This is the same solution used by (for example) :obj:`numpy.array`. If the object allowed standard boolean conversion then :code:`d4 in [d3, d6]` would be True, which is unacceptably surprising! :param distribution: Any value from which a dictionary can be constructed, that is a :obj:`Mapping` or :obj:`Iterable` of (value, probability) pairs. 
:param tree: The expression from which this object was defined. Currently this is used only for the string representation, but might in future help support lazily-evaluated DRVs. """ def __init__( self, distribution: 'DictData', *, tree: ExpressionTree = None, ): self.__dist = MappingProxyType(dict(distribution)) # Cumulative distribution. Defer calculating this, because we only # need it if the variable is actually sampled. Intermediate values in # building up a complex DRV won't ever be sampled, so save the work. self.__cdf = None self.__lcm = None self.__intvalued = None self.__expr_tree = tree # Computed probabilities can hit 0 due to float underflow, but maybe # we should strip out anything with probability 0. if not all(0 <= prob <= 1 for value, prob in self._items()): raise ValueError('Probability not in range') def __repr__(self): if self.__expr_tree is not None: return self.__expr_tree.bracketed() return f'DRV({self.__dist})' def is_same(self, other: 'DRV') -> bool: """ Return True if `self` and `other` have the same discrete probability distribution. Possible values with 0 probability are excluded from the comparison. """ values = set(value for value, prob in self._items() if prob != 0) othervalues = set(value for value, prob in other._items() if prob != 0) if values != othervalues: return False return all(self.__dist[val] == other.__dist[val] for val in values) def is_close(self, other: 'DRV', *, rel_tol=None, abs_tol=None) -> bool: """ Return True if `self` and `other` have approximately the same discrete probability distribution, within the specified tolerances. Possible values with 0 probability are excluded from the comparison. `rel_tol` and `abs_tol` are applied only to the probabilities, not to the possible values. They are defined as for :func:`math.isclose`. 
""" values = set(value for value, prob in self._items() if prob != 0) othervalues = set(value for value, prob in other._items() if prob != 0) if values != othervalues: return False kwargs = {} if rel_tol is not None: kwargs['rel_tol'] = rel_tol if abs_tol is not None: kwargs['abs_tol'] = abs_tol return all( isclose(self.__dist[val], other.__dist[val], **kwargs) for val in values ) def to_dict(self) -> Dict[Any, 'Probability']: """ Return a dictionary mapping all possible values to probabilities. """ # dict(self.__dist) is type-correct, but about 3 times slower. # Unfortunately there's no way to parameterise MappingProxyType to # say what the type is of the underlying mapping that gets copied. return self.__dist.copy() # type: ignore def to_pd(self): """ Return a :class:`pandas.Series` mapping values to probabilities. The series is indexed by the possible values. :raises: :class:`ModuleNotFoundError` if pandas is not installed. Note that pandas is not a hard dependency of this package. You must install it to use this method. """ try: import pandas as pd except ModuleNotFoundError: msg = 'You must install pandas for this optional feature' raise ModuleNotFoundError(msg) return pd.Series(self.__dist, name='probability') def to_table(self, as_float: bool = False) -> str: """ Return a string containing the values and probabilities formatted as a table. This is intended only for manually checking small distributions. :param as_float: Display probabilites as floating-point. You might find floats easier to read by eye. """ if not as_float: items = self._items() else: items = ((v, float(p)) for v, p in self._items()) with contextlib.suppress(TypeError): items = sorted(items) return '\n'.join([ 'value\tprobability', *(f'{v}\t{p}' for v, p in items), ]) def faster(self) -> 'DRV': """ Return a new DRV, with all probabilities converted to float. 
""" return DRV( {x: float(y) for x, y in self._items()}, tree=self._combine_post('.faster()'), ) def _items(self): return self.__dist.items() def replace_tree(self, tree: ExpressionTree) -> 'DRV': """ Return a new DRV with the same distribution as this DRV, but defined from the specified expression. This is used for example when some optimisation has computed a DRV one way, but we want to represent it the original way. """ return DRV(self.__dist, tree=tree) @property def cdf(self): if self.__cdf is None: def iter_totals(): total = 0 for value, probability in self._items(): total += probability yield value, total # In case of rounding errors if total < 1: yield value, 1 self.__cdf_values, self.__cdf = map(tuple, zip(*iter_totals())) return self.__cdf @property def _lcm(self): def lcm(a, b): return (a * b) // gcd(a, b) if self.__lcm is None: result = 1 for _, prob in self._items(): if not isinstance(prob, Fraction): result = 0 break result = lcm(prob.denominator, result) self.__lcm = result return self.__lcm def sample(self, random: Random = rng): """ Sample this variable. :param random: Random number generator to use. The default is a single object shared by all instances of :class:`DRV`. :returns: One possible value of this variable. """ sample: Probability if self._lcm == 0: sample = random.random() else: sample = Fraction(random.randrange(self._lcm) + 1, self._lcm) # The index of the first cumulative probability greater than or equal # to our random sample. If there's a repeated probability in the array, # that means there was a value with probability 0. So we don't want to # select that value even in the very unlikely case of our sample being # exactly equal to the repeated probability! 
        # NOTE(review): tail of a method whose `def` line lies above this
        # chunk — appears to be an inverse-CDF sample lookup (bisect into the
        # cumulative distribution, then map the index back to a value).
        # Verify against the full file.
        idx = bisect_left(self.cdf, sample)
        return self.__cdf_values[idx]

    @property
    def _intvalued(self):
        # Lazily computed, cached flag: True iff every possible value is an
        # int. Gates the integer-convolution fast path in __add__.
        if self.__intvalued is None:
            self.__intvalued = all(isinstance(x, int) for x in self.__dist)
        return self.__intvalued

    def __add__(self, right) -> 'DRV':
        """
        Handler for :code:`self + right`.

        Return a random variable which is the result of adding this variable
        to `right`. `right` can be either a constant or another DRV (in which
        case the result assumes that the two random variables are
        independent).

        As with :meth:`apply()`, probabilities are added up wherever addition
        is many-to-one (for constant numbers it is one-to-one provided
        overflow does not occur).
        """
        # The `while` is used as a breakable block: any failed precondition
        # for the numpy fast path jumps to the generic fallback via `break`.
        while CONVOLVE_OPTIMISATION:
            if np is None:
                break
            if not isinstance(right, DRV):
                break
            product_size = len(self.__dist) * len(right.__dist)
            if product_size <= CONVOLVE_SIZE_LIMIT:
                break
            if not self._intvalued or not right._intvalued:
                break

            def get_range(dist):
                return range(min(dist), max(dist) + 1)
            self_values = get_range(self.__dist)
            right_values = get_range(right.__dist)
            # Very sparse arrays aren't faster to convolve.
            if 100 * product_size <= len(self_values) * len(right_values):
                break
            # Dense probability vectors over contiguous integer supports: the
            # distribution of the sum is their discrete convolution.
            final_probs = np.convolve(
                np.array(tuple(self.__dist.get(x, 0) for x in self_values)),
                np.array(tuple(right.__dist.get(x, 0) for x in right_values)),
            )
            values = range(
                min(self_values) + min(right_values),
                max(self_values) + max(right_values) + 1,
            )
            # Drop impossible (zero-probability) outcomes from the result.
            filtered = (final_probs > 0)
            values = np.array(values)[filtered].tolist()
            final_probs = final_probs[filtered]
            return DRV(
                zip(values, final_probs),
                tree=self._combine(self, right, '+'),
            )
        # Generic path: cross product of the two supports, then reduce.
        return self._apply2(operator.add, right, connective='+')

    def __sub__(self, right) -> 'DRV':
        """
        Handler for :code:`self - right`.

        Return a random variable which is the result of subtracting `right`
        from this variable. `right` can be either a constant or another DRV
        (in which case the result assumes that the two random variables are
        independent).

        As with :meth:`apply()`, probabilities are added up wherever
        subtraction is many-to-one (for constant numbers it is one-to-one
        provided overflow does not occur).
        """
        if isinstance(right, DRV):
            # Route through __add__ / __neg__ so that we get the convolve
            # optimisation, then restore the intended '-' expression tree.
            tree = self._combine(self, right, '-')
            return (self + -right).replace_tree(tree)
        else:
            return self._apply2(operator.sub, right, connective='-')

    def __mul__(self, right):
        """
        Handler for :code:`self * right`.

        Return a random variable which is the result of multiplying this
        variable with `right`. `right` can be either a constant or another
        DRV (in which case the result assumes that the two random variables
        are independent).

        As with :meth:`apply()`, probabilities are added up in the case where
        multiplication is not one-to-one (for constant numbers other than
        zero it is one-to-one provided overflow and underflow do not occur).
        """
        return self._apply2(operator.mul, right, connective='*')

    def __rmatmul__(self, left: int) -> 'DRV':
        """
        Handler for :code:`left @ self`.

        Return a random variable which is the result of sampling this
        variable `left` times, and adding the results together.
        """
        if not isinstance(left, int):
            return NotImplemented
        if left <= 0:
            raise ValueError(left)
        # Exponentiation by squaring. This isn't massively faster, but does
        # help a bit for hundreds of dice.
        result = None
        so_far = self
        original = left
        while True:
            if left % 2 == 1:
                if result is None:
                    result = so_far
                else:
                    result += so_far
            left //= 2
            if left == 0:
                break
            so_far += so_far
        # left was non-zero, so result cannot still be None
        result = cast(DRV, result)
        return result.replace_tree(self._combine(original, self, '@'))

    def __matmul__(self, right: 'DRV') -> 'DRV':
        """
        Handler for :code:`self @ right`.

        Return a random variable which is the result of sampling this
        variable once, then adding together that many samples of `right`.

        All possible values of this variable must be of type :obj:`int`.
        """
        if not isinstance(right, DRV):
            return NotImplemented
        if not all(isinstance(value, int) for value in self.__dist):
            raise TypeError('require integers on LHS of @')

        def iter_drvs():
            # Incrementally build num_dice @ right, starting from the
            # smallest possible count, adding one more `right` each step.
            so_far = min(self.__dist) @ right
            for num_dice in range(min(self.__dist), max(self.__dist) + 1):
                if num_dice in self.__dist:
                    yield so_far, self.__dist[num_dice]
                so_far += right
        return DRV.weighted_average(
            iter_drvs(),
            tree=self._combine(self, right, '@'),
        )

    def __truediv__(self, right) -> 'DRV':
        """
        Handler for :code:`self / right`.

        Return a random variable which is the result of (true) division of
        this variable by `right`. `right` can be either a constant or another
        DRV (in which case the result assumes that the two random variables
        are independent).

        As with :meth:`apply()`, probabilities are added up wherever division
        is many-to-one (for constant numbers other than zero it is one-to-one
        provided overflow and underflow do not occur).

        0 must not be a possible value of `right` (even with probability 0).
        """
        return self._apply2(operator.truediv, right, connective='/')

    def __floordiv__(self, right) -> 'DRV':
        """
        Handler for :code:`self // right`.

        Return a random variable which is the result of floor-dividing this
        variable by `right`. `right` can be either a constant or another DRV
        (in which case the result assumes that the two random variables are
        independent).

        As with :meth:`apply()`, probabilities are added up wherever floor
        division is many-to-one (for numbers it is mostly many-to-one, for
        example :code:`2 // 2 == 1 == 3 // 2`).

        0 must not be a possible value of `right` (even with probability 0).
        """
        return self._apply2(operator.floordiv, right, connective='//')

    def __neg__(self) -> 'DRV':
        """
        Handler for :code:`-self`.

        Return a random variable which is the result of negating the values
        of this variable.

        As with :meth:`apply()`, probabilities are added up wherever negation
        is many-to-one (for numbers it is one-to-one).
        """
        return self.apply(operator.neg, tree=self._combine(self, '-'))

    def __eq__(self, right) -> 'DRV':  # type: ignore[override]
        """
        Handler for :code:`self == right`.

        Return a random variable which takes value :obj:`True` where `self`
        is equal to `right`, and :obj:`False` otherwise. `right` can be
        either a constant or another DRV (in which case the result assumes
        that the two random variables are independent).

        If either :obj:`True` or :obj:`False` cannot happen then the result
        has only one possible value, with probability 1. There is no possible
        value with probability 0.
        """
        if isinstance(right, DRV):
            # Iterate the smaller support, probing the larger one: the total
            # probability of equality is sum over v of P_small(v)*P_big(v).
            small, big = sorted([self, right], key=lambda x: len(x.__dist))
            prob = sum(
                prob * big.__dist.get(val, 0)
                for val, prob in small._items()
            )
        else:
            prob = self.__dist.get(right)
        # `prob` may be None (constant not in support) or 0 — both mean
        # equality is impossible.
        if not prob:
            return DRV({False: 1})
        if prob >= 1.0:
            return DRV({True: 1})
        return DRV(
            {False: 1 - prob, True: prob},
            tree=self._combine(self, right, '=='),
        )

    def __ne__(self, right: 'DRV') -> 'DRV':  # type: ignore[override]
        """
        Handler for :code:`self != right`.

        Return a random variable which takes value :obj:`True` where `self`
        is not equal to `right`, and :obj:`False` otherwise. `right` can be
        either a constant or another DRV (in which case the result assumes
        that the two random variables are independent).

        If either :obj:`True` or :obj:`False` cannot happen then the result
        has only one possible value, with probability 1. There is no possible
        value with probability 0.
        """
        # Defined as the logical negation of __eq__, with its own tree.
        return (
            (self == right)
            .apply(operator.not_)
            .replace_tree(self._combine(self, right, '!='))
        )

    def __bool__(self):
        # Prevent DRVs being truthy, and hence "3 in [DRV({2: 1})]" is true.
        raise ValueError('The truth value of a random variable is ambiguous')

    def __le__(self, right) -> 'DRV':
        """
        Handler for :code:`self <= right`.

        Return a random variable which takes value :obj:`True` where `self`
        is less than or equal to `right`, and :obj:`False` otherwise. `right`
        can be either a constant or another DRV (in which case the result
        assumes that the two random variables are independent).

        If either :obj:`True` or :obj:`False` cannot happen then the result
        has only one possible value, with probability 1. There is no possible
        value with probability 0.
        """
        return self._apply2(operator.le, right, connective='<=')

    def __lt__(self, right) -> 'DRV':
        """
        Handler for :code:`self < right`.

        Return a random variable which takes value :obj:`True` where `self`
        is less than `right`, and :obj:`False` otherwise. `right` can be
        either a constant or another DRV (in which case the result assumes
        that the two random variables are independent).

        If either :obj:`True` or :obj:`False` cannot happen then the result
        has only one possible value, with probability 1. There is no possible
        value with probability 0.
        """
        return self._apply2(operator.lt, right, connective='<')

    def __ge__(self, right) -> 'DRV':
        """
        Handler for :code:`self >= right`.

        Return a random variable which takes value :obj:`True` where `self`
        is greater than or equal to `right`, and :obj:`False` otherwise.
        `right` can be either a constant or another DRV (in which case the
        result assumes that the two random variables are independent).

        If either :obj:`True` or :obj:`False` cannot happen then the result
        has only one possible value, with probability 1. There is no possible
        value with probability 0.
        """
        return self._apply2(operator.ge, right, connective='>=')

    def __gt__(self, right) -> 'DRV':
        """
        Handler for :code:`self > right`.

        Return a random variable which takes value :obj:`True` where `self`
        is greater than `right`, and :obj:`False` otherwise. `right` can be
        either a constant or another DRV (in which case the result assumes
        that the two random variables are independent).

        If either :obj:`True` or :obj:`False` cannot happen then the result
        has only one possible value, with probability 1. There is no possible
        value with probability 0.
        """
        return self._apply2(operator.gt, right, connective='>')

    def explode(self, rerolls: int = 50) -> 'DRV':
        """
        Return a new DRV distributed according to the rules of an "exploding
        die". This means, first roll the die (sample this DRV). If the result
        is not the maximum possible, then keep it. If it is the maximum, then
        roll again and add the new result to the original.

        Because DRV represents only finitely-many possible values, whereas
        the process of rerolling can (with minuscule probability) carry on
        indefinitely, this method imposes an arbitary limit to the number of
        rerolls.

        :param rerolls: The maximum number of rerolls. Set this to 1 for a
          die that can only "explode" once, not indefinitely.
        """
        reroll_value = max(self.__dist.keys())
        reroll_prob = self.__dist[reroll_value]
        each_die = self.to_dict()
        # Remove the maximum: it never "sticks" except at the reroll cap.
        each_die.pop(reroll_value)

        def iter_pairs():
            # idx = number of maximal rolls seen before this outcome.
            for idx in range(rerolls + 1):
                for value, prob in each_die.items():
                    value += reroll_value * idx
                    prob *= reroll_prob ** idx
                    yield (value, prob)
            # After the loop, idx == rerolls (deliberate use of the leaked
            # loop variable): the remaining probability mass is "maximum on
            # every roll including the last permitted one".
            yield (reroll_value * (idx + 1), reroll_prob ** (idx + 1))
        postfix = '.explode()' if rerolls == 50 else f'.explode({rerolls!r})'
        return self._reduced(iter_pairs(), tree=self._combine_post(postfix))

    def apply(
        self,
        func: Callable[[Any], Any],
        *,
        tree: ExpressionTree = None,
        allow_drv: bool = False,
    ) -> 'DRV':
        """
        Apply a unary function to the values produced by this DRV. If `func`
        is an injective (one-to-one) function, then the probabilities are
        unchanged. If `func` is many-to-one, then the probabilities are added
        together.

        :param func: Function to map the values. Each value `x` is replaced
          by `func(x)`.
        :param tree: the expression from which this object was defined. If
          ``None``, the result DRV is represented by listing out all the
          values and probabilities.
        :param allow_drv: If True, then when `func` returns a DRV, the
          possible values of that DRV are each included in the returned DRV.
          Recall that a DRV cannot be a possible value of the returned DRV,
          because it is not hashable. So, without this option `func` cannot
          return a DRV.

        .. versionchanged:: 1.1
            Added ``allow_drv`` option.
        """
        return DRV._reduced(self._items(), func, tree=tree, drv=allow_drv)

    def _apply2(self, func, right, connective=None) -> 'DRV':
        """Apply a binary function, with the values of this DRV on the left."""
        expr_tree = self._combine(self, right, connective)
        if isinstance(right, DRV):
            return self._cross_reduce(func, right, tree=expr_tree)
        return self.apply(lambda x: func(x, right), tree=expr_tree)

    def _cross_reduce(self, func, right, tree=None) -> 'DRV':
        """
        Take the cross product of self and right, then reduce by applying
        func.
        """
        return DRV._reduced(
            self._iter_cross(right),
            lambda value: func(*value),
            tree=tree,
        )

    def _iter_cross(self, right):
        """
        Take the cross product of self and right, with probabilities assuming
        that the two are independent variables.

        Note that the cross product of an object with itself represents the
        results of sampling it twice, *not* just the pairs (x, x) for each
        possible value!
        """
        for (lvalue, lprob) in self._items():
            for (rvalue, rprob) in right._items():
                yield ((lvalue, rvalue), lprob * rprob)

    @staticmethod
    def _reduced(iterable, func=lambda x: x, tree=None, drv=False) -> 'DRV':
        # Fold (value, prob) pairs into a distribution, summing probabilities
        # whenever func maps two values to the same result.
        distribution: dict = collections.defaultdict(int)
        if not drv:
            # Optimisation does make a difference to e.g. test_convolve
            for value, prob in iterable:
                distribution[func(value)] += prob
        else:
            # allow_drv path: a DRV returned by func is flattened into the
            # result, with its probabilities scaled by `prob`.
            for value, prob in iterable:
                transformed = func(value)
                if isinstance(transformed, DRV):
                    for value2, prob2 in transformed._weighted_items(prob):
                        distribution[value2] += prob2
                else:
                    distribution[transformed] += prob
        return DRV(distribution, tree=tree)

    @staticmethod
    def weighted_average(
        iterable: Iterable[Tuple['DRV', 'Probability']],
        *,
        tree: ExpressionTree = None,
    ) -> 'DRV':
        """
        Compute a weighted average of DRVs, each with its own probability.

        This is for when you have a set of mutually-exclusive events which
        can happen, and then the final outcome occurs with a different known
        distribution according to which of those events occurs. For example,
        this function is used to implement the ``@`` operator when the
        left-hand-side is a DRV. The first roll determines what the second
        roll will be.

        The DRVs that are averaged together do not need to be disjoint (that
        is, they can have overlapping possible values). Whenever multiple
        events lead to the same final outcome, the probabilities are
        combined: https://en.wikipedia.org/wiki/Law_of_total_probability

        :param iterable: Pairs, each containing a DRV and the probability of
          that DRV being the one selected. The probabilities should add to 1,
          but this is not enforced.
        :param tree: the expression from which this object was defined. If
          ``None``, the result DRV is represented by listing out all the
          values and probabilities.

        .. versionadded:: 1.1
        """
        def iter_pairs():
            for drv, weight in iterable:
                yield from drv._weighted_items(weight)
        return DRV._reduced(iter_pairs(), tree=tree)

    def _weighted_items(self, weight, pred=lambda x: True):
        # Yield (value, prob * weight) for every value satisfying pred.
        for value, prob in self.__dist.items():
            if pred(value):
                yield value, prob * weight

    def given(self, predicate: Callable[[Any], bool]) -> 'DRV':
        """
        Return the conditional probability distribution of this DRV,
        restricted to the possible values for which `predicate` is true.

        For example, :code:`drv.given(lambda x: True)` is the same
        distribution as :code:`drv`, and the following are equivalent to each
        other::

            d6.given(lambda x: bool(x % 2))
            DRV({1: Fraction(1, 3), 3: Fraction(1, 3), 5: Fraction(1, 3)})

        If `x` is a DRV, and `A` and `B` are predicates, then the conditional
        probability of `A` given `B`, written in probability theory as
        ``p(A(x) | B(x))``, can be computed as
        :code:`p(x.given(B).apply(A)))`.

        :param predicate: Called with possible values of `self`, and must
          return :obj:`bool` (not just truthy).
        :raises ZeroDivisionError: if the probability of `predicate` being
          true is 0.

        .. versionadded:: 1.1
        """
        total = p(self.apply(predicate))
        if total == 0:
            # Would be raised anyway, but nicer error message
            raise ZeroDivisionError('predicate is True with probability 0')
        # Rescale the surviving probabilities so they sum to 1.
        return DRV(self._weighted_items(1 / total, predicate))

    @staticmethod
    def _combine(*args):
        """
        Helper for combining two expressions into a combined expression.
        """
        # If any participating DRV has no expression tree, the combined
        # expression cannot be represented either.
        for arg in args:
            if isinstance(arg, DRV) and arg.__expr_tree is None:
                return None

        def unpack(subexpr):
            if isinstance(subexpr, DRV):
                return subexpr.__expr_tree
            return Atom(repr(subexpr))
        if len(args) == 2:
            # Unary expression
            subexpr, connective = args
            return UnaryExpression(unpack(subexpr), connective)
        # Binary expression
        left, right, connective = args
        return BinaryExpression(unpack(left), unpack(right), connective)

    def _combine_post(self, postfix):
        # Append a postfix (e.g. '.explode()') to this DRV's expression tree.
        if self.__expr_tree is None:
            return None
        return AttrExpression(self.__expr_tree, postfix)
class State:
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    context: Context in which it was created
    """

    __slots__ = [
        "entity_id",
        "state",
        "attributes",
        "last_changed",
        "last_updated",
        "context",
    ]

    def __init__(
        self,
        entity_id: str,
        state: str,
        attributes: Optional[Mapping] = None,
        last_changed: Optional[datetime.datetime] = None,
        last_updated: Optional[datetime.datetime] = None,
        context: Optional[Context] = None,
        # Temp, because database can still store invalid entity IDs
        # Remove with 1.0 or in 2020.
        temp_invalid_id_bypass: Optional[bool] = False,
    ) -> None:
        """Initialize a new state."""
        state = str(state)

        if not (valid_entity_id(entity_id) or temp_invalid_id_bypass):
            raise InvalidEntityFormatError(
                f"Invalid entity id encountered: {entity_id}. "
                f"Format should be <domain>.<object_id>"
            )

        if not valid_state(state):
            raise InvalidStateError(
                f"Invalid state encountered for entity id: {entity_id}. "
                f"State max length is 255 characters."
            )

        self.entity_id = entity_id.lower()
        self.state = state
        # Wrap in a read-only proxy so shared attributes cannot be mutated.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated or dt_util.utcnow()
        self.last_changed = last_changed or self.last_updated
        self.context = context or Context()

    @property
    def domain(self) -> str:
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self) -> str:
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self) -> str:
        """Name of this state."""
        friendly = self.attributes.get(ATTR_FRIENDLY_NAME)
        if friendly:
            return friendly
        return self.object_id.replace("_", " ")

    def as_dict(self) -> Dict:
        """Return a dict representation of the State.

        Async friendly.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {
            "entity_id": self.entity_id,
            "state": self.state,
            "attributes": dict(self.attributes),
            "last_changed": self.last_changed,
            "last_updated": self.last_updated,
            "context": self.context.as_dict(),
        }

    @classmethod
    def from_dict(cls, json_dict: Dict) -> Any:
        """Initialize a state from a dict.

        Async friendly.

        Ensures: state == State.from_json_dict(state.to_json_dict())
        """
        if not (json_dict and "entity_id" in json_dict
                and "state" in json_dict):
            return None

        def _parse(value: Any) -> Any:
            # Datetimes may arrive serialized as ISO strings.
            if isinstance(value, str):
                return dt_util.parse_datetime(value)
            return value

        last_changed = _parse(json_dict.get("last_changed"))
        last_updated = _parse(json_dict.get("last_updated"))

        context = json_dict.get("context")
        if context:
            context = Context(id=context.get("id"),
                              user_id=context.get("user_id"))

        return cls(
            json_dict["entity_id"],
            json_dict["state"],
            json_dict.get("attributes"),
            last_changed,
            last_updated,
            context,
        )

    def __eq__(self, other: Any) -> bool:
        """Return the comparison of the state."""
        if self.__class__ != other.__class__:  # type: ignore
            return False
        return (self.entity_id == other.entity_id
                and self.state == other.state
                and self.attributes == other.attributes
                and self.context == other.context)

    def __repr__(self) -> str:
        """Return the representation of the states."""
        attrs = (f"; {util.repr_helper(self.attributes)}"
                 if self.attributes else "")
        when = dt_util.as_local(self.last_changed).isoformat()
        return f"<state {self.entity_id}={self.state}{attrs} @ {when}>"
"""MappingProxyType""" from types import MappingProxyType d = {'a': 1, 'b': 2} mp = MappingProxyType(d) # Read-only print(list(mp.keys())) print(list(mp.values())) print(mp.get('a')) print(mp.get('c', 'not found')) # Immutable # del mp['a'] # TypeError # mp['a'] = 100 # TypeError # Mutate the original dictionary d['a'] = 100 d['c'] = 'new item' del d['b'] print(mp) # Reflect the changes in original dictionary
def check_attr_equal(
    attr1: MappingProxyType, attr2: MappingProxyType, attr_str: str
) -> bool:
    """Return true if the given attributes are equal.

    A key missing from both mappings counts as equal (both sides read None).
    """
    left = attr1.get(attr_str)
    right = attr2.get(attr_str)
    return left == right
class PackageLookup(SymbolLookup):
    """
    Caching structure to make lookups on type names within a :class:`Package`
    faster.

    On construction, every data type, value, and template in the archive's
    package is indexed by its ``"ModuleName:EntityName"`` string; the indexes
    are frozen as read-only mappings.
    """

    def __init__(self, archive: "Archive"):
        self.archive = archive

        data_types = {}  # type: Dict[str, Tuple[TypeConName, DefDataType]]
        values = {}  # type: Dict[str, Tuple[ValName, DefValue]]
        templates = {}  # type: Dict[str, Tuple[TypeConName, DefTemplate]]
        for module in self.archive.package.modules:
            module_ref = ModuleRef(archive.hash, module.name)
            for dt in module.data_types:
                dt_name = TypeConName(module_ref, dt.name.segments)
                data_types[f"{module.name}:{dt.name}"] = (dt_name, dt)
            for value in module.values:
                value_name = ValName(module_ref, value.name_with_type.name)
                values[f"{module.name}:{value.name_with_type.name}"] = (
                    value_name, value)
            for tmpl in module.templates:
                tmpl_name = TypeConName(module_ref, tmpl.tycon.segments)
                templates[f"{module.name}:{tmpl.tycon}"] = (tmpl_name, tmpl)
        # Freeze the indexes so callers cannot mutate the cache.
        self._data_types = MappingProxyType(data_types)
        self._values = MappingProxyType(values)
        self._templates = MappingProxyType(templates)

    def archives(self) -> "Collection[Archive]":
        """Return the single archive backing this lookup."""
        return [self.archive]

    def package_ids(self) -> "AbstractSet[PackageRef]":
        """Return the set containing this archive's package ID."""
        return frozenset([self.archive.hash])

    def data_type_name(self, ref: "Any") -> "TypeConName":
        """Resolve *ref* to a data type name, or raise NameNotFoundError."""
        pkg, name = validate_template(ref)
        if pkg == self.archive.hash or pkg == STAR:
            dt_name = self.local_data_type_name(name)
            if dt_name is not None:
                return dt_name
        raise NameNotFoundError(ref)

    def data_type(self, ref: "Any") -> "DefDataType":
        """Resolve *ref* to a data type, or raise NameNotFoundError."""
        pkg, name = validate_template(ref)
        if pkg == self.archive.hash or pkg == STAR:
            dt = self.local_data_type(name)
            if dt is not None:
                return dt
        raise NameNotFoundError(ref)

    def local_data_type_name(self, name: str) -> "Optional[TypeConName]":
        """Return the data type name for *name*, or None if not found."""
        r = self._data_types.get(name)
        return r[0] if r is not None else None

    def local_data_type(self, name: str) -> "Optional[DefDataType]":
        """
        Variation of :meth:`data_type` that assumes the name is already
        scoped to this package.

        Unlike :meth:`data_type`, this method returns ``None`` in the case of
        no match.

        You should not normally use this method directly, and instead prefer
        to use the methods of the :class:`SymbolLookup` protocol.

        :param name:
            A name to search for. Must be of the form
            ``"ModuleName:EntityName"``, where both modules and entities are
            dot-delimited.
        :return:
            Either a matching :class:`DefDataType`, or ``None`` if no match.
        """
        r = self._data_types.get(name)
        return r[1] if r is not None else None

    def value(self, ref: "Any") -> "DefValue":
        """Resolve *ref* to a value definition, or raise NameNotFoundError."""
        pkg, name = validate_template(ref)
        if pkg == self.archive.hash or pkg == STAR:
            dt = self.local_value(name)
            if dt is not None:
                return dt
        raise NameNotFoundError(ref)

    def local_value(self, name: str) -> "Optional[DefValue]":
        """
        Variation of :meth:`value` that assumes the name is already scoped to
        this package.

        Unlike :meth:`value`, this method returns ``None`` in the case of no
        match.

        You should not normally use this method directly, and instead prefer
        to use the methods of the :class:`SymbolLookup` protocol.

        :param name:
            A name to search for. Must be of the form
            ``"ModuleName:EntityName"``, where both modules and entities are
            dot-delimited.
        :return:
            Either a matching :class:`DefValue`, or ``None`` if no match.
        """
        r = self._values.get(name)
        return r[1] if r is not None else None

    def template_names(self, ref: "Any") -> "Collection[TypeConName]":
        """Return the template names matching *ref* (possibly ``"*"``)."""
        pkg, name = validate_template(ref)
        if pkg == self.archive.hash or pkg == STAR:
            if name == "*":
                return self.local_template_names()
            elif name in self._templates:
                n, _ = self._templates[name]
                # BUGFIX: previously returned the bare TypeConName, which
                # violated the declared Collection[TypeConName] return type
                # (the "*" branch above returns a list).
                return [n]
        return []

    def template_name(self, ref: "Any") -> "TypeConName":
        """Resolve *ref* to a template name, or raise NameNotFoundError."""
        pkg, name = validate_template(ref)
        if pkg == self.archive.hash or pkg == STAR:
            tmpl = self.local_template_name(name)
            if tmpl is not None:
                return tmpl
        raise NameNotFoundError(ref)

    def template(self, ref: "Any") -> "DefTemplate":
        """Resolve *ref* to a template, or raise NameNotFoundError."""
        pkg, name = validate_template(ref)
        if pkg == self.archive.hash or pkg == STAR:
            tmpl = self.local_template(name)
            if tmpl is not None:
                return tmpl
        raise NameNotFoundError(ref)

    def local_template_names(self) -> "Collection[TypeConName]":
        """Return the names of every template in this package."""
        return [n for n, _ in self._templates.values()]

    def local_template_name(self, name: str) -> "Optional[TypeConName]":
        """Return the template name for *name*, or None if not found."""
        r = self._templates.get(name)
        return r[0] if r is not None else None

    def local_template(self, name: str) -> "Optional[DefTemplate]":
        """
        Variation of :meth:`template` that assumes the name is already scoped
        to this package.

        Unlike :meth:`template`, this method returns ``None`` in the case of
        no match.

        You should not normally use this method directly, and instead prefer
        to use the methods of the :class:`SymbolLookup` protocol.

        :param name:
            A name to search for. Must be of the form
            ``"ModuleName:EntityName"``, where both modules and entities are
            dot-delimited.
        :return:
            Either a matching :class:`DefTemplate`, or ``None`` if no match.
        """
        r = self._templates.get(name)
        return r[1] if r is not None else None
def get_no_data_icon(expected_entity: MappingProxyType):
    """Check attribute for icon for inactive sensors.

    Falls back to the entity's own icon attribute when no per-entity
    "empty value" icon is registered.
    """
    entity_id = expected_entity["entity_id"]
    default_icon = expected_entity.get(ATTR_ICON)
    return ICON_FOR_EMPTY_VALUES.get(entity_id, default_icon)
class SessionStoreDb:
    """Parse a Chromium Session Storage LevelDB directory into an in-memory
    host -> key -> values index.

    Two passes over the raw LevelDB records:
    1. namespace records, mapping a map-id to its host;
    2. map records, collecting the actual stored values per host and key.
    Values whose map-id has no known host are kept as "orphans".
    """
    # todo: get all grouped by namespace by host?
    # todo: get all grouped by namespace by host.key?
    # todo: consider refactoring to only getting metadata on first pass and
    #  everything else on demand?

    def __init__(self, in_dir: pathlib.Path):
        if not in_dir.is_dir():
            raise IOError("Input directory is not a directory")

        self._ldb = ccl_leveldb.RawLevelDb(in_dir)

        # If performance is a concern we should refactor this, but slow and
        # steady for now.

        # First collect the namespace (session/tab guid + host) and map-ids
        # together.
        self._map_id_to_host = {}  # map_id: host
        self._deleted_keys = set()

        for rec in self._ldb.iterate_records_raw():
            if not rec.user_key.startswith(_NAMESPACE_PREFIX):
                continue
            if rec.user_key == _NAMESPACE_PREFIX:
                continue  # bogus entry near the top usually
            try:
                key = rec.user_key.decode("utf-8")
            except UnicodeDecodeError:
                print(f"Invalid namespace key: {rec.user_key}")
                continue

            split_key = key.split("-", 2)
            if len(split_key) != 3:
                print(f"Invalid namespace key: {key}")
                continue

            _, guid, host = split_key

            if not host:
                continue  # TODO investigate why this happens

            # normalize host to lower just in case
            host = host.lower()
            guid_host_pair = guid, host

            if rec.state == ccl_leveldb.KeyState.Deleted:
                self._deleted_keys.add(guid_host_pair)
            else:
                try:
                    map_id = rec.value.decode("utf-8")
                except UnicodeDecodeError:
                    print(f"Invalid namespace value: {key}")
                    continue

                if not map_id:
                    continue  # TODO: investigate why this happens/do we want
                    #  to keep the host around somewhere?

                if map_id in self._map_id_to_host \
                        and self._map_id_to_host[map_id] != host:
                    print("Map ID Collision!")
                    print(f"map_id: {map_id}")
                    print(f"Old host: {self._map_id_to_host[map_id]}")
                    # BUGFIX: previously printed the (guid, host) tuple here,
                    # inconsistent with "Old host" above (leftover from an
                    # earlier guid-host-pair keyed version).
                    print(f"New host: {host}")
                    raise ValueError("map_id collision")
                else:
                    self._map_id_to_host[map_id] = host

        # freeze stuff
        self._map_id_to_host = MappingProxyType(self._map_id_to_host)
        self._deleted_keys = frozenset(self._deleted_keys)

        self._host_lookup = {}  # {host: {ss_key: [SessionStoreValue, ...]}}
        self._orphans = []  # list of (key, value) where we can't get the host

        for rec in self._ldb.iterate_records_raw():
            if not rec.user_key.startswith(_MAP_ID_PREFIX):
                continue
            try:
                key = rec.user_key.decode("utf-8")
            except UnicodeDecodeError:
                print(f"Invalid map id key: {rec.user_key}")
                continue

            if rec.state == ccl_leveldb.KeyState.Deleted:
                continue  # TODO: do we want to keep the key around because
                #  the presence is important?

            split_key = key.split("-", 2)
            if len(split_key) != 3:
                print(f"Invalid map id key: {key}")
                continue

            _, map_id, ss_key = split_key

            # BUGFIX: this guard previously tested `if not split_key:` which
            # is always false (split_key always has 3 elements here); the
            # surrounding TODO makes clear the intent was an empty ss_key.
            if not ss_key:
                # TODO what does it mean when there is no key here?
                # The value will also be a single number (encoded utf-8)
                continue

            try:
                value = rec.value.decode("UTF-16-LE")
            except UnicodeDecodeError:
                print(f"Error decoding value for {key}")
                print(f"Raw Value: {rec.value}")
                continue

            host = self._map_id_to_host.get(map_id)
            if not host:
                self._orphans.append(
                    (ss_key, SessionStoreValue(value, None, rec.seq)))
            else:
                self._host_lookup.setdefault(host, {})
                self._host_lookup[host].setdefault(ss_key, [])
                self._host_lookup[host][ss_key].append(
                    SessionStoreValue(value, None, rec.seq))

    def __contains__(self, item: typing.Union[str, typing.Tuple[str, str]]) -> bool:
        """if item is a str, returns true if that host is present
        if item is a tuple of (str, str), returns True if that host and key
        pair are present"""
        if isinstance(item, str):
            return item in self._host_lookup
        elif isinstance(item, tuple) and len(item) == 2:
            host, key = item
            return host in self._host_lookup and key in self._host_lookup[host]
        else:
            raise TypeError("item must be a string or a tuple of (str, str)")

    def iter_hosts(self) -> typing.Iterable[str]:
        """Yield every host that has at least one stored value."""
        yield from self._host_lookup.keys()

    def get_all_for_host(self, host):
        """Return {ss_key: (SessionStoreValue, ...)} for *host* ({} if absent)."""
        if host not in self:
            return {}
        result_raw = dict(self._host_lookup[host])
        for ss_key in result_raw:
            result_raw[ss_key] = tuple(result_raw[ss_key])
        return result_raw

    def get_session_storage_key(self, host, key):
        """Return the tuple of values for (host, key), or () if absent."""
        if (host, key) not in self:
            return tuple()
        return tuple(self._host_lookup[host][key])

    def iter_orphans(self):
        """Yield (key, SessionStoreValue) pairs whose host is unknown."""
        yield from self._orphans

    def __getitem__(self, item: typing.Union[str, typing.Tuple[str, str]]):
        """Index by host (-> dict of keys) or by (host, key) (-> values)."""
        if item not in self:
            raise KeyError(item)
        if isinstance(item, str):
            return self.get_all_for_host(item)
        elif isinstance(item, tuple) and len(item) == 2:
            return self.get_session_storage_key(*item)
        else:
            raise TypeError("item must be a string or a tuple of (str, str)")

    def __iter__(self):
        """iterates the hosts present"""
        return self.iter_hosts()

    def close(self):
        """Close the underlying LevelDB handle."""
        self._ldb.close()
class RewriteVisitor(IdentityVisitor):
    """Visitor that rewrites an expression, substituting expression and type
    variables and (optionally) expanding value references.

    Instances behave immutably: the ``with_*``/``without_*`` methods return
    new visitors carrying augmented substitution environments.
    """

    def __init__(
        self,
        lookup: "SymbolLookup",
        always_expand: bool = False,
        val_blacklist: "Optional[Collection[ValName]]" = None,
        expr_args: "Optional[Mapping[str, Expr]]" = None,
        type_args: "Optional[Mapping[str, Type]]" = None,
    ):
        self.lookup = lookup
        self.always_expand = always_expand
        # Blacklisted values are never expanded (guards against runaway
        # recursion through self-referential definitions).
        if val_blacklist is None:
            self.val_blacklist = ()
        else:
            self.val_blacklist = frozenset(val_blacklist)
        # Read-only views of the substitution environments.
        self.expr_args = MappingProxyType(expr_args or {})  # type: Mapping[str, Expr]
        self.type_args = MappingProxyType(type_args or {})  # type: Mapping[str, Type]

    def with_expr_var(self, var: str, value: "Expr") -> "RewriteVisitor":
        """Return a copy of this visitor that also maps *var* to *value*."""
        merged = dict(self.expr_args)
        merged[var] = value
        return type(self)(self.lookup, self.always_expand,
                          self.val_blacklist, merged, self.type_args)

    def with_expr_vars(self, var_values: "Mapping[str, Expr]") -> "RewriteVisitor":
        """Return a copy of this visitor with extra expression substitutions."""
        merged = dict(self.expr_args)
        merged.update(var_values)
        return type(self)(self.lookup, self.always_expand,
                          self.val_blacklist, merged, self.type_args)

    def with_type_vars(self, type_values: "Mapping[str, Type]") -> "RewriteVisitor":
        """Return a copy of this visitor with extra type substitutions."""
        merged = dict(self.type_args)
        merged.update(type_values)
        return type(self)(self.lookup, self.always_expand,
                          self.val_blacklist, self.expr_args, merged)

    def without_val(self, val: "ValName") -> "RewriteVisitor":
        """Return a copy of this visitor with *val* added to the blacklist."""
        blocked = set(self.val_blacklist)
        blocked.add(val)
        return type(self)(self.lookup, self.always_expand, blocked,
                          self.expr_args, self.type_args)

    def resolve_val(self, val: "ValName") -> "Optional[Expr]":
        """Resolve *val* to its expression, or None if blacklisted."""
        if val in self.val_blacklist:
            return None
        return self.lookup.value(val).expr

    def resolve_type_arg(self, type_arg: str) -> "Optional[Type]":
        """Look up a type substitution for *type_arg*, if any."""
        return self.type_args.get(type_arg)

    def visit_expr_var(self, var: str) -> "Expr":
        """Substitute *var* if mapped; otherwise keep it as a variable."""
        substituted = self.expr_args.get(var)
        if substituted is None:
            return Expr(var=var)
        return substituted

    def visit_type_var(self, var: "Type.Var") -> "Type":
        """Substitute a type variable if mapped; otherwise keep it."""
        base_type = self.resolve_type_arg(var.var)
        if var.args:
            raise ValueError(
                "higher kinded types are not yet currently supported")
        if base_type is None:
            return Type(var=var)
        return base_type
class HAState(object):
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    """

    def __init__(self, entity_id, state, attributes=None, last_changed=None,
                 last_updated=None):
        """Initialize a new state."""
        self.entity_id = entity_id.lower()
        self.state = state
        # Read-only view so callers cannot mutate shared attributes.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated
        self.last_changed = last_changed or self.last_updated

    @property
    def domain(self):
        """Domain of this state."""
        domain, _object_id = split_entity_id(self.entity_id)
        return domain

    @property
    def object_id(self):
        """Object id of this state."""
        _domain, object_id = split_entity_id(self.entity_id)
        return object_id

    @property
    def name(self):
        """Name of this state."""
        friendly = self.attributes.get('friendly_name')
        if friendly:
            return friendly
        # Fall back to a prettified object id.
        return self.object_id.replace('_', ' ')

    def as_dict(self):
        """Return a dict representation of the State.

        Async friendly.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {
            'entity_id': self.entity_id,
            'state': self.state,
            'attributes': dict(self.attributes),
            'last_changed': self.last_changed,
            'last_updated': self.last_updated,
        }

    @classmethod
    def from_dict(cls, json_dict):
        """Initialize a state from a dict.

        Async friendly.

        Ensures: state == State.from_json_dict(state.to_json_dict())
        """
        if not json_dict:
            return None
        if 'entity_id' not in json_dict or 'state' not in json_dict:
            return None

        return cls(json_dict['entity_id'],
                   json_dict['state'],
                   json_dict.get('attributes'),
                   json_dict.get('last_changed'),
                   json_dict.get('last_updated'))

    def __eq__(self, other):
        """Return the comparison of the state."""
        if self.__class__ != other.__class__:
            return False
        return (self.entity_id == other.entity_id
                and self.state == other.state
                and self.attributes == other.attributes)

    def __repr__(self):
        """Return the representation of the states."""
        return "<HAstate {}={} attributes: {} @ {}>".format(
            self.entity_id, self.state, self.attributes, self.last_changed)
class State:
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    context: Context in which it was created
    """

    __slots__ = ['entity_id', 'state', 'attributes', 'last_changed',
                 'last_updated', 'context']

    def __init__(self, entity_id: str, state: Any,
                 attributes: Optional[Dict] = None,
                 last_changed: Optional[datetime.datetime] = None,
                 last_updated: Optional[datetime.datetime] = None,
                 context: Optional[Context] = None,
                 # Temp, because database can still store invalid entity IDs
                 # Remove with 1.0 or in 2020.
                 temp_invalid_id_bypass: Optional[bool] = False) -> None:
        """Initialize a new state.

        Raises InvalidEntityFormatError / InvalidStateError on bad input.
        """
        state = str(state)

        if not valid_entity_id(entity_id) and not temp_invalid_id_bypass:
            raise InvalidEntityFormatError((
                "Invalid entity id encountered: {}. "
                "Format should be <domain>.<object_id>").format(entity_id))

        if not valid_state(state):
            raise InvalidStateError((
                "Invalid state encountered for entity id: {}. "
                "State max length is 255 characters.").format(entity_id))

        self.entity_id = entity_id.lower()
        self.state = state  # type: str
        # Read-only view keeps the state effectively immutable.
        self.attributes = MappingProxyType(attributes or {})
        if not last_updated:
            last_updated = dt_util.utcnow()
        self.last_updated = last_updated
        # A state that was never changed reports its update time instead.
        self.last_changed = last_changed if last_changed else last_updated
        self.context = context if context else Context()

    @property
    def domain(self) -> str:
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self) -> str:
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self) -> str:
        """Name of this state."""
        friendly_name = self.attributes.get(ATTR_FRIENDLY_NAME)
        if friendly_name:
            return friendly_name
        return self.object_id.replace('_', ' ')

    def as_dict(self) -> Dict:
        """Return a dict representation of the State.

        Async friendly.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {
            'entity_id': self.entity_id,
            'state': self.state,
            'attributes': dict(self.attributes),
            'last_changed': self.last_changed,
            'last_updated': self.last_updated,
            'context': self.context.as_dict(),
        }

    @classmethod
    def from_dict(cls, json_dict: Dict) -> Any:
        """Initialize a state from a dict.

        Async friendly.

        Returns None when required keys are missing.
        Ensures: state == State.from_dict(state.as_dict())
        """
        if not json_dict:
            return None
        if 'entity_id' not in json_dict or 'state' not in json_dict:
            return None

        # Timestamps may arrive serialized as ISO strings; parse them back.
        last_changed = json_dict.get('last_changed')
        if isinstance(last_changed, str):
            last_changed = dt_util.parse_datetime(last_changed)

        last_updated = json_dict.get('last_updated')
        if isinstance(last_updated, str):
            last_updated = dt_util.parse_datetime(last_updated)

        context_dict = json_dict.get('context')
        context = None
        if context_dict:
            context = Context(
                id=context_dict.get('id'),
                user_id=context_dict.get('user_id'),
            )

        return cls(json_dict['entity_id'], json_dict['state'],
                   json_dict.get('attributes'), last_changed, last_updated,
                   context)

    def __eq__(self, other: Any) -> bool:
        """Return the comparison of the state."""
        return (self.__class__ == other.__class__ and  # type: ignore
                self.entity_id == other.entity_id and
                self.state == other.state and
                self.attributes == other.attributes and
                self.context == other.context)

    def __repr__(self) -> str:
        """Return the representation of the states."""
        if self.attributes:
            attrs = "; {}".format(util.repr_helper(self.attributes))
        else:
            attrs = ""
        return "<state {}={}{} @ {}>".format(
            self.entity_id, self.state, attrs,
            dt_util.as_local(self.last_changed).isoformat())
class State(object):
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    """

    # Fixed attribute set: avoids a per-instance __dict__, since many State
    # objects are created.
    __slots__ = ['entity_id', 'state', 'attributes', 'last_changed',
                 'last_updated']

    # pylint: disable=too-many-arguments
    def __init__(self, entity_id, state, attributes=None, last_changed=None,
                 last_updated=None):
        """Initialize a new state."""
        if not valid_entity_id(entity_id):
            raise InvalidEntityFormatError((
                "Invalid entity id encountered: {}. "
                "Format should be <domain>.<object_id>").format(entity_id))

        self.entity_id = entity_id.lower()
        self.state = str(state)
        # Read-only mapping view; the state is treated as immutable.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = dt_util.strip_microseconds(
            last_updated or dt_util.utcnow())

        # Strip microsecond from last_changed else we cannot guarantee
        # state == State.from_dict(state.as_dict())
        # This behavior occurs because to_dict uses datetime_to_str
        # which does not preserve microseconds
        self.last_changed = dt_util.strip_microseconds(
            last_changed or self.last_updated)

    @property
    def domain(self):
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self):
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self):
        """Name of this state."""
        # Fall back to a prettified object id when no friendly name is set.
        return (
            self.attributes.get(ATTR_FRIENDLY_NAME) or
            self.object_id.replace('_', ' '))

    def as_dict(self):
        """Return a dict representation of the State.

        To be used for JSON serialization.

        Ensures: state == State.from_dict(state.as_dict())
        """
        # Timestamps are serialized to strings here; from_dict parses them
        # back with str_to_datetime.
        return {'entity_id': self.entity_id,
                'state': self.state,
                'attributes': dict(self.attributes),
                'last_changed': dt_util.datetime_to_str(self.last_changed),
                'last_updated': dt_util.datetime_to_str(self.last_updated)}

    @classmethod
    def from_dict(cls, json_dict):
        """Initialize a state from a dict.

        Returns None when 'entity_id' or 'state' is missing.

        Ensures: state == State.from_json_dict(state.to_json_dict())
        """
        if not (json_dict and
                'entity_id' in json_dict and
                'state' in json_dict):
            return None

        last_changed = json_dict.get('last_changed')

        if last_changed:
            last_changed = dt_util.str_to_datetime(last_changed)

        last_updated = json_dict.get('last_updated')

        if last_updated:
            last_updated = dt_util.str_to_datetime(last_updated)

        return cls(json_dict['entity_id'], json_dict['state'],
                   json_dict.get('attributes'),
                   last_changed, last_updated)

    def __eq__(self, other):
        """Return the comparison of the state."""
        # Note: timestamps deliberately excluded from equality.
        return (self.__class__ == other.__class__ and
                self.entity_id == other.entity_id and
                self.state == other.state and
                self.attributes == other.attributes)

    def __repr__(self):
        """Return the representation of the states."""
        attr = "; {}".format(util.repr_helper(self.attributes)) \
            if self.attributes else ""

        return "<state {}={}{} @ {}>".format(
            self.entity_id, self.state, attr,
            dt_util.datetime_to_local_str(self.last_changed))
# In[11]: print(m) # In[12]: print(dir(m)) # In[13]: m.get('1') # In[14]: Person.__dict__ # In[15]: getattr(Person, "name") # In[16]: