def union2(p: Union[Type[A], Type[B]]):
    attr.fields(p)
    attr.fields_dict(p)
    attr.asdict(<warning descr="'attr.asdict' method should be called on attrs instances">p</warning>)
    attr.astuple(<warning descr="'attr.astuple' method should be called on attrs instances">p</warning>)
    attr.assoc(<warning descr="'attr.assoc' method should be called on attrs instances">p</warning>)
    attr.evolve(<warning descr="'attr.evolve' method should be called on attrs instances">p</warning>)
def union1(p: Union[A, B]):
    attr.fields(<warning descr="'attr.fields' method should be called on attrs types">p</warning>)
    attr.fields_dict(<warning descr="'attr.fields_dict' method should be called on attrs types">p</warning>)
    attr.asdict(p)
    attr.astuple(p)
    attr.assoc(p)
    attr.evolve(p)
def cls_all_attrs_fields(cls):
    these_properties = attr.fields_dict(cls)  # dictionary
    if cls._iterated_component_type is not None and issubclass(
            cls._iterated_component_type, Component):
        iterated_properties = attr.fields_dict(cls._iterated_component_type)
        these_properties.update(iterated_properties)
    return these_properties
def test_create_variable_cache(self, model):
    actual = model._var_cache[("init_profile", "n_points")]

    assert actual["name"] == "init_profile__n_points"
    assert (actual["attrib"] is
            attr.fields_dict(model["init_profile"].__class__)["n_points"])
    assert actual["metadata"] == attr.fields_dict(InitProfile)["n_points"].metadata
    assert actual["value"] is None
def test_all_classes_have_a_non_optional_state_code(self):
    for cls in get_all_entity_classes_in_module(entities):
        self.assertTrue(
            'state_code' in attr.fields_dict(cls),
            f"Expected field |state_code| not defined for class [{cls}].")
        attribute = attr.fields_dict(cls)['state_code']
        self.assertEqual(
            attribute.type, str,
            f"Unexpected type [{attribute.type}] for "
            f"|state_code| field of class [{cls}].")
def structural(p):
    print(len(p))
    attr.fields(p)
    attr.fields_dict(p)
    attr.asdict(p)
    attr.astuple(p)
    attr.assoc(p)
    attr.evolve(p)
def from_argparse(args: argparse.Namespace) -> "RunOptions":
    """
    Takes an argparse.Namespace as specified in `parse_command_line`, loads input
    configuration files from file paths, and converts to a RunOptions instance.
    :param args: collection of command-line parameters passed to mlagents-learn
    :return: RunOptions representing the passed in arguments, with trainer config,
        curriculum and sampler configs loaded from files.
    """
    argparse_args = vars(args)
    config_path = StoreConfigFile.trainer_config_path

    # Load YAML
    configured_dict: Dict[str, Any] = {
        "checkpoint_settings": {},
        "env_settings": {},
        "engine_settings": {},
        "torch_settings": {},
    }
    _require_all_behaviors = True
    if config_path is not None:
        configured_dict.update(load_config(config_path))
    else:
        # If we're not loading from a file, we don't require all behavior names
        # to be specified.
        _require_all_behaviors = False

    # Use the YAML file values for all values not specified in the CLI.
    for key in configured_dict.keys():
        # Detect bad config options
        if key not in attr.fields_dict(RunOptions):
            raise TrainerConfigError(
                "The option {} was specified in your YAML file, but is invalid.".format(
                    key
                )
            )

    # Override with CLI args
    # Keep deprecated --load working, TODO: remove
    argparse_args["resume"] = argparse_args["resume"] or argparse_args["load_model"]

    for key, val in argparse_args.items():
        if key in DetectDefault.non_default_args:
            if key in attr.fields_dict(CheckpointSettings):
                configured_dict["checkpoint_settings"][key] = val
            elif key in attr.fields_dict(EnvironmentSettings):
                configured_dict["env_settings"][key] = val
            elif key in attr.fields_dict(EngineSettings):
                configured_dict["engine_settings"][key] = val
            elif key in attr.fields_dict(TorchSettings):
                configured_dict["torch_settings"][key] = val
            else:  # Base options
                configured_dict[key] = val

    final_runoptions = RunOptions.from_dict(configured_dict)
    final_runoptions.checkpoint_settings.prioritize_resume_init()
    # Need check to bypass type checking but keep structure on dict working
    if isinstance(final_runoptions.behaviors, TrainerSettings.DefaultTrainerDict):
        # Configure whether or not we should require all behavior names to be
        # found in the config YAML.
        final_runoptions.behaviors.set_config_specified(_require_all_behaviors)
    return final_runoptions
def parameters(self):
    return [
        ('Other_contrast',
         ['Creaky voice', 'Voicing', 'Glottalization', 'N / A'],
         lambda i: i.Other_contrasts[0] if i.Other_contrasts else None,
         None),
        ('Other_contrast_amount',
         ['Some', 'All', 'Maybe'],
         lambda i: i.Other_contrasts[1] if i.Other_contrasts else None,
         None),
        ('Sound_inventory_notes', 'string',
         lambda i: i.Notes[0] if i.Notes else None,
         lambda i: i.Notes[1] if i.Notes else []),
        ('N_consonants', 'integer', lambda i: i.N_consonant_phonemes, None),
        ('N_elaborated_consonants', 'integer',
         lambda i: i.N_elaborated_consonants, None),
        ('N_elaborations', 'integer', lambda i: i.N_elaborations, None),
        ('N_vowel_qualities', 'integer', lambda i: i.N_vowel_qualities, None),
        ('Consonant_inventory', 'multichoice',
         lambda i: i.C_phoneme_inventory, None),
        ('Vowel_inventory', 'multichoice',
         lambda i: i.V_phoneme_inventory, None),
        ('Geminate_inventory', 'multichoice', lambda i: i.Geminates, None),
        ('Diphtong_inventory', 'multichoice',
         lambda i: i.Diphthongs_or_vowel_sequences[0], None),
        ('Vowel_sequence_inventory', 'multichoice',
         lambda i: i.Diphthongs_or_vowel_sequences[1], None),
        ('Comment_on_diphthongs_and_vowel_sequences', 'string',
         lambda i: i.Diphthongs_or_vowel_sequences[2], None),
        ('Contrastive_length',
         attr.fields_dict(Sound_inventory)['Contrastive_length'].validator.validator.options,
         lambda i: i.Contrastive_length, None),
        ('Contrastive_nasalization',
         attr.fields_dict(Sound_inventory)['Contrastive_nasalization'].validator.validator.options,
         lambda i: i.Contrastive_nasalization, None),
        ('Place', 'multichoice', lambda i: i.Places, None),
        ('Elaboration', 'multichoice', lambda i: i.Elaborations, None),
        ('Manner', 'multichoice', lambda i: i.Manners, None),
        ('Voicing_contrasts', 'multichoice', lambda i: i.Voicing_contrasts, None),
    ]
def test_haproxy_stats_v1_5(self, mock_socket):
    # Mocking haproxy stats socket with csv file
    self.stats_file = open(
        path.join(TEST_DATA_DIR, 'haproxy-stats-v1.5.csv'))
    fake_socket = MagicMock(recv=self.stats_file.read)
    mock_socket.return_value = fake_socket

    # Running method under test
    stats_snapshot = proxy_stats.ProxiesStatsSource.get_current()

    # Verifying outcomes
    self.assertIsInstance(stats_snapshot.utc_timestamp, float)
    proxies_stats = stats_snapshot.proxies_stats
    self.assertEqual(len(proxies_stats), 5)
    proxies_stats_dict = {
        proxy_stats.name: proxy_stats for proxy_stats in proxies_stats
    }
    self.assertEqual(
        set(proxies_stats_dict), {
            'TaskQueue', 'UserAppServer', 'appscale-datastore_server',
            'as_blob_server', 'gae_appscaledashboard'
        })

    # There are 5 proxies, let's choose one for deeper verification
    dashboard = proxies_stats_dict['gae_appscaledashboard']
    self.assertEqual(dashboard.name, 'gae_appscaledashboard')
    self.assertEqual(dashboard.unified_service_name, 'application')
    self.assertEqual(dashboard.application_id, 'appscaledashboard')

    # Frontend stats shouldn't have Nones
    frontend = dashboard.frontend
    for field in attr.fields_dict(proxy_stats.HAProxyFrontendStats).keys():
        self.assertIsNotNone(getattr(frontend, field))

    # Backend stats shouldn't have Nones
    backend = dashboard.backend
    for field in attr.fields_dict(proxy_stats.HAProxyBackendStats).keys():
        self.assertIsNotNone(getattr(backend, field))

    # Server stats can have Nones only in some fields
    servers = dashboard.servers
    self.assertIsInstance(servers, list)
    self.assertEqual(len(servers), 3)
    for server in servers:
        for field in attr.fields_dict(
                proxy_stats.HAProxyServerStats).keys():
            if field in {
                'qlimit', 'throttle', 'tracked', 'check_code',
                'last_chk', 'last_agt'
            }:
                continue
            self.assertIsNotNone(getattr(server, field))

    # We don't have listeners on stats
    self.assertEqual(dashboard.listeners, [])
def test_all_entity_classes_have_expected_primary_id(self):
    for cls in get_all_entity_classes_in_module(entities):
        key_name = primary_key_name_from_cls(cls)
        self.assertTrue(key_name in attr.fields_dict(cls),
                        f"Expected primary key field [{key_name}] not "
                        f"defined for class [{cls}].")
        attribute = attr.fields_dict(cls)[key_name]
        self.assertEqual(attribute.type, Optional[int],
                         f"Unexpected type [{attribute.type}] for primary "
                         f"key [{key_name}] of class [{cls}].")
def test_haproxy_stats_v1_5(self, mock_socket):
    # Mocking haproxy stats socket with csv file
    self.stats_file = open(path.join(DATA_DIR, 'haproxy-stats-v1.5.csv'))
    fake_socket = MagicMock(recv=self.stats_file.read)
    mock_socket.return_value = fake_socket

    # Running method under test
    stats_snapshot = proxy_stats.ProxiesStatsSource.get_current()

    # Verifying outcomes
    self.assertIsInstance(stats_snapshot.utc_timestamp, float)
    proxies_stats = stats_snapshot.proxies_stats
    self.assertEqual(len(proxies_stats), 5)
    proxies_stats_dict = {
        proxy_stats.name: proxy_stats for proxy_stats in proxies_stats
    }
    self.assertEqual(set(proxies_stats_dict), {
        'TaskQueue', 'UserAppServer', 'appscale-datastore_server',
        'as_blob_server', 'gae_appscaledashboard'
    })

    # There are 5 proxies, let's choose one for deeper verification
    dashboard = proxies_stats_dict['gae_appscaledashboard']
    self.assertEqual(dashboard.name, 'gae_appscaledashboard')
    self.assertEqual(dashboard.unified_service_name, 'application')
    self.assertEqual(dashboard.application_id, 'appscaledashboard')

    # Frontend stats shouldn't have Nones
    frontend = dashboard.frontend
    for field in attr.fields_dict(proxy_stats.HAProxyFrontendStats).keys():
        self.assertIsNotNone(getattr(frontend, field))

    # Backend stats shouldn't have Nones
    backend = dashboard.backend
    for field in attr.fields_dict(proxy_stats.HAProxyBackendStats).keys():
        self.assertIsNotNone(getattr(backend, field))

    # Server stats can have Nones only in some fields
    servers = dashboard.servers
    self.assertIsInstance(servers, list)
    self.assertEqual(len(servers), 3)
    for server in servers:
        for field in attr.fields_dict(proxy_stats.HAProxyServerStats).keys():
            if field in {'qlimit', 'throttle', 'tracked', 'check_code',
                         'last_chk', 'last_agt'}:
                continue
            self.assertIsNotNone(getattr(server, field))

    # We don't have listeners on stats
    self.assertEqual(dashboard.listeners, [])
def get_doc(cls: t.Type, attrib: str, field: str) -> str:
    """
    Fetch attribute documentation field. Requires fields metadata to be
    processed with :func:`documented`.

    Parameters
    ----------
    cls : type
        Class from which to get the attribute.

    attrib : str
        Attribute from which to get the doc field.

    field : {"doc", "type", "init_type", "default"}
        Documentation field to query.

    Returns
    -------
    str
        Queried documentation content.

    Raises
    ------
    ValueError
        If the requested ``field`` is missing from the target attribute's
        metadata.

    ValueError
        If the requested ``field`` is unsupported.
    """
    try:
        if field == "doc":
            return attr.fields_dict(cls)[attrib].metadata[MetadataKey.DOC]

        if field == "type":
            return attr.fields_dict(cls)[attrib].metadata[MetadataKey.TYPE]

        if field == "init_type":
            return attr.fields_dict(cls)[attrib].metadata[MetadataKey.INIT_TYPE]

        if field == "default":
            return attr.fields_dict(cls)[attrib].metadata[MetadataKey.DEFAULT]

    except KeyError:
        raise ValueError(
            f"{cls.__name__}.{attrib} has no documented field '{field}'")

    raise ValueError(f"unsupported attribute doc field {field}")
def from_argparse(args: argparse.Namespace) -> "RunOptions":
    """
    Takes an argparse.Namespace as specified in `parse_command_line`, loads input
    configuration files from file paths, and converts to a RunOptions instance.
    :param args: collection of command-line parameters passed to mlagents-learn
    :return: RunOptions representing the passed in arguments, with trainer config,
        curriculum and sampler configs loaded from files.
    """
    argparse_args = vars(args)
    config_path = StoreConfigFile.trainer_config_path

    # Load YAML
    configured_dict: Dict[str, Any] = {
        "checkpoint_settings": {},
        "env_settings": {},
        "engine_settings": {},
        "torch_settings": {},
    }
    if config_path is not None:
        configured_dict.update(load_config(config_path))

    # Use the YAML file values for all values not specified in the CLI.
    for key in configured_dict.keys():
        # Detect bad config options
        if key not in attr.fields_dict(RunOptions):
            raise TrainerConfigError(
                "The option {} was specified in your YAML file, but is invalid.".format(
                    key
                )
            )

    # Override with CLI args
    # Keep deprecated --load working, TODO: remove
    argparse_args["resume"] = argparse_args["resume"] or argparse_args["load_model"]

    for key, val in argparse_args.items():
        if key in DetectDefault.non_default_args:
            if key in attr.fields_dict(CheckpointSettings):
                configured_dict["checkpoint_settings"][key] = val
            elif key in attr.fields_dict(EnvironmentSettings):
                configured_dict["env_settings"][key] = val
            elif key in attr.fields_dict(EngineSettings):
                configured_dict["engine_settings"][key] = val
            elif key in attr.fields_dict(TorchSettings):
                configured_dict["torch_settings"][key] = val
            else:  # Base options
                configured_dict[key] = val

    final_runoptions = RunOptions.from_dict(configured_dict)
    return final_runoptions
def bq_schema_for_metric_table(cls) -> List[bigquery.SchemaField]:
    """Returns the necessary BigQuery schema for the RecidivizMetric, which is
    a list of SchemaField objects containing the column name and value type for
    each attribute on the RecidivizMetric."""

    def schema_type_for_attribute(attribute) -> str:
        # Race and ethnicity fields are the only ones that support list form.
        # These are converted to comma-separated lists stored as strings in
        # BigQuery.
        if is_enum(attribute) or is_list(attribute) or is_str(attribute):
            return bigquery.enums.SqlTypeNames.STRING.value
        if is_int(attribute):
            return bigquery.enums.SqlTypeNames.INTEGER.value
        if is_float(attribute):
            return bigquery.enums.SqlTypeNames.FLOAT.value
        if is_date(attribute):
            return bigquery.enums.SqlTypeNames.DATE.value
        if is_bool(attribute):
            return bigquery.enums.SqlTypeNames.BOOLEAN.value
        raise ValueError(f"Unhandled attribute type for attribute: {attribute}")

    return [
        bigquery.SchemaField(field, schema_type_for_attribute(attribute),
                             mode='NULLABLE')
        for field, attribute in attr.fields_dict(cls).items()
    ]
def wrapper(cls: Type[T]) -> Table[T]:
    name = table_name or cls.__name__
    if cls.__bases__ == (object,):
        cls = dataclass(cls)

    cons: List[Union[Column, Index]] = list(args)

    if issubclass(cls, tuple):
        defaults = getattr(cls, "_field_defaults", {})
    else:
        defaults = {
            k: (NO_DEFAULT if a.default == NOTHING else a.default)
            for k, a in fields_dict(cls).items()
        }

    for key, value in get_type_hints(cls).items():
        cons.append(
            Column(
                key,
                ctype=value,
                table_name=name,
                default=defaults.get(key, NO_DEFAULT),
            )
        )
    return Table(name, cons=cons, source=cls)
def on_msg(msg: message.Message) -> None:
    if isinstance(msg, HomeassistantServiceResponse):
        kwargs = {}
        for key, _ in attr.fields_dict(HomeassistantServiceCall).items():
            kwargs[key] = getattr(msg, key)
        on_service_call(HomeassistantServiceCall(**kwargs))
def is_property_forward_ref(obj, property_name) -> bool:
    """Returns true if the attribute corresponding to |property_name| on the
    given object is a ForwardRef type."""
    attribute = attr.fields_dict(obj.__class__).get(property_name)
    return is_forward_ref(attribute)
def new_with_defaults(cls, **kwargs):
    """Create a new object with default values if set, otherwise None.

    Note: This method should only be used in tests. In prod you should always
    use the Attr's __init__ or builder which will verify that all fields on
    the Attr are set.

    Arguments:
        kwargs: The kwargs to pass to Attr object's __init__, the rest of the
            attributes are set to their default or None if a default is
            unspecified.
    """
    for field, attribute in attr.fields_dict(cls).items():
        default = attribute.default

        # Don't set a default if the field is already set
        if field in kwargs:
            continue

        # Ignore Factories to allow them to render into a default value
        if isinstance(default, attr.Factory):
            continue

        kwargs[field] = None if default is attr.NOTHING else default

    return cls(**kwargs)
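# Hedged usage sketch for the helper above. The class below is hypothetical
# (not from any of the snippets here) and assumes new_with_defaults is
# attached as a classmethod, e.g. via a builder-style base class.
import attr

@attr.s
class _ExamplePoint:
    x = attr.ib()                 # no default -> would be filled with None
    y = attr.ib(default=7)        # plain default -> used as-is
    tags = attr.ib(factory=list)  # Factory -> left for __init__ to render

# _ExamplePoint.new_with_defaults(x=1) -> _ExamplePoint(x=1, y=7, tags=[])
# _ExamplePoint.new_with_defaults()    -> _ExamplePoint(x=None, y=7, tags=[])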
def column_map(cls):
    """Return a dictionary that maps fields to DF Names."""
    col_mapping = {}
    for name in attr.fields_dict(cls):
        out_name = "".join([part.capitalize() for part in name.split("_")])
        col_mapping[name] = out_name
    return col_mapping
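# Minimal sketch of what column_map produces, on a hypothetical attrs class:
# snake_case field names become CamelCase DataFrame column names.
import attr

@attr.s
class _Event:
    event_id = attr.ib(default=0)
    time_generated = attr.ib(default=None)

# column_map(_Event) -> {"event_id": "EventId", "time_generated": "TimeGenerated"}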
def AnalogChannel_to_dict(c):
    d = DefaultChannel.copy()
    d.update(
        {
            "ChannelMode": "Analog",
            "ScanList": dzcb.munge.zone_name(c.scanlist, NAME_MAX),
        }
    )
    d.update(
        {
            AnalogChannel_name_maps[k]: v
            for k, v in attr.asdict(c).items()
            if k in attr.fields_dict(AnalogChannel) and k in AnalogChannel_name_maps
        }
    )
    d["Name"] = c.short_name
    if d["CtcssEncode"]:
        if d["CtcssEncode"].startswith("D"):
            d["CtcssEncode"] += "N"
    else:
        d["CtcssEncode"] = "None"
    if d["CtcssDecode"]:
        if d["CtcssDecode"].startswith("D"):
            d["CtcssDecode"] += "N"
    else:
        d["CtcssDecode"] = "None"
    d["Bandwidth"] = str(round(d["Bandwidth"], 1)).replace(".0", "")
    return d
def variables_dict(process_cls):
    """Get all xsimlab variables declared in a process.

    Exclude attr.Attribute objects that are not xsimlab-specific.
    """
    return OrderedDict(
        (k, v) for k, v in fields_dict(process_cls).items()
        if "var_type" in v.metadata
    )
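# Hedged sketch (hypothetical process class and metadata): variables_dict
# keeps only attributes carrying xsimlab-style "var_type" metadata, so plain
# attr.ib fields on the same class are filtered out.
import attr

@attr.s
class _Profile:
    n_points = attr.ib(default=0, metadata={"var_type": "variable"})
    _scratch = attr.ib(default=None)  # no metadata -> excluded

# variables_dict(_Profile) would return an OrderedDict containing only "n_points".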
def from_opts(cls, game_opts: GameOptsStruct, gui_opts: GUIOptsStruct) -> "State":
    dict_ = {**attr.asdict(game_opts), **attr.asdict(gui_opts)}
    args = {a: v for a, v in dict_.items() if a in attr.fields_dict(cls)}
    args["current_game_state"] = PerGameState.from_structs(game_opts, gui_opts)
    return cls(**args)
def _deserialize_class(dict_: Dict[str, Any]) -> TRecord:
    type_name: str = dict_.pop('_t')[-1]
    type_info: type = ClassInfo.get_type(type_name)
    fields = attr.fields_dict(type_info)

    new_obj = type_info()
    for dict_key, dict_value in dict_.items():
        slot = StringUtil.to_snake_case(dict_key)

        field = fields[slot]
        member_type = field.type

        deserialized_value: Any
        if get_origin(member_type) is not None and get_origin(member_type) is list:
            deserialized_value = _deserialize_list(member_type, dict_value,
                                                   field.metadata)
        elif issubclass(member_type, Data):
            deserialized_value = _deserialize_class(dict_value)
        elif issubclass(member_type, IntEnum):
            deserialized_value = member_type[dict_value]
        else:
            deserialized_value = _deserialize_primitive(member_type, dict_value,
                                                        field.metadata)

        setattr(new_obj, slot, deserialized_value)

    return new_obj
def _isprotocol_subclass(cls: Any, protocol: Type[Any]) -> bool:
    fields = set(attr.fields_dict(cls).keys())
    meths = set(key for key in cls.__dict__.keys() if not key.startswith("_"))
    fm = fields | meths
    ret = all([attr in fm for attr in _get_protocol_attrs(protocol)])
    return ret
def AnalogChannel_to_dict(c, codeplug):
    d = DefaultChannel.copy()
    d.update({
        "ChannelMode": "Analog",
        "Bandwidth": c.bandwidth.value,
        "ScanList": dzcb.munge.zone_name(c.scanlist_name(codeplug), NAME_MAX),
    })
    d.update({
        AnalogChannel_name_maps[k]: v
        for k, v in attr.asdict(c).items()
        if k in attr.fields_dict(AnalogChannel) and k in AnalogChannel_name_maps
    })
    d["Name"] = c.short_name
    if d["CtcssEncode"]:
        if d["CtcssEncode"].startswith("D"):
            d["CtcssEncode"] += "N"
    else:
        d["CtcssEncode"] = "None"
    if d["CtcssDecode"]:
        if d["CtcssDecode"].startswith("D"):
            d["CtcssDecode"] += "N"
    else:
        d["CtcssDecode"] = "None"
    return d
def Contact_to_dict(c):
    d = dict(CallReceiveTone="No")
    d.update({
        Contact_name_map[k]: value_replacements.get(v, str(v))
        for k, v in attr.asdict(c).items()
        if k in attr.fields_dict(Contact)
    })
    return d
def get_attr_data(obj: Any) -> Dict[str, Any]:
    from omegaconf.omegaconf import _maybe_wrap

    d = {}
    is_type = isinstance(obj, type)
    obj_type = obj if is_type else type(obj)
    for name, attrib in attr.fields_dict(obj_type).items():
        is_optional, type_ = _resolve_optional(attrib.type)
        type_ = _resolve_forward(type_, obj.__module__)
        is_nested = is_attr_class(type_)
        if not is_type:
            value = getattr(obj, name)
        else:
            value = attrib.default
            if value == attr.NOTHING:
                if is_nested:
                    value = type_
                else:
                    raise ValueError(
                        "Missing default value for {}, to indicate "
                        "default must be populated later use '???'".format(name))
        d[name] = _maybe_wrap(
            annotated_type=type_,
            is_optional=is_optional,
            key=name,
            value=value,
            parent=None,
        )
    return d
def to_json(self, compiled=False):
    if not compiled:
        keys = ['name', 'description', 'code', 'level', 'search',
                'split_by_year']
    else:
        keys = ['name', 'level']
    fields = attr.fields_dict(type(self))
    yield '{\n'
    first = True
    for key in keys:
        value = getattr(self, key)
        if key in fields and fields[key].default == value:
            continue
        if not first:
            yield ',\n'
        else:
            first = False
        yield f' "{key}": {json_escape(value)}'
    yield '\n}'
def get_non_flat_property_class_name(obj, property_name) -> Optional[str]:
    """Returns the class name of the property with |property_name| on obj, or
    None if the property is a flat field.
    """
    if is_property_flat_field(obj, property_name):
        return None

    attribute = attr.fields_dict(obj.__class__).get(property_name)
    if not attribute:
        return None

    attr_type = attribute.type
    if _is_list(attr_type):
        list_elem_type = attr_type.__args__[0]  # type: ignore
        return _get_type_name_from_type(list_elem_type)

    if _is_union(attr_type):
        type_names = [_get_type_name_from_type(t)
                      for t in attr_type.__args__]  # type: ignore
        type_names = [t for t in type_names if t != 'NoneType']
        if len(type_names) > 1:
            raise ValueError(f'Multiple nonnull types found: {type_names}')
        if not type_names:
            raise ValueError('Expected at least one nonnull type')
        return type_names[0]

    if _is_forward_ref(attr_type):
        return _get_type_name_from_type(attr_type)

    raise ValueError(
        f'Non-flat field [{property_name}] on class [{obj.__class__}] should '
        f'either correspond to list or union.')
def DigitalChannel_to_dict(c, codeplug, contacts_by_id):
    d = DefaultChannel.copy()
    talkgroup_name = "Parrot 1"
    if c.talkgroup:
        # get the dedupe'd contact's name for the given ID
        talkgroup_name = str(
            contacts_by_id.get(c.talkgroup.dmrid, c.talkgroup).name)
    d.update({
        "ChannelMode": "Digital",
        "RepeaterSlot": str(c.talkgroup.timeslot) if c.talkgroup else 1,
        "ContactName": talkgroup_name,
        "GroupList": str(c.grouplist_name(codeplug)) if c.grouplist else None,
        "ScanList": dzcb.munge.zone_name(c.scanlist_name(codeplug), NAME_MAX),
    })
    d.update({
        DigitalChannel_name_maps[k]: v
        for k, v in attr.asdict(c).items()
        if k in attr.fields_dict(DigitalChannel) and k in DigitalChannel_name_maps
    })
    d["Name"] = c.short_name
    return d
def config_client(**kwargs) -> Callable[[Dict[str, str]], ConfigClient]:
    """ConfigClient decorator.

    Usage:

    @config_client(app_name='test')
    def get_config(config):
        db_user = config.get_attribute('database.user')

    :raises: ConnectionError: If fail_fast enabled.
    :return: ConfigClient instance.
    """
    logger.debug("kwargs: %r", kwargs)
    cls_attributes = attr.fields_dict(ConfigClient).keys()
    instance_params = {}
    get_config_params = {}
    for key, value in kwargs.items():
        if key in cls_attributes:
            instance_params.update({key: value})
        else:
            get_config_params.update({key: value})

    def wrap_function(function):
        logger.debug("caller: %s", function.__name__)

        @wraps(function)
        def enable_config():
            obj = ConfigClient(**instance_params)
            obj.get_config(**get_config_params)
            return function(obj)

        return enable_config

    return wrap_function
def _apply(cls, op, *samples):
    return cls(
        **{
            field: op(*(getattr(sample, field) for sample in samples))
            for field in attr.fields_dict(cls)
        }
    )
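# Hedged usage sketch for _apply: combine several instances of the same attrs
# class field-by-field. The _Sample class and operator.add are illustrative
# stand-ins, not taken from the surrounding code.
import attr
import operator

@attr.s(frozen=True)
class _Sample:
    a = attr.ib()
    b = attr.ib()

_s1, _s2 = _Sample(1, 2), _Sample(10, 20)
_merged = _Sample(
    **{
        field: operator.add(*(getattr(s, field) for s in (_s1, _s2)))
        for field in attr.fields_dict(_Sample)
    }
)
assert _merged == _Sample(a=11, b=22)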
def from_config(cls, prov_config, prov_key):
    """Returns an object using the passed yaml config

    Sets defaults for yaml configured objects separate from attr.ib definitions
    """
    config_copy = deepcopy(prov_config)  # copy to avoid modifying passed attrdict
    config_copy.update(credentials[config_copy.get('credentials')])
    class_params = {k: v for k, v in iteritems(config_copy)
                    if k in attr.fields_dict(cls)}
    return cls(key=prov_key, **class_params)
def as_fill_value(self, user_type=None, auth_mode=None):
    """Basic implementation matches instance attributes to view form attributes"""
    if user_type not in USER_TYPES.keys():
        raise ValueError('invalid user_type "{}", must be key in USER_TYPES'
                         .format(user_type))
    class_attrs = attr.fields_dict(type(self))  # dict of attr objects keyed by name
    # attr filter needs the Attribute object
    include_attrs = [class_attrs.get(name)
                     for name in self.view_class.cls_widget_names()
                     if name in class_attrs]
    fill = attr.asdict(self, filter=attr.filters.include(*include_attrs))
    return fill
def as_fill_external_value(self):
    """openLDAP and FreeIPA providers can be configured for external auth

    Same view for all auth provider types
    """
    class_attrs = attr.fields_dict(type(self))  # dict of attr objects keyed by name
    # attr filter needs the Attribute object
    include_attrs = [class_attrs.get(name)
                     for name in ExternalAuthenticationView.cls_widget_names()
                     if name in class_attrs]
    fill = attr.asdict(self, filter=attr.filters.include(*include_attrs))
    return fill
def _attr_obj_from_dict(cls, **kwargs):
    return cls(**{key: kwargs[key] for key in attr.fields_dict(cls)})
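# Minimal sketch (hypothetical class and keys): the one-liner above keeps only
# declared attrs fields, so stray keys in kwargs are dropped instead of raising
# TypeError; every declared field must still be present in kwargs.
import attr

@attr.s
class _Config:
    host = attr.ib()
    port = attr.ib()

_raw = {"host": "localhost", "port": 8080, "unused": True}
_cfg = _Config(**{key: _raw[key] for key in attr.fields_dict(_Config)})
assert _cfg == _Config(host="localhost", port=8080)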
import attr
from typing import Type, Union


class A:
    pass


attr.fields(<warning descr="'attr.fields' method should be called on attrs types">A</warning>)
attr.fields(<warning descr="'attr.fields' method should be called on attrs types">A()</warning>)
attr.fields_dict(<warning descr="'attr.fields_dict' method should be called on attrs types">A</warning>)
attr.fields_dict(<warning descr="'attr.fields_dict' method should be called on attrs types">A()</warning>)
attr.asdict(<warning descr="'attr.asdict' method should be called on attrs instances">A()</warning>)
attr.astuple(<warning descr="'attr.astuple' method should be called on attrs instances">A()</warning>)
attr.assoc(<warning descr="'attr.assoc' method should be called on attrs instances">A()</warning>)
attr.evolve(<warning descr="'attr.evolve' method should be called on attrs instances">A()</warning>)


@attr.s
class B:
    pass


attr.fields(B)
attr.fields(<warning descr="'attr.fields' method should be called on attrs types">B()</warning>)
attr.fields_dict(B)
attr.fields_dict(<warning descr="'attr.fields_dict' method should be called on attrs types">B()</warning>)
def unknown(p):
    attr.fields(p)
    attr.fields_dict(p)
    attr.asdict(p)
    attr.astuple(p)
def _has_check(rule):
    return rule.check is not fields_dict(type(rule))["check"].default
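# Hedged sketch of the default-comparison trick above, using a hypothetical
# _Rule class: an attribute that still holds its declared default is treated
# as "no check configured".
import attr
from attr import fields_dict

@attr.s
class _Rule:
    check = attr.ib(default=None)

assert _Rule().check is fields_dict(_Rule)["check"].default           # untouched default
assert _Rule(check=len).check is not fields_dict(_Rule)["check"].default  # overridden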
def test_haproxy_stats_v1_4(self, mock_logging_warn, mock_socket):
    # Mocking "echo 'show stat' | socat stdio unix-connect:{}" with csv file
    self.stats_file = open(path.join(DATA_DIR, 'haproxy-stats-v1.4.csv'))
    fake_socket = MagicMock(recv=self.stats_file.read)
    mock_socket.return_value = fake_socket

    # Running method under test
    proxy_stats.ProxiesStatsSource.first_run = True
    stats_snapshot = proxy_stats.ProxiesStatsSource.get_current()

    # Verifying outcomes
    self.assertIsInstance(stats_snapshot.utc_timestamp, float)
    proxies_stats = stats_snapshot.proxies_stats
    self.assertTrue(
        mock_logging_warn.call_args[0][0].startswith(
            "Old version of HAProxy is used (v1.5+ is expected)."
        )
    )
    self.assertEqual(len(proxies_stats), 5)
    proxies_stats_dict = {
        proxy_stats.name: proxy_stats for proxy_stats in proxies_stats
    }
    self.assertEqual(set(proxies_stats_dict), {
        'TaskQueue', 'UserAppServer', 'appscale-datastore_server',
        'as_blob_server', 'gae_appscaledashboard'
    })

    # There are 5 proxies, let's choose one for deeper verification
    dashboard = proxies_stats_dict['gae_appscaledashboard']
    self.assertEqual(dashboard.name, 'gae_appscaledashboard')
    self.assertEqual(dashboard.unified_service_name, 'application')
    self.assertEqual(dashboard.application_id, 'appscaledashboard')

    # Frontend stats shouldn't have Nones
    frontend = dashboard.frontend
    for field in attr.fields_dict(proxy_stats.HAProxyFrontendStats).keys():
        self.assertIsNotNone(getattr(frontend, field))
    # New columns should be highlighted
    for new_in_v1_5 in ('comp_byp', 'comp_rsp', 'comp_out', 'comp_in'):
        self.assertIs(getattr(frontend, new_in_v1_5), MISSED)

    # Backend stats shouldn't have Nones
    backend = dashboard.backend
    for field in attr.fields_dict(proxy_stats.HAProxyBackendStats).keys():
        self.assertIsNotNone(getattr(backend, field))
    # New columns should be highlighted
    for new_in_v1_5 in ('comp_byp', 'lastsess', 'comp_rsp', 'comp_out',
                        'comp_in', 'ttime', 'rtime', 'ctime', 'qtime'):
        self.assertIs(getattr(backend, new_in_v1_5), MISSED)

    # Server stats can have Nones only in some fields
    servers = dashboard.servers
    self.assertIsInstance(servers, list)
    self.assertEqual(len(servers), 3)
    for server in servers:
        for field in attr.fields_dict(proxy_stats.HAProxyServerStats).keys():
            if field in {'qlimit', 'throttle', 'tracked', 'check_code',
                         'last_chk', 'last_agt'}:
                continue
            self.assertIsNotNone(getattr(server, field))
        # New columns should be highlighted
        for new_in_v1_5 in ('lastsess', 'last_chk', 'ttime', 'last_agt',
                            'rtime', 'ctime', 'qtime'):
            self.assertIs(getattr(server, new_in_v1_5), MISSED)

    # We don't have listeners on stats
    self.assertEqual(dashboard.listeners, [])