def create_dict_named(obj: namedtuple) -> dict:
    """Create a dict out of a namedtuple.

    Args:
        obj: Namedtuple.

    Returns:
        A dict containing the data, tagged with the namedtuple's type name
        under '__type__' and its converted fields under '__data__'.
    """
    return {
        '__type__': type(obj).__name__,
        '__data__': create_dict(obj._asdict()),
    }
def genotype_to_dict(genotype: namedtuple):
    """Converts the given genotype to a dictionary that can be serialized.

    Inverse operation to dict_to_genotype().

    Args:
        genotype (namedtuple): The genotype that should be converted.

    Returns:
        dict: The converted genotype, with any `range` fields expanded to
        plain lists (ranges are not JSON-serializable).
    """
    genotype_dict = genotype._asdict()
    for key, val in genotype_dict.items():
        # isinstance instead of type-equality; list() instead of a manual
        # comprehension copy. Overwriting existing keys while iterating is
        # safe because the dict's size never changes.
        if isinstance(val, range):
            genotype_dict[key] = list(val)
    return genotype_dict
def prepare_log(log: namedtuple):
    """Serialize a log namedtuple into a DB-ready row.

    Returns:
        (log_dic_db, table, fields): a column-name -> db-value mapping, the
        target table name, and the namedtuple's field names.

    Raises:
        KeyError: if the log's class name has no entry in log_model_map.
    """
    try:
        # Map the log's type name to its ORM model class.
        # NOTE(review): the bare re-raise makes this try/except a no-op;
        # presumably a custom error type was intended here -- confirm.
        LogModel = log_model_map[log.__class__.__name__]
    except KeyError:
        raise
    sensitive_fields = sorted(LogModel.sensitive_fields())
    log_dic_python = log._asdict()
    # Attach an integrity/encryption value computed over the sensitive fields.
    # NOTE(review): exact semantics depend on encrypt_log -- verify.
    log_dic_python['hmac1'] = encrypt_log(log_dic_python, sensitive_fields)
    table = LogModel._meta.table_name
    # Convert Python values to DB column values via the model's field
    # converters (peewee-style _normalize_data / db_value -- confirm ORM).
    log_dic_db = {
        field.column_name: field.db_value(value)
        for field, value in LogModel._normalize_data(None, log_dic_python).items()
    }
    fields = log._fields
    return log_dic_db, table, fields
def results_to_csv(config: namedtuple, val_metrics: dict, attn_metrics: dict, output_csv=None):
    """Flatten training and attention metrics into one DataFrame.

    Per-epoch metrics (list-valued) produce one row per epoch; scalar metrics
    go into a single trailing summary row. Every row also carries the model
    parameters taken from ``config``. Cells that do not apply to a row are "".

    Args:
        config: Namedtuple of model parameters; one column per field.
        val_metrics: Training/validation metrics (lists = per-epoch, else scalar).
        attn_metrics: Attention metrics, same convention.
        output_csv: Optional path; when given the DataFrame is also written there.

    Returns:
        pd.DataFrame: the combined table.

    Raises:
        ValueError: if val_metrics and attn_metrics share a key, or if the
            per-epoch lists within one metrics dict disagree in length.
    """
    keys = set(val_metrics.keys()) | set(attn_metrics.keys())
    keys.add('epoch')
    overlapping_keys = val_metrics.keys() & attn_metrics.keys()
    if len(overlapping_keys) > 0:
        raise ValueError("Found overlapping keys {} in training and attention metrics".format(overlapping_keys))

    def find_keys_with_epoch(metrics_dict: dict) -> set:
        # Per-epoch metrics are stored as lists; anything else is a scalar.
        return {k for k in metrics_dict if isinstance(metrics_dict[k], list)}

    val_keys_with_epoch = find_keys_with_epoch(val_metrics)
    val_keys_without_epoch = val_metrics.keys() - val_keys_with_epoch
    attn_keys_with_epoch = find_keys_with_epoch(attn_metrics)
    attn_keys_without_epoch = attn_metrics.keys() - attn_keys_with_epoch
    keys_without_epochs = val_keys_without_epoch | attn_keys_without_epoch

    model_parameters = dict(config._asdict())
    columns = list(model_parameters.keys()) + list(keys)
    # Collect rows in a plain list and build the DataFrame once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
    # repeated appends were quadratic anyway.
    rows = []

    def add_all_keys_with_epoch(keys_with_epoch, undef_keys, metrics):
        # One row per epoch; columns not tracked per-epoch are blanked.
        if not keys_with_epoch:
            return  # nothing per-epoch on this side (original crashed here)
        (max_epoch,) = set(len(metrics[k]) for k in keys_with_epoch)
        for epoch in range(max_epoch):
            row = dict(model_parameters)
            row['epoch'] = epoch
            for k in keys_with_epoch:
                row[k] = metrics[k][epoch]
            for k in undef_keys:
                row[k] = ""
            rows.append(row)

    add_all_keys_with_epoch(val_keys_with_epoch, keys_without_epochs | attn_keys_with_epoch, val_metrics)
    add_all_keys_with_epoch(attn_keys_with_epoch, keys_without_epochs | val_keys_with_epoch, attn_metrics)

    # Single summary row holding all scalar metrics; per-epoch columns blanked.
    summary = dict(model_parameters)
    for k in attn_keys_without_epoch:
        summary[k] = attn_metrics[k]
    for k in val_keys_without_epoch:
        summary[k] = val_metrics[k]
    for k in val_keys_with_epoch | attn_keys_with_epoch:
        summary[k] = ""
    summary['epoch'] = ""
    rows.append(summary)

    df = pd.DataFrame(rows, columns=columns)
    if output_csv:
        out_dir = os.path.dirname(output_csv)
        if out_dir:  # a bare filename has no directory to create
            os.makedirs(out_dir, exist_ok=True)
        df.to_csv(output_csv, index=False, compression=None)
    return df
def namedtuple_to_xml(item: namedtuple):
    """Recursively convert a namedtuple into an ElementTree Element.

    The element's tag is the namedtuple's type name; each field becomes a
    child. None fields are skipped, nested namedtuples recurse, lists become
    a wrapper element whose children are the converted list items (which must
    themselves be namedtuples), and anything else is rendered as text.

    Args:
        item: The namedtuple to convert.

    Returns:
        xml.etree.ElementTree.Element: the XML tree for *item*.
    """
    elem = Element(type(item).__name__)
    for key, val in item._asdict().items():
        if val is None:
            continue
        # Bug fix: `type(val) is namedtuple` was always False, because
        # namedtuple is a factory function, not a type -- the recursive
        # branch was dead code. Detect namedtuple instances structurally.
        if isinstance(val, tuple) and hasattr(val, '_asdict'):
            child = namedtuple_to_xml(val)
        elif isinstance(val, list):
            child = Element(key)
            # Renamed loop variable: the original shadowed the `item` parameter.
            for entry in val:
                child.append(namedtuple_to_xml(entry))
        else:
            child = Element(key)
            child.text = str(val)
        elem.append(child)
    return elem
def _get_by_entity(self, entity: namedtuple) -> namedtuple:
    """Fetch entity.url with the browser and scrape the centered detail table,
    returning a new entity with content/days/amount/rate filled in when found.
    """
    # Restart the browser after 5 requests. NOTE(review): self.n is never
    # reset here, so once it passes 5 this restarts on every call --
    # presumably self.start() resets the counter; confirm.
    if self.n >= 5:
        self.stop()
        self.start()
    self.n += 1
    # noinspection PyProtectedMember
    result = entity._asdict()
    self.browser.get(entity.url)
    table = self.browser.find_element_by_xpath("//div[@class='portlet']//table[@align='center'][2]")
    if table:
        result["content"] = table.text
        # The marker string flags pages where no operation data is published;
        # only parse the inner table when it is absent.
        if '不开展' not in table.text:
            # First row = header labels, second row = values of the inner
            # bordered table. NOTE(review): @border>'0' compares strings --
            # looks intentional for this site's markup, but verify.
            span_tag = table.find_elements_by_xpath("//table[@border>'0']//tr[1]//td")
            data_tag = table.find_elements_by_xpath("//table[@border>'0']//tr[2]//td")
            span = [tag.text for tag in span_tag]
            data = [tag.text for tag in data_tag]
            try:
                # Locate each value by substring-matching its header label.
                result["days"] = data[["期限" in w for w in span].index(True)]
                result["amount"] = data[["量" in w for w in span].index(True)]
                result["rate"] = data[["利率" in w for w in span].index(True)]
            except ValueError:
                # A label was not found -- best-effort: leave the field unset.
                pass
    return self.entity(**result)
def nt2json(nt: namedtuple):
    """Serialize a namedtuple to a JSON object string.

    Non-JSON-native field values are rendered via str().
    """
    payload = nt._asdict()
    return json.dumps(payload, default=str)
def parse_namedtuple(losses: namedtuple, prefix: str):
    """Flatten a losses namedtuple into a dict keyed by 'prefix/field'."""
    log = {}
    for field_name, field_value in losses._asdict().items():
        log['{}/{}'.format(prefix, field_name)] = field_value
    return log
def _prefixed(nt: namedtuple, prefix): """Convert a named tuple into a dict with prefixed names.""" result = {} for key, value in nt._asdict().items(): result[prefix + key] = value return result
def data(self, cls: namedtuple):
    """Store *cls* and mirror each of its fields as an attribute on self."""
    self._data = cls
    fields = cls._asdict()
    for attr_name in fields:
        setattr(self, attr_name, fields[attr_name])
def message_from_tuple(payload_tuple: namedtuple, attributes: dict = None):
    """Build a mock message whose payload is the JSON-encoded namedtuple."""
    body = json.dumps(payload_tuple._asdict())
    return mock_message(body, attributes)
def save_link(self, link: namedtuple) -> None:
    """Upsert the link record, keyed by its (head, tail) pair."""
    key = {
        "head": link.head,
        "tail": link.tail
    }
    # noinspection PyProtectedMember
    self.update("link", key, link._asdict(), True)
def save_node(self, node: namedtuple) -> None:
    """Upsert the node record, keyed by its (name, source) pair."""
    key = {
        "name": node.name,
        "source": node.source
    }
    # noinspection PyProtectedMember
    self.update("node", key, node._asdict(), True)
def _asSortedList(scores: namedtuple, places: int = 4) -> List[str]:
    """Converts namedtuple of scores to a list of rounded values in name-
    sorted order"""
    fmt = truncatedFloat(places)
    ordered = sorted(scores._asdict().items())
    return [fmt % value for _, value in ordered]