class CONFIDENCE(object):
    UNKNOWN = None
    GUESSING = 1
    NOT_SURE = 2
    PRETTY_SURE = 3
    ABSOLUTELY_SURE = 4

    INT_TO_CODE = ub.odict([
        (ABSOLUTELY_SURE, 'absolutely_sure'),
        (PRETTY_SURE, 'pretty_sure'),
        (NOT_SURE, 'not_sure'),
        (GUESSING, 'guessing'),
        (UNKNOWN, 'unspecified'),
    ])

    INT_TO_NICE = ub.odict([
        (ABSOLUTELY_SURE, 'Doubtless'),
        (PRETTY_SURE, 'Sure'),
        (NOT_SURE, 'Unsure'),
        (GUESSING, 'Guessing'),
        (UNKNOWN, 'Unspecified'),
    ])

    CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ub.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ub.invert_dict(INT_TO_NICE)
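A minimal usage sketch (not from the original source) of the lookup-table pattern above; it assumes `import ubelt as ub` at module level, as the rest of these snippets do. Every representation round-trips because `ub.invert_dict` requires values to be unique and hashable, which holds for each table:

    assert CONFIDENCE.CODE_TO_INT['pretty_sure'] == CONFIDENCE.PRETTY_SURE
    assert CONFIDENCE.INT_TO_NICE[CONFIDENCE.PRETTY_SURE] == 'Sure'
    assert CONFIDENCE.NICE_TO_INT['Sure'] == CONFIDENCE.PRETTY_SURE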
class QUAL(object):
    EXCELLENT = 5
    GOOD = 4
    OK = 3
    POOR = 2
    JUNK = 1
    UNKNOWN = None

    INT_TO_CODE = ub.odict([
        (EXCELLENT, 'excellent'),
        (GOOD, 'good'),
        (OK, 'ok'),
        (POOR, 'poor'),
        (JUNK, 'junk'),
        (UNKNOWN, 'unspecified'),
    ])

    INT_TO_NICE = ub.odict([
        (EXCELLENT, 'Excellent'),
        (GOOD, 'Good'),
        (OK, 'OK'),
        (POOR, 'Poor'),
        (JUNK, 'Junk'),
        (UNKNOWN, 'Unspecified'),
    ])

    CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ub.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ub.invert_dict(INT_TO_NICE)
def main():
    from os.path import dirname, join
    mapper0, mapper2, cats2 = define_fine_challenge_categories()
    mapper0, mapper1, cats1 = define_coarse_challenge_categories()

    # sanity checks: these keys must exist, the fine mapper must be an
    # identity map, and the coarse mapper must leave at least one key fixed
    mapper2['shrimp']
    mapper1['shrimp']
    assert all([k == v for k, v in mapper2.items()])
    assert not all([k != v for k, v in mapper1.items()])

    raw_to_fine_cat = {k: mapper2[v] for k, v in mapper0.items()}
    raw_to_coarse_cat = {k: mapper1[v] for k, v in mapper0.items()}

    fine_to_coarse_cat = {}
    fine_to_raws = ub.invert_dict(mapper0, unique_vals=False)
    for fine, raws in fine_to_raws.items():
        for raw in raws:
            coarse = raw_to_coarse_cat[raw]
            fine_to_coarse_cat[fine] = coarse

    print(ub.repr2(ub.invert_dict(raw_to_fine_cat, unique_vals=False)))
    print(ub.repr2(ub.invert_dict(raw_to_coarse_cat, unique_vals=False)))

    # Write a python file that contains coarse mappings
    text = ub.codeblock(
        '''
        """ autogenerated file defining the viame challenge 2018 categories """


        class FineGrainedChallenge(object):
            raw_to_cat = {raw_to_fine_cat}
            heirarchy = {cats2}


        class CoarseChallenge(object):
            raw_to_cat = {raw_to_coarse_cat}
            fine_to_cat = {fine_to_coarse_cat}
            heirarchy = {cats1}
        ''').format(
            raw_to_fine_cat=ub.repr2(raw_to_fine_cat),
            raw_to_coarse_cat=ub.repr2(raw_to_coarse_cat),
            fine_to_coarse_cat=ub.repr2(fine_to_coarse_cat),
            cats1=ub.repr2(cats1),
            cats2=ub.repr2(cats2))

    import autopep8
    pep8_options = {}
    new_text = autopep8.fix_code(text, pep8_options)
    # print(new_text)
    ub.writeto(join(dirname(viame_wrangler.__file__), 'mappings.py'), new_text)
class EVIDENCE_DECISION(object):
    """
    TODO: change to EVIDENCE_DECISION / VISUAL_DECISION

    Enumerated types of review codes and texts

    Notes:
        unreviewed: not compared yet
        nomatch: visually comparable and determined to be different
        match: visually comparable and determined to be the same
        notcomp: not comparable, meaning it is actually impossible to determine
        unknown: reviewed, but we just can't figure it out
    """
    UNREVIEWED = None
    NEGATIVE = 0
    POSITIVE = 1
    INCOMPARABLE = 2
    UNKNOWN = 3

    INT_TO_CODE = ub.odict([
        # (POSITIVE, 'match'),
        # (NEGATIVE, 'nomatch'),
        # (INCOMPARABLE, 'notcomp'),
        # (POSITIVE, 'positive'),
        # (NEGATIVE, 'negative'),
        # (INCOMPARABLE, 'incomparable'),
        # (UNKNOWN, 'unknown'),
        # (UNREVIEWED, 'unreviewed'),
        (POSITIVE, 'POSTV'),
        (NEGATIVE, 'NEGTV'),
        (INCOMPARABLE, 'INCMP'),
        (UNKNOWN, 'UNKWN'),
        (UNREVIEWED, 'UNREV'),
    ])

    INT_TO_NICE = ub.odict([
        (POSITIVE, 'Positive'),
        (NEGATIVE, 'Negative'),
        (INCOMPARABLE, 'Incomparable'),
        (UNKNOWN, 'Unknown'),
        (UNREVIEWED, 'Unreviewed'),
    ])

    CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ub.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ub.invert_dict(INT_TO_NICE)

    MATCH_CODE = CODE_TO_INT
def _build_index(self):
    """ construct lookup tables """
    # Most of the categories should have been given integer ids
    max_id = max(
        it.chain([0], nx.get_node_attributes(self.graph, 'id').values()))

    # Fill in id-values for any node that doesn't have one
    node_to_id = {}
    for node, attrs in sorted(self.graph.nodes.items()):
        node_to_id[node] = attrs.get('id', max_id + 1)
        max_id = max(max_id, node_to_id[node])
    id_to_node = ub.invert_dict(node_to_id)

    # Compress ids into a flat index space (sorted by node ids)
    idx_to_node = ub.argsort(node_to_id)
    node_to_idx = {node: idx for idx, node in enumerate(idx_to_node)}

    # Find the sets of nodes that need to be softmax-ed together
    node_groups = list(traverse_siblings(self.graph))
    idx_groups = [
        sorted([node_to_idx[n] for n in group]) for group in node_groups
    ]

    # Set instance attributes
    self.id_to_node = id_to_node
    self.node_to_id = node_to_id
    self.idx_to_node = idx_to_node
    self.node_to_idx = node_to_idx
    self.idx_groups = idx_groups
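The invert/argsort steps are the core of this index. A small self-contained sketch with hypothetical node ids (not from the original source) shows how the three tables relate; `ub.argsort` on a dict returns its keys sorted by value:

    import ubelt as ub

    node_to_id = {'fish': 3, 'rock': 1, 'kelp': 7}
    id_to_node = ub.invert_dict(node_to_id)   # {3: 'fish', 1: 'rock', 7: 'kelp'}
    idx_to_node = ub.argsort(node_to_id)      # keys sorted by id: ['rock', 'fish', 'kelp']
    node_to_idx = {node: idx for idx, node in enumerate(idx_to_node)}
    assert node_to_idx == {'rock': 0, 'fish': 1, 'kelp': 2}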
def __init__(self, coco_dset):
    self.dset = coco_dset
    self.label_names = sorted(self.dset.name_to_cat,
                              key=lambda n: self.dset.name_to_cat[n]['id'])
    self._class_to_ind = ub.invert_dict(dict(enumerate(self.label_names)))
    self.base_size = np.array([416, 416])

    self.num_images = len(self.dset.imgs)
    self.num_classes = len(self.label_names)

    try:
        self.input_id = ub.hash_data(self.dset.dataset)
    except TypeError:
        self.input_id = ub.hash_data(ub.repr2(self.dset.dataset, nl=0))
    # self.input_id = os.path.basename(self.coco_fpath)

    if False:
        # setup hierarchy
        import networkx as nx
        g = nx.DiGraph()
        for cat in self.dset.cats.values():
            g.add_node(cat['name'])
            if 'supercategory' in cat:
                g.add_edge(cat['supercategory'], cat['name'])
        for key, val in g.adj.items():
            print('node = {!r}'.format(key))
            print(' * neighbs = {!r}'.format(list(val)))
def __init__(verif, infr):
    verif.rng = np.random.RandomState(4033913)
    verif.dummy_params = {
        NEGTV: {'mean': .2, 'std': .25},
        POSTV: {'mean': .85, 'std': .2},
        INCMP: {'mean': .15, 'std': .1},
    }
    verif.infr = infr
    verif.orig_nodes = set(infr.aids)
    verif.orig_labels = infr.get_node_attrs('orig_name_label')
    verif.orig_groups = ub.invert_dict(verif.orig_labels, False)
    verif.orig_groups = ub.map_vals(set, verif.orig_groups)
class META_DECISION(object):
    """
    Enumerated types of review codes and texts

    Notes:
        unreviewed: we don't have a meta decision
        same: we know this is the same animal through non-visual means
        diff: we know this is a different animal through non-visual means

    Example:
        >>> assert hasattr(META_DECISION, 'CODE')
        >>> assert hasattr(META_DECISION, 'NICE')
        >>> code1 = META_DECISION.INT_TO_CODE[META_DECISION.NULL]
        >>> code2 = META_DECISION.CODE.NULL
        >>> assert code1 == code2
        >>> nice1 = META_DECISION.INT_TO_NICE[META_DECISION.NULL]
        >>> nice2 = META_DECISION.NICE.NULL
        >>> assert nice1 == nice2
    """
    NULL = None
    DIFF = 0
    SAME = 1

    INT_TO_CODE = ub.odict([
        (NULL, 'null'),
        (DIFF, 'diff'),
        (SAME, 'same'),
    ])

    INT_TO_NICE = ub.odict([
        (NULL, 'NULL'),
        (DIFF, 'Different'),
        (SAME, 'Same'),
    ])

    CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ub.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ub.invert_dict(INT_TO_NICE)
def seq_to_tree(subseq, open_to_close, toks):
    open_to_tok = ub.invert_dict(toks)
    subtree = nx.OrderedDiGraph()
    stack = []
    for token in subseq:
        if token in open_to_close:
            # opening token: add the corresponding node under the current top
            node = open_to_tok[token]
            if stack:
                parent = open_to_tok[stack[-1]]
                subtree.add_edge(parent, node)
            else:
                subtree.add_node(node)
            stack.append(token)
        else:
            # closing token: it must match the most recent opening token
            if not stack:
                raise Exception('unbalanced sequence: unexpected closing token')
            prev_open = stack.pop()
            want_close = open_to_close[prev_open]
            if token != want_close:
                raise Exception('unbalanced sequence: mismatched closing token')
    return subtree
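A hedged round-trip sketch with hypothetical tokens (not from the original source), assuming module-level `import networkx as nx` and `import ubelt as ub` plus a networkx version that still provides `OrderedDiGraph`. Here `toks` maps each node to its opening token and `open_to_close` pairs openers with closers, so the balanced sequence '([])' decodes to the single-edge tree a -> b:

    toks = {'a': '(', 'b': '['}
    open_to_close = {'(': ')', '[': ']'}
    subtree = seq_to_tree(['(', '[', ']', ')'], open_to_close, toks)
    assert list(subtree.edges) == [('a', 'b')]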
def set_labelnames(task, labelnames, ignore_labelnames=[], alias={}):
    task.labelnames = list(labelnames)
    task.labelname_alias = alias
    task.ignore_labelnames = ignore_labelnames

    # Remove aliased classes
    for k in alias.keys():
        if k in task.labelnames:
            task.labelnames.remove(k)

    # Assign an integer label to each labelname
    task.labelname_to_id = ub.invert_dict(dict(enumerate(task.labelnames)))

    # Map aliased classes to a different label
    for k, v in alias.items():
        task.labelname_to_id[k] = task.labelname_to_id[v]

    task.ignore_labels = np.array(
        list(ub.take(task.labelname_to_id, task.ignore_labelnames)))

    task.labels = np.arange(len(task.labelnames))
    task.relevant_labels = np.setdiff1d(task.labels, task.ignore_labels)
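A usage sketch with hypothetical labels (not from the original source), assuming `set_labelnames` is callable as a plain function and numpy is imported at module level. The alias 'auto' is dropped from the label list but still resolves to the id of 'car', and the ignored background label is excluded from `relevant_labels`:

    import types

    task = types.SimpleNamespace()
    set_labelnames(task, ['background', 'car', 'auto'],
                   ignore_labelnames=['background'], alias={'auto': 'car'})
    assert task.labelnames == ['background', 'car']
    assert task.labelname_to_id['auto'] == task.labelname_to_id['car'] == 1
    assert list(task.relevant_labels) == [1]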
def __init__(self, devkit_dpath=None, split='train', years=[2007, 2012]):
    import os
    if devkit_dpath is None:
        # ub.expandpath('~/data/VOC/VOCdevkit')
        devkit_dpath = self.ensure_voc_data(years=years)
    self.devkit_dpath = devkit_dpath
    self.years = years

    # determine train / test splits
    self.gpaths = []
    self.apaths = []
    if split == 'test':
        assert 2007 in years, 'test set is hacked to be only 2007'
        gps, aps = self._read_split_paths('test', 2007)
        self.gpaths += gps
        self.apaths += aps
    else:
        for year in sorted(years):
            gps, aps = self._read_split_paths(split, year)
            self.gpaths += gps
            self.apaths += aps

    self.label_names = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
                        'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
                        'dog', 'horse', 'motorbike', 'person', 'pottedplant',
                        'sheep', 'sofa', 'train', 'tvmonitor')
    self._class_to_ind = ub.invert_dict(dict(enumerate(self.label_names)))
    self.base_wh = [416, 416]

    self.num_classes = len(self.label_names)

    hashid = ub.hash_data(list(map(os.path.basename, self.gpaths)))
    yearid = '_'.join(map(str, years))
    self.input_id = 'voc_{}_{}_{}'.format(yearid, split, hashid)
def do_fine_graine_level_sets(self, mapping):
    inverted = ub.invert_dict(mapping, False)
    for sup, subs in inverted.items():
        print('sup = {!r}'.format(sup))
        for sub in subs:
            if sub in self.name_to_cat:
                cat = self.name_to_cat[sub]
                n = len(self.cid_to_aids[cat['id']])
                if n:
                    print(' * {} = {}'.format(sub, n))

    mapping = get_coarse_mapping()
    inverted = ub.invert_dict(mapping, False)

    fine_grained_map = {}

    custom_fine_grained_map = {v: k for k, vs in {
        'unidentified roundfish': [
            'unidentified roundfish',
            'unidentified roundfish (less than half)',
            'unknown roundfish',
            'Rockfish Unid.',
        ],
        'unidentified sebastomus': [
            'sebastes_2species',
            'unknown sebastomus',
            'unknown rockfish',
            'Thornyhead Unid.',
            'Hexagrammidae sp.',
        ],
        'prickleback': [
            'Prickleback',
            'Stichaeidae',
        ],
        'Flatfish Unid.': [
            'Flatfish Unid.',
            'unknown flatfish',
        ],
    }.items() for v in vs}

    # NOTE: this first assignment is immediately overwritten
    catnames = [cat['name'] for cat in self.cats.values()]
    catnames = list(mapping.keys())

    for name in catnames:
        # normalize the name
        norm = normalize_name(name)
        fine_grained_map[name] = norm

    fine_grained_level_set = ub.invert_dict(fine_grained_map, False)
    print(ub.repr2(fine_grained_level_set))

    for sup, subs in inverted.items():
        print('* COARSE-CLASS = {!r}'.format(sup))
        for norm in sorted(set([normalize_name(sub) for sub in subs])):
            raws = fine_grained_level_set.get(norm, [])
            if raws:
                print('    * fine-class = {!r}'.format(norm))
                if len(raws) > 1:  # or list(raws)[0] != norm:
                    print(ub.indent(
                        '* raw-classes = {}'.format(ub.repr2(raws, nl=1)),
                        ' ' * 8))

    import networkx as nx
    G = nx.DiGraph()
    for norm in fine_grained_map.values():
        G.add_node(norm)
    for sup, subs in inverted.items():
        G.add_node(sup)
        for norm in sorted(set([normalize_name(sub) for sub in subs])):
            G.add_edge(norm, sup)

    if False:
        import plottool as pt
        pt.show_nx(G, layoutkw=dict(prog='neato'), arrow_width=.1, sep=10)
def maximum_common_ordered_tree_embedding(tree1, tree2, node_affinity='auto'):
    """
    Finds the maximum common subtree-embedding between two ordered trees.

    A tree S is an embedded subtree of T if it can be obtained from T by a
    series of edge contractions. Note this produces a subtree embedding, which
    is not necessarily a subgraph isomorphism (although a subgraph isomorphism
    is also an embedding).

    The maximum common embedded subtree problem can be solved in
    `O(n1 * n2 * min(d1, l1) * min(d2, l2))` time on ordered trees with n1 and
    n2 nodes, of depth d1 and d2, and with l1 and l2 leaves, respectively.

    Implements the algorithm described in [1]_.

    References:
        .. [1] On the Maximum Common Embedded Subtree Problem for Ordered Trees
            https://pdfs.semanticscholar.org/0b6e/061af02353f7d9b887f9a378be70be64d165.pdf

        http://algo.inria.fr/flajolet/Publications/FlSiSt90.pdf

    Notes:
        Exact algorithms for computing the tree edit distance between unordered trees
        - https://pdf.sciencedirectassets.com/271538/1-s2.0-S0304397510X00299/1-s2.0-S0304397510005463/main.pdf

        Tree Edit Distance and Common Subtrees
        - https://upcommons.upc.edu/bitstream/handle/2117/97554/R02-20.pdf

        A Survey on Tree Edit Distance and Related Problems
        - https://grfia.dlsi.ua.es/ml/algorithms/references/editsurvey_bille.pdf

    Args:
        tree1 (nx.OrderedDiGraph): first ordered tree
        tree2 (nx.OrderedDiGraph): second ordered tree
        node_affinity (callable): function to score node matches

    Example:
        >>> from netharn.initializers._nx_extensions import *  # NOQA
        >>> from netharn.initializers._nx_extensions import _lcs, _print_forest
        >>> def random_ordered_tree(n, seed=None):
        >>>     tree = nx.dfs_tree(nx.random_tree(n, seed=seed))
        >>>     otree = nx.OrderedDiGraph()
        >>>     otree.add_edges_from(tree.edges)
        >>>     return otree
        >>> tree1 = random_ordered_tree(10, seed=1)
        >>> tree2 = random_ordered_tree(10, seed=2)
        >>> print('tree1')
        >>> _print_forest(tree1)
        >>> print('tree2')
        >>> _print_forest(tree2)
        >>> embedding1, embedding2 = maximum_common_ordered_tree_embedding(tree1, tree2)
        >>> print('embedding1')
        >>> _print_forest(embedding1)
        >>> print('embedding2')
        >>> _print_forest(embedding2)
    """
    if not (isinstance(tree1, nx.OrderedDiGraph) and nx.is_forest(tree1)):
        raise nx.NetworkXNotImplemented(
            'only implemented for directed ordered trees')
    if not (isinstance(tree2, nx.OrderedDiGraph) and nx.is_forest(tree2)):
        raise nx.NetworkXNotImplemented(
            'only implemented for directed ordered trees')

    # Convert the trees to balanced sequences
    sequence1, open_to_close, toks = tree_to_seq(tree1, open_to_close=None, toks=None)
    sequence2, open_to_close, toks = tree_to_seq(tree2, open_to_close, toks)
    seq1 = sequence1
    seq2 = sequence2

    open_to_tok = ub.invert_dict(toks)

    # Solve the longest common balanced sequence problem
    best, value = longest_common_balanced_sequence(
        seq1, seq2, open_to_close, open_to_tok=open_to_tok,
        node_affinity=node_affinity)
    subseq1, subseq2 = best

    # Convert the subsequence back into a tree
    embedding1 = seq_to_tree(subseq1, open_to_close, toks)
    embedding2 = seq_to_tree(subseq2, open_to_close, toks)
    return embedding1, embedding2
def maximum_common_ordered_subtree_isomorphism(tree1, tree2, node_affinity='auto'):
    """
    Isomorphic version of `maximum_common_ordered_tree_embedding`.

    CommandLine:
        xdoctest -m /home/joncrall/code/netharn/netharn/initializers/_nx_extensions.py maximum_common_ordered_subtree_isomorphism:1 --profile && cat profile_output.txt

    Example:
        >>> from netharn.initializers._nx_extensions import *  # NOQA
        >>> from netharn.initializers._nx_extensions import _lcs, _print_forest
        >>> def random_ordered_tree(n, seed=None):
        >>>     tree = nx.dfs_tree(nx.random_tree(n, seed=seed))
        >>>     otree = nx.OrderedDiGraph()
        >>>     otree.add_edges_from(tree.edges)
        >>>     return otree
        >>> tree1 = random_ordered_tree(10, seed=3)
        >>> tree2 = random_ordered_tree(10, seed=2)
        >>> tree1.add_edges_from(tree2.edges, weight=1)
        >>> tree1 = nx.minimum_spanning_arborescence(tree1)
        >>> tree2.add_edges_from(tree1.edges, weight=1)
        >>> tree2 = nx.minimum_spanning_arborescence(tree2)
        >>> tree1.remove_edge(4, 7)
        >>> tree1.remove_edge(4, 9)
        >>> tree1.add_edge(4, 10)
        >>> tree1.add_edge(10, 7)
        >>> tree1.add_edge(10, 9)
        >>> #tree1.add_edges_from([(9, 11), (11, 12), (12, 13), (13, 14)])
        >>> #tree2.add_edges_from([(9, 11), (11, 12), (12, 13), (13, 14)])
        >>> tree1.add_edges_from([(9, 11), (11, 12)])
        >>> tree2.add_edges_from([(9, 11), (11, 12)])
        >>> tree2.add_edge(100, 0)
        >>> tree1.add_edge(102, 100)
        >>> tree1.add_edge(100, 101)
        >>> tree1.add_edge(101, 0)
        >>> tree1.add_edge(5, 201)
        >>> tree1.add_edge(5, 202)
        >>> tree1.add_edge(5, 203)
        >>> tree1.add_edge(201, 2000)
        >>> tree1.add_edge(2000, 2001)
        >>> tree1.add_edge(2001, 2002)
        >>> tree1.add_edge(2002, 2003)
        >>> tree2.add_edge(5, 202)
        >>> tree2.add_edge(5, 203)
        >>> tree2.add_edge(5, 201)
        >>> tree2.add_edge(201, 2000)
        >>> tree2.add_edge(2000, 2001)
        >>> tree2.add_edge(2001, 2002)
        >>> tree2.add_edge(2002, 2003)
        >>> print('-----')
        >>> print('tree1')
        >>> _print_forest(tree1)
        >>> print('tree2')
        >>> _print_forest(tree2)
        >>> subtree1, subtree2 = maximum_common_ordered_subtree_isomorphism(tree1, tree2)
        >>> print('-----')
        >>> print('subtree1')
        >>> _print_forest(subtree1)
        >>> print('subtree2')
        >>> _print_forest(subtree2)
        >>> embedding1, embedding2 = maximum_common_ordered_tree_embedding(tree1, tree2)
        >>> print('-----')
        >>> print('embedding1')
        >>> _print_forest(embedding1)
        >>> print('embedding2')
        >>> _print_forest(embedding2)
        >>> if 0:
        >>>     import timerit
        >>>     ti = timerit.Timerit(6, bestof=2, verbose=2)
        >>>     for timer in ti.reset('isomorphism'):
        >>>         with timer:
        >>>             maximum_common_ordered_subtree_isomorphism(tree1, tree2)
        >>>     for timer in ti.reset('embedding'):
        >>>         with timer:
        >>>             maximum_common_ordered_tree_embedding(tree1, tree2)
        >>> from networkx import isomorphism
        >>> assert isomorphism.DiGraphMatcher(tree1, subtree1).subgraph_is_isomorphic()
        >>> assert isomorphism.DiGraphMatcher(tree2, subtree2).subgraph_is_isomorphic()
        >>> list(isomorphism.DiGraphMatcher(tree1, tree2).subgraph_isomorphisms_iter())
        >>> list(isomorphism.DiGraphMatcher(tree1, tree2).subgraph_monomorphisms_iter())
        >>> list(isomorphism.DiGraphMatcher(subtree1, subtree2).subgraph_isomorphisms_iter())
        >>> list(isomorphism.DiGraphMatcher(tree1, subtree1).subgraph_isomorphisms_iter())
        >>> list(isomorphism.DiGraphMatcher(tree2, subtree2).subgraph_isomorphisms_iter())

    Example:
        >>> from netharn.initializers._nx_extensions import *  # NOQA
        >>> from netharn.initializers._nx_extensions import _lcs, _print_forest
        >>> def random_ordered_tree(n, seed=None):
        >>>     if n > 0:
        >>>         tree = nx.dfs_tree(nx.random_tree(n, seed=seed))
        >>>     otree = nx.OrderedDiGraph()
        >>>     if n > 0:
        >>>         otree.add_edges_from(tree.edges)
        >>>     return otree
        >>> import random
        >>> rng = random.Random(90269698983701724775426457020022)
        >>> num = 1000
        >>> def _gen_seeds(num):
        >>>     for _ in range(num):
        >>>         yield (rng.randint(0, 50), rng.randint(0, 50),
        >>>                rng.randint(0, 2 ** 64), rng.randint(0, 2 ** 64))
        >>> for n1, n2, s1, s2 in ub.ProgIter(_gen_seeds(num=num), total=num, verbose=3):
        >>>     tree1 = random_ordered_tree(n1, seed=s1)
        >>>     tree2 = random_ordered_tree(n2, seed=s2)
        >>>     #print('-----')
        >>>     #print('tree1')
        >>>     #_print_forest(tree1)
        >>>     #print('tree2')
        >>>     #_print_forest(tree2)
        >>>     subtree1, subtree2 = maximum_common_ordered_subtree_isomorphism(tree1, tree2, node_affinity='auto')
        >>>     #print('-----')
        >>>     #print('subtree1')
        >>>     #_print_forest(subtree1)
        >>>     #print('subtree2')
        >>>     #_print_forest(subtree2)
        >>>     from networkx import isomorphism
        >>>     assert isomorphism.DiGraphMatcher(tree1, subtree1).subgraph_is_isomorphic()
        >>>     assert isomorphism.DiGraphMatcher(tree2, subtree2).subgraph_is_isomorphic()
    """
    try:
        if not (isinstance(tree1, nx.OrderedDiGraph) and nx.is_forest(tree1)):
            raise nx.NetworkXNotImplemented(
                'only implemented for directed ordered trees')
        if not (isinstance(tree2, nx.OrderedDiGraph) and nx.is_forest(tree2)):
            raise nx.NetworkXNotImplemented(
                'only implemented for directed ordered trees')
    except nx.NetworkXPointlessConcept:
        subtree1 = nx.OrderedDiGraph()
        subtree2 = nx.OrderedDiGraph()
        return subtree1, subtree2

    # Convert the trees to balanced sequences
    sequence1, open_to_close, toks = tree_to_seq(
        tree1, open_to_close=None, toks=None, mode='chr')
    sequence2, open_to_close, toks = tree_to_seq(
        tree2, open_to_close, toks, mode='chr')
    seq1 = sequence1
    seq2 = sequence2

    open_to_tok = ub.invert_dict(toks)

    # Solve the longest common balanced sequence problem
    best, value = longest_common_isomorphic_sequence(
        seq1, seq2, open_to_close, open_to_tok=open_to_tok,
        node_affinity=node_affinity)
    subseq1, subseq2 = best

    # Convert the subsequence back into a tree
    subtree1 = seq_to_tree(subseq1, open_to_close, toks)
    subtree2 = seq_to_tree(subseq2, open_to_close, toks)
    return subtree1, subtree2
def named_large_number(num, prefix='auto', precision=2):
    """
    https://en.wikipedia.org/wiki/Names_of_large_numbers

    Example:
        >>> import sys, ubelt
        >>> sys.path.append(ubelt.expandpath('~/misc/notes'))
        >>> from password_model import *  # NOQA
        >>> import random
        >>> rng = random.Random(0)
        >>> lines = []
        >>> test_mags = (list(range(-10, 3 * 22)) + [3 * 100, 3 * 101, 3 * 102])
        >>> # test_mags = list(range(-1, 3 * 5))
        >>> for mag in test_mags:
        >>>     coef = rng.random()
        >>>     for coef in [1.0, 1.1]:
        >>>         num = coef * (10 ** mag)
        >>>         text = named_large_number(num)
        >>>         line = 'text@{:3d}: {}'.format(mag, text)
        >>>         lines.append(line)
        >>> print('lines = {}'.format(ub.repr2(lines, nl=1, align=' ')))
    """
    magnitude_to_prefix = {
        3 * 0: '',
        3 * 1: 'thousand',
        3 * 2: 'million',
        3 * 3: 'billion',
        3 * 4: 'trillion',
        3 * 5: 'quadrillion',
        3 * 6: 'quintillion',
        3 * 7: 'sextillion',
        3 * 8: 'septillion',
        3 * 9: 'octillion',
        3 * 10: 'nonillion',
        3 * 11: 'decillion',
        3 * 12: 'undecillion',
        3 * 13: 'duodecillion',
        3 * 14: 'tredecillion',
        3 * 15: 'quattuordecillion',
        3 * 16: 'quindecillion',
        3 * 17: 'sexdecillion',
        3 * 18: 'septendecillion',
        3 * 19: 'octodecillion',
        3 * 20: 'novemdecillion',
        3 * 21: 'vigintillion',
        3 * 101: 'centillion',
    }
    prefix_to_magnitude = ub.invert_dict(magnitude_to_prefix)

    num_mag = math.log(abs(float(num) + 1), 10)
    if prefix == 'auto':
        # choose the largest prefix whose magnitude the number reaches
        chosen_prefix = ''
        for cand_mag, cand_prefix in magnitude_to_prefix.items():
            if num_mag >= cand_mag:
                chosen_prefix = cand_prefix
        prefix = chosen_prefix

    mag = prefix_to_magnitude[prefix]
    coeff = num / (10 ** mag)
    coef_repr = ub.repr2(float(coeff), precision=precision)
    text = coef_repr + ' ' + prefix
    return text
def 字典_交换健值(dict_, 唯一值=True):
    """ Swap the keys and values of a dictionary (i.e. invert it). """
    # Identifiers kept as-is; the Chinese name reads roughly as
    # "dict_swap_key_values(dict_, unique_values=True)".
    data = ub.invert_dict(dict_, unique_vals=唯一值)
    return data
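A usage sketch with hypothetical data (not from the original source): with the default 唯一值=True the inverse maps each value back to its key; with 唯一值=False, ub.invert_dict instead maps each value to the set of keys that shared it:

    assert 字典_交换健值({'a': 1, 'b': 2}) == {1: 'a', 2: 'b'}
    assert 字典_交换健值({'a': 1, 'b': 1}, 唯一值=False) == {1: {'a', 'b'}}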
class VIEW(object):
    """
    Categorical viewpoint using the faces of a Rhombicuboctahedron

    References:
        https://en.wikipedia.org/wiki/Rhombicuboctahedron
    """
    UNKNOWN = None
    R = 1
    FR = 2
    F = 3
    FL = 4
    L = 5
    BL = 6
    B = 7
    BR = 8

    U = 9
    UF = 10
    UB = 11
    UL = 12
    UR = 13
    UFL = 14
    UFR = 15
    UBL = 16
    UBR = 17

    D = 18
    DF = 19
    DB = 20
    DL = 21
    DR = 22
    DFL = 23
    DFR = 24
    DBL = 25
    DBR = 26

    INT_TO_CODE = ub.odict([
        (UNKNOWN, 'unknown'),
        (R, 'right'),
        (FR, 'frontright'),
        (F, 'front'),
        (FL, 'frontleft'),
        (L, 'left'),
        (BL, 'backleft'),
        (B, 'back'),
        (BR, 'backright'),
        (U, 'up'),
        (UF, 'upfront'),
        (UB, 'upback'),
        (UL, 'upleft'),
        (UR, 'upright'),
        (UFL, 'upfrontleft'),
        (UFR, 'upfrontright'),
        (UBL, 'upbackleft'),
        (UBR, 'upbackright'),
        (D, 'down'),
        (DF, 'downfront'),
        (DB, 'downback'),
        (DL, 'downleft'),
        (DR, 'downright'),
        (DFL, 'downfrontleft'),
        (DFR, 'downfrontright'),
        (DBL, 'downbackleft'),
        (DBR, 'downbackright'),
    ])

    INT_TO_NICE = ub.odict([
        (UNKNOWN, 'Unknown'),
        (R, 'Right'),
        (FR, 'Front-Right'),
        (F, 'Front'),
        (FL, 'Front-Left'),
        (L, 'Left'),
        (BL, 'Back-Left'),
        (B, 'Back'),
        (BR, 'Back-Right'),
        (U, 'Up'),
        (UF, 'Up-Front'),
        (UB, 'Up-Back'),
        (UL, 'Up-Left'),
        (UR, 'Up-Right'),
        (UFL, 'Up-Front-Left'),
        (UFR, 'Up-Front-Right'),
        (UBL, 'Up-Back-Left'),
        (UBR, 'Up-Back-Right'),
        (D, 'Down'),
        (DF, 'Down-Front'),
        (DB, 'Down-Back'),
        (DL, 'Down-Left'),
        (DR, 'Down-Right'),
        (DFL, 'Down-Front-Left'),
        (DFR, 'Down-Front-Right'),
        (DBL, 'Down-Back-Left'),
        (DBR, 'Down-Back-Right'),
    ])

    CODE_TO_NICE = ub.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ub.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ub.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ub.invert_dict(INT_TO_NICE)

    DIST = {
        # DIST 0 PAIRS
        (B, B): 0, (BL, BL): 0, (BR, BR): 0, (D, D): 0, (DB, DB): 0,
        (DBL, DBL): 0, (DBR, DBR): 0, (DF, DF): 0, (DFL, DFL): 0,
        (DFR, DFR): 0, (DL, DL): 0, (DR, DR): 0, (F, F): 0, (FL, FL): 0,
        (FR, FR): 0, (L, L): 0, (R, R): 0, (U, U): 0, (UB, UB): 0,
        (UBL, UBL): 0, (UBR, UBR): 0, (UF, UF): 0, (UFL, UFL): 0,
        (UFR, UFR): 0, (UL, UL): 0, (UR, UR): 0,
        # DIST 1 PAIRS
        (B, BL): 1, (B, BR): 1, (B, DB): 1, (B, DBL): 1, (B, DBR): 1,
        (B, UB): 1, (B, UBL): 1, (B, UBR): 1, (BL, DBL): 1, (BL, L): 1,
        (BL, UBL): 1, (BR, DBR): 1, (BR, R): 1, (BR, UBR): 1, (D, DB): 1,
        (D, DBL): 1, (D, DBR): 1, (D, DF): 1, (D, DFL): 1, (D, DFR): 1,
        (D, DL): 1, (D, DR): 1, (DB, DBL): 1, (DB, DBR): 1, (DBL, DL): 1,
        (DBL, L): 1, (DBR, DR): 1, (DBR, R): 1, (DF, DFL): 1, (DF, DFR): 1,
        (DF, F): 1, (DFL, DL): 1, (DFL, F): 1, (DFL, FL): 1, (DFL, L): 1,
        (DFR, DR): 1, (DFR, F): 1, (DFR, FR): 1, (DFR, R): 1, (DL, L): 1,
        (DR, R): 1, (F, FL): 1, (F, FR): 1, (F, UF): 1, (F, UFL): 1,
        (F, UFR): 1, (FL, L): 1, (FL, UFL): 1, (FR, R): 1, (FR, UFR): 1,
        (L, UBL): 1, (L, UFL): 1, (L, UL): 1, (R, UBR): 1, (R, UFR): 1,
        (R, UR): 1, (U, UB): 1, (U, UBL): 1, (U, UBR): 1, (U, UF): 1,
        (U, UFL): 1, (U, UFR): 1, (U, UL): 1, (U, UR): 1, (UB, UBL): 1,
        (UB, UBR): 1, (UBL, UL): 1, (UBR, UR): 1, (UF, UFL): 1, (UF, UFR): 1,
        (UFL, UL): 1, (UFR, UR): 1,
        # DIST 2 PAIRS
        (B, D): 2, (B, DL): 2, (B, DR): 2, (B, L): 2, (B, R): 2, (B, U): 2,
        (B, UL): 2, (B, UR): 2, (BL, BR): 2, (BL, D): 2, (BL, DB): 2,
        (BL, DBR): 2, (BL, DFL): 2, (BL, DL): 2, (BL, FL): 2, (BL, U): 2,
        (BL, UB): 2, (BL, UBR): 2, (BL, UFL): 2, (BL, UL): 2, (BR, D): 2,
        (BR, DB): 2, (BR, DBL): 2, (BR, DFR): 2, (BR, DR): 2, (BR, FR): 2,
        (BR, U): 2, (BR, UB): 2, (BR, UBL): 2, (BR, UFR): 2, (BR, UR): 2,
        (D, F): 2, (D, FL): 2, (D, FR): 2, (D, L): 2, (D, R): 2,
        (DB, DF): 2, (DB, DFL): 2, (DB, DFR): 2, (DB, DL): 2, (DB, DR): 2,
        (DB, L): 2, (DB, R): 2, (DB, UB): 2, (DB, UBL): 2, (DB, UBR): 2,
        (DBL, DBR): 2, (DBL, DF): 2, (DBL, DFL): 2, (DBL, DFR): 2,
        (DBL, DR): 2, (DBL, FL): 2, (DBL, UB): 2, (DBL, UBL): 2,
        (DBL, UBR): 2, (DBL, UFL): 2, (DBL, UL): 2, (DBR, DF): 2,
        (DBR, DFL): 2, (DBR, DFR): 2, (DBR, DL): 2, (DBR, FR): 2,
        (DBR, UB): 2, (DBR, UBL): 2, (DBR, UBR): 2, (DBR, UFR): 2,
        (DBR, UR): 2, (DF, DL): 2, (DF, DR): 2, (DF, FL): 2, (DF, FR): 2,
        (DF, L): 2, (DF, R): 2, (DF, UF): 2, (DF, UFL): 2, (DF, UFR): 2,
        (DFL, DFR): 2, (DFL, DR): 2, (DFL, FR): 2, (DFL, UBL): 2,
        (DFL, UF): 2, (DFL, UFL): 2, (DFL, UFR): 2, (DFL, UL): 2,
        (DFR, DL): 2, (DFR, FL): 2, (DFR, UBR): 2, (DFR, UF): 2,
        (DFR, UFL): 2, (DFR, UFR): 2, (DFR, UR): 2, (DL, DR): 2,
        (DL, F): 2, (DL, FL): 2, (DL, UBL): 2, (DL, UFL): 2, (DL, UL): 2,
        (DR, F): 2, (DR, FR): 2, (DR, UBR): 2, (DR, UFR): 2, (DR, UR): 2,
        (F, L): 2, (F, R): 2, (F, U): 2, (F, UL): 2, (F, UR): 2,
        (FL, FR): 2, (FL, U): 2, (FL, UBL): 2, (FL, UF): 2, (FL, UFR): 2,
        (FL, UL): 2, (FR, U): 2, (FR, UBR): 2, (FR, UF): 2, (FR, UFL): 2,
        (FR, UR): 2, (L, U): 2, (L, UB): 2, (L, UF): 2, (R, U): 2,
        (R, UB): 2, (R, UF): 2, (UB, UF): 2, (UB, UFL): 2, (UB, UFR): 2,
        (UB, UL): 2, (UB, UR): 2, (UBL, UBR): 2, (UBL, UF): 2,
        (UBL, UFL): 2, (UBL, UFR): 2, (UBL, UR): 2, (UBR, UF): 2,
        (UBR, UFL): 2, (UBR, UFR): 2, (UBR, UL): 2, (UF, UL): 2,
        (UF, UR): 2, (UFL, UFR): 2, (UFL, UR): 2, (UFR, UL): 2, (UL, UR): 2,
        # DIST 3 PAIRS
        (B, DF): 3, (B, DFL): 3, (B, DFR): 3, (B, FL): 3, (B, FR): 3,
        (B, UF): 3, (B, UFL): 3, (B, UFR): 3, (BL, DF): 3, (BL, DFR): 3,
        (BL, DR): 3, (BL, F): 3, (BL, R): 3, (BL, UF): 3, (BL, UFR): 3,
        (BL, UR): 3, (BR, DF): 3, (BR, DFL): 3, (BR, DL): 3, (BR, F): 3,
        (BR, L): 3, (BR, UF): 3, (BR, UFL): 3, (BR, UL): 3, (D, UB): 3,
        (D, UBL): 3, (D, UBR): 3, (D, UF): 3, (D, UFL): 3, (D, UFR): 3,
        (D, UL): 3, (D, UR): 3, (DB, F): 3, (DB, FL): 3, (DB, FR): 3,
        (DB, U): 3, (DB, UFL): 3, (DB, UFR): 3, (DB, UL): 3, (DB, UR): 3,
        (DBL, F): 3, (DBL, FR): 3, (DBL, R): 3, (DBL, U): 3, (DBL, UF): 3,
        (DBL, UR): 3, (DBR, F): 3, (DBR, FL): 3, (DBR, L): 3, (DBR, U): 3,
        (DBR, UF): 3, (DBR, UL): 3, (DF, U): 3, (DF, UBL): 3, (DF, UBR): 3,
        (DF, UL): 3, (DF, UR): 3, (DFL, R): 3, (DFL, U): 3, (DFL, UB): 3,
        (DFL, UR): 3, (DFR, L): 3, (DFR, U): 3, (DFR, UB): 3, (DFR, UL): 3,
        (DL, FR): 3, (DL, R): 3, (DL, U): 3, (DL, UB): 3, (DL, UBR): 3,
        (DL, UF): 3, (DL, UFR): 3, (DR, FL): 3, (DR, L): 3, (DR, U): 3,
        (DR, UB): 3, (DR, UBL): 3, (DR, UF): 3, (DR, UFL): 3, (F, UB): 3,
        (F, UBL): 3, (F, UBR): 3, (FL, R): 3, (FL, UB): 3, (FL, UBR): 3,
        (FL, UR): 3, (FR, L): 3, (FR, UB): 3, (FR, UBL): 3, (FR, UL): 3,
        (L, UBR): 3, (L, UFR): 3, (L, UR): 3, (R, UBL): 3, (R, UFL): 3,
        (R, UL): 3,
        # DIST 4 PAIRS
        (B, F): 4, (BL, FR): 4, (BR, FL): 4, (D, U): 4, (DB, UF): 4,
        (DBL, UFR): 4, (DBR, UFL): 4, (DF, UB): 4, (DFL, UBR): 4,
        (DFR, UBL): 4, (DL, UR): 4, (DR, UL): 4, (L, R): 4,
        # UNDEFINED DIST PAIRS
        (B, UNKNOWN): None, (BL, UNKNOWN): None, (BR, UNKNOWN): None,
        (D, UNKNOWN): None, (DB, UNKNOWN): None, (DBL, UNKNOWN): None,
        (DBR, UNKNOWN): None, (DF, UNKNOWN): None, (DFL, UNKNOWN): None,
        (DFR, UNKNOWN): None, (DL, UNKNOWN): None, (DR, UNKNOWN): None,
        (F, UNKNOWN): None, (FL, UNKNOWN): None, (FR, UNKNOWN): None,
        (L, UNKNOWN): None, (R, UNKNOWN): None, (U, UNKNOWN): None,
        (UB, UNKNOWN): None, (UBL, UNKNOWN): None, (UBR, UNKNOWN): None,
        (UF, UNKNOWN): None, (UFL, UNKNOWN): None, (UFR, UNKNOWN): None,
        (UL, UNKNOWN): None, (UR, UNKNOWN): None,
        (UNKNOWN, B): None, (UNKNOWN, BL): None, (UNKNOWN, BR): None,
        (UNKNOWN, D): None, (UNKNOWN, DB): None, (UNKNOWN, DBL): None,
        (UNKNOWN, DBR): None, (UNKNOWN, DF): None, (UNKNOWN, DFL): None,
        (UNKNOWN, DFR): None, (UNKNOWN, DL): None, (UNKNOWN, DR): None,
        (UNKNOWN, F): None, (UNKNOWN, FL): None, (UNKNOWN, FR): None,
        (UNKNOWN, L): None, (UNKNOWN, R): None, (UNKNOWN, U): None,
        (UNKNOWN, UB): None, (UNKNOWN, UBL): None, (UNKNOWN, UBR): None,
        (UNKNOWN, UF): None, (UNKNOWN, UFL): None, (UNKNOWN, UFR): None,
        (UNKNOWN, UL): None, (UNKNOWN, UR): None, (UNKNOWN, UNKNOWN): None,
    }
    # make distance symmetric
    for (f1, f2), d in list(DIST.items()):
        DIST[(f2, f1)] = d
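A quick sanity sketch (not from the original source) of the symmetrized table: either argument order gives the same distance, and any pair involving UNKNOWN is None:

    assert VIEW.DIST[(VIEW.B, VIEW.F)] == VIEW.DIST[(VIEW.F, VIEW.B)] == 4
    assert VIEW.DIST[(VIEW.UNKNOWN, VIEW.R)] is None
    assert VIEW.DIST[(VIEW.R, VIEW.UNKNOWN)] is None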
def main():
    import kwplot
    plt = kwplot.autoplt()
    sns = kwplot.autosns()

    alias = {
        '3090': 'nvctrl GeForce RTX 3090 0 temp',
        '1080ti': 'nvctrl GeForce GTX 1080 Ti 1 temp',
        # 'cpu': 'lmsensor coretemp-isa-0000 Package id 0',
    }

    all_df = read_psensor_log()

    unique_rawdevs = all_df.device.unique()
    for rawdev in unique_rawdevs:
        cpu_prefix = 'lmsensor coretemp-isa'
        if rawdev.startswith(cpu_prefix):
            suffix = rawdev[len(cpu_prefix):].split(' ', 1)[1].strip()
            alias['CPU ' + suffix] = rawdev
        if 'nvctrl' in rawdev and 'temp' in rawdev:
            alias['GPU ' + rawdev[7:-5]] = rawdev

    mapper = ub.invert_dict(alias)

    all_df['device'] = all_df['device'].apply(lambda x: mapper.get(x, None))
    all_df = all_df[all_df['device'].apply(lambda x: x is not None)]

    hours = int(ub.argval('--hours', default=48))

    delta = datetime.timedelta(hours=hours)
    min_time = datetime.datetime.now() - delta
    is_recent = all_df.datetime > min_time
    recent_df = all_df[is_recent]

    chosen = recent_df
    # chosen = all_df

    if 0:
        pivtbl = recent_df.pivot('unix_timestamp', 'device', 'temp')
        pivtbl = pivtbl.sort_index()
        smoothed_rows = []
        for window_idxs in ub.iter_window(list(range(len(pivtbl))), size=10):
            window = pivtbl.iloc[list(window_idxs)]
            max_val = window.max(axis=0, skipna=True)
            for k, v in max_val.to_dict().items():
                smoothed_rows.append({
                    'unix_timestamp': window.index[1],
                    'device': k,
                    'temp': v,
                })
        max_extra = pd.DataFrame(smoothed_rows)
        sns.lineplot(data=max_extra, x='unix_timestamp', y='temp', hue='device')

    df = recent_df.copy()
    df['device'] = df['device'].apply(
        lambda x: 'Core' if x.startswith('Core') else x)
    df['time'] = df['unix_timestamp'].apply(datetime.datetime.fromtimestamp)

    plt.gcf().clf()
    # sns.lineplot(data=chosen, x='unix_timestamp', y='temp', hue='device')
    for xx, (sess, group) in enumerate(chosen.groupby('session_x')):
        # ax.cla()
        ax = plt.gca()
        sns.lineplot(data=group, x='unix_timestamp', y='temp', hue='device',
                     legend=xx == 0)

    label_xaxis_dates(ax)
    ax.figure.subplots_adjust(bottom=0.2)
    ax.set_ylim(0, 100)
    plt.locator_params(axis='y', nbins=10)
    # import matplotlib as mpl

    # Draw shutdown time as black lines
    end_times = []
    for sx, group in chosen.groupby('session_x'):
        shutdown_time = group['unix_timestamp'].max()
        end_times.append(shutdown_time)
    for shutdown_time in sorted(end_times)[:-1]:
        ax.plot((shutdown_time, shutdown_time), [0, 100], color='k')

    # ci_df = pd.concat([max_extra, recent_df])
    # ci_df['device'] = ci_df['device'].apply(lambda x: 'Core' if x.startswith('Core') else x)
    # sns.lineplot(data=ci_df, x='unix_timestamp', y='temp', hue='device')

    # from matplotlib.dates import date2num
    # all_df['date_ord'] = all_df['datetime'].map(lambda a: date2num(a))
    # sns.lineplot(data=pt)
    # sns.lineplot(data=recent_df, x='unix_timestamp', y='temp', hue='device')
    # sns.regplot(data=recent_df, x='unix_timestamp', y='temp', hue='device')

    plt.show()