def get_hacked_pos(netx_graph, name_nodes=None, prog='dot'):
    """Compute graphviz node positions for a (pgm) graph.

    Induces a left-to-right ordering of each CPD "ttype" group by adding
    invisible chain edges between the nodes of a group and placing the group
    in a ``rank='same'`` subgraph before laying out with graphviz.

    Args:
        netx_graph: networkx graph; may optionally carry a ``ttype2_cpds``
            attribute (pgm model) mapping ttype -> list of CPDs.
        name_nodes: unused (kept for interface compatibility).
        prog (str): graphviz layout program (default 'dot').

    Returns:
        dict: mapping of node -> (x, y) float position. Nodes graphviz did
        not position fall back to (0.0, 0.0).
    """
    import pygraphviz
    import networkx as netx

    # Add "invisible" edges to induce an ordering
    # Hack for layout (ordering of top level nodes)
    netx_graph2 = netx_graph.copy()
    if getattr(netx_graph, 'ttype2_cpds', None) is not None:
        grouped_nodes = []
        for ttype in netx_graph.ttype2_cpds.keys():
            ttype_cpds = netx_graph.ttype2_cpds[ttype]
            # use defined ordering
            ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
            # ttype_nodes = sorted(ttype_nodes)
            invis_edges = list(ut.itertwo(ttype_nodes))
            netx_graph2.add_edges_from(invis_edges)
            grouped_nodes.append(ttype_nodes)

        A = netx.to_agraph(netx_graph2)
        for nodes in grouped_nodes:
            # Keep each ttype group on the same graphviz rank
            A.add_subgraph(nodes, rank='same')
    else:
        A = netx.to_agraph(netx_graph2)

    # if name_nodes is not None:
    #     #netx.set_node_attributes(netx_graph, name='label', values={n: {'label': n} for n in all_nodes})
    #     invis_edges = list(ut.itertwo(name_nodes))
    #     netx_graph2.add_edges_from(invis_edges)
    #     A.add_subgraph(name_nodes, rank='same')
    # else:
    #     A = netx.to_agraph(netx_graph2)

    args = ''
    G = netx_graph
    A.layout(prog=prog, args=args)
    # A.draw('example.png', prog='dot')
    node_pos = {}
    for n in G:
        node_ = pygraphviz.Node(A, n)
        try:
            xx, yy = node_.attr['pos'].split(',')
            node_pos[n] = (float(xx), float(yy))
        except Exception:
            # BUGFIX: use lazy %-formatting; previously the extra positional
            # arg with no conversion specifier triggered a logging error.
            logger.info('no position for node %r', n)
            node_pos[n] = (0.0, 0.0)
    return node_pos
def get_hacked_pos(netx_graph, name_nodes=None, prog='dot'):
    """Compute graphviz node positions for a (pgm) graph (legacy copy).

    Adds invisible chain edges per CPD "ttype" group and pins each group to
    a single graphviz rank so top-level nodes keep their insertion order.

    Args:
        netx_graph: networkx graph; may carry a ``ttype2_cpds`` attribute.
        name_nodes: unused (kept for interface compatibility).
        prog (str): graphviz layout program (default 'dot').

    Returns:
        dict: node -> (x, y) float position; (0.0, 0.0) for unplaced nodes.
    """
    import pygraphviz
    import networkx as netx

    # Add "invisible" edges to induce an ordering
    # Hack for layout (ordering of top level nodes)
    netx_graph2 = netx_graph.copy()
    if getattr(netx_graph, 'ttype2_cpds', None) is not None:
        grouped_nodes = []
        for ttype in netx_graph.ttype2_cpds.keys():
            ttype_cpds = netx_graph.ttype2_cpds[ttype]
            # use defined ordering
            ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
            # ttype_nodes = sorted(ttype_nodes)
            invis_edges = list(ut.itertwo(ttype_nodes))
            netx_graph2.add_edges_from(invis_edges)
            grouped_nodes.append(ttype_nodes)

        A = netx.to_agraph(netx_graph2)
        for nodes in grouped_nodes:
            A.add_subgraph(nodes, rank='same')
    else:
        A = netx.to_agraph(netx_graph2)

    # if name_nodes is not None:
    #     #netx.set_node_attributes(netx_graph, 'label', {n: {'label': n} for n in all_nodes})
    #     invis_edges = list(ut.itertwo(name_nodes))
    #     netx_graph2.add_edges_from(invis_edges)
    #     A.add_subgraph(name_nodes, rank='same')
    # else:
    #     A = netx.to_agraph(netx_graph2)

    args = ''
    G = netx_graph
    A.layout(prog=prog, args=args)
    # A.draw('example.png', prog='dot')
    node_pos = {}
    for n in G:
        node_ = pygraphviz.Node(A, n)
        try:
            xx, yy = node_.attr["pos"].split(',')
            node_pos[n] = (float(xx), float(yy))
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            print("no position for node", n)
            node_pos[n] = (0.0, 0.0)
    return node_pos
def get_bayesnet_layout(model, name_nodes=None, prog='dot'):
    """
    Ensures ordering of layers is in order of addition via templates

    Args:
        model: pgm model (networkx-compatible); may carry ``ttype2_cpds``
            mapping ttype -> list of CPDs whose order defines layout order.
        name_nodes: unused (kept for interface compatibility).
        prog (str): graphviz layout program (default 'dot').

    Returns:
        dict: node -> (x, y) float position; (0.0, 0.0) for unplaced nodes.
    """
    import pygraphviz
    import networkx as nx

    # Add "invisible" edges to induce an ordering
    # Hack for layout (ordering of top level nodes)
    netx_graph2 = model.copy()
    if getattr(model, 'ttype2_cpds', None) is not None:
        grouped_nodes = []
        for ttype in model.ttype2_cpds.keys():
            ttype_cpds = model.ttype2_cpds[ttype]
            # use defined ordering
            ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
            # ttype_nodes = sorted(ttype_nodes)
            invis_edges = list(ut.itertwo(ttype_nodes))
            netx_graph2.add_edges_from(invis_edges)
            grouped_nodes.append(ttype_nodes)

        agraph = nx.nx_agraph.to_agraph(netx_graph2)
        for nodes in grouped_nodes:
            # Keep each ttype group on the same graphviz rank
            agraph.add_subgraph(nodes, rank='same')
    else:
        agraph = nx.nx_agraph.to_agraph(netx_graph2)
    logger.info(agraph)
    args = ''
    agraph.layout(prog=prog, args=args)
    # agraph.draw('example.png', prog='dot')
    node_pos = {}
    for n in model:
        node_ = pygraphviz.Node(agraph, n)
        try:
            xx, yy = node_.attr['pos'].split(',')
            node_pos[n] = (float(xx), float(yy))
        except Exception:
            # BUGFIX: use lazy %-formatting; previously the extra positional
            # arg with no conversion specifier triggered a logging error.
            logger.info('no position for node %r', n)
            node_pos[n] = (0.0, 0.0)
    return node_pos
def get_bayesnet_layout(model, name_nodes=None, prog='dot'):
    """
    Ensures ordering of layers is in order of addition via templates

    Legacy copy using ``print`` instead of the module logger.

    Args:
        model: pgm model (networkx-compatible); may carry ``ttype2_cpds``.
        name_nodes: unused (kept for interface compatibility).
        prog (str): graphviz layout program (default 'dot').

    Returns:
        dict: node -> (x, y) float position; (0.0, 0.0) for unplaced nodes.
    """
    import pygraphviz
    import networkx as nx

    # Add "invisible" edges to induce an ordering
    # Hack for layout (ordering of top level nodes)
    netx_graph2 = model.copy()
    if getattr(model, 'ttype2_cpds', None) is not None:
        grouped_nodes = []
        for ttype in model.ttype2_cpds.keys():
            ttype_cpds = model.ttype2_cpds[ttype]
            # use defined ordering
            ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
            # ttype_nodes = sorted(ttype_nodes)
            invis_edges = list(ut.itertwo(ttype_nodes))
            netx_graph2.add_edges_from(invis_edges)
            grouped_nodes.append(ttype_nodes)

        agraph = nx.nx_agraph.to_agraph(netx_graph2)
        for nodes in grouped_nodes:
            agraph.add_subgraph(nodes, rank='same')
    else:
        agraph = nx.nx_agraph.to_agraph(netx_graph2)
    print(agraph)
    args = ''
    agraph.layout(prog=prog, args=args)
    # agraph.draw('example.png', prog='dot')
    node_pos = {}
    for n in model:
        node_ = pygraphviz.Node(agraph, n)
        try:
            xx, yy = node_.attr['pos'].split(',')
            node_pos[n] = (float(xx), float(yy))
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            print('no position for node', n)
            node_pos[n] = (0.0, 0.0)
    return node_pos
def try_query(model, infr, evidence, interest_ttypes=[], verbose=True):
    r"""
    CommandLine:
        python -m wbia.algo.hots.bayes --exec-try_query --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.bayes import *  # NOQA
        >>> verbose = True
        >>> other_evidence = {}
        >>> name_evidence = [1, None, 0, None]
        >>> score_evidence = ['high', 'low', 'low']
        >>> query_vars = None
        >>> model = make_name_model(num_annots=4, num_names=4, verbose=True, mode=1)
        >>> model, evidence, soft_evidence = update_model_evidence(model, name_evidence, score_evidence, other_evidence)
        >>> interest_ttypes = ['name']
        >>> infr = pgmpy.inference.BeliefPropagation(model)
        >>> evidence = infr._ensure_internal_evidence(evidence, model)
        >>> query_results = try_query(model, infr, evidence, interest_ttypes, verbose)
        >>> result = ('query_results = %s' % (str(query_results),))
        >>> ut.quit_if_noshow()
        >>> show_model(model, show_prior=True, **query_results)
        >>> ut.show_if_requested()

    Ignore:
        query_vars = ut.setdiff_ordered(model.nodes(), list(evidence.keys()))
        probs = infr.query(query_vars, evidence)
        map_assignment = infr.map_query(query_vars, evidence)
    """
    # NOTE(review): mutable default `interest_ttypes=[]` — it is never mutated
    # here (only referenced in commented-out code), so it is harmless, but a
    # None sentinel would be the conventional fix.
    # NOTE(review): the `infr` parameter is immediately shadowed below, and the
    # `if True:` makes everything after the early return dead code kept for
    # reference — presumably an experiment superseded by `bruteforce`.
    infr = pgmpy.inference.VariableElimination(model)
    # infr = pgmpy.inference.BeliefPropagation(model)
    if True:
        # Active path: delegate entirely to the brute-force implementation.
        return bruteforce(model, query_vars=None, evidence=evidence)
    else:
        import vtool as vt
        query_vars = ut.setdiff_ordered(model.nodes(), list(evidence.keys()))
        # hack: never query score variables directly
        query_vars = ut.setdiff_ordered(
            query_vars, ut.list_getattr(model.ttype2_cpds['score'], 'variable'))
        if verbose:
            evidence_str = ', '.join(model.pretty_evidence(evidence))
            logger.info('P(' + ', '.join(query_vars) + ' | ' + evidence_str + ') = ')
        # Compute MAP joints
        # There is a bug here.
        # map_assign = infr.map_query(query_vars, evidence)
        # (probably an invalid thing to do)
        # joint_factor = pgmpy.factors.factor_product(*factor_list)
        # Brute force MAP
        name_vars = ut.list_getattr(model.ttype2_cpds['name'], 'variable')
        query_name_vars = ut.setdiff_ordered(name_vars, list(evidence.keys()))
        # TODO: incorporate case where Na is assigned to Fred
        # evidence_h = ut.delete_keys(evidence.copy(), ['Na'])
        joint = model.joint_distribution()
        joint.evidence_based_reduction(query_name_vars, evidence, inplace=True)
        # Find static row labels in the evidence
        given_name_vars = [var for var in name_vars if var in evidence]
        given_name_idx = ut.dict_take(evidence, given_name_vars)
        given_name_val = [
            joint.statename_dict[var][idx]
            for var, idx in zip(given_name_vars, given_name_idx)
        ]
        new_vals = joint.values.ravel()
        # Add static evidence variables to the relabeled name states
        new_vars = given_name_vars + joint.variables
        new_rows = [tuple(given_name_val) + row for row in joint._row_labels()]
        # Relabel rows based on the knowledge that
        # everything is the same, only the names have changed.
        temp_basis = [i for i in range(model.num_names)]

        def relabel_names(names, temp_basis=temp_basis):
            # Map each distinct name to a canonical index in first-seen order.
            names = list(map(six.text_type, names))
            mapping = {}
            for n in names:
                if n not in mapping:
                    mapping[n] = len(mapping)
            new_names = tuple([temp_basis[mapping[n]] for n in names])
            return new_names

        relabeled_rows = list(map(relabel_names, new_rows))
        # Combine probability of rows with the same (new) label
        data_ids = np.array(vt.other.compute_unique_data_ids_(relabeled_rows))
        unique_ids, groupxs = vt.group_indices(data_ids)
        reduced_row_lbls = ut.take(relabeled_rows, ut.get_list_column(groupxs, 0))
        reduced_row_lbls = list(map(list, reduced_row_lbls))
        reduced_values = np.array(
            [g.sum() for g in vt.apply_grouping(new_vals, groupxs)])
        # Relabel the rows one more time to agree with initial constraints
        used_ = []
        replaced = []
        for colx, (var, val) in enumerate(zip(given_name_vars, given_name_val)):
            # All columns must be the same for this labeling
            alias = reduced_row_lbls[0][colx]
            reduced_row_lbls = ut.list_replace(reduced_row_lbls, alias, val)
            replaced.append(alias)
            used_.append(val)
        basis = model.ttype2_cpds['name'][0]._template_.basis
        find_remain_ = ut.setdiff_ordered(temp_basis, replaced)
        repl_remain_ = ut.setdiff_ordered(basis, used_)
        for find, repl in zip(find_remain_, repl_remain_):
            reduced_row_lbls = ut.list_replace(reduced_row_lbls, find, repl)

        # Now find the most likely state
        sortx = reduced_values.argsort()[::-1]
        sort_reduced_row_lbls = ut.take(reduced_row_lbls, sortx.tolist())
        sort_reduced_values = reduced_values[sortx]

        # Remove evidence based labels
        new_vars_ = new_vars[len(given_name_vars):]
        sort_reduced_row_lbls_ = ut.get_list_column(
            sort_reduced_row_lbls, slice(len(given_name_vars), None))

        sort_reduced_row_lbls_[0]

        # hack into a new joint factor
        var_states = ut.lmap(ut.unique_ordered, zip(*sort_reduced_row_lbls_))
        statename_dict = dict(zip(new_vars, var_states))
        cardinality = ut.lmap(len, var_states)
        val_lookup = dict(
            zip(ut.lmap(tuple, sort_reduced_row_lbls_),
                sort_reduced_values))
        values = np.zeros(np.prod(cardinality))
        for idx, state in enumerate(ut.iprod(*var_states)):
            if state in val_lookup:
                values[idx] = val_lookup[state]
        joint2 = pgmpy.factors.Factor(new_vars_, cardinality, values,
                                      statename_dict=statename_dict)
        logger.info(joint2)
        max_marginals = {}
        for i, var in enumerate(query_name_vars):
            one_out = query_name_vars[:i] + query_name_vars[i + 1:]
            max_marginals[var] = joint2.marginalize(one_out, inplace=False)
            # max_marginals[var] = joint2.maximize(one_out, inplace=False)
        # NOTE(review): hard-coded variable names 'Nb'/'Nc' — debugging output
        # that assumes at least three annotations; confirm before reviving.
        logger.info(joint2.marginalize(['Nb', 'Nc'], inplace=False))
        factor_list = max_marginals.values()

        # Better map assignment based on knowledge of labels
        map_assign = dict(zip(new_vars_, sort_reduced_row_lbls_[0]))

        sort_reduced_rowstr_lbls = [
            ut.repr2(dict(zip(new_vars, lbls)), explicit=True, nobraces=True,
                     strvals=True)
            for lbls in sort_reduced_row_lbls_
        ]

        top_assignments = list(
            zip(sort_reduced_rowstr_lbls[:3], sort_reduced_values))
        if len(sort_reduced_values) > 3:
            top_assignments += [('other', 1 - sum(sort_reduced_values[:3]))]

        # import utool
        # utool.embed()

        # Compute all marginals
        # probs = infr.query(query_vars, evidence)
        # probs = infr.query(query_vars, evidence)
        # factor_list = probs.values()

        ## Marginalize over non-query, non-evidence
        # irrelevant_vars = ut.setdiff_ordered(joint.variables, list(evidence.keys()) + query_vars)
        # joint.marginalize(irrelevant_vars)
        # joint.normalize()
        # new_rows = joint._row_labels()
        # new_vals = joint.values.ravel()
        # map_vals = new_rows[new_vals.argmax()]
        # map_assign = dict(zip(joint.variables, map_vals))
        # Compute Marginalized MAP joints
        # marginalized_joints = {}
        # for ttype in interest_ttypes:
        #     other_vars = [v for v in joint_factor.scope()
        #                   if model.var2_cpd[v].ttype != ttype]
        #     marginal = joint_factor.marginalize(other_vars, inplace=False)
        #     marginalized_joints[ttype] = marginal
        query_results = {
            'factor_list': factor_list,
            'top_assignments': top_assignments,
            'map_assign': map_assign,
            'marginalized_joints': None,
        }
        return query_results
def make_name_model(num_annots, num_names=None, verbose=True, mode=1):
    """
    Defines the general name model

    Builds a pgm name-identification model: per-annotation `name` variables,
    pairwise `match` variables, and `score` observations, with several
    experimental wirings selected by ``mode`` (1/5 = name->match->score,
    2 = name-pair->score directly, 3/4 = with a `dup` variable).

    CommandLine:
        python -m wbia.algo.hots.bayes --exec-make_name_model --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.bayes import *  # NOQA
        >>> defaults = dict(num_annots=2, num_names=2, verbose=True, mode=2)
        >>> kw = ut.argparse_funckw(make_name_model, defaults)
        >>> model = make_name_model(**kw)
        >>> ut.quit_if_noshow()
        >>> show_model(model, show_prior=True)
        >>> ut.show_if_requested()
    """
    # annots = ut.chr_range(num_annots, base='a')
    # NOTE(review): command-line flags can override the `mode` argument and
    # the annotation letter base — confirm this is intended outside of tests.
    mode = ut.get_argval('--mode', default=mode)
    annots = ut.chr_range(num_annots,
                          base=ut.get_argval('--base', default='a'))
    # The indexes of match CPDs will not change if another annotation is added
    upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2)
    if num_names is None:
        num_names = num_annots

    # -- Define CPD Templates
    def match_pmf(match_type, n1, n2):
        # Deterministic: annotations match iff their names are equal.
        if n1 == n2:
            val = 1.0 if match_type == 'same' else 0.0
            # val = .999 if match_type == 'same' else 0.001
        elif n1 != n2:
            # val = 0.01 if match_type == 'same' else .99
            val = 0.0 if match_type == 'same' else 1.0
        return val

    def score_pmf(score_type, match_type):
        # P(score | match)
        score_lookup = {
            'same': {'low': 0.1, 'high': 0.9, 'veryhigh': 0.9},
            'diff': {'low': 0.9, 'high': 0.09, 'veryhigh': 0.01}
            #'same': {'low': .1, 'high': .9},
            #'diff': {'low': .9, 'high': .1}
        }
        val = score_lookup[match_type][score_type]
        return val

    def score_pmf3(score_type, match_type, isdup='False'):
        # P(score | match, dup) — used by modes 3/4
        score_lookup = {
            'False': {
                'same': {'low': 0.1, 'high': 0.5, 'veryhigh': 0.4},
                'diff': {'low': 0.9, 'high': 0.09, 'veryhigh': 0.01},
            },
            'True': {
                'same': {'low': 0.01, 'high': 0.2, 'veryhigh': 0.79},
                'diff': {'low': 0.4, 'high': 0.4, 'veryhigh': 0.2},
            },
        }
        val = score_lookup[isdup][match_type][score_type]
        return val

    def score_pmf2(score_type, n1, n2):
        # P(score | names equal?) — used by mode 2 (no match variable)
        score_lookup = {
            True: {'low': 0.1, 'high': 0.4, 'veryhigh': 0.5},
            False: {'low': 0.9, 'high': 0.09, 'veryhigh': 0.01},
        }
        val = score_lookup[n1 == n2][score_type]
        return val

    def dup_pmf(dupstate, match_type):
        # P(dup | match): duplicates only possible for matching pairs
        lookup = {
            'same': {'True': 0.5, 'False': 0.5},
            'diff': {'True': 0.0, 'False': 1.0},
        }
        return lookup[match_type][dupstate]

    def check_pmf(n0, n1, match_type):
        # placeholder — never wired into a template
        pass

    def trimatch_pmf(match_ab, match_bc, match_ca):
        # Transitivity constraint over a triangle of match variables
        lookup = {
            'same': {
                'same': {'same': 1, 'diff': 0},
                'diff': {'same': 0, 'diff': 1},
            },
            'diff': {
                'same': {'same': 0, 'diff': 1},
                'diff': {'same': 0.5, 'diff': 0.5},
            },
        }
        return lookup[match_ca][match_bc][match_ab]

    name_cpd_t = pgm_ext.TemplateCPD('name', ('n', num_names), varpref='N',
                                     special_basis_pool=SPECIAL_BASIS_POOL)

    if mode == 1 or mode == 5:
        match_cpd_t = pgm_ext.TemplateCPD(
            'match', ['diff', 'same'],
            varpref='M',
            evidence_ttypes=[name_cpd_t, name_cpd_t],
            pmf_func=match_pmf,
        )
        if mode == 5:
            trimatch_cpd_t = pgm_ext.TemplateCPD(
                'tri_match', ['diff', 'same'],
                varpref='T',
                # evidence_ttypes=[match_cpd_t, match_cpd_t, match_cpd_t],
                evidence_ttypes=[match_cpd_t, match_cpd_t],
                pmf_func=trimatch_pmf,
            )
            score_cpd_t = pgm_ext.TemplateCPD(
                #'score', ['low', 'high', 'veryhigh'],
                'score', ['low', 'high'],
                varpref='S',
                evidence_ttypes=[match_cpd_t],
                pmf_func=score_pmf,
            )
        else:
            score_cpd_t = pgm_ext.TemplateCPD(
                #'score', ['low', 'high', 'veryhigh'],
                'score', ['low', 'high'],
                varpref='S',
                evidence_ttypes=[match_cpd_t],
                pmf_func=score_pmf,
            )
    elif mode == 2:
        # NOTE(review): re-creates name_cpd_t with identical args — redundant
        # but harmless
        name_cpd_t = pgm_ext.TemplateCPD('name', ('n', num_names), varpref='N',
                                         special_basis_pool=SPECIAL_BASIS_POOL)
        score_cpd_t = pgm_ext.TemplateCPD(
            #'score', ['low', 'high', 'veryhigh'],
            'score', ['low', 'high'],
            varpref='S',
            evidence_ttypes=[name_cpd_t, name_cpd_t],
            pmf_func=score_pmf2,
        )
    elif mode == 3 or mode == 4:
        match_cpd_t = pgm_ext.TemplateCPD(
            'match', ['diff', 'same'],
            varpref='M',
            evidence_ttypes=[name_cpd_t, name_cpd_t],
            pmf_func=match_pmf,
        )
        if mode == 3:
            # mode 3: dup is a free (root) variable
            dup_cpd_t = pgm_ext.TemplateCPD('dup', ['False', 'True'],
                                            varpref='D')
        else:
            # mode 4: dup depends on match
            dup_cpd_t = pgm_ext.TemplateCPD(
                'dup', ['False', 'True'],
                varpref='D',
                evidence_ttypes=[match_cpd_t],
                pmf_func=dup_pmf,
            )
        score_cpd_t = pgm_ext.TemplateCPD(
            'score', ['low', 'high', 'veryhigh'],
            varpref='S',
            evidence_ttypes=[match_cpd_t, dup_cpd_t],
            pmf_func=score_pmf3,
        )

    # Instanciate templates
    if mode == 1 or mode == 5:
        name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
        namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
        match_cpds = [
            match_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds
        ]
        score_cpds = [
            score_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)
        ]
        if mode == 5:
            # triple_idxs = ut.colwise_diag_idxs(num_annots, 3)
            tid2_match = {cpd._template_id: cpd for cpd in match_cpds}
            trimatch_cpds = []
            # such hack: for each match edge (a,b), find the two other match
            # edges touching the remaining annotations to close the triangle.
            for cpd in match_cpds:
                parents = []
                this_ = list(cpd._template_id)
                for aid in annots:
                    if aid in this_:
                        continue
                    for aid2 in this_:
                        key = aid2 + aid
                        if key not in tid2_match:
                            key = aid + aid2
                        parents += [tid2_match[key]]
                trimatch_cpds += [trimatch_cpd_t.new_cpd(parents=parents)]

            # score_cpds = [score_cpd_t.new_cpd(parents=cpds)
            #               for cpds in zip(trimatch_cpds)]

            cpd_list = name_cpds + score_cpds + match_cpds + trimatch_cpds
        else:
            cpd_list = name_cpds + score_cpds + match_cpds
    elif mode == 2:
        name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
        namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
        score_cpds = [
            score_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds
        ]
        cpd_list = name_cpds + score_cpds
    elif mode == 3 or mode == 4:
        name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
        namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
        match_cpds = [
            match_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds
        ]
        if mode == 3:
            dup_cpds = [
                dup_cpd_t.new_cpd(parents=''.join(map(str, aids)))
                for aids in ut.list_unflat_take(annots, upper_diag_idxs)
            ]
        else:
            dup_cpds = [
                dup_cpd_t.new_cpd(parents=[mcpds]) for mcpds in match_cpds
            ]
        score_cpds = [
            score_cpd_t.new_cpd(parents=([mcpds] + [dcpd]))
            for mcpds, dcpd in zip(match_cpds, dup_cpds)
        ]
        cpd_list = name_cpds + score_cpds + match_cpds + dup_cpds

    # logger.info('upper_diag_idxs = %r' % (upper_diag_idxs,))
    logger.info('score_cpds = %r' %
                (ut.list_getattr(score_cpds, 'variable'),))
    # import sys
    # sys.exit(1)

    # Make Model
    model = pgm_ext.define_model(cpd_list)
    model.num_names = num_names

    if verbose:
        model.print_templates()
        # ut.colorprint('\n --- CPD Templates ---', 'blue')
        # for temp_cpd in templates:
        #     ut.colorprint(temp_cpd._cpdstr('psql'), 'cyan')
    # print_ascii_graph(model)
    return model
def name_model_mode1(num_annots, num_names=None, verbose=True):
    r"""
    spaghettii

    Standalone mode-1 model: per-annotation `name` variables, deterministic
    pairwise `match` variables, and two-state `score` observations.

    CommandLine:
        python -m wbia.algo.hots.bayes --exec-name_model_mode1 --show
        python -m wbia.algo.hots.bayes --exec-name_model_mode1
        python -m wbia.algo.hots.bayes --exec-name_model_mode1 --num-annots=3

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.bayes import *  # NOQA
        >>> defaults = dict(num_annots=2, num_names=2, verbose=True)
        >>> kw = ut.argparse_funckw(name_model_mode1, defaults)
        >>> model = name_model_mode1(**kw)
        >>> ut.quit_if_noshow()
        >>> show_model(model, show_prior=False, show_title=False)
        >>> ut.show_if_requested()

    Ignore:
        import nx2tikz
        logger.info(nx2tikz.dumps_tikz(model, layout='layered', use_label=True))
    """
    annots = ut.chr_range(num_annots,
                          base=ut.get_argval('--base', default='a'))
    # The indexes of match CPDs will not change if another annotation is added
    upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2)
    if num_names is None:
        num_names = num_annots

    # +--- Define CPD Templates ---

    # +-- Name Factor ---
    name_cpd_t = pgm_ext.TemplateCPD('name', ('n', num_names), varpref='N',
                                     special_basis_pool=SPECIAL_BASIS_POOL)
    name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]

    # +-- Match Factor ---
    def match_pmf(match_type, n1, n2):
        # Deterministic: annotations match iff their names are equal.
        return {
            True: {'same': 1.0, 'diff': 0.0},
            False: {'same': 0.0, 'diff': 1.0},
        }[n1 == n2][match_type]

    match_cpd_t = pgm_ext.TemplateCPD(
        'match', ['diff', 'same'],
        varpref='M',
        evidence_ttypes=[name_cpd_t, name_cpd_t],
        pmf_func=match_pmf,
    )
    namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
    match_cpds = [match_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds]

    # +-- Score Factor ---
    def score_pmf(score_type, match_type):
        # P(score | match)
        score_lookup = {
            'same': {'low': 0.1, 'high': 0.9, 'veryhigh': 0.9},
            'diff': {'low': 0.9, 'high': 0.09, 'veryhigh': 0.01},
        }
        val = score_lookup[match_type][score_type]
        return val

    score_cpd_t = pgm_ext.TemplateCPD(
        'score', ['low', 'high'],
        varpref='S',
        evidence_ttypes=[match_cpd_t],
        pmf_func=score_pmf,
    )
    score_cpds = [
        score_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)
    ]

    # L___ End CPD Definitions ___

    cpd_list = name_cpds + score_cpds + match_cpds
    logger.info('score_cpds = %r' %
                (ut.list_getattr(score_cpds, 'variable'),))

    # Make Model
    model = pgm_ext.define_model(cpd_list)
    model.num_names = num_names

    if verbose:
        model.print_templates()
    return model
def name_model_mode5(num_annots, num_names=None, verbose=True, mode=1):
    """Build the mode-5 name model: mode-1 plus transitivity constraints.

    Same name/match/score structure as mode 1, with additional `tri_match`
    variables that softly enforce transitivity over each triangle of match
    variables.

    Args:
        num_annots (int): number of annotations (one `name` variable each).
        num_names (int): number of name states; defaults to ``num_annots``.
        verbose (bool): print CPD templates after construction.
        mode (int): read for CLI override only; the construction below is
            fixed to the mode-5 wiring regardless of this value.

    Returns:
        model: pgm model with ``num_names`` attached.
    """
    mode = ut.get_argval('--mode', default=mode)
    annots = ut.chr_range(num_annots,
                          base=ut.get_argval('--base', default='a'))
    # The indexes of match CPDs will not change if another annotation is added
    upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2)
    if num_names is None:
        num_names = num_annots

    # -- Define CPD Templates
    name_cpd_t = pgm_ext.TemplateCPD('name', ('n', num_names), varpref='N',
                                     special_basis_pool=SPECIAL_BASIS_POOL)
    name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]

    def match_pmf(match_type, n1, n2):
        # Deterministic: annotations match iff their names are equal.
        return {
            True: {'same': 1.0, 'diff': 0.0},
            False: {'same': 0.0, 'diff': 1.0},
        }[n1 == n2][match_type]

    match_cpd_t = pgm_ext.TemplateCPD(
        'match', ['diff', 'same'],
        varpref='M',
        evidence_ttypes=[name_cpd_t, name_cpd_t],
        pmf_func=match_pmf,
    )
    namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
    match_cpds = [match_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds]

    def trimatch_pmf(match_ab, match_bc, match_ca):
        # Transitivity over a match triangle: two 'same' edges force the
        # third; two 'diff' edges leave the third uncertain (0.5/0.5).
        lookup = {
            'same': {
                'same': {'same': 1, 'diff': 0},
                'diff': {'same': 0, 'diff': 1},
            },
            'diff': {
                'same': {'same': 0, 'diff': 1},
                'diff': {'same': 0.5, 'diff': 0.5},
            },
        }
        return lookup[match_ca][match_bc][match_ab]

    trimatch_cpd_t = pgm_ext.TemplateCPD(
        'tri_match', ['diff', 'same'],
        varpref='T',
        evidence_ttypes=[match_cpd_t, match_cpd_t],
        pmf_func=trimatch_pmf,
    )
    # triple_idxs = ut.colwise_diag_idxs(num_annots, 3)
    tid2_match = {cpd._template_id: cpd for cpd in match_cpds}
    trimatch_cpds = []
    # such hack: for each match edge (a,b), find the two other match edges
    # touching the remaining annotations to close the triangle.
    for cpd in match_cpds:
        parents = []
        this_ = list(cpd._template_id)
        for aid in annots:
            if aid in this_:
                continue
            for aid2 in this_:
                key = aid2 + aid
                if key not in tid2_match:
                    key = aid + aid2
                parents += [tid2_match[key]]
        trimatch_cpds += [trimatch_cpd_t.new_cpd(parents=parents)]

    def score_pmf(score_type, match_type):
        # P(score | match)
        score_lookup = {
            'same': {'low': 0.1, 'high': 0.9, 'veryhigh': 0.9},
            'diff': {'low': 0.9, 'high': 0.09, 'veryhigh': 0.01},
        }
        val = score_lookup[match_type][score_type]
        return val

    score_cpd_t = pgm_ext.TemplateCPD(
        'score', ['low', 'high'],
        varpref='S',
        evidence_ttypes=[match_cpd_t],
        pmf_func=score_pmf,
    )
    score_cpds = [
        score_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)
    ]
    # score_cpds = [score_cpd_t.new_cpd(parents=cpds)
    #               for cpds in zip(trimatch_cpds)]

    cpd_list = name_cpds + score_cpds + match_cpds + trimatch_cpds
    logger.info('score_cpds = %r' %
                (ut.list_getattr(score_cpds, 'variable'),))

    # Make Model
    model = pgm_ext.define_model(cpd_list)
    model.num_names = num_names

    if verbose:
        model.print_templates()
    return model
def draw_bayesian_model(model, evidence={}, soft_evidence={}, fnum=None,
                        pnum=None, **kwargs):
    """Draw a (pgm) model with plottool, overlaying inference results.

    Converts the model to a BayesianModel if needed, lays it out with
    graphviz dot (grouping nodes of each ttype on one rank via invisible
    edges), draws text annotations for evidence/factors, sizes the figure
    by annotation count, and builds a title from the MAP assignment.

    Args:
        model: pgm model (or anything with ``to_bayesian_model``).
        evidence (dict): hard evidence; presence toggles the colorbars.
        soft_evidence (dict): soft evidence passed to the viz-attr builder.
        fnum, pnum: plottool figure/subplot numbers.
        **kwargs: consumed keys include 'factor_list', 'map_assign',
            'top_assignments', 'method', 'show_title'; the rest are
            forwarded to ``get_node_viz_attrs``.

    Side effects:
        Draws into the current plottool figure; no return value.
    """
    from pgmpy.models import BayesianModel
    if not isinstance(model, BayesianModel):
        model = model.to_bayesian_model()
    import plottool as pt
    import networkx as nx
    kwargs = kwargs.copy()
    factor_list = kwargs.pop('factor_list', [])
    ttype_colors, ttype_scalars = make_colorcodes(model)
    textprops = {
        'horizontalalignment': 'left',
        'family': 'monospace',
        'size': 8,
    }
    # build graph attrs
    tup = get_node_viz_attrs(model, evidence, soft_evidence, factor_list,
                             ttype_colors, **kwargs)
    node_color, pos_list, pos_dict, takws = tup
    # draw graph
    has_infered = evidence or 'factor_list' in kwargs

    if False:
        # dead alternative: plain networkx drawing without graphviz groups
        fig = pt.figure(fnum=fnum, pnum=pnum, doclf=True)  # NOQA
        ax = pt.gca()
        drawkw = dict(pos=pos_dict, ax=ax, with_labels=True, node_size=1100,
                      node_color=node_color)
        nx.draw(model, **drawkw)
    else:
        # BE VERY CAREFUL
        if 1:
            # Downcast to a plain DiGraph so show_nx does not trip over the
            # pgmpy model class; attach graphviz group attributes.
            graph = model.copy()
            graph.__class__ = nx.DiGraph
            graph.graph['groupattrs'] = ut.ddict(dict)
            #graph = model.
            if getattr(graph, 'ttype2_cpds', None) is not None:
                # Add invis edges and ttype groups
                for ttype in model.ttype2_cpds.keys():
                    ttype_cpds = model.ttype2_cpds[ttype]
                    # use defined ordering
                    ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
                    # ttype_nodes = sorted(ttype_nodes)
                    invis_edges = list(ut.itertwo(ttype_nodes))
                    graph.add_edges_from(invis_edges)
                    nx.set_edge_attributes(
                        graph, 'style',
                        {edge: 'invis' for edge in invis_edges})
                    nx.set_node_attributes(
                        graph, 'groupid',
                        {node: ttype for node in ttype_nodes})
                    graph.graph['groupattrs'][ttype]['rank'] = 'same'
                    graph.graph['groupattrs'][ttype]['cluster'] = False
        else:
            graph = model
        pt.show_nx(graph, layout_kw={'prog': 'dot'}, fnum=fnum, pnum=pnum,
                   verbose=0)
        pt.zoom_factory()
        fig = pt.gcf()
        ax = pt.gca()
        pass
    hacks = [
        pt.draw_text_annotations(textprops=textprops, **takw)
        for takw in takws if takw
    ]

    xmin, ymin = np.array(pos_list).min(axis=0)
    xmax, ymax = np.array(pos_list).max(axis=0)

    if 'name' in model.ttype2_template:
        num_names = len(model.ttype2_template['name'].basis)
        num_annots = len(model.ttype2_cpds['name'])
        # Hand-tuned figure sizing by annotation count
        if num_annots > 4:
            ax.set_xlim((xmin - 40, xmax + 40))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(30, 7)
        else:
            ax.set_xlim((xmin - 42, xmax + 42))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(23, 7)
        title = 'num_names=%r, num_annots=%r' % (num_names, num_annots,)
    else:
        title = ''
    map_assign = kwargs.get('map_assign', None)

    def word_insert(text):
        # prefix helper: add a trailing space only for non-empty text
        return '' if len(text) == 0 else text + ' '

    top_assignments = kwargs.get('top_assignments', None)
    if top_assignments is not None:
        map_assign, map_prob = top_assignments[0]
        if map_assign is not None:
            title += '\n%sMAP: ' % (word_insert(kwargs.get('method', '')))
            title += map_assign + ' @' + '%.2f%%' % (100 * map_prob,)
    if kwargs.get('show_title', True):
        pt.set_figtitle(title, size=14)

    # Deferred text-annotation draw callbacks
    for hack in hacks:
        hack()

    if has_infered:
        # Hack in colorbars
        # if ut.list_type(basis) is int:
        #     pt.colorbar(scalars, colors, lbl='score',
        #                 ticklabels=np.array(basis) + 1)
        # else:
        #     pt.colorbar(scalars, colors, lbl='score', ticklabels=basis)
        keys = ['name', 'score']
        locs = ['left', 'right']
        for key, loc in zip(keys, locs):
            if key in ttype_colors:
                basis = model.ttype2_template[key].basis
                # scalars =
                colors = ttype_colors[key]
                scalars = ttype_scalars[key]
                pt.colorbar(scalars, colors, lbl=key, ticklabels=basis,
                            ticklocation=loc)
def make_name_model(num_annots, num_names=None, verbose=True, mode=1,
                    num_scores=2, p_score_given_same=None,
                    hack_score_only=False, score_basis=None,
                    special_names=None):
    r"""
    Builds the mode-1 name model with a parameterized score distribution:
    per-annotation `name` variables, deterministic pairwise `match`
    variables, and `score` observations whose states/probabilities come
    from ``num_scores`` / ``score_basis`` / ``p_score_given_same``.

    CommandLine:
        python -m ibeis.algo.hots.bayes --exec-make_name_model --show
        python -m ibeis.algo.hots.bayes --exec-make_name_model
        python -m ibeis.algo.hots.bayes --exec-make_name_model --num-annots=3

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.bayes import *  # NOQA
        >>> defaults = dict(num_annots=2, num_names=2, verbose=True)
        >>> modeltype = ut.get_argval('--modeltype', default='bayes')
        >>> kw = ut.argparse_funckw(make_name_model, defaults)
        >>> model = make_name_model(**kw)
        >>> ut.quit_if_noshow()
        >>> model.show_model(show_prior=False, show_title=False, modeltype=modeltype)
        >>> ut.show_if_requested()
    """
    if special_names is None:
        special_names = SPECIAL_BASIS_POOL

    assert mode == 1, 'only can do mode 1'
    base = ut.get_argval('--base', type_=str, default='a')
    annots = ut.chr_range(num_annots, base=base)
    # The indexes of match CPDs will not change if another annotation is added
    upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2)
    if hack_score_only:
        # Keep only the last `hack_score_only` pairwise indices
        upper_diag_idxs = upper_diag_idxs[-hack_score_only:]

    if num_names is None:
        num_names = num_annots

    # +--- Define CPD Templates and Instantiation ---
    cpd_list = []

    # Name Factor
    name_cpd_t = pgm_ext.TemplateCPD(
        'name', ('n', num_names), special_basis_pool=special_names)
    name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
    #name_cpds = [name_cpd_t.new_cpd(parents=aid, constrain_state=count)
    #             for count, aid in enumerate(annots, start=1)]
    cpd_list.extend(name_cpds)

    # Match Factor
    def match_pmf(match_type, n1, n2):
        # Deterministic: annotations match iff their names are equal.
        return {
            True: {'same': 1.0, 'diff': 0.0},
            False: {'same': 0.0, 'diff': 1.0},
        }[n1 == n2][match_type]
    match_states = ['diff', 'same']
    match_cpd_t = pgm_ext.TemplateCPD(
        'match', match_states,
        evidence_ttypes=[name_cpd_t, name_cpd_t],
        pmf_func=match_pmf)
    namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
    match_cpds = [match_cpd_t.new_cpd(parents=cpds)
                  for cpds in namepair_cpds]
    cpd_list.extend(match_cpds)

    # Score Factor
    score_states = list(range(num_scores))
    if score_basis is not None:
        score_states = ['%.2f' % (s,) for s in score_basis]
    if p_score_given_same is None:
        # Default: linearly increasing, cumulative, normalized distribution
        tmp = np.arange(num_scores + 1)[1:]
        tmp = np.cumsum(tmp)
        tmp = (tmp / tmp.sum())
        p_score_given_same = tmp

    def score_pmf(score_type, match_type):
        if isinstance(score_type, six.string_types):
            score_type = score_states.index(score_type)
        if match_type == 'same':
            return p_score_given_same[score_type]
        else:
            # mirror the distribution for non-matching pairs
            return p_score_given_same[-(score_type + 1)]

    score_cpd_t = pgm_ext.TemplateCPD(
        'score', score_states,
        evidence_ttypes=[match_cpd_t],
        pmf_func=score_pmf)
    score_cpds = [score_cpd_t.new_cpd(parents=cpds)
                  for cpds in zip(match_cpds)]
    cpd_list.extend(score_cpds)

    # Experimental factors, disabled by hard-coded flags
    with_humans = False
    if with_humans:
        human_states = ['diff', 'same']
        human_cpd_t = pgm_ext.TemplateCPD(
            'human', human_states,
            evidence_ttypes=[match_cpd_t],
            pmf_func=[[.9, .1], [.1, .9]])
        human_cpds = [human_cpd_t.new_cpd(parents=cpds)
                      for cpds in zip(match_cpds)]
        cpd_list.extend(human_cpds)

    with_rank = False  # Rank depends on dependant scores
    if with_rank:
        rank_states = ['0', '1', '2', '3']
        rank_cpd_t = pgm_ext.TemplateCPD(
            'rank', rank_states,
            evidence_ttypes=[match_cpd_t],
            pmf_func=None)
        rank_cpds = [rank_cpd_t.new_cpd(parents=cpds)
                     for cpds in zip(match_cpds)]
        cpd_list.extend(rank_cpds)

    # L___ End CPD Definitions ___

    print('score_cpds = %r' % (ut.list_getattr(score_cpds, 'variable'),))

    # Make Model
    model = pgm_ext.define_model(cpd_list)
    model.num_names = num_names

    if verbose:
        model.print_templates(ignore_ttypes=['match'])
    return model
def test_model(num_annots, num_names, score_evidence=None, name_evidence=None,
               other_evidence={}, noquery=False, verbose=None, **kwargs):
    """
    Build a name model, apply the given evidence, optionally run a cluster
    query, and visualize the result.

    Args:
        num_annots (int): number of annotation nodes
        num_names (int): number of name states
        score_evidence (list): observed score states (default: none)
        name_evidence (list): observed name states (default: none)
        other_evidence (dict): extra variable -> state assignments;
            read-only here, so a shared default dict is acceptable
        noquery (bool): if True, skip inference entirely
        verbose (bool): defaults to ut.VERBOSE
        **kwargs: 'method' selects the query method; the rest is forwarded
            to make_name_model

    Returns:
        tuple: (model, evidence, query_results)
    """
    # Avoid shared mutable default arguments for the evidence lists.
    score_evidence = [] if score_evidence is None else score_evidence
    name_evidence = [] if name_evidence is None else name_evidence
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=['match', 'score'])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=['match', 'score'],
                           title='Soft Evidence', color='green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        # Query the name variables that are not already pinned by evidence
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds['name'], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # FIX: previously `query_results['factor_list']` raised a KeyError
    # whenever the no-query branch was taken (noquery=True or no evidence).
    factor_list = query_results.get('factor_list', [])

    if verbose:
        print('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype
                    for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            print('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        print('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        print(ut.align('\n'.join(tmp), ' :'))
        print('L_____\n')

    showkw = dict(evidence=evidence, soft_evidence=soft_evidence,
                  **query_results)
    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
def temp_model(num_annots, num_names, score_evidence=None, name_evidence=None,
               other_evidence={}, noquery=False, verbose=None, **kwargs):
    """
    Build a name model, apply the given evidence, optionally run a cluster
    query, and visualize the result.

    Args:
        num_annots (int): number of annotation nodes
        num_names (int): number of name states
        score_evidence (list): observed score states (default: none)
        name_evidence (list): observed name states (default: none)
        other_evidence (dict): extra variable -> state assignments;
            read-only here, so a shared default dict is acceptable
        noquery (bool): if True, skip inference entirely
        verbose (bool): defaults to ut.VERBOSE
        **kwargs: 'method' selects the query method; the rest is forwarded
            to make_name_model

    Returns:
        tuple: (model, evidence, query_results)
    """
    # Avoid shared mutable default arguments for the evidence lists.
    score_evidence = [] if score_evidence is None else score_evidence
    name_evidence = [] if name_evidence is None else name_evidence
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE],
                           title='Soft Evidence', color='green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        # Query the name variables that are not already pinned by evidence
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # FIX: previously `query_results['factor_list']` raised a KeyError
    # whenever the no-query branch was taken (noquery=True or no evidence).
    factor_list = query_results.get('factor_list', [])

    if verbose:
        logger.info('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype
                    for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            logger.info('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        logger.info('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        logger.info(ut.align('\n'.join(tmp), ' :'))
        logger.info('L_____\n')

    showkw = dict(evidence=evidence, soft_evidence=soft_evidence,
                  **query_results)
    from wbia.algo.hots import pgm_viz

    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
def make_name_model(
    num_annots,
    num_names=None,
    verbose=True,
    mode=1,
    num_scores=2,
    p_score_given_same=None,
    hack_score_only=False,
    score_basis=None,
    special_names=None,
):
    r"""
    Build the pgmpy graphical model relating annotations, names, pairwise
    match variables, and match scores.

    Args:
        num_annots (int): number of annotation (name) nodes to create
        num_names (int): size of each name variable's state space
            (defaults to num_annots)
        verbose (bool): print CPD templates after construction
        mode (int): only mode 1 is supported
        num_scores (int): number of discrete score states
        p_score_given_same: P(score | same-name); defaults to a normalized
            cumulative-sum ramp over num_scores states
        hack_score_only (int/bool): if truthy, keep only the last
            `hack_score_only` match-pair indices
        score_basis: optional continuous basis used to label score states
        special_names: special basis pool for name states
            (defaults to SPECIAL_BASIS_POOL)

    Returns:
        model: pgm_ext model with `num_names` attached

    CommandLine:
        python -m wbia.algo.hots.bayes --exec-make_name_model --no-cnn
        python -m wbia.algo.hots.bayes --exec-make_name_model --show --no-cnn
        python -m wbia.algo.hots.bayes --exec-make_name_model --num-annots=3

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.bayes import *  # NOQA
        >>> defaults = dict(num_annots=2, num_names=2, verbose=True)
        >>> modeltype = ut.get_argval('--modeltype', default='bayes')
        >>> kw = ut.argparse_funckw(make_name_model, defaults)
        >>> model = make_name_model(**kw)
        >>> ut.quit_if_noshow()
        >>> model.show_model(show_prior=False, show_title=False, modeltype=modeltype)
        >>> ut.show_if_requested()
    """
    if special_names is None:
        special_names = SPECIAL_BASIS_POOL

    assert mode == 1, 'only can do mode 1'
    # Annot ids are generated as a character range ('a', 'b', ...)
    base = ut.get_argval('--base', type_=str, default='a')
    annots = ut.chr_range(num_annots, base=base)
    # The indexes of match CPDs will not change if another annotation is added
    upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2)
    if hack_score_only:
        upper_diag_idxs = upper_diag_idxs[-hack_score_only:]

    if num_names is None:
        num_names = num_annots

    # +--- Define CPD Templates and Instantiation ---
    cpd_list = []

    # Name Factor: one name variable per annotation
    name_cpd_t = pgm_ext.TemplateCPD(NAME_TTYPE, ('n', num_names),
                                     special_basis_pool=special_names)
    name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
    # name_cpds = [name_cpd_t.new_cpd(parents=aid, constrain_state=count)
    #              for count, aid in enumerate(annots, start=1)]
    cpd_list.extend(name_cpds)

    # Match Factor: deterministic pmf — 'same' iff the two names agree
    def match_pmf(match_type, n1, n2):
        return {
            True: {
                'same': 1.0,
                'diff': 0.0
            },
            False: {
                'same': 0.0,
                'diff': 1.0
            }
        }[n1 == n2][match_type]

    match_states = ['diff', 'same']
    match_cpd_t = pgm_ext.TemplateCPD(
        MATCH_TTYPE,
        match_states,
        evidence_ttypes=[name_cpd_t, name_cpd_t],
        pmf_func=match_pmf,
    )
    # match_cpd_t.varpref = 'S'
    # One match variable per upper-diagonal pair of name variables
    namepair_cpds = ut.unflat_take(name_cpds, upper_diag_idxs)
    match_cpds = [match_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds]
    cpd_list.extend(match_cpds)

    # Score Factor: observed score depends on the latent match state
    score_states = list(range(num_scores))
    if score_basis is not None:
        score_states = ['%.2f' % (s, ) for s in score_basis]
    if p_score_given_same is None:
        # Default: monotonically increasing, normalized ramp
        tmp = np.arange(num_scores + 1)[1:]
        tmp = np.cumsum(tmp)
        tmp = tmp / tmp.sum()
        p_score_given_same = tmp

    def score_pmf(score_type, match_type):
        if isinstance(score_type, six.string_types):
            score_type = score_states.index(score_type)
        if match_type == 'same':
            return p_score_given_same[score_type]
        else:
            # 'diff' uses the mirrored distribution
            return p_score_given_same[-(score_type + 1)]

    score_cpd_t = pgm_ext.TemplateCPD(SCORE_TTYPE,
                                      score_states,
                                      evidence_ttypes=[match_cpd_t],
                                      pmf_func=score_pmf)
    # match_cpd_t.varpref = 'P'
    # zip wraps each match cpd in a 1-tuple of parents
    score_cpds = [
        score_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)
    ]
    cpd_list.extend(score_cpds)

    # Optional (disabled) human-review factor
    with_humans = False
    if with_humans:
        human_states = ['diff', 'same']
        human_cpd_t = pgm_ext.TemplateCPD(
            'human',
            human_states,
            evidence_ttypes=[match_cpd_t],
            pmf_func=[[0.9, 0.1], [0.1, 0.9]],
        )
        human_cpds = [
            human_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)
        ]
        cpd_list.extend(human_cpds)

    # Optional (disabled) rank factor
    with_rank = False  # Rank depends on dependant scores
    if with_rank:
        rank_states = ['0', '1', '2', '3']
        rank_cpd_t = pgm_ext.TemplateCPD('rank',
                                         rank_states,
                                         evidence_ttypes=[match_cpd_t],
                                         pmf_func=None)
        rank_cpds = [
            rank_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)
        ]
        cpd_list.extend(rank_cpds)
    # L___ End CPD Definitions ___

    logger.info('score_cpds = %r' % (ut.list_getattr(score_cpds, 'variable'), ))

    # Make Model
    model = pgm_ext.define_model(cpd_list)
    model.num_names = num_names

    if verbose:
        model.print_templates(ignore_ttypes=[MATCH_TTYPE])
    return model
def draw_bayesian_model(model, evidence={}, soft_evidence={}, fnum=None,
                        pnum=None, **kwargs):
    """
    Draw the Bayesian name model with plottool/networkx, annotating nodes
    with inference results and (when inference ran) score/name colorbars.

    Args:
        model: pgm model; converted via to_bayesian_model() if needed
        evidence (dict): hard evidence (used for coloring/title)
        soft_evidence (dict): soft evidence (used for coloring)
        fnum, pnum: plottool figure/plot numbers
        **kwargs: may include 'factor_list', 'top_assignments', 'map_assign',
            'method', 'show_title'; the rest forwards to get_node_viz_attrs

    NOTE(review): uses the networkx 1.x set_edge_attributes/set_node_attributes
    call signature (name before values) — incompatible with networkx >= 2.
    """
    from pgmpy.models import BayesianModel
    if not isinstance(model, BayesianModel):
        model = model.to_bayesian_model()

    import plottool as pt
    import networkx as nx
    # Copy so popping 'factor_list' does not mutate the caller's kwargs
    kwargs = kwargs.copy()
    factor_list = kwargs.pop('factor_list', [])

    ttype_colors, ttype_scalars = make_colorcodes(model)

    textprops = {
        'horizontalalignment': 'left',
        'family': 'monospace',
        'size': 8,
    }

    # build graph attrs
    tup = get_node_viz_attrs(
        model, evidence, soft_evidence, factor_list, ttype_colors, **kwargs)
    node_color, pos_list, pos_dict, takws = tup

    # draw graph
    has_infered = evidence or 'factor_list' in kwargs

    if False:
        # Dead alternative: plain networkx drawing (kept for reference)
        fig = pt.figure(fnum=fnum, pnum=pnum, doclf=True)  # NOQA
        ax = pt.gca()
        drawkw = dict(pos=pos_dict, ax=ax, with_labels=True, node_size=1100,
                      node_color=node_color)
        nx.draw(model, **drawkw)
    else:
        # BE VERY CAREFUL
        if 1:
            # Work on a plain DiGraph copy so layout hacks don't touch the model
            graph = model.copy()
            graph.__class__ = nx.DiGraph
            graph.graph['groupattrs'] = ut.ddict(dict)
            # graph = model.
            if getattr(graph, 'ttype2_cpds', None) is not None:
                # Add invis edges and ttype groups so graphviz ranks each
                # ttype's nodes on the same row, in CPD order
                for ttype in model.ttype2_cpds.keys():
                    ttype_cpds = model.ttype2_cpds[ttype]
                    # use defined ordering
                    ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
                    # ttype_nodes = sorted(ttype_nodes)
                    invis_edges = list(ut.itertwo(ttype_nodes))
                    graph.add_edges_from(invis_edges)
                    nx.set_edge_attributes(
                        graph, 'style',
                        {edge: 'invis' for edge in invis_edges})
                    nx.set_node_attributes(
                        graph, 'groupid',
                        {node: ttype for node in ttype_nodes})
                    graph.graph['groupattrs'][ttype]['rank'] = 'same'
                    graph.graph['groupattrs'][ttype]['cluster'] = False
        else:
            graph = model
        pt.show_nx(graph, layout_kw={'prog': 'dot'}, fnum=fnum, pnum=pnum,
                   verbose=0)
        pt.zoom_factory()
        fig = pt.gcf()
        ax = pt.gca()
        pass
    # Deferred text-annotation draw calls (run after limits are set)
    hacks = [pt.draw_text_annotations(textprops=textprops, **takw)
             for takw in takws if takw]

    xmin, ymin = np.array(pos_list).min(axis=0)
    xmax, ymax = np.array(pos_list).max(axis=0)

    if 'name' in model.ttype2_template:
        # Hand-tuned figure sizes / margins depending on problem size
        num_names = len(model.ttype2_template['name'].basis)
        num_annots = len(model.ttype2_cpds['name'])
        if num_annots > 4:
            ax.set_xlim((xmin - 40, xmax + 40))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(30, 7)
        else:
            ax.set_xlim((xmin - 42, xmax + 42))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(23, 7)
        title = 'num_names=%r, num_annots=%r' % (num_names, num_annots,)
    else:
        title = ''
    map_assign = kwargs.get('map_assign', None)

    def word_insert(text):
        # Append a space only when the text is nonempty
        return '' if len(text) == 0 else text + ' '

    top_assignments = kwargs.get('top_assignments', None)
    if top_assignments is not None:
        # The first top assignment is the MAP estimate
        map_assign, map_prob = top_assignments[0]
        if map_assign is not None:
            title += '\n%sMAP: ' % (word_insert(kwargs.get('method', '')))
            title += map_assign + ' @' + '%.2f%%' % (100 * map_prob,)
    if kwargs.get('show_title', True):
        pt.set_figtitle(title, size=14)

    # Run the deferred annotation draws now
    for hack in hacks:
        hack()

    if has_infered:
        # Hack in colorbars
        # if ut.list_type(basis) is int:
        #     pt.colorbar(scalars, colors, lbl='score',
        #                 ticklabels=np.array(basis) + 1)
        # else:
        #     pt.colorbar(scalars, colors, lbl='score', ticklabels=basis)
        keys = ['name', 'score']
        locs = ['left', 'right']
        for key, loc in zip(keys, locs):
            if key in ttype_colors:
                basis = model.ttype2_template[key].basis
                # scalars =
                colors = ttype_colors[key]
                scalars = ttype_scalars[key]
                pt.colorbar(scalars, colors, lbl=key, ticklabels=basis,
                            ticklocation=loc)