Example 1
def test_triangle_property():
    r"""
    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-test_triangle_property --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> result = test_triangle_property()
        >>> ut.show_if_requested()
    """
    constkw = dict(
        num_annots=3,
        num_names=3,
        name_evidence=[],
    )
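    # With Mab and Mac observed False, inspect the inferred belief over Mbc:
    # b and c may still match even though neither matches a.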
    test_model(
        mode=1,
        other_evidence={
            'Mab': False,
            'Mac': False,
            #'Na': 'fred',
            #'Nb': 'sue',
        },
        **constkw)
Example 2
def demo_name_annot_complexity():
    """
    This demo is meant to show the structure of the graph as more annotations
    and names are added.

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-demo_name_annot_complexity --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> demo_name_annot_complexity()
        >>> ut.show_if_requested()
    """
    constkw = dict(score_evidence=[], name_evidence=[], mode=1)
    # Initially there are 2 annots and 4 names
    model, = test_model(num_annots=2, num_names=4, **constkw)
    draw_tree_model(model)
    # Adding a name causes the probability of the other names to go down
    model, = test_model(num_annots=2, num_names=5, **constkw)
    draw_tree_model(model)
    # Adding an annotation without matches does not affect the probabilities
    # of names
    model, = test_model(num_annots=3, num_names=5, **constkw)
    draw_tree_model(model)
    model, = test_model(num_annots=4, num_names=10, **constkw)
    draw_tree_model(model)
    # Given A annots, the number of score nodes is (A ** 2 - A) / 2
    model, = test_model(num_annots=5, num_names=5, **constkw)
    draw_tree_model(model)
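The comment above gives the pairwise-score count: with A annots there is one score node per unordered pair, i.e. A choose 2. A quick standalone sanity check of that arithmetic (plain Python, no ibeis needed):

# One score node per unordered pair of annots: C(A, 2) = (A ** 2 - A) / 2
for A in range(2, 6):
    pairs = [(i, j) for i in range(A) for j in range(i + 1, A)]
    assert len(pairs) == (A ** 2 - A) // 2
    print(A, len(pairs))  # 2 -> 1, 3 -> 3, 4 -> 6, 5 -> 10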
Example 3
def demo_single_add():
    """
    This demo shows how a name is assigned to a new annotation.

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-demo_single_add --show --present --mode=1

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> demo_single_add()
        >>> ut.show_if_requested()
    """
    # Initially there are only two annotations that have a strong match
    name_evidence = [{0: .9}]  # Soft label (overridden by the hard label below)
    name_evidence = [0]  # Hard label
    test_model(num_annots=2, num_names=5, score_evidence=[1], name_evidence=name_evidence)
    # Adding a new annotation does not change the original probabilities
    test_model(num_annots=3, num_names=5, score_evidence=[1], name_evidence=name_evidence)
    # Adding evidence that Na matches Nc does not influence the probability
    # that Na matches Nb. However, the probability that Nb matches Nc goes up.
    test_model(num_annots=3, num_names=5, score_evidence=[1, 1], name_evidence=name_evidence)
    # However, once b is scored against c, the likelihood that all 3 are Fred
    # goes up significantly.
    test_model(num_annots=3, num_names=5, score_evidence=[1, 1, 1],
               name_evidence=name_evidence)
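The soft label {0: .9} above puts 90% of the probability mass on name 0, while the hard label 0 observes the name outright. How test_model spreads the remaining soft mass is internal to it; the sketch below assumes a uniform spread over the other names, which is one plausible reading:

# Hypothetical expansion of a soft name label into a full distribution.
# The uniform-spread assumption is ours, not necessarily test_model's.
def expand_soft_label(soft, num_names):
    rest = (1.0 - sum(soft.values())) / (num_names - len(soft))
    return [soft.get(n, rest) for n in range(num_names)]

print(expand_soft_label({0: .9}, 5))  # -> [0.9, 0.025, 0.025, 0.025, 0.025] (up to float rounding)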
Example 4
def demo_conflicting_evidence():
    """
    Notice that the number of annotations in the graph does not affect the
    probability of names.
    """
    # Initialize with two annots; each is fairly confident it is a different name
    constkw = dict(num_annots=2, num_names=5, score_evidence=[])
    test_model(name_evidence=[{0: .9}, {1: .9}], **constkw)
    # Having evidence that they are different increases this confidence.
    test_model(name_evidence=[{0: .9}, {1: .9}], other_evidence={'Sab': 0}, **constkw)
    # However, confusion is introduced if there is evidence that they are the same
    test_model(name_evidence=[{0: .9}, {1: .9}], other_evidence={'Sab': 1}, **constkw)
    # When Na is forced to be Fred, this doesn't change Nb's evaluation by more
    # than a few points
    test_model(name_evidence=[0, {1: .9}], other_evidence={'Sab': 1}, **constkw)
Example 5
def demo_modes():
    """
    Look at the last result of the different-names demo under different modes
    """
    constkw = dict(
        num_annots=4,
        num_names=8,
        score_evidence=[1, 0, 0, 0, 0, 1],
        #name_evidence=[{0: .9}, None, None, {1: .9}],
        #name_evidence=[0, None, None, 1],
        name_evidence=[0, None, None, None],
        #other_evidence={
        #    'Sad': 0,
        #    'Sab': 1,
        #    'Scd': 1,
        #    'Sac': 0,
        #    'Sbc': 0,
        #    'Sbd': 0,
        #}
    )
    # The first mode uses a hidden Match layer
    test_model(mode=1, **constkw)
    # The second mode directly maps names to scores
    test_model(mode=2, **constkw)
    test_model(mode=3, noquery=True, **constkw)
    test_model(mode=4, noquery=True, **constkw)
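The two comments above describe different graph structures. As a rough sketch of the difference, using the Na/Mab/Sab node-naming convention that appears throughout these demos (the actual topology is built inside test_model):

# Mode 1: names drive a hidden Match node, which drives the observed score
mode1_edges = [('Na', 'Mab'), ('Nb', 'Mab'), ('Mab', 'Sab')]
# Mode 2: names drive the observed score directly, with no Match layer
mode2_edges = [('Na', 'Sab'), ('Nb', 'Sab')]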
Example 6
def demo_model_idependencies():
    """
    Independencies of the 3-annot, 3-name model

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-demo_model_idependencies --mode=1 --num-names=2 --show
        python -m ibeis.algo.hots.demobayes --exec-demo_model_idependencies --mode=2

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> result = demo_model_idependencies()
        >>> print(result)
        >>> ut.show_if_requested()
    """
    num_names = ut.get_argval('--num-names', default=3)
    model = test_model(num_annots=num_names,
                       num_names=num_names,
                       score_evidence=[],
                       name_evidence=[])[0]
    # This model has the following independencies
    idens = model.get_independencies()

    iden_strs = [
        ', '.join(sorted(iden.event1)) + ' _L ' +
        ','.join(sorted(iden.event2)) + ' | ' + ', '.join(sorted(iden.event3))
        for iden in idens.independencies
    ]
    print('general independencies')
    print(ut.align(ut.align('\n'.join(sorted(iden_strs)), '_'), '|'))
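Since test_model returns a pgmpy model and get_independencies works from graph structure alone (d-separation), the same call can be tried on a toy network without any CPDs. A minimal sketch, assuming pgmpy is installed (the class is named BayesianNetwork in recent releases, BayesianModel in older ones):

from pgmpy.models import BayesianModel  # BayesianNetwork in newer pgmpy

# Two name nodes sharing a score child: the classic v-structure
toy = BayesianModel([('Na', 'Sab'), ('Nb', 'Sab')])
for iden in toy.get_independencies().independencies:
    # Each assertion carries the event1/event2/event3 fields used above
    print(iden)  # e.g. (Na _|_ Nb)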
Example 7
def demo_ambiguity():
    r"""
    Test what happens when an annotation needs to choose between one of two
    names

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-demo_ambiguity --show --verbose --present

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> result = demo_ambiguity()
        >>> ut.show_if_requested()
    """
    constkw = dict(
        num_annots=3,
        num_names=3,
        name_evidence=[0],
        #name_evidence=[],
        #name_evidence=[{0: '+eps'}, {1: '+eps'}, {2: '+eps'}],
    )
    test_model(score_evidence=[0, 0, 1], mode=1, **constkw)
Example 8
def classify_one_new_unknown():
    r"""
    Make a model that knows who the previous annots are and tries to classify a new annot

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-classify_one_new_unknown --verbose
        python -m ibeis.algo.hots.demobayes --exec-classify_one_new_unknown --show --verbose --present
        python3 -m ibeis.algo.hots.demobayes --exec-classify_one_new_unknown --verbose
        python3 -m ibeis.algo.hots.demobayes --exec-classify_one_new_unknown --verbose --diskshow --verbose --present --save demo5.png --dpath . --figsize=20,10 --dpi=128 --clipwhite

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> result = classify_one_new_unknown()
        >>> ut.show_if_requested()
    """
    if False:
        constkw = dict(
            num_annots=5,
            num_names=3,
            name_evidence=[0]
            #name_evidence=[0, 0, 1, 1, None],
            #name_evidence=[{0: .99}, {0: .99}, {1: .99}, {1: .99}, None],
            #name_evidence=[0, {0: .99}, {1: .99}, 1, None],
        )
        test_model(score_evidence=[1, 0, 0, 0, 0, 1], mode=1, **constkw)

    #from ibeis.algo.hots.demobayes import *
    constkw = dict(
        num_annots=4,
        num_names=4,
    )
    model, evidence = test_model(
        mode=1,
        # lll and llh have strikingly different
        # probability of M marginals
        score_evidence=[0, 0, 1],
        other_evidence={},
        **constkw)
Example 9
def demo_structure():
    r"""
    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-demo_structure --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> result = demo_structure()
        >>> ut.show_if_requested()
    """
    constkw = dict(score_evidence=[], name_evidence=[], mode=3)
    model, = test_model(num_annots=4, num_names=4, **constkw)
    draw_tree_model(model)
Example 10
def demo_bayesnet(cfg={}):
    r"""
    Make a model that knows who the previous annots are and tries to classify a new annot

    CommandLine:
        python -m ibeis --tf demo_bayesnet --diskshow --verbose --save demo4.png --dpath . --figsize=20,10 --dpi=128 --clipwhite

        python -m ibeis --tf demo_bayesnet --ev :nA=3,Sab=0,Sac=0,Sbc=1
        python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Sbd=1 --show
        python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Scd=1 --show
        python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Sbd=1,Scd=1 --show

        python -m ibeis --tf demo_bayesnet --ev :nA=3,Sab=0,Sac=0,Sbc=1
        python -m ibeis --tf demo_bayesnet --ev :nA=5,rand_scores=True --show

        python -m ibeis --tf demo_bayesnet --ev :nA=4,nS=3,rand_scores=True --show --verbose
        python -m ibeis --tf demo_bayesnet --ev :nA=5,nS=2,Na=fred,rand_scores=True --show --verbose
        python -m ibeis --tf demo_bayesnet --ev :nA=5,nS=5,Na=fred,rand_scores=True --show --verbose
        python -m ibeis --tf demo_bayesnet --ev :nA=4,nS=2,Na=fred,rand_scores=True --show --verbose

        python -m ibeis.algo.hots.demobayes --exec-demo_bayesnet \
                --ev =:nA=4,Sab=0,Sac=0,Sbc=1 \
                :Sbd=1 :Scd=1 :Sbd=1,Scd=1 :Sbd=1,Scd=1,Sad=0 \
                --show --present

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> cfg_list = testdata_demo_cfgs()
        >>> print('cfg_list = %r' % (cfg_list,))
        >>> for cfg in cfg_list:
        >>>     demo_bayesnet(cfg)
        >>> ut.show_if_requested()
    """
    cfg = cfg.copy()
    num_annots = cfg.pop('num_annots', 3)
    num_names = cfg.pop('num_names', None)
    num_scores = cfg.pop('num_scores', 2)
    rand_scores = cfg.pop('rand_scores', False)
    method = cfg.pop('method', 'bp')
    other_evidence = {k: v for k, v in cfg.items() if not k.startswith('_')}
    if rand_scores:
        #import randomdotorg
        #import sys
        #r = randomdotorg.RandomDotOrg('ExampleCode')
        #seed = int((1 - 2 * r.random()) * sys.maxint)
        toy_data = get_toy_data_1v1(num_annots, nid_sequence=[0, 0, 1, 0, 1, 2])
        print('toy_data = ' + ut.repr3(toy_data, nl=1))
        diag_scores, = ut.dict_take(
            toy_data, 'diag_scores'.split(', '))
        discr_domain, discr_p_same = learn_prob_score(num_scores)[0:2]
        def discretize_scores(scores):
            # Assign continuous scores to discrete index
            score_idxs = np.abs(1 - (discr_domain / scores[:, None])).argmin(axis=1)
            return score_idxs
        score_evidence = discretize_scores(diag_scores)
    else:
        score_evidence = []
        discr_p_same = None
        discr_domain = None
    model, evidence, query_results = test_model(
        num_annots=num_annots, num_names=num_names,
        num_scores=num_scores,
        score_evidence=score_evidence,
        mode=1,
        other_evidence=other_evidence,
        p_score_given_same=discr_p_same,
        score_basis=discr_domain,
        method=method,
    )
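The discretize_scores helper assigns each continuous score to the bin whose basis value is closest in ratio terms: |1 - domain/score| is smallest where domain is nearest the score. A standalone illustration with made-up values (only numpy required):

import numpy as np

discr_domain = np.array([1.0, 2.0, 4.0, 8.0])  # made-up score basis
scores = np.array([1.2, 3.5, 9.0])             # made-up continuous scores
idxs = np.abs(1 - (discr_domain / scores[:, None])).argmin(axis=1)
print(idxs)  # -> [0 2 3]: 1.2 ~ 1.0, 3.5 ~ 4.0, 9.0 ~ 8.0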
Example 11
def classify_k(cfg={}):
    """
    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-classify_k --show --ev :nA=3
        python -m ibeis.algo.hots.demobayes --exec-classify_k --show --ev :nA=3,k=1
        python -m ibeis.algo.hots.demobayes --exec-classify_k --show --ev :nA=3,k=0 --method=approx
        python -m ibeis.algo.hots.demobayes --exec-classify_k --show --ev :nA=10,k=1 --method=approx

    Example:
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> cfg_list = testdata_demo_cfgs()
        >>> classify_k(cfg_list[0])
        >>> ut.show_if_requested()
    """
    cfg = cfg.copy()
    num_annots = cfg.pop('num_annots', 3)
    num_scores = cfg.pop('num_scores', 2)
    num_iter = cfg.pop('k', 0)
    nid_sequence = np.array([0, 0, 1, 2, 2, 1, 1])
    toy_data = get_toy_data_1v1(num_annots, nid_sequence=nid_sequence)
    force_evidence = None
    force_evidence = 0  # overrides the line above; set to None to disable forcing
    diag_scores, = ut.dict_take(toy_data, 'diag_scores'.split(', '))

    #print('diag_scores = %r' % (diag_scores,))
    #diag_labels = pairwise_matches.compress(is_diag)
    #diag_pairs = ut.compress(pairwise_aidxs, is_diag)

    discr_domain, discr_p_same = learn_prob_score(num_scores)[0:2]

    def discretize_scores(scores):
        # Assign continuous scores to closest discrete index
        score_idxs = np.abs(1 -
                            (discr_domain / scores[:, None])).argmin(axis=1)
        return score_idxs

    # Careful: the ordering of the evidence is important here
    score_evidence = discretize_scores(diag_scores)
    if force_evidence is not None:
        for x in range(len(score_evidence)):
            score_evidence[x] = 0

    model, evidence, query_results = test_model(
        num_annots=num_annots,
        num_names=num_annots,
        num_scores=num_scores,
        mode=1,
        score_evidence=score_evidence,
        p_score_given_same=discr_p_same,
        score_basis=discr_domain,
        #verbose=True
    )
    print(query_results['top_assignments'][0])
    toy_data1 = toy_data
    print('toy_data1 = ' + ut.repr3(toy_data1, nl=1))
    num_annots2 = num_annots + 1
    score_evidence1 = [None] * len(score_evidence)
    full_evidence = score_evidence.tolist()

    factor_list = query_results['factor_list']
    using_soft = False
    if using_soft:
        soft_evidence1 = [
            dict(zip(x.statenames[0], x.values)) for x in factor_list
        ]

    for _ in range(num_iter):
        print('\n\n ---------- \n\n')
        #toy_data1['all_nids'].max() + 1
        num_names_gen = len(toy_data1['all_aids']) + 1
        num_names_gen = toy_data1['all_nids'].max() + 2
        toy_data2 = get_toy_data_1v1(1,
                                     num_names_gen,
                                     initial_aids=toy_data1['all_aids'],
                                     initial_nids=toy_data1['all_nids'],
                                     nid_sequence=nid_sequence)
        diag_scores2, = ut.dict_take(toy_data2, 'diag_scores'.split(', '))
        print('toy_data2 = ' + ut.repr3(toy_data2, nl=1))

        score_evidence2 = discretize_scores(diag_scores2).tolist()
        if force_evidence is not None:
            for x in range(len(score_evidence2)):
                score_evidence2[x] = force_evidence
        print('score_evidence2 = %r' % (score_evidence2, ))

        if using_soft:
            # Demo with soft evidence
            model, evidence, query_results2 = test_model(
                num_annots=num_annots2,
                num_names=num_annots2,
                num_scores=num_scores,
                mode=1,
                name_evidence=soft_evidence1,
                #score_evidence=score_evidence1 + score_evidence2,
                score_evidence=score_evidence2,
                p_score_given_same=discr_p_same,
                score_basis=discr_domain,
                #verbose=True,
                hack_score_only=len(score_evidence2),
            )

        if 1:
            # Demo with full evidence
            model, evidence, query_results2 = test_model(
                num_annots=num_annots2,
                num_names=num_annots2,
                num_scores=num_scores,
                mode=1,
                score_evidence=full_evidence + score_evidence2,
                p_score_given_same=discr_p_same,
                score_basis=discr_domain,
                verbose=True)
        factor_list2 = query_results2['factor_list']
        if using_soft:
            soft_evidence1 = [
                dict(zip(x.statenames[0], x.values)) for x in factor_list2
            ]
        score_evidence1 += ([None] * len(score_evidence2))
        full_evidence = full_evidence + score_evidence2
        num_annots2 += 1
        toy_data1 = toy_data2
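Stripped of the model details, the loop above follows a simple incremental pattern: each iteration adds one annot, scores it against every existing annot, appends those scores to the running evidence, and re-runs inference over the enlarged model. A toy sketch of just that bookkeeping (stand-in values, no ibeis required):

# Evidence bookkeeping only; the prints stand in for re-running inference.
def incremental_demo(k=2, num_annots=3):
    full_evidence = [0] * ((num_annots ** 2 - num_annots) // 2)
    for _ in range(k):
        num_annots += 1
        new_scores = [0] * (num_annots - 1)  # new annot vs each existing annot
        full_evidence += new_scores
        print(num_annots, len(full_evidence))  # stays equal to C(num_annots, 2)

incremental_demo()  # prints 4 6, then 5 10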
Example 12
def demo_annot_idependence_overlap():
    r"""
    Given:
        * an unknown annotation \d
        * three annots with the same name (Fred) \a, \b, and \c
        * \a and \b are near duplicates
        * (\a and \c) / (\b and \c) are novel views

    Goal:
        * If \d matches to \a and \b, the probability that \d is Fred should not
          be much more than if \d matched only \a or only \b.

        * The probability that \d is Fred given it matches any one of the 3
          annots alone should be equal:

            P(\d is Fred | Mad=1) = P(\d is Fred | Mbd=1) = P(\d is Fred | Mcd=1)

        * The probability that \d is Fred given matches to any two of those
          annots should be greater than the probability given only one:

            P(\d is Fred | Mad=1, Mbd=1) > P(\d is Fred | Mad=1)
            P(\d is Fred | Mad=1, Mcd=1) > P(\d is Fred | Mad=1)

        * The probability that \d is Fred given matches to two near-duplicate
          annots should be less than if \d matches two non-duplicate annots:

            P(\d is Fred | Mad=1, Mcd=1) > P(\d is Fred | Mad=1, Mbd=1)

        * The probability that \d is Fred given two near-duplicate matches
          should be only epsilon greater than a match to either one individually:

            P(\d is Fred | Mad=1, Mbd=1) = P(\d is Fred | Mad=1) + \epsilon

    Method:

        We need to model the fact that there are other causes that can produce
        a high score, namely near duplicates.  This can be done by conditioning
        the score on whether the annots are near duplicates in addition to
        whether they match:

        P(S_ij | Mij) --> P(S_ij | Mij, Dij)

        where

        Dij is a random variable indicating whether the pair is a near duplicate.

        We can model this as an independent variable

        P(Dij) = {True: .5, False: .5}

        or as depending on whether the names match.

        P(Dij | Mij) = {'same': {True: .5, False: .5}, 'diff': {True: 0, False: 1}}

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-demo_annot_idependence_overlap --verbose --present --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> result = demo_annot_idependence_overlap()
        >>> ut.show_if_requested()
    """
    # We will end up making annots a and b Fred and annots c and d Sue
    constkw = dict(
        num_annots=4, num_names=4,
        name_evidence=[{0: '+eps'}, {1: '+eps'}, {2: '+eps'}, {3: '+eps'}],
        #name_evidence=[{0: .9}, None, None, {1: .9}]
        #name_evidence=[0, None, None, None]
    )
    test_model(score_evidence=[1, 1, 1, None, None, None], **constkw)
    test_model(score_evidence=[1, 1, 0, None, None, None], **constkw)
    test_model(score_evidence=[1, 0, 0, None, None, None], **constkw)
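The Method section above proposes extending P(S_ij | Mij) to P(S_ij | Mij, Dij). As a purely illustrative table of what such a CPT could look like (the numbers below are hypothetical, not taken from the codebase):

# Hypothetical CPT sketch for P(S_ij = high | M_ij, D_ij)
p_high_score = {
    # (same name, near duplicate): P(high score)
    (True, True): 0.99,   # near-duplicate views of the same animal score very high
    (True, False): 0.80,  # same animal, novel view
    (False, False): 0.10, # different animals
    (False, True): 0.00,  # impossible case under P(Dij | Mij) above
}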