Example 1
def possible_mana_combinations(land_list, deck=None):
    """

    CommandLine:
        python -m mtgmonte.mtgutils --test-possible_mana_combinations

    Example:
        >>> # ENABLE_DOCTEST
        >>> from mtgmonte.mtgutils import *  # NOQA
        >>> from mtgmonte import mtgobjs
        >>> deck = mtgobjs.Deck(mtgobjs.load_cards(['Tropical Island', 'Sunken Hollow', 'Island']))
        >>> land_list = mtgobjs.load_cards(['Ancient Tomb', 'Island', 'Flooded Strand', 'Flooded Strand', 'Shivan Reef'])
        >>> card = land_list[-1]
        >>> mana_combos = possible_mana_combinations(land_list, deck)
        >>> result = (ut.repr2(mana_combos, nl=1, strvals=True, nobraces=True))
        >>> print(result)
        ({CC}, {U}, {G}, {U}, {C}),
        ({CC}, {U}, {G}, {B}, {C}),
        ({CC}, {U}, {U}, {U}, {C}),
        ({CC}, {U}, {U}, {B}, {C}),
        ({CC}, {U}, {G}, {U}, {R}),
        ({CC}, {U}, {G}, {B}, {R}),
        ({CC}, {U}, {U}, {U}, {R}),
        ({CC}, {U}, {U}, {B}, {R}),
    """
    from mtgmonte import mtgobjs

    avail_mana = [land.mana_potential2(deck=deck, recurse=False) for land in land_list]
    avail_mana = filter(len, avail_mana)
    mana_combos1 = list(ut.iprod(*avail_mana))
    # Encode the idea that two fetches can't fetch the same land
    non_class1 = [[c for c in co if not isinstance(c, six.string_types)] for co in mana_combos1]
    flags = [len(co) == 0 or len(set(co)) == len(co) for co in non_class1]
    mana_combos2 = ut.compress(mana_combos1, flags)
    mana_combos3 = [
        [[c] if isinstance(c, mtgobjs.ManaSet) else c.mana_potential2(deck=deck) for c in co] for co in mana_combos2
    ]
    unflat_combos3 = [list(ut.iprod(*co)) for co in mana_combos3]
    mana_combos4 = ut.flatten(unflat_combos3)
    # mana_combos4 = [reduce(operator.add, m) for m in mana_combos4]
    # z = reduce(operator.add, m)
    # import utool
    # utool.embed()
    # avail_mana = [land.mana_potential(deck=deck) for land in land_list]
    # avail_mana = filter(len, avail_mana)
    # mana_combos4 = list(ut.iprod(*avail_mana))
    combo_ids = [tuple(sorted(x)) for x in mana_combos4]
    flags = ut.flag_unique_items(combo_ids)
    mana_combos = ut.compress(mana_combos4, flags)
    # mana_combos = list(map(tuple, [''.join(c) for c in mana_combos]))
    return mana_combos
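The pattern above — take a per-source list of options, expand the Cartesian product, then deduplicate combinations that differ only in order — is easy to isolate. A minimal sketch, assuming ut.iprod behaves like itertools.product and ut.flag_unique_items keeps the first occurrence of each duplicate:

import itertools

def unique_unordered_combos(pools):
    # One pick per source: full Cartesian product of the option lists
    combos = list(itertools.product(*pools))
    # Combos that differ only in order count as the same set of picks,
    # so key each combo by its sorted contents and keep first occurrences
    seen = set()
    unique = []
    for combo in combos:
        key = tuple(sorted(combo))
        if key not in seen:
            seen.add(key)
            unique.append(combo)
    return unique

print(unique_unordered_combos([['U', 'G'], ['U'], ['U', 'G']]))
# -> [('U', 'U', 'U'), ('U', 'U', 'G'), ('G', 'U', 'G')]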
Example 2
def get_subbin_xy_neighbors(subbin_index00, grid_steps, num_cols, num_rows):
    """ Generate all neighbor of a bin
    subbin_index00 = left and up subbin index
    """
    subbin_index00 = np.floor(subbin_index00).astype(np.int32)
    subbin_x0, subbin_y0 = subbin_index00
    step_list = np.arange(1 - grid_steps, grid_steps + 1)
    offset_list = [
        # broadcast to the shape we will add to
        np.array([xoff, yoff])[:, None]
        for xoff, yoff in list(ut.iprod(step_list, step_list))]
    neighbor_subbin_index_list = [
        np.add(subbin_index00, offset)
        for offset in offset_list
    ]
    # Concatenate all subbin indexes into one array for faster vectorized op
    neighbor_bin_indices = np.dstack(neighbor_subbin_index_list).T

    # Clip with no wraparound
    min_val = np.array([0, 0])
    max_val = np.array([num_cols - 1, num_rows - 1])

    np.clip(neighbor_bin_indices,
            min_val[None, None, :],
            max_val[None, None, :],
            out=neighbor_bin_indices)
    return neighbor_bin_indices
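For reference, the offset construction reduces to a product of step ranges plus a broadcasted add and a bounds clip. A self-contained sketch, assuming ut.iprod is itertools.product (grid_steps=1 yields the 3x3 neighborhood):

import itertools
import numpy as np

num_cols, num_rows = 4, 6
grid_steps = 1
step_list = np.arange(1 - grid_steps, grid_steps + 1)              # [-1, 0, 1]
offsets = np.array(list(itertools.product(step_list, step_list)))  # shape (9, 2)
center = np.array([0, 5])
neighbors = center + offsets          # broadcast add: one row per neighbor
# clamp to the grid bounds instead of wrapping around
neighbors = np.clip(neighbors, [0, 0], [num_cols - 1, num_rows - 1])
print(neighbors)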
Example 3
    def apply_hard_soft_evidence(cpd_list, evidence_list):
        # NOTE: ``evidence`` and ``soft_evidence`` are dicts presumably defined
        # in the enclosing scope; this helper mutates them in place.
        for cpd, ev in zip(cpd_list, evidence_list):
            if isinstance(ev, int):
                # hard internal evidence
                evidence[cpd.variable] = ev
            if isinstance(ev, six.string_types):
                # hard external evidence
                evidence[cpd.variable] = cpd._internal_varindex(
                    cpd.variable, ev)
            if isinstance(ev, dict):
                # soft external evidence
                # HACK THAT MODIFIES CPD IN PLACE
                def rectify_evidence_val(_v, card=cpd.variable_card):
                    # rectify hacky string structures
                    tmp = 1 / (2 * card**2)
                    return (1 + tmp) / (card + tmp) if _v == '+eps' else _v

                ev_ = ut.map_dict_vals(rectify_evidence_val, ev)
                fill = (1.0 - sum(ev_.values())) / (cpd.variable_card -
                                                    len(ev_))
                # HACK fix for float problems
                if len(ev_) == cpd.variable_card - 1:
                    fill = 0

                assert fill > -1e-7, 'fill=%r' % (fill, )
                row_labels = list(ut.iprod(*cpd.statenames))

                for i, lbl in enumerate(row_labels):
                    if lbl in ev_:
                        # external case1
                        cpd.values[i] = ev_[lbl]
                    elif len(lbl) == 1 and lbl[0] in ev_:
                        # external case2
                        cpd.values[i] = ev_[lbl[0]]
                    elif i in ev_:
                        # internal case
                        cpd.values[i] = ev_[i]
                    else:
                        cpd.values[i] = fill
                cpd.normalize()
                soft_evidence[cpd.variable] = True
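The fill rule deserves a one-line check: states not pinned by soft evidence share the leftover probability mass uniformly, so each column already sums to one before cpd.normalize() runs. A toy illustration with hypothetical state names:

card = 4
ev_ = {'a': 0.7}    # soft evidence pins one of four states
fill = (1.0 - sum(ev_.values())) / (card - len(ev_))
values = [ev_.get(s, fill) for s in ['a', 'b', 'c', 'd']]
print(values, sum(values))   # [0.7, 0.1, 0.1, 0.1] 1.0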
Example 4
    def alternatives(self):
        r"""
        Args:
            types (None): (default = None)

        Returns:
            list: list of alternative mana costs

        CommandLine:
            python -m mtgmonte.mtgobjs --exec-alternatives --show

        Example:
            >>> # ENABLE_DOCTEST
            >>> from mtgmonte.mtgobjs import *  # NOQA
            >>> self = ManaCost(tokenize_manacost('12WWUBRG(W/B)(2/G)(U/P)(U/P)'))
            >>> alt_costs = self.alternatives()
            >>> alt = alt_costs[0]
        """
        alt_sets = [ManaSet(combos)
                    for combos in ut.iprod(*[m.alternatives() for m in self._to_manas()])]
        alt_costs = [ManaCost(ut.flatten([m.to_tokens() for m in mset._manas]))
                     for mset in alt_sets]
        return alt_costs
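Stripped of the ManaSet/ManaCost wrappers, the expansion is a plain product over per-symbol alternative lists. A sketch with illustrative symbols, again assuming ut.iprod is itertools.product:

import itertools

# e.g. the cost W U (W/B) (U/P): hybrid symbols contribute several options
symbol_alternatives = [['W'], ['U'], ['W', 'B'], ['U', 'P']]
alt_costs = [''.join(combo) for combo in itertools.product(*symbol_alternatives)]
print(alt_costs)   # ['WUWU', 'WUWP', 'WUBU', 'WUBP']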
Example 5
    def apply_hard_soft_evidence(cpd_list, evidence_list):
        # NOTE: ``evidence`` and ``soft_evidence`` are dicts presumably defined
        # in the enclosing scope; this helper mutates them in place.
        for cpd, ev in zip(cpd_list, evidence_list):
            if isinstance(ev, int):
                # hard internal evidence
                evidence[cpd.variable] = ev
            if isinstance(ev, six.string_types):
                # hard external evidence
                evidence[cpd.variable] = cpd._internal_varindex(
                    cpd.variable, ev)
            if isinstance(ev, dict):
                # soft external evidence
                # HACK THAT MODIFIES CPD IN PLACE
                def rectify_evidence_val(_v, card=cpd.variable_card):
                    # rectify hacky string structures
                    tmp = (1 / (2 * card ** 2))
                    return (1 + tmp) / (card + tmp) if _v == '+eps' else _v
                ev_ = ut.map_dict_vals(rectify_evidence_val, ev)
                fill = (1.0 - sum(ev_.values())) / (cpd.variable_card - len(ev_))
                # HACK fix for float problems
                if len(ev_) == cpd.variable_card - 1:
                    fill = 0

                assert fill > -1E-7, 'fill=%r' % (fill,)
                row_labels = list(ut.iprod(*cpd.statenames))

                for i, lbl in enumerate(row_labels):
                    if lbl in ev_:
                        # external case1
                        cpd.values[i] = ev_[lbl]
                    elif len(lbl) == 1 and lbl[0] in ev_:
                        # external case2
                        cpd.values[i] = ev_[lbl[0]]
                    elif i in ev_:
                        # internal case
                        cpd.values[i] = ev_[i]
                    else:
                        cpd.values[i] = fill
                cpd.normalize()
                soft_evidence[cpd.variable] = True
Example 6
def try_query(model, infr, evidence, interest_ttypes=[], verbose=True):
    r"""
    CommandLine:
        python -m wbia.algo.hots.bayes --exec-try_query --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.bayes import *  # NOQA
        >>> verbose = True
        >>> other_evidence = {}
        >>> name_evidence = [1, None, 0, None]
        >>> score_evidence = ['high', 'low', 'low']
        >>> query_vars = None
        >>> model = make_name_model(num_annots=4, num_names=4, verbose=True, mode=1)
        >>> model, evidence, soft_evidence = update_model_evidence(model, name_evidence, score_evidence, other_evidence)
        >>> interest_ttypes = ['name']
        >>> infr = pgmpy.inference.BeliefPropagation(model)
        >>> evidence = infr._ensure_internal_evidence(evidence, model)
        >>> query_results = try_query(model, infr, evidence, interest_ttypes, verbose)
        >>> result = ('query_results = %s' % (str(query_results),))
        >>> ut.quit_if_noshow()
        >>> show_model(model, show_prior=True, **query_results)
        >>> ut.show_if_requested()

    Ignore:
        query_vars = ut.setdiff_ordered(model.nodes(), list(evidence.keys()))
        probs = infr.query(query_vars, evidence)
        map_assignment = infr.map_query(query_vars, evidence)
    """
    infr = pgmpy.inference.VariableElimination(model)  # NOTE: replaces the infr argument
    # infr = pgmpy.inference.BeliefPropagation(model)
    if True:
        # Short-circuit: the else branch below is unreachable exploratory
        # code, kept for reference.
        return bruteforce(model, query_vars=None, evidence=evidence)
    else:
        import vtool as vt

        query_vars = ut.setdiff_ordered(model.nodes(), list(evidence.keys()))
        # hack
        query_vars = ut.setdiff_ordered(
            query_vars, ut.list_getattr(model.ttype2_cpds['score'],
                                        'variable'))
        if verbose:
            evidence_str = ', '.join(model.pretty_evidence(evidence))
            logger.info('P(' + ', '.join(query_vars) + ' | ' + evidence_str +
                        ') = ')
        # Compute MAP joints
        # There is a bug here.
        # map_assign = infr.map_query(query_vars, evidence)
        # (probably an invalid thing to do)
        # joint_factor = pgmpy.factors.factor_product(*factor_list)
        # Brute force MAP

        name_vars = ut.list_getattr(model.ttype2_cpds['name'], 'variable')
        query_name_vars = ut.setdiff_ordered(name_vars, list(evidence.keys()))
        # TODO: incorporate case where Na is assigned to Fred
        # evidence_h = ut.delete_keys(evidence.copy(), ['Na'])

        joint = model.joint_distribution()
        joint.evidence_based_reduction(query_name_vars, evidence, inplace=True)

        # Find static row labels in the evidence
        given_name_vars = [var for var in name_vars if var in evidence]
        given_name_idx = ut.dict_take(evidence, given_name_vars)
        given_name_val = [
            joint.statename_dict[var][idx]
            for var, idx in zip(given_name_vars, given_name_idx)
        ]
        new_vals = joint.values.ravel()
        # Add static evidence variables to the relabeled name states
        new_vars = given_name_vars + joint.variables
        new_rows = [tuple(given_name_val) + row for row in joint._row_labels()]
        # Relabel rows based on the knowledge that
        # everything is the same, only the names have changed.
        temp_basis = [i for i in range(model.num_names)]

        def relabel_names(names, temp_basis=temp_basis):
            names = list(map(six.text_type, names))
            mapping = {}
            for n in names:
                if n not in mapping:
                    mapping[n] = len(mapping)
            new_names = tuple([temp_basis[mapping[n]] for n in names])
            return new_names

        relabeled_rows = list(map(relabel_names, new_rows))
        # Combine probability of rows with the same (new) label
        data_ids = np.array(vt.other.compute_unique_data_ids_(relabeled_rows))
        unique_ids, groupxs = vt.group_indices(data_ids)
        reduced_row_lbls = ut.take(relabeled_rows,
                                   ut.get_list_column(groupxs, 0))
        reduced_row_lbls = list(map(list, reduced_row_lbls))
        reduced_values = np.array(
            [g.sum() for g in vt.apply_grouping(new_vals, groupxs)])
        # Relabel the rows one more time to agree with initial constraints
        used_ = []
        replaced = []
        for colx, (var, val) in enumerate(zip(given_name_vars,
                                              given_name_val)):
            # All columns must be the same for this labeling
            alias = reduced_row_lbls[0][colx]
            reduced_row_lbls = ut.list_replace(reduced_row_lbls, alias, val)
            replaced.append(alias)
            used_.append(val)
        basis = model.ttype2_cpds['name'][0]._template_.basis
        find_remain_ = ut.setdiff_ordered(temp_basis, replaced)
        repl_remain_ = ut.setdiff_ordered(basis, used_)
        for find, repl in zip(find_remain_, repl_remain_):
            reduced_row_lbls = ut.list_replace(reduced_row_lbls, find, repl)

        # Now find the most likely state
        sortx = reduced_values.argsort()[::-1]
        sort_reduced_row_lbls = ut.take(reduced_row_lbls, sortx.tolist())
        sort_reduced_values = reduced_values[sortx]

        # Remove evidence based labels
        new_vars_ = new_vars[len(given_name_vars):]
        sort_reduced_row_lbls_ = ut.get_list_column(
            sort_reduced_row_lbls, slice(len(given_name_vars), None))

        # hack into a new joint factor
        var_states = ut.lmap(ut.unique_ordered, zip(*sort_reduced_row_lbls_))
        statename_dict = dict(zip(new_vars, var_states))
        cardinality = ut.lmap(len, var_states)
        val_lookup = dict(
            zip(ut.lmap(tuple, sort_reduced_row_lbls_), sort_reduced_values))
        values = np.zeros(np.prod(cardinality))
        for idx, state in enumerate(ut.iprod(*var_states)):
            if state in val_lookup:
                values[idx] = val_lookup[state]
        joint2 = pgmpy.factors.Factor(new_vars_,
                                      cardinality,
                                      values,
                                      statename_dict=statename_dict)
        logger.info(joint2)
        max_marginals = {}
        for i, var in enumerate(query_name_vars):
            one_out = query_name_vars[:i] + query_name_vars[i + 1:]
            max_marginals[var] = joint2.marginalize(one_out, inplace=False)
            # max_marginals[var] = joint2.maximize(one_out, inplace=False)
        logger.info(joint2.marginalize(['Nb', 'Nc'], inplace=False))
        factor_list = max_marginals.values()

        # Better map assignment based on knowledge of labels
        map_assign = dict(zip(new_vars_, sort_reduced_row_lbls_[0]))

        sort_reduced_rowstr_lbls = [
            ut.repr2(dict(zip(new_vars, lbls)),
                     explicit=True,
                     nobraces=True,
                     strvals=True) for lbls in sort_reduced_row_lbls_
        ]

        top_assignments = list(
            zip(sort_reduced_rowstr_lbls[:3], sort_reduced_values))
        if len(sort_reduced_values) > 3:
            top_assignments += [('other', 1 - sum(sort_reduced_values[:3]))]

        # import utool
        # utool.embed()

        # Compute all marginals
        # probs = infr.query(query_vars, evidence)
        # probs = infr.query(query_vars, evidence)
        # factor_list = probs.values()

        ## Marginalize over non-query, non-evidence
        # irrelevant_vars = ut.setdiff_ordered(joint.variables, list(evidence.keys()) + query_vars)
        # joint.marginalize(irrelevant_vars)
        # joint.normalize()
        # new_rows = joint._row_labels()
        # new_vals = joint.values.ravel()
        # map_vals = new_rows[new_vals.argmax()]
        # map_assign = dict(zip(joint.variables, map_vals))
        # Compute Marginalized MAP joints
        # marginalized_joints = {}
        # for ttype in interest_ttypes:
        #    other_vars = [v for v in joint_factor.scope()
        #                  if model.var2_cpd[v].ttype != ttype]
        #    marginal = joint_factor.marginalize(other_vars, inplace=False)
        #    marginalized_joints[ttype] = marginal
        query_results = {
            'factor_list': factor_list,
            'top_assignments': top_assignments,
            'map_assign': map_assign,
            'marginalized_joints': None,
        }
        return query_results
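The key trick in the dead branch above is relabel_names: rows of the joint that differ only by a permutation of name labels are mapped to a canonical labeling (first appearance defines the label), so their probabilities can be summed. A standalone sketch of that canonicalization:

def relabel_names(names):
    mapping = {}
    for n in map(str, names):
        if n not in mapping:
            mapping[n] = len(mapping)    # first-seen order defines the label
    return tuple(mapping[str(n)] for n in names)

print(relabel_names(['fred', 'sue', 'fred']))   # (0, 1, 0)
print(relabel_names(['sue', 'fred', 'sue']))    # (0, 1, 0) -- same partition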
Example 7
def flow():
    """
    http://pmneila.github.io/PyMaxflow/maxflow.html#maxflow-fastmin

    pip install PyMaxflow
    pip install pystruct
    pip install hdbscan
    """
    # Toy problem representing attempting to discover names via annotation
    # scores

    import pystruct  # NOQA
    import pystruct.models  # NOQA
    import networkx as netx  # NOQA

    import vtool as vt

    num_annots = 10
    num_names = num_annots
    hidden_nids = np.random.randint(0, num_names, num_annots)
    unique_nids, groupxs = vt.group_indices(hidden_nids)

    toy_params = {
        True: {
            'mu': 1.0,
            'sigma': 2.2
        },
        False: {
            'mu': 7.0,
            'sigma': 0.9
        }
    }

    if True:
        import vtool as vt
        import wbia.plottool as pt

        xdata = np.linspace(0, 100, 1000)
        tp_pdf = vt.gauss_func1d(xdata, **toy_params[True])
        fp_pdf = vt.gauss_func1d(xdata, **toy_params[False])
        pt.plot_probabilities([tp_pdf, fp_pdf], ['TP', 'FP'], xdata=xdata)

    def metric(aidx1, aidx2, hidden_nids=hidden_nids, toy_params=toy_params):
        if aidx1 == aidx2:
            return 0
        rng = np.random.RandomState(int(aidx1 + aidx2))
        same = hidden_nids[int(aidx1)] == hidden_nids[int(aidx2)]
        mu, sigma = ut.dict_take(toy_params[same], ['mu', 'sigma'])
        return np.clip(rng.normal(mu, sigma), 0, np.inf)

    pairwise_aidxs = list(ut.iprod(range(num_annots), range(num_annots)))
    pairwise_labels = np.array(
        [hidden_nids[a1] == hidden_nids[a2] for a1, a2 in pairwise_aidxs])
    pairwise_scores = np.array([metric(*zz) for zz in pairwise_aidxs])
    pairwise_scores_mat = pairwise_scores.reshape(num_annots, num_annots)
    if num_annots <= 10:
        logger.info(ut.repr2(pairwise_scores_mat, precision=1))

    # aids = list(range(num_annots))
    # g = netx.DiGraph()
    # g.add_nodes_from(aids)
    # g.add_edges_from([(tup[0], tup[1], {'weight': score}) for tup, score in zip(pairwise_aidxs, pairwise_scores) if tup[0] != tup[1]])
    # netx.draw_graphviz(g)
    # pr = netx.pagerank(g)

    X = pairwise_scores
    Y = pairwise_labels

    encoder = vt.ScoreNormalizer()
    encoder.fit(X, Y)
    encoder.visualize()

    # meanshift clustering
    import sklearn

    bandwidth = sklearn.cluster.estimate_bandwidth(
        X[:, None])  # , quantile=quantile, n_samples=500)
    assert bandwidth != 0, '[] bandwidth is 0. Cannot cluster'
    # bandwidth is with respect to the RBF used in clustering
    # ms = sklearn.cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True, cluster_all=True)
    ms = sklearn.cluster.MeanShift(bandwidth=bandwidth,
                                   bin_seeding=True,
                                   cluster_all=False)
    ms.fit(X[:, None])
    label_arr = ms.labels_
    unique_labels = np.unique(label_arr)
    max_label = max(0, unique_labels.max())
    num_orphans = (label_arr == -1).sum()
    label_arr[label_arr == -1] = np.arange(max_label + 1,
                                           max_label + 1 + num_orphans)

    X_data = np.arange(num_annots)[:, None].astype(np.int64)

    # graph = pystruct.models.GraphCRF(
    #    n_states=None,
    #    n_features=None,
    #    inference_method='lp',
    #    class_weight=None,
    #    directed=False,
    # )

    import scipy
    import scipy.cluster
    import scipy.cluster.hierarchy

    thresh = 2.0
    labels = scipy.cluster.hierarchy.fclusterdata(X_data,
                                                  thresh,
                                                  metric=metric)
    unique_lbls, lblgroupxs = vt.group_indices(labels)
    logger.info(groupxs)
    logger.info(lblgroupxs)
    logger.info('groupdiff = %r' %
                (ut.compare_groupings(groupxs, lblgroupxs), ))
    logger.info('common groups = %r' %
                (ut.find_grouping_consistencies(groupxs, lblgroupxs), ))
    # X_data, seconds_thresh, criterion='distance')

    # help(hdbscan.HDBSCAN)

    import hdbscan

    alg = hdbscan.HDBSCAN(metric=metric,
                          min_cluster_size=1,
                          p=1,
                          gen_min_span_tree=1,
                          min_samples=2)
    labels = alg.fit_predict(X_data)
    labels[labels == -1] = np.arange(np.sum(labels == -1)) + labels.max() + 1
    unique_lbls, lblgroupxs = vt.group_indices(labels)
    logger.info(groupxs)
    logger.info(lblgroupxs)
    logger.info('groupdiff = %r' %
                (ut.compare_groupings(groupxs, lblgroupxs), ))
    logger.info('common groups = %r' %
                (ut.find_grouping_consistencies(groupxs, lblgroupxs), ))

    # import ddbscan
    # help(ddbscan.DDBSCAN)
    # alg = ddbscan.DDBSCAN(2, 2)

    # D = np.zeros((len(aids), len(aids) + 1))
    # D.T[-1] = np.arange(len(aids))

    ## Can alpha-expansion be used when the pairwise potentials are not in a grid?

    # hidden_ut.group_items(aids, hidden_nids)
    if False:
        import maxflow

        # from maxflow import fastmin
        # Create a graph with integer capacities.
        g = maxflow.Graph[int](2, 2)
        # Add two (non-terminal) nodes. Get the index to the first one.
        nodes = g.add_nodes(2)
        # Create two edges (forwards and backwards) with the given capacities.
        # The indices of the nodes are always consecutive.
        g.add_edge(nodes[0], nodes[1], 1, 2)
        # Set the capacities of the terminal edges...
        # ...for the first node.
        g.add_tedge(nodes[0], 2, 5)
        # ...for the second node.
        g.add_tedge(nodes[1], 9, 4)
        g = maxflow.Graph[float](2, 2)
        g.maxflow()
        g.get_nx_graph()
        g.get_segment(nodes[0])
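Both clustering passes above relabel orphans the same way: every point labeled -1 (unclustered by MeanShift, noise for HDBSCAN) gets its own fresh singleton id rather than sharing one noise bucket. A small sketch of that step:

import numpy as np

labels = np.array([0, 1, -1, 1, -1, 2])
max_label = labels.max()
num_orphans = (labels == -1).sum()
labels[labels == -1] = np.arange(max_label + 1, max_label + 1 + num_orphans)
print(labels)   # [0 1 3 1 4 2]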
Example 8
def crftest():
    """
    pip install pyqpbo
    pip install pystruct

    http://taku910.github.io/crfpp/#install

    cd ~/tmp
    #wget https://drive.google.com/folderview?id=0B4y35FiV1wh7fngteFhHQUN2Y1B5eUJBNHZUemJYQV9VWlBUb3JlX0xBdWVZTWtSbVBneU0&usp=drive_web#list
    7z x CRF++-0.58.tar.gz
    7z x CRF++-0.58.tar
    cd CRF++-0.58
    chmod +x configure
    ./configure
    make

    """
    import pystruct
    import pystruct.models

    inference_method_options = ['lp', 'max-product']
    inference_method = inference_method_options[1]

    # graph = pystruct.models.GraphCRF(
    #    n_states=None,
    #    n_features=None,
    #    inference_method=inference_method,
    #    class_weight=None,
    #    directed=False,
    # )

    num_annots = 5
    num_names = num_annots

    aids = np.arange(5)
    rng = np.random.RandomState(0)
    hidden_nids = rng.randint(0, num_names, num_annots)
    unique_nids, groupxs = ut.group_indices(hidden_nids)

    # Indicator vector indicating the name
    node_features = np.zeros((num_annots, num_names))
    node_features[(aids, hidden_nids)] = 1

    toy_params = {True: {'mu': 1.0, 'sigma': 2.2}, False: {'mu': 7.0, 'sigma': 0.9}}
    if False:
        import vtool as vt
        import wbia.plottool as pt

        pt.ensureqt()
        xdata = np.linspace(0, 100, 1000)
        tp_pdf = vt.gauss_func1d(xdata, **toy_params[True])
        fp_pdf = vt.gauss_func1d(xdata, **toy_params[False])
        pt.plot_probabilities([tp_pdf, fp_pdf], ['TP', 'FP'], xdata=xdata)

    def metric(aidx1, aidx2, hidden_nids=hidden_nids, toy_params=toy_params):
        if aidx1 == aidx2:
            return 0
        rng = np.random.RandomState(int(aidx1 + aidx2))
        same = hidden_nids[int(aidx1)] == hidden_nids[int(aidx2)]
        mu, sigma = ut.dict_take(toy_params[same], ['mu', 'sigma'])
        return np.clip(rng.normal(mu, sigma), 0, np.inf)

    pairwise_aidxs = list(ut.iprod(range(num_annots), range(num_annots)))
    pairwise_labels = np.array(  # NOQA
        [hidden_nids[a1] == hidden_nids[a2] for a1, a2 in pairwise_aidxs]
    )
    pairwise_scores = np.array([metric(*zz) for zz in pairwise_aidxs])
    pairwise_scores_mat = pairwise_scores.reshape(num_annots, num_annots)  # NOQA

    graph = pystruct.models.EdgeFeatureGraphCRF(  # NOQA
        n_states=num_annots,
        n_features=num_names,
        n_edge_features=1,
        inference_method=inference_method,
    )

    import opengm

    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'))
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)

    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
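The node features here are one-hot name indicators built with a single fancy-indexing assignment. A minimal sketch of that construction with hypothetical ids:

import numpy as np

num_annots, num_names = 5, 5
aids = np.arange(num_annots)
hidden_nids = np.array([0, 2, 2, 1, 4])
node_features = np.zeros((num_annots, num_names))
node_features[aids, hidden_nids] = 1   # row i gets a 1 in column hidden_nids[i]
print(node_features)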
Example 9
def get_toy_data_1v1(num_annots=5, num_names=None, **kwargs):
    r"""
    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-get_toy_data_1v1 --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> toy_data = get_toy_data_1v1()
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> show_toy_distributions(toy_data['toy_params'])
        >>> ut.show_if_requested()

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> toy_data = get_toy_data_1v1()
        >>> kwargs = {}
        >>> initial_aids = toy_data['aids']
        >>> initial_nids = toy_data['nids']
        >>> num_annots = 1
        >>> num_names = 6
        >>> toy_data2 = get_toy_data_1v1(num_annots, num_names, initial_aids=initial_aids, initial_nids=initial_nids)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> show_toy_distributions(toy_data['toy_params'])
        >>> ut.show_if_requested()

    Ignore:
        >>> num_annots = 1000
        >>> num_names = 400
    """
    import vtool as vt
    tup_ = get_toy_annots(num_annots, num_names, **kwargs)
    aids, nids, aids1, nids1, all_aids, all_nids = tup_
    rng = vt.ensure_rng(None)

    def pairwise_feature(aidx1,
                         aidx2,
                         all_nids=all_nids,
                         toy_params=toy_params):
        if aidx1 == aidx2:
            score = -1
        else:
            #rng = np.random.RandomState(int((aidx1 + 13) * (aidx2 + 13)))
            nid1 = all_nids[int(aidx1)]
            nid2 = all_nids[int(aidx2)]
            params = toy_params[nid1 == nid2]
            mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
            score_ = rng.normal(mu, sigma)
            score = np.clip(score_, 0, np.inf)
        return score

    pairwise_nids = list([tup[::-1] for tup in ut.iprod(nids, nids1)])
    pairwise_matches = np.array([nid1 == nid2 for nid1, nid2 in pairwise_nids])

    pairwise_aidxs = list([tup[::-1] for tup in ut.iprod(aids, aids1)])

    pairwise_features = np.array(
        [pairwise_feature(aidx1, aidx2) for aidx1, aidx2 in pairwise_aidxs])

    #pairwise_scores_mat = pairwise_scores.reshape(num_annots, num_annots)
    is_diag = [r < c for r, c in pairwise_aidxs]
    diag_scores = pairwise_features.compress(is_diag)
    diag_aidxs = ut.compress(pairwise_aidxs, is_diag)
    import utool
    with utool.embed_on_exception_context:
        diag_nids = ut.compress(pairwise_nids, is_diag)
    diag_labels = pairwise_matches.compress(is_diag)

    #import utool
    #utool.embed()

    toy_data = {
        'aids': aids,
        'nids': nids,
        'all_nids': all_nids,
        'all_aids': all_aids,
        #'pairwise_aidxs': pairwise_aidxs,
        #'pairwise_scores': pairwise_scores,
        #'pairwise_matches': pairwise_matches,
        'diag_labels': diag_labels,
        'diag_scores': diag_scores,
        'diag_nids': diag_nids,
        'diag_aidxs': diag_aidxs,
        'toy_params': toy_params,
    }
    return toy_data
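The r < c filter near the end is doing upper-triangle selection: given index pairs from a full product, it keeps each unordered pair exactly once and drops self-pairs. A sketch, assuming ut.iprod is itertools.product:

import itertools

pairs = list(itertools.product(range(3), range(3)))
upper = [(r, c) for r, c in pairs if r < c]
print(upper)   # [(0, 1), (0, 2), (1, 2)]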
Example 10
    def new_cpd(self, parents=None, pmf_func=None):
        """
        Makes a new random variable that is an instance of this template

        parents : only used to define the name of this node.
        """
        if pmf_func is None:
            pmf_func = self.pmf_func

        # --- MAKE VARIABLE ID
        def _getid(obj):
            if isinstance(obj, int):
                return str(obj)
            elif isinstance(obj, six.string_types):
                return obj
            else:
                return obj._template_id

        if not ut.isiterable(parents):
            parents = [parents]

        template_ids = [_getid(cpd) for cpd in parents]
        HACK_SAME_IDS = True
        # TODO: keep track of parent index inheritance
        # then rectify uniqueness based on that
        if HACK_SAME_IDS and ut.allsame(template_ids):
            _id = template_ids[0]
        else:
            _id = ''.join(template_ids)
        variable = ''.join([self.varpref, _id])
        # variable = '_'.join([self.varpref, '{' + _id + '}'])
        # variable = '$%s$' % (variable,)

        evidence_cpds = [cpd for cpd in parents if hasattr(cpd, 'ttype')]
        if len(evidence_cpds) == 0:
            evidence_cpds = None

        variable_card = len(self.basis)
        statename_dict = {
            variable: self.basis,
        }
        if self.evidence_ttypes is not None:
            if any(cpd.ttype != tcpd.ttype
                   for cpd, tcpd in zip(evidence_cpds, self.evidence_ttypes)):
                raise ValueError('Evidence is not of appropriate type')
            evidence_bases = [cpd.variable_statenames for cpd in evidence_cpds]
            evidence_card = list(map(len, evidence_bases))
            evidence_states = list(ut.iprod(*evidence_bases))

            for cpd in evidence_cpds:
                _dict = ut.dict_subset(cpd.statename_dict, [cpd.variable])
                statename_dict.update(_dict)

            evidence = [cpd.variable for cpd in evidence_cpds]
        else:
            if evidence_cpds is not None:
                raise ValueError('Gave evidence for evidence-less template')
            evidence = None
            evidence_card = None

        # --- MAKE TABLE VALUES
        if pmf_func is not None:
            if isinstance(pmf_func, list):
                values = np.array(pmf_func)
            else:
                values = np.array([[
                    pmf_func(vstate, *estates) for estates in evidence_states
                ] for vstate in self.basis])
            ensure_normalized = True
            if ensure_normalized:
                values = values / values.sum(axis=0)
        else:
            # assume uniform
            fill_value = 1.0 / variable_card
            if evidence_card is None:
                values = np.full((1, variable_card), fill_value)
            else:
                values = np.full([variable_card] + list(evidence_card),
                                 fill_value)

        try:
            cpd = pgmpy.factors.TabularCPD(
                variable=variable,
                variable_card=variable_card,
                values=values,
                evidence=evidence,
                evidence_card=evidence_card,
                # statename_dict=statename_dict,
                state_names=statename_dict,
            )
        except Exception as ex:
            ut.printex(
                ex,
                'Failed to create TabularCPD',
                keys=[
                    'variable',
                    'variable_card',
                    'statename_dict',
                    'evidence_card',
                    'evidence',
                    'values.shape',
                ],
            )
            ut.embed()
            raise

        cpd.ttype = self.ttype
        cpd._template_ = self
        cpd._template_id = _id
        return cpd
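The table construction is the heart of this method: rows enumerate the variable's states, columns enumerate the Cartesian product of evidence states, and each column is normalized to a distribution. A minimal sketch with an illustrative pmf_func (all names here are hypothetical):

import itertools
import numpy as np

basis = ['match', 'nomatch']
evidence_bases = [['low', 'high'], ['low', 'high']]
evidence_states = list(itertools.product(*evidence_bases))

def pmf_func(vstate, e1, e2):
    # toy scoring: 'match' is favored exactly when both scores are high
    return 2.0 if (vstate == 'match') == (e1 == 'high' and e2 == 'high') else 1.0

values = np.array([[pmf_func(v, *es) for es in evidence_states] for v in basis])
values = values / values.sum(axis=0)   # each evidence column sums to one
print(values)   # shape (2, 4)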
Example 11
def get_toy_data_1v1(num_annots=5, num_names=None, **kwargs):
    r"""
    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-get_toy_data_1v1 --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> toy_data = get_toy_data_1v1()
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> show_toy_distributions(toy_data['toy_params'])
        >>> ut.show_if_requested()

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> toy_data = get_toy_data_1v1()
        >>> kwargs = {}
        >>> initial_aids = toy_data['aids']
        >>> initial_nids = toy_data['nids']
        >>> num_annots = 1
        >>> num_names = 6
        >>> toy_data2 = get_toy_data_1v1(num_annots, num_names, initial_aids=initial_aids, initial_nids=initial_nids)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> show_toy_distributions(toy_data['toy_params'])
        >>> ut.show_if_requested()

    Ignore:
        >>> num_annots = 1000
        >>> num_names = 400
    """
    import vtool as vt
    tup_ = get_toy_annots(num_annots, num_names, **kwargs)
    aids, nids, aids1, nids1, all_aids, all_nids = tup_
    rng = vt.ensure_rng(None)

    def pairwise_feature(aidx1, aidx2, all_nids=all_nids, toy_params=toy_params):
        if aidx1 == aidx2:
            score = -1
        else:
            #rng = np.random.RandomState(int((aidx1 + 13) * (aidx2 + 13)))
            nid1 = all_nids[int(aidx1)]
            nid2 = all_nids[int(aidx2)]
            params = toy_params[nid1 == nid2]
            mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
            score_ = rng.normal(mu, sigma)
            score = np.clip(score_, 0, np.inf)
        return score

    pairwise_nids = list([tup[::-1] for tup in ut.iprod(nids, nids1)])
    pairwise_matches = np.array(
        [nid1 == nid2 for nid1, nid2 in pairwise_nids])

    pairwise_aidxs = list([tup[::-1] for tup in ut.iprod(aids, aids1)])

    pairwise_features = np.array(
        [pairwise_feature(aidx1, aidx2) for aidx1, aidx2 in pairwise_aidxs])

    #pairwise_scores_mat = pairwise_scores.reshape(num_annots, num_annots)
    is_diag = [r < c for r, c in pairwise_aidxs]
    diag_scores = pairwise_features.compress(is_diag)
    diag_aidxs = ut.compress(pairwise_aidxs, is_diag)
    import utool
    with utool.embed_on_exception_context:
        diag_nids = ut.compress(pairwise_nids, is_diag)
    diag_labels = pairwise_matches.compress(is_diag)

    #import utool
    #utool.embed()

    toy_data = {
        'aids': aids,
        'nids': nids,
        'all_nids': all_nids,
        'all_aids': all_aids,
        #'pairwise_aidxs': pairwise_aidxs,
        #'pairwise_scores': pairwise_scores,
        #'pairwise_matches': pairwise_matches,
        'diag_labels': diag_labels,
        'diag_scores': diag_scores,
        'diag_nids': diag_nids,
        'diag_aidxs': diag_aidxs,
        'toy_params': toy_params,
    }
    return toy_data
Example 12
def dummy_cut_example():
    r"""
    CommandLine:
        python -m ibeis.workflow --exec-dummy_cut_example --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.workflow import *  # NOQA
        >>> result = dummy_cut_example()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import opengm
    import numpy as np
    import plottool as pt
    pt.ensure_pylab_qt4()
    # Matching Graph
    cost_matrix = np.array([
        [0.5, 0.6, 0.2, 0.4],
        [0.0, 0.5, 0.2, 0.9],
        [0.0, 0.0, 0.5, 0.1],
        [0.0, 0.0, 0.0, 0.5],
    ])
    cost_matrix += cost_matrix.T
    number_of_labels = 4
    num_annots = 4
    #cost_matrix = (cost_matrix * 2) - 1

    #gm = opengm.gm(number_of_labels)
    gm = opengm.gm(np.ones(num_annots) * number_of_labels)
    aids = np.arange(num_annots)
    aid_pairs = np.array([(a1, a2) for a1, a2 in ut.iprod(
        aids, aids) if a1 != a2], dtype=np.uint32)
    aid_pairs.sort(axis=1)

    # add a potts function
    # penalizes neighbors for having different labels
    # beta = 0   # 0.1  # strength of potts regularizer
    #beta = 0.1   # 0.1  # strength of potts regularizer

    # Places to look for the definition of this stupid class
    # ~/code/opengm/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
    # /src/interfaces/python/opengm/opengmcore/function_injector.py

    #shape = [number_of_labels] * 2
    #regularizer = opengm.PottsGFunction(shape, 0.0, beta)
    # __init__( (object)arg1, (object)shape [, (object)values=()]) -> object :

    # values = np.arange(1, ut.num_partitions(num_annots) + 1)
    #regularizer = opengm.PottsGFunction(shape)
    #reg_fid = gm.addFunction(regularizer)

    # A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems
    # http://arxiv.org/pdf/1404.0533.pdf

    # regularizer1 = opengm.pottsFunction([number_of_labels] * 2, valueEqual=0.0, valueNotEqual=beta)

    # gm.addFactors(reg_fid, aid_pairs)

    # 2nd order function
    pair_fid = gm.addFunction(cost_matrix)
    gm.addFactors(pair_fid, aid_pairs)

    if False:
        Inf = opengm.inference.BeliefPropagation
        parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    else:
        Inf = opengm.inference.Multicut
        parameter = opengm.InfParam()

    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):

        def __init__(self):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy  %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)
    print(callback.labels)

    print(cost_matrix)
    pt.imshow(cost_matrix, cmap='magma')
    opengm.visualizeGm(gm=gm)
    pass
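Note how the pair indices are built: every ordered pair without self-pairs, then a per-row sort so each factor sees (min, max) ids. A sketch of just that step, assuming ut.iprod is itertools.product; the sort maps (1, 0) to (0, 1), so each unordered pair appears twice in the array:

import itertools
import numpy as np

aids = np.arange(3)
aid_pairs = np.array([(a1, a2) for a1, a2 in itertools.product(aids, aids)
                      if a1 != a2], dtype=np.uint32)
aid_pairs.sort(axis=1)   # (1, 0) becomes (0, 1)
print(aid_pairs)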
Example 13
def crftest():
    """
    pip install pyqpbo
    pip install pystruct

    http://taku910.github.io/crfpp/#install

    cd ~/tmp
    #wget https://drive.google.com/folderview?id=0B4y35FiV1wh7fngteFhHQUN2Y1B5eUJBNHZUemJYQV9VWlBUb3JlX0xBdWVZTWtSbVBneU0&usp=drive_web#list
    7z x CRF++-0.58.tar.gz
    7z x CRF++-0.58.tar
    cd CRF++-0.58
    chmod +x configure
    ./configure
    make

    """
    import pystruct
    import pystruct.models
    inference_method_options = ['lp', 'max-product']
    inference_method = inference_method_options[1]

    #graph = pystruct.models.GraphCRF(
    #    n_states=None,
    #    n_features=None,
    #    inference_method=inference_method,
    #    class_weight=None,
    #    directed=False,
    #)

    num_annots = 5
    num_names = num_annots

    aids = np.arange(5)
    rng = np.random.RandomState(0)
    hidden_nids = rng.randint(0, num_names, num_annots)
    unique_nids, groupxs = ut.group_indices(hidden_nids)

    # Indicator vector indicating the name
    node_features = np.zeros((num_annots, num_names))
    node_features[(aids, hidden_nids)] = 1

    toy_params = {
        True: {'mu': 1.0, 'sigma': 2.2},
        False: {'mu': 7.0, 'sigma': .9}
    }
    if False:
        import vtool as vt
        import plottool as pt
        pt.ensure_pylab_qt4()
        xdata = np.linspace(0, 100, 1000)
        tp_pdf = vt.gauss_func1d(xdata, **toy_params[True])
        fp_pdf = vt.gauss_func1d(xdata, **toy_params[False])
        pt.plot_probabilities([tp_pdf, fp_pdf], ['TP', 'FP'], xdata=xdata)

    def metric(aidx1, aidx2, hidden_nids=hidden_nids, toy_params=toy_params):
        if aidx1 == aidx2:
            return 0
        rng = np.random.RandomState(int(aidx1 + aidx2))
        same = hidden_nids[int(aidx1)] == hidden_nids[int(aidx2)]
        mu, sigma = ut.dict_take(toy_params[same], ['mu', 'sigma'])
        return np.clip(rng.normal(mu, sigma), 0, np.inf)

    pairwise_aidxs = list(ut.iprod(range(num_annots), range(num_annots)))
    pairwise_labels = np.array([hidden_nids[a1] == hidden_nids[a2] for a1, a2 in pairwise_aidxs])
    pairwise_scores = np.array([metric(*zz) for zz in pairwise_aidxs])
    pairwise_scores_mat = pairwise_scores.reshape(num_annots, num_annots)

    graph = pystruct.models.EdgeFeatureGraphCRF(
        n_states=num_annots,
        n_features=num_names,
        n_edge_features=1,
        inference_method=inference_method,
    )

    import opengm

    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(
        workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'),
    )
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)

    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()
Example 14
def intra_encounter_matching():
    qreq_, cm_list = testdata_workflow()
    # qaids = [cm.qaid for cm in cm_list]
    # top_aids = [cm.get_top_aids(5) for cm in cm_list]
    import numpy as np
    from scipy.sparse import coo_matrix, csgraph
    aid_pairs = np.array([(cm.qaid, daid) for cm in cm_list for daid in cm.get_top_aids(5)])
    top_scores = ut.flatten([cm.get_top_scores(5) for cm in cm_list])

    N = aid_pairs.max() + 1
    mat = coo_matrix((top_scores, aid_pairs.T), shape=(N, N))
    csgraph.connected_components(mat)
    tree = csgraph.minimum_spanning_tree(mat)  # NOQA
    import plottool as pt
    dense = mat.todense()
    pt.imshow(dense / dense.max() * 255)
    pt.show_if_requested()

    # load image and convert to LAB
    import vigra
    from vigra import graphs
    img_fpath = str(ut.grab_test_imgpath(str('lena.png')))
    img = vigra.impex.readImage(img_fpath)
    imgLab = vigra.colors.transform_RGB2Lab(img)

    superpixelDiameter = 15   # super-pixel size
    slicWeight = 15.0        # SLIC color - spatial weight
    labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
                                                  superpixelDiameter)
    labels = vigra.analysis.labelImage(labels) - 1

    # get 2D grid graph and RAG
    gridGraph = graphs.gridGraph(img.shape[0:2])
    rag = graphs.regionAdjacencyGraph(gridGraph, labels)

    nodeFeatures = rag.accumulateNodeFeatures(imgLab)
    nodeFeaturesImg = rag.projectNodeFeaturesToGridGraph(nodeFeatures)
    nodeFeaturesImg = vigra.taggedView(nodeFeaturesImg, "xyc")
    nodeFeaturesImgRgb = vigra.colors.transform_Lab2RGB(nodeFeaturesImg)

    #from sklearn.cluster import MiniBatchKMeans, KMeans
    from sklearn import mixture
    nCluster = 3
    g = mixture.GMM(n_components=nCluster)
    g.fit(nodeFeatures[:, :])
    clusterProb = g.predict_proba(nodeFeatures)

    import numpy
    import opengm
    #https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Irregular%20Factor%20Graphs.ipynb
    #https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Hard%20and%20Soft%20Constraints.ipynb

    clusterProbImg = rag.projectNodeFeaturesToGridGraph(clusterProb.astype(numpy.float32))
    clusterProbImg = vigra.taggedView(clusterProbImg, "xyc")

    # strength of potts regularizer
    beta = 40.0
    # graphical model with as many variables
    # as superpixels, each has 3 states
    gm = opengm.gm(numpy.ones(rag.nodeNum, dtype=opengm.label_type) * nCluster)
    # convert probabilities to energies
    probs = numpy.clip(clusterProb, 0.00001, 0.99999)
    costs = -1.0 * numpy.log(probs)
    # add ALL unaries AT ONCE
    fids = gm.addFunctions(costs)
    gm.addFactors(fids, numpy.arange(rag.nodeNum))
    # add a potts function
    regularizer = opengm.pottsFunction([nCluster] * 2, 0.0, beta)
    fid = gm.addFunction(regularizer)
    # get variable indices of adjacent superpixels
    # - or "u" and "v" node id's for edges
    uvIds = rag.uvIds()
    uvIds = numpy.sort(uvIds, axis=1)
    # add all second order factors at once
    gm.addFactors(fid, uvIds)

    # get super-pixels with slic on LAB image

    import opengm
    # Matching Graph
    cost_matrix = np.array([
        [0.5, 0.6, 0.2, 0.4, 0.1],
        [0.0, 0.5, 0.2, 0.9, 0.2],
        [0.0, 0.0, 0.5, 0.1, 0.1],
        [0.0, 0.0, 0.0, 0.5, 0.1],
        [0.0, 0.0, 0.0, 0.0, 0.5],
    ])
    cost_matrix += cost_matrix.T
    number_of_labels = 5
    num_annots = 5
    cost_matrix = (cost_matrix * 2) - 1
    #gm = opengm.gm(number_of_labels)
    gm = opengm.gm(np.ones(num_annots) * number_of_labels)
    aids = np.arange(num_annots)
    aid_pairs = np.array([(a1, a2) for a1, a2 in ut.iprod(aids, aids) if a1 != a2], dtype=np.uint32)
    aid_pairs.sort(axis=1)
    # 2nd order function
    fid = gm.addFunction(cost_matrix)
    gm.addFactors(fid, aid_pairs)
    Inf = opengm.inference.BeliefPropagation
    #Inf = opengm.inference.Multicut
    parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    parameter = opengm.InfParam()  # NOTE: this overwrites the parameters above
    inf = Inf(gm, parameter=parameter)
    class PyCallback(object):
        def __init__(self):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy  %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)
    print(callback.labels)
    # baseline jobid
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/OpenGM%20tutorial.ipynb
    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(
        workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'),
    )
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)

    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
    """
Example 15
    def new_cpd(self, parents=None, pmf_func=None):
        """
        Makes a new random variable that is an instance of this template

        parents : only used to define the name of this node.
        """
        if pmf_func is None:
            pmf_func = self.pmf_func

        # --- MAKE VARIABLE ID
        def _getid(obj):
            if isinstance(obj, int):
                return str(obj)
            elif isinstance(obj, six.string_types):
                return obj
            else:
                return obj._template_id

        if not ut.isiterable(parents):
            parents = [parents]

        template_ids = [_getid(cpd) for cpd in parents]
        HACK_SAME_IDS = True
        # TODO: keep track of parent index inheritance
        # then rectify uniqueness based on that
        if HACK_SAME_IDS and ut.list_allsame(template_ids):
            _id = template_ids[0]
        else:
            _id = ''.join(template_ids)
        variable = ''.join([self.varpref, _id])
        #variable = '_'.join([self.varpref, '{' + _id + '}'])
        #variable = '$%s$' % (variable,)

        evidence_cpds = [cpd for cpd in parents if hasattr(cpd, 'ttype')]
        if len(evidence_cpds) == 0:
            evidence_cpds = None

        variable_card = len(self.basis)
        statename_dict = {
            variable: self.basis,
        }
        if self.evidence_ttypes is not None:
            if any(cpd.ttype != tcpd.ttype
                   for cpd, tcpd in zip(evidence_cpds, self.evidence_ttypes)):
                raise ValueError('Evidence is not of appropriate type')
            evidence_bases = [cpd.variable_statenames for cpd in evidence_cpds]
            evidence_card = list(map(len, evidence_bases))
            evidence_states = list(ut.iprod(*evidence_bases))

            for cpd in evidence_cpds:
                _dict = ut.dict_subset(cpd.statename_dict, [cpd.variable])
                statename_dict.update(_dict)

            evidence = [cpd.variable for cpd in evidence_cpds]
        else:
            if evidence_cpds is not None:
                raise ValueError('Gave evidence for evidence-less template')
            evidence = None
            evidence_card = None

        # --- MAKE TABLE VALUES
        if pmf_func is not None:
            if isinstance(pmf_func, list):
                values = np.array(pmf_func)
            else:
                values = np.array([
                    [pmf_func(vstate, *estates) for estates in evidence_states]
                    for vstate in self.basis
                ])
            ensure_normalized = True
            if ensure_normalized:
                values = values / values.sum(axis=0)
        else:
            # assume uniform
            fill_value = 1.0 / variable_card
            if evidence_card is None:
                values = np.full((1, variable_card), fill_value)
            else:
                values = np.full([variable_card] + list(evidence_card), fill_value)

        try:
            cpd = pgmpy.factors.TabularCPD(
                variable=variable,
                variable_card=variable_card,
                values=values,
                evidence=evidence,
                evidence_card=evidence_card,
                statename_dict=statename_dict,
            )
        except Exception as ex:
            ut.printex(ex, 'Failed to create TabularCPD',
                       keys=[
                           'variable',
                           'variable_card',
                           'statename_dict',
                           'evidence_card',
                           'evidence',
                           'values.shape',
                       ])
            raise

        cpd.ttype = self.ttype
        cpd._template_ = self
        cpd._template_id = _id
        return cpd
Example 16
def dummy_cut_example():
    r"""
    CommandLine:
        python -m ibeis.workflow --exec-dummy_cut_example --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.workflow import *  # NOQA
        >>> result = dummy_cut_example()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import opengm
    import numpy as np
    import plottool as pt
    pt.ensure_pylab_qt4()
    # Matching Graph
    cost_matrix = np.array([
        [0.5, 0.6, 0.2, 0.4],
        [0.0, 0.5, 0.2, 0.9],
        [0.0, 0.0, 0.5, 0.1],
        [0.0, 0.0, 0.0, 0.5],
    ])
    cost_matrix += cost_matrix.T
    number_of_labels = 4
    num_annots = 4
    #cost_matrix = (cost_matrix * 2) - 1

    #gm = opengm.gm(number_of_labels)
    gm = opengm.gm(np.ones(num_annots) * number_of_labels)
    aids = np.arange(num_annots)
    aid_pairs = np.array([(a1, a2)
                          for a1, a2 in ut.iprod(aids, aids) if a1 != a2],
                         dtype=np.uint32)
    aid_pairs.sort(axis=1)

    # add a potts function
    # penalizes neighbors for having different labels
    # beta = 0   # 0.1  # strength of potts regularizer
    #beta = 0.1   # 0.1  # strength of potts regularizer

    # Places to look for the definition of this stupid class
    # ~/code/opengm/src/interfaces/python/opengm/opengmcore/pyFunctionTypes.cxx
    # /src/interfaces/python/opengm/opengmcore/function_injector.py

    #shape = [number_of_labels] * 2
    #regularizer = opengm.PottsGFunction(shape, 0.0, beta)
    # __init__( (object)arg1, (object)shape [, (object)values=()]) -> object :

    # values = np.arange(1, ut.num_partitions(num_annots) + 1)
    #regularizer = opengm.PottsGFunction(shape)
    #reg_fid = gm.addFunction(regularizer)

    # A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems
    # http://arxiv.org/pdf/1404.0533.pdf

    # regularizer1 = opengm.pottsFunction([number_of_labels] * 2, valueEqual=0.0, valueNotEqual=beta)

    # gm.addFactors(reg_fid, aid_pairs)

    # 2nd order function
    pair_fid = gm.addFunction(cost_matrix)
    gm.addFactors(pair_fid, aid_pairs)

    if False:
        Inf = opengm.inference.BeliefPropagation
        parameter = opengm.InfParam(steps=10,
                                    damping=0.5,
                                    convergenceBound=0.001)
    else:
        Inf = opengm.inference.Multicut
        parameter = opengm.InfParam()

    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):
        def __init__(self):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy  %r" % (gm.evaluate(labelVector), ))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)
    inf.infer(visitor)
    print(callback.labels)

    print(cost_matrix)
    pt.imshow(cost_matrix, cmap='magma')
    opengm.visualizeGm(gm=gm)
    pass