Example #1
def test_chain():
    # test LP, AD3, AD3-BB and OGM on a chain.
    # they should all be exact
    rnd = np.random.RandomState(0)
    algorithms = get_installed([('ad3', {'branch_and_bound': False}),
                                ('ad3', {'branch_and_bound': True}),
                                ('ogm', {'alg': 'dyn'}),
                                ('ogm', {'alg': 'dd'}),
                                ('ogm', {'alg': 'trw'})])
    n_states = 3
    n_nodes = 10

    for i in xrange(10):
        forward = np.c_[np.arange(n_nodes - 1), np.arange(1, n_nodes)]
        backward = np.c_[np.arange(1, n_nodes), np.arange(n_nodes - 1)]
        unary_potentials = rnd.normal(size=(n_nodes, n_states))
        pairwise_potentials = rnd.normal(size=(n_states, n_states))
        # test that reversing edges is same as transposing pairwise potentials
        y_forward = inference_dispatch(unary_potentials, pairwise_potentials,
                                       forward, 'lp')
        y_backward = inference_dispatch(unary_potentials,
                                        pairwise_potentials.T, backward, 'lp')
        assert_array_equal(y_forward, y_backward)
        for chain in [forward, backward]:
            y_lp = inference_dispatch(unary_potentials, pairwise_potentials,
                                      chain, 'lp')
            for alg in algorithms:
                if chain is backward and alg[0] == 'ogm':
                    # ogm needs sorted indices
                    continue
                y = inference_dispatch(unary_potentials, pairwise_potentials,
                                       chain, alg)
                assert_array_equal(y, y_lp)
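A standalone sketch of the equivalence the test above relies on: reversing every edge while transposing the pairwise matrix describes the same chain, so the LP labeling should not change. This assumes pystruct is installed with its 'lp' inference available; the sizes and random seed below are illustrative.

import numpy as np
from pystruct.inference import inference_dispatch

rnd = np.random.RandomState(42)
n_nodes, n_states = 5, 3

# A 5-node chain: forward edges (i, i+1) and the same edges reversed.
forward = np.c_[np.arange(n_nodes - 1), np.arange(1, n_nodes)]
backward = np.c_[np.arange(1, n_nodes), np.arange(n_nodes - 1)]

unaries = rnd.normal(size=(n_nodes, n_states))
pairwise = rnd.normal(size=(n_states, n_states))

# Reversing an edge swaps the roles of its endpoints, so the pairwise
# matrix has to be transposed to keep the same energy.
y_fwd = inference_dispatch(unaries, pairwise, forward, 'lp')
y_bwd = inference_dispatch(unaries, pairwise.T, backward, 'lp')
print(np.array_equal(y_fwd, y_bwd))  # expected: True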
Example #2
def test_chain():
    # test LP, AD3, AD3-BB and JT on a chain.
    # they should all be exact
    rnd = np.random.RandomState(0)
    algorithms = get_installed([('ad3', {'branch_and_bound':False}),
                                ('ad3', {'branch_and_bound':True}),
                                ('dai', {'alg':'jt'})])
    for i in xrange(10):
        forward = np.c_[np.arange(9), np.arange(1, 10)]
        backward = np.c_[np.arange(1, 10), np.arange(9)]
        unary_potentials = rnd.normal(size=(10, 3))
        pairwise_potentials = rnd.normal(size=(3, 3))
        # test that reversing edges is same as transposing pairwise potentials
        y_forward = inference_dispatch(unary_potentials, pairwise_potentials,
                                       forward, 'lp')
        y_backward = inference_dispatch(unary_potentials,
                                        pairwise_potentials.T, backward, 'lp')
        assert_array_equal(y_forward, y_backward)
        for chain in [forward, backward]:
            y_lp = inference_dispatch(unary_potentials, pairwise_potentials,
                                      chain, 'lp')
            for alg in algorithms:
                print(alg)
                y = inference_dispatch(unary_potentials, pairwise_potentials,
                                       chain, alg)
                assert_array_equal(y, y_lp)
Example #3
def test_demo():
     frm_num=3
     size=frm_num
     n_states=2
     unaries= np.array([[0.6, 0.5], [1, 0], [.4, .6]])
     edges= np.array([[0, 1], [1, 2]])
     pairwise=np.array([[0, 0], [0, 0]])

     print unaries.shape
     print edges.shape
     print pairwise.shape
      
     fig, ax = plt.subplots(1, 2, figsize=(3, 1))

     ##for a, inference_method in zip(ax, ['ad3', 'qpbo', 'max-product',
     ##                               ('max-product', {'max_iter': 10}), 'lp']):
     for a, inference_method in zip(ax, ['lp']):
         start = time()
         print a, inference_method
         y = inference_dispatch(unaries, pairwise, edges,
                                inference_method=inference_method)    ##(400)
         took = time() - start

         a.matshow(y.reshape(size, 1))
         print y.shape
         print y
         energy = compute_energy(unaries, pairwise, edges, y)
         a.set_title(str(inference_method) + "\n time: %.2f energy %.2f" % (took, energy))
         ##a.set_xticks(())
         ##a.set_yticks(())
         plt.show()
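Because the pairwise matrix above is all zeros, the three nodes are decoupled and inference reduces to a per-node argmax of the unaries. A minimal check of that, assuming pystruct with 'lp' support:

import numpy as np
from pystruct.inference import inference_dispatch

unaries = np.array([[0.6, 0.5], [1.0, 0.0], [0.4, 0.6]])
edges = np.array([[0, 1], [1, 2]])
pairwise = np.zeros((2, 2))  # no coupling between neighbouring nodes

y = inference_dispatch(unaries, pairwise, edges, inference_method='lp')
print(y)                       # expected: [0 0 1]
print(unaries.argmax(axis=1))  # the same labels, obtained node by node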
Example #4
def generate_Potts(shape=(10, 10),
                   ncolors=2,
                   beta=1.0,
                   inference='max-product'):
    """Generate Potts image."""
    # Generate initial normal image
    x = rnd.normal(size=(*shape, ncolors))

    # Unary potentials
    unaries = x.reshape(-1, ncolors)

    # Pairwise potentials
    pairwise = beta*np.eye(ncolors)

    # Generate edge matrix
    edges = make_grid_edges(x)

    # Start clock
    start = time()

    # Infer image
    y = inference_dispatch(unaries, pairwise, edges,
                           inference_method=inference)

    # End clock
    took = time() - start
    print('Inference took ' + str(took) + ' seconds')

    # Compute energy
    energy = compute_energy(unaries, pairwise, edges, y)

    # Return inferred image and energy
    return np.reshape(y, shape), energy
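A possible driver for generate_Potts, sketching the module-level names the function assumes (the shared RandomState rnd and the pystruct imports); the shape, number of colors, beta and inference method below are illustrative.

import numpy as np
import matplotlib.pyplot as plt
from time import time
from pystruct.inference import inference_dispatch, compute_energy
from pystruct.utils import make_grid_edges

# generate_Potts above uses this module-level RandomState.
rnd = np.random.RandomState(0)

if __name__ == '__main__':
    img, energy = generate_Potts(shape=(20, 20), ncolors=3, beta=2.0,
                                 inference='max-product')
    plt.matshow(img)
    plt.title('Potts sample, energy %.2f' % energy)
    plt.show()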
Example #5
def mrf(probs, edges, potential=None):
    # probs2 = (-100 * np.log(probs)).astype(np.int32)
    # probs2 = (100 * probs).astype(np.int32)
    # min_prob = 0.001
    probs2 = np.array(probs)
    # probs2[probs2 < min_prob] = min_prob
    # probs2 = np.log(probs2)
    if potential is None:
        n_labels = probs2.shape[1]
        potential = np.eye(n_labels, dtype=np.int32)
    print "%d labels." % probs2.shape[1]
    t0 = time.time()
    smoothed_pred = inference_dispatch(probs2, potential, edges, inference_method="qpbo")
    print "MRF took %.2f seconds." % (time.time() - t0)
    return smoothed_pred
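A hedged usage sketch for the mrf helper above: the per-pixel probability map, the grid edges and the reshape back to an image are illustrative, mrf itself needs the time module imported, and the 'qpbo' method requires pystruct's QPBO backend.

import time  # used inside mrf above
import numpy as np
from pystruct.utils import make_grid_edges

# Illustrative input: per-pixel label probabilities for a 2-label problem.
h, w, n_labels = 50, 60, 2
rnd = np.random.RandomState(0)
probs = rnd.dirichlet(np.ones(n_labels), size=h * w)  # shape (h*w, n_labels)

# 4-connected grid edges over the h x w image.
edges = make_grid_edges(probs.reshape(h, w, n_labels))

labels = mrf(probs, edges)        # identity Potts potential by default
label_image = labels.reshape(h, w)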
Example #6
 def loss_augmented_inference(self, x, y, w, relaxed=False,
                              return_energy=False):
     self.inference_calls += 1
     self._check_size_w(w)
     unary_potentials = self.get_unary_potentials(x, w)
     pairwise_potentials = self.get_pairwise_potentials(x, w)
     edges = self.get_edges(x)
     # do loss-augmentation
     for l in np.arange(self.n_states):
         # for each class, raise the unaries of nodes whose ground
         # truth differs (loss augmentation adds the Hamming loss)
         unary_potentials[(y != l) * (y != self.void_label), l] += 1.
         unary_potentials[:, l] += 1. / y.size
     return inference_dispatch(unary_potentials, pairwise_potentials, edges,
                               self.inference_method, relaxed=relaxed,
                               return_energy=return_energy)
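A small numeric sketch of what the loop above does to the unaries (leaving out the void label): every label that disagrees with the ground truth gets +1, so the augmented objective becomes the score plus the Hamming loss, while the extra 1/y.size term is the same for all labels of a node and never changes the argmax.

import numpy as np

n_states = 3
y = np.array([0, 2, 1])            # illustrative ground-truth labels
unaries = np.zeros((3, n_states))  # illustrative zero unary potentials

for l in np.arange(n_states):
    unaries[y != l, l] += 1.       # +1 wherever label l disagrees with y
    unaries[:, l] += 1. / y.size   # uniform shift, identical for every label

print(unaries)
# Each row now holds 1 + 1/3 for the two wrong labels and 1/3 for the
# true label, i.e. the Hamming loss has been added to the label scores.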
Example #7
def mrf(probs, edges, potential=None):
    #probs2 = (-100 * np.log(probs)).astype(np.int32)
    #probs2 = (100 * probs).astype(np.int32)
    #min_prob = 0.001
    probs2 = np.array(probs)
    #probs2[probs2 < min_prob] = min_prob
    #probs2 = np.log(probs2)
    if potential is None:
        n_labels = probs2.shape[1]
        potential = np.eye(n_labels, dtype=np.int32)
    print "%d labels." % probs2.shape[1]
    t0 = time.time()
    smoothed_pred = inference_dispatch(probs2,
                                       potential,
                                       edges,
                                       inference_method='qpbo')
    print "MRF took %.2f seconds." % (time.time() - t0)
    return smoothed_pred
Example #8
 def loss_augmented_inference(self,
                              x,
                              y,
                              w,
                              relaxed=False,
                              return_energy=False):
     self.inference_calls += 1
     self._check_size_w(w)
     unary_potentials = self.get_unary_potentials(x, w)
     pairwise_potentials = self.get_pairwise_potentials(x, w)
     edges = self.get_edges(x)
     # do loss-augmentation
     for l in np.arange(self.n_states):
         # for each class, raise the unaries of nodes whose ground
         # truth differs (loss augmentation adds the Hamming loss)
         unary_potentials[(y != l) * (y != self.void_label), l] += 1.
         unary_potentials[:, l] += 1. / y.size
     return inference_dispatch(unary_potentials,
                               pairwise_potentials,
                               edges,
                               self.inference_method,
                               relaxed=relaxed,
                               return_energy=return_energy)
Example #9
    def loss_augmented_inference(self,
                                 x,
                                 y,
                                 w,
                                 relaxed=False,
                                 return_energy=False):
        """Loss-augmented Inference for x relative to y using parameters w.

        Finds (approximately)
        argmax_y_hat np.dot(w, joint_feature(x, y_hat)) + loss(y, y_hat)
        using self.inference_method.


        Parameters
        ----------
        x : tuple
            Instance of a graph with unary evidence.
            x=(unaries, edges)
            unaries are an nd-array of shape (n_nodes, n_features),
            edges are an nd-array of shape (n_edges, 2)

        y : ndarray, shape (n_nodes,)
            Ground truth labeling relative to which the loss
            will be measured.

        w : ndarray, shape=(size_joint_feature,)
            Parameters for the CRF energy function.

        relaxed : bool, default=False
            Whether relaxed inference should be performed.
            Only meaningful if inference method is 'lp' or 'ad3'.
            By default fractional solutions are rounded. If relaxed=True,
            fractional solutions are returned directly.

        return_energy : bool, default=False
            Whether to return the energy of the solution (x, y) that was found.

        Returns
        -------
        y_pred : ndarray or tuple
            By default an integer ndarray of shape=(n_nodes,)
            of variable assignments for x is returned.
            If ``relaxed=True`` and inference_method is ``lp`` or ``ad3``,
            a tuple (unary_marginals, pairwise_marginals)
            containing the relaxed inference result is returned.
            unary marginals is an array of shape (n_nodes, n_states),
            pairwise_marginals is an array of
            shape (n_states, n_states) of accumulated pairwise marginals.

        """
        self.inference_calls += 1
        self._check_size_w(w)
        unary_potentials = self._get_unary_potentials(x, w)
        pairwise_potentials = self._get_pairwise_potentials(x, w)
        edges = self._get_edges(x)
        self.loss_augment_unar(unary_potentials, y)

        return inference_dispatch(unary_potentials,
                                  pairwise_potentials,
                                  edges,
                                  self.inference_method,
                                  relaxed=relaxed,
                                  return_energy=return_energy)
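A hedged sketch of the two return types the docstring above describes, shown directly at the inference_dispatch level; it assumes an LP solver is installed for the 'lp' method, and the pairwise-marginal shape is printed rather than asserted because it depends on the inference backend.

import numpy as np
from pystruct.inference import inference_dispatch

rnd = np.random.RandomState(1)
n_nodes, n_states = 4, 3
unaries = rnd.normal(size=(n_nodes, n_states))
pairwise = rnd.normal(size=(n_states, n_states))
edges = np.c_[np.arange(n_nodes - 1), np.arange(1, n_nodes)]  # a simple chain

# Default: a rounded integer labeling of shape (n_nodes,).
y_int = inference_dispatch(unaries, pairwise, edges, 'lp')
print(y_int)

# relaxed=True with 'lp' (or 'ad3'): a tuple of fractional marginals instead.
unary_marginals, pairwise_marginals = inference_dispatch(
    unaries, pairwise, edges, 'lp', relaxed=True)
print(unary_marginals.shape)                 # (n_nodes, n_states)
print(np.asarray(pairwise_marginals).shape)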
Example #10
def crf_infer(fw_masks,cur_masks,bw_masks):
     print '================================using CRF to do inference=========================================================='
     frm_num=len(cur_masks)
     n_nodes=frm_num
     states_per_frm=3
     n_states=frm_num*states_per_frm
    
     mask_sample=cur_masks[0]
     w_mask=mask_sample.shape[1]
     h_mask=mask_sample.shape[0]
     area_mask=w_mask*h_mask    ## area_mask is fixed here, but can be different later. 
     
     mask1=fw_masks[0]
     mask2=cur_masks[0]

     match_mask=np.logical_and(mask1, mask2)
     
     ##np.logical_and([True, False], [False, False])

     ##put all three kinds of mask in one category
     state_masks=[]
     for node_id in xrange(n_nodes):
         state_masks.append(fw_masks[node_id])
         state_masks.append(cur_masks[node_id]) 
         state_masks.append(bw_masks[node_id])
     
     ##(1) define edges 
     edges=np.zeros((n_nodes-1,2),dtype=int) 
     ##edges=np.zeros((2,2),dtype=int)    ##(node-1,2) 
     for node_id in xrange(n_nodes-1):
         edges[node_id,:]=[node_id,node_id+1]   

     ##(2) define unaries
     ## calculate unary values within the same frame
    #  unary_vals=np.zeros((n_nodes,n_states))
    #  for node_id in xrange(n_nodes):
    #      mask1=fw_masks[node_id]
    #      mask2=cur_masks[node_id]
    #      mask3=bw_masks[node_id]
    #      inter_1_2=inter_2_1=np.logical_and(mask1,mask2)
    #      inter_2_3=inter_3_2=np.logical_and(mask2,mask3)
    #      inter_1_3=inter_3_1=np.logical_and(mask1,mask3)

    #      val1=np.sum(inter_1_2)+np.sum(inter_1_3)
    #      val2=np.sum(inter_2_1)+np.sum(inter_2_3)
    #      val3=np.sum(inter_3_1)+np.sum(inter_3_2)
         
    #      val_sum=val1+val2+val3+0.0
    #      norm_val1=val1/val_sum
    #      norm_val2=val2/val_sum
    #      norm_val3=val3/val_sum
         
    #      t_nodes=[node_id*states_per_frm,node_id*states_per_frm+1,node_id*states_per_frm+2]
    #      unary_vals[node_id,t_nodes]=[norm_val1,norm_val2,norm_val3]
    #  unaries=unary_vals


    ##ignore unary terms:
     unary_vals=np.ones((n_nodes,n_states))
     unaries=unary_vals

      #  ##(2) define unaries
     #unaries=np.zeros((n_nodes,n_states))
     #  for node_id in xrange(n_nodes):
     #      t_nodes=[node_id*states_per_frm,node_id*states_per_frm+1,node_id*states_per_frm+2]
     #      unaries[node_id,t_nodes]=1.0/3
     
     ##(3) define pairwise
     pairwise=np.zeros((n_states, n_states))
     
     t_base=np.zeros((n_nodes,n_nodes))

     for state_id1 in xrange(n_states):
         t1=state_id1/states_per_frm
         for state_id2 in xrange(n_states):
             t2=state_id2/states_per_frm
             if t1==t2 or abs(t2-t1)>1 :
                 continue
             else:
                 mask1=state_masks[state_id1]
                 mask2=state_masks[state_id2]
                 iou_1_2=np.logical_and(mask1,mask2)
                 val_1_2=np.sum(iou_1_2)
                 pairwise[state_id1,state_id2]=val_1_2
                 t_base[t1,t2]=val_1_2+t_base[t1,t2]

     ##s1: normalize based on two consecutive frames (9 pairs)
     for state_id1 in xrange(n_states):
        t1=state_id1/states_per_frm
        for state_id2 in xrange(n_states):
            t2=state_id2/states_per_frm
            if t1==t2 or abs(t2-t1)>1 :
                continue
            else:
                pairwise[state_id1,state_id2]=pairwise[state_id1,state_id2]/t_base[t1,t2]
     
     pairwise=pairwise
   
       ##s2: normalize on the whole sequence.

     ##print 't_base:\n', t_base
     ##print 'unaries:\n',unaries
     ##print 'pairwise:\n',pairwise

     print 'unaries.shape:',unaries.shape
     print 'edges.shape:',edges.shape
     print 'pairwise.shape:',pairwise.shape
      
     fig, ax = plt.subplots(1, 2, figsize=(n_nodes, 1))

     ##for a, inference_method in zip(ax, ['ad3', 'qpbo', 'max-product',
     ##                               ('max-product', {'max_iter': 10}), 'lp']):
     for a, inference_method in zip(ax, ['lp']):
         start = time()
         ##print a, inference_method
         y = inference_dispatch(unaries, pairwise, edges,
                                inference_method=inference_method)    ##(400)
         took = time() - start
         a.matshow(y.reshape(n_nodes, 1))
         ##print y.shape
         energy = compute_energy(unaries, pairwise, edges, y)
         ##a.set_title(str(inference_method) + "\n time: %.2f energy %.2f" % (took, energy))
         ##a.set_xticks(())
         ##a.set_yticks(())
         ##plt.show()

     arr_state_masks=np.asarray(state_masks)
     picked_masks= arr_state_masks[y]                       
     return y,picked_masks                               
Example #11
import numpy as np
import matplotlib.pyplot as plt

from time import time

from pystruct.inference import inference_dispatch, compute_energy
from pystruct.utils import make_grid_edges

size = 20
n_states = 5

rnd = np.random.RandomState(2)
x = rnd.normal(size=(size, size, n_states))
pairwise = np.eye(n_states)
edges = make_grid_edges(x)
unaries = x.reshape(-1, n_states)

fig, ax = plt.subplots(1, 5, figsize=(20, 5))
for a, inference_method in zip(ax, ['ad3', 'qpbo', 'max-product',
                                    ('max-product', {'max_iter': 10}), 'lp']):
    start = time()
    y = inference_dispatch(unaries, pairwise, edges,
                           inference_method=inference_method)
    took = time() - start
    a.matshow(y.reshape(size, size))
    energy = compute_energy(unaries, pairwise, edges, y)
    a.set_title(str(inference_method) + "\n time: %.2f energy %.2f" % (took, energy))
    a.set_xticks(())
    a.set_yticks(())
plt.show()
Example #12
import numpy as np
import matplotlib.pyplot as plt
from pystruct.inference import inference_dispatch

n_samples = 500

d = '12,12,11,11,10,9,8,8,7,6,6,6,7,8,8,8,6,5,4,3,3,2,1,0,1,3,4,5,6,8,8,9,9,10,11,12,13,14,14,14,15,15,15,15'
d = np.array([float(c) for c in d.split(',')])

nClasses = 100  # number of discrete classes
p = 20  # cost when adjacent nodes take different classes; 0 when they match

# data term: absolute difference between each observation and each label
unaries = np.array([[abs(i - j) for j in range(nClasses)] for i in d])

# pairwise term: 0 for identical labels, p for different labels
pairwise = np.array((np.ma.ones((nClasses, nClasses)) - np.eye(nClasses)) * p)

# edges linking consecutive nodes into a chain
edges = np.array([[i, i + 1] for i in range(unaries.shape[0] - 1)])

# inference maximizes the objective, so negate unaries and pairwise
res = inference_dispatch(-unaries,
                         -pairwise,
                         edges,
                         inference_method='max-product')

# plot
plt.plot(d, label="data")
plt.plot(res, label="result")
plt.legend()