Example #1
    def fit(self, X, y, idx):  # X: per-pixel class probabilities; y, idx: labels and the pixel indices used for scoring
        self.unaryEnergy = np.ascontiguousarray(-self.MULTIPLIER*np.log(X)).astype('int32') #energy=-log(probability)
        nClass = self.unaryEnergy.shape[2]

        params = np.logspace(-3,3,10) #grid search values for Potts compatibility
        best_score = 0.0

        print('Fitting Grid MRF...')
        for i in range(params.shape[0]):            
            pairwiseEnergy = (self.MULTIPLIER*params[i]*(1-np.eye(nClass))).astype('int32') 
            out = gco.cut_simple(unary_cost=self.unaryEnergy,\
                  pairwise_cost=pairwiseEnergy,n_iter=self.INFERENCE_NITER,
                  algorithm='swap').ravel()            
            
            score = np.sum(out[idx]==y)/float(y.size)
            if score > best_score:
                self.cost = params[i]
                best_score = score
             
        params = np.logspace(np.log10(self.cost)-1,np.log10(self.cost)+1,30)
        best_score = 0.0
        
        print('Finetuning Grid MRF...')
        for i in range(params.shape[0]):            
            pairwiseEnergy = (self.MULTIPLIER*params[i]*(1-np.eye(nClass))).astype('int32') 
            out = gco.cut_simple(unary_cost=self.unaryEnergy,\
                  pairwise_cost=pairwiseEnergy,n_iter=self.INFERENCE_NITER,
                  algorithm='swap').ravel()            
            
            score = np.sum(out[idx]==y)/float(y.size)
            if score > best_score:
                self.cost = params[i]
                best_score = score
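The fit above converts per-pixel class probabilities into integer energies and grid-searches a single Potts smoothness term. A minimal standalone sketch of the same conversion, assuming an (H, W, n_classes) probability array, a MULTIPLIER of 100 and a fixed smoothness in place of the grid search (all illustrative values, not taken from the class):

import numpy as np
import gco  # same binding used above; elsewhere imported as pygco

MULTIPLIER = 100                      # gco expects int32 costs, so scale before casting
H, W, n_classes = 32, 32, 3
rng = np.random.RandomState(0)
X = rng.dirichlet(np.ones(n_classes), size=(H, W))   # fake per-pixel probabilities

# energy = -log(probability), as in fit()/predict(); the epsilon guards against log(0)
unary = np.ascontiguousarray(-MULTIPLIER * np.log(X + 1e-6)).astype('int32')

# Potts pairwise cost: zero on the diagonal, a constant penalty for differing labels
smoothness = 2.0
pairwise = (MULTIPLIER * smoothness * (1 - np.eye(n_classes))).astype('int32')

labels = gco.cut_simple(unary_cost=unary, pairwise_cost=pairwise,
                        n_iter=5, algorithm='swap')  # (H, W) label map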
Example #2
def example_multinomial():
    # generate dataset with three stripes
    np.random.seed(15)
    x = np.zeros((10, 12, 3))
    x[:, :4, 0] = -1
    x[:, 4:8, 1] = -1
    x[:, 8:, 2] = -1
    unaries = x + 1.5 * np.random.normal(size=x.shape)
    x = np.argmin(x, axis=2)
    unaries = (unaries * 10).astype(np.int32)
    x_thresh = np.argmin(unaries, axis=2)

    # potts potential
    pairwise_potts = -2 * np.eye(3, dtype=np.int32)
    result = cut_simple(unaries, 10 * pairwise_potts)
    # potential that penalizes 0-1 and 1-2 transitions less than 0-2
    pairwise_1d = -15 * np.eye(3, dtype=np.int32) - 8
    pairwise_1d[-1, 0] = 0
    pairwise_1d[0, -1] = 0
    print(pairwise_1d)
    result_1d = cut_simple(unaries, pairwise_1d)
    plt.subplot(141, title="original")
    plt.imshow(x, interpolation="nearest")
    plt.subplot(142, title="thresholded unaries")
    plt.imshow(x_thresh, interpolation="nearest")
    plt.subplot(143, title="potts potentials")
    plt.imshow(result, interpolation="nearest")
    plt.subplot(144, title="1d topology potentials")
    plt.imshow(result_1d, interpolation="nearest")
    plt.show()
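The pairwise_1d matrix printed above works out to -23 on the diagonal, -8 for adjacent labels and 0 for the 0-2 pair, so keeping the same label is cheapest and jumping directly between the outer stripes is most expensive. An equivalent construction from label distances, offered only as a readability sketch of the same matrix:

import numpy as np

# cost per label distance |i - j|: same label -23, adjacent labels -8, distance 2 costs 0
cost_by_distance = np.array([-23, -8, 0], dtype=np.int32)
i, j = np.ogrid[:3, :3]
pairwise_1d = cost_by_distance[np.abs(i - j)]   # identical to the matrix built above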
Example #4
    def test_label_costs_simple(self):
        """Test the label_costs argument with cut_simple."""
        unaries, pairwise, edges, expected = self.binary_data()
        # Give a slight preference to class 0
        unaries[:, :, 1] += 1

        result = cut_simple(unaries, pairwise, label_cost=1)
        self.assertTrue(np.array_equal(result, expected))

        # Try again with a very high label cost to collapse to a single label
        result = cut_simple(unaries, pairwise, label_cost=1000)
        self.assertTrue(np.array_equal(result, np.zeros_like(result)))
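The same label_cost behaviour can be sketched outside the test fixture; binary_data is not shown here, so the arrays below are illustrative stand-ins, and the import assumes a pygco binding that exposes the label_cost keyword, as the test above implies:

import numpy as np
from pygco import cut_simple

# Stand-in data (not the binary_data fixture): left half mildly prefers label 0,
# right half mildly prefers label 1.
unaries = np.zeros((8, 8, 2), dtype=np.int32)
unaries[:, :4, 1] = 5
unaries[:, 4:, 0] = 5
pairwise = -2 * np.eye(2, dtype=np.int32)       # mild Potts smoothing

both = cut_simple(unaries, pairwise, label_cost=1)     # cheap labels: both survive
one = cut_simple(unaries, pairwise, label_cost=1000)   # a very high per-label cost
# tends to collapse the solution onto a single label, as the test above asserts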
Example #5
    def predict(self, X):
        #self.cost = 2.
        self.unaryEnergy = np.ascontiguousarray(-self.MULTIPLIER*np.log(X)).astype('int32')
        nClass = self.unaryEnergy.shape[2]
        pairwiseEnergy = (self.MULTIPLIER*self.cost*(1-np.eye(nClass))).astype('int32')
        return gco.cut_simple(unary_cost=self.unaryEnergy,
                              pairwise_cost=pairwiseEnergy,
                              n_iter=self.INFERENCE_NITER, algorithm='swap')
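One caveat with the -log(X) conversion used in fit()/predict(): probabilities that are exactly zero become inf before the int32 cast. The later examples guard against this by adding a small epsilon (Examples #8, #10) or clipping (Examples #9, #14); a sketch of the same guard, with an assumed MULTIPLIER of 100:

import numpy as np

MULTIPLIER = 100                     # illustrative; the class constant is not shown here
X = np.zeros((4, 4, 3))              # fake probabilities containing exact zeros
X[..., 0] = 1.0

X_safe = np.clip(X, 1e-4, 1.0)       # clip (or add an epsilon) before the log
energy = -MULTIPLIER * np.log(X_safe)
assert np.isfinite(energy).all()     # no overflow from log(0)
unary = np.ascontiguousarray(energy).astype('int32')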
Example #6
def original_example(img1, img2, max_disp):
    # Code was based on this example
    unaries = (unaries_ssd(img1, img2, max_disp) * 100).astype(np.int32)
    n_disps = unaries.shape[2]

    newshape = unaries.shape[:2]
    potts_cut1 = cut_simple(unaries, -5 * np.eye(n_disps, dtype=np.int32))
    potts_cut2 = cut_simple(unaries,
                            -5 * np.eye(n_disps, dtype=np.int32),
                            n_iter=10)
    x, y = np.ogrid[:n_disps, :n_disps]
    one_d_topology = np.abs(x - y).astype(np.int32).copy("C")

    one_d_cut1 = cut_simple(unaries, 5 * one_d_topology)
    one_d_cut2 = cut_simple(unaries, 5 * one_d_topology, n_iter=10)
    return one_d_cut1, one_d_cut2, potts_cut1, potts_cut2
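The |x - y| matrix above penalizes disparity jumps linearly; a common variant (not used in this example, included only as a sketch) truncates that penalty so large discontinuities are not punished without bound:

import numpy as np

n_disps = 16                                   # illustrative number of disparities
trunc = 3                                      # truncation threshold (assumption)
x, y = np.ogrid[:n_disps, :n_disps]
one_d_topology = np.abs(x - y).astype(np.int32).copy("C")
truncated_linear = np.minimum(one_d_topology, trunc).astype(np.int32).copy("C")
# truncated_linear can be passed to cut_simple in place of 5 * one_d_topology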
Example #7
    def test_cut_simple(self):
        """Test the cut_simple method."""
        unaries, pairwise, edges, expected = self.binary_data()

        result = cut_simple(unaries, pairwise)

        self.assertTrue(np.array_equal(result, expected))
Example #8
def Post_Processing(prob_map, height, width, num_classes, y_test,
                    test_indexes):
    Gamma = [10, 20, 30, 50, 100, 150, 200]
    #    Gamma = [20]
    SL = np.zeros([len(Gamma), height, width])
    SAE = np.zeros([len(Gamma), num_classes])
    SA = np.zeros([len(Gamma)])
    for j in range(len(Gamma)):
        gamma = Gamma[j]
        unaries = (-gamma * np.log(prob_map + 1e-4)).astype(
            np.int32)  # 20   15
        una = unaries_reshape(unaries, width, height, num_classes)
        one_d_topology = (np.ones(num_classes) - np.eye(num_classes)).astype(
            np.int32).copy("C")
        Seg_Label = cut_simple(una, 100 * one_d_topology)  # 30   200
        Seg_Label = Seg_Label + 1
        seg_Label = Seg_Label.transpose().flatten()
        test_indexes = test_indexes.astype(np.int32)
        cmat = confusion_matrix(y_test, seg_Label[test_indexes]).astype(float)
        a = cmat.diagonal()
        b = cmat.sum(axis=1)
        seg_accuracy_each = a / b
        seg_accuracy = accuracy_score(y_test, seg_Label[test_indexes])
        SL[j, :, :] = Seg_Label
        SAE[j, :] = seg_accuracy_each
        SA[j] = seg_accuracy
    SA = SA.tolist()
    max_ind = SA.index(max(SA))
    seg_accuracy = SA[max_ind]
    Seg_Label = SL[max_ind, :, :]
    seg_accuracy_each = SAE[max_ind, :]
    return Seg_Label, seg_accuracy, seg_accuracy_each
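The confusion-matrix bookkeeping inside Post_Processing can be factored into a small helper; the name per_class_accuracy is hypothetical, but the computation mirrors the diagonal-over-row-sum step above:

import numpy as np
from sklearn.metrics import confusion_matrix

def per_class_accuracy(y_true, y_pred):
    # rows = true classes, so diagonal / row sum = per-class accuracy (recall)
    cmat = confusion_matrix(y_true, y_pred).astype(float)
    return cmat.diagonal() / cmat.sum(axis=1)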
Example #9
    def segment_simple_cut(self):
        # noinspection PyUnresolvedReferences
        from pygco import cut_simple

        unaries = -np.log(np.array([np.clip(self.posterior_images[o],a_min=0.01, a_max=1.0) for o in self.candidate_objects])).transpose((1, 2, 0))
        num_labels = unaries.shape[2]
        p_same_label = 0.99
        pairwise = -np.log(1 - p_same_label) * (1 - np.eye(num_labels)) - np.log(p_same_label) * np.eye(num_labels)
        k = 10  # scaling factor for potentials to reduce aliasing because the potentials need to be converted to integers
        self.segmentation = cut_simple(np.copy((k * unaries).astype('int32'), order='C'), np.copy((k * pairwise).astype('int32'), order='C'))
Example #10
def Post_Processing(prob_map, height, width, num_classes, y_test,
                    test_indexes):
    unaries = (-100 * np.log(prob_map + 1e-4)).astype(np.int32)
    una = unaries_reshape(unaries, width, height, num_classes)
    one_d_topology = (np.ones(num_classes) - np.eye(num_classes)).astype(
        np.int32).copy("C")
    Seg_Label = cut_simple(una, 50 * one_d_topology)
    Seg_Label = Seg_Label + 1
    seg_Label = Seg_Label.transpose().flatten()
    seg_accuracy = accuracy_score(y_test, seg_Label[test_indexes])
    return Seg_Label, seg_accuracy
Example #11
def segmentation(depth_file, opts):
  depth = preprocess_depth(depth_file, opts['preprocess_depth'])
  # Potts pairwise potential used for graph cut algorithm
  pairwise = (-100 * np.eye(2)).astype(np.int32)
  depth = depth.astype(np.int32)
  depth = depth - opts['board_depth']
  depth = (np.dstack([depth, -depth]).copy("C")).astype(np.int32)
  segmask = cut_simple(depth.astype(np.int32), pairwise.astype(np.int32))
  # Invert mask
  segmask = (segmask+1) % 2
  return segmask
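The dstack([depth, -depth]) construction above turns the signed offset from the board plane into a two-label unary volume. A self-contained sketch with synthetic data (preprocess_depth and the opts dict are not shown, so a fake depth map and an assumed board_depth stand in for them):

import numpy as np
from pygco import cut_simple  # as imported in Examples #9 and #14

depth = np.full((32, 32), 1010, dtype=np.int32)   # background slightly behind the board
depth[10:20, 10:20] = 960                         # object in front of the board
board_depth = 1000                                # assumed calibration value

diff = depth - board_depth                        # negative in front of the board
unaries = np.dstack([diff, -diff]).copy("C").astype(np.int32)
pairwise = (-100 * np.eye(2)).astype(np.int32)    # Potts smoothing, as above

segmask = cut_simple(unaries, pairwise)
segmask = (segmask + 1) % 2                       # invert so the foreground object is 1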
Example #12
def example_binary():
    # generate trivial data
    x = np.ones((10, 10))
    x[:, 5:] = -1
    x_noisy = x + np.random.normal(0, 0.8, size=x.shape)
    x_thresh = x_noisy > 0.0

    # create unaries
    unaries = x_noisy
    # as we convert to int, we need to multiply to get sensible values
    unaries = (10 * np.dstack([unaries, -unaries]).copy("C")).astype(np.int32)
    # create potts pairwise
    pairwise = -10 * np.eye(2, dtype=np.int32)

    # do simple cut
    result = cut_simple(unaries, pairwise)

    # generalized Potts potentials
    pix_nums = np.r_[: 10 * 10].reshape(10, 10)
    pairwise_cost = dict(
        [(tuple(sorted(pair)), 30) for pair in zip(pix_nums[:, :-1].flatten(), pix_nums[:, 1:].flatten())]
        + [(tuple(sorted(pair)), 0) for pair in zip(pix_nums[:-1, :].flatten(), pix_nums[1:, :].flatten())]
    )
    result_gp = cut_simple_gen_potts(unaries, pairwise_cost)

    # use the general graph algorithm
    # first, we construct the grid graph
    inds = np.arange(x.size).reshape(x.shape)
    horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
    vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
    edges = np.vstack([horz, vert]).astype(np.int32)

    # we flatten the unaries
    result_graph = cut_from_graph(edges, unaries.reshape(-1, 2), pairwise)

    # generalized Potts potentials
    result_graph_gp = cut_from_graph_gen_potts(unaries.reshape(-1, 2), pairwise_cost)

    # plot results
    plt.subplot(231, title="original")
    plt.imshow(x, interpolation="nearest")
    plt.subplot(232, title="noisy version")
    plt.imshow(x_noisy, interpolation="nearest")
    plt.subplot(233, title="rounded to integers")
    plt.imshow(unaries[:, :, 0], interpolation="nearest")
    plt.subplot(234, title="thresholding result")
    plt.imshow(x_thresh, interpolation="nearest")
    plt.subplot(235, title="cut_simple")
    plt.imshow(result, interpolation="nearest")
    plt.subplot(236, title="cut_from_graph")
    plt.imshow(result_graph.reshape(x.shape), interpolation="nearest")

    plt.show()
Example #13
def potts_example():
    img1 = np.asarray(Image.open("scene1.row3.col1.ppm")) / 255.
    img2 = np.asarray(Image.open("scene1.row3.col2.ppm")) / 255.
    unaries = (stereo_unaries(img1, img2) * 100).astype(np.int32)
    n_disps = unaries.shape[2]

    newshape = unaries.shape[:2]
    potts_cut = cut_simple(unaries, -5 * np.eye(n_disps, dtype=np.int32))
    x, y = np.ogrid[:n_disps, :n_disps]
    one_d_topology = np.abs(x - y).astype(np.int32).copy("C")

    one_d_cut = cut_simple(unaries, 5 * one_d_topology)
    plt.subplot(231, xticks=(), yticks=())
    plt.imshow(img1)
    plt.subplot(232, xticks=(), yticks=())
    plt.imshow(img2)
    plt.subplot(233, xticks=(), yticks=())
    plt.imshow(np.argmin(unaries, axis=2), interpolation='nearest')
    plt.subplot(223, xticks=(), yticks=())
    plt.imshow(potts_cut.reshape(newshape), interpolation='nearest')
    plt.subplot(224, xticks=(), yticks=())
    plt.imshow(one_d_cut.reshape(newshape), interpolation='nearest')
    plt.show()
Example #14
    def segment_simple_cut(self):
        # noinspection PyUnresolvedReferences
        from pygco import cut_simple

        unaries = -np.log(
            np.array([
                np.clip(self.posterior_images[o], a_min=0.01, a_max=1.0)
                for o in self.candidate_objects
            ])).transpose((1, 2, 0))
        num_labels = unaries.shape[2]
        p_same_label = 0.99
        pairwise = -np.log(1 - p_same_label) * (
            1 - np.eye(num_labels)) - np.log(p_same_label) * np.eye(num_labels)
        k = 10  # scaling factor for potentials to reduce aliasing because the potentials need to be converted to integers
        self.segmentation = cut_simple(
            np.copy((k * unaries).astype('int32'), order='C'),
            np.copy((k * pairwise).astype('int32'), order='C'))
Example #15
	def graphcut(self,label_costs, l=100):
		num_classes = len(self.colors)
		pairwise_costs = np.zeros((num_classes, num_classes))
		for ii in range(num_classes):
			for jj in range(num_classes):
				c1 = np.array(self.colors[ii])
				c2 = np.array(self.colors[jj])
				pairwise_costs[ii,jj] = np.linalg.norm(c1-c2)
		label_costs_int32 = np.ascontiguousarray(label_costs).astype('int32')
		pairwise_costs_int32 = (l*pairwise_costs).astype('int32')
		if self.sobel:
			edges = self.get_edges()
			vv_int32 = edges.astype('int32')
			vh_int32 = edges.astype('int32')
			new_labels = pygco.cut_simple_vh(label_costs_int32, pairwise_costs_int32, vv_int32, vh_int32, n_iter=10, algorithm='swap') 
		else:
			new_labels = pygco.cut_simple(label_costs_int32, pairwise_costs_int32, n_iter=10, algorithm='swap') 
		return new_labels
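The nested loop over color pairs in graphcut can be written as a single vectorized expression; this is only an equivalent construction of the same pairwise_costs matrix, assuming self.colors is a list of RGB tuples:

import numpy as np

colors = np.array([(255, 0, 0), (0, 255, 0), (0, 0, 255)], dtype=float)  # illustrative palette
# pairwise cost = Euclidean distance between the class colors, as in the loop above
pairwise_costs = np.linalg.norm(colors[:, None, :] - colors[None, :, :], axis=2)
pairwise_costs_int32 = (100 * pairwise_costs).astype('int32')            # l = 100, the default above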
Example #16
def example_binary():
    # generate trivial data
    x = np.ones((10, 10))
    x[:, 5:] = -1
    x_noisy = x + np.random.normal(0, 0.8, size=x.shape)
    x_thresh = x_noisy > .0

    # create unaries
    unaries = x_noisy
    # as we convert to int, we need to multiply to get sensible values
    unaries = (10 * np.dstack([unaries, -unaries]).copy("C")).astype(np.int32)
    # create potts pairwise
    pairwise = -10 * np.eye(2, dtype=np.int32)

    # do simple cut
    result = cut_simple(unaries, pairwise)
    
    print(unaries)
    print(result)
Example #17
def example_binary():
    # generate trivial data
    x = np.ones((10, 10))
    x[:, 5:] = -1
    x_noisy = x + np.random.normal(0, 0.8, size=x.shape)
    x_thresh = x_noisy > .0

    # create unaries
    unaries = x_noisy
    # as we convert to int, we need to multiply to get sensible values
    unaries = (10 * np.dstack([unaries, -unaries]).copy("C")).astype(np.int32)
    # create potts pairwise
    pairwise = -10 * np.eye(2, dtype=np.int32)

    # do simple cut
    result = cut_simple(unaries, pairwise)

    # use the general graph algorithm
    # first, we construct the grid graph
    inds = np.arange(x.size).reshape(x.shape)
    horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
    vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
    edges = np.vstack([horz, vert]).astype(np.int32)

    # we flatten the unaries
    result_graph = cut_from_graph(edges, unaries.reshape(-1, 2), pairwise)

    # plot results
    plt.subplot(231, title="original")
    plt.imshow(x, interpolation='nearest')
    plt.subplot(232, title="noisy version")
    plt.imshow(x_noisy, interpolation='nearest')
    plt.subplot(233, title="rounded to integers")
    plt.imshow(unaries[:, :, 0], interpolation='nearest')
    plt.subplot(234, title="thresholding result")
    plt.imshow(x_thresh, interpolation='nearest')
    plt.subplot(235, title="cut_simple")
    plt.imshow(result, interpolation='nearest')
    plt.subplot(236, title="cut_from_graph")
    plt.imshow(result_graph.reshape(x.shape), interpolation='nearest')

    plt.show()
Example #18
def graph_cut(img_list, gaussian_size, unary_scale, pair_scale, n_iter):
    imGray_list = []
    for img in img_list:
        imGray_list.append(cvt_to_grayscale(img))

    n = len(imGray_list)
    unary_cost = []
    ii, jj = np.meshgrid(range(n), range(n))
    pairwise_cost = np.abs(ii - jj) * pair_scale

    for imGray in imGray_list:
        gray_img = imGray.astype(np.float32) / 255.
        grad = np.exp(-(cv2.Sobel(gray_img, cv2.CV_32F, 1, 1)**2))
        unary_cost.append(
            cv2.GaussianBlur(grad,
                             (gaussian_size, gaussian_size), 0) * unary_scale)

    unary_cost = normalization(np.stack(unary_cost, axis=-1)) * unary_scale
    graph_img = cut_simple(unary_cost.astype(np.int32),
                           pairwise_cost.astype(np.int32), n_iter)

    return graph_img
Example #19
def some_cut(unaries, binaries, K, n_iter):
    # Performs the graph cut
    n_disps = unaries.shape[2]
    newshape = unaries.shape[:2]
    cut = cut_simple(unaries, K * binaries, n_iter)
    return cut
def regularized_fine(lenses, fine_costs, disp, penalty1, penalty2, max_cost, conf_tec='mlm', conf_sigma=0.3, min_thresh=2.0, eps=0.0000001):

    fine_depths = dict()
    fine_depths_interp = dict()
    fine_depths_val = dict()
    wta_depths = dict()
    wta_depths_interp = dict()
    wta_depths_val = dict()
    num_lenses = len(lenses)
    confidence = dict()
    
    for i, l in enumerate(fine_costs):
        
        if i%1000==0:
            print("Regularization: Processing lens {0}/{1}".format(i, num_lenses))
        lens = lenses[l]

        # prepare the cost shape: disparity axis is third axis (index [2] instead of [0])
        F = np.flipud(np.rot90(fine_costs[l].T))

        # the regularized cost volume
        sgm_cost = rtxsgm.sgm(lenses[l].img, F, lens.mask, penalty1, penalty2, False, max_cost)
        
        # plain minima
        fine_depths[l] = np.argmin(sgm_cost, axis=2)
        
        # interpolated minima and values
        #fine_depths_interp[l], fine_depths_val[l] = rtxdisp.cost_minima_interp(sgm_cost, disp)

        # cost should be C-Contiguous
        sgm_cost_c = sgm_cost.copy(order='C')
        # also in int32
        sgm_cost_c_int = (sgm_cost_c  * 1000).astype(np.int32)
        # number of disparities
        n_disps = F.shape[2]

        cut = 'unary'
        if cut == 'potts':
            # potts model
            depth_cut = cut_simple(sgm_cost_c_int, -5 * np.eye(sgm_cost.shape[2], dtype=np.int32))
        elif cut == 'unary':
            # unary model
            x, y = np.ogrid[:n_disps, :n_disps]
            one_d_topology = np.abs(x - y).astype(np.int32).copy("C")
            #pdb.set_trace()
            depth_cut = cut_simple(sgm_cost_c_int, 5 * one_d_topology)
        else:
            print("No cut recognised. Do you wish to use potts or unary model?")
            pdb.set_trace()

        fine_depths_interp[l] = depth_cut

        #if i%1000==0:
        #    print("max interp: {0}".format(np.amax(fine_depths_interp[l])))

        # plain winner takes all minima
        wta_depths[l] = np.argmin(F, axis=2)
        
        # interpolated minima and values from the unregularized cost volume
        wta_depths_interp[l], wta_depths_val[l] = rtxdisp.cost_minima_interp(F, disp)

        fine_depths_val[l] = wta_depths_val[l]
        ### CALCULATE THE CONFIDENCE USING A METHOD
        minimum_costs = np.min(sgm_cost, axis=2)

        #pdb.set_trace()
        if conf_tec == 'oev':
            # elementwise np.maximum/np.minimum so the clamping works on arrays
            num_denom = 0
            dmax = np.max(sgm_cost)
            dmin = np.min(sgm_cost)
            denom_denom = np.maximum(sgm_cost - minimum_costs[:,:,None], 1)
            for n in range(0, sgm_cost.shape[2]):
                index_map = np.ones((sgm_cost.shape[0], sgm_cost.shape[1])) * n
                tmp_num = np.power(np.maximum(np.minimum(index_map - fine_depths[l], (dmax - dmin)/3), 0), 2)
                num_denom += tmp_num / denom_denom[:,:,n]
            confidence[l] = 1 / num_denom
        elif conf_tec == 'rtvbf':
            confidence[l] = np.sum(np.exp(-((sgm_cost - fine_depths_val[l][:, :, None])**2) / conf_sigma), axis=2) - 1
            # confidence measure used in "Real-Time Visibility-Based Fusion of Depth Maps"
            # subtract 1 at the end since the "real" optimum is included in the vectorized operations

            # confidence[l][fine_depths_val[l] > min_thresh] = 0.0
            #TODO: calculate wta confidence, scale the sigma accordingly
            #confidence[l] = np.sum(np.exp(-((F - wta_depths_val[l][:, :, None])**2) / conf_sigma), axis=2) - 1
            
            # avoid overflow in division
            ind = confidence[l] > eps
            confidence[l][confidence[l] <= 0] = 0.0
            confidence[l][ind] = 1.0 / confidence[l][ind]
        else: 
            # TODO
            # check overflow in exp 
            # and division for zero
            #conf_tec == 'mlm':
            exp_cost = np.exp(-minimum_costs/(2*np.power(conf_sigma,2)))
            denom_cost = np.sum(np.exp(-sgm_cost/(2*np.power(conf_sigma,2))), axis=2)
            #zeros = denom_cost == 0
            #denom_cost = denom_cost + zeros * np.max(denom_cost)
            confidence_map = exp_cost / denom_cost
            confidence_map[np.isnan(confidence_map)] = 0
            confidence[l] = confidence_map

    return fine_depths, fine_depths_interp, fine_depths_val, wta_depths, wta_depths_interp, wta_depths_val, confidence