Exemplo n.º 1
0
def test_new():
    """A freshly built DisjointSet keeps every element in its own singleton."""
    ds = disjointset.DisjointSet(10)
    assert ds.get_num_sets() == 10
    # Sampled elements all start as size-1 sets.
    for elem in (0, 2, 9):
        assert ds.get_size_of_set(elem) == 1
    # An element is always in the same set as itself ...
    assert ds.are_in_same_set(0, 0)
    # ... but initially in a different set from every other element.
    for a, b in ((0, 1), (9, 3)):
        assert not ds.are_in_same_set(a, b)
 def test_new(self):
     """A new 10-element DisjointSet has 10 singleton sets."""
     ds = disjointset.DisjointSet(10)
     self.assertEqual(ds.get_num_sets(), 10)
     # Sampled elements each start alone in a size-1 set.
     for elem in (0, 2, 9):
         self.assertEqual(ds.get_size_of_set(elem), 1)
     # Reflexive membership holds; distinct elements are not yet joined.
     self.assertTrue(ds.are_in_same_set(0, 0))
     for a, b in ((0, 1), (9, 3)):
         self.assertFalse(ds.are_in_same_set(a, b))
Exemplo n.º 3
0
def test_against_naive_randomly():
    """Fuzz-test DisjointSet by mirroring random operations on a naive model.

    Both structures receive the identical operation stream, so every query
    must agree at every step.
    """
    num_trials = 300
    num_iterations = 1000
    num_elems = 100

    for _ in range(num_trials):
        reference = NaiveDisjointSet(num_elems)
        subject = disjointset.DisjointSet(num_elems)
        for _ in range(num_iterations):
            a = random.randrange(num_elems)
            b = random.randrange(num_elems)
            assert subject.get_size_of_set(a) == reference.get_size_of_set(a)
            assert subject.are_in_same_set(a, b) == reference.are_in_same_set(a, b)
            # Merge only occasionally so set structure evolves gradually.
            if random.random() < 0.1:
                assert subject.merge_sets(a, b) == reference.merge_sets(a, b)
            assert reference.get_num_sets() == subject.get_num_sets()
            # Rarely validate internal invariants mid-run (it is expensive).
            if random.random() < 0.001:
                subject.check_structure()
        subject.check_structure()
Exemplo n.º 4
0
    def test_against_naive_randomly(self):
        """Fuzz-test DisjointSet by mirroring random operations on a naive model.

        Both structures receive the identical operation stream, so every
        query must agree at every step.
        """
        num_trials = 300
        num_iterations = 1000
        num_elems = 100

        for _ in range(num_trials):
            reference = NaiveDisjointSet(num_elems)
            subject = disjointset.DisjointSet(num_elems)
            for _ in range(num_iterations):
                a = random.randrange(num_elems)
                b = random.randrange(num_elems)
                self.assertEqual(subject.get_size_of_set(a),
                                 reference.get_size_of_set(a))
                self.assertEqual(subject.are_in_same_set(a, b),
                                 reference.are_in_same_set(a, b))
                # Merge only occasionally so set structure evolves gradually.
                if random.random() < 0.1:
                    self.assertEqual(subject.merge_sets(a, b),
                                     reference.merge_sets(a, b))
                self.assertEqual(reference.get_num_sets(),
                                 subject.get_num_sets())
                # Rarely validate internal invariants mid-run (expensive).
                if random.random() < 0.001:
                    subject.check_structure()
            subject.check_structure()
    def test_big_merge(self):
        """Merge adjacent blocks level by level so set sizes double each pass,
        then spot-check random pairs against the expected block structure."""
        max_rank = 20
        num_checks = 10000

        size = 1 << max_rank  # element count grows exponentially with rank
        ds = disjointset.DisjointSet(size)
        for level in range(max_rank):
            half = 1 << level
            step = half * 2
            for base in range(0, size, step):
                self.assertFalse(ds.are_in_same_set(base, base + half))
                self.assertTrue(ds.merge_sets(base, base + half))
            # Every set now has exactly 2^(level+1) members.

            # Random pairs are in the same set iff they share a step-aligned block.
            block_mask = -step
            for _ in range(num_checks):
                a = random.randrange(size)
                b = random.randrange(size)
                self.assertEqual(ds.are_in_same_set(a, b),
                                 (a & block_mask) == (b & block_mask))
    def test_merge(self):
        """Check merge_sets return values, set counts, and transitive unions."""
        ds = disjointset.DisjointSet(10)

        # Two independent singleton merges; each drops the set count by one.
        for a, b, sets_left in ((0, 1, 9), (2, 3, 8)):
            self.assertTrue(ds.merge_sets(a, b))
            ds.check_structure()
            self.assertEqual(ds.get_num_sets(), sets_left)
            self.assertTrue(ds.are_in_same_set(a, b))

        # Re-merging an already-joined pair is a no-op that returns False.
        self.assertFalse(ds.merge_sets(2, 3))
        ds.check_structure()
        self.assertEqual(ds.get_num_sets(), 8)
        self.assertFalse(ds.are_in_same_set(0, 2))

        # Linking the two pairs makes all four elements mutually connected.
        self.assertTrue(ds.merge_sets(0, 3))
        ds.check_structure()
        self.assertEqual(ds.get_num_sets(), 7)
        for x, y in ((0, 2), (3, 0), (1, 3)):
            self.assertTrue(ds.are_in_same_set(x, y))
Exemplo n.º 7
0
def test_merge():
    """Check merge_sets return values, set counts, and transitive unions."""
    ds = disjointset.DisjointSet(10)

    # Two independent singleton merges; each drops the set count by one.
    for a, b, sets_left in ((0, 1, 9), (2, 3, 8)):
        assert ds.merge_sets(a, b)
        ds.check_structure()
        assert ds.get_num_sets() == sets_left
        assert ds.are_in_same_set(a, b)

    # Re-merging an already-joined pair is a no-op that returns False.
    assert not ds.merge_sets(2, 3)
    ds.check_structure()
    assert ds.get_num_sets() == 8
    assert not ds.are_in_same_set(0, 2)

    # Linking the two pairs makes all four elements mutually connected.
    assert ds.merge_sets(0, 3)
    ds.check_structure()
    assert ds.get_num_sets() == 7
    for x, y in ((0, 2), (3, 0), (1, 3)):
        assert ds.are_in_same_set(x, y)
Exemplo n.º 8
0
        # Accumulate the per-channel histogram distance between superpixels
        # i and j; cv2.compareHist needs flat float32 arrays.
        # NOTE(review): comparison method is the enclosing-scope `method` --
        # its semantics (distance vs. similarity) are not visible here.
        hist1 = red[i].ravel().astype('float32')
        hist2 = red[j].ravel().astype('float32')
        diff += abs(cv2.compareHist(hist1, hist2, method))

        hist1 = green[i].ravel().astype('float32')
        hist2 = green[j].ravel().astype('float32')
        diff += abs(cv2.compareHist(hist1, hist2, method))

        hist1 = blue[i].ravel().astype('float32')
        hist2 = blue[j].ravel().astype('float32')
        diff += abs(cv2.compareHist(hist1, hist2, method))

        # Total colour distance between segments i and j.
        dist[i][j] = diff

# Find comparable superpixels
# One disjoint-set slot per segment label; numSegments + 1 suggests labels
# are 1-based -- TODO confirm against the segmentation step.
ds = disjointset.DisjointSet(numSegments + 1)
print 'Original superpixels:', ds.get_num_sets()

# Scan each pixel's horizontal neighbours; when a neighbour belongs to a
# different superpixel, copy the precomputed label-pair distance into cdist.
for i in range(row):
    for j in range(col):
        # Neighbour to the left (same row, previous column).
        a = i
        b = j - 1
        if (a >= 0 and a < row and b >= 0 and b < col):
            if (not superpixels[i][j] == superpixels[a][b]):
                cdist[superpixels[i][j]][superpixels[a][b]] = dist[
                    superpixels[i][j]][superpixels[a][b]]

        # Neighbour to the right (same row, next column).
        a = i
        b = j + 1
        if (a >= 0 and a < row and b >= 0 and b < col):
            if (not superpixels[i][j] == superpixels[a][b]):
Exemplo n.º 9
0
 def _zero_func():
     """Build and return an empty DisjointSet.

     Presumably used as a zero/default factory -- confirm with the caller.
     """
     from disjointset import DisjointSet
     return DisjointSet()
Exemplo n.º 10
0
import disjointset

if __name__ == '__main__':
    disjoint = disjointset.DisjointSet(dict())
    disjoint.create_set(10)
    disjoint.create_set(20)
    disjoint.create_set(30)
    disjoint.create_set(40)
    disjoint.create_set(60)
    disjoint.create_set(70)
    disjoint.create_set(80)
    disjoint.create_set(90)
    disjoint.create_set(150)

    disjoint.union(10, 20)
    disjoint.union(20, 30)
    disjoint.union(30, 60)

    print disjoint.find_set(20)
    print disjoint.find_set(10)
    print disjoint.find_set(30)
    print disjoint.find_set(60)

    print disjoint.find_set(150)

    disjoint.union(150, 10)
    print disjoint.find_set(150)
Exemplo n.º 11
0
    def goemans(self, cf, vertexes):
        F = []  #holding solution

        class QueueElement(object):
            def __init__(self, i, j, priority):
                self.i = i
                self.j = j
                self.priority = priority

            def __hash__(self):
                return hash(str(self.i) + ' ' + str(self.j))

            def __eq__(self, other):
                return hash(self) == hash(other)

            def __ne__(self, other):
                return (self.i != other.i) or (self.j != other.i)

            def __lt__(self, other):
                return self.priority < other.priority

            def __str__(self):
                return "QElem (%s,%s) [%s]" % (self.i, self.j, self.priority)

            def __repr__(self):
                return "QueueElement(%s,%s, %s)" % (self.i, self.j,
                                                    self.priority)

        #declare edges priority queue and initialize it
        queue = variablepriorityqueue.VariablePriorityQueue()
        for line in vertexes:
            for column in vertexes:
                if line != column:
                    weight = float(self.matrix[line][column])
                    edge = QueueElement(line, column, weight / 2)
                    queue.add(edge)

        #define union find structure and initialize it
        C = disjointset.DisjointSet()
        f = {
        }  #also store the values of f function (access it only through nodes representatives)
        d = {}  #store nodes weights
        for vertex in vertexes:
            C.add(vertex)
            f[vertex] = 1
            d[vertex] = 0

        #store the number of trees C_r such that f(C_r) = 1
        #we need this info to check quickly for then end of the computations
        #(when it reaches 1)
        number_of_f1_trees = len(vertexes)

        #this is the function that computes priority of an edge
        def epsilon(i, j, d, f):
            sum_f = f[C.find(i)] + f[C.find(j)]
            if sum_f == 0:
                sum_f = 0.00001  # 1/100.000
            return (float(self.matrix[i][j]) - d[i] - d[j]) / (sum_f)

        #we also need a matrix that holds the best edge between two trees
        #can only be accessed through nodes representatives
        #we encode the edge to be able to store it in an integer
        best_edges = [[
            0 if line == column else line * self.size + column
            for column in xrange(self.size)
        ] for line in xrange(self.size)]

        #initialize lower bound
        LB = 0

        #initialize time
        #in priority queue the real priority we want
        #is epsilon(i,j)
        #however, it is too costly to store since any event
        #would require modifying all priorities
        #we instead use a global time T
        #and define priorities as epsilon(i,j) + T
        #see paper for more details
        T = 0

        print "iteration starting ..."
        #main loop
        while number_of_f1_trees > 1:
            #take best edge
            best_edge = queue.get()
            if C.find(best_edge.i) == C.find(best_edge.j):
                continue  #skip edges in same tree

            if best_edges[C.find(best_edge.i)][C.find(
                    best_edge.j)] != best_edge.i * self.size + best_edge.j:
                continue  #skip edges which we do not update because not best ones
            #add it to solution
            F.append([best_edge.i, best_edge.j])
            e = epsilon(best_edge.i, best_edge.j, d, f)
            #update time
            T += e

            #update d
            for v in vertexes:
                d[v] += e * f[C.find(v)]

            #update lower bound
            for r in C.retrieve():
                LB += e * f[r]

            #now last and complex part of the algorithm, do the fusion and weights update
            #start by updating number of trees with f=1
            r_i = C.find(best_edge.i)
            r_j = C.find(best_edge.j)
            v_i = f[r_i]
            v_j = f[r_j]

            new_f_value = cf(v_i, v_j)

            #compute new amount of trees with f value equel to 1
            if v_i + v_j > new_f_value:
                number_of_f1_trees -= 2 - cf(1, 1)  #one or two less
            #now, fuse
            C.union(r_i, r_j)
            #update f
            r_union = C.find(r_i)

            f[r_union] = new_f_value

            #modify best edges going out of r_union tree
            #start with line
            for column in vertexes:
                coded_edge1 = best_edges[r_i][column]
                coded_edge2 = best_edges[r_j][column]
                edge1 = [coded_edge1 // self.size, coded_edge1 % self.size]
                edge2 = [coded_edge2 // self.size, coded_edge2 % self.size]
                e1 = epsilon(edge1[0], edge1[1], d, f)
                e2 = epsilon(edge2[0], edge2[1], d, f)
                best_edges[r_union][
                    column] = coded_edge1 if e1 < e2 else coded_edge2

            #continue with column
            for line in vertexes:
                coded_edge1 = best_edges[line][r_i]
                coded_edge2 = best_edges[line][r_j]
                edge1 = [coded_edge1 // self.size, coded_edge1 % self.size]
                edge2 = [coded_edge2 // self.size, coded_edge2 % self.size]
                e1 = epsilon(edge1[0], edge1[1], d, f)
                e2 = epsilon(edge2[0], edge2[1], d, f)
                best_edges[line][
                    r_union] = coded_edge1 if e1 < e2 else coded_edge2

            #final step : modify priority queue
            #loop on all best edges leaving r_union tree
            #all edges leaving r_union tree which are not best
            #are therefore NOT UPDATED and will have ERRONEOUS priorities
            #this is why we filter edges at beginning of the loop to keep only best ones
            for column in vertexes:
                if C.find(column) != r_union and best_edges[r_union][
                        column] == column:
                    coded_edge = best_edges[r_union][column]
                    edge = [coded_edge // self.size, coded_edge % self.size]
                    priority = epsilon(r_union, column, d, f) + T
                    changing_edge = QueueElement(r_union, column, priority)
                    queue.update(changing_edge)
        return F