Example #1
def get_hog_locations_path(f, data, page_path, number_of_blocks):
    """ gets the hog locations from the annotated image, or calculates them, and
		them to file, if that is not possible

	Note: this is only necessary when some hog features are classified as image """

    # If there are images in this page, get their location:
    # TODO: Uncommenting this can cause problems due to faulty saved hog
    # locations. This should be fixed sometime.
    # if data.has_key('hog_locations') and \
    # 	data['hog_locations'].has_key(number_of_blocks):
    # 	return data['hog_locations'][number_of_blocks]

    print 'calculating hog locations for %s in mode %s' % (f.name, f.mode)

    # otherwise, calculate them and save them
    image = misc.imread(page_path)

    hog_locations = calculate_hog_locations(image, number_of_blocks)

    # If needed, create the dictionary under 'hog_locations'
    if not data.has_key('hog_locations'):
        data['hog_locations'] = {}
    data['hog_locations'][number_of_blocks] = hog_locations

    # write the new data to the original file:
    f.seek(0)
    f.write(str(data))
    # Remove any remaining text from the previous file contents
    f.truncate()

    return hog_locations
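
A minimal usage sketch for this function, assuming the annotated data file stores the dict as its str() representation and that the stored values are plain Python literals; the file names and block counts below are made up for illustration:

# Hypothetical usage sketch (paths, block counts and the literal_eval round trip
# are assumptions, not part of the original project)
import ast

with open('annotations/page_0001.dat', 'r+') as f:
    data = ast.literal_eval(f.read() or '{}')
    hog_locations = get_hog_locations_path(f, data, 'pages/page_0001.png', (20, 10))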
Example #2
def get_hog_locations_path(f, data, page_path, number_of_blocks):
	""" gets the hog locations from the annotated image, or calculates them, and
		them to file, if that is not possible

	Note: this is only necessary when some hog features are classified as image """

	# If there are images in this page, get their location:
	# TODO: Uncommenting this can cause problems due to faulty saved hog
	# locations. This should be fixed sometime.
	# if data.has_key('hog_locations') and \
	# 	data['hog_locations'].has_key(number_of_blocks):
	# 	return data['hog_locations'][number_of_blocks]

	print 'calculating hog locations for %s in mode %s' % (f.name, f.mode)

	# otherwise, calculate them and save them
	image = misc.imread(page_path)

	hog_locations = calculate_hog_locations(image, number_of_blocks)

	# If needed, create the dictionary under 'hog_locations'
	if not data.has_key('hog_locations'):
		data['hog_locations'] = {}
	data['hog_locations'][number_of_blocks] = hog_locations

	# write the new data to the original file:
	f.seek(0)
	f.write(str(data))
	# Remove any remaining text from the previous file contents
	f.truncate()

	return hog_locations
Example #3
def get_hog_features_page(f, data, page_path, number_of_blocks):
	""" reads a vector of descriptors for an image. f is the file handle of the
	annotated data file, data is the data from that file, page_path the path
	to the image"""

	block_and_cells = (number_of_blocks, (1, 1))
	descriptor = None

	if data.has_key('hog_features') and \
		data['hog_features'].has_key(block_and_cells):
		print 'retrieving hog features for %s' % str(f.name)
		descriptor = data['hog_features'][block_and_cells]
	else:
		print 'calculating hog features for %s' % str(f.name)
		# Get the image nparray
		image = misc.imread(page_path)
		descriptor = calculate_hog(image, number_of_blocks)

		# If needed, create the dictionary in 'hog_features'
		if not(data.has_key('hog_features')) or \
				type(data['hog_features']) != dict:
			data['hog_features'] = {}

		data['hog_features'][block_and_cells] = descriptor

		# write the new data to the original file:
		f.seek(0)
		f.write(str(data))
		# Remove any remaining text from the previous file contents
		f.truncate()

	assert descriptor is not None, 'descriptor cannot be None'

	# Reshape the descriptors to the pystruct desired shape
	# We now have the 0'th index horizontal and the 1'th index
	# vertical
	descriptor.shape = (number_of_blocks[1], number_of_blocks[0], 8)
	# Transpose the first two axes, in order to get from x-y
	# coordinates to y-x coordinates
	descriptor = descriptor.transpose((1, 0, 2))

	return descriptor
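
The reshape and transpose at the end can be checked in isolation; a small sketch with a dummy descriptor (the block counts are arbitrary and only the shapes matter here):

# Standalone check of the reshape/transpose step (dummy data, arbitrary block counts)
import numpy as np

number_of_blocks = (4, 3)
descriptor = np.arange(4 * 3 * 8, dtype=float)                      # stand-in for calculate_hog output
descriptor.shape = (number_of_blocks[1], number_of_blocks[0], 8)    # (3, 4, 8)
descriptor = descriptor.transpose((1, 0, 2))                        # (4, 3, 8)
print descriptor.shape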
Example #4
def get_hog_features_page(f, data, page_path, number_of_blocks):
    """ reads a vector of descriptors for an image. f is the file handle of the
	annotated data file, data is the data from that file, page_path the path
	to the image"""

    block_and_cells = (number_of_blocks, (1, 1))
    descriptor = None

    if data.has_key('hog_features') and \
     data['hog_features'].has_key(block_and_cells):
        print 'retrieving hog features for %s' % str(f.name)
        descriptor = data['hog_features'][block_and_cells]
    else:
        print 'calculating hog features for %s' % str(f.name)
        # Get the image nparray
        image = misc.imread(page_path)
        descriptor = calculate_hog(image, number_of_blocks)

        # If needed, create the dictionary in 'hog_features'
        if not(data.has_key('hog_features')) or \
          type(data['hog_features']) != dict:
            data['hog_features'] = {}

        data['hog_features'][block_and_cells] = descriptor

        # write the new data to the original file:
        f.seek(0)
        f.write(str(data))
        # Remove any remaining text from the previous file contents
        f.truncate()

    assert descriptor is not None, 'descriptor cannot be None'

    # Reshape the descriptors to the pystruct desired shape
    # We now have the 0'th index horizontal and the 1'th index
    # vertical
    descriptor.shape = (number_of_blocks[1], number_of_blocks[0], 8)
    # Transpose the first two axes, in order to get from x-y
    # coordinates to y-x coordinates
    descriptor = descriptor.transpose((1, 0, 2))

    return descriptor
Example #5
def process_felzen(path, ADAP):
    img = cv2.imread(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    kernel2 = np.ones((9, 9), np.uint8)

    # Build a gray-level histogram of the page
    data = dict()
    for i in gray.ravel():
        if data.has_key(i):
            data[i] += 1
        else:
            data[i] = 1

    # Find the most frequent gray level between 130 and 240 and derive a
    # global threshold from it
    max = 0
    id = None
    for k in data.keys():
        if data[k] >= max and k < 240 and k > 130:
            id = k
            max = data[k]
    THRESH = 30
    id -= THRESH

    if ADAP:
        thresh = cv2.adaptiveThreshold(gray, 255,
                                       cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 55, 20)
    else:
        ret, thresh = cv2.threshold(gray, id, 255, cv2.THRESH_BINARY_INV)

    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=5)

    # Black out the background of the original image
    for i in range(opening.shape[0]):
        for j in range(opening.shape[1]):
            if opening[i][j] == 0:
                img[i][j] = (0, 0, 0)

    fx = segmentation.felzenszwalb(img, scale=100, sigma=0.8, min_size=50)

    # Collect the segment labels
    ss = set()
    for e in range(len(fx)):
        for ej in range(len(fx[0])):
            ss.add(fx[e][ej])

    img2 = np.zeros_like(img)
    img2[:, :, 0] = opening
    img2[:, :, 1] = opening
    img2[:, :, 2] = opening

    # Assign an RGB colour to every segment label
    c = 1
    dic = dict()
    dic[0] = (0, 0, 0)
    for r in range(1, 255, 1):
        for r2 in range(1, 255, 1):
            for r3 in range(1, 255, 1):
                dic[c] = (r, r2, r3)
                if c == len(ss) + 5:
                    break
                c += 1
            if c == len(ss) + 5:
                break
        if c == len(ss) + 5:
            break

    for e in range(len(fx)):
        for ej in range(len(fx[0])):
            img2[e][ej] = dic[fx[e][ej]]

    fx = np.uint8(img2)
    im = Image.fromarray(fx)
    return im

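For reference, the Felzenszwalb call used above can be exercised on its own; a minimal sketch with the same parameters (the input path is a placeholder):

# Minimal standalone Felzenszwalb sketch (placeholder input path)
import cv2
import numpy as np
from skimage import segmentation

img = cv2.imread('page.png')
fx = segmentation.felzenszwalb(img, scale=100, sigma=0.8, min_size=50)
print 'segments:', np.unique(fx).size
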
Example #6
def process_suzuki(path, THRESH, ADAP):
    img = cv2.imread(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    kernel2 = np.ones((9, 9), np.uint8)


    if ADAP:
        thresh = cv2.adaptiveThreshold(gray, 255,
                                       cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 55, 20)

    else:
        data = dict()
        for i in gray.ravel():
            if data.has_key(i):
                data[i] += 1
            else:
                data[i] = 1

        # Find the most frequent gray level between 130 and 240 and derive a
        # global threshold from it
        max = 0
        id = None
        for k in data.keys():
            if data[k] >= max and k < 240 and k > 130:
                id = k
                max = data[k]
        id -= THRESH
        ret, thresh = cv2.threshold(gray, id, 255, cv2.THRESH_BINARY_INV)

    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=5)
    opening2 = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=5)
    _, contours, hierarchy = cv2.findContours(opening.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    img2 = np.zeros_like(img)
    img2[:, :, 0] = opening
    img2[:, :, 1] = opening
    img2[:, :, 2] = opening

    cp = 10000
    cols = list()
    cols2 = set()
    for r in range(1, 255, 1):
        for r1 in range(1, 255, 1):
            for r2 in range(1, 255, 1):
                cols.append((r, r1, r2))
                cp-=1
                if cp==0:
                    break
            if cp == 0:
                break
        if cp == 0:
            break
    ind = 0

    shuffle(cols)
    for c in cols:
        cv2.drawContours(img2, contours, ind, c, -1)
        ind += 1
        if ind == len(contours):
            break

    for x in range(opening.shape[0]):
        for y in range(opening.shape[1]):
            if opening[x][y] == 0:
                img2[x][y] = (0, 0, 0)

    im = Image.fromarray(img2)
    return im
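
An alternative way to colour the contours is to draw each one with a random colour instead of enumerating the RGB cube; a sketch, assuming the same OpenCV 3.x findContours API and a placeholder input path:

# Sketch: one random colour per contour (assumes OpenCV 3.x findContours)
import cv2
import numpy as np
from random import randint

img = cv2.imread('page.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
_, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

canvas = np.zeros_like(img)
for ind in range(len(contours)):
    colour = (randint(1, 254), randint(1, 254), randint(1, 254))
    cv2.drawContours(canvas, contours, ind, colour, -1)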
Example #7
def process_water(path, THRESH, ADAP, DIST_PERC):

    img = cv2.imread(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    kernel2 = np.ones((9, 9), np.uint8)

    if ADAP:
        thresh = cv2.adaptiveThreshold(gray, 255,
                                   cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 55, 20)
    else:
        data = dict()
        for i in gray.ravel():
            if data.has_key(i):
                data[i] += 1
            else:
                data[i] = 1

        # Find the most frequent gray level between 130 and 240 and derive a
        # global threshold from it
        max = 0
        id = None
        for k in data.keys():
            if data[k] >= max and k < 240 and k > 130:
                id = k
                max = data[k]
        print 'key: ', id, ' - ', max
        id -= THRESH
        ret, thresh = cv2.threshold(gray, id, 255, cv2.THRESH_BINARY_INV)

    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=5)

    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)

    ret, sure_fg = cv2.threshold(dist_transform, int(dist_transform.max() * DIST_PERC), 255, 0)

    sure_fg = cv2.erode(sure_fg, kernel, iterations=3)
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    ret, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # Mark the unknown region with 0
    markers[unknown == 255] = 0


    markers = cv2.watershed(img, markers)

    img[markers == -1] = [0, 0, 0]

    # Collect the remaining marker labels, zeroing out the watershed boundary (-1)
    # and the background label (1)
    ss = set()
    for e in range(len(markers)):
        for ej in range(len(markers[0])):
            if markers[e][ej] == -1 or markers[e][ej] == 1:
                markers[e][ej] = 0
            ss.add(markers[e][ej])

    img2 = np.zeros_like(img)
    img2[:, :, 0] = opening
    img2[:, :, 1] = opening
    img2[:, :, 2] = opening
    # Assign an RGB colour to every marker label
    c = 1
    dic = dict()
    dic[0] = (0, 0, 0)
    for r in range(1, 255, 1):
        for r2 in range(1, 255, 1):
            for r3 in range(1, 255, 1):
                dic[c] = (r, r2, r3)
                if c == len(ss) + 5:
                    break
                c += 1
            if c == len(ss) + 5:
                break
        if c == len(ss) + 5:
            break
    for e in range(len(markers)):
        for ej in range(len(markers[0])):
            img2[e][ej] = dic[markers[e][ej]]

    markers = np.uint8(img2)
    im = Image.fromarray(markers)

    return im
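
A hypothetical call of this routine; the path and parameter values are placeholders chosen to match the magnitudes used elsewhere in the file:

# Hypothetical usage sketch (path and parameter values are assumptions)
result = process_water('page.png', THRESH=30, ADAP=False, DIST_PERC=0.3)
result.save('page_watershed.png')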
Example #8
def process_quick(path, ADAP):

    if True:
        im = Image.open(path)
        im.thumbnail((im.size[0] / 4, im.size[1] / 4), Image.ANTIALIAS)
        im_arr = np.fromstring(im.tobytes(), dtype=np.uint8)
        print 'im ', im_arr.size, im.size
        im_arr = im_arr.reshape((im.size[1], im.size[0], im_arr.size / (im.size[1] * im.size[0])))
        img = im_arr
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        kernel2 = np.ones((9, 9), np.uint8)
        data = dict()
        for i in gray.ravel():
            if data.has_key(i):
                data[i] += 1
            else:
                data[i] = 1

        # Find the most frequent gray level between 130 and 240 and derive a
        # global threshold from it
        max = 0
        id = None
        for k in data.keys():
            if data[k] >= max and k < 240 and k > 130:
                id = k
                max = data[k]
        id -= 30

        if ADAP:
            thresh = cv2.adaptiveThreshold(gray, 255,
                                           cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 55, 20)

        else:
            ret, thresh = cv2.threshold(gray, id, 255, cv2.THRESH_BINARY_INV)

        kernel = np.ones((3, 3), np.uint8)
        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)

        for i in range(opening.shape[0]):
            for j in range(opening.shape[1]):
                if opening[i][j] == 0:
                    img[i][j] = (0, 0, 0)

        fx = segmentation.quickshift(img, kernel_size=3)
        ss = set()
        for i in range(fx.shape[0]):
            for j in range(fx.shape[1]):
                if isinstance(fx[i][j], int):
                    ss.add(fx[i][j])
                else:
                    ss.add((fx[i][j][0], fx[i][j][1], fx[i][j][2]))
        cols = list()
        for r in range(1, 250, 1):
            for r2 in range(1, 250, 1):
                for r3 in range(1, 250, 1):
                    cols.append((r, r2, r3))

        print 'cols', len(cols)
        shuffle(cols)

        img2 = np.zeros_like(img)
        img2[:, :, 0] = opening
        img2[:, :, 1] = opening
        img2[:, :, 2] = opening
        fx = color.label2rgb(fx, img2, colors=cols, kind='overlay')
        for i in range(fx.shape[0]):
            for j in range(fx.shape[1]):
                if opening[i][j] == 0:
                    fx[i][j] = (0, 0, 0)
        fx = np.uint8(fx)
        im = Image.fromarray(fx)
        return im
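
The quickshift and label2rgb calls can also be exercised directly; a minimal sketch (placeholder path, same kernel size as above):

# Minimal quickshift + label2rgb sketch (placeholder input path)
import numpy as np
from PIL import Image
from skimage import segmentation, color

img = np.asarray(Image.open('page.png').convert('RGB'))
labels = segmentation.quickshift(img, kernel_size=3)
overlay = color.label2rgb(labels, img, kind='overlay')
print 'segments:', np.unique(labels).size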
Example #9
# Builds a gray-level histogram and derives a binarisation threshold from the
# most frequent gray value. Assumes `gray` is a grayscale page image, e.g.
# gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY).
kernel2 = np.ones((9, 9), np.uint8)
data = dict()
for i in gray.ravel():
    if data.has_key(i):
        data[i] += 1
    else:
        data[i] = 1

# Find the most frequent gray level below 240
max = 0
id = None
for k in data.keys():
    if data[k] >= max and k < 240:
        id = k
        max = data[k]
print 'key: ', id, ' - ', max
id -= 15
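
The same most-frequent-gray-level pick can be done without the explicit loop; a close equivalent using numpy (assuming gray is a uint8 image array; ties may resolve differently than in the dict-based loop):

# Equivalent threshold pick via numpy (assumes gray is a uint8 image array)
import numpy as np

counts = np.bincount(gray.ravel(), minlength=256)
peak = int(np.argmax(counts[:240]))   # most frequent gray level below 240
threshold = peak - 15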
Example #10
def process(path, N_SEGM, THRESH, ADAP):

    im = Image.open(path)

    im_arr = np.fromstring(im.tobytes(), dtype=np.uint8)
    im_arr = im_arr.reshape((im.size[1], im.size[0], 3))

    gray = cv2.cvtColor(im_arr, cv2.COLOR_BGR2GRAY)

    if ADAP:
        thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY_INV, 55, 20)

    else:
        data = dict()
        for i in gray.ravel():
            if data.has_key(i):
                data[i] += 1
            else:
                data[i] = 1
        # Find the most frequent gray level between 130 and 240 and derive a
        # global threshold from it
        max = 0
        id = None
        for k in data.keys():
            if data[k] >= max and k < 240 and k > 130:
                id = k
                max = data[k]
        id -= THRESH
        ret, thresh = cv2.threshold(gray, id, 255, cv2.THRESH_BINARY_INV)

    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
    opening2 = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
    for i in range(opening.shape[0]):
        for j in range(opening.shape[1]):
            if opening[i][j] == 0:
                im_arr[i][j] = (0, 0, 0)

    img2 = im_arr

    edges = filters.sobel(color.rgb2gray(img2))
    labels = segmentation.slic(img2, compactness=30, n_segments=N_SEGM)

    # Build a region adjacency graph weighted by the boundary map
    g = graph.rag_boundary(labels, edges)

    labels2 = graph.merge_hierarchical(
        labels,
        g,
        thresh=0.98,
        rag_copy=False,
        in_place_merge=True,
        merge_func=merge_boundary,
        weight_func=weight_boundary)

    # Collect the merged region labels
    s = set()
    for row in labels2:
        for e in row:
            s.add(e)
    # Generate one colour per label (plus a small margin)
    cols = list()
    cp = len(s) + 5
    for r in range(0, 256, 1):
        for r2 in range(0, 256, 1):
            for r3 in range(0, 256, 1):
                cols.append((r, r2, r3))
                cp -= 1
                if cp == 0:
                    break
            if cp == 0:
                break
        if cp == 0:
            break

    shuffle(cols)

    img2 = np.zeros_like(img2)
    img2[:, :, 0] = opening2
    img2[:, :, 1] = opening2
    img2[:, :, 2] = opening2
    out = color.label2rgb(labels2, img2, colors=cols, kind='overlay', alpha=1)

    # Keep the output black wherever the opened mask is black
    for i in range(img2.shape[0]):
        for j in range(img2.shape[1]):
            if img2[i][j][0] == 0 and img2[i][j][1] == 0 and img2[i][j][2] == 0:
                out[i][j][0] = 0
                out[i][j][1] = 0
                out[i][j][2] = 0

    out = np.uint8(out)
    im = Image.fromarray(out)

    return im
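
A hypothetical invocation of this routine; the path and parameter values are placeholders (merge_boundary and weight_boundary must be defined elsewhere in the project, e.g. along the lines of the scikit-image RAG merging example):

# Hypothetical usage sketch of process (path and parameters are assumptions)
segmented = process('page.png', N_SEGM=2000, THRESH=30, ADAP=False)
segmented.save('page_slic_rag.png')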