Example No. 1
def test_minsize():
    # single-channel:
    img = data.coins()[20:168, 0:128]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(img, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
    # multi-channel:
    coffee = data.coffee()[::4, ::4]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(coffee, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
def fun_compare_colorsegmentation_and_display(image_data, number_segments=250, compactness_factor=10):
    """
    This function is a copy of the scikit-image example at http://scikit-image.org/docs/dev/auto_examples/plot_segmentations.html
    """
    segments_fz = felzenszwalb(image_data, scale=100, sigma=0.5, min_size=50)
    segments_slic = slic(image_data, n_segments=number_segments, compactness=compactness_factor, sigma=1)
    segments_quick = quickshift(image_data, kernel_size=3, max_dist=6, ratio=0.5)

    print ("Felzenszwalb's number of segments: %d" % len(np.unique(segments_fz)))
    print ("Slic number of segments: %d" % len(np.unique(segments_slic)))
    print ("Quickshift number of segments: %d" % len(np.unique(segments_quick)))

    fig, ax = plt.subplots(1, 3, sharex=True, sharey=True, subplot_kw={"adjustable": "box"})  # "box-forced" was removed from newer Matplotlib
    fig.set_size_inches(8, 3, forward=True)
    fig.subplots_adjust(0.05, 0.05, 0.95, 0.95, 0.05, 0.05)

    ax[0].imshow(mark_boundaries(image_data, segments_fz, color=(1, 0, 0)))
    ax[0].set_title("Felzenszwalbs's method")
    ax[1].imshow(mark_boundaries(image_data, segments_slic, color=(1, 0, 0)))
    ax[1].set_title("SLIC")
    ax[2].imshow(mark_boundaries(image_data, segments_quick, color=(1, 0, 0)))
    ax[2].set_title("Quickshift")
    for a in ax:
        a.set_xticks(())
        a.set_yticks(())
    plt.show()
Example No. 3
def SegmentationFelz_run_2d(rod):
    img = img_as_float(
        RodriguesToUnambiguousColor(rod["x"], rod["y"], rod["z"], maxRange=None, centerR=None).astype("uint8")
    )
    segments_fz = felzenszwalb(img, scale=100, sigma=0.0, min_size=10)
    print("Felzenszwalb number of segments: %d" % len(np.unique(segments_fz)))
    return segments_fz
Example No. 4
def selectiveSearch(image):
    segments = felzenszwalb(image, scale=kFelzenszwalbScale)
    numRegions = segments.max() + 1  # labels run from 0 to segments.max() inclusive

    rectangles = []

    for regionTag in range(numRegions):
        selectedRegion = segments == regionTag
        regionPixelIndices = np.transpose(np.nonzero(selectedRegion))
        rectangle = aabb(regionPixelIndices)
        rectangles.append(rectangle)

    # Implement similarities, neighbourhood merging.
    # Felzenszwalb's segmentation is ridiculously good already.

    def debug():
        marked = np.zeros(image.shape, dtype=np.uint8)

        for rectangle in rectangles:
            rr, cc = rectangle.pixels(marked.shape)
            randcolor = randint(0, 255), randint(0, 255), randint(0, 255)
            marked[rr, cc] = randcolor

        print(image.shape, segments.shape, marked.shape)

        io.imshow_collection([image, segments, marked])
        io.show()

    # debug()

    return rectangles
Example No. 5
def mask_felz(image, config):
    #constants for felzenszwalb segmentation function
    scale = config[':felzenszwalb'][':scale']
    sigma = config[':felzenszwalb'][':sigma'] 
    min_size = config[':felzenszwalb'][':min_size'] 

    segments = felzenszwalb(image, scale, sigma, min_size)
    return segments
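
A hedged usage sketch (the config dict below is illustrative, not prescribed by the source): mask_felz expects nested Ruby-style symbol keys, as read in the function above.

# hypothetical config; the numeric values are example defaults only
config = {':felzenszwalb': {':scale': 100, ':sigma': 0.5, ':min_size': 50}}
segments = mask_felz(image, config)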
Example No. 6
def test_minsize():
    # single-channel:
    img = data.coins()[20:168,0:128]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(img, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
    # multi-channel:
    coffee = data.coffee()[::4, ::4]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(coffee, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        # the construction doesn't guarantee min_size is respected
        # after intersecting the segmentations for the colors
        assert_greater(np.mean(counts) + 1, min_size)
Example No. 7
def doSegment(param, img):
  if param[0] == 'slic':
    segments_res = slic(img, n_segments=int(param[1]), compactness=int(param[2]), sigma=int(param[3]), convert2lab=True)
  elif param[0] == 'pff':
    segments_res = felzenszwalb(img, scale=int(param[1]), sigma=float(param[2]), min_size=int(param[3]))
  elif param[0] == 'quick':
    segments_res = quickshift(img, kernel_size=int(param[1]), max_dist=int(param[2]), ratio=float(param[3]), convert2lab=True)
  return segments_res
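
A hedged usage sketch: param is a list of strings, [method, p1, p2, p3], matching the branches above; the values here are only illustrative.

# Felzenszwalb ('pff') with scale=100, sigma=0.8, min_size=50
segments = doSegment(['pff', '100', '0.8', '50'], img)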
Example No. 8
def test_merging():
    # test region merging in the post-processing step
    img = np.array([[0, 0.3], [0.7, 1]])
    # With scale=0, only the post-processing is performed.
    seg = felzenszwalb(img, scale=0, sigma=0, min_size=2)
    # we expect 2 segments:
    assert_equal(len(np.unique(seg)), 2)
    assert_array_equal(seg[0, :], 0)
    assert_array_equal(seg[1, :], 1)
Example No. 9
    def __init__(self, fname):
        self.fname = fname
        self.image_cache = {}

        print('loading image')
        self.raw_img = io.imread(fname)
        self._segment_ids = None
        img_float = img_as_float(self.raw_img)
        print('segmenting image')
        self.segments = felzenszwalb(img_float, scale=300, sigma=0.5, min_size=100)
    def extract(self, image):
        """
        Performs segmentation and returns a set of nd.array
        """
        # Init the list of segmentations
        segmentations = []
        for param in self.params_:
            sc, sg, m = param
            segm_mask = segmentation.felzenszwalb(image, sc, sg, m)
            segmentations.append(segm_mask)
        return segmentations
Example No. 11
def test_color():
    # very weak tests.
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    seg = felzenszwalb(img, sigma=0)
    # we expect 4 segments:
    assert_equal(len(np.unique(seg)), 4)
    assert_array_equal(seg[:10, :10], 0)
    assert_array_equal(seg[10:, :10], 2)
    assert_array_equal(seg[:10, 10:], 1)
    assert_array_equal(seg[10:, 10:], 3)
Example No. 12
def test_grey():
    # very weak tests.
    img = np.zeros((20, 21))
    img[:10, 10:] = 0.2
    img[10:, :10] = 0.4
    img[10:, 10:] = 0.6
    seg = felzenszwalb(img, sigma=0)
    # we expect 4 segments:
    assert_equal(len(np.unique(seg)), 4)
    # that mostly respect the 4 regions:
    for i in range(4):
        hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
        assert_greater(hist[i], 40)
Example No. 13
    def __init__(self, fname):
        self.fname = fname

        print('loading image')
        img = Image.open(fname)
        img.thumbnail((1280, 1024), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
        width, height = img.size
        square_size = width * height
        min_size = square_size // 300  # felzenszwalb expects an integer min_size
        self.raw_img = np.array(enhance(img).convert('RGB'))
        self._segment_ids = None
        img_float = img_as_float(self.raw_img)
        print('segmenting image')
        self.segments = felzenszwalb(img_float, scale=300, sigma=0.25, min_size=min_size)
Example No. 14
def clustering(inPixNP, width, height, scale=50, sigma=4.5, min_size=10):
    segmentsNP = felzenszwalb(inPixNP, scale, sigma, min_size)
    # felzenszwalb(image, scale=1, sigma=0.8, min_size=20)
    # image : (width, height, 3) or (width, height) ndarray - Input image.
    # scale : float - Free parameter. Higher means larger clusters.
    # sigma : float - Width of Gaussian kernel used in preprocessing.
    # min_size : int - Minimum component size. Enforced using postprocessing.
    
    # create a data structure with regionIDs as keys and lists of their pixel coordinates as values:
    maxSegmentID = np.max(segmentsNP)
    segmentsByIDlist = [[] for i in range(maxSegmentID + 1)]
    for y in range(0,height):
        for x in range(0,width):
            regionID = segmentsNP[y,x]
            segmentsByIDlist[regionID].append((x, y))
    return segmentsNP, segmentsByIDlist
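
A minimal sketch (assuming only NumPy) of building the same per-segment pixel lists without the nested x/y loops, by sorting the flattened label image once:

import numpy as np

def group_pixels_by_segment(segmentsNP):
    # sort all pixel indices by segment label, then split into one group per label
    flat = segmentsNP.ravel()
    order = np.argsort(flat, kind="stable")
    ys, xs = np.unravel_index(order, segmentsNP.shape)
    counts = np.bincount(flat, minlength=segmentsNP.max() + 1)
    groups = np.split(np.column_stack((xs, ys)), np.cumsum(counts)[:-1])
    return groups  # groups[regionID] is an array of (x, y) coordinates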
Example No. 15
def demo(image_name,color_space_list=None,ks=None,sim_feats_list=None,net='vgg16', cpu_mode=True):
	''' Object Recognition Demo : Selective Search + RCNN
	parameters
	----------
	image_name : filename of image stored in 'Data/img'
	
	color_space_list : list of colorspaces to be used. Refer color_utils for list of possible colorspaces.
	Default : [ 'HSV', 'LAB']

	ks : list felzenszwalb scale/threshold and minimum segment size.
	Default : [50, 100]
	
	'''
	
	blob_array = []	
	priority = []
	img = plt.imread('Data/img/' + image_name + '.jpg')
	seg_dir = 'Data/segments/'
	if color_space_list is None: color_space_list = ['HSV','LAB']
	if ks is None: ks = [50,100]
	if sim_feats_list is None: sim_feats_list = [[ sf.color_hist_sim(), sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape) ],[ sf.texture_hist_sim(), sf.size_sim(img.shape), sf.fill_sim(img.shape) ]]

	cc = convert_colorspace(img,color_space_list)
	seg_filename = [seg_dir + 'HSV/50/' + image_name +'.mat',seg_dir + 'HSV/100/' + image_name +'.mat', seg_dir + 'LAB/50/' + image_name +'.mat',seg_dir + 'LAB/100/' + image_name +'.mat']

	for i in range(len(color_space_list)):
		for j in range(len(ks)):
			for k in range(len(sim_feats_list)):
				_img = cc[i]
				_file = "%s%s/%d/%s.mat"%(seg_dir,color_space_list[i].upper(),ks[j],image_name)
				if not os.path.exists(_file):
					segment_mask = felzenszwalb(_img,scale=ks[j],sigma=0.8,min_size=ks[j])
					_temp_dict = dict()
					_temp_dict['blobIndIm'] = segment_mask + 1
					scipy.io.savemat(_file,_temp_dict)
				_blob_array = ssearch._ssearch(_img,ssearch.load_segment_mask(_file),sim_feats = sim_feats_list[k])
				blob_array.append(_blob_array)
				priority.append( np.arange(len(_blob_array),0,-1).clip(0,(len(_blob_array)+1)/2))
		
	bboxes = ssearch.remove_duplicate(blob_array,priority)
	bbox_dict = {}
	bbox_dict['boxes'] = np.vstack([np.asarray(bboxes)[:,2],np.asarray(bboxes)[:,1],np.asarray(bboxes)[:,4],np.asarray(bboxes)[:,3]]).T
	print('\nComputed %d proposals'%(len(bboxes)))
	scipy.io.savemat('Data/Boxes/' + image_name + '.mat',bbox_dict)
	rcnn.rcnn_demo(image_name,net=net, cpu_mode=cpu_mode)
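
A hedged usage sketch; 'example_image' is a placeholder name and, per the path convention in demo() above, Data/img/example_image.jpg must exist.

demo('example_image', color_space_list=['HSV'], ks=[50, 100], net='vgg16', cpu_mode=True)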
Example No. 16
def FELZENSZWALB(Input_Image, scale, sigma, min_size):
   
    '''
    Description:   Computes Felzenszwalb's efficient graph-based image segmentation.

    source:         skimage, OpenCV Python
    
    parameters:     Input_Image : ndarray
                    Input image
    
                    min_size : int
                    Minimum component size. Enforced using postprocessing.
    
                    scale:  float
                     The parameter scale sets an observation level. Higher scale means fewer and larger segments.
    
                    sigma : float
                    Width of Gaussian smoothing kernel for preprocessing. Zero means no smoothing.
    
    return:         Segment_mask : ndarray
                    Integer mask indicating segment labels.
        
    '''
    #default values, set in case of 0 as input
    if scale == 0:
        scale = 5
    if sigma == 0:
        sigma = 0.5
    if min_size == 0:
        min_size = 30

    #print Input
    img = cv2.imread(Input_Image)
    #print img
    #print img.shape

    segments_fz = felzenszwalb(img, scale, sigma, min_size)
    print(segments_fz.shape)
    #print ('segments_fz datatype',segments_fz.dtype )
    print("Felzenszwalb's number of segments: %d" % len(np.unique(segments_fz)))
    print('segments_fz datatype', segments_fz.dtype)

    return segments_fz
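
A hedged usage sketch of the wrapper above ('input.png' is a placeholder path): passing 0 for any parameter falls back to the defaults set inside the function.

labels = FELZENSZWALB('input.png', scale=0, sigma=0, min_size=0)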
Example No. 17
def doSegment(im):
    timestr = time.strftime("%Y%m%d-%H%M%S")
    global count

    img = img_as_float(im)
    # img = img_as_float(imgO[::2, ::2])

    segments_fz = felzenszwalb(img, scale=2.0, sigma=1.0, min_size=0)
    # segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
    # segments_quick = quickshift(img, kernel_size=5, max_dist=10, ratio=0.0)

    if count % 2 == 0:
        bounds = mark_boundaries(img, segments_fz, color=(0.0, 0.0, 0.0), mode="inner")
    else:
        bounds = mark_boundaries(img, segments_fz, color=(1, 1, 1), mode="inner")

    labeled = label2rgb(
        segments_fz,
        img,
        alpha=1.0,
        bg_color=(0, 0, 0),
        bg_label=0,
        colors=[
            (0, 0, 0),
            (1, 1, 1),
            (0.25, 0.25, 0.25),
            (0.5, 0.5, 0.5),
            (0.75, 0.75, 0.75),
            (0.1, 0.1, 0.1),
            (0.05, 0.05, 0.05),
            (0.2, 0.2, 0.2),
            (0.15, 0.15, 0.15),
        ],
    )

    bb = Image.fromarray(np.uint8(labeled * 255))
    bb.show()

    bb.save("output/white" + timestr + ".png")
    # bb = bb.resize((500, 500), Image.NEAREST)
    count = count + 1
Example No. 18
def image2seg(folder, filename, algo='felzenszwalb'):
    image = cv2.imread(os.path.join(tgt_dir, folder, filename))
    image = cv2.resize(image, (640, 480))

    if algo == 'felzenszwalb':
        segment = felzenszwalb(image, scale=100, sigma=0.5,
                               min_size=50).astype(np.int16)
    elif algo == 'slic':
        segment = slic(image, n_segments=500, sigma=0.5,
                       compactness=1).astype(np.int16)
    else:
        raise NotImplementedError("Segmentation algorithm not implemented.")

    os.makedirs(os.path.join(output_dir, folder), exist_ok=True)

    filename_wo_extn = os.path.splitext(os.path.basename(filename))[0]

    np.savez(os.path.join(output_dir, folder,
                          "seg_{}.npz".format(filename_wo_extn)),
             segment_0=segment)
    return
Example No. 19
def felzenszwalb(request):

    image, response = process(request, color = True)

    if len(image.shape) == 2:
        
        image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)

    scale = int(request.GET.get('felzenszwalb-scale'))

    sigma = float(request.GET.get('felzenszwalb-sigma'))

    min_size = int(request.GET.get('felzenszwalb-min'))  

    segments = segmentation.felzenszwalb(image, scale = scale, sigma = sigma, min_size = min_size)

    image = segmentation.mark_boundaries(image, segments)

    io.imsave(response['filename'], image)

    return JsonResponse(response)
Example No. 20
def test_skimage(selenium):
    import numpy as np
    from skimage import color, data
    from skimage.util import view_as_blocks

    # get astronaut from skimage.data in grayscale
    l = color.rgb2gray(data.astronaut())
    assert l.size == 262144
    assert l.shape == (512, 512)

    # size of blocks
    block_shape = (4, 4)

    # see astronaut as a matrix of blocks (of shape block_shape)
    view = view_as_blocks(l, block_shape)
    assert view.shape == (128, 128, 4, 4)

    from skimage.filters import threshold_otsu

    to = threshold_otsu(l)
    assert to.hex() == "0x1.8e00000000000p-2"

    from skimage.color import rgb2gray
    from skimage.data import astronaut
    from skimage.filters import sobel
    from skimage.segmentation import felzenszwalb, quickshift, slic, watershed
    from skimage.util import img_as_float

    img = img_as_float(astronaut()[::2, ::2])

    segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
    segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
    segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)
    gradient = sobel(rgb2gray(img))
    segments_watershed = watershed(gradient, markers=250, compactness=0.001)

    assert len(np.unique(segments_fz)) == 194
    assert len(np.unique(segments_slic)) == 196
    assert len(np.unique(segments_quick)) == 695
    assert len(np.unique(segments_watershed)) == 256
Example No. 21
    def makeSegmentation(self):

        segments_fz = felzenszwalb(self.img,
                                   scale=1000,
                                   sigma=0.5,
                                   min_size=250)
        #segments_slic = slic(self.img, n_segments=4, compactness=10, sigma=1)
        segments_quick = quickshift(self.img,
                                    kernel_size=3,
                                    max_dist=100,
                                    ratio=0.5)
        gradient = sobel(rgb2gray(self.img))
        segments_watershed = watershed(gradient, markers=10, compactness=0.001)

        print("Felzenszwalb number of segments: {}".format(
            len(np.unique(segments_fz))))
        #print('SLIC number of segments: {}'.format(len(np.unique(segments_slic))))
        print('Quickshift number of segments: {}'.format(
            len(np.unique(segments_quick))))

        fig, ax = plt.subplots(2,
                               2,
                               figsize=(10, 10),
                               sharex=True,
                               sharey=True)

        ax[0, 0].imshow(mark_boundaries(self.img, segments_fz))
        ax[0, 0].set_title("Felzenszwalbs's method")
        #ax[0, 1].imshow(mark_boundaries(self.img, segments_slic))
        ax[0, 1].set_title('SLIC')
        ax[1, 0].imshow(mark_boundaries(self.img, segments_quick))
        ax[1, 0].set_title('Quickshift')
        ax[1, 1].imshow(mark_boundaries(self.img, segments_watershed))
        ax[1, 1].set_title('Compact watershed')

        for a in ax.ravel():
            a.set_axis_off()

        plt.tight_layout()
        plt.show()
Example No. 22
    def cluster_image(self, img):
        edge_img = skeletonize(
            block_reduce(img[:, :,
                             2], (self.reduce_factor, self.reduce_factor),
                         func=np.amax) > 0.1).astype(np.float64)
        adjacency = img_to_graph(edge_img)
        eigenvectors = spectral_embedding(adjacency,
                                          n_components=self.n_components)

        eigenvectors = eigenvectors / np.amax(eigenvectors)
        eigenvectors = np.reshape(
            eigenvectors,
            (edge_img.shape[0], edge_img.shape[1], self.n_components))
        eigenvectors = ndimage.median_filter(eigenvectors,
                                             size=self.median_smooth)

        print('clustering...')
        labels = felzenszwalb(eigenvectors,
                              scale=self.scale,
                              sigma=self.sigma,
                              min_size=self.min_size)
        labels = cv2.resize(labels, (img.shape[1], img.shape[0]),
                            interpolation=cv2.INTER_NEAREST)
        labels[np.argmax(img, axis=2) == 0] = 0
        labels[np.argmax(img, axis=2) == 1] += 1

        labels_post = np.zeros(
            (labels.shape[0], labels.shape[1], np.amax(labels) + 1))
        for i in range(1, np.amax(labels) + 1):
            labels_post[:, :, i] = labels == i
        labels_post = ndimage.binary_erosion(labels_post,
                                             structure=np.ones(
                                                 (self.smooth,
                                                  self.smooth, 1)))
        labels_post = ndimage.binary_dilation(labels_post,
                                              structure=np.ones(
                                                  (self.smooth,
                                                   self.smooth, 1)))

        return np.argmax(labels_post, axis=2)
Example No. 23
def doSegment(imgPath, outImagePath):
    im = load_image(imgPath)
    global count  # must be declared before count is assigned in this function
    timestr = time.strftime("%Y%m%d-%H%M%S")

    img = img_as_float(im)
    #img = img_as_float(imgO[::2, ::2])

    segments_fz = felzenszwalb(img, scale=2.0, sigma=1.0, min_size=0)
    #segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
    #segments_quick = quickshift(img, kernel_size=5, max_dist=10, ratio=0.0)

    if (count % 2 == 0):
        bounds = mark_boundaries(img,
                                 segments_fz,
                                 color=(0.0, 0.0, 0.0),
                                 mode="inner")
    else:
        bounds = mark_boundaries(img,
                                 segments_fz,
                                 color=(1, 1, 1),
                                 mode="inner")

    labeled = label2rgb(segments_fz,
                        img,
                        alpha=1.0,
                        bg_color=(0, 0, 0),
                        bg_label=0,
                        colors=[(0, 0, 0), (1, 1, 1), (0.25, 0.25, 0.25),
                                (0.5, 0.5, 0.5), (0.75, 0.75, 0.75),
                                (0.1, 0.1, 0.1), (0.05, 0.05, 0.05),
                                (0.2, 0.2, 0.2), (0.15, 0.15, 0.15)])

    bb = Image.fromarray(np.uint8(labeled * 255))
    # bb.show()
    bb.save(outImagePath)
    #bb = bb.resize((500, 500), Image.NEAREST)
    count = count + 1
Example No. 24
def segm_felzenszwalb(input_image,
                      scale=800,
                      sigma=10,
                      min_size=5,
                      multichannel=True):
    """Función para generar la segmentación mediante la técnica
    de Felzenszwalb.

    Args:
        input_image ([Numpy Array]): Imagen de entrada sobre la que obtener
                                    la segmentación
        scale (int, optional): A mayor, más grandes son los grupos.
                               Defaults to 800.
        sigma (int, optional): Desviación estándar del kernel Gaussiano.
                                Defaults to 10.
        min_size (int, optional): Tamano mínimo de los componentes.
                                  Defaults to 5.
        multichannel (bool, optional): Si el último valor del shape
                                       se interpreta como múltiples canales.
                                       Defaults to True.
    Returns:
        Tuple (output image, labels, number classes): Tupla con la imagen
                                            segmentada, las etiquetas y el número
                                            total de segmentos encontrados.
    """

    segments_felz = felzenszwalb(np.uint8(input_image),
                                 scale=scale,
                                 sigma=sigma,
                                 min_size=min_size,
                                 multichannel=multichannel)

    output_image = mark_boundaries(input_image, segments_felz)
    labeled_fz = color.label2rgb(segments_felz,
                                 input_image,
                                 kind='avg',
                                 bg_label=0)

    return (output_image, labeled_fz, len(np.unique(segments_felz)))
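
A note on newer scikit-image releases (an assumption about the installed version, not part of the source): the multichannel keyword was replaced by channel_axis, so an equivalent call might look like this.

# channel_axis=-1 treats the last axis as channels; None means single-channel
segments_felz = felzenszwalb(np.uint8(input_image), scale=scale, sigma=sigma,
                             min_size=min_size,
                             channel_axis=-1 if multichannel else None)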
Example No. 25
def segment(img):
    img = pil2cv(img)
    h, w = img.shape[0:2]
    img = cv2.bilateralFilter(img, 9, 100, 100)
    scale = int(h * w / 1000)
    segments = felzenszwalb(img, scale=scale, sigma=0.5, min_size=150)
    out_image = np.zeros((h, w, 3))
    num_segments = len(np.unique(segments))
    for s in tqdm(range(num_segments)):
        label_map = segments == s
        label_map3 = np.dstack([label_map] * 3)
        masked_img = np.multiply(label_map3, img)
        #avg_color = np.sum(np.sum(masked_img, axis=0), axis=0) / np.count_nonzero(label_map)  # maybe median is better
        nonzeros = [masked_img[:, :, c].reshape((h * w)) for c in range(3)]
        median_color = [
            np.median(np.take(nonzeros[c], nonzeros[c].nonzero()))
            for c in range(3)
        ]
        smooth_segment = (label_map3 * median_color).astype('uint8')
        out_image += smooth_segment
    out_image = Image.fromarray(out_image.astype('uint8'))
    return out_image
Example No. 26
    def pre_trans(self, img_arr, img_file_path):
        """
        return pre-filter feature extraction
        For color extraction use color_kmeans()
        local_equalize
        local_threshold
        Segmentation algorithms: felzenszwalb, slic, quickshift
        """
        # initialize pre_trans vector size
        pre_trans = np.zeros(
            5 * (img_arr.shape[0] * img_arr.shape[1] * img_arr.shape[2]))
        # All extractions using raw colored image array:
        color_kmeans = Color_Clustering(img_file_path, 10, img_arr.shape[:2])
        dom_colors = np.ravel(color_kmeans.main())

        local_eq_raw = self.local_equalize(img_arr)

        local_threshold_raw = self.local_thresh(img_arr)

        # Segmentation: felzenszwalb
        img = img_as_float(img_arr)
        segments_fz = np.ravel(
            felzenszwalb(img, scale=100, sigma=0.5, min_size=50))
        prior_length = (dom_colors.shape[0] + local_eq_raw.shape[0]
                        + local_threshold_raw.shape[0] + segments_fz.shape[0])

        # apply feature detection algorithms to grayscaled image
        img_arr_grey = color.rgb2gray(img_arr)
        feat_det_img_arr = self.feat_detect(img_arr_grey)
        stand_feat_det_img_arr = self.stand_vector_size(feat_det_img_arr)
        pre_trans_prior = np.concatenate(
            (dom_colors, local_eq_raw, local_threshold_raw, segments_fz),
            axis=0)
        pre_trans[:prior_length] = pre_trans_prior
        pre_trans[prior_length:prior_length +
                  stand_feat_det_img_arr.shape[0]] = stand_feat_det_img_arr

        return pre_trans, feat_det_img_arr.shape
Example No. 27
def get_felzenszwalb(slices: np.array) -> np.array:
    slices = slices.cpu().detach().numpy()

    mid_slice = slices.shape[0] // 2

    segments = felzenszwalb(slices[mid_slice, :, :],
                            scale=150,
                            sigma=0.7,
                            min_size=50)
    #segments = chan_vese(slices[mid_slice,:,:], mu=0.1)

    selected_pixels = np.array([
        [5 / 16, 5 / 16], [5 / 16, 11 / 16], [11 / 16, 5 / 16],
        [11 / 16, 11 / 16]
    ]) @ np.array([[256, 0], [0, 256]])  # don't hard code image resolution
    selected_pixels = selected_pixels.astype('int32')

    selected_segments = [segments[tuple(sp)] for sp in selected_pixels]

    pre_mask = [segments == ss for ss in selected_segments]

    mask = np.logical_or.reduce(pre_mask)

    bg_segment = int(np.floor(np.mean(segments[0])))
    mid_col = segments.shape[1] // 2

    start = timeit.default_timer()

    max_gap = smart_strip_edges(segments, bg_segment, 1)
    max_h_gap = smart_strip_edges(segments, bg_segment, 0)

    stop = timeit.default_timer()

    print("Step: {} - Runtime: {}".format(3, stop - start))

    return segments[max_gap[0]:max_gap[1],
                    max_h_gap[0]:max_h_gap[1]], mask[max_gap[0]:max_gap[1],
                                                     max_h_gap[0]:max_h_gap[1]]
    def pre_trans(self, img_arr, img_file_path):
        """
        return pre-filter feature extraction
        For color extraction use color_kmeans()
        local_equalize
        local_threshold
        Segmentation algorithms: felzenszwalb, slic, quickshift
        """
        # initialize pre_trans vector size
        pre_trans = np.zeros(18000)
        # All extractions using raw colored image array:
        color_kmeans = Color_Clustering(img_file_path, 10, img_arr.shape[:2])
        dom_colors = np.ravel(color_kmeans.main())

        local_eq_raw = self.local_equalize(img_arr)

        local_threshold_raw = self.local_thresh(img_arr)

        # Segmentation: felzenszwalb
        img = img_as_float(img_arr)
        segments_fz = np.ravel(
            felzenszwalb(
                img,
                scale=100,
                sigma=0.5,
                min_size=50))
        prior_length = (dom_colors.shape[0] + local_eq_raw.shape[0]
                        + local_threshold_raw.shape[0] + segments_fz.shape[0])

        pre_trans_prior = np.concatenate(
            (dom_colors,
             local_eq_raw,
             local_threshold_raw,
             segments_fz),
            axis=0)
        pre_trans[:prior_length] = pre_trans_prior

        return pre_trans
    def radioBtn_refresh(self):
        self.slider_pack()
        if (self.segment_type_choice.get() == 1):
            numSeg = self.slider1.get()
            comp = self.slider2.get()
            sigma = float(self.slider3.get())
            self.segments = slic(self.image,
                                 n_segments=numSeg,
                                 compactness=comp,
                                 sigma=sigma,
                                 slic_zero=True)

        if (self.segment_type_choice.get() == 2):
            scale = self.slider1.get()
            min_size = self.slider2.get()
            sigma = self.slider3.get()
            self.segments = felzenszwalb(self.image,
                                         scale=scale,
                                         sigma=sigma,
                                         min_size=min_size)

        if (self.segment_type_choice.get() == 3):
            kernel_size = self.slider1.get()
            max_dist = self.slider2.get()
            ratio = self.slider3.get()
            self.segments = quickshift(self.image,
                                       kernel_size=kernel_size,
                                       max_dist=max_dist,
                                       ratio=ratio)

        imageOUT = cv2.bitwise_or(self.image, self.mask)
        #imageOUT = cv2.addWeighted(self.image,0.3,self.mask,0.7,0)
        #imageOUT=np.where(self.mask==(0,0,0),self.image,self.mask)
        imageOUT = toimage(mark_boundaries(imageOUT, self.segments))
        imageOUT = ImageTk.PhotoImage(imageOUT)

        self.panelA.create_image(0, 0, image=imageOUT, anchor=NW)
        self.panelA.image = imageOUT
Example No. 30
def segment_image(image, color_space_list = ['HSV','LAB'],
                  ks = [50,100]):

    blob_array =[]
    priority = []
    seg_masks = []
    converted_images = convert_colorspace(image,color_space_list)
    sim_feats_list = [ sf.color_hist_sim(), sf.texture_hist_sim(),
                      sf.size_sim(image.shape), sf.fill_sim(image.shape) ]
    for img in converted_images:
        for j in ks:
            print("segmenting",j)
            segmented_mask = felzenszwalb(img,j,sigma = 0.8,
                                         min_size = j)
            print("blobbing",j)
            blobs = ssearch._ssearch(img,segmented_mask,sim_feats =
                                          sim_feats_list)
            blob_array.append(blobs)
            priority.append(
                np.arange(len(blobs),0,-1).clip(0,(len(blobs)+1)/2))
            seg_masks.append(segmented_mask)
    blob_array = ssearch.remove_duplicate(blob_array)
    return blob_array
def ncut(img=None, thresh=0.001, num_cuts=10, sp_met='slic'):

    #     labels1 = segmentation.felzenszwalb(m_img, scale=50, sigma=0.5, min_size=100)
    if sp_met == 'slic':
        labels1 = segmentation.slic(img, compactness=30, n_segments=400)
    if sp_met == 'fl':
        labels1 = segmentation.felzenszwalb(img,
                                            scale=100,
                                            sigma=0.5,
                                            min_size=50)
    # if sp_met == 'qs':
    #     labels1 = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)

    out1 = color.label2rgb(labels1, img, kind='avg')

    g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1,
                                   g,
                                   thresh=thresh,
                                   num_cuts=num_cuts)
    out2 = color.label2rgb(labels2, img, kind='avg')

    return labels2
Example No. 32
    def task3(self):
        img1 = img_as_float64(mimg.imread("fish.bmp"))
        imgray = rgb2gray(img1)

        # Method 1
        # img2=sgm.watershed(imgray, markers=300, watershed_line=True)

        # Method 2
        # img2 = sgm.slic(imgray, n_segments=30, compactness=0.3, sigma=20)

        # Method 3
        # img2 = sgm.quickshift(img1, kernel_size=5, max_dist=15, ratio=0.05)

        # Method 4
        img2 = sgm.felzenszwalb(imgray, scale=200, sigma=1, min_size=100)

        img3 = sgm.mark_boundaries(img1, img2)

        fg, (ax1, ax2, ax3) = plt.subplots(1, 3)
        self.show(ax1, img1, "original")
        self.show(ax2, img2, "markers")
        self.show(ax3, img3, "with segmentation")
        plt.show()
    def extract_colored_map(self, image_with_boundaries, contours_image):
        segments = felzenszwalb(image_with_boundaries,
                                scale=self.segs,
                                min_size=self.min_size)

        for i in range(contours_image.shape[0]):
            for j in range(contours_image.shape[1]):
                if contours_image[i, j] == 0:
                    segments[i, j] = 255

        occurence_map = self.get_occurence_map(segments)

        keys = list(occurence_map.keys())
        for key in keys:
            if occurence_map[key] > self.threshold:
                del occurence_map[key]

        for i in range(segments.shape[0]):
            for j in range(segments.shape[1]):
                if segments[i, j] in occurence_map.keys():
                    segments[i, j] = 255

        return segments
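
A minimal sketch (assuming plain NumPy arrays of equal shape) of the two pixel loops above rewritten with boolean indexing:

import numpy as np

segments[contours_image == 0] = 255                 # paint contour pixels
kept_ids = np.array(list(occurence_map.keys()))     # segment ids that survived the threshold filter
segments[np.isin(segments, kept_ids)] = 255         # blank those segments as well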
def applyfelzenszwab(source_img, mask):
    fine_mask = mask.copy()
    input_img = source_img.copy()

    cv2.imshow("inputs", np.hstack((input_img, fine_mask)))
    cv2.waitKey(0)

    image_felzenszwalb = seg.felzenszwalb(mask)
    image_felzenszwalb_colored = color.label2rgb(image_felzenszwalb, mask, kind='avg')

    print(image_felzenszwalb.shape)
    print(image_felzenszwalb[:100])
    image_show(image_felzenszwalb)
    image_show(image_felzenszwalb_colored)

    #plt.imshow(image_felzenszwalb)
    plt.show()

    #cv2.imshow("adsf", image_felzenszwalb); #, cmap='gray'
    #cv2.waitKey(0)


    return fine_mask
Example No. 35
def segmentation(path):
    ds = gdal.Open(path, GA_ReadOnly )
    img = np.array(ds.GetRasterBand(1).ReadAsArray())
    segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
    #segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
    #segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)

    print("Felzenszwalb's number of segments: %d" % len(np.unique(segments_fz)))

    fig, ax = plt.subplots(1, 3)
    fig.set_size_inches(8, 3, forward=True)
    fig.subplots_adjust(0.05, 0.05, 0.95, 0.95, 0.05, 0.05)

    ax[0].imshow(mark_boundaries(img, segments_fz))
    ax[0].set_title("Felzenszwalbs's method")
    # ax[1].imshow(mark_boundaries(img, segments_slic))
    # ax[1].set_title("SLIC")
    # ax[2].imshow(mark_boundaries(img, segments_quick))
    # ax[2].set_title("Quickshift")
    for a in ax:
        a.set_xticks(())
        a.set_yticks(())
    plt.show()
Example No. 36
def felzenszwalb_skimage(input_band_list, scale, sigma, min_size):
    '''Felzenszwalb segmentation from Skimage library
    
    :param input_band_list: list of 2darrays (list of numpy arrays)
    :param scale: defines the observation level, higher scale means fewer and larger segments (float)
    :param sigma: width of the Gaussian smoothing kernel for preprocessing, zero means no smoothing (float)
    :param min_size: minimum component size, enforced using postprocessing (integer)
    :returns:  2darray with the result of the segmentation (numpy array)
    
    Author: Daniele De Vecchi - Mostapha Harb
    Last modified: 22/03/2014
    
    Reference: http://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.felzenszwalb
    '''

    #TODO: Here too it seems that only RGB is used and each band is segmented separately and then merged.
    #TODO: It would be more powerful if the full spectral content were used and one segmentation were run on the n-D feature space.

    #default values, set in case of 0 as input
    if scale == 0:
        scale = 2
    if sigma == 0:
        sigma = 0.1
    if min_size == 0:
        min_size = 2

    if len(input_band_list) == 4:
        img = np.dstack((input_band_list[0], input_band_list[1],
                         input_band_list[2], input_band_list[3]))
    if len(input_band_list) == 3:
        img = np.dstack(
            (input_band_list[0], input_band_list[1], input_band_list[2]))
    if len(input_band_list) == 1:
        img = input_band_list[0]
    segments_fz = felzenszwalb(img, scale, sigma, min_size)

    return segments_fz
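
A hedged usage sketch (random data stands in for real imagery): input_band_list is a plain list of 2-D arrays, one per band.

import numpy as np

bands = np.random.rand(100, 100, 3).astype(np.float32)
input_band_list = [bands[:, :, b] for b in range(bands.shape[2])]
labels = felzenszwalb_skimage(input_band_list, scale=2, sigma=0.1, min_size=2)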
Example No. 37
def press_wasdqe_to_adjust_parameter_of_felz(img):
    paras = [64, 0.5, 128]  # pieces
    # paras = [1, 0.8, 20]  # default

    act_dict = {0: (0, 1.0)}
    act_dict.update(
        zip(
            [119, 97, 113],
            [(i, 1.2) for i in range(len(paras))],
        ))  # _KeyBoard: W A Q
    act_dict.update(
        zip(
            [115, 100, 101],
            [(i, 0.8) for i in range(len(paras))],
        ))  # KeyBoard: S D E

    key = 0
    while True:
        if key != -1:
            i, multi = act_dict[key]
            paras[i] *= multi
            print(key, paras)

            seg_map = segmentation.felzenszwalb(img,
                                                scale=int(paras[0]),
                                                sigma=paras[1],
                                                min_size=int(paras[2]))
            show = mark_boundaries(img, seg_map)
            cv2.imshow('', show)

            wait_time = 1
        else:
            wait_time = 100

        key = cv2.waitKey(wait_time)
        break
    cv2.imwrite('tiger_felz.jpg', show * 255)
Example No. 38
def test_FH_time(scale, final_ls):
    """
    scale : list
    final_ls : list 

    This function takes as input data the list of indexes allowing access to your
    ground truth and your images. It also takes the scale parameter as a list.
    This allows to calculate the average time taken by the algorithm of Felznwalb and Huttenlocher
    to segment according to the parameter z.

    """

    results_dic = {}

    for i, z in enumerate(scale):
        print("z = {}".format(z))
        results = np.zeros((1, len(final_ls)))
        for idx, path in enumerate(final_ls):

            image = cv2.imread('Data/train/{}.jpg'.format(path))
            GT = np.load("Data/ground_truth/{}.npy".format(path))
            t0 = time.time()
            segments = felzenszwalb(img_as_float(image), scale=z)
            tend = time.time() - t0
            results[0][idx] = tend

            if (idx % 20) == 0:
                print('Image : {}, Time : {}'.format(idx, results[0][idx]))
        results_dic[z] = results
        print('-' * 10)

    result_2 = np.zeros((len(scale), 1))
    for idx, z in enumerate(scale):
        result_2[idx, 0] = results_dic[z][0].mean()
    df = pd.DataFrame(result_2, index=scale, columns=["Time"])

    return df
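
A hedged usage sketch (the image ids are hypothetical and must exist under Data/train and Data/ground_truth, per the paths used above):

scales = [50, 100, 300]
final_ls = ['0001', '0002', '0003']
df = test_FH_time(scales, final_ls)
print(df)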
Example No. 39
def createSuperPixelMain(params):
    """Calculates super pixel segmentation
    
    Arguments:
        params {tuple} -- parameters for the function
    """
    super_pixel_method, img_file, i, out_dir = params

    img = cv2.imread(img_file[i], cv2.IMREAD_COLOR)

    if super_pixel_method == "Felzenszwalb":
        labels = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
    elif super_pixel_method == "Quickshift":
        labels = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)
    elif super_pixel_method == "Slic":
        labels = slic(img, n_segments=250, compactness=10, sigma=1)
    elif super_pixel_method == "Watershed":
        gradient = sobel(rgb2gray(img))
        labels = watershed(gradient, markers=250, compactness=0.001)

    np.save(
        os.path.join(out_dir, "{0}_{1}.npy".format(i, super_pixel_method)),
        labels.astype(np.uint8),
    )
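
A hedged usage sketch (file names and the output directory are placeholders): params is the (method, file list, index, output directory) tuple the function unpacks.

import os

img_files = ["frames/0001.png", "frames/0002.png"]
out_dir = "superpixels_out"
os.makedirs(out_dir, exist_ok=True)  # np.save needs the directory to exist
createSuperPixelMain(("Felzenszwalb", img_files, 0, out_dir))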
Example No. 40
def extract_superpixel(filename, index):
    CROP = 16
    scales = [1]
    markers = [400]
    filename = os.path.join(data_path, filename)
    image = cv2.imread(filename)
    # undistort
    image = undistort(image)
    image = image[CROP:-CROP, CROP:-CROP, :]

    segments = []
    Adjs = []
    Adjs_color = []

    for s, m in zip(scales, markers):
        image = cv2.resize(image, (384 // s, 288 // s))
        image = img_as_float(image)

        gradient = sobel(rgb2gray(image))
        # segment = watershed(gradient, markers=m, compactness=0.001)
        segment = felzenszwalb(image, scale=100, sigma=0.5, min_size=50)
        segments.append(segment)

    return segments[0].astype(np.int16)
Example No. 41
def felzenszwalb_skimage(input_band_list, scale, sigma, min_size):

    """Felzenszwalb segmentation from Skimage library
    
    :param input_band_list: list of 2darrays (list of numpy arrays)
    :param scale: defines the observation level, higher scale means fewer and larger segments (float)
    :param sigma: width of the Gaussian smoothing kernel for preprocessing, zero means no smoothing (float)
    :param min_size: minimum component size, enforced using postprocessing (integer)
    :returns:  2darray with the result of the segmentation (numpy array)
    
    Author: Daniele De Vecchi - Mostapha Harb
    Last modified: 22/03/2014
    
    Reference: http://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.felzenszwalb
    """

    # TODO: Here too it seems that only RGB is used and each band is segmented separately and then merged.
    # TODO: It would be more powerful if the full spectral content were used and one segmentation were run on the n-D feature space.

    # default values, set in case of 0 as input
    if scale == 0:
        scale = 2
    if sigma == 0:
        sigma = 0.1
    if min_size == 0:
        min_size = 2

    if len(input_band_list) == 4:
        img = np.dstack((input_band_list[0], input_band_list[1], input_band_list[2], input_band_list[3]))
    if len(input_band_list) == 3:
        img = np.dstack((input_band_list[0], input_band_list[1], input_band_list[2]))
    if len(input_band_list) == 1:
        img = input_band_list[0]
    segments_fz = felzenszwalb(img, scale, sigma, min_size)

    return segments_fz
Example No. 42
def felzen_segment():
    global segments, orig
    img_b64 = request.values['data'].split("base64,")[1]
    val = 600 - int(float(request.values['val']))
    image_result = open('orig.png', 'wb')
    image_result.write(base64.b64decode(img_b64))
    img = io.imread('orig.png')
    img = color.rgba2rgb(img)
    segments = segmentation.felzenszwalb(img, min_size=val)
    segmented_img = segmentation.mark_boundaries(img, segments)
    back_img = np.zeros((segmented_img.shape[0], segmented_img.shape[1], 4))
    back_img[:, :, 0] = img[:, :, 0] * 255
    back_img[:, :, 1] = img[:, :, 1] * 255
    back_img[:, :, 2] = img[:, :, 2] * 255
    back_img[:, :, 3] = 255
    orig = img
    for file in os.listdir("./masks/"):
        os.remove("./masks/" + file)
    plt.imsave("masks/back.png", back_img.astype(np.uint8))
    plt.imsave("segment.jpg", segmented_img)
    np.save("segments", segments)
    print('Image received: {}'.format(img.shape))
    response = jsonify({'message': 'Happy Noises'})
    return response
Example No. 43
def test_3D():
    grey_img = np.zeros((10, 10))
    rgb_img = np.zeros((10, 10, 3))
    three_d_img = np.zeros((10, 10, 10))
    with assert_no_warnings():
        felzenszwalb(grey_img, multichannel=True)
        felzenszwalb(grey_img, multichannel=False)
        felzenszwalb(rgb_img, multichannel=True)
    with assert_warns(RuntimeWarning):
        felzenszwalb(three_d_img, multichannel=True)
    with testing.raises(ValueError):
        felzenszwalb(rgb_img, multichannel=False)
        felzenszwalb(three_d_img, multichannel=False)
Example No. 44
def extractFeature(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = imutils.resize(img, width=200)

    # segment the image
    labels = segmentation.felzenszwalb(img, scale=35.0)
    # labels = segmentation.quickshift(img, ratio=1.0, kernel_size=4, max_dist=8)
    # labels = segmentation.slic(img, compactness=15, n_segments=300)

    # get the segmented image
    seg_img = color.label2rgb(labels, img, kind='avg')

    # Convert to hls space
    seg_bgr_img = cv2.cvtColor(seg_img, cv2.COLOR_RGB2BGR)
    hls = cv2.cvtColor(seg_bgr_img, cv2.COLOR_BGR2HLS)
    hsv = cv2.cvtColor(seg_bgr_img, cv2.COLOR_BGR2HSV)

    # get cluster information
    cluster_dict = {}
    max_light = 0
    max_sat = 0
    max_val = 0
    min_light = 1000
    min_sat = 1000
    min_val = 1000
    max_light_label = 0
    max_sat_label = 0
    max_val_label = 0
    height, width, color_depth = img.shape
    for y in range(0, height - 1):
        for x in range(0, width - 1):
            hls_pix = hls[y, x, :]
            hsv_pix = hsv[y, x, :]
            label = labels[y, x]

            if label not in cluster_dict:
                cluster_dict[label] = Cluster(hls_pix)
            cluster_dict[label].add_pixel(x, y)

            min_sat = min(min_sat, hls_pix[2])
            if hls_pix[2] > max_sat:
                max_sat = hls_pix[2]
                max_sat_label = label

            min_light = min(min_light, hls_pix[1])
            if hls_pix[1] > max_light:
                max_light = hls_pix[1]
                max_light_label = label

            min_val = min(min_val, hsv_pix[1])
            if hsv_pix[1] > max_val:
                max_val = hsv_pix[1]
                max_val_label = label

    # calculate cluster locations
    for c in cluster_dict:
        cluster_dict[c].finalize()

    # calculate the actual features
    # get location features
    max_sat_locx_bin = NUM_LOC_BINS*[0]
    max_sat_locy_bin = NUM_LOC_BINS*[0]
    max_light_locx_bin = NUM_LOC_BINS*[0]
    max_light_locy_bin = NUM_LOC_BINS*[0]
    max_val_locx_bin = NUM_LOC_BINS*[0]
    max_val_locy_bin = NUM_LOC_BINS*[0]

    max_sat_locx_bin[util.getBinIndex(float(cluster_dict[max_sat_label].x) / width,
                                      NUM_LOC_BINS, MAX_LOC_VAL)] = 1
    max_sat_locy_bin[util.getBinIndex(float(cluster_dict[max_sat_label].y) /  height,
                                      NUM_LOC_BINS, MAX_LOC_VAL)] = 1
    max_light_locx_bin[util.getBinIndex(float(cluster_dict[max_light_label].x) / width,
                                        NUM_LOC_BINS, MAX_LOC_VAL)] = 1
    max_light_locy_bin[util.getBinIndex(float(cluster_dict[max_light_label].y) / height,
                                        NUM_LOC_BINS, MAX_LOC_VAL)] = 1
    max_val_locx_bin[util.getBinIndex(float(cluster_dict[max_val_label].x) / width,
                                      NUM_LOC_BINS, MAX_LOC_VAL)] = 1
    max_val_locy_bin[util.getBinIndex(float(cluster_dict[max_val_label].y) / height,
                                      NUM_LOC_BINS, MAX_LOC_VAL)] = 1

    # get size features
    total_pixels = height*width
    sat_size = float(cluster_dict[max_sat_label].num_pixels) / total_pixels
    light_size = float(cluster_dict[max_light_label].num_pixels) / total_pixels
    val_size = float(cluster_dict[max_val_label].num_pixels) / total_pixels

    # get contrast features
    mini = 20   # from manual inspection
    max_sat_diff = util.normalize(max_sat - min_sat, mini, util.MAX_SAT)
    max_light_diff = util.normalize(max_light - min_light, mini, util.MAX_LIGHT)
    max_val_diff = util.normalize(max_val - min_val, mini, util.MAX_VAL)

    features = max_sat_locx_bin + max_sat_locy_bin + \
        max_light_locx_bin + max_light_locy_bin + \
        max_val_locx_bin + max_val_locy_bin + \
        [sat_size, light_size, val_size] + \
        [max_sat_diff, max_light_diff, max_val_diff]

    assert len(features) == len(getFeatureName()), \
        "length of segmentation features matches feature names"

    # plot for debugging
    if IS_DEBUG:
        # plot_marker(cluster_dict[max_sat_label].x,
        #             cluster_dict[max_sat_label].y,
        #             (255, 0, 0), seg_bgr_img)

        # plot_marker(cluster_dict[max_light_label].x,
        #             cluster_dict[max_light_label].y,
        #             (0, 255, 0), seg_bgr_img)

        # plot_marker(cluster_dict[max_val_label].x,
        #             cluster_dict[max_val_label].y,
        #             (0, 0, 255), seg_bgr_img)

        plt.figure()
        cv2.imshow('img', seg_bgr_img)
        cv2.waitKey(0)

        return [features, seg_bgr_img]

    return features
Example No. 45
from __future__ import print_function

import matplotlib.pyplot as plt
import numpy as np

from skimage.data import astronaut  # lena() was removed from scikit-image; astronaut() is the stock replacement
from skimage.segmentation import felzenszwalb, slic, quickshift
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float

'''
[::2, ::2] takes one sample every two indices along each axis, i.e. down-sampling.
'''
img = img_as_float(astronaut()[::2, ::2])
segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
print (segments_fz)
print (segments_fz.shape)
segments_slic = slic(img, n_segments=4, compactness=10, sigma=1)
np.savetxt('test.txt', segments_slic, fmt='%1i')
segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)

print("Felzenszwalb's number of segments: %d" % len(np.unique(segments_fz)))
print("Slic number of segments: %d" % len(np.unique(segments_slic)))
print("Quickshift number of segments: %d" % len(np.unique(segments_quick)))

fig, ax = plt.subplots(1, 3)
fig.set_size_inches(8, 3, forward=True)
fig.subplots_adjust(0.05, 0.05, 0.95, 0.95, 0.05, 0.05)

ax[0].imshow(mark_boundaries(img, segments_fz))
Example No. 46


map_array=numpy.array(map_array)
finalLabel=numpy.array(finalLabel)
pickle.dump(g, file_g_final)
pickle.dump(finalLabel,file_final)
pickle.dump(map_array,file_map)
finalNum=len(numpy.unique(finalLabel))
print('finalNum')
print(finalNum)



plt.imshow(img)
plt.figure()
plt.imshow(mark_boundaries(img, segments_slic,color=(1,0,0)))
plt.imsave("fishMan_slic.jpg",mark_boundaries(img, segments_slic,color=(1,0,0)))
plt.figure()
plt.imshow(mark_boundaries(img,region_fh,color=(1,0,0)))
plt.imsave("fishMan_Hybrid.jpg",mark_boundaries(img,region_fh,color=(1,0,0)))
plt.figure()
plt.imshow(mark_boundaries(img,finalLabel,color=(1,0,0)))
plt.imsave("fishMan_final.jpg",mark_boundaries(img,finalLabel,color=(1,0,0)))
plt.show()
numpy.savetxt('finalLabel.txt', finalLabel, fmt='%1i')

fh = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
#savePath='/Users/yuan/BaiduYun/MyDocument/workspace/MyMatlab/superpixel_benchmark/result/fh/fh_200/62096..mat'
#sio.savemat(savePath,{'finalLabel':fh})
Example No. 47
def segment_fz(fig, image):
    segmented = felzenszwalb(image, scale=500, sigma=0.5, min_size=50)
    fig_label_segments(fig, image, segmented, 'felzenszwalb')
Example No. 48
def prep():
	header = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
	with open('namesClasses.dat','rb') as f:
		namesClasses = cPickle.load(f)
	labels = map(lambda s: s.split('\\')[-1], namesClasses)
	for i in range(len(namesClasses)):
		currentClass = namesClasses[i]
		root_class.data_search(currentClass).id_list.append(i)
	#get the total test images
	#Full set
	#fnames = glob.glob(os.path.join("competition_data", "test", "*.jpg"))
	#Smaller set
	fnames = glob.glob(os.path.join("smaller_comp", "test", "*.jpg"))
	
	numberofTestImages = len(fnames)

	X = np.zeros((numberofTestImages, num_features), dtype=float)

	#Get filename separate from prefix path
	images = map(lambda fileName: fileName.split('\\')[-1], fnames)

	i = 0
	# report progress for each 1% done  
	report = [int((j+1)*numberofTestImages/100.) for j in range(100)]
	for fileName in fnames:
		# Read in the images and create the features
		image = imread(fileName, as_grey=True)
		image = rotImage(image)
		# Added from https://github.com/Newmu/Stroke-Prediction/blob/master/startPredictingGenCode.py
		thresh = 0.9*255
		# if image.min < 0.75*255:
			# img = image < thresh
		# else:
			# img = image
		# if img.sum() != 0:
			# imgX,imgY = np.nonzero(img)
			# imgW = imgX.max()-imgX.min()
			# imgH = imgY.max()-imgY.min()
			# if (imgW>1 and imgH>1):
				# image = image[imgX.min():imgX.max(),imgY.min():imgY.max()]
		# #----------------------------------
		cvimage = cv2.imread(fileName)
		features = getMinorMajorRatio(image)
		#__Begin moved region
		#From http://nbviewer.ipython.org/github/kqdtran/AY250-F13/blob/master/hw4/hw4.ipynb
		pca = decomposition.PCA(n_components=25)
		PCAFeatures = pca.fit_transform(image)
		PCAevr = pca.explained_variance_ratio_
		for evr in range(len(PCAevr)):
			if np.isnan(PCAevr[evr]):
				PCAevr[evr] = 0.0
		#_____________________________________________________________
		corners = getCorners(cvimage,features['orientation'])
		horslice = image[:,maxPixel/2]
		vertslice = image[maxPixel/2]
		#correlation = signal.correlate(image,image)
		#horcorrelation = signal.correlate(horslice,horslice)
		#vertcorrelation = signal.correlate(vertslice,vertslice)
		#crosscorrelation = signal.correlate(horslice,vertslice)
		#correlation = correlation/correlation[correlation.shape[0]/2,correlation.shape[0]/2]
		#horcorrelation = horcorrelation/horcorrelation[horcorrelation.shape[0]/2]
		#vertcorrelation = vertcorrelation/vertcorrelation[vertcorrelation.shape[0]/2]
		#crosscorrelation = crosscorrelation/crosscorrelation[horcorrelation.shape[0]/2]
		hormean = np.mean(horslice)
		horstd = np.std(horslice)
		vertmean = np.mean(vertslice)
		vertstd = np.std(vertslice)
		horcount = vertcount = 0
		for pix in horslice:
			graycheck = False
			if pix<thresh:
				if not graycheck:
					graycheck = True
					horcount = horcount + 1
			else:
				graycheck = False
		for pix in vertslice:
			graycheck = False
			if pix<thresh:
				if not graycheck:
					graycheck = True
					vertcount = vertcount + 1
			else:
				graycheck = False
		features['hsobel'] = np.nanmean(filter.hsobel(image))
		features['vsobel'] = np.nanmean(filter.vsobel(image))
		features['peaklocalmax'] = np.nanmean(peak_local_max(image))
		features['felzen'] = np.nanmean(segmentation.felzenszwalb(image))
		if np.isnan(features['peaklocalmax']):
			features['peaklocalmax'] = 0.0
		if np.isnan(features['felzen']):
			features['felzen'] = 0.0
		#hormirror = image[:maxPixel/2]-image[maxPixel-1:maxPixel/2:-1]
		#vertmirror = image[:,:maxPixel/2]-image[:,maxPixel-1:maxPixel/2:-1]		
		image = resize(image, (maxPixel, maxPixel))
					
		#From http://scikit-image.org/docs/dev/auto_examples/plot_local_binary_pattern.html
		lbp = local_binary_pattern(image, n_points, radius, METHOD)
		n_bins = lbp.max()+1
		lbpcounts = np.histogram(lbp,n_bins,normed=True,range=(0, n_bins))[0]
		#_____________________________________________________________
		#__Moved region was here
		#dist_trans = ndimage.distance_transform_edt(image[0])
		# Store the rescaled image pixels and the axis ratio
		#fd, hog_image = hog(image[0], orientations=8, pixels_per_cell=(2, 2),
		#    cells_per_block=(1, 1), visualise=True)
		#X[i, 0:imageSize] = np.reshape(dist_trans, (1, imageSize))
		#X[i, 0:imageSize] = np.reshape(hog_image, (1, imageSize))
					
		# Store the rescaled image pixels and the axis ratio
		X[i, 0:imageSize] = np.reshape(image, (1, imageSize))
		#X[i, imageSize:imageSize+corr2dsize] = np.reshape(correlation, (1,corr2dsize))
		#X[i, imageSize+corr2dsize:imageSize+corr2dsize+corrsize] = np.reshape(horcorrelation, (1,corrsize))
		#X[i, imageSize+corr2dsize+corrsize:imageSize+corr2dsize+2*corrsize] = np.reshape(vertcorrelation, (1,corrsize))
		#X[i, imageSize+corr2dsize+2*corrsize:imageSize+corr2dsize+3*corrsize] = np.reshape(crosscorrelation, (1,corrsize))
		featcount = imageSize+3*corrsize+corr2dsize
		for k,v in features.items():
			try:
				X[i, featcount:featcount+len(v)] = v
				featcount = featcount + len(v)
			except TypeError, te:
				X[i, featcount] = v
				featcount = featcount + 1
		X[i, featcount:featcount+lbpcounts.size] = lbpcounts
		X[i, featcount+lbpcounts.size:featcount+lbpcounts.size+PCAsize] = PCAevr
		X[i, featcount+lbpcounts.size+PCAsize] = np.mean(PCAFeatures)
		i += 1
		if i in report: print np.ceil(i *100.0 / numberofTestImages), "% done"
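
Note that in the two slice-scanning loops above, graycheck is reset to False at the top of every iteration, so horcount and vertcount end up counting individual below-threshold pixels rather than contiguous dark stretches. If the intent was to count dark runs (which the flag logic suggests), a vectorised sketch of that feature would be:

import numpy as np

def count_dark_runs(slice_values, thresh):
    # count contiguous runs of below-threshold pixels along a 1-D slice
    dark = np.asarray(slice_values) < thresh
    run_starts = dark & ~np.concatenate(([False], dark[:-1]))
    return int(np.count_nonzero(run_starts))

print(count_dark_runs([255, 10, 12, 255, 8, 255], thresh=0.9 * 255))  # -> 2

This is only a sketch of the apparent intent; the behaviour of the original loop (counting dark pixels) may of course be what the author wanted.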
Exemplo n.º 49
0
def felzenszwalb(image):
    SCALE = 1000
    SIGMA = 5
    MIN_SIZE = 500
    return segmentation.felzenszwalb(image, SCALE, SIGMA, MIN_SIZE)
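
The wrapper above passes its constants positionally, which lines up with the scale, sigma and min_size parameters of skimage.segmentation.felzenszwalb. Purely as a usage sketch, the same call with explicit keywords and a quick region count:

import numpy as np
from skimage import data, img_as_float, segmentation

img = img_as_float(data.astronaut())  # any RGB or grayscale image works here
labels = segmentation.felzenszwalb(img, scale=1000, sigma=5, min_size=500)
print("number of segments:", len(np.unique(labels)))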
Exemplo n.º 50
0
"""
from __future__ import print_function

import matplotlib.pyplot as plt
import numpy as np

from skimage.data import astronaut
from skimage.color import rgb2gray
from skimage.filters import sobel
from skimage.segmentation import felzenszwalb, slic, quickshift, watershed
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float

img = img_as_float(astronaut()[::2, ::2])

segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)
gradient = sobel(rgb2gray(img))
segments_watershed = watershed(gradient, markers=250, compactness=0.001)

print("Felzenszwalb number of segments: {}".format(len(
    np.unique(segments_fz))))
print('SLIC number of segments: {}'.format(len(np.unique(segments_slic))))
print('Quickshift number of segments: {}'.format(len(
    np.unique(segments_quick))))

fig, ax = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True)

ax[0, 0].imshow(mark_boundaries(img, segments_fz))
ax[0, 0].set_title("Felzenszwalbs's method")
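
The snippet is cut off after the first panel. Assuming it follows the scikit-image gallery script it is clearly based on, the remaining panels and cleanup would be filled in along these lines:

ax[0, 1].imshow(mark_boundaries(img, segments_slic))
ax[0, 1].set_title('SLIC')
ax[1, 0].imshow(mark_boundaries(img, segments_quick))
ax[1, 0].set_title('Quickshift')
ax[1, 1].imshow(mark_boundaries(img, segments_watershed))
ax[1, 1].set_title('Compact watershed')

for a in ax.ravel():
    a.set_axis_off()

plt.tight_layout()
plt.show()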
Exemplo n.º 51
0
def main():
	files = []
	# Generate training data
	i = 0    
	label = 0
	# List of string of class names
	namesClasses = list()
	features = {}
	features = features.copy()
	print "Reading images"
	# Navigate through the list of directories
	for folder in directory_names:
		#Get name of class directory separate from prefix path
		currentClass = folder.split(os.sep)[-1] 
		namesClasses.append(currentClass)
		root_class.data_search(currentClass).id_list.append(len(namesClasses)-1)
		for fileNameDir in os.walk(folder):   
			for fileName in fileNameDir[2]:
				# Only read in the images
				if fileName[-4:] != ".jpg":
					continue
				
				# Read in the images and create the features
				nameFileImage = "{0}{1}{2}".format(fileNameDir[0], os.sep, fileName)
				image = []
				image.append(imread(nameFileImage, as_grey=True))
				features['original_size'] = image[0].size
				#image[0] = equalize_hist(image[0])
				image[0] = rotImage(image[0])
				# Added from https://github.com/Newmu/Stroke-Prediction/blob/master/startPredictingGenCode.py
				thresh = 0.9*255
				# if image.min < 0.75*255:
					# img = image < thresh
				# else:
					# img = image
				# if img.sum() != 0:
					# imgX,imgY = np.nonzero(img)
					# imgW = imgX.max()-imgX.min()
					# imgH = imgY.max()-imgY.min()
					# if (imgW>1 and imgH>1):
						# image = image[imgX.min():imgX.max(),imgY.min():imgY.max()]
				#----------------------------------
				cvimage = cv2.imread(nameFileImage)
				#image[0] = gaussian_filter(image[0],sigma=2)
				files.append(nameFileImage)
				image.append(np.fliplr(image[0]))
				image.append(np.flipud(image[0]))
				image.append(np.fliplr(image[2]))
				# image.append(np.rot90(image[0]))
				# image.append(np.fliplr(image[4]))
				# image.append(np.flipud(image[4]))
				# image.append(np.fliplr(image[6]))
				for j in range(len(image)):
					features = getMinorMajorRatio(image[j])
					#__Begin moved region
					#From http://nbviewer.ipython.org/github/kqdtran/AY250-F13/blob/master/hw4/hw4.ipynb
					pca = decomposition.PCA(n_components=25)
					PCAFeatures = pca.fit_transform(image[0])
					#_____________________________________________________________
					corners = getCorners(cvimage,features['orientation'])
					horslice = image[j][:,maxPixel/2]
					vertslice = image[j][maxPixel/2]
					#correlation = signal.correlate(image,image)
					#horcorrelation = signal.correlate(horslice,horslice)
					#vertcorrelation = signal.correlate(vertslice,vertslice)
					#crosscorrelation = signal.correlate(horslice,vertslice)
					#correlation = correlation/correlation[correlation.shape[0]/2,correlation.shape[0]/2]
					#horcorrelation = horcorrelation/horcorrelation[horcorrelation.shape[0]/2]
					#vertcorrelation = vertcorrelation/vertcorrelation[vertcorrelation.shape[0]/2]
					#crosscorrelation = crosscorrelation/crosscorrelation[horcorrelation.shape[0]/2]
					hormean = np.mean(horslice)
					horstd = np.std(horslice)
					vertmean = np.mean(vertslice)
					vertstd = np.std(vertslice)
					horcount = vertcount = 0
					for pix in horslice:
						graycheck = False
						if pix<thresh:
							if not graycheck:
								graycheck = True
								horcount = horcount + 1
						else:
							graycheck = False
					for pix in vertslice:
						graycheck = False
						if pix<thresh:
							if not graycheck:
								graycheck = True
								vertcount = vertcount + 1
						else:
							graycheck = False
					features['hsobel'] = np.nanmean(filter.hsobel(image[j]))
					features['vsobel'] = np.nanmean(filter.vsobel(image[j]))
					features['peaklocalmax'] = np.nanmean(peak_local_max(image[j]))
					features['felzen'] = np.nanmean(segmentation.felzenszwalb(image[j]))
					if np.isnan(features['peaklocalmax']):
						features['peaklocalmax'] = 0.0
					if np.isnan(features['felzen']):
						features['felzen'] = 0.0
					#hormirror = image[j][:maxPixel/2]-image[j][maxPixel-1:maxPixel/2:-1]
					#vertmirror = image[j][:,:maxPixel/2]-image[j][:,maxPixel-1:maxPixel/2:-1]
					#__End moved region
					image[j] = resize(image[j], (maxPixel, maxPixel))
					
					#From http://scikit-image.org/docs/dev/auto_examples/plot_local_binary_pattern.html
					lbp = local_binary_pattern(image[j], n_points, radius, METHOD)
					n_bins = lbp.max()+1
					lbpcounts = np.histogram(lbp,n_bins,normed=True,range=(0, n_bins))[0]
					#_____________________________________________________________
					#__Moved region was here
					#dist_trans = ndimage.distance_transform_edt(image[0])
					# Store the rescaled image pixels and the axis ratio
					#fd, hog_image = hog(image[0], orientations=8, pixels_per_cell=(2, 2),
					#    cells_per_block=(1, 1), visualise=True)
					#X[i*imagesperfile+j, 0:imageSize] = np.reshape(dist_trans, (1, imageSize))
					#X[i*imagesperfile+j, 0:imageSize] = np.reshape(hog_image, (1, imageSize))
					X[i*imagesperfile+j, 0:imageSize] = np.reshape(image[j], (1, imageSize))
					#X[i*imagesperfile+j, imageSize:imageSize+corr2dsize] = np.reshape(correlation, (1,corr2dsize))
					#X[i*imagesperfile+j, imageSize+corr2dsize:imageSize+corr2dsize+corrsize] = np.reshape(horcorrelation, (1,corrsize))
					#X[i*imagesperfile+j, imageSize+corr2dsize+corrsize:imageSize+corr2dsize+2*corrsize] = np.reshape(vertcorrelation, (1,corrsize))
					#X[i*imagesperfile+j, imageSize+corr2dsize+2*corrsize:imageSize+corr2dsize+3*corrsize] = np.reshape(crosscorrelation, (1,corrsize))
					featcount = imageSize+3*corrsize+corr2dsize
					for k,v in features.items():
						try:
							X[i*imagesperfile+j, featcount:featcount+len(v)] = v
							featcount = featcount + len(v)
						except TypeError, te:
							X[i*imagesperfile+j, featcount] = v
							featcount = featcount + 1
					X[i*imagesperfile+j, featcount:featcount+lbpcounts.size] = lbpcounts
					X[i*imagesperfile+j, featcount+lbpcounts.size:featcount+lbpcounts.size+PCAsize] = pca.explained_variance_ratio_
					X[i*imagesperfile+j, featcount+lbpcounts.size+PCAsize] = np.mean(PCAFeatures)
					# Store the classlabel
					y[i*imagesperfile+j] = label
					if i*imagesperfile+j in report: print np.ceil((i*imagesperfile+j) *100.0 / (num_rows)), "% done"
				i += 1
		label += 1
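
The four entries appended to image in this example act as a simple flip-based augmentation: the original, its left-right flip, its up-down flip, and both flips combined. A standalone sketch of that idea, with a hypothetical augment helper:

import numpy as np

def augment(img):
    # original, horizontal flip, vertical flip, and both flips combined
    lr = np.fliplr(img)
    return [img, lr, np.flipud(img), np.flipud(lr)]

views = augment(np.arange(9).reshape(3, 3))
print(len(views))  # -> 4 training views per input image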
Exemplo n.º 52
0
    def draw_image(self):
        float_image = img_as_float(self.image)

        if self.method == 0:
            if self.just_mask == 0:
                self.segments = felzenszwalb(
                    float_image,
                    scale=self.felzenszwalb_scale,
                    sigma=self.felzenszwalb_sigma,
                    min_size=self.felzenszwalb_min_size)

        elif self.method == 1:
            if self.just_mask == 0:
                self.segments = slic(float_image,
                                     n_segments=self.slic_n_segments,
                                     compactness=self.slic_compactness,
                                     sigma=self.slic_sigma)

        elif self.method == 2:
            if self.just_mask == 0:
                self.segments = quickshift(
                    float_image,
                    kernel_size=self.quickshift_kernel_size,
                    max_dist=self.quickshift_max_dist,
                    ratio=self.quickshift_ratio)

        elif self.method == 3:
            if self.just_mask == 0:
                gradient = sobel(rgb2gray(float_image))
                self.segments = watershed(
                    gradient,
                    markers=self.watershed_markers,
                    compactness=self.watershed_compactness)

        elif self.method == 4:
            self.segments = None
            self.line = False

        else:
            self.segments = None
            self.line = True

        if self.segments is not None:
            boundaries = mark_boundaries(self.image, self.segments)
            cv_image = img_as_ubyte(boundaries)

            if self.hide == 0:
                if self.mask_on == 0:
                    result = cv2.addWeighted(cv_image, 0.5, self.mask, 0.5, 0)
                else:
                    result = self.mask
            else:
                if self.mask_on == 0:
                    result = cv2.addWeighted(self.image, 0.5, self.mask, 0.5,
                                             0)
                else:
                    result = self.mask
        else:
            if self.mask_on == 0:
                result = cv2.addWeighted(self.image, 0.5, self.mask, 0.5, 0)
            else:
                result = self.mask

        height, width, channels = np.shape(result)

        totalBytes = result.nbytes
        bytesPerLine = int(totalBytes / height)
        qimg = QtGui.QImage(result.data, result.shape[1], result.shape[0],
                            bytesPerLine, QtGui.QImage.Format_RGB888)
        pixmap = QtGui.QPixmap.fromImage(qimg)
        self.label.resize(width, height)
        self.label.setPixmap(pixmap)
        self.label.show()

        self.candidate = []
        self.delete_candidate = []
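
The QImage construction at the end of draw_image only works if the array is contiguous 8-bit RGB and bytesPerLine matches the row stride. A minimal standalone conversion helper along those lines (PyQt5 assumed; ndarray_to_pixmap is a hypothetical name, not part of this class):

import numpy as np
from PyQt5 import QtGui

def ndarray_to_pixmap(rgb):
    # convert a HxWx3 uint8 RGB array into a QPixmap
    rgb = np.ascontiguousarray(rgb, dtype=np.uint8)
    height, width, _ = rgb.shape
    qimg = QtGui.QImage(rgb.data, width, height, 3 * width,
                        QtGui.QImage.Format_RGB888)
    # QPixmap.fromImage copies the pixels, so the source array need not be kept alive
    return QtGui.QPixmap.fromImage(qimg)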
Exemplo n.º 53
0
def felz_seg(img):
    segments = felzenszwalb(img, scale=120.0, sigma=1.5, min_size=30)
    return segments
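
A quick way to inspect the labels returned by felz_seg is to overlay the region boundaries on the input; a small usage sketch, assuming an RGB float image:

import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.segmentation import felzenszwalb, mark_boundaries

img = img_as_float(data.astronaut())
labels = felzenszwalb(img, scale=120.0, sigma=1.5, min_size=30)
plt.imshow(mark_boundaries(img, labels, color=(1, 1, 0)))  # yellow region boundaries
plt.axis('off')
plt.show()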
Exemplo n.º 54
0
def findIndividualSections(destination):
    #input:     - overview image of the sample glass containing sections
    #output:    - a matrix to be placed as a grid over the sample glass, indicating for each cell True or False for whether it contains a section
    
    #PARAMETERS TO BE ALTERED FOR DIFFERENT BATCHES:
    size = 1200000 
    sizeGlass = 19.15 #mm
    sideSection = 0.736 #mm #size of one side of the section
    delta = 0.23 #ratio
    limit = 0.5
    ratioGlassToPic = 0.8 #assuming the diameter of the sample glass is at least this much of the short side of the picture
    
    start = time.time()
    warnings.filterwarnings("ignore")
    filename = os.path.join(skimage.data_dir, destination)
    im = io.imread(filename)
    im = downsizeTo(im, size)
    OG = im
    pr = 0
    p1, p2 = np.percentile(im, (pr, 100-pr))
    imCircle = dab(im, 20)
    s = 1.5
    edgesCircle = canny(imCircle, sigma=s, low_threshold=0, high_threshold=1)
    ratioGlassToPic = 0.8
    circx, circy = findCircle(edgesCircle, ratioGlassToPic)
    im = cropToCircle(im, circx, circy)
    
    mm = im.shape[0]/sizeGlass #1mm in pixels
    sideSectionPix= sideSection*mm
    
    
    #FIRST METHOD
    prEdges = 1.1
    imEdges = SSI(im,prEdges)
    
    s = 1.6
    edges = canny(imEdges, sigma=s, low_threshold=0, high_threshold=1)
    edges = cropToCircle(edges, circx, circy, crop = False, returnBool = True)
    d = 0.02 #radius of the blacked-out circle
    edges = blackoutCircle(edges, circx, circy, d)
    
    deltaPix = delta* sideSectionPix
    check = findHalfCircle(sideSectionPix, deltaPix)
    viaEdges = selectFillEdges(edges, check, limit, deltaPix)
    
    #SECOND METHOD
    prFz = 7.6
    imFz = SSI(im,prFz, returnRGB = True)
    
    
    sc = sideSectionPix*20
    s = 1.6
    mins = int(sideSectionPix**2/5)
    segments_fz = felzenszwalb(imFz, scale=sc, sigma=s, min_size=mins)
    
    
    #viaFz = findSectionsFz(segments_fz)
    #viaFz = cropToCircle(viaFz, circx, circy, crop = False, returnBool = True)
    
    #COMBINE METHODS
    sections = overlapIndividual(viaEdges, segments_fz)
    
    
    #DISPLAY RESULTS
    print("total ellapsed time = ", round(time.time()-start))
    fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(40, 40),sharex=False, sharey=False)
    ax1.set_title('Original picture')
    ax1.imshow(OG)
    ax2.set_title('result')
    ax2.imshow(sections)
    
    return sections
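
One point worth noting in this example is that scale and min_size are derived from the expected section size in pixels (sideSectionPix) rather than hard-coded. A small sketch of that parameterisation, with made-up numbers rather than the batch values used above:

from skimage import data, img_as_float
from skimage.segmentation import felzenszwalb

img = img_as_float(data.coffee())
object_side_px = 40  # assumed side length of the objects of interest, in pixels
labels = felzenszwalb(
    img,
    scale=object_side_px * 20,              # larger scale -> fewer, larger segments
    sigma=1.6,
    min_size=int(object_side_px ** 2 / 5),  # drop segments much smaller than one object
)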
Exemplo n.º 55
        mask = np.isnan(C_BTemp)
        C_BTemp[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), C_BTemp[~mask])
      
    elif I_minute[C_i] == 30:
        #slice out BT images for the current basin
        C_BTemp = xr.open_dataset(IRDIR+ "\\" + BTemp_filename)['Tb'].values[1][DIM_BOUND[0]:DIM_BOUND[1]+1,DIM_BOUND[2]:DIM_BOUND[3]+1]
        C_lat = xr.open_dataset(IRDIR+ "\\" + BTemp_filename)['lat'].values[DIM_BOUND[0]:DIM_BOUND[1]+1]
        C_lon = xr.open_dataset(IRDIR+ "\\" + BTemp_filename)['lon'].values[DIM_BOUND[2]:DIM_BOUND[3]+1]
        #interpolate NaN values in BT images
        mask = np.isnan(C_BTemp)
        C_BTemp[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), C_BTemp[~mask])

#%%
C_BTemp_nor = np.flipud(C_BTemp/max(C_BTemp.flatten()))
cm = plt.get_cmap('Greys')
C_BTemp_nor_cm = cm(C_BTemp_nor)
C_BTemp_nor_cm = np.uint8(C_BTemp_nor_cm * 255)
im = Image.fromarray(C_BTemp_nor_cm)
enhancer = ImageEnhance.Brightness(im)
im2 = enhancer.enhance(2.5)
im2_np = np.asarray(im2)
im2_np_3 = im2_np[:,:,0:3]
im2_np_3_fl = img_as_float(im2_np_3)
#%%
segments_slic = segmentation.slic(im2_np_3_fl, n_segments=400, compactness=10, sigma=1)
plt.imshow(segmentation.mark_boundaries(im2_np_3_fl, segments_slic))

#%%fig = plt.figure()
segments_fz = segmentation.felzenszwalb(im2_np_3_fl, scale=40, sigma=0.5, min_size=50)
plt.imshow(segmentation.mark_boundaries(im2_np_3_fl, segments_fz))
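
The NaN handling in this example fills gaps by linear interpolation over the flattened pixel index with np.interp. A self-contained version of that trick:

import numpy as np

def fill_nan(values):
    # replace NaNs by linear interpolation over the flat index positions
    flat = np.asarray(values, dtype=float).ravel().copy()
    mask = np.isnan(flat)
    flat[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), flat[~mask])
    return flat.reshape(np.shape(values))

print(fill_nan([1.0, np.nan, 3.0, np.nan, 5.0]))  # -> [1. 2. 3. 4. 5.]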
Exemplo n.º 56
0
					if not graycheck:
						graycheck = True
						horcount = horcount + 1
				else:
					graycheck = False
			for pix in vertslice:
				graycheck = False
				if pix<thresh:
					if not graycheck:
						graycheck = True
						vertcount = vertcount + 1
				else:
					graycheck = False
    
			peaklocalmax = np.nanmean(peak_local_max(image))
			felzen = np.nanmean(segmentation.felzenszwalb(image))
			if np.isnan(peaklocalmax):
				peaklocalmax = 0.0
			if np.isnan(felzen):
				felzen = 0.0
			# Store the rescaled image pixels and the axis ratio
			X[i, 0:imageSize] = np.reshape(image, (1, imageSize))
			#Zak's note: I think I need to add new features into this array after axisratio
			X[i, imageSize:imageSize+corr2dsize] = np.reshape(correlation, (1,corr2dsize))
			X[i, imageSize+corr2dsize:imageSize+corr2dsize+corrsize] = np.reshape(horcorrelation, (1,corrsize))
			X[i, imageSize+corr2dsize+corrsize:imageSize+corr2dsize+2*corrsize] = np.reshape(vertcorrelation, (1,corrsize))
			X[i, imageSize+corr2dsize+2*corrsize:imageSize+corr2dsize+3*corrsize] = np.reshape(crosscorrelation, (1,corrsize))
			X[i, imageSize+3*corrsize+corr2dsize] = axisratio
			X[i, imageSize+3*corrsize+corr2dsize+1] = fillratio
			X[i, imageSize+3*corrsize+corr2dsize+2] = eigenratio
			X[i, imageSize+3*corrsize+corr2dsize+3] = solidity
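
This fragment writes every feature into X at a hand-computed offset (imageSize + 3*corrsize + corr2dsize + ...), while the fuller examples above keep a running featcount and fall back to a scalar write when a value has no length. A small helper in that spirit, with hypothetical names and dummy features:

import numpy as np

def pack_features(row, features, start=0):
    # write scalar or array-valued features into a flat row; return the next free index
    pos = start
    for value in features.values():
        arr = np.atleast_1d(np.asarray(value, dtype=float))
        row[pos:pos + arr.size] = arr
        pos += arr.size
    return pos

row = np.zeros(16)
end = pack_features(row, {'axisratio': 0.8, 'slice_stats': [1.0, 2.0, 3.0]})
print(row[:end])  # -> [0.8 1.  2.  3. ]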
Exemplo n.º 57
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
import pylab
import skimage as ski
import skimage.segmentation as sks
import cv2
import numpy as np

img = ski.img_as_float(plt.imread('239239_faces.jpg')[::2, ::2, :3])

#img = cv2.imread()
pylab.figure(figsize=(20, 10))

segments_fz = sks.felzenszwalb(img, scale=100, sigma=0.01, min_size=100)
borders = sks.find_boundaries(segments_fz)
unique_colors = np.unique(segments_fz.ravel())
segments_fz[borders] = -1
colors = [np.zeros(3)]
for color in unique_colors:
    colors.append(np.mean(img[segments_fz == color], axis=0))
cm = LinearSegmentedColormap.from_list('pallete', colors, N=len(colors))
pylab.subplot(121), pylab.imshow(img), pylab.title('Original',
                                                   size=20), pylab.axis('off'),
pylab.subplot(122), pylab.imshow(segments_fz, cmap=cm),
pylab.title('Segmented with Felzenszwalbs\'s method',
            size=20), pylab.axis('off'),
pylab.show()
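
An alternative to building the mean-colour palette by hand, as this example does, is skimage.color.label2rgb with kind='avg', which paints each segment with its average colour in one call; a sketch:

import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.color import label2rgb
from skimage.segmentation import felzenszwalb

img = img_as_float(data.astronaut())
labels = felzenszwalb(img, scale=100, sigma=0.01, min_size=100)
avg = label2rgb(labels, img, kind='avg', bg_label=-1)  # no label treated as background
plt.imshow(avg)
plt.axis('off')
plt.show()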
Exemplo n.º 58
-1
def SegmentationFelzenszwalb(filename="/media/scratch/plasticity/lvp2d1024_s0_d.tar", time=None):
    t, s = TarFile.LoadTarState(filename, time=time)
    rod = s.CalculateRotationRodrigues()
    img = img_as_float(
        RodriguesToUnambiguousColor(rod["x"], rod["y"], rod["z"], maxRange=None, centerR=None).astype("uint8")
    )

    segments_slic = felzenszwalb(img, scale=100, sigma=0.0, min_size=10)
    print("Slic number of segments: %d" % len(np.unique(segments_slic)))

    fig = plt.figure()
    plt.imshow(mark_boundaries(img, segments_slic, color=[0, 0, 0], outline_color=None))
    plt.show()