def segmentImage(bgImg, humImg, spatial_rad, range_rad, density, clusterThresh, labThresh):

	# perform mean shift clustering on both images
	(seg_bg, labels_bg, regions_bg) = pms.segment(bgImg, spatial_rad, range_rad, density)
	(seg_hum, labels_hum, regions_hum) = pms.segment(humImg, spatial_rad, range_rad, density)

	# display num clusters found
	print('regions_bg: ' + str(regions_bg))
	print('regions_hum: ' + str(regions_hum))

	# convert to LAB colour space
	lab_seg_bg = cv2.cvtColor(seg_bg, cv2.COLOR_BGR2LAB)
	lab_seg_hum = cv2.cvtColor(seg_hum, cv2.COLOR_BGR2LAB)
	lab_humImg = cv2.cvtColor(humImg, cv2.COLOR_BGR2LAB)
	lab_bgImg = cv2.cvtColor(bgImg, cv2.COLOR_BGR2LAB)

	threshImg = np.empty_like(humImg)
	meanshiftImg = np.empty_like(humImg)
	threshImg[:] = humImg
	meanshiftImg[:] = humImg

	for row in range(0, humImg.shape[0]):
		for col in range(0, humImg.shape[1]):
		
			# while looking at a background pixel, compare against a small window of
			# human-image pixels to tolerate slight translations between the two photos
			seg_bg_pixel = seg_bg[row][col]
			lab_bgImg_pixel = lab_bgImg[row][col]

			meanshift_deltas = getOffsetDeltas(seg_hum, col, row, seg_bg_pixel)
			threshold_deltas = getOffsetDeltas(lab_humImg, col, row, lab_bgImg_pixel)

			do_seg_cluster = any(x < clusterThresh for x in meanshift_deltas)
			do_seg_thresh = any(x < labThresh for x in threshold_deltas)

			if do_seg_cluster or do_seg_thresh:
				humImg[row][col][0] = 255 # white out
				humImg[row][col][1] = 255 # all channels
				humImg[row][col][2] = 255 
			
			# save intermediate threshold photo for analysis
			if do_seg_thresh:
				threshImg[row][col][0] = 255 
				threshImg[row][col][1] = 255 
				threshImg[row][col][2] = 255 

			# save intermediate mean shift clustering photo for analysis
			if do_seg_cluster:
				meanshiftImg[row][col][0] = 255 
				meanshiftImg[row][col][1] = 255 
				meanshiftImg[row][col][2] = 255

	return (seg_hum, seg_bg, humImg, threshImg, meanshiftImg)
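getOffsetDeltas is called above but not included in this snippet. The sketch below is only a guess at its shape, assuming it returns the color distances between the reference background pixel and every pixel in a small window around (row, col) of the other image, which is what the comment about tolerating slight translations implies.

import numpy as np

def getOffsetDeltas(img, col, row, ref_pixel, offset=2):
    # Hypothetical helper: Euclidean color distance between ref_pixel and each
    # pixel in a (2*offset+1) x (2*offset+1) window centered on (row, col).
    deltas = []
    for r in range(max(0, row - offset), min(img.shape[0], row + offset + 1)):
        for c in range(max(0, col - offset), min(img.shape[1], col + offset + 1)):
            diff = img[r, c].astype(np.float32) - ref_pixel.astype(np.float32)
            deltas.append(float(np.linalg.norm(diff)))
    return deltas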
Example no. 2
def getSegments(original, SHOW):
    allimages["original"] = original
    ##############################################################################################################
    #gaussian Blur
    #blur = cv2.GaussianBlur(gray_img,(5,5),0)
    #allimages["gaussianBlur"] = blur

    #mean shift segmentation on bgr image
    #https://github.com/fjean/pymeanshift
    #http://ieeexplore.ieee.org/document/1000236/
    (segmented_image, labels_image,
     number_regions) = pms.segment(original,
                                   spatial_radius=SPATIAL_RADIUS,
                                   range_radius=RANGE_RADIUS,
                                   min_density=MIN_DENSITY,
                                   speedup_level=2)
    print("Number of Regions Found: %s" % number_regions)
    unique_labels = np.unique(labels_image)
    blank = original - original
    for label in unique_labels:
        b = random.randint(0, 255)
        g = random.randint(0, 255)
        r = random.randint(0, 255)
        blank[labels_image == label] = [b, g, r]

    if SHOW == "save":
        cv2.imwrite("saved_segmentation.png", blank)

    ################################################################################
    ################################################################################
    ################################################################################
    ################################################################################
    ################################################################################

    return original, labels_image
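A minimal driver for getSegments, assuming the module-level names it relies on (allimages, SPATIAL_RADIUS, RANGE_RADIUS, MIN_DENSITY, pms, random) are defined as implied above; the input file name is illustrative.

import cv2
import numpy as np

allimages = {}
SPATIAL_RADIUS, RANGE_RADIUS, MIN_DENSITY = 5, 5, 250   # example values

original = cv2.imread("input.png")                      # illustrative file name
original, labels = getSegments(original, "save")        # also writes saved_segmentation.png
print("unique labels:", len(np.unique(labels)))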
Example no. 3
 def getHairArea(self,face):
     scale=float(self.img.shape[1])/400
     (segmented,labels,n)=pms.segment(self.img,spatial_radius=int(scale*6),
                           range_radius=scale*5, min_density=300)
     #cv2.imshow('segmented',segmented)
     mv = cv2.split(self.img)
     hair = []
     x=face[0]+face[2]//2
     y=face[1]-self.img.shape[1]//8
     hair_new = [labels[y][x]]
     not_hair = list(range(n))
     not_hair.remove(labels[y][x])
     neighbor=self.getNeighbor(labels,n)
     factors=self.getFactor(segmented,labels,n)
     # Run until new hair area is not detected
     while(hair!=hair_new):
         hair=hair_new[:]
         for i in hair:
             for j in not_hair[:]:  # iterate over a copy, since not_hair is modified below
                 if neighbor[i][j]==1 and compare(factors[i],factors[j]):
                     hair_new.append(j)
                     not_hair.remove(j)
     #print(hair)
     area_hair=np.zeros(shape=labels.shape,dtype=int)
     #print('making array of hair area...')
     for i in range(area_hair.shape[0]):
         for j in range(area_hair.shape[1]):
             if labels[i][j] in hair:
                 area_hair[i][j]=1
     return area_hair
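getNeighbor, getFactor and compare are not part of this snippet. Purely as an illustration of the region-growing loop above, a rough sketch of getNeighbor might build an n x n adjacency matrix of labels that touch in the image:

import numpy as np

def getNeighbor(labels, n):
    # Hypothetical sketch: neighbor[i][j] == 1 when regions i and j are 4-adjacent somewhere.
    neighbor = np.zeros((n, n), dtype=np.uint8)
    h, w = labels.shape
    for i in range(h):
        for j in range(w):
            a = labels[i][j]
            if j + 1 < w and labels[i][j + 1] != a:
                b = labels[i][j + 1]
                neighbor[a][b] = neighbor[b][a] = 1
            if i + 1 < h and labels[i + 1][j] != a:
                b = labels[i + 1][j]
                neighbor[a][b] = neighbor[b][a] = 1
    return neighbor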
Example no. 4
def segment_img(original_image,
                spatial_radius=5,
                range_radius=5,
                min_density=60):
    (segmented_image, labels_image,
     number_regions) = pms.segment(original_image, spatial_radius,
                                   range_radius, min_density)
    # presumably the results are meant to be handed back to the caller
    return segmented_image, labels_image, number_regions
Example no. 5
def meanShift(filename):

    img = cv2.imread(filename)

   # (segmented_image, labels_image, number_regions) = pms.segment(img, spatial_radius=6, range_radius=4.5, min_density=50)
    (segmented_image, labels_image, number_regions) = pms.segment(img, spatial_radius=7, range_radius=4.5, min_density=50)
    return segmented_image
Example no. 6
def segment(img):
    shifted = cv2.pyrMeanShiftFiltering(img, 5, 200)
    ##shifted = cv2.pyrMeanShiftFiltering(ROI, sp=7, sr=25, \
    ##                                    maxLevel=1, \
    ##                                    termcrit=(
    ##                                        cv2.TERM_CRITERIA_EPS \
    ##                                        + cv2.TERM_CRITERIA_MAX_ITER, 5, 1))
    ##    cv2.imshow('olaa', shifted)
    ##    cv2.waitKey()
    (seg_image, lab_image, num_regions) = pms.segment(shifted, \
                                                      spatial_radius=10,\
                                                      range_radius=3, \
                                                      min_density=200)
    for i in range(num_regions):
        A = np.uint8(lab_image == i) * 200
        ##        cv2.imshow('seg', A)
        ##        cv2.waitKey()
        (_, cnts, _) = cv2.findContours(A, cv2.RETR_CCOMP,
                                        cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts:
            if (cv2.contourArea(c) < 1000) or (cv2.contourArea(c) > 100000):
                continue
            epsilon = 0.01 * cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, epsilon, True)
            cv2.drawContours(img, [approx], 0, (10 * i, 0, 0), 2)
    return num_regions
Example no. 7
def meanshift_seg(rgb_img, spatial_radius, range_radius, min_density, br):
    start = time.time()
    (segmented_image, labels_image,
     number_regions) = pms.segment(rgb_img,
                                   spatial_radius=spatial_radius,
                                   range_radius=range_radius,
                                   min_density=min_density)
    meanShift_id_segs_dict = {}
    for i in range(number_regions):
        meanShift_id_segs_dict[i] = (i == labels_image) * 1

    fore_mask, back_id = identify_background(labels_image,
                                             meanShift_id_segs_dict, br)
    #print(np.unique(labels_image), back_id)
    fore_mask = fore_mask != back_id

    cl = 5
    closed_fore_mask = close_image_with_back(fore_mask, cl)

    fore_mask = put_back_boundary(fore_mask, closed_fore_mask, cl)

    labeled_image, object_mask = get_object_mask_from_connected_regions(
        fore_mask)
    print('size ', object_mask.shape, ' MeanShift processing time ',
          time.time() - start)
    return object_mask
Example no. 8
def segmented(img):
    (segmented_image, labels_image,
     number_regions) = pms.segment(img,
                                   spatial_radius=6,
                                   range_radius=4.5,
                                   min_density=50)
    return segmented_image
    def _segment(self, threshold=0.5):
         ##
         # Segment the image.
        start_time_seconds = time.time()
        print("...started segmenting...")
        # Using mean shift implementation from https://github.com/fjean/pymeanshift

        segmented_image, labels_image, number_regions = pms.segment(
                                                            self.image,
                                                            spatial_radius = 6,
                                                            range_radius = 4.5,
                                                            min_density = 50)
        # segmented_image, labels_image, number_regions = pms.segment(
        #                                                     self.image,
        #                                                     spatial_radius = 1,
        #                                                     range_radius = 1,
        #                                                     min_density = 300)

        # Gather points of each segment.
        self._set_segment_points(labels_image, number_regions)

        # Label each segment as annotated or not.
        self.label_annotation_segments(threshold=threshold)

        self.segmented_image = segmented_image

        return segmented_image
Example no. 10
def segment(ROI,side):
    shifted = cv2.pyrMeanShiftFiltering(ROI, 7, 31) # first filter
    ##shifted = cv2.pyrMeanShiftFiltering(ROI, sp=7, sr=25, \
    ##                                    maxLevel=1, \
    ##                                    termcrit=(
    ##                                        cv2.TERM_CRITERIA_EPS \
    ##                                        + cv2.TERM_CRITERIA_MAX_ITER, 5, 1))
    (seg_image, lab_image, num_regions) = pms.segment(shifted, \
                                                      spatial_radius=3,\
                                                      range_radius=3, \
                                                      min_density=0) # segmentation
    B = []
    min_dist = 10000
    gaze_c = []
    for i in range(num_regions):
        A = np.uint8(lab_image == i)*200
        (_,cnts,_) = cv2.findContours(A, cv2.RETR_CCOMP,
                    cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts:
            if (cv2.contourArea(c) < 50) or (cv2.contourArea(c) > 3000):
                continue
            d = cv2.pointPolygonTest(c,(side,side),True)*(-1)
            if d < min_dist:
                min_dist = d
                gaze_c = c
##            epsilon = 0.01*cv2.arcLength(c,True)
##            approx = cv2.approxPolyDP(c,epsilon,True)
##            cv2.drawContours(ROI,[approx],0,(100,100,100),2)
    if len(gaze_c) > 0:
        epsilon_g = 0.01*cv2.arcLength(gaze_c,True)
        approx_g = cv2.approxPolyDP(gaze_c,epsilon_g,True)
        cv2.drawContours(ROI,[approx_g],0,(150,150,150),2)
Example no. 11
def calc_hsv_mean_each_image():
    ROOT_DIR_SRC = "./img/resource/aerial_image/fixed_histogram_v2"
    ROOT_DIR_GT = "./img/resource/ground_truth"

    # experiments = [1, 2, 3, 4, 5, 6]
    experiments = [5]

    params_mean_shift = {
        # "spatial_radius": 8,
        # "range_radius": 5,
        "spatial_radius": 8,
        "range_radius": 5,
        "min_density": 0
    }

    gt_type = "GT_ORANGE"

    results = dict()

    for exp_num in experiments:

        src_img = imread_with_error(
            path.join(ROOT_DIR_SRC, f"aerial_roi{exp_num}.png"))

        ground_truth = imread_with_error(
            path.join(ROOT_DIR_GT, f"aerial_roi{exp_num}.png"))

        eprint(
            dedent(f"""
            Experiment Num: {exp_num}
                   gt_type: {gt_type}
        """))

        eprint(f"Do Mean-Shift ... ", end="")
        src_img = pymeanshift.segment(src_img, **params_mean_shift)[0]
        eprint("done")

        metrics = calc_hsv_metrics_by_ground_truth(src_img, ground_truth)

        # print(dedent(f"""
        #     Mean (H): {means['H']}
        #     Mean (S): {means['S']}
        #     Mean (V): {means['V']}
        # """))

        results[f"aerial_roi{exp_num}"] = metrics

    for exp_name, metrics in results.items():

        print("\t".join(["EXP_NAME", exp_name]))
        print("\t".join(["Ch.", *list(metrics.values())[0].keys()]))
        for ch_name, metric in metrics.items():
            # for k, v in results.items():
            #     print(",".join([
            #         k,
            #         *metric.values()
            #     ]))
            print("\t".join([str(x) for x in [ch_name, *metric.values()]]))
    return results
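imread_with_error and eprint are helpers that are not shown in this example. A plausible minimal version, assuming imread_with_error simply wraps cv2.imread and fails loudly on a bad path while eprint writes to stderr, is:

import sys
import cv2

def eprint(*args, **kwargs):
    # Progress messages go to stderr so they do not mix with the tabulated results on stdout.
    print(*args, file=sys.stderr, **kwargs)

def imread_with_error(path):
    # Hypothetical helper: read an image and raise if the file could not be read.
    img = cv2.imread(path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {path}")
    return img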
Example no. 12
def segmented_downsampled(img):
    downsampled = cv2.resize(img, (0, 0), fx=0.25, fy=0.25)
    (segmented_image, labels_image,
     number_regions) = pms.segment(downsampled,
                                   spatial_radius=6,
                                   range_radius=4.5,
                                   min_density=50)
    return segmented_image
Example no. 13
def bar_detect(input_image, k=5):

    temp_image = np.copy(input_image)

    # apply segmentation
    (segmented_image, labels_image,
     number_regions) = pms.segment(input_image,
                                   spatial_radius=6,
                                   range_radius=4.5,
                                   min_density=50)

    qtde_rect = 0
    cnt_rects = []
    cnt_rects_info = []
    lst_countaprox = []
    for label in range(1, number_regions):
        filter = np.full(shape=labels_image.shape,
                         fill_value=label,
                         dtype=float)
        result = labels_image - filter
        filtered_img = result == 0

        # Detection happens on the color image
        # filtered_img = apply_mask(temp_image, filtered_img.astype(np.uint8))

        filtered_img = filtered_img.astype(np.uint8) * 255

        # Since some lines or other residue may have been recognized as regions, apply erosion
        kernel = np.ones((k, k), np.uint8)
        filtered_img = cv2.erode(filtered_img, kernel, iterations=1)
        # cv2.imshow('erode', filtered_img)
        # cv2.waitKey()
        # Only then is the rectangle-detection algorithm applied
        filtered_img, t, lst_cnt, lst_cnt_info = rectangle_detection(
            filtered_img)
        if t > 0:
            cnt_rects.append(lst_cnt)
            cnt_rects_info += lst_cnt_info

            qtde_rect += t
            # cv2.imshow('rectangle after ', filtered_img)
            # cv2.waitKey()

        for cnt, cnt_info in zip(lst_cnt, lst_cnt_info):

            cv2.drawContours(temp_image, [cnt_info['cnt']],
                             0, (0, 0, 255),
                             thickness=2)
            cv2.circle(temp_image, cnt_info['center'], 2, (0, 255, 0), 2)

    # print('Total bars recognized', qtde_rect)
    # cv2.imshow('final ', temp_image)
    # cv2.waitKey()

    # return output_image, qtde_rect
    # return temp_image, cnt_rects, cnt_rects_info, lst_countaprox
    return temp_image, cnt_rects_info, lst_countaprox
Example no. 14
 def compute_preseg(self):
     """
     Compute the initial presegmentation using ``preseg_method``
     """
     ms_image = imread(self._path_to_image)
     (_, labels, self._number_of_regions) = segment(ms_image,
                                                    spatial_radius=self._hs,
                                                    range_radius=self._hr,
                                                    min_density=self._M)
     self._presegmentation = 1 + labels
def segment(original_image):
    (segmented_image, labels_image,
     number_regions) = pms.segment(original_image,
                                   spatial_radius=6,
                                   range_radius=4.5,
                                   min_density=50)
    # print("segment", len(np.unique(segments)))

    # cv2.imshow('image',segmented_image)
    # cv2.waitKey(0)
    return labels_image
Example no. 16
def meanShift(filename):

    img = cv2.imread(filename)

    # (segmented_image, labels_image, number_regions) = pms.segment(img, spatial_radius=6, range_radius=4.5, min_density=50)
    (segmented_image, labels_image,
     number_regions) = pms.segment(img,
                                   spatial_radius=7,
                                   range_radius=4.5,
                                   min_density=50)
    return segmented_image
def meanShiftFilter(original_image):
    #We need to homogenize the tonality of the image to get the most repeated colors.
    #If we don't do that we get tons of different pixels that have minimal differences in their RGB and
    #we can't consider them the same color, which would introduce errors into our project.

    (segmented_image, labels_image,
     number_regions) = pms.segment(original_image,
                                   spatial_radius=6,
                                   range_radius=4.5,
                                   min_density=50)
    img = Image.fromarray(segmented_image)
    return img
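A short usage sketch of the idea in the comments above: after the mean shift pass near-identical pixels collapse onto the same value, so counting exact colors becomes meaningful. The file name is illustrative.

from collections import Counter

import cv2

img = cv2.imread("photo.jpg")            # illustrative file name
filtered = meanShiftFilter(img)          # PIL image of the segmented result
counts = Counter(filtered.getdata())     # count exact color triples
print(counts.most_common(5))             # the five most repeated colors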
Example no. 18
def meanShift(filename):

    img = cv2.imread(filename)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)

    # (segmented_image, labels_image, number_regions) = pms.segment(img, spatial_radius=6, range_radius=4.5, min_density=50)
    (segmented_image, labels_image,
     number_regions) = pms.segment(img,
                                   spatial_radius=3,
                                   range_radius=11,
                                   min_density=10)
    return segmented_image
Example no. 19
def meanshif(image, raw_image, minAreaSize, maxAreaSize, minDensity):
    """ perform segmentation using meanshift modules. THIS REQUIRES
        https://github.com/fjean/pymeanshift"""

    if len(image.shape) > 2:  # convert color images to grayscale
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    mask = np.zeros_like(image, )
    (segmented_image, labels_image,
     number_regions) = pms.segment(image,
                                   spatial_radius=6,
                                   range_radius=4.5,
                                   min_density=int(minDensity))

    # mark identified objects
    for label in range(len(np.unique(labels_image))):
        if label == 0:
            continue
        mask[labels_image == label] = 255
    mask = cv2.bitwise_and(image, mask)

    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    _, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)

    color = np.random.randint(0, 255, (5000, 3))
    mask_colored = np.zeros_like(raw_image, )
    pts, rectang, cellFeatures = [], [], []
    for (ii, cnt) in enumerate(contours):
        ((x, y), radius) = cv2.minEnclosingCircle(cnt)

        area = cv2.contourArea(cnt)
        rect = cv2.boundingRect(cnt)
        (x1, y1, w1, h1) = rect

        if area < minAreaSize or area > maxAreaSize or hierarchy[0, ii,
                                                                 3] != -1:
            continue  # skip contours outside the size range or nested inside another contour
        else:
            cellMorph = getCellIntensityModule(cnt, image)
            cellFeatures.append(cellMorph)
            cv2.drawContours(mask_colored, [cnt],
                             -1,
                             color[ii].tolist(),
                             thickness=cv2.FILLED)
            cv2.drawContours(raw_image, [cnt], -1, (0, 255, 0), 1)
            rectang.append((x1, y1, w1, h1))
            pts.append([x, y])

    return pts, rectang, mask_colored, image, cellFeatures
def testPMS():
    image = "pms_images/shells/image00021.jpg"
    image_obj = cv2.imread(image)
    (segmented_image, labels_image,
     number_regions) = pms.segment(image_obj,
                                   spatial_radius=15,
                                   range_radius=15,
                                   min_density=200)
    status = cv2.imwrite("test_images/test.jpg", segmented_image)
    print(type(labels_image))
    print("Labelled array shape: %s", labels_image.shape)
    print("Number of regions:\t" + str(number_regions))
    print("label image:\t" + str(labels_image))
Example no. 21
def func_worker(img, spatial_radius=None, range_radius=None, min_density=None):
    _worker_id = current_process()._identity[0]
    _desc = f"Worker #{_worker_id:3d} (sp={spatial_radius:2.1f}, sr={range_radius:2.1f})"
    
    for _ in tqdm([0], desc=_desc, position=_worker_id, leave=False):
        
        segmented = segment(
            img,
            spatial_radius=spatial_radius,
            range_radius=range_radius,
            min_density=min_density
        )[0]
    
    return segmented, spatial_radius, range_radius
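func_worker reads its worker id from current_process(), so it is intended to run inside a multiprocessing pool. A minimal driver sketch, with the image path and parameter grid chosen purely for illustration, could look like this:

from functools import partial
from itertools import product
from multiprocessing import Pool

import cv2

if __name__ == "__main__":
    img = cv2.imread("input.png")                       # illustrative file name
    grid = list(product([4.0, 8.0], [4.5, 9.0]))        # (spatial_radius, range_radius) pairs

    with Pool(processes=4) as pool:
        # Each worker segments the same image with a different parameter pair.
        results = pool.starmap(partial(func_worker, img, min_density=50), grid)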
Example no. 22
def determineBackground(original, imageFileName, labelFileName, SHOW=True, SPATIAL_RADIUS=5,RANGE_RADIUS=5,MIN_DENSITY=250):
    segmented_image,labels_image,number_regions = pms.segment(
            original,
            spatial_radius=SPATIAL_RADIUS,
            range_radius=RANGE_RADIUS,
            min_density=MIN_DENSITY)
    
    print("Number of Regions Found: %s" % number_regions)
    unique_labels = np.unique(labels_image)

    # Save the image and also save the labels to a folder
    if SHOW:
        if not os.path.exists('backgroundChecker'):
            os.makedirs('backgroundChecker')
        cv2.imwrite(os.path.join('backgroundChecker', imageFileName), segmented_image)
        np.savetxt(os.path.join('backgroundChecker', labelFileName), labels_image, delimiter=',')
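A usage sketch for determineBackground; the file names are illustrative, and the output lands in the backgroundChecker/ folder created by the function.

import cv2

original = cv2.imread("scene.png")     # illustrative file name
determineBackground(original, "scene_segmented.png", "scene_labels.csv", SHOW=True)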
Example no. 23
def segment(img, spatial_radius, range_radius, min_density) :
	'''
	Segment the image into regions using the Mean Shift algorithm. An image of same 
	shape is returned containing the region id of each pixel.
	'''
#	import _pymeanshift as _pms     # @UnresolvedImport

	# Check if the image has the minimum size for the algorithm to run properly
	h, w, c = img.shape      # @UnusedVariable
	if (w <= 2*range_radius) or (h <= 2*range_radius) :
		# Too small to segment: fall back to treating the image as a single region
		_segmented_im = img
		labels_img = np.zeros(img.shape[:2], dtype=int)
		_nregions = 1

	else:
		_segmented_im, labels_img, _nregions = pms.segment(img, spatial_radius, range_radius, min_density)

	return _segmented_im, labels_img, _nregions
    def _segment(self, threshold=0):
        ##
        # Segment the image.
        start_time_seconds = time.time()
        print("...started segmenting", self.filename, "...")
        # Using mean shift implementation from https://github.com/fjean/pymeanshift

        segmented_image, labels_image, number_regions = pms.segment(
            self.image, spatial_radius=6, range_radius=4.5, min_density=50)
        # Gather points of each segment.
        self._set_segment_points(labels_image, number_regions)
        # Construct image of the segment and save to disk.
        self._make_segment_images(segmented_image)

        self.segmented_image = segmented_image

        return segmented_image
Example no. 25
def meanshift_image(image):
    """
    Description of meanshift_image
    
    Performs meanshift algorithm on the image

    Args:
        image (undefined): PIL image

    """

    (segmented_image, labels_image,
     number_regions) = pms.segment(image,
                                   spatial_radius=6,
                                   range_radius=5,
                                   min_density=100)
    return Image.fromarray(segmented_image)
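A quick usage sketch, assuming the function is handed the image as a NumPy array (or a PIL image converted to one); the file name is illustrative.

import numpy as np
from PIL import Image

img = np.array(Image.open("photo.jpg"))   # illustrative file name
out = meanshift_image(img)                # PIL image of the segmented result
out.save("photo_meanshift.png")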
Example no. 26
def segmentImage(img, path):
    print("Preprocessing...")
    pre = cv2.medianBlur(img, 5)
    pre = cv2.pyrMeanShiftFiltering(pre, 15, 25)
    if showAllImages:
        cv2.imshow("Pre-Segmentation", img)

    print("Segmenting...")
    (spatial, range, density) = (8, 8, 250)
    (segmented_image, labels_image,
     number_regions) = pms.segment(pre,
                                   spatial_radius=spatial,
                                   range_radius=range,
                                   min_density=density)
    segmented_image = cv2.medianBlur(segmented_image, 7)
    cv2.imwrite(path, segmented_image)
    return segmented_image
def pmsTransformation():
    #file path(s) to different objects
    filePath = "../../separated_Cenek_images/"

    #Traverse all the folders that have the name of the images
    for x in os.listdir(filePath):
        #Traverse all the folders of each video
        for i in os.listdir(filePath + x):
            #Traverse the images in each of the folder
            for n in os.listdir(filePath + x + "/" + i):
                path = filePath + x + "/" + i + "/" + n
                writePath = "pms_images/" + x + "_" + n
                original_image = cv2.imread(path)
                (segmented_image, labels_image,
                 number_regions) = pms.segment(original_image,
                                               spatial_radius=10,
                                               range_radius=10,
                                               min_density=300)
                status = cv2.imwrite(writePath, segmented_image)
Example no. 28
 def _mean_shift(self, data_list):
     """
     The first parameter is the image
     the second parameter is [spatial_radius, range_radius, min_density]
     If None, the default val will be 6, 4.5, and 50
     """
     im = data_list[0]
     params = data_list[1]
     if params is None:
         sr = 6
         rr = 4.5
         md = 50
     else:
         sr = params[0]
         rr = params[1]
         md = params[2]
     (segmented_image, labels_image, number_regions) = \
         pms.segment(im, spatial_radius=sr, range_radius=rr, min_density=md)
     return labels_image
Example no. 29
def extract_green(image):
    '''Return extracted image'''
    img = cv2.imread(image)
    (segmented_image, labels_image,
     number_regions) = pms.segment(img,
                                   spatial_radius=6,
                                   range_radius=4.5,
                                   min_density=50)
    b, g, r = cv2.split(segmented_image / 255)
    diff1 = g - r
    diff2 = g - b
    diff_image = diff1 * diff2
    vegetation = np.zeros_like(diff_image)  # start from zeros; only vegetation pixels get set below
    for (i, j), value in np.ndenumerate(diff_image):
        if diff_image[i][j] > 0 and diff1[i][j] > 0 and b[i][j] <= 0.7 and g[
                i][j] <= 0.7 and r[i][j] <= 0.7:
            vegetation[i][j] = 255
    # filtering noise
    kernel = np.ones((3, 3), np.uint8)
    vegetation = cv2.morphologyEx(np.uint8(vegetation), cv2.MORPH_OPEN, kernel)
    return img, vegetation
Example no. 30
def meanshift(image, spatial_radius, range_radius, min_density):
    """
    Segment an image using the meanshift clustering algorithm.

    Parameters
    ----------
    image : array_like
        Input image.
    spatial_radius : int
        Spatial radius of the search window.
    range_radius : float
        Range radius parameter of the search window.
    min_density : int
        Minimum size of a region in the segmented image.

    Returns
    -------
    segmented : array_like
        Segmented image.
    n_modes : int
        The number of modes found by the meanshift algorithm.

    Notes
    -----
    A custom fork of the "pymeanshift" module [1] is used to perform the
    meanshift. Details on the algorithm can be found in [2].

    References
    ----------
    [1] https://github.com/clememic/pymeanshift

    [2] Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
        feature space analysis". IEEE Transactions on Pattern Analysis and
        Machine Intelligence. 2002. pp. 603-619.

    """
    hs, hr, M = spatial_radius, range_radius, min_density
    segmented, labels, n_modes = pyms.segment(image, hs, hr, M)
    return segmented, n_modes
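A brief usage sketch for the wrapper above; the file name and parameter values are illustrative.

import cv2

img = cv2.imread("scene.png")     # illustrative file name
segmented, n_modes = meanshift(img, spatial_radius=6, range_radius=4.5, min_density=50)
print("modes found:", n_modes)
cv2.imwrite("scene_segmented.png", segmented)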
Example no. 31
def getSegments(original,
                SHOW=False,
                SPATIAL_RADIUS=5,
                RANGE_RADIUS=5,
                MIN_DENSITY=250):
    ##############################################################################################################
    #gaussian Blur
    #blur = cv2.GaussianBlur(gray_img,(5,5),0)

    #mean shift segmentation on bgr image
    #https://github.com/fjean/pymeanshift
    #http://ieeexplore.ieee.org/document/1000236/
    segmented_image, labels_image, number_regions = pms.segment(
        original,
        spatial_radius=SPATIAL_RADIUS,
        range_radius=RANGE_RADIUS,
        min_density=MIN_DENSITY)
    print("Number of Regions Found: %s" % number_regions)
    unique_labels = np.unique(labels_image)
    blank = original - original
    for label in unique_labels:
        b = random.randint(0, 255)
        g = random.randint(0, 255)
        r = random.randint(0, 255)
        if label == 0:
            blank[labels_image == label] = [b, g, r]

    # The blank images show exactly what is segmented where
    if SHOW == True:
        cv2.imwrite("saved_segmentation.png", blank)

    ################################################################################
    ################################################################################
    ################################################################################
    ################################################################################
    ################################################################################

    return segmented_image, labels_image
Example no. 32
def obtainLabels(img):
    data_array = cv2.imread(img, 1)

    b, g, r = cv2.split(data_array)


    (segmented_image, labels_image, number_regions) = pms.segment(r, spatial_radius=3, range_radius=5, min_density=100)

    segmented_image[segmented_image > segmented_image.min()] = 255
    segmented_image[segmented_image <= segmented_image.min()] = 0


    ######################################################################
    # Small spurious objects are easily removed by setting a minimum size for valid objects.
    segmented_image = morphology.remove_small_objects(segmented_image, 100, connectivity=4)


    # Watershed

    D = ndi.distance_transform_edt(segmented_image)
    localMax = peak_local_max(D, indices=False, min_distance=20, labels=segmented_image)
    # perform a connected component analysis on the local peaks,
    # using 8-connectivity, then appy the Watershed algorithm
    markers = ndi.label(localMax, structure=np.ones((3, 3)))[0]
    labels = watershed(-D, markers, mask=segmented_image)
    print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))

    # The next line bypasses the watershed result; comment it out to keep the watershed labels
    labels, num = ndi.label(segmented_image, structure=np.ones((3, 3)))

    # remove edge nuclei
    labels_image = clear_border(labels)
    # recount labels
    number_regions = np.delete(np.unique(labels_image),0)


    return labels_image, number_regions, r, g
def VegetationClassification(Img):
    '''
    This function is used to classify the green vegetation from GSV image,
    This is based on object based and otsu automatically thresholding method
    The season of GSV images were also considered in this function
        Img: the numpy array image, eg. Img = np.array(Image.open(StringIO(response.content)))
        return the percentage of the green vegetation pixels in the GSV image
    
    By Xiaojiang Li
    '''

    import pymeanshift as pms
    import numpy as np

    # use the meanshift segmentation algorithm to segment the original GSV image
    (segmented_image, labels_image,
     number_regions) = pms.segment(Img,
                                   spatial_radius=6,
                                   range_radius=7,
                                   min_density=40)

    I = segmented_image / 255.0

    red = I[:, :, 0]
    green = I[:, :, 1]
    blue = I[:, :, 2]

    # calculate the difference between green band with other two bands
    green_red_Diff = green - red
    green_blue_Diff = green - blue

    ExG = green_red_Diff + green_blue_Diff
    diffImg = green_red_Diff * green_blue_Diff

    redThreImgU = red < 0.6
    greenThreImgU = green < 0.9
    blueThreImgU = blue < 0.6

    shadowRedU = red < 0.3
    shadowGreenU = green < 0.3
    shadowBlueU = blue < 0.3
    del red, blue, green, I

    greenImg1 = redThreImgU * blueThreImgU * greenThreImgU
    greenImgShadow1 = shadowRedU * shadowGreenU * shadowBlueU
    del redThreImgU, greenThreImgU, blueThreImgU
    del shadowRedU, shadowGreenU, shadowBlueU

    greenImg3 = diffImg > 0.0
    greenImg4 = green_red_Diff > 0
    threshold = graythresh(ExG, 0.1)

    if threshold > 0.1:
        threshold = 0.1
    elif threshold < 0.05:
        threshold = 0.05

    greenImg2 = ExG > threshold
    greenImgShadow2 = ExG > 0.05
    greenImg = greenImg1 * greenImg2 + greenImgShadow2 * greenImgShadow1
    del ExG, green_blue_Diff, green_red_Diff
    del greenImgShadow1, greenImgShadow2

    # calculate the percentage of the green vegetation
    greenPxlNum = len(np.where(greenImg != 0)[0])
    greenPercent = greenPxlNum / (400.0 * 400) * 100
    del greenImg1, greenImg2
    del greenImg3, greenImg4

    return greenPercent
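graythresh is not defined in this snippet. A minimal stand-in, assuming it computes an Otsu-style threshold over the float-valued ExG image with the second argument acting as the level resolution, could be built on scikit-image:

import numpy as np
from skimage.filters import threshold_otsu

def graythresh(array, level_res=0.1):
    # Hypothetical stand-in: Otsu threshold on a float image, quantized to level_res steps.
    arr = np.asarray(array, dtype=np.float64)
    quantized = np.round(arr / level_res) * level_res
    return float(threshold_otsu(quantized))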
Example no. 34
import cv2
import pymeanshift as ms

img_name = "test"
original_image = cv2.imread(img_name + ".png")
sk = "Gaussian"
rk = "Gaussian" # other : Uniform
i = 1
for sr in range(2,20,8):
    for rr in range(2,20,8):
        for m in range(1,50,10):
            (segmented_image, labels_image, number_regions) \
                = ms.segment(original_image, spatial_radius=sr,
                    range_radius=rr, min_density=m,
                    skernel=sk, rkernel=rk)

            cv2.imwrite(img_name+"_" + sk[0] + rk[0] +"_"+str(i)+".jpg", segmented_image)
            print(i, '\t', sr, '\t', rr, '\t', m, '\t'+sk[0]+'\t'+rk[0]+'\t', number_regions)
            i+=1

Example no. 35
def segmentImage(img, spatial_radius, range_radius, min_density):
    """Generate segmented image, the meanshift magic"""
    (segmented_image, labels_image, number_regions) = pms.segment(img, spatial_radius=spatial_radius, range_radius=range_radius, min_density=min_density)

    return segmented_image
Example no. 36
    file_name = "plates/license1.png"
    if len(sys.argv) != 1:
        file_name = sys.argv[1]
    
    base_name = os.path.basename(file_name)

    fname_prefix = ".".join(base_name.split(".")[:-1])
    print fname_prefix


    # Image load & conversion to cvmat
    license_plate = cv2.imread(file_name, cv2.CV_LOAD_IMAGE_COLOR)

    # Segment
    segmented, labels, regions = pms.segment(license_plate, 3, 3, 50)
    print "Segmentation results"
    print "%s: %s" % ("labels", labels)
    print "%s: %s" % ("regions", regions)
    cv2.imwrite('%s_segmented.png' % fname_prefix, segmented)


    license_plate = cv2.imread('%s_segmented.png' % fname_prefix, cv2.CV_LOAD_IMAGE_COLOR)

    license_plate_size = (license_plate.shape[1], license_plate.shape[0])

    license_plate_cvmat = cv2.cv.fromarray(license_plate)
    license_plate_ipl = cv2.cv.CreateImage(license_plate_size, cv2.cv.IPL_DEPTH_8U, 3)
    cv2.cv.SetData(
            license_plate_ipl,
            license_plate.tostring(),
Example no. 37
from PIL import Image
import pymeanshift as pms

import numpy

def PIL2array(img):
    return numpy.array(img.getdata(),
                    numpy.uint8).reshape(img.size[1], img.size[0], 3)

def array2PIL(arr, size):
    mode = 'RGBA'
    arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])
    if len(arr[0]) == 3:
        arr = numpy.c_[arr, 255*numpy.ones((len(arr),1), numpy.uint8)]
    return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)

def segment_img(original_image, spatial_radius=5,range_radius=5, min_density=60):
	(segmented_image, labels_image, number_regions) = pms.segment(original_image, spatial_radius, 
                                                              range_radius, min_density)
	return segmented_image, labels_image, number_regions


original_image = Image.open("/home/lforet/images/class1/1.grass3.jpg")


(segmented_image, labels_image, number_regions) = pms.segment(original_image, spatial_radius=10, 
                                                              range_radius=10, min_density=60)
original_image.show()

array2PIL(segmented_image, original_image.size).show()
Example no. 38
def seg(img,a,b,c):
    lx, ly, ch = img.shape
    img = img[lx // 4:-lx // 4, ly // 10:-ly // 10, :]
    l = list(pms.segment(img[:,:,0:3],a,b,c)) #1,12,30 "magic"
    l.extend(list(img.shape))
    return l
Example no. 39
def segmented(img):
    (segmented_image, labels_image, number_regions) = pms.segment(img, spatial_radius=6, range_radius=4.5, min_density=50)
    return segmented_image
Example no. 40
def segmented_downsampled(img):
    downsampled = cv2.resize(img, (0,0), fx=0.25, fy=0.25)
    (segmented_image, labels_image, number_regions) = pms.segment(downsampled, spatial_radius=6, range_radius=4.5, min_density=50)
    return segmented_image
Example no. 41
def main(argv=None):
    if argv is None:
        argv = sys.argv

    desc = """%prog is a tool performs mean shift clustering and a phase symmetry transfromation\n
     on a given input image, in that order."""

    parser = optparse.OptionParser(description=desc, usage='Usage: ex) %prog imageFile.png')
    parser.add_option('--scale', help='specify number of phasesym scales', dest='NSCALE', default=4, type="int")
    parser.add_option('--ori', help='specify number of phasesym orientations', dest='NORIENT', default=6, type="int")
    parser.add_option('--mult', help='specify multiplier for phasesym', dest='MULT', default=3.0, type="float")
    parser.add_option('--sig', help='specify sigma on frequency for phasesym', dest='SIGMAONF', default=0.55, type="float")
    parser.add_option('--k', help='specify k value for phasesym', dest='K', default=1, type="int")
    parser.add_option('--blur', help='specify blur width value N for NxN blur operation', dest='BLUR', default=3, type="int")
    parser.add_option('--srad', help='specify spatial radius for mean shift', dest='SRAD', default=5, type="int")
    parser.add_option('--rrad', help='specify radiometric radius for mean shift', dest='RRAD', default=6, type="int")
    parser.add_option('--den', help='specify pixel density value for mean shift', dest='DEN', default=10, type="int")
    parser.add_option('--amin', help='specify blob minimum area for boxing', dest='AMIN', default=5, type="int")
    parser.add_option('--amax', help='specify blob maximum area for boxing', dest='AMAX', default=400, type="int")
    parser.add_option('--wmin', help='specify box minimum width acceptance', dest='WMIN', default=3, type="int")
    parser.add_option('--wmax', help='specify box maximum width acceptance', dest='WMAX', default=35, type="int")
    parser.add_option('--hmin', help='specify box minimum height acceptance', dest='HMIN', default=2, type="int")
    parser.add_option('--hmax', help='specify box maximum height acceptance', dest='HMAX', default=55, type="int")
    parser.add_option('--arat', help='specify minimum box aspect ratio for acceptance', dest='ARATIO', default=0.25, type="float")
    parser.add_option('--edgeMin', help='specify minimum hysteresis value for edge detection', dest='EDGEMIN', default=100, type="int")
    parser.add_option('--edgeMax', help='specify maximum hysteresis value for edge detection', dest='EDGEMAX', default=200, type="int")
    parser.add_option('--ref', help='specify reference car image', dest='MASTERIMG', default="")
    parser.add_option('--win', help='specify search window size', dest='WINSZ', default=55, type="int")
    parser.add_option('--tol', help='specify shape description tolerance', dest='TOL', default=0.07, type="float")
    

    (opts, args) = parser.parse_args(argv)
    args = args[1:]

    #check to see if the user provided an input file
    if len(args) == 0:
        print "\nNo input file provided"
        parser.print_help()
        sys.exit(-1)

    #check that only a single input file was provided
    if len(args) > 1:
        print "\n invalid argument(s) %s provided" % (str(args))
        parser.print_help()
        sys.exit(-1)


    NSCALE = opts.NSCALE
    NORIENT = opts.NORIENT
    MULT = opts.MULT
    SIGMAONF = opts.SIGMAONF
    K = opts.K
    BLUR = (opts.BLUR, opts.BLUR)
    SRAD = opts.SRAD
    RRAD = opts.RRAD
    DEN = opts.DEN
    AMIN = opts.AMIN
    AMAX = opts.AMAX
    WMAX = opts.WMAX
    WMIN = opts.WMIN
    HMAX = opts.HMAX
    HMIN = opts.HMIN
    ARATIO = opts.ARATIO
    EDGEMIN = opts.EDGEMIN
    EDGEMAX = opts.EDGEMAX
    WINSZ = opts.WINSZ 
    TOL = opts.TOL
    masterImg = ""
    masterHist = None

    if len(opts.MASTERIMG) > 0:
        masterImg = cv2.imread(opts.MASTERIMG)

        h, w, z = masterImg.shape
        if w%2:
            x = int(np.ceil(w/float(2)))
        else:
            x = w/2
        if h%2:
            y = int(np.ceil(h/float(2)))
        else:
            y = h/2
        print y, x
        masterHist = RadAngleHist(masterImg, 0, y, x,BLUR)
    else:
        #masterImg = cv2.imread('singleCar.png')
        #masterHist = RadAngleHist(masterImg, 0, 27, 29)
        masterImg, cen = genReferenceCar(WINSZ)
        print cen
        print masterImg
        masterHist = RadAngleHist(masterImg, 0, cen[0], cen[1],BLUR)
        print str(masterHist)

    #begin transform
    filename = args[0]
    basename = os.path.splitext(filename)[0]
    img = cv2.imread(filename)

    grayImg = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    cv2.imwrite(basename+"_gray.png", grayImg)
    pha, ori, tot, T = phasesym(grayImg, nscale=NSCALE, norient=NORIENT, minWaveLength=3, mult=MULT, sigmaOnf=SIGMAONF, k=K, polarity=0)
    pha = cv2.normalize(pha, pha, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
    pha = np.uint8(pha)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K), pha)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_ori.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K), ori)
    np.savetxt('python_ori.txt',ori)

    pha = cv2.blur(pha, BLUR)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1]), pha)
    pha = cv2.cvtColor(pha, cv2.COLOR_GRAY2RGB)
    pha = cv2.cvtColor(pha, cv2.COLOR_RGB2LUV)


    (segmented_image, labels_image, number_regions) = pms.segment(pha, spatial_radius=SRAD, range_radius=RRAD, min_density=DEN)
    segmented_image=cv2.normalize(segmented_image, segmented_image, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
    segmented_image = np.uint8(segmented_image)
    segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_LUV2RGB)
    segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2GRAY)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN), segmented_image)


    #ori = np.uint8(ori)
    #(ori, labels_image, number_regions) = pms.segment(ori, spatial_radius=SRAD, range_radius=RRAD, min_density=DEN)
    #ori=cv2.normalize(ori, ori, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
    #ori = np.uint8(ori)
    #cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d_ori.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN), ori)


    thresh_img = cv2.adaptiveThreshold(segmented_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 7, 0)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d_T.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN), thresh_img)


    #get centroids
    contours = getContours(thresh_img, AMIN, AMAX, WMIN, WMAX, HMIN, HMAX, ARATIO)
    centroids = getCentroids(contours)
    
    #cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d_A_%d_%d_W_%d_%d_H_%d_%d_R_%.2f.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN, AMIN, AMAX, WMIN, WMAX, HMIN, HMAX, ARATIO), img)

    histograms = []


    dirName = 'windowTiles'
    if not (os.path.isdir(dirName)):
        #create subdirectory if it does not already exist
        os.mkdir(dirName)

    outputImg = img.copy()
    centroidsImg = img.copy()
    drawCentroids(centroidsImg,centroids)
    #drawCentroids(img,centroids)

    np.set_printoptions(formatter={'float': '{: 0.2f}'.format})
    print str(masterHist) + "\n"
    for cen,cnt in zip(centroids,contours):
        print cen
        #win = getImageWindow(img, cen[0],cen[1],26,52)

        #if cen[0]==100 and cen[1] == 310:
        #    import pdb; pdb.set_trace()
        win = getImageWindow(img, cen[0],cen[1],WINSZ,WINSZ)   #correct
        filename = "win_%d_%d.jpg" % (cen[0], cen[1])
        cv2.imwrite(os.path.join(dirName, filename), win)
        #histogram = RadAngleHist(win, 90+ori[cen[0], cen[1]],cen[0],cen[1])
        histogram = RadAngleHist(win, 90+ori[cen[0], cen[1]],cen[0],cen[1],BLUR)

        #print ori[cen[0], cen[1]]
        print str(histogram) + "\n"

        if histogram.getHistSum() < 0.1:
            continue

        #if cen == (11,144):
        #    import pdb;pdb.set_trace()

        #print masterHist.compare(histogram)
        if masterHist.compare(histogram) < TOL:
            drawBox(outputImg, cnt)

    dirName = ""
    cv2.imwrite(os.path.join(dirName, "Boxes_Centroids.jpg"), outputImg)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d_A_%d_%d_W_%d_%d_H_%d_%d_R_%.2f_E_%d_%d_W_%d_T_%.2f.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN, AMIN, AMAX, WMIN, WMAX, HMIN, HMAX, ARATIO,EDGEMIN,EDGEMAX,WINSZ,TOL), outputImg) 
    cv2.imwrite(os.path.join(dirName, "Centroids.jpg"), centroidsImg) 
Example no. 42
import pymeanshift as pms
import cv2
import numpy as np

img = cv2.imread("me.jpg")
for i in range(4):
    (segmented, labels, n) = pms.segment(img, spatial_radius=6, range_radius=1.5 * (i + 1), min_density=200)
    cv2.imshow("range_radius=%s" % str(1.5 * (i + 1)), segmented)
cv2.waitKey(0)  # keep the windows open until a key is pressed
Example no. 43
def main(argv=None):
    if argv is None:
        argv = sys.argv

    desc = """%prog is a tool performs mean shift clustering and a phase symmetry transfromation\n
     on a given input image, in that order."""

    parser = optparse.OptionParser(description=desc, usage='Usage: ex) %prog imageFile.png')
    parser.add_option('--scale', help='specify number of phasesym scales', dest='NSCALE', default=4, type="int")
    parser.add_option('--ori', help='specify number of phasesym orientations', dest='NORIENT', default=6, type="int")
    parser.add_option('--mult', help='specify multiplier for phasesym', dest='MULT', default=3.0, type="float")
    parser.add_option('--sig', help='specify sigma on frequency for phasesym', dest='SIGMAONF', default=0.55, type="float")
    parser.add_option('--k', help='specify k value for phasesym', dest='K', default=1, type="int")
    parser.add_option('--blur', help='specify blur width value N for NxN blur operation', dest='BLUR', default=3, type="int")
    parser.add_option('--srad', help='specify spatial radius for mean shift', dest='SRAD', default=5, type="int")
    parser.add_option('--rrad', help='specify radiometric radius for mean shift', dest='RRAD', default=6, type="int")
    parser.add_option('--den', help='specify pixel density value for mean shift', dest='DEN', default=10, type="int")
    parser.add_option('--amin', help='specify blob minimum area for boxing', dest='AMIN', default=5, type="int")
    parser.add_option('--amax', help='specify blob maximum area for boxing', dest='AMAX', default=400, type="int")
    parser.add_option('--wmin', help='specify box minimum width acceptance', dest='WMIN', default=3, type="int")
    parser.add_option('--wmax', help='specify box maximum width acceptance', dest='WMAX', default=35, type="int")
    parser.add_option('--hmin', help='specify box minimum height acceptance', dest='HMIN', default=2, type="int")
    parser.add_option('--hmax', help='specify box maximum height acceptance', dest='HMAX', default=55, type="int")
    parser.add_option('--arat', help='specify minimum box aspect ratio for acceptance', dest='ARATIO', default=0.25, type="float")
    parser.add_option('--edgeMin', help='specify minimum hysteresis value for edge detection', dest='EDGEMIN', default=100, type="int")
    parser.add_option('--edgeMax', help='specify maximum hysteresis value for edge detection', dest='EDGEMAX', default=200, type="int")
    parser.add_option('--eps', help='specify maximum epsilon value for DBSCAN clustering algorithm', dest='EPS', default=15, type="int")
    parser.add_option('--min_samples', help='specify the number of minimum samples that constitute a cluster during DBSCAN',
                      dest='MINSAMPLES', default=1, type="int")

    (opts, args) = parser.parse_args(argv)
    args = args[1:]

    #check to see if the user provided an input file
    if len(args) == 0:
        print "\nNo input file provided"
        parser.print_help()
        sys.exit(-1)

    #check that only a single input file was provided
    if len(args) > 1:
        print "\n invalid argument(s) %s provided" % (str(args))
        parser.print_help()
        sys.exit(-1)


    NSCALE = opts.NSCALE
    NORIENT = opts.NORIENT
    MULT = opts.MULT
    SIGMAONF = opts.SIGMAONF
    K = opts.K
    BLUR = (opts.BLUR, opts.BLUR)
    SRAD = opts.SRAD
    RRAD = opts.RRAD
    DEN = opts.DEN
    AMIN = opts.AMIN
    AMAX = opts.AMAX
    WMAX = opts.WMAX
    WMIN = opts.WMIN
    HMAX = opts.HMAX
    HMIN = opts.HMIN
    ARATIO = opts.ARATIO
    EDGEMIN = opts.EDGEMIN
    EDGEMAX = opts.EDGEMAX
    EPS = opts.EPS
    MINSAMPLES = opts.MINSAMPLES


    #begin transform
    filename = args[0]
    basename = os.path.splitext(filename)[0]
    img = cv2.imread(filename)

    grayImg = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    cv2.imwrite(basename+"_gray.png", grayImg)
    pha, ori, tot, T = phasesym(grayImg, nscale=NSCALE, norient=NORIENT, minWaveLength=3, mult=MULT, sigmaOnf=SIGMAONF, k=K, polarity=0)
    pha = cv2.normalize(pha, pha, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
    pha = np.uint8(pha)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K), pha)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_ori.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K), ori)
    np.savetxt('python_ori.txt',ori)

    pha = cv2.blur(pha, BLUR)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1]), pha)
    pha = cv2.cvtColor(pha, cv2.COLOR_GRAY2RGB)
    pha = cv2.cvtColor(pha, cv2.COLOR_RGB2LUV)


    (segmented_image, labels_image, number_regions) = pms.segment(pha, spatial_radius=SRAD, range_radius=RRAD, min_density=DEN)
    segmented_image=cv2.normalize(segmented_image, segmented_image, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
    segmented_image = np.uint8(segmented_image)
    segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_LUV2RGB)
    segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2GRAY)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN), segmented_image)


    #ori = np.uint8(ori)
    #(ori, labels_image, number_regions) = pms.segment(ori, spatial_radius=SRAD, range_radius=RRAD, min_density=DEN)
    #ori=cv2.normalize(ori, ori, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
    #ori = np.uint8(ori)
    #cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d_ori.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN), ori)


    thresh_img = cv2.adaptiveThreshold(segmented_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 7, 0)
    cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d_T.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN), thresh_img)


    #get centroids
    contours = getContours(thresh_img, AMIN, AMAX, WMIN, WMAX, HMIN, HMAX, ARATIO)
    centroids = getCentroids(contours)
    
    #cv2.imwrite(basename + "_PS" + "_%d_%d_%.2f_%.2f_%d_B_%d_%d_MS_%d_%d_%d_A_%d_%d_W_%d_%d_H_%d_%d_R_%.2f.png" % (NSCALE, NORIENT, MULT, SIGMAONF, K, BLUR[0], BLUR[1], SRAD, RRAD, DEN, AMIN, AMAX, WMIN, WMAX, HMIN, HMAX, ARATIO), img)

    histograms = []

    f = open('masterHist.txt')
    h = np.loadtxt(f)
    masterImg = cv2.imread('singleCar.png')
    masterHist = RadAngleHist(masterImg, 180, 27, 29)

    dirName = 'windowTiles'
    if not (os.path.isdir(dirName)):
        #create subdirectory if it does not already exist
        os.mkdir(dirName)

    outputImg = img.copy()
    #ori = np.loadtxt('matlab_ori.txt',delimiter=',')

    shapedCentroids = []
    for cen,cnt in zip(centroids,contours):
        #print cen
        #win = getImageWindow(img, cen[0],cen[1],26,52)

        #import pdb; pdb.set_trace()
        win = getImageWindow(img, cen[0],cen[1],55,55)   #correct


        #win = getImageWindow(img, cen[0],cen[1],51,51)
        filename = "win_%d_%d.jpg" % (cen[0], cen[1])
        cv2.imwrite(os.path.join(dirName, filename), win)
        histogram = RadAngleHist(win, ori[cen[0], cen[1]],cen[0],cen[1])
        #print ori[cen[0], cen[1]]
        #print masterHist.compare(histogram)
#        if masterHist.compare(histogram, dist=60) == 0:
#            histograms.append(histogram)
        if masterHist.compare(histogram) < 0.07:
            drawBox(outputImg, cnt)
            shapedCentroids.append(cen)
            # boxedImg = img.copy()
            # drawBox(boxedImg,cnt)
            # filename = "win_box_%d_%d.jpg" % (cen[0], cen[1])
            # cv2.imwrite(os.path.join(dirName, filename), boxedImg)
    dirName = ""
    filename = "Boxes_+_Centroids.jpg"
    cv2.imwrite(os.path.join(dirName, filename), outputImg)

    dbResult = scanBlobs(shapedCentroids, img.copy(), EPS, MINSAMPLES)
    drawClusterColors(dbResult, img.copy(), shapedCentroids)
Example no. 44
def meanShift(img, sradius=6, rradius=4.5, mdensity=50):

    (segmented_image, labels_image, number_regions) = pms.segment(img, sradius, rradius, mdensity)
    return segmented_image
Example no. 45
if len(rects) > 0:
  face = rects[len(rects)-1]

  # print img.shape, face

  r_img, r_face = resize.resize(img, face, resize_w, resize_h, face_ratio)

  # print r_img.shape, r_face

  face_x, face_y, face_w, face_h = r_face

  # cv2.rectangle(r_img, (face_x,face_y), (face_x+face_w,face_y+face_h), (255,0,0),2)


  # meanshift
  (segmented_img, labels_img, number_regions) = pms.segment(r_img, spatial_radius=9, range_radius=4.5, min_density=20)

  # kmeans
  pixels = reshape(segmented_img, (r_img.shape[0]*r_img.shape[1], r_img.shape[2]))
  centroids, _ = kmeans(pixels, 4) # four colors will be found
  qnt, _ = vq(pixels, centroids)
  centers_idx = reshape(qnt, (r_img.shape[0], r_img.shape[1]))
  # clustered_img = centroids[centers_idx]

  m_w = int(face_w * 0.1)
  m_h = int(face_h * 0.1)

  faces_idx = centers_idx[face_y:face_y+face_h-m_h*2,face_x+m_w:face_x+face_w-m_w]
  r_faces_idx = reshape(faces_idx, faces_idx.shape[0]*faces_idx.shape[1])
  cnt = Counter(r_faces_idx).most_common()
Example no. 46
def segment_img(original_image, spatial_radius=5,range_radius=5, min_density=60):
	(segmented_image, labels_image, number_regions) = pms.segment(original_image, spatial_radius, 
                                                              range_radius, min_density)