Example No. 1
def overlap_contours(i, j):
	# Take the 'j'th contour from the contours_sorted array and, for every point of 'j',
	# check whether it lies inside, on, or outside contour 'i'.
	# Returns j if contour 'j' is completely subsumed by 'i', i if 'i' is completely
	# subsumed by 'j', -1 if the contours partially overlap or touch, and -2 if they
	# are completely disjoint.
	completely_outside = True
	completely_subsumed = True
	for k in contours[j][:, 0]:  # every point in contour 'j' (contour arrays have shape (N, 1, 2))
		k_loc = cv2.pointPolygonTest(contours[i], (int(k[0]), int(k[1])), False)
		if k_loc == 0:  # it's on contour 'i'
			completely_outside = False
			completely_subsumed = False
			break
		elif k_loc == -1:  # it's outside, so not subsumed
			completely_subsumed = False
		else:  # k_loc == 1: it's strictly inside, so not completely outside
			completely_outside = False
	# end of for
	if completely_subsumed:
		return j
	if not completely_outside and not completely_subsumed:
		return -1  # partial overlap / on the border
	if completely_outside:  # test the reverse condition: object 'i' might be subsumed completely within object 'j'
		completely_subsumed = True
		for k in contours[i][:, 0]:  # every point in contour 'i'
			if cv2.pointPolygonTest(contours[j], (int(k[0]), int(k[1])), False) < 1:  # it's on or outside contour 'j'
				completely_subsumed = False
				break
		if completely_subsumed:
			return i
		else:
			return -2
	return -1
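A quick sanity check of the convention used above: with measureDist=False, cv2.pointPolygonTest returns +1.0 for a point strictly inside the contour, 0.0 for a point on it, and -1.0 for a point outside. A minimal sketch (the square contour is an assumption for illustration):

import cv2
import numpy as np

square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
print(cv2.pointPolygonTest(square, (5, 5), False))   #  1.0: inside
print(cv2.pointPolygonTest(square, (10, 5), False))  #  0.0: on the border
print(cv2.pointPolygonTest(square, (20, 5), False))  # -1.0: outside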
Example No. 2
    def distance_image(self, point=None):
        '''Find the distance between a point and the specified contour. point should be a tuple (x, y).
           If no point is given, the distance to every pixel is calculated and a distance image is returned.'''
        if type(point) == tuple:
            if len(point)==2:
                self.dist = cv2.pointPolygonTest(self.cnt,point,True)
                return self.dist
        else:
            dst = np.empty(self.img.shape)
            for i in xrange(self.img.shape[0]):
                for j in xrange(self.img.shape[1]):
                    dst.itemset(i,j,cv2.pointPolygonTest(self.cnt,(j,i),True))

            dst = dst+127
            dst = np.uint8(np.clip(dst,0,255))

            # plotting using palette method in numpy
            palette = []
            for i in xrange(256):
                if i<127:
                    palette.append([2*i,0,0])
                elif i==127:
                    palette.append([255,255,255])
                elif i>127:
                    l = i-128
                    palette.append([0,0,255-2*l])
            palette = np.array(palette,np.uint8)
            self.h2 = palette[dst]
            return self.h2
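With measureDist=True, as used in distance_image above, the call returns a signed Euclidean distance to the nearest contour edge instead of a flag: positive inside, negative outside, zero on the border. A minimal sketch with the same assumed square contour:

square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
print(cv2.pointPolygonTest(square, (5, 5), True))   #  5.0: 5 px inside
print(cv2.pointPolygonTest(square, (15, 5), True))  # -5.0: 5 px outside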
Example No. 3
def isEllipse(contour):
    '''
    Detects if the given polygon is an ellipse.
    Returns the ellipse form of the polygon if it's an ellipse, None otherwise.
    '''
    try:
        rect = cv2.fitEllipse(contour)  # Rotated rectangle representing the ellipse it tries to fit
    except cv2.error:
        return None
    (x, y), (w, h), angle = rect # x offset, y offset, width, height, angle
    w, h = h, w # Switch them since our ellipses are usually rotated 90 degrees

    TEST_POINTS = 16
    E = 0.1 # Radius percentage increase/decrease for test points
    MIN_SUCC_RATE = 0.9
    successes = 0

    for a in np.arange(0, 2*math.pi, 2*math.pi/TEST_POINTS):
        ipoint = (x+0.5*w*math.cos(a)*(1-E), y+0.5*h*math.sin(a)*(1-E))
        opoint = (x+0.5*w*math.cos(a)*(1+E), y+0.5*h*math.sin(a)*(1+E))
        if cv2.pointPolygonTest(contour, ipoint, False) > 0: # The inside point is inside
            successes += 1
        if cv2.pointPolygonTest(contour, opoint, False) < 0: # The outside point is outside
            successes += 1

    succ_rate = successes / (TEST_POINTS * 2)
    if succ_rate >= MIN_SUCC_RATE:
        return rect
    else:
        return None
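A hedged usage sketch for the function above; the synthetic 64-point contour is an assumption, and because of the axis swap inside the function the outcome can depend on the contour's orientation:

import math
import numpy as np

pts = [[[int(100 + 40 * math.cos(t)), int(100 + 20 * math.sin(t))]]
       for t in np.linspace(0, 2 * math.pi, 64, endpoint=False)]
contour = np.array(pts, dtype=np.int32)
rect = isEllipse(contour)  # rotated rect ((cx, cy), (w, h), angle) or None
print('ellipse detected' if rect is not None else 'not an ellipse')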
Example No. 4
def ppt(ct, p, b):	 
	if type(p) == tuple:
		return -cv2.pointPolygonTest(ct, p, b)
	elif len(np.shape(p)) == 1:
		return -cv2.pointPolygonTest(ct, tuple(p), b)
	else:
		return np.array([-cv2.pointPolygonTest(ct, tuple(pp), b) for pp in np.array(p)])
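A sketch of the three input forms ppt accepts (the square contour is an assumption). Note the sign is flipped relative to cv2.pointPolygonTest, so points inside come back negative:

ct = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
print(ppt(ct, (5, 5), True))             # -5.0 (single tuple)
print(ppt(ct, np.array([5, 5]), True))   # -5.0 (1-D array)
print(ppt(ct, [[5, 5], [15, 5]], True))  # array([-5., 5.]) (sequence of points)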
Example No. 5
def plot_intercontour_hist(image, outer_contour_id, contours, graph, normalized=True):
    outer_contour = contours[outer_contour_id]
    (x, y, width, height) = cv2.boundingRect(outer_contour)
    subimage = get_subimage(image, (x, y), (x + width, y + height))
    monochrome = cv2.cvtColor(subimage, cv2.COLOR_BGR2GRAY)
    inverted_mask = cv2.compare(monochrome, monochrome, cv2.CMP_EQ)
    inner_contours = [contours[int(contour_id)] for contour_id in graph.successors(outer_contour_id)]
    for i in range(width):
        for j in range(height):
            point = (x + i, y + j)
            outer_contour_test = cv2.pointPolygonTest(outer_contour, point, 0)
            inner_contours_test = -1
            for inner_contour in inner_contours:
                inner_contour_test = cv2.pointPolygonTest(inner_contour, point, 0)
                if inner_contour_test > 0:
                    inner_contours_test = 1
                    break
            if outer_contour_test >= 0 and inner_contours_test < 0:
                inverted_mask[j][i] = 0
    mask = cv2.bitwise_not(inverted_mask)
    cv.Set(cv.fromarray(subimage), WHITE, cv.fromarray(inverted_mask))
    inner_contour_id = len(str(inner_contours))
    print 'inner contour id: ', inner_contour_id
    image_name = '%d-%s'%(outer_contour_id, inner_contour_id)
    #cv2.imshow(image_name, subimage)
    #subhists = plot_hist(subimage, mask, image_name)
    (subhists, winnames) = plot_hist_hls(subimage, mask, image_name, normalized)
    return subhists, subimage, mask, x, y, winnames
Example No. 6
def isEllipse(contour):
    rect = cv2.fitEllipse(contour) # Rotated rectangle representing the ellipse it tries to fit
    (x, y), (w, h), angle = rect # x offset, y offset, width, height, angle
    w, h = h, w # Switch them since our ellipses are usually rotated 90 degrees

    # Draw TEST_INSIDE points inside the ellipse and TEST_OUTSIDE points outside and see if they're in the hull
    TEST_INSIDE = 10
    TEST_OUTSIDE = 10

    # Equation of ellipse: (x/a)^2 + (y/b)^2 = 1

    e = 0.1
    
    tests = []
    tests.append(cv2.pointPolygonTest(contour, (x, y), False))

    tests.append(cv2.pointPolygonTest(contour, (x+w*(0.5-e), y), False))
    tests.append(cv2.pointPolygonTest(contour, (x-w*(0.5-e), y), False))
    tests.append(cv2.pointPolygonTest(contour, (x, y+h*(0.5-e)), False))
    tests.append(cv2.pointPolygonTest(contour, (x, y-h*(0.5-e)), False))

    # The corner points lie outside a true ellipse, so negate the test: the negated
    # value is -1.0 only when a corner is wrongly inside the contour.
    tests.append(-cv2.pointPolygonTest(contour, (x+w*(0.5-e), y+h*(0.5-e)), False))
    tests.append(-cv2.pointPolygonTest(contour, (x+w*(0.5-e), y-h*(0.5-e)), False))
    tests.append(-cv2.pointPolygonTest(contour, (x-w*(0.5-e), y+h*(0.5-e)), False))
    tests.append(-cv2.pointPolygonTest(contour, (x-w*(0.5-e), y-h*(0.5-e)), False))

    for test in tests:
        if test == -1.0:
            return False

    return True
Example No. 7
def shrinkBoundingRectToCircumscribed(maskHull, x, y, w, h):
    # now we are going to SHRINK it until all 4 corners fit

    if (maskHull is None):
        return (0,0,0,0)

    testPolygon = maskHull

    # print "test polygon is:"
    # print testPolygon

    while True:
        ul = (x, y)
        ur = (x + w - 1, y)
        ll = (x, y + h - 1)
        lr = (x + w - 1, y + h - 1)
        testval = (cv2.pointPolygonTest(testPolygon, ul, False) == 1) and \
                  (cv2.pointPolygonTest(testPolygon, ur, False) == 1) and \
                  (cv2.pointPolygonTest(testPolygon, ll, False) == 1) and \
                  (cv2.pointPolygonTest(testPolygon, lr, False) == 1)
        if (testval):
            # print "matches at:"
            # print ul, ur, ll, lr
            return (x, y, w, h)
        # print "Testing %d,%d size %d,%d", (x,y,x+w,y+h)
        x = x + 1
        y = y + 1
        w = w - 2
        h = h - 2
        if (w <= 0 or h <= 0):
            return (0, 0, 0, 0)
    pass
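A hypothetical usage sketch (the mask and the version-agnostic [-2] indexing of the findContours result are assumptions):

cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
hull = cv2.convexHull(max(cnts, key=cv2.contourArea))
x, y, w, h = cv2.boundingRect(hull)
x, y, w, h = shrinkBoundingRectToCircumscribed(hull, x, y, w, h)  # (0, 0, 0, 0) if nothing fits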
Example No. 8
    def get_targets(self, fly_erased_img, headpoint, centroidpoint, _bodyAxis):

        kernel = np.ones((5,5),np.uint8)
        _, mask = cv2.threshold(fly_erased_img, 60, 255, cv2.THRESH_BINARY)
        mask = cv2.erode(mask, kernel, iterations=1)
        contourImage = mask.copy()
        contourImage = np.pad(contourImage,((2,2),(2,2)), mode='maximum')
        contours, hierarchy1 = cv2.findContours(contourImage, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        hierarchy = hierarchy1[0]
        
        for x in hierarchy:
            if x[3] <0:
                parent = x
        
        #headpoint = (int(track.loc[framenumber, 'c_head_location_x']), int(track.loc[framenumber, 'c_head_location_y']))
        candidateTargets = []
        
        for component in zip(contours, hierarchy):
            c = component[0]
            h = component[1]
            centroidCheck = cv2.pointPolygonTest(c,centroidpoint,True)
            if centroidCheck <=0:
                if np.array_equal(hierarchy[h[3]], parent) :  #is in outer hierarchy (parent is edge.)  
                    if h[2] > 0:   # has child (targets have inner and outer edge)
                        if (cv2.contourArea(c) <= 150000) & (cv2.contourArea(c) >= 20000):
                            ellipse = cv2.fitEllipse(c)
                            if not self.pointInEllipse(centroidpoint[0],centroidpoint[1],ellipse[0][0],ellipse[0][1],ellipse[1][0],ellipse[1][1],ellipse[2]):
                                candidateTargets.append(c)
            
        areas = []
        if len(candidateTargets) > 0:
            for T in range(len(candidateTargets)):
                areas.append(cv2.contourArea(candidateTargets[T]))

            TARGET = cv2.convexHull(candidateTargets[areas.index(max(areas))])
            M = cv2.moments(TARGET)
            targCentre = (int(M['m10']/M['m00']), int(M['m01']/M['m00']))

            distance = -1.0*cv2.pointPolygonTest(TARGET, headpoint, True) / 135.5  # based on 135.5 pixels per mm
            angle = self.angle_from_vertical(headpoint, targCentre)
            approachAngle = angle - _bodyAxis  # track.loc[framenumber, 'd_bodyAxis']
            if approachAngle < 0:
                approachAngle *= -1.0
            if approachAngle >= 180.0:
                approachAngle -= 180.0
        else:
            distance = np.nan
            approachAngle = np.nan
            TARGET = None
        return TARGET, distance, approachAngle
Example No. 9
    def triangle_is_valid(self, start):
        # check if middle point of triangle is inside polygon. otherwise triangle is invalid
        middle = self.calc_triangle_middle(self.triangle)
        if cv2.pointPolygonTest(np.array([self.points], 'int32'), middle, False) < 0:
            return False

        # check that the other points of the polygon are not inside the triangle. otherwise triangle is invalid
        for i, point in enumerate(self.points):
            # ignore points of triangle
            if i < start or i > start + 1:
                if cv2.pointPolygonTest(np.array([self.triangle], 'int32'), (point[0], point[1]), False) > 0:
                    return False

        # check if area of triangle is not too big and not too small for polygon
        area = cv2.contourArea(np.array(self.triangle, 'int32'))
        return MIN_COLOR_TRIANGLE_AREA < area / self.area < MAX_COLOR_TRIANGLE_AREA
Example No. 10
def __compare_segmentations(img_a, img_b):
    # image, contours, hierarchy = cv.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) [opencv3]
    # contours, hierarchy = cv.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) [opencv2]
    out_img_a, ct_img_a, h_a = cv.findContours(img_a, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    out_img_b, ct_img_b, h_b = cv.findContours(img_b, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)

    error = []

    height, width = img_a.shape[:2]

    for comp_a in ct_img_a:

        mask_a = np.zeros((height, width), np.uint8)
        mask_b = np.zeros((height, width), np.uint8)

        first_point = comp_a[0][0]

        cv.fillConvexPoly(mask_a, comp_a, (255))

        for comp_b in ct_img_b:
            if cv.pointPolygonTest(comp_b, (first_point[0], first_point[1]), False) >= 0:
                cv.fillConvexPoly(mask_b, comp_b, (50))
                break

        set_difference = mask_a - mask_b
        set_difference_count = (set_difference == 255).sum()

        error.append(set_difference_count)

    return error
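One caveat worth noting: cv.fillConvexPoly assumes a convex polygon and can fill concave contours incorrectly. If the segmentation components may be concave, a hedged alternative is cv.drawContours with a negative thickness, which fills arbitrary contours:

mask_a = np.zeros((height, width), np.uint8)
cv.drawContours(mask_a, [comp_a], -1, 255, -1)  # thickness=-1 fills the contour interior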
Example No. 11
def rotate_contour(contour, frame_bin):
    center, size, angle = cv2.minAreaRect(contour)
    xt, yt, w, h = cv2.boundingRect(contour)
    region_points = []
    for i in range(xt, xt+w):
        for j in range(yt, yt+h):
            dist = cv2.pointPolygonTest(contour, (i, j), False)
            if dist >= 0 and frame_bin[j, i] == 255:  # is the point inside the contour?
                region_points.append([i, j])
    cx,cy = center
    height, width = size
    if width<height:
        angle-=90
    # Rotate every point of the region around the rotation centre
    alpha = np.pi/2 - abs(np.radians(angle))
    region_points_rotated = np.ndarray((len(region_points), 1,2), dtype=np.int32)
    for i, point in enumerate(region_points):
        x = point[0]
        y = point[1]
        
        #TODO 1 - compute the coordinates of the point after rotation
        rx = np.sin(alpha)*(x-cx) - np.cos(alpha)*(y-cy) + cx
        ry = np.cos(alpha)*(x-cx) + np.sin(alpha)*(y-cy) + cy
        region_points_rotated[i] = [rx,ry]
    return region_points_rotated
Example No. 12
    def select_and_convexify(self, selection_method, make_convex, object):

        # index_desired = self.candidate_objects.index(self.desired_object)
        object_index = self.candidate_objects.index(object)
        self.segments[object] = np.zeros(self.segmentation.shape, np.uint8)
        self.contours, hierarchy = cv2.findContours(np.logical_and(self.bin_mask, self.segmentation == object_index).astype('uint8'), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        if self.contours:
            # http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html
            # compute the area for each contour

            if selection_method == "all":
                self.selected_contours = range(len(self.contours))
            elif selection_method == "largest":
                contour_areas = [cv2.contourArea(cnt) for cnt in self.contours]
                self.selected_contours = [np.argmax(contour_areas)]
            elif selection_method == "max_smooth":
                # find most likely point for object on the parts that were segmented
                argmax = (self.posterior_images_smooth[object] * (self.segmentation == object_index)).argmax()
                # compute the image coordinates of this point
                max_posterior_point = np.unravel_index(argmax, self.posterior_images_smooth[object].shape)[::-1]
                # select contour that includes this point
                self.selected_contours = [np.argmax([cv2.pointPolygonTest(cnt,max_posterior_point,False) for cnt in self.contours])]
            else:
                print("I DON'T KNOW THIS SELECTION METHOD!")

            for i in self.selected_contours:

                if make_convex:
                    self.contours[i] = cv2.convexHull(self.contours[i])

                cv2.drawContours(self.segments[object], [self.contours[i]], 0, 255, -1)
Example No. 13
def getInternalPixels(img, contour, nullval=-1):
    maxx, maxy = 0, 0
    minx, miny = sys.maxsize, sys.maxsize

    for val in contour:
        if val[0][0] > maxx :
            maxx = val[0][0]
        if val[0][0] < minx :
            minx = val[0][0]
        if val[0][1] > maxy : 
            maxy = val[0][1] 
        if val[0][1] < miny : 
            miny = val[0][1]

    retPx = np.zeros((maxy-miny+3, maxx-minx+3, 3), np.uint8)

    ri, ci = 0, 0

    for row in range(miny-1, maxy+2):
        ci = 0
        for col in range(minx-1, maxx+2):
            if cv2.pointPolygonTest(contour, (col, row), False) >= 0:
                retPx[ri][ci] = img[row][col]
            else:
                retPx[ri][ci] = nullval
            ci += 1
        ri += 1

    return retPx
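A hypothetical usage sketch (binary and img are assumptions; [-2] keeps it working across findContours return signatures):

cnts = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
largest = max(cnts, key=cv2.contourArea)
patch = getInternalPixels(img, largest)  # pixels inside the contour; the rest set to nullval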
Example No. 14
def extract_face(face_id, img_width, img_height, face_name):
	landmark = api.detection.landmark(face_id=face_id, type='83p')
	
	#center point to expend the contour
	nose_x = landmark['result'][0]['landmark']['nose_tip']['x']
	nose_y = landmark['result'][0]['landmark']['nose_tip']['y']
	nose_x = nose_x / 100 * img_width
	nose_y = nose_y / 100 * img_height

	contour = []
	contour_point_name = []
	for v in contour_name_list2:
		x = landmark['result'][0]['landmark'][v]['x']
		y = landmark['result'][0]['landmark'][v]['y']
		x = x / 100 * img_width
		y = y / 100 * img_height
		if 'mouth' in v:
			dx = (x-nose_x) * 1.2
			dy = (y-nose_y) * 1.2
			#print '%s: %5.2f %5.2f' % (v, x-nose_x, y-nose_y)
			x = dx + nose_x
			y = dy + nose_y
		contour.append([x, y])
	contour = np.array(contour, dtype=np.int32)
	extract_face = cv2.imread(face_name)
	#print extract_face.shape
	for x in xrange(img_width):
		for y in xrange(img_height):
			if cv2.pointPolygonTest(contour,(x,y),False) < 0:
				#print x, y
				extract_face[y][x][0] = 0
				extract_face[y][x][1] = 0
				extract_face[y][x][2] = 0
	return extract_face
Example No. 15
 def pointInRoi(self, point):
     """
     Returns True if the point is in the roi
     
     :param tuple point: an (x, y) point
     """
     return cv2.pointPolygonTest(self.points, point, False) > 0
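Note that the strict > 0 above excludes points lying exactly on the ROI border. If border points should count as inside, a minimal variant (hypothetical name) is:

def pointInRoiInclusive(self, point):
    # >= 0 also accepts points exactly on the contour
    return cv2.pointPolygonTest(self.points, point, False) >= 0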
Example No. 16
 def testInside(self, p, measureDist=False):
     """extends Widget.testInside() to support distance to ellipse.
     Note: distance to ellipse is approximate."""
     if self.type == 'ellipse': 
         pdiff = -(self.center-p)
         uc = numpy.dot(pdiff, self.nu) / self.u
         vc = numpy.dot(pdiff, self.nv) / self.v
         d2 = uc*uc + vc*vc
         if measureDist:
             d = math.sqrt(d2)
             if d:
                 uc /= d
                 vc /= d
             else:
                 uc = 1
                 vc = 0
             pc = self.center + uc*self.u*self.nu + vc*self.v*self.nv
             diff = -(pc - p)
             dc = math.sqrt(numpy.dot(diff, diff))
             if d2 < 1:
                 return dc
             else:
                 return -dc
         elif d2 == 1:
             return 0
         elif d2 < 1:
             return 1
         elif d2 > 1:
             return -1
     else:
         if len(self.points) != 9:
             self.updatePoints()
         ppoly = self.points[self.outer]
         return cv2.pointPolygonTest( ppoly, a2t(p), measureDist )
Example No. 17
def find_morse_contours(group_morse):
    h, w = group_morse.shape[:2]
    group_morse = cv2.erode(group_morse, np.ones((11, 11), np.uint8), iterations=8)
    # erode to create big contours - groups, aka letters
    group_morse[0:int(0.03 * h), :] = 255
    group_morse[int(0.97 * h):h, :] = 255
    group_morse[:, 0:int(0.03 * w)] = 255
    group_morse[:, int(0.97 * w):w] = 255
    _, grp_cnts, _ = cv2.findContours(group_morse.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    morse_grp_cnts = []
    for c in grp_cnts:
        if cv2.arcLength(c, 1) < ((2 * h + 2 * w) * 0.5):
            morse_grp_cnts.append(c)

    morse_groups = []
    morse_groups_cent = []
    for grp_cnt in morse_grp_cnts:
        grp = []
        grp_cent = []
        for i in range(0, len(morse_cnts)):
            if cv2.pointPolygonTest(grp_cnt, (morse_cent[i][0], morse_cent[i][1]), False) > 0:
                # if centroid is in group shape, this dot/dash belongs to group
                grp.append(morse_cnts[i])
                grp_cent.append(morse_cent[i])
        if len(grp) != 0:
            morse_groups.append(grp)  # add group to group set
            morse_groups_cent.append(grp_cent)

    morse_groups = morse_groups[::-1]
    morse_groups_cent = morse_groups_cent[::-1]

    for i in range(len(morse_groups)):
        morse_groups[i], morse_groups_cent[i] = sort_by_centroids(morse_groups[i], morse_groups_cent[i])

    return group_morse, morse_groups, morse_groups_cent, morse_grp_cnts
Example No. 18
def identifyNotes(img,noteheads,staffData):
	# Invert the image for findContours
	img = 255 - img

	# Find contours
	contours,hierarchy = cv2.findContours(img,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
	# Remove contours that have dimensions that are too large to correspond to notes
	heightThreshold = 8*(staffData.lineThickness + staffData.lineSpacing)
	filteredContours = []
	for contour in contours:
		roi = cv2.boundingRect(contour)
		if (roi[3] < heightThreshold):
			filteredContours.append(contour)
	contours = filteredContours

	# contourMap is a list of (contour, noteheads) tuples, where contour is a list of points
	# representing a contour and noteheads is a list of noteheads whose centre lies within that contour
	contourMap = []
	
	# Identify, for each contour, which noteheads has a centre that lies within that contour
	for contour in contours:
		noteheadsInContour = []
		for notehead in noteheads:
			centre = tuple(p + d / 2 for p, d in zip(notehead.point, notehead.dimensions))
			if (cv2.pointPolygonTest(contour, centre, False) == 1):
				noteheadsInContour.append(notehead)
		if (noteheadsInContour):
			contourMap.append((contour,noteheadsInContour))
Example No. 19
def findMaxContours():

    conts = []

    for c in range(180):
    # for c in [170,50,86,111]:

        lower_blue = np.array([c-hdelta,0,0])
        upper_blue = np.array([c+hdelta,255,250])
        mask = cv2.inRange(img, lower_blue, upper_blue)

        x = kersize
        kernel = np.ones((x,x), np.uint8)
        erosion = cv2.dilate(mask,kernel,iterations = 1)
        kernel = np.ones((x/2,x/2), np.uint8)
        erosion = cv2.erode(erosion,kernel,iterations = 1)

        contours,h = cv2.findContours(erosion,1,2)
        for i,cnt in enumerate(contours):
            x,y,w,h = cv2.boundingRect(cnt)
            if w > 0.6 * img.shape[1]:
                app = True
                for cnt2 in conts:
                    # skip if this contour's first point is already inside an accepted contour
                    if cv2.pointPolygonTest(cnt2, tuple(cnt[0][0]), False) >= 0:
                        app = False
                if app:
                    conts.append(cnt)

    contsS = sorted(conts, key=lambda x: cv2.boundingRect(x)[2] )

    for i,cnt in enumerate(contsS[-90:] ):
        cv2.drawContours(img,[cnt],0,(0,i*10 % 255, (i*223)%255  ),4)
    cv2.imshow('dst',img)

    return contsS[-100:]
Example No. 20
def select_roi2(image_orig, image_bin):
    
    img, contours_borders, hierarchy = cv2.findContours(image_bin.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    
    contours = []
    contour_angles = []
    contour_centers = []
    contour_sizes = []
    for contour in contours_borders:
        
        if((cv2.contourArea(contour) >200) and (isTriangleP(contour) or isOsmougao(contour) or isSquareP(contour)  or isCircleP(contour))):
            center, size, angle = cv2.minAreaRect(contour)
            xt, yt, w, h = cv2.boundingRect(contour)
            cv2.rectangle(image_orig, (xt, yt), (xt+w, yt+h), (255, 0, 0), 10)
    
            region_points = []
            for i in range(xt, xt+w):
                for j in range(yt, yt+h):
                    dist = cv2.pointPolygonTest(contour, (i, j), False)
                    if dist >= 0 and image_bin[j, i] == 255:  # is the point inside the contour?
                        region_points.append([i, j])
            contour_centers.append(center)
            contour_angles.append(angle)
            contour_sizes.append(size)
            contours.append(region_points)
    
    # Rotate the contours into an upright (vertical) position
    contours = rotate_regions(contours, contour_angles, contour_centers, contour_sizes)
    
    # merging of hooks and diacritics
    #contours = merge_regions(contours)
    
    regions_dict = {}
    for contour in contours:
    
        min_x = min(contour[:,0])
        max_x = max(contour[:,0])
        min_y = min(contour[:,1])
        max_y = max(contour[:,1])

        region = np.zeros((max_y-min_y+1,max_x-min_x+1), dtype=np.int16)
        for point in contour:
            x = point[0]
            y = point[1]
            
             # TODO 3 - convert the region point coordinates into relative coordinates
            '''Suppose the top-left point of the region has absolute coordinates (100,100).
            If we take a point with coordinates inside the region, say (105,105), after
            converting to relative coordinates the point should have coordinates (5,5)
            within the region itself.
            '''
            region[y-min_y,x-min_x] = 255

        
        regions_dict[min_x] = [resize_region(region), (min_x,min_y,max_x-min_x,max_y-min_y)]
        
    sorted_regions_dict = collections.OrderedDict(sorted(regions_dict.items()))
    sorted_regions = np.array(sorted_regions_dict.values())
    
    return image_orig, sorted_regions[:, 0]
Example No. 21
def sort_pixels(image,shape):
    spill_pixels = []
    outside_pixels = []
#    test_image = np.zeros(image.shape)
#    cv2.fillConvexPoly(test_image,shape,1)
    border = 50
    x,y,w,h = cv2.boundingRect(shape)
    x = max(0,x-border)
    y = max(0,y-border)
    w += 2*border
    h += 2*border
    x2 = min(x+w,image.shape[1])
    y2 = min(y+h,image.shape[0])
    ### Testing
#    rect = test_image[y:y+h,x:x+w]
#    cv2.imshow("test",rect)
#    cv2.waitKey(0)
    ### Testing
    for i in range(x,x2):
        for j in range(y,y2):
#            if test_image[j,i]:
            dist = cv2.pointPolygonTest(shape,(i,j),True)
            if dist >= 0:
                spill_pixels.append(image[j,i])
            elif dist >= -50:
                outside_pixels.append(image[j,i])
                
#    print cv2.contourArea(shape),len(pixels),cv2.arcLength(shape,True)
#    assert cv2.contourArea(shape) == len(pixels)
    return np.array(spill_pixels),np.array(outside_pixels)
Example No. 22
 def contains(self, point, closed=True):
     """do we contain point? if closed (in set-theoretic sense)
        then we include points on our border, else False"""
     contour = np.array([[x,y] for x,y in self.points], dtype=np.float32)
     point = (np.float32(point.x()), np.float32(point.y()))
     test = cv2.pointPolygonTest(contour, point, False)
     return test > (-0.5 if closed else 0.5)
Example No. 23
 def distFromBorder(self, point):
     """
     Returns the distance from the point to the border of the roi
     
     :param tuple point: The (x, y) point to check
     """
     return cv2.pointPolygonTest(self.points, point, True)
Example No. 24
def minimumDistanceContour(contours, center):
    distances = []
    for i in range(0, len(contours)):
        distance = cv2.pointPolygonTest(contours[i], center, True)
        distances.append(np.fabs(distance))
    index = distances.index(min(distances))
    return index
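A hedged usage sketch: pick the contour whose border passes closest to a given point. The np.fabs makes the choice side-agnostic, since the signed distance is negative for points outside a contour (the point is an assumption):

idx = minimumDistanceContour(contours, (120, 80))
closest = contours[idx]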
Example No. 25
 def in_region(self, contour):
     hits = 0
     for (x, y) in self.control_pts:
         if cv2.pointPolygonTest(contour, (x, y), False) >= 0:
             hits += 1
     
     # do majority vote of points
     return hits > len(self.control_pts) / 2
Example No. 26
    def selectByPiont(self, row, col):
        for c in self.contours:
            if cv2.pointPolygonTest(c, (col,row), False) >= 0:
                r = cv2.boundingRect(c)
                i1, j1, i2, j2 = rect2coordinates(r)
                return i1, j1, i2, j2

        return -1, -1, -1, -1
Example No. 27
def PolygonTestRC(contour, pt, count = 0, counter_code = ''):
    count += 1
    Inside = cv2.pointPolygonTest(contour, pt, False)
    if Inside > -1:
        counter_code = counter_code + '+' + str(count)
    else:
        counter_code = counter_code + '-' + str(count)
    return count, counter_code
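A sketch of how the running count/code pair threads through repeated calls (the contour and test points are assumptions):

count, code = 0, ''
for pt in [(5, 5), (500, 500)]:
    count, code = PolygonTestRC(contour, pt, count, code)
print(code)  # e.g. '+1-2': point 1 inside or on the contour, point 2 outside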
Example No. 28
def in_contour(input_file, point, squares=True, thresh_val=255, k_size=5, iterations=30):
    """
    >>> isinstance(in_contour(test_image, (200, 200)), np.ndarray)
    True

    >>> in_contour(test_image, (0, 0))
    -1

    >>> in_contour(None, (0,0))
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    IOError: The input file can't be a None object

    >>> in_contour("", (0,0))
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    IOError: The input file can't be ''.

    >>> in_contour("fakeRoute", (0,0))
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    IOError: Input file not found.

    >>> in_contour(test_image, (0,0), thresh_val=-200)
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    ValueError: All threshold values must be between 0 and 255.

    >>> in_contour(test_image, (0,0), k_size=-10)
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    ValueError: Kernel size value must be greater than 0.

    >>> in_contour(test_image, (0,0), iterations=-10)
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    ValueError: Iterations value must be greater than 0.
    """
    image = iu.get_image(input_file)

    check_threshold(thresh_val)
    check_kernel(k_size)
    check_iterations(iterations)

    contours = detect_contours(image, thresh_val, k_size, iterations)

    if squares:
        contours = get_squares(contours)
        contours = join_contours(contours)

    cnt = -1

    for c in contours:
        if cv.pointPolygonTest(c, point, measureDist=False) >= 0:
            cnt = c

    return cnt
Example No. 29
    def _get_palm_circle(contour, fore):
        dist_max = np.zeros((fore.shape[0], fore.shape[1]))
        for y in range(0, fore.shape[0], 4):
            for x in range(0, fore.shape[1], 4):
                if fore[y, x]:
                    dist_max[y, x] = cv2.pointPolygonTest(contour, (x, y), True)

        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(dist_max)
        return max_loc, max_val
Example No. 30
def circleCheck(contours, hierarchy, circleX, circleY):
    for h,cnt in enumerate(contours):
        if cv2.pointPolygonTest(cnt,(circleX,circleY), False) == 1:
            if hierarchy[0,h][2] == -1:
                return h
            else:
                nuH = hierarchy[0,h][2]
                kinderList = []
                passed = True
                if cv2.pointPolygonTest(contours[nuH],(circleX,circleY), False) != 1:
                    kinderList = recursiveNeighborCheck(kinderList,hierarchy,nuH,0)
                    kinderList = recursiveNeighborCheck(kinderList,hierarchy,nuH,1)
                    for kind in kinderList:
                        if cv2.pointPolygonTest(contours[kind],(circleX,circleY), False) == 1:
                            passed = False
                    if passed:
                        return h
    return False
Example No. 31
 def testInside(self, p, measureDist=False):
     """tests to see if the point is inside the region described by
     this Widget, or returns an approximate distance, depending on
     whether measureDist is True."""
     return cv2.pointPolygonTest(self.points, a2t(p), measureDist)
Example No. 32
 def isInContourV2(cont, pt, patch_size=256):
     return 1 if cv2.pointPolygonTest(cont,
                                      (pt[0] + patch_size / 2, pt[1] +
                                       patch_size / 2), False) >= 0 else 0
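A minimal sketch of the patch-centre test above, calling it as a plain function (the square contour is an assumption): pt is treated as the patch's top-left corner and shifted by half a patch before testing.

cont = np.array([[[0, 0]], [[512, 0]], [[512, 512]], [[0, 512]]], dtype=np.int32)
print(isInContourV2(cont, (100, 100)))  # 1: centre (228.0, 228.0) is inside
print(isInContourV2(cont, (500, 500)))  # 0: centre (628.0, 628.0) is outside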
Example No. 33
def fix_hsv(x, y, param):
    sliders = param[1].table
    frame = param[0]

    #print (sliders.value())

    sliders['low h'].setValue(0)
    sliders['low s'].setValue(100)
    sliders['low v'].setValue(50)

    sliders['high h'].setValue(187)
    sliders['high s'].setValue(255)
    sliders['high v'].setValue(255)

    blur = cv2.GaussianBlur(frame, (15, 15), 0)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    sample = hsv[y, x]
    print('clicked on', sample)

    sliders['high h'].setValue(sample[0] + 1)
    sliders['low h'].setValue(sample[0] - 1)

    best_area = 0.0
    best_radius = 0.0
    object_found = False
    while object_found == False:

        lower_red = np.array([
            sliders['low h'].value(), sliders['low s'].value(),
            sliders['low v'].value()
        ])
        upper_red = np.array([
            sliders['high h'].value(), sliders['high s'].value(),
            sliders['high v'].value()
        ])

        mask = cv2.inRange(hsv, lower_red, upper_red)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0]

        for contour in cnts:
            inside = cv2.pointPolygonTest(contour, (x, y), False)
            if inside >= 0:
                object_found = True
                print('found')
                ((x, y), radius) = cv2.minEnclosingCircle(contour)
                area = cv2.contourArea(contour)
                best_area = area
                break
            print('inside:', inside)
        if object_found == False:
            sliders['high h'].setValue(sliders['high h'].value() + 1)
            sliders['low h'].setValue(sliders['low h'].value() - 1)

    print('best radius', radius)
    sliders['high h'].setValue(sliders['high h'].value() + 1)
    lower_red = np.array([
        sliders['low h'].value(), sliders['low s'].value(),
        sliders['low v'].value()
    ])
    upper_red = np.array([
        sliders['high h'].value(), sliders['high s'].value(),
        sliders['high v'].value()
    ])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0]

    for contour in cnts:
        inside = cv2.pointPolygonTest(contour, (x, y), False)
        if inside >= 0:
            ((x, y), radius) = cv2.minEnclosingCircle(contour)
            area = cv2.contourArea(contour)
            while area > best_area and radius > best_radius:
                print('area', area, 'radius', radius)
                best_radius = radius
                best_area = area
                sliders['high h'].setValue(sliders['high h'].value() + 1)
                #sliders['low h'].setValue(sliders['low h'].value()-1)
                lower_red = np.array([
                    sliders['low h'].value(), sliders['low s'].value(),
                    sliders['low v'].value()
                ])
                upper_red = np.array([
                    sliders['high h'].value(), sliders['high s'].value(),
                    sliders['high v'].value()
                ])
                mask = cv2.inRange(hsv, lower_red, upper_red)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = cnts[0]
                for contour in cnts:
                    inside = cv2.pointPolygonTest(contour, (x, y), False)
                    if inside >= 0:
                        ((x, y), radius) = cv2.minEnclosingCircle(contour)
                        area = cv2.contourArea(contour)

    sliders['low h'].setValue(sliders['low h'].value() - 1)
    lower_red = np.array([
        sliders['low h'].value(), sliders['low s'].value(),
        sliders['low v'].value()
    ])
    upper_red = np.array([
        sliders['high h'].value(), sliders['high s'].value(),
        sliders['high v'].value()
    ])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0]

    for contour in cnts:
        inside = cv2.pointPolygonTest(contour, (x, y), False)
        if inside >= 0:
            ((x, y), radius) = cv2.minEnclosingCircle(contour)
            area = cv2.contourArea(contour)
            while area > best_area and radius > best_radius:
                print('area', area, 'radius', radius)
                best_radius = radius
                best_area = area
                #sliders['high h'].setValue(sliders['high h'].value()+1)
                sliders['low h'].setValue(sliders['low h'].value() - 1)
                lower_red = np.array([
                    sliders['low h'].value(), sliders['low s'].value(),
                    sliders['low v'].value()
                ])
                upper_red = np.array([
                    sliders['high h'].value(), sliders['high s'].value(),
                    sliders['high v'].value()
                ])
                mask = cv2.inRange(hsv, lower_red, upper_red)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = cnts[0]
                for contour in cnts:
                    inside = cv2.pointPolygonTest(contour, (x, y), False)
                    if inside >= 0:
                        ((x, y), radius) = cv2.minEnclosingCircle(contour)
                        area = cv2.contourArea(contour)

    sliders['low s'].setValue(sliders['low s'].value() + 1)
    lower_red = np.array([
        sliders['low h'].value(), sliders['low s'].value(),
        sliders['low v'].value()
    ])
    upper_red = np.array([
        sliders['high h'].value(), sliders['high s'].value(),
        sliders['high v'].value()
    ])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0]

    while len(cnts) > 1:
        sliders['low s'].setValue(sliders['low s'].value() + 1)

        lower_red = np.array([
            sliders['low h'].value(), sliders['low s'].value(),
            sliders['low v'].value()
        ])
        upper_red = np.array([
            sliders['high h'].value(), sliders['high s'].value(),
            sliders['high v'].value()
        ])
        mask = cv2.inRange(hsv, lower_red, upper_red)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0]
Example No. 34
def analyze_object(img, obj, mask):
    """Outputs numeric properties for an input object (contour or grouped contours).

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary image to use as mask for moments analysis

    Returns:
    shape_header    = shape data table headers
    shape_data      = shape data table values
    analysis_images = list of output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :return shape_header: list
    :return shape_data: list
    :return analysis_images: list
    """

    params.device += 1

    # Valid objects can only be analyzed if they have >= 5 vertices
    if len(obj) < 5:
        return None, None, None

    ori_img = np.copy(img)
    # Convert grayscale images to color
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)

    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)
    size = ix, iy, 3
    size1 = ix, iy
    background = np.zeros(size, dtype=np.uint8)
    background1 = np.zeros(size1, dtype=np.uint8)
    background2 = np.zeros(size1, dtype=np.uint8)

    # Check if the object is touching the image boundaries (QC)
    frame_background = np.zeros(size1, dtype=np.uint8)
    frame = frame_background + 1
    frame_contour, frame_hierarchy = cv2.findContours(
        frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
    ptest = []
    vobj = np.vstack(obj)
    for i, c in enumerate(vobj):
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(frame_contour[0], xy, measureDist=False)
        ptest.append(pptest)
    in_bounds = all(c == 1 for c in ptest)

    # Convex Hull
    hull = cv2.convexHull(obj)
    hull_vertices = len(hull)
    # Moments
    #  m = cv2.moments(obj)
    m = cv2.moments(mask, binaryImage=True)
    # Properties
    # Area
    area = m['m00']

    if area:
        # Convex Hull area
        hull_area = cv2.contourArea(hull)
        # Solidity
        solidity = 1
        if int(hull_area) != 0:
            solidity = area / hull_area
        # Perimeter
        perimeter = cv2.arcLength(obj, closed=True)
        # x and y position (bottom left?) and extent x (width) and extent y (height)
        x, y, width, height = cv2.boundingRect(obj)
        # Centroid (center of mass x, center of mass y)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        # Ellipse
        center, axes, angle = cv2.fitEllipse(obj)
        major_axis = np.argmax(axes)
        minor_axis = 1 - major_axis
        major_axis_length = axes[major_axis]
        minor_axis_length = axes[minor_axis]
        eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis])**2)

        # Longest Axis: line through center of mass and point on the convex hull that is furthest away
        cv2.circle(background, (int(cmx), int(cmy)), 4, (255, 255, 255), -1)
        center_p = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
        ret, centerp_binary = cv2.threshold(center_p, 0, 255,
                                            cv2.THRESH_BINARY)
        centerpoint, cpoint_h = cv2.findContours(centerp_binary, cv2.RETR_TREE,
                                                 cv2.CHAIN_APPROX_NONE)[-2:]

        dist = []
        vhull = np.vstack(hull)

        for i, c in enumerate(vhull):
            xy = tuple(c)
            pptest = cv2.pointPolygonTest(centerpoint[0], xy, measureDist=True)
            dist.append(pptest)

        abs_dist = np.absolute(dist)
        max_i = np.argmax(abs_dist)

        caliper_max_x, caliper_max_y = list(tuple(vhull[max_i]))
        caliper_mid_x, caliper_mid_y = [int(cmx), int(cmy)]

        xdiff = float(caliper_max_x - caliper_mid_x)
        ydiff = float(caliper_max_y - caliper_mid_y)

        # Set default values
        slope = 1

        if xdiff != 0:
            slope = (float(ydiff / xdiff))
        b_line = caliper_mid_y - (slope * caliper_mid_x)

        if slope != 0:
            xintercept = int(-b_line / slope)
            xintercept1 = int((ix - b_line) / slope)
            if 0 <= xintercept <= iy and 0 <= xintercept1 <= iy:
                cv2.line(background1, (xintercept1, ix), (xintercept, 0),
                         (255), params.line_thickness)
            elif xintercept < 0 or xintercept > iy or xintercept1 < 0 or xintercept1 > iy:
                # Used a random number generator to test if either of these cases were possible but neither is possible
                # if xintercept < 0 and 0 <= xintercept1 <= iy:
                #     yintercept = int(b_line)
                #     cv2.line(background1, (0, yintercept), (xintercept1, ix), (255), 5)
                # elif xintercept > iy and 0 <= xintercept1 <= iy:
                #     yintercept1 = int((slope * iy) + b_line)
                #     cv2.line(background1, (iy, yintercept1), (xintercept1, ix), (255), 5)
                # elif 0 <= xintercept <= iy and xintercept1 < 0:
                #     yintercept = int(b_line)
                #     cv2.line(background1, (0, yintercept), (xintercept, 0), (255), 5)
                # elif 0 <= xintercept <= iy and xintercept1 > iy:
                #     yintercept1 = int((slope * iy) + b_line)
                #     cv2.line(background1, (iy, yintercept1), (xintercept, 0), (255), 5)
                # else:
                yintercept = int(b_line)
                yintercept1 = int((slope * iy) + b_line)
                cv2.line(background1, (0, yintercept), (iy, yintercept1),
                         (255), 5)
        else:
            cv2.line(background1, (iy, caliper_mid_y), (0, caliper_mid_y),
                     (255), params.line_thickness)

        ret1, line_binary = cv2.threshold(background1, 0, 255,
                                          cv2.THRESH_BINARY)
        # print_image(line_binary,(str(device)+'_caliperfit.png'))

        cv2.drawContours(background2, [hull], -1, (255), -1)
        ret2, hullp_binary = cv2.threshold(background2, 0, 255,
                                           cv2.THRESH_BINARY)
        # print_image(hullp_binary,(str(device)+'_hull.png'))

        caliper = cv2.multiply(line_binary, hullp_binary)
        # print_image(caliper,(str(device)+'_caliperlength.png'))

        caliper_y, caliper_x = np.array(caliper.nonzero())
        caliper_matrix = np.vstack((caliper_x, caliper_y))
        caliper_transpose = np.transpose(caliper_matrix)
        caliper_length = len(caliper_transpose)

        caliper_transpose1 = np.lexsort((caliper_y, caliper_x))
        caliper_transpose2 = [(caliper_x[i], caliper_y[i])
                              for i in caliper_transpose1]
        caliper_transpose = np.array(caliper_transpose2)

    # else:
    #  hull_area, solidity, perimeter, width, height, cmx, cmy = 'ND', 'ND', 'ND', 'ND', 'ND', 'ND', 'ND'

    # Store Shape Data
    shape_header = [
        'HEADER_SHAPES', 'area', 'hull-area', 'solidity', 'perimeter', 'width',
        'height', 'longest_axis', 'center-of-mass-x', 'center-of-mass-y',
        'hull_vertices', 'in_bounds', 'ellipse_center_x', 'ellipse_center_y',
        'ellipse_major_axis', 'ellipse_minor_axis', 'ellipse_angle',
        'ellipse_eccentricity'
    ]

    shape_data = [
        'SHAPES_DATA', area, hull_area, solidity, perimeter, width, height,
        caliper_length, cmx, cmy, hull_vertices, in_bounds, center[0],
        center[1], major_axis_length, minor_axis_length, angle, eccentricity
    ]

    analysis_images = []

    # Draw properties
    if area:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (0, 0, 255),
                         params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (0, 0, 255),
                 params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (0, 0, 255),
                 params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])),
                 (tuple(caliper_transpose[0])), (0, 0, 255),
                 params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (0, 0, 255),
                   params.line_thickness)
        # Output images with convex hull, extent x and y
        # out_file = os.path.splitext(filename)[0] + '_shapes.jpg'
        # out_file1 = os.path.splitext(filename)[0] + '_mask.jpg'

        # print_image(ori_img, out_file)
        analysis_images.append(ori_img)

        # print_image(mask, out_file1)
        analysis_images.append(mask)

    else:
        pass

    # Store into global measurements
    if not "shapes" in outputs.measurements:
        outputs.measurements["shapes"] = {}
    outputs.measurements["shapes"]["area"] = area
    outputs.measurements["shapes"]["hull-area"] = hull_area
    outputs.measurements["shapes"]["solidity"] = solidity
    outputs.measurements["shapes"]["perimeter"] = perimeter
    outputs.measurements["shapes"]["width"] = width
    outputs.measurements["shapes"]["height"] = height
    outputs.measurements["shapes"]["longest_axis"] = caliper_length
    outputs.measurements["shapes"]["center-of-mass-x"] = cmx
    outputs.measurements["shapes"]["center-of-mass-y"] = cmy
    outputs.measurements["shapes"]["hull_vertices"] = hull_vertices
    outputs.measurements["shapes"]["in_bounds"] = in_bounds
    outputs.measurements["shapes"]["ellipse_center_x"] = center[0]
    outputs.measurements["shapes"]["ellipse_center_y"] = center[1]
    outputs.measurements["shapes"]["ellipse_major_axis"] = major_axis_length
    outputs.measurements["shapes"]["ellipse_minor_axis"] = minor_axis_length
    outputs.measurements["shapes"]["ellipse_angle"] = angle
    outputs.measurements["shapes"]["ellipse_eccentricity"] = eccentricity

    if params.debug is not None:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (0, 0, 255),
                         params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (0, 0, 255),
                 params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (0, 0, 255),
                 params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (0, 0, 255),
                   params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])),
                 (tuple(caliper_transpose[0])), (0, 0, 255),
                 params.line_thickness)
        if params.debug == 'print':
            print_image(
                ori_img,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_shapes.jpg'))
        elif params.debug == 'plot':
            if len(np.shape(img)) == 3:
                plot_image(ori_img)
            else:
                plot_image(ori_img, cmap='gray')

    # Store images
    outputs.images.append(analysis_images)
    return shape_header, shape_data, analysis_images
Example No. 35
 def __call__(self, pt):
     return 1 if cv2.pointPolygonTest(
         self.cont, (pt[0] + self.patch_size // 2,
                     pt[1] + self.patch_size // 2), False) >= 0 else 0
Example No. 36
def findDefects():
    cap = cv2.VideoCapture(0)
    kernel=(5,5)
    if(cap.isOpened()==False):
        print('Unable to read camera feed')
    count=0
    global cx;global cy;

    while(True):
        ret, frame = cap.read()
        # frame=cv2.resize(frame,(1920,1080))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blur = cv2.bilateralFilter(gray,10,50,50)
        ret,thresh1 = cv2.threshold(blur,110,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
        thresh1 = cv2.morphologyEx(thresh1, cv2.MORPH_OPEN, kernel)
        thresh1 = cv2.morphologyEx(thresh1, cv2.MORPH_CLOSE, kernel)

        blah,contours,hierarchy = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        # blah,contours = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        max_area=0;ci=0;

        for i in range(len(contours)):
            cnt=contours[i]
            area = cv2.contourArea(cnt)
            if(area>max_area):
                max_area=area
                ci=i
        cnt = contours[ci]
        hull = cv2.convexHull(cnt)
        moments = cv2.moments(cnt)
        if moments['m00']!=0:
            cx = int(moments['m10']/moments['m00'])
            cy = int(moments['m01']/moments['m00'])

        #print(cx,cy,end='\r')
        center = (cx,cy)
        # pyautogui.moveTo(cx,cy)
        cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)

        drawing = np.zeros(frame.shape,np.uint8)
        frame = cv2.circle(drawing,center,5,[0,255,255],2)
        frame = cv2.drawContours(drawing,[cnt],0,(0,255,0),2)
        frame = cv2.drawContours(drawing,[hull],0,(0,0,255),2)

        hull = cv2.convexHull(cnt,returnPoints = False)

        defects = cv2.convexityDefects(cnt,hull)

        if defects is None:
            continue

        mind=0;maxd=0;i=0;fin=0;


        for i in range(defects.shape[0]):
            s,e,f,d = defects[i,0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            dist = cv2.pointPolygonTest(cnt,center,True)
            frame = cv2.line(frame,start,end,[255,0,0],2)
            frame = cv2.circle(frame,far,5,[0,0,255],-1)
            a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
            b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
            c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
            angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))
            if angle <= math.pi / 2:
                fin+=1
                #cv2.circle(drawing, far, 8, [211, 84, 0], -1)
        #print('fingers=',fin)


        cv2.imshow('frame',frame)
        if cv2.waitKey(5) & 0xFF == 27:
            break
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
        if(fin<=3):
            count=count+1
            clicker(count)
            

    cap.release()
    cv2.destroyAllWindows()
Example No. 37
def analyze_bound_vertical(img, obj, mask, line_position, filename=False):
    """User-input boundary line tool

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary mask made from selected contours
    mask            = Binary mask made from selected contours
    line_position   = position of boundary line (a value of 0 would draw the line through the left side of the image)
    filename        = False or image name. If defined print image.

    Returns:
    bound_header    = data table column headers
    bound_data      = boundary data table
    analysis_images = output image filenames

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :param line_position: int
    :param filename: str
    :return bound_header: tuple
    :return bound_data: tuple
    :return analysis_images: list
    """

    params.device += 1
    ori_img = np.copy(img)

    # Draw a vertical boundary line through the image, adjusted to the user-input position
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    iy, ix, iz = np.shape(ori_img)
    size = (iy, ix)
    size1 = (iy, ix, 3)
    background = np.zeros(size, dtype=np.uint8)
    wback = (np.zeros(size1, dtype=np.uint8)) + 255
    x_coor = 0 + int(line_position)
    y_coor = int(iy)
    rec_point1 = (0, 0)
    rec_point2 = (x_coor, y_coor - 2)
    cv2.rectangle(background, rec_point1, rec_point2, (255), -1)
    right_contour, right_hierarchy = cv2.findContours(background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    x, y, width, height = cv2.boundingRect(obj)

    if x_coor - x <= 0:
        width_left_bound = 0
        width_right_bound = width
    elif x_coor - x > 0:
        width_1 = x_coor - x
        if width - width_1 <= 0:
            width_left_bound = width
            width_right_bound = 0
        else:
            width_left_bound = x_coor - x
            width_right_bound = width - width_left_bound

    right = []
    left = []
    mask_nonzerox, mask_nonzeroy = np.nonzero(mask)
    obj_points = np.vstack((mask_nonzeroy, mask_nonzerox))
    obj_points1 = np.transpose(obj_points)

    for i, c in enumerate(obj_points1):
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(right_contour[0], xy, measureDist=False)
        if pptest == 1:
            left.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 0, 255))
            cv2.circle(wback, xy, 1, (0, 0, 255))
        else:
            right.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 255, 0))
            cv2.circle(wback, xy, 1, (0, 255, 0))
    right_bound_area = len(right)
    left_bound_area = len(left)
    percent_bound_area_right = ((float(right_bound_area)) / (float(left_bound_area + right_bound_area))) * 100
    percent_bound_area_left = ((float(left_bound_area)) / (float(right_bound_area + left_bound_area))) * 100

    bound_header = [
        'HEADER_BOUNDARY' + str(line_position),
        'width_left_bound',
        'width_right_bound',
        'left_bound_area',
        'percent_left_bound_area',
        'right_bound_area',
        'percent_right_bound_area'
    ]

    bound_data = [
        'BOUNDARY_DATA',
        width_left_bound,
        width_right_bound,
        left_bound_area,
        percent_bound_area_left,
        right_bound_area,
        percent_bound_area_right
    ]

    analysis_images = []

    if left_bound_area or right_bound_area:
        point3 = (x_coor+2, 0)
        point4 = (x_coor+2, y_coor)
        cv2.line(ori_img, point3, point4, (255, 0, 255), 5)
        cv2.line(wback, point3, point4, (255, 0, 255), 5)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if x_coor - x <= 0:
            cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0), 3)
            cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0), 3)
        elif x_coor - x > 0:
            width_1 = x_coor - x
            if width - width_1 <= 0:
                cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)), (255, 0, 0), 3)
                cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (255, 0, 0), 3)
            else:
                cv2.line(ori_img, (x_coor + 2, int(cmy)), (x_coor + width_left_bound, int(cmy)), (255, 0, 0), 3)
                cv2.line(ori_img, (x_coor + 2, int(cmy)), (x_coor - width_right_bound, int(cmy)), (0, 255, 0), 3)
                cv2.line(wback, (x_coor + 2, int(cmy)), (x_coor + width_left_bound, int(cmy)), (255, 0, 0), 3)
                cv2.line(wback, (x_coor + 2, int(cmy)), (x_coor - width_right_bound, int(cmy)), (0, 255, 0), 3)
        if filename:
            # Output images with boundary line, above/below bound area
            out_file = str(filename[0:-4]) + '_boundary' + str(line_position) + '.jpg'
            print_image(ori_img, out_file)
            analysis_images = ['IMAGE', 'boundary', out_file]

    if params.debug is not None:
        point3 = (x_coor+2, 0)
        point4 = (x_coor+2, y_coor)
        cv2.line(ori_img, point3, point4, (255, 0, 255), 5)
        cv2.line(wback, point3, point4, (255, 0, 255), 5)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if x_coor - x <= 0:
            cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0), 3)
            cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0), 3)
        elif x_coor - x > 0:
            width_1 = x_coor - x
            if width - width_1 <= 0:
                cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)), (255, 0, 0), 3)
                cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (255, 0, 0), 3)
            else:
                cv2.line(ori_img, (x_coor + 2, int(cmy)), (x_coor + width_left_bound, int(cmy)), (255, 0, 0), 3)
                cv2.line(ori_img, (x_coor + 2, int(cmy)), (x_coor - width_right_bound, int(cmy)), (0, 255, 0), 3)
                cv2.line(wback, (x_coor + 2, int(cmy)), (x_coor + width_left_bound, int(cmy)), (255, 0, 0), 3)
                cv2.line(wback, (x_coor + 2, int(cmy)), (x_coor - width_right_bound, int(cmy)), (0, 255, 0), 3)
        if params.debug == 'print':
            print_image(wback, os.path.join(params.debug_outdir, str(params.device) + '_boundary_on_white.jpg'))
            print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_boundary_on_img.jpg'))
        if params.debug == 'plot':
            plot_image(wback)
            plot_image(ori_img)

    return bound_header, bound_data, analysis_images
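A hedged usage sketch for analyze_bound_vertical; the file names are invented, and the PlantCV globals it relies on (params, print_image) are assumed to be configured:

# Hypothetical usage sketch (image/mask paths are assumptions)
import cv2
img = cv2.imread('plant.jpg')
mask = cv2.imread('plant_mask.png', 0)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
obj = max(contours, key=cv2.contourArea)          # largest object contour
header, data, images = analyze_bound_vertical(img, obj, mask,
                                              line_position=250,
                                              filename='plant.jpg')
print(dict(zip(header[1:], data[1:])))            # per-side width and area stats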
Example #38
    def check_modular_boxes(self):
        if len(self.contour_to_show) <= 0:
            self.OrientationStatus = "BAD"
            return self.OrientationStatus
        BlockRectanglesGood = []
        BlockRectanglesFlip = []
        TestResultsGood = []
        TestResultsFlip = []
        img_box = cv2.cvtColor(self.image, cv2.COLOR_GRAY2BGR)
        self.OrientationStatus = "BAD"
        #self.box = cv2.minAreaRect(self.contour_to_show)
        box_angle = self.box[2]
        box_dimensions = self.box[1]
        box = cv2.cv.BoxPoints(self.box)  # OpenCV 2 API; on OpenCV 3+ use cv2.boxPoints(self.box)
        box = np.array(box, dtype="int")
        cv2.drawContours(img_box, [box], -1, (255, 0, 0), 2)
        cv2.drawContours(img_box, self.contour_to_show, -1, (0, 255, 0), 2)
        if box_dimensions[0] < box_dimensions[1]:
            box_angle += 90
        #print("box angle: ",box_angle)
        #BlockRectanglesGood=np.zeros((1,4,2), dtype=np.int)

        if box_angle < 0:
            boxSorted = [
                box[box[:, 1].argmin()], box[box[:, 0].argmax()],
                box[box[:, 1].argmax()], box[box[:, 0].argmin()]
            ]
            boxTopRightX = boxSorted[0][0]
            boxTopRightY = boxSorted[0][1]
            boxBottomRightX = boxSorted[1][0]
            boxBottomRightY = boxSorted[1][1]
            boxBottomLeftX = boxSorted[2][0]
            boxBottomLeftY = boxSorted[2][1]
            boxTopLeftX = boxSorted[3][0]
            boxTopLeftY = boxSorted[3][1]
        elif box_angle > 0:
            boxSorted = [
                box[box[:, 1].argmin()], box[box[:, 0].argmax()],
                box[box[:, 1].argmax()], box[box[:, 0].argmin()]
            ]
            boxTopLeftX = boxSorted[0][0]
            boxTopLeftY = boxSorted[0][1]
            boxTopRightX = boxSorted[1][0]
            boxTopRightY = boxSorted[1][1]
            boxBottomRightX = boxSorted[2][0]
            boxBottomRightY = boxSorted[2][1]
            boxBottomLeftX = boxSorted[3][0]
            boxBottomLeftY = boxSorted[3][1]
        else:
            maxX = box[box[:, 0].argmax()][0]
            maxY = box[box[:, 1].argmax()][1]

            #boxSorted=[box[box[:,1].argmin()],box[box[:,0].argmax()],box[box[:,1].argmax()],box[box[:,0].argmin()]]
            for i in range(0, 4):
                if box[i][0] == maxX:
                    if box[i][1] == maxY:
                        boxBottomRightX = box[i][0]
                        boxBottomRightY = box[i][1]
                        #print("bottomRx: "+str(boxBottomRightX)+" bottomRy: "+str(boxBottomRightY))
                    else:
                        boxTopRightX = box[i][0]
                        boxTopRightY = box[i][1]
                        #print("TopRightx: "+str(boxTopRightX)+" TopRighty: "+str(boxTopRightY))
                else:
                    if box[i][1] == maxY:
                        boxBottomLeftX = box[i][0]
                        boxBottomLeftY = box[i][1]
                        #print("bottomLx: "+str(boxBottomLeftX)+" bottomLy: "+str(boxBottomLeftY))
                    else:
                        boxTopLeftX = box[i][0]
                        boxTopLeftY = box[i][1]
                        #print("TopLeftx: "+str(boxTopLeftX)+" TopLefty: "+str(boxTopLeftY))
        boxHeightX = boxBottomLeftX - boxTopLeftX
        boxHeightY = boxBottomLeftY - boxTopLeftY
        boxWidthX = boxBottomRightX - boxBottomLeftX
        boxWidthY = boxBottomRightY - boxBottomLeftY
        #print("box: ",box)
        #print("boxWidthX :"+str(boxWidthX)+" boxWidthY :"+str(boxWidthY)+" boxHeightX :"+str(boxHeightX)+" boxHeightY :"+str(boxHeightY)+" boxTopLeftX :"+str(boxTopLeftX)+" boxTopLeftY :"+str(boxTopLeftY))
        for i in range(0, 10):
            #print("EmptyBoxes: ",self.EmptyBoxes[i])
            if sum(self.EmptyBoxes[i]) > 0:
                #print("we draw rectangle i: ",i)
                #print("EmptyBoxes in if: ",self.EmptyBoxes[i])
                #print("BlockRect: ",BlockRectanglesGood)
                TopLeftX = boxTopLeftX + int(
                    self.EmptyBoxes[i][0] * boxWidthX) + int(
                        self.EmptyBoxes[i][1] * boxHeightX)
                TopLeftY = boxTopLeftY + int(
                    self.EmptyBoxes[i][0] * boxWidthY) + int(
                        self.EmptyBoxes[i][1] * boxHeightY)
                #print("WidthY :"+str(self.EmptyBoxes[i][1]*boxWidthY)+" HeightY: "+str(self.EmptyBoxes[i][1]*boxHeightY))
                TopRightX = TopLeftX + int(self.EmptyBoxes[i][2] * boxWidthX)
                TopRightY = TopLeftY + int(self.EmptyBoxes[i][2] * boxWidthY)
                BottomRightX = TopRightX + int(
                    self.EmptyBoxes[i][3] * boxHeightX)
                BottomRightY = TopRightY + int(
                    self.EmptyBoxes[i][3] * boxHeightY)
                BottomLeftX = BottomRightX - int(
                    self.EmptyBoxes[i][2] * boxWidthX)
                BottomLeftY = BottomRightY - int(
                    self.EmptyBoxes[i][2] * boxWidthY)

                TopLeft = [TopLeftX, TopLeftY]
                TopRight = [TopRightX, TopRightY]
                BottomLeft = [BottomLeftX, BottomLeftY]
                BottomRight = [BottomRightX, BottomRightY]
                to_append = [TopLeft, TopRight, BottomRight, BottomLeft]
                to_append = np.array(to_append, dtype="int")
                BlockRectanglesGood.append(to_append)

                BottomRightX = boxBottomRightX - int(
                    self.EmptyBoxes[i][0] * boxWidthX) - int(
                        self.EmptyBoxes[i][1] * boxHeightX)
                BottomRightY = boxBottomRightY - int(
                    self.EmptyBoxes[i][0] * boxWidthY) - int(
                        self.EmptyBoxes[i][1] * boxHeightY)
                BottomLeftX = BottomRightX - int(
                    self.EmptyBoxes[i][2] * boxWidthX)
                BottomLeftY = BottomRightY - int(
                    self.EmptyBoxes[i][2] * boxWidthY)
                TopLeftX = BottomLeftX - int(
                    self.EmptyBoxes[i][3] * boxHeightX)
                TopLeftY = BottomLeftY - int(
                    self.EmptyBoxes[i][3] * boxHeightY)
                TopRightX = TopLeftX + int(self.EmptyBoxes[i][2] * boxWidthX)
                TopRightY = TopLeftY + int(self.EmptyBoxes[i][2] * boxWidthY)

                TopLeft = [TopLeftX, TopLeftY]
                TopRight = [TopRightX, TopRightY]
                BottomLeft = [BottomLeftX, BottomLeftY]
                BottomRight = [BottomRightX, BottomRightY]
                to_append = [TopLeft, TopRight, BottomRight, BottomLeft]
                to_append = np.array(to_append, dtype="int")
                BlockRectanglesFlip.append(to_append)
        #BlockRectanglesGood = np.array(BlockRectanglesGood, dtype="int")
        #print("we draw rectangle points: ",len(BlockRectanglesFlip))
        statusText = ""
        for i in range(0, len(self.contour_to_show)):
            contourPoint = (self.contour_to_show[i][0][0],
                            self.contour_to_show[i][0][1])
            for k in range(0, len(BlockRectanglesFlip)):
                cv2.drawContours(img_box, [BlockRectanglesGood[k]], -1,
                                 (255, 255, 0), 2)  #light blue
                cv2.drawContours(img_box, [BlockRectanglesFlip[k]], -1,
                                 (0, 255, 255), 2)  #yellow
                if len(TestResultsGood) < k + 1:
                    TestResultsGood.append(
                        cv2.pointPolygonTest(BlockRectanglesGood[k],
                                             contourPoint, False))
                else:
                    if TestResultsGood[k] <= 0:
                        TestResultsGood[k] = cv2.pointPolygonTest(
                            BlockRectanglesGood[k], contourPoint, False)

                if len(TestResultsFlip) < k + 1:
                    TestResultsFlip.append(
                        cv2.pointPolygonTest(BlockRectanglesFlip[k],
                                             contourPoint, False))
                else:
                    if TestResultsFlip[k] <= 0:
                        TestResultsFlip[k] = cv2.pointPolygonTest(
                            BlockRectanglesFlip[k], contourPoint, False)
        #print("TestResultsGood: ",max(TestResultsGood))
        if max(TestResultsGood) < 0 and max(TestResultsFlip) > 0:
            self.OrientationStatus = "GOOD"
        elif max(TestResultsGood) > 0 and max(TestResultsFlip) < 0:
            self.OrientationStatus = "FLIP"
        else:
            self.OrientationStatus = "BAD"
        cv2.putText(
            img_box, " Box Dimensions: " + str(box_dimensions) + " Time: " +
            str(time.time()), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.45,
            (255, 255, 255), 2)
        #cv2.putText(img_box," Box Dimensions: "+str(box_dimensions)+" Time: "+str(time.time())+" Delta Time: "+str(QualityMeasures['deltaTime'])+" Delta Good: "+str(QualityMeasures['deltaGood']),(10,30),cv2.FONT_HERSHEY_SIMPLEX,0.45, (255, 255, 255), 2)
        cv2.putText(
            img_box, "ORIENTATION: " + self.OrientationStatus + " SIZE: " +
            self.SizeStatus, (10, self.dimX - 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
            (255, 255, 255), 2)
        #print("TestResultsFlip: ",TestResultsFlip)
        #if len(TestResultsGood)
        self.analyzedImage = img_box
        return self.OrientationStatus
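The orientation decision above boils down to one primitive: does any contour point land inside a candidate rectangle? A self-contained sketch of that primitive, with invented coordinates:

# pointPolygonTest against a 4-corner rectangle: +1 inside, 0 on edge, -1 outside
import cv2
import numpy as np
rect = np.array([[10, 10], [100, 10], [100, 50], [10, 50]], dtype=np.int32)
for pt in [(50, 30), (200, 200)]:
    result = cv2.pointPolygonTest(rect, pt, False)
    print(pt, 'inside' if result > 0 else 'on edge/outside')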
Example #39
        cy = int(moments['m01'] / moments['m00'])  # cy = M01/M00

    centr = (cx, cy)
    cv2.circle(img, centr, 5, [0, 0, 255], 2)
    cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 2)
    cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 2)

    cnt = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    hull = cv2.convexHull(cnt, returnPoints=False)

    if True:
        defects = cv2.convexityDefects(cnt, hull)
        mind = 0
        maxd = 0
        for i in range(defects.shape[0]):
            s, e, f, d = defects[i, 0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            dist = cv2.pointPolygonTest(cnt, centr, True)  # signed distance from centroid to contour (not used per-defect)
            cv2.line(img, start, end, [0, 255, 0], 2)

            cv2.circle(img, far, 5, [0, 0, 255], -1)
        print(i)
        i = 0
    cv2.imshow('output', drawing)
    cv2.imshow('input', img)

    k = cv2.waitKey(10)
    if k == 27:
        break
Example #40
        topx = 0
        topy = 0
        botx = float('inf')
        boty = float('inf')
        for a in range(0, len(c)):
            if (c[a][0][0] > topx):
                topx = c[a][0][0]
            if (c[a][0][1] > topy):
                topy = c[a][0][1]
            if (c[a][0][0] < botx):
                botx = c[a][0][0]
            if (c[a][0][1] < boty):
                boty = c[a][0][1]
        print(topx, topy, botx, boty)
        for b in range(1, len(discs) + 1):
            dist = cv2.pointPolygonTest(c, (discs[b]), False)
            if (dist > 0):
                inzone.append(b)
        print(discs)

    matchdiscs = matchdiscs + len(inzone)

    if (len(inzone) == 1):

        minzonedist = float('inf')
        mindishdist = minzonedist
        radii = []
        avgradius = 0
        for f in range(0, len(c)):
            xdist = c[f][0][0] - discs[inzone[0]][0]
            ydist = c[f][0][1] - discs[inzone[0]][1]
Example #41
 def contains_point(self, point):
     """Is the provided point inside of the Polygon?"""
     return cv2.pointPolygonTest(self.points, point, False) > 0
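A small sketch of the same test called directly, showing why the > 0 comparison treats boundary points as outside (the vertex array is invented):

# Direct equivalent of contains_point; boundary points return 0, hence False
import cv2
import numpy as np
pts = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], np.int32)
print(cv2.pointPolygonTest(pts, (5, 5), False) > 0)   # True: strictly inside
print(cv2.pointPolygonTest(pts, (0, 5), False) > 0)   # False: on the boundary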
Example #42
def analyze_bound_horizontal(img, obj, mask, line_position, label="default"):
    """User-input boundary line tool

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary mask made from selected contours
    line_position   = position of boundary line (a value of 0 would draw the line through the top of the image)
    label           = optional label parameter, modifies the variable name of observations recorded

    Returns:
    analysis_images = list of output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :param line_position: int
    :param label: str
    :return analysis_images: list
    """

    ori_img = np.copy(img)

    # Draw a horizontal line through the image at the user-input height
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    iy, ix, iz = np.shape(ori_img)
    size = (iy, ix)
    size1 = (iy, ix, 3)
    background = np.zeros(size, dtype=np.uint8)
    wback = (np.zeros(size1, dtype=np.uint8)) + 255
    x_coor = int(ix)
    y_coor = line_position
    rec_corner = int(iy - 2)
    rec_point1 = (1, rec_corner)
    rec_point2 = (x_coor - 2, y_coor - 2)
    cv2.rectangle(background, rec_point1, rec_point2, (255), 1)
    below_contour, below_hierarchy = cv2.findContours(
        background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    x, y, width, height = cv2.boundingRect(obj)

    if y_coor - y <= 0:
        height_above_bound = 0
        height_below_bound = height
    elif y_coor - y > 0:
        height_1 = y_coor - y
        if height - height_1 <= 0:
            height_above_bound = height
            height_below_bound = 0
        else:
            height_above_bound = y_coor - y
            height_below_bound = height - height_above_bound

    below = []
    above = []
    mask_nonzerox, mask_nonzeroy = np.nonzero(mask)
    obj_points = np.vstack((mask_nonzeroy, mask_nonzerox))
    obj_points1 = np.transpose(obj_points)

    for i, c in enumerate(obj_points1):
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(below_contour[0], xy, measureDist=False)
        if pptest == 1:  # point lies inside the below-line rectangle
            below.append(xy)
            cv2.circle(ori_img, xy, 1, (155, 0, 255))
            cv2.circle(wback, xy, 1, (155, 0, 255))
        else:
            above.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 255, 0))
            cv2.circle(wback, xy, 1, (0, 255, 0))
    above_bound_area = len(above)
    below_bound_area = len(below)
    percent_bound_area_above = (
        (float(above_bound_area)) /
        (float(above_bound_area + below_bound_area))) * 100
    percent_bound_area_below = (
        (float(below_bound_area)) /
        (float(above_bound_area + below_bound_area))) * 100

    analysis_images = []

    if above_bound_area or below_bound_area:
        point3 = (0, y_coor - 4)
        point4 = (x_coor, y_coor - 4)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if y_coor - y <= 0:
            cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                     (0, 255, 0), params.line_thickness)
            cv2.line(wback, (int(cmx), y), (int(cmx), y + height), (0, 255, 0),
                     params.line_thickness)
        elif y_coor - y > 0:
            height_1 = y_coor - y
            if height - height_1 <= 0:
                cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), params.line_thickness)
                cv2.line(wback, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
        # Output image with boundary line, above/below bound area
        analysis_images.append(wback)
        analysis_images.append(ori_img)

    if params.debug is not None:
        params.device += 1
        point3 = (0, y_coor - 4)
        point4 = (x_coor, y_coor - 4)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if y_coor - y <= 0:
            cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                     (0, 255, 0), params.line_thickness)
            cv2.line(wback, (int(cmx), y), (int(cmx), y + height), (0, 255, 0),
                     params.line_thickness)
        elif y_coor - y > 0:
            height_1 = y_coor - y
            if height - height_1 <= 0:
                cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), params.line_thickness)
                cv2.line(wback, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
        if params.debug == 'print':
            print_image(
                wback,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_boundary_on_white.png'))
            print_image(
                ori_img,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_boundary_on_img.png'))
        if params.debug == 'plot':
            plot_image(wback)
            plot_image(ori_img)

    outputs.add_observation(sample=label,
                            variable='horizontal_reference_position',
                            trait='horizontal reference position',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='none',
                            datatype=int,
                            value=line_position,
                            label='none')
    outputs.add_observation(sample=label,
                            variable='height_above_reference',
                            trait='height above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='pixels',
                            datatype=int,
                            value=height_above_bound,
                            label='pixels')
    outputs.add_observation(sample=label,
                            variable='height_below_reference',
                            trait='height below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='pixels',
                            datatype=int,
                            value=height_below_bound,
                            label='pixels')
    outputs.add_observation(sample=label,
                            variable='area_above_reference',
                            trait='area above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='pixels',
                            datatype=int,
                            value=above_bound_area,
                            label='pixels')
    outputs.add_observation(sample=label,
                            variable='percent_area_above_reference',
                            trait='percent area above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='none',
                            datatype=float,
                            value=percent_bound_area_above,
                            label='none')
    outputs.add_observation(sample=label,
                            variable='area_below_reference',
                            trait='area below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='pixels',
                            datatype=int,
                            value=below_bound_area,
                            label='pixels')
    outputs.add_observation(sample=label,
                            variable='percent_area_below_reference',
                            trait='percent area below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='none',
                            datatype=float,
                            value=percent_bound_area_below,
                            label='none')

    # Store images
    outputs.images.append(analysis_images)

    return ori_img
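A hedged usage sketch for analyze_bound_horizontal; the paths are invented and the PlantCV outputs/params globals used above are assumed to be configured:

# Hypothetical usage sketch (image/mask paths are assumptions)
import cv2
img = cv2.imread('plant.jpg')
mask = cv2.imread('plant_mask.png', 0)
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
obj = max(contours, key=cv2.contourArea)
annotated = analyze_bound_horizontal(img, obj, mask, line_position=300,
                                     label='plant1')
# height/area observations are recorded via outputs.add_observation above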
Example #43
cv2.circle(img, tuple(cnt[cnt[:, :, 1].argmin()][0]), 2, (0, 0, 255), 2)
cv2.circle(img, tuple(cnt[cnt[:, :, 1].argmax()][0]), 2, (0, 0, 255), 2)
cv2.imshow('img', img)

# region
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
for i in range(defects.shape[0]):
    s, e, f, d = defects[i, 0]
    # cv2.circle(img, tuple(cnt[s][0]), 2, (0, 0, 255), 2)
    # cv2.circle(img, tuple(cnt[e][0]), 2, (0, 255, 0), 2)
    cv2.line(img, tuple(cnt[s][0]), tuple(cnt[e][0]), (0, 0, 255), 1)
    cv2.circle(img, tuple(cnt[f][0]), 2, (0, 255, 0), 2)
cv2.imshow('img', img)

dist = cv2.pointPolygonTest(cnt, (50, 50), True)      # signed distance: positive inside, negative outside
inner = cv2.pointPolygonTest(cnt, (343, 218), False)  # membership only: +1 inside, 0 on edge, -1 outside

img = cv2.imread('f:/index.jpg')
img = cv2.drawContours(img, contours, 7, (0, 0, 255), 2)
img = cv2.drawContours(img, contours, 9, (0, 0, 255), 2)
cv2.imshow('img', img)
cnt1 = contours[7]
cnt2 = contours[9]
cv2.matchShapes(cnt1, cnt2, cv2.CONTOURS_MATCH_I1, 0.0)

# retrieval
image, contours, hierarchy = cv2.findContours(th, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)

# histogram
Example #44
def imgProcess(img):
    # NEED TO SAVE AS SAME THING EACH TIME, 0 converts to grayscale
    # img = cv2.imread('test.jpg', 0)
    # Resize image to 400 pixels wide, preserving aspect ratio
    r = 400.0 / img.shape[1]
    dim = (400, int(img.shape[0] * r))

    # perform the actual resizing of the image and show it
    kSize = 3
    kernel = np.ones((kSize, kSize), np.uint8)
    resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    # smoothed = cv2.filter2D(img, -1, kernel) # Smooth image with 15x15 array
    opening = cv2.morphologyEx(resized, cv2.MORPH_CLOSE, kernel)  # morphological closing (variable name kept from original)
    # Resize image

    # cv2.imwrite('Resized.jpg', resized)

    # apply automatic Canny edge detection using the computed median
    # Define automatic upper and lower thresholds for Canny detection
    v = np.median(opening)
    sigma = 0.333
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edges = cv2.Canny(opening, lower, upper)  # Perform Canny Edge Detection
    # edge_save = cv2.imwrite('edged.jpg', edges)

    # Resized colour version of original image
    smallColr = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    # cv2.imshow('Edges', edges)

    # Find contours in canny image
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    showcontours = cv2.drawContours(smallColr, contours, -1, (0, 255, 0), 3)
    #cv2.imwrite('origcontours.jpg', showcontours)
    #cv2.imshow('original contours',showcontours)
    # for i in range(len(contours)):
    #     color = np.random.rand(3) * 255
    #     cnt_img = cv2.drawContours(cnt_img, contours, i, color, 3)

    from perspCorr2 import perspMatrix
    h = perspMatrix  # import perspective distortion matrix from other file

    # cnt_img is transformed image (transform small colour image)
    # cnt_img = cv2.warpPerspective(smallColr, h, (4000, 3000))
    # cv2.imshow('Warped', cnt_img)
    # cv2.imshow('Original', smallColr)

    # Added finding contour work:
    # This assumes that the robot is already on the line
    # create array containing closest contours to robot

    # duplicate the contours list, dropping any contours that are too short (typically noise);
    # note: list.remove() fails on numpy array elements, so build the filtered list directly
    minLength = 19
    contours_shorten = [cnt for cnt in contours if len(cnt) >= minLength]

    contour_pertr = []  # perspective correction contours
    contours_pertrInts = []

    # iterate through contour list
    for cont3 in range(len(contours_shorten)):
        contours_shorten[cont3] = contours_shorten[cont3].reshape(-1, 2)
        # take each contour in term
        a = contours_shorten[cont3]
        a = np.array([a])
        a = a.astype(float)
        # apply perspective transform to each contour
        contour_pertr.append(cv2.perspectiveTransform(a, h))
        contour_pertr[cont3] = np.reshape(np.ravel(contour_pertr[cont3]),
                                          (-1, 2))
        contours_pertrInts.append(cv2.perspectiveTransform(a, h))
        contours_pertrInts[cont3] = np.reshape(
            np.ravel(contours_pertrInts[cont3]), (-1, 2))
        # contours_pertrInts[cont3] = contours_pertrInts[cont3].astype(int)
        # for ind in range(2):
        #     contours_pertrInts[cont3][cont_pertr][ind] = int(contour_pertr[cont3][cont_pertr][ind])
        # contours_pertrInts[cont3] = np.reshape(np.ravel(contours_pertrInts[cont3]), (-1, 1, 2))
        # contours_pertrInts[cont3].astype(int)
        # contour_pertr[cont3] = np.reshape(np.ravel(contour_pertr[cont3]), (-1, 1, 2))
        # contours_shorten[cont3] = a.reshape(-1, 1, 2)
    # contours = contours3
    # contour_pertr.astype(int)

    # draw contours to image
    # cnt_img2 = cv2.drawContours(cnt_img, contours_pertrInts, -1, (0, 255, 0), 3)
    # cv2.imshow('Contours', cnt_img2)

    closestContours = []
    for i in range(len(contours)):  # iterate through each contour
        for point in range(len(
                contours[i])):  # iterate through the points in each contour
            # find contours with points in the middle 100-300 pixels x and bottom 280-299 y - i.e. closest contours
            # thresholds can be changed to ensure just 2 contours are found
            if 100 <= contours[i][point][0][0] <= 300 and 280 <= contours[i][
                    point][0][1] <= 299:
                closestContours.append(i)
                break

    midpointx = []
    midpointy = []
    mptheta = []
    clcont = []
    #print(len(closestContours))

    if len(closestContours) == 0:  # Not on line

        if len(contours) >= 1:

            for k in range(len(contours)):  # iterate through list of contours
                clcont.append(
                    abs(cv2.pointPolygonTest(contours[k], (200, 299), True))
                )  # calculate the distance between bottom centre of image and contour
            followContour = clcont.index(
                min(clcont)
            )  # find the contour with the shortest distance from point
            #randpoint = [0, 0]
            p = [200, 299]
            #while distance.euclidean(p, randpoint) >= 150 or distance.euclidean(p, randpoint) <= 30:  # choose a random point on contour
            randindex = randint(0, len(contours[followContour]) - 1)
            randpoint = contours[followContour][randindex][0]

            for k in range(-2, 3):  # choose 2 points either side of chosen point (5 total)
                refindex = randindex + k
                if 0 <= refindex < len(
                        contours[followContour]
                ):  # as long as additional points are actually on contour - if a point too close to the end is chosen, other points may fall off end of contour
                    midpointx.append(contours[followContour][refindex][0][0])
                    midpointy.append(
                        contours[followContour][refindex][0][1]
                    )  # creates a set of 5 points to head towards on line

            # plot midpoints
            midptList = np.array(list(zip(
                midpointx,
                midpointy)))  # create one list containing x and y midpoints
            midptList = midptList.reshape(
                (-1, 1, 2))  # ensure midptList is in correct format
            midptList = midptList.reshape(-1, 2)
            b = midptList
            b = np.array([b])
            b = b.astype(float)  # convert to float
            midptList_dist = cv2.perspectiveTransform(
                b, h)  # apply perspective transform to midpoints
            midptList_dist = midptList_dist.reshape(
                (-1, 1,
                 2))  # reshape output array into original contour array form
            # cv2.polylines(cnt_img2, np.int32([midptList_dist]), True, (0, 255, 255), 1)
            # cv2.namedWindow('midpoints')
            # cv2.imshow('midpoints', cnt_img2)

            for midpt1 in range(
                    len(midptList_dist) - 1
            ):  # create array of angles between each midpoint of the transformed midpoints (i.e. actual angles)
                mptheta.append(
                    np.arctan2((midptList_dist[midpt1 + 1][0][1] -
                                midptList_dist[midpt1][0][1]),
                               (midptList_dist[midpt1 + 1][0][0] -
                                midptList_dist[midpt1][0][0])))
            mptheta.append(
                mptheta[len(mptheta) - 1]
            )  # add last midpoint angle on at end, this makes angle array same size as midpoint array

    # else:
    # no contours or lines in image
    # Go to Nessa's code to move  robot to locate new contour

    if len(closestContours) == 1:  # Can only see one contour, choose points on this line to follow

        #randpoint = [200, 150]
        p = [0, 0]
        #while distance.euclidean(p, randpoint) >= 550 or distance.euclidean(p, randpoint) <= 3: # choose random point on contour thats not too close or too far
        randindex = randint(
            0,
            len(contours[closestContours[0]]) -
            1)  # start by choosing random index on closest contour
        randpoint = contours[closestContours[0]][randindex][
            0]  # and then find the corresponding point and check the distance

        # we choose just a few points so that if there is only one horizontal contour the robot can see, it doesnt choose points along the line
        # on both sides of the robot, as we dont want it to try and go in two different directions
        for k in range(-2, 3):  # Again choose 2 points either side of chosen point (5 total)
            refindex = randindex + k
            if 0 <= refindex < len(contours[closestContours[0]]):
                midpointx.append(
                    contours[closestContours[0]][refindex][0][0]
                )  # robot will drive to these points located on the contours
                midpointy.append(contours[closestContours[0]][refindex][0][1])

        # plot midpoints
        midptList = np.array(
            list(zip(midpointx, midpointy)
                 ))  # combine x and y midpoint coordinate data into one list
        midptList = midptList.reshape((-1, 1, 2))

        midptList = midptList.reshape(-1, 2)
        b = midptList
        b = np.array([b])
        b = b.astype(float)
        midptList_dist = cv2.perspectiveTransform(
            b, h)  # apply perspective transform to midpoint data
        midptList_dist = midptList_dist.reshape(
            (-1, 1, 2))  # reshape transformed midpoints back to original shape
        # cv2.polylines(cnt_img2, np.int32([midptList_dist]), True, (0, 255, 255), 1)
        # cv2.namedWindow('midpoints')
        # cv2.imshow('midpoints', cnt_img2)

        for midpt1 in range(len(midptList_dist) -
                            1):  # create array of angles between each midpoint
            # calculate angle between each transformed midpoint - gives real life angle
            mptheta.append(
                np.arctan2((midptList_dist[midpt1 + 1][0][1] -
                            midptList_dist[midpt1][0][1]),
                           (midptList_dist[midpt1 + 1][0][0] -
                            midptList_dist[midpt1][0][0])))
        mptheta.append(mptheta[len(mptheta) - 1])

    if len(closestContours) == 2:  # If two contours are close to robot, then track both

        # Pick one of the two contours to follow at random
        followContour = random.choice(
            closestContours)  # choose one of two contours to follow at random
        # print(followContour)
        # dist2 = []
        # Find the other contour that is not being followed
        m = []
        contours2 = list(contours)  # copy contours list
        del contours2[followContour]  # remove the contour we are following (list.remove fails on ndarray elements)

        for contour in range(
                len(contours2
                    )):  # iterate through contours excluding followed contour
            if len(
                    contours2[contour]
            ) > 19:  # for contours longer than 19, create a combined list of every other contour points
                contourpoints = np.reshape(np.ravel(contours2[contour]),
                                           (-1, 2)).tolist()
                m = m + contourpoints
        # followContourListPosition = closestContours.index(followContour)
        # otherContour = closestContours[abs(followContourListPosition-1)]
        # otherContourList = np.reshape(np.ravel(contours[otherContour]), (-1, 2))

        # For each point in the followed contour, find the closest point in the other contour
        tree = spatial.KDTree(m)  # create tree of all contour points
        for pt in range(0, len(contours[followContour]),
                        1):  # iterate through follow contour
            p = contours[followContour][pt][
                0]  # for each point on the follow contour
            pointonLine = m[tree.query(p)
                            [1]]  # find the closest point on the other contour
            lineDistance = distance.euclidean(
                p, pointonLine)  # find the distance between the two lines
            if 5 <= lineDistance <= 130:  # if distance between lines is around tape width - i.e. lines not diverging (may need to change)
                midpointx.append((p[0] + pointonLine[0]) / 2)  # add midpoints
                midpointy.append((p[1] + pointonLine[1]) / 2)

        # plot midpoints
        midptList = np.array(list(
            zip(midpointx,
                midpointy)))  # combine midpoint coordinates into one list
        midptList = midptList.reshape(
            (-1, 1, 2))  # reshape this new list into the original shape

        midptList = midptList.reshape(-1, 2)
        b = midptList
        b = np.array([b])
        b = b.astype(float)
        midptList_dist = cv2.perspectiveTransform(
            b, h
        )  # find perspecitve transform of the midpoints i.e. map them to real life positions
        midptList_dist = midptList_dist.reshape((-1, 1, 2))
        # cv2.polylines(cnt_img2, np.int32([midptList_dist]), True, (0, 255, 255), 1)

        for midpt1 in range(len(midptList_dist) -
                            1):  # create array of angles between each midpoint
            # iterate through corrected midpoint list, and calculate angle between each midpoint - as midpoints have been persp.corrected, these angles will be real life angle
            mptheta.append(
                np.arctan2((midptList_dist[midpt1 + 1][0][1] -
                            midptList_dist[midpt1][0][1]),
                           (midptList_dist[midpt1 + 1][0][0] -
                            midptList_dist[midpt1][0][0])))
        mptheta.append(
            mptheta[len(mptheta) - 1]
        )  # add on last angle to list to make angle list same length as midpoint list

        # cv2.namedWindow('midpoints')
        # cv2.imshow('midpoints', cnt_img2)
        # cv2.imwrite('midpts.jpg', cnt_img2)

    if len(closestContours) == 3:  # 3 contours found in radius i.e. t junction

        dist3 = []
        randcont = []
        # ** Old Code **
        #    for cont3 in range(len(closestContours)):
        #        dist3.append(abs(cv2.pointPolygonTest(contours[closestContours[cont3]], (200, 299), True)))
        #    max_dist_index = dist3.index(max(dist3))
        #    furthestcontour = closestContours[max_dist_index]
        #    randcont.append(furthestcontour) # always follow new contour (starts furthest away)
        #    closestContours.remove(furthestcontour)
        #    randcont.append(random.choice(closestContours))
        #    followContour = random.choice(randcont) # choose at random one of other two contours to follow
        #    followContourListPosition = randcont.index(followContour)
        #    otherContour = randcont[abs(followContourListPosition-1)] # choose at random which contour will be the main contour to follow
        #    otherContourList = np.reshape(np.ravel(contours[otherContour]), (-1, 2))

        # **New Code **
        correctcontour = 0
        while correctcontour == 0:
            followContour = random.choice(
                closestContours)  # choose random contour to follow
            # print(followContour)

            # Find the other contour that is not being followed
            for cnt3 in range(len(contours)):  # iterate through contour list
                dist3 = []
                if cnt3 != followContour:  # don't choose the contour that is being followed
                    for c in range(
                            0, len(contours[followContour]),
                            round(len(contours[followContour]) /
                                  10)):  # choose 10 points along contour
                        fcPt = contours[followContour][c][
                            0]  # points on follow contour
                        dist3.append(
                            abs(
                                cv2.pointPolygonTest(contours[cnt3],
                                                     tuple(fcPt.tolist()),
                                                     True))
                        )  # find distance between follow contour and these 10 points
                    if all(
                            10 <= dst <= 130 for dst in dist3
                    ):  # if all distances are within a set distance, assume the contours are parallel
                        cnt3list = np.reshape(np.ravel(contours[cnt3]),
                                              (-1, 2))
                        tree = spatial.KDTree(cnt3list)
                        pointonLine = cnt3list[tree.query(
                            fcPt)[1]]  # find nearest point on other contour
                        midx = int(round((fcPt[0] + pointonLine[0]) / 2))
                        midy = int(round((fcPt[1] + pointonLine[1]) / 2))
                        midpt = [midx, midy]
                        if resized[midy][
                                midx] < 100:  # check that this midpoint is on a black line, and not the white gap between two lines
                            otherContour = cnt3
                            correctcontour = 1  # set flag to break from while loop
                            break

        otherContourList = np.reshape(np.ravel(contours[otherContour]),
                                      (-1, 2))

        # For each point in the followed contour, find the closest point in the other contour

        tree = spatial.KDTree(
            otherContourList)  # create tree of points on other contour
        for pt in range(len(
                contours[followContour])):  # iterate through contour followed
            p = contours[followContour][pt][0]
            pointonLine = otherContourList[tree.query(
                p
            )[1]]  # find closest point on other contour to each point on follow contour
            lineDistance = distance.euclidean(p, pointonLine)
            if 5 <= lineDistance <= 150:  # if distance between lines is around tape width - i.e. lines not diverging (may need to change)
                midpointx.append((p[0] + pointonLine[0]) / 2)  # add midpoints
                midpointy.append((p[1] + pointonLine[1]) / 2)

        # plot midpoints
        midptList = np.array(list(zip(
            midpointx, midpointy)))  # join x and y midpoints into one list

        midptList = midptList.reshape(-1, 2)
        b = midptList
        b = np.array([b])
        b = b.astype(float)
        midptList_dist = cv2.perspectiveTransform(
            b, h)  # apply perspective transforms to midpoints
        midptList_dist = midptList_dist.reshape((-1, 1, 2))
        #cv2.polylines(cnt_img2, np.int32([midptList_dist]), True, (0, 255, 255), 1)

        for midpt1 in range(len(midptList_dist) -
                            1):  # create array of angles between each midpoint
            # find angle between each midpoint
            mptheta.append(
                np.arctan2((midptList_dist[midpt1 + 1][0][1] -
                            midptList_dist[midpt1][0][1]),
                           (midptList_dist[midpt1 + 1][0][0] -
                            midptList_dist[midpt1][0][0])))
        mptheta.append(mptheta[len(mptheta) - 1])

        #cv2.namedWindow('midpoints')
        # cv2.imshow('midpoints', cnt_img2)
        #cv2.imwrite('midpointsnew',)

    if len(closestContours) > 3:

        for k in range(len(contours)):  # iterate through contours
            clcont.append(
                abs(cv2.pointPolygonTest(contours[k], (200, 299), True))
            )  # create array of distances between robot and each contour
        followContour = clcont.index(
            min(clcont))  # once all distances are known, follow the closest contour
        randindex = randint(0, len(contours[followContour]) - 1)
        randpoint = contours[followContour][randindex][0]

        for k in range(-2, 3):  # choose 2 points either side of the chosen point
            refindex = randindex + k
            if 0 <= refindex < len(contours[followContour]):
                midpointx.append(contours[followContour][refindex][0][0])
                midpointy.append(contours[followContour][refindex][0][1])

        # plot midpoints
        midptList = np.array(list(zip(midpointx, midpointy)))
        midptList = midptList.reshape(-1, 2)
        b = np.array([midptList]).astype(float)
        midptList_dist = cv2.perspectiveTransform(b, h)
        midptList_dist = midptList_dist.reshape((-1, 1, 2))

        for midpt1 in range(len(midptList_dist) -
                            1):  # create array of angles between each midpoint
            mptheta.append(
                np.arctan2((midptList_dist[midpt1 + 1][0][1] -
                            midptList_dist[midpt1][0][1]),
                           (midptList_dist[midpt1 + 1][0][0] -
                            midptList_dist[midpt1][0][0])))
        mptheta.append(mptheta[len(mptheta) - 1])

#    end_time = time.time()-start_time
#print('finished')
    return [midptList_dist,
            mptheta]  # return list of x and y midpoints and angle list


##test:
#img = cv2.imread('test5.jpg',0)
#[midptList_dist, mptheta] = imgProcess(img)
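A runnable variant of the commented test above; it assumes test5.jpg and the perspCorr2 module are present, and that at least one contour is found:

if __name__ == '__main__':
    img = cv2.imread('test5.jpg', 0)   # file name is an assumption
    midptList_dist, mptheta = imgProcess(img)
    print(len(mptheta), 'heading angles; first midpoint:', midptList_dist[0][0])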
Example #45
    def main(self):
        #model
        prototxt = 'MobileNetSSD_deploy.prototxt.txt'
        model = 'MobileNetSSD_deploy.caffemodel'

        CLASSES = ["car"]

        # load our serialized model from disk
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe(prototxt, model)

        # m = coordinates
        m = ((48, 443), (196, 367), (446, 581), (243, 685))

        # Read a image
        I = cv2.imread('images/image.jpg')
        # I = frame

        # First find the minX minY maxX and maxY of the polygon
        minX = I.shape[1]
        maxX = -1
        minY = I.shape[0]
        maxY = -1
        for point in m:
            x = point[0]
            y = point[1]

            if x < minX:
                minX = x
            if x > maxX:
                maxX = x
            if y < minY:
                minY = y
            if y > maxY:
                maxY = y

        # Go over the points in the image: if they are outside of the enclosing
        # rectangle, leave them zero; if not, check whether they are inside the polygon
        cropedImage = np.zeros_like(I)
        for y in range(0, I.shape[0]):
            for x in range(0, I.shape[1]):

                if x < minX or x > maxX or y < minY or y > maxY:
                    continue

                if cv2.pointPolygonTest(np.asarray(m), (x, y), False) >= 0:
                    cropedImage[y, x, 0] = I[y, x, 0]
                    cropedImage[y, x, 1] = I[y, x, 1]
                    cropedImage[y, x, 2] = I[y, x, 2]

        # Now we can crop down to just the enclosing rectangle
        finalImage = cropedImage[minY:maxY, minX:maxX]

        image = finalImage
        (h, w) = image.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass the blob through the network and obtain the detections and
        # predictions
        print("[INFO] computing object detections...")
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            # print(type(confidence))
            if confidence >= 0.2:
                print('confidence')
            else:
                print('no detection')

        cv2.imshow('d', finalImage)
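The per-pixel pointPolygonTest loop above is O(width x height) in pure Python; a common faster alternative (not what this example uses) rasterizes the polygon once with cv2.fillPoly and masks the image in one call:

# Sketch of a vectorized polygon crop (image path copied from the example above)
import cv2
import numpy as np
m = np.array([(48, 443), (196, 367), (446, 581), (243, 685)], dtype=np.int32)
I = cv2.imread('images/image.jpg')
mask = np.zeros(I.shape[:2], dtype=np.uint8)
cv2.fillPoly(mask, [m], 255)                  # rasterize the polygon interior
cropped = cv2.bitwise_and(I, I, mask=mask)    # zero out everything outside it
x, y, w, h = cv2.boundingRect(m)              # tight enclosing rectangle
finalImage = cropped[y:y + h, x:x + w]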
Example #46
def acute(obj, mask, win, thresh):
    """acute: identify landmark positions within a contour for morphometric analysis

    Inputs:
    obj         = An opencv contour array of interest to be scanned for landmarks
    mask        = binary mask used to generate contour array (necessary for ptvals)
    win         = maximum cumulative pixel distance window for calculating angle
                  score; 1 cm in pixels often works well
    thresh      = angle score threshold to be applied for mapping out landmark
                  coordinate clusters within each contour


    Outputs:
    homolog_pts = pseudo-landmarks selected from each landmark cluster
    start_pts   = pseudo-landmark island starting position; useful in parsing homolog_pts in downstream analyses
    stop_pts    = pseudo-landmark island end position ; useful in parsing homolog_pts in downstream analyses
    ptvals      = average values of pixel intensity from the mask used to generate cont;
                  useful in parsing homolog_pts in downstream analyses
    chain       = raw angle scores for entire contour, used to visualize landmark
                  clusters
    max_dist    = supplemental table which stores coordinates, distance from
                  landmark cluster edges, and angle score for the entire contour. Used
                  in troubleshooting.

    :param obj: ndarray
    :param mask: ndarray
    :param win: int
    :param thresh: int
    :return homolog_pts: ndarray
    :return start_pts: ndarray
    :return stop_pts: ndarray
    :return ptvals: list
    :return chain: list
    :return max_dist: list
    """
    chain = []                                         # Create empty chain to store angle scores
    for k in list(range(len(obj))):                    # Coordinate-by-coordinate 3-point assignments
        vert = obj[k]
        dist_1 = 0
        for r in range(len(obj)):                      # Reverse scan to obtain point A
            rev = k - r
            pos = obj[rev]
            dist_2 = np.sqrt(np.square(pos[0][0]-vert[0][0])+np.square(pos[0][1]-vert[0][1]))
            if r >= 2:
                if (dist_2 > dist_1) & (dist_2 <= win):  # Further from vertex than current pt A while within window?
                    dist_1 = dist_2
                    ptA = pos                              # Load best fit within window as point A
                elif dist_2 > win:
                    break
            else:
                ptA = pos
        dist_1 = 0
        for f in range(len(obj)):                      # Forward scan to obtain point B
            fwd = k + f
            if fwd >= len(obj):
                fwd -= len(obj)
            pos = obj[fwd]
            dist_2 = np.sqrt(np.square(pos[0][0]-vert[0][0])+np.square(pos[0][1]-vert[0][1]))
            if f >= 2:
                if (dist_2 > dist_1) & (dist_2 <= win):  # Further from vertex than current pt B while within window?
                    dist_1 = dist_2
                    ptB = pos                              # Load best fit within window as point B
                elif dist_2 > win:
                    break
            else:
                ptB = pos

        # Angle in radians derived from Law of Cosines, converted to degrees
        P12 = np.sqrt((vert[0][0]-ptA[0][0])*(vert[0][0]-ptA[0][0])+(vert[0][1]-ptA[0][1])*(vert[0][1]-ptA[0][1]))
        P13 = np.sqrt((vert[0][0]-ptB[0][0])*(vert[0][0]-ptB[0][0])+(vert[0][1]-ptB[0][1])*(vert[0][1]-ptB[0][1]))
        P23 = np.sqrt((ptA[0][0]-ptB[0][0])*(ptA[0][0]-ptB[0][0])+(ptA[0][1]-ptB[0][1])*(ptA[0][1]-ptB[0][1]))
        dot = (P12*P12 + P13*P13 - P23*P23)/(2*P12*P13)

        # Used a random number generator to test if either of these cases were possible but neither is possible
        # if dot > 1:              # If float exceeds 1 prevent arcos error and force to equal 1
        #     dot = 1
        # elif dot < -1:           # If float exceeds -1 prevent arcos error and force to equal -1
        #     dot = -1
        ang = math.degrees(math.acos(dot))
        chain.append(ang)

    index = []                      # Index chain to find clusters below angle threshold

    for c in range(len(chain)):     # Identify links in chain with acute angles
        if float(chain[c]) <= thresh:
            index.append(c)         # Append positions of acute links to index

    acute_pos = obj[index]            # Extract all island points blindly

    float(len(acute_pos)) / float(len(obj))  # Proportion of informative positions

    if len(index) != 0:

        isle = []
        island = []

        for c in range(len(index)):           # Scan for iterative links within index
            if not island:
                island.append(index[c])       # Initiate new link island
            elif island[-1]+1 == index[c]:
                island.append(index[c])       # Append successful iteration to island
            elif island[-1]+1 != index[c]:
                ptA = obj[index[c]]
                ptB = obj[island[-1]+1]
                dist = np.sqrt(np.square(ptA[0][0]-ptB[0][0])+np.square(ptA[0][1]-ptB[0][1]))
                if win/2 > dist:
                    island.append(index[c])
                else:
                    isle.append(island)
                    island = [index[c]]

        isle.append(island)

        if len(isle) > 1:
            if (isle[0][0] == 0) & (isle[-1][-1] == (len(chain)-1)):
                print('Fusing contour edges')

                # Cannot add a range and a list (or int)
                # island = range(-(len(chain)-isle[-1][0]), 0)+isle[0]  # Fuse overlapping ends of contour
                # Delete islands to be spliced if start-end fusion required
                del isle[0]
                del isle[-1]
                # isle.insert(0, island)      # Prepend island to isle
        else:
            print('Microcontour...')

        # Homologous point maximum distance method
        pt = []
        vals = []
        maxpts = []
        SSpts = []
        TSpts = []
        ptvals = []
        max_dist = [['cont_pos', 'max_dist', 'angle']]
        for x in range(len(isle)):

            # Identify if contour is concavity/convexity using image mask
            pix_x, pix_y, w, h = cv2.boundingRect(obj[isle[x]])  # Obtain local window around island

            for c in range(w):
                for r in range(h):
                    # Identify pixels in local window internal to the island hull
                    pos = cv2.pointPolygonTest(obj[isle[x]], (pix_x+c, pix_y+r), False)
                    if pos > 0:
                        vals.append(mask[pix_y+r][pix_x+c])  # Store pixel value if internal
            if len(vals) > 0:
                ptvals.append(sum(vals)/len(vals))
                vals = []
            else:
                ptvals.append('NaN')        # If no values can be retrieved (small/collapsed contours)
                vals = []

            # Identify pixel coordinate to use as pseudolandmark for island
            if len(isle[x]) == 1:           # If landmark is a single point (store position)
                # print 'route A'
                pt = isle[x][0]
                max_dist.append([isle[x][0], '-', chain[isle[x][0]]])
                # print pt
            elif len(isle[x]) == 2:         # If landmark is a pair of points (store more acute position)
                # print 'route B'
                ptA = chain[isle[x][0]]
                ptB = chain[isle[x][1]]
                if ptA < ptB:
                    pt = isle[x][0]             # Store point A if more acute
                    max_dist.append([isle[x][0], '-', chain[isle[x][0]]])
                elif ptA > ptB:
                    pt = isle[x][1]             # Store point B if more acute
                    max_dist.append([isle[x][1], '-', chain[isle[x][1]]])
                # print pt
            else:                           # If landmark is multiple points (distance scan for position)
                # print 'route C'
                SS = obj[[isle[x]]][0]          # Store isle "x" start site
                TS = obj[[isle[x]]][-1]         # Store isle "x" termination site
                dist_1 = 0
                for d in range(len(isle[x])):   # Scan from SS to TS within isle "x"
                    site = obj[[isle[x][d]]]
                    SSd = np.sqrt(np.square(SS[0][0]-site[0][0][0])+np.square(SS[0][1]-site[0][0][1]))
                    TSd = np.sqrt(np.square(TS[0][0]-site[0][0][0])+np.square(TS[0][1]-site[0][0][1]))
                    # Current mean distance of 'd' to 'SS' & 'TS'
                    dist_2 = np.mean([np.abs(SSd), np.abs(TSd)])
                    max_dist.append([isle[x][d], dist_2, chain[isle[x][d]]])
                    if dist_2 > dist_1:                           # Is the current mean distance a better fit than the previous best?
                        pt = isle[x][d]
                        dist_1 = dist_2                           # Current mean becomes new best mean
                # print pt
            maxpts.append(pt)           # Store the best-fit point before the next distance scan
            SSpts.append(isle[x][0])
            TSpts.append(isle[x][-1])

        homolog_pts = obj[maxpts]
        start_pts = obj[SSpts]
        stop_pts = obj[TSpts]

        return homolog_pts, start_pts, stop_pts, ptvals, chain, max_dist
    else:
        return [], [], [], [], [], []
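# Editor's note: a minimal, self-contained sketch of the Law of Cosines angle
# computation used above. The helper name `vertex_angle` is hypothetical; the
# acos argument is clamped defensively, even though the function above found
# clamping unnecessary in randomized testing.
import math

def vertex_angle(pt_a, vert, pt_b):
    """Interior angle (degrees) at `vert` formed with `pt_a` and `pt_b`."""
    p12 = math.dist(vert, pt_a)
    p13 = math.dist(vert, pt_b)
    p23 = math.dist(pt_a, pt_b)
    cos_theta = (p12 * p12 + p13 * p13 - p23 * p23) / (2 * p12 * p13)
    cos_theta = max(-1.0, min(1.0, cos_theta))  # guard against float drift
    return math.degrees(math.acos(cos_theta))

# vertex_angle((0, 0), (1, 0), (1, 1)) == 90.0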
Exemplo n.º 47
0
plt.imshow(NewMaxFlare)

#contours on MinFlare image
contours2, _ = cv2.findContours(MinFlare.astype('uint8'), cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)[-2:]
NewMinFlare = cv2.drawContours(MinFlare.astype('uint8'), contours2, -1,
                               (0, 0, 255), 1)
plt.figure()
plt.title('Contours on AIA 1700 (at time of flare minimum)')
plt.imshow(NewMinFlare)

area = []
for row in range(MaxFlare.shape[0]):
    for col in range(MaxFlare.shape[1]):
        for n in contours:
            # pointPolygonTest expects an (x, y) point, i.e. (column, row)
            if cv2.pointPolygonTest(n, (col, row),
                                    measureDist=False) == 1:
                area.append([row, col])

#contours on  Magnetogram (max)
NewMag = cv2.drawContours(Mag.astype('uint8'), contours, -1, (0, 0, 255), 1)
plt.figure()
plt.title('Contours on Magnetogram (at time of flare maximum)')
plt.imshow(NewMag)
#contours on Continuum
NewCon = cv2.drawContours(Con.astype('uint8'), contours, -1, (0, 0, 255), 1)
plt.figure()
plt.title('Contours on Continuum (at time of flare maximum)')
plt.imshow(NewCon)

#contours on  Magnetogram (min)
NewMag2 = cv2.drawContours(Mag.astype('uint8'), contours2, -1, (0, 0, 255), 1)
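# Editor's note: the triple pixel loop earlier in this example calls
# pointPolygonTest once per pixel per contour; a filled contour mask yields
# the same interior pixels in one pass. A sketch under the same assumptions
# (`contours`, `MaxFlare` as above); note the mask also includes boundary
# pixels, unlike the strict `== 1` test.
import cv2
import numpy as np

def interior_pixels(shape, contours):
    mask = np.zeros(shape[:2], dtype=np.uint8)
    cv2.drawContours(mask, contours, -1, 255, thickness=cv2.FILLED)
    return np.argwhere(mask > 0)  # each entry is (row, col)

# area = interior_pixels(MaxFlare.shape, contours).tolist()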
Exemplo n.º 48
0
 def __iter__(self):
     for x in range(self.min[0], self.max[0] + 1):
         for y in range(self.min[1], self.max[1] + 1):
             if cv2.pointPolygonTest(self._contour, (x, y),
                                     measureDist=False) >= 0:
                 yield x, y
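# Editor's note: the __iter__ above presupposes an enclosing class with a
# contour and inclusive bounding-box corners; a hypothetical minimal host
# class, for context only.
import cv2

class ContourRegion:
    def __init__(self, contour):
        self._contour = contour
        x, y, w, h = cv2.boundingRect(contour)
        self.min = (x, y)                    # top-left corner
        self.max = (x + w - 1, y + h - 1)    # bottom-right corner, inclusive

    def __iter__(self):
        for x in range(self.min[0], self.max[0] + 1):
            for y in range(self.min[1], self.max[1] + 1):
                if cv2.pointPolygonTest(self._contour, (x, y),
                                        measureDist=False) >= 0:
                    yield x, y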
Exemplo n.º 49
0
    def random_warp_landmarks(self, image, src_points=None, dst_points=None):
        """ get warped image, target image and target mask
            From DFAKER plugin """
        logger.trace("Randomly warping landmarks")
        size = image.shape[0]
        coverage = self.get_coverage(image) // 2

        p_mx = size - 1
        p_hf = (size // 2) - 1

        edge_anchors = [(0, 0), (0, p_mx), (p_mx, p_mx), (p_mx, 0), (p_hf, 0),
                        (p_hf, p_mx), (p_mx, p_hf), (0, p_hf)]
        grid_x, grid_y = np.mgrid[0:p_mx:complex(size), 0:p_mx:complex(size)]

        source = src_points
        destination = (dst_points.copy().astype('float32') +
                       np.random.normal(size=dst_points.shape, scale=2.0))
        destination = destination.astype('uint8')

        face_core = cv2.convexHull(
            np.concatenate(  # pylint:disable=no-member
                [source[17:], destination[17:]],
                axis=0).astype(int))

        source = [(pty, ptx) for ptx, pty in source] + edge_anchors
        destination = [(pty, ptx) for ptx, pty in destination] + edge_anchors

        indices_to_remove = set()
        for fpl in source, destination:
            for idx, (pty, ptx) in enumerate(fpl):
                if idx > 17:
                    break
                elif cv2.pointPolygonTest(
                        face_core,  # pylint:disable=no-member
                        (pty, ptx),
                        False) >= 0:
                    indices_to_remove.add(idx)

        for idx in sorted(indices_to_remove, reverse=True):
            source.pop(idx)
            destination.pop(idx)

        grid_z = griddata(destination,
                          source, (grid_x, grid_y),
                          method="linear")
        map_x = np.append([], [ar[:, 1] for ar in grid_z]).reshape(size, size)
        map_y = np.append([], [ar[:, 0] for ar in grid_z]).reshape(size, size)
        map_x_32 = map_x.astype('float32')
        map_y_32 = map_y.astype('float32')

        warped_image = cv2.remap(
            image,  # pylint:disable=no-member
            map_x_32,
            map_y_32,
            cv2.INTER_LINEAR,  # pylint:disable=no-member
            cv2.BORDER_TRANSPARENT)  # pylint:disable=no-member
        target_image = image

        # TODO Make sure this replacement is correct
        slices = slice(size // 2 - coverage, size // 2 + coverage)
        #        slices = slice(size // 32, size - size // 32)  # 8px on a 256px image
        warped_image = cv2.resize(  # pylint:disable=no-member
            warped_image[slices, slices, :],
            (self.input_size, self.input_size), cv2.INTER_AREA)  # pylint:disable=no-member
        logger.trace("Warped image shape: %s", warped_image.shape)
        target_images = [
            cv2.resize(
                target_image[slices, slices, :],  # pylint:disable=no-member
                (size, size),
                cv2.INTER_AREA)  # pylint:disable=no-member
            for size in self.output_sizes
        ]

        logger.trace("Target image shapea: %s",
                     [img.shape for img in target_images])
        return self.compile_images(warped_image, target_images)
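# Editor's note: a stripped-down sketch of the griddata -> cv2.remap pattern
# above, with hypothetical control points. Points are (row, col) pairs as in
# the method (scipy's linear interpolator accepts vector-valued samples), and
# remap requires float32 coordinate maps.
import cv2
import numpy as np
from scipy.interpolate import griddata

size = 64
grid_x, grid_y = np.mgrid[0:size - 1:complex(size), 0:size - 1:complex(size)]

dst_pts = np.array([(0, 0), (0, 63), (63, 0), (63, 63), (30, 34)], float)
src_pts = np.array([(0, 0), (0, 63), (63, 0), (63, 63), (32, 32)], float)

grid_z = griddata(dst_pts, src_pts, (grid_x, grid_y), method="linear")
map_x = grid_z[:, :, 1].astype("float32")  # x (column) to sample per pixel
map_y = grid_z[:, :, 0].astype("float32")  # y (row) to sample per pixel

image = np.random.randint(0, 255, (size, size, 3), np.uint8)
warped = cv2.remap(image, map_x, map_y, cv2.INTER_LINEAR,
                   borderMode=cv2.BORDER_REPLICATE)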
Exemplo n.º 50
0
def create_cloth_mesh(cloth_img, cloth_contours):

    points = []

    height, width, _ = cloth_img.shape

    # 1.1 get the biggest/external contour
    maxcontouridx = 0
    maxcontourlen = 0
    for i in range(len(cloth_contours)):
        if maxcontourlen < len(cloth_contours[i]):
            maxcontourlen = len(cloth_contours[i])
            maxcontouridx = i
    max_contour = cloth_contours[maxcontouridx]

    # get all mesh points/vertices and handle points

    # note control vertices
    handle_v_list = []

    # 1.2 add sampled points from contour
    # seglen = maxcontourlen//20
    seglen = maxcontourlen // 30

    vidx = 0
    for ind, each in enumerate(max_contour):

        if ind % seglen == 0 and ind > 0 and not _checkCloseToHandles(
                handle_v_list, points, each[0]):
            if vidx not in handle_v_list:
                handle_v_list.append(
                    vidx)  # now we add only contours for handles
            points.append(tuple(each[0]))  # add mesh vertices also
            vidx = vidx + 1
        else:
            try:
                # check angles of the points, take acute or smaller obtuse angles for adding to control points
                this_angle = get_angle(max_contour[ind - 5][0],
                                       max_contour[ind][0],
                                       max_contour[ind + 5][0])
                if this_angle < 150 and not _checkCloseToHandles(
                        handle_v_list, points, each[0]):
                    # if this_angle < 150:
                    if vidx not in handle_v_list:
                        handle_v_list.append(
                            vidx)  # now we add only contours for handles
                    points.append(tuple(each[0]))  # add mesh vertices also
                    vidx = vidx + 1
            except Exception as err:
                print(err)

    # 2. the bounding box

    # get minimum and maximum of the contour
    mc_x = max_contour[:, :, 0]
    mc_y = max_contour[:, :, 1]
    xmin = min(mc_x)[0]
    ymin = min(mc_y)[0]
    xmax = max(mc_x)[0]
    ymax = max(mc_y)[0]

    seglen = (xmax - xmin) // 10  # 10  # 20
    # add points from inside cloth
    for _y in range(int((ymax - ymin) / seglen)):
        for _x in range(int((xmax - xmin) / seglen)):
            x, y = xmin + seglen * _x, ymin + seglen * _y  # bug fixed 2020. 8. 16

            if ymin <= y <= ymax and xmin <= x <= xmax:
                # signed distance: positive means inside the cloth contour
                dist = cv2.pointPolygonTest(max_contour, (x, y), True)
                if dist > seglen / 4:  # inside and not too close to the contour
                    points.append((x, y))

    # 3 list to numpy array @TODO
    o_vertices = np.asarray(points)
    o_handles = np.asarray(handle_v_list)

    # 4 build triangles
    # @Note: now we generate rectangle mesh only, so do not need to use Subdiv2D
    rect = (xmin, ymin, xmax, ymax)  # NB: Subdiv2D's rect is (x, y, width, height); these values over-cover but still contain all points
    # @TODO Do we need to use opencv ? We could build more easily
    subdiv = cv2.Subdiv2D(rect)
    for p in points:
        subdiv.insert(p)

    # Subdiv2D adds virtual outer vertices, so getTriangleList() can return
    # triangles with negative / out-of-rect coordinates; they are filtered below
    triangleList = subdiv.getTriangleList()

    # 5 build triangles (of indices of vertices) from point locations
    triangles = np.zeros((len(triangleList), 3), dtype=int)

    tidx = 0
    for t in triangleList:

        x, y = t[0], t[1]
        if (xmin > x or x > xmax) or (
                ymin > y or y > ymax):  # often subdiv2d gives out of rectangle
            continue
        x, y = t[2], t[3]
        if (xmin > x or x > xmax) or (
                ymin > y or y > ymax):  # often subdiv2d gives out of rectangle
            continue
        x, y = t[4], t[5]
        if (xmin > x or x > xmax) or (
                ymin > y or y > ymax):  # often subdiv2d gives out of rectangle
            continue

        idx0 = _findNearestinMesh(o_vertices, (t[0], t[1]))
        idx1 = _findNearestinMesh(o_vertices, (t[2], t[3]))
        idx2 = _findNearestinMesh(o_vertices, (t[4], t[5]))

        # get the triangle center
        if False:  # disabled: optionally keep only triangles whose centre lies inside the cloth
            centerX = (points[idx0][0] + points[idx1][0] + points[idx2][0]) / 3
            centerY = (points[idx0][1] + points[idx1][1] + points[idx2][1]) / 3

            # check if inside cloth contour
            if cv2.pointPolygonTest(max_contour,
                                    (centerX, centerY), True) >= 0:
                triangles[tidx] = (idx0, idx1, idx2)
                tidx = tidx + 1
        else:
            triangles[tidx] = (idx0, idx1, idx2)
            tidx = tidx + 1

    triangles = np.resize(triangles, (tidx, 3))  # remove triangle out of cloth

    # 3. Finally create meshes and handle points objects
    o_mesh = TriangleMesh(o_vertices, triangles, o_handles)
    handle_tracker = ControlPtsTrack(handle_v_list)

    for each in handle_v_list:
        handle_tracker.srcPos.append(o_mesh.vertices[each, :])

    handle_tracker.tgtPos = handle_tracker.srcPos.copy()

    return o_mesh, handle_tracker
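# Editor's note: a standalone sketch of the cv2.Subdiv2D Delaunay step used
# above. Subdiv2D adds virtual outer vertices, which is why getTriangleList()
# can return coordinates outside the rect (the "minus coordinates" filtered
# in the loop above). The points below are hypothetical.
import cv2

pts = [(10.0, 10.0), (90.0, 15.0), (50.0, 80.0), (20.0, 60.0)]
subdiv = cv2.Subdiv2D((0, 0, 100, 100))  # (x, y, width, height)
for p in pts:
    subdiv.insert(p)

for x1, y1, x2, y2, x3, y3 in subdiv.getTriangleList():
    if all(0 <= v <= 100 for v in (x1, y1, x2, y2, x3, y3)):
        print((x1, y1), (x2, y2), (x3, y3))  # triangles fully inside the rect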
Exemplo n.º 51
0
 def isOutside(self, point):
     """Return True if `point` lies strictly outside the contour."""
     dist = cv2.pointPolygonTest(self.contour, point, True)
     return dist < 0
Exemplo n.º 52
0
 def __call__(self, pt):
     return 1 if cv2.pointPolygonTest(self.cont, pt, False) >= 0 else 0
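# Editor's note: the examples above variously test `== 1`, `> 0`, and `>= 0`;
# a quick reference sketch of what cv2.pointPolygonTest actually returns.
import cv2
import numpy as np

square = np.array([[0, 0], [10, 0], [10, 10], [0, 10]], dtype=np.int32)

print(cv2.pointPolygonTest(square, (5.0, 5.0), False))   #  1.0: inside
print(cv2.pointPolygonTest(square, (10.0, 5.0), False))  #  0.0: on the edge
print(cv2.pointPolygonTest(square, (15.0, 5.0), False))  # -1.0: outside
print(cv2.pointPolygonTest(square, (5.0, 5.0), True))    #  5.0: signed distance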
Exemplo n.º 53
0
def analyze_bound_vertical(img, obj, mask, line_position):
    """User-input boundary line tool

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary mask made from selected contours
    line_position   = position of boundary line (a value of 0 would draw the line through the left side of the image)

    Returns:
    analysis_images = output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :param line_position: int
    :return analysis_images: list
    """
    ori_img = np.copy(img)

    # Draw a vertical line through the left side of the image, adjusted to the user-input position
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    iy, ix, iz = np.shape(ori_img)
    size = (iy, ix)
    size1 = (iy, ix, 3)
    background = np.zeros(size, dtype=np.uint8)
    wback = (np.zeros(size1, dtype=np.uint8)) + 255
    x_coor = 0 + int(line_position)
    y_coor = int(iy)
    rec_point1 = (0, 0)
    rec_point2 = (x_coor, y_coor - 2)
    cv2.rectangle(background, rec_point1, rec_point2, (255), -1)
    right_contour, right_hierarchy = cv2.findContours(
        background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    x, y, width, height = cv2.boundingRect(obj)

    if x_coor - x <= 0:
        width_left_bound = 0
        width_right_bound = width
    elif x_coor - x > 0:
        width_1 = x_coor - x
        if width - width_1 <= 0:
            width_left_bound = width
            width_right_bound = 0
        else:
            width_left_bound = x_coor - x
            width_right_bound = width - width_left_bound

    right = []
    left = []
    mask_nonzerox, mask_nonzeroy = np.nonzero(mask)
    obj_points = np.vstack((mask_nonzeroy, mask_nonzerox))
    obj_points1 = np.transpose(obj_points)

    for i, c in enumerate(obj_points1):
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(right_contour[0], xy, measureDist=False)
        if pptest == 1:
            left.append(xy)
            cv2.circle(ori_img, xy, 1, (155, 0, 255))
            cv2.circle(wback, xy, 1, (155, 0, 255))
        else:
            right.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 255, 0))
            cv2.circle(wback, xy, 1, (0, 255, 0))
    right_bound_area = len(right)
    left_bound_area = len(left)
    percent_bound_area_right = (
        (float(right_bound_area)) /
        (float(left_bound_area + right_bound_area))) * 100
    percent_bound_area_left = (
        (float(left_bound_area)) /
        (float(right_bound_area + left_bound_area))) * 100

    analysis_images = []

    if left_bound_area or right_bound_area:
        point3 = (x_coor + 2, 0)
        point4 = (x_coor + 2, y_coor)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if x_coor - x <= 0:
            cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)),
                     (0, 255, 0), params.line_thickness)
            cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0),
                     params.line_thickness)
        elif x_coor - x > 0:
            width_1 = x_coor - x
            if width - width_1 <= 0:
                cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)),
                         (255, 0, 0), params.line_thickness)
                cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)),
                         (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (x_coor + 2, int(cmy)),
                         (x_coor + width_left_bound, int(cmy)), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (x_coor + 2, int(cmy)),
                         (x_coor - width_right_bound, int(cmy)), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (x_coor + 2, int(cmy)),
                         (x_coor + width_left_bound, int(cmy)), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (x_coor + 2, int(cmy)),
                         (x_coor - width_right_bound, int(cmy)), (0, 255, 0),
                         params.line_thickness)
        # Output images with boundary line
        analysis_images.append(wback)
        analysis_images.append(ori_img)

    if params.debug is not None:
        params.device += 1
        point3 = (x_coor + 2, 0)
        point4 = (x_coor + 2, y_coor)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if x_coor - x <= 0:
            cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)),
                     (0, 255, 0), params.line_thickness)
            cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0),
                     params.line_thickness)
        elif x_coor - x > 0:
            width_1 = x_coor - x
            if width - width_1 <= 0:
                cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)),
                         (255, 0, 0), params.line_thickness)
                cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)),
                         (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (x_coor + 2, int(cmy)),
                         (x_coor + width_left_bound, int(cmy)), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (x_coor + 2, int(cmy)),
                         (x_coor - width_right_bound, int(cmy)), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (x_coor + 2, int(cmy)),
                         (x_coor + width_left_bound, int(cmy)), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (x_coor + 2, int(cmy)),
                         (x_coor - width_right_bound, int(cmy)), (0, 255, 0),
                         params.line_thickness)
        if params.debug == 'print':
            print_image(
                wback,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_boundary_on_white.png'))
            print_image(
                ori_img,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_boundary_on_img.png'))
        if params.debug == 'plot':
            plot_image(wback)
            plot_image(ori_img)

    outputs.add_observation(variable='vertical_reference_position',
                            trait='vertical reference position',
                            method='plantcv.plantcv.analyze_bound_vertical',
                            scale='none',
                            datatype=int,
                            value=line_position,
                            label='none')
    outputs.add_observation(variable='width_left_reference',
                            trait='width left of reference',
                            method='plantcv.plantcv.analyze_bound_vertical',
                            scale='pixels',
                            datatype=int,
                            value=width_left_bound,
                            label='pixels')
    outputs.add_observation(variable='width_right_reference',
                            trait='width right of reference',
                            method='plantcv.plantcv.analyze_bound_vertical',
                            scale='pixels',
                            datatype=int,
                            value=width_right_bound,
                            label='pixels')
    outputs.add_observation(variable='area_left_reference',
                            trait='area left of reference',
                            method='plantcv.plantcv.analyze_bound_vertical',
                            scale='pixels',
                            datatype=int,
                            value=left_bound_area,
                            label='pixels')
    outputs.add_observation(variable='percent_area_left_reference',
                            trait='percent area left of reference',
                            method='plantcv.plantcv.analyze_bound_vertical',
                            scale='none',
                            datatype=float,
                            value=percent_bound_area_left,
                            label='none')
    outputs.add_observation(variable='area_right_reference',
                            trait='area right of reference',
                            method='plantcv.plantcv.analyze_bound_vertical',
                            scale='pixels',
                            datatype=int,
                            value=right_bound_area,
                            label='pixels')
    outputs.add_observation(variable='percent_area_right_reference',
                            trait='percent area right of reference',
                            method='plantcv.plantcv.analyze_bound_vertical',
                            scale='none',
                            datatype=float,
                            value=percent_bound_area_right,
                            label='none')

    # Store images
    outputs.images.append(analysis_images)

    return ori_img
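# Editor's note: the per-pixel pointPolygonTest against the left-hand
# rectangle above reduces to a column comparison; a vectorised sketch of the
# same left/right pixel split (boundary pixels may be classed slightly
# differently than by the strict `== 1` test).
import numpy as np

def bound_areas_vertical(mask, line_position):
    _rows, cols = np.nonzero(mask)
    left = int(np.count_nonzero(cols < line_position))
    right = int(len(cols) - left)
    return left, right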
Exemplo n.º 54
0
def analyze_bound_horizontal(img, obj, mask, line_position, filename=False):
    """User-input boundary line tool

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary mask made from selected contours
    line_position   = position of boundary line (a value of 0 would draw the line through the bottom of the image)
    filename        = False or image name; if provided, the annotated image is saved

    Returns:
    bound_header    = data table column headers
    bound_data      = boundary data table
    analysis_images = output image filenames

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :param line_position: int
    :param filename: str
    :return bound_header: tuple
    :return bound_data: tuple
    :return analysis_images: list
    """

    params.device += 1
    ori_img = np.copy(img)

    # Draw a horizontal line through the bottom of the image, adjusted to the user-input height
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    iy, ix, iz = np.shape(ori_img)
    size = (iy, ix)
    size1 = (iy, ix, 3)
    background = np.zeros(size, dtype=np.uint8)
    wback = (np.zeros(size1, dtype=np.uint8)) + 255
    x_coor = int(ix)
    y_coor = int(iy) - int(line_position)
    rec_corner = int(iy - 2)
    rec_point1 = (1, rec_corner)
    rec_point2 = (x_coor - 2, y_coor - 2)
    cv2.rectangle(background, rec_point1, rec_point2, (255), 1)
    below_contour, below_hierarchy = cv2.findContours(
        background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    x, y, width, height = cv2.boundingRect(obj)

    if y_coor - y <= 0:
        height_above_bound = 0
        height_below_bound = height
    elif y_coor - y > 0:
        height_1 = y_coor - y
        if height - height_1 <= 0:
            height_above_bound = height
            height_below_bound = 0
        else:
            height_above_bound = y_coor - y
            height_below_bound = height - height_above_bound

    below = []
    above = []
    mask_nonzerox, mask_nonzeroy = np.nonzero(mask)
    obj_points = np.vstack((mask_nonzeroy, mask_nonzerox))
    obj_points1 = np.transpose(obj_points)

    for i, c in enumerate(obj_points1):
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(below_contour[0], xy, measureDist=False)
        if pptest == 1:
            below.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 0, 255))
            cv2.circle(wback, xy, 1, (0, 0, 255))
        else:
            above.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 255, 0))
            cv2.circle(wback, xy, 1, (0, 255, 0))
    above_bound_area = len(above)
    below_bound_area = len(below)
    percent_bound_area_above = (
        (float(above_bound_area)) /
        (float(above_bound_area + below_bound_area))) * 100
    percent_bound_area_below = (
        (float(below_bound_area)) /
        (float(above_bound_area + below_bound_area))) * 100

    bound_header = [
        'HEADER_BOUNDARY' + str(line_position), 'height_above_bound',
        'height_below_bound', 'above_bound_area', 'percent_above_bound_area',
        'below_bound_area', 'percent_below_bound_area'
    ]

    bound_data = [
        'BOUNDARY_DATA', height_above_bound, height_below_bound,
        above_bound_area, percent_bound_area_above, below_bound_area,
        percent_bound_area_below
    ]

    analysis_images = []

    if above_bound_area or below_bound_area:
        point3 = (0, y_coor - 4)
        point4 = (x_coor, y_coor - 4)
        cv2.line(ori_img, point3, point4, (255, 0, 255), 5)
        cv2.line(wback, point3, point4, (255, 0, 255), 5)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if y_coor - y <= 0:
            cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                     (0, 255, 0), 3)
            cv2.line(wback, (int(cmx), y), (int(cmx), y + height), (0, 255, 0),
                     3)
        elif y_coor - y > 0:
            height_1 = y_coor - y
            if height - height_1 <= 0:
                cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), 3)
                cv2.line(wback, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), 3)
            else:
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         3)
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         3)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         3)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         3)
        if filename:
            # Output images with boundary line, above/below bound area
            out_file = str(
                filename[0:-4]) + '_boundary' + str(line_position) + '.jpg'
            print_image(ori_img, out_file)
            analysis_images = ['IMAGE', 'boundary', out_file]

    if params.debug is not None:
        point3 = (0, y_coor - 4)
        point4 = (x_coor, y_coor - 4)
        cv2.line(ori_img, point3, point4, (255, 0, 255), 5)
        cv2.line(wback, point3, point4, (255, 0, 255), 5)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if y_coor - y <= 0:
            cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                     (0, 255, 0), 3)
            cv2.line(wback, (int(cmx), y), (int(cmx), y + height), (0, 255, 0),
                     3)
        elif y_coor - y > 0:
            height_1 = y_coor - y
            if height - height_1 <= 0:
                cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), 3)
                cv2.line(wback, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), 3)
            else:
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         3)
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         3)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         3)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         3)
        if params.debug == 'print':
            print_image(
                wback,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_boundary_on_white.jpg'))
            print_image(
                ori_img,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_boundary_on_img.jpg'))
        if params.debug == 'plot':
            plot_image(wback)
            plot_image(ori_img)

    return bound_header, bound_data, analysis_images
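# Editor's note: both boundary tools derive the centre of mass from image
# moments; a minimal sketch with a guard for empty masks (m00 == 0), which
# the functions above assume never happens.
import cv2

def mask_centroid(mask):
    """Return the (x, y) centre of mass of a binary mask, or None if empty."""
    m = cv2.moments(mask, binaryImage=True)
    if m["m00"] == 0:
        return None
    return m["m10"] / m["m00"], m["m01"] / m["m00"]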
Exemplo n.º 55
0
def step1():
    cell_area_hist_list = []
    print("============Step 1 Start============")
    #-----read-----
    root = tk.Tk()
    root.withdraw()

    file_path = filedialog.askopenfilename()
    #img=cv.imread("G:\\2020summer\\Project\\Chromophobe_dataset1\\4.jpg")
    img = cv.imread(file_path)
    img_original = img
    print("Img size: [Height :", img.shape[0], "]", "[Width :", img.shape[1],
          "]")
    if img.shape[0] < 450 or img.shape[1] < 600:
        img = cv.copyMakeBorder(img,
                                80,
                                450,
                                360,
                                360,
                                cv.BORDER_CONSTANT,
                                value=[255, 255, 255])
    else:
        img = cv.copyMakeBorder(img,
                                80,
                                450,
                                60,
                                60,
                                cv.BORDER_CONSTANT,
                                value=[255, 255, 255])
    #img=cv.cvtColor(img,cv.COLOR_BGR2BGRA)

    img_masked = img.copy()
    img_nucleus_white_img = img.copy()

    #-----preprocess-----
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    #cv.imshow("gray", gray)

    gauss = cv.GaussianBlur(gray, (5, 5), 5)
    #cv.imshow("gauss1",gauss)

    ret, thresh = cv.threshold(gauss, 190, 255, 0)
    cv.imwrite(
        "G:\\2020summer\\Project\\Chromophobe_dataset1\\figure3_left.jpg",
        thresh)
    #cv.imshow("thresh",thresh)

    erode = cv.erode(thresh, None, iterations=1)
    #cv.imshow("erode",erode)

    #-----remove outlines-----

    #cv.imshow("erode",erode)
    erode[0, :] = 255  # whiten the top row (the original nested loop rewrote it img.shape[0] times)
    #-----find contours-----
    cnts, hierarchy = cv.findContours(erode.copy(), cv.RETR_LIST,
                                      cv.CHAIN_APPROX_NONE)

    def cnt_area(cnt):
        area = cv.contourArea(cnt)
        return area

    counter_number = 0
    location_cells_center = {}
    area_of_cells_nucleus = []
    Whole_pic_cell_area_ave_percent = []
    Whole_pic_cell_color_ave = []
    for i in range(0, len(cnts)):
        if 250 <= cnt_area(cnts[i]) <= 0.2 * (img.shape[0] * img.shape[1]):
            cell_area_hist_list.append(cnt_area(cnts[i]))
            #print(cnts[i])
            #cell_area_hist_list.append(area_calculate_from_points(cnts[i]))
            counter_number += 1
            #print(cnts[i])
            #print("======")
            cv.drawContours(img_masked, cnts[i], -1, (0, 0, 255),
                            2)  #draw contours
            cv.drawContours(img_nucleus_white_img, [cnts[i]], -1,
                            (255, 255, 255), -1)  #masked white
            M = cv.moments(cnts[i])

            # Inspect the interior colour of each cell
            x, y, w, h = cv.boundingRect(cnts[i])

            #cv.imshow('single_cell', newimage)

            cell_area_percent = 0

            for row in range(y, y + h):
                for col in range(x, x + w):
                    result = cv.pointPolygonTest(cnts[i], (col, row), False)
                    if result == -1:

                        cv.circle(gray, (col, row), 1, 255, -1)
                        cv.circle(img, (col, row), 1, (255, 255, 255), -1)
                        cell_area_percent += 1
            cv.rectangle(img, (x, y), (x + w, y + h), (153, 153, 0), 1)
            newimage_gray = gray[y:y + h, x:x + w]

            Single_Cell_Color_Distribution = []
            for row in range(h):
                for col in range(w):
                    if newimage_gray[row, col] != 255:
                        Single_Cell_Color_Distribution.append(
                            newimage_gray[row, col])
            '''
            plt.hist(Single_Cell_Color_Distribution,bins=50)
            plt.title(str(counter_number))
            plt.show()
            '''
            #print("this cell area percent= ",str(cell_area_percent/(w*h)))
            numpy.set_printoptions(precision=3)
            Whole_pic_cell_area_ave_percent.append(cell_area_percent / (w * h))
            Whole_pic_cell_color_ave.append(
                numpy.mean(Single_Cell_Color_Distribution))
            """#找出masked细胞内点的坐标
            rect = cv.minAreaRect(cnts[i])
            cx, cy = rect[0]
            box = cv.boxPoints(rect)
            box = np.int0(box)
            cv.drawContours(img_masked, [box], 0, (0, 0, 255), 2)
            #cv.circle(img_masked, (np.int32(cx), np.int32(cy)), 2, (255, 0, 0), 2, 8, 0)

            box_gray_color=[]
            for by in range(box[2][1],box[0][1]+1):
                for bx in range(box[1][0],box[3][0]+1):
                    #print(bx,by)
                    #cv.circle(img_masked,(bx, by), 1, (255, 0, 0), 2, 8, 0)
                    box_gray_color.append(gray[bx,by])
            plt.hist(box_gray_color)

            plt.hist(box_gray_color,bins=50)
            plt.title(str(counter_number))
            plt.show()

            dist=cv.pointPolygonTest(cnts[i],(50,50),True)
            """
            try:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                cv.circle(img_masked, (cX, cY), 3, (255, 255, 255), -1)
                cv.putText(img_masked, str(counter_number), (cX - 20, cY - 20),
                           cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                area_of_cells_nucleus.append(cnt_area(cnts[i]))
                location_cells_center[counter_number] = [cX, cY]
            except:
                pass
            if counter_number == 1:
                x1, y1 = cX, cY
                x_sample, y_sample = cX, cY
            elif counter_number == 2:
                x2, y2 = cX, cY
            # NB: pixel_between_two_points below assumes at least two cells were found

            #cv.drawContours(img_masked, [cnts[i]], -1, (255, 255, 255), -1)#mask contours
    print("Whole pic average cell nucleus area percent: ",
          Whole_pic_cell_area_ave_percent)
    print("Whole pic average cell nucleus area percent_ave: ",
          numpy.mean(Whole_pic_cell_area_ave_percent))
    print("Whole pic average cell nucleus color deep percent: ",
          Whole_pic_cell_color_ave)
    print("Whole pic average cell nucleus color deep percent_ave: ",
          numpy.mean(Whole_pic_cell_color_ave))
    cv.imshow('single_cell', img)
    #-----put Text-----
    print("total cells number : ", counter_number)

    #cv.line(img_masked, (x1,y1), (x2,y2), (0,0,255), 2)

    list_of_two_points = pixel_between_two_points(x1, x2, y1, y2)

    #-----output information on the line
    height_of_two_points = []
    height_of_two_points_B = []
    height_of_two_points_G = []
    height_of_two_points_R = []

    for m in range(0, len(list_of_two_points)):
        height = img[list_of_two_points[m][1], list_of_two_points[m][0]]
        try:
            height_B = img[list_of_two_points[m][1],
                           list_of_two_points[m][0]][0]
            height_G = img[list_of_two_points[m][1],
                           list_of_two_points[m][0]][1]
            height_R = img[list_of_two_points[m][1],
                           list_of_two_points[m][0]][2]
            height_of_two_points_B.append(height_B)
            height_of_two_points_G.append(height_G)
            height_of_two_points_R.append(height_R)
        except:
            pass
        #print(height)
        height_of_two_points.append(height)
    img_sample = img.copy()
    cv.circle(img_sample, (x_sample, y_sample), 3, (0, 0, 255), -1)
    font = cv.FONT_HERSHEY_SIMPLEX
    cv.putText(img_sample, "Sample_Point", (x_sample - 20, y_sample - 20),
               font, 0.7, (255, 255, 255), 2)
    #cv.imshow("img_sample_location_RED_DOT", img_sample)

    # save to local
    f = open("G:\\2020summer\\Project\\Cell_classfication_1.0.0\\dict.txt",
             'w')
    f.write(str(location_cells_center))
    f.close()

    # < list save
    file1 = open('area_of_nucleus.txt', 'w')
    for fp in area_of_cells_nucleus:
        file1.write(str(fp))
        file1.write('\n')
    file1.close()
    # list save >

    cv.imwrite("G:\\2020summer\\Project\\Cell_classfication_1.0.0\\temp.bmp",
               img_masked)
    cv.imwrite("G:\\2020summer\\Project\\Cell_classfication_1.0.0\\temp_1.bmp",
               img_nucleus_white_img)
    cv.imwrite(
        "G:\\2020summer\\Project\\Chromophobe_dataset1\\figure3_right.jpg",
        img_masked)
    #================hist of cells area==================
    #plt.hist(cell_area_hist_list)
    #plt.show()

    #=================================
    #-----
    #=================================UI/
    cv.putText(img_masked, "Overview", (80, 40), cv.FONT_HERSHEY_SIMPLEX, 1,
               (0, 0, 0), 2)
    image_size_text = "Image size: [Width :" + str(
        img_original.shape[0]) + "]" + "[Height :" + str(
            img_original.shape[1]) + "]"
    cv.putText(img_masked, image_size_text, (80, img.shape[0] - 400),
               cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
    cv.putText(img_masked, "Total cells number: " + str(counter_number),
               (80, img.shape[0] - 350), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0),
               2)
    cv.putText(img_masked, "Close window to continue",
               (80, img.shape[0] - 250), cv.FONT_HERSHEY_SIMPLEX, 0.8,
               (0, 0, 0), 1)
    #=================================/UI
    cv.imshow('img_copy', img_masked)
    print("============Step 1 End============")
    cv.waitKey()
    return counter_number
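# Editor's note: step1 whitens bounding-box pixels outside each contour one
# at a time with pointPolygonTest; a vectorised sketch of per-cell mean grey
# value using a filled contour mask instead (`cell_mean_gray` is a
# hypothetical helper, same idea).
import cv2
import numpy as np

def cell_mean_gray(gray, cnt):
    x, y, w, h = cv2.boundingRect(cnt)
    mask = np.zeros((h, w), dtype=np.uint8)
    cv2.drawContours(mask, [cnt], -1, 255, cv2.FILLED, offset=(-x, -y))
    window = gray[y:y + h, x:x + w]
    return float(window[mask > 0].mean()) if mask.any() else float("nan")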
Exemplo n.º 56
0
 def is_in_contour(cont, point):
     x, y = point
     return cv2.pointPolygonTest(cont, (x, y), False) > 0
Exemplo n.º 57
0
for idx, (_curr, _next) in enumerate(zip(contours[::], contours[1::])):
    # https://www.quora.com/How-do-I-iterate-through-a-list-in-python-while-comparing-the-values-at-adjacent-indices/answer/Jignasha-Patel-14
    if article_complete:
        article_mask = np.ones(
            image.shape, dtype="uint8"
        ) * 255  # blank layer image for another separate article
    [cx, cy, cw, ch] = cv2.boundingRect(_curr)
    [nx, ny, nw, nh] = cv2.boundingRect(_next)
    if (ny - cy) > (nh + ch) * 2:  # large vertical gap between current and next contour
        print('Big Gap! {}'.format(idx))

        # loop through contents and insert any valid ones in this gap
        for idxx in range(len(content_contours)):
            [x, y, w, h] = cv2.boundingRect(content_contours[idxx])
            # search_area_rect = cv2.rectangle(contents_mask,(cx,cy),(x+w,y+h),(0,0,255),thickness=3,shift=0)
            dist = cv2.pointPolygonTest(content_contours[idxx], (x, y), False)  # currently unused
            # https://stackoverflow.com/a/50670359/754432
            if cy < y and cx - 10 < x and x < (
                    cx + w):  # less than because it appears above
                # check but not greater than the next title!!
                if y > ny:  # or next is another column
                    break
                # cv2.drawContours(contents_mask, [c], -1, 0, -1)
                # cv2.rectangle(contents_mask, (x,y), (x+w,y+h), (0, 0, 255), 3)
                contents = contents_mask[y:y + h, x:x + w]
                article_mask[
                    y:y + h, x:x +
                    w] = contents  # copied title contour onto the blank image
                image[y:y + h, x:x +
                      w] = 255  # nullified the title contour on original image
                # cv2.putText(contents_mask, "#{},x{},y{}".format(idxx, x, y), cv2.boundingRect(contours[idxx])[:2], cv2.FONT_HERSHEY_PLAIN, 2.0, [255, 153, 255], 2) # [B, G, R]
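# Editor's note: the zip(contours[::], contours[1::]) idiom above walks
# adjacent contour pairs; itertools.pairwise (Python 3.10+) expresses the
# same pattern. The coordinates below are hypothetical.
from itertools import pairwise

boxes = [(0, 10), (0, 40), (0, 200)]  # (x, y) anchors of successive blocks
for (_, cy), (_, ny) in pairwise(boxes):
    print("vertical gap:", ny - cy)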
Exemplo n.º 58
0
def applyMask(
    image_path,
    mask_path,
    save_path,
    segmented_save_path,
    mat_save,
    threshold,
    git_repo_base,
    bregma_list,
    atlas_to_brain_align,
    model,
    dlc_pts,
    atlas_pts,
    olfactory_check,
    use_unet,
    use_dlc,
    plot_landmarks,
    align_once,
    atlas_label_list,
    region_labels=True,
    original_label=False,
):
    """
    Use mask output from model to segment brain image into brain regions, and save various outputs.
    :param image_path: path to folder where brain images are saved
    :param mask_path: path to folder where masks are saved
    :param save_path: path to overall folder for saving all images
    :param segmented_save_path: path to overall folder for saving segmented/labelled brain images
    :param mat_save: choose whether or not to output brain regions to .mat files
    :param threshold: set threshold for segmentation of foregrounds
    :param git_repo_base: The path to the base git repository containing necessary resources for MesoNet (reference
    atlases, DeepLabCut config files, etc.)
    :param bregma_list: The list of bregma locations (or landmarks closest to bregma).
    :param use_unet: Choose whether or not to define the borders of the cortex using a U-net model.
    :param atlas_to_brain_align: If True, registers the atlas to each brain image. If False, registers each brain image
    to the atlas.
    :param model: The name of the U-net model (for passthrough to mask_functions.py)
    :param dlc_pts: The landmarks for brain-atlas registration as determined by the DeepLabCut model.
    :param atlas_pts: The landmarks for brain-atlas registration from the original brain atlas.
    :param olfactory_check: If True, draws olfactory bulb contours on the brain image.
    :param plot_landmarks: If True, plots DeepLabCut landmarks (large circles) and original alignment landmarks (small
    circles) on final brain image.
    :param atlas_label_list: A list of aligned atlases in which each brain region is filled with a unique numeric label.
    This allows for consistent identification of brain regions across images. If original_label is True, this is an
    empty list.
    :param align_once: If True, carries out all alignments based on the alignment of the first atlas and brain. This can
    save time if you have many frames of the same brain with a fixed camera position.
    :param region_labels: choose whether to assign a name to each region based on an existing brain atlas (not currently
    implemented).
    :param original_label: If True, uses a brain region labelling approach that attempts to automatically sort brain
    regions in a consistent order (left to right by hemisphere, then top to bottom for vertically aligned regions). This
    approach may be more flexible if you're using a custom brain atlas (i.e. not one in which each region is filled with
    a unique number).
    """

    tif_list = glob.glob(os.path.join(image_path, "*tif"))
    if atlas_to_brain_align:
        if use_dlc and align_once:
            image_name_arr = glob.glob(
                os.path.join(mask_path, "*_brain_warp.png"))
        else:
            image_name_arr = glob.glob(os.path.join(image_path, "*.png"))
        image_name_arr.sort(key=natural_sort_key)
        if tif_list:
            tif_stack = imageio.mimread(os.path.join(image_path, tif_list[0]))
            image_name_arr = tif_stack
    else:
        # FOR ALIGNING BRAIN TO ATLAS
        image_name_arr = glob.glob(os.path.join(mask_path, "*_brain_warp.png"))
        image_name_arr.sort(key=natural_sort_key)

    region_bgr_lower = (220, 220, 220)
    region_bgr_upper = (255, 255, 255)
    base_c_max = []
    count = 0
    # Find the contours of an existing set of brain regions (to be used to identify each new brain region by shape)
    mat_files = glob.glob(
        os.path.join(git_repo_base, "atlases/mat_contour_base/*.mat"))
    mat_files.sort(key=natural_sort_key)

    # adapt. from https://stackoverflow.com/questions/3016283/create-a-color-generator-from-given-colormap-in-matplotlib
    cm = pylab.get_cmap("viridis")
    colors = [cm(1.0 * i / NUM_COLORS)[0:3] for i in range(NUM_COLORS)]
    colors = [
        tuple(color_idx * 255 for color_idx in color_t) for color_t in colors
    ]
    for file in mat_files:
        mat = scipy.io.loadmat(
            os.path.join(git_repo_base, "atlases/mat_contour_base/", file))
        mat = mat["vect"]
        ret, thresh = cv2.threshold(mat, 5, 255, cv2.THRESH_BINARY)
        base_c = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_NONE)
        base_c = imutils.grab_contours(base_c)
        base_c_max.append(max(base_c, key=cv2.contourArea))
    if not atlas_to_brain_align and use_unet:
        # FOR ALIGNING ATLAS TO BRAIN
        num_images = len(glob.glob(os.path.join(mask_path, "*_brain_warp*")))
        output = os.path.join(mask_path, "..")
        from mesonet.predict_regions import predictRegion

        mask_generate = True
        tif_list = glob.glob(os.path.join(image_path, "*tif"))
        if tif_list:
            input_path = image_path
        else:
            input_path = mask_path
        predictRegion(
            input_path,
            num_images,
            model,
            output,
            mat_save,
            threshold,
            mask_generate,
            git_repo_base,
            atlas_to_brain_align,
            dlc_pts,
            atlas_pts,
            olfactory_check,
            use_unet,
            plot_landmarks,
            align_once,
            atlas_label_list,
            region_labels,
            original_label,
        )
    for i, item in enumerate(image_name_arr):
        label_num = 0
        if not atlas_to_brain_align:
            atlas_path = os.path.join(mask_path, "{}_atlas.png".format(str(i)))
            mask_input_path = os.path.join(mask_path, "{}.png".format(i))
            mask_warped_path = os.path.join(
                mask_path, "{}_mask_warped.png".format(str(i)))
            atlas_to_mask(atlas_path, mask_input_path, mask_warped_path,
                          mask_path, i, use_unet, atlas_to_brain_align,
                          git_repo_base, olfactory_check, [])
        new_data = []
        if len(tif_list) != 0 and atlas_to_brain_align:
            img = item
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        else:
            img = cv2.imread(item)
        if atlas_to_brain_align:
            img = cv2.resize(img, (512, 512))
        if use_dlc:
            bregma_x, bregma_y = bregma_list[i]
        else:
            bregma_x, bregma_y = [
                round(img.shape[0] / 2),
                round(img.shape[1] / 2)
            ]
            original_label = True
        mask = cv2.imread(os.path.join(mask_path, "{}.png".format(i)))
        mask = cv2.resize(mask, (img.shape[0], img.shape[1]))
        # Get the region of the mask that is white
        mask_color = cv2.inRange(mask, region_bgr_lower, region_bgr_upper)
        io.imsave(os.path.join(save_path, "{}_mask_binary.png".format(i)),
                  mask_color)
        # Marker labelling
        # noise removal
        kernel = np.ones((3, 3), np.uint8)  # 3, 3
        mask_color = np.uint8(mask_color)
        thresh_atlas, atlas_bw = cv2.threshold(mask_color, 128, 255, 0)
        # if atlas_to_brain_align and use_dlc:
        #    atlas_bw = cv2.dilate(atlas_bw, kernel, iterations=1)  # 1
        # io.imsave(os.path.join(save_path, "{}_atlas_binary.png".format(i)), atlas_bw)

        if not atlas_to_brain_align:
            watershed_run_rule = i == 0
        else:
            if len(tif_list) == 0:
                watershed_run_rule = True
            else:
                watershed_run_rule = i == 0
        if align_once:
            watershed_run_rule = i == 0

        labels_from_region = []

        if watershed_run_rule:
            orig_list = []
            orig_list_labels = []
            orig_list_labels_left = []
            orig_list_labels_right = []
            # unique_regions = (np.unique(atlas_label)).tolist()
            # unique_regions = [e for e in unique_regions if e.is_integer()]
            unique_regions = [
                -275, -268, -255, -249, -164, -150, -143, -136, -129, -98,
                -78, -71, -64, -57, -50, -43, -36, -29, -21, -15, 0, 15, 21,
                29, 36, 43, 50, 57, 64, 71, 78, 98, 129, 136, 143, 150, 164,
                249, 255, 268, 275, 300, 400,
            ]
            # atlas_label_df = pd.DataFrame(atlas_label)
            # atlas_label_df.to_csv(os.path.join(save_path, "atlas_label.csv"))
            cnts_orig = []
            # Find contours in original aligned atlas
            if atlas_to_brain_align and not original_label:
                np.savetxt(
                    "atlas_label_list_{}.csv".format(i),
                    atlas_label_list[i],
                    delimiter=",",
                )
                for region_idx in unique_regions:
                    if region_idx in [300, 400]:
                        # workaround to address olfactory contours not being found
                        region = cv2.inRange(atlas_label_list[i],
                                             region_idx - 5, region_idx + 5)
                        cnt_for_idx, hierarchy = cv2.findContours(
                            region.copy(), cv2.RETR_TREE,
                            cv2.CHAIN_APPROX_NONE)[-2:]
                        if len(cnt_for_idx) >= 1:
                            cnt_for_idx = cnt_for_idx[0]
                    else:
                        region = cv2.inRange(atlas_label_list[i], region_idx,
                                             region_idx)
                        cnt_for_idx = cv2.findContours(region.copy(),
                                                       cv2.RETR_EXTERNAL,
                                                       cv2.CHAIN_APPROX_NONE)
                        cnt_for_idx = imutils.grab_contours(cnt_for_idx)
                        if len(cnt_for_idx) >= 1:
                            cnt_for_idx = max(cnt_for_idx, key=cv2.contourArea)
                    if len(cnt_for_idx) >= 1:
                        cnts_orig.append(cnt_for_idx)
                        labels_from_region.append(region_idx)
            else:
                cnts_orig = cv2.findContours(atlas_bw.copy(),
                                             cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_NONE)
                cnts_orig = imutils.grab_contours(cnts_orig)
            if not use_dlc:
                # cnts_orig = cv2.findContours(
                #    atlas_bw.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
                #)
                cnts_orig, hierarchy = cv2.findContours(
                    atlas_bw.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
                # cnts_orig = imutils.grab_contours(cnts_orig)
            labels_cnts = []
            for (num_label, cnt_orig) in enumerate(cnts_orig):
                labels_cnts.append(cnt_orig)
                try:
                    cv2.drawContours(img, cnt_orig, -1, (255, 0, 0), 1)
                    # io.imsave(os.path.join(segmented_save_path, "check_contour.png"), img)
                except:
                    print("Could not draw contour!")
                # try:
                # both alignment directions flatten the contour the same way
                c_orig_as_list = cnt_orig.tolist()
                c_orig_as_list = [[c_val[0] for c_val in c_orig_as_list]]
                orig_polylabel = polylabel(c_orig_as_list)
                orig_x, orig_y = int(orig_polylabel[0]), int(orig_polylabel[1])

                if not original_label and atlas_to_brain_align:
                    label_to_use = unique_regions.index(
                        labels_from_region[num_label])
                    (text_width,
                     text_height) = cv2.getTextSize(str(label_to_use),
                                                    cv2.FONT_HERSHEY_SIMPLEX,
                                                    0.4,
                                                    thickness=1)[0]
                    label_jitter = 0
                    label_color = (0, 0, 255)
                    cv2.rectangle(
                        img,
                        (orig_x + label_jitter, orig_y + label_jitter),
                        (
                            orig_x + label_jitter + text_width,
                            orig_y + label_jitter - text_height,
                        ),
                        (255, 255, 255),
                        cv2.FILLED,
                    )
                    cv2.putText(
                        img,
                        str(label_to_use),
                        (int(orig_x + label_jitter),
                         int(orig_y + label_jitter)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.4,
                        label_color,
                        1,
                    )
                    label_num += 1
                orig_list.append((orig_x, orig_y))
                orig_list_labels.append((orig_x - bregma_x, orig_y - bregma_y,
                                         orig_x, orig_y, num_label))
                if (orig_x - bregma_x) < 0:
                    orig_list_labels_left.append((
                        orig_x - bregma_x,
                        orig_y - bregma_y,
                        orig_x,
                        orig_y,
                        num_label,
                    ))
                elif (orig_x - bregma_x) > 0:
                    orig_list_labels_right.append((
                        orig_x - bregma_x,
                        orig_y - bregma_y,
                        orig_x,
                        orig_y,
                        num_label,
                    ))
            # Sorting once after the loop suffices; the original sorted on
            # every iteration.
            orig_list.sort()
            orig_list_labels_sorted_left = sorted(orig_list_labels_left,
                                                  key=lambda t: t[0],
                                                  reverse=True)
            orig_list_labels_sorted_right = sorted(orig_list_labels_right,
                                                   key=lambda t: t[0])
            flatten = lambda l: [obj for sublist in l for obj in sublist]
            orig_list_labels_sorted = flatten(
                [orig_list_labels_sorted_left, orig_list_labels_sorted_right])
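            # Re-order contours that share (within 5 px) the same
            # bregma-relative x position so they read top-to-bottom.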
            vertical_check = np.asarray(
                [val[0] for val in orig_list_labels_sorted])
            for (orig_coord_val,
                 orig_coord) in enumerate(orig_list_labels_sorted):
                vertical_close = np.where(
                    (abs(vertical_check - orig_coord[0]) <= 5))
                vertical_close_slice = vertical_close[0]
                vertical_matches = np.asarray(
                    orig_list_labels_sorted)[vertical_close_slice]
                if len(vertical_close_slice) > 1:
                    vertical_match_sorted = sorted(vertical_matches,
                                                   key=lambda t: t[1])
                    orig_list_labels_sorted_np = np.asarray(
                        orig_list_labels_sorted)
                    orig_list_labels_sorted_np[
                        vertical_close_slice] = vertical_match_sorted
                    orig_list_labels_sorted = orig_list_labels_sorted_np.tolist(
                    )
            img = np.uint8(img)
        else:
            for num_label, cnt_orig in enumerate(cnts_orig):
                try:
                    cv2.drawContours(img, cnt_orig, -1, (255, 0, 0), 1)
                except Exception:
                    print("Could not draw contour!")
        if not atlas_to_brain_align and use_unet:
            cortex_mask = cv2.imread(
                os.path.join(mask_path, "{}_mask.png".format(i)))
            cortex_mask = cv2.cvtColor(cortex_mask, cv2.COLOR_RGB2GRAY)
            thresh, cortex_mask_thresh = cv2.threshold(cortex_mask, 128, 255,
                                                       0)
            cortex_cnt = cv2.findContours(cortex_mask_thresh,
                                          cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_NONE)
            cortex_cnt = imutils.grab_contours(cortex_cnt)
            cv2.drawContours(img, cortex_cnt, -1, (0, 0, 255), 3)
        labels_x = []
        labels_y = []
        areas = []
        sorted_labels_arr = []
        label_jitter = 0
        mask = np.zeros(mask_color.shape, dtype="uint8")
        cnts = cnts_orig
        print("LEN CNTS: {}".format(len(cnts)))
        print("LEN LABELS: {}".format(len(orig_list_labels_sorted)))
        if original_label or not atlas_to_brain_align:
            labels_from_region = [0] * len(orig_list_labels_sorted)
        for (z, cnt), (coord_idx, coord), label_from_region in zip(
                enumerate(cnts), enumerate(orig_list_labels_sorted),
                labels_from_region):
            if atlas_to_brain_align and not original_label:
                coord_label_num = unique_regions.index(
                    labels_from_region[coord_idx])
            else:
                coord_label_num = coord_idx
            # compute the center of the contour
            if len(cnts) > 1:
                z = 0
            c_x, c_y = int(coord[2]), int(coord[3])
            c = cnt
            if not atlas_to_brain_align and use_unet:
                # Mark the contour "inside" if every one of its points lies
                # strictly within at least one cortex contour, else "outside".
                cnt_loc_label = "outside"
                for cortex_sub_cnt in cortex_cnt:
                    pt_tests = {
                        cv2.pointPolygonTest(
                            cortex_sub_cnt,
                            (c_coord.tolist()[0][0], c_coord.tolist()[0][1]),
                            False,
                        )
                        for c_coord in c
                    }
                    if pt_tests == {1.0}:
                        cnt_loc_label = "inside"
                        break
            else:
                cnt_loc_label = ""
            rel_x = c_x - bregma_x
            rel_y = c_y - bregma_y

            # Collect every label coordinate that falls strictly inside this
            # contour (pointPolygonTest with measureDist=False returns +1
            # inside, 0 on the edge, -1 outside).
            pt_inside_cnt = [
                coord_check for coord_check in orig_list_labels_sorted
                if cv2.pointPolygonTest(c, (int(coord_check[2]),
                                            int(coord_check[3])), False) == 1
            ]
            if original_label:
                try:
                    pt_inside_cnt_idx = orig_list_labels_sorted.index(
                        pt_inside_cnt[0])
                    label_for_mat = pt_inside_cnt_idx
                except (IndexError, ValueError):
                    label_for_mat = coord_label_num
                    print(
                        "WARNING: label was not found in region. Order of labels may be incorrect!"
                    )
            else:
                label_for_mat = coord_label_num

            # The centroid of the contour works as the contour centre in most
            # cases. However, the centroid sometimes falls outside of the
            # contour. In that case, a safer option would be the average x and
            # y positions of the contour edges that intersect with the
            # centroid; if there are more than two intersecting edges, or if
            # the average position is over 200 px from the centroid, we would
            # fall back to using the centroid as the centre of the contour.
            # That fallback is currently disabled. rel_x and rel_y (computed
            # above) are the contour centre coordinates relative to bregma.
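            # A minimal sketch of the disabled fallback, assuming `c` holds
            # the contour as an (N, 1, 2) array (the original referenced a
            # separate `c_for_centre` array not shown here). Kept behind a
            # flag so the current behaviour is unchanged.
            use_centre_fallback = False  # hypothetical switch, off by default
            if use_centre_fallback:
                edge_xs = [pt[0][0] for pt in c if pt[0][1] == c_y]
                edge_ys = [pt[0][1] for pt in c if pt[0][0] == c_x]
                if len(edge_xs) >= 2 and len(edge_ys) >= 2:
                    adj_centre_x = int(np.mean([edge_xs[0], edge_xs[-1]]))
                    adj_centre_y = int(np.mean([edge_ys[0], edge_ys[-1]]))
                    if (abs(adj_centre_x - c_x) <= 200
                            and abs(adj_centre_y - c_y) <= 200):
                        c_x, c_y = adj_centre_x, adj_centre_y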
            c_rel_centre = [rel_x, rel_y]
            os.makedirs(os.path.join(segmented_save_path,
                                     "mat_contour_centre"),
                        exist_ok=True)

            # If the .mat save checkbox is checked in the GUI, contour paths
            # and centres are saved to .mat files for each contour (see the
            # mat_save block below).
            # Prepares lists of the contours identified in the brain image, in the order that they are found by
            # OpenCV
            # labels_arr.append(label)
            sorted_labels_arr.append(coord_label_num)
            labels_x.append(int(c_x))
            labels_y.append(int(c_y))
            areas.append(cv2.contourArea(c))
            # The first contour just outlines the entire image (which does not
            # provide a useful label or .mat contour); the guard that skipped
            # it is currently disabled.
            # Cross-references each contour with a set of contours from a base
            # brain atlas that was manually labelled with brain regions (as
            # defined in 'region_labels.csv' in the 'atlases' folder). If the
            # area of the contour is within 5000 square px of the original
            # region and the centre of the contour is at most 100 px away from
            # the centre of the original contour, the contour is labelled with
            # its corresponding brain region. Until small brain regions can be
            # labelled consistently and accurately, only regions with an area
            # greater than 1000 square px are labelled. The matching step
            # itself is currently disabled; only the shape distances below are
            # computed.
            shape_list = []
            label_color = (0, 0, 255)
            for n_bc, bc in enumerate(base_c_max):
                # Hu-moment shape distance between this contour and each base
                # atlas contour (smaller means more similar).
                shape_compare = cv2.matchShapes(c, bc, 1, 0.0)
                shape_list.append(shape_compare)
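            # A hedged sketch of how the disabled matching step could pick the
            # closest base-atlas contour (assuming base_c_max aligns with the
            # rows of 'region_labels.csv'); the result is unused here.
            closest_base_idx = int(np.argmin(shape_list)) if shape_list else None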
            if not region_labels and (original_label
                                      or not atlas_to_brain_align):
                (text_width,
                 text_height) = cv2.getTextSize(str(coord_label_num),
                                                cv2.FONT_HERSHEY_SIMPLEX,
                                                0.4,
                                                thickness=1)[0]
                cv2.rectangle(
                    img,
                    (c_x + label_jitter, c_y + label_jitter),
                    (c_x + label_jitter + text_width,
                     c_y + label_jitter - text_height),
                    (255, 255, 255),
                    cv2.FILLED,
                )
                cv2.putText(
                    img,
                    str(coord_label_num),
                    (int(c_x + label_jitter), int(c_y + label_jitter)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.4,
                    label_color,
                    1,
                )
                label_num += 1

            if mat_save:
                # Create empty arrays of the same size as the mask: one for
                # the filled contour and one with the contour centre marked.
                c_total = np.zeros_like(mask)
                c_centre = np.zeros_like(mask)
                # Fill in the contour area with 255s
                cv2.fillPoly(c_total, pts=[c], color=(255, 255, 255))
                # Set the contour's centroid to 255. NumPy indexes arrays as
                # (row, col) = (y, x); the original indexed (c_x, c_y) and
                # bounds-checked both against shape[0], which swapped the axes.
                if c_y < mask.shape[0] and c_x < mask.shape[1]:
                    c_centre[c_y, c_x] = 255
                os.makedirs(os.path.join(segmented_save_path, "mat_contour"),
                            exist_ok=True)
                os.makedirs(os.path.join(segmented_save_path,
                                         "mat_contour_centre"),
                            exist_ok=True)
                sio.savemat(
                    os.path.join(
                        segmented_save_path,
                        "mat_contour/roi_{}_{}_{}_{}.mat".format(
                            cnt_loc_label, i, label_for_mat, z),
                    ),
                    {
                        "roi_{}_{}_{}_{}".format(cnt_loc_label, i, label_for_mat, z):
                        c_total
                    },
                    appendmat=False,
                )
                sio.savemat(
                    os.path.join(
                        segmented_save_path,
                        "mat_contour_centre/roi_centre_{}_{}_{}_{}.mat".format(
                            cnt_loc_label, i, label_for_mat, z),
                    ),
                    {
                        "roi_centre_{}_{}_{}_{}".format(
                            cnt_loc_label, i, label_for_mat, z):
                        c_centre
                    },
                    appendmat=False,
                )
                sio.savemat(
                    os.path.join(
                        segmented_save_path,
                        "mat_contour_centre/rel_roi_centre_{}_{}_{}_{}.mat".
                        format(cnt_loc_label, i, label_for_mat, z),
                    ),
                    {
                        "rel_roi_centre_{}_{}_{}_{}".format(
                            cnt_loc_label, i, label_for_mat, z):
                        c_rel_centre
                    },
                    appendmat=False,
                )
            count += 1
        if align_once:
            idx_to_use = 0
        else:
            idx_to_use = i
        if plot_landmarks:
            for pt, color in zip(dlc_pts[idx_to_use], colors):
                cv2.circle(img, (int(pt[0]), int(pt[1])), 10, color, -1)
            for pt, color in zip(atlas_pts[idx_to_use], colors):
                cv2.circle(img, (int(pt[0]), int(pt[1])), 5, color, -1)
        io.imsave(
            os.path.join(segmented_save_path,
                         "{}_mask_segmented.png".format(i)), img)
        img_edited = Image.open(
            os.path.join(save_path, "{}_mask_binary.png".format(i)))
        # Generates a transparent version of the brain atlas: white pixels
        # become fully transparent.
        img_rgba = img_edited.convert("RGBA")
        data = img_rgba.getdata()
        new_data = []  # reset per image so pixels do not accumulate across masks
        for pixel in data:
            if pixel[0] == 255 and pixel[1] == 255 and pixel[2] == 255:
                new_data.append((pixel[0], pixel[1], pixel[2], 0))
            else:
                new_data.append(pixel)
        img_rgba.putdata(new_data)
        img_rgba.save(
            os.path.join(save_path, "{}_mask_transparent.png".format(i)))
        img_transparent = cv2.imread(
            os.path.join(save_path, "{}_mask_transparent.png".format(i)))
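        # Note: cv2.imread reads 3-channel BGR by default, so the alpha
        # channel saved above is dropped here; pass cv2.IMREAD_UNCHANGED to
        # keep it.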
        img_trans_for_mat = np.uint8(img_transparent)
        if mat_save:
            sio.savemat(
                os.path.join(segmented_save_path,
                             "mat_contour/transparent_{}".format(i)),
                {"transparent_{}".format(i): img_trans_for_mat},
            )
        masked_img = cv2.bitwise_and(img, img_transparent, mask=mask_color)
        if plot_landmarks:
            for pt, color in zip(dlc_pts[idx_to_use], colors):
                cv2.circle(masked_img, (int(pt[0]), int(pt[1])), 10, color, -1)
            for pt, color in zip(atlas_pts[idx_to_use], colors):
                cv2.circle(masked_img, (int(pt[0]), int(pt[1])), 5, color, -1)
        io.imsave(os.path.join(save_path, "{}_overlay.png".format(i)),
                  masked_img)
        print("Mask {} saved!".format(i))
        d = {
            "sorted_label": sorted_labels_arr,
            "x": labels_x,
            "y": labels_y,
            "area": areas,
        }
        df = pd.DataFrame(data=d)
        if not os.path.isdir(os.path.join(segmented_save_path,
                                          "region_labels")):
            os.mkdir(os.path.join(segmented_save_path, "region_labels"))
        df.to_csv(
            os.path.join(segmented_save_path, "region_labels",
                         "{}_region_labels.csv".format(i)))
    print("Analysis complete! Check the outputs in the folders of {}.".format(
        save_path))
    k.clear_session()
    if dlc_pts:
        os.chdir("../..")
    else:
        os.chdir(os.path.join(save_path, '..'))
Exemplo n.º 59
0
def isInContourV1(cont, pt, patch_size=None):
    # Returns 1 if `pt` lies inside or on `cont`, 0 otherwise; `patch_size`
    # is unused here (presumably kept for interface parity with variants).
    return 1 if cv2.pointPolygonTest(cont, pt, False) >= 0 else 0
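
A minimal usage sketch (the square contour and the test points below are made up for illustration):

import numpy as np
import cv2

square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
print(isInContourV1(square, (5, 5)))   # 1: strictly inside
print(isInContourV1(square, (10, 5)))  # 1: on the boundary (test returns 0)
print(isInContourV1(square, (20, 5)))  # 0: outside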
Exemplo n.º 60
0
    def __init__(self, root, cropSize, trainFlag, imageRGBfilenames, rgbContours, rgbXYWHCR, depthImgFilename,
                 depthXYWHCR):
        """
        Args:
        root (string): directory with all the input images.
        cropSize (integer): size of the classification image (in meters).
        trainFlag (string): 'train', 'vali' or 'test'.

        Outputs:
        self.imageRGBfilenames: for future reading
        self.rgbContours: the contours of all RGB images
        self.rgbXYWHCR: the origin X, origin Y, pixel width, pixel height, columns and rows of the RGB images
        self.trainShapes:
        self.trainCategory:
        self.testShapes:
        self.testCategory:
        self.valiShapes:
        self.valiCategory:
        """
        super(grantXdataset, self).__init__()
        self.root = root
        self.cropSize = cropSize

        # read all rgb tif images and load the image's polygon for each image
        self.imageRGBfilenames = imageRGBfilenames
        self.rgbContours = rgbContours
        self.rgbXYWHCR = rgbXYWHCR
        self.depthImgFilename = depthImgFilename
        self.depthXYWHCR = depthXYWHCR

        self.trainFlag = trainFlag

        # transform function
        # self.toTensor = transforms.ToTensor()
        # self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
        #                                       std=[0.229, 0.224, 0.225])

        # augmentation
        self.color_jitter = transforms.ColorJitter(0.1, 0.1, 0.1, 0.1)

        # read all shapes and divide them
        self.categories = {'Water': 0, 'Bog': 1, 'Channel_Fen': 2,
                           'Forest_Dense': 3, 'Forest_Sparse': 4, 'Wetland': 5,
                           'Dense_Forest': 3, 'Sparse_Forest': 4}

        # training flag
        if trainFlag == 'train':
            self.augmentationFlag = True
        else:
            self.augmentationFlag = False

        # sf = shapefile.Reader(join(root, "Class_Samples/Class_Samples.shp"))
        sf = shapefile.Reader(join(self.root, "Class_Samples/{}Shapes.shp".format(trainFlag)))

        shapes = []
        shapeCategories = []
        for rec in sf.shapeRecords():
            pts = []
            for point in rec.shape.points:
                pts.append(point)
            shapes.append(pts)
            shapeCategories.append(self.categories[rec.record[-1]])

        # training shape balance
        if 'train' in trainFlag:
            trainingShapeEachCategory = [[], [], [], [], [], []]
            categoryState = np.zeros(6, dtype=np.int32)
            for shape, category in zip(shapes, shapeCategories):
                trainingShapeEachCategory[category].append(shape)
                categoryState[category] += 1
            upbound = np.max(categoryState)
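            # Oversample minority categories: append augmented copies of
            # randomly chosen shapes until every category reaches `upbound`.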
            for cntCategory, categoryNum in enumerate(categoryState):
                for q in range(upbound - categoryNum):
                    trainingShapeEachCategory[cntCategory].append(
                        shapeAug(random.choice(trainingShapeEachCategory[cntCategory])))

            shapes = []
            shapeCategories = []
            for cntCategory, shapeVec in enumerate(trainingShapeEachCategory):
                for shape in shapeVec:
                    shapes.append(shape)
                    shapeCategories.append(cntCategory)

        # writeShape(self.shapes, self.ShapeCategories, 'fortesting')

        # generate RGB shapes for pics
        self.originShapes = shapes
        self.shapes = []
        if cropSize is not None:
            for i, shape in enumerate(self.originShapes):
                # Scale each (assumed four-point) shape about its centre; the
                # divisor 30 is presumably the native sample size in metres.
                resizeRate = cropSize / 30
                centerPt = (np.array(shape[0]) + np.array(shape[1]) + np.array(shape[2]) + np.array(shape[3])) / 4
                newShape = []
                for pt in shape:
                    newShape.append(tuple(resizeRate * (pt - centerPt) + centerPt))
                self.shapes.append(newShape)

        # Determine which shape falls inside which image: a shape is assigned
        # to image j only if all four of its corner points lie inside or on
        # that image's contour (pointPolygonTest >= 0); shapes are assumed to
        # have exactly four points.
        shapeInImg = []
        for i, shape in enumerate(self.shapes):
            shapeInImg.append([])
            for j, rgbCnt in enumerate(self.rgbContours):
                for cnt, pt in enumerate(shape):
                    if cv2.pointPolygonTest(rgbCnt, pt, False) < 0:
                        break
                    if cnt == 3:  # fourth point reached and all inside
                        shapeInImg[i].append(j)

        # Drop shapes that are not contained in any image.
        newOriginShape, newShape, newCategory, newShapeInImg = [], [], [], []
        for i, shapeImgInfo in enumerate(shapeInImg):
            if not shapeImgInfo:
                continue
            newOriginShape.append(self.originShapes[i])
            newShape.append(self.shapes[i])
            newCategory.append(shapeCategories[i])
            newShapeInImg.append(shapeInImg[i])
        self.originShapes = newOriginShape
        self.shapes = newShape
        self.ShapeCategories = newCategory
        self.shapeInImg = newShapeInImg
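
The shape-to-image containment test above reduces to this pattern (a standalone sketch with made-up data):

import numpy as np
import cv2

img_contour = np.array([[[0, 0]], [[100, 0]], [[100, 100]], [[0, 100]]],
                       dtype=np.int32)
shape = [(10.0, 10.0), (20.0, 10.0), (20.0, 20.0), (10.0, 20.0)]
all_inside = all(cv2.pointPolygonTest(img_contour, pt, False) >= 0
                 for pt in shape)
print(all_inside)  # True: every corner lies inside the image contour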