def getContours(image):
    global mask
    _,contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:10]
    
    
    mainContour = None
    mainMoments = None
    contourCentreX = None
    contourCentreY = None
    
    maxArea = 0.0
#    print " "
    for c in cnts:
        area = cv2.contourArea(c)
#        print "pupil area: %d" % area
        if area > maxArea and area > 600 and area < 3000: #ensure the correct contour is detected
            M_2 = cv2.moments(c)
            cX = int(M_2['m10']/M_2['m00'])
            cY = int(M_2['m01']/M_2['m00'])
            if cX >= topLeftCornerX and cY >= topLeftCornerY and cX <= bottomRightCornerX and cY <= bottomRightCornerY:
                maxArea = area
                mainContour = c
                M = cv2.moments(c)
                contourCentreX = int(M['m10']/M['m00'])
                contourCentreY = int(M['m01']/M['m00'])

#    if mainContour is None:
#        print "pupil contour is none"

    print maxArea
    return contourCentreX, contourCentreY, mainContour
 def image_core(self):
   """Grab one frame from self.vid, mark blobs larger than 1000 px, and
   return the annotated frame.

   Returns (im1, posX, posY) where im1 is the annotated frame converted
   to a legacy cv image and (posX, posY) is the centroid of the LAST
   contour processed (reset to (0, 0) whenever a small contour follows).
   Implicitly returns None when the frame grab fails.
   """
   #time1 = time.time()
   val,im = self.vid.read()
   #cv2.imshow("image2",im)
   posX,posY=0,0
   if val:
       im2=self.image_filter(im)
       #r,im1=cv2.threshold(im2,90,255,1)
       contours,hierarchy = cv2.findContours(im2,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
       print contours
       for h,cnt in enumerate(contours):
          area = cv2.contourArea(cnt)#error in opencv think of changing version to 2.4.2 (dang) suggest using linux
          if area > 1000:
           # Centroid from image moments: (m10/m00, m01/m00).
           posX = int((cv2.moments(cnt)['m10']) / (cv2.moments(cnt)['m00']))
           posY = int((cv2.moments(cnt)['m01']) / (cv2.moments(cnt)['m00']))
           '''moments = cv2.moments(cnt)
           moment00 = moments['m00']
           moment10=moments['m10']
           moment01=moments['m01']
           posX = int(moment10/moment00)
           posY = int(moment01/moment00)'''
           # Five circles: one at the centroid plus four diagonal offsets,
           # giving a cluster marker around the detected blob.
           cv2.circle(im,(int((posX)),int((posY))),40,(0,0,255),2,1)
           cv2.circle(im,(int((posX+5)),int((posY+5))),40,(0,0,255),2,1)
           cv2.circle(im,(int((posX-5)),int((posY-5))),40,(0,0,255),2,1)
           cv2.circle(im,(int((posX+5)),int((posY-5))),40,(0,0,255),2,1)
           cv2.circle(im,(int((posX-5)),int((posY+5))),40,(0,0,255),2,1)
          else:
           posX,posY=0,0
       # Convert to the legacy cv (pre-cv2) image type for the caller.
       im1=cv.fromarray(im)
       #cv2.imshow("image1",im)
       cv2.waitKey(10)
       #time2 = time.time()
       #print ((time2-time1)*1000.0)
       return im1,posX,posY
def bot_position(hsv,c):
    """Locate the two bot markers in an HSV frame.

    Thresholds the frame with one of two colour ranges (selected by
    ``c``), takes the two largest contours, draws their centroids onto
    the global ``frame`` and stores them in the global ``Bot_position``.
    """
    # Colour range for the markers: one range for the test image (c == 1),
    # a wider one otherwise.
    if c == 1:
        bot_lower = np.array([20,25,230]) # for image 
        bot_upper = np.array([40,255,255])
    else:
        bot_lower = np.array([0,70,240])
        bot_upper = np.array([45,255,255])

    # Front-end masking and centroid extraction.
    mask = cv2.inRange(hsv, bot_lower, bot_upper)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #contours,length=areacon(contours,700,300)
    #cv2.drawContours(frame,contours,-1,(100,100,255),1)
    biggest = sorted(contours, key=cv2.contourArea, reverse=True)[:2]

    # Two largest blobs: index 0 is drawn magenta, index 1 red.
    for idx, colour in ((0, (255, 0, 255)), (1, (0, 0, 255))):
        mom = cv2.moments(biggest[idx])
        centreX = int(mom['m10'] / mom['m00'])
        centreY = int(mom['m01'] / mom['m00'])
        cv2.circle(frame, (centreX, centreY), 2, colour, -1)
        Bot_position[idx][0] = centreX
        Bot_position[idx][1] = centreY
    def detect_color(self, img):
        """Classify the dominant colour blob in an HSV image.

        Tries three HSV ranges in order (green, blue, yellow).  For each,
        the image is thresholded and the zeroth moment (pixel mass) of
        the resulting mask is used as the blob area; the first range
        whose area exceeds 10000 wins.

        Returns (x, y, mask, colour_tag) for the winning range, where
        (x, y) is the mask centroid, or None when no range matches.
        """
        # (lower, upper, colour tag) per candidate, checked in this order.
        # The original repeated this block three times with a stale
        # "blue" comment on the yellow branch.
        ranges = (
            ((50, 10, 40), (80, 255, 255), settings.GREEN),
            ((97, 10, 40), (125, 255, 255), settings.BLUE),
            ((25, 20, 40), (50, 255, 255), settings.YELLOW),
        )
        for lower, upper, color in ranges:
            pic = cv2.inRange(img, np.asarray(lower), np.asarray(upper))
            moments = cv2.moments(pic, 0)
            area = moments.get('m00')
            if area > 10000:
                # Centroid of the binary mask.
                x = moments.get('m10') / area
                y = moments.get('m01') / area
                return (x, y, pic, color)

        return None
def getContoursCornealVideo(image):
    """Find the corneal-reflection contour in a binary eye image.

    Keeps the largest contour with area < 150 whose centroid lies within
    100 px of the global pupil centre (cpX, cpY).

    Returns (centreX, centreY, contour); all three are None when nothing
    qualifies.
    """
    global mask
    # OpenCV 3.x findContours returns (image, contours, hierarchy).
    _, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Largest-first ordering (the original sliced [:len(contours)],
    # which is a no-op).
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)

    mainContour = None
    contourCentreX = None
    contourCentreY = None
    contourList = []

    maxArea = 0.0
    for c in cnts:
        area = cv2.contourArea(c)
        M = cv2.moments(c)

        # Guard against degenerate contours with zero mass.
        if M['m00'] == 0:
            M['m00'] = 1

        cX = int(M['m10'] / M['m00'])
        cY = int(M['m01'] / M['m00'])

        # ensure the correct contour is detected 15000
        if area > maxArea and area < 150 and abs(cpX - cX) < 100 and abs(cpY - cY) < 100:
            contourList.append(c)
            maxArea = area
            mainContour = c
            # Reuse the guarded centroid computed above; the original
            # recomputed cv2.moments here, losing the zero-mass guard.
            contourCentreX = cX
            contourCentreY = cY

    # Debug rendering of every accepted candidate (the image itself is
    # discarded; kept for parity with the original behaviour).
    contourImg = np.zeros((470, 620), np.uint8)
    contourImg = cv2.cvtColor(contourImg, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(contourImg, contourList, -1, (0, 0, 255), 3)

    return contourCentreX, contourCentreY, mainContour
def PickBlob(im):
    """Return the centroid of the largest blob (area > 1000) in a binary image.

    Side effects: stores every qualifying contour in the global ``cntrs``
    and the contour hierarchy in the global ``hier``.

    Returns [(x, y)] for the biggest qualifying blob, or [] when none is
    large enough.
    """
    global cntrs, hier
    [conts, hier] = cv2.findContours(im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    # Trackbar read kept for its UI side effect; the values are unused here.
    high_bnd, low_bnd = getTrack("", FilterWindowName)
    cntrs = []
    max_area = 0
    max_ind = -1
    for i, cntr in enumerate(conts):
        ara = cv2.contourArea(cntr)
        if 1000 < ara:
            if ara > max_area:
                max_area = ara
                max_ind = i
            cntrs.append(cntr)

    # NOTE(review): the original also built a per-contour centroid list
    # that was never used (and could divide by zero); that dead work has
    # been removed.
    if max_ind != -1:
        mu = cv2.moments(conts[max_ind])
        mx = mu['m10'] / mu['m00']
        my = mu['m01'] / mu['m00']

        return [(mx, my)]
    return []
    def detect_big_objects(self, img):
        """Scan the frame for large orange objects via Hu-moment matching."""
        # Only run every `interval_big` frames.
        if self.frame % interval_big:
            return
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)

        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            # Ignore anything smaller than 100x100 px.
            if w < 100 and h < 100:
                continue
            # Log-scaled Hu moments: a scale/rotation tolerant shape signature.
            hu = cv2.HuMoments(cv2.moments(cnt))
            signed_log = -np.sign(hu) * np.log10(np.abs(hu))
            sig = [signed_log[i][0] for i in range(7)]
            M = cv2.moments(cnt)
            cX = x + (w / 2)
            cY = y + (h / 2)
            color = img[cY, cX]
            # Distance to the closest stored sample signature.
            dst = float("inf")
            for sample in self.samples_big:
                candidate = sum(abs(sig - sample))
                if candidate < dst:
                    dst = candidate

            cv2.putText(self.right_image, "dst:" + str(dst.astype(int)), (x, y - 10), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), thickness=1)
            if dst < 15:
                cv2.putText(self.right_image, "BIG: ORANGE", (x + w, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 255, 255), thickness=2)
                self.object_detected(cnt, 'ORANGE')
                cv2.drawContours(self.right_image, [cnt], 0, (255, 255, 255), 2)
def getContours(image):
    """Locate the pupil contour in a binary eye image.

    Inspects the 10 largest contours and keeps the biggest one whose area
    lies in (600, 5000) and whose centroid falls inside the global search
    rectangle (topLeftCorner* / bottomRightCorner*).

    Returns (centreX, centreY, contour); all three are None when no
    contour qualifies.
    """
    global mask

    # OpenCV 3.x findContours returns (image, contours, hierarchy).
    _, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Only the ten largest candidates are worth inspecting.
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:10]

    mainContour = None
    contourCentreX = None
    contourCentreY = None

    maxArea = 0.0

    # Narrow the candidates down to the pupil by area and location.
    for c in cnts:
        area = cv2.contourArea(c)
        # ensure the correct contour is detected
        if area > maxArea and area > 600 and area < 5000:
            # Compute the moments once and reuse them for the centroid
            # (the original recomputed cv2.moments on acceptance).
            M = cv2.moments(c)
            cX = int(M['m10'] / M['m00'])
            cY = int(M['m01'] / M['m00'])
            # Accept only centroids inside the configured search window.
            if cX >= topLeftCornerX and cY >= topLeftCornerY and cX <= bottomRightCornerX and cY <= bottomRightCornerY:
                maxArea = area
                mainContour = c
                contourCentreX = cX
                contourCentreY = cY

    return contourCentreX, contourCentreY, mainContour
Exemple #9
0
    def __grabAtoms(self, image):
        """Segment the image and return the centroids of the detected atoms.

        Two passes over the segmentation: first, each non-degenerate
        contour is filled with its convex hull (closing holes) while
        zero-mass contours are erased; then contours are re-extracted and
        the centroid of every non-degenerate one is collected.

        Side effects: stores the cleaned segmentation in self.segImg, the
        centroids in self.points and the contours in self.contours.
        Returns self.points.
        """
        from scipy.spatial import ConvexHull

        segImg = self.segmenter.segment(image)
        contours, _ = cv2.findContours(segImg.copy(),
                                        cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_NONE)

        for cnt in contours:
            M = cv2.moments(cnt)
            if M['m00'] > 0.0:
                # Fill the contour's convex hull to close internal holes.
                c = np.squeeze(cnt)
                cv2.fillConvexPoly(segImg, c[ConvexHull(c).vertices], 255)
            else:
                # Degenerate (zero-mass) contour: erase it.
                cv2.fillConvexPoly(segImg, cnt, 0)

        # Re-extract contours from the hull-filled segmentation.
        contours, _ = cv2.findContours(segImg.copy(),
                                        cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_NONE)

        conts = []
        centers = []
        for cnt in contours:
            M = cv2.moments(cnt)
            if M['m00'] > 0.0:
                # Integer centroid (m10/m00, m01/m00).
                centers.append(np.array((int(M['m10']/M['m00']), int(M['m01']/M['m00']))))
                conts.append(cnt)

        self.segImg = segImg
        self.points = np.array(centers)
        self.contours = np.array(conts)
        return self.points
    def detect_drop_zone(self, img):
        """Look for the drop zone by matching Hu-moment signatures."""
        # Already tracking a live drop zone? Nothing to do.
        for obj in ObjectList:
            if obj.color == 'DROP_ZONE' and obj.islost(tracking_timeout) == False:
                return
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)

        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            # Reject candidates that are too small or too large.
            if w < 100 and h < 100:
                continue
            if w > 600 and h > 600:
                continue
            # Log-scaled Hu moments: a scale/rotation tolerant shape signature.
            hu = cv2.HuMoments(cv2.moments(cnt))
            signed_log = -np.sign(hu) * np.log10(np.abs(hu))
            sig = [signed_log[i][0] for i in range(7)]
            M = cv2.moments(cnt)
            cX = x + (w / 2)
            cY = y + (h / 2)
            color = img[cY, cX]
            # Distance to the closest stored drop-zone signature.
            dst = float("inf")
            for sample in self.samples_dropzone:
                candidate = sum(abs(sig - sample))
                if candidate < dst:
                    dst = candidate

            cv2.putText(self.right_image, "dst:" + str(dst.astype(int)), (x, y - 10), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), thickness=1)
            if dst < 45:
                cv2.putText(self.right_image, "DROP_ZONE", (x + w, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 255, 0), thickness=2)
                self.object_detected(cnt, 'DROP_ZONE')
                cv2.drawContours(self.right_image, [cnt], 0, (255, 0, 0), 2)
    def run(self, rect, cur_frame, next_frame):
        """Shift the tracking window by the centroid displacement between
        two consecutive frames.

        rect is (x, y, w, h).  The ROI centroid is computed in both
        frames (via image moments) and the window centre is translated by
        the centroid delta.

        NOTE(review): the computed ``new_rect`` is only printed, never
        returned or stored — callers currently get None (the trailing
        ``pass`` is dead code).  Confirm whether a return was intended.
        Also raises ZeroDivisionError if an ROI has zero mass (m00 == 0).
        """
        x, y, w, h = rect
        cur_roi = PostProcessing.get_roi_from_images(rect, cur_frame)
        center_of_window = (x + (w / 2), y + (h / 2))

        # compute centroid of current frame
        cur_moment = cv2.moments(cur_roi)
        cx = x + int(cur_moment['m10'] / cur_moment['m00'])
        cy = y + int(cur_moment['m01'] / cur_moment['m00'])
        cur_frame_centroid = (cx, cy)

        # compute centroid of next frame with current windows
        cur_roi_next = PostProcessing.get_roi_from_images(rect, next_frame)
        cur_moment_next = cv2.moments(cur_roi_next)
        next_cx = x + int(cur_moment_next['m10'] / cur_moment_next['m00'])
        next_cy = y + int(cur_moment_next['m01'] / cur_moment_next['m00'])
        next_frame_centroid = (next_cx, next_cy)

        # calculate distance between current frame centroid and next frame centroid
        x0, y0 = cur_frame_centroid
        x1, y1 = next_frame_centroid
        xwin, ywin = center_of_window
        new_center_of_window = ((xwin + (x1 - x0)), (ywin + (y1 - y0)))
        new_rect = (new_center_of_window[0] - (w / 2), new_center_of_window[1] - (h / 2), w, h)
        print new_rect

        pass
 def locate_color(self,thresh, cimg):
     """Find centroids of large blobs in a thresholded image.

     Draws each accepted contour and its centroid onto cimg, shows the
     result in the 'contors' window, and returns the list of (cx, cy)
     centroids.  Centres within 5 px (both axes) of an already accepted
     centre are suppressed as duplicates.
     """
     centers = []
     ret,gray = cv2.threshold(thresh,127,255,0)
     mask = np.zeros(gray.shape,np.uint8)
     contours, hier = cv2.findContours(gray,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
     for cnt in contours:
         # Compute the moments once (the original recomputed them).
         M = cv2.moments(cnt)
         if M['m00'] != 0 and 1000<cv2.contourArea(cnt):
             cv2.drawContours(cimg,[cnt],0,(0,255,0),2)
             cv2.drawContours(mask,[cnt],0,255,-1)
             cx = int(M['m10']/M['m00'])
             cy = int(M['m01']/M['m00'])
             # BUGFIX: use absolute distance.  The original compared the
             # signed differences (center - cx < 5), so any centre far to
             # the left/above was wrongly treated as "too close".
             tooClose = False
             for center in centers:
                 if abs(center[0] - cx) < 5 and abs(center[1] - cy) < 5:
                     tooClose = True
             if not tooClose:
                 centers.append((cx,cy))
                 cv2.circle(cimg,(cx,cy),2,(0,0,255),3)
     cv2.imshow('contors',cimg)
     cv2.waitKey(3)
     return centers
def group_contours(contours):
	"""Cluster contours whose centroids lie within 110 px of each other.

	A contour joins the first existing group containing a member whose
	centroid is closer than 110 px; otherwise it starts a new group.

	Returns a list of groups (each a list of contours), or an empty
	numpy array when ``contours`` is empty.
	"""
	if len(contours):
		items = []
		for cnt in contours:
			M = cv2.moments(cnt)
			cx = int(M['m10']/M['m00'])
			cy = int(M['m01']/M['m00'])
			# BUGFIX: reset the flag for every contour.  The original set
			# it once for the whole loop, so after the first match every
			# later unmatched contour was silently dropped instead of
			# starting a new group.
			found = False
			for group in items:
				for member in group:
					iM = cv2.moments(member)
					icx = int(iM['m10']/iM['m00'])
					icy = int(iM['m01']/iM['m00'])
					# Euclidean distance between centroids.
					d = np.sqrt(math.pow(icx-cx, 2) + math.pow(icy-cy, 2))
					if d < 110:
						group.append(cnt)
						found = True
						break
				if found:
					break
			if not found:
				# No nearby group: start a new one.
				items.append([cnt])
		return items
	else:
		return np.array([])
Exemple #14
0
	def __detectLineBlob(self,croppedImage):
		"""Return [cx, cy] of the largest non-degenerate contour, or None."""
		image, contours, hierarchy = cv2.findContours(croppedImage,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

		if len(contours) == 0:
			return None

		# Largest contour whose mass (m00) is non-zero.
		best_cnt = None
		max_area = 0
		for candidate in contours:
			candidate_area = cv2.contourArea(candidate)
			if cv2.moments(candidate)["m00"] != 0 and candidate_area > max_area:
				max_area = candidate_area
				best_cnt = candidate

		if best_cnt is None:
			return None

		# Centroid of the winner; fall back to (0, 0) on zero mass.
		M = cv2.moments(best_cnt)
		cx, cy = 0, 0
		if M["m00"] != 0:
			cx = int(M["m10"] / M["m00"])
			cy = int(M["m01"] / M["m00"])
		return [cx, cy]
def green_centroid(image, lower, upper):
    """Centroid of the pixels inside [lower, upper] in HSV space.

    Tries three masks in order, each blanking a different top-left
    region so spurious detections there are ignored, and stops at the
    first with non-zero mass:

      1. rows 40:100, cols 0:150 zeroed
      2. rows 70:100, cols 0:150 zeroed
      3. nothing zeroed

    Returns (area, cx, cy); cx and cy are None when even the unmasked
    image contains no matching pixels (area == 0).  The original spelled
    this out as three nested copies of the same block.
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    cx = None
    cy = None
    # Successively smaller exclusion zones; None means "no exclusion".
    for rows in ((40, 100), (70, 100), None):
        mask = cv2.inRange(hsv, lower, upper)
        if rows is not None:
            mask[rows[0]:rows[1], 0:150] = 0
        M = cv2.moments(mask)
        area = M['m00']
        if area > 0:
            cx = int(M['m10'] / area)
            cy = int(M['m01'] / area)
            break
    return area, cx, cy
Exemple #16
0
def findBotPose(bgr,point):
    """Estimate the bot heading relative to ``point``.

    The bot carries a red and a blue circle forming an arrowhead
    (red--blue-->) pointing along its direction of travel.

    Returns (theta_diff, centre): theta_diff is the angle in degrees
    between the red->blue axis and the red->point vector, centre is the
    midpoint of the two markers.

    NOTE(review): if either marker mask is empty its centroid stays
    None and the arithmetic below raises TypeError.
    """
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)

    def _mask_centroid(lo, hi):
        # Integer centroid of the thresholded mask, or (None, None).
        m = cv2.moments(cv2.inRange(hsv, lo, hi))
        if m['m00'] != 0:
            return int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])
        return None, None

    # red marker
    c1_x, c1_y = _mask_centroid((0, 170, 0), (14, 255, 255))
    # blue marker
    c2_x, c2_y = _mask_centroid((140, 94, 74), (199, 255, 148))

    # Angle between (marker axis) and (red -> target point), via the
    # dot-product / cosine formula.
    dot = float((c2_y-c1_y)*(point[1]-c1_y)+(c2_x-c1_x)*(point[0]-c1_x))
    axis_len = math.sqrt((c2_x-c1_x)**2+(c2_y-c1_y)**2)
    target_len = math.sqrt((point[0]-c1_x)**2+(point[1]-c1_y)**2)
    theta_diff = math.acos(dot/(axis_len*target_len))*180/math.pi

    return theta_diff,((c1_x+c2_x)/2,(c1_y+c2_y)/2)
def bot_position(hsv):
    """Locate the two bot markers in an HSV frame.

    Thresholds with a fixed colour range, takes the two largest
    contours, draws their centroids onto the global ``frame`` and stores
    them in the global ``Bot_position`` array (index 0 = largest blob,
    index 1 = second largest).  Raises IndexError when fewer than two
    contours are found and ZeroDivisionError on a zero-mass contour.
    """
    bot_lower = np.array([0,45,255])
    bot_upper = np.array([40,255,255])
    
    #####front end masking and centroid
    mask = cv2.inRange(hsv,bot_lower, bot_upper)
    contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # Keep only the two largest blobs.
    contours=sorted(contours, key = cv2.contourArea, reverse = True)[:2]
    #contours,length=areacon(contours,700,300)
    #contours=sorted(contours, key = cv2.contourArea, reverse = True)[:length]
    #cv2.drawContours(frame,contours,-1,(100,100,255),1)
    cv2.imshow('bot',mask)
    #print "len ",len(contours)
    # Centroid of the largest blob (magenta dot).
    M = cv2.moments(contours[0])
    cx1 = int(M['m10']/M['m00'])
    cy1 = int(M['m01']/M['m00'])
    cv2.circle(frame,(cx1,cy1), 5, (255,0,255), -1)
    Bot_position[0][0]=cx1
    Bot_position[0][1]=cy1
    #print cx1,cy1
    #print Bot_position[0][0],
    # Centroid of the second largest blob (red dot).
    M = cv2.moments(contours[1])
    cx2 = int(M['m10']/M['m00'])
    cy2 = int(M['m01']/M['m00'])
    cv2.circle(frame,(cx2,cy2), 5, (0,0,255), -1)
    Bot_position[1][0]=cx2
    Bot_position[1][1]=cy2

    print cx1,cy1, "1"
    print cx2,cy2, "2"
Exemple #18
0
    def detecteRectangle(self, frame, n_blur, kernel, aire_min, aire_max, seuil_aire, n_zone, v_moy, epsi_ratio):
        """
        Detect all rectangles in an image matching the characteristics set
        in the class constructor.

        When several rectangles are found, the object's attributes are
        stored as lists (list of positions, list of areas) and
        ``estDetecte`` is set True.
        """                
        self.position = []
        self.aire = []
        self.estDetecte = False
        
        # Denoising and smoothing
        frame_denoise = cv2.medianBlur(frame,n_blur)
#        frame_blurred = cv2.GaussianBlur(frame_denoise, (n_blur, n_blur), 0)
#        cv2.imshow('frame_blurred', frame_blurred)
        # conversion to grayscale
        frame_gray = cv2.cvtColor(frame_denoise, cv2.COLOR_BGR2GRAY)
        # adaptive thresholding (inverted binary)
        th = cv2.adaptiveThreshold(frame_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, n_zone,v_moy)
#        cv2.imshow('th 1', th)
        # opening
        opening = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)
#        cv2.imshow('opening', opening)        
        
        # Compute the contours
        contours, hierarchy = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_SIMPLE)
        
        # 1st pass: keep the relevant contours (area, solidity, length ratio)
        cnt_pertinents = track.trouveObjetsProbables(opening, aire_min, aire_max, self.sMin, 1, self.rMin, self.rMax)
        
        # 2nd pass: rectangle fitting (done before the approximation —
        # after it, everything looks like a rectangle)
        rect_probables = []
        for cnt in cnt_pertinents:
#            cv2.drawContours(frame, [cnt], -1, (0,0,127),2)
           # fit a rectangle
            rect = cv2.minAreaRect(cnt)
            # area of the fitted rectangle
            aire_rect = rect[1][0] * rect[1][1]
            # area of the found contour (its m00 moment)
            M = cv2.moments(cnt)
            aire_cnt = M['m00'] 
            if aire_cnt/aire_rect > seuil_aire:
               rect_probables.append(cnt)
#               cv2.drawContours(frame, [cnt], -1, (255, 0, 0), 2)        
      
        # 3rd pass: approximate the remaining contours and keep only those with 4+ corners
        for cnt in rect_probables:
          epsilon = epsi_ratio*cv2.arcLength(cnt,True)
          approx = cv2.approxPolyDP(cnt,epsilon,True)
          if len(approx) >= 4:   
                M = cv2.moments(approx)
                cv2.drawContours(frame, [approx], -1, (255,255,255), 2)
                cx, cy = int(M['m10']/M['m00']) , int(M['m01']/M['m00'])
                cv2.circle(frame, (cx,cy), 1, (255,255,255), 3)
                self.position.append((cx, cy))
                self.aire.append(M['m00'])
                self.estDetecte = True
Exemple #19
0
 def __init__(self, mask=None, contour=None, moments=None):
     """Store image moments, computing them from a mask or contour if needed.

     Priority: explicit moments, then mask, then contour.  Raises
     ValueError when none of the three is provided.
     """
     if moments is not None:
         self.moments = moments
         return
     if mask is not None:
         self.moments = cv2.moments(mask.astype(np.uint8))
         return
     if contour is not None:
         self.moments = cv2.moments(contour)
         return
     raise ValueError('Either the mask or the moments must be given')
Exemple #20
0
def getContourWithArea(cnts, area, floor=500, ceil=1000):
    """Pick the contour whose enclosed-blob area is closest to ``area``.

    The m00 moment of a contour equals the area of the enclosed blob.
    Returns that contour only when its area lies strictly between
    ``floor`` and ``ceil``; returns None otherwise, or for an empty /
    None input.
    """
    # `is None` instead of the original `== None` comparison.
    if cnts is None or len(cnts) == 0:
        return None
    handCnt = min(cnts, key=lambda x: abs(area - cv2.moments(x)["m00"]))
    # Compute the winner's area once (the original called cv2.moments
    # three more times here).
    handArea = cv2.moments(handCnt)["m00"]
    if floor < handArea < ceil:
        return handCnt
    return None
def runPolyDetect(buffer_size):
    """Track coloured blobs from the default camera and connect them.

    Loops until 'q' is pressed: thresholds each frame with the global
    HSV range (``lower``/``upper``), finds external contours, marks each
    blob with its enclosing circle and centroid, and — when several
    blobs exceed the global ``max_radius`` — draws a closed polygon
    through their centres.  Relies on the globals erode_iter,
    dilate_iter and the maskFilterEdit() helper.
    """
    camera = cv2.VideoCapture(0)

    while True:
        grabbed, frame = camera.read()

        frame = imutils.resize(frame, width = 600)
        blur = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Threshold then clean up with erode/dilate.
        mask = cv2.inRange(hsv, lower, upper)
        mask = cv2.erode(mask, None, iterations = erode_iter)
        mask = cv2.dilate(mask, None, iterations = dilate_iter)

        # [-2] keeps the contour list across OpenCV 2/3/4 return shapes.
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

        centers = []


        if len(cnts) == 1:
            # print "One Object Found!"
            # c = max(cnts, key = cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(cnts[0])
            M = cv2.moments(cnts[0])
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

        else:
            # print "%d Objects Found!" % (len(cnts))
            for i in range(0, len(cnts)):
                ((x, y), radius) = cv2.minEnclosingCircle(cnts[i])
                M = cv2.moments(cnts[i])
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

                # Only blobs bigger than the global threshold count.
                if radius > max_radius:
                    centers.append(center)
                    cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                    cv2.circle(frame, center, 5, (0, 0, 255), -1)

            # Connect consecutive centres; close the polygon at the end.
            # (cv2.CV_AA is the legacy OpenCV 2.x anti-aliased flag.)
            for x in range(1, len(centers)):
                thickness = 1
                cv2.line(frame, centers[x], centers[x - 1], (0, 0, 255), thickness, lineType = cv2.CV_AA)
                if x == len(centers) - 1:
                    cv2.line(frame, centers[x], centers[0], (0, 0, 255), thickness, lineType = cv2.CV_AA)



        # Mirror the views for display.
        cv2.imshow("frame", cv2.flip(frame, 1))
        cv2.imshow("mask", cv2.flip(mask, 1))

        maskFilterEdit()  

        key = cv2.waitKey(1)
        if key == ord("q"):
            break
Exemple #22
0
def findIndex(cnt,contour):
    '''Return the index of ``cnt`` within the list ``contour``.

    Contours are matched by comparing their full moment dictionaries.
    Returns -1 when no match is found.'''
    target = cv2.moments(cnt)
    index = -1
    for position, candidate in enumerate(contour):
        if cv2.moments(candidate) == target:
            index = position
            break

    return index
    def removeSpatially(self, ratio=0.6):
        """Clean up detections that sit implausibly close together.

        Two passes over pairs of points closer than 60% of the mean bond
        length: first, the clearly smaller contour of each pair (mass
        below ``ratio`` times the other's) is erased from the binary
        image; then, surviving close pairs whose outlines come within
        15 px are merged by filling their joint convex hull.

        Side effects: updates self.points, self.contours and
        self.segImg.  Returns (points, contours, segImg).
        """
        from scipy.spatial.distance import cdist
        from scipy.spatial import ConvexHull
        points, contours, binImg = self.points, self.contours, self.segImg
        bondlength = self.getBLmean()

        def getViolatingPairs(points):
            # Index pairs whose distance is below 60% of the bond length.
            distMat = cdist(points, points)
            np.fill_diagonal(distMat,np.inf)
            return np.where(distMat < bondlength * 0.60)

        ir, ic = getViolatingPairs(points)

        ellipses = []
        # Pass 1: erase the much smaller member of each violating pair.
        for c1, c2 in zip(contours[ir], contours[ic]):

            M1 = cv2.moments(c1)
            M2 = cv2.moments(c2)
            sc1 = np.squeeze(c1)
            sc2 = np.squeeze(c2)

            # ind == 0 means at least one contour has zero mass.
            ind = min([M1['m00'],M2['m00']])
            if ind == 0:
                if M1['m00'] < ratio * M2['m00']:
                    cv2.fillConvexPoly(binImg, sc1, 0)
                    continue
                elif M2['m00'] < ratio * M1['m00']:
                    continue
            else:
                if M2['m00'] < ratio * M1['m00']:
                    cv2.fillConvexPoly(binImg, sc2, 0)
                    continue
                elif M1['m00'] < ratio * M2['m00']:
                    continue
        # Re-extract points/contours after the erasures.
        points, conts = self.getPoints(binImg)
        contours = np.squeeze(conts)
        ir, ic = getViolatingPairs(points)

        # Pass 2: merge remaining close pairs whose outlines nearly touch.
        for c1, c2 in zip(contours[ir], contours[ic]):
            M1 = cv2.moments(c1)
            M2 = cv2.moments(c2)
            sc1 = np.squeeze(c1)
            sc2 = np.squeeze(c2)

            cdistMat = cdist(sc1,sc2)
            if np.min(cdistMat) < 15.0:
                # Fill the convex hull of the combined point set.
                nc = np.vstack((sc1,sc2))
                hull = ConvexHull(nc)
                h = nc[hull.vertices]
                cv2.fillConvexPoly(binImg, h, 1)

        self.points, self.contours = self.getPoints(binImg)
        self.segImg = binImg
        return self.points, self.contours, self.segImg
Exemple #24
0
def check_easy_sample(left, right):
       xyz = None

       # filter image so that only purple objects show up in each image
       filter_left = Color_Filter.filter_colors(frame=left, show_images=False, verbose=False)
       filter_right = Color_Filter.filter_colors(frame=right, show_images=False, verbose=False)

       # detect edges of the purple objects in each image
       edges_left = Color_Filter.contour_color(frame=filter_left["Median Blur"][filter_left["Colors"][0]], show_images=False)
       edges_right = Color_Filter.contour_color(frame=filter_right["Median Blur"][filter_right["Colors"][0]], show_images=False)

       #cv2.imshow('LEFT', edges_left)
       #cv2.imshow('RIGHT', edges_right)

       # blur to create consistency
       #blurred_left = cv2.GaussianBlur(edges_left, (5,5), 0)
       #blurred_right = cv2.GaussianBlur(edges_right, (5,5), 0)
       
       # find image contours
       (cnts_left, _) = cv2.findContours(edges_left.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
       (cnts_right, _) = cv2.findContours(edges_right.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
       if cnts_left and cnts_right:
           
           # take the biggest contour from each image and calculate their centroids

           left_cont = max(cnts_left, key = cv2.contourArea)
           right_cont = max(cnts_right, key = cv2.contourArea)
           left_moments = cv2.moments(left_cont)
           right_moments = cv2.moments(right_cont)
           if left_moments['m00'] != 0 and right_moments['m00'] != 0:
              left_centx = int(left_moments['m10']/left_moments['m00'])
              right_centx = int(right_moments['m10']/right_moments['m00'])
              left_centy = int(left_moments['m01']/left_moments['m00'])
              right_centy = int(right_moments['m01']/right_moments['m00'])

              # calulcate distance from camera (in mm):
	      xyz = calculate_distance(left_centx, right_centx, left_centy, right_centy)
	            
              rospy.loginfo("Easy_Sample:(x,y,z): {0}".format(xyz))
	      
           else:
              rospy.loginfo("DIVIDE BY ZERO")
          
       else:
           rospy.loginfo("NO PURPLE")
           return None

       # print for testing only
       #cv2.imshow('LEFT', blurred_left)
       #cv2.imshow('RIGHT', blurred_right)

       # returns a tuple (x,y,z), or None if no sample is detected
       print "see thing at "+str(xyz)
       return xyz
def dec(img,color_low,color_upper,x,y):
    """Estimate the heading of a two-LED marker relative to point (x, y).

    Thresholds with [color_low, color_upper], takes the two largest
    blobs as the LEDs, and computes the angle (degrees, in (-180, 180])
    of the line through their centres, adjusted by which side of that
    line (x, y) falls on.

    Returns (dec, cx1, cy1, cx2, cy2); values are NaN when fewer than
    two sufficiently large blobs are found or a blob is degenerate.

    NOTE(review): the mask is built from the global ``hsv``, not the
    ``img`` parameter, which is otherwise unused — looks like a latent
    bug; confirm against the caller.
    """
    area = [0.0,0.0]
    indmax1 = 0
    indmax2 = 0
    dec = np.NaN
    cx1 = np.NaN
    cy1 = np.NaN
    cx2 = np.NaN
    cy2 = np.NaN

    mask = cv2.inRange(hsv,color_low,color_upper)
#     cv2.imshow('mask',mask)

    contours, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    # Rank the contours by area and remember the two largest.
    if(len(contours) > 1):
        area = range(len(contours))
        for index in range(len(contours)):
            area[index] = cv2.contourArea(contours[index])

        maxsort = np.argsort(area)
        indmax1 = maxsort[len(maxsort)-1]
        indmax2 = maxsort[len(maxsort)-2]
    if(area[indmax1] > 1.0 and area[indmax2]  > 1.0 ):
        M1 = cv2.moments(contours[indmax1])
        M2 = cv2.moments(contours[indmax2])
        # centre of LED 1
        if(M1['m00'] != 0.0 and M2['m00'] != 0.0):
            cx1 = M1['m10']/M1['m00']
            cy1 = M1['m01']/M1['m00']
            # centre of LED 2
            cx2 = M2['m10']/M2['m00']
            cy2 = M2['m01']/M2['m00']
            print("1 = {0:.2f},{1:.2f}  2 = {2:.2f},{3:.2f}".format(cx1,cy1,cx2,cy2))
            # Vertical LED axis: sign of the angle depends only on side.
            if(cx1 == cx2):
                if(cx1 >= x):
                    dec = 90.0
                else:
                    dec = -90.0
            else:
                # Line through the LEDs: y = a*x + b; arctan gives the
                # base angle, then the quadrant is fixed by which side
                # of the line (x, y) lies on.
                a = (cy1-cy2)/(cx1-cx2)
                b = cy1 - a * cx1
                dec = np.arctan(a)*180.0/3.14159
                if(y < (a*x+b) and dec < 0.0):
                    dec = 180.0 + dec
                elif(y < (a*x+b) and dec > 0.0):
                    dec = -180.0 + dec
                elif(dec == 0.0 and y > cy1):
                    dec = 180.0


    print(dec)
    return dec,cx1,cy1,cx2,cy2
Exemple #26
0
 def find_bot(self, frame):
     """Find the bot (a red blob) in a frame and update its tracked position.

     First detection (self.bx < 0) picks the largest red contour; on
     later frames the contour whose centroid is CLOSEST to the last
     known position wins.  Updates self.bx/self.by (and the previous
     position self.lbx/self.lby), draws the bot, the target and the
     arena polygon onto the frame, and returns the annotated frame.
     """
     # FIXME:
     #  - use multiple methods and get probability for each position
     #  - bigger area (or similar to last)
     #  - distance from last
     #  - motion
     #  - last position, speed and direction get "wannabe" current position
     #  - get a probability for each possibility and select max
     frame2 = cv2.blur(frame, (3, 3))
     hsv = cv2.cvtColor(frame2, cv2.COLOR_RGB2HSV)
     #mask = cv2.inRange(hsv, np.array([60,40,40], dtype=np.uint8), np.array([75,255,255], dtype=np.uint8))
     #mask1 = cv2.inRange(hsv, np.array([0,135,135], dtype=np.uint8), np.array([15,255,255], dtype=np.uint8))
     #mask2 = cv2.inRange(hsv, np.array([159,135,135], dtype=np.uint8), np.array([179,255,255], dtype=np.uint8))
     # Red wraps around the hue axis, so two ranges are combined.
     mask1 = cv2.inRange(hsv, np.array([0,40,40], dtype=np.uint8), np.array([15,255,255], dtype=np.uint8))
     mask2 = cv2.inRange(hsv, np.array([159,40,40], dtype=np.uint8), np.array([179,255,255], dtype=np.uint8))
     mask = mask1 | mask2
     # find contours in the threshold image
     contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
     # finding contour with maximum area and store it as best_cnt
     best_cnt = None
     if (self.bx < 0):
         # No previous position: take the biggest red blob.
         max_area = 0
         for cnt in contours:
             area = cv2.contourArea(cnt)
             if area > max_area:
                 max_area = area
                 best_cnt = cnt
     else:
         # Despite its name, maxdist tracks the MINIMUM distance to the
         # last known position (-1 means "nothing seen yet").
         maxdist = -1
         for cnt in contours:
             M = cv2.moments(cnt)
             if M['m00'] == 0:
                 continue
             bx, by = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
             print "1", bx, by, self.bx, self.by
             d = np.linalg.norm(np.array((bx, by)) - np.array((self.bx, self.by)))
             print "2", d
             if (d < maxdist)or(maxdist < 0):
                 maxdist = d
                 best_cnt = cnt
         print "go", maxdist
     if self.refframe_vision is False:
         self.dorefframe_vision(frame)
         self.refframe_vision = True
     if (best_cnt is not None)and(not TEST):
         # finding centroids of best_cnt and draw a circle there
         M = cv2.moments(best_cnt)
         (self.lbx, self.lby) = (self.bx, self.by)
         self.bx, self.by = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
     cv2.circle(frame, (self.bx, self.by), 2, (255, 0, 0), -1)
     cv2.circle(frame, (self.targetx, self.targety), 2, (0, 255, 0), -1)
     cv2.polylines(frame, self.poly, True, [0, 255, 0])
     return frame
def masking_colors(img,blue1,green1,red1,blue2,green2,red2):

	param1 = [blue1,green1,red1]                     ##B,G,R values higher and lower range
	param2 = [blue2,green2,red2]

	lower = np.array(param1)
	upper = np.array(param2)
	mask = cv2.inRange(img, lower, upper)
	#img= cv2.bitwise_and(img, img, mask=mask)
	#cv2.imshow('img',img)
	cv2.imshow('mask',mask)
	cv2.imwrite('Mask.jpg',mask)
	
	#gray = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
	ret,thresh = cv2.threshold(mask,127,255,0)
	contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
	
	
	
	for i in xrange(0,len(contours)):
		print len(contours)
		print 'Area_first',cv2.contourArea(contours[i])
		cv2.drawContours(img,contours,-1,(0,0,255),5)

		M = cv2.moments(contours[i])
		cx = int(M['m10']/M['m00'])
		cy = int(M['m01']/M['m00'])
		#print "Centroid = ", cx, ", ", cy
		
		if cy<125:
			cv2.circle(img,(cx,cy), 2, (0,0,0), -1)
			for j in range(0,len(contours)):
				delivery_position_pair = []
				print 'Area',cv2.contourArea(contours[i])-cv2.contourArea(contours[j])
				if (cv2.contourArea(contours[i])-cv2.contourArea(contours[j]))<80 and (cv2.contourArea(contours[i])-cv2.contourArea(contours[j]))>-80:

					M1= cv2.moments(contours[j])
					cx1 = int(M1['m10']/M1['m00'])
					cy1 = int(M1['m01']/M1['m00'])
					print "                        Centroid = ", cx1, ", ", cy1
					if cx==cx1 and cy==cy1:
						continue
					else:
						delivery_position_pair.append((cx,cy))
						delivery_position_pair.append((cx1,cy1))

						total_paths.append(delivery_position_pair)


	#print total_paths
	cv2.imshow('img',mask)
	return
Exemple #28
0
    def _getMoments( self, imgFiltered, image ):
        """Compute the image moments used for tracking.

        When contour mode is enabled, the external contours of the filtered
        image are stored on *image*, the largest one is kept as
        ``image.bestcontour`` and its moments are returned; ``False`` means
        no contour was found.  With contour mode off, the moments of the
        whole threshold image are returned instead.
        """
        if self.use_contours:
            found, _ = cv2.findContours( imgFiltered,
                                         cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE )
            image.contours = found
            if not found:
                return False
            # the largest-area contour wins
            image.bestcontour = max( found, key=cv2.contourArea )
            return cv2.moments( image.bestcontour, 0 )
        # Contour mode off: moments straight from the threshold image.
        return cv2.moments( imgFiltered, 0 )
def play(img):
    '''
    img -- a single test image (BGR numpy array)

    Returns the single character specifying the target that was hit,
    one of 'L'..'V', determined by where the extrapolated line through
    the two detected gun points crosses the balloon row.
    '''
    #thresholding the image to find the contours of the gun points:
    #a dark threshold isolates one marker, a bright one the other
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh2 = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
    ret, thresh3 = cv2.threshold(gray, 145, 255, cv2.THRESH_BINARY)

    #smoothing the image so each marker yields one clean contour
    blur3 = cv2.medianBlur(thresh2, 33)
    ret, thresh = cv2.threshold(blur3, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #getting centroids of both the contours
    M = cv2.moments(contours[1])
    cx = int(M['m10']/M['m00'])
    cy = int(M['m01']/M['m00'])

    blur2 = cv2.medianBlur(thresh3, 25)
    ret, thresh = cv2.threshold(blur2, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    M = cv2.moments(contours[1])
    cx1 = int(M['m10']/M['m00'])
    cy1 = int(M['m01']/M['m00'])

    #extrapolate the line through the two points 5x beyond the second one
    m1 = cy - cy1
    m2 = cx - cx1
    cv2.line(img, (cx, cy), (cx1 - 5*m2, cy1 - 5*m1), (255, 0, 0), 3)

    # The balloons are arranged symmetrically in 70-pixel-wide bins starting
    # at x == 65: x < 65 -> 'L', 65..134 -> 'M', ..., 695 and beyond -> 'V'.
    # The original exclusive elif chain (65 < x < 135, ...) left `letter`
    # unbound on exact bin boundaries and for x >= 765; the computed bin
    # below covers every value while matching the original bins elsewhere.
    aim = cx1 - 5*m2
    if aim < 65:
        letter = 'L'
    else:
        letter = chr(ord('L') + min((aim - 65) // 70 + 1, 10))
    return letter
def grid_to_arrays(img):
    """Decode a photographed 6x7 grid into link-presence arrays.

    The image is resized to 300x300 and binarised twice; vertical grid
    lines are painted out to expose only horizontal links (and vice
    versa).  Every surviving contour above an area threshold marks one
    link, located by its centroid and mapped to array indices.

    Returns (horizontal_links, vertical_links): 7x6 and 6x7 nested lists
    of 0/1 flags.

    NOTE(review): this code relies on Python 2 integer division (w/25,
    h*w/750 ...); one Decimal scale factor below is 6.2 where its twin is
    6.24 -- looks like a typo, confirm before relying on exact placement.
    """
    img = cv2.resize(img,(300,300), interpolation = cv2.INTER_CUBIC) 
    h,w,c = img.shape                                             #identify the height and width of the given image
    imghl=img.copy()                                              #to avoid changes in original image
    img_1=img.copy()
    gray = cv2.cvtColor(imghl,cv2.COLOR_BGR2GRAY)
    ret,thresh2 = cv2.threshold(gray,90,255,cv2.THRESH_BINARY_INV) #convert image to binary form
    ret,thresh3 = cv2.threshold(gray,90,255,cv2.THRESH_BINARY_INV)
    i=0
    while i<7:                                                   #removing vertical links to find horizontal links using draw-image
        cv2.line(thresh2,(int(round(Decimal(i)*Decimal(w)/Decimal(6.24)+Decimal(w)/Decimal(62.4))),0),(int(round(Decimal(i)*Decimal(w)/Decimal(6.24)+Decimal(w)/Decimal(62.4))),h),(0,0,0),w/25)
        i=i+1
    ret,thresh55 = cv2.threshold(thresh2,127,255,cv2.THRESH_BINARY_INV)
    contours, hierarchy = cv2.findContours(thresh2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(img_1,contours,-1,(0,255,0),3)                   #contours to detect horizontal links only
    i=0
    hl=[[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]#initializing horizontal links array
    while i<len(contours):                                      #determination of contour centroids of horizontal links
        area=cv2.contourArea(contours[i])
        # area filter also keeps m00 (the area moment) positive, so the
        # centroid divisions below are safe for the contours that pass
        if area>(h*w/750):
            M = cv2.moments(contours[i])
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            cx=cx-(63*w/650)
            m=int(round(Decimal(6)*Decimal(cy)/Decimal(h)))       #calculating indices of links using their centroids
            n=int(round(Decimal(6)*Decimal(cx)/Decimal(w)))
            hl[m][n]=1                                              #assigning 1 to indices of the horizontal link array when link is detected
        i=i+1
    j=0
    while j<7:                                                  #removing horizontal links to find vertical links using draw-image
        cv2.line(thresh3,(0,int(round(Decimal(j)*Decimal(h)/Decimal(6.2)+Decimal(h)/Decimal(62.4)))),(w,int(round(Decimal(j)*Decimal(h)/Decimal(6.24)+Decimal(h)/Decimal(62.4)))),(0,0,0),h/25)
        j=j+1
    ret,thresh56 = cv2.threshold(thresh3,127,255,cv2.THRESH_BINARY_INV) 
    contours, hierarchy = cv2.findContours(thresh3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(img,contours,-1,(0,255,0),3)                #contours to detect vertical links only
    i=0
    vl=[[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0]] #initializing vertical links array
    while i<len(contours):
        area=cv2.contourArea(contours[i])
        if area>(h*w/750):#determination of contour centroids of vertical links
            M = cv2.moments(contours[i])
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            cy=cy-(63*h/650)
            m=int(round(Decimal(6)*Decimal(cy)/Decimal(h)))       #calculating indices of links using their centroids
            n=int(round(Decimal(6)*Decimal(cx)/Decimal(w)))
            vl[m][n]=1                                              #assigning 1 to indices of the vertical link array when link is detected
        i=i+1
    horizontal_links=hl
    vertical_links=vl   
    return horizontal_links, vertical_links
    mask = cv2.inRange(hsv, lower_blue, upper_blue)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    #converting to grayscale
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    #converting to binary
    ret, thresh = cv2.threshold(gray, 127, 255, 0)
    # Remove the noise
    #opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    # Repairing the object
    #closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    #finding contours
    #contours, hierarchy = cv2.findContours(thresh,1,2)
    #cnt = contours[0]
    M = cv2.moments(thresh)
    #print M
    if M['m00'] == 0:
        M['m00'] = 0.01
    else:
        M['m00'] = M['m00']

    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    print cx
    print cy
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    #cv2.imshow('res',opening)
    k = cv2.waitKey(5) & 0xFF
    if k == 10:
def compute_center(contours, i):
    """Return the centroid (cx, cy) of contours[i] from its image moments."""
    moments = cv2.moments(contours[i])
    centre_x = int(moments['m10'] / moments['m00'])
    centre_y = int(moments['m01'] / moments['m00'])
    return centre_x, centre_y
Exemple #33
0
def dir():
    """Watch the webcam, track the largest foreground blob inside the
    capture region and classify its motion.

    Returns one of "Left", "Right" or "Up": the majority vote over the
    first five motion events observed.  Blocks until five events have
    been collected; ESC breaks the loop (falling out without a return).

    NOTE(review): shadows the `dir` builtin -- name kept for callers.
    """
    cap_region_x_begin=0.6
    cap_region_y_end=0.6
    threshold = 30
    blurValue = 41
    bgSubThreshold = 50
    learningRate = 0

    isBgCaptured = 0
    triggerSwitch = False
    counter = 0
    q = []      # last five tracked blob centers
    out = []    # detected motion events ("Left"/"Right"/"Up")


    def printThreshold(thr):
        # trackbar callback: just report the new threshold
        print("! Changed threshold to "+str(thr))

    def removeBG(frame):
        # background-subtract, erode noise, keep only foreground pixels
        fgmask = bgModel.apply(frame,learningRate=learningRate)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(frame, frame, mask=fgmask)
        return res

    def push(q, x):
        # fixed-size queue of 5: shift left, then append at the end.
        # (The original assigned q[4] = x INSIDE the shift loop, which also
        # overwrote q[3] with x and silently dropped one sample.)
        if len(q) < 5:
            q.append(x)
        else:
            for i in range(4):
                q[i] = q[i+1]
            q[4] = x


    camera = cv2.VideoCapture(0)
    camera.set(10,200)
    cv2.namedWindow('trackbar')
    cv2.createTrackbar('trh1', 'trackbar', threshold, 100, printThreshold)

    while camera.isOpened():
        ret, frame = camera.read()
        threshold = cv2.getTrackbarPos('trh1', 'trackbar')
        frame = cv2.bilateralFilter(frame, 5, 50, 100)  # edge-preserving smoothing
        frame = cv2.flip(frame, 1)  # mirror so motion matches the user's view
        cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
                     (frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2)
        cv2.imshow('original', frame)

        if isBgCaptured == 1:
            # crop to the capture region and binarise the foreground
            img = removeBG(frame)
            img = img[0:int(cap_region_y_end * frame.shape[0]),
                        int(cap_region_x_begin * frame.shape[1]):frame.shape[1]]

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)
            ret, thresh = cv2.threshold(blur, threshold, 255, cv2.THRESH_BINARY)

            thresh1 = copy.deepcopy(thresh)
            _,contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            length = len(contours)
            if length > 0:
                c = max(contours, key=cv2.contourArea)
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                M = cv2.moments(c)
                if M["m00"] != 0:
                    # centroid of the largest blob; degenerate blobs are
                    # skipped instead of reusing a stale/unbound `center`
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    push (q, center)
                if len(q)==5:
                    # compare oldest vs newest x for horizontal motion
                    if (q[0][0] - q[-1][0]) < -30:
                        if len(out) < 5:
                            out.append("Right")
                    elif (q[0][0] - q[-1][0]) > 30:
                        if len(out) < 5:
                            out.append("Left")
                    # large vertical displacement in either direction is "Up"
                    if abs(q[0][1] - q[-1][1]) > 60:
                        if len(out) < 5:
                            out.append("Up")
                print (out)
                if len(out) == 5:
                    return(max(out, key = out.count))

                # `c` is already the max-area contour; the original re-scanned
                # every contour here to find the same one
                res = c
                hull = cv2.convexHull(res)
                drawing = np.zeros(img.shape, np.uint8)
                cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
                cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
                # show only when a drawing was produced -- previously
                # `drawing` was unbound on frames with no contours
                cv2.imshow('output', drawing)

        k = cv2.waitKey(10)
        if k == 27:  # ESC: abort
            break
        elif k == ord('r'):
            bgModel = None
            triggerSwitch = False
            isBgCaptured = 0
            print ('!!!Reset BackGround!!!')

        if isBgCaptured == 0:
            # (re)capture the background model after a 1 s settle delay
            time.sleep(1)
            bgModel = cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold)
            isBgCaptured = 1
            print ('Background Captured')
def calibrate(serPort):
    """Find the pixel y-coordinates of the striker's two travel extremes.

    serPort -- an already-open serial connection to the Arduino.

    Tracks the green-duct-tape striker with the webcam.  Phase one waits
    until the striker stops moving left for three consecutive checks and
    sends command '!' over serial; phase two waits (after motion is first
    seen) until it stops moving right and sends '?'.

    Returns (step0y, step2100y): the pixel y-coordinates observed at the
    0-step and 2100-step positions.
    """
    #start serial connection with arduino
    ser = serPort #serial.Serial(serPort,9600)

    # define the lower and upper boundaries of the green duct tape and the puck
    # ball in the HSV color space, then initialize the
    # list of tracked points
    greenDuctLower = (47, 73, 0)
    greenDuctUpper = (70, 190, 255)
    calibrationQ = deque(maxlen=5)

    #grab the reference to the webcam
    vs = VideoStream(src=0).start()


    c = 0 #used to count frames
    step0y = step2100y = 0 #variables to store y coordinates of table extremities


    #move the puck to the 0 step position in the y direction
    while True:
        # grab the current frame
        frame = vs.read()

        # resize the frame, blur it, and convert it to the HSV
        # color space
        frame = imutils.resize(frame, width=600)
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)


        # construct a mask for the color "green", then perform
        maskGreen = cv2.inRange(hsv, greenDuctLower, greenDuctUpper)
        maskGreen = cv2.erode(maskGreen, None, iterations=2)
        maskGreen = cv2.dilate(maskGreen, None, iterations=2)

        # find contours in the puck mask and initialize the current
        # (x, y) center of the ball
        cntsGreen = cv2.findContours(maskGreen.copy(), cv2.RETR_EXTERNAL,
        	cv2.CHAIN_APPROX_SIMPLE)

        #this line to is to make this code work with multiple versions of opencv
        cntsGreen = imutils.grab_contours(cntsGreen)
        centerGreen = None


        #draw enclosing circle around green puck striker
        if len(cntsGreen) > 0:
            #display the circle and centroid
            cGreen = max(cntsGreen, key=cv2.contourArea)
            ((xGreen, yGreen), radiusGreen) = cv2.minEnclosingCircle(cGreen)
            MGreen = cv2.moments(cGreen)
            centerGreen = (int(MGreen["m10"] / MGreen["m00"]), int(MGreen["m01"] / MGreen["m00"]))

            # only proceed if the green radius meets a minimum size
            if radiusGreen > 10:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(xGreen), int(yGreen)), int(radiusGreen),
                    (0, 255, 255), 2)
                cv2.circle(frame, centerGreen, 5, (0, 0, 255), -1)
                cv2.putText(frame,str(centerGreen),centerGreen,cv2.FONT_HERSHEY_PLAIN,1.0,(255,255,255))
                calibrationQ.appendleft(centerGreen[1])
                print(calibrationQ[0])

            # striker is "stopped" after 3 consecutive non-left readings
            if len(calibrationQ) > 1:
                if calibrationQ[0] > calibrationQ[1]:
                    print("moving left")
                    c = 0
                else:
                    print("no move")
                    c = c + 1
                    if c == 3:
                        ser.write(str.encode("!\n"))
                        step0y = calibrationQ[0]
                        break

        # show the frame to our screen
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break

        # crude busy-wait delay between frames
        for i in range(1000000):
            pass

    c = 0

    print("First while loop done")
    movingFlag = 0 #flag to signal when the striker starts moving
    #move the puck to the 2100 step position in the y direction
    while True:
        # grab the current frame
        frame = vs.read()

        # resize the frame, blur it, and convert it to the HSV
        # color space
        frame = imutils.resize(frame, width=600)
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)


        # construct a mask for the color "green", then perform
        maskGreen = cv2.inRange(hsv, greenDuctLower, greenDuctUpper)
        maskGreen = cv2.erode(maskGreen, None, iterations=2)
        maskGreen = cv2.dilate(maskGreen, None, iterations=2)

        # find contours in the puck mask and initialize the current
        # (x, y) center of the ball
        cntsGreen = cv2.findContours(maskGreen.copy(), cv2.RETR_EXTERNAL,
        	cv2.CHAIN_APPROX_SIMPLE)

        #this line to is to make this code work with multiple versions of opencv
        cntsGreen = imutils.grab_contours(cntsGreen)
        centerGreen = None


        #draw enclosing circle around green puck striker
        if len(cntsGreen) > 0:
            #display the circle and centroid
            cGreen = max(cntsGreen, key=cv2.contourArea)
            ((xGreen, yGreen), radiusGreen) = cv2.minEnclosingCircle(cGreen)
            MGreen = cv2.moments(cGreen)
            centerGreen = (int(MGreen["m10"] / MGreen["m00"]), int(MGreen["m01"] / MGreen["m00"]))

            # only proceed if the green radius meets a minimum size
            if radiusGreen > 10:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(xGreen), int(yGreen)), int(radiusGreen),
                    (0, 255, 255), 2)
                cv2.circle(frame, centerGreen, 5, (0, 0, 255), -1)
                cv2.putText(frame,str(centerGreen),centerGreen,cv2.FONT_HERSHEY_PLAIN,1.0,(255,255,255))
                calibrationQ.appendleft(centerGreen[1])


            # wait until motion is first observed (5 strictly increasing
            # readings), then stop after 3 consecutive non-right readings
            if len(calibrationQ) > 4:
                if (calibrationQ[0] < calibrationQ[1]) and (calibrationQ[1] < calibrationQ[2]) and (calibrationQ[2] < calibrationQ[3]) and (calibrationQ[3] < calibrationQ[4]):
                    movingFlag = 1
                else:
                    if movingFlag == 0:
                        print("waiting")
                    else:
                        if calibrationQ[0] < calibrationQ[1]:
                            print("moving right")
                            c = 0
                        else:
                            print("no move")
                            c = c + 1

                            if c == 3:
                                ser.write(str.encode("?\n"))
                                step2100y = calibrationQ[0]
                                break


        # show the frame to our screen
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break

        # crude busy-wait delay between frames
        for i in range(1000000):
            pass

    print("second while loop done")

    #stop the camera video stream
    vs.stop()

    # close all windows
    cv2.destroyAllWindows()

    return (step0y,step2100y)
    def detect_colors(self, targ):
        """Search self.frame for the colour target described by *targ*.

        targ is a dict with keys "color_lower"/"color_higher" (HSV bounds),
        "aspect_ratio_min"/"aspect_ratio_max" and "width" (real-world size
        used for the distance estimate).

        On a match: sets self.detected, self.cX/self.cY (target centroid),
        self.d_cam_image (estimated camera-to-target distance) and
        self.t_last_seen.  In debug mode also draws an overlay and writes
        the frame to self.out.

        NOTE(review): self.detected is initialised to int 0 but set to
        bool True on detection -- truthy checks work, identity checks
        against a single type would not.
        """
        #detects a plain piece of paper on the ground, sends a command to the drone to fly to this position

        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)

        # construct a mask for the color "white", then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, targ["color_lower"], targ["color_higher"])
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        # find contours in the mask and initialize the current
        # ([-2] keeps this working across OpenCV versions with different
        # findContours return signatures)
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]

        status = "Target not Acquired"
        self.detected = 0
        for c in cnts:
            # approximate the contour; quadrilateral-ish shapes only
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, .01 * peri, True)
            if len(approx) >= 4 and len(approx) <= 6:
                #bounding box
                (x, y, w, h) = cv2.boundingRect(approx)
                #rotated rectangle
                rect = cv2.minAreaRect(approx)
                self.w_act, self.h_act = rect[1]

                #make sure width is the longer dimension
                tmp = self.h_act
                if self.h_act > self.w_act:
                    self.h_act = self.w_act
                    self.w_act = tmp

                #aspect ratio of detected shape
                aspectRatio = self.w_act / float(self.h_act)

                area = cv2.contourArea(c)
                hullArea = cv2.contourArea(cv2.convexHull(c))
                solidity = area / float(hullArea)

                #check to make sure the characteristics of the shape are what we are looking for
                keepDims = w > 25 and h > 25
                keepSolidity = True  #solidity > .9
                keepAspectRatio = aspectRatio >= targ[
                    "aspect_ratio_min"] and aspectRatio <= targ[
                        "aspect_ratio_max"]
                #print(keepDims,",",keepSolidity,",",keepAspectRatio)
                if keepDims and keepSolidity and keepAspectRatio:
                    #cv2.drawContours(image, [approx], -1, (0,0,255), 4)
                    status = "Red Acquired"
                    self.detected = True
                    #get the center of the image
                    M = cv2.moments(approx)
                    (self.cX, self.cY) = (int(M["m10"] / M["m00"]),
                                          int(M["m01"] / M["m00"]))
                    # pinhole-camera distance estimate from apparent width;
                    # assumes self.m (px per sensor unit) and self.foc
                    # (focal length) were calibrated elsewhere -- TODO confirm
                    length_side_pixels = self.w_act
                    obj_image_sensor = length_side_pixels / self.m
                    d_cam_image_estimate = targ[
                        "width"] * self.foc / obj_image_sensor
                    self.d_cam_image = d_cam_image_estimate
                    self.t_last_seen = time()
                    if self.debug_mode:
                        ### This code slows everything down by ~10Hz
                        (startX, endX) = (int(self.cX - (w * .15)),
                                          int(self.cX + (w * .15)))
                        (startY, endY) = (int(self.cY - (h * .15)),
                                          int(self.cY + (h * .15)))
                        #             #draw target bounding box (red lines)
                        cv2.line(self.frame, (startX, self.cY),
                                 (endX, self.cY), (0, 0, 255), 2)
                        cv2.line(self.frame, (self.cX, startY),
                                 (self.cX, endY), (0, 0, 255), 2)
                        #             #indicate offset from center of image (blue lines)
                        cv2.line(self.frame, (self.c_x_image, self.c_y_image),
                                 (self.c_x_image, self.cY), (255, 0, 0), 1)
                        cv2.line(self.frame, (self.c_x_image, self.cY),
                                 (self.cX, self.cY), (255, 0, 0), 1)
                        break

        if self.debug_mode:
            cv2.putText(self.frame, status, (20, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        .5, (0, 0, 255), 2)
            #disable imshow for field tests
            #cv2.imshow("Frame",image)
            self.out.write(self.frame)
Exemple #36
0
class drawings():
    """Air-canvas demo: track a coloured object via HSV trackbars and draw
    its path on the camera feed and a paint window in four colours.

    NOTE(review): everything below -- including the camera capture loop --
    runs in the CLASS BODY at class-definition time, not in a method.
    Merely defining/importing this class opens the webcam and blocks until
    the loop exits ('q').
    """
    def setValues(x):
        # trackbar callback: intentionally does nothing
        print("")

    def stackImages(scale, imgArray):
        # Tile a (possibly nested) list of images into one mosaic,
        # resizing everything to the first image's size and promoting
        # grayscale images to BGR so hstack/vstack shapes match.
        rows = len(imgArray)
        cols = len(imgArray[0])
        rowsAvailable = isinstance(imgArray[0], list)
        width = imgArray[0][0].shape[1]
        height = imgArray[0][0].shape[0]
        if rowsAvailable:
            for x in range(0, rows):
                for y in range(0, cols):
                    if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
                        imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),
                                                    None, scale, scale)
                    else:
                        imgArray[x][y] = cv2.resize(
                            imgArray[x][y],
                            (imgArray[0][0].shape[1], imgArray[0][0].shape[0]),
                            None, scale, scale)
                    if len(imgArray[x][y].shape) == 2:
                        imgArray[x][y] = cv2.cvtColor(imgArray[x][y],
                                                      cv2.COLOR_GRAY2BGR)
            imageBlank = np.zeros((height, width, 3), np.uint8)
            hor = [imageBlank] * rows
            hor_con = [imageBlank] * rows
            for x in range(0, rows):
                hor[x] = np.hstack(imgArray[x])
            ver = np.vstack(hor)
        else:
            for x in range(0, rows):
                if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                    imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,
                                             scale)
                else:
                    imgArray[x] = cv2.resize(
                        imgArray[x],
                        (imgArray[0].shape[1], imgArray[0].shape[0]), None,
                        scale, scale)
                if len(imgArray[x].shape) == 2:
                    imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
            hor = np.hstack(imgArray)
            ver = hor
        return ver

    # HSV-range trackbars for picking the tracked colour
    Color_detectors = cv2.namedWindow("Color_detectors")
    Color_detectors = cv2.createTrackbar("Upper Hue", "Color_detectors", 153,
                                         180, setValues)
    Color_detectors = cv2.createTrackbar("Upper Saturation", "Color_detectors",
                                         255, 255, setValues)
    Color_detectors = cv2.createTrackbar("Upper Value", "Color_detectors", 255,
                                         255, setValues)
    Color_detectors = cv2.createTrackbar("Lower Hue", "Color_detectors", 64,
                                         180, setValues)
    Color_detectors = cv2.createTrackbar("Lower Saturation", "Color_detectors",
                                         72, 255, setValues)
    Color_detectors = cv2.createTrackbar("Lower Value", "Color_detectors", 49,
                                         255, setValues)

    # one list of point-deques per pen colour
    bpoints = [deque(maxlen=1024)]
    gpoints = [deque(maxlen=1024)]
    rpoints = [deque(maxlen=1024)]
    ypoints = [deque(maxlen=1024)]

    blue_index = 0
    green_index = 0
    red_index = 0
    yellow_index = 0

    kernel = np.ones((5, 5), np.uint8)

    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255),
              (0, 255, 255)]  #blue,green,red,yellow
    colorIndex = 0

    # white canvas with the button strip drawn along the top
    paintWindow = np.zeros((480, 640, 3)) + 255
    paintWindow = cv2.rectangle(paintWindow, (40, 1), (140, 65), (0, 0, 0), 2)
    paintWindow = cv2.rectangle(paintWindow, (160, 1), (255, 65), colors[0],
                                -1)
    paintWindow = cv2.rectangle(paintWindow, (275, 1), (370, 65), colors[1],
                                -1)
    paintWindow = cv2.rectangle(paintWindow, (390, 1), (485, 65), colors[2],
                                -1)
    paintWindow = cv2.rectangle(paintWindow, (505, 1), (600, 65), colors[3],
                                -1)

    cv2.putText(paintWindow, "CLEAR", (49, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                (0, 0, 0), 2, cv2.LINE_AA)
    cv2.putText(paintWindow, "", (185, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(paintWindow, "", (298, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(paintWindow, "", (420, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(paintWindow, "", (520, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (150, 150, 150), 2, cv2.LINE_AA)
    #cv2.namedWindow('Paint', cv2.WINDOW_AUTOSIZE)

    # ssss: camera index; 0 also selects horizontal mirroring below
    ssss = 0
    cap = cv2.VideoCapture(ssss)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 430)

    while True:
        if ssss == 0:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)
        elif ssss == 1:
            ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        u_hue = cv2.getTrackbarPos("Upper Hue", "Color_detectors")
        u_saturation = cv2.getTrackbarPos("Upper Saturation",
                                          "Color_detectors")
        u_value = cv2.getTrackbarPos("Upper Value", "Color_detectors")
        l_hue = cv2.getTrackbarPos("Lower Hue", "Color_detectors")
        l_saturation = cv2.getTrackbarPos("Lower Saturation",
                                          "Color_detectors")
        l_value = cv2.getTrackbarPos("Lower Value", "Color_detectors")
        Upper_hsv = np.array([u_hue, u_saturation, u_value])
        Lower_hsv = np.array([l_hue, l_saturation, l_value])

        # draw the on-frame button strip
        frame = cv2.rectangle(frame, (40, 1), (140, 65), (0, 0, 0), 2)
        frame = cv2.rectangle(frame, (160, 1), (255, 65), colors[0], -1)
        frame = cv2.rectangle(frame, (275, 1), (370, 65), colors[1], -1)
        frame = cv2.rectangle(frame, (390, 1), (485, 65), colors[2], -1)
        frame = cv2.rectangle(frame, (505, 1), (600, 65), colors[3], -1)
        cv2.putText(frame, "CLEAR ALL", (49, 33), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.putText(frame, "1", (185, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, "2", (298, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, "3", (420, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, "4", (520, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (150, 150, 150), 2, cv2.LINE_AA)

        # mask the tracked colour and clean it up morphologically
        Mask = cv2.inRange(hsv, Lower_hsv, Upper_hsv)
        Mask = cv2.erode(Mask, kernel, iterations=1)
        Mask = cv2.morphologyEx(Mask, cv2.MORPH_OPEN, kernel)
        Mask = cv2.dilate(Mask, kernel, iterations=1)

        cnts, _ = cv2.findContours(Mask.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
        center = None

        if len(cnts) > 0:
            # centroid of the largest detected blob is the pen tip
            cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
            ((x, y), radius) = cv2.minEnclosingCircle(cnt)
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            M = cv2.moments(cnt)
            center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))

            if center[1] <= 65:
                # pen tip is on the button strip: pick a tool
                if 40 <= center[0] <= 140:  # Clear Button
                    bpoints = [deque(maxlen=512)]
                    gpoints = [deque(maxlen=512)]
                    rpoints = [deque(maxlen=512)]
                    ypoints = [deque(maxlen=512)]

                    blue_index = 0
                    green_index = 0
                    red_index = 0
                    yellow_index = 0

                    paintWindow[67:, :, :] = 255
                elif 160 <= center[0] <= 255:
                    colorIndex = 0  # Blue
                elif 275 <= center[0] <= 370:
                    colorIndex = 1  # Green
                elif 390 <= center[0] <= 485:
                    colorIndex = 2  # Red
                elif 505 <= center[0] <= 600:
                    colorIndex = 3  # Yellow
            else:
                if colorIndex == 0:
                    bpoints[blue_index].appendleft(center)
                elif colorIndex == 1:
                    gpoints[green_index].appendleft(center)
                elif colorIndex == 2:
                    rpoints[red_index].appendleft(center)
                elif colorIndex == 3:
                    ypoints[yellow_index].appendleft(center)
        else:
            # nothing tracked: start fresh deques so the next stroke is
            # not connected to the previous one
            bpoints.append(deque(maxlen=512))
            blue_index += 1
            gpoints.append(deque(maxlen=512))
            green_index += 1
            rpoints.append(deque(maxlen=512))
            red_index += 1
            ypoints.append(deque(maxlen=512))
            yellow_index += 1

        # replay every stored stroke onto both the frame and the canvas
        points = [bpoints, gpoints, rpoints, ypoints]
        for i in range(len(points)):
            for j in range(len(points[i])):
                for k in range(1, len(points[i][j])):
                    if points[i][j][k - 1] is None or points[i][j][k] is None:
                        continue
                    cv2.line(frame, points[i][j][k - 1], points[i][j][k],
                             colors[i], 2)
                    cv2.line(paintWindow, points[i][j][k - 1], points[i][j][k],
                             colors[i], 2)

        imgStack = stackImages(1, ([frame, paintWindow]))
        cv2.imshow("Draw", imgStack)
        #cv2.imshow("Paint", paintWindow)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cap.release()
    cv2.destroyAllWindows()
Exemple #37
0
def gen_features(video_img, bin_mask):
    """Extract per-contour shape features for every frame of a video.

    Parameters
    ----------
    video_img : sequence
        Video frames; only its length is used to drive the loop.
    bin_mask : sequence of ndarray
        Binary segmentation masks, one per frame.

    Returns
    -------
    feat_vect : list of (16, n_contours) ndarrays, min/max-scaled across all frames
    contour_vect : list of per-frame contour lists
    centroid_vect : list of (2, n_contours) centroid arrays, one per frame
    """
    # Output vector of features
    feat_vect = []
    contour_vect = []
    centroid_vect = []
    all_feats = []
    for i in range(0, len(video_img)):
        # Segmentation image
        bw_img = bin_mask[i]
        # Find contours.
        ret, thresh = cv2.threshold(bw_img, 127, 255, 0)
        # BUG fix: the original 3-value unpack (_, contours, _) only works on
        # OpenCV 3.x; OpenCV 4.x returns (contours, hierarchy). Handle both.
        found = cv2.findContours(thresh, 1, 2)
        contours = found[0] if len(found) == 2 else found[1]
        contour_vect.append(contours)
        # Feature vector for current image
        img_feats = np.empty([16, len(contours)])
        centroid_contours = np.empty([2, len(contours)])
        for c in range(0, len(contours)):
            cnt = np.squeeze(contours[c])
            M = cv2.moments(cnt)
            # Centroid.  NOTE(review): a degenerate contour with m00 == 0
            # would raise ZeroDivisionError here — confirm the masks never
            # produce single-point contours.
            centroid = np.array([M['m10'] / M['m00'], M['m01'] / M['m00']])
            centroid_contours[:, c] = centroid
            img_feats[0, c] = centroid[0]
            img_feats[1, c] = centroid[1]
            # Area
            img_feats[2, c] = cv2.contourArea(cnt)
            # Perimeter
            img_feats[3, c] = cv2.arcLength(cnt, True)
            # Calculate distances from centroid and circularity measures
            dist = np.sum((cnt - centroid)**2, axis=1)
            v11 = np.sum(np.prod(cnt - centroid, axis=1))
            v02 = np.sum(np.square(cnt - centroid)[:, 1])
            v20 = np.sum(np.square(cnt - centroid)[:, 0])
            # Circularity (eccentricity-style ratio from central moments)
            m = 0.5 * (v02 + v20)
            n = 0.5 * np.sqrt(4 * v11**2 + (v20 - v02)**2)
            img_feats[4, c] = (m - n) / (m + n)
            # Min/max distance from the centroid
            img_feats[5, c] = dist.min()
            img_feats[6, c] = dist.max()
            # Mean / std of the centroid distances
            img_feats[7, c] = dist.mean()
            img_feats[8, c] = dist.std()
            # Seven Hu moment invariants fill the remaining rows
            img_feats[9:16, c] = cv2.HuMoments(M).flatten()
        feat_vect.append(img_feats)
        centroid_vect.append(centroid_contours)
        if i == 0:
            all_feats = img_feats
        else:
            all_feats = np.concatenate((all_feats, img_feats), axis=1)
    # Normalize features
    for i in range(0, len(feat_vect)):
        # NORMALIZATION ASSUMING GAUSSIAN DISTRIBUTION OF FEATS
        # mean_feats = np.tile(np.mean(all_feats, axis=1), (feat_vect[i].shape[1], 1)).T
        # std_feats = np.tile(np.std(all_feats, axis=1), (feat_vect[i].shape[1], 1)).T
        # feat_vect[i] = (feat_vect[i] - mean_feats)/std_feats
        # FEATURE SCALING (min/max over the whole video, per feature row)
        min_feats = np.tile(np.min(all_feats, axis=1),
                            (feat_vect[i].shape[1], 1)).T
        max_feats = np.tile(np.max(all_feats, axis=1),
                            (feat_vect[i].shape[1], 1)).T
        feat_vect[i] = np.divide(np.subtract(feat_vect[i], min_feats),
                                 np.subtract(max_feats, min_feats))
    return feat_vect, contour_vect, centroid_vect
Exemple #38
0
# yellow: HSV range for the tracked marker
lower_range = np.array([14, 135, 139], dtype=np.uint8)
upper_range = np.array([30, 255, 255], dtype=np.uint8)

# Main loop: isolate yellow regions, locate the first detected square's
# centroid and report it (with the click state) over `com`.
while (cap.isOpened()):
    ret, img = cap.read()
    # cv2.rectangle(img,(300,300),(100,100),(0,255,0),0)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Keep only the yellow pixels
    mask = cv2.inRange(img, lower_range, upper_range)
    img = cv2.bitwise_and(img, img, mask=mask)
    sq, click = find_squares(img)

    if sq:
        # Centroid of the first detected square from its image moments
        M = cv2.moments(sq[0])
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        # BUG fix: both branches of the original if/else sent [cx, cy, 1],
        # so the click state was invisible to the receiver.  Encode it in
        # the third field of the message.
        if click:
            com.send_message([cx, cy, 1])
        else:
            com.send_message([cx, cy, 0])

        cv2.drawContours(img, sq, -1, (0, 255, 0), 3)

        # Python-3-compatible print (the original used a py2 print statement
        # in a file that otherwise uses print())
        print('Clicked ? %r' % click)
    # print sq

    cv2.imshow('Img', img)

    k = cv2.waitKey(10)
Exemple #39
0
def execute_face_swap(img1, img2):
    """Warp the face detected in img1 onto the face region of img2.

    Returns the seamlessly-cloned result downscaled to 25 %, or None when
    landmark detection or Delaunay triangulation fails.
    """
    im1Display = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    im2Display = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)

    img1Warped = np.copy(img2)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    # Read array of corresponding points
    points1 = fbc.getLandmarks(detector, predictor, img1)
    points2 = fbc.getLandmarks(detector, predictor, img2)
    # BUG fix: check the landmarks BEFORE building the convex hull — the
    # original computed the hull first (which fails on an empty point set),
    # referenced an undefined name `points` in the message, and then fell
    # through to crash later instead of returning.
    if len(points1) == 0 or len(points2) == 0:
        print("Landmark detection failed for selected Images Source:{} Dest:{}".format(len(points1), len(points2)))
        return None
    hullIndex = cv2.convexHull(np.array(points2), returnPoints=False)

    # Create convex hull lists (corresponding points on both faces)
    hull1 = []
    hull2 = []
    for i in range(0, len(hullIndex)):
        hull1.append(points1[hullIndex[i][0]])
        hull2.append(points2[hullIndex[i][0]])
    hull8U = []
    for i in range(0, len(hull2)):
        hull8U.append((hull2[i][0], hull2[i][1]))

    # Build the destination-face mask and its centroid once, after the hull
    # is complete (the original rebuilt both inside the loop above on every
    # iteration, using a partially-filled hull each time).
    mask = np.zeros(img2.shape, dtype=img2.dtype)
    cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
    # Find Centroid — this is the anchor point for seamlessClone
    m = cv2.moments(mask[:, :, 1])
    center = (int(m['m10'] / m['m00']), int(m['m01'] / m['m00']))

    sizeImg2 = img2.shape
    rect = (0, 0, sizeImg2[1], sizeImg2[0])
    dt = fbc.calculateDelaunayTriangles(rect, hull2)

    # If no Delaunay Triangles were found, quit
    if len(dt) == 0:
        print("No Delaunay Triangles were found!")
        return None
    imTemp1 = im1Display.copy()
    imTemp2 = im2Display.copy()

    # Collect the triangle vertex lists for both faces
    tris1 = []
    tris2 = []
    for i in range(0, len(dt)):
        tri1 = []
        tri2 = []
        for j in range(0, 3):
            tri1.append(hull1[dt[i][j]])
            tri2.append(hull2[dt[i][j]])

        tris1.append(tri1)
        tris2.append(tri2)

    cv2.polylines(imTemp1, np.array(tris1), True, (0, 0, 255), 2)
    cv2.polylines(imTemp2, np.array(tris2), True, (0, 0, 255), 2)
    # Warp each source triangle into the destination geometry
    for i in range(0, len(tris1)):
        fbc.warpTriangle(img1, img1Warped, tris1[i], tris2[i])
    output = cv2.seamlessClone(np.uint8(img1Warped[:, :, ::-1]), img2, mask, center, cv2.NORMAL_CLONE)
    ### Default scaling to 25 percent
    scale_percent = 25
    width = int(output.shape[1] * scale_percent / 100)
    height = int(output.shape[0] * scale_percent / 100)

    # dsize
    dsize = (width, height)

    # resize image
    output = cv2.resize(output, dsize)

    return output
Exemple #40
0
    # find contours in the mask and initialize the current
    # (x, y) center of the QR Code
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the straight bounding rectangle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        # BUG fix: the original called cv2.boundingRect(cnt) on the undefined
        # name `cnt`; the largest contour is `c`.
        x, y, w, h = cv2.boundingRect(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # BUG fix: the bounding-box area is width * height, not the corner
        # coordinates x * y.
        area = w * h

        # only proceed if the area meets a minimum size
        if area > 15:
            # draw the rectangle and centroid on the frame,
            # then update the list of tracked points.
            # BUG fix: boundingRect returns the top-left corner, so the
            # opposite corner is (x + w, y + h), not (x + w, y - h).
            cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)),
                          (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # update the points queue
    pts.appendleft(center)

    # loop over the set of tracked points
        #             count_up = 0
        #             print 'down' ,count_down
        #         elif (rect_co[-1]-rect_co[-2]) < 0:
        #             count_up =  rect[2]/60
        #             count_down = 0
        #             print 'up',count_up
                
        #     continue
        # Fragment of a people-counter loop: drop small blobs, then try to
        # match each remaining blob against already-tracked persons.
        area = cv2.contourArea(cnt)
        if area > areaTH:
            #################
            #   TRACKING    #
            #################
            
            #Missing conditions for multipersons, outputs and screen entries
            # Blob centroid from image moments.
            # NOTE(review): assumes M['m00'] != 0 — presumably guaranteed by
            # area > areaTH with a positive areaTH; confirm.
            M = cv2.moments(cnt)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            x,y,w,h = cv2.boundingRect(cnt)
            # print 'working'
            # print w

            new = True
            # Only consider blobs whose centroid lies inside the counting band
            if cy in range(up_limit,down_limit):
                for i in persons:
                    # A blob within one bounding box of an existing person is
                    # treated as the same person
                    if abs(cx-i.getX()) <= w and abs(cy-i.getY()) <= h:
                        # the object is close to one that has already been detected before
                        # print 'update'
                        new = False
                        i.updateCoords(cx,cy)   #update coordinates in the object and resets age
                        if i.going_UP(line_down,line_up) == True:
# Option 1: access contours by value
# for i in contours:
#     cv2.drawContours(I,i,-1,(255,0,255),5)

# Option 2: access contours by index — draw each contour, print its length
# and area, classify its shape by vertex count, and mark its centroid.
for i in range(len(contours)):
    cv2.drawContours(I, contours, i, (255, 0, 255), 3)
    leni = cv2.arcLength(contours[i], True)
    print("len of contours:", leni)
    areai = cv2.contourArea(contours[i])
    print("area of contours:", areai)
    # approximate polygon (second argument = epsilon, the max approximation
    # error in pixels)
    nedges = cv2.approxPolyDP(contours[i], 5, True)
    print("polyedges:",len(nedges))
    # Label the shape by the number of approximated vertices
    if len(nedges)==3:
        cv2.putText(I,"triangle",(nedges[1][0][0],nedges[1][0][1]),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0))
    elif len(nedges)==4:
        cv2.putText(I,"square",(nedges[0][0][0],nedges[0][0][1]),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0))
    else:
        # NOTE(review): this anchor mixes vertex 1's x with vertex 0's y —
        # probably intended to use a single vertex; confirm intent.
        cv2.putText(I, "circle", (nedges[1][0][0], nedges[0][0][1]), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))
    # Centroid from image moments (assumes m00 != 0 — TODO confirm for thin contours)
    M = cv2.moments(contours[i])
    cx = int(M['m10']/M['m00'])  # centroid x
    cy = int(M['m01']/M['m00'])  # centroid y
    cv2.circle(I,(cx,cy),10,(120,255,0),5)

cv2.namedWindow("Image contour",cv2.WINDOW_NORMAL)
cv2.imshow("Image contour", I)

cv2.waitKey()
def CircularBlink(labels,circularity = 0.5,minarea = 2):  
    
    """
    Based on the blinks separated from watershed (labels), determine if the 
    blink is big enough (larger than minarea in pixels) and circular enough 
    (circularity is larger than some user defined threshold).
    Output the x-y coordiate of the top-left corner of the bounding box for 
    blinks that pass both criteria 
    
    Parameters
    ----------
    labels: ndarray
            Indexed separted blinks
    circularity: float
                 user defined threshold of circularity (1 means circle), 
                 deviate from circle means non-circle, default value is 0.5
    minarea: int
             user defined threshold of minimum blink size in pixels, default 
             value is 2 pixels 
   
    Returns
    -------
    x, y: ndarray
          x and y coordinates ofthe top-left corner of the bounding box for 
          blinks that pass both criteria
        
    Notes
    -----
    This function uses circularity to select only circular blinks for output.
    (Docstring defaults corrected: the original claimed 0.6 / 4 while the
    signature declares 0.5 / 2.)
    """
    # create dynamic list of x and y to store coordinates
    x=[]
    y=[]
    
    # Compute the label set once instead of twice per loop iteration as the
    # original did.
    uniq = np.unique(labels)

    # loop through all the separated blinks and filter them by size and 
    # circularity (index 0 is the background label and is skipped)
    for t in range(1,len(uniq)):
        # select individual blink and store the mask in blink
        blink = np.zeros(labels.shape)
        label = uniq[t]
        blink[labels == label]=1
        
        # detect contour of this blink.  BUG fix: the original indexed
        # contours[1][0], which only works on OpenCV 3.x (image, contours,
        # hierarchy); OpenCV 4.x returns (contours, hierarchy).
        found = cv2.findContours(blink.astype('uint8'),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
        cnts = found[0] if len(found) == 2 else found[1]
        cnt = cnts[0]
        
        # filter by blink area
        area = cv2.contourArea(cnt)
        
        if area>=minarea:
            
            # find perimeters of the blink
            perimeter = cv2.arcLength(cnt,True)   
            
            # calculate circularity circ based on the ratio of area and 
            # perimeter^2 (equals 1 for a perfect circle)
            circ= (4*np.pi*(area/(perimeter*perimeter))) 
            
            # filter out blinks that are not circular
            if circularity<circ<2-circularity:
                # get centroid, recalculate the position of the molecule using 
                # the moments
                M = cv2.moments(cnt)
                cx = int(M['m10']/M['m00'])
                cy = int(M['m01']/M['m00'])
                # calculate the top-left corner coordinates
                # the blink size is 6x6
                x.append(cx-3)
                y.append(cy-3)
        
    return x,y  
        # NOTE(review): cv2.cv.BoxPoints is the OpenCV 2.x API; OpenCV 3+
        # renamed it cv2.boxPoints — confirm the targeted OpenCV version.
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(warped,[box],0,(0,0,255),2)
        print("Box")
        print(box)
    #detect canny edges
    #paramters: source, threshold1, threshold2
    edges = cv2.Canny(mask, 75, 150)
    
    cv2.imshow("image", frame)
    #cv2.imshow("mask", mask)
    cv2.imshow("contours", mask2)
    #cv2.imshow("gray", gray)
    #cv2.imshow("edges", edges)
    #cv2.imshow("hsv", hsv)
    # Raw spatial moments of the contour mask, treated as a binary image
    retval = cv2.moments(mask2, binaryImage = True)

    # Probabilistic Hough transform over the Canny edges
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 50, maxLineGap=50)
    avgLine = 0
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            # Line angle measured from the vertical axis
            radian_angle = math.atan2((x2-x1),(y1-y2))
            #degree_angle = math.degrees(radian_angle)
            # Fold angles beyond +45 degrees into the negative range
            if radian_angle > (np.pi / 4):
                radian_angle = radian_angle - (np.pi)
            #if degree_angle > 90:
                #degree_angle = degree_angle - 180
            # Accumulate for an average heading over all detected lines
            avgLine = avgLine + radian_angle
            #print degree_angle
	    #cv2.line(warped, (x1, y1), (x2, y2), (0, 255, 0), 2)
def get_lanes(yellow_contours, white_contours):
    """Pick the tallest yellow and white lane contours from two contour lists.

    Returns (ybc, wbc), each a [height, contour] pair.  Falls back to the
    previous detections (pybc / pwbc globals) when a list is empty.  The
    yellow lane must lie left of the white lane (and vice versa) by a
    scale-dependent margin.
    """
    global ycx, ycy, wcx, wcy, pwbc, pybc, ybc, wbc
    #Reset biggest contour
    ybc = [0, 0]

    #Check if any yellow contour is found
    if len(yellow_contours):
        for c in yellow_contours:
            yx, yy, yw, yh = cv2.boundingRect(c)
            yM = cv2.moments(c)
            try:
                ycx = int(yM['m10'] / yM['m00'])
            except ZeroDivisionError:
                # Degenerate contour (m00 == 0): no centroid, skip it.
                # (Original used a bare except, which also hid real bugs.)
                continue

            # BUG fix: the original stored yellow_contours[yi] but did not
            # increment yi when the except branch fired, so after any
            # degenerate contour the wrong contour was stored.  Store the
            # loop variable `c` directly instead (as the white branch did).
            if wcx != 0:
                # Yellow lane must be well to the left of the white lane
                if yh > ybc[0] and ycx <= wcx - 200 * scale:
                    ybc = [yh, c]
            else:
                if yh > ybc[0]:
                    ybc = [yh, c]
    else:
        # No detection this frame: reuse the previous yellow lane
        ybc = pybc

    #Reset array of biggest contour
    wbc = [0, 0]

    #Check if any white contour is found
    if len(white_contours):
        for c in white_contours:
            wx, wy, ww, wh = cv2.boundingRect(c)
            wM = cv2.moments(c)
            try:
                wcx = int(wM['m10'] / wM['m00'])
            except ZeroDivisionError:
                continue

            # White lane must be well to the right of the yellow lane
            if (wh > wbc[0] and wcx > ycx + 200 * scale):
                wbc = [wh, c]
    else:
        # No detection this frame: reuse the previous white lane
        wbc = pwbc
    print("wbc:{}".format(wbc[0]))
    return ybc, wbc
Exemple #46
0
#cv2.destroyAllWindows()
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# Keep only contours with roughly square bounding boxes of a minimum size
# (candidate circle marks: aspect ratio within 10 % of 1, both sides > 20 px)
circleCnt = []
for cnt in cnts:
    (x, y, w, h) = cv2.boundingRect(cnt)
    ar = w / (float(h))
    if w > 20 and h > 20 and ar > 0.9 and ar < 1.1:
        circleCnt.append(cnt)

copy = orig.copy()

# Centroids as (y, x).  Perf fix: the original called cv2.moments(cnt) four
# times per contour; compute the moments once and reuse them.
centers = []
for cnt in circleCnt:
    M = cv2.moments(cnt)
    centers.append((M['m01'] / M['m00'], M['m10'] / M['m00']))


def _centroid_y(c):
    # Sort key: integer y-coordinate of the contour centroid
    # (original computed cv2.moments twice per comparison key)
    M = cv2.moments(c)
    return int(M['m01'] / M['m00'])


# Candidate circles ordered top-to-bottom by centroid
sorted_y = sorted(circleCnt, key=_centroid_y)
x = []
Exemple #47
0
        # apply background substraction
        fgmask = fgbg.apply(depth)
        # grab_contours normalizes the findContours return tuple across
        # OpenCV versions
        contours = cv.findContours(fgmask.copy(), cv.RETR_EXTERNAL,
                                   cv.CHAIN_APPROX_SIMPLE)
        contours = imutils.grab_contours(contours)

        # looping for contours
        for c in contours:
            # ignore small foreground blobs (noise)
            if cv.contourArea(c) < 1000:
                continue

            # get bounding box from countour
            # (x, y, w, h) = cv.boundingRect(c)
            ((x, y), radius) = cv.minEnclosingCircle(c)
            # Centroid from image moments (assumes m00 != 0 for blobs of
            # area >= 1000 px — TODO confirm)
            M = cv.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # draw bounding box
            #     cv.rectangle(depth, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv.circle(depth, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)

            # Sample the depth value at the blob centroid (row, col indexing)
            print("depth:", depth[center[1], center[0]])

        depthImage = depth.astype(np.uint8)
        # cv.imshow('crop', crop_frame)
        cv.imshow('depth', depthImage)
        cv.imshow('Foreground & Background Mask', fgmask)

        depthImage = cv.cvtColor(depthImage, cv.COLOR_GRAY2BGR)
Exemple #48
0
def camTrack(previewName, camID):
    """Track a green ball in the feed from camera `camID`.

    Draws the ball's enclosing circle and a fading trail of recent centroids
    in a window titled `previewName`.  Side effect: writes the ball's (x, y)
    into the module-level `coordinate1` / `coordinate2` object depending on
    `previewName`.  Loops until 'q' is pressed, then releases the stream.

    NOTE(review): argparse reads sys.argv inside this function — when run as
    a thread target per camera, every instance parses the same CLI; confirm
    that is intended.
    """
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="path to the (optional) video file")
    ap.add_argument("-b",
                    "--buffer",
                    type=int,
                    default=64,
                    help="max buffer size")
    args = vars(ap.parse_args())

    # define the lower and upper boundaries of the "green"
    # ball in the HSV color space, then initialize the
    # list of tracked points
    greenLower = (80, 80, 0)
    greenUpper = (100, 255, 255)
    pts = deque(maxlen=args["buffer"])

    # if a video path was not supplied, grab the reference
    # to the webcam
    if not args.get("video", False):
        vs = VideoStream(src=camID).start()

    # otherwise, grab a reference to the video file
    else:
        vs = cv2.VideoCapture(args["video"])

    # allow the camera or video file to warm up
    time.sleep(2.0)

    # keep looping
    while True:
        # grab the current frame
        frame = vs.read()

        # handle the frame from VideoCapture or VideoStream
        frame = frame[1] if args.get("video", False) else frame

        # if we are viewing a video and we did not grab a frame,
        # then we have reached the end of the video
        if frame is None:
            break

        # resize the frame, blur it, and convert it to the HSV
        # color space
        frame = imutils.resize(frame, width=600)
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        # construct a mask for the color "green", then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, greenLower, greenUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours in the mask and initialize the current
        # (x, y) center of the ball
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            # publish the raw circle center to the per-camera shared object
            if previewName == "Camera 1":
                coordinate1.x = x
                coordinate1.y = y
                #print("x1: %d y1: %d x2: " % (x1, y1))
            elif previewName == "Camera 2":
                coordinate2.x = x
                coordinate2.y = y
            # print("x2: %d y2: %d" % (x2, y2))
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # only proceed if the radius meets a minimum size
            if radius > 10:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255),
                           2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)

        # update the points queue
        pts.appendleft(center)

        # loop over the set of tracked points
        for i in range(1, len(pts)):
            # if either of the tracked points are None, ignore
            # them
            if pts[i - 1] is None or pts[i] is None:
                continue

            # otherwise, compute the thickness of the line and
            # draw the connecting lines (older points get thinner lines)
            thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
            cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

        # show the frame to our screen
        cv2.imshow(previewName, frame)
        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break

    # if we are not using a video file, stop the camera video stream
    if not args.get("video", False):
        vs.stop()

    # otherwise, release the camera
    else:
        vs.release()

    # close all windows
    cv2.destroyAllWindows()
# Start the three worker threads (mail->arduino, camera->net, red detector)
# as daemons so they die with the main loop.
pr1 = threading.Thread(target=mail2arduino_pr1)
pr1.daemon = True
pr1.start()
pr2 = threading.Thread(target=camera2inet_pr2)
pr2.daemon = True
pr2.start()
pr3 = threading.Thread(target=see_red_pr3)
pr3.daemon = True
pr3.start()

# Main loop: steer toward the dark line seen in the bottom strip of the frame.
while 1:
    # NOTE(review): cv2.VideoCapture.read() returns (ret, frame); this line
    # assumes `cap` is a wrapper whose read() returns the frame alone — confirm.
    frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Dark-pixel mask over rows 420 and below (value channel <= 80)
    frame_gray = cv2.inRange(hsv[420:, :, :], (0, 0, 0), (255, 255, 80))
    # Drive only when enough dark pixels are present and no red flag is up
    if (np.sum(frame_gray) > porog and see_red != 1):
        speed = 255
        # Image moments of the binary mask (second argument 1 = binaryImage)
        moments = cv2.moments(frame_gray, 1)
        dM01 = moments['m01']
        dM10 = moments['m10']
        dArea = moments['m00']

        # Offset of the mask centroid from the reference point (320, 120)
        x = 320 - int(dM10 / dArea)
        y = 120 - int(dM01 / dArea)
    else:
        speed = 0
    # Frames-per-second bookkeeping, updated once per wall-clock second
    i_main += 1
    if (time.time() - time_main > 1):
        time_main = time.time()
        fps_main = i_main
        i_main = 0
    # Threshold the frame for the red marker in HSV space
    image_hsv = cv.cvtColor(image_smooth, cv.COLOR_BGR2HSV)
    image_threshold = cv.inRange(image_hsv, lower_red, upper_red)

    # Find contours
    contours, heirarchy = cv.findContours(image_threshold, \
                                        cv.RETR_TREE, \
                                        cv.CHAIN_APPROX_NONE)

    # Find the index of the largest contour
    if (len(contours) != 0):
        areas = [cv.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
        cnt = contours[max_index]

        #Pointer on Video
        M = cv.moments(cnt)
        # Guard against degenerate contours before dividing by m00
        if (M['m00'] != 0):
            # Centroid of the largest red blob
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            cv.circle(frame, (cx, cy), 4, (0, 255, 0), -1)

            #Cursor Motion. Depending on the position of cursor and in the x and y axis inside the grid it send values of f b l r s to arduino.
            # Middle column of the grid: forward / backward commands
            if cx in range(150, 250):
                if cy < 150:
                    Arduino.write(b'f')
                    print("Forward")
                elif cy > 250:
                    Arduino.write(b'b')
                    print("Backward")

                else:
Exemple #51
0
def hasprivacythreat_real(position, occ_grid_known, config, index, colorflag,
                          sizeflag, log):
    # picture_index = np.zeros((30, 1), dtype=int)
    # picture_index = [12,17,21,41,50,55,60,70,81,86,94,99,106,113,119,126,132,140,148,155,160,166]
    # picture_index = [15, 22, 27, 31, 33, 36, 45, 48, 92, 110, 118, 122, 127, 129, 134, 136, 145, 146, 147, 148, 149, 150,151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164]
    # picture_list = [1, 2, 5, 7, 10, 13, 17, 18, 21, 24, 27, 28, 30, 33, 37, 41, 45, 49, 53, 56, 59, 62, 65, 67, 69, 73,
    #                 74, 76, 80, 82, 86, 88, 92, 93, 95, 99, 103]
    """
    :param position: current position of drone
    :param occ_grid_known: current environment map
    :param config: system configurations
    :param index: picture index for offline testing (unused in the online path)
    :param colorflag: detect blue (1) or red (2)
    :param sizeflag: split the picture into 3*3 or 5*5 to locate the center of privacy region
    :param log: logger used for position/threat reporting
    :return: flag: if privacy region is detected, return the updated occ_grid_known
    """
    ## try this for offline testing with settled picture index
    # picture_list_1 = [17, 23, 29, 36, 43, 56, 62, 68, 73, 343, 349, 354, 361, 368]
    picture_list_1 = [
        1, 7, 13, 19, 25, 31, 37, 43, 49, 55, 61, 67, 73, 79, 85, 91, 97, 103,
        109, 115, 121, 127, 133, 139, 145, 151, 157, 163, 169
    ]

    ## when camera is on: position.ca==1, launch privacy region detection
    flag = 0
    if position.ca == 1:
        # flag=1
        x = position.x
        y = position.y
        z = position.z

        # print("position:", position)
        log.info("current position [%d, %d, %d, %d]" %
                 (position.x, position.y, position.z, position.ca))

        ## try this for online: newest image in the capture directory
        num = file_name('D:/home/kids/1')
        img1 = 'D:/home/kids/1/' + str(num) + '.jpg'

        ## try this for offline testing with settled picture index
        # picture_index = picture_list_1[index]
        # img1 = os.getcwd() + '/pic5-4/'+str(picture_index)+".jpg"

        print("\033[92m image index: %s \033[0m" % (img1))
        log.info("image index %s" % (img1))
        img = cv2.imread(img1)
        # print(img)
        # Normalize to a fixed 4000x2250 canvas so the grid math below holds
        img = cv2.resize(
            img,
            (4000, 2250),
        )
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        if colorflag == 1:
            mask = cv2.inRange(hsv, blueLower, blueUpper)
        elif colorflag == 2:
            mask = cv2.inRange(hsv, redLower, redUpper)
        # NOTE(review): `mask` is unbound if colorflag is neither 1 nor 2 —
        # NameError at the threshold below; confirm callers only pass 1 or 2.

        ret, binary = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)
        kernel = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(binary, kernel, iterations=1)
        contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

        arr_x = []
        arr_y = []
        # size_num = 5

        # Grid resolution for locating the privacy-region center
        if sizeflag == 1:
            size_num = 3
        elif sizeflag == 2:
            size_num = 5
        elif sizeflag == 3:
            # adaptive: finer grid when the drone is farther away (x >= 3)
            if x >= 3:
                size_num = 5
            else:
                size_num = 3
        # NOTE(review): `size_num` is unbound if sizeflag is not 1, 2 or 3 —
        # NameError below; confirm the valid sizeflag range.

        # Centers of the size_num x size_num grid cells in pixel coordinates
        x_start = 4000 / (size_num * 2)
        y_start = 2250 / (size_num * 2)
        for i in range(size_num):
            arr_x.append(x_start + i * x_start * 2)
            arr_y.append(y_start + i * y_start * 2)

        center_x = 4000 / 2
        center_y = 2250 / 2
        # print("center", center_x, center_y)
        # print(arr_x, arr_y)

        if contours:

            # Largest detected colored region as a rotated bounding box
            c = max(contours, key=cv2.contourArea)
            array = cv2.minAreaRect(c)
            box = cv2.boxPoints(array)
            box = np.int0(box)
            # print (box)

            length = abs(box[0][0] - box[2][0])
            width = abs(box[0][1] - box[1][1])
            area = length * width

            # Ignore regions smaller than 0.1% of the image
            if area > 0.001 * 4000 * 2250:

                # Mark every grid cell whose center falls inside the box
                for xx in range(len(arr_x)):
                    for yy in range(len(arr_y)):
                        # print("fffff", (center_x - arr_x[xx]) / (4000 / 3), center_y - arr_y[yy])
                        # Grid-cell offset from the image center, in map cells
                        y_ = round((center_x - arr_x[xx]) / (4000 / size_num))
                        x_ = round((center_y - arr_y[yy]) / (2250 / size_num))
                        # print(x_, y_)
                        if isInside(box, arr_x[xx], arr_y[yy]) == 1:
                            # print("inside", isInside(box, arr_x[xx], arr_y[yy]), arr_x[xx], arr_y[yy])
                            # x_ = int ((center_x - arr_x[xx]) / (4000 / 3))
                            # y_ = int ((center_y - arr_y[yy]) / (2250 / 3))
                            # print(x_, y_, y + x_, z + y_)
                            # Clamp to the map bounds before writing
                            if y + x_ <= occ_grid_known.shape[1] - 1 and y + x_ >= 0 and z + y_ <= \
                                    occ_grid_known.shape[2] - 1 and z + y_ >= 0:
                                delta_y = y + x_
                                delta_z = z + y_
                                # if occ_grid_known[0][delta_y][delta_z] != 4:
                                flag = 1
                                # 4 marks a privacy-threat cell in the map
                                occ_grid_known[0][delta_y][delta_z] = 4
                                print(
                                    "\033[92m threat position: [%d, %d] \033[0m"
                                    % (delta_y, delta_z))
                                log.info("threat position: [%d, %d]" %
                                         (delta_y, delta_z))

                # cv2.drawContours(img, [box], 0, (0, 0, 0), 8)
                # cv2.imwrite(savepath, img)

                M = cv2.moments(c)
                # get the coordinates of the object's center
                cx = int(M['m10'] / M['m00'])
                cy = int(M['m01'] / M['m00'])

                # print("box center", cx, cy)

                # Map the centroid to a grid-cell offset from the image center
                y_ = math.ceil(size_num / 2) - math.ceil(cx /
                                                         (4000 / size_num))
                x_ = math.ceil(size_num / 2) - math.ceil(cy /
                                                         (2250 / size_num))
                # print(y_, x_)

                if y + x_ <= occ_grid_known.shape[1] - 1 and y + x_ >= 0 and z + y_ <= \
                        occ_grid_known.shape[2] - 1 and z + y_ >= 0:
                    delta_y = y + x_
                    delta_z = z + y_
                    # if occ_grid_known[0][delta_y][delta_z] != 4:
                    flag = 1
                    occ_grid_known[0][delta_y][delta_z] = 4
                    print("\033[92m center threat position: [%d, %d] \033[0m" %
                          (delta_y, delta_z))
                    log.info("center threat position: [%d, %d]" %
                             (delta_y, delta_z))

            else:
                flag = 0

        return flag, occ_grid_known
    else:
        flag = 0
        return flag, occ_grid_known
# Erode, Otsu-threshold and contour-fill the grayscale image, then walk a
# sliding horizontal window down the page collecting one centroid per window.
kernel = np.ones((2, 2), np.uint8)
eroded2 = cv2.erode(g, kernel, iterations=3)
ret4, thresh4 = cv2.threshold(eroded2, 0, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

edges = cv2.Canny(thresh4, 0, 255)
# BUG fix: the original 3-value unpack (ppp, contours, hier) only works on
# OpenCV 3.x; OpenCV 4.x returns (contours, hierarchy). Handle both.
found = cv2.findContours(edges, cv2.RETR_TREE,
                         cv2.CHAIN_APPROX_SIMPLE)
contours = found[0] if len(found) == 2 else found[1]
filled = cv2.drawContours(thresh4, contours, -1, (255, 255, 255), 15)
(winW, winH) = (cols, 35)
stepSize = 10
blob = []
centroid = []
for height in range(0, rows, stepSize):
    crop = filled[height:height + winH, 0:0 + winW]
    M = cv2.moments(crop)
    cv2.imshow("OUTPUT1", crop)
    # BUG fix: an all-black window has m00 == 0 and the original crashed
    # with ZeroDivisionError; skip windows with no ink.
    if M["m00"] == 0:
        continue
    # calculate x,y coordinate of the window's centroid (y in page coords)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    blob = (cX, height + cY)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    centroid.append(blob)
centers = np.int0(centroid)
# Mark every collected centroid on the original image
for center in centers:
    x, y = center.ravel()
    cv2.circle(img, (x, y), 3, (0, 0, 255), -1)
cv2.imshow("OUTPUT2", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
    # HSV range from the (presumably trackbar-driven) lH..hV values
    lower = np.array([lH, lS, lV])
    upper = np.array([hH, hS, hV])

    mask = cv2.inRange(hsv, lower, upper)

    result = cv2.bitwise_and(frame, frame, mask=mask)

    filtered = result.copy()

    # NOTE(review): the 3-value unpack is OpenCV 3.x-specific; OpenCV 4
    # returns (contours, hierarchy) — confirm the installed version.
    res, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_NONE)

    font = cv2.FONT_HERSHEY_PLAIN
    # Annotate every contour with its area, anchored at its centroid
    for contour in contours:
        try:
            M = cv2.moments(contour)
            x = int(M["m10"] / M["m00"])
            y = int(M["m01"] / M["m00"])
            cv2.putText(frame,
                        str(cv2.contourArea(contour)), (x, y),
                        font,
                        2.0, (255, 255, 255),
                        lineType=16)
        except ZeroDivisionError:
            # degenerate contour (m00 == 0): no centroid, skip the label
            pass
    cv2.drawContours(result, contours, -1, (0, 255, 0), 3)

    # Side-by-side view: original, masked copy, annotated result
    images = np.hstack((frame, filtered, result))

    cv2.imshow('result', images)
            # loop over the contours
            largest_contour = 0
            largest_contourV = 0
            # Scan all contours and keep the one with the greatest area
            for c in cnts:
                # if the contour is too small, ignore it
                print(cv2.contourArea(c))
                if cv2.contourArea(c) > largest_contourV:
                    #print(type(largest_contourV))
                    largest_contour = c
                    largest_contourV = cv2.contourArea(c)
                    continue

            #print(type(largest_contour))

            # Proceed only when a non-degenerate contour was found
            if largest_contourV > 0:
                # Centroid of the winning contour from its image moments
                M = cv2.moments(largest_contour)
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                Width = frame.shape[1]
                #print(Width)
                # draw the contour and center of the shape on the image
                #cv2.drawContours(frame, [largest_contour], -1, (0, 255, 0), 2)
                # compute the bounding box for the contour, draw it on the frame,
                # and update the text

                (x, y, w, h) = cv2.boundingRect(largest_contour)
                #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                crop_img = frame[y:y + h, x:x + w]
                # Downscale the crop so neither side exceeds 240 px
                if (h > 240) | (w > 240):
                    if (h > 240):
                        scaler = 240 / h
Exemple #55
0
# Track the largest (max/c) and second-largest (smax/sec) contours by area.
# NOTE(review): `max`, `sum` and `c` shadow/depend on names bound earlier in
# the script (`max` must already hold a number here, not the builtin) —
# confirm upstream code; the builtins are unusable after this block.
smax = 0
for contour in contours:
    if cv2.contourArea(contour) > max:
        # New largest: demote the previous largest to second place
        smax = max
        sec = c
        max = cv2.contourArea(contour)
        c = contour
    elif cv2.contourArea(contour) > smax:
        smax = cv2.contourArea(contour)
        sec = contour
    # Special case: a contour in this absolute-area window overrides `sec`
    if cv2.contourArea(contour) > 1000000 and cv2.contourArea(
            contour) < 1100000:
        smax = cv2.contourArea(contour)
        sec = contour

center = cv2.moments(c)
# Radius of a circle with the same area as the largest contour, shrunk to 70 %
r = math.ceil((cv2.contourArea(c) / np.pi)**0.5)
r = r * 0.7
img2 = np.zeros_like(grayscale)
cx = int(center['m10'] / center['m00'])  #centroid
cy = int(center['m01'] / center['m00'])
# Mask the grayscale image to a filled disc around the centroid
cv2.circle(img2, (cx, cy), int(r), (255, 255, 255), -1)
res = cv2.bitwise_and(grayscale, img2)
resized = cv2.resize(res, (256, 256))
mean, std = cv2.meanStdDev(resized)
mean = mean[0][0]
std = std[0][0]
# Uniformity measure from the intensity statistics inside the disc
U = abs((1 - std / mean))
count = 0
sum = 0
def fd_hu_moments(image):
    """Return the 7 Hu moment invariants of *image* as a flat feature vector.

    The BGR input is converted to grayscale, its raw image moments are
    computed, and the rotation/scale/translation-invariant Hu moments are
    flattened into a 1-D array suitable for use as a shape descriptor.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    raw_moments = cv2.moments(gray)
    return cv2.HuMoments(raw_moments).flatten()
    def go_to_ball(self, ros_image):
        """Detect the target-colored ball in *ros_image* and steer toward it.

        Decompresses the incoming ROS image, segments it by the HSV range
        matching ``self.color``, and tracks the largest matching contour.
        Publishes a Twist whose angular velocity is proportional to the
        ball's horizontal offset from image center (assumed at x=400) and
        whose linear velocity is proportional to the radius error (target
        radius 130 px).  Sets ``self.ACK_POSITIVO`` when the ball is
        reached; if no ball is visible, rotates to search and sets
        ``self.ACK_NEGATIVO`` after the search counter expires.
        """
        ## @param image_np (decompresed image and converted to CV2)
        # np.frombuffer replaces the deprecated np.fromstring (identical
        # result for a raw byte buffer).
        np_arr = np.frombuffer(ros_image.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        ## Reduce noise
        blurred = cv2.GaussianBlur(image_np, (11, 11), 0)
        ## Conversion to hsv
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        # Apply the proper color mask
        if self.color == "black":
            mask = cv2.inRange(hsv, blackLower, blackUpper)
        elif self.color == "red":
            mask = cv2.inRange(hsv, redLower, redUpper)
        elif self.color == "yellow":
            mask = cv2.inRange(hsv, yellowLower, yellowUpper)
        elif self.color == "green":
            mask = cv2.inRange(hsv, greenLower, greenUpper)
        elif self.color == "blue":
            mask = cv2.inRange(hsv, blueLower, blueUpper)
        # elif self.color == "magenta":
        #     mask = cv2.inRange(hsv, magentaLower, magentaUpper)
        else:
            # BUG FIX: an unrecognized color previously left `mask` unbound,
            # crashing with NameError on the erode below.  Skip the frame.
            rospy.logwarn("[trackingBall]: unknown color '%s'", self.color)
            return

        # Clean the mask with an erode/dilate pass before contour search.
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        center = None

        if len(cnts) > 0:
            # Track the largest matching contour.
            c = max(cnts, key=cv2.contourArea)
            ((x, y), self.radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            # BUG FIX: a degenerate (zero-area) contour previously raised
            # ZeroDivisionError in the centroid computation.
            if M["m00"] == 0:
                return
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            if self.radius > 10:
                # Annotate the frame: enclosing circle + centroid dot.
                cv2.circle(image_np, (int(x), int(y)), int(self.radius),
                           (0, 255, 255), 2)
                cv2.circle(image_np, center, 5, (0, 0, 255), -1)
                # SETTING VELOCITIES TO APPLIED TO THE ROBOT
                vel = Twist()
                # ANGULAR VELOCITY IS COMPUTED FROM THE MISALIGNMENT BETWEEN THE CENTER OF THE BALL AND THE CENTER OF THE IMAGE
                vel.angular.z = -self.sigmaAngular * (center[0] - 400)
                # LINEAR VELOCITY IS COMPUTED FROM THE DIMENSION OF THE RADIUS OF THE BALL IN THE IMAGE
                vel.linear.x = -self.sigmaLinear * (self.radius - 130)
                self.vel_publisher.publish(vel)
                rospy.loginfo("[trackingBall]: TRACKING ")
                # THRESHOLD FOR CONSIDERING THE BALL AS REACHED
                if (self.radius >= 120) and (abs(center[0] - 400) < 5):
                    rospy.loginfo("ballDetection --> BALL REACHED")
                    self.result.x = self.position.x
                    self.result.y = self.position.y
                    # SEND ACK_POSITIVO OF MISSION ACCOMPLISHED
                    self.ACK_POSITIVO = True

        else:
            # Routine that happens if the ball is lost during tracking: simply
            # rotate the robot to find the lost ball (right, then left).
            # After a while, abort the mission.
            rospy.loginfo("[trackingBall]: BALL NOT FOUND")
            vel = Twist()
            if self.lost_ball_counter <= 10:
                rospy.loginfo("[trackingBall]: TURN RIGHT SEARCHING THE BALL")
                vel.angular.z = 0.5
                self.vel_publisher.publish(vel)
            elif self.lost_ball_counter < 20:
                rospy.loginfo("[trackingBall]: TURN LEFT SEARCHING THE BALL")
                vel.angular.z = -0.5
                self.vel_publisher.publish(vel)
            elif self.lost_ball_counter == 20:
                rospy.loginfo("[trackingBall]: UNABLE TO FIND BALL")
                self.lost_ball_counter = 0
                # SEND ACK_NEGATIVO OF MISSION ABORTED DUE TO BALL LOST.
                self.ACK_NEGATIVO = True
            self.lost_ball_counter += 1
def detect_rects(image, draw=False):
    """Find bright blobs in *image* and return their rotated bounding boxes.

    Thresholds a blurred grayscale copy at 60, cleans the binary mask with a
    7x7 erode/dilate (morphological open), then fits a minimum-area rotated
    rectangle around each external contour.  Degenerate (zero-area) contours
    are skipped.

    NOTE: draws the red boxes onto *image* in place as a side effect.

    :param image: BGR image (numpy array).
    :param draw: when True, show the annotated image and exit on 'q'.
    :return: list of 4x2 float arrays of box corner points (cv2.boxPoints).
    """
    rects = []

    # Resize/ratio machinery kept from the original code; currently a no-op.
    ratio = 1.
    resized = image

    # Grayscale -> slight blur -> binary threshold.
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

    # Morphological open (erode then dilate) to drop speckle noise.
    kernel = np.ones((7, 7), np.uint8)
    mask = cv2.erode(thresh.copy(), kernel, iterations=1)
    mask = cv2.dilate(mask, kernel, iterations=1)
    thresh = mask

    # Find external contours in the thresholded image.
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    # BUG FIX: the previous `cnts[0] if imutils.is_cv2() else cnts[1]` breaks
    # on OpenCV 4.x, where findContours again returns 2 values.
    # grab_contours handles 2.x/3.x/4.x and matches the usage elsewhere in
    # this file.
    cnts = imutils.grab_contours(cnts)

    for c in cnts:
        M = cv2.moments(c)
        # Explicitly skip degenerate (zero-area) contours instead of the
        # previous bare `except:` wrapped around an unused centroid
        # computation (its only effect was this skip).
        if M["m00"] == 0:
            continue

        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        rects.append(box)

        # Draw the rotated box in red on the input image (in place).
        # astype(np.intp) replaces np.int0 (an alias removed in NumPy 2.0).
        cv2.drawContours(image, [box.astype(np.intp)], 0, (0, 0, 255), 2)

    if draw:
        cv2.imshow("Image", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            exit(0)

    return rects
Exemple #59
0
# QR-code alignment-marker search on a PiCamera stream.
# NOTE(review): `camera` and `rawCapture` are defined outside this chunk,
# and the final loop below is truncated — confirm against the full file.
for frame in camera.capture_continuous(rawCapture,format="bgr",use_video_port=True):
    image = frame.array
    #show the image
    #wait until some key is pressed to procced

    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Convert Image captured from Image Input to GrayScale
    edges = cv2.Canny(image_gray,100,200,3)      # Apply Canny edge detection on the gray image
    # 3-tuple unpacking: matches the OpenCV 3.x findContours return signature.
    (_,contours,hierarchy) = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) # Find contours with hierarchy

 # Get Moments for all Contours and the mass centers
    mu = []
    mc = []
    mark = 0
    for x in range(0,len(contours)):
        mu.append(cv2.moments(contours[x]))

    # Mass center = (m10/m00, m01/m00); (0,0) for degenerate contours.
    for m in mu:
        if m['m00'] != 0:
            mc.append((m['m10']/m['m00'],m['m01']/m['m00']))
        else:
            mc.append((0,0))

    # Start processing the contour data

    # Find Three repeatedly enclosed contours A,B,C
    # NOTE: 1. Contour enclosing other contours is assumed to be the three Alignment markings of the QR code.
    # 2. Alternately, the Ratio of areas of the "concentric" squares can also be used for identifying base Alignment markers.
    # The below demonstrates the first method

    # NOTE(review): the body of this loop is truncated in this chunk.
    for x in range(0,len(contours)):
Exemple #60
0
def ct_initial_alignment(source, target, echo=True):
    """Rigid (translation + rotation) pre-alignment of *source* onto *target*.

    Both inputs are 2-D arrays — presumably scaled to [0, 1], since they are
    multiplied by 255 and cast to uint8 below (TODO confirm with callers).
    The translation is taken from the centroid difference of the thresholded
    masks (image moments); the rotation from an exhaustive search in
    2-degree steps maximizing the Dice overlap of the masks.

    Returns ``(u_x, u_y, final_transform, failed)``: dense displacement
    fields for warping *source*, the 3x3 homogeneous transform, and a flag
    set when no rotation beat the acceptance thresholds (in which case the
    identity transform is returned).
    """
    y_size, x_size = source.shape
    source = (source * 255).astype(np.uint8)
    target = (target * 255).astype(np.uint8)
    # Smoothing kernel scaled with image size (odd; ~31 px at 2048).
    max_size = max(y_size, x_size)
    smoothing_size = utils.round_up_to_odd(max_size / 2048 * 31)
    source = cv2.GaussianBlur(source, (smoothing_size, smoothing_size), 0)
    target = cv2.GaussianBlur(target, (smoothing_size, smoothing_size), 0)

    ret_source, thresholded_source = fd.threshold_calculation_with_rotation(source)
    ret_target, thresholded_target = fd.threshold_calculation_with_rotation(target)

    # Structuring-element size, also scaled with image size (odd; ~20 px at 2048).
    xs_m = utils.round_up_to_odd(x_size * 20 / 2048)
    ys_m = utils.round_up_to_odd(y_size * 20 / 2048)

    # Morphological open (erosion then dilation) to clean both binary masks.
    struct = min([xs_m, ys_m])
    thresholded_source = nd.binary_erosion(thresholded_source, structure=np.ones((struct, struct))).astype(np.uint8)
    thresholded_source = nd.binary_dilation(thresholded_source, structure=np.ones((struct, struct))).astype(np.uint8)
    thresholded_target = nd.binary_erosion(thresholded_target, structure=np.ones((struct, struct))).astype(np.uint8)
    thresholded_target = nd.binary_dilation(thresholded_target, structure=np.ones((struct, struct))).astype(np.uint8)

    # Centroids of both masks via image moments.
    Ms = cv2.moments(thresholded_source)
    Mt = cv2.moments(thresholded_target)

    cXs = Ms["m10"] / Ms["m00"]
    cYs = Ms["m01"] / Ms["m00"]
    cXt = Mt["m10"] / Mt["m00"]
    cYt = Mt["m01"] / Mt["m00"]

    # Pure translation aligning the source centroid with the target centroid.
    transform_centroid = np.array([
            [1, 0, (cXt-cXs)],
            [0, 1, (cYt-cYs)],
            [0, 0, 1]])
    u_x_t, u_y_t = utils.rigid_dot(source, np.linalg.inv(transform_centroid))
    failed = True
    angle_step = 2
    initial_dice = utils.dice(thresholded_source, thresholded_target)
    if echo:
        print("Initial dice: ", initial_dice)
    best_dice = initial_dice
    # Exhaustive rotation search: keep the angle with the best Dice overlap.
    for i in range(0, 360, angle_step):
        if echo:
            print("Current angle: ", i)
        rads = i * np.pi/180
        # Rotation about the target centroid: T(c) @ R(theta) @ T(-c).
        matrix_1 = np.array([
            [1, 0, cXt],
            [0, 1, cYt],
            [0, 0, 1],
        ])
        matrix_i = np.array([
            [np.cos(rads), -np.sin(rads), 0],
            [np.sin(rads), np.cos(rads), 0],
            [0, 0, 1],
        ])
        matrix_2 = np.array([
            [1, 0, -cXt],
            [0, 1, -cYt],
            [0, 0, 1],
        ])

        matrix = matrix_1 @ matrix_i @ matrix_2
        u_x, u_y = utils.rigid_dot(source, np.linalg.inv(matrix))
        # Apply rotation + centroid translation, then re-threshold and clean
        # the warped source with the same morphological open as above.
        transformed_source = utils.warp_image(source, u_x + u_x_t, u_y + u_y_t)

        ret_transformed_source, thresholded_transformed_source = fd.threshold_calculation_with_threshold_with_rotation(transformed_source, ret_source)
        thresholded_transformed_source = nd.binary_erosion(thresholded_transformed_source, structure=np.ones((struct, struct))).astype(np.uint8)
        thresholded_transformed_source = nd.binary_dilation(thresholded_transformed_source, structure=np.ones((struct, struct))).astype(np.uint8)
        current_dice = utils.dice(thresholded_transformed_source, thresholded_target)
        if echo:
            print("Current dice: ", current_dice)

        # Accept only clearly better angles: either a solid absolute overlap
        # (>0.85) with a >=0.10 improvement over the start, or near-perfect
        # overlap (>0.95) that still improves on the best so far.
        if (current_dice > best_dice and current_dice > initial_dice + 0.10 and current_dice > 0.85) or (current_dice > 0.95 and current_dice > best_dice):
            failed = False
            best_dice = current_dice
            transform = matrix.copy()
            if echo:
                print("Current best dice: ", best_dice)

    if failed:
        transform = np.eye(3)

    # Compose rotation with the centroid translation; on failure fall back to
    # the identity (the composed transform is discarded below).
    final_transform = transform @ transform_centroid
    if echo:
        print("Calculated transform: ", final_transform)
    if failed:
        final_transform = np.eye(3)
    u_x, u_y = utils.rigid_dot(source, np.linalg.inv(final_transform))
    return u_x, u_y, final_transform, failed