def task(self):
		self.next_frame()

		gray = cv2.cvtColor(self.clean_image, cv2.COLOR_BGR2GRAY)
		edges = cv2.Canny(gray,
					self.canny_threshold1.get(),
					self.canny_threshold2.get(),
					apertureSize = ((self.canny_apertureSize.get()*2) - 1)
				)

		lines = cv2.HoughLines(edges,
						self.rho.get(),
						np.pi/self.theta.get(),
						self.threshold.get()
					)
		if lines is not None:
			for line in lines:
				rho_x, theta_x = line[0]
				a = np.cos(theta_x)
				b = np.sin(theta_x)
				x0 = a*rho_x
				y0 = b*rho_x
				x1 = int(x0 + 1000*(-b))
				y1 = int(y0 + 1000*(a))
				x2 = int(x0 - 1000*(-b))
				y2 = int(y0 - 1000*(a))

				cv2.line(self.clean_image, (x1, y1), (x2, y2), (0, 0, 255), 2)
		
		self.clean_image = imutils.resize(self.clean_image, width=1000)
		cv2.imshow(self.window_name, self.clean_image)

		self.master.after(20, self.task)
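
The (rho, theta) to endpoint conversion in the loop above recurs in several examples below (#4, #25, #32). A minimal standalone sketch of that idiom; the helper name is ours, not from the source:

import numpy as np

def polar_to_segment(rho, theta, length=1000):
    # A Hough line is the set of points with x*cos(theta) + y*sin(theta) = rho.
    # (x0, y0) is the foot of the perpendicular from the origin; the segment
    # extends `length` pixels along the line in both directions.
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    p1 = (int(x0 - length * b), int(y0 + length * a))
    p2 = (int(x0 + length * b), int(y0 - length * a))
    return p1, p2

# usage:
#   p1, p2 = polar_to_segment(rho_x, theta_x)
#   cv2.line(img, p1, p2, (0, 0, 255), 2)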
Example #2
    def draw_motion(self, im=None, draw_outliers=False):

        show_image = False

        if im is None:
            im = self.result_image
            show_image = True

        if draw_outliers:
            print("points shape: " + str(self.prev_points.shape[0]) + " outliers " +
                  str(self.outliers.shape))

        for i in range(0, self.prev_points.shape[0]):
            prev_pt = self.prev_points[i]
            next_pt = self.next_points[i]

            color = np.array([0, 255, 0])

            if draw_outliers:
                idx = 2 * i
                if self.outliers[idx] or self.outliers[idx + 1]:
                    color = np.array([0, 0, 255])

            cv2.circle(im, (int(prev_pt[0]), int(prev_pt[1])), 2, color, -1)
            cv2.line(im, (int(prev_pt[0]), int(prev_pt[1])), (int(next_pt[0]), int(next_pt[1])),
                     (255, 0, 0), 1)

        if show_image:
            cv2.imshow("", im)
            cv2.waitKey(0)

        return im
Example #3
def draw_lines(src, lines, color=(255, 0, 0), thickness=3):
    if lines:
        for p1, p2 in lines:
            p1 = new_point(*p1)
            p2 = new_point(*p2)
            line(src, p1, p2, convert_color(color), thickness, 8)
Example #4
def drawLine(img, point1, point2):
    # Line through two points in homogeneous coordinates: l = p1 x p2
    line = np.cross(point1, point2)
    a, b, c = line
    x1, x2 = 0, imgX - 1
    y1, y2 = -(a * x1 + c) / b, -(a * x2 + c) / b
    cv2.line(img, (int(y1), x1), (int(y2), x2), (0, 0, 0))
    return line
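
drawLine returns the line in homogeneous coordinates (the cross product of two homogeneous points). The dual operation, intersecting two such lines, is again a cross product; a small sketch under the same convention (helper name hypothetical):

import numpy as np

def line_intersection(line1, line2):
    # In homogeneous coordinates the intersection of two lines is their
    # cross product; dividing by w recovers Cartesian coordinates.
    x, y, w = np.cross(line1, line2)
    if w == 0:
        return None  # parallel lines meet at infinity
    return (x / w, y / w)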
def Hough_line_transform():
    img = cv2.imread('sd.jpg')
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    #cv2.Canny(image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]]) → edges
    edges = cv2.Canny(gray,50,150,apertureSize = 3)

    #cv2.HoughLines(image, rho, theta, threshold[, lines[, srn[, stn]]]) → lines
    #rho – Distance resolution of the accumulator in pixels.
    #theta – Angle resolution of the accumulator in radians
    #Accumulator threshold parameter. Only those lines are returned that get enough votes (>threshold)
    lines = cv2.HoughLines(edges, 1, np.pi/180, 100)

    if lines is None:
        return img

    for rho, theta in lines[:, 0]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))

        cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)

    return img
Example #6
def task_proc_segmxr(data):
    ptrPathWdir     = data[0]
    ptrPathImg      = data[1]
    regXR           = RegisterXray()
    regXR.loadDB(ptrPathWdir)
    # regXR.printInfo()
    retMsk,retCorr  = regXR.registerMask(ptrPathImg)
    pathImgMask     = "%s_mask.png"   % ptrPathImg
    pathImgMasked   = "%s_masked.png" % ptrPathImg
    if retCorr>regXR.threshCorrSum:
        cv2.imwrite(pathImgMask,   regXR.newMsk)
        cv2.imwrite(pathImgMasked, regXR.newImgMsk)
    else:
        tmpNewImgMsk = cv2.imread(ptrPathImg, 1) #cv2.IMREAD_COLOR)
        p00=(0,0)
        p01=(0,tmpNewImgMsk.shape[0])
        p10=(tmpNewImgMsk.shape[1],0)
        p11=(tmpNewImgMsk.shape[1], tmpNewImgMsk.shape[0])
        cv2.line(tmpNewImgMsk, p00, p11, (0,0,255), 4)
        cv2.line(tmpNewImgMsk, p01, p10, (0,0,255), 4)
        regXR.newMsk[:]=0
        cv2.imwrite(pathImgMask,   regXR.newMsk)
        cv2.imwrite(pathImgMasked, tmpNewImgMsk)
        fnErr="%s.err" % ptrPathImg
        f=open(fnErr,'w')
        f.close()
    fzip="%s.zip" % ptrPathImg
    zObj=zipfile.ZipFile(fzip, 'w')
    zipDir='%s_dir' % os.path.basename(ptrPathImg)
    lstFimg=(ptrPathImg, pathImgMask, pathImgMasked)
    for ff in lstFimg:
        ffbn = os.path.basename(ff)
        zObj.write(ff, "%s/%s" % (zipDir, ffbn))
    zObj.close()
    print("retCorr = %s" % retCorr)
def get_boundaries(img, color_img_blanks):
    """Calculate the upper and lower boundary of text in the document."""
    img_height = np.shape(img)[0]
    blank_lines = []
    first_non_blank = False
    upper_boundary = 0
    lower_boundary = 0

    for idx in range(img_height // 2):
        line = img[idx, :]
        dark = len(np.where(line == 0)[0])
        if dark >= 50 and dark < 0.1 * len(line):
            first_non_blank = True
        if dark < 50 and first_non_blank:
            blank_lines.append(idx)
            cv2.line(color_img_blanks, (0, idx), (np.shape(img)[1], idx), (0, 255, 0), 1)
            
    first_non_blank = False
    for idx in reversed(range(img_height // 2, img_height)):
        line = img[idx, :]
        dark = len(np.where(line == 0)[0])
        if dark >= 50 and dark < 0.1 * len(line):
            first_non_blank = True
        if dark < 50 and first_non_blank:
            blank_lines.append(idx)
            cv2.line(color_img_blanks, (0, idx), (np.shape(img)[1], idx), (0, 255, 0), 1)

    upper_blank_lines = [item for item in blank_lines if item < img_height/2]
    if len(upper_blank_lines) > 0:
        upper_boundary = longest_increasing_run(upper_blank_lines)[0]
    
    lower_blank_lines = [item for item in blank_lines if item >= img_height/2]
    if len(lower_blank_lines) > 0:
        lower_boundary = longest_increasing_run(lower_blank_lines)[0]

    return upper_boundary, lower_boundary
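
get_boundaries relies on a longest_increasing_run helper that is not shown. A plausible sketch, assuming it returns the longest run of consecutive row indices (the callers above take its first element as the boundary):

def longest_increasing_run(values):
    # Return the longest run of consecutive integers in `values`,
    # e.g. [3, 4, 5, 9, 10] -> [3, 4, 5]. Assumes `values` is non-empty,
    # which the callers above guarantee.
    best = current = [values[0]]
    for prev, cur in zip(values, values[1:]):
        current = current + [cur] if cur == prev + 1 else [cur]
        if len(current) > len(best):
            best = current
    return best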
Example #8
    def show_match(self, image_test, descriptors_all):
        """
        :type image_test: Image
        :type matches: np.array
        :type distances: np.array
        """
        distances, matches = self._classifier.kneighbors(descriptors_all, return_distance=True, n_neighbors=1)

        image_test_rgb = image_test.get_rgb()  # type: Image

        for feature, match_ids, match_dists in zip(image_test.get_all_features(), matches, distances):
            xy1, w1 = feature.get_global_xy_w()
            for m in match_ids:
                other_feature = self._features_all[m]
                image_train_rgb = other_feature.get_image().get_rgb()
                xy2, w2 = other_feature.get_global_xy_w()

                offset = image_test_rgb.shape[1]
                size = offset + image_train_rgb.shape[1]
                xy2 += [offset, 0]

                showoff = np.zeros((Image.DEFAULT_HEIGHT, size, 3), np.uint8)

                showoff[0:image_test_rgb.shape[0], 0:image_test_rgb.shape[1], :] = image_test_rgb
                showoff[0:image_train_rgb.shape[0], 0 + offset:image_train_rgb.shape[1] + offset, :] = image_train_rgb

                cv2.line(showoff, tuple(xy1), tuple(xy2), (0, 0, 255), thickness=1)
                cv2.circle(showoff, tuple(xy1), int(w1), (0, 0, 255), thickness=1)
                cv2.circle(showoff, tuple(xy2), int(w2), (0, 0, 255), thickness=1)
                plt.imshow(cv2.cvtColor(showoff, cv2.COLOR_RGB2BGR))
                plt.show()
Example #9
def remove_line(box_bw,line_thickness):
    edges = cv2.Canny(box_bw, 80, 120)

    # dilate: it will fill holes between line segments
    # (note: a 1x1 structuring element makes this dilation a no-op)
    (r, c) = np.shape(box_bw)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (1, 1))
    edges = cv2.dilate(edges, element)
    min_dim = np.minimum(r, c)
    lines = cv2.HoughLinesP(edges, 1, math.pi/2, 2, None, min_dim*0.75, 1)
        
    r_low_lim=r*0.1
    r_high_lim=r-r_low_lim

    c_low_lim=c*0.1
    c_high_lim=c-c_low_lim

    if lines is not None:
        for line in lines[:, 0]:
            pt1 = (line[0], line[1])
            pt2 = (line[2], line[3])
            theta_radian2 = np.arctan2(line[2]-line[0], line[3]-line[1])  # angle of the segment, in radians
            theta_deg2 = (180/math.pi)*theta_radian2  # convert radians to degrees
            if 85 < theta_deg2 < 95:  # horizontal line
                # if the line starts and ends within 10% of the box's top or bottom edge, remove it
                if (line[1] <= r_low_lim or line[1] >= r_high_lim) and (line[3] <= r_low_lim or line[3] >= r_high_lim):
                    cv2.line(box_bw, pt1, pt2, 255, line_thickness)
            if 175 < theta_deg2 < 185:  # vertical line
                if (line[0] <= c_low_lim or line[0] >= c_high_lim) and (line[2] <= c_low_lim or line[2] >= c_high_lim):
                    cv2.line(box_bw, pt1, pt2, 255, line_thickness)
    return box_bw
Example #10
def make_image():
    img = np.zeros((500, 500), np.uint8)
    black, white = 0, 255
    for i in range(6):
        dx = (i % 2)*250 - 30
        dy = (i // 2)*150

        if i == 0:
            for j in range(11):
                angle = (j+5)*np.pi/21
                c, s = np.cos(angle), np.sin(angle)
                x1, y1 = np.int32([dx+100+j*10-80*c, dy+100-90*s])
                x2, y2 = np.int32([dx+100+j*10-30*c, dy+100-30*s])
                cv2.line(img, (x1, y1), (x2, y2), white)

        cv2.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
    return img
Example #11
def visualize_descriptor():
	""" Visualize the current value of the SIFT descriptor """
	global V
	dc, des = extractor.compute(im, [keypoint])

	# hardcoded, there are 16 orientation histograms with 8 orientations each
	O = des[0].reshape((16,8))
	V = 255*np.ones(visualizer_size,dtype=np.uint8)

	# draw the grid that divides the different histograms
	for i in range(0, V.shape[0]+1, V.shape[0]//4):
		cv2.line(V,(0,i),(V.shape[1],i),(0,0,0),2)
		cv2.line(V,(i,0),(i,V.shape[0]),(0,0,0),2)

	# loop over all columns of the grid
	for i in range(4):
		# loop over all rows of the grid
		for j in range(4):
			# loop over all orientations
			for k in range(8):
				theta = k*pi/4.0
				center = ((i+0.5)*V.shape[0]/4.0,(j+0.5)*V.shape[0]/4.0)
				v = O[i+j*4,k]/10.0
				end_point = ((i+0.5)*V.shape[0]/4.0+v*cos(theta),(j+0.5)*V.shape[0]/4.0-v*sin(theta))

				# compute the end points of the lines on the arrow head
				arrow_e1 = (end_point[0]+cos(theta+3*pi/4)*v/4.0, end_point[1]-sin(theta+3*pi/4)*v/4.0)
				arrow_e2 = (end_point[0]+cos(theta-3*pi/4)*v/4.0, end_point[1]-sin(theta-3*pi/4)*v/4.0)

				# draw a line with an arrow head (color 0 = black, thickness 2;
				# the original passed only "2", which cv2 read as the color)
				cv2.line(V,(int(center[0]),int(center[1])),(int(end_point[0]),int(end_point[1])),0,2)
				cv2.line(V,(int(end_point[0]),int(end_point[1])),(int(arrow_e1[0]),int(arrow_e1[1])),0,2)
				cv2.line(V,(int(end_point[0]),int(end_point[1])),(int(arrow_e2[0]),int(arrow_e2[1])),0,2)
Example #12
def draw_match(img1, img2, p1, p2, status = None, H = None):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1+w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
        cv2.polylines(vis, [corners], True, (255, 255, 255))
    
    if status is None:
        status = np.ones(len(p1), np.bool_)
    green = (0, 255, 0)
    red = (0, 0, 255)
    for (x1, y1), (x2, y2), inlier in zip(np.int32(p1), np.int32(p2), status):
        col = [red, green][inlier]
        if inlier:
            cv2.line(vis, (x1, y1), (x2+w1, y2), col)
            cv2.circle(vis, (x1, y1), 2, col, -1)
            cv2.circle(vis, (x2+w1, y2), 2, col, -1)
        else:
            r = 2
            thickness = 3
            cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
            cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
            cv2.line(vis, (x2+w1-r, y2-r), (x2+w1+r, y2+r), col, thickness)
            cv2.line(vis, (x2+w1-r, y2+r), (x2+w1+r, y2-r), col, thickness)
    return vis
	def draw_morphometric_line(self, text, coefficient, point, tilt=(0,0), thickness=2):
		#numpyfied
		try:
			A = point[::-1]
			cx, cy =  np.array((int(coefficient),0)), np.array((0,int(coefficient)))
			tilt = np.array(tilt)
			if point is self.topmost:
				if text != 'ED':
					B = A + cy
					M = (A + B)/2.0 - (30,0 )
				else:
					A = (A[0], int(A[1] + self.PoL))
					B = (A[0], int(A[1] + self.ED))
					mx,my = midpoint(A,B); mx -= 30; M = (mx,my)
			elif point is self.bottommost:
				B = A - cy
				M = (A + B)/2.0 - (30,0 )

			elif point is self.leftmost:
				B = A + cx
				M = (A+B)/2.0 - (0,30)
			elif point is self.rightmost:	
				B = A - cx
				M = (A+B)/2.0 - (0,30)
			else:
				return
			A += tilt
			B += tilt
			M += tilt
			M = M.astype(int)
			c  = random.randint(0,255)
			cv2.line(self.drawn_img, tuple(A), tuple(B), (c,0,0), thickness)
			cv2.putText(self.drawn_img,text,tuple(M), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,(c,0,0),1)
		except NameError:
			return
Example #14
def power(src, angle, a, b):
    length = math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)
    dx = (b[0]-a[0]) / length
    dy = (b[1]-a[1]) / length
    total = 0.0
    n = 0
    out = copy.copy(src)

    for i in range(int(length)):
        # sample along the segment a-b; indices must be ints
        y = int(a[1] + i*dy)
        x = int(a[0] + i*dx)

        if angle[y][x]*(180.0/math.pi) < 361 and angle[y][x]*(180.0/math.pi) > -1:
            e = math.acos(abs(dx*math.cos(angle[y][x]) + dy*math.sin(angle[y][x])))*(180.0/math.pi)
            total = total + e
            n = n + 1
            ddx = 30*math.cos(angle[y][x])
            ddy = 30*math.sin(angle[y][x])

            cv2.circle(out, (x, y), 2, (0, 255, 255), 2, 8)
            cv2.line(out, (x, y), (int(x+ddx), int(y+ddy)), (0, 255, 255), 2, 8)
            cv2.line(out, a, b, (255, 255, 0), 2, 8)
    if n == 0:
        return 90
    else:
        return total / float(n)
Example #15
    def redraw_monocular(self, drawable):
        height = drawable.scrib.shape[0]
        width = drawable.scrib.shape[1]

        display = numpy.ones((max(480, height), width + 100, 3), dtype=numpy.uint8)
        display[0:height, 0:width,:] = drawable.scrib
        display[0:height, width:width+100,:].fill(255)

        self.buttons(display)
        if not self.c.calibrated:
            if drawable.params:
                for i, (label, lo, hi, progress) in enumerate(drawable.params):
                    (w, _) = self.getTextSize(label)
                    self.putText(display, label, (width + (100 - w) // 2, self.y(i)))
                    color = (0,255,0)
                    if progress < 1.0:
                        color = (0, int(progress*255.), 255)
                    cv2.line(display,
                            (int(width + lo * 100), self.y(i) + 20),
                            (int(width + hi * 100), self.y(i) + 20),
                            color, 4)

        else:
            self.putText(display, "lin.", (width, self.y(0)))
            linerror = drawable.linear_error
            if linerror < 0:
                msg = "?"
            else:
                msg = "%.2f" % linerror
                #print "linear", linerror
            self.putText(display, msg, (width, self.y(1)))

        self.queue_display.append(display)
Example #16
def draw_match(img1, kp1,
               img2, kp2, 
               matches,
               output = None,
               matchColor = (255,0,0),
               matchesMask = None,
               *args, **kwargs):
    h1 = img1.shape[0]
    w1 = img1.shape[1]
    
    h2 = img2.shape[0]
    w2 = img2.shape[1]
    
    w = w1 + w2
    h = max(h1, h2)
    
    outimg = np.full((h,w,3), 0, np.uint8)
    

    outimg[0:h1,0:w1, :] = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    outimg[0:h2,w1:w1+w2, :] = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    
    for i, m in enumerate(matches):
        if matchesMask is not None and matchesMask[i] == 0: continue
        i1 , i2  = m.queryIdx, m.trainIdx
        pt1, pt2 = kp1[i1].pt, kp2[i2].pt
        pt1 = ( int(pt1[0]), int(pt1[1])) 
        pt2 = ( int(w1 + pt2[0]), int(pt2[1]))
        cv2.line(outimg, pt1, pt2, matchColor)
    
    if output is not None:
        # copy into the caller-supplied buffer (assumed preallocated with the
        # right shape); rebinding the parameter, as the original did, has no
        # effect outside the function
        output[:] = outimg
    
    return outimg
Example #17
def test(lpinfo_list, hmmfile):
    import nhmm
    
    hmm = nhmm.siNHMM()
    hmm.read(hmmfile)
    (prior_new, trans_new, obs_new) = hmm.getparams()
    funcs.siPrintArray2D('%5.2f', trans_new)
    
    for li, lpi in enumerate(lpinfo_list):
        print('%d:%s' % (li, lpi.img_fn))
        onelpinfo = lpi
        obs_chain = onelpinfo.charobj.obs_chain
        print('obs:', obs_chain)
        print('state:', onelpinfo.charobj.state_chain)
        find_chain, score = hmm.viterbi_one(obs_chain)
        img = lpi.charobj.grayimg
        imgh, imgw = img.shape
        print('score:', score)
        print(find_chain)
        cimg = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
        for i in range(1, find_chain.shape[0]):
            if find_chain[i] == 1 or (find_chain[i] == 0 and find_chain[i-1] > 0) or (find_chain[i] > 0 and find_chain[i-1] == 0):
                cv.line(cimg, (i, 0), (i, imgh-1), (0, 0, 255), 1)
        print('------------------------------')
        allimg = np.append(lpi.charobj.grayimg, lpi.charobj.sblimg, axis=1)
        cv.imshow('allimg', allimg)
        cv.imshow('mark', cimg)
        cv.waitKey(0)
Example #18
def detect_gaze_direction(video_capture,predictor):
	cam = cv2.VideoCapture(video_capture)
	cam.set(3,640)
	cam.set(4,480)
	video_capture = cam

	detector = dlib.get_frontal_face_detector()

	while True:
		# Capture frame-by-frame
		ret, frame = video_capture.read()
		if ret:
			frame_color = frame
			frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			dets = detector(frame, 1)
			for k, d in enumerate(dets):
		        # Get the landmarks/parts for the face in box d.
				shape = predictor(frame, d)
		        # print(type(shape.part(1).x))
				cv2.circle(frame_color,(shape.part(36).x,shape.part(36).y),2,(0,0,255))
				cv2.circle(frame_color,(shape.part(39).x,shape.part(39).y),2,(0,0,255))
				cv2.circle(frame_color,(shape.part(42).x,shape.part(42).y),2,(0,0,255))
				cv2.circle(frame_color,(shape.part(45).x,shape.part(45).y),2,(0,0,255))
				x1 = shape.part(36).x
				y1 = shape.part(37).y-2
				x2 = shape.part(39).x
				y2 = shape.part(40).y+2
				split = frame[y1:y2,x1:x2]
				split = process_eye(split)
				split = filter_eye(split)
				centre = cross_spread(split)
				frame[y1:y2,x1:x2]=split
				y1 = y1+2
				y2 = y2-2
				centre[1]=centre[1]-2
				# cv2.rectangle(frame_color,(x1,y1), (x2,y2), (0, 0, 255), 1)
				# cv2.circle(frame_color,(x1+centre[0],y1+centre[1]),2,(0,0,255))
				cv2.line(frame_color,(x1+centre[0],y1+centre[1]), (int((3*x1+4*centre[0]-x2)/2),int((3*y1+4*centre[1]-y2)/2)),(255,0,0))			
				x1 = shape.part(42).x
				y1 = shape.part(43).y-2
				x2 = shape.part(45).x
				y2 = shape.part(46).y+2
				split = frame[y1:y2,x1:x2]
				split = process_eye(split)
				split = filter_eye(split)
				centre = cross_spread(split)
				frame[y1:y2,x1:x2]=split
				y1 = y1+2
				y2 = y2-2
				centre[1]=centre[1]-2
				# cv2.rectangle(frame_color,(x1,y1), (x2,y2), (0, 0, 255), 1)
				# cv2.circle(frame_color,(x1+centre[0],y1+centre[1]),2,(0,0,255))
				cv2.line(frame_color,(x1+centre[0],y1+centre[1]), (int((3*x1+4*centre[0]-x2)/2),int((3*y1+4*centre[1]-y2)/2)),(255,0,0))
			# Display the resulting frame
			cv2.imshow('Video', frame_color)
			if cv2.waitKey(1) & 0xFF == ord('q'):
				break
	# Release video capture
	video_capture.release()
	cv2.destroyAllWindows()
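
The two per-eye blocks above are identical apart from the landmark indices. A sketch of how they could be folded into one helper; the name and signature are ours, and process_eye, filter_eye, and cross_spread are the snippet's own helpers:

def track_eye(frame, frame_color, shape, corner_a, corner_b, lid_top, lid_bottom):
    # crop the eye box between the two corner landmarks
    x1, x2 = shape.part(corner_a).x, shape.part(corner_b).x
    y1 = shape.part(lid_top).y - 2
    y2 = shape.part(lid_bottom).y + 2
    split = filter_eye(process_eye(frame[y1:y2, x1:x2]))
    centre = cross_spread(split)
    frame[y1:y2, x1:x2] = split
    y1, y2 = y1 + 2, y2 - 2
    centre[1] -= 2
    # extrapolate the gaze ray from the pupil, away from the eye-box centre
    p0 = (x1 + centre[0], y1 + centre[1])
    p1 = (int((3*x1 + 4*centre[0] - x2) / 2), int((3*y1 + 4*centre[1] - y2) / 2))
    cv2.line(frame_color, p0, p1, (255, 0, 0))

# usage inside the loop above:
#   track_eye(frame, frame_color, shape, 36, 39, 37, 40)   # left eye
#   track_eye(frame, frame_color, shape, 42, 45, 43, 46)   # right eye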
 def update_image(self):
     if self.img is None:
         return
     img = self.img.copy()
     self.clear_markers()
     ma = MarkerArray()
     for s in self.id_table.selectedItems():
         if s.column() != 0: continue
         t = self.tracks[int(s.text())]
         if self.checkLabels.isChecked():
             c1 = ColorPalette.c[self.get(s.row(),1)]
             c2 = c1
         else:
             c1 = ColorPalette.c[0]
             c2 = (220,220,220)
         for i in range(1, len(t)):
             if self.checkLines.isChecked():
                 cv2.line(img, t[i-1], t[i], c1, 1, cv2.LINE_AA)
             if self.radioAll.isChecked():
                 cv2.circle(img, t[i], 5, c2, 1, cv2.LINE_AA)
         if self.radioLast.isChecked():
             cv2.circle(img, t[-1], 5, c2, 1, cv2.LINE_AA)
         self.add_marker(ma.markers, self.tracks3d[int(s.text())], c1, int(s.text()))
     msg = self.bridge.cv2_to_imgmsg(img,'rgb8')
     self.pub_marker.publish(ma)
     self.pub_img.publish(msg)
Example #20
def pen_lines_simple():
    # TODO: Like problem 3 above, but using ps2-input1.png
    noise_img = cv2.imread(os.path.join(input_dir, 'ps2-input1.png'), 0)  # flags=0 ensures grayscale
    blur = cv2.GaussianBlur(noise_img,(7,7),11)
    #Output: Smoothed monochrome image: ps2-4-a-1.png
    cv2.imwrite(os.path.join(output_dir, 'ps2-4-a-1.png'), blur)

    blur_img_edges = cv2.Canny(blur, 10, 50)
    #Output: Edge image: ps2-4-b-1.png
    cv2.imwrite(os.path.join(output_dir, 'ps2-4-b-1.png'), blur_img_edges)

    H, rho, theta = hough_lines_acc(blur_img_edges)
    peaks = hough_peaks(H, 4)

    #Hough accumulator array image with peaks highlighted: ps2-4-c-1.png
    copiedH = cv2.cvtColor(H.astype(np.uint8), cv2.COLOR_GRAY2BGR)  # copy & convert to color image
    cv2.line(copiedH, (0,0), (250,250),(0,0,0,255),2)
    for peak in peaks:
        cv2.circle(copiedH, (peak[1], peak[0]), 10, (0,0,255), 1)
    cv2.imwrite(os.path.join(output_dir, 'ps2-4-c-1.png'), copiedH)

    img_out = cv2.cvtColor(noise_img, cv2.COLOR_GRAY2BGR)
    hough_lines_draw(img_out, peaks, rho, theta)
    #Original monochrome image with lines drawn on it: ps2-4-c-2.png
    cv2.imwrite(os.path.join(output_dir, 'ps2-4-c-2.png'), img_out)
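
pen_lines_simple (and boxesWork in Example #30 below) depend on hough_lines_acc, hough_peaks, and hough_lines_draw from the same problem set, none of which are shown. A sketch of hough_lines_draw consistent with how peaks, rho, and theta are used here; the body (and the color choice) is an assumption:

import numpy as np

def hough_lines_draw(img, peaks, rho, theta):
    # Each peak is a (rho_index, theta_index) pair into the accumulator axes;
    # draw the corresponding line using the polar-to-segment idiom from Example #1.
    for peak in peaks:
        r, t = rho[peak[0]], theta[peak[1]]
        a, b = np.cos(t), np.sin(t)
        x0, y0 = a * r, b * r
        p1 = (int(x0 - 1000 * b), int(y0 + 1000 * a))
        p2 = (int(x0 + 1000 * b), int(y0 - 1000 * a))
        cv2.line(img, p1, p2, (0, 255, 0), 2)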
Example #21
    def run(self):
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w*2, 3), np.uint8)
            vis[:h,:w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv2.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv2.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
                        cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #22
def draw_circle(event,x,y,flags,param):
    global ix,iy,drawing,mode,spareimg,img
    
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix,iy = x,y
        
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            if mode:
                img = spareimg.copy()  # restore the clean copy so the rectangle rubber-bands
                cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),2)
            else:
                cv2.circle(img,(ix,iy),3,(0,0,255),-1)
                cv2.line(img,(ix,iy),(x,y),(0,0,255),3)
                ix,iy = x,y
            
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode:
            cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),2)
            spareimg = img.copy()
        else:
            cv2.circle(img,(x,y),3,(0,0,255),-1)
Example #23
def create_fiveline(image):
    edges = cv2.Canny(image, 50, 150, apertureSize=3)

    ys = list()
    minLineLength = 1
    maxLineGap = 10

    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 70,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(image, (x1,y1), (x2,y2), (0, 255, 0), 2)
            if abs(y1 - y2) < 4:
                innerlist = list()
                innerlist.append((y1 + y2) / 2)
                ys.append(innerlist)
    
    cv2.imwrite('images/houghlines.jpg', image)
    display_image(image)

    kmeans = KMeans(init='k-means++', n_clusters=5, n_init=10)
    kmeans.fit(np.asarray(ys))

    fiveline = list()
    for innerlist in kmeans.cluster_centers_:
        fiveline.append(innerlist[0])

    fiveline.sort()
    print "K-MEANS centers"
    print fiveline
    return fiveline
Example #24
File: HW4.py  Project: mfintz/CV
def mouse_collectSegments(event, x, y, flags, param):
    global workingImage, accumulatedImage, undo_img, drawLine, undo_available, segmentStart_X, segmentStart_Y

    if event == cv2.EVENT_LBUTTONDOWN:
        drawLine = True
        segmentStart_X = x
        segmentStart_Y = y
        undo_img = accumulatedImage.copy()

    elif event == cv2.EVENT_MOUSEMOVE:
        if drawLine:
            print (currentSection)
            workingImage = accumulatedImage.copy()
            cv2.line(workingImage, (segmentStart_X, segmentStart_Y), (x, y), SECTION_HIGHLIGHT_COLOR[currentSection], 1)

    elif event == cv2.EVENT_LBUTTONUP:
        drawLine = False
        undo_available = True
        accumulatedImage = workingImage.copy()

        #
        # If this is a simple one-click - it should be ignored
        #
        if not(segmentStart_X == x and segmentStart_Y == y):
            segments[currentSection].append([[segmentStart_X, segmentStart_Y], [x, y]])
            print(segments[currentSection])

    return
Example #25
def drawLines(lines_data, pic):
    """
    Horizontal lines have positive rho, vertical lines negative; theta is 0 in both cases.
    :param lines_data:
    :param pic:
    :return: list of rho values
    """
    line_para = []
    for rho, theta in lines_data[0]:
        print("rho: " + str(rho) + "  theta: " + str(theta))
        line_para.append(rho)
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv2.line(pic, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.imshow("image", pic)
        k = cv2.waitKey(0) & 0xFF
        if k == 27:
            cv2.destroyAllWindows()
    return line_para
Example #26
    def draw_zones(self, frame, width, height):
        # Re-initialize zones in case they have not been initialized
        if self.zones is None:
            self.zones = tools.get_zones(width, height, pitch=self.pitch)

        for zone in self.zones:
            cv2.line(frame, (zone[1], 0), (zone[1], height), BGR_COMMON['orange'], 1)
Example #27
    def draw_robot(self, frame, position_dict, color):
        if position_dict['box']:
            cv2.polylines(frame, [np.array(position_dict['box'])], True, BGR_COMMON[color], 2)

        frame = consol.draw_dots(frame)

        if position_dict['front']:
            p1 = (position_dict['front'][0][0], position_dict['front'][0][1])
            p2 = (position_dict['front'][1][0], position_dict['front'][1][1])
            cv2.circle(frame, p1, 3, BGR_COMMON['white'], -1)
            cv2.circle(frame, p2, 3, BGR_COMMON['white'], -1)
            cv2.line(frame, p1, p2, BGR_COMMON['red'], 2)

        if position_dict['dot']:
            cv2.circle(
                frame, (int(position_dict['dot'][0]), int(position_dict['dot'][1])),
                4, BGR_COMMON['black'], -1)

        #Draw predicted position
        '''
        cv2.circle(
            frame, (,
                    ),
            4, BGR_COMMON['yellow'], -1)
        '''
        px = self.launch.planner.world.our_attacker.predicted_vector.x
        py = len(frame) - self.launch.planner.world.our_attacker.predicted_vector.y
        consol.log_dot([px,py], 'yellow', 'kalman')

        if position_dict['direction']:
            cv2.line(
                frame, position_dict['direction'][0], position_dict['direction'][1],
                BGR_COMMON['orange'], 2)
Example #28
  def _draw_on_frame(self,left_frame,right_frame,pose):
  
    transformed_points = self._transform_model(pose)
    
    for point in transformed_points:
      
      left_projected_point,right_projected_point = self.camera.project(point.vertex.reshape(1,3))
      
      for neighbor_index in point.neighbors:
      
        neighbor = transformed_points[neighbor_index]
        left_projected_neighbor,right_projected_neighbor = self.camera.project(neighbor.vertex.reshape(1,3))


        left_projected_point = left_projected_point.reshape(2).astype(np.int32)
        right_projected_point = right_projected_point.reshape(2).astype(np.int32)
        left_projected_neighbor = left_projected_neighbor.reshape(2).astype(np.int32)
        right_projected_neighbor = right_projected_neighbor.reshape(2).astype(np.int32)
    
        # note: projected_point and projected_neighbor need to be tuples
        cv2.line(left_frame,tuple(left_projected_point),tuple(left_projected_neighbor),(255,12,15),1,cv2.LINE_AA)
        
        cv2.line(right_frame,tuple(right_projected_point),tuple(right_projected_neighbor),(255,12,15),1,cv2.LINE_AA)
        
    return left_frame,right_frame 
Example #29
def recognize_linear_shapes(img, shapes):
    largest = rectify.find_largest_container(shapes)
    threshold_shape_sizes(shapes)
    for shape in shapes:
        if shape.is_complete():
            # Recognize the shape by vertex count; the default color guards
            # degenerate shapes with fewer than three vertices, which would
            # otherwise leave `color` unbound
            color = (255, 255, 255)
            if len(shape) == 3:
                color = (255, 0, 0)
            elif len(shape) == 4:
                color = (0, 255, 0)
            elif len(shape) > 4:
                color = (255, 255, 0)

            # Iterate over consecutive vertex pairs (wrapping around with the
            # modulo) to draw the closed shape
            vs = shape.get_vertices()
            for i, v in enumerate(vs):
                cv2.line(img, v, vs[(i+1)%len(vs)], color, 2)
                color = (color[0], color[1], color[2] + np.uint8(255./(len(shape)-1)))

    color = (0, 0, 255)
    # Draw the biggest quadrilateral
    if largest:
        vs = largest.get_vertices()
        # print vs
        for i, v in enumerate(vs):
            cv2.line(img, v, vs[(i+1)%len(vs)], color, 2)
Example #30
def boxesWork():
    # 1-a
    # Load the input grayscale image
    img = cv2.imread(os.path.join(input_dir, 'ps2-input0.png'), 0)  # flags=0 ensures grayscale
    img_edges = cv2.Canny(img,100,200)
    # TODO: Compute edge image (img_edges)
    cv2.imwrite(os.path.join(output_dir, 'ps2-1-a-1.png'), img_edges)  # save as ps2-1-a-1.png

    # 2-a
    # Compute Hough Transform for lines on edge image
    H, rho, theta = hough_lines_acc(img_edges)

    # TODO: Store accumulator array (H) as ps2-2-a-1.png
    # Note: Write a normalized uint8 version, mapping min value to 0 and max to 255
    cv2.imwrite(os.path.join(output_dir, 'ps2-2-a-1.png'), H)


    # 2-b
    # Find peaks (local maxima) in accumulator array
    peaks = hough_peaks(H, 6)  # TODO: implement this, try different parameters

    # TODO: Store a copy of accumulator array image (from 2-a), with peaks highlighted, as ps2-2-b-1.png
    copiedH = cv2.cvtColor(H.astype(np.uint8), cv2.COLOR_GRAY2BGR)  # copy & convert to color image
    cv2.line(copiedH, (0,0), (250,250),(0,0,0,255),2)
    for peak in peaks:
        cv2.circle(copiedH, (peak[1], peak[0]), 10, (0,0,255), 1)
    cv2.imwrite(os.path.join(output_dir, 'ps2-2-b-1.png'), copiedH)

    # 2-c
    # Draw lines corresponding to accumulator peaks
    img_out = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)  # copy & convert to color image
    hough_lines_draw(img_out, peaks, rho, theta)  # TODO: implement this
    cv2.imwrite(os.path.join(output_dir, 'ps2-2-c-1.png'), img_out)  # save as ps2-2-c-1.png
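
A matching sketch for the hough_peaks helper used above and in pen_lines_simple (Example #20): greedy non-maximum suppression over the accumulator. The body and the neighborhood size are assumptions; only the name and the (rho_index, theta_index) peak format come from the snippets:

import numpy as np

def hough_peaks(H, num_peaks, nhood_size=5):
    # Greedily take the `num_peaks` largest accumulator cells, zeroing a small
    # neighborhood around each pick so near-duplicates are suppressed.
    H = H.copy()
    peaks = []
    for _ in range(num_peaks):
        r, c = np.unravel_index(np.argmax(H), H.shape)
        peaks.append((r, c))
        H[max(r - nhood_size, 0):r + nhood_size + 1,
          max(c - nhood_size, 0):c + nhood_size + 1] = 0
    return peaks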
  def callback(self,data):
    try:
      ir = self.bridge.imgmsg_to_cv2(data)
    except CvBridgeError as e:
      print(e)
      
    global lines,coeff,dilation, outer, circles, edges, cls 
    global grip,img,metr,zeros,gray,cloud
    
    mtxloaded = np.load('mtx.npy')
    distloaded = np.load('dist.npy')
    display_min = 1
    display_max = 30000
    
    img = lut_display(ir,display_min,display_max)
    h,  w = img.shape[:2]
    newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtxloaded,distloaded,(w,h),1,(w,h))
    zeros = np.zeros((h,w),np.uint8)
    img = cv2.undistort(img, mtxloaded, distloaded, None, newcameramtx)
  
    x,y,w,h = roi
    img = img[y:y+h, x:x+w]
   
    ##########################
    metric = np.zeros((3,5),np.float16)
    metric[0] = [3.1,3.5,3.9,2.7,4.3]    # BOLT
    metric[1] = [100,0.7,0.55,-1,-1]     # NUT
    metric[2] = [3.6, 4.0, 3.2,4.4,-1]   # BALK
    metr = ['BOLT','NUT','BALK']

    met_count = np.zeros((3),np.uint16)
    met_count[0] = 1
    met_count[1] = 2
    met_count[2] = 1
    met = []
    for i in range(3):
        met.append([])
    pairs = []
    grip = []
    for i in range(15):
        grip.append([])
    met_circ = []
    for i in range(3):
        met_circ.append([])
    
    gray = img
    gray = cv2.medianBlur(gray, 3)
    h, w = gray.shape[:2]
    #self.dstPlane(0)
    x,y=[400,200]
    #cv2.circle(img,(x,y),7,255,-1)
    #print(self.cloud[y-30,x+20])
    lines, outer, circles, edges, dilation,cls = line(gray)
    if lines is not None and False:  # the "and 1==0" in the original deliberately disables this block
        a = len(lines)

        mask = np.zeros((h+2,w+2), np.uint8)

        coeff = []
        for i in range(lines.shape[0]):
            x1, y1, x2, y2 = lines.item(i,0,0), lines.item(i,0,1), lines.item(i,0,2), lines.item(i,0,3)
            length = int(math.sqrt((x1-x2)**2 + (y1-y2)**2))
            if x1 != x2:
                k = round((float(y2 - y1))/(float(x2 - x1)), 5)
                b = y1 - k*x1
            else:
                k = -337
                b = y1 - k*x1
            coeff.append([k, b, length, (x1+x2)/2, (y1+y2)/2, y2-y1, x2-x1])
        coeff = np.array(coeff)
        
        best_I, best_J, maxDist = 0,0,0
        k=0
        ###   removin' close lines   ###
        for i in range(lines.shape[0]):
            if (lines[i,0,0] != 0):
                for j in range(lines.shape[0]):
                    if (lines[j,0,0] != 0) and (i != j):
                        cdst = math.sqrt(math.pow(coeff[j,3]-coeff[i,3],2) + math.pow(coeff[j,4]-coeff[i,4],2))
                        if (abs((math.atan2(coeff[i,5],coeff[i,6])-math.atan2(coeff[j,5],coeff[j,6]))) < 0.2) and (cdst < 100):
                            dst = dist(lines[i,0,0],lines[i,0,1],coeff[i,0],coeff[j,0],coeff[j,1])
                            if (coeff[j,2] < coeff[i,2]) and (dst<6):
                                lines[j]=0
                                k += 1
        ################################

        ###  showin' remaining lines ###
        for i in range(lines.shape[0]):
            cv2.line(img, (lines[i,0,0], lines[i,0,1]), (lines[i,0,2], lines[i,0,3]),
                     (0,255,0), 1, cv2.LINE_AA)
            
        ################################

        ### findin' parallel line   ####
        m=1
        a = len(lines)

        for i in range(lines.shape[0]):
            best_J=-1
            maxDist = 0
            d = dilation.item(lines[i,0,1],lines[i,0,0])
            if  (lines[i,0,0]!=0):
                for j in range(lines.shape[0]):
                    if (lines[j,0,0]!=0) and (abs((math.atan2(coeff[i,5],coeff[i,6])- \
                                        math.atan2(coeff[j,5],coeff[j,6]))) < 0.25) and (i!=j):
                        if (dilation[lines[j,0,1],lines[j,0,0]] == 255):
                            cv2.floodFill(dilation,mask,(lines[j,0,0],lines[j,0,1]),m*10+100)
                            m=m+1
                        if (dilation[lines[j,0,1],lines[j,0,0]] == dilation[lines[i,0,1],lines[i,0,0]]):
                            dst = dist(lines[i,0,0],lines[i,0,1],coeff[i,0],coeff[j,0],coeff[j,1])
                            if (dst >= maxDist) and (float(min(coeff[i,2],coeff[j,2]))/float( \
                                max(coeff[i,2],coeff[j,2])) > 0.5):
                                maxDist = dst              
                                best_J = j
            
            if (best_J != -1):
                
                c = 2
                d = dilation[lines[best_J,0,1],lines[best_J,0,0]]
                best_I = parallel(best_J, a, d)
                
                length = (coeff[best_I,2]+coeff[best_J,2])/2
                dst = dist(lines[best_I,0,0],lines[best_I,0,1],coeff[best_I,0],coeff[best_J,0],coeff[best_J,1])
                
                cdst = math.sqrt(math.pow(coeff[best_J,3]-coeff[best_I,3],2) + math.pow(coeff[best_J,4]-coeff[best_I,4],2))
                if (dst != 0):
                    
                    if ((float(min(dst, cdst)))/(float(max(dst, cdst))) > 0.5):
                        
                        cv2.line(img, (lines[best_J,0,0], lines[best_J,0,1]), \
                                     (lines[best_J,0,2], lines[best_J,0,3]), \
                                     (0,0,255),1,cv2.LINE_AA)
                        cv2.line(img, (lines[best_I,0,0], lines[best_I,0,1]), \
                                     (lines[best_I,0,2], lines[best_I,0,3]), \
                                     (0,0,255),1,cv2.LINE_AA)
                        if best_I not in pairs:
                            dst = dist(lines[best_I,0,0],lines[best_I,0,1],coeff[best_I,0],coeff[best_J,0],coeff[best_J,1])
                            m2 = (d-100)/10
                            if dst == 0:
                                dst = 0.001
                            coef = float(max(coeff[best_J,2],coeff[best_I,2]))/float(dst)
                           
                            k = np.where(abs(metric - coef) <= 0.2)
                            
                            for i in range(len(k[0])):
                                grip[m2].append([best_I,best_J])
                                met[k[0][i]].append(m2)
                                
                                cv2.line(img, (lines[best_J,0,0], lines[best_J,0,1]), \
                                     (lines[best_J,0,2], lines[best_J,0,3]), \
                                     (255),3,cv2.LINE_AA)
                                
                                cv2.line(img, (lines[best_I,0,0], lines[best_I,0,1]), \
                                     (lines[best_I,0,2], lines[best_I,0,3]), \
                                     (255),3,cv2.LINE_AA)
                                
                        if best_I not in pairs:
                            pairs.append(best_I)
                        if best_J not in pairs:
                            pairs.append(best_J)
                              
        
        if circles is not None:
            for (x,y,r) in circles[0,:]:
                d = dilation[int(y),int(x)]
                m2 = (d-100)/10
                k = np.where(metric == 100)
                if d > 0:
                    for i in range(len(k[0])):
                        met_circ[k[0][i]].append(m2)
                        
                        cv2.circle(edges, (x, y), r, 255 , 2)
               
        c = 0
        c0 = 0
        for i in range(1,m):
            bolt = ifBolt(i,img)
            for j in range(len(metric)):
                for z in range(len(met[j])):
                    if met[j][z] == i:
                        c += 1
                for l in range(len(met_circ[j])):   
                    if met_circ[j][l] == i:
                        c0 = 1
                if c >= met_count[j] and (metric[j][0]==100 and c0==1 or
                        metric[j][0]!=100 and c0==0):
                    if (metr[j] == 'BOLT'):
                        if (bolt==1):
                            print(str(i)+' - '+metr[j])
                            for o in range(0, len(grip[i])):
                                gripper(grip[i][o][0],grip[i][o][1],j)
                    elif (metr[j] == 'BALK'):
                        if (bolt==0):
                            print(str(i)+' - '+metr[j])
                            for o in range(0, len(grip[i])):
                                gripper(grip[i][o][0],grip[i][o][1],j)
                            
                    else:    
                        print(str(i)+' - '+metr[j])
                        for o in range(0, len(grip[i])):
                            gripper(grip[i][o][0],grip[i][o][1],j)
                        
                c = 0
                c0 = 0
    #print(np.nanmean(self.h))

    cv2.imshow('img',img)
    cv2.imshow('img2',self.grayDst)
    #cv2.imshow('edges',edges)
    #cv2.imshow('cls',cls)

    cv2.waitKey(3)

    #print(met)
    
    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(ir))
    except CvBridgeError as e:
      print(e)
Example #32
def main():

    #for eachArg in sys.argv:
    #    print eachArg
    filename = sys.argv[1]

    image = cv2.imread(filename)
    if image is None:
        print('Unable to open file', filename)
        return
    
    rows=image.shape[0]
    cols=image.shape[1]
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray,150,250,apertureSize = 3)
    # Arguments are distance resolution, angle resolution, threshold.
    # A larger distance resolution yields larger bins, so more lines meet the
    # threshold; a larger angular resolution yields fewer lines, with similar
    # lines counted as the same line.
    lines = cv2.HoughLines(edges,2,2*np.pi/180,100)
    if lines is None:
        print('No lines found')
        return
      
    for line in lines:
        for rho, theta in line:
          if theta != 0: # ignore verticals    

            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a*rho
            y0 = b*rho
            seglength = 1000
            x1 = int(x0 + seglength*(-b))
            y1 = int(y0 + seglength*(a))
            x2 = int(x0 - seglength*(-b))
            y2 = int(y0 - seglength*(a))
            cv2.line(image,(x1,y1),(x2,y2),(0,0,255),2)
    xtotal=0
    xcount=0
    ytotal=0
    ycount=0
    for line1,line2 in combinations(lines,2):
      rho1=line1[0][0]
      theta1=line1[0][1]
      rho2=line2[0][0]
      theta2=line2[0][1]
      #print rho1, theta1, rho2, theta2
      if theta1 != 0 and theta2 != 0: # ignore verticals    

        x0, y0 = intersection(rho1,theta1,rho2,theta2)
        xtotal+=x0
        xcount+=1
        ytotal+=y0
        ycount+=1
        cv2.circle(image, (x0,y0),3,(255,0,0),-1)

    
    if xcount > 0:
        cv2.circle(image, (int(xtotal / xcount), int(ytotal / ycount)), 50, (0, 255, 255), 3)
    cv2.namedWindow('Hall with Line', cv2.WINDOW_NORMAL)
    cv2.imshow('Hall with Line',image)    
    cv2.waitKey(0)
    cv2.destroyAllWindows()
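
main calls an intersection helper that is not included. Intersecting two lines given in (rho, theta) form reduces to a 2x2 linear system; a sketch (assumed implementation):

import numpy as np

def intersection(rho1, theta1, rho2, theta2):
    # Solve x*cos(t) + y*sin(t) = rho for both lines at once; raises
    # numpy.linalg.LinAlgError when the lines are parallel.
    A = np.array([[np.cos(theta1), np.sin(theta1)],
                  [np.cos(theta2), np.sin(theta2)]])
    b = np.array([rho1, rho2])
    x0, y0 = np.linalg.solve(A, b)
    return int(round(x0)), int(round(y0))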
Example #33
    def vis_keypoints(self, img, kps, kp_thresh=0.4, alpha=1):

        # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
        cmap = plt.get_cmap('rainbow')
        colors = [cmap(i) for i in np.linspace(0, 1, len(self.kps_lines) + 2)]
        colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

        # Perform the drawing on a copy of the image, to allow for blending.
        kp_mask = np.copy(img)

        # Draw mid shoulder / mid hip first for better visualization.
        mid_shoulder = (kps[:2, 5] + kps[:2, 6]) / 2.0
        sc_mid_shoulder = np.minimum(kps[2, 5], kps[2, 6])
        mid_hip = (kps[:2, 11] + kps[:2, 12]) / 2.0
        sc_mid_hip = np.minimum(kps[2, 11], kps[2, 12])
        nose_idx = 0
        if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
            cv2.line(kp_mask,
                     tuple(mid_shoulder.astype(np.int32)),
                     tuple(kps[:2, nose_idx].astype(np.int32)),
                     color=colors[len(self.kps_lines)],
                     thickness=2,
                     lineType=cv2.LINE_AA)
        if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
            cv2.line(kp_mask,
                     tuple(mid_shoulder.astype(np.int32)),
                     tuple(mid_hip.astype(np.int32)),
                     color=colors[len(self.kps_lines) + 1],
                     thickness=2,
                     lineType=cv2.LINE_AA)

        # Draw the keypoints.
        for l in range(len(self.kps_lines)):
            i1 = self.kps_lines[l][0]
            i2 = self.kps_lines[l][1]
            p1 = kps[0, i1].astype(np.int32), kps[1, i1].astype(np.int32)
            p2 = kps[0, i2].astype(np.int32), kps[1, i2].astype(np.int32)
            if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                cv2.line(kp_mask,
                         p1,
                         p2,
                         color=colors[l],
                         thickness=2,
                         lineType=cv2.LINE_AA)
            if kps[2, i1] > kp_thresh:
                cv2.circle(kp_mask,
                           p1,
                           radius=3,
                           color=colors[l],
                           thickness=-1,
                           lineType=cv2.LINE_AA)
            if kps[2, i2] > kp_thresh:
                cv2.circle(kp_mask,
                           p2,
                           radius=3,
                           color=colors[l],
                           thickness=-1,
                           lineType=cv2.LINE_AA)

        # Blend the keypoints.
        return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
    def _find_steering_angle_by_color(self, dashboard):
        if "frame" not in dashboard:
            return -100.0  # special case

        frame = dashboard["frame"]
        img_height = frame.shape[0]
        img_width = frame.shape[1]
        camera_x = img_width // 2

        track_view_slice = slice(*(int(x * img_height)
                                   for x in self._track_view_range))
        track_view = self._flatten_rgb(frame[track_view_slice, :, :])

        track_view_gray = cv2.cvtColor(track_view, cv2.COLOR_BGR2GRAY)
        tracks = map(lambda x: len(x[x > 20]), [track_view_gray])
        tracks_seen = filter(lambda y: y > 200, tracks)

        if len(list(tracks_seen)) == 0:
            show_image("frame", frame)  # display image to opencv window
            show_image("track_view",
                       track_view)  # display image to opencv window

            # show track image to webconsole
            dashboard["track_view"] = track_view
            dashboard["track_view_info"] = (track_view_slice.start,
                                            track_view_slice.stop, None)
            return -100.0  # special case

        _y, _x = np.where(track_view_gray == 76)
        if len(_x) == 0:
            return -100.0

        px = np.mean(_x)
        if np.isnan(px):
            show_image("frame", frame)  # display image to opencv window
            show_image("track_view",
                       track_view)  # display image to opencv window

            # show track image to webconsole
            dashboard["track_view"] = track_view
            dashboard["track_view_info"] = (track_view_slice.start,
                                            track_view_slice.stop, None)
            return -100.0  # special case

        steering_angle = math.atan2(track_view.shape[0] * float(3.5),
                                    (px - camera_x))

        #draw the steering direction and display on webconsole
        r = 60
        x = track_view.shape[1] // 2 + int(r * math.cos(steering_angle))
        y = track_view.shape[0] - int(r * math.sin(steering_angle))
        cv2.line(track_view, (track_view.shape[1] // 2, track_view.shape[0]),
                 (x, y), (255, 0, 255), 2)

        show_image("frame", frame)  # display image to opencv window
        show_image("track_view", track_view)  # display image to opencv window

        # show track image to webconsole
        dashboard["track_view"] = track_view
        dashboard["track_view_info"] = (track_view_slice.start,
                                        track_view_slice.stop,
                                        (np.pi / 2 - steering_angle) * 180.0 /
                                        np.pi)
        return (np.pi / 2 - steering_angle) * 180.0 / np.pi
Example #35
     pass
 else:
     m = (y2 - y1) / (x2 - x1)
     if Ball_DirectionY_Movement == 1:  #Up
         if Ball_DirectionY_Movement_last != 0 and Ball_DirectionY_Movement_last != Ball_DirectionY_Movement:
             for i in range(
                     len(ball_positionY)):
                 del ball_positionY[0]
         elif y2 < 140:  #ball is close Red_Player wall
             y = wall_Y_min
             x = (y - y2) / m + x2
             if x < wall_X_min:  #Ball is close Left wall
                 x = wall_X_min
                 y = m * (x - x2) + y2
                 cv2.line(
                     img2, (int(x2), int(y2)),
                     (int(x), int(y)),
                     (0x99, 0xff, 0x33), 2)
                 m = -m
                 y2 = y
                 x2 = x
                 y = y2 - 60
                 if y < wall_Y_min:
                     y = wall_Y_min
                 x = (y - y2) / m + x2
                 if x > wall_X_max:
                     x = wall_X_max
                     y = m * (x - x2) + y2
                 cv2.line(
                     img2, (int(x2), int(y2)),
                     (int(x), int(y)),
                     (0x99, 0xff, 0x33), 2)
Example #36
import cv2 as cv
import numpy as np
# Create an image
r = 100
src = np.zeros((4*r, 4*r), dtype=np.uint8)
# Create a sequence of points to make a contour
vert = [None]*6
vert[0] = (3*r//2, int(1.34*r))
vert[1] = (1*r, 2*r)
vert[2] = (3*r//2, int(2.866*r))
vert[3] = (5*r//2, int(2.866*r))
vert[4] = (3*r, 2*r)
vert[5] = (5*r//2, int(1.34*r))
# Draw it in src
for i in range(6):
    #cv.putText(src,str(i),vert[i],cv.FONT_HERSHEY_SIMPLEX, .8,( 255 ), 2)
    cv.line(src, vert[i],  vert[(i+1)%6], ( 255 ), 3)
# Get the contours (OpenCV 3.x signature; OpenCV 4.x returns only contours and hierarchy)
_, contours, _ = cv.findContours(src, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

# Calculate the distances to the contour
raw_dist = np.empty(src.shape, dtype=np.float32)
for i in range(src.shape[0]):
    for j in range(src.shape[1]):
        raw_dist[i,j] = cv.pointPolygonTest(contours[0], (j,i), True)

# The maximum value is the radius of the largest inscribed circle; maxDistPt is its center
minVal, maxVal, _, maxDistPt = cv.minMaxLoc(raw_dist)
minVal = abs(minVal)
maxVal = abs(maxVal)

# Depicting the distances graphically
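# The snippet breaks off here. A sketch of the drawing step, following the
# OpenCV distance-transform tutorial this sample is based on: color pixels by
# the sign of the distance and mark the largest inscribed circle at maxDistPt.
drawing = np.zeros((src.shape[0], src.shape[1], 3), dtype=np.uint8)
for i in range(src.shape[0]):
    for j in range(src.shape[1]):
        if raw_dist[i, j] < 0:      # outside the contour: shade of blue
            drawing[i, j, 0] = 255 - abs(raw_dist[i, j]) * 255 / minVal
        elif raw_dist[i, j] > 0:    # inside the contour: shade of red
            drawing[i, j, 2] = 255 - raw_dist[i, j] * 255 / maxVal
        else:                       # on the contour itself
            drawing[i, j] = (255, 255, 255)
cv.circle(drawing, maxDistPt, int(maxVal), (255, 255, 255), 1, cv.LINE_8)
cv.imshow('Source', src)
cv.imshow('Distance and inscribed circle', drawing)
cv.waitKey()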
Example #37
        break

    params = [0, 0]
    for i, param in enumerate(params):
        send_serial(i, param, True)
    # Run detection
    padding_image = np.zeros((960, 1280, 3), np.uint8)
    padding_image[240:720, 320:960] = rgb_image  # pad so that close objects appear as if seen from farther away
    result = model.detect([padding_image], verbose=0)[0]
    result_image, mask = render(result, padding_image.copy(), sys.argv[2])
    result_image = result_image[240:720, 320:960]  # undo the padding
    if mask is not None:
        mask = mask[240:720, 320:960]
    # fps.calculate(result_image)  # measure FPS
    if mask is None:
        cv2.line(result_image, (GOAL_POS, 0), (GOAL_POS, cap.HEGIHT),
                 (255, 0, 0))
        cv2.imshow('Mask R-CNN', np.hstack((result_image, depth_image)))
        send_serial(1, MAX_SPEED, True)
        continue

    # Distance
    mask_binary = mask.astype('uint8')
    mask_pixels = (mask_binary > 0).sum()
    print('Area: ',
          mask_pixels / (mask_binary.shape[0] * mask_binary.shape[1]))

    center_pos = calc_center(mask_binary)
    print(f'G{center_pos}')
    target_distance = cap.depth_frame.get_distance(center_pos[0],
                                                   center_pos[1])
Example #38
    def get_frame(self):
        try:
            ret, frame = self.video.read()
            now = time.time()
            (locs, preds) = pre_dect(frame, self.faceNet, self.model)
            for (box, pred) in zip(locs, preds):
                (startX, startY, endX, endY) = box
                cla = np.argmax(pred[0])
                label = "Mask" if cla == 0 else "No Mask"
                color = (0, 255, 0) if cla == 0 else (0, 0, 255)

                # include the probability in the label
                label = "{}: {:.2f}%".format(label, max(pred[0]) * 100)

                cv2.putText(frame, label, (startX, startY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.line(frame, (startX, startY), (startX, startY + 25), color,
                         2)
                cv2.line(frame, (startX, startY), (startX + 25, startY), color,
                         2)

                cv2.line(frame, (endX, startY), (endX, startY + 25), color, 2)
                cv2.line(frame, (endX, startY), (endX - 25, startY), color, 2)

                cv2.line(frame, (startX, endY), (startX, endY - 25), color, 2)
                cv2.line(frame, (startX, endY), (startX + 25, endY), color, 2)

                cv2.line(frame, (endX, endY), (endX, endY - 25), color, 2)
                cv2.line(frame, (endX, endY), (endX - 25, endY), color, 2)

            #  cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
            (hei, wid) = frame.shape[:2]
            #fps=cap.get(cv2.CAP_PROP_FPS)
            end = time.time()
            f = 1 / (end - now)
            FPS = 'FPS : ' + str(math.ceil(f))
            cv2.putText(frame, str(FPS), (0, hei - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 1)
            no_faces = 'No. of faces in video   : ' + str(len(locs))
            cv2.putText(frame, str(no_faces), (80, hei - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 1)
            ret, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tobytes()
        except Exception:
            pass  # drop the frame on any per-frame error
Example #39
    if prob > threshold : 
        cv2.circle(frameCopy, (int(x), int(y)), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
        cv2.putText(frameCopy, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)

        # Add the point to the list if the probability is greater than the threshold
        points.append((int(x), int(y)))
    else :
        points.append(None)

# Draw Skeleton
for pair in POSE_PAIRS:
    partA = pair[0]
    partB = pair[1]

    if points[partA] and points[partB]:
        cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2)
        cv2.circle(frame, points[partA], 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)


cv2.imshow('Output-Keypoints', frameCopy)
cv2.imshow('Output-Skeleton', frame)


cv2.imwrite('Output-Keypoints.jpg', frameCopy)
cv2.imwrite('Output-Skeleton.jpg', frame)

print("Total time taken : {:.3f}".format(time.time() - t))

cv2.waitKey(0)

             adjusted_pt = np.add(start_points[point], vector_proj(vect, diff))
             cv.circle(frame, (int(adjusted_pt[0]), int(adjusted_pt[1])), 30, (255, 0, 0), thickness = -1)
             found[point] = True
     if (not found[point]) and found[point] != last_found[point]:
         d = math.hypot(current_points[point][0] - current_center[0], current_points[point][1] - current_center[1])
         current_points[point] = start_points[point]
         if d < mag:
             inner[point] = True
             outer[point] = False
         else:
             inner[point] = False
             outer[point] = True
         cv.circle(frame, (current_points[point][0], current_points[point][1]), 25, (255, 0, 255))
     last_found[point] = found[point]
     cv.circle(frame, (start_points[point][0], start_points[point][1]), 30, (0, 255, 0), thickness = 1)
     cv.line(frame, (start_points[point][0], start_points[point][1]), (int(start_points[point][0] + vect[0] * 15 / mag), int(start_points[point][1] + vect[1] * 15 / mag)), (0, 255, 0), thickness = 1)
 cv.circle(frame, current_center, 25, (0, 0, 255))
 thumb_dist = math.hypot(current_points[0][0] - current_center[0], current_points[0][1] - current_center[1])
 thumb_vect = [start_points[0][0] - start_center[0], start_points[0][1] - start_center[1]]
 thumb_mag = math.hypot(thumb_vect[0], thumb_vect[1])  # magnitude of the thumb's rest vector
 if thumb_dist - thumb_mag < -15:
     thumb_inner = True
     thumb_outer = False
 elif thumb_dist - thumb_mag > 15:
     thumb_outer = True
     thumb_inner = False
 
 for finger in range(len(inner)):
     if inner[finger]:
         if thumb_inner:
             # Tapping-letters detection method used to type letters
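vector_proj is called above but not defined in this fragment. A minimal sketch, assuming it returns the projection of diff onto vect:

def vector_proj(vect, diff):
    # scalar projection of diff onto vect, scaled back along vect
    mag_sq = vect[0] ** 2 + vect[1] ** 2
    if mag_sq == 0:
        return (0.0, 0.0)
    scale = (diff[0] * vect[0] + diff[1] * vect[1]) / mag_sq
    return (vect[0] * scale, vect[1] * scale)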
Example #41
    #cv2.imshow("img",img)
    
    maskfinal = maskClose
    _, conts, h = cv2.findContours(maskfinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # OpenCV 3.x API: returns (image, contours, hierarchy)
    
    if len(conts)>0:
        cnt = sorted(conts, key = cv2.contourArea, reverse = True)[0]
        #for i in range(len(conts)):
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        if flag==0:
            cv2.circle(img,( int((x+x+w)/2),int((y+y+h)/2) ) ,2,(0,0,0),-1)
            flag=1
            gpoint=( int((x+x+w)/2),int((y+y+h)/2) )
        else:
            cv2.line(img, (int((x+x+w)/2), int((y+y+h)/2)), gpoint, (0,0,0), 2)
            gpoint=( int((x+x+w)/2),int((y+y+h)/2) )
    else:
        flag=0
        
    
    #mask2= cv2.inRange(frame,np.array([50,50,50]),np.array([20,255,20]))
    #cv2.imshow("closed",maskClose)
    cv2.imshow("img",img)
    #cv2.imshow("maskopen",maskOpen)
    cv2.imshow("bgr",frame)
    if cv2.waitKey(1)==27:
        break
        
cv2.destroyAllWindows()
cap.release()
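maskClose in the loop above is referenced but never built in this fragment. A sketch of the usual color-mask pipeline it comes from; lowerBound and upperBound are placeholder HSV thresholds:

import cv2
import numpy as np

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lowerBound, upperBound)  # lowerBound/upperBound: assumed HSV bounds
kernelOpen = np.ones((5, 5), np.uint8)
kernelClose = np.ones((20, 20), np.uint8)
maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)        # drop small speckles
maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)  # fill holes in the blob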
def draw_line(point1, point2, img):
    # cv2.line requires integer pixel coordinates
    pt_x1 = int(math.floor(point1[0]))
    pt_y1 = int(math.floor(point1[1]))
    pt_x2 = int(math.floor(point2[0]))
    pt_y2 = int(math.floor(point2[1]))
    cv2.line(img, (pt_x1, pt_y1), (pt_x2, pt_y2), (255, 0, 0), 1)
Example #43
def destinationCalculation(robots, broadcastPos, frame, client):
    # check homing sequence
    homeBots()
    # create an array to store protobuf information
    newBotPosArr = BotPositionArr()

    # need to optimize
    # print(robots)
    # print(broadcastPos)
    global robot, desReachedFlag
    robots_data = []
    keys = []
    for key, robot_i in robots.items():
        keys.append(key)
        robots_data.append(robot(robot_i[0], 0, robot_i[4], 0))
    # print(robots_data)
    result = movements.action(robots_data)
    # print('fin',result)

    # count the bots that have NOT yet reached their destinations
    countDesReach = 0

    for i, robot_i in enumerate(result):
        # calculate the direction
        F = robot_i[0] * 150000  # resultant force magnitude
        # F = min(0.5, F)
        Dir = robot_i[1]  # resultant force direction
        dx = F * math.cos((Dir / 180 * math.pi))
        dy = F * math.sin((Dir / 180 * math.pi))
        # print(dx,dy)

        # add the destination circle
        frame = cv2.circle(frame, tuple(robots_data[i].des_pos), 1,
                           (0, 255, 0), 2)
        # print(frame.shape)
        #print((int(robots_data[i].init_pos[0] + dx), int(robots_data[i].init_pos[1] + dy)))
        frame = cv2.line(
            frame,
            (int(robots_data[i].init_pos[0]), int(robots_data[i].init_pos[1])),
            tuple(robots_data[i].des_pos), (0, 255, 0), 2)
        frame = cv2.line(
            frame,
            (int(robots_data[i].init_pos[0]), int(robots_data[i].init_pos[1])),
            (int(robots_data[i].init_pos[0] + dx),
             int(robots_data[i].init_pos[1] + dy)), (0, 0, 255), 2)

        # calculate the broadcast positions
        broadcastPos[keys[i]] = positions(
            robots[keys[i]][0], robots[keys[i]][3],
            [robots_data[i].init_pos[0] + dx, robots_data[i].init_pos[1] + dy],
            0)

        # prepare data to send through mqtt
        newBot = BotPosition()
        newBot.bot_id = i
        newBot.x_cod = robots_data[i].init_pos[0] / img_x * 30
        newBot.y_cod = robots_data[i].init_pos[1] / img_y * 30
        newBot.angle = 0
        newBotPosArr.positions.append(newBot)

        # print(distanceTwoPoints((int(robots_data[i].init_pos[0]), int(robots_data[i].init_pos[1])), tuple(robots_data[i].des_pos)))
        if 40 < distanceTwoPoints(
            (int(robots_data[i].init_pos[0]), int(robots_data[i].init_pos[1])),
                tuple(robots_data[i].des_pos)):
            countDesReach += 1

    # check whether every bot has reached its destination
    if (countDesReach == 0):
        if desReachedFlag:
            # print('All the bots reached the destinations')
            for i, robot_i in enumerate(result):
                broadcastPos[keys[i]] = positions(
                    robots[keys[i]][0], robots[keys[i]][3], [
                        robots_data[i].init_pos[0],
                        robots_data[i].init_pos[1] + 5
                    ], 0)
        else:
            temp = []
            for i, robot_i in enumerate(result):
                broadcastPos[keys[i]] = positions(
                    robots[keys[i]][0], robots[keys[i]][3],
                    [robots_data[i].des_pos[0], robots_data[i].des_pos[1]], 0)
            for key in robotData:
                temp.append({
                    'x': int(robotData[key][0][0]),
                    'y': int(robotData[key][0][1])
                })

            arrageBot(robotData, temp)
            desReachedFlag = True
    else:
        desReachedFlag = False

    # publishing data to mqtt
    data = newBotPosArr.SerializeToString()
    # print(data)
    client.publish(TOPIC_SEVER_BOT_POS, aesEncrypt(data))

    return broadcastPos
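distanceTwoPoints is used above but not shown in this snippet. A minimal sketch, assuming it is the plain Euclidean distance between two (x, y) tuples:

import math

def distanceTwoPoints(p1, p2):
    return math.hypot(p2[0] - p1[0], p2[1] - p1[1])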
Example #44
    def detectTurn(self):
        ### Params for region of interest
        bot_left = [0, 480]
        bot_right = [640, 480]
        apex_right = [640, 170]
        apex_left = [0, 170]
        v = [np.array([bot_left, bot_right, apex_right, apex_left], dtype=np.int32)]

        cropped_raw_image = self.region_of_interest(cf.img_rgb_raw, v)
        # cropped_raw_image = cf.img_rgb_raw[self.crop_top:self.crop_bottom, :]
        
        ### Run Canny edge detection and mask the region of interest
        # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        hsv = cv2.cvtColor(cropped_raw_image, cv2.COLOR_BGR2HSV) 
        lower_white = np.array([0,0,255], dtype=np.uint8)
        upper_white = np.array([179,255,255], dtype=np.uint8)
        mask = cv2.inRange(hsv, lower_white, upper_white) 
        dilation = cv2.dilate(mask, self.kernel, iterations=1)
        closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, self.kernel)

        blur = cv2.GaussianBlur(closing, (9,9), 0)
        edge = cv2.Canny(blur, 150,255)

        cropped_image = self.region_of_interest(edge, v)
        # cropped_image = edge[self.crop_top:self.crop_bottom, :]
        
        # blank_image = np.zeros(cropped_raw_image.shape)

        # turnSignal = False

        lines = cv2.HoughLines(cropped_image, rho=0.2, theta=np.pi/80, threshold=70)
        if lines is not None:
            # print('lines', len(lines))
            for line in lines:
                for rho,theta in line:
                    a = np.cos(theta)
                    b = np.sin(theta)
                    x0 = a*rho
                    y0 = b*rho
                    x1 = int(x0 + 1000*(-b))
                    y1 = int(y0 + 1000*(a))
                    x2 = int(x0 - 1000*(-b))
                    y2 = int(y0 - 1000*(a))

                    cv2.line(cropped_raw_image, (x1,y1), (x2,y2), cf.listColor[0], 2)
                    # cv2.line(blank_image, (x1,y1), (x2,y2), cf.listColor[0], 2)

                    if abs(y1-y2) < 40:
                        # turnSignal = True
                        # break
                        return True
        
        # cv2.imshow('hsv', hsv)
        # cv2.imshow('closing', closing)
        # cv2.imshow('cropped_image', cropped_image)
        # cv2.imshow('cropped_raw_image', cropped_raw_image)
        # cv2.imshow('blank_image', blank_image)

        return False
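self.region_of_interest is called above with an image and a list of polygon vertices but is not defined in this snippet. A minimal sketch of the conventional implementation (shown standalone; the method version just adds self):

import cv2
import numpy as np

def region_of_interest(img, vertices):
    # black out everything outside the polygon(s) given in vertices
    mask = np.zeros_like(img)
    fill_color = (255,) * img.shape[2] if img.ndim == 3 else 255
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)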
    try:
        print("group 0 lines:")
        for line in segmented[0]:
            print(line)
            for rho,theta in line:
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a*rho
                y0 = b*rho
                x1 = int(x0 + 1000*(-b))
                y1 = int(y0 + 1000*(a))
                x2 = int(x0 - 1000*(-b))
                y2 = int(y0 - 1000*(a))
            
                cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
                cv2.line(img_rec_red,(x1,y1),(x2,y2),(0,255,0),2)
        print("group 1 lines:")
        for line in segmented[1]:
            print(line)
            for rho,theta in line:
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a*rho
                y0 = b*rho
                x1 = int(x0 + 1000*(-b))
                y1 = int(y0 + 1000*(a))
                x2 = int(x0 - 1000*(-b))
                y2 = int(y0 - 1000*(a))
            
                cv2.line(img,(x1,y1),(x2,y2),(255,0,0),2)
Example #46
            s, e, f, d = defects[i, 0]
            start = tuple(contour[s][0])
            end = tuple(contour[e][0])
            far = tuple(contour[f][0])

            a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
            b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
            c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
            angle = math.degrees(math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)))

            # if the angle is at most 90 degrees, count it as a gap between
            # fingers and mark the far point
            if angle <= 90:
                count_defects += 1
                cv2.circle(crop_image, far, 1, [0, 0, 255], -1)

            cv2.line(crop_image, start, end, [0, 255, 0], 2)

        # Print number of fingers
        if count_defects == 0:
            cv2.putText(frame, "ONE", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255),2)
        elif count_defects == 1:
            cv2.putText(frame, "TWO", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255), 2)
        elif count_defects == 2:
            cv2.putText(frame, "THREE", (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255), 2)
        elif count_defects == 3:
            cv2.putText(frame, "FOUR", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255), 2)
        elif count_defects == 4:
            cv2.putText(frame, "FIVE", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255), 2)
        else:
            pass
    except:
Example #47
	def video_loop(self):
		"""实现人脸识别与检测的函数 太长了可简化"""

		# Load the face-encoding database file
		# if not self.face_data_dic:
		if self.i == 0:
			try:
				with open(ENCODER_PATH, "rb") as f:
					self.face_data_dic = pickle.load(f)
					self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
					self.T.see(tk.END)
					self.T.insert(tk.END,"Loading Files:"+ENCODER_PATH+"\n")
					self.T.see(tk.END)
					self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
					self.T.see(tk.END)
					self.T.insert(tk.END,"Completed!\n")
					self.T.see(tk.END)

			except:
				self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
				self.T.see(tk.END)
				self.T.insert(tk.END,"No such files:"+ENCODER_PATH+"\n")
				self.T.see(tk.END)
				with open(ENCODER_PATH, "wb") as f:
					self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
					self.T.see(tk.END)
					self.T.insert(tk.END,"Creating files:"+ENCODER_PATH+"\n")
					self.T.see(tk.END)
					self.face_data_dic = {}
					pickle.dump(self.face_data_dic, f)
					self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
					self.T.see(tk.END)
					self.T.insert(tk.END,"Completed!\n")
					self.T.see(tk.END)
				MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+ERROR7
				print(MESSAGE)
				self.T.insert(tk.END,MESSAGE+"\n")
				self.T.see(tk.END)
			self.i=1
		else:
			MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+ "self.i=" +str(self.i)
			print(MESSAGE)
			self.T.insert(tk.END,MESSAGE+"\n")
			self.T.see(tk.END)
			self.i += 1

		# success: flag for whether a frame was read
		# img: the single frame that was returned
		self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
		self.T.see(tk.END)
		self.T.insert(tk.END,"Reading Frame...\n")
		self.T.see(tk.END)
		success, img = self.camera.read()  # read a frame from the camera
		if not success:
			MESSAGE="["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+NO_PIC
			print(MESSAGE)
			self.T.insert(tk.END,MESSAGE+"\n")
			self.T.see(tk.END)

		if success:
			cv2.waitKey(1)  # waitKey(0) would block the loop until a key is pressed
			# start computing the frame rate
			self.calfps()
			self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
			self.T.see(tk.END)
			self.T.insert(tk.END,"FPS= "+str(int(self.fps))+" frames per second\n")
			self.T.see(tk.END)
			# check whether the camera started successfully
			if int(self.fps) < 1:
				self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
				self.T.see(tk.END)
				self.T.insert(tk.END,"self.fps = 0\n")
				self.T.see(tk.END)
				self.camera.release()
				self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
				self.T.insert(tk.END,"self.camera.release()\n")
				self.T.see(tk.END)
				time.sleep(1)
				self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
				self.T.see(tk.END)
				self.T.insert(tk.END,"正在重新启动摄像机\n")
				self.T.see(tk.END)
				self.camera=cv2.VideoCapture(0)
				self.fps = 1
				self.detect_start_time = time.time()
				self.start_time = time.time()

			# img = np.minimum(img,150)
			# convert the frame to grayscale
			gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
			self.mean_light = int(np.mean(gray))
			self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
			self.T.insert(tk.END,"Light Level: "+str(self.mean_light)+"\n")
			self.T.see(tk.END)
			if self.mean_light > LIGHT_LEVEL:
				gray = np.minimum(gray,155)
				MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:" + "光线太强!"
				print(MESSAGE)
				self.T.insert(tk.END,MESSAGE+"\n")
				self.T.see(tk.END)
				# gray = gray/2
				self.top_text_time = time.time()
				# self.dec_name = UNKNOWN
				# self.detect_flag = 1
				self.top_text = "What a Sunny Day"

			# run the OpenCV cascade detector on the frame
			self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
			self.T.see(tk.END)
			self.T.insert(tk.END,"开始进行人像检测。。。\n")
			self.T.see(tk.END)
			faces = self.face_xml.detectMultiScale(gray,1.2,5)  # scaleFactor=1.2, minNeighbors=5; worth tuning
			###################################################
			# Close the sign-in window (1) or the sign-up window
			try:
				if (time.time() - self.detect_start_time) > KEEP_TIME:
					# determine which window it is
					self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
					self.T.see(tk.END)
					self.T.insert(tk.END,"关闭检测窗口\n")
					self.T.see(tk.END)
					if self.window_name == 1:
						self.sign_in_window.destroy()
						self.confirmed_face()
					else:
						self.sign_up_face_window.destroy()
						self.confirmed_face()
					self.release()
					return 
			except:pass

			# When more than one face is detected, show a prompt for three seconds
			if (time.time() - self.top_text_time) > 3:
				self.top_text = TOPTEXT_1

			if len(faces)>1:
				self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
				self.T.see(tk.END)
				self.T.insert(tk.END,"人像数目大于一\n")
				self.T.see(tk.END)
				self.top_text_time = time.time()
				self.dec_name = UNKNOWN
				self.detect_flag = 1
				self.top_text = "Keep Your Face Only"
			if len(faces)==0:
				self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
				self.T.see(tk.END)
				self.T.insert(tk.END,"没有检测到人像\n")
				self.T.see(tk.END)
				self.detect_flag = 1

			# Extract the top-left coordinates plus the width and height
			for (x, y, w, h) in faces:
				self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
				self.T.see(tk.END)
				self.T.insert(tk.END,"检测到人脸\n")
				self.T.see(tk.END)
				MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+"x="+str(x)+" y="+str(y)+" w="+str(w)+" h="+str(h)
				self.T.insert(tk.END,MESSAGE+"\n")
				self.T.see(tk.END)
				print(MESSAGE)
				x_ = x - int(w/4)
				y_ = y - int(h/4)
				w_ = int(1.25*w)
				h_ = int(1.25*h)
				# When recognition has not succeeded yet, take the next step
				if (self.detect_flag == 1) and ((self.frame_number+1)%SAMPLE == 0):
					self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
					self.T.see(tk.END)
					self.T.insert(tk.END,"开始进行识别。。。\n")
					self.T.see(tk.END)
					face = img[y_:y_+h_, x_:x_+w_]  # NumPy images are indexed [row (y), column (x)]
					face_locations = fr.face_locations(face)
					if face_locations:
						# print("["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:","##################################")
						unknown_encoding = fr.face_encodings(face, face_locations)[0]
						self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
						self.T.see(tk.END)
						self.T.insert(tk.END,"开始对人脸进行编码。。。\n")
						self.T.see(tk.END)
						self.i = 1000
						if self.window_name == 1:
							"""登录"""
							MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+"This is:"+self.usr_name
							print(MESSAGE)
							self.T.insert(tk.END,MESSAGE+"\n")
							self.T.see(tk.END)
							self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
							self.T.see(tk.END)
							self.results = fr.compare_faces([self.face_data_dic[self.usr_name]], unknown_encoding)
							self.T.insert(tk.END,"对比完成\n")
							self.T.see(tk.END)
							MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+str(self.results)
							print(MESSAGE)
							self.T.insert(tk.END,MESSAGE+"\n")
							self.T.see(tk.END)
							if self.results[0]:
								self.dec_name = self.usr_name
								self.hold_time_start = time.time()
								self.detect_flag = 0
								self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
								self.T.see(tk.END)
								self.T.insert(tk.END,"识别完成,"+self.dec_name+"\n")
								self.T.see(tk.END)
								self.game_flag = 1
							else:
								
								self.dec_name = UNKNOWN
								MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+"认证失败!"+self.dec_name
								print(MESSAGE)
								self.T.insert(tk.END,MESSAGE+"\n")
								self.T.see(tk.END)
						else:
							"""注册"""
							if self.i == 1000:
								self.store_face_codings(unknown_encoding)
								self.dec_name = self.new_name
								MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+"已将您的身份信息录入:"+self.dec_name
								print(MESSAGE)
								self.T.insert(tk.END,MESSAGE+"\n")
								self.T.see(tk.END)
								self.hold_time_start = time.time()
								self.detect_flag = 0
							else:
								pass
				
				l_w = int(w/8)

				cv2.rectangle(img,(x+l_w,y+h),(x+w-l_w,y+h+max(30,int(30*h/200))),WHITE,-1)
				cv2.putText(img, self.dec_name,(x+l_w+min(10,int(2000/w)),y+h+max(25,int(30*h/250))),self.font, max(0.75,w/250),RED,min(3,max(int(w/75),2)),cv2.LINE_AA)

				# top-left
				cv2.line(img,(x,y),(x+l_w,y),WHITE,2)
				cv2.line(img,(x,y),(x,y+l_w),WHITE,2)
				# top-right
				cv2.line(img,(x+w,y),(x+w-l_w,y),WHITE,2)
				cv2.line(img,(x+w,y),(x+w,y+l_w),WHITE,2)
				# bottom-left
				cv2.line(img,(x,y+h),(x+l_w,y+h),WHITE,2)
				cv2.line(img,(x,y+h),(x,y+h-l_w),WHITE,2)
				# bottom-right
				cv2.line(img,(x+w,y+h),(x+w-l_w,y+h),WHITE,2)
				cv2.line(img,(x+w,y+h),(x+w,y+h-l_w),WHITE,2)
				
			cv2.putText(img, self.top_text,(220,50), self.font,0.75,BLUE,2,cv2.LINE_AA)
			cv2.putText(img, "FPS: "+str(int(self.fps)),(10,25), self.font,0.75,WHITE,2,cv2.LINE_AA)
			cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)  # convert from BGR to RGBA
			current_image = Image.fromarray(cv2image)  # convert the array into a PIL Image object
			imgtk = ImageTk.PhotoImage(image=current_image)
			
			try:
				if self.window_name == 1:
					self.label1.imgtk = imgtk
					self.label1.config(image=imgtk)
				else:
					self.label0.imgtk = imgtk
					self.label0.config(image=imgtk)
				self.window.after(1, self.video_loop)
			except:
				if self.window_name == 1:
					self.sign_in_window.destroy()
					self.release()
					self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
					self.T.see(tk.END)
					self.T.insert(tk.END,"self.sign_in_window.destroy()\n")
					self.T.see(tk.END)
					
				else:
					self.sign_up_face_window.destroy()
					self.release()
					self.T.insert(tk.END,"["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:")
					self.T.see(tk.END)
					self.T.insert(tk.END,"self.sign_up_face_window.destroy()\n")
					print("self.sign_up_face_window.destroy()")
					self.T.see(tk.END)
				MESSAGE = "["+str(time.strftime("%Y-%m-%d %H:%M:%S"))+"]:"+WIN_CLOSSE
				print(MESSAGE)
				self.T.insert(tk.END,MESSAGE+"\n")
				self.T.see(tk.END)
				return
Example #48
img = cv2.imread('data/tennis1.png')

gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

blur = cv2.GaussianBlur(gray, (5,5),0)

#cv2.imshow('blur',blur)
#cv2.waitKey(0)

edges = cv2.Canny(blur,50,150,apertureSize = 3)

#cv2.imshow('edges',edges)
#cv2.waitKey(0)

lines = cv2.HoughLines(edges,1,np.pi/180,160)

if lines is not None:
    for line in lines:
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))

        cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)

cv2.imshow("ouptut", img)
cv2.waitKey(0)
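The (rho, theta)-to-endpoints conversion above repeats in several examples in this collection. A small helper, as a sketch, factors it out:

import numpy as np

def polar_to_endpoints(rho, theta, length=1000):
    # a point on the line closest to the origin, plus the line direction
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    p1 = (int(x0 + length * (-b)), int(y0 + length * a))
    p2 = (int(x0 - length * (-b)), int(y0 - length * a))
    return p1, p2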
Example #49
    for decodedObject in decodedObjects:
        points = decodedObject.polygon

        # If the points do not form a quad, find convex hull
        if len(points) > 4:
            hull = cv2.convexHull(
                np.array([point for point in points], dtype=np.float32))
            hull = list(map(tuple, np.squeeze(hull).astype(int)))  # cv2.line needs integer coords
        else:
            hull = points

        # Number of points in the convex hull
        n = len(hull)
        # Draw the convex hull
        for j in range(0, n):
            cv2.line(frame, hull[j], hull[(j + 1) % n], (255, 0, 0), 3)

        x = decodedObject.rect.left
        y = decodedObject.rect.top

        print(x, y)

        print('Type : ', decodedObject.type)

        print('Data : ', decodedObject.data, '\n')

        barCode = str(decodedObject.data)
        print(barCode)
        cv2.putText(frame, barCode, (x, y), font, 1, (0, 255, 255), 2,
                    cv2.LINE_AA)
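decodedObjects and font come from outside this fragment. The usual setup, assuming the pyzbar package and a BGR frame from cv2:

import cv2
from pyzbar.pyzbar import decode

font = cv2.FONT_HERSHEY_SIMPLEX
decodedObjects = decode(frame)  # frame: an assumed BGR image, e.g. from cv2.VideoCapture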
Example #50
            )  # keep only the largest contour, i.e. the tracked object
            centers, radius = cv.minEnclosingCircle(cnt)

            drawing = np.zeros((threshold.shape[0], threshold.shape[1], 3),
                               dtype=np.uint8)  # blank canvas used to trace the object's path
            color = (255, 0, 0)
            cv.circle(drawing, (int(centers[0]), int(centers[1])), int(radius),
                      color, 2)  #shows the circular object boundary
            cv.circle(vidp, (int(centers[0]), int(centers[1])), 5, color, 2)

            if x == 0 and y == 0:
                x = int(centers[0])
                y = int(centers[1])
            else:
                cv.line(
                    vidp, (int(centers[0]), int(centers[1])), (int(x), int(y)),
                    color, 8
                )  #draws line from center in the current frame to center from previous frame

            x = int(centers[0])  # updates center with current object center
            y = int(centers[1])

            #cv.imshow("og image",frame) # displays video
            cv.imshow('Contours',
                      drawing)  #displays the object and its current position
            cv.imshow('virtual', vidp)  # displays the drawing
            key = cv.waitKey(1)
            if key == 27:  #exit command
                break
        else:
            x = 0
            y = 0
                    v = v.reshape((1,1,2))
                    pT[iter,:,:] = v
                    iter = iter + 1

                vis = frame.copy()
                flowVectorLength = []
                flowVectorLength_x = []
                flowVectorLength_y = []
                flowAngle = []
                flowVectorLength_H = []
                flowVectorLength_xH = []
                flowVectorLength_yH = []
                flowAngle_H = []
                for (x0, y0), (x1, y1), (xT, yT), good in zip(p0[:, 0], p1[:, 0], pT[:, 0], st[:, 0]):
                    if good:
                        cv2.line(vis, (x0, y0), (x1, y1), (0, 128, 0))
                        flowVectorLength_H.append(math.sqrt((x1 - xT) ** 2 + (y1 - yT) ** 2)) # calculate flow vector length (speed)
                        flowVectorLength_xH.append(x1 - xT) # calculate the x vector
                        flowVectorLength_yH.append(y1 - yT) # calculate the y vector
                        flowAngle_H.append(math.degrees(math.atan2((y1 - yT), (x1 - xT)))) # calculate flow vector angle (direction)
                        flowVectorLength.append(math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)) # calculate flow vector length (speed)
                        flowVectorLength_x.append(x1 - x0) # calculate the x vector
                        flowVectorLength_y.append(y1 - y0) # calculate the y vector
                        flowAngle.append(math.degrees(math.atan2((y1 - y0), (x1 - x0)))) # calculate flow vector angle (direction)
                    vis = cv2.circle(vis, (x1, y1), 2, (red, green)[good], -1)
                    #vis = cv2.circle(vis, (x0, y0), 2, (red, green)[good], -1)
                    vis = cv2.circle(vis, (xT, yT), 2, (255, 0, 0), -1)

                # get distance, and store in polar format
                x = p1[:, 0, 0] - p0[:, 0, 0]
                y = p1[:, 0, 1] - p0[:, 0, 1]
# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while True:
    ret, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                           **lk_params)
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel().astype(int)  # pixel coordinates must be integers
        c, d = old.ravel().astype(int)
        mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
        frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imshow('frame', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)
cv2.destroyAllWindows()
cap.release()
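feature_params, lk_params, and color are assumed above; the parameter dictionaries from the standard OpenCV Lucas-Kanade tutorial are a reasonable fill-in:

import cv2
import numpy as np

feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))  # one random color per tracked feature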
Example #53
                                              (tltrX, tltrY))
        ################FROM LEFT###################################################################
        (color1_leftX, color1_leftY) = midpoint((cX, cY),
                                                (extLeft_x, extLeft_y))
        (color2_leftX, color2_leftY) = (cX, cY)
        (tempc3_leftX, tempc3_leftY) = midpoint((cX, cY), (trbrX, trbrY))
        (color3_leftX, color3_leftY) = midpoint((tempc3_leftX, tempc3_leftY),
                                                (trbrX, trbrY))
        ################FROM RIGHT###################################################################
        (color1_rightX, color1_rightY) = midpoint((cX, cY),
                                                  (extRight_x, extRight_y))
        (color2_rightX, color2_rightY) = (cX, cY)
        (tempc3_rightX, tempc3_rightY) = midpoint((cX, cY), (tlblX, tlblY))
        (color3_rightX, color3_rightY) = midpoint(
            (tempc3_rightX, tempc3_rightY), (tlblX, tlblY))
        cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                 (255, 0, 255), 2)
        cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                 (255, 0, 255), 2)
        ################################Tip#Detect##################################################

        if dA > dB:

            if topbot() == 3:
                #print(topbot())
                #side_tltr=cv2.line(orig, tuple(tl),tuple(tr), (0,255,0), 2)
                #line1=cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),(255, 0, 255), 2)
                #line2=cv2.line(orig, (int(cdtlblX), int(cdtlblY)), (int(cdtrbrX), int(cdtrbrY)),(255, 0, 255), 2)
                #line_intersection(line1,line2)
                print("TOP")
                cv2.circle(orig, (int(color1_topX), int(color1_topY)), 5,
                           (0, 255, 0), -1)
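midpoint is used throughout this example but not defined here. A minimal sketch of the conventional helper:

def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)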
Example #54
    if clip_to_circle:
        a = rdx * rdx + rdy * rdy
        b = rsx * rdx + rsy * rdy
        c = rsx * rsx + rsy * rsy - v * v

        delta_sqrt = np.sqrt(max(b * b - a * c, 1.0))
        alpha_s = (-b - delta_sqrt) / a
        alpha_e = (-b + delta_sqrt) / a

        rsx += rdx * alpha_s + v
        rsy += rdy * alpha_s + v
        rdx *= (alpha_e - alpha_s)
        rdy *= (alpha_e - alpha_s)

        cv2.line(img, (int(rsx + pad), int(rsy) + pad),
                 (int(rsx + rdx) + pad, int(rsy + rdy) + pad), 255)
    else:
        alpha_x_m = (-v - rsx) / rdx
        alpha_x_p = (v - rsx) / rdx
        alpha_y_m = (-v - rsy) / rdy
        alpha_y_p = (v - rsy) / rdy
        print(alpha_x_m, alpha_x_p, alpha_y_m, alpha_y_p)

        alpha_s = max(min(alpha_x_p, alpha_x_m), min(alpha_y_p, alpha_y_m))
        alpha_e = min(max(alpha_x_p, alpha_x_m), max(alpha_y_p, alpha_y_m))

        print(alpha_s, alpha_e)
        if alpha_s < alpha_e:
            rsx += rdx * alpha_s + v
            rsy += rdy * alpha_s + v
            rdx *= (alpha_e - alpha_s)
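The alpha computations above are the classic slab test: the ray (rsx, rsy) + alpha*(rdx, rdy) enters the square [-v, v]^2 at alpha_s and leaves at alpha_e. A self-contained sketch of just the clipping step, assuming rdx and rdy are nonzero as in the snippet:

def clip_ray_to_square(rsx, rsy, rdx, rdy, v):
    # parametric entry/exit along each axis, sorted so entry <= exit
    ax_lo, ax_hi = sorted(((-v - rsx) / rdx, (v - rsx) / rdx))
    ay_lo, ay_hi = sorted(((-v - rsy) / rdy, (v - rsy) / rdy))
    alpha_s = max(ax_lo, ay_lo)
    alpha_e = min(ax_hi, ay_hi)
    return (alpha_s, alpha_e) if alpha_s < alpha_e else None  # None: ray misses the square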
Example #55
                                     np.pi / 180,
                                     30,
                                     maxLineGap=200)
    lines_direita = cv2.HoughLinesP(pista_direita_segmentado,
                                    1,
                                    np.pi / 180,
                                    30,
                                    maxLineGap=200)

    # print(lines_direita.shape)

    try:
        x1e, y1e, x2e, y2e = lines_esquerda[0][0]

        me = ((y1e - y2e) * 1.0) / (x1e - x2e)
        cv2.line(frame, (x1e, y1e), (x2e, y2e), (0, 255, 0), 3)

        x1d, y1d, x2d, y2d = lines_direita[0][0]
        md = ((y1d - y2d) * 1.0) / ((x1d - x2d))
        cv2.line(frame, (x1d, y1d), (x2d, y2d), (255, 0, 0), 3)

        # Left line equation:
        # y - y1e = me(x - x1e)
        # y = me*x + y1e - me*x1e

        # Right line equation:
        # y - y1d = md(x - x1d)
        # y = md*x + y1d - md*x1d

        # Intersection:
        # me*x + y1e - me*x1e = md*x + y1d - md*x1d
# apply a Gaussian blur with an 11x11 kernel to the image to smooth it,
# useful when reducing high frequency noise
blurred = cv2.GaussianBlur(image, (11, 11), 0)
cv2.imshow("Blurred", blurred)
cv2.waitKey(0)

# draw a 2px thick red rectangle surrounding the face
output = image.copy()
cv2.rectangle(output, (320, 60), (420, 160), (0, 0, 255), 2)
cv2.imshow("Rectangle", output)
cv2.waitKey(0)

# draw a blue 20px (filled in) circle on the image centered at
# x=300,y=150
output = image.copy()
cv2.circle(output, (300, 150), 20, (255, 0, 0), -1)
cv2.imshow("Circle", output)
cv2.waitKey(0)

# draw a 5px thick red line from x=60,y=20 to x=400,y=200
output = image.copy()
cv2.line(output, (60, 20), (400, 200), (0, 0, 255), 5)
cv2.imshow("Line", output)
cv2.waitKey(0)

# draw green text on the image
output = image.copy()
cv2.putText(output, "OpenCV + Jurassic Park!!!", (10, 25), 
	cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.imshow("Text", output)
cv2.waitKey(0)
Example #57
 def drawLines(self, lines, paint):
     if len(lines) > 0:
         for x1, y1, x2, y2 in lines:
             cv2.line(self.bgr, (x1, y1), (x2, y2), paint, 2)
             cv2.circle(self.bgr, (x1, y1), 2, (0, 255, 0))
             cv2.circle(self.bgr, (x2, y2), 2, (0, 0, 255))
def draw_lines(img, lines, color=(255, 0, 0), thickness=10):
    # Draw the right and left lane lines on the image
    for line_now in lines:
        for x1, y1, x2, y2 in line_now:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
Example #59
def _get_room_connections_corner_grpah(room_info, density_img, room_idx,
                                       global_idx):
    corners_info = room_info['corners_info']
    mask = room_info['mask']
    contour = room_info['contour']
    source_idx = room_info['max_corner_idx']
    end_idx = room_info['adj_corner_idx']
    graph_weights = room_info['graph_weights']
    # build the graph, define the distance between different corners

    # for debugging use
    import cv2
    from scipy.misc import imsave  # removed in SciPy >= 1.2; imageio.imwrite is the usual substitute
    debug_img = np.zeros([256, 256, 3])
    debug_img += np.stack([mask] * 3, axis=-1).astype(np.float32) * 255
    result_img = np.copy(debug_img)
    for corner_idx, corner_info in enumerate(corners_info):
        cv2.circle(debug_img, corner_info['corner'], 2, (255, 0, 0), 2)
        cv2.circle(result_img, corner_info['corner'], 2, (255, 0, 0), 2)
        cv2.putText(debug_img, '{}'.format(corner_idx), corner_info['corner'],
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)

        for bin_idx, edge_conf in enumerate(corner_info['binning'].tolist()):
            if edge_conf > 0.1:
                unit_vec = (np.cos(bin_idx * 10 / 180 * np.pi),
                            np.sin(bin_idx * 10 / 180 * np.pi))
                end_point = (int(corner_info['corner'][0] + unit_vec[0] * 10),
                             int(corner_info['corner'][1] - unit_vec[1] * 10))
                cv2.line(debug_img, corner_info['corner'], end_point,
                         (255, 255, 255), 1)

    imsave('./debug/{}_{}_corners.png'.format(global_idx, room_idx), debug_img)
    # imsave('./debug/{}_{}_density.png'.format(global_idx, room_idx), density_img)

    if end_idx is None:
        room_connections = defaultdict(list)
    else:
        # build the corner-detection-based graph
        heuristic_room_graph = _build_room_graph(corners_info, mask, contour,
                                                 source_idx, end_idx)
        final_graph = _refine_predicted_graph_weights(graph_weights,
                                                      heuristic_room_graph,
                                                      corners_info, source_idx,
                                                      end_idx)
        room_corners = [info['corner'] for info in corners_info]

        # solve the shortest path given source and end using Dijkstra's algorithm
        # shortest_path, dists = _dijkstra(final_graph, source_idx, end_idx)
        # path = list(reversed(shortest_path))

        # if global_idx == 10 and room_idx == 2:

        # if len(shortest_path) < 0.8 * len(final_graph):  # the path is too short
        trial_num = 0
        reselected_path = None
        while reselected_path is None:
            all_paths, all_lens = dfs_all_paths(final_graph, room_corners,
                                                source_idx, end_idx, trial_num)
            reselected_path = reselect_path(all_paths, all_lens,
                                            len(final_graph), trial_num)
            print('search trial No.{}'.format(trial_num))
            trial_num += 1
            if trial_num >= 3:
                pdb.set_trace()
        path = reselected_path
        room_connections = defaultdict(list)
        # construct room connections according to shortest path
        for idx, corner_idx in enumerate(path):
            next_idx = idx + 1 if idx < len(path) - 1 else 0
            corner = corners_info[corner_idx]['corner']
            to_corner = corners_info[path[next_idx]]['corner']
            room_connections[corner].append(to_corner)
            room_connections[to_corner].append(corner)

    for corner, to_corners in room_connections.items():
        for to_corner in to_corners:
            cv2.line(result_img, corner, to_corner, (0, 255, 255), 2)

    # path is only defined when end_idx is not None
    path_str = '-'.join(str(node) for node in path) if end_idx is not None else ''
    cv2.putText(result_img, path_str, (20, 20), 1, 1, (255, 255, 255))
    imsave('./debug/{}_{}_results.png'.format(global_idx, room_idx),
           result_img)

    return room_connections
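The commented-out call above refers to a _dijkstra helper that is not shown. A minimal sketch, assuming final_graph is a dict mapping node -> {neighbor: weight}; it returns the path from end back to source, matching the reversed() call in the snippet:

import heapq

def _dijkstra(graph, source, end):
    dists = {node: float('inf') for node in graph}
    dists[source] = 0.0
    prev = {}
    heap = [(0.0, source)]
    while heap:
        d, node = heapq.heappop(heap)
        if node == end:
            break
        if d > dists[node]:
            continue  # stale heap entry
        for nbr, w in graph[node].items():
            nd = d + w
            if nd < dists[nbr]:
                dists[nbr] = nd
                prev[nbr] = node
                heapq.heappush(heap, (nd, nbr))
    # walk back from end to source
    path, node = [], end
    while node != source:
        path.append(node)
        node = prev[node]
    path.append(source)
    return path, dists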
Example #60
    fig.add_subplot(2, 4, 4)
    plt.imshow(depth_tgt)

    fig.add_subplot(2, 4, 5)
    height = depth_src.shape[0]
    width = depth_src.shape[1]
    img_tgt = np.zeros((height, width, 3), np.uint8)
    img_src = np.zeros((height, width, 3), np.uint8)
    for h in range(height):
        for w in range(width):
            if visible[h, w]:
                cur_flow = flow[h, w, :]
                # a line whose endpoints coincide draws a thick dot,
                # colored by the pixel's position in the source image
                img_src = cv2.line(
                    img_src,
                    (np.round(w).astype(int), np.round(h).astype(int)),
                    (np.round(w).astype(int), np.round(h).astype(int)),
                    (255, h * 255 / height, w * 255 / width),
                    5,
                )
                img_tgt = cv2.line(
                    img_tgt,
                    (np.round(w + cur_flow[1]).astype(int),
                     np.round(h + cur_flow[0]).astype(int)),
                    (np.round(w + cur_flow[1]).astype(int),
                     np.round(h + cur_flow[0]).astype(int)),
                    (255, h * 255 / height, w * 255 / width),
                    5,
                )
    plt.imshow(img_src)
    fig.add_subplot(2, 4, 6)
    plt.imshow(img_tgt)
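The per-pixel loop above makes O(H*W) Python-level cv2 calls. The same source-side coloring can be done in one vectorized assignment (single-pixel dots rather than the 5 px ones above), as a sketch reusing the visible, img_src, height, and width names from the snippet:

ys, xs = np.nonzero(visible)
img_src[ys, xs] = np.stack(
    [np.full_like(ys, 255), ys * 255 // height, xs * 255 // width],
    axis=-1,
).astype(np.uint8)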