Example No. 1
 def __enter__(self):
     cv2.namedWindow(self.name, cv2.WINDOW_NORMAL)
     if self.fullscreen:
         cv2.setWindowProperty(self.name, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
     if DEBUG:
         print("Opening window: " + str(self.name))
     return self
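The `cv2.cv.CV_WINDOW_FULLSCREEN` constant used here only exists in the old OpenCV 2.x bindings; the `cv2.cv` submodule was removed in OpenCV 3. A minimal sketch of the same enter-fullscreen step with the current constant names (the window name and flag are placeholders, not taken from the example above):

import cv2

def open_window(name="preview", fullscreen=False):
    # WINDOW_NORMAL creates a resizable window, which fullscreen mode requires.
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    if fullscreen:
        # OpenCV 3+/4 spelling of the old cv2.cv.CV_WINDOW_FULLSCREEN constant
        cv2.setWindowProperty(name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    return name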
Example No. 2
	def step(self):
		
		key = cv2.waitKey(1) & 255

		if key == ord('q'):
			self.close()
			return False
		elif key == ord('s'):
			if self.recording:
				self.recording = False
			else:
				self.recording = True
		elif key == ord('f'):
			cv2.setWindowProperty('Frame', 
								cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)

		if not self.videoManager.read():
			return False
		(img, res) = self.processor.process(self.videoManager)
		
		if self.recording:
			cv2.imwrite("screenshot" + str(self.screenshot) + ".jpg", img)
			if self.screenshot % 4 == 0 or self.screenshot % 4 == 1:
				cv2.circle(img, (20,20), 10, (0,0,255), -1)
			self.screenshot += 1
		
		
		if self.stack:
			double = np.hstack((img, res))
			cv2.imshow('Frame', double)
		else:
			cv2.imshow('Frame', img)
		
		self.previousImg = img
		return True
Example No. 3
 def flash_window(self, img, title=''):
     cv2.namedWindow(title, cv2.WINDOW_NORMAL)
     if self.config['FULLSCREEN']: cv2.setWindowProperty(title, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
     cv2.imshow(title, img)
     if cv2.waitKey(5) == 0:
         time.sleep(0.05)            
         pass
Example No. 4
def show_optimal_ring():
	print "Press q or Q to quit"
	time.sleep(2)
	
	global rings_list
	max_nodes=0
	i=0
	optimal_rings = []
	for ring in rings_list:
		if len(ring) > max_nodes:
			max_nodes = len(ring)
	
	cv2.namedWindow('dst_rt', cv2.WND_PROP_FULLSCREEN)
	cv2.setWindowProperty('dst_rt', cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)	
	for ring in rings_list:
		if max_nodes == len(ring):
			i += 1
			ring_object,isMultigraph=gui.Network_graph.makeGraph(ring) 
			optimal_rings.append(ring_object)	
			gui.Network_graph.drawTopology(ring_object,False,'optimal_ring'+str(i)+'.png')
			img = cv2.imread('optimal_ring'+str(i)+'.png',0)
			cv2.imshow('dst_rt',img)
			ch=cv2.waitKey(0)
			if ch==81 or ch==113: 
				break

			
	cv2.destroyAllWindows()
	for i in range(1, 5):
		cv2.waitKey(1)
	return optimal_rings
Example No. 5
 def __showOpenCV(self, image):
     cv2.namedWindow("test", cv2.WND_PROP_FULLSCREEN)
     cv2.setWindowProperty("test", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
     bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # I think this is being converted both ways ...
     cv2.imshow("test", bgr)
     cv2.waitKey(0)  # Scripting languages are weird, It will not display without this
     cv2.destroyAllWindows()
Example No. 6
    def __init__(self):
        self.config = Config()

        # Pull the matrices from the brain file.
        print "Unpickling brain in formaldehyde.."
        learn_file = open(self.config.brain_file, "rb")
        male_eigenfaces = pickle.load(learn_file)
        female_eigenfaces = pickle.load(learn_file)
        mtf_matrix = pickle.load(learn_file)
        ftm_matrix = pickle.load(learn_file)

        # Spin up mirrors.
        print "Spooling up the mirrors.."
        self.p = FaceProcessor(self.config)
        self.mtf_mirror = Mirror(male_eigenfaces, female_eigenfaces, mtf_matrix)
        self.ftm_mirror = Mirror(female_eigenfaces, male_eigenfaces, ftm_matrix)

        # Default is MTF ('cause I made this :D)
        self.current = self.mtf_mirror

        # Our camera is started here.
        print "Opening shutter.."
        self.cam = cv2.VideoCapture(self.config.camera)
        cv2.namedWindow("Video", cv2.WND_PROP_FULLSCREEN)          
        cv2.setWindowProperty("Video", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
        cv2.setMouseCallback("Video", self.mouse_callback)
Example No. 7
def doFullscreen():
    global fullscreen
    if not fullscreen:
        cv2.setWindowProperty(screen_name, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
        fullscreen = True
    else:
        cv2.setWindowProperty(screen_name, cv2.WND_PROP_FULLSCREEN, 0)
        fullscreen = False
Example No. 8
def start_fullscreen():
    global fullscreen
    global window_name
    if not fullscreen:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
        fullscreen = True
    else:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, 0)
        fullscreen = False
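Examples 7 and 8 keep the fullscreen state in a module-level flag. A possible alternative (a sketch; `window_name` is assumed to be a window that has already been created) reads the current state back with `cv2.getWindowProperty` instead:

import cv2

def toggle_fullscreen(window_name):
    # getWindowProperty returns the value last set for WND_PROP_FULLSCREEN,
    # so no global flag is needed to track the state.
    is_full = cv2.getWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN) == cv2.WINDOW_FULLSCREEN
    new_state = cv2.WINDOW_NORMAL if is_full else cv2.WINDOW_FULLSCREEN
    cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, new_state)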
Example No. 9
def read(filename):
    global cap, fgbg, kernel
    cv2.namedWindow("App", cv2.WND_PROP_FULLSCREEN)          
    cv2.setWindowProperty("App", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)

    kernel = np.ones((3, 3), np.uint8)
    fgbg = cv2.BackgroundSubtractorMOG(history, nmixtures, backgroundRatio)
    cap = cv2.VideoCapture(filename)
    cap.read()
    play()
Example No. 10
def main():
    img = cv2.imread('./data/small_rect.jpg')

    cv2.namedWindow('frame', cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)

    cv2.imshow('frame',img)
    key=cv2.waitKey(0)
    if key==27:
        cv2.destroyAllWindows()
Example No. 11
def show_topology():
	
	cv2.startWindowThread()
	img = cv2.imread('graph.png',0)
	cv2.namedWindow('dst_rt', cv2.WND_PROP_FULLSCREEN)
	cv2.setWindowProperty('dst_rt', cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
	cv2.imshow('dst_rt',img)
	cv2.waitKey(0)
	cv2.destroyAllWindows()	
	for i in range(1, 5):
		cv2.waitKey(1)
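The trailing `cv2.waitKey(1)` loop after `destroyAllWindows()` (used here and in Examples 4 and 23) pumps the HighGUI event queue so the window actually disappears on platforms where the destroy call alone is not processed immediately. The same idiom as a small standalone helper (a sketch, not taken from any of the projects above):

import cv2

def close_all_windows(pump_iterations=5):
    # destroyAllWindows() only requests the close; a few waitKey(1) calls
    # let HighGUI process the pending events so the windows really vanish.
    cv2.destroyAllWindows()
    for _ in range(pump_iterations):
        cv2.waitKey(1)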
Example No. 12
 def __init__(self,cascadeFile=None,fullscreen=True,device=0,mirror=True,header='Camera Output'):
     self.cap = cv2.VideoCapture(device)
     self.frame=None
     self.mirror=mirror
     self.header=header
     if cascadeFile!=None:
         self.cascade = cv2.CascadeClassifier(cascadeFile)
     if fullscreen==True:
         cv2.namedWindow(header, cv2.WND_PROP_FULLSCREEN)
         cv2.setWindowProperty(header, cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
     self.fetch()    
Example No. 13
def show_img(img, wait=0, norm=True, fs=False, resize=True):
    ratio = 1.*img.shape[1]/img.shape[0]
    if norm: img = norm_img(img)
    img = img.astype("uint8")
    if resize:
        size=256 if img.shape[0]<200 else img.shape[0]
        img = cv2.resize(img, (int(size*ratio), size))
    if fs:
        cv2.namedWindow("Video", cv2.WND_PROP_FULLSCREEN) 
        cv2.setWindowProperty("Video", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
    cv2.imshow("Video", img)
    cv2.waitKey(wait)
Example No. 14
    def __init__(self, doRecord=True, showWindows=True):
        self.doRecord = doRecord
        self.show = showWindows
        self.frame = None
        self.frame_rate = camera.framerate
        self.isRecording = False

        self.trigger_time = None
        # cv2.namedWindow("Image", cv2.WINDOW_AUTOSIZE)
        # cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.namedWindow("Image", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("Image", cv2.WND_PROP_FULLSCREEN, cv.CV_WINDOW_FULLSCREEN)
Example No. 15
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = img2

    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2))
        # cv2.polylines(vis, [corners], True, (255, 255, 255))
        # cv2.fillPoly(vis, [corners], (255, 255, 255))

        mask = np.zeros(vis.shape, dtype=np.uint8)
        roi_corners = np.array([[(corners[0][0],corners[0][1]), 
            (corners[1][0],corners[1][1]), 
            (corners[2][0],corners[2][1]), 
            (corners[3][0], corners[3][1])]], dtype=np.int32)
        white = (255, 255, 255)
        cv2.fillPoly(mask, roi_corners, white)

        # apply the mask
        masked_image = cv2.bitwise_and(vis, mask)

        # blurred_image = cv2.blur(vis, (15, 15), 0)
        blurred_image = cv2.boxFilter(vis, -1, (27, 27))
        vis = vis + (cv2.bitwise_and((blurred_image-vis), mask))

    # if status is None:
    #     status = np.ones(len(kp_pairs), np.bool_)
    # p2 = np.int32([kpp[1].pt for kpp in kp_pairs])

    # green = (0, 255, 0)
    # red = (0, 0, 255)
    # white = (255, 255, 255)
    # kp_color = (51, 103, 236)
    # for (x, y), inlier in zip(p2, status):
    #     if inlier:
    #         col = green
    #         cv2.circle(vis, (x, y), 2, col, -1)

    # view params
    width, height = 1280, 800
    x_offset = 260
    y_offset = 500
    l_img = create_blank(width, height, rgb_color=(0,0,0))

    vis = np.append(vis, vis, axis=1)
    vis = cv2.resize(vis, (0,0), fx=0.6, fy=0.6)

    l_img[y_offset:y_offset+vis.shape[0], x_offset:x_offset+vis.shape[1]] = vis 

    cv2.namedWindow(win, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(win, cv2.WND_PROP_AUTOSIZE, cv2.cv.CV_WINDOW_AUTOSIZE)
    cv2.imshow(win, l_img)
Example No. 16
	def show(self):
		while(True):
			self.capture()
			cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN )
			cv2.setWindowProperty("Frame", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
			self.frame = cv2.flip(self.frame, 1)
			cv2.imshow("Frame", self.frame)

			if cv2.waitKey(1) & 0xFF == ord('q'):
				break

		self.cap.release()
		cv2.destroyAllWindows()
Example No. 17
 def GetStatusBounds(self):
     img = np.array(ImageGrab.grab())
     cv_img = img.astype(np.uint8)
     
     cv2.namedWindow("Capture", cv2.WND_PROP_FULLSCREEN)
     cv2.setWindowProperty("Capture", cv2.WND_PROP_FULLSCREEN, 1)  # 1 == cv2.WINDOW_FULLSCREEN
     cv2.setMouseCallback("Capture", self.region_select_callback, cv_img)
     
     while True:
         cv2.imshow("Capture", cv_img)
         key = cv2.waitKey(1)
         
         if key == ord("c"):
             cv2.destroyAllWindows()
             break
             
     sleep(0.1)
Example No. 18
	def __init__(self, processor, fps, n=2, fullscreen=False, stack=True):
		cv2.namedWindow('Frame', cv2.WINDOW_NORMAL)
		if fullscreen:
			cv2.setWindowProperty('Frame', 
								cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
		
		if stack:
			cv2.resizeWindow('Frame', 2133, 600)
		else:
			cv2.resizeWindow('Frame', 1066, 600)
		
		self.processor = processor
		self.videoManager = VideoManager(n)
		self.previousImg = None
		self.interval = int(1000 / fps)
		self.stack = stack
		self.screenshot = 0
		self.recording = False
Example No. 19
File: vr_new.py Project: KM7/ros_vr
  def virtualize(self):
    rate = rospy.Rate(30)  # 30 Hz
    while not rospy.is_shutdown():
        if self.left != [] and self.right != []:
            left_frame = self.cropcrop(cv2.flip(self.left, 1))
            right_frame = self.cropcrop(cv2.flip(self.right, 1))

            # put two images together
            composite_frame = join_images(
                right_frame,
                left_frame,
            )
            composite_frame = cv2.transpose(composite_frame)
            cv2.namedWindow("test", cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty("test", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
            cv2.imshow("test", composite_frame)
            cv2.waitKey(3)
        rate.sleep()
Example No. 20
    def show(self):
        'Show the motion.'
        while True:
            self.capture()
            cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty(
                "Frame", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
            self.frame = cv2.flip(self.frame, 0)
            cv2.imshow("Frame", self.frame)

            if cv2.waitKey(1) & 0xFF == ord('a'):
                self.Por = self.Por + 1
                if self.Por >= len(self.Methods):
                    self.Por = 0

                print str(self.Por) + "===" + str(self.Methods[self.Por])

        self.cap.release()
        cv2.destroyAllWindows()
Example No. 21
    def run(self):
        if (DEBUG_IMAGE):
            cv2.namedWindow(IMAGE_NAME)
        else:
            cv2.namedWindow(IMAGE_NAME, cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty(IMAGE_NAME, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
        cv2.setMouseCallback(IMAGE_NAME, self.mouseCallback)

        while (True):
            # Get current touch state
            touch_state = self.getTouchState()
            # Apply touch state to polygon filler
            self.updatePolygons(touch_state)

            # Cycle Colors
            self.adjustColors()

            # Display
            self.show()
Example No. 22
def video():
    import cv2

    cv2.namedWindow('Output', cv2.WND_PROP_FULLSCREEN)
    camera = cv2.VideoCapture(0)

    context = zmq.Context()
    publisher = context.socket(zmq.PUB)
    publisher.bind('tcp://*:{}'.format(CAMERA))

    projector = context.socket(zmq.PULL)
    projector.bind('tcp://*:{}'.format(PROJECTOR))
    
    eventQ = context.socket(zmq.SUB)
    eventQ.connect('tcp://localhost:{}'.format(EVENT))
    eventQ.setsockopt(zmq.SUBSCRIBE, b'')

    poller = zmq.Poller()
    poller.register(eventQ, zmq.POLLIN)
    poller.register(projector, zmq.POLLIN)

    while True:
        events = dict(poller.poll(timeout=0))

        if eventQ in events:
            pushbutton = eventQ.recv_json()
            if 'display2' in pushbutton:
                cv2.moveWindow('Output', 2000, 100)
            if 'fullscreen' in pushbutton:
                cv2.setWindowProperty('Output', cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)

        if projector in events:
            cv2.imshow('Output', cv2.resize(recv_array(projector), FRAME_SIZE))
        else:
            cv2.imshow('Output', np.zeros(FRAME_SIZE[::-1]))
        
        _, frame = camera.read()
        frame = cv2.resize(frame, FRAME_SIZE)
        send_array(publisher, frame)

        cv2.waitKey(VIDEO_SAMPLE_TIME)
Example No. 23
def show_rings(timer):
	print "Press q or Q to quit"
	time.sleep(2)
	timer = timer * 1000
	
	cv2.namedWindow('dst_rt', cv2.WND_PROP_FULLSCREEN)
	cv2.setWindowProperty('dst_rt', cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)	
	
	if len(rings_list)>0:
		for i in range(1,len(rings_list)+1):
			name='ring'+str(i)+'.png'
			img = cv2.imread(name,0)
			cv2.imshow('dst_rt',img)
			ch=cv2.waitKey(timer)
			if ch==81 or ch==113:
				break			
	else:
		print "No rings are there in Physical Topology"
		sys.exit(0)
	cv2.destroyAllWindows()
	for i in range(1, 5):
		cv2.waitKey(1)
Example No. 24
def main():
  global frequency

  if displayFaces:
    showCam = True
  else:
    showCam = displayCam

  capture = getCameraCapture()

  if showCam:
    cv2.startWindowThread()
    cv2.namedWindow(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)  # value must be WINDOW_FULLSCREEN, not the property id

  if args.gather_training_data:
    print 'Recording training data for emotion:', args.recording_emotion
    print 'Please try to display that emotion during the recording.'
    saveFaceImage(capture, frequency, showCam, displayFaces)
  else:
    print 'Detection emotions from net ', args.netFile
    detectEmotions(capture, frequency, showCam, displayFaces)
Example No. 25
def initialize():
    global windows, WIDTH, HEIGHT

    imagefile = "frame%04d.jpg" % 1
    path = "%s%s" % (directory, imagefile)
    image = cv.LoadImage(path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    WIDTH, HEIGHT = cv.GetSize(image)

    num_windows_x = WIDTH / (SPATIAL_WINDOW_X / OVERLAP_FACTOR)
    num_windows_y = HEIGHT / (SPATIAL_WINDOW_Y / OVERLAP_FACTOR)
    num_windows = num_windows_x * num_windows_y
    for k in range(0, num_windows):
        temporal_window = [0] * TEMPORAL_WINDOW
        windows.append(temporal_window)

    if not HEADLESS:
        if DISPLAY_HIST:
            plot.figure(1)
            plot.show(block=False)

        if DISPLAY_WAVEFORM:
            plot.figure(2)
            plot.show(block=False)

        if DISPLAY_SECOND_DEGREE_FOURIER:
            plot.figure(3)
            plot.show(block=False)

        cv.NamedWindow("display", cv.CV_WINDOW_NORMAL)
        if FULLSCREEN:
            cv2.setWindowProperty("display", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)

        cv.SetMouseCallback("display", handle_mouse)

        if OPTICAL_FLOW:
            cv.NamedWindow("optical_flow", cv.CV_WINDOW_NORMAL)
        
        if CROPPED_OPTICAL_FLOW:
            cv.NamedWindow("cropped_optical_flow", cv.CV_WINDOW_NORMAL)
Example No. 26
def myVideo(url):
    flag = 0  # flag for checking whether the file is already present in memory.
    search_folder = "."
    videoFile = url.split('/')[-1].split('#')[0].split('?')[0]  # stripping the name of the file.

    for root, dirs, files in os.walk(search_folder):  # using the os.walk module to find the files.
        for name in files:
            """Checking for the video file in the current directory and the sub-directories"""
            if videoFile == os.path.join(name):  # checking if the file is already present in the internal memory (sub-directories included).
                flag += 1
                print "The file is already present in the internal memory"
                return -1  # Returning the confirmation that the file is present.

    if flag == 0:  # downloading only when the flag is zero (i.e. the file is not in the internal memory).
        print "Downloading the file"
        video = urllib.FancyURLopener()  # downloading the file using urllib.
        video.retrieve(url, videoFile)
        curDir = os.getcwd()  # getting the current working directory.
        fullVideoPath = os.path.join(curDir, videoFile)  # Making the full path of the video file.

        """For playing the file using OpenCV, first read the file.
        Find the number of frames and the frame rate.
        Finally use these parameters to display each extracted frame on the screen."""

        vidFile = cv.CaptureFromFile(fullVideoPath)  # Video capturing from the file.
        nFrames = int(cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FRAME_COUNT))  # Number of frames in the video.
        fps = cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FPS)  # Frame rate.
        waitPerFrame = int(1 / fps * 1000)  # Wait time between frames in milliseconds.

        for f in xrange(nFrames):
            frameImg = cv.QueryFrame(vidFile)  # decoding and returning the grabbed video frame.
            cv2.namedWindow("EPIC", cv2.WND_PROP_FULLSCREEN)  # Making a full-size display.
            cv2.setWindowProperty("EPIC", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)  # setting the window property to full screen.
            cv.ShowImage("EPIC", frameImg)  # Showing the frame image.
            cv.WaitKey(waitPerFrame)  # Waiting between frames.

        cv.DestroyWindow("EPIC")  # Deleting the window once playback is done.
        return 1  # The file was successfully played.
Example No. 27
    def __init__(self):
        self.dr = dreamer.Dreamer()

        self.video_capture = cv2.VideoCapture(0)
        self.cs = cam_states_faces.CamStatesFaces()

        self.backgrounds = ['Paintings/BigSue53.jpg',
                            'Paintings/imagine.jpg',
                            'Paintings/flower.jpg',
                            'Paintings/figures.jpg',
                            'Paintings/flower2.jpg',
                            'Paintings/lazy.jpg',
                            'Paintings/flower3.jpg',
                            'Paintings/floating2.jpg',
                            'Paintings/wondering.jpg',
                            'Paintings/seduction.jpg',
                            'Paintings/spring2.jpg',
                            'Paintings/rouge.jpg']
        self.background = cv2.imread(self.backgrounds[0])
        self.background = images.Images.resize_image(480, 640, self.background)

        # For a full screen window
        cv2.namedWindow("Video", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("Video", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
Example No. 28
def main():
    '''
    Arguments to be set:
        showCam : determine if show the camera preview screen.
    '''
    print("Enter main() function")
    
    if args.testImage is not None:
        img = cv2.imread(args.testImage)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, FACE_SHAPE)
        print(class_label[result[0]])
        sys.exit(0)

    showCam = 1

    capture = getCameraStreaming()

    if showCam:
        cv2.startWindowThread()
        cv2.namedWindow(windowsName, cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty(windowsName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)  # value must be WINDOW_FULLSCREEN, not the property id
    
    showScreenAndDectect(capture)
Example No. 29
    def update_display(self):
        while True:
            if self.VERBOSE: print('[Displaying Images] %s' % datetime.strftime(datetime.now(), self.TIME_FORMAT))
            try:
                average = self.average + self.PIXEL_CENTER
                pwm = self.pwm
                masks = self.masks
                images = self.images
                output_images = []
                for img,mask in zip(images, masks):
                    cv2.line(img, (self.PIXEL_MIN, 0), (self.PIXEL_MIN, self.PIXEL_HEIGHT), (0,0,255), 1)
                    cv2.line(img, (self.PIXEL_MAX, 0), (self.PIXEL_MAX, self.PIXEL_HEIGHT), (0,0,255), 1)
                    cv2.line(img, (average, 0), (average, self.PIXEL_HEIGHT), (0,255,0), 2)
                    cv2.line(img, (self.PIXEL_CENTER, 0), (self.PIXEL_CENTER, self.PIXEL_HEIGHT), (255,255,255), 1)
                    output_images.append(numpy.vstack([img, numpy.zeros((20, self.PIXEL_WIDTH, 3), numpy.uint8)]))
                output_small = numpy.hstack(output_images)
                output_large = cv2.resize(output_small, (1024, 768))
                # Offset Display
                if average - self.PIXEL_CENTER >= 0:
                    average_str = str("+%2.2f cm" % ((average - self.PIXEL_CENTER) / float(self.PIXEL_PER_CM)))
                elif average - self.PIXEL_CENTER < 0:
                    average_str = str("%2.2f cm" % ((average - self.PIXEL_CENTER) / float(self.PIXEL_PER_CM)))
                cv2.putText(output_large, average_str, (340,735), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 5)
                ## PWM Display
                #if pwm >= self.PWM_CENTER:
                    #pwm_str = str("+%2.2f cm" % (100 * float(pwm / self.PWM_CENTER)))
                #elif pwm < self.PWM_CENTER:
                    #pwm_str = str("%-2.2f cm" % (100 * float(pwm / self.PIXEL_CENTER)))                  
                #cv2.putText(output_large, pwm_str, (120,735), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 5)
                cv2.namedWindow('AutoTill', cv2.WINDOW_NORMAL)
                if self.FULLSCREEN: cv2.setWindowProperty('AutoTill', cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
                cv2.imshow('AutoTill', output_large)
                if cv2.waitKey(5) == 3:
                    pass
            except Exception as error:
                print('\tERROR in display(): %s' % str(error))
Example No. 30
    def update_display(self, images, masks, results):
        if self.VERBOSE: print('[Displaying Images] %s' % datetime.strftime(datetime.now(), self.TIME_FORMAT))
        ## Draw lines on Images
        estimate = results['estimate'] + self.PIXEL_CENTER
        average = results['average'] + self.PIXEL_CENTER
        mod_images = []
        for img in images:
            try:
                cv2.line(img, (estimate, 0), (estimate, self.PIXEL_HEIGHT), (127,0,255), 3)
                cv2.line(img, (average, 0), (average, self.PIXEL_HEIGHT), (127,255,0), 3)
                mod_images.append(img)
            except Exception as error:
                print('\tERROR in display(): %s' % str(error))   

        ## Generate output
        try:
            output = numpy.hstack(mod_images)
            cv2.namedWindow("AutoTill", cv2.WND_PROP_FULLSCREEN)          
            cv2.setWindowProperty("AutoTill", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
            cv2.imshow('AutoTill', output)
            if cv2.waitKey(5) == 27:
                pass
        except Exception as error:
            print('\tERROR in display(): %s' % str(error))
Example No. 31
def main():
    save_flag = False
    alert_flag = False
    alert_frame_cnt = 0
    cam_sensor = CameraSensor()
    thermal_sensor = ThermalSensor()
    cascPath = "/home/pi/face_rect/haarcascade_frontalface_alt.xml"
    cam_sensor.set_face_cascade(cascPath)

    #video_capture = cv2.VideoCapture('1.mp4')
    video_capture = cv2.VideoCapture(0)
    #video_capture.set(10,65)
    #video_capture.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0)
    #cv2.VideoCapture.set(17,3)

    if True:  #show full screen
        cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
        cv2.setWindowProperty('Video', cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    thermal_img = cv2.imread('thermal_back.png')

    ry, rx, rw, rh = cam_sensor.set_sensor_rect(video_capture)
    sensor_rect = (rx, ry, rw, rh)

    qrimg_show_cnt = 0
    alert_show_cnt = 0
    qr_img = None
    temp_str = ''

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        bottom_msg = ''

        # Display the resulting frame
        if ret:
            if alert_flag == True:
                bottom_msg = WARRING_CONTACT_HOSPITAL
                alert_show_cnt += 1
                if alert_show_cnt > ALERT_SHOW_FRAME_COUNT:
                    alert_show_cnt = 0
                    alert_flag = False
                    temp_str = ''
                    GPIO.cleanup()
            else:
                if qr_img is None or qrimg_show_cnt == 0:
                    bottom_msg = MSG_NO_FACE
                    #time.sleep(0.5)
                    end_flag = False
                    #frame =cv2.flip(frame,-1)
                    faces = cam_sensor.get_faces(frame)
                    cv2.rectangle(frame, (rx, ry), (rx + rw, ry + rh),
                                  (0, 255, 255), 2)  # out

                    if len(faces) == 0:
                        thermal_sensor.set_face_state(True)
                    else:
                        thermal_sensor.set_face_state(False)

                    print("face count:", len(faces))
                    for face in faces:
                        is_in_sensor = cam_sensor.is_face_in_sensor(
                            face, sensor_rect)
                        (x, y, w, h) = face

                        if is_in_sensor == False or rw * 0.4 > w:  # sensor_rect_width*0.5 > face rect width
                            if is_in_sensor == True:
                                bottom_msg = MSG_SMALL_FACE
                            continue

                        ret, value = thermal_sensor.get_face_temper()

                        print("state of thermal:", ret)
                        bottom_msg = MSG_OK_FACE
                        if ret == THERMAL_STATE_DETECTING:
                            bottom_msg = MSG_OK_FACE  #show detecting

                        if value < 36.6 and ret == THERMAL_STATE_DETECTED:  # this is temperature of face
                            cv2.putText(frame, str(value), (10, 10),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                        (0, 0, 255), 1)
                            qr = qrcode.QRCode(
                                version=1,
                                error_correction=qrcode.constants.
                                ERROR_CORRECT_H,
                                box_size=15,
                                border=6,
                            )
                            #temp_str = str(value) #str(34.444)
                            temp_str = "{:10.1f}".format(value)
                            encodedBytes = base64.b64encode(
                                temp_str.encode("utf-8"))
                            encodedStr = str(encodedBytes, "utf-8")
                            qrdata = 'http://203.174.35.118/register_customer/' + encodedStr + '/china'
                            qr.add_data(qrdata)
                            qr.make(fit=True)
                            img = qr.make_image(fill_color="black",
                                                back_color="white")

                            img.save('/home/pi/face_rect/qrimage/a.png')
                            qr_img = cv2.imread(
                                '/home/pi/face_rect/qrimage/a.png')
                            qrimg_show_cnt += 1
                            frame = cam_sensor.image_add(frame,
                                                         qr_img,
                                                         sensor_rect,
                                                         alpha=1.0)

                            thermal_sensor.onled(green)
                        elif value > 36.6 and ret == THERMAL_STATE_DETECTED:
                            bottom_msg = WARRING_CONTACT_HOSPITAL
                            temp_str = "{:10.1f}".format(value)
                            alert_flag = True
                            alert_show_cnt += 1
                            thermal_sensor.onled(red)
                        break
                if qr_img is not None and qrimg_show_cnt < QRCODE_SHOW_FRAME_COUNT:  # this  is time of qr duration!
                    frame = cam_sensor.image_add(frame, qr_img, sensor_rect)
                    qrimg_show_cnt += 1

                    bottom_msg = MSG_SCAN_QR

                if qr_img is not None and qrimg_show_cnt >= QRCODE_SHOW_FRAME_COUNT:
                    qr_img = None
                    qrimg_show_cnt = 0
                    bottom_msg = ''
                    temp_str = ''
                    GPIO.cleanup()
            # put bottom message
            msg_font = cv2.FONT_HERSHEY_COMPLEX
            if bottom_msg != WARRING_CONTACT_HOSPITAL:
                textSize, baseline = cv2.getTextSize(bottom_msg, msg_font, 0.7,
                                                     2)
                textSizeWidth, textSizeHeight = textSize
                top = int(frame.shape[1] / 2 - textSizeWidth / 2)
                left = frame.shape[0] - textSizeHeight - 10
                cv2.putText(frame, bottom_msg, (top, left), msg_font, 0.7,
                            (255, 255, 0), 2)
            else:
                textSize, baseline = cv2.getTextSize(bottom_msg, msg_font, 0.9,
                                                     2)
                textSizeWidth, textSizeHeight = textSize
                top = int(frame.shape[1] / 2 - textSizeWidth / 2)
                left = frame.shape[0] - textSizeHeight - 20
                cv2.putText(frame, bottom_msg, (top, left), msg_font, 0.9,
                            (0, 0, 255), 2)

            # put temperature text
            #temp_str = "33.4"
            val_font = cv2.FONT_HERSHEY_COMPLEX
            textSize, baseline = cv2.getTextSize(temp_str, val_font, 0.7, 2)
            textSizeWidth, textSizeHeight = textSize
            top = int(frame.shape[1] / 2 - (textSizeWidth + 10) / 2)
            left = ry + rh + textSizeHeight + 5

            # draw text background rect
            #cv2.rectangle(frame, (top, left),
            #              (top + textSizeHeight, left+textSizeWidth),
            #              (255,255,255), thickness=cv2.FILLED)

            cv2.putText(frame, temp_str, (top, left), val_font, 0.7,
                        (0, 255, 255), 2)
            res_w = get_monitors()[0].width
            res_h = get_monitors()[0].height
            frame = cv2.resize(frame, (res_w, res_h))  #resize as full screen
            cv2.imshow('Video', frame)
            if save_flag:
                out.write(frame)
            time.sleep(0.001)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            #end_flag = True
            break

    # if end_flag:
    #     thermal_stat = False
    #     thermal_thread.join()
    if save_flag:
        out.release()
    video_capture.release()
    cv2.destroyAllWindows()
Example No. 32
orig_mask = imgHat[:,:,3]

# Create the inverted mask for the hat & monocle
orig_mask_inv = cv2.bitwise_not(orig_mask)

# Convert hat & monocle image to BGR
# and save the original image size (used later when re-sizing the image)
imgHat = imgHat[:,:,0:3]
origHatHeight, origHatWidth = imgHat.shape[:2]

#-----------------------------------------------------------------------------
#       Main program loop
#-----------------------------------------------------------------------------

cv2.namedWindow("Live Feed", 0)  # 0 == cv2.WINDOW_NORMAL
cv2.setWindowProperty("Live Feed", 0, 1)  # property 0 == WND_PROP_FULLSCREEN, value 1 == WINDOW_FULLSCREEN

# collect video input from first webcam on system
video_capture = cv2.VideoCapture(deviceId)

while(cv2.waitKey(30) != 27):
    # Capture video feed
    ret, frame = video_capture.read()
    height,width,_ = frame.shape
    overlayed = frame
    #small = frame
    # Create greyscale image from the video feed
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in input video stream
    faces = faceCascade.detectMultiScale(
Example No. 33
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

cv2.namedWindow("image", cv2.WINDOW_NORMAL)
cv2.setWindowProperty("image", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
src = '/aimldl-cod/practice/nikhil/nopred.json'
with open(src, 'r') as file:
    json_lines = file.readlines()
    for i in json_lines:
        b = i.rstrip('\n')
        im = cv2.imread(b)
        cv2.imshow("image", im)
        cv2.waitKey(0)
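If any line in the file does not point to a readable image, `cv2.imread` returns None and `imshow` would raise. A guarded variant of the same viewer loop (a sketch that assumes, like the example, one image path per line in `nopred.json`):

import cv2

src = '/aimldl-cod/practice/nikhil/nopred.json'  # path reused from the example above
cv2.namedWindow("image", cv2.WINDOW_NORMAL)
cv2.setWindowProperty("image", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

with open(src, 'r') as f:
    for line in f:
        path = line.strip()
        im = cv2.imread(path)
        if im is None:  # skip missing or unreadable files instead of crashing
            print("could not read:", path)
            continue
        cv2.imshow("image", im)
        if cv2.waitKey(0) & 0xFF == ord('q'):  # press q to stop early
            break
cv2.destroyAllWindows()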
Example No. 34
def read_cam():

    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        windowName = "Edge Detection"
        cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(windowName, 1280, 720)
        cv2.moveWindow(windowName, 0, 0)
        cv2.setWindowTitle(windowName, "InfraRed Stereo")

        cap.set(cv2.CAP_PROP_CONVERT_RGB, False)
        showWindow = 3  # Show all stages
        showHelp = True
        edgeThreshold = 40
        showFullScreen = False
        while True:
            if cv2.getWindowProperty(windowName, 0) < 0:
                break;
            ret_val, frame = cap.read();

            IR_L, BGGR_L = conversion(frame)
            hsv = cv2.cvtColor( BGGR_L, cv2.COLOR_BGR2GRAY)
            blur = cv2.GaussianBlur(hsv, (7, 7), 1.5)
            edges = cv2.Canny(blur, 0, edgeThreshold)
            if showWindow == 3:
                frameRs = cv2.resize( BGGR_L, (640, 360))
                hsvRs = cv2.resize(hsv, (640, 360))
                vidBuf = np.concatenate((frameRs, cv2.cvtColor(hsvRs, cv2.COLOR_GRAY2BGR)), axis=1)
                blurRs = cv2.resize(blur, (640, 360))
                edgesRs = cv2.resize(edges, (640, 360))
                vidBuf1 = np.concatenate(
                    (cv2.cvtColor(blurRs, cv2.COLOR_GRAY2BGR), cv2.cvtColor(edgesRs, cv2.COLOR_GRAY2BGR)), axis=1)
                vidBuf = np.concatenate((vidBuf, vidBuf1), axis=0)
            if showWindow == 1:  # Show Camera Frame
                displayBuf =  BGGR_L
            elif showWindow == 2:  # Show Canny Edge Detection
                displayBuf = edges
            elif showWindow == 3:  # Show All Stages
                displayBuf = vidBuf

            cv2.imshow(windowName, displayBuf)
            key = cv2.waitKey(10)
            if key == 27:  # Check for ESC key
                cv2.destroyAllWindows()
                break;
            elif key == 49:  # 1 key, show frame
                cv2.setWindowTitle(windowName, "Camera Feed")
                showWindow = 1
            elif key == 50:  # 2 key, show Canny
                cv2.setWindowTitle(windowName, "Canny Edge Detection")
                showWindow = 2
            elif key == 51:  # 3 key, show Stages
                cv2.setWindowTitle(windowName, "Camera, Gray scale, Gaussian Blur, Canny Edge Detection")
                showWindow = 3
            elif key == 52:  # 4 key, toggle help
                showHelp = not showHelp
            elif key == 44:  # , lower canny edge threshold
                edgeThreshold = max(0, edgeThreshold - 1)
                print('Canny Edge Threshold Maximum: ', edgeThreshold)
            elif key == 46:  # . raise canny edge threshold
                edgeThreshold = edgeThreshold + 1
                print('Canny Edge Threshold Maximum: ', edgeThreshold)
            elif key == 74:  # Toggle fullscreen; This is the F3 key on this particular keyboard
                # Toggle full screen mode
                if showFullScreen == False:
                    cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
                else:
                    cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
                showFullScreen = not showFullScreen
    else:
        print ("camera open failed")
Example No. 35
def main():
    device = RealSense('C:/Users/s_nava02/sciebo/GECCO/pinktest.bag')
    file = False
    #print("Color intrinsics: ", device.getcolorintrinsics())
    #print("Depth intrinsics: ", device.getdepthintrinsics())
    # Initiate ORB detector
    orb = cv2.ORB_create()
    flag = 500
    try:
        while True:
            image = device.getcolorstream()
            if file:
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            depth = device.getdepthstream()
            #image = cv2.imread("D:/Users/s_nava02/Desktop/raw_output.png")
            screenshot = image.copy()
            if flag == 0:
                cv2.imwrite("C:/GECCO/raw_output.png", screenshot)
            flag -= 1
            ###################################################
            # def gethandmask(colorframe image):
            ###################################################
            # Convert BGR to HSV
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            # define range of blue color in HSV // (145, 100, 20), (155, 255, 255)
            # Todo: From RCARAP (IDK why it works so differently 'bad')
            #lower_pink = np.array([140, 0.1 * 255, 0.05 * 255])
            #upper_pink = np.array([170, 0.8 * 255, 0.6 * 255])
            # Todo: New approach, still not working as good as javascript RCARAP, it needs to be refined later
            lower_pink = np.array([130, 100, 100])
            upper_pink = np.array([170, 255, 255])
            # Threshold the HSV image to get only blue colors
            mask = cv2.inRange(hsv, lower_pink, upper_pink)
            # Bitwise-AND mask and original image
            # res = cv2.bitwise_and(colorframe, colorframe, mask=mask)
            # remove noise
            # imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
            # https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_filtering/py_filtering.html
            blurred = cv2.blur(mask,
                               (5, 5))  # TODO: VERY BASIC, TRY OTHER FILTERS
            # https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html
            # ret, thresholded = cv2.threshold(blurred, 50, 255, 0)  # TODO: VERY BASIC, TRY OTHER THRESHHOLDS
            ret, thresholded = cv2.threshold(blurred, 200, 255,
                                             cv2.THRESH_BINARY)
            # th3 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
            ######################
            # return tresholded
            ######################
            #cv2.imshow('RealSense', thresholded)

            ###################################################
            # getcontours(thresholded image):
            ###################################################
            mode = cv2.RETR_EXTERNAL  # cv2.RETR_LIST
            method = cv2.CHAIN_APPROX_SIMPLE
            hand_contours = []
            contours, hierarchy = cv2.findContours(thresholded, mode, method)
            # contours = sorted(contours, key=cv2.contourArea)  # TODO: is this really necessary?
            for c in contours:
                # If contours are bigger than a certain area we push them to the array
                if cv2.contourArea(c) > 3000:
                    hand_contours.append(c)
                    #print("contour found")
            #####################
            # return hand_contours
            #####################

            # https://docs.opencv.org/3.4/dd/d49/tutorial_py_contour_features.html
            ###################################################
            # Get Rough Hull
            ###################################################
            # TODO: try to not compute convex hull twice
            # https://stackoverflow.com/questions/52099356/opencvconvexitydefects-on-largest-contour-gives-error
            for cnt in hand_contours:
                hull = cv2.convexHull(cnt)
                index = cv2.convexHull(cnt, returnPoints=False)
                # cv2.drawContours(image, cnt, 0, (255, 255, 0), 2)
                # cv2.drawContours(image, hull_list, i, (0, 255, 0), 2)

                # TODO: different ways of grouping hull points into neigbours/clusters
                # term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)
                # _ret, labels, centers = cv2.kmeans(np.float32(hull[:,0]), 6, None, term_crit, 10, 0)
                # point_tree = spatial.cKDTree(np.float32(hull[:,0]))
                # print("total points: ",len(np.float32(hull_list[i][:,0])), " - Total groups: ", point_tree.size)
                # neigh = NearestNeighbors(n_neighbors=2, radius=0.4)
                # output = neigh.fit(hull[:,0])
                clustering = DBSCAN(eps=10, min_samples=1).fit(hull[:, 0])
                #print(len(clustering.labels_))
                #print(hull_list[i])
                #print(clustering.labels_)
                #print(clustering.components_)
                rhull = np.column_stack((hull[:, 0], index[:, 0]))
                centers = utils.groupPointsbyLabels(rhull, clustering.labels_)
                defects = cv2.convexityDefects(cnt, np.array(centers)[:, 2])
                c = 0
                for p in hull:
                    # print("init ", p, " - ")
                    cv2.circle(image, tuple(p[0]), 10,
                               id_to_random_color(clustering.labels_[c]))
                    c += 1
                for p in centers:
                    #print("init ", p[0], " - ")
                    cv2.circle(image, (p[0], p[1]), 4, (0, 255, 255))
                    #pass
                for p in centers:
                    # cv2.circle(image, (int(p[0]), int(p[1])), 4, (0, 255, 255))
                    pass
                ###############################################################
                # getHullDefectVertices
                ###############################################################
                # get neighbor defect points of each hull point
                hullPointDefectNeighbors = []  # 0: start, 1: end, 2:defect
                #print("defects.shape[0]: ",defects.shape[0])
                for x in range(defects.shape[0]):
                    s, e, f, d = defects[x, 0]
                    start = tuple(cnt[s][0])
                    end = tuple(cnt[e][0])
                    far = tuple(cnt[f][0])
                    cv2.line(image, start, end, [0, 255, 0], 1)
                    cv2.circle(image, far, 4, (0, 0, 255))
                    cv2.line(image, start, far, [255, 150, 0], 1)
                    cv2.line(image, end, far, [255, 150, 0], 1)
                    hullPointDefectNeighbors.append(
                        [start, end, far]
                    )  # each defect point (red) has its neihbour points (yellow)
                ###############################################################
                # filterVerticesByAngle
                ###############################################################
                #maxAngleDeg = 60
                maxAngleDeg = math.radians(60)
                i = 0
                fingers = []
                for triple in hullPointDefectNeighbors:
                    cf = triple[0]  # candidate finger
                    rd = triple[2]  # right deflect
                    if i == 0:  # left deflect
                        ld = hullPointDefectNeighbors[
                            len(hullPointDefectNeighbors) - 1][2]
                    else:
                        ld = hullPointDefectNeighbors[i - 1][2]
                    # alternative maths
                    v_cp_ld = (ld[0] - cf[0], ld[1] - cf[1])
                    v_cp_rd = (rd[0] - cf[0], rd[1] - cf[1])
                    beta = angle_between(v_cp_ld, v_cp_rd)
                    #print(beta)
                    cv2.circle(image, (cf[0], cf[1]), 4,
                               (0, 0, 255))  # candidate finger: red
                    cv2.circle(image, (rd[0], rd[1]), 4,
                               (255, 0, 0))  # right defect: blue
                    cv2.circle(image, (ld[0], ld[1]), 4,
                               (255, 0, 0))  # left defect: blue
                    if beta < maxAngleDeg:
                        fingers.append(cf)
                    # old maths
                    #if (math.atan2(cf[1] - rd[1], cf[0] - rd[0]) < maxAngleDeg) and (
                    #            math.atan2(cf[1] - ld[1], cf[0] - ld[0]) < maxAngleDeg) and len(fingers) < 5:
                    #    fingers.append(triple[0])
                    i += 1
                #print(len(fingers))
                for f in fingers:
                    cv2.circle(image, (f[0], f[1]), 4,
                               (255, 255, 255))  # identified finger: white
                    #print("image size: ", image.shape)
                    #print("color pixel value of ", f, ":", image[f[0]][f[1]])
                    pass

            # Show images
            cv2.namedWindow("Output Frame", cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty("Output Frame", cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_NORMAL)
            cv2.imshow('Output Frame', image)
            cv2.waitKey(1)

    finally:
        # Stop streaming
        device.stop()
        pass
Example No. 36
        cv2.imshow('frame', frame)
    else:
        print('Empty frame!')
        continue

    if first_frame:
        cv2.resizeWindow('frame', frame.shape[1], frame.shape[0])
        first_frame = False

    raw_key = cv2.waitKey(int(args.delay * 1000 + 0.5) if args.delay else 1)
    key = raw_key & 0xFF
    if key == ord('q'):
        break
    elif key == ord('f'):
        fullscreen = not fullscreen
        cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, int(fullscreen))  # 1 == WINDOW_FULLSCREEN, 0 == WINDOW_NORMAL
    elif key == ord('r'):
        reset()
    elif key == ord('b'):
        grayscale = not grayscale
    elif key == ord('g'):
        gaussian = not gaussian
    elif key == ord('i'):
        info = not info
    elif key == ord('I'):
        info_mode = info_mode + 1 if info_mode < max_info_mode else 0
    elif key == ord('l'):
        # mirror, as in 'looking glass'
        mirror = not mirror
    elif key == ord('m'):
        mean = not mean
Example No. 37
#!/usr/bin/env python
import cv2
import time
import numpy as np

# pi specific imports
from picamera.array import PiRGBArray
from picamera import PiCamera
import RPi.GPIO as GPIO
# end of imports

cv2.namedWindow("Output", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Output", cv2.WND_PROP_FULLSCREEN, 1)  # 1 == cv2.WINDOW_FULLSCREEN

# camera
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(320, 240))
time.sleep(2.5)

# buttons
btn1 = 17
btn2 = 22
btn3 = 23
btn4 = 27
btnShutter = btn1
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(btn1, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(btn2, GPIO.IN, GPIO.PUD_UP)
Example No. 38
# start the video input stream
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}

# initialize our window
cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

width, height = None, None
detectionFrames, refreshFrames = 0, 0
occupancyChanged = False

fps = FPS().start()

# process the webcam feed frame by frame
while True:
    frame = vs.read()

    # cap frame size at 500 to improve performance
    frame = imutils.resize(frame, width=500)
    #rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    rgb = frame
Example No. 39
    cv2.circle(frame, (320 - x1,360 + y1), 30, (0, 0, 0), thickness=-1, lineType=8, shift=0)
    cv2.circle(frame, (960 - x1,360 + y1), 30, (0, 0, 0), thickness=-1, lineType=8, shift=0)

    
    
    #Draw a rectangle around the faces
    #for (x, y, w, h) in faces:
        #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        
        # cv2.putText(frame,'Face I See'+ str(x)+'ccc' + str(y), org, font,fontScale, color, thickness, cv2.LINE_AA) 
        # cv2.circle(frame, (320,360), 3, (0, 255, 0), thickness=-1, lineType=8, shift=0)
        # cv2.circle(frame, (960,360), 3, (0, 255, 0), thickness=-1, lineType=8, shift=0)

    # Display the resulting frame
    cv2.namedWindow('FaceDetection', cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty('FaceDetection',cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
    cv2.imshow('FaceDetection', frame)

    if k%256 == 27: #ESC Pressed
        break
    elif k%256 == 32:
        # SPACE pressed
        img_name = "facedetect_webcam_{}.png".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_name))
        img_counter += 1
        

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
Example No. 40
def video_processing(graph, category_index, video_file_name, show_video_window, camera_id, run_flag, message_queue):
    if camera_id is None:
        cap = cv2.VideoCapture(0)
        framerate = cap.get(cv2.CAP_PROP_FPS)
        framecount = 0
        input_fps = cap.get(cv2.CAP_PROP_FPS)
        ret, frame = cap.read()
        resized_frame = cv2.resize(frame, dsize=(config.display_window_width, config.display_window_height))
        size = (resized_frame.shape[:2])
        video_output = 'output.mp4'
        out = cv2.VideoWriter(video_output, cv2.VideoWriter_fourcc(*'DIVX'), 15, size)
        output_fps = input_fps / 1
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(video_output, fourcc, output_fps, (resized_frame.shape[1], resized_frame.shape[0]))

        if show_video_window:
            cv2.namedWindow('ppe', cv2.WINDOW_NORMAL)
            if config.display_full_screen:
                cv2.setWindowProperty('ppe', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty('ppe', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)

        if (config.capture_image_width, config.capture_image_height) in config.supported_video_resolution:
            print("video_processing:", "supported video resolution")
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, config.capture_image_width)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, config.capture_image_height)

        video_output = "output.mp4"
        with graph.as_default():
            print("video_processing:", "default tensorflow graph")
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            with tf.Session() as sess:
                print("video_processing:", "tensorflow session")
                send_message_time = time.time()
                frame_counter = 0
                i = 0  # default is 0
                dbImage = Image("newdata.db")
                while (cap.isOpened()) and ret is True:
                    ret, frame = cap.read()

                    if config.input_type.lower() == "file":
                        frame_counter += 1
                        if frame_counter == int(cap.get(cv2.CAP_PROP_FRAME_COUNT)):
                            frame_counter = 0
                            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                            continue

                    if frame is None:
                        print("video_processing:", "null frame")
                        break

                    resized_frame = cv2.resize(frame, dsize=(640, 360))

                    image_expanded = np.expand_dims(resized_frame, axis=0)
                    output_dict = run_inference_for_single_image(image_expanded, sess, tensor_dict)

                    detection_scores = np.where(output_dict["detection_scores"] > 0.7, True, False)
                    detection_boxes = output_dict["detection_boxes"][detection_scores]
                    detection_classes = output_dict["detection_classes"][detection_scores]

                    head_boxes = detection_boxes[np.where(detection_classes == 1)]
                    vest_boxes = detection_boxes[np.where(detection_classes == 2)]
                    person_boxes = detection_boxes[np.where(detection_classes == 3)]
                    persons = []
                    for person_box in person_boxes:
                        person = dict()
                        person["head"], person["vest"] = is_person(head_boxes, vest_boxes,
                                                                                    person_box)
                        persons.append(person)

                    vis_utils.visualize_boxes_and_labels_on_image_array(
                        frame,
                        output_dict['detection_boxes'],
                        output_dict['detection_classes'],
                        output_dict['detection_scores'],
                        category_index,
                        instance_masks=output_dict.get('detection_masks'),
                        use_normalized_coordinates=True,
                        line_thickness=4)

                    if time.time() - send_message_time > config.message_send_interval / 1000.0:
                        resized_frame = cv2.resize(frame,
                                                   dsize=(config.storage_image_width, config.storage_image_height))
                        try:
                            message_queue.put_nowait(
                                (camera_id, output_dict, resized_frame, config.object_confidence_threshold))
                        except queue.Full:
                            print("message queue is full")
                        else:
                            send_message_time = time.time()

                    if show_video_window:
                        resized_frame = cv2.resize(frame,
                                                   dsize=(config.display_window_width, config.display_window_height))
                        height, width = resized_frame.shape[:2]
                        hat_count = 0
                        vest_count = 0
                        hat_and_vest_count = 0
                        for person in persons:
                            if person['head'] and person['vest']:
                                hat_and_vest_count += 1
                            elif person['head']:
                                hat_count += 1
                            elif person['vest']:
                                vest_count += 1

                        resized_frame = cv2.putText(resized_frame, "No of person: " + str(len(person_boxes)),
                                                    (30, height - 170), cv2.FONT_HERSHEY_TRIPLEX, 1, (150, 100, 50), 2,
                                                    cv2.LINE_AA)
                        
                        cv2.imshow('ppe', resized_frame)
                        size = (
                            int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                            )
                        
                        if len(person_boxes) >= 1:
                            print("detected at:", time.time())
                            # a snapshot writer is opened for this detection event
                            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                            video_writer = cv2.VideoWriter('CameraCapture.avi', fourcc, 1, size)
                            while True:
                                # Capture frame-by-frame
                                success, image = cap.read()
                                if not success:
                                    break
                                framecount += 1
                                # Write the frame closest to 10 seconds, then leave the capture loop
                                if framecount == (framerate * 10):
                                    framecount = 0
                                    cv2.imshow('image', image)
                                    video_writer.write(image)
                                    break
                            video_writer.release()

                            pic_name = "frame" + str(frame_counter) + ".jpg"
                            cv2.imwrite("./Pictures/" + pic_name, resized_frame)

                            with open("./Pictures/" + pic_name, 'rb') as f:
                                dbImage.create_database(name=pic_name, image=f.read())
                        out.write(resized_frame)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            run_flag.value = 0
                            break
    else:
        print("[INFO] starting cameras...")
        cap = VideoStream(src=int(camera_id)).start()
        # picam = VideoStream(usePiCamera=True).start()

        # read the next frame from the video stream and resize
        # it to have a maximum width of 400 pixels
        frame = cap.read()
        resized_frame = cv2.resize(frame, dsize=(config.display_window_width, config.display_window_height))
        video_output = 'output.mp4'
        output_fps = 30
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        # VideoWriter expects the frame size as (width, height)
        out = cv2.VideoWriter(video_output, fourcc, output_fps,
                              (resized_frame.shape[1], resized_frame.shape[0]))

        if show_video_window:
            cv2.namedWindow('ppe', cv2.WINDOW_NORMAL)
            if config.display_full_screen:
                cv2.setWindowProperty('ppe', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty('ppe', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)

        video_output = "output.mp4"
        with graph.as_default():
            print("video_processing:", "default tensorflow graph")
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            with tf.Session() as sess:
                print("video_processing:", "tensorflow session")
                send_message_time = time.time()
                frame_counter = 0
                i = 0  # snapshot counter
                dbImage = Image("newdata.db")  # image database used to persist detection snapshots
                while True:
                    frame = cap.read()

                    if frame is None:
                        print("video_processing:", "null frame")
                        break
                    frame_counter += 1
                    resized_frame = cv2.resize(frame, dsize=(640, 360))

                    image_expanded = np.expand_dims(resized_frame, axis=0)
                    output_dict = run_inference_for_single_image(image_expanded, sess, tensor_dict)

                    detection_scores = np.where(output_dict["detection_scores"] > 0.7, True, False)
                    detection_boxes = output_dict["detection_boxes"][detection_scores]
                    detection_classes = output_dict["detection_classes"][detection_scores]

                    head_boxes = detection_boxes[np.where(detection_classes == 1)]
                    vest_boxes = detection_boxes[np.where(detection_classes == 2)]
                    person_boxes = detection_boxes[np.where(detection_classes == 3)]
                    persons = []
                    for person_box in person_boxes:
                        person = dict()
                        person["head"], person["vest"] = is_person(head_boxes, vest_boxes,
                                                                                    person_box)
                        persons.append(person)

                    vis_utils.visualize_boxes_and_labels_on_image_array(
                        frame,
                        output_dict['detection_boxes'],
                        output_dict['detection_classes'],
                        output_dict['detection_scores'],
                        category_index,
                        instance_masks=output_dict.get('detection_masks'),
                        use_normalized_coordinates=True,
                        line_thickness=4)

                    if time.time() - send_message_time > config.message_send_interval / 1000.0:
                        resized_frame = cv2.resize(frame,
                                                   dsize=(config.storage_image_width, config.storage_image_height))
                        try:
                            message_queue.put_nowait(
                                (camera_id, output_dict, resized_frame, config.object_confidence_threshold))
                        except queue.Full:
                            print("message queue is full")
                        else:
                            send_message_time = time.time()

                    if show_video_window:
                        resized_frame = cv2.resize(frame,
                                                   dsize=(
                                                       config.display_window_width, config.display_window_height))
                        height, width = resized_frame.shape[:2]
                        hat_count = 0
                        vest_count = 0
                        hat_and_vest_count = 0
                        for person in persons:
                            if person['head'] and person['vest']:
                                hat_and_vest_count += 1
                            elif person['head']:
                                hat_count += 1
                            elif person['vest']:
                                vest_count += 1

                        resized_frame = cv2.putText(resized_frame, "No of person: " + str(len(person_boxes)),
                                                    (30, height - 170), cv2.FONT_HERSHEY_TRIPLEX, 1, (150, 100, 50),
                                                    2,
                                                    cv2.LINE_AA)
                    
                        cv2.imshow('ppe', resized_frame)
                        if len(person_boxes) >= 1:
                            print("detected at:", time.time())
                            cv2.imwrite('/home/hydro/person_detection-master/Pictures/snapshot_' + str(i) + '.jpg', resized_frame)
                            i += 1
                            pic_name = "frame" + str(frame_counter) + ".jpg"
                            cv2.imwrite("/home/hydro/person_detection-master/Pictures/" + pic_name, resized_frame)
                            with open("/home/hydro/person_detection-master/Pictures/" + pic_name, 'rb') as f:
                                dbImage.create_database(name=pic_name, image=f.read())
                        out.write(resized_frame)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            run_flag.value = 0
                            break

    print("video_processing:", "releasing video capture")
    out.release()
    #cap.release()
    cv2.destroyAllWindows()
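
# Note: the example above relies on an is_person() helper that is defined elsewhere
# in the project and is not shown here. Below is a minimal sketch of what such a
# matcher could look like, assuming TF Object Detection API boxes in normalized
# [ymin, xmin, ymax, xmax] order; the real helper may use IoU or other thresholds.
def is_person_sketch(head_boxes, vest_boxes, person_box):
    def center_inside(box, container):
        # the centre of `box` must fall inside `container`
        cy = (box[0] + box[2]) / 2.0
        cx = (box[1] + box[3]) / 2.0
        return container[0] <= cy <= container[2] and container[1] <= cx <= container[3]

    has_head = any(center_inside(b, person_box) for b in head_boxes)
    has_vest = any(center_inside(b, person_box) for b in vest_boxes)
    return has_head, has_vest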
Exemplo n.º 41
0
def gal():
    def clikz(event, x, y, flags, param):

        if event == cv2.EVENT_LBUTTONDOWN:

            if (x > 0 * int(scrn_x / 4) and x < 1 * int(scrn_x / 4)
                    and y > 2 * int(scrn_y / 4) and y < 3 * int(scrn_y / 4)):
                print("Before")
            elif (x > 3 * int(scrn_x / 4) and x < 4 * int(scrn_x / 4)
                  and y > 2 * int(scrn_y / 4) and y < 3 * int(scrn_y / 4)):
                print("After")
            else:
                pass
            print("(" + str(x) + "," + str(y) + ")")

    img = cv2.imread("white.png")

    cv2.namedWindow("pic", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("pic", cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    font = cv2.FONT_HERSHEY_SIMPLEX
    scrn_x, scrn_y = pag.size()
    print(scrn_x, scrn_y)

    while (True):
        img = cv2.imread("white.png")
        xp, yp = pag.position(
        )  # this takes the mouse pointer location on the screen

        img = cv2.resize(
            img, (scrn_x, scrn_y), interpolation=cv2.INTER_CUBIC
        )  # this is used to resize the incoming feed to fit the screen size

        cv2.putText(
            img, "(" + str(xp) + "," + str(yp) + ")", (xp, yp), font, 1,
            (0, 0, 0),
            2)  # this prints the location of the pointer on screen as a tuple

        #horizontal
        '''cv2.rectangle(img,(int(scrn_x-1915),int(scrn_y-1075)),(int(scrn_x-5),int(scrn_y-810)),(0,0,255),2) #rect 1
	    cv2.rectangle(img,(int(scrn_x-1915),int(scrn_y-805)),(int(scrn_x-5),int(scrn_y-542)),(0,0,255),2) #rect 2
	    cv2.rectangle(img,(int(scrn_x-1915),int(scrn_y-535)),(int(scrn_x-5),int(scrn_y-272)),(0,0,255),2) #rect 3
	    cv2.rectangle(img,(int(scrn_x-1915),int(scrn_y-265)),(int(scrn_x-5),int(scrn_y-5)),(0,0,255),2) #rect 4'''

        #vertical

        cv2.rectangle(img, (int(scrn_x - 1430), int(scrn_y - 1075)),
                      (int(scrn_x - 465), int(scrn_y - 5)), (0, 0, 255),
                      2)  #rect 2

        img1 = cv2.imread("death-note-l.jpg")
        img1_temp = cv2.resize(img1, (int(scrn_x - 465), int(scrn_y - 5)),
                               interpolation=cv2.INTER_CUBIC)

        cv2.putText(img, "<", (int(scrn_x / 16), int(7 * scrn_y / 16)), font,
                    1, (2, 2, 2), 2)

        cv2.putText(img, ">", (int(9 * scrn_x / 16), int(7 * scrn_y / 16)),
                    font, 1, (2, 2, 2), 2)

        cv2.setMouseCallback('pic', clikz)

        cv2.imshow("pic", img)
        cv2.imshow('pic', img1_temp)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # When everything done, release the capture
    cv2.destroyAllWindows()
Exemplo n.º 42
0
def main():
    print("Open camera...")
    cap = cv2.VideoCapture(0)

    print(cap)

    # set a lower resolution for speed up
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

    # env variables
    full_screen = False
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, 640, 480)
    cv2.moveWindow(WINDOW_NAME, 0, 0)
    cv2.setWindowTitle(WINDOW_NAME, WINDOW_NAME)

    t = None
    index = 0
    print("Build transformer...")
    transform = get_transform()
    print("Build Executor...")
    executor, ctx = get_executor()
    buffer = (
        tvm.nd.empty((1, 3, 56, 56), ctx=ctx),
        tvm.nd.empty((1, 4, 28, 28), ctx=ctx),
        tvm.nd.empty((1, 4, 28, 28), ctx=ctx),
        tvm.nd.empty((1, 8, 14, 14), ctx=ctx),
        tvm.nd.empty((1, 8, 14, 14), ctx=ctx),
        tvm.nd.empty((1, 8, 14, 14), ctx=ctx),
        tvm.nd.empty((1, 12, 14, 14), ctx=ctx),
        tvm.nd.empty((1, 12, 14, 14), ctx=ctx),
        tvm.nd.empty((1, 20, 7, 7), ctx=ctx),
        tvm.nd.empty((1, 20, 7, 7), ctx=ctx)
    )
    idx = 0
    history = [2]
    history_logit = []
    history_timing = []

    i_frame = -1

    print("Ready!")
    while True:
        i_frame += 1
        _, img = cap.read()  # (480, 640, 3) 0 ~ 255
        if i_frame % 2 == 0:  # skip every other frame to obtain a suitable frame rate
            t1 = time.time()
            print("type(img):", type(img), img.shape)  # (720, 1280, 3)
            img_tran = transform([Image.fromarray(img).convert('RGB')])
            print("type(img_tran):", type(img_tran), img_tran.shape)  # torch.Size([3, 224, 224])
            input_var = torch.autograd.Variable(img_tran.view(1, 3, img_tran.size(1), img_tran.size(2)))
            print("type(input_var):", type(input_var), input_var.shape)  # torch.Size([1, 3, 224, 224])
            img_nd = tvm.nd.array(input_var.detach().numpy(), ctx=ctx)
            print("type(img_nd):", type(img_nd), img_nd.shape)  # (1, 3, 224, 224)

            print("type(buffer):", type(buffer), len(buffer))  # (1, 3, 224, 224)
            print(type(buffer[0]), buffer[0].shape)
            print(type(buffer[1]), buffer[1].shape)

            inputs: Tuple[tvm.nd.NDArray] = (img_nd,) + buffer

            print("type(inputs):", type(inputs), len(inputs))
            print(type(inputs[0]), inputs[0].shape)
            print(type(inputs[1]), inputs[1].shape)

            outputs = executor(inputs)

            print("type(outputs):", type(outputs), len(outputs))  # (1, 3, 224, 224)
            print(type(outputs[0]), outputs[0].shape)
            print(type(outputs[1]), outputs[1].shape)

            print("buffer before:", buffer[0].shape, type(buffer), type(buffer[0]))
            buffer_before = buffer[0]
            feat, buffer = outputs[0], outputs[1:]
            print("buffer after:", buffer[0].shape, type(buffer), type(buffer[0]))
            buffer_after = buffer[0]
            print(buffer_before == buffer_after)
            buffer_new = (
                tvm.nd.empty((1, 3, 56, 56), ctx=ctx),
                tvm.nd.empty((1, 4, 28, 28), ctx=ctx),
                tvm.nd.empty((1, 4, 28, 28), ctx=ctx),
                tvm.nd.empty((1, 8, 14, 14), ctx=ctx),
                tvm.nd.empty((1, 8, 14, 14), ctx=ctx),
                tvm.nd.empty((1, 8, 14, 14), ctx=ctx),
                tvm.nd.empty((1, 12, 14, 14), ctx=ctx),
                tvm.nd.empty((1, 12, 14, 14), ctx=ctx),
                tvm.nd.empty((1, 20, 7, 7), ctx=ctx),
                tvm.nd.empty((1, 20, 7, 7), ctx=ctx)
            )
            print(buffer_before == buffer_new)
            print(buffer_after == buffer_new)

            assert isinstance(feat, tvm.nd.NDArray)

            if SOFTMAX_THRES > 0:
                feat_np = feat.asnumpy().reshape(-1)
                feat_np -= feat_np.max()
                softmax = np.exp(feat_np) / np.sum(np.exp(feat_np))

                print(max(softmax))
                if max(softmax) > SOFTMAX_THRES:
                    idx_ = np.argmax(feat.asnumpy(), axis=1)[0]
                else:
                    idx_ = idx
            else:
                idx_ = np.argmax(feat.asnumpy(), axis=1)[0]

            if HISTORY_LOGIT:
                history_logit.append(feat.asnumpy())
                history_logit = history_logit[-12:]
                avg_logit = sum(history_logit)
                idx_ = np.argmax(avg_logit, axis=1)[0]

            idx, history = process_output(idx_, history)

            t2 = time.time()
            print(f"{index} {catigories[idx]}")

            current_time = t2 - t1

        img = cv2.resize(img, (640, 480))
        img = img[:, ::-1]
        height, width, _ = img.shape
        label = np.zeros([height // 10, width, 3]).astype('uint8') + 255

        cv2.putText(label, 'Prediction: ' + catigories[idx],
                    (0, int(height / 16)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 0), 2)
        cv2.putText(label, '{:.1f} Vid/s'.format(1 / current_time),
                    (width - 170, int(height / 16)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 0), 2)

        img = np.concatenate((img, label), axis=0)
        cv2.imshow(WINDOW_NAME, img)

        key = cv2.waitKey(1)
        if key & 0xFF == ord('q') or key == 27:  # exit
            break
        elif key == ord('F') or key == ord('f'):  # full screen
            print('Changing full screen option!')
            full_screen = not full_screen
            if full_screen:
                print('Setting FS!!!')
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_NORMAL)

        if t is None:
            t = time.time()
        else:
            nt = time.time()
            index += 1
            t = nt

    cap.release()
    cv2.destroyAllWindows()
Exemplo n.º 43
0
                ser.write('0\n')
            except serial.SerialException as e:
                print("Serial Error, ignoring")
                ser.close()  # close port
                time.sleep(0.1)
                ser.open()
                # ser.write('blarg\n')
                # print ser.readline()
                # lastTime = time.time()

    # print(resizeTo)

    # show our detected faces, then clear the frame in
    # preparation for the next frame
    if showvideo == 1:
        # frameClone = imutils.resize(frameClone, width = screenWidth)
        cv2.putText(frameClone, TextString, (5, 460), cv2.FONT_HERSHEY_SIMPLEX,
                    1.4, (0, 255, 0), 2)
        cv2.namedWindow('I LIKE IT WHEN YOU WATCH', cv2.WINDOW_NORMAL)
        cv2.setWindowProperty('I LIKE IT WHEN YOU WATCH',
                              cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow('I LIKE IT WHEN YOU WATCH', frameClone)

    rawCapture.truncate(0)

    # if the 'q' key is pressed, stop the loop
    if cv2.waitKey(1) & 0xFF == ord("q"):
        # send to 0
        ser.write('0\n')
        break
Exemplo n.º 44
0
    def runNarerundar(self):

        windowname = "Narerundar"
        cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(windowname, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.waitKey(1)
        cv2.setWindowProperty(windowname, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_NORMAL)
        cv2.namedWindow(windowname)
        #cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml')  # Haar cascade features for face detection
        #cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_smile.xml')  # Haar cascade features for face detection
        cascade = cv2.CascadeClassifier(
            '/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml'
        )  # Haar cascade features for face detection
        #cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml')  # Haar cascade features for face detection
        #cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml')  # Haar cascade features for face detection
        #cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_extended.xml')  # Haar cascade features for face detection
        #cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')  # Haar cascade features for face detection

        cam = cv2.VideoCapture(0)
        overlayframe = cv2.imread('./img/build.png', cv2.IMREAD_UNCHANGED)
        #overlayframe = cv2.imread(sys.argv[1], cv2.IMREAD_UNCHANGED)
        self.layer2 = Image.fromarray(overlayframe)

        framecnt = 0
        sz = None

        while True:
            ret, frame = cam.read()
            w = frame.shape[0]
            h = frame.shape[1]

            if not ret:
                print('error?')
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.resize(
                gray, (int(frame.shape[1] / 4), int(frame.shape[0] / 4)))

            faces = cascade.detectMultiScale(gray)

            if len(faces) > 0:
                self.layer1 = Image.fromarray(np.uint8(frame)).convert('RGBA')
                layerc = Image.new('RGBA', (h, w), (0, 0, 0, 0))

                for rect in faces:
                    rect *= 4

                    if ((rect[2] < 0) or (rect[3] < 0)):
                        continue

                    zoom = 2
                    vratio = int(rect[2] * zoom) / self.layer2.height
                    sz = (int(rect[2] * zoom), int(rect[3] * vratio))
                    coord = list(rect)
                    coord[2] = coord[0] + coord[2]
                    coord[3] = coord[1] + coord[3]

                    tmp = self.layer2.resize(sz)
                    layerc.paste(tmp, (coord[0] - int(rect[2] / zoom),
                                       coord[1] - int(rect[3] / zoom)), tmp)

                im = Image.alpha_composite(self.layer1, layerc)
                cv2.imshow(windowname, np.asarray(im))

            else:
                cv2.imshow(windowname, frame)

            if cv2.waitKey(10) > 0:
                break

        cam.release()
        cv2.destroyWindow(windowname)
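Exemplo n.º 45
0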
import sys
import glob
import time
import pickle
import subprocess
from colorama import Fore
from pathlib import Path
import cv2
import numpy as np
TMP_PATH = '/tmp/chess_analysis'
WINDOW_NAME = 'is this split correctly? (y/n)'

# subprocess.run(['/home/eg4l/CLionProjects/ChessboardDetectionLib/ChessboardDetectionLib', '/home/eg4l/Downloads/test3', TMP_PATH])

cv2.namedWindow(WINDOW_NAME)
cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)

for file in glob.glob(TMP_PATH + '/*/_mozaic.*'):
    mozaic = cv2.imread(file, cv2.IMREAD_COLOR)

    cv2.imshow(WINDOW_NAME, mozaic)

    k = cv2.waitKey(0)

    if k == ord('y'):
        print("Yay")
    elif k == ord('n'):
        print("Nay")
    elif k == ord('q'):
        cv2.destroyAllWindows()
        exit(-1)
Exemplo n.º 46
0
            else:
                fire = 0
                cv2.rectangle(frame, (0, 0), (width, height), (0, 255, 0), 50)
                cv2.putText(frame, 'CLEAR', (int(width / 16), int(height / 4)),
                            cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 10,
                            cv2.LINE_AA)
                print(time, "Clear")

            # stop the timer and convert to ms. (to see how long processing and display takes)

            stop_t = (
                (cv2.getTickCount() - start_t) / cv2.getTickFrequency()) * 1000

            # image display and key handling

            cv2.imshow(windowName, frame)

            # wait fps time or less depending on processing time taken (e.g. 1000ms / 25 fps = 40 ms)

            key = cv2.waitKey(max(2,
                                  frame_time - int(math.ceil(stop_t)))) & 0xFF
            if (key == ord('x')):
                keepProcessing = False
            elif (key == ord('f')):
                cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
    else:
        print("usage: set FILEURL environment variable")

################################################################################
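Exemplo n.º 47
0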
    def run(self,
            port_name=None,
            drone_name=None,
            roll_version=1,
            face_detection=0):
        self.set_eventhandler()

        while (self.quit == 0):
            e1 = cv2.getTickCount()
            ###Connect
            if (self.drone_connect(port_name, drone_name)):
                print("connect!")

            ###vedio capture and save to previous frame
            if (self.cap.isOpened()):
                ret, frame = self.cap.read()
                frame = cv2.flip(frame, 1)
                pre_frame = cv2.resize(frame,
                                       None,
                                       fx=self.resize_num,
                                       fy=self.resize_num)
                pre_frame = cv2.cvtColor(pre_frame, cv2.COLOR_BGR2GRAY)
            else:
                print("video is not opened!")

            cv2.namedWindow(self.frame_name, cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty(self.frame_name, cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_FULLSCREEN)
            cv2.imshow(self.frame_name, frame)
            cv2.waitKey(1)
            ### Start with face detection
            if (face_detection == 1):
                self.face_detect(frame)

            ### Start with takeoff
            if (self.drone.isConnected()):
                self.drone.sendTakeOff()
            sleep(3)

            print("open : ", self.drone.isConnected(), "quit : ", self.quit)
            while (self.cap.isOpened() and self.quit == 0):
                print("open : ", self.drone.isConnected(), "quit : ",
                      self.quit)
                ret, frame = self.cap.read()
                self.frame_cnt += 1
                pre_frame, r, l = self.divide_screen(frame, pre_frame)
                self.right_area += r
                self.left_area += l

                ### Send the commands every 5 frame
                if (self.frame_cnt == 5):
                    sum = self.right_area + self.left_area
                    differ = self.left_area - self.right_area
                    self.initial_frame_cnt()

                    self.compare_move(sum, differ, roll_version)
                    if self.drone.isConnected():
                        self.drone.sendControl(*self.get_control())

                    # time stuff-----------------------------------------------------------------------
                    e2 = cv2.getTickCount()
                    timePassed = (e2 - e1) / cv2.getTickFrequency()
                    e1 = e2
                    fps = 1 / timePassed

                    # Set the update times for each of the following the bigger the time
                    # the less frequent the data will updated
                    self.batteryTimer = timeCounter(timePassed,
                                                    self.batteryTimer, 1)
                    #self.irRangeTimer = timeCounter(timePassed, self.irRangeTimer, 4)
                    if (self.drone.isConnected() and self.batteryTimer == 0):
                        self.drone.sendRequest(DataType.Battery)
                        sleep(0.03)

                    # We could check battery problem
                    if (self.battery != 0 and self.battery < 10):
                        print(self.battery)
                    # ------------------------------------------------------------------------------------

                    frame = self.draw_for_debug(frame, sum, differ,
                                                roll_version)
                    cv2.imshow(self.frame_name, frame)
                    cv2.waitKey(1)

                key = keyBoardController(self.drone, cv2.waitKey(1) & 0xFF)
                if (key):
                    if (key == 2): self.quit = 1
                    break
        self.cap.release()
        self.drone.close()
Exemplo n.º 48
0
    showImage = background.copy()

    if ret:
        makeContours(frame)

        print(len(contourList))
        for contourItem in contourList:

            overlayBackground = background.copy()

            for i in contourItem[0]:
                cv2.drawContours(overlayBackground, [i], 0, contourItem[1], 2)
            showImage = cv2.addWeighted(overlayBackground, 0.5, showImage, 0.8,
                                        0)

        showImage = cv2.addWeighted(showImage, 1, frame, cameraOpacity, 0)

        cv2.namedWindow("Artwork", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("Artwork", cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow("Artwork", showImage)

    else:
        break

    if cv2.waitKey(1) > 0: break

    time.sleep(framerate_waittime)

capture.release()
cv2.destroyAllWindows()
Exemplo n.º 49
0
        s30 = 0
        for i in range(1, 251):
            s30 = s30 + eye30list[i]

        s = s20 + s30
        s1 = s10 * 100
        per10 = s1 // s
        percent = str(per10)
        text1 = 'exhibition tired man'
        text2 = '% of tiredness'
        output = percent + text2

        font = cv2.FONT_HERSHEY_SIMPLEX
        position1 = (x1+5, y1-16)
        position2 = (x1+5, y2+27)
        fontScale = 0.5
        fontColor = (0, 255, 0)
        lineType = 1

        cv2.putText(frame, text1, position1, font, fontScale, fontColor, lineType)
        cv2.putText(frame, output, position2, font, fontScale, fontColor, lineType)

        cv2.namedWindow("Feel tired?", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("Feel tired?", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow(winname="Feel tired?", mat=frame)

        if cv2.waitKey(delay=1) == 27:
            break
cap.release()
cv2.destroyAllWindows()
Exemplo n.º 50
0
def callback(data):

    img = cv2.imread(
        '/home/ud/catkin_ws/src/experiment_miki/src/image/pop_90.png')
    h = 1024
    w = 768
    img = cv2.resize(img, (h, w))
    exp_num = rospy.get_param("/exp_num")
    rospy.set_param("exp_miki_img/switch", 1)
    print exp_num

    if exp_num == 4:
        #"print pass"
        r = rospy.Rate(10)
        pts_src = np.array([[0, 0], [1023, 0], [1023, 767], [0, 767]],
                           np.float32)
        pts_org = np.array([[-620, 2660], [620, 2660], [390, 1320],
                            [-390, 1320]])

        #print rospy.get_param("exp_miki_img/switch")

        cv2.namedWindow('window')
        cv2.namedWindow('screen', cv2.WINDOW_NORMAL)
        cv2.setWindowProperty('screen', cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

        while True:
            #print rospy.get_param("exp_miki_img/switch")
            if rospy.get_param("exp_miki_img/switch") == 0:
                cv2.destroyAllWindows()
                break
            #print pts_org[0], pts_org[1]
            current_pan = get_pan_position()

            weight = (0.80 - current_pan) * 362

            centroid = -(1650 * math.tan(current_pan))
            #print "centroid: ", centroid
            rot = np.float32(
                [[math.cos(current_pan), -(math.sin(current_pan))],
                 [math.sin(current_pan),
                  math.cos(current_pan)]])

            new_pts_org = np.array([[0, 0], [0, 0], [0, 0], [0, 0]],
                                   np.float32)
            new_pts_org[0] = np.dot(rot, pts_org[0].T)
            new_pts_org[1] = np.dot(rot, pts_org[1].T)
            new_pts_org[2] = np.dot(rot, pts_org[2].T)
            new_pts_org[3] = np.dot(rot, pts_org[3].T)

            # Calculate Homography
            h, status = cv2.findHomography(pts_src, new_pts_org)
            #print h#, status
            h = np.linalg.inv(h)

            f = np.float32([[centroid - 5.0 - weight, 1655.0 + weight, 1.0]])
            f = f.T
            f = np.dot(h, f)
            f = f / f[2][0]
            #print f
            new_pts_org[0][0] = f[0][0]
            new_pts_org[0][1] = f[1][0]

            s = np.float32([[centroid + 5.0 + weight, 1655.0 + weight, 1.0]])
            s = s.T
            s = np.dot(h, s)
            s = s / s[2][0]
            #print s
            new_pts_org[1][0] = s[0][0]
            new_pts_org[1][1] = s[1][0]

            t = np.float32([[centroid - 5.0 - weight, 1645.0 - weight, 1.0]])
            t = t.T
            t = np.dot(h, t)
            t = t / t[2][0]
            #print t
            new_pts_org[3][0] = t[0][0]
            new_pts_org[3][1] = t[1][0]

            fo = np.float32([[centroid + 5.0 + weight, 1645.0 - weight, 1.0]])
            fo = fo.T
            fo = np.dot(h, fo)
            fo = fo / fo[2][0]
            #print fo
            new_pts_org[2][0] = fo[0][0]
            new_pts_org[2][1] = fo[1][0]

            M = cv2.getPerspectiveTransform(pts_src, new_pts_org)
            warp = cv2.warpPerspective(img, M, (1024, 768))

            cv2.imshow('screen', warp)
            #print warp
            #print "pass"
            cv2.waitKey(1)
            #time.sleep(1)
        cv2.destroyAllWindows()
Exemplo n.º 51
0
def main():
    try:
        svm = cv2.ml.SVM_load(params.HOG_SVM_PED_PATH)
    except:
        print("Missing files - SVM!")
        print("-- have you performed training to produce these files ?")
        exit()

    ##########################################################################

    # press all the go-faster buttons - i.e. speed-up using multithreads

    cv2.setUseOptimized(True)
    cv2.setNumThreads(4)

    ##########################################################################

    # directory_to_cycle_left = "left-images"
    full_path_directory_left = os.path.join(master_path_to_dataset,
                                            "left-images")

    ##########################################################################
    # create Selective Search Segmentation Object using default parameters

    ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()

    ##########################################################################

    for filename in sorted(os.listdir(full_path_directory_left)):
        # starts a counter
        start_t = cv2.getTickCount()

        full_path_filename_left, full_path_filename_right = stereo_disparity.get_lr_pair(
            filename)

        # checks to see if the input is an image and that there is a
        # corresponding right image to the current left image
        if ('.png' in filename) and (os.path.isfile(full_path_filename_right)):

            # retrieves the disparity from the filename of left image
            # to find the region boxes in selective search
            disparity = stereo_disparity.get_disparity_from_image(filename)

            # retrieves disparity frame used for calculating distance
            distance_disparity = stereo_disparity.get_distance_disparity(
                filename)

            # normalize so can view the image for selective search map
            disp_frame = cv2.normalize(src=disparity,
                                       dst=None,
                                       beta=0,
                                       alpha=255,
                                       norm_type=cv2.NORM_MINMAX)
            disp_frame = np.uint8(disp_frame)
            disp_frame = stereo_disparity.gamma_correction(disp_frame, 0.7)

            # read in the left image
            img_frame = cv2.imread(
                os.path.join(full_path_directory_left, filename),
                cv2.IMREAD_COLOR)

            # convert disp_frame to a 3 channel image
            disp_frame = np.stack((disp_frame, ) * 3, axis=-1)

            # return all of the rectangles from the image
            rects = selective_search.getRects_frame(disp_frame, 1000, ss)

            selective_search.displace_rects(rects, 135)

            # clear the rectangles from the image and display only the detections

            img_frame = cv2.imread(
                os.path.join(full_path_directory_left, filename),
                cv2.IMREAD_COLOR)

            # returns the positive regions and the rejected search regions
            # also draws the bounding boxes around the detections
            pos_rects, neg_rects, z_values = selective_search.get_result(
                rects, img_frame, svm, distance_disparity, disp_frame)

            if len(z_values) == 0:
                z_min = 0.0
            else:
                z_min = min(z_values)

            print(filename)
            print('{0} : nearest detected scene object ({1:.1f}m)'.format(
                filename.replace("_L", "_R"), z_min))

            cv2.imshow('Selective Search - Positive Object detections',
                       img_frame)

            ###################################################################

            stop_t = (
                (cv2.getTickCount() - start_t) / cv2.getTickFrequency()) * 1000

            #print('Processing time (ms): {}'.format(stop_t))

            key = cv2.waitKey(max(40, 40 - int(math.ceil(stop_t)))) & 0xFF
            # start the event loop - essential

            # cv2.waitKey() is a keyboard binding function (argument is the time in milliseconds).
            # It waits for specified milliseconds for any keyboard event.
            # If you press any key in that time, the program continues.
            # If 0 is passed, it waits indefinitely for a key stroke.
            # (bitwise and with 0xFF to extract least significant byte of multi-byte response)
            # here we use a wait time in ms. that takes account of processing time already used in the loop

            # wait 40ms or less depending on processing time taken (i.e. 1000ms / 25 fps = 40 ms)

            # e.g. if user presses "x" then exit / press "f" for fullscreen

            if (key == ord('x')):
                break
            elif (key == ord('f')):
                # full screen the same window name used by the imshow() call above
                cv2.setWindowProperty(
                    'Selective Search - Positive Object detections',
                    cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

            ss.clearImages()

    cv2.destroyAllWindows()
Exemplo n.º 52
0
            cv2.putText(img, team2.getName() + " has won", (newZeroPointX + 90, middleY + 50), font, 1, (255, 255, 255),
                        1, cv2.LINE_AA)
        elif team2.getPoints() < team1.getPoints() and totalPointCount == maxPoints:
            print(team1.getName() + " is in the lead with " + str(team1.getPoints()) + " points")
            cv2.putText(img, team1.getName() + " has won", (newZeroPointX + 90, middleY + 50), font, 1, (255, 255, 255),
                        1, cv2.LINE_AA)

        print(team2.getName() + " has scored " + str(team2.getPoints()) + " points")
        print(team1.getName() + " has scored " + str(team1.getPoints()) + " points.")

        # Draw the objects
        for i in range(len(targetArray)):
            targetArray[i].drawCircle(img)

        # Draws contours for testing purposes.
        for i in range(len(red_boxes)):
            cv2.drawContours(img, [red_boxes[i]], 0, (0, 0, 255), 2)
        for i in range(len(blue_boxes)):
            cv2.drawContours(img, [blue_boxes[i]], 0, (255, 0, 0), 2)

        croppedImg = img[cropYTop:-cropYbottom, cropXLeft:-cropXRight]
        cv2.namedWindow("testIMG", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("testIMG", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow('testIMG', croppedImg)
        cv2.imshow('test', croppedFrame)
        cv2.imshow('lasers', frame)

        # Wait until q is pressed to exit loop. This only works when openCV has an active window.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Exemplo n.º 53
0
def run_yolo(ct, ot, rs, fs):
    LABELS_FILE = "yolo-tiny/coco.names"
    CONFIG_FILE = "yolo-tiny/yolov3-tiny.cfg"
    WEIGHTS_FILE = "yolo-tiny/yolov3-tiny.weights"

    LABELS = open(LABELS_FILE).read().strip().split('\n')
    """PARAMETERS"""
    CONFIDENCE = ct  # Only display objects with more than CONFIDENCE. (0-1)
    THRESHOLD = ot  # decides how much objects are allowed to overlap. (0-1) 1 Being maximum overlap.
    RESOLUTION_SCALE = rs  # cv2 stream the video in a scale of 1440x900. This can be scaled. (0-inf)
    FULLSCREEN = fs
    """PARAMETERS"""

    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype='uint8')

    net = cv2.dnn.readNetFromDarknet(CONFIG_FILE, WEIGHTS_FILE)
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    camera = cv2.VideoCapture(0)
    (W, H) = (None, None)

    while True:
        (return_value, image) = camera.read()

        if W is None or H is None:
            (H, W) = image.shape[:2]

        blob = cv2.dnn.blobFromImage(image,
                                     1 / 255.0, (416, 416),
                                     swapRB=True,
                                     crop=False)
        net.setInput(blob)
        layer_output = net.forward(ln)

        boxes = []
        confidences = []
        class_ids = []

        for output in layer_output:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]

                if confidence > CONFIDENCE:
                    box = detection[0:4] * np.array([W, H, W, H])
                    (center_x, center_y, width, height) = box.astype("int")

                    x = int(center_x - (width / 2))
                    y = int(center_y - (height / 2))

                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE, THRESHOLD)

        if len(idxs) > 0:
            for i in idxs.flatten():
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])

                color = [int(c) for c in COLORS[class_ids[i]]]
                cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
                text = "{}: {:.4f}".format(LABELS[class_ids[i]],
                                           confidences[i])
                cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, color, 2)

        if FULLSCREEN == 1:
            cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_FULLSCREEN)
        cv2.imshow(
            "window",
            cv2.resize(
                image,
                (int(1440 * RESOLUTION_SCALE), int(900 * RESOLUTION_SCALE))))
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    camera.release()
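Exemplo n.º 54
0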
    def map(self):
        side_up_transformations = {}
        if self.__down_marker_id != "":
            down_side_transformations = {}
        for side_marker_id in self.__side_marker_ids:
            if side_marker_id != "":
                side_up_transformations[side_marker_id] = []
                if self.__down_marker_id != "":
                    down_side_transformations[side_marker_id] = []
        if os.path.exists(self.__calibration_dir) and os.path.isfile(
                "{}/cam_mtx.npy".format(
                    self.__calibration_dir)) and os.path.isfile(
                        "{}/dist.npy".format(self.__calibration_dir)):
            cam_mtx = np.load("{}/cam_mtx.npy".format(self.__calibration_dir))
            print(cam_mtx)
            dist = np.load("{}/dist.npy".format(self.__calibration_dir))
        else:
            for calibration in self.__database_calibrations:
                if calibration.to_dict().get('name',
                                             '') == self.__calibration_name:
                    calibration_dict = calibration.to_dict()
                    cam_mtx = np.array(
                        [[
                            calibration_dict['camera matrix'][0], 0,
                            calibration_dict['camera matrix'][2]
                        ],
                         [
                             0, calibration_dict['camera matrix'][1],
                             calibration_dict['camera matrix'][3]
                         ], [0, 0, 1]])
                    dist = np.array(
                        [calibration_dict['distortion coefficients']])

        win_name = "Markers Cube Calibration Image Capture"
        cv2.namedWindow(win_name, cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty(win_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

        # Uncomment the line below when not using DroidCam
        #video_capture = cv2.VideoCapture(self.__video_source, cv2.CAP_DSHOW)
        video_capture = cv2.VideoCapture(self.__video_source)

        video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

        while True:
            _, frame = video_capture.read()

            done = True
            for side_marker_id in self.__side_marker_ids:
                if side_marker_id != "":
                    done &= len(side_up_transformations[side_marker_id]
                                ) == self.__acquire_min_count

                    if self.__down_marker_id != "":
                        done &= len(down_side_transformations[side_marker_id]
                                    ) == self.__acquire_min_count

            if not done:
                corners, ids = self.__detect_markers(frame)

                font = cv2.FONT_HERSHEY_SIMPLEX
                scale = 0.6
                blue = (255, 0, 0)
                red = (0, 0, 255)
                green = (0, 255, 0)

                if np.all(ids is not None):
                    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                        corners, float(self.__markers_length), cam_mtx, dist)

                    up_marker_index = None
                    down_marker_index = None
                    side_marker_index = None
                    for i in range(0, ids.size):
                        if ids[i][0] == self.__up_marker_id:
                            up_marker_index = i
                        elif ids[i][0] == self.__down_marker_id:
                            down_marker_index = i
                        elif side_marker_index is None or tvecs[
                                side_marker_index][0][2] > tvecs[i][0][2]:
                            side_marker_index = i
                        else:
                            pass

                    target_marker_index = None
                    other_marker_index = None
                    destination_index = None
                    transformation_destination = None
                    if up_marker_index is not None and side_marker_index is not None:
                        target_marker_index = up_marker_index
                        other_marker_index = side_marker_index
                        destination_index = side_marker_index
                        transformation_destination = side_up_transformations
                    elif down_marker_index is not None and side_marker_index is not None:
                        target_marker_index = side_marker_index
                        other_marker_index = down_marker_index
                        destination_index = side_marker_index
                        transformation_destination = down_side_transformations

                    if target_marker_index is not None and other_marker_index is not None and destination_index is not None and transformation_destination is not None:
                        cv2.putText(
                            frame, "marker {} -> marker {} mapping".format(
                                ids[other_marker_index][0],
                                ids[target_marker_index][0]), (0, 20), font,
                            scale, blue, 2, cv2.LINE_AA)

                        acquired_transformations_count = len(
                            transformation_destination[ids[destination_index]
                                                       [0]])
                        if acquired_transformations_count < self.__acquire_min_count:
                            cv2.putText(
                                frame, "Count: {}".format(
                                    acquired_transformations_count), (0, 40),
                                font, scale, red, 2, cv2.LINE_AA)

                            target_marker_transformation = self.__get_transformation_matrix(
                                rvecs[target_marker_index],
                                tvecs[target_marker_index])
                            other_marker_transformation = self.__get_transformation_matrix(
                                rvecs[other_marker_index],
                                tvecs[other_marker_index])
                            transformation_other_to_target = np.dot(
                                np.linalg.inv(other_marker_transformation),
                                target_marker_transformation)

                            acquire = {}
                            acquire["target"] = target_marker_transformation
                            acquire["other"] = other_marker_transformation
                            acquire[
                                "other_to_target"] = transformation_other_to_target
                            transformation_destination[ids[destination_index]
                                                       [0]].append(acquire)

                        else:
                            cv2.putText(frame, "Done!", (0, 40), font, scale,
                                        green, 2, cv2.LINE_AA)

                cv2.putText(frame, "Q - Quit ", (0, 65), font, scale, blue, 2,
                            cv2.LINE_AA)

                cv2.imshow(win_name, frame)

            else:
                cv2.putText(frame, "Markers cube mapping finished, saving ...",
                            (0, 40), font, scale, green, 2, cv2.LINE_AA)

                cv2.imshow(win_name, frame)
                cv2.waitKey(1000)

                if self.__down_marker_id != "":
                    transformations = self.__compute_transformations(
                        side_up_transformations, down_side_transformations)
                else:
                    transformations = self.__compute_transformations(
                        side_up_transformations)

                settings = MarkersCubeDetectionSettings(
                    self.__markers_length, self.__up_marker_id,
                    self.__side_marker_ids, self.__down_marker_id,
                    transformations)
                settings.persist(self.__cube_id)

                cv2.waitKey(1000)
                video_capture.release()
                cv2.destroyAllWindows()
                break

            pressed_key = cv2.waitKey(1)
            if pressed_key == ord('q'):
                video_capture.release()
                cv2.destroyAllWindows()
                break
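
# Note: __get_transformation_matrix() is a private helper of this class and is not
# included in the snippet above. A minimal sketch of one common implementation,
# assuming it should turn an ArUco rvec/tvec pair into a 4x4 homogeneous transform
# (an illustration under that assumption, not the project's actual code):
import numpy as np
import cv2

def get_transformation_matrix_sketch(rvec, tvec):
    rotation, _ = cv2.Rodrigues(rvec)                 # 3x3 rotation from the Rodrigues vector
    transform = np.eye(4)
    transform[:3, :3] = rotation
    transform[:3, 3] = np.asarray(tvec).reshape(3)    # translation column
    return transform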
Exemplo n.º 55
0
def main():
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic and visualizes the head detection with an orange bounding box around a head.
           Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.
           
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker",
                        help="The name of the kafka broker.",
                        type=str)
    parser.add_argument("prefix",
                        help="Prefix of topics (base|skeleton).",
                        type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o',
                        '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )

    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    overlay = cv2.imread('resources/powered_by_white.png',
                         cv2.IMREAD_UNCHANGED)

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.head_detection.Image.jpg"

    # handle full screen
    window_name = "DEMO: Head detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker, "detection",
        [TopicInfo(image_topic),
         TopicInfo(detection_topic)], 100, None, True)
    i = 0
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            img = v[args.prefix]["0"]["image"]
            if type(img) == np.ndarray:
                # draw bounding_box
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            img, object_detection_record["bounding_box"],
                            (10, 95, 255))

                # draw ultinous logo
                img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)

                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1

                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
Exemplo n.º 56
0
def main():
    # parse and check command line args
    parser = argparse.ArgumentParser(epilog="""Description:
           Plays and optionally dumps video from a jpeg topic (a topic that ends with Image.jpg).""",
                                     formatter_class=RawTextHelpFormatter)
    parser.add_argument("broker",
                        help="The name of the kafka broker.",
                        type=str)
    parser.add_argument("topic",
                        help="The name of topic (*.Image.jpg).",
                        type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d',
                        "--dump",
                        help="if set images are stored in jpg files",
                        action='store_true')
    parser.add_argument('-o', "--offset", type=int, default=-1)
    args = parser.parse_args()
    if not args.topic.endswith(".Image.jpg"):
        raise argparse.ArgumentTypeError(
            'The topic must be a jpeg image topic (should end with .Image.jpg)'
        )

    # handle full screen
    window_name = args.topic
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # calc start time and create consumer
    c = Consumer({
        'bootstrap.servers': args.broker,
        'group.id': 'display',
        'auto.offset.reset': 'latest'
    })
    c.assign(
        [TopicPartition(topic=args.topic, partition=0, offset=args.offset)])

    # read frames and show (or dump) them
    while True:
        msg = c.poll(1.0)

        if msg is None:
            continue
        if msg.error():
            print("Consumer error: {}".format(msg.error()))
            continue

        time = msg.timestamp()[1]
        img = decode_image_message(msg)
        if type(img) == np.ndarray:
            if args.dump:
                cv2.imwrite(args.topic + "_" + str(time) + ".jpg", img)
            cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == ord('q'):  # the 'q' key stops playback
            break
        elif k == -1:  # waitKey normally returns -1, so do not print anything
            continue
        else:
            print("Press 'q' to exit.")
Exemplo n.º 57
0
# Load the face detector and create the face recogniser
face_recogniser = face_recognition()

# Create a list of training images and labels
labels = []
images = glob('./my_photos/*/*', recursive=True)
for filename in images:
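    # the parent directory name (./my_photos/<person>/<image>) becomes the label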
    labels.append(filename.split(os.path.sep)[-2].title())

# Train using this dataset
face_recogniser.train(images, labels)

# Now start video feed
print('Checking for camera...')
cam = cv2.VideoCapture(0)
if (not cam.isOpened()):
    print("no cam!")
    sys.exit()
print("cam: ok.")

cv2.namedWindow('Face Recognition', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('Face Recognition', cv2.WND_PROP_FULLSCREEN,
                      cv2.WINDOW_FULLSCREEN)

while True:
    ret, frame = cam.read()
    labelled_frame = face_recogniser.recognise(frame)

    cv2.imshow('Face Recognition', labelled_frame)
    # without a waitKey call the window never refreshes; press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
Exemplo n.º 58
0
def main():
    openness=[1,1]
    MIN_FACE=100
    playingWelcome=False
    while True:
        if (cap.isOpened()):
            ret,frame=cap.read()
            cv2.namedWindow('frame',cv2.WINDOW_NORMAL)
            cv2.setWindowProperty('frame',cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
            cv2.imshow('frame',frame)
            cv2.waitKey(1)
        result,msg=datalink.recvRunOnce()
        if result:
            YtVisionSeedModel=vs.YtDataLink.YtVisionSeedModel
            biggestArea=0
            hasPerson=False
            rect=result.getResult([YtVisionSeedModel.FACE_DETECTION,0])
            if(rect):
                if(rect.w*rect.h>biggestArea):
                    biggestArea=rect.w*rect.h
                pose=result.getResult([YtVisionSeedModel.FACE_DETECTION,0,YtVisionSeedModel.FACE_POSE])
                shape=result.getResult([YtVisionSeedModel.FACE_DETECTION,0,YtVisionSeedModel.FACE_LANDMARK])
                if(shape):
                    shape=shape.faceShape
                    yaw=pose.array[1]
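                    # eye openness is approximated as vertical lid distance divided by
                    # horizontal eye width; the landmark indices used here (2/6 vertical,
                    # 0/4 horizontal) are assumed from the VisionSeed eye contour ordering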
                    vec1f=np.array([shape.leftEye[6].x,shape.leftEye[6].y])
                    vec2f=np.array([shape.leftEye[2].x,shape.leftEye[2].y])
                    l1=np.sqrt(np.sum(np.square(vec1f-vec2f)))
                    vec3f=np.array([shape.leftEye[0].x,shape.leftEye[0].y])
                    vec4f=np.array([shape.leftEye[4].x,shape.leftEye[4].y])
                    l2=np.sqrt(np.sum(np.square(vec3f-vec4f)))
                    openness[0]=l1/l2
                    vec1r=np.array([shape.rightEye[6].x,shape.rightEye[6].y])
                    vec2r=np.array([shape.rightEye[2].x,shape.rightEye[2].y])
                    r1=np.sqrt(np.sum(np.square(vec1r-vec2r)))
                    vec3r=np.array([shape.rightEye[0].x,shape.rightEye[0].y])
                    vec4r=np.array([shape.rightEye[4].x,shape.rightEye[4].y])
                    r2=np.sqrt(np.sum(np.square(vec3r-vec4r)))
                    openness[1]=r1/r2
                    hasPerson=biggestArea > MIN_FACE*MIN_FACE
                    eyeclosed=openness[0 if yaw<0 else 1]<0.1

                    if(working and hasPerson):
                        if(len(ringEyeOpen) == ringEyeOpenTotal):
                            ringEyeOpen.pop()
                        ringEyeOpen.appendleft(eyeclosed)
                        if(reallyClosed()):
                            # the alarm is allowed to interrupt the welcome audio
                            if playingWelcome or not pygame.mixer.music.get_busy():
                                print('ALARM')
                                playingWelcome=False
                                pygame.mixer.music.load("/res/alarming.mp3")
                                pygame.mixer.music.play()
                    '''checkEyeBlink(working and hasPerson)'''

            if (len(ringHasPerson) == ringHasPersonTotal):
                ringHasPerson.pop()
            ringHasPerson.appendleft(hasPerson)
            rnp = reallyNewPerson()
            if (rnp == 1):
                print('WELCOME')
                playingWelcome=True
                pygame.mixer.music.load("/res/welcome.mp3")
                pygame.mixer.music.play()

            elif (rnp == 2):
                print('BYE')
                pygame.mixer.music.load("/res/bye.mp3")
                pygame.mixer.music.play()
Exemplo n.º 59
0
def video_thread():
    cv2.namedWindow("pendulum", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("pendulum", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    key_code = 0
    cap = cv2.VideoCapture(0)
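    # property ids 3 and 4 are cv2.CAP_PROP_FRAME_WIDTH / cv2.CAP_PROP_FRAME_HEIGHT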
    cap.set(3, 1920)
    cap.set(4, 1080)
    #cap.set(3, 1600)
    #cap.set(4, 900)
    #out = cv2.VideoWriter('vids/MulNoise.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (640,480))
    l_filter = [
                effects.Displacement_zoom_filter("../media/triangle.png"),
                effects.Color_plane_filter(color=(0, 0, 0xFF)), #red
                effects.Color_plane_filter(color=(0, 0xFF, 0xFF)), #yellow
                effects.Color_plane_filter(), #pink / purple
                effects.Displacement_zoom_filter("../media/square.png"),
                effects.Vertical_sin_effect(),
                effects.Mirror_effect(),
                effects.Displacement_zoom_filter("../media/circle.png"),
                effects.Displacement_zoom_filter("../media/square_pattern.png"), #square pattern
                effects.Motion_blur_filter(),
                effects.Border_filter(),
                #effects.Displacement_zoom_filter("../media/triangle_pattern.png"), #triag
               ]
    r_filter = [
                effects.Rotate_grad_filter("../media/triangle.png", angle_delta=120, time_delta=10**5),
                effects.Pixelate_grad_filter(),
                effects.Horizontal_distort_effect(),
                effects.Duatone_filter(dua_layers=(0, 2), other_layer=(1, 0), threshold=100), #red, blue
                effects.Rotate_grad_filter("../media/square.png", angle_delta=90, time_delta=10**5),
                effects.RGB_shift_filter(),
                effects.Png_overlay_filter("../videoplayback3/"),
                effects.Rotate_grad_filter("../media/circle.png"),
                effects.Multiply_filter(),
                #effects.Kaleidoscope8_filter(),
                effects.Kaleidoscope_filter(HOR=True, VERT=True),#4
                effects.Kaleidoscope_filter(HOR=False, VERT=True)#2
               ]
    idle_filter = effects.Png_overlay_filter("../idle_png/", fps=1)
    #r_filter = effects.Kaleidoscope_grad_filter()
    effect_number = 0
    osc_per_effect = 2
    osc_number = 0
    first_zero = True
    ZERO_THRESH = 400
    COUNTER_THRESH = 200
    dir_left = True
    idle_time_start = datetime.now()
    while (key_code != 27): #until esc key is pressed
        #ret, frame = cap.read()
        frame = cap.read()[1][180:,160:160+1600,:]
        frame[:,:,:] = cv2.flip(frame, 1) #VER FLIP
        if frame.shape != (1920, 1080, 3):
            frame = cv2.resize(frame, effects.DEFAULT_SIZE, interpolation=cv2.INTER_AREA)
        val = sensor.values[5]
        #print(val)
        intensity = 1
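        # the pendulum reading selects the visuals: one side of the swing applies the
        # current r_filter, the other side advances to the next l_filter/r_filter pair,
        # and sitting near zero for more than 5 seconds switches to the idle overlay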
        if (val > ZERO_THRESH):
            r_filter[effect_number].set_intensity(intensity)
            r_filter[effect_number].apply_filter(frame)
            dir_left = True
            first_zero = True
        elif (val < -ZERO_THRESH):
            if dir_left:
                #effect_number = np.random.randint(0, len(l_filter))
                effect_number = (effect_number + 1) % len(l_filter)
                r_filter[effect_number].reset()
                l_filter[effect_number].reset()
                dir_left = False
                first_zero = False
            l_filter[effect_number].set_intensity(intensity)
            l_filter[effect_number].apply_filter(frame)
            dir_left = False
            first_zero = True
        else:
            if dir_left:
                #effect_number = np.random.randint(0, len(l_filter))
                effect_number = (effect_number + 1) % len(l_filter)
                r_filter[effect_number].reset()
                l_filter[effect_number].reset()
                dir_left = False
            if first_zero:
                idle_time_start = datetime.now()
                first_zero = False
            else:
                time_diff = datetime.now() - idle_time_start
                if time_diff.seconds > 5:
                    idle_filter.apply_filter(frame)
        cv2.imshow('pendulum', frame)
        key_code = cv2.waitKey(1)
    cap.release()
    cv2.destroyAllWindows()
    global done
    done = 1
Exemplo n.º 60
0
        def _visualize_output():
            last_frame_index = 0
            last_frame_time = time.time()
            fps_history = []
            all_gaze_histories = []

            if args.fullscreen:
                cv.namedWindow('vis', cv.WND_PROP_FULLSCREEN)
                cv.setWindowProperty('vis', cv.WND_PROP_FULLSCREEN,
                                     cv.WINDOW_FULLSCREEN)

            while True:
                # If no output to visualize, show unannotated frame
                if inferred_stuff_queue.empty():
                    next_frame_index = last_frame_index + 1
                    if next_frame_index in data_source._frames:
                        next_frame = data_source._frames[next_frame_index]
                        if 'faces' in next_frame and len(
                                next_frame['faces']) == 0:
                            if not args.headless:
                                cv.imshow('vis', next_frame['bgr'])
                            if args.record_video:
                                video_out_queue.put_nowait(next_frame_index)
                            last_frame_index = next_frame_index
                    if cv.waitKey(1) & 0xFF == ord('q'):
                        return
                    continue

                # Get output from neural network and visualize
                output = inferred_stuff_queue.get()
                bgr = None
                for j in range(batch_size):
                    frame_index = output['frame_index'][j]
                    if frame_index not in data_source._frames:
                        continue
                    frame = data_source._frames[frame_index]

                    # Decide which landmarks are usable
                    heatmaps_amax = np.amax(output['heatmaps'][j, :].reshape(
                        -1, 18),
                                            axis=0)
                    can_use_eye = np.all(heatmaps_amax > 0.7)
                    can_use_eyelid = np.all(heatmaps_amax[0:8] > 0.75)
                    can_use_iris = np.all(heatmaps_amax[8:16] > 0.8)

                    start_time = time.time()
                    eye_index = output['eye_index'][j]
                    bgr = frame['bgr']
                    eye = frame['eyes'][eye_index]
                    eye_image = eye['image']
                    eye_side = eye['side']
                    eye_landmarks = output['landmarks'][j, :]
                    eye_radius = output['radius'][j][0]
                    if eye_side == 'left':
                        eye_landmarks[:, 0] = eye_image.shape[
                            1] - eye_landmarks[:, 0]
                        eye_image = np.fliplr(eye_image)

                    # Embed eye image and annotate for picture-in-picture
                    eye_upscale = 2
                    eye_image_raw = cv.cvtColor(cv.equalizeHist(eye_image),
                                                cv.COLOR_GRAY2BGR)
                    eye_image_raw = cv.resize(eye_image_raw, (0, 0),
                                              fx=eye_upscale,
                                              fy=eye_upscale)
                    eye_image_annotated = np.copy(eye_image_raw)
                    if can_use_eyelid:
                        cv.polylines(
                            eye_image_annotated,
                            [
                                np.round(
                                    eye_upscale * eye_landmarks[0:8]).astype(
                                        np.int32).reshape(-1, 1, 2)
                            ],
                            isClosed=True,
                            color=(255, 255, 0),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )
                    if can_use_iris:
                        cv.polylines(
                            eye_image_annotated,
                            [
                                np.round(
                                    eye_upscale * eye_landmarks[8:16]).astype(
                                        np.int32).reshape(-1, 1, 2)
                            ],
                            isClosed=True,
                            color=(0, 255, 255),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )
                        cv.drawMarker(
                            eye_image_annotated,
                            tuple(
                                np.round(eye_upscale *
                                         eye_landmarks[16, :]).astype(
                                             np.int32)),
                            color=(0, 255, 255),
                            markerType=cv.MARKER_CROSS,
                            markerSize=4,
                            thickness=1,
                            line_type=cv.LINE_AA,
                        )
                    face_index = int(eye_index / 2)
                    eh, ew, _ = eye_image_raw.shape
                    v0 = face_index * 2 * eh
                    v1 = v0 + eh
                    v2 = v1 + eh
                    u0 = 0 if eye_side == 'left' else ew
                    u1 = u0 + ew
                    bgr[v0:v1, u0:u1] = eye_image_raw
                    bgr[v1:v2, u0:u1] = eye_image_annotated

                    # Visualize preprocessing results
                    frame_landmarks = (frame['smoothed_landmarks']
                                       if 'smoothed_landmarks' in frame else
                                       frame['landmarks'])
                    for f, face in enumerate(frame['faces']):
                        try:
                            for landmark in frame_landmarks[f][:-1]:
                                cv.drawMarker(
                                    bgr,
                                    tuple(np.round(landmark).astype(np.int32)),
                                    color=(0, 0, 255),
                                    markerType=cv.MARKER_STAR,
                                    markerSize=2,
                                    thickness=1,
                                    line_type=cv.LINE_AA)
                        except IndexError:
                            print(
                                f"Caught IndexError on {frame['frame_index']}")
                        cv.rectangle(
                            bgr,
                            tuple(np.round(face[:2]).astype(np.int32)),
                            tuple(
                                np.round(np.add(face[:2],
                                                face[2:])).astype(np.int32)),
                            color=(0, 255, 255),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )

                    # Transform predictions
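                    # append a point one eyeball radius to the right of the eyeball centre,
                    # map all landmarks back to full-frame coordinates with the inverse
                    # transform, and recover the eyeball radius in frame pixels from the
                    # distance between the two mapped points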
                    eye_landmarks = np.concatenate([
                        eye_landmarks,
                        [[
                            eye_landmarks[-1, 0] + eye_radius,
                            eye_landmarks[-1, 1]
                        ]]
                    ])
                    eye_landmarks = np.asmatrix(
                        np.pad(eye_landmarks, ((0, 0), (0, 1)),
                               'constant',
                               constant_values=1.0))
                    eye_landmarks = (
                        eye_landmarks *
                        eye['inv_landmarks_transform_mat'].T)[:, :2]
                    eye_landmarks = np.asarray(eye_landmarks)
                    eyelid_landmarks = eye_landmarks[0:8, :]
                    iris_landmarks = eye_landmarks[8:16, :]
                    iris_centre = eye_landmarks[16, :]
                    eyeball_centre = eye_landmarks[17, :]
                    eyeball_radius = np.linalg.norm(eye_landmarks[18, :] -
                                                    eye_landmarks[17, :])

                    # Smooth and visualize gaze direction
                    num_total_eyes_in_frame = len(frame['eyes'])
                    if len(all_gaze_histories) != num_total_eyes_in_frame:
                        all_gaze_histories = [
                            list() for _ in range(num_total_eyes_in_frame)
                        ]
                    gaze_history = all_gaze_histories[eye_index]
                    if can_use_eye:
                        # Visualize landmarks
                        cv.drawMarker(  # Eyeball centre
                            bgr,
                            tuple(np.round(eyeball_centre).astype(np.int32)),
                            color=(0, 255, 0),
                            markerType=cv.MARKER_CROSS,
                            markerSize=4,
                            thickness=1,
                            line_type=cv.LINE_AA,
                        )
                        # cv.circle(  # Eyeball outline
                        #     bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
                        #     int(np.round(eyeball_radius)), color=(0, 255, 0),
                        #     thickness=1, lineType=cv.LINE_AA,
                        # )

                        # Draw "gaze"
                        # from models.elg import estimate_gaze_from_landmarks
                        # current_gaze = estimate_gaze_from_landmarks(
                        #     iris_landmarks, iris_centre, eyeball_centre, eyeball_radius)
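                        # approximate the gaze angles from landmark geometry: theta (pitch)
                        # from the vertical iris offset over the eyeball radius, phi (yaw)
                        # from the horizontal offset over the radius scaled by cos(theta)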
                        i_x0, i_y0 = iris_centre
                        e_x0, e_y0 = eyeball_centre
                        theta = -np.arcsin(
                            np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
                        phi = np.arcsin(
                            np.clip((i_x0 - e_x0) /
                                    (eyeball_radius * -np.cos(theta)), -1.0,
                                    1.0))
                        current_gaze = np.array([theta, phi])
                        gaze_history.append(current_gaze)
                        gaze_history_max_len = 10
                        if len(gaze_history) > gaze_history_max_len:
                            gaze_history = gaze_history[-gaze_history_max_len:]
                        util.gaze.draw_gaze(bgr,
                                            iris_centre,
                                            np.mean(gaze_history, axis=0),
                                            length=120.0,
                                            thickness=1)
                    else:
                        gaze_history.clear()

                    if can_use_eyelid:
                        cv.polylines(
                            bgr,
                            [
                                np.round(eyelid_landmarks).astype(
                                    np.int32).reshape(-1, 1, 2)
                            ],
                            isClosed=True,
                            color=(255, 255, 0),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )

                    if can_use_iris:
                        cv.polylines(
                            bgr,
                            [
                                np.round(iris_landmarks).astype(
                                    np.int32).reshape(-1, 1, 2)
                            ],
                            isClosed=True,
                            color=(0, 255, 255),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )
                        cv.drawMarker(
                            bgr,
                            tuple(np.round(iris_centre).astype(np.int32)),
                            color=(0, 255, 255),
                            markerType=cv.MARKER_CROSS,
                            markerSize=4,
                            thickness=1,
                            line_type=cv.LINE_AA,
                        )

                    dtime = 1e3 * (time.time() - start_time)
                    if 'visualization' not in frame['time']:
                        frame['time']['visualization'] = dtime
                    else:
                        frame['time']['visualization'] += dtime

                    def _dtime(before_id, after_id):
                        return int(1e3 * (frame['time'][after_id] -
                                          frame['time'][before_id]))

                    def _dstr(title, before_id, after_id):
                        return '%s: %dms' % (title, _dtime(
                            before_id, after_id))

                    if eye_index == len(frame['eyes']) - 1:
                        # Calculate timings
                        frame['time']['after_visualization'] = time.time()
                        fps = int(
                            np.round(1.0 / (time.time() - last_frame_time)))
                        fps_history.append(fps)
                        if len(fps_history) > 60:
                            fps_history = fps_history[-60:]
                        fps_str = '%d FPS' % np.mean(fps_history)
                        last_frame_time = time.time()
                        fh, fw, _ = bgr.shape
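                        # draw the FPS text twice (dark, then light with a 1px offset)
                        # for a simple drop-shadow effect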
                        cv.putText(bgr,
                                   fps_str,
                                   org=(fw - 110, fh - 20),
                                   fontFace=cv.FONT_HERSHEY_DUPLEX,
                                   fontScale=0.8,
                                   color=(0, 0, 0),
                                   thickness=1,
                                   lineType=cv.LINE_AA)
                        cv.putText(bgr,
                                   fps_str,
                                   org=(fw - 111, fh - 21),
                                   fontFace=cv.FONT_HERSHEY_DUPLEX,
                                   fontScale=0.79,
                                   color=(255, 255, 255),
                                   thickness=1,
                                   lineType=cv.LINE_AA)
                        if not args.headless:
                            cv.imshow('vis', bgr)
                        last_frame_index = frame_index

                        # Record frame?
                        if args.record_video:
                            video_out_queue.put_nowait(frame_index)

                        # Quit?
                        if cv.waitKey(1) & 0xFF == ord('q'):
                            return

                        # Print timings
                        if frame_index % 60 == 0:
                            latency = _dtime('before_frame_read',
                                             'after_visualization')
                            processing = _dtime('after_frame_read',
                                                'after_visualization')
                            timing_string = ', '.join([
                                _dstr('read', 'before_frame_read',
                                      'after_frame_read'),
                                _dstr('preproc', 'after_frame_read',
                                      'after_preprocessing'),
                                'infer: %dms' %
                                int(frame['time']['inference']),
                                'vis: %dms' %
                                int(frame['time']['visualization']),
                                'proc: %dms' % processing,
                                'latency: %dms' % latency,
                            ])
                            print('%08d [%s] %s' %
                                  (frame_index, fps_str, timing_string))