Example #1
def wall_follow_demo():
    # NBR_PERIOD, MOTION_TV, WALL_TIMEOUT, LED_BRIGHTNESS and the STATE_*
    # constants are module-level definitions not shown in this excerpt.
    velocity.init(0.22, 40, 0.5, 0.1)
    leds.init()
    pose.init()
    motion.init()
    neighbors.init(NBR_PERIOD)

    state = STATE_IDLE
    wall_time = 0
    
    while True:
        # Do updates
        leds.update()
        pose.update()
        velocity.update()
        new_nbrs = neighbors.update()
        
        nbrList = neighbors.get_neighbors()
        tv = 0
        rv = 0

        # this is the main finite-state machine
        if state == STATE_IDLE:
            leds.set_pattern('r', 'circle', LED_BRIGHTNESS)
            if new_nbrs:
                print "idle"
            if rone.button_get_value('r'):
                state = STATE_LOOK_FOR_WALL
            
        elif state == STATE_LOOK_FOR_WALL:
            leds.set_pattern('r', 'blink_fast', LED_BRIGHTNESS)
            if new_nbrs:
                print "look for wall"
            tv = MOTION_TV
            obs = neighbors.get_obstacles()
            if obs is not None:
                state = STATE_WALL_FOLLOW
            
        elif state == STATE_WALL_FOLLOW:
            leds.set_pattern('b', 'blink_fast', LED_BRIGHTNESS)
            if new_nbrs:
                print "wall follow"
            # follow the wall
            (tv, rv, active) = wall_follow(MOTION_TV / 2)
            if active:
                wall_time = sys.time()
            if sys.time() > (wall_time + WALL_TIMEOUT):
                state = STATE_LOOK_FOR_WALL
                
        # end of the FSM
                        
        # set the velocities
        velocity.set_tvrv(tv, rv)
        
        # set the message
        hba.set_msg(0, 0, 0)
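
The demo delegates the actual steering to a wall_follow() helper that this page does not show. For orientation only, here is a minimal sketch of a proportional wall-following controller with the same (tv, rv, active) return shape as the call site above; the obstacle object's bearing() accessor and the gain constant are assumptions, not the r-one library's actual API.

import math

WALL_KP = 0.8  # hypothetical proportional gain, not from the library

def wall_follow(tv_desired):
    # Sketch only: assumes neighbors.get_obstacles() returns None when no
    # wall is visible, and an object with a bearing() method (in radians)
    # when one is -- both of these are assumptions.
    obs = neighbors.get_obstacles()
    if obs is None:
        # no wall in view: drive straight and report the follower inactive
        return (tv_desired, 0, False)
    # turn so the wall sits at roughly +90 degrees (the robot's left side)
    error = obs.bearing() - math.pi / 2
    rv = WALL_KP * error
    return (tv_desired, rv, True)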
Example #2
def initialization(t, state):
    state.wnd_main = "Blink Detection"
    state.wnd_debug = "Diff VC"
    state.streaming = True
    state.debug = False
    state.init_stage = False
    state.tracking_stage = False
    state.usage_text = "'s' Start - 'r' Reset - 'q' Quit"
    state.locating_text = "Mode : Locating eye..."
    state.tracking_text = "Mode : Tracking eye..."
    state.blinked_text = "*Blinked*"
    state.prev = None
    state.diff = None
    state.tpl = ()
    state.comps = ()
    state.blink = ()
    state.color = (0, 255, 0)
    state.diff_color = (255, 255, 0)
    state.text_color = (0, 0, 255)
    state.font = cv2.FONT_HERSHEY_PLAIN
    state.delay = 0
    # Initialize VideoCapture
    state.vc, state.kernel = motion.init(state.wnd_main)
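
Since initialization() only attaches attributes to whatever state object the caller passes in, a bare namespace is enough to drive it. A small usage sketch, assuming cv2 and this project's motion module are importable; the value of t is a placeholder, since the body shown never reads it.

import types

state = types.SimpleNamespace()  # any attribute-bearing object works
initialization(0, state)         # t is unused in the body shown above
print(state.wnd_main)            # -> "Blink Detection"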
Example #3
    usage_text = "'s' Start - 'r' Reset - 'q' Quit"
    locating_text = "Mode : Locating eye..."
    tracking_text = "Mode : Tracking eye..."
    blinked_text = "*Blinked*"
    prev = None
    diff = None
    tpl = ()
    comps = ()
    blink = ()
    color = (0, 255, 0)
    diff_color = (255, 255, 0)
    text_color = (0, 0, 255)
    font = cv2.FONT_HERSHEY_PLAIN
    delay = 0
    # Initialize VideoCapture
    vc, kernel = motion.init(wnd_main)
    while streaming:
        # Grab the next frame; read() returns (retval, frame)
        key, frame = vc.read()
        # Write usage text
        cv2.putText(frame, usage_text, (20, 20), font, 1.0, text_color)
        if init_stage:
            # Difference sequenced frames and find all connected components
            diff, contours = motion.get_components(frame, prev, kernel)
            comps = motion.get_moved_components(contours, 5, 5)
            # If not in the tracking stage yet, try to find eyes within the contours
            if contours is not None and not tracking_stage:
                cv2.putText(frame, locating_text, (20, 220), font, 1.0, text_color)
                tracking_stage, tpl = is_eyes(comps, frame)
        # Get the ROI from the eye template against the current frame
        if tracking_stage:
            cv2.putText(frame, tracking_text, (20, 220), font, 1.0, color)
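
The excerpt stops before the keyboard handling, but usage_text advertises 's', 'r' and 'q'. A sketch of the usual OpenCV pattern for wiring such keys follows; the original project's exact handling is not shown on this page, so treat the bindings as illustrative.

# Illustrative continuation inside the `while streaming:` loop:
cv2.imshow(wnd_main, frame)
k = cv2.waitKey(10) & 0xFF       # poll the keyboard every ~10 ms
if k == ord('s'):                # start locating the eye
    init_stage = True
elif k == ord('r'):              # reset both stages
    init_stage = tracking_stage = False
elif k == ord('q'):              # quit the stream
    streaming = False
prev = frame                     # keep this frame for the next difference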
Example #4
def init(kff, kff_offset, kp, ki):
    velocity.init(kff, kff_offset, kp, ki)
    leds.init()
    pose.init()
    motion.init()
    neighbors.init(NBR_PERIOD)
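
For reference, wall_follow_demo() in Example #1 passes the same four gains straight to velocity.init(), so calling this wrapper with those values reproduces that startup:

# Same controller gains as wall_follow_demo() in Example #1
init(0.22, 40, 0.5, 0.1)  # kff, kff_offset, kp, ki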