def main():
    """track a face with FIDO's head and base"""
    global pos_x
    global pos_y

    init_publisher()
    #color_tracker_window = "output"
    #thresh_window = "thresh"
    #blob_window = "blob"
    cascade_fn = "../data/haarcascades/haarcascade_frontalface_alt.xml"
    nested_fn  = "../data/haarcascades/haarcascade_eye.xml"

    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)
    
    cam = create_capture(-1)
    cam.set(cv.CV_CAP_PROP_FRAME_HEIGHT, MAX_Y)
    cam.set(cv.CV_CAP_PROP_FRAME_WIDTH, MAX_X)
    #cv2.namedWindow(color_tracker_window, 1)
    #cv2.moveWindow(color_tracker_window, 0, 0)
    #cv2.namedWindow(thresh_window, 1)
    #cv2.moveWindow(thresh_window, 700, 0)
    #cv2.namedWindow(blob_window, 1)
    #cv2.moveWindow(blob_window, 700, 500)

    # PIDs for head, neck, and base movement.
    # head & neck PID outputs are relative to center of view, so if the face is centered, output = 0.
    head_x_pid = pid.PID(kP=0.020, kI=0.002, kD=0.0, output_min=-HEAD_CENTER, output_max=HEAD_CENTER)
    head_x_pid.set_setpoint(CENTER_X)
    neck_y_pid = pid.PID(kP=0.020, kI=0.001, kD=0.0, output_min=-NECK_CENTER, output_max=NECK_CENTER)
    neck_y_pid.set_setpoint(CENTER_Y)
    # base tracks the head and tries to move so that the head will be centered
    # output = rotational velocity (applied negatively to left wheel, positively to right)
    base_r_pid = pid.PID(kP=1.5, kI=0.05, kD=0.0, output_min=-250, output_max=250)
    base_r_pid.set_setpoint(HEAD_CENTER)

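    # second base PID would drive forward/backward to hold the face at a target apparent area
    # (a larger area means a closer face); its output is zeroed in the loop below, so it is
    # effectively disabled in this version.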
    base_area_pid = pid.PID(kP=0.02, kI=0.002, kD=0.0, output_min=-250, output_max=250)
    base_area_pid.set_setpoint(5500)

    servo_if.init_servos()
    time.sleep(0.25)
    servo_if.set_servo(servo_if.NECK, NECK_START)
    head_x = servo_if.get_servo_position(servo_if.HEAD)
    neck_y = servo_if.get_servo_position(servo_if.NECK)
    jaw_pos = servo_if.get_servo_position(servo_if.JAW)
    head_x_output = 0
    neck_y_output = 0
    base_x_output = 0
    base_area_output = 0
    
    last_known_x = None
    last_known_y = None
    
    frame_number = 0
    
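    # main tracking loop: grab a frame, find the largest face, update the PIDs,
    # and drive the head/neck servos and base motors.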
    while True:
        start_time = time.time()
        frame_number += 1
        ret, frame = cam.read()
        #print "cam.read returned {0}".format(ret)
        if not ret:
            break
        #cv2.imshow(color_tracker_window, frame)
        #cv2.waitKey(1)
        #blurred_image = cv2.GaussianBlur(frame, (9, 9), 0)
        #thresholded_image = get_thresholded_image(blurred_image)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        rects = detect_faces(gray, cascade)

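        # pick the largest detected face and use the center of its bounding box as the track point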
        area = 0
        for x1, y1, x2, y2 in rects:
            a = (x2 - x1) * (y2 - y1)
            if a > area:
                area = a
                pos_x = (x1 + x2) / 2
                pos_y = (y1 + y2) / 2
        #cv2.imshow(thresh_window, thresholded_image)
        #cv2.waitKey(1)
            
        #Calculating the moments
        #contours = None
        #contours, hierarchy = cv2.findContours(thresholded_image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        #area = 0
        #if contours:
        #    for c in contours:
        #        moments = cv2.moments(c)
        #        a = moments['m00']
        #        if a > area:
        #            area = a
        #            moment10 = moments['m10']
        #            moment01 = moments['m01']

        lastX = pos_x
        lastY = pos_y

        # make sure the detected face is big enough to trust
        if area > 100:
            #Calculate the coordinates of the centroid
            #pos_x = int(moment10 / area)
            #pos_y = int(moment01 / area)
            last_known_x = pos_x
            last_known_y = pos_y
        elif last_known_x is not None:
            if HEAD_LEFT < head_x < HEAD_RIGHT and NECK_DOWN < neck_y < NECK_UP:
                pos_x = last_known_x
                pos_y = CENTER_Y
            else:
                #servo_if.ramp_servo(servo_if.HEAD, HEAD_CENTER, -3 if head_x > HEAD_CENTER else 3)
                #servo_if.ramp_servo(servo_if.NECK, NECK_START, -3 if neck_y > NECK_CENTER else 3)
                pos_x = CENTER_X
                pos_y = CENTER_Y
                last_known_x = None
                last_known_y = None


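        # a face is in view: steer the head and neck servos toward it, and rotate the base
        # so the head can stay near its center position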
        if area > 100:
            head_x_output = head_x_pid.update(pos_x)
            head_x = min(max(head_x + head_x_output, HEAD_LEFT), HEAD_RIGHT)
            servo_if.set_servo(servo_if.HEAD, int(head_x))
        
            neck_y_output = neck_y_pid.update(pos_y)
            neck_y = min(max(neck_y - neck_y_output, NECK_DOWN), NECK_UP)             # for screen coordinates, +y = down, but for neck coordinates, +y = up
            servo_if.set_servo(servo_if.NECK, int(neck_y))
        
            base_x_output = base_r_pid.update(head_x)
            #base_x_output = 0
            #base_area_output = base_area_pid.update(area) if area > 50 else 0
            base_area_output = 0
            left_motor_speed = min(max(base_x_output - base_area_output, -50), 50)
            right_motor_speed = min(max(-base_x_output - base_area_output, -50), 50)
            set_motor_speed(int(left_motor_speed), int(right_motor_speed))  # for +output, left motor goes forward, right motor goes backward

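        # map the detected face area onto the jaw servo: the larger (closer) the face, the wider the jaw opens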
        jaw_pos = int((float(area) - 100.0) / 5000.0 * (servo_if.JAW_OPEN - servo_if.JAW_CLOSED_EMPTY) + servo_if.JAW_CLOSED_EMPTY)
        jaw_pos = max(min(jaw_pos, servo_if.JAW_OPEN), servo_if.JAW_CLOSED_EMPTY)
        servo_if.set_servo(servo_if.JAW, jaw_pos)

        end_time = time.time()
        if not frame_number % 10:
            print '{}. {} x: {} y: {} area: {} head: {} {} neck: {} {} base: {} {} jaw_pos: {} elapsed: {}'.format(frame_number, datetime.now(), pos_x, pos_y, area, head_x, head_x_output, neck_y, neck_y_output, base_x_output, base_area_output, jaw_pos, end_time - start_time)
        #cv2.imshow(blob_window, thresholded_image)
        time.sleep(0.001)
        #c = cv2.waitKey(3)
        #if(c!=-1):
        #    break
    print "exiting"
Example 2
def main():
    """track a tennis ball with FIDO's head and base"""
    global cam
    global brain, tail
    global inputs, outputs, prev_outputs
    global cascade, nested
    global debug, quit_request

    try:
        opts, args = getopt.gnu_getopt(sys.argv, "hd", ["help", "debug"])
    except getopt.GetoptError as err:
        print err
        print 'fido_main.py [--help] [--debug]'
        sys.exit(2)

    print "opts = ", opts
    print "args = ", args
        
    for opt, arg in opts:
        print "opt = ", opt
        if opt in ('-h', '--help'):
            print 'fido_main.py [--help] [--debug]'
            sys.exit()
        elif opt in ('-d', '--debug'):
            debug = True

    print "debug = {}".format(debug)
    
    cascade_fn = "../data/haarcascades/haarcascade_frontalface_alt.xml"
    #nested_fn  = "../data/haarcascades/haarcascade_eye.xml"

    cascade = cv2.CascadeClassifier(cascade_fn)
    #nested = cv2.CascadeClassifier(nested_fn)
    
    cam = create_capture(-1)
    cam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, MAX_Y)
    cam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, MAX_X)

    if debug:
        cv2.namedWindow("output", 1)
        cv2.moveWindow("output", 0, 540)
    
    init_pids()
    init_publisher()
    init_subscriptions()
    init_sighandler()
    
    servo_if.init_servos()
    time.sleep(0.25)
    servo_if.set_servo(servo_if.NECK, servo_if.NECK_START)
    outputs['legs'] = servo_if.legs_position
    outputs['head'] = servo_if.get_servo_position(servo_if.HEAD)
    outputs['neck'] = servo_if.get_servo_position(servo_if.NECK)
    outputs['jaw'] = servo_if.get_servo_position(servo_if.JAW)
    outputs['tail'] = servo_if.get_servo_position(servo_if.TAIL)

    prev_outputs['left_wheel'] = outputs['left_wheel']
    prev_outputs['right_wheel'] = outputs['right_wheel']
    prev_outputs['legs'] = outputs['legs']
    prev_outputs['neck'] = outputs['neck']
    prev_outputs['head'] = outputs['head']
    prev_outputs['jaw'] = outputs['jaw']
    prev_outputs['tail'] = outputs['tail']
    
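    # behaviour state machines; both operate on the shared inputs and outputs dicts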
    brain = fido_fsm.FidoBrain(inputs, outputs)
    tail = fido_fsm.FidoTail(inputs, outputs)

    frame_number = 0
    
    if debug:
        cv2.cv.SetMouseCallback("output", on_mouse, 0)

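    # main control loop: read camera and sensor inputs, update the PIDs, step the brain
    # and tail state machines, then write the servo and motor outputs.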
    while not quit_request:
        start_time = time.time()
        frame_number += 1
        read_inputs()
        if not inputs['cam_status']:
            print "camera read failure"
            break
        update_pids()
        brain.run() 
        tail.run()
        write_outputs()
        if debug:
            s = inputs['hsv_image'][y_co, x_co]
            cv2.putText(inputs['cam_frame'], str(s[0])+","+str(s[1])+","+str(s[2]), (x_co,y_co), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)
            cv2.imshow("output", inputs['cam_frame'])
        end_time = time.time()
        if True: #not frame_number % 10:
            print '{}. {} v: {} ir: {} {} {} x: {} y: {} area: {} {} brain: {} tail: {} head: {} neck: {} base: {} {} jaw: {} elapsed: {}'.format(frame_number, datetime.now(), batt_v, inputs['ir_l'], inputs['ir_m'], inputs['ir_r'], inputs['ball_x'], inputs['ball_y'], inputs['avg_ball_area'], inputs['face_area'], brain.state(), tail.state(), outputs['head'], outputs['neck'], outputs['left_wheel'], outputs['right_wheel'], outputs['jaw'], end_time - start_time)
        k = cv2.waitKey(10)
        if k > 0:
            print "key pressed, exiting"
            exit(1)
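        # pace the loop to roughly 20 Hz (50 ms period), never sleeping for less than 1 ms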
        time.sleep(max(0.05 - (time.time() - start_time), 0.001))
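
Both examples depend on a small pid.PID helper whose implementation is not part of this listing (example 1 constructs the controllers directly; example 2 wraps them in init_pids()/update_pids()). A minimal sketch consistent with the calls above (gains and output clamps in the constructor, set_setpoint(), and update() returning the clamped control output) might look like this; it only illustrates the interface and is not FIDO's actual pid module:

import time

class PID(object):
    """Simple PID controller with a clamped output (illustrative sketch)."""

    def __init__(self, kP=1.0, kI=0.0, kD=0.0, output_min=None, output_max=None):
        self.kP, self.kI, self.kD = kP, kI, kD
        self.output_min, self.output_max = output_min, output_max
        self.setpoint = 0.0
        self.integral = 0.0
        self.prev_error = None
        self.prev_time = None

    def set_setpoint(self, setpoint):
        # changing the target also clears the accumulated state
        self.setpoint = setpoint
        self.integral = 0.0
        self.prev_error = None

    def update(self, measurement):
        # error is measured relative to the setpoint, so a centered target gives zero output
        now = time.time()
        error = self.setpoint - measurement
        dt = (now - self.prev_time) if self.prev_time is not None else 0.0
        self.integral += error * dt
        derivative = ((error - self.prev_error) / dt
                      if self.prev_error is not None and dt > 0 else 0.0)
        self.prev_error = error
        self.prev_time = now
        output = self.kP * error + self.kI * self.integral + self.kD * derivative
        # clamp to the configured output range
        if self.output_max is not None:
            output = min(output, self.output_max)
        if self.output_min is not None:
            output = max(output, self.output_min)
        return output

The small kI gains used in example 1 let the integral term finish centering the face when the proportional term alone would stall just short of the setpoint, while the output clamps keep the servo and wheel commands within their safe ranges.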