Esempio n. 1
0
def main():
    """Detect a ball in the camera feed and make the robot look at it."""
    # Initialize the ROS environment so the robot and camera can communicate
    ROSEnvironment()
    # Start camera
    camera.start()
    # Start robot
    robot.start()
    # Initialize ball detector
    ball_detector = BallDetector()

    # Main loop: detect the ball and aim the robot at it
    while True:
        # Get image from camera
        img = camera.getImage()
        # Detect ball; returns the annotated image and the ball center (or None)
        (img, center) = ball_detector.detect(img)
        # Show image (RGB -> BGR channel flip for OpenCV display)
        cv2.imshow("Frame", img[..., ::-1])
        # Close if any key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
        # Track ball; use `is not None` since center may be a tuple
        if center is not None:
            # Convert 2d pixel coordinates to 3d coordinates on the camera axis
            (x, y, z) = camera.convert2d_3d(center[0], center[1])
            print(x, y, z, 'on camera axis')

            # Convert camera-axis coordinates to robot-axis coordinates
            (x, y, z) = camera.convert3d_3d(x, y, z)
            print(x, y, z, 'on robot axis')

            # Move robot to look at the 3d point
            robot.lookatpoint(x, y, z)
Esempio n. 2
0
def main():
    """Display the camera feed with every detected face outlined."""
    # Bring up the ROS environment so the camera node can communicate.
    ROSEnvironment()
    # Create the camera interface.
    camera = Camera()
    # Camera focal length in pixels (kept from the original setup).
    focal_length = 640

    camera.start()
    # Build the face detector and the 68-point landmark predictor.
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Main display loop.
    while True:
        img = camera.getImage()

        # Run face detection on the current frame.
        dets = face_detector.detect(img)

        # Outline each detected face in green.
        for face in dets:
            top_left = (face.left(), face.top())
            bottom_right = (face.right(), face.bottom())
            cv2.rectangle(img, top_left, bottom_right, color_green, 3)

        # Display (RGB -> BGR for OpenCV) and exit on any key press.
        cv2.imshow("Frame", img[..., ::-1])
        if cv2.waitKey(1) > 0:
            break
Esempio n. 3
0
def main():
    global point
    # Bring up the ROS environment so the camera can communicate.
    ROSEnvironment()
    camera.start()
    # Main display loop.
    while True:
        img = camera.getImage()

        # Box boundaries in pixel coordinates.
        i_min, i_max = 280, 360
        j_min, j_max = 200, 280

        # Paint every pixel inside the box (image rows indexed first).
        for col in range(i_min, i_max):
            for row in range(j_min, j_max):
                img[row][col] = [255, 0, 0]

        # Display (RGB -> BGR for OpenCV) and exit on any key press.
        cv2.imshow("Frame", img[..., ::-1])
        if cv2.waitKey(1) > 0:
            break
Esempio n. 4
0
def main():
    """Detect a ball and point the robot at its 3d position."""
    # Initialize the ROS environment so the robot and camera can communicate
    ROSEnvironment()
    # Start camera
    camera.start()
    # Start robot
    robot.start()
    # Initialize ball detector
    ball_detector = BallDetector()

    # Loop
    while True:
        # Get image from camera
        img = camera.getImage()
        # Detect the ball; 640 is the camera focal length in pixels
        (img, center) = ball_detector.detect(img, 640)
        cv2.imshow("Frame", img[..., ::-1])
        # Close if any key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break

        # Fixed: compare with `is not None` instead of `!= None`
        # (center may be a tuple/array where `!=` is not a plain bool)
        if center is not None:
            # Convert 2d pixel coordinates to 3d coordinates on the camera axis
            (x, y, z) = camera.convert2d_3d(center[0], center[1])
            print(x, y, z, 'on camera axis')
            # Convert camera-axis coordinates to robot-axis coordinates
            (x, y, z) = camera.convert3d_3d(x, y, z)
            print(x, y, z, 'on robot axis')
            # Move the robot to look at the 3d point
            # (4: extra argument kept from original — TODO confirm its meaning)
            robot.lookatpoint(x, y, z, 4)
def main():
    """Show the camera feed with dlib face detections drawn on it."""
    # Bring up ROS so the camera node can communicate.
    ROSEnvironment()
    camera = Camera()
    camera.start()
    # dlib's stock frontal face detector.
    detector = dlib.get_frontal_face_detector()

    while True:
        img = camera.getImage()

        # Detect faces; the second argument is upsample_num_times.
        dets = detector(img, 1)

        # Draw a green rectangle around each detection.
        for face in dets:
            cv2.rectangle(img, (face.left(), face.top()),
                          (face.right(), face.bottom()), color_green, 3)

        # Display (RGB -> BGR for OpenCV) and exit on any key press.
        cv2.imshow("Frame", img[..., ::-1])
        if cv2.waitKey(1) > 0:
            break
def main():
    """Capture one spoken utterance, ask AICoRE, and speak the answer."""
    # Bring up ROS so the AICoRE client can connect.
    ROSEnvironment()
    client = AICoRE()
    # Identify this user to AICoRE.
    client.setUserName('Jacob')

    # Speech recognition setup.
    recognizer = sr.Recognizer()
    # Microphone at hardware device index 11.
    mic = sr.Microphone(device_index=11)
    print("start talking")
    with mic as source:
        # Calibrate for background noise, then capture speech.
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)

    # Speech -> text via Google's recognizer.
    text = recognizer.recognize_google(audio)
    # Send the transcript and fetch AICoRE's reply.
    client.send(text)
    answer = client.answer()

    # Text -> speech, saved to mp3 and played back.
    tts = gTTS(answer)
    tts.save('answer.mp3')
    playsound.playsound('answer.mp3')
Esempio n. 7
0
def main():
    """Estimate and draw head pose for the face nearest the image centre."""
    # Bring up ROS so the robot and camera can communicate.
    ROSEnvironment()
    camera = Camera()
    camera.start()

    robot = Robot()
    robot.start()
    # Face detector plus 68-point landmark predictor.
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Loop counter (kept from the original; not otherwise used here).
    cnt = 0

    while True:
        img = camera.getImage()

        dets = face_detector.detect(img)

        if len(dets) > 0:
            # Pick the face whose horizontal centre is closest to the image centre.
            best_face = None
            best_distance = 1000
            for face in dets:
                cv2.rectangle(img, (face.left(), face.top()),
                              (face.right(), face.bottom()), color_green, 3)

                face_center_x = (face.left() + face.right()) / 2
                # Horizontal distance from the image centre (0.5 * width).
                distance = abs(face_center_x - camera.width / 2)
                if distance < best_distance:
                    best_distance = distance
                    best_face = face

            # Estimate and draw the head pose of the chosen face.
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, best_face)
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

        # Display (RGB -> BGR for OpenCV) and exit on any key press.
        cv2.imshow("Frame", img[..., ::-1])
        if cv2.waitKey(1) > 0:
            break
Esempio n. 8
0
def main():
    """Start the robot and run the nodding routine."""
    # Bring up ROS so the robot can communicate.
    ROSEnvironment()
    # Create and start the robot, then nod.
    robot = Robot()
    robot.start()
    startNod(robot)
Esempio n. 9
0
def main():
    """Detect faces, estimate the first face's pose, and report gaze direction."""
    # Initialize the ROS environment so the camera can communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Camera focal length in pixels
    focal_length = 640

    camera.start()
    # Initialize face detector and the 68-point landmark predictor
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Yaw thresholds for deciding gaze direction (units as produced by
    # estimate_pose's rotation vector — TODO confirm they are radians)
    right_threshold = 0.3
    left_threshold = -0.3
    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use the first face to estimate pose
        if len(dets) > 0:
            # Estimate pose
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            # Fixed: Python 2 `print rotation_vector` statement -> print() call
            print(rotation_vector)
            # The third component of the rotation vector is used as yaw here
            yaw = rotation_vector[2]
            print(yaw)

            # Positive yaw beyond the threshold -> looking right
            if yaw > right_threshold:
                print('You are looking at right.')
            # Negative yaw beyond the threshold -> looking left
            elif yaw < left_threshold:
                print('You are looking at left.')

        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if any key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
def main():
    """Track the first detected face: draw its pose and aim the robot at it."""
    # Bring up ROS so the robot and camera can communicate.
    ROSEnvironment()
    camera = Camera()
    # Camera focal length in pixels (kept from the original setup).
    focal_length = 640

    camera.start()

    robot = Robot()
    robot.start()
    # Face detector plus 68-point landmark predictor.
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Counter (kept from the original; not otherwise used here).
    cnt = 1
    while True:
        img = camera.getImage()

        dets = face_detector.detect(img)

        # Outline every detected face.
        for face in dets:
            cv2.rectangle(img, (face.left(), face.top()),
                          (face.right(), face.bottom()), color_green, 3)

        # Only the first detection is used for pose estimation and tracking.
        if len(dets) > 0:
            tracked = dets[0]
            # Estimate and draw the head pose.
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, tracked)
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)

            # Face centre in pixels -> 3d point on the camera axis.
            center_x = (tracked.left() + tracked.right()) / 2
            center_y = (tracked.top() + tracked.bottom()) / 2
            (x, y, z) = camera.convert2d_3d(center_x, center_y)
            print(x, y, z, 'on camera axis')
            # Camera axis -> robot axis, then aim the robot at the point.
            (x, y, z) = camera.convert3d_3d(x, y, z)
            print(x, y, z, 'on robot axis')
            robot.lookatpoint(x, y, z, 4)

        # Display (RGB -> BGR for OpenCV) and exit on any key press.
        cv2.imshow("Frame", img[..., ::-1])
        if cv2.waitKey(1) > 0:
            break
def main():
    """Aim the robot at a fixed 3d point in the world."""
    # Bring up ROS so the robot can communicate.
    ROSEnvironment()
    robot = Robot()
    robot.start()

    # The arguments are 3d world coordinates (x, y, z).
    robot.lookatpoint(1, 1, 1)
Esempio n. 12
0
def main():
    """Move the robot to a pan/tilt position."""
    # Bring up ROS so the robot can communicate.
    ROSEnvironment()
    robot = Robot()
    robot.start()

    # Move to pan/tilt (0, 0) and give the motors a moment to act.
    robot.move(0, 0)
    time.sleep(0.1)
def main():
    """Track a detected ball by nudging the robot's pan/tilt motors."""
    # Initialize the ROS environment so the robot and camera can communicate
    ROSEnvironment()
    # Start camera
    camera.start()
    # Start robot
    robot.start()
    # Initialize ball detector
    ball_detector = BallDetector()

    # Pixel-distance boundaries around the image centre (dead zone)
    ball_on_right = -100
    ball_on_left = 100
    ball_on_bottom = -100
    ball_on_top = 100

    # Loop
    while True:
        # Get image from camera
        img = camera.getImage()
        # Detect ball
        (img, center) = ball_detector.detect(img)
        # Show ball
        cv2.imshow("Frame", img[..., ::-1])
        # Close if any key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
        # Track ball; `is not None` since center may be a tuple
        if center is not None:
            # Fixed: Python 2 print statement -> print() call
            print(center[0], center[1])
            # Distance between the detected ball and the image centre
            distance = calculateDistance(center)

            pan = robot.getPosition()[0]
            tilt = robot.getPosition()[1]
            # Step size per correction — small values keep the motion smooth
            # (TODO confirm motor units; 0.1 matches the steps used elsewhere)
            pan_delta = 0.1
            tilt_delta = 0.1

            # Ball left/right of the dead zone -> adjust pan
            if distance[0] > ball_on_left:
                pan = pan - pan_delta
            elif distance[0] < ball_on_right:
                pan = pan + pan_delta
            # Ball above/below the dead zone -> adjust tilt
            if distance[1] > ball_on_top:
                tilt = tilt - tilt_delta
            elif distance[1] < ball_on_bottom:
                tilt = tilt + tilt_delta

            robot.move(pan, tilt)
Esempio n. 14
0
def main():
    """Make the robot look at two target squares in sequence."""
    # Initialize the ROS environment so the robot can communicate
    ROSEnvironment()
    # Initialize robot
    robot = Robot()
    # Start robot
    robot.start()

    # Look at the red square — 3d world coordinates
    # (placeholder values; TODO confirm against the actual square position)
    robot.lookatpoint(1, 1, 1)
    time.sleep(1)  # wait a second

    # Look at the green square
    # (placeholder values; TODO confirm against the actual square position)
    robot.lookatpoint(1, -1, 1)
def main():
    """Track a detected ball using the robot's directional step functions."""
    # Initialize the ROS environment so the robot and camera can communicate
    ROSEnvironment()
    # Start camera
    camera.start()
    # Start robot
    robot.start()
    # Initialize ball detector
    ball_detector = BallDetector()

    # Pixel-distance boundaries around the image centre (dead zone)
    ball_on_right = -100
    ball_on_left = 100
    ball_on_bottom = -100
    ball_on_top = 100
    # Loop
    while True:
        # Get image from camera
        img = camera.getImage()
        # Detect ball
        (img, ball_center) = ball_detector.detect(img)
        # Show ball
        cv2.imshow("Frame", img[..., ::-1])
        # Close if any key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break

        # Track ball; `is not None` since ball_center may be a tuple
        if ball_center is not None:
            # Distance between the detected ball and the image centre
            distance = calculateDistance(ball_center)
            # Fixed: Python 2 print statement -> print() call
            print(distance[0], distance[1])

            # Ball left/right of the dead zone -> step the pan motor
            # (0.1 step matches the sizes used elsewhere in these examples)
            if distance[0] > ball_on_left:
                robot.left(0.1)
            elif distance[0] < ball_on_right:
                robot.right(0.1)
            # Ball above/below the dead zone -> step the tilt motor
            # (TODO confirm up/down method names against the Robot API)
            if distance[1] > ball_on_top:
                robot.up(0.1)
            elif distance[1] < ball_on_bottom:
                robot.down(0.1)


if __name__ == '__main__':
    main()
def main():
    """Send a fixed question to AICoRE and read back its answer."""
    # Bring up ROS so the AICoRE client can connect.
    ROSEnvironment()
    client = AICoRE()

    # Identify this user to AICoRE.
    client.setUserName('Jacob')

    # Ask a question, then fetch the reply.
    client.send('What is my favorite food?')
    text = client.answer()
Esempio n. 17
0
def main():
    """Track the first dlib-detected face with the robot's gaze."""
    # Bring up ROS so the robot and camera can communicate.
    ROSEnvironment()
    camera = Camera()
    camera.start()

    robot = Robot()
    robot.start()
    # dlib's stock frontal face detector.
    detector = dlib.get_frontal_face_detector()

    # Loop counter (kept from the original; not otherwise used here).
    cnt = 0

    while True:
        img = camera.getImage()

        # Detect faces (second argument: upsample_num_times) and outline them.
        dets = detector(img, 1)
        for face in dets:
            cv2.rectangle(img, (face.left(), face.top()),
                          (face.right(), face.bottom()), color_green, 3)

        if len(dets) > 0:
            # Track the first detection: pixel centre -> camera axis -> robot axis.
            target = dets[0]
            center_x = (target.left() + target.right()) / 2
            center_y = (target.top() + target.bottom()) / 2
            (x, y, z) = camera.convert2d_3d(center_x, center_y)
            (x, y, z) = camera.convert3d_3d(x, y, z)
            # Aim the robot at the face.
            robot.lookatpoint(x, y, z)

        # Display (RGB -> BGR for OpenCV) and exit on any key press.
        cv2.imshow("Frame", img[..., ::-1])
        if cv2.waitKey(1) > 0:
            break
def main():
    """Show the camera feed and register a mouse-click callback."""
    # Bring up ROS so the camera can communicate.
    ROSEnvironment()
    camera.start()
    while True:
        img = camera.getImage()
        # Display (RGB -> BGR for OpenCV).
        cv2.imshow("Frame", img[..., ::-1])
        # Clicking a pixel on the window invokes onMouse.
        cv2.setMouseCallback("Frame", onMouse)
        # Exit on any key press.
        if cv2.waitKey(1) > 0:
            break
def main():
    """Listen for speech, speak AICoRE's answer, and nod/shake on yes/no."""
    # Initialize the ROS environment so AICoRE and the robot can connect
    ROSEnvironment()

    # Start robot
    robot = Robot()
    robot.start()

    # Initialize AICoRE client
    client = AICoRE()

    # Client name sent to AICoRE
    client.setUserName('username')

    # Initialize speech recognition
    r = sr.Recognizer()

    # List all the microphone hardware so the right device_index can be chosen
    for i, item in enumerate(sr.Microphone.list_microphone_names()):
        print(i, item)

    # Microphone device index — adjust to match the listing above
    mic = sr.Microphone(device_index=7)

    # Fixed: Python 2 print statement -> print() call
    print("I'm listening ")

    with mic as source:
        # Calibrate for background noise, then capture speech
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
    # Speech -> text via Google's recognizer
    text = r.recognize_google(audio)

    # Send the transcript and fetch AICoRE's answer
    client.send(text)
    answer = client.answer()

    # Answer -> speech, saved to mp3 and played back
    tts = gTTS(answer)
    tts.save('answer.mp3')
    playsound.playsound('answer.mp3')

    # Nod on an affirmative answer, shake on a negative one
    if 'yes' in answer.lower():
        # startNod is the nodding routine used elsewhere in these examples
        startNod(robot)
    elif 'no' in answer.lower():
        # TODO confirm the head-shake routine name (startShake assumed)
        startShake(robot)
Esempio n. 20
0
def main():
    """Center the robot, then pan left and right."""
    # Bring up ROS so the robot can communicate.
    ROSEnvironment()
    robot = Robot()
    robot.start()

    # Return to the home position first.
    robot.center()
    time.sleep(1)

    # Pan left, pause, then pan right by the same amount.
    robot.left(0.2)
    time.sleep(1)  # wait a second

    robot.right(0.2)
    time.sleep(1)
def main():
    """Continuously display the camera feed until a key is pressed."""
    # Initialize the ROS environment so the camera can communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Start camera
    camera.start()

    # Loop
    while True:
        # Get image from camera; getImage returns an image
        img = camera.getImage()
        # Show the image on the "Frame" window (RGB -> BGR channel flip)
        cv2.imshow("Frame", img[..., ::-1])
        # Close if any key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
def main():
    """Detect faces, estimate the first face's pose, and print the vectors."""
    # Initialize the ROS environment so the camera can communicate
    ROSEnvironment()
    # Initialize camera
    camera = Camera()
    # Camera focal length in pixels
    focal_length = 640

    camera.start()
    # Initialize face detector and the 68-point landmark predictor
    face_detector = FaceDetector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    # Loop
    while True:
        # Get image
        img = camera.getImage()

        # Get face detections
        dets = face_detector.detect(img)

        # Draw all face detections
        for det in dets:
            cv2.rectangle(img, (det.left(), det.top()),
                          (det.right(), det.bottom()), color_green, 3)

        # We only use the first face to estimate pose
        if len(dets) > 0:
            # Estimate pose of the detected face
            (success, rotation_vector, translation_vector,
             image_points) = face_detector.estimate_pose(img, dets[0])
            # Draw pose
            img = face_detector.draw_pose(img, rotation_vector,
                                          translation_vector, image_points)
            # Fixed: Python 2 print statements -> print() calls
            print("rotation_vector")
            print(rotation_vector)
            print("translation_vector")
            print(translation_vector)
        # Show image
        cv2.imshow("Frame", img[..., ::-1])
        # Close if any key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
def main():
    global point
    # Bring up ROS so the camera can communicate.
    ROSEnvironment()
    camera.start()
    while True:
        img = camera.getImage()
        # Mark the current `point` with a red circle of radius 10.
        cv2.circle(img, point, 10, (0, 0, 255), 3)
        # Display (RGB -> BGR for OpenCV).
        cv2.imshow("Frame", img[..., ::-1])
        # Clicking a pixel on the window invokes onMouse.
        cv2.setMouseCallback("Frame", onMouse)
        # Exit on any key press.
        if cv2.waitKey(1) > 0:
            break
Esempio n. 24
0
def main():
    global point
    # Bring up ROS so the camera can communicate.
    ROSEnvironment()
    camera.start()
    while True:
        img = camera.getImage()

        # Run colour segmentation on the frame
        # (return value unused — presumably operates on img; TODO confirm).
        color_segmentation(img)

        # Resizable window, then display (RGB -> BGR for OpenCV).
        cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
        cv2.imshow("Frame", img[..., ::-1])
        # Exit on any key press.
        if cv2.waitKey(1) > 0:
            break
Esempio n. 25
0
def main():
    """Save the robot's position, look away, then return to it."""
    # Bring up ROS so the robot can communicate.
    ROSEnvironment()
    robot = Robot()
    robot.start()

    time.sleep(1)  # wait a second

    # Remember the current pan/tilt position.
    saved = robot.getPosition()
    saved_pan = saved[0]
    saved_tilt = saved[1]

    # Look somewhere else...
    robot.move(-0.3, -0.3)
    time.sleep(1)  # wait a second

    # ...then return to the stored position.
    robot.move(saved_pan, saved_tilt)
    time.sleep(1)  # wait a second
Esempio n. 26
0
def main():
    """Speak a canned reply when a keyword is heard, else AICoRE's answer."""
    # Initialize the ROS environment so AICoRE can connect
    ROSEnvironment()
    # Initialize AICoRE client
    client = AICoRE()

    # Client name sent to AICoRE
    client.setUserName('username')

    # Initialize speech recognition
    r = sr.Recognizer()

    # List all the microphone hardware so the right device_index can be chosen
    for i, item in enumerate(sr.Microphone.list_microphone_names()):
        print(i, item)

    # Microphone device index — adjust to match the listing above
    mic = sr.Microphone(device_index=1)

    # Fixed: Python 2 print statement -> print() call
    print("I'm listening")

    with mic as source:
        # Calibrate for background noise, then capture speech
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
    # Speech -> text via Google's recognizer
    text = r.recognize_google(audio)

    # Send the transcript and fetch AICoRE's answer
    client.send(text)
    answer = client.answer()

    # Keyword that triggers the canned response (placeholder — customize)
    keyword = 'hello'
    # Check if the keyword appears in the input, case-insensitively
    if keyword.lower() in text.lower():
        # Canned response used instead of AICoRE's answer
        answer = 'Hello there!'

    # Answer -> speech, saved to mp3 and played back
    tts = gTTS(answer)
    tts.save('hello.mp3')
    playsound.playsound('hello.mp3')
def main():
    """Capture one utterance, ask AICoRE, and speak the answer back."""
    # Bring up ROS so AICoRE can connect.
    ROSEnvironment()
    client = AICoRE()

    # Identify this user to AICoRE.
    client.setUserName('username')

    recognizer = sr.Recognizer()

    # Show every microphone so device_index can be checked.
    for i, item in enumerate(sr.Microphone.list_microphone_names()):
        print(i, item)

    # Microphone at hardware device index 11.
    mic = sr.Microphone(device_index=11)
    with mic as source:
        # Calibrate for background noise, then capture speech.
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
    # Speech -> text via Google's recognizer.
    text = recognizer.recognize_google(audio)

    # Send the transcript and fetch AICoRE's answer.
    client.send(text)
    answer = client.answer()

    # Answer -> speech, saved to mp3 and played back.
    tts = gTTS(answer)
    tts.save('answer.mp3')
    playsound.playsound('answer.mp3')
def main():
    """Detect a ball in the camera feed and display the annotated image."""
    # Initialize the ROS environment so the robot and camera can communicate
    ROSEnvironment()
    # Start camera
    camera.start()
    # Start robot
    robot.start()
    # Initialize ball detector
    ball_detector = BallDetector()

    # The variable for counting loop
    cnt = 0

    # Loop
    while True:
        # Get image from camera
        img = camera.getImage()
        # Use ball_detector to detect the ball; returns annotated image + center
        (img, center) = ball_detector.detect(img)
        cv2.imshow("Frame", img[..., ::-1])
        # Close if any key is pressed
        key = cv2.waitKey(1)
        if key > 0:
            break
Esempio n. 29
0
def main():
    """Capture speech, send it to AICoRE, and synthesize the answer."""
    # Initialize the ROS environment so AICoRE can connect
    ROSEnvironment()
    # Initialize AICoRE client
    client = AICoRE()

    # Client name sent to AICoRE
    client.setUserName('username')

    # Initialize speech recognition
    r = sr.Recognizer()

    # List all the microphone hardware so the right device_index can be chosen
    for i, item in enumerate(sr.Microphone.list_microphone_names()):
        print(i, item)

    # Microphone device index — adjust to match the listing above
    mic = sr.Microphone(device_index=1)

    # Fixed: Python 2 print statement -> print() call
    print("I'm listening")

    with mic as source:
        # Adjust for ambient noise before capturing
        r.adjust_for_ambient_noise(source)
        # Listen to the source
        audio = r.listen(source)
    # Convert audio to text via Google's recognizer
    text = r.recognize_google(audio)

    # Send text to the client
    client.send(text)
    # Get the answer from AICoRE
    answer = client.answer()

    # Convert the answer text to speech
    tts = gTTS(answer)
def main():
    """Detect a ball each frame, show the result, and print its centre."""
    # Bring up ROS so the camera can communicate.
    ROSEnvironment()
    camera = Camera()
    camera.start()
    ball_detector = BallDetector()

    while True:
        img = camera.getImage()
        # Detect the ball (640: focal length) and get the annotated frame.
        (img, center) = ball_detector.detect(img, 640)
        # Display (RGB -> BGR for OpenCV).
        cv2.imshow("Frame", img[..., ::-1])
        # Report the detected centre.
        print(center)
        # Exit on any key press.
        if cv2.waitKey(1) > 0:
            break