Example #1
import argparse
import cv2

parser = argparse.ArgumentParser()
parser.add_argument('robot',
                    nargs='?',  # positional defaults only apply with nargs='?'
                    choices=['robo', 'tello'],
                    default='robo',
                    help='robo or tello')
parser.add_argument('mode',
                    nargs='?',  # positional defaults only apply with nargs='?'
                    choices=['image', 'video'],
                    default='image',
                    help='image or video')
args = parser.parse_args()
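# With nargs='?' both positionals are optional; usage sketch (script name assumed):
#   python main.py              -> robot='robo', mode='image' (the defaults)
#   python main.py tello video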

# if args.mode not in ('image', 'video'):
# 	print('invalid mode, choose image or video')
# 	sys.exit()

if args.robot == 'robo':
    from EP_api import findrobotIP, Robot
    robot_ip = findrobotIP()
    # robot = Robot('192.168.2.1')
    robot = Robot(robot_ip)

    robot.startvideo()
    while robot.frame is None:  # video warm-up: spin until the first frame arrives
        pass
    width = robot.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = robot.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

elif args.robot == 'tello':
    from Tello_api import Tello
    robot = Tello()
    robot.startvideo()
    robot.startstates()
    while robot.frame is None:  # video warm-up: spin until the first frame arrives
        pass
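Both warm-up loops above busy-wait at 100% CPU until the first frame arrives. A gentler variant (a minimal sketch, assuming robot.frame is populated by a background reader thread, as in these examples):

import time

def wait_for_first_frame(robot, timeout=10.0, poll=0.05):
    """Poll robot.frame until the stream delivers a frame or we time out."""
    deadline = time.time() + timeout
    while robot.frame is None:
        if time.time() > deadline:
            raise TimeoutError('no video frame within %.1f s' % timeout)
        time.sleep(poll)  # yield the CPU between polls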
Example #2
# Imports assumed by this example; get_clothes_class, close_stream, model_path and
# encoded_words_dict_path are project-specific helpers/paths defined elsewhere.
import time
import cv2
from timeit import default_timer as timer
from EP_api import findrobotIP, Robot


def main():
    robot_ip = findrobotIP()
    robot = Robot(robot_ip)
    robot.startvideo()
    while robot.frame is None:
        pass
    robot._sendcommand('camera exposure high')

    # Init Variables
    img_counter = 0
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    count = 0

    ########## Use this to prompt for an input mission ########## 
    txt = input("input mission statement: ")
    print('Mission statement:', txt)

    '''
    Insert your clothing-article extractor here. We provide a simple library to
    convert plain text into the encoded format, in case you want to reuse your
    previous models. The lines below show how to use the provided extract_clothes
    class.

    e.g. desired_clothing = ['women dresses']
    '''
    extractor = get_clothes_class(model_path, encoded_words_dict_path)
    desired_clothing = extractor.process_input(txt)

    print('Extracted target clothing article:', desired_clothing)

    ### Initialise the robot in autonomous mode
    mode = 'auto'
    # Lift arm up to elevate the camera angle
    robot._sendcommand('robotic_arm moveto x 210 y 64')
    time.sleep(1)

    while True:

        cv2.namedWindow('Live video', cv2.WINDOW_NORMAL)
        cv2.imshow('Live video', robot.frame)

        # Key listener
        k = cv2.waitKey(16) & 0xFF


        ''' Toggle between manual and autonomous driving from the keyboard '''
        if k == ord('m'):
            print('manual mode activated')
            mode = 'manual'

        if k == ord('n'):
            print('autonomous mode activated')
            mode = 'auto'

        if k == 27: # press esc to stop
            close_stream(robot)
            cv2.destroyWindow("result")
            cv2.destroyWindow("Live video")
            break

        frame = robot.frame
        frame_h, frame_w, frame_channels = frame.shape

        '''
        Manual keyboard control of the robot
        '''
        if mode == 'manual':
            if k == ord('p'):
                robot.scan()

            if k == ord('w'):
                robot._sendcommand('chassis move x 0.2')
            elif k == ord('a'):
                robot._sendcommand('chassis move y -0.2')
            elif k == ord('s'):
                robot._sendcommand('chassis move x -0.2')
            elif k == ord('d'):
                robot._sendcommand('chassis move y 0.2')
            elif k == ord('q'):
                robot._sendcommand('chassis move z -5')
            elif k == ord('e'):
                robot._sendcommand('chassis move z 5')

            # elif k == ord('i'):  # up and down arrow keys sometimes don't work
            #     robot._sendcommand('gimbal move p 1')
            # elif k == ord('k'):
            #     robot._sendcommand('gimbal move p -1')
            # elif k == ord('j'):  # left and right arrow keys sometimes don't work
            #     robot._sendcommand('gimbal move y -1')
            # elif k == ord('l'):
            #     robot._sendcommand('gimbal move y 1')

            elif k == ord('r'):
                robot._sendcommand('robotic_arm recenter')
            elif k == ord('x'):
                robot._sendcommand('robotic_arm stop')
            elif k == ord('c'):
                robot._sendcommand('robotic_arm moveto x 210 y 44')
            elif k == ord('z'):
                robot._sendcommand('robotic_arm moveto x 92 y 90')
            elif k == ord('f'):
                robot._sendcommand('robotic_gripper open 1')
            elif k == ord('g'):
                robot._sendcommand('robotic_gripper close 1')
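            # A sketch of an equivalent, more compact binding table (same commands
            # as the elif chain above): a key -> command dict defined once outside
            # the loop, then dispatched with a single lookup:
            #
            #     KEY_COMMANDS = {
            #         ord('w'): 'chassis move x 0.2',
            #         ord('a'): 'chassis move y -0.2',
            #         ord('s'): 'chassis move x -0.2',
            #         ord('d'): 'chassis move y 0.2',
            #         ord('q'): 'chassis move z -5',
            #         ord('e'): 'chassis move z 5',
            #         ord('r'): 'robotic_arm recenter',
            #         ord('x'): 'robotic_arm stop',
            #         ord('c'): 'robotic_arm moveto x 210 y 44',
            #         ord('z'): 'robotic_arm moveto x 92 y 90',
            #         ord('f'): 'robotic_gripper open 1',
            #         ord('g'): 'robotic_gripper close 1',
            #     }
            #     if k in KEY_COMMANDS:
            #         robot._sendcommand(KEY_COMMANDS[k])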

        elif mode == 'auto':

            print('Switched to autonomous control')  # note: prints on every frame while in auto mode

            # image = Image.fromarray(frame.copy())

            '''
            Insert your model here
            '''
            # raw_image, outputs = model.predict(frame.copy())

            '''
            INSERT YOUR ROBOT RESPONSE MOVEMENT HERE
            '''
            # if len(outputs) > 0:
            #     cmd_str = move_robot(outputs)

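The move_robot call above is a placeholder for the reader's own response logic. A purely hypothetical sketch of such a helper, assuming outputs is a list of (label, x_center) detections in pixel coordinates and reusing the chassis command strings from the manual-control section:

def move_robot(outputs, frame_w=1280, deadband=100):
    """Hypothetical: map the first detection to a chassis command string."""
    label, x_center = outputs[0]       # assumed detection format: (label, x_center)
    offset = x_center - frame_w / 2
    if offset < -deadband:
        return 'chassis move z -5'     # target left of centre: rotate left
    if offset > deadband:
        return 'chassis move z 5'      # target right of centre: rotate right
    return 'chassis move x 0.2'        # roughly centred: creep forward

The caller would then send the returned string, e.g. robot._sendcommand(move_robot(outputs)).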
Example #3
from EP_api import Robot, findrobotIP
import time
import numpy as np

robot = Robot(findrobotIP())  # connect via router (networking mode)


def neeh():
    """Clack the gripper: open and close it twice in quick succession."""
    # robot._sendcommand('robotic_gripper open')
    # time.sleep(0.3)
    # robot._sendcommand('chassis move x 0.2 vxy 0.2')
    # time.sleep(1)
    robot._sendcommand('robotic_gripper open')
    time.sleep(0.4)
    robot._sendcommand('robotic_gripper close')
    time.sleep(0.3)
    robot._sendcommand('robotic_gripper open')
    time.sleep(0.3)
    robot._sendcommand('robotic_gripper close')
    time.sleep(0.3)
    # robot._sendcommand('robotic_gripper open')
    # time.sleep(0.3)
    # robot._sendcommand('robotic_gripper close')
    # time.sleep(0.3)

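The repeated open/close pairs in neeh (and its commented-out variants) could be factored into a small helper; a minimal sketch, assuming the same module-level robot and command strings (modulo neeh's slightly longer 0.4 s first pause):

def clack_gripper(times=2, pause=0.3):
    """Open and close the gripper `times` times, pausing between commands."""
    for _ in range(times):
        robot._sendcommand('robotic_gripper open')
        time.sleep(pause)
        robot._sendcommand('robotic_gripper close')
        time.sleep(pause)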

def funeral_dance():
    """Scripted dance routine: raise the arm, then move the chassis."""
    robot._sendcommand('robotic_arm moveto x 50 y 200')
    time.sleep(1)

    # turn right and slide forward