def main():
    # grab serial number of robot if required
    args = anki_vector.util.parse_command_args()

    # use AsyncRobot so that behaviors don't block
    robot = anki_vector.AsyncRobot(args.serial,
                                   request_control=False,
                                   default_logging=False)
    robot.connect()
    time.sleep(1)

    # request control and busy-wait until it is granted
    robot.conn.request_control()
    controlRequest = robot.conn.run_coroutine(
        robot.conn.control_granted_event.wait())
    while not controlRequest.done():
        pass

    # NOTE: the source is truncated here ("chargerAction = robot.behavior.get_");
    # a drive-off-charger action and a lift action are assumed so that both
    # futures referenced below exist
    chargerAction = robot.behavior.drive_off_charger()
    while not chargerAction.done():
        pass
    liftAction = robot.behavior.set_lift_height(0.0)
    while not liftAction.done():
        pass

    # release via robot.conn, matching the request above
    robot.conn.release_control()

    # clean up
    robot.disconnect()
def main():
    net = PoseEstimationWithMobileNet()
    checkpoint = torch.load("models/checkpoint_iter_370000.pth", map_location='cpu')
    load_state(net, checkpoint)
    net = net.cuda()

    done = threading.Event()
    with anki_vector.AsyncRobot() as robot:
        robot.camera.init_camera_feed()
        robot.camera.image_streaming_enabled()

        # prepare robot pose: head up, lift down
        robot.behavior.set_head_angle(degrees(25.0))
        robot.behavior.set_lift_height(0.0)

        # events for detection and new camera feed
        robot.events.subscribe(on_new_raw_camera_image,
                               events.Events.new_raw_camera_image, net)
        robot.events.subscribe_by_name(on_robot_observed_touch,
                                       event_name='touched')

        print("------ waiting for camera events, press ctrl+c to exit early ------")
        try:
            if not done.wait(timeout=600):
                print("------ Did not receive a new camera image! ------")
        except KeyboardInterrupt:
            pass
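# The two callbacks subscribed above are not defined in this snippet. A minimal
# sketch of their assumed shapes: the bodies, the event's image attribute, and
# the visibility of `done` are assumptions (anki_vector forwards extra
# subscribe() arguments, here `net`, to the handler):
def on_new_raw_camera_image(robot, event_type, event, net):
    # newest raw camera frame, assumed available as event.image (a PIL image)
    run_pose_estimation(net, event.image)  # hypothetical helper, not in the source

def on_robot_observed_touch(robot, event_type, event):
    # signal the waiting main thread to stop
    done.set()  # assumes `done` is reachable here (e.g., module-level)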
def generate_names(apps, schema_editor):
    """
    Helper function to populate names of animations and triggers
    and update their status
    """
    def __update_or_create(source, name):
        """ Helper function to create/update a single name index """
        source.objects.update_or_create(name=name,
                                        defaults={
                                            'name': name,
                                            'active': True
                                        })

    AnimationName = apps.get_model('blocks', 'AnimationName')
    AnimationName.objects.filter(active=True).update(active=False)
    AnimationTrigger = apps.get_model('blocks', 'AnimationTrigger')
    AnimationTrigger.objects.filter(active=True).update(active=False)

    with anki_vector.AsyncRobot() as robot:
        anim_request = robot.anim.load_animation_list()
        anim_request.result()
        for anim_name in robot.anim.anim_list:
            __update_or_create(AnimationName, anim_name)

        anim_trigger_request = robot.anim.load_animation_trigger_list()
        anim_trigger_request.result()
        for anim_trigger_name in robot.anim.anim_trigger_list:
            __update_or_create(AnimationTrigger, anim_trigger_name)
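# For context, a data-migration helper like this is normally wired in with
# Django's RunPython; a minimal sketch (the dependency entry is an assumption):
from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [('blocks', '0001_initial')]  # assumed prior migration
    operations = [
        # reverse step is a no-op, since the names can simply be regenerated
        migrations.RunPython(generate_names, migrations.RunPython.noop),
    ]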
def animation_list():
    with anki_vector.AsyncRobot(args.serial, behavior_control_level=None) as robot:
        anim_request = robot.anim.load_animation_list()
        anim_request.result()
        anim_names = robot.anim.anim_list
        return json.dumps(anim_names)
def main():
    global robot
    args = anki_vector.util.parse_command_args()
    # with anki_vector.AsyncRobot(args.serial, show_viewer=True, show_3d_viewer=True) as async_robot:
    robot = anki_vector.AsyncRobot(args.serial, show_viewer=True, show_3d_viewer=True)
    robot.connect()
    NewFSM.robot = robot
    # forward = Forward()
    # turn = Turn()
    # backward = Forward(-50)
    # speak = Say("Hi There")
    # takepic = TakePicture()
    # speak2 = Say("All done")
    # declare_failure = Say("I have failed but I am still the best")
    # displaypic = DisplayImageOnMonitor()
    # screenpic = DisplayImageOnScreen()
    # complete1 = CompletionTrans().add_sources(forward).add_destinations(turn)
    # complete2 = CompletionTrans().add_sources(turn).add_destinations(backward, speak)
    # complete3 = CompletionTrans().add_sources(speak).add_destinations(takepic)
    # dataTrans = DataTrans().add_sources(takepic).add_destinations(displaypic, screenpic)
    # timeTrans = TimeTrans(10).add_sources(displaypic).add_destinations(speak2)
    # failureTrans = FailureTrans().add_sources(forward, turn, backward, speak, takepic, speak2).add_destinations(declare_failure)
    # forward.start()
    setup()
    cli_loop()
    robot.disconnect()
def main(): with anki_vector.AsyncRobot("00a10a53", show_viewer=True) as robot: print("Driving off the charger") off_charger_future = robot.behavior.drive_off_charger() off_charger_future.result() while robot.world.connected_light_cube == None: print("Connecting to the cube") connect_future = robot.world.connect_cube() print(connect_future.result()) print(robot.world.connected_light_cube.is_visible) print("Looking for the cube") robot.behavior.set_head_angle(degrees(0)) ''' while not robot.world.connected_light_cube.is_visible: print(robot.world.connected_light_cube.is_visible) robot.motors.set_wheel_motors(10,-10) time.sleep(0.2) print("Cube found") robot.motors.set_wheel_motors(0,0) time.sleep(4) print(robot.world.connected_light_cube) print("Rolling the cube") roll_future = robot.behavior.roll_cube(robot.world.connected_light_cube) print(roll_future) print(roll_future.result()) ''' i = 0 while not robot.world.connected_light_cube.is_visible: print(robot.world.connected_light_cube.is_visible) turn_future = robot.behavior.turn_in_place(degrees(-15)) turn_future.result() time.sleep(1) if (i > 36): break i = i + 1 if (i <= 36): print("Cube found") print("Rolling the cube") roll_future = robot.behavior.roll_cube( robot.world.connected_light_cube) roll_future.result() else: print("Cube not found") print("Disconnecting from the cube") disconnect_future = robot.world.disconnect_cube() disconnect_future.result() print("Driving back to the charger") on_charger_future = robot.behavior.drive_on_charger() on_charger_future.result()
def animation_trigger_list():
    with anki_vector.AsyncRobot(args.serial, behavior_control_level=None) as robot:
        anim_trigger_request = robot.anim.load_animation_trigger_list()
        anim_trigger_request.result()
        anim_trigger_names = robot.anim.anim_trigger_list
        return Response(json.dumps(anim_trigger_names),
                        mimetype='application/json')
def main(): robot = anki_vector.AsyncRobot("009041bc", enable_face_detection=True) robot.connect() robot.camera.init_camera_feed() image = robot.camera.latest_image image = image.annotate_image() image.show() robot.camera.close_camera_feed() robot.disconnect()
def run():
    args = util.parse_command_args()
    with anki_vector.AsyncRobot(args.serial, enable_camera_feed=True) as robot:
        flask_app.remote_control_vector = RemoteControlVector(robot)
        robot.behavior.drive_off_charger()
        flask_helpers.run_flask(flask_app)
def main():
    args = anki_vector.util.parse_command_args()
    flag = True
    docking_result = None
    with anki_vector.AsyncRobot(args.serial) as robot:
        while True:
            testing_cube_pick_up_robust(robot)
            # testing_cube_pick_up(robot)
            print('Done')
            time.sleep(100000)
def __init__(self, robot_index):
    self.robot_name = vector_utils.get_robot_names()[robot_index]
    serial = vector_utils.get_robot_serials()[self.robot_name]
    # self.robot = anki_vector.AsyncRobot(serial=serial, default_logging=False)
    self.robot = anki_vector.AsyncRobot(
        serial=serial, default_logging=False,
        behavior_control_level=anki_vector.connection.ControlPriorityLevel.OVERRIDE_BEHAVIORS_PRIORITY)
    self.robot.connect()
    print('Connected to {}'.format(self.robot_name))
    self._reset_motors()
    self.robot_state = 'idle'
    self.stuck = False
def __init__(self):
    self._robot = anki_vector.AsyncRobot()
    self._state = None
    self._rnd = random.random()
    self._countdown = 10
    self._frequency = int(
        Configuration.get_value('status_checking_frequency', 0))
    self._timer = None
    if self._frequency > 0:
        self._update_refresh()
def connect():
    global vector
    global reserve_control
    log.debug("Connecting to Vector")
    vector = anki_vector.AsyncRobot()
    vector.connect()
    # reserve_control = anki_vector.behavior.ReserveBehaviorControl()
    atexit.register(exit)
    return vector
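# The exit() callback registered with atexit above is not part of the snippet;
# presumably it just closes the connection. A minimal sketch (the body is an
# assumption):
def exit():
    log.debug("Disconnecting from Vector")
    vector.disconnect()
# Note that this name shadows the exit() builtin; a name like cleanup() would
# be a safer choice.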
def run():
    args = util.parse_command_args()
    with anki_vector.AsyncRobot(args.serial,
                                enable_face_detection=True,
                                enable_custom_object_detection=True) as robot:
        flask_app.remote_control_vector = RemoteControlVector(robot)
        flask_app.display_debug_annotations = DebugAnnotations.ENABLED_ALL.value
        robot.camera.init_camera_feed()
        robot.behavior.drive_off_charger()
        robot.camera.image_annotator.add_annotator('robotState', RobotStateDisplay)
        flask_helpers.run_flask(flask_app)
def run():
    args = util.parse_command_args()
    # when audio is ready, add ", enable_audio_feed=True" below
    with anki_vector.AsyncRobot(args.serial, enable_camera_feed=True) as robot:
        robot.vision.enable_display_camera_feed_on_face()
        robot.vision.enable_face_detection(detect_faces=True,
                                           estimate_expression=True)
        flask_app.remote_control_vector = RemoteControlVector(robot)
        # robot.behavior.drive_off_charger()
        flask_helpers.run_flask(flask_app)
def main(args=None):
    # create connection to vector
    async_robot = anki_vector.AsyncRobot(enable_camera_feed=True)
    async_robot.connect()

    # initialize ros stuff
    rclpy.init(args=args)
    movement = Movement(async_robot)
    rclpy.spin(movement)

    # destroy node explicitly
    movement.destroy_node()
    rclpy.shutdown()
def video_feed(name):
    print(name)
    if name in app.Robots:
        print("Connection already opened\n")
        return Response(streaming_video(name),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
    else:
        print("Connecting to {} vector with serial : {}".format(name, ID[name]))
        robot = anki_vector.AsyncRobot(ID[name], enable_face_detection=True)
        app.Robots[name] = VectorRobot(robot)
        app.Robots[name].vector.vision.enable_face_detection(detect_faces=True,
                                                             detect_gaze=True)
        return Response(streaming_video(name),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
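# streaming_video() is referenced above but not shown. An MJPEG generator for
# this mimetype (multipart/x-mixed-replace) typically looks like the sketch
# below; the frame source and the JPEG re-encoding are assumptions:
import io
import time

def streaming_video(name):
    robot = app.Robots[name].vector
    while True:
        frame = robot.camera.latest_image  # PIL image from the camera feed
        buf = io.BytesIO()
        frame.save(buf, format='JPEG')
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + buf.getvalue() + b'\r\n')
        time.sleep(0.1)  # ~10 fps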
def run():
    args = anki_vector.util.parse_command_args()
    with anki_vector.AsyncRobot(serial=args.serial) as robot:
        ### PF Converging
        start_x, start_y, start_h = run_particle_filter(robot)
        start_x, start_y, start_h = (scale_factor * start_x,
                                     scale_factor * start_y,
                                     math.radians(start_h))
        i = 0

        ### Go to pick up zone
        # goal_x, goal_y = (8.5 * scale_factor, 9.5 * scale_factor)
        while True:
            # print('Starting from - x: {0:.2f}, y: {1:.2f}, h: {2:.2f}'.format(start_x, start_y, start_h))
            # TODO: Confirm this works
            end_pos, end_ang = rrt_move_to(robot, start_x, start_y, start_h,
                                           PICKUP_LOC.x, PICKUP_LOC.y)
            end_pos, end_ang = rrt_move_to(robot, PICKUP_LOC.x, PICKUP_LOC.y,
                                           end_ang, PICKUP_LOC.x, PICKUP_LOC.y + 1)
            if i == 0:
                # once done localizing and starting to pick stuff up
                cmap.add_obstacle(obstacle_nodes)
            i += 1

            ### Pick up cube
            thing = robot.behavior.say_text("ready to begin delivery")
            thing.result()
            prev_pose = saving_curr_pose(robot)
            pick_up_cube(robot)
            move_to_prev_pose = robot.behavior.go_to_pose(prev_pose)
            move_to_prev_pose.result()
            print('returned to previous pose')

            ### Go to drop off
            end_pos, end_ang = rrt_move_to(robot, end_pos.x,
                                           end_pos.y - 0.32 * scale_factor,
                                           end_ang, STORAGE_LOC.x, STORAGE_LOC.y)
            # end_pos, end_ang = rrt_move_to(robot, end_pos.x, end_pos.y, end_ang, STORAGE_LOC.x, STORAGE_LOC.y)
            print('placing down now')
            # drop = robot.behavior.set_lift_height(0.0)
            # drop.result()
            drop = robot.behavior.place_object_on_ground_here()
            drop.result()
            # prev_pose = saving_curr_pose(robot)
            # move_to_prev_pose = robot.behavior.go_to_pose(prev_pose)
            # move_to_prev_pose.result()
            # start_x, start_y, start_h = (end_pos.x - 0.15 * scale_factor, end_pos.y - 1.20 * scale_factor, end_ang)
            # start_x, start_y, start_h = (end_pos.x, end_pos.y, end_ang)
            # cmap.clear_obstacles()
            # end_pos, end_ang = rrt_move_to(robot, start_x, start_y, start_h, 13*25, 9*25)
            # print("Expected_x: ", end_pos.x, "\nExpected_y: ", end_pos.y, "\nExpected Angle: ", end_ang)
            # time.sleep(10000)
            # dif is 1.5 inch y and .5 x
            # start_x, start_y, start_h = (end_pos.x - 0.15 * scale_factor, end_pos.y - 1.20 * scale_factor, end_ang)
            start_x, start_y, start_h = (end_pos.x - 0.65 * scale_factor,
                                         end_pos.y - 0.9 * scale_factor,
                                         end_ang)
def run():
    args = util.parse_command_args()
    with anki_vector.AsyncRobot(
            args.serial,  # this is None or False, whatever, just not used
            enable_face_detection=False,  # if you want it, but I don't
            enable_custom_object_detection=True,  # for my own wall pieces
            show_3d_viewer=False  # kinda laggy
    ) as robot:
        object_helper.create_all_objects(robot)
        flask_app.remote_control_vector = RemoteControlVector(robot)
        flask_app.display_debug_annotations = DebugAnnotations.ENABLED_VISION.value
        robot.camera.init_camera_feed()
        # robot.behavior.drive_off_charger()
        robot.camera.image_annotator.add_annotator('robotState', RobotStateDisplay)
        flask_helpers.run_flask(flask_app)
def main():
    def on_robot_observed_face(robot, event_type, event):
        if event.name:
            FoundAFace.takeAction(robot, event.name)

    def on_robot_state(robot, event_type, event):
        global Status
        if robot.status.is_on_charger and Status.show_is_on_charger:
            if not Status.is_on_charger:
                print("Vector is currently on the charger.")
                # robot.conn.request_control(timeout=5.0)
                # robot.behavior.set_eye_color(0, 0)
                # robot.say_text("On the charger!")
                # robot.conn.release_control()
                Status.is_on_charger = True
        else:
            if Status.is_on_charger:
                print("Vector is running off battery power.")
                Status.is_on_charger = False
        if robot.status.are_motors_moving and Status.show_are_motors_moving:
            print("Vector is on the move.")

    # Start an async connection with the robot
    with anki_vector.AsyncRobot(anki_vector.util.parse_command_args().serial,
                                requires_behavior_control=False,
                                default_logging=False) as robot:
        # robot.conn.CONTROL_PRIORITY_LEVEL = 1
        robot.conn.request_control(timeout=5.0)
        robot.say_text("Vector+ 1.0").result()
        robot.conn.release_control()
        # FoundAFace.takeAction(robot, "jason")

        # bind the robot instance into the handlers, then subscribe
        on_robot_observed_face = functools.partial(on_robot_observed_face, robot)
        robot.events.subscribe(on_robot_observed_face, Events.robot_observed_face)
        on_robot_state = functools.partial(on_robot_state, robot)
        robot.events.subscribe(on_robot_state, Events.robot_state)

        # this just keeps the system running; everything else runs
        # off of event subscriptions so far
        while True:
            time.sleep(100)
def main():
    args = anki_vector.util.parse_command_args()
    with anki_vector.AsyncRobot(args.serial,
                                enable_face_detection=True,
                                show_viewer=True) as robot:
        robot.behavior.drive_off_charger()

        # If necessary, move Vector's Head and Lift to make it easy to see his face
        robot.behavior.set_head_angle(degrees(45.0))
        robot.behavior.set_lift_height(0.0)

        face_to_follow = None

        print("------ show vector your face, press ctrl+c to exit early ------")
        try:
            while True:
                turn_future = None
                if face_to_follow:
                    # start turning towards the face
                    turn_future = robot.behavior.turn_towards_face(face_to_follow)

                if not (face_to_follow and face_to_follow.is_visible):
                    # find a visible face, timeout if nothing found after a short while
                    for face in robot.world.visible_faces:
                        face_to_follow = face
                        break

                if turn_future is not None:
                    # Complete the turn action if one was in progress
                    turn_future.result()

                time.sleep(.1)
        except KeyboardInterrupt:
            pass
def main():
    # setup and connect to the robot
    # note the use of 'AsyncRobot' instead of 'Robot'
    # this allows code to continue while behaviors are running
    robot = anki_vector.AsyncRobot(default_logging=False)
    robot.connect()
    time.sleep(1)

    isOnCharger = (robot.status & IS_ON_CHARGER) == IS_ON_CHARGER
    if not isOnCharger:
        print('Vector needs to start on the charger')
        print('Put him there and re-run the program...')
        robot.disconnect()
        exit()

    # subscribe to observed object events
    robot.events.subscribe(on_robot_observed_object,
                           Events.robot_observed_object)

    print('Running away')
    action = robot.behavior.drive_off_charger()
    robotState = 'Leaving charger'

    # watch how the robot moves around while the code here is counting
    for i in range(0, 30):
        # print a counter
        print(i)

        # check our motion status
        isMoving = (robot.status & IS_MOVING) == IS_MOVING
        isOnCharger = (robot.status & IS_ON_CHARGER) == IS_ON_CHARGER

        # simple state machine
        # this runs every time the loop executes
        if robotState == 'Leaving charger':
            if action.done():
                robotState = 'Scared'
            else:
                print('...leaving')
                robotState = 'Leaving charger'
        elif robotState == 'Scared':
            print('Nah, going home')
            action = robot.behavior.drive_on_charger()
            robotState = 'Run away'
        elif robotState == 'Run away':
            if not action.done() and not isOnCharger:
                print('...going home')
                robotState = 'Run away'
            else:
                robotState = 'Home sweet home'
        elif robotState == 'Home sweet home':
            print('Time for a nap.')
            robotState = 'Nap time'
        elif robotState == 'Nap time':
            print('snore...')
            robotState = 'Nap time'
        else:
            print('Uh oh, this is a bad state.')
            print('Human assistance is needed.')

        # the loop executes every second
        time.sleep(1)

    # clean up
    robot.disconnect()
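# This example (and the variant further down) masks robot.status with
# IS_MOVING / IS_ON_CHARGER constants that the snippet never defines. A hedged
# sketch of how they might be declared; the values mirror the RobotStatus bit
# flags in Vector's protobuf interface, but verify them against your SDK
# version (newer releases expose robot.status.is_on_charger and
# robot.status.are_motors_moving properties instead):
IS_MOVING = 0x1         # assumed value
IS_ON_CHARGER = 0x1000  # assumed value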
import anki_vector
from anki_vector.util import degrees, distance_mm, speed_mmps
import time

with anki_vector.AsyncRobot("00a10a53") as robot:
    while True:
        print("Connecting to the cube")
        robot.world.connect_cube()
        if robot.world.connected_light_cube:
            break
        time.sleep(1)

    print("Picking the cube")
    pick_future = robot.behavior.pickup_object(
        robot.world.connected_light_cube)
    pick_future.result()

    print("Put the cube down")
    place_future = robot.behavior.place_object_on_ground_here()
    place_future.result()
print("Saturday") if (today.weekday() == 6): print("Sunday") day = int( input("Enter a Day of the week number, 0=M 1=T 2=W 3=Th 4=F 5=F 6=Sn: ")) hour = int(input("Enter an Hour: ")) minute = int(input("Enter a Minute: ")) #hour=int(21) #minute=int(46) while True: if day == int(today.weekday()) and hour == int( datetime.datetime.today().strftime("%H")) and minute == int( datetime.datetime.today().strftime("%M")): print("Alarm Raised") with anki_vector.AsyncRobot() as robot: for x in range(0, 10): datetime.datetime.now().strftime(('%H:%M')) robot.anim.play_animation_trigger('GreetAfterLongTime') robot.audio.stream_wav_file("vector_alert.wav", 75).result() robot.behavior.set_head_angle(degrees(30.0)) robot.behavior.set_lift_height(0.0) time1 = datetime.datetime.now().strftime(('%H:%M')) face_sum = (str(time1)) text_to_draw = face_sum face_image = make_text_image(text_to_draw, 20, 5, font_file) args = anki_vector.util.parse_command_args()
# Read settings
env = Env()
env.read_env()
server = env('SERVER')
port = int(env('PORT'))

# Read robot list
robots = []
with open(".robots") as robotfile:
    robots = robotfile.readlines()
if not robots:
    raise Exception("No robots in .robots file.")

# Connect to robots
temp = [anki_vector.AsyncRobot(robot.strip()) for robot in robots]
robots = []
for robot in temp:
    try:
        robot.connect()
        robots.append(robot)
    except Exception:
        pass

# Setup server connection
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('localhost', 4791))
sock.settimeout(2)

while True:
    pass  # loop body not included in this excerpt
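# Given the UDP socket with a 2-second timeout above, the missing loop body is
# plausibly a receive loop along these lines; the command handling shown is
# purely hypothetical:
while True:
    try:
        data, addr = sock.recvfrom(1024)
    except socket.timeout:
        continue
    command = data.decode().strip()
    for robot in robots:
        robot.behavior.say_text(command)  # hypothetical handling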
def main():
    # setup and connect to the robot
    # note the use of 'AsyncRobot' instead of 'Robot'
    # this allows code to continue while behaviors are running

    # grab serial number of robot if required
    args = anki_vector.util.parse_command_args()

    # use AsyncRobot so that behaviors don't block
    robot = anki_vector.AsyncRobot(args.serial,
                                   request_control=False,
                                   default_logging=False)
    robot.connect()
    time.sleep(1)

    isOnCharger = (robot.status & IS_ON_CHARGER) == IS_ON_CHARGER
    if not isOnCharger:
        print('Vector needs to start on the charger')
        print('Put him there and re-run the program...')
        robot.disconnect()
        exit()

    # subscribe to observed object events
    robot.events.subscribe(on_robot_observed_object,
                           Events.robot_observed_object)

    # request control and wait until we have it
    print('Requesting control')
    robot.conn.request_control()
    controlRequest = robot.conn.run_coroutine(
        robot.conn.control_granted_event.wait())
    while not controlRequest.done():
        print('...waiting for control')
        time.sleep(1)

    print('Running away')
    action = robot.behavior.drive_off_charger()
    robotState = 'Leaving charger'

    # watch how the robot moves around while the code here is counting
    for i in range(0, 30):
        # print a counter
        print(i)

        # check our motion status
        isMoving = (robot.status & IS_MOVING) == IS_MOVING
        isOnCharger = (robot.status & IS_ON_CHARGER) == IS_ON_CHARGER

        # simple state machine
        # this runs every time the loop executes
        if robotState == 'Leaving charger':
            if action.done():
                robotState = 'Scared'
            else:
                print('...leaving')
                robotState = 'Leaving charger'
        elif robotState == 'Scared':
            print('Nah, going home')
            action = robot.behavior.drive_on_charger()
            robotState = 'Run away'
        elif robotState == 'Run away':
            if not action.done() and not isOnCharger:
                print('...going home')
                robotState = 'Run away'
            else:
                robotState = 'Home sweet home'
        elif robotState == 'Home sweet home':
            print('Time for a nap.')
            robotState = 'Nap time'
        elif robotState == 'Nap time':
            print('snore...')
            robotState = 'Nap time'
        else:
            print('Uh oh, this is a bad state.')
            print('Human assistance is needed.')

        # the loop executes every second
        time.sleep(1)

    # clean up
    print('Releasing control')
    robot.conn.release_control()  # release via robot.conn, matching the request
    robot.disconnect()
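# A note on the polling loop above: run_coroutine() returns a
# concurrent.futures future, so the same grant can be awaited without polling.
# A minimal sketch of the same steps using the future's blocking result()
# (the 10-second timeout is an arbitrary choice, not from the source):
robot.conn.request_control()
grant = robot.conn.run_coroutine(robot.conn.control_granted_event.wait())
grant.result(timeout=10)  # blocks until control is granted, or raises on timeout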
def main():
    # this will be the OpenCV version of robot.camera.latest_image
    global cvImage
    global cvImageId
    global trackerImage
    global puck
    global ball
    global camera
    global vectorMaskImage

    cvImageId = 0

    # open the video window
    cv.namedWindow('Vector', cv.WINDOW_NORMAL)
    cv.namedWindow('Tracker', cv.WINDOW_NORMAL)

    # read in the mask for Vector's lift (all the way down)
    vectorMaskImage = cv.imread('Vector_Mask.png')

    # use AsyncRobot so that behaviors don't block
    robot = anki_vector.AsyncRobot(enable_camera_feed=True,
                                   default_logging=Debug)
    robot.connect()
    time.sleep(1)

    done = False
    displayImage = False

    headAction = robot.behavior.set_head_angle(HeadTilt)
    while not headAction.done():
        pass
    liftAction = robot.behavior.set_lift_height(LiftHeight)
    while not liftAction.done():
        pass

    while not done:
        # check to see if we have a new image
        if robot.camera.latest_image_id != cvImageId:
            # if we do, convert it to an OpenCV image
            # and keep track of the id
            pilImage = robot.camera.latest_image
            cvImageId = robot.camera.latest_image_id
            cvImage = cv.cvtColor(np.array(pilImage), cv.COLOR_RGB2BGR)
            # display it
            displayImage = True

        if False:
            # locate the puck (if we can see it)
            findPuck()

            # display the image with any overlays
            # puck overlay
            if puck.found:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv.circle(cvImage, (int(puck.x), int(puck.y)),
                          int(puck.radius), (0, 255, 255), 2)
                cv.circle(cvImage, puck.center, 5, (0, 0, 255), -1)

        if True:
            # locate the ball (if we can see it)
            findBall()

            # display the image with any overlays
            # ball overlay
            if ball.found:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv.circle(cvImage, (int(ball.x), int(ball.y)),
                          int(ball.radius), (0, 255, 255), 2)
                cv.circle(cvImage, ball.center, 5, (0, 0, 255), -1)

        # cvImage = cv.bitwise_and(cvImage, vectorMaskImage)

        if displayImage:
            cv.imshow('Vector', cvImage)
            cv.imshow('Tracker', trackerImage)

        # waitKey performs the display update
        # and checks to see if we hit the 'q' key
        c = cv.waitKey(MainLoopDelay)
        if chr(c & 0xff) == 'q':
            done = True

    allDone(robot)
import anki_vector
import cv_bridge
import numpy
import rospy
from sensor_msgs.msg import Image


class Camera(object):
    def __init__(self, async_robot, publish_rate=10):
        self.async_robot = async_robot
        self.rate = rospy.Rate(publish_rate)
        self.image_publisher = rospy.Publisher("~camera", Image, queue_size=1)
        self.publish_camera_feed()

    def publish_camera_feed(self):
        bridge = cv_bridge.CvBridge()
        while not rospy.is_shutdown():
            # convert PIL.Image to ROS Image
            image = bridge.cv2_to_imgmsg(
                numpy.asarray(self.async_robot.camera.latest_image),
                encoding="rgb8")
            self.image_publisher.publish(image)
            # make sure to publish at required rate
            self.rate.sleep()


if __name__ == "__main__":
    rospy.init_node("camera")
    async_robot = anki_vector.AsyncRobot(enable_camera_feed=True)
    async_robot.connect()
    Camera(async_robot)
    rospy.spin()
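# Quick way to sanity-check the feed: a throwaway subscriber that logs frame
# sizes. The node and topic names here are assumptions; the private "~camera"
# topic resolves to /camera/camera given the node name used above.
import rospy
from sensor_msgs.msg import Image

def on_image(msg):
    rospy.loginfo("got frame %dx%d", msg.width, msg.height)

rospy.init_node("camera_check")
rospy.Subscriber("/camera/camera", Image, on_image)
rospy.spin()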
class Drive(object):
    # (excerpt: __init__ and the left-wheel callback are not shown in the source)

    def rwheel_desired_rate_cb(self, msg):
        self.rwheel_speed_mm_sec = msg.data
        self.async_robot.motors.set_wheel_motors(self.lwheel_speed_mm_sec,
                                                 self.rwheel_speed_mm_sec)

    def publish_estimated_encoder_ticks(self):
        ''' publishing encoder ticks is required in order to use diff_drive pkg '''
        lwheel_ticks_total = 0
        rwheel_ticks_total = 0
        while not rospy.is_shutdown():
            if self.async_robot.left_wheel_speed_mmps > 0:
                lwheel_ticks_total += self.async_robot.left_wheel_speed_mmps / self.publish_rate
            if self.async_robot.right_wheel_speed_mmps > 0:
                rwheel_ticks_total += self.async_robot.right_wheel_speed_mmps / self.publish_rate
            # publish number of estimated ticks for each wheel
            # in a ratio of 1000 ticks per meter
            self.lwheel_ticks_publisher.publish(data=int(lwheel_ticks_total))
            self.rwheel_ticks_publisher.publish(data=int(rwheel_ticks_total))
            self.rate.sleep()  # publish at specified rate


if __name__ == "__main__":
    rospy.init_node("drive")
    async_robot = anki_vector.AsyncRobot()
    async_robot.connect()
    Drive(async_robot)
    rospy.spin()
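# The "1000 ticks per meter" figure falls out of the integration above: each
# cycle adds speed (mm/s) divided by rate (Hz), i.e. the millimeters traveled
# that cycle, and counting one tick per millimeter gives 1000 ticks per meter.
speed_mm_s = 50.0                        # example: wheel running at 50 mm/s
rate_hz = 10.0                           # default publish rate
ticks_per_cycle = speed_mm_s / rate_hz   # 5 mm -> 5 ticks this cycle
# after one second (10 cycles): 50 mm traveled, 50 ticks; one meter -> 1000 ticks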
def control():
    args = util.parse_command_args()
    robot = anki_vector.AsyncRobot(args.serial, enable_camera_feed=True)
    robot.connect()
    robot.behavior.drive_off_charger()
    flask_app.remote_control_vector = RemoteControlVector(robot)
    return """
    <html>
        <head>
            <meta charset="utf-8">
            <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
            <meta name="theme-color" content="#222D32">
            <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
            <link href='https://fonts.googleapis.com/css?family=Ubuntu' rel='stylesheet'>
            <link rel="stylesheet" type="text/css" href="static/css/main.css">
            <title>VectorCloud - Remote Control</title>
        </head>
        <body>
            <header class="site-header">
                <nav class="navbar navbar-expand-md navbar-dark bg-steel fixed-top">
                    <div class="container">
                        <a class="navbar-brand mr-4" href="/home"><img src="static/icons/vectorcloud.svg" width="35" height="35" class="text-center align-top" data-toggle="tooltip" data-placement="auto" title="Home" alt=""></a>
                        <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarToggle" aria-controls="navbarToggle" aria-expanded="false" aria-label="Toggle navigation">
                            <span class="navbar-toggler-icon"></span>
                        </button>
                        <div class="collapse navbar-collapse" id="navbarToggle">
                            <div class="navbar-nav mr-auto">
                            </div>
                            <!-- Navbar Right Side -->
                            <div class="navbar-nav">
                            </div>
                        </div>
                    </div>
                </nav>
            </header>
            <table>
                <tr>
                    <td valign=top>
                        <div id="vectorImageMicrosoftWarning" style="display: none;color: #ff9900; text-align: center;">Video feed performance is better in Chrome or Firefox due to mjpeg limitations in this browser</div>
                        <img src="vectorImage" id="vectorImageId" width=640 height=480>
                        <div id="DebugInfoId"></div>
                    </td>
                    <td width=30></td>
                    <td valign=top>
                        <h2>Controls:</h2>
                        <h3>Driving:</h3>
                        <b>W A S D</b> : Drive Forwards / Left / Back / Right<br><br>
                        <b>Q</b> : Toggle Mouse Look: <button id="mouseLookId" onClick=onMouseLookButtonClicked(this) style="font-size: 14px">Default</button><br>
                        <b>Mouse</b> : Move in browser window to aim<br>
                        (steer and head angle)<br>
                        (similar to an FPS game)<br>
                        <h3>Head:</h3>
                        <b>T</b> : Move Head Up<br>
                        <b>G</b> : Move Head Down<br>
                        <h3>Lift:</h3>
                        <b>R</b> : Move Lift Up<br>
                        <b>F</b> : Move Lift Down<br>
                        <h3>General:</h3>
                        <b>Shift</b> : Hold to Move Faster (Driving, Head and Lift)<br>
                        <b>Alt</b> : Hold to Move Slower (Driving, Head and Lift)<br>
                        <b>P</b> : Toggle Free Play mode: <button id="freeplayId" onClick=onFreeplayButtonClicked(this) style="font-size: 14px">Default</button><br>
                        <h3>Play Animations</h3>
                        <b>0 .. 9</b> : Play Animation mapped to that key<br>
                        <h3>Talk</h3>
                        <b>Space</b> : Say <input type="text" name="sayText" id="sayTextId" value=\"""" + flask_app.remote_control_vector.text_to_say + """\" onchange=handleTextInput(this)>
                    </td>
                    <td width=30></td>
                    <td valign=top>
                        <h2>Animation key mappings:</h2>
                        """ + get_anim_sel_drop_downs() + """<br>
                    </td>
                </tr>
            </table>

            <script type="text/javascript">
                var gLastClientX = -1
                var gLastClientY = -1
                var gIsMouseLookEnabled = """ + to_js_bool_string(_is_mouse_look_enabled_by_default) + """