def run():
    """Parse the SDK command-line arguments, then serve the Flask app on all interfaces, port 5000."""
    # Parsing happens for its validation side effects; the parsed args are not used here.
    util.parse_command_args()
    flask_helpers.run_flask(
        app,
        host_ip="0.0.0.0",
        host_port=5000,
        open_page=False,
    )
def main():
    """Connect to Vector, announce readiness aloud, then fetch the next customer from Salesforce."""
    args = parse_command_args()
    with AsyncRobot(args.serial, enable_face_detection=True) as vector:
        # Audible cue that the robot is up before the Salesforce round-trip.
        vector.behavior.say_text("I am ready for the next customer.")
        # Credentials come from the environment; a missing variable raises KeyError here.
        sf_session = Salesforce(
            username=os.environ['SF_USERNAME'],
            password=os.environ['SF_PASSWORD'],
            security_token=os.environ['SF_TOKEN'],
        )
        result = sf_session.apexecute('nextcustomer', method='GET', data=None)
def run():
    """Open an async connection to Vector and expose it through the Flask remote-control app."""
    cli_args = util.parse_command_args()
    with anki_vector.AsyncRobot(cli_args.serial, enable_camera_feed=True) as bot:
        # Hand the live robot handle to the web app before it starts serving requests.
        flask_app.remote_control_vector = RemoteControlVector(bot)
        bot.behavior.drive_off_charger()
        flask_helpers.run_flask(flask_app)
def run():
    """Announce the local URL and start the Flask server on every interface at PORT."""
    # Parsed for CLI validation side effects only.
    util.parse_command_args()
    print(
        "Server running on http://localhost:%d - open this page in your favourite webbrowser!"
        % PORT
    )
    # open_page=False: the user opens the printed URL themselves.
    flask_helpers.run_flask(app, host_ip="0.0.0.0", host_port=PORT, open_page=False)
def parse_command_args():
    """Build the CLI parser (train/predict mode flags plus optional model paths) and delegate to the SDK parser."""
    arg_parser = argparse.ArgumentParser()
    # Boolean mode switches.
    for flag in ("--train", "--predict"):
        arg_parser.add_argument(flag, action="store_true")
    # Optional path arguments; each defaults to None when omitted.
    for option in ("--dataset_root_folder", "--model_config", "--model_weights"):
        arg_parser.add_argument(option, nargs='?', default=None)
    # The SDK helper adds its own arguments (e.g. serial) before parsing.
    return util.parse_command_args(arg_parser)
def run():
    """Connect to Vector with the camera feed enabled and serve the remote-control web app."""
    cli_args = util.parse_command_args()
    # TODO: pass enable_audio_feed=True here once the app consumes audio.
    with anki_vector.AsyncRobot(cli_args.serial, enable_camera_feed=True) as bot:
        # Mirror the camera on Vector's face and turn on face/expression detection.
        bot.vision.enable_display_camera_feed_on_face()
        bot.vision.enable_face_detection(detect_faces=True, estimate_expression=True)
        flask_app.remote_control_vector = RemoteControlVector(bot)
        flask_helpers.run_flask(flask_app)
def run():
    """Start the remote-control web app with face/custom-object detection and all debug annotations on."""
    cli_args = util.parse_command_args()
    with anki_vector.AsyncRobot(cli_args.serial,
                                enable_face_detection=True,
                                enable_custom_object_detection=True) as bot:
        flask_app.remote_control_vector = RemoteControlVector(bot)
        flask_app.display_debug_annotations = DebugAnnotations.ENABLED_ALL.value
        bot.camera.init_camera_feed()
        bot.behavior.drive_off_charger()
        # Overlay robot state on the camera frames served to the browser.
        bot.camera.image_annotator.add_annotator('robotState', RobotStateDisplay)
        flask_helpers.run_flask(flask_app)
def run():
    """Connect to Vector, record connection/firmware info, wire up helpers, then start the server."""
    args = util.parse_command_args()
    global robot
    with anki_vector.Robot(args.serial,
                           enable_custom_object_detection=True,
                           enable_face_detection=True) as robot:
        global animation_list, active_viewer, os_info
        # Comma-separated connection/firmware summary for diagnostics.
        os_info = f"{robot.conn.name},{robot.conn.host},{robot.get_version_state().os_version}"
        animation_list = init_animate(robot)               # available animations
        active_viewer = activate_viewer_if_enabled(robot)  # camera and keyboard controls
        monitor(robot, q)         # event monitor
        activate_controls(robot)  # game controller
        start_server()
def run():
    """Connect to Vector with camera and detection features enabled, then launch the control server."""
    cli_args = util.parse_command_args()
    with anki_vector.Robot(cli_args.serial,
                           enable_camera_feed=True,
                           enable_custom_object_detection=True,
                           enable_face_detection=True) as bot:
        global animation_list, active_viewer
        animation_list = init_animate(bot)               # available animations
        active_viewer = activate_viewer_if_enabled(bot)  # camera and keyboard controls
        monitor(bot, q)         # event monitor
        activate_controls(bot)  # game controller
        start_server()
def run():
    """Create the custom wall objects, then run the remote-control web app with vision debug annotations."""
    cli_args = util.parse_command_args()
    with anki_vector.AsyncRobot(
            cli_args.serial,
            enable_face_detection=False,          # face tracking not needed here
            enable_custom_object_detection=True,  # required for the custom wall pieces
            show_3d_viewer=False                  # 3D viewer disabled (performance)
    ) as bot:
        object_helper.create_all_objects(bot)
        flask_app.remote_control_vector = RemoteControlVector(bot)
        flask_app.display_debug_annotations = DebugAnnotations.ENABLED_VISION.value
        bot.camera.init_camera_feed()
        # Overlay robot state on the camera frames served to the browser.
        bot.camera.image_annotator.add_annotator('robotState', RobotStateDisplay)
        flask_helpers.run_flask(flask_app)
def main():
    """Follow a named face with Vector and provide customer support after each turn.

    Runs forever: while a known face is tracked, turn toward it; when the turn
    future resolves, hand the face off to `provide_customer_support`. When the
    face is lost (or none has been seen yet), try to re-acquire a named face
    from the robot's currently visible faces.
    """
    args = parse_command_args()
    with AsyncRobot(args.serial, enable_face_detection=True) as vector:
        vector_setup(vector)
        face_to_follow = None
        turn_future = None
        while True:
            if face_to_follow:
                # AsyncRobot returns a future; keep it so we can wait on it below.
                turn_future = vector.behavior.turn_towards_face(face_to_follow)
            if not (face_to_follow and face_to_follow.is_visible):
                # Lost (or never had) a face: try to re-acquire a named one.
                face_to_follow = get_named_face(vector.world.visible_faces)
                time.sleep(0.1)
            # Fix: identity comparison per PEP 8 (was `turn_future != None`).
            if turn_future is not None:
                turn_future.result()  # block until the turn completes
                provide_customer_support(vector, face_to_follow)
            time.sleep(0.1)  # throttle the polling loop
# NOTE(review): this line is extraction-mangled output. Everything before the
# `if __name__ == '__main__':` guard is the tail of an enclosing async coroutine
# (it contains `await`; presumably the `map_explorer` invoked by the guard, whose
# `def` is not visible in this chunk). Re-indenting it without the missing
# enclosure would be guesswork, so the code is kept byte-identical; only these
# review comments are added. The guard itself connects to the robot, drives off
# the charger, and runs the coroutine to completion on the default event loop.
nav_direction = nav_point_delta.normalized # Convert the nav_direction into a turn angle relative to the robot's current facing. robot_forward = Vector3(*robot.pose.rotation.to_matrix().forward_xyz).normalized turn_angle = acos(nav_direction.dot(robot_forward)) if nav_direction.cross(robot_forward).z > 0: turn_angle = -turn_angle # Turn toward the nav point, and drive to it. robot.behavior.turn_in_place(angle=radians(turn_angle), speed=degrees(EXPLORE_TURN_SPEED_DPS)) robot.behavior.drive_straight(distance=distance_mm(nav_distance), speed=speed_mmps(EXPLORE_DRIVE_SPEED_MMPS)) if PROXIMITY_EXPLORATION_SHUTDOWN_DELAY_S == 0.0: while True: await asyncio.sleep(1.0) else: print('finished exploring - waiting an additional {0} seconds, then shutting down'.format(PROXIMITY_EXPLORATION_SHUTDOWN_DELAY_S)) await asyncio.sleep(PROXIMITY_EXPLORATION_SHUTDOWN_DELAY_S) if __name__ == '__main__': # Connect to the robot args = parse_command_args() with anki_vector.Robot(args.serial, show_viewer=True, enable_nav_map_feed=False) as robotInstance: robotInstance.viewer_3d.show(False) robotInstance.behavior.drive_off_charger() loop = asyncio.get_event_loop() loop.run_until_complete(map_explorer(robotInstance))
# NOTE(review): `control()` below is a Flask-style route collapsed onto single
# lines by extraction. It parses the CLI args, opens an AsyncRobot connection,
# drives off the charger, registers the robot with the web app, then returns a
# large triple-quoted HTML/JS page built by string concatenation. The final
# `"""` at the end opens a string literal whose remainder lies OUTSIDE this
# chunk (the function is truncated mid-literal), so any reformatting would
# corrupt the runtime string content. The code is therefore kept byte-identical;
# only these review comments are added.
def control(): args = util.parse_command_args() robot = anki_vector.AsyncRobot(args.serial, enable_camera_feed=True) robot.connect() robot.behavior.drive_off_charger() flask_app.remote_control_vector = RemoteControlVector(robot) return """ <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <meta name="theme-color" content="#222D32"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous"> <link href='https://fonts.googleapis.com/css?family=Ubuntu' rel='stylesheet'> <link rel="stylesheet" type="text/css" href="static/css/main.css"> <title>VectorCloud - Remote Control</title> </head> <body> <header class="site-header"> <nav class="navbar navbar-expand-md navbar-dark bg-steel fixed-top"> <div class="container"> <a class="navbar-brand mr-4" href="/home"><img src="static/icons/vectorcloud.svg" width="35" height="35" class="text-center align-top" data-toggle="tooltip" data-placement="auto" title="Home" alt=""></a> <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarToggle" aria-controls="navbarToggle" aria-expanded="false" aria-label="Toggle navigation"> <span class="navbar-toggler-icon"></span> </button> <div class="collapse navbar-collapse" id="navbarToggle"> <div class="navbar-nav mr-auto"> </div> <!-- Navbar Right Side --> <div class="navbar-nav"> </div> </div> </div> </nav> </header> <table> <tr> <td valign = top> <div id="vectorImageMicrosoftWarning" style="display: none;color: #ff9900; text-align: center;">Video feed performance is better in Chrome or Firefox due to mjpeg limitations in this browser</div> <img src="vectorImage" id="vectorImageId" width=640 height=480> <div id="DebugInfoId"></div> </td> <td width=30></td> <td valign=top> <h2>Controls:</h2> <h3>Driving:</h3> <b>W A S D</b> : Drive Forwards / 
Left / Back / Right<br><br> <b>Q</b> : Toggle Mouse Look: <button id="mouseLookId" onClick=onMouseLookButtonClicked(this) style="font-size: 14px">Default</button><br> <b>Mouse</b> : Move in browser window to aim<br> (steer and head angle)<br> (similar to an FPS game)<br> <h3>Head:</h3> <b>T</b> : Move Head Up<br> <b>G</b> : Move Head Down<br> <h3>Lift:</h3> <b>R</b> : Move Lift Up<br> <b>F</b>: Move Lift Down<br> <h3>General:</h3> <b>Shift</b> : Hold to Move Faster (Driving, Head and Lift)<br> <b>Alt</b> : Hold to Move Slower (Driving, Head and Lift)<br> <b>P</b> : Toggle Free Play mode: <button id="freeplayId" onClick=onFreeplayButtonClicked(this) style="font-size: 14px">Default</button><br> <h3>Play Animations</h3> <b>0 .. 9</b> : Play Animation mapped to that key<br> <h3>Talk</h3> <b>Space</b> : Say <input type="text" name="sayText" id="sayTextId" value=""" " + flask_app.remote_control_vector.text_to_say + " """ onchange=handleTextInput(this)> </td> <td width=30></td> <td valign=top> <h2>Animation key mappings:</h2> """ + get_anim_sel_drop_downs() + """<br> </td> </tr> </table> <script type="text/javascript"> var gLastClientX = -1 var gLastClientY = -1 var gIsMouseLookEnabled = """ + to_js_bool_string( _is_mouse_look_enabled_by_default) + """
def hold_control():
    """Reserve Vector's behavior control for the SDK until the user presses Enter."""
    cli_args = util.parse_command_args()
    # Control is released automatically when the context manager exits.
    with behavior.ReserveBehaviorControl(cli_args.serial):
        input("Vector behavior control reserved for SDK. Hit 'Enter' to release control.")
class Vector:
    """A thin wrapper over the anki_vector SDK that simplifies common robot actions."""

    # NOTE(review): both of these run at class-definition (import) time and are
    # shared by every instance — confirm that is intended.
    args = parse_command_args()
    robot = Robot(args.serial)

    def __init__(self):
        """Connect to the robot and drive it off its charger."""
        self.connect()
        self.robot.behavior.drive_off_charger()

    def connect(self):
        """Open the SDK connection."""
        self.robot.connect()

    def disconnect(self):
        """Close the SDK connection."""
        self.robot.disconnect()

    def connect_cube(self) -> bool:
        """Ensure a light cube is connected.

        Returns the connected cube object (truthy) or None — note the `bool`
        annotation is kept for interface compatibility even though the actual
        return is the cube handle.
        """
        if not self.robot.world.connected_light_cube:
            self.robot.world.connect_cube()
        return self.robot.world.connected_light_cube

    @staticmethod
    def percentage_value(percentage: int, min_value: int, max_value: int) -> int:
        """Map a 0-100 percentage (clamped) onto the [min_value, max_value] range."""
        clamped = max(0, min(100, percentage))
        span = max_value - min_value
        return min_value + clamped * span // 100

    def wheels_drive(self, distance: float, speed_percentage: int):
        """Drive straight `distance` mm at a speed given as a 0-100 percentage of 1-220 mm/s."""
        speed = self.percentage_value(percentage=speed_percentage,
                                      min_value=1, max_value=220)
        self.robot.behavior.drive_straight(distance_mm(distance), speed_mmps(speed))

    def wheels_turn(self, angle: float):
        """Turn in place by `angle` degrees."""
        self.robot.behavior.turn_in_place(degrees(angle))

    def head_angle(self, angle_percentage: int):
        """Set the head angle from a 0-100 percentage of the -22..45 degree range."""
        target = self.percentage_value(percentage=angle_percentage,
                                       min_value=-22, max_value=45)
        self.robot.behavior.set_head_angle(degrees(target))

    def lift_height(self, height_percentage: int):
        """Set the lift height from a 0-100 percentage (SDK expects a 0.0-1.0 fraction)."""
        self.robot.behavior.set_lift_height(height_percentage / 100)

    def speak_text(self, text: str):
        """Make the robot say `text`."""
        self.robot.behavior.say_text(text)

    def play_animation(self, animation: str):
        """Play the named animation trigger."""
        self.robot.anim.play_animation_trigger(animation)

    def look_around(self):
        """Start the look-around-in-place behavior."""
        self.robot.behavior.look_around_in_place()

    def drive_to_cube(self):
        """Approach the connected cube to 70 mm, if it is connected and visible."""
        cube = self.robot.world.connected_light_cube
        if cube and self.robot.world.light_cube.is_visible:
            self.robot.behavior.go_to_object(target_object=cube,
                                             distance_from_object=distance_mm(70))

    def dock_with_cube(self):
        """Dock with the connected cube (up to 3 retries), if connected and visible."""
        cube = self.robot.world.connected_light_cube
        if cube and self.robot.world.light_cube.is_visible:
            outcome = self.robot.behavior.dock_with_cube(target_object=cube,
                                                         num_retries=3)
            print(outcome)

    def pickup_cube(self):
        """Full pickup sequence: connect, search for, then lift the cube."""
        self.connect_cube()
        self.observe_cube()
        self.pickup_object()

    def pickup_object(self):
        """Pick up the connected cube (up to 3 retries), if connected and visible."""
        cube = self.robot.world.connected_light_cube
        if cube and self.robot.world.light_cube.is_visible:
            self.robot.behavior.pickup_object(target_object=cube, num_retries=3)

    def put_down_cube(self):
        """Place the carried block on the ground, if one is being carried."""
        if self.robot.status.is_carrying_block:
            self.robot.behavior.place_object_on_ground_here()

    def observe_cube(self) -> bool:
        """Search for the cube with up to 7 turn animations; return whether it is visible."""
        attempts_remaining = 7
        while not self.robot.world.light_cube.is_visible and attempts_remaining:
            print('cube seen: {}, attemps left: {}'.format(
                self.robot.world.light_cube.is_visible, attempts_remaining))
            self.play_animation('FindCubeTurns')
            attempts_remaining -= 1
        # Celebrate or concede depending on the final visibility state.
        if self.robot.world.light_cube.is_visible:
            self.play_animation('FindCubeReactToCube')
        else:
            self.play_animation('FetchCubeFailure')
        return self.robot.world.light_cube.is_visible