コード例 #1
0
def testing_statement_and_rotation(robot):
    """Announce that the particle filter converged, then do two in-place turns.

    Each SDK call returns a future; `.result()` blocks so the actions run
    strictly in sequence.
    """
    robot.behavior.say_text("Particle Filter has converged").result()
    # A large turn followed by a small one, waiting for each to finish.
    for turn_angle in (46, 27):
        robot.behavior.turn_in_place(degrees(turn_angle)).result()
コード例 #2
0
ファイル: main.py プロジェクト: sebastiankruk/V4Vector
    def __find_faces(self):
        """Extended version of the find-faces algorithm.

        When rotation is allowed: greet (only if no face has been detected
        yet), clear stale detections, drive off the charger, and turn the
        head/body by random amounts to scan for humans.  Always re-schedules
        itself to run again after `timeout` seconds.
        """
        timeout = 10
        if self.can_rotate:
            # BUG FIX: original used `len(...) is 0` — identity comparison
            # against an int literal (undefined behavior across interpreters);
            # use container truthiness instead.
            if not self.detected_faces:
                self.robot.behavior.say_text(random.choice(
                    list(V4Vector.ANYONE_HERE)),
                                             duration_scalar=1.5)

            self.__clean_up_detected_faces()
            self.robot.behavior.drive_off_charger()

            head_angle = random.randint(35, 45)
            rotate_angle = random.randint(30, 90)

            # Track cumulative heading, normalised to [0, 360).
            self.rotation += rotate_angle
            self.rotation %= 360

            print(
                f"------ Vector will look for humans (rotation: {self.rotation}, rotate: {rotate_angle}, head: {head_angle}, timeout: {timeout})! ------"
            )

            self.robot.behavior.set_head_angle(degrees(head_angle))
            self.robot.behavior.turn_in_place(degrees(rotate_angle),
                                              angle_tolerance=degrees(0))
            # self.robot.behavior.drive_straight(distance_mm(random.randint(-10, 10)), speed_mmps(100))

        threading.Timer(timeout, self.__find_faces).start()
コード例 #3
0
ファイル: cam_viewer.py プロジェクト: L-F-Z/vector-tools
 def thinkandmove(self, y, thres, depth=0):
     """Steer based on a ground-plane column profile.

     `y` is indexed by image column; only columns 280..359 (the centre band)
     are examined.  If the peak exceeds `thres`, an obstacle is assumed:
     stop, turn away from the heavier side, re-sample the ground plane,
     optionally back up and correct again, then level the head.  Otherwise
     keep driving forward.  `depth` is only referenced by the commented-out
     recursive retry below.

     NOTE(review): the weighted means divide by sum(range(1, 41)) (40
     weights) while the numerators span 41 columns — possibly an off-by-one;
     confirm against the original calibration before changing.
     """
     maxval = max(y[280:360])
     if maxval > thres:
         # Obstacle suspected: stop immediately.
         self.robot.drive_wheels(0, 0)
         # Centre-of-mass style weights, increasing toward column 320 from
         # the left and decreasing past it on the right.
         leftval = sum((g-279) * y[g] for g in range(280, 321)) / sum(list(range(1, 41)))
         rightval = sum((361-g) * y[g] for g in range(320, 361)) / sum(list(range(1, 41)))
         # Turn away from whichever side carries more weight; magnitude is
         # scaled by how far the mean exceeds 300 (presumably a calibration
         # baseline — TODO confirm).
         direction = (-1 * 90 * (leftval - 300)/180) if leftval > rightval else (1 * 90 * (rightval - 300)/180)
         # # print(sum(y[280:320]), sum(y[320:360]))
         self.robot.turn_in_place(degrees(direction)).wait_for_completed()
         print(direction)
         # Tilt the head slightly down and take a fresh ground-plane reading.
         self.robot.set_head_angle(vector.util.Angle(-0.3)).wait_for_completed()
         self.image, y = self.groundplane()
         if max(y[280:360]) > thres:
             # Still blocked: back up 25 mm and make a smaller (60-deg scaled)
             # corrective turn using the fresh reading.
             self.robot.drive_straight(distance_mm(-25), speed_mmps(50)).wait_for_completed()
             leftval = sum((g-279) * y[g] for g in range(280, 321)) / sum(list(range(1, 41)))
             rightval = sum((361-g) * y[g] for g in range(320, 361)) / sum(list(range(1, 41)))
             direction = (-1 * 60 * (leftval - 300)/180) if leftval > rightval else (1 * 60 * (rightval - 300)/180)
             self.robot.turn_in_place(degrees(direction)).wait_for_completed()

         # self.image, y = self.groundplane()
         # print("Hello")
         # print(maxval)
         # self.thinkandmove(y, thres+30, depth=depth+1)
         # print("Finish")
         self.robot.set_head_angle(vector.util.Angle(0)).wait_for_completed()
     else:
         # Path looks clear: keep rolling forward.
         self.robot.drive_wheels(50, 50)
コード例 #4
0
def main():
    """Hunt for the light cube, dock with it, then watch for it being moved.

    NOTE(review): calls mix `robot.say_text` and `robot.behavior.*` — confirm
    `say_text` exists at the robot level in this SDK version.
    """
    with anki_vector.Robot(requires_behavior_control=True) as robot:

        try:
            while True:
                robot.behavior.set_eye_color(0.0, 1.0)
                robot.anim.play_animation('anim_eyepose_furious')

                # Spin in 22.5-degree steps until an object-observed event
                # (set by the on_oo handler, defined elsewhere) fires.
                robot.events.subscribe(on_oo, Events.robot_observed_object)
                robot.say_text("WHERE IS MY CUBE?", duration_scalar=0.5)
                robot.behavior.set_head_angle(degrees(0))
                observed_event.clear()
                while not observed_event.wait(timeout=0.4):
                    robot.behavior.turn_in_place(degrees(22.5),
                                                 accel=degrees(5000),
                                                 speed=degrees(5000))
                robot.events.unsubscribe(on_oo, Events.robot_observed_object)

                robot.world.connect_cube()
                robot.behavior.dock_with_cube(robot.world.connected_light_cube,
                                              num_retries=3)
                robot.say_text("GOTCHA!", duration_scalar=0.6)
                # Pump the lift up and down as a "gotcha" gesture.
                robot.behavior.set_lift_height(1.0, accel=255, max_speed=255)
                robot.behavior.set_lift_height(0, accel=255, max_speed=255)
                moved_event.clear()
                robot.events.subscribe(on_om, Events.object_moved)
                # NOTE(review): wait() has no timeout, so it blocks until the
                # cube moves and then returns True, ending the loop without
                # running the body; the unsubscribe here only executes if
                # wait() somehow returns falsy — confirm this is intended.
                while not moved_event.wait():
                    robot.events.unsubscribe(on_om, Events.object_moved)
        except KeyboardInterrupt:
            sys.exit()
コード例 #5
0
def main():
    """Connect to Vector, subscribe to nav-map updates, and do a scouting lap.

    Requires the robot to start on its charger; otherwise says "Uh Oh",
    disconnects, and exits.
    """
    args = anki_vector.util.parse_command_args()
    robot = anki_vector.Robot(serial=args.serial,
                              default_logging=True,
                              enable_nav_map_feed=True)

    robot.connect()
    pretty_name = args.serial
    # BUG FIX: compare to None with `is`, not `==` (PEP 8; `==` invokes
    # arbitrary __eq__ implementations).
    if pretty_name is None:
        pretty_name = ''
    #robot.say_text("Vector %s Ready" % pretty_name)

    # subscribe to nav map update events
    on_nav_map_update_mod = functools.partial(on_nav_map_update, robot)
    robot.events.subscribe(on_nav_map_update_mod, Events.nav_map_update)

    if not robot.status.is_on_charger:
        print('Vector needs to start on the charger')
        print('Put him there and re-run the program...')
        robot.say_text("Uh Oh")
        robot.disconnect()
        exit()

    # make Vector wander a bit
    robot.behavior.drive_off_charger()

    # Full 360-degree spin so the nav map feed covers the surroundings.
    robot.behavior.turn_in_place(angle=degrees(360), speed=degrees(45))

    robot.behavior.drive_on_charger()

    # robot.events.unsubscribe(on_nav_map_update, Events.nav_map_update)
    #robot.say_text("All Done")
    robot.disconnect()
コード例 #6
0
ファイル: tst.py プロジェクト: zhangrelay/vector-blockly
def main():
    """Async demo: find the connected cube by spinning, roll it, return home.

    Turns up to 37 times (-15 degrees each) looking for the cube; rolls it
    only if it became visible within those attempts, then disconnects and
    drives back onto the charger.
    """
    with anki_vector.AsyncRobot("00a10a53", show_viewer=True) as robot:
        print("Driving off the charger")
        off_charger_future = robot.behavior.drive_off_charger()
        off_charger_future.result()

        # BUG FIX: compare to None with `is`, not `==`.
        while robot.world.connected_light_cube is None:
            print("Connecting to the cube")
            connect_future = robot.world.connect_cube()
            print(connect_future.result())
            print(robot.world.connected_light_cube.is_visible)

        print("Looking for the cube")
        robot.behavior.set_head_angle(degrees(0))
        # (A dead commented-out wheel-motor search variant was removed here.)

        # Spin in -15 degree steps until the cube is visible or we give up
        # after 37 attempts.
        attempts = 0
        while not robot.world.connected_light_cube.is_visible:
            print(robot.world.connected_light_cube.is_visible)
            turn_future = robot.behavior.turn_in_place(degrees(-15))
            turn_future.result()
            time.sleep(1)
            if attempts > 36:
                break
            attempts += 1

        if attempts <= 36:
            print("Cube found")

            print("Rolling the cube")
            roll_future = robot.behavior.roll_cube(
                robot.world.connected_light_cube)
            roll_future.result()
        else:
            print("Cube not found")

        print("Disconnecting from the cube")
        disconnect_future = robot.world.disconnect_cube()
        disconnect_future.result()

        print("Driving back to the charger")
        on_charger_future = robot.behavior.drive_on_charger()
        on_charger_future.result()
コード例 #7
0
 def robot_moves(self):
     """Spin a full turn each way, then drive two out-and-back 80 mm legs."""
     with anki_vector.Robot() as robot:
         behavior = robot.behavior
         behavior.turn_in_place(degrees(-360))
         behavior.turn_in_place(degrees(360))
         # Two identical legs: drive 80 mm forward, then about-face.
         for _ in range(2):
             behavior.drive_straight(distance_mm(80), speed_mmps(80))
             behavior.turn_in_place(degrees(180))
コード例 #8
0
ファイル: drive.py プロジェクト: dabmake/AnkiVector
def main():
    """Drive a 100 mm square, printing the pose estimate after each motion.

    Also turns on the camera feed / viewers used for visual debugging, drives
    off the charger first and re-docks at the end.
    """
    args = anki_vector.util.parse_command_args()
    #anki_vector.nav_map.NavMapComponent.init_nav_map_feed(frequency=0.5)
    with anki_vector.Robot(args.serial, show_3d_viewer=True) as robot:
        robot.show_viewer = True
        robot.enable_camera_feed = True
        robot.viewer.show_video()

        robot.enable_custom_object_detection = True
        robot.enable_nav_map_feed = True

        def report_pose():
            # Print the robot's current position estimate.
            print(robot.pose.position)

        report_pose()
        print("Drive Vector off of charger...")
        robot.behavior.drive_off_charger()
        report_pose()

        # Trace a square: four (straight 100 mm, turn 90 deg) legs, printing
        # the pose after each motion — replaces the copy-pasted original.
        for _ in range(4):
            robot.behavior.drive_straight(distance_mm(100), speed_mmps(100))
            report_pose()
            robot.behavior.turn_in_place(degrees(90))
            report_pose()

        #latest_nav_map = robot.nav_map.latest_nav_map
        #throws exception not to be initialized

        robot.behavior.drive_on_charger()
        report_pose()
コード例 #9
0
ファイル: nodes.py プロジェクト: L-F-Z/vector-tools
 def __init__(self, angle=degrees(0), abort_on_stop=True, **action_kwargs):
     """Normalise *angle* (a bare number means degrees) and init the base node.

     Raises ValueError when *angle* is neither a number nor an Angle.
     """
     if not isinstance(angle, anki_vector.util.Angle):
         if not isinstance(angle, (int, float)):
             raise ValueError(
                 '%s angle must be a number or a anki_vector.util.Angle' % self)
         # Bare numbers are interpreted as degrees.
         angle = degrees(angle)
     self.angle = angle
     self.action_kwargs = action_kwargs
     super().__init__(abort_on_stop)
コード例 #10
0
 def __init__(self):
     """Configure a cube-landmark particle filter with its viewer enabled."""
     # Known cube poses (mm, with headings) used as landmarks.
     cube_poses = [
         (cube1, Pose(55, 160, 0, angle_z=degrees(90))),
         (cube2, Pose(160, 55, 0, angle_z=degrees(0))),
         (cube3, Pose(160, -55, 0, angle_z=degrees(0))),
     ]
     pf = ParticleFilter(robot,
                         landmarks=dict(cube_poses),
                         sensor_model=CubeSensorModel(robot))
     super().__init__(particle_filter=pf, particle_viewer=True)
コード例 #11
0
 def __init__(self):
     """Particle filter over four numbered ArUco landmarks, viewer enabled."""
     # (x, y, heading-degrees) per landmark id 0..3.
     coords = [(-55, 160, 90), (55, 160, 90), (160, 55, 0), (160, -55, 0)]
     landmarks = {
         idx: Pose(x, y, 0, angle_z=degrees(heading))
         for idx, (x, y, heading) in enumerate(coords)
     }
     pf = ParticleFilter(robot,
                         landmarks=landmarks,
                         sensor_model=ArucoCombinedSensorModel(robot))
     super().__init__(particle_filter=pf, particle_viewer=True)
コード例 #12
0
ファイル: fetch.py プロジェクト: yongchinx95/anki-vector-rgw
def _spin_until_touched(robot):
    """Wiggle in place until Vector's back touch sensor is pressed, then stop."""
    while not robot.touch.last_sensor_reading.is_being_touched:
        robot.motors.set_wheel_motors(-50, 50)
        time.sleep(0.1)
        robot.motors.set_wheel_motors(50, -50)
        time.sleep(0.1)
    robot.motors.set_wheel_motors(0, 0)
    time.sleep(0.25)


def main():
    '''Play fetch with Vector

    Touch his back to signal to him that he can go get his cube

    Once he comes back, touch his back again to tell him to put down the cube
    '''
    args = anki_vector.util.parse_command_args()

    with anki_vector.Robot(args.serial) as robot:
        robot.behavior.drive_off_charger()

        # Head down, lift down, so the cube is visible for docking.
        robot.behavior.set_head_angle(degrees(-5.0))
        robot.behavior.set_lift_height(0.0)

        print("Connecting to a cube...")
        robot.world.connect_cube()

        robot.behavior.say_text(
            "I am looking for my cube and trying to fetch it")

        # Two rounds of fetch.  The duplicated touch-wait loops from the
        # original were extracted into _spin_until_touched; the unused
        # dock_response/docking_result bookkeeping was removed.
        for _ in range(2):
            _spin_until_touched(robot)
            dock_response = robot.behavior.dock_with_cube(
                robot.world.connected_light_cube, num_retries=3)
            if dock_response:
                # Lift the cube, carry it 100 mm away after a 90-deg turn.
                robot.motors.set_lift_motor(50)
                time.sleep(1)
                robot.behavior.turn_in_place(degrees(90))
                robot.behavior.drive_straight(distance_mm(100),
                                              speed_mmps(100))
                _spin_until_touched(robot)
                # Put the cube down and face back the way we came.
                robot.motors.set_lift_motor(-50)
                time.sleep(1)
                robot.behavior.say_text("Here is your cube!")
                robot.behavior.turn_in_place(degrees(180))

        robot.world.disconnect_cube()
コード例 #13
0
 def __init__(self, robot, arguments=None):
     """Scripted play routine: drive out, greet, chatter, spin, then re-dock.

     Every future is resolved with a 10-second timeout so the steps run in
     strict sequence.
     """
     behavior = robot.behavior
     anim = robot.anim
     behavior.drive_off_charger()
     robot.world.connect_cube()
     behavior.say_text("Lulu is playing").result(10)
     anim.play_animation_trigger('GreetAfterLongTime').result(10)
     behavior.drive_straight(distance_mm(100),
                             speed_mmps(100)).result(10)
     behavior.say_text("he is playing").result(10)
     behavior.turn_in_place(degrees(70)).result(10)
     behavior.say_text("Lulu is playing").result(10)
     behavior.turn_in_place(degrees(220)).result(10)
     anim.play_animation_trigger('GreetAfterLongTime').result(10)
     behavior.drive_on_charger()
コード例 #14
0
def testing_cube_pick_up_robust_old(robot):
    """Connect to a cube, then retry pickup until the action succeeds.

    Action-result codes handled in the retry loop:
      50331652  bad object       -> turn 45 deg and retry
      50331683  object not seen  -> turn 45 deg and retry
      67108867  path problem     -> back up 100 mm and retry
      0 / 3     success/complete -> stop retrying
    After the loop, picks the cube up once more, backs off 50 mm, and places
    it on the ground.
    """
    connect_response = robot.world.connect_cube()
    light_cube = robot.world.connected_light_cube
    while light_cube is None:
        time.sleep(0.1)
        print('Still connecting')
        light_cube = robot.world.connected_light_cube
    print('Connected')
    print(connect_response.result())
    #print(connect_response.result().status)
    time.sleep(1)
    print(light_cube.is_visible, 'Vis')
    moving = robot.behavior.pickup_object(light_cube, use_pre_dock_pose=True)
    print(moving.result().result.code)
    flag = True
    curr = moving.result()
    while flag:
        # Hoist the repeated attribute chain once per iteration.
        code = curr.result.code
        if code == 50331652:
            print('BAD OBJECT')
            robot.behavior.turn_in_place(degrees(45)).result()
        elif code == 50331683:
            print('CAN"T SEE')
            robot.behavior.turn_in_place(degrees(45)).result()
        elif code in (0, 3):
            print('Got there')
            print(code)
            flag = False
        elif code == 67108867:
            back = robot.behavior.drive_straight(distance_mm(-100), speed_mmps(30))
            back.result()
        if flag:
            moving = robot.behavior.pickup_object(light_cube, use_pre_dock_pose=True)
            curr = moving.result()
            time.sleep(2)
    print('im out', curr.result)
    pick_up = robot.behavior.pickup_object(light_cube, use_pre_dock_pose=True)
    pick_up.result()
    back = robot.behavior.drive_straight(distance_mm(-50), speed_mmps(30))
    back.result()
    print('placing down now')
    drop = robot.behavior.place_object_on_ground_here()
    drop.result()
    # SYNTAX FIX: a stray unterminated triple-quote followed this function in
    # the original source and was removed — it would have opened a string
    # literal swallowing the rest of the file.
コード例 #15
0
ファイル: main.py プロジェクト: sebastiankruk/V4Vector
    def __find_jira_tickets(self):
        """Periodically announce new Jira tickets to their assignees.

        For every user with new tickets, turn toward where that user was last
        seen (if a rotation was recorded) and read out the ticket summaries.
        Re-schedules itself to run again in 60 seconds.
        """
        new_jira_tickets = self.jira.check_for_new_jira_tickets(
            self.last_seen_human)

        if new_jira_tickets['total'] > 0:
            # Set comprehension instead of set([...]) around a list comp.
            users_with_tickets = {
                self.jira.get_user_name(issue['assignee'])
                for issue in new_jira_tickets['issues']
            }

            print(f"found users with tickets {users_with_tickets}")

            for user in users_with_tickets:
                self.last_seen_human[user] = datetime.now()

                rotations = self.last_seen_human_rotation.get(user, [])
                if rotations:
                    rotate_angle = rotations[0] - self.rotation

                    print(
                        f'Rotations: {rotate_angle}, {self.rotation}, {rotations}'
                    )

                    # BUG FIX: original compared user names with `is`
                    # (identity); equal strings are not guaranteed to be the
                    # same object, so matching tickets could silently be
                    # missed.  Use `==`.
                    tickets_for_user = [
                        issue['summary']
                        for issue in new_jira_tickets['issues']
                        if self.jira.get_user_name(issue['assignee']) == user
                    ]

                    tickets_count = len(tickets_for_user)

                    self.robot.behavior.say_text(
                        f'I\'m looking for {user}, you have {tickets_count} new jira { self.inflect_engine.plural("ticket", tickets_count) }'
                    )

                    # Face the recorded heading before reading the tickets.
                    self.rotation = rotations[0]
                    self.robot.behavior.turn_in_place(
                        degrees(rotate_angle), angle_tolerance=degrees(0))

                    for idx, ticket in enumerate(tickets_for_user):
                        self.robot.behavior.say_text(
                            f"Issue number {idx+1}: {ticket}",
                            duration_scalar=0.9,
                            use_vector_voice=False)

        threading.Timer(60, self.__find_jira_tickets).start()
コード例 #16
0
def main():
    """Maze-escape demo driven by Vector's forward proximity sensor.

    Takes a distance reading and branches on its value to decide how far to
    drive and when to turn right.

    NOTE(review): `object_dist` is `last_sensor_reading.distance`, yet the
    code then reads `object_dist.distance_mm` — confirm `distance` exposes a
    `distance_mm` attribute in this SDK version.
    NOTE(review): exact equality on a sensor distance (`== 30`, `== 400`) is
    fragile unless the sensor clamps to these exact values — verify.
    """

    args = anki_vector.util.parse_command_args()

    with anki_vector.Robot(args.serial) as robot:
        if robot.status.is_charging:
            print("Vector is currently charging.")
            robot.behavior.drive_off_charger()
            robot.behavior.say_text('Charge.')

        object_dist = robot.proximity.last_sensor_reading.distance
        print(f'Distance from object: {object_dist.distance_mm}')

        if object_dist.distance_mm == 30:
            # Very close reading: sprint 800 mm and declare escape.
            robot.behavior.say_text('I go straights.')
            robot.behavior.drive_straight(distance_mm(800), speed_mmps(100))
            robot.motors.stop_all_motors()
            robot.behavior.say_text('I escaped the Minotaur.')
        elif object_dist.distance_mm == 400:
            # Max-range reading: advance 400 mm, then re-sample the sensor.
            robot.behavior.say_text('I go straights.')
            robot.behavior.drive_straight(distance_mm(400), speed_mmps(100))
            object_dist = robot.proximity.last_sensor_reading.distance
            print(f'Distance from object: {object_dist.distance_mm}')
            if object_dist.distance_mm == 400:
                # Still at max range: one more 400 mm leg, then done.
                robot.behavior.say_text('I am not there yet.')
                robot.behavior.drive_straight(distance_mm(400),
                                              speed_mmps(100))
                robot.motors.stop_all_motors()
                robot.behavior.say_text('I escaped the Minotaur.')
            elif object_dist.distance_mm < 400:
                # Obstacle ahead: stop 50 mm short of it, then turn right.
                robot.behavior.say_text('This is my last run.')
                robot.behavior.drive_straight(
                    distance_mm(object_dist.distance_mm - 50), speed_mmps(100))
                print("Turn right 90 degrees.")
                robot.behavior.say_text("I go rights.")
                robot.behavior.turn_in_place(degrees(-90))
        elif object_dist.distance_mm < 400:
            # Obstacle already in range: stop 50 mm short, then turn right.
            stopping_distance = object_dist.distance_mm - 50.0
            print(f'Distance from object: {object_dist.distance_mm}')
            print(f'Stopping distance: {stopping_distance}')
            print("Go straight.")
            robot.behavior.say_text('I go strights.')
            robot.behavior.drive_straight(distance_mm(stopping_distance),
                                          speed_mmps(50))
            robot.motors.stop_all_motors()
            print("Turn right 90 degrees.")
            robot.behavior.say_text("I go rights.")
            robot.behavior.turn_in_place(degrees(-90))
コード例 #17
0
def main():
    """Watch for face-observed events for five seconds, logging each one."""
    def on_robot_observed_face(robot, event_type, event):
        """on_robot_observed_face is called when a face is seen"""
        print("Vector sees a face")

    args = anki_vector.util.parse_command_args()
    with anki_vector.Robot(args.serial, enable_vision_mode=True) as robot:
        # Head up, lift down, so faces are in the camera's view.
        robot.behavior.set_head_angle(degrees(50.0))
        robot.behavior.set_lift_height(0.0)

        # Bind the robot argument up front, then register the handler.
        handler = functools.partial(on_robot_observed_face, robot)
        robot.events.subscribe(handler, Events.robot_observed_face)

        print(
            "------ waiting for face events, press ctrl+c to exit early ------"
        )

        try:
            time.sleep(5)  # watch for five seconds
        except KeyboardInterrupt:
            pass
コード例 #18
0
def main():
    """Interactive demo: pick a mode, then run the eye/screen/both routine.

    The three copy-pasted mode branches of the original were collapsed into a
    single dispatch table; the one divergent prompt string (mode 1 had a
    leading newline) was unified in the process.
    """
    args = anki_vector.util.parse_command_args()

    with anki_vector.Robot(args.serial) as robot:
        print("\n\n------------------------------\n\nStarting program...\n")
        robot.behavior.set_head_angle(degrees(45.0))
        robot.behavior.set_lift_height(0.0)
        mode = input("Enter mode:\n 1: Eye color\n 2: Screen color\n 3: Screen and eyes\n")
        robot.say_text(f"Mode {mode} selected")

        # Mode -> routine dispatch; each routine takes (loop, speed, robot).
        actions = {"1": eye, "2": screen, "3": both}
        action = actions.get(mode)
        if action is None:
            print(f"Invalid command '{mode}'")
            main()  # re-prompt from the top, as in the original
            return

        loop = int(input("How many times would you like to repeat?  "))
        robot.say_text(f"Repeating {loop} times")
        speed = int(float(input("How long should it wait?  ")))
        robot.say_text(f"Sleeping for {speed} seconds")
        print("\n\n------------------------------\n\n")
        action(loop, speed, robot)
コード例 #19
0
ファイル: textInput.py プロジェクト: pear-a-grin/vector
def main():
    """Simple REPL driving Vector: command keywords or free text for him to say."""
    args = anki_vector.util.parse_command_args()

    with anki_vector.Robot(args.serial) as robot:
        robot.behavior.drive_off_charger()

        # BUG FIX: the original bound user input to the name `str`, shadowing
        # the builtin; renamed to `command`.  The dead initial assignment
        # ("nothing") was removed — the loop always reads input first.
        while True:
            command = input(
                "\nWhat should Vector say? \nthink \t= show thinking\nsad \t= do a sad thing\nhappy\t= do a happy thing\nr \t= turn right\nl \t= turn left\nexit \t= exit\n >>"
            )
            if command == "exit":
                break
            elif command == "r":
                robot.anim.play_animation('anim_rtmotion_turn45right_01')
            elif command == "l":
                robot.anim.play_animation('anim_rtmotion_turn45left_01')
            elif command == "think":
                playFromList(robot, "think", 3)
            elif command == "sad":
                playFromList(robot, "sad", 2)
            elif command == "happy":
                playFromList(robot, "happy", 5)
            else:
                # Free text: bracket the speech with happy animations.
                playFromList(robot, "happy")
                robot.say_text(command)
                playFromList(robot, "happy", 3)

        # One celebratory full turn before finishing.
        robot.behavior.turn_in_place(degrees(360 * 1))
        robot.anim.play_animation('anim_reacttoblock_happydetermined_02')
コード例 #20
0
ファイル: giphy.py プロジェクト: smourph/anki-vector-snippets
def main():
    """Fetch a random GIF from Giphy and play it on Vector's face at ~24 fps."""
    args = anki_vector.util.parse_command_args()

    # Get GIFs list from Giphy
    request = requests.get(giphy_url, {
        'q': keywords,
        'api_key': giphy_api_key,
        'limit': giphy_limit
    })
    # print(request.url)
    data = json.loads(request.content)
    # print(json.dumps(data, sort_keys=True, indent=2))

    if data and data['data'] and len(data['data']) > 0:
        # Load a random GIF
        selected_gif_index = random.randrange(len(data['data']))
        gif_url = data['data'][selected_gif_index]['images']['original']['url']
        gif = Image.open(io.BytesIO(requests.get(gif_url).content))

        with anki_vector.Robot(args.serial) as robot:
            # Get vector's screen dimension (height, width)
            vector_screen_width, vector_screen_height = anki_vector.screen.dimensions(
            )

            background = Image.new('RGB',
                                   (vector_screen_width, vector_screen_height),
                                   (0, 0, 0))
            background_w, background_h = background.size

            # Build the vector's screen-compliant frame list
            images = []  # idiom: [] instead of list()
            for frame in ImageSequence.Iterator(gif):
                frame = frame.convert('RGB')

                # Resize the frame image.  FIX: Image.ANTIALIAS has been an
                # alias of LANCZOS since PIL 2.7 and was removed in Pillow 10;
                # use the real name so this keeps working.
                frame.thumbnail((vector_screen_width, vector_screen_height),
                                Image.LANCZOS)
                frame_w, frame_h = frame.size

                # Define the black background
                image = background.copy()

                # Paste the frame image centered on the black background
                offset = (background_w - frame_w) // 2, (background_h -
                                                         frame_h) // 2
                image.paste(frame, offset)
                images.append(image)

            # If necessary, move Vector's Head and Lift to make it easy to see his face
            robot.behavior.set_head_angle(degrees(45.0))
            robot.behavior.set_lift_height(0.0)

            for image in images:
                # Convert the image to the format used by the Screen
                screen_data = anki_vector.screen.convert_image_to_screen_data(
                    image)

                # Display image on Vector's face for one 24 fps frame
                robot.screen.set_screen_with_image_data(screen_data, 1.0 / 24)
                time.sleep(1.0 / 24)
コード例 #21
0
    def perform(self, parameter):
        """Dock, undock, announce a turn of `degr` degrees, do it, report done.

        All robot errors are caught and printed rather than propagated.
        """
        args = anki_vector.util.parse_command_args()

        with anki_vector.Robot(args.serial) as robot:

            try:
                robot.behavior.drive_on_charger()

                time.sleep(10)

                robot.behavior.drive_off_charger()

                time.sleep(10)

                degr = 360

                robot.behavior.say_text("now i will begin turning %s degrees" %
                                        degr)

                # CONSISTENCY FIX: turn by the same `degr` that was announced
                # (the original hard-coded 360 here), and pass a real bool for
                # is_absolute instead of the int 1.
                robot.behavior.turn_in_place(degrees(degr),
                                             is_absolute=True,
                                             num_retries=3)

                time.sleep(10)

                robot.behavior.say_text("i have finished turning")
            except Exception as err:
                print(err)
コード例 #22
0
ファイル: main.py プロジェクト: sebastiankruk/V4Vector
    def run(self):
        """Main loop: enable face detection, start the scanners, wait for events."""
        self.detected_faces = set()

        with anki_vector.Robot(enable_face_detection=True) as robot:
            self.robot = robot
            robot.vision.enable_face_detection(estimate_expression=True)
            robot.vision.enable_display_camera_feed_on_face()
            # Head up, lift down, so faces are in the camera's view.
            robot.behavior.set_head_angle(degrees(45.0))
            robot.behavior.set_lift_height(0.0)

            evt = threading.Event()
            robot.events.subscribe(self._on_robot_observed_face,
                                   Events.robot_observed_face, evt)
            # Kick off the periodic background scanners.
            for delay, job in ((10, self.__find_faces),
                               (60, self.__find_jira_tickets)):
                threading.Timer(delay, job).start()

            print(
                "------ waiting for face events, press ctrl+c to exit early ------"
            )

            try:
                if not evt.wait(timeout=12000):
                    print("----- that's all folks ------")
            except KeyboardInterrupt:
                pass

            robot.events.unsubscribe(self._on_robot_observed_face,
                                     Events.robot_observed_face)
コード例 #23
0
def main():
    """Show live driveway-camera snapshots on Vector's face, announcing motion.

    Fetches 60 snapshots (~0.6 s apart), downsizes each to Vector's 184x96
    screen, and displays it.
    """
    args = anki_vector.util.parse_command_args()

    with anki_vector.Robot(args.serial) as robot:
        robot.behavior.set_head_angle(degrees(45.0))
        robot.behavior.set_lift_height(0.0)
        robot.behavior.say_text("Alert! Motion Detected Outside!")

        current_directory = os.path.dirname(os.path.realpath(__file__))
        # ROBUSTNESS FIX: create the images folder instead of requiring the
        # user to make it by hand (the original crashed on open() otherwise).
        os.makedirs("images", exist_ok=True)

        for _ in range(60):
            snapshot = homeData.getLiveSnapshot(camera=MY_CAMERA)
            if not snapshot:
                exit(1)
            with open("images/driveway_snapshot.jpg", "wb") as f:
                f.write(snapshot)

            # Downscale to Vector's 184x96 screen resolution.
            image = Image.open('images/driveway_snapshot.jpg')
            new_image = image.resize((184, 96))
            new_image.save('images/driveway_snapshot_vector.jpg')

            image_path = os.path.join(current_directory, "images",
                                      "driveway_snapshot_vector.jpg")

            image_file = Image.open(image_path)

            print("Display image on Vector's face...")
            screen_data = anki_vector.screen.convert_image_to_screen_data(
                image_file)
            robot.screen.set_screen_with_image_data(screen_data, 20.0)

            # this is the time between each snapshot
            time.sleep(0.6)
コード例 #24
0
def main():
    """Wait up to five seconds for Vector to see a face, announcing the first."""
    args = anki_vector.util.parse_command_args()
    with anki_vector.Robot(args.serial, enable_face_detection=True) as robot:
        # Head up, lift down, so a face is easy to see.
        robot.behavior.set_head_angle(degrees(50.0))
        robot.behavior.set_lift_height(0.0)

        evt = threading.Event()

        async def on_robot_observed_face(event_type, event):
            print("Vector sees a face")
            global said_text
            # Announce only once, then release the waiting main thread.
            if not said_text:
                said_text = True
                await robot.say_text("I see a face!")
                evt.set()

        robot.events.subscribe(on_robot_observed_face, Events.robot_observed_face)

        print("------ waiting for face events, press ctrl+c to exit early ------")

        try:
            face_seen = evt.wait(timeout=5)
            if not face_seen:
                print("------ Vector never saw your face! ------")
        except KeyboardInterrupt:
            pass
コード例 #25
0
def main():
    """Feed Vector's camera stream into a MobileNet pose-estimation model.

    Loads the pre-trained checkpoint onto the GPU, puts the robot in a
    good viewing posture, wires camera/touch events to their handlers,
    and waits (up to 10 minutes) for the session to finish.
    """
    stop_event = threading.Event()

    # Build the network and restore the pre-trained weights.
    model = PoseEstimationWithMobileNet()
    weights = torch.load("models/checkpoint_iter_370000.pth",
                         map_location='cpu')
    load_state(model, weights)
    model = model.cuda()

    with anki_vector.AsyncRobot() as robot:
        robot.camera.init_camera_feed()
        robot.camera.image_streaming_enabled()

        # Raise the head and drop the lift so the camera has a clear view.
        robot.behavior.set_head_angle(degrees(25.0))
        robot.behavior.set_lift_height(0.0)

        # Route every new camera frame to the model; a touch ends the run.
        robot.events.subscribe(on_new_raw_camera_image,
                               events.Events.new_raw_camera_image, model)
        robot.events.subscribe_by_name(on_robot_observed_touch,
                                       event_name='touched')

        print("------ waiting for camera events, press ctrl+c to exit early ------")

        try:
            if not stop_event.wait(timeout=600):
                print("------ Did not receive a new camera image! ------")
        except KeyboardInterrupt:
            pass
コード例 #26
0
def main():
    """Have Vector dock with his light cube and report how it went."""
    args = anki_vector.util.parse_command_args()

    docking_result = None
    with anki_vector.Robot(args.serial) as robot:
        robot.behavior.drive_off_charger()

        # Lower the head and lift so the cube's markers are in view.
        robot.behavior.set_head_angle(degrees(-5.0))
        robot.behavior.set_lift_height(0.0)

        print("Connecting to a cube...")
        robot.world.connect_cube()

        cube = robot.world.connected_light_cube
        if cube:
            print("Begin cube docking...")
            dock_response = robot.behavior.dock_with_cube(cube, num_retries=3)
            if dock_response:
                docking_result = dock_response.result
            robot.world.disconnect_cube()

    # Report failures: either no result at all, or a non-success code.
    if not docking_result:
        print("Cube docking failed.")
    elif docking_result.code != anki_vector.messaging.protocol.ActionResult.ACTION_RESULT_SUCCESS:
        print("Cube docking failed with code {0} ({1})".format(
            str(docking_result).rstrip('\n\r'), docking_result.code))
コード例 #27
0
async def on_user_intent(robot, event_type, event):
    """Handle a user voice intent; narrate and animate weather responses."""
    user_intent = UserIntent(event)
    if user_intent.intent_event is not UserIntentEvent.weather_response:
        return

    data = json.loads(user_intent.intent_data)
    print(data)
    print(f"Weather report for {data['speakableLocationString']}: "
          f"{data['condition']}, temperature {data['temperature']} degrees")
    summary, animation = processWeather(data['speakableLocationString'])

    # Pre-render the weather icon in Vector's screen format.
    face_image = Image.open('./cloudy.png')
    screen_data = anki_vector.screen.convert_image_to_screen_data(face_image)

    await asyncio.wrap_future(robot.behavior.look_around_in_place())
    await asyncio.wrap_future(robot.behavior.set_head_angle(degrees(35.0)))

    # Speak everything but the last line, show the icon, then finish
    # with the final line and the matching animation.
    for line in summary[:-1]:
        await asyncio.wrap_future(robot.behavior.say_text(line))

    await asyncio.wrap_future(robot.screen.set_screen_with_image_data(screen_data, 5.0))
    await asyncio.wrap_future(robot.behavior.say_text(summary[-1]))
    await asyncio.wrap_future(robot.anim.play_animation(animation))
コード例 #28
0
def look_for_faces(robot):
    """Wander randomly until Vector sees at least one face.

    Each pass: turn a random amount, drive a random short distance with
    the head raised, then poll the world's visible faces for a short
    window. Loops forever until a face is detected.
    """
    found_face = False
    while not found_face:
        dist = random.randint(0, 200)
        rot = random.randint(-90, 90)
        robot.behavior.turn_in_place(degrees(rot))
        time.sleep(1)
        robot.behavior.drive_straight(distance_mm(dist), speed_mmps(70))
        robot.behavior.set_head_angle(degrees(30))
        time.sleep(1)

        # Poll for visible faces; fix: stop polling as soon as one is
        # seen instead of busy-spinning through all 500 iterations.
        for _ in range(500):
            for _ in robot.world.visible_faces:
                found_face = True
                break
            if found_face:
                break

        # Continue searching
        robot.behavior.set_head_angle(degrees(10))
コード例 #29
0
def make_scene_light(anki_serial, text):
    """Raise an alert on Vector: bell sound, red eyes, speech, on-screen text.

    Connects to the robot with the given serial, plays the bell twice,
    says "ALERT!", repeatedly displays `text` on the face, then tries to
    send the robot back to its charger. The connection is always closed.

    :param anki_serial: serial number of the Vector robot to connect to.
    :param text: message rendered onto Vector's screen.
    """
    robot = AsyncRobot(anki_serial)
    try:
        robot.connect()
        wav_file = 'vector_bell_whistle.wav'
        robot.behavior.set_eye_color(1, 1)  # reddish
        for _ in range(1, 3):
            stream_future = robot.audio.stream_wav_file(wav_file, 50)
            stream_future.result()  # wait for playback to finish
            time.sleep(1)
        robot.behavior.say_text("ALERT!")
        # Raise the head and drop the lift so the screen is visible.
        robot.behavior.set_head_angle(degrees(50.0))
        robot.behavior.set_lift_height(0.0)
        image = make_text_image(text, 0, 0)
        screen_data = anki_vector.screen.convert_image_to_screen_data(image)
        for _ in range(1, 10):
            r = robot.screen.set_screen_with_image_data(screen_data,
                                                        5.0,
                                                        interrupt_running=True)
            r.result()
            time.sleep(3)
        # Retry driving home a few times until the robot is on the charger.
        for _ in range(1, 5):
            if robot.status.is_on_charger is False:
                drive_future = robot.behavior.drive_on_charger()
                r = drive_future.result()
                logging.debug("Drive Back Home Result: %s", r)
                time.sleep(5)
    finally:
        try:
            robot.disconnect()
        except Exception:
            # Fix: a bare `except:` would also swallow KeyboardInterrupt
            # and SystemExit during cleanup.
            print('Connection was not nicely cleaned!!!')
        finally:
            print("....DONE....")
コード例 #30
0
def main():
    """Run a full spoken "news broadcast" on Vector.

    Sequence: dock/undock to reset pose, drive out and turn to face the
    viewer, greet, then speak four news segments (world, sports, outer
    space, and Vector-related Reddit posts) with animations between
    stories, close with an outro, and return to the charger.

    Relies on module-level helpers (get_world_news, get_speakable_text,
    etc.) defined elsewhere in this file.
    """
    with Robot() as vector:
        # Reset to a known pose before the show starts.
        vector.behavior.drive_on_charger()
        vector.behavior.drive_off_charger()
        # turn_in_place positional args: angle, speed, accel, angle_tolerance
        # -- presumably tuned for this camera setup; TODO confirm values.
        vector.behavior.drive_straight(distance_mm(150), speed_mmps(25))
        vector.behavior.turn_in_place(degrees(180), degrees(45), 
                                      degrees(5), degrees(5))
        # ^ If vector's base is facing away from the camera
        # Greeting + introduction.
        vector.anim.play_animation_trigger(random.choice(["GreetAfterLongTime", "PRDemoGreeting", "ReactToGoodMorning", "ReactToGreeting"]))
        vector.behavior.say_text(get_speakable_text(random.choice(get_introductory_phrases())))
        # Segment 1: world news, full summaries with idle animations between.
        vector.behavior.say_text(get_speakable_text(random.choice(get_world_news_segment_lead_in_phrases())))
        vector.anim.play_animation_trigger("ExploringQuickScan")
        for story in get_world_news():
            announcement = get_announcement_from_story(story)
            vector.behavior.say_text(get_speakable_text(announcement))
            vector.anim.play_animation_trigger(random.choice(["NeutralFace",
                                                              "NothingToDoBoredIdle",
                                                              "ObservingIdleEyesOnly",
                                                              "ObservingIdleWithHeadLookingStraight",
                                                              "ObservingIdleWithHeadLookingUp",
                                                              "ObservingLookStraight",
                                                              "ObservingLookUp",
                                                              "LookAround"]))
            vector.behavior.say_text(get_speakable_text(random.choice(get_brief_description_lead_in_phrases())))
            for sentence in story.get_summary():
                vector.behavior.say_text(get_speakable_text(sentence))
        # Segment 2: sports -- headlines only, capped at three stories.
        vector.behavior.say_text(get_speakable_text(random.choice(get_sports_news_segment_lead_in_phrases())))
        for announcement in [_announcement for _announcement in get_announcements_from_stories(get_sports_news())][:3]:
            vector.behavior.say_text(get_speakable_text(announcement))
        # Segment 3: outer space -- headlines only, capped at three stories.
        vector.behavior.say_text(random.choice(get_outer_space_news_segment_lead_in_phrases()))
        for announcement in [_announcement for _announcement in get_announcements_from_stories(get_outer_space_news())][:3]:
            vector.behavior.say_text(get_speakable_text(announcement))
        # Segment 4: top three Vector posts from Reddit, with summaries.
        vector.behavior.say_text(get_speakable_text(random.choice(get_vector_news_segment_lead_in_phrases())))
        for post in [_post for _post in get_reddit_vector_posts()][:3]:
            vector.behavior.say_text(get_speakable_text(f"Here is one by {post.author} (thank you, {post.author}). It's titled: {post.post}."))
            vector.behavior.say_text(random.choice(get_brief_description_lead_in_phrases()))
            for sentence in post.get_summary():
                vector.behavior.say_text(get_speakable_text(sentence))
        # Outro, farewell animation, and back to the charger.
        vector.behavior.say_text(get_speakable_text(random.choice(get_outro_phrases())))
        vector.anim.play_animation_trigger(random.choice(["Feedback_ILoveYou",
                                                          "Feedback_GoodRobot",
                                                          "LookAtUserEndearingly",
                                                          "OnboardingReactToFaceHappy",
                                                          "OnboardingLookAtUser",
                                                          "ReactToGoodBye",
                                                          "ReactToGoodNight"]))
        vector.behavior.drive_on_charger()