def __init__(self):
    self.interrupted = False
    # read the configuration
    config_file = 'config.yaml'
    if not os.path.exists(config_file):
        print('[Warning] configuration file does not exist')
        return
    with open(config_file) as f:
        config = yaml.safe_load(f)
    model_name = config.get('name', 'snowboy')
    if model_name in ('snowboy', 'alexa'):
        model_name += '.umdl'
    else:
        model_name += '.pmdl'
    self.model = "唤醒词/" + model_name  # voice model to use
    print('[Hotword file] ' + self.model)
    if not os.path.exists(self.model):
        print('[Warning] hotword file does not exist')
        return
    signal.signal(signal.SIGINT, self.signal_handler)  # catch Ctrl+C
    self.detector = snowboydecoder.HotwordDetector(self.model, sensitivity=0.5)  # set the voice model and sensitivity
    print('Listening... Press Ctrl+Z to exit')
    self.robot = Robot(config)  # create the application module
    self.player = Player()  # plays the "ding-dong" response when the hotword triggers
def setUp(self):
    mock = self.create_mock_robotd()
    mock.new_powerboard()
    time.sleep(0.2)
    self.mock = mock
    self.robot = Robot(robotd_path=mock.root_dir, wait_for_start_button=False)
def test_invalid_input(self):
    with captured_output() as (out, err):
        robot = Robot()
        robot.place(100, 100, "nowhere")
    output = out.getvalue().strip()
    self.assertEqual(output, "Invalid input.")
def setUp(self):
    mock = self.create_mock_robotd()
    # Insert a power board to let the robot start up
    self.power_board = mock.new_powerboard()
    time.sleep(0.2)
    self.mock = mock
    self.robot = Robot(robotd_path=mock.root_dir, wait_for_start_button=False)
def test_implicit_wait_start_competition_mode(self):
    self.set_competition_mode()
    with self.mock_kill_after_delay() as mock_kill_after_delay:
        Robot(robotd_path=self.mock.root_dir)
    mock_kill_after_delay.assert_called_once_with(
        game_specific.GAME_DURATION_SECONDS,
    )
def test_falling_from_board(self):
    with captured_output() as (out, err):
        robot = Robot(4, 4, "east")
        robot.move()
    output = out.getvalue().strip()
    self.assertEqual(output, "Safety warning - I am about to fall to oblivion!")
def __init__(self, map_filename, robot_filename):
    self.robot = Robot()
    self.world = FeatureBasedWorld()
    self.tick = 0.0
    self.pose_history = []  # ground truth trajectory

    self.robot.LoadFromFile(robot_filename)
    self.world.LoadFromFile(map_filename)
    self.pose_history.append(self.robot.GetPose())  # initial pose
def __init__(self):
    self.interrupted = False
    self.model = "嘿小二.pmdl"  # voice model to use
    signal.signal(signal.SIGINT, self.signal_handler)  # catch Ctrl+C
    self.detector = snowboydecoder.HotwordDetector(
        self.model, sensitivity=0.5)  # set the voice model and sensitivity
    print('Listening... Press Ctrl+Z to exit')
    self.robot = Robot()  # create the application module
    self.music_player = self.robot.get_music_player()  # create the music player module
    self.player = Player()  # plays the "ding-dong" response when the hotword triggers
def __init__(self):
    self.interrupted = False
    self.model = "snowboy.pmdl"
    signal.signal(signal.SIGINT, self.signal_handler)
    self.detector = snowboydecoder.HotwordDetector(self.model, sensitivity=0.42)
    print('Listening... Press Ctrl+C to exit')
    self.robot = Robot()
    self.music_player = self.robot.get_music_player()
    self.player = Player()
def test_explicit_wait_start_development_mode(self):
    robot = Robot(
        robotd_path=self.mock.root_dir,
        wait_for_start_button=False,
    )
    with self.mock_kill_after_delay() as mock_kill_after_delay:
        robot.power_board.wait_start()
    # default is development mode, which doesn't have a timeout
    mock_kill_after_delay.assert_not_called()
def test_two_clients(self):
    """Checks that you can interface with a motor board from multiple clients."""
    # TODO make this test generic to the board, so it runs on all boards.
    self.mock.new_motorboard('ABC')
    # Set up robot 2!
    robot2 = Robot(robotd_path=self.mock.root_dir, wait_for_start_button=False)
    # Give it a tiny bit to init the boards
    time.sleep(0.2)
    self.robot.motor_boards[0].m0 = 1
    self.robot.motor_boards[0].m0 = -1
    robot2.motor_boards[0].m0 = 1
    robot2.motor_boards[0].m0 = -1
def main():
    robot = Robot((18, 1), True)
    # robot.set_speed(100)
    # _real_map = Arena(robot)
    # ArenaUtils.load_arena_from_file(_real_map, 'map/SampleWeek11.txt')
    _explore_map = Arena(robot)
    _explore_map.set_allunexplored()
    CommMgr.connect()
    _explore = Exploration(_explore_map, robot, 300, 3600)
    _explore.run()
    CommMgr.close()
def test_explicit_wait_start_competition_mode(self):
    robot = Robot(
        robotd_path=self.mock.root_dir,
        wait_for_start_button=False,
    )
    self.set_competition_mode()
    with self.mock_kill_after_delay() as mock_kill_after_delay:
        robot.power_board.wait_start()
    mock_kill_after_delay.assert_called_once_with(
        game_specific.GAME_DURATION_SECONDS,
    )
def fp():
    robot = Robot((18, 1), True)
    _real_map = Arena(robot)
    ArenaUtils.load_arena_from_file(_real_map, 'map/17_week10.txt')
    print('Awaiting FP_START')
    while True:
        _command = CommMgr.recv()
        if _command == 'FP_START':
            CommMgr.send('X', CommMgr.ARDUINO)
            break
    _go_to_wp_goal = FastestPath(_real_map, robot)
    _status = _go_to_wp_goal.do_fastest_path_wp_goal((7, 10), ArenaConstant.GOAL_POS.value)
def execute_path_exploration(self, path, goal_pos):
    _output_string = []
    _temp_block = path.pop()
    _ori_target = None
    _temp_robot = Robot(self._robot.get_pos(), False)
    _temp_robot.set_ori(self._robot.get_ori())
    _temp_robot.set_speed(0)

    # Replay the path on a virtual robot to build the action sequence.
    while _temp_robot.get_pos()[0] != goal_pos[0] or _temp_robot.get_pos()[1] != goal_pos[1]:
        if _temp_robot.get_pos()[0] == _temp_block.get_pos()[0] and \
                _temp_robot.get_pos()[1] == _temp_block.get_pos()[1]:
            _temp_block = path.pop()
        _ori_target = self.get_target_ori(_temp_robot.get_pos(), _temp_robot.get_ori(), _temp_block)
        _action = None
        if _temp_robot.get_ori() != _ori_target:
            _action = self.get_target_move(_temp_robot.get_ori(), _ori_target)
        else:
            _action = Action.FORWARD
        print('Action {} from {} to {}'.format(_action.value, _temp_robot.get_pos(), _temp_block.get_pos()))
        _temp_robot.move(_action)
        self._actions.append(_action)
        _output_string.append(_action.value)

    # Execute the recorded actions on the actual robot.
    for _act in self._actions:
        if _act == Action.FORWARD:
            if not self.can_move_forward():
                print("Fastest Path Execution terminated!")
                return "T"
        self._robot.move(_act)
        self._robot.notify(_act.value, self._map, True)
        self._robot.set_sensor()
        self._robot.sense(self._map)  # , self._real_map)
        if self._surface is not None:
            self._map.draw(self._surface)

    return ''.join(_output_string)
async def main():
    opts = Options(sys.argv)
    r = Robot(**opts.args())
    try:
        if opts.id is None:
            # Create a new game if an id was not specified
            opts.id = await r.newgame()
        else:
            # Check that the game exists
            if opts.id != await r.checkgame(opts.id):
                print('Game id mismatched', file=sys.stderr)
                sys.exit(2)
    except Exception as e:
        print(f'Failed to acquire game: {e}')
        sys.exit(3)
    if opts.robots:
        rbs = [r]
        wrk = [ensure_future(r.run(pnum=opts.autoplay))]
        await sleep(0.2)  # The leader process should be able to reset the game
        for i in range(1, opts.robots):
            r = Robot(uri=opts.site, idx=i, id=opts.id)
            rbs.append(r)
            wrk.append(ensure_future(r.run()))
        await gather(*wrk)
        results(wrk[0].result(),
                [r.pname for r in rbs],
                [r.id_prefix for r in rbs])
    else:
        res = await r.run(pnum=opts.autoplay)
        results(res)
def initialize_bots(gt, generator):
    global config
    bots = []
    for i in range(config['num_bots']):
        sim = gt.get_hardware_instance(guid=i)
        bot = Robot(guid=i,
                    ip=config['bots']['ip'],
                    port=config['bots']['s_port'] + i,
                    hardware=sim,
                    generator=generator,
                    connect=config['bots']['connect'],
                    image_size=config['sim']['image_size'])
        bots.append(bot)
    return bots
def main():
    with tempfile.TemporaryDirectory(prefix="robot-api-test-") as root_dir:
        mock = MockRobotD(root_dir=root_dir)
        mock.new_powerboard()
        time.sleep(0.2)
        mock.new_motorboard()
        time.sleep(0.2)
        from robot.robot import Robot
        robot = Robot(robotd_path=root_dir)
        robot.power_board.power_off()
        robot.motor_boards[0].m0 = 1
        time.sleep(0.2)
        mock.stop()
def test_implicit_wait_start_development_mode(self):
    with self.mock_kill_after_delay() as mock_kill_after_delay:
        Robot(robotd_path=self.mock.root_dir)
    # default is development mode, which doesn't have a timeout
    mock_kill_after_delay.assert_not_called()
from math import pi
import asyncio

import yaml  # used by load_config(); missing from the original imports

import robot.api as api

config = ''


def load_config():
    global config
    with open('config.yml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)


if __name__ == '__main__':
    load_config()
    robot = Robot(config['robot']['ip'])
    asyncio.get_event_loop().run_until_complete(
        robot.cmd_via_ws(api.CMD_START_SYS))

    # 1. send cmd via http
    print("========================================")
    print("The following cmd was sent via http")
    print("current robot mode: {}".format(robot.get_robot_mode()))
    print(robot.get_target_joints())
    print(robot.get_actual_joints())
    print(robot.get_target_tcp_pose())
    print(robot.get_actual_tcp_pose())
    robot.move_check()
    print(robot.movej([0, 0, 0, 0, 0, 0], True, 1, 1, 0))
    print(robot.movej([0, -pi / 4, pi / 2, -pi / 4, pi / 2, 0], True, 1, 1, 0))
    print(robot.movel([-0.3, -0.1, 0.2, -pi / 2, 0, pi / 2], False, 1, 1, 0))
config = toml.load(open("physics.toml", 'r'))

ppi = 2
height = config["field"]["height"]
width = config["field"]["width"]
field = np.zeros((width * ppi, height * ppi, 3))

robot_width = config["drivetrain"]["width"]
robot_length = config["drivetrain"]["length"]
robot_rot = config["robot"]["rot"]
robot_x = config["robot"]["x"]
robot_y = config["robot"]["y"]

drivetrain = DifferentialDrive(robot_width)
robot = Robot(drivetrain,
              robot_width * ppi,
              robot_length * ppi,
              position=[robot_x, robot_y],
              rotation=robot_rot)

dt = 0
last_time = datetime.now().microsecond / 1000000

while True:
    simfield = np.copy(field)
    robot.update(dt)
    pts = np.array(robot.points, np.int32)
    pts = pts.reshape((-1, 1, 2))
    cv2.fillPoly(simfield, [pts], (0, 255, 255))
    cv2.putText(simfield, f"L: {drivetrain.l_speed:.2f}", (15, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    cv2.putText(simfield, f"R: {drivetrain.r_speed:.2f}", (15, 100),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    cv2.putText(simfield, f"F: {robot.forward:.2f}", (200, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
from robot.robot import Robot
from util.json_generator import generate_sample_obstacles
from util.json_parser import JSONParser
import numpy as np

# Generate the JSON file needed for obstacles
generate_sample_obstacles()

map_axis_size = 50
map_plane = np.zeros([map_axis_size, map_axis_size])
starting_location = [map_axis_size / 2, map_axis_size / 2]
robot = Robot(map_plane, JSONParser.get_obstacles(), starting_location)

# Human input movement
while True:
    user_input = input('Enter a movement command')
    if user_input.lower() not in Robot.ALLOWED_MOVEMENTS:
        print("Error: {} is not a recognized movement".format(user_input))
        print("The allowed movements are {}.".format(Robot.ALLOWED_MOVEMENTS))
        continue
    robot.update_position(user_input)
battery = Battery(arduino_serial_port)
prehenseur = PrehenseurRotationControl(polulu_serial_port)
magnet = Magnet(arduino_serial_port, prehenseur)
robot_service = RobotService(island_server_address)
camera_rotation = CameraRotationControl(polulu_serial_port)
movement = Movement(compute=None,
                    sense=world_map,
                    control=wheels,
                    loop_time=loop_time,
                    min_distance_to_destination=min_distance_to_destination)
robot = Robot(wheels=corrected_wheels,
              world_map=world_map,
              pathfinder=None,
              manchester_antenna=manchester_antenna,
              movement=movement,
              battery=battery,
              magnet=magnet,
              camera_rotation=camera_rotation)

robot.lift_prehenseur_up()
time.sleep(1)
robot.lift_prehenseur_down()

treasure_easiest_path = TreasureEasiestPath()
vision_refresher = VisionRefresher(robot, base_station_host, base_station_port,
                                   camera, table_corners, treasure_easiest_path)
action_machine = ActionMachine()
embedded_vision_service = EmbeddedVisionService(camera,
                                                EmbeddedTreasureDetector(),
                                                EmbeddedRechargeStationDetector())
timer = Timer()
import sys

from robot.robot import Robot

if __name__ == "__main__":
    robot = Robot()
    instruction_file = None
    if len(sys.argv) > 1:
        instruction_file = sys.argv[1]
    result = robot.operate(instruction_file, debug=False)
    distance = robot.nav.distance
def evaluate_both(self, trials=10):
    """
    Run the live experiment controlling the robot. For each trial, EOG is
    classified first, which determines the position the robot moves to. After
    the robot has reached its final position, EMG is classified to control the
    gripper. Recorded data is saved into the live_data folder.

    :param trials: number of trials
    :return:
    """
    recorder = Recorder(num_channels=8)

    # load models
    emg_model = load_latest_emg_model()
    emg_clf = emg_model["clf"]
    emg_best_features = emg_model["best_features"]
    emg_feature_extractor = EMGFeatureExtractor()

    eog_model = load_latest_eog_model()
    eog_clf = eog_model["clf"]
    eog_best_features = eog_model["best_features"]
    eog_relax_clf = eog_model["relax_clf"]
    eog_best_relax_features = eog_model["best_relax_features"]
    eog_feature_extractor = EOGFeatureExtractor()
    eog_bp = BandPassFilter(lowcut=1, highcut=22, fs=FS, order=4)

    # discard the first two sampling windows, which gets rid of a spike at the beginning
    recorder.read_sample_win()
    recorder.read_sample_win()

    robot = Robot()
    for _ in range(trials):
        # notify the subject that sampling is about to start
        play_sound()
        signals = recorder.read_sample_win()
        eog_signals = signals[:, 4:8]

        # bandpass filter and extract features
        eog_features = []
        for channel in range(eog_signals.shape[1]):
            eog_signals[:, channel] = eog_bp(eog_signals[:, channel])
        eog_features.append(eog_feature_extractor.extract_features(eog_signals))
        eog_features = np.array(eog_features)

        eog_label = eog_relax_clf.predict([eog_features[0, eog_best_relax_features]])[0]
        save_live_data("eog", eog_label, eog_signals)
        if eog_label == "relax":
            # continue if home position
            robot.move_home()
            continue
        else:
            # classify the eye movement label and move the robot into position
            eog_label = eog_clf.predict([eog_features[0, eog_best_features]])[0]
            save_live_data("eog", eog_label, eog_signals)
            if eog_label == "left":
                robot.move_up_left()
            elif eog_label == "up":
                robot.move_up_right()
            elif eog_label == "right":
                robot.move_down_right()
            elif eog_label == "down":
                robot.move_down_left()

        # notify the subject that sampling is about to start
        play_sound()
        signals = recorder.read_sample_win()
        emg_signals = signals[:, :2]
        emg_features = []
        emg_features.append(emg_feature_extractor.extract_features(emg_signals))
        emg_features = np.array(emg_features)
        emg_label = emg_clf.predict([emg_features[0, emg_best_features]])[0]
        save_live_data("emg", emg_label, emg_signals)
        if emg_label == "fist":
            robot.close_gripper()
        else:
            robot.open_gripper()

        # hold position for a little
        time.sleep(2)
        # return to initial state
        robot.move_home()
def explore():
    robot = Robot((18, 1), True)
    _explore_map = Arena(robot)
    _explore_map.set_allunexplored()
    _explore = Exploration(_explore_map, robot, 300, 3600)
    _explore.run()
def __init__(self, window, window_title):
    self.window = window
    self.robot = Robot('tkinter')
    self.window.title(window_title)

    # register behaviour on clicking the red cross 'close' button
    self.window.protocol("WM_DELETE_WINDOW", self.close_window)

    # create two notebooks side by side, one for plain eye images, a second for aspects
    self.eye_notebook = ttk.Notebook(window)
    self.aspect_notebook = ttk.Notebook(window)

    # create a notebook frame for each eye and add it to the notebook
    for eye in self.robot.cognition.eyes:
        frame = EyeFrame(self.eye_notebook, eye, self.eyeframe_click_callback)
        self.eye_notebook.add(frame, text=eye.name)
        # bind eye frame notebook tab change routine
        frame.bind("<Visibility>", self.on_eye_tab_changed)

    current_eye = self.get_current_eye_frame().eye
    # make tabs for the first eye's aspects
    self.populate_aspect_notebook(current_eye)
    current_aspect = self.get_current_aspect_frame().aspect

    # create aspect details frame
    self.aspect_details = AspectDetails(window, 'Details', current_aspect)

    self.btn_save_all_aspects = tkinter.Button(window, text="Save all aspects", width=20,
                                               command=self.save_all_aspects)
    self.btn_load_all_aspects = tkinter.Button(window, text="Load all aspects", width=20,
                                               command=self.load_all_aspects)
    self.btn_start_record = tkinter.Button(window, text="Start Recorder", width=20,
                                           command=self.start_recorder)
    self.btn_start_chaser = tkinter.Button(window, text="Start Chaser", width=20,
                                           command=self.start_chaser)

    # grid manager
    self.eye_notebook.grid(column=0, row=0, columnspan=3)
    self.aspect_notebook.grid(column=4, row=0, columnspan=3)
    self.btn_save_all_aspects.grid(column=0, row=1)
    self.btn_load_all_aspects.grid(column=1, row=1)
    self.btn_start_record.grid(column=2, row=1)
    self.btn_start_chaser.grid(column=3, row=1)
    self.aspect_details.grid(column=4, row=1, columnspan=3)

    # # Define the codec and create VideoWriter object
    # self.fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    # self.out = cv2.VideoWriter('output.avi', self.fourcc, 15.0, (600, 450))

    # After it is called once, the update method will be automatically called
    # every `delay` milliseconds; should be about 30 FPS
    self.delay = 30
    self.update()

    self.window.mainloop()
def step_impl(context):
    context.robot = Robot()
def execute_path(self, path, goal_pos):
    _output_string = []
    _temp_block = path.pop()
    _ori_target = None
    _actions = []
    _temp_robot = Robot(self._robot.get_pos(), False)
    _temp_robot.set_ori(self._robot.get_ori())
    _temp_robot.set_speed(0)

    # Replay the path on a virtual robot to build the action sequence.
    while _temp_robot.get_pos()[0] != goal_pos[0] or _temp_robot.get_pos()[1] != goal_pos[1]:
        if _temp_robot.get_pos()[0] == _temp_block.get_pos()[0] and \
                _temp_robot.get_pos()[1] == _temp_block.get_pos()[1]:
            _temp_block = path.pop()
        _ori_target = self.get_target_ori(_temp_robot.get_pos(), _temp_robot.get_ori(), _temp_block)
        _action = None
        if _temp_robot.get_ori() != _ori_target:
            _action = self.get_target_move(_temp_robot.get_ori(), _ori_target)
        else:
            _action = Action.FORWARD
        print('Action {} from {} to {}'.format(_action.value, _temp_robot.get_pos(), _temp_block.get_pos()))
        _temp_robot.move(_action)
        _actions.append(_action)
        _output_string.append(_action.value)

    if not self._robot.is_actual_robot():
        # Simulated robot: execute the actions one at a time.
        for _act in _actions:
            if _act == Action.FORWARD:
                if not self.can_move_forward():
                    print("Fastest Path Execution terminated!")
                    return "T"
            self._robot.move(_act)
            self._robot.notify(_act, self._map, True)
            if self._exploration_mode:
                self._robot.set_sensor()
                self._robot.sense(self._map)  # , self._real_map)
                if self._surface is not None:
                    self._map.draw(self._surface)
    else:
        # Physical robot: batch consecutive FORWARD moves into a single command.
        _fCount = 0
        for _act in _actions:
            if _act == Action.FORWARD:
                _fCount += 1
                if _fCount == 9:
                    self._robot.move_multiple(_fCount)
                    _command = 'F' + str(_fCount)
                    self._robot.notify(_command, self._map, True)
                    _fCount = 0  # reset the counter after flushing the batch
            elif _act == Action.RIGHT or _act == Action.LEFT:
                if _fCount > 0:
                    self._robot.move_multiple(_fCount)
                    _command = 'F' + str(_fCount)
                    self._robot.notify(_command, self._map, True)
                    _fCount = 0
                self._robot.move(_act)
                self._robot.notify(_act, self._map, True)
        if _fCount > 0:
            # flush any trailing FORWARD moves
            self._robot.move_multiple(_fCount)
            _command = 'F' + str(_fCount)
            self._robot.notify(_command, self._map, True)
            _fCount = 0

    print('Actions: {}'.format(''.join(_output_string)))
    return ''.join(_output_string)
from robot.robot import Robot
import time

with Robot() as r:
    r.lidar(1)
    r.servo(0)
    for i in range(100):
        r.step(1000, 50, 1000, 50)
        for j in range(100):
            print(r.sensor)
            time.sleep(0.1)
        break

        # note: the break above makes the obstacle-avoidance code below unreachable
        r.drive(800, 800)
        print(r.sensor)
        if 0 < r.sensor < 300:
            r.stop()
            r.lidar(0)
            time.sleep(0.5)
            r.drive(1000, -1000)
            time.sleep(1.6)
            r.stop()
            break
        time.sleep(0.1)
    r.stop()