def cleanup(world, max_bots=10, remove_from=5):
    """
    Removes the slowest of the oldest `remove_from` robots from the world
    if there are more than `max_bots`.
    :param world:
    :type world: World
    :return:
    """
    if len(world.robots) <= max_bots:
        return

    logger.debug("Automatically removing the slowest of the oldest %d robots..." % remove_from)

    # Get the oldest `remove_from` robots
    robots = sorted(world.robot_list(), key=lambda r: r.age(), reverse=True)[:remove_from]

    # Get the slowest robot
    slowest = sorted(robots, key=lambda r: r.velocity())[0]

    fut, hl = yield From(world.add_highlight(slowest.last_position, (50, 50, 50, 50)))
    yield From(fut)
    yield From(trollius.sleep(1))

    # Delete it from the world
    fut = yield From(world.delete_robot(slowest))
    yield From(trollius.sleep(1))
    yield From(remove_highlights(world, [hl]))
    yield From(fut)

def publish_loop(self):
    manager = yield From(pygazebo.connect())

    def callback_pose(data):
        self.sensor_comp.callback(data)

    publisher = yield From(
        manager.advertise('/gazebo/default/turtlebot/joint_cmd',
                          'gazebo.msgs.JointCmd'))

    subscriber_pose = manager.subscribe('/gazebo/default/pose/info',
                                        'gazebo.msgs.PosesStamped',
                                        callback_pose)
    yield From(subscriber_pose.wait_for_connection())

    msg_left_wheel = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_left_wheel.name = 'turtlebot::create::left_wheel'

    msg_right_wheel = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_right_wheel.name = 'turtlebot::create::right_wheel'

    print "wait for connection.."
    yield From(trollius.sleep(1.0))

    while True:
        self.brica_agent.step()

        force = self.action_comp.get_result("out_force")
        max_force_strength = 0.7
        force = np.clip(force, -max_force_strength, max_force_strength)
        msg_right_wheel.force, msg_left_wheel.force = force

        # The publish calls must be yielded, otherwise the messages are never sent.
        yield From(publisher.publish(msg_left_wheel))
        yield From(publisher.publish(msg_right_wheel))

        yield From(trollius.sleep(0.00))

def pick_up_cubes(r):
    global ganked_cube
    while True:
        val = get_cube(r)
        if val != Colors.NONE:
            r.drive.stop()
            yield From(asyncio.sleep(0.1))
            val = get_cube(r)
            if val == OUR_COLOR:
                ganked_cube = time.time()
                r.arms.silo.up()
                log.info('Picked up {} block'.format(Colors.name(val)))
                r.arms.silo.down()
            elif val == THEIR_COLOR:
                ganked_cube = time.time()
                r.drive.stop()
                r.arms.dump.up()
                log.info('Picked up {} block'.format(Colors.name(val)))
                r.arms.dump.down()
            else:
                break
        else:
            break
        yield From(asyncio.sleep(0.05))

def publish_loop(U):
    manager = yield From(pygazebo.connect())

    lift_publisher = yield From(
        manager.advertise('/gazebo/default/trevor/front_gripper/lift_force',
                          'gazebo.msgs.JointCmd'))
    grip_publisher = yield From(
        manager.advertise('/gazebo/default/trevor/front_gripper/grip_force',
                          'gazebo.msgs.JointCmd'))

    lift_message = pygazebo.msg.joint_cmd_pb2.JointCmd()
    lift_message.axis = 0
    lift_message.force = U.lift_force

    grip_message = pygazebo.msg.joint_cmd_pb2.JointCmd()
    grip_message.axis = 0
    grip_message.force = U.grip_force

    lift_message.name = "trevor::front_gripper::palm_raiser"

    while True:
        yield From(lift_publisher.publish(lift_message))
        yield From(trollius.sleep(0.1))

        grip_message.name = "trevor::front_gripper::left_finger_tip"
        yield From(grip_publisher.publish(grip_message))
        yield From(trollius.sleep(0.1))

        grip_message.name = "trevor::front_gripper::right_finger_tip"
        yield From(grip_publisher.publish(grip_message))
        yield From(trollius.sleep(0.1))

        grip_message.name = "trevor::front_gripper::palm_left_finger"
        yield From(grip_publisher.publish(grip_message))
        yield From(trollius.sleep(0.1))

        grip_message.name = "trevor::front_gripper::palm_right_finger"
        yield From(grip_publisher.publish(grip_message))
        yield From(trollius.sleep(0.1))

        print("Publishing")

def readCamera(self):
    cap = cv2.VideoCapture(0)
    frame = None
    success = False

    if not cap.isOpened():
        print("Failed to open camera!")
        return

    while True:
        try:
            success, frame = cap.read()
            if not success:
                print("cap.read() failed")
                yield trollius.From(trollius.sleep(1.0 / self.fps))
                continue

            self.broadcast(frame)
            if self.hasFrame:
                self.hasFrame(frame)
        except KeyboardInterrupt:
            self.loop.stop()
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=2, file=sys.stdout)

        yield trollius.From(trollius.sleep(1.0 / self.fps))

    cap.release()

def run_server():
    conf = parser.parse_args()
    conf.analyzer_address = None

    world = yield From(World.create(conf))
    yield From(world.pause(True))

    with open("/home/elte/mt/tol/scripts/starfish.yaml", "rb") as f:
        robot_yaml = f.read()

    body_spec = world.builder.body_builder.spec
    brain_spec = world.builder.brain_builder.spec
    bot = yaml_to_robot(body_spec, brain_spec, robot_yaml)

    fname = conf.output_directory + "/revolve_benchmark.csv"
    exists = os.path.exists(fname)
    if exists:
        f = open(fname, 'ab', buffering=1)
    else:
        f = open(fname, 'wb', buffering=1)

    output = csv.writer(f, delimiter=',')
    if not exists:
        output.writerow(['run', 'population_size', 'step_size',
                         'sim_time', 'real_time', 'factor'])

    n_bots = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]
    sim_time = 5.0
    runs = 20

    yield From(world.pause(False))

    for n in n_bots:
        poses = get_poses(n)
        trees = [Tree.from_body_brain(bot.body, bot.brain, body_spec)
                 for _ in range(n)]

        for i in range(runs):
            yield From(wait_for(world.insert_population(trees, poses)))

            while world.last_time is None:
                yield From(trollius.sleep(0.1))

            sim_before = world.last_time
            before = time.time()

            while float(world.last_time - sim_before) < sim_time:
                yield From(trollius.sleep(0.1))

            sim_diff = float(world.last_time - sim_before)
            diff = time.time() - before
            output.writerow((i, n, conf.world_step_size,
                             sim_diff, diff, sim_diff / diff))

            yield From(wait_for(world.delete_all_robots()))
            yield From(trollius.sleep(0.3))

def frame_parser(self, reader, writer):
    # This takes care of the framing.
    last_request_id = 0
    while True:
        # Read the frame header, parse it, read the data.
        # NOTE: The readline() and readexactly() calls will hang
        # if the client doesn't send enough data but doesn't
        # disconnect either.  We add a timeout to each.  (But the
        # timeout should really be implemented by StreamReader.)
        framing_b = yield From(asyncio.wait_for(
            reader.readline(),
            timeout=args.timeout, loop=self.loop))
        if random.random() * 100 < args.fail_percent:
            logging.warn('Inserting random failure')
            yield From(asyncio.sleep(args.fail_sleep * random.random(),
                                     loop=self.loop))
            writer.write(b'error random failure\r\n')
            break
        logging.debug('framing_b = %r', framing_b)
        if not framing_b:
            break  # Clean close.
        try:
            frame_keyword, request_id_b, byte_count_b = framing_b.split()
        except ValueError:
            writer.write(b'error unparseable frame\r\n')
            break
        if frame_keyword != b'request':
            writer.write(b'error frame does not start with request\r\n')
            break
        try:
            request_id, byte_count = int(request_id_b), int(byte_count_b)
        except ValueError:
            writer.write(b'error unparsable frame parameters\r\n')
            break
        if request_id != last_request_id + 1 or byte_count < 2:
            writer.write(b'error invalid frame parameters\r\n')
            break
        last_request_id = request_id
        request_b = yield From(asyncio.wait_for(
            reader.readexactly(byte_count),
            timeout=args.timeout, loop=self.loop))
        try:
            request = json.loads(request_b.decode('utf8'))
        except ValueError:
            writer.write(b'error unparsable json\r\n')
            break
        response = self.handle_request(request)  # Not a coroutine.
        if response is None:
            writer.write(b'error unhandlable request\r\n')
            break
        response_b = json.dumps(response).encode('utf8') + b'\r\n'
        byte_count = len(response_b)
        framing_s = 'response {0} {1}\r\n'.format(request_id, byte_count)
        writer.write(framing_s.encode('ascii'))
        yield From(asyncio.sleep(args.resp_sleep * random.random(),
                                 loop=self.loop))
        writer.write(response_b)

def run():
    """
    :return:
    """
    conf = parser.parse_args()
    conf.world_step_size = 0.004

    fname = conf.output_directory + "/benchmark.csv"
    exists = os.path.exists(fname)
    if exists:
        f = open(fname, 'ab', buffering=1)
    else:
        f = open(fname, 'wb', buffering=1)

    output = csv.writer(f, delimiter=',')
    if not exists:
        output.writerow(['run', 'population_size', 'step_size',
                         'sim_time', 'real_time', 'factor'])

    world = yield From(OnlineEvoManager.create(conf))
    yield From(wait_for(world.pause(False)))

    population_sizes = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]
    sim_time = 5
    runs = 20

    for n in population_sizes:
        for i in range(runs):
            trees, bboxes = yield From(world.generate_population(n))
            for tree, bbox in zip(trees, bboxes):
                res = yield From(world.birth(tree, bbox, None))
                yield From(res)
                yield From(trollius.sleep(0.05))

            while world.last_time is None:
                yield From(trollius.sleep(0.1))

            sim_before = world.last_time
            before = time.time()

            while float(world.last_time - sim_before) < sim_time:
                yield From(trollius.sleep(0.1))

            sim_diff = float(world.last_time - sim_before)
            diff = time.time() - before
            output.writerow((i, n, conf.world_step_size,
                             sim_diff, diff, sim_diff / diff))

            yield From(wait_for(world.delete_all_robots()))
            yield From(trollius.sleep(0.3))

    f.close()

def run_server():
    conf = Config(update_rate=25, proposal_threshold=0)

    world = yield From(World.create(conf))
    yield From(world.pause(True))
    yield From(yield_wait(world.build_arena()))

    n_bots = [1, 5, 20, 50]
    radius = 0.4 * conf.arena_size[0]
    n_repeats = 1
    sim_time = 5.0

    _state = [None, -1]

    # World update trigger
    def trigger(_):
        elapsed = float(world.last_time)
        if elapsed >= sim_time:
            _state[0] = time.time() - _state[0]
            _state[1] = elapsed

            # Remove trigger to prevent other incoming messages
            # from overwriting these values.
            world.remove_update_trigger(trigger)
        elif elapsed < 0:
            logger.error("Elapsed time < 0!")

    print("nbots\titer\tsimtime\twctime")
    for n in n_bots:
        poses = get_circle_poses(n, radius)

        for i in range(n_repeats):
            # Generate a starting population from the given poses
            yield From(yield_wait(world.generate_starting_population(poses)))
            yield From(trollius.sleep(0.5))

            _state[0] = time.time()
            _state[1] = -1
            world.add_update_trigger(trigger)
            yield From(world.pause(False))

            while _state[1] < 0:
                # Wait until the simulation time has passed the required
                # number of seconds.
                yield From(trollius.sleep(0.1))

            print("%d\t%d\t%f\t%f" % (n, i, _state[1], _state[0]))

            yield From(world.pause())
            yield From(yield_wait(world.delete_all_robots()))

            # Sleep to process all old messages
            yield From(trollius.sleep(0.5))
            yield From(world.reset())

def control_loop(self, driver, time_out):
    manager = yield From(pygazebo.connect())
    yield From(trollius.sleep(0.5))

    world_subscriber = manager.subscribe('/gazebo/default/world_stats',
                                         'gazebo.msgs.WorldStatistics',
                                         self.world_callback)
    location_subscriber = manager.subscribe('/gazebo/default/pose/info',
                                            'gazebo.msgs.PosesStamped',
                                            self.location_callback)
    light_sensor1_subscriber = manager.subscribe(
        '/gazebo/default/husky/camera1/link/camera/image',
        'gazebo.msgs.ImageStamped', self.light_1_callback)
    light_sensor2_subscriber = manager.subscribe(
        '/gazebo/default/husky/camera2/link/camera/image',
        'gazebo.msgs.ImageStamped', self.light_2_callback)
    laser_subscriber = manager.subscribe(
        '/gazebo/default/husky/hokuyo/link/laser/scan',
        'gazebo.msgs.LaserScanStamped', self.laser_callback)
    yield From(trollius.sleep(1))

    world_publisher = yield From(
        manager.advertise('/gazebo/default/world_control',
                          'gazebo.msgs.WorldControl'))
    world_publisher.wait_for_listener()
    wheel_publisher = yield From(
        manager.advertise('/gazebo/default/husky/joint_cmd',
                          'gazebo.msgs.JointCmd'))
    wheel_publisher.wait_for_listener()

    world_control = WorldControl()
    world_control.pause = True
    world_control.reset.all = True
    yield From(trollius.sleep(0.01))
    yield From(world_publisher.publish(world_control))

    left_wheel = JointCmd()
    left_wheel.name = 'husky::front_left_joint'
    right_wheel = JointCmd()
    right_wheel.name = 'husky::front_right_joint'
    left_wheel.velocity.target = 0
    right_wheel.velocity.target = 0
    yield From(trollius.sleep(0.01))
    yield From(wheel_publisher.publish(left_wheel))
    yield From(wheel_publisher.publish(right_wheel))

    world_control.pause = False
    yield From(trollius.sleep(0.01))
    yield From(world_publisher.publish(world_control))
    yield From(trollius.sleep(0.5))

    start_time = self.sim_time
    end_time = start_time + time_out
    print "Starting control loop"

    while (self.sim_time < end_time) and (self.distance_to_goal > 0.5):
        sensor_input = [self.light_sensor1, self.light_sensor2]
        sensor_input += self.collide
        (left, right) = driver(sensor_input)
        left_wheel.velocity.target = left
        right_wheel.velocity.target = right
        yield From(wheel_publisher.publish(left_wheel))
        yield From(wheel_publisher.publish(right_wheel))
        self.update_distance()
        if self.distance_to_goal < 0.5:
            break
        yield From(trollius.sleep(.01))

    yield From(trollius.sleep(0.01))
    world_control.pause = True
    yield From(world_publisher.publish(world_control))
    self.left_velocity = left_wheel.velocity.target
    self.right_velocity = right_wheel.velocity.target
    self.loop.stop()

def publish_loop():
    manager = yield From(pygazebo.connect())

    publisher = yield From(
        manager.advertise('/gazebo/default/simple_arm_0/joint_cmd',
                          'gazebo.msgs.JointCmd'))

    message = pygazebo.msg.joint_cmd_pb2.JointCmd()
    message.name = 'simple_arm_0::arm_shoulder_pan_joint'
    message.position.target = 3

    yield From(publisher.publish(message))
    yield From(trollius.sleep(0.5))
    yield From(publisher.publish(message))
    yield From(trollius.sleep(0.5))

def _set_inferior_tty():
    if self.proc_inftty:
        if self.proc_inftty.returncode is None:
            self.proc_inftty.terminate()
        self.proc_inftty = None
    try:
        self.proc_inftty = proc = yield asyncio.From(
            asyncio.create_subprocess_exec(*args))
        info('inferiortty: {}'.format(args))
    except OSError as e:
        self.console_print('Cannot spawn terminal: {}\n'.format(e))
    else:
        start = time.time()
        while time.time() - start < 2:
            try:
                with open(result_file.name) as f:
                    lines = f.readlines()
                    # Commands found in the result file.
                    if len(lines) == 2:
                        set_inferior_tty_cb(lines[0])
                        set_inferior_tty_cb(lines[1])
                        break
            except IOError as e:
                self.console_print(
                    'Cannot set the inferior tty: {}\n'.format(e))
                proc.terminate()
                break
            yield asyncio.From(asyncio.sleep(.100, loop=self.vim.loop))
        else:
            self.console_print('Failed to start inferior_tty.py.\n')
            proc.terminate()

def gpsUpdate(self):
    while True:
        #swag.system('cls' if swag.name == 'nt' else 'clear')
        gps_string = self.gpsRead()[1:]  # getting rid of the stupid $ marker at the beginning of the strings
        gps_list = string.split(gps_string, ',')

        if gps_list[0] == "GPRMC":
            if gps_list[4] == 'S':
                self.gps_data['latitude'] = -1 * (float(gps_list[3][0:2]) + (float(gps_list[3][2:]) / 60.0))
            else:
                self.gps_data['latitude'] = float(gps_list[3][0:2]) + (float(gps_list[3][2:]) / 60.0)

            if gps_list[6] == 'W':
                self.gps_data['longitude'] = -1 * (float(gps_list[5][0:3]) + (float(gps_list[5][3:]) / 60.0))
            else:
                self.gps_data['longitude'] = float(gps_list[5][0:3]) + (float(gps_list[5][3:]) / 60.0)

            self.gps_data['speed'] = float(gps_list[7]) * 1.15078

            self.publish(u'aero.near.carPos', self.gps_data['latitude'], self.gps_data['longitude'])
            self.publish(u'aero.near.carSpeed', self.gps_data['speed'])

        elif gps_list[0] == "GPVTG":
            degrees = float(gps_list[1])
            if degrees > 90 and degrees < 270:
                if degrees > 180:
                    self.gps_data['heading'] = "S°{}W".format(degrees - 180)  # ° converted to degree symbol in html
                else:
                    self.gps_data['heading'] = "S°{}E".format(180 - degrees)
            else:
                if degrees > 270:
                    self.gps_data['heading'] = "N°{}W".format(360 - degrees)
                else:
                    self.gps_data['heading'] = "N°{}E".format(degrees)

            self.publish(u'aero.near.carHeading', self.gps_data['heading'])

        print self.gps_data
        yield From(asyncio.sleep(.03333))

def rainbow():
    delay = 0.01
    offset = 0
    while True:
        if offset > leds.ledArraySize:
            offset = 0
        x = offset
        for led in range(leds.ledArraySize):
            if x >= leds.ledArraySize:
                x = 0
            wavelength = mp(led, 0, leds.ledArraySize, 780, 380)
            color = wavelengthToRGB(wavelength)
            leds.changeColor(x, color)
            #print("x={0}, offset={1}, color={2}".format(x, offset, color))
            x += 1
        offset += 1
        yield asyncio.From(asyncio.sleep(delay))

def publish_loop():
    manager = yield From(pygazebo.connect())
    print("have manager")
    publisher = yield From(
        manager.advertise('/gazebo/default/stompy/joint_cmd',
                          'gazebo.msgs.JointCmd'))
    print("have publisher")
    message = pygazebo.msg.joint_cmd_pb2.JointCmd()
    #message.name = 'stompy::fl_leg::thigh_to_calf_upper'
    #message.axis = 1
    #message.force = -1000.0
    #message.name = 'stompy::body_to_fl_leg'
    #message.axis = 2
    message.name = joint['name']
    message.axis = joint['axis']
    message.force = float(sys.argv[1])
    #message.force = -1000
    #message.position.p_gain = 10.0
    #message.position.i_gain = 0.5
    #message.position.target = 1.5707
    #message.position.target = math.radians(float(sys.argv[1]))
    print("message: %s" % message)
    #message.name = 'stompy::fl_leg::hip_to_thigh'
    #message.axis = 1
    #message.force = -1000.0
    for i in xrange(n):
        print("Publish: %s" % i)
        yield From(publisher.publish(message))
        yield From(trollius.sleep(1.0))

def _wait_data_writer(self):
    for dummy in range(50):
        if not self.data_writer:
            yield From(trollius.sleep(0.1))
        else:
            return
    raise Exception('Time out')

def poll(self):
    print("WSResourceServer: poll()")
    rs = {}
    while True:
        try:
            rs = {}
            rs["resources"] = []
            for resource in self.resources:
                r = {}
                r["resourceName"] = resource.name
                r["variables"] = resource.variables
                r["lastUpdated"] = str(datetime.now())
                rs['resources'].append(r)
            self.broadcast(json.dumps(rs))
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=6, file=sys.stdout)
        yield asyncio.From(asyncio.sleep(self.pollRate))

def run_server():
    conf = Config(
        proposal_threshold=0,
        output_directory='output'
    )

    world = yield From(World.create(conf))
    yield From(world.pause(True))

    wall_x = conf.arena_size[0] / 2.0
    wall_y = conf.arena_size[1] / 2.0
    wall_points = [Vector3(-wall_x, -wall_y, 0), Vector3(wall_x, -wall_y, 0),
                   Vector3(wall_x, wall_y, 0), Vector3(-wall_x, wall_y, 0)]
    future = yield From(world.build_walls(wall_points))
    yield From(future)

    grid_size = (1, 2)
    spacing = 3 * conf.mating_distance
    grid_x, grid_y = grid_size
    x_offset = -(grid_x - 1) * spacing * 0.5
    y_offset = -(grid_y - 1) * spacing * 0.5
    poses = [Pose(position=Vector3(x_offset + spacing * i, y_offset + spacing * j, 0))
             for i, j in itertools.product(range(grid_x), range(grid_y))]

    trees, bboxes = yield From(world.generate_population(len(poses)))
    for pose, bbox in itertools.izip(poses, bboxes):
        pose.position += Vector3(0, 0, bbox[2])

    fut = yield From(world.insert_population(trees, poses))
    yield From(fut)

    while True:
        yield From(trollius.sleep(0.1))
        yield From(world.perform_reproduction())

def display_date(loop):
    end_time = loop.time() + 10
    while True:
        print datetime.datetime.now()
        if (loop.time() + 1.0) >= end_time:
            break
        yield From(asyncio.sleep(1))

def publish_loop():
    manager = yield From(pygazebo.connect())
    print("have manager")
    publisher = yield From(
        manager.advertise('/gazebo/default/stompy/joint_cmd',
                          'gazebo.msgs.JointCmd'))
    print("have publisher")
    message = pygazebo.msg.joint_cmd_pb2.JointCmd()
    #message.name = 'stompy::fl_leg::thigh_to_calf_upper'
    #message.axis = 1
    #message.force = -1000.0
    message.name = 'stompy::body_to_fl_leg'
    message.axis = 2
    message.force = 1000
    #message.position.p_gain = 10.0
    #message.position.i_gain = 0.5
    #message.position.target = 1.5707
    #message.name = 'stompy::fl_leg::hip_to_thigh'
    #message.axis = 1
    #message.force = -1000.0
    print("have message")
    while True:
        yield From(publisher.publish(message))
        print("published")
        yield From(trollius.sleep(1.0))
        print("sleeping")

def factorial(name, number):
    f = 1
    for i in range(2, number + 1):
        print "Task %s: factorial (%s)" % (name, i)
        yield From(asyncio.sleep(1))
        f *= i
    print "Task %s, result: %s" % (name, f)

def poll(self):
    print("poll task started")
    while True:
        try:
            msg = self.debugQueue.get_nowait()
            print(msg)
        except Empty:
            pass
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print("poll exception")
            traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=2, file=sys.stdout)

        try:
            result = self.resultQueue.get_nowait()
            self.hasResult(result)
        except Empty:
            pass
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print("poll exception")
            traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=2, file=sys.stdout)
        finally:
            yield asyncio.From(asyncio.sleep(self.pollRate))

def message_received(self, frame, delay=0):
    tmi = TReadOnlyBuffer(frame)
    iprot = THeaderProtocol(tmi)
    (fname, mtype, rseqid) = iprot.readMessageBegin()

    if delay:
        yield From(asyncio.sleep(delay))
    else:
        try:
            timeout_task = self.pending_tasks.pop(rseqid)
        except KeyError:
            # Task doesn't have a timeout or has already been cancelled
            # and pruned from `pending_tasks`.
            pass
        else:
            timeout_task.cancel()

    method = getattr(self.client, "recv_" + fname.decode(), None)
    if method is None:
        logger.error("Method %r is not supported", fname)
        self.transport.abort()
    else:
        try:
            method(iprot, mtype, rseqid)
        except (
            asyncio.futures.InvalidStateError,
            asyncio.CancelledError,
        ) as e:
            logger.warning("Method %r cancelled: %s", fname, str(e))

def client():
    reader, writer = yield From(asyncio.streams.open_connection(
        '127.0.0.1', 12345, loop=loop))

    def send(msg):
        print("> " + msg)
        writer.write((msg + '\n').encode("utf-8"))

    def recv():
        msgback = (yield From(reader.readline()))
        msgback = msgback.decode("utf-8").rstrip()
        print("< " + msgback)
        raise Return(msgback)

    # send a line
    send("add 1 2")
    msg = yield From(recv())

    send("repeat 5 hello")
    msg = yield From(recv())
    assert msg == 'begin'
    while True:
        msg = yield From(recv())
        if msg == 'end':
            break

    writer.close()
    yield From(asyncio.sleep(0.5))

def _process_robots(self):
    '''Process robots.txt.

    Coroutine.
    '''
    try:
        request = self._new_initial_request(with_body=False)
        verdict = (yield From(self._should_fetch_reason_with_robots(
            request, self._url_item.url_record)))[0]
    except REMOTE_ERRORS as error:
        _logger.error(__(
            _('Fetching robots.txt for ‘{url}’ '
              'encountered an error: {error}'),
            url=self._next_url_info.url, error=error
        ))
        self._result_rule.handle_error(request, error, self._url_item)

        wait_time = self._result_rule.get_wait_time(
            request, self._url_item.url_record, error=error
        )

        if wait_time:
            _logger.debug('Sleeping {0}.'.format(wait_time))
            yield From(trollius.sleep(wait_time))

        raise Return(False)
    else:
        if not verdict:
            self._url_item.skip()
            raise Return(False)

    raise Return(True)

def do_mate(world, ra, rb):
    """
    :param world:
    :param ra:
    :param rb:
    :return:
    """
    mate = None
    while True:
        # Attempt reproduction
        mate = yield From(world.attempt_mate(ra, rb))
        if mate:
            break

    logger.debug("Mates selected, highlighting points...")

    new_pos = get_insert_position(world.conf, ra, rb, world)
    new_pos.z = insert_z
    hls = yield From(create_highlights(
        world, ra.last_position, rb.last_position, new_pos))
    yield From(trollius.sleep(2))

    logger.debug("Inserting child...")
    child, bbox = mate
    pose = Pose(position=new_pos)
    future = yield From(world.insert_robot(child, pose, parents=[ra, rb]))
    yield From(future)
    yield From(sleep_sim_time(world, 1.8))

    logger.debug("Removing highlights...")
    yield From(remove_highlights(world, hls))

def activity(self):
    backoff = 0
    while True:
        try:
            self.reader, self.writer = yield From(asyncio.open_connection(
                self.host, self.port, ssl=self.sslctx, loop=self.loop))
        except Exception as exc:
            backoff = min(args.max_backoff, backoff + (backoff // 2) + 1)
            logging.info('Error connecting: %r; sleep %s', exc, backoff)
            yield From(asyncio.sleep(backoff, loop=self.loop))
            continue
        backoff = 0
        self.next_id = 0
        self.pending = {}
        self.initialized = True
        try:
            while self.todo:
                payload, waiter = self.todo.pop()
                if not waiter.done():
                    yield From(self.send(payload, waiter))
            while True:
                resp_id, resp = yield From(self.process())
                if resp_id in self.pending:
                    payload, waiter = self.pending.pop(resp_id)
                    if not waiter.done():
                        waiter.set_result(resp)
        except Exception as exc:
            self.initialized = False
            self.writer.close()
            while self.pending:
                req_id, pair = self.pending.popitem()
                payload, waiter = pair
                if not waiter.done():
                    self.todo.add(pair)
            logging.info('Error processing: %r', exc)

def process(self):
    '''Process.

    Coroutine.
    '''
    verdict = self._fetch_rule.check_ftp_request(
        self._url_item.url_info, self._url_item.url_record)[0]

    if not verdict:
        self._url_item.skip()
        return

    request = Request(self._url_item.url_info.url)  # TODO: dependency inject

    if self._fetch_rule.ftp_login:
        request.username, request.password = self._fetch_rule.ftp_login

    dir_name, filename = self._url_item.url_info.split_path()

    if self._processor.fetch_params.glob and frozenset(filename) & GLOB_CHARS:
        directory_url = to_dir_path_url(request.url_info)
        directory_request = copy.deepcopy(request)
        directory_request.url = directory_url
        request = directory_request
        is_file = False
        self._glob_pattern = urllib.parse.unquote(filename)
    else:
        is_file = yield From(self._prepare_request_file_vs_dir(request))

        self._file_writer_session.process_request(request)

    wait_time = yield From(self._fetch(request, is_file))

    if wait_time:
        _logger.debug('Sleeping {0}.'.format(wait_time))
        yield From(trollius.sleep(wait_time))

def get_cpu_usage(agent):
    yield From(agent.run_event.wait())
    config = agent.config['linux']
    logger.info('starting "get_cpu_usage" task for "%s"', config['hostname'])
    db_config = config['database']
    yield From(agent.async_create_database(**db_config))
    while agent.run_event.is_set():
        yield From(asyncio.sleep(config['cpu_usage_frequency']))
        try:
            cpu_percent = psutil.cpu_percent(interval=None)
            points = [{
                'measurement': 'cpu_usage',
                'tags': {
                    'host': config['hostname'],
                },
                'fields': {
                    'value': cpu_percent
                }
            }]
            logger.debug('{}: cpu_usage={}%'.format(
                config['hostname'], cpu_percent))
            yield From(agent.async_push(points, db_config['name']))
        except:
            logger.exception('cannot get the cpu usage')
    logger.info('get_cpu_usage terminated')

def _check_resource_monitor(self):
    if not self._resource_monitor:
        return

    for counter in itertools.count():
        resource_info = self._resource_monitor.check()

        if not resource_info:
            if counter:
                _logger.info(_('Situation cleared.'))
            break

        if counter % 15 == 0:
            if resource_info.path:
                _logger.warning(__(
                    _('Low disk space on {path} ({size} free).'),
                    path=resource_info.path,
                    size=wpull.string.format_size(resource_info.free)
                ))
            else:
                _logger.warning(__(
                    _('Low memory ({size} free).'),
                    size=wpull.string.format_size(resource_info.free)
                ))

            _logger.warning(_('Waiting for operator to clear situation.'))

        yield From(trollius.sleep(60))

def slow_function():  # <5>
    # pretend waiting a long time for I/O
    yield From(asyncio.sleep(3))  # <6>
    raise Return("suc")

def timeout_task(self, fname, seqid, delay=0):
    yield From(asyncio.sleep(delay))
    self._handle_timeout(fname, seqid)

def hello():
    print("Hello world!")
    r = yield From(asyncio.sleep(1))
    print("Hello again!")

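# The coroutines in this collection use the trollius-style "yield From(...)" syntax
# and must be driven by an event loop. A minimal sketch of running the hello()
# coroutine above; the loop setup below is illustrative and not part of the
# original snippet:
if __name__ == '__main__':
    import trollius as asyncio
    loop = asyncio.get_event_loop()
    # Runs hello() to completion: prints, sleeps one second, prints again.
    loop.run_until_complete(hello())
    loop.close()
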
def handle_gonogo(G):
    '''
    This contains the experimental logic of the Stop Task.
    A lot of work went into constructing the stimuli.
    Stimuli parameters are loaded into variables in the code above.
    Runs 136 trials of Go-Nogo.
    This function is to be run within the asyncio loop.
    '''

    # we just need it here...
    STOP = 1
    GO = 0

    tooSoonTime = G['S']['tooSoonTime']
    myMultiStairs = G['S']['myMultiStairs']
    # if the time it took to respond is smaller than this time --> invalid.

    numberOfResponses = 0
    G['S']['nextFlipTasks'] = []  # stuff winflipper needs to do later on..

    # set the visual contents here...
    # INITIAL SETTING
    G['S']['goNogoStim'] = G['vstims']['S']['fix']

    # yeah, do all kinds of init here.
    for trialNumber in range(len(G['S']['SSstopgo'])):

        thisDirection = random.choice(('al', 'ar'))  # obtain this from the file!!
        LorR = thisDirection[1].upper()
        thisTrialType_num = G['S']['SSstopgo'][trialNumber]  # this is a 0 (GO) or 1 (STOP)
        thisTrialType = [GO, STOP][int(thisTrialType_num)]  # shady practices indeed -- so later on I can say 'if thisTrialType is GO:', etc
        GorNG = ['Go', 'Stop'][int(thisTrialType)]
        thisISIWaitTime = G['S']['ISIwaitTime'][trialNumber]

        correctResponseSide = G['S']['correctResponseSides'][trialNumber]
        wrongResponseSide = G['S']['wrongResponseSides'][trialNumber]

        allResponses = []
        responded = False  # subj responded?
        tooManyResponses = False
        trialHandled = False

        if thisTrialType is STOP:
            # this should be called only 40 times, since there are 40 stop trials...
            thisSSD, thisCondition = myMultiStairs.next()  # I defined the myMultiStairs above.

        # this code tells the loop to only continue when continueRoutine is not False
        # otherwise it'll just keep yielding.
        # let winflipper make new clock
        G['S']['continueRoutine'] = False
        G['S']['nextFlipTasks'].append([G['S']['resetClock'], G])  # the makeNewClock automatically makes things continue
        while G['S']['continueRoutine'] is False:
            yield From(asyncio.sleep(0))
        cl = G['S']['clock']  # obtain the clock that was just made.

        # ok, we can proceed -- the clock has been set.
        G['S']['goNogoStim'] = G['vstims']['S']['pre']
        while cl.getTime() < 0.5:
            yield From(asyncio.sleep(0))

        # obtain our next clock...
        # this code tells the loop to only continue when continueRoutine is not False
        # otherwise it'll just keep yielding.
        # let winflipper make new clock
        G['S']['continueRoutine'] = False
        # make sure upon next window flip, we have a new clock set, and also
        # that a marker is sent signalling the start of the new go/stop trial.
        G['S']['nextFlipTasks'].append([G['S']['resetClock'], G])  # the makeNewClock automatically makes things continue
        # send the trigger regarding the arrow, as soon as the window flips
        G['S']['nextFlipTasks'].append([G['eh'].send_message, ['Begin' + GorNG + LorR]])
        while G['S']['continueRoutine'] is False:
            yield From(asyncio.sleep(0))
        cl = G['S']['clock']  # obtain the clock that was just made.

        # this is where we show the arrow + find out whether a key is pressed:
        G['S']['goNogoStim'] = G['vstims']['S'][thisDirection]
        currentTime = 0.0
        while currentTime < 1.0:
            currentTime = cl.getTime()

            # set the stimulus to the proper direction (it's a choice, for now...
            # -- but it's much much better to hard-code it)
            # make the arrow (+ circle)
            evs = event.getKeys(timeStamped=cl)
            if len(evs) > 0:
                buttonsPressed, timesPressed = zip(*evs)
                # it's highly unlikely that two buttons are pressed in a single
                # frame, but control for that anyway.
                allResponses.append((buttonsPressed[0], timesPressed[0]))
                numberOfResponses += 1
                # LOG this event... (i.e. send trigger)

            # handle event:
            # once a button is pressed -- display fixation point again.
            if len(allResponses) > 0 and not responded:
                # 'clear' the visual window --> fixation cross, again:
                G['S']['goNogoStim'] = G['vstims']['S']['fix']
                responded = True
                buttonPressed, RTime = allResponses[0]
                if RTime < tooSoonTime:
                    G['eh'].send_message('PressedTooSoon')
                else:
                    if buttonsPressed[0] == BUTTONS[0]:
                        G['eh'].send_message('RespL')
                    elif buttonsPressed[0] == BUTTONS[1]:
                        G['eh'].send_message('RespR')

            # if it's a stop trial, then make arrow red after X time
            if thisTrialType is STOP and not responded:
                if currentTime > thisSSD:
                    G['S']['goNogoStim'] = G['vstims']['S'][thisDirection + 'r']

            # here we let the screen flip, for example...
            yield From(asyncio.sleep(0))

        # # Stop / Inhibit Response Codes
        # 'BeginGoL':1,
        # 'BeginGoR':2,
        # 'BeginStopL':3,
        # 'BeginStopR':4,
        #
        # 'RespL':5,
        # 'RespR':6,
        #
        # 'CorrectGoL':11,
        # 'CorrectGoR':12,
        # 'CorrectStopL':13,
        # 'CorrectStopR':14,
        # 'ErrorCommission':15,
        #
        # # don't expect too many of these:
        # 'ErrorOmission':21,
        # 'PressedTooSoon':22,
        # 'TooManyResponses':23,
        # 'WrongSideErrorCommission':24,
        # 'WrongSideGo':25,

        # so the loop is done -- let's figure out what kind of trial this was.
        # taking care of the button press itself, as soon as button is pressed:
        if not trialHandled and responded:
            trialHandled = True
            if len(allResponses) > 1:
                trialOutcome = 'TooManyResponses'
                if thisTrialType is STOP:
                    myMultiStairs.addResponse(0)
            else:
                if RTime < tooSoonTime:
                    trialOutcome = 'PressedTooSoon'
                    if thisTrialType is STOP:
                        myMultiStairs.addResponse(0)
                else:
                    if thisTrialType is STOP:
                        if buttonPressed == correctResponseSide:
                            trialOutcome = 'ErrorCommission'
                            myMultiStairs.addResponse(0)
                        elif buttonPressed == wrongResponseSide:
                            trialOutcome = 'WrongSideErrorCommission'
                            myMultiStairs.addResponse(0)
                    elif thisTrialType is GO:
                        if buttonPressed == correctResponseSide:
                            trialOutcome = 'CorrectGo' + correctResponseSide  # not yet...
                        elif buttonPressed == wrongResponseSide:
                            trialOutcome = 'WrongSideGo'

        # handle the 'response' if the button was NOT pressed:
        if not trialHandled and not responded:
            trialHandled = True
            if thisTrialType is GO:
                trialOutcome = 'ErrorOmission'
            if thisTrialType is STOP:
                trialOutcome = 'CorrectStop' + LorR
                myMultiStairs.addResponse(1)

        # so we send it out:
        G['eh'].send_message(trialOutcome)

        # this code tells the loop to only continue when continueRoutine is not False
        # otherwise it'll just keep yielding.
        # let winflipper make new clock
        G['S']['continueRoutine'] = False
        G['S']['nextFlipTasks'].append([G['S']['resetClock'], G])  # the makeNewClock automatically makes things continue
        while G['S']['continueRoutine'] is False:
            yield From(asyncio.sleep(0))
        cl = G['S']['clock']  # obtain the clock that was just made.

        # ok, we can proceed -- the clock has been set.
        flushed = False
        G['S']['goNogoStim'] = G['vstims']['S']['fix']
        while cl.getTime() < thisISIWaitTime:
            if not flushed:
                # this is a nice place to save it to logfile: before the
                # send a report about the STOP trial, write a nice line:
                # logging.data('messa')
                logging.flush()
                flushed = True
            yield From(asyncio.sleep(0))

from ctypes import cast, POINTER, sizeof
import logging

logger = logging.getLogger(__name__)

import trollius as asyncio
from trollius import coroutine, From

import tglib as tg

msleep = lambda t: asyncio.sleep(t / 1000.0)


def prepare_block(block, registerflags, silent=False):
    bname = block.block_name
    for register in registerflags:
        flags = registerflags[register]
        oreg = getattr(block, register)
        if isinstance(flags, dict):
            for flag in flags:
                ival = flags[flag]
                cval = getattr(oreg, flag)
                if cval != ival:
                    if not silent:
                        if ival > 1:
                            verb = "configuring"
                        elif ival:
                            verb = "setting"
                        else:
                            verb = "clearing"
                        logger.debug("%s %s in %s.%s", verb, flag, bname,
                                     register)

def get_stats(agent):
    yield From(agent.run_event.wait())
    """
    After plugin installation, copy the configuration file from
    sentinella-plugin-template/conf/ to /etc/sentinella/conf.d/
    """
    config = agent.config['sentinella-kvm']
    """
    The plugin Key is a unique UUID provided by Sentinel.la.
    If not valid, the plugin metrics will never be registered.
    """
    plugin_key = config['plugin_key']

    logger.info(
        'starting "get_stats" task for plugin_key "{}" and host "{}"'.format(
            plugin_key, hostname))

    while agent.run_event.is_set():
        yield From(asyncio.sleep(frequency))
        try:
            data = {'server_name': hostname, 'plugins': {}}
            logger.debug('connecting to data source')
            data['plugins'].update({"{}".format(plugin_key): {}})

            conn = libvirt.open('qemu:///system')
            for id in conn.listDomains():
                domain = conn.lookupByID(id)
                domain_name = domain.name()
                dom_info = domain.info()

                metric = "{0}_domain_name".format(domain_name)
                data['plugins'][plugin_key].update(
                    {metric: {"value": domain_name, "type": "string"}})

                # Metrics are keyed by domain name so each VM gets its own series.
                cpu_time = dom_info[4]
                metric = "{0}_cpu_time".format(domain_name)
                data['plugins'][plugin_key].update(
                    {metric: {"value": cpu_time, "type": "integer"}})

                memory = dom_info[2]
                metric = "{0}_memory".format(domain_name)
                data['plugins'][plugin_key].update(
                    {metric: {"value": memory, "type": "integer"}})

            logger.debug('{}: myplugin={}%'.format(hostname, data))
            yield From(agent.async_push(data))
        except:
            logger.exception('cannot get data source information')
    logger.info('get_stats terminated')

def async_job(verb, job, threadpool, locks, event_queue, log_path):
    """Run a sequence of Stages from a Job and collect their output.

    :param job: A Job instance
    :param threadpool: A thread pool executor for blocking stages
    :param event_queue: A queue for asynchronous events
    """

    # Initialize success flag
    all_stages_succeeded = True

    # Jobs start occupying a jobserver job
    occupying_job = True

    # Execute each stage of this job
    for stage in job.stages:
        # Logger reference in this scope for error reporting
        logger = None

        # Abort the job if one of the stages has failed
        if not job.continue_on_failure and not all_stages_succeeded:
            break

        # Check for stage synchronization lock
        if stage.locked_resource is not None:
            lock = locks.setdefault(stage.locked_resource, asyncio.Lock())
            yield asyncio.From(lock)
        else:
            lock = FakeLock()

        try:
            # If the stage doesn't require a job token, release it temporarily
            if stage.occupy_job:
                if not occupying_job:
                    while job_server.try_acquire() is None:
                        yield asyncio.From(asyncio.sleep(0.05))
                    occupying_job = True
            else:
                if occupying_job:
                    job_server.release()
                    occupying_job = False

            # Notify stage started
            event_queue.put(ExecutionEvent(
                'STARTED_STAGE',
                job_id=job.jid,
                stage_label=stage.label))

            if type(stage) is CommandStage:
                try:
                    # Initiate the command
                    while True:
                        try:
                            # Update the environment for this stage (respects overrides)
                            stage.update_env(job.env)

                            # Get the logger
                            protocol_type = stage.logger_factory(
                                verb, job.jid, stage.label, event_queue, log_path)

                            # Start asynchronous execution
                            transport, logger = yield asyncio.From(
                                async_execute_process(
                                    protocol_type,
                                    **stage.async_execute_process_kwargs))
                            break
                        except OSError as exc:
                            if 'Text file busy' in str(exc):
                                # This is a transient error, try again shortly
                                # TODO: report the file causing the problem (exc.filename)
                                yield asyncio.From(asyncio.sleep(0.01))
                                continue
                            raise

                    # Notify that a subprocess has been created
                    event_queue.put(ExecutionEvent(
                        'SUBPROCESS',
                        job_id=job.jid,
                        stage_label=stage.label,
                        stage_repro=stage.get_reproduction_cmd(verb, job.jid),
                        **stage.async_execute_process_kwargs))

                    # Asynchronously yield until this command is completed
                    retcode = yield asyncio.From(logger.complete)
                except:  # noqa: E722
                    # Bare except is permissible here because the set of errors which the CommandStage might raise
                    # is unbounded. We capture the traceback here and save it to the build's log files.
                    logger = IOBufferLogger(verb, job.jid, stage.label, event_queue, log_path)
                    logger.err(str(traceback.format_exc()))
                    retcode = 3

            elif type(stage) is FunctionStage:
                logger = IOBufferLogger(verb, job.jid, stage.label, event_queue, log_path)
                try:
                    # Asynchronously yield until this function is completed
                    retcode = yield asyncio.From(get_loop().run_in_executor(
                        threadpool, stage.function, logger, event_queue))
                except:  # noqa: E722
                    # Bare except is permissible here because the set of errors which the FunctionStage might raise
                    # is unbounded. We capture the traceback here and save it to the build's log files.
                    logger.err('Stage `{}` failed with arguments:'.format(stage.label))
                    for arg_val in stage.args:
                        logger.err('  {}'.format(arg_val))
                    for arg_name, arg_val in stage.kwargs.items():
                        logger.err('  {}: {}'.format(arg_name, arg_val))
                    retcode = 3
            else:
                raise TypeError("Bad Job Stage: {}".format(stage))

            # Set whether this stage succeeded
            stage_succeeded = (retcode == 0)

            # Update success tracker from this stage
            all_stages_succeeded = all_stages_succeeded and stage_succeeded

            # Store the results from this stage
            event_queue.put(ExecutionEvent(
                'FINISHED_STAGE',
                job_id=job.jid,
                stage_label=stage.label,
                succeeded=stage_succeeded,
                stdout=logger.get_stdout_log(),
                stderr=logger.get_stderr_log(),
                interleaved=logger.get_interleaved_log(),
                logfile_filename=logger.unique_logfile_name,
                repro=stage.get_reproduction_cmd(verb, job.jid),
                retcode=retcode))

            # Close logger
            logger.close()
        finally:
            lock.release()

    # Finally, return whether all stages of the job completed
    raise asyncio.Return(job.jid, all_stages_succeeded)

def start_builder(self, realm, token, build_uuid):
    region = self.executor_config["EC2_REGION"]
    channel = self.executor_config.get("COREOS_CHANNEL", "stable")

    coreos_ami = self.executor_config.get("COREOS_AMI", None)
    if coreos_ami is None:
        get_ami_callable = partial(self._get_coreos_ami, region, channel)
        coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable))

    user_data = self.generate_cloud_config(realm, token, build_uuid, channel,
                                           self.manager_hostname)
    logger.debug("Generated cloud config for build %s: %s", build_uuid, user_data)

    ec2_conn = self._get_conn()

    ssd_root_ebs = boto.ec2.blockdevicemapping.BlockDeviceType(
        size=int(self.executor_config.get("BLOCK_DEVICE_SIZE", 48)),
        volume_type="gp2",
        delete_on_termination=True,
    )
    block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping()
    block_devices["/dev/xvda"] = ssd_root_ebs

    interfaces = None
    if self.executor_config.get("EC2_VPC_SUBNET_ID", None) is not None:
        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
            subnet_id=self.executor_config["EC2_VPC_SUBNET_ID"],
            groups=self.executor_config["EC2_SECURITY_GROUP_IDS"],
            associate_public_ip_address=True,
        )
        interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)

    try:
        reservation = yield From(
            ec2_conn.run_instances(
                coreos_ami,
                instance_type=self.executor_config["EC2_INSTANCE_TYPE"],
                key_name=self.executor_config.get("EC2_KEY_NAME", None),
                user_data=user_data,
                instance_initiated_shutdown_behavior="terminate",
                block_device_map=block_devices,
                network_interfaces=interfaces,
            ))
    except boto.exception.EC2ResponseError as ec2e:
        logger.exception("Unable to spawn builder instance")
        raise ec2e

    if not reservation.instances:
        raise ExecutorException("Unable to spawn builder instance.")
    elif len(reservation.instances) != 1:
        raise ExecutorException("EC2 started wrong number of instances!")

    launched = AsyncWrapper(reservation.instances[0])

    # Sleep a few seconds to wait for AWS to spawn the instance.
    yield From(sleep(_TAG_RETRY_SLEEP))

    # Tag the instance with its metadata.
    for i in range(0, _TAG_RETRY_COUNT):
        try:
            yield From(
                launched.add_tags({
                    "Name": "Quay Ephemeral Builder",
                    "Realm": realm,
                    "Token": token,
                    "BuildUUID": build_uuid,
                }))
        except boto.exception.EC2ResponseError as ec2e:
            if ec2e.error_code == "InvalidInstanceID.NotFound":
                if i < _TAG_RETRY_COUNT - 1:
                    logger.warning(
                        "Failed to write EC2 tags for instance %s for build %s (attempt #%s)",
                        launched.id,
                        build_uuid,
                        i,
                    )
                    yield From(sleep(_TAG_RETRY_SLEEP))
                    continue

                raise ExecutorException("Unable to find builder instance.")

            logger.exception("Failed to write EC2 tags (attempt #%s)", i)

    logger.debug("Machine with ID %s started for build %s", launched.id, build_uuid)
    raise Return(launched.id)

def testOneway(self, seconds):
    t = time.time()
    yield From(asyncio.sleep(seconds))
    yield From(self.onewaysQueue.put((t, time.time(), seconds)))

def run(self):
    """
    :return:
    """
    conf = self.conf
    insert_queue = []

    if not self.do_restore:
        # Build the arena
        yield From(wait_for(self.build_arena()))

        # Generate a starting population
        trees, bboxes = yield From(
            self.generate_population(conf.initial_population_size))
        insert_queue = zip(trees, bboxes, [None for _ in range(len(trees))])

    # Simple loop timing mechanism
    timers = {k: Time() for k in ['reproduce', 'death', 'snapshot',
                                  'log_fitness', 'rtf']}
    this = self

    def timer(name, t):
        """
        :param t:
        :param name:
        :return:
        """
        if this.last_time is not None and float(this.last_time - timers[name]) > t:
            timers[name] = this.last_time
            return True

        return False

    # Start the world
    real_time = time.time()
    yield From(wait_for(self.pause(False)))

    while True:
        if insert_queue:
            tree, bbox, parents = insert_queue.pop()
            yield From(wait_for(self.birth(tree, bbox, parents)))

        # Perform operations only if there are no items
        # in the insert queue, makes snapshotting easier.
        if insert_queue:
            yield From(trollius.sleep(0.2))
            continue

        if timer('snapshot', 10.0):
            # Snapshot the world every 10 simulation seconds
            yield From(self.create_snapshot())
            yield From(wait_for(self.pause(False)))

        if timer('death', 5.0):
            # Kill off robots over their age every 5 simulation seconds
            futs = yield From(self.kill_old_robots())
            if futs:
                yield From(multi_future(futs))

        if len(self.robots) <= 1:
            print("Less than two robots left in population - extinction.")
            break

        if timer('reproduce', 3.0) and len(self.robots) < conf.max_population_size:
            # Attempt a reproduction every 3 simulation seconds
            potential_mates = self.select_mates()
            if potential_mates:
                ra, rb = random.choice(potential_mates)
                result = yield From(self.attempt_mate(ra, rb))

                if result:
                    ra.did_mate_with(rb)
                    rb.did_mate_with(ra)
                    child, bbox = result
                    insert_queue.append((child, bbox, (ra, rb)))

        if timer('log_fitness', 2.0):
            # Log overall fitness every 2 simulation seconds
            self.log_fitness()

        if timer('rtf', 1.0):
            # Print RTF to screen every second
            nw = time.time()
            diff = nw - real_time
            real_time = nw
            print("RTF: %f" % (1.0 / diff))

        yield From(trollius.sleep(0.2))

def testString(self, s):
    yield From(asyncio.sleep(0))
    raise Return(s)

def coro():
    for e in asyncio.sleep(30):
        yield e

def publish_loop():
    manager = yield From(pygazebo.connect())

    publisher = yield From(
        manager.advertise('/gazebo/default/create/joint_cmd',
                          'gazebo.msgs.JointCmd'))

    msg_coast_L = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_coast_L.name = 'create::left_wheel'
    msg_coast_L.axis = 0
    msg_coast_L.force = 0.0

    msg_coast_R = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_coast_R.name = 'create::right_wheel'
    msg_coast_R.axis = 0
    msg_coast_R.force = 0.0

    msg_accel_forward_L = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_accel_forward_L.name = 'create::left_wheel'
    msg_accel_forward_L.axis = 0
    msg_accel_forward_L.force = FORCE_STANDARD

    msg_accel_forward_R = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_accel_forward_R.name = 'create::right_wheel'
    msg_accel_forward_R.axis = 0
    msg_accel_forward_R.force = FORCE_STANDARD

    msg_accel_backward_L = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_accel_backward_L.name = 'create::left_wheel'
    msg_accel_backward_L.axis = 0
    msg_accel_backward_L.force = -FORCE_STANDARD

    msg_accel_backward_R = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_accel_backward_R.name = 'create::right_wheel'
    msg_accel_backward_R.axis = 0
    msg_accel_backward_R.force = -FORCE_STANDARD

    msg_accel_right_L = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_accel_right_L.name = 'create::left_wheel'
    msg_accel_right_L.axis = 0
    msg_accel_right_L.force = FORCE_STANDARD * TURN_SCALAR

    msg_accel_right_R = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_accel_right_R.name = 'create::right_wheel'
    msg_accel_right_R.axis = 0
    msg_accel_right_R.force = -FORCE_STANDARD * TURN_SCALAR

    msg_accel_left_L = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_accel_left_L.name = 'create::left_wheel'
    msg_accel_left_L.axis = 0
    msg_accel_left_L.force = -FORCE_STANDARD * TURN_SCALAR

    msg_accel_left_R = pygazebo.msg.joint_cmd_pb2.JointCmd()
    msg_accel_left_R.name = 'create::right_wheel'
    msg_accel_left_R.axis = 0
    msg_accel_left_R.force = FORCE_STANDARD * TURN_SCALAR

    frame_count = 0
    while True:
        if (mod(frame_count, (20 * FRAME_RATE)) == 0 * FRAME_RATE):
            yield From(publisher.publish(msg_coast_L))
            yield From(publisher.publish(msg_coast_R))
        elif (mod(frame_count, (20 * FRAME_RATE)) == 16 * FRAME_RATE):
            yield From(publisher.publish(msg_accel_backward_L))
            yield From(publisher.publish(msg_accel_backward_R))
        elif (mod(frame_count, (20 * FRAME_RATE)) == 17 * FRAME_RATE):
            yield From(publisher.publish(msg_accel_right_L))
            yield From(publisher.publish(msg_accel_right_R))
        elif (mod(frame_count, (20 * FRAME_RATE)) == 18 * FRAME_RATE):
            yield From(publisher.publish(msg_accel_left_L))
            yield From(publisher.publish(msg_accel_left_R))
        elif (mod(frame_count, (20 * FRAME_RATE)) == 19 * FRAME_RATE):
            yield From(publisher.publish(msg_accel_forward_L))
            yield From(publisher.publish(msg_accel_forward_R))

        yield From(trollius.sleep(1 / FRAME_RATE))
        frame_count += 1

def slowsquare(self, x):
    if x > 5:
        raise Exception("number too large")
    else:
        yield trollius.From(trollius.sleep(1))
        raise trollius.Return(x * x)

def sleeper(time):
    yield From(asyncio.sleep(time))
    raise Return(time)

def handle_visual(G):
    '''
    This flips the window, draws the stuff to be drawn, and calls functions
    to be called from the stop task.
    It is supposed to be run in the asyncio loop.
    '''

    # logging.console.setLevel(logging.DEBUG)
    mainClock = G['mainClock']
    eh = G['eh']
    fd_with_markers = G['V']['fd_with_markers']
    ASYNC_SLEEPTIME = G['V']['ASYNC_SLEEPTIME']
    print ASYNC_SLEEPTIME

    frameCounter = 0
    vstims = G['vstims']['V']
    totFrames = len(fd_with_markers)
    # visualClock=clock.Clock()

    # this will run the entire length of the visual...
    # within this time, the stop signal task will (hopefully) finish.
    # OR... we can also just use a counter.
    while frameCounter < totFrames:

        # the workflow
        # 1) Prepare everything + draw
        # 2) Prepare markers
        # 3) win.flip() + the callOnFlip routines to be done.

        # all the visual stuff:
        frameIndex, visContents, markers = fd_with_markers[frameCounter]

        if frameIndex == 0:
            eh.send_message('vis_BEGIN')

        frameCounter += 1

        # deal with the visuals -- using vstims which should be accessible
        # we get the list...
        # create the shapes to be drawn
        shapes = []
        if len(visContents) > 0:
            for item in visContents:
                for stim in vstims[item]:
                    shapes.append(stim)

        # print shapes
        # print G['S']['goNogoStim']
        # add the gonogo stimuli to them:
        if len(G['S']['goNogoStim']) > 0:
            for stim in G['S']['goNogoStim']:
                if stim is not None:
                    shapes.append(stim)
        # print shapes

        # draw them on our little canvas.
        if len(shapes) > 0:
            for shape in shapes:
                # if shape is not None:
                shape.draw()
        else:
            G['vstims']['S']['fix'][0].draw()

        # prepare the calls for the next iteration, including markers;
        # deal with visual markers
        if len(markers) > 0:
            for marker in markers:
                win.callOnFlip(eh.send_message, marker)
                # win.callOnFlip(print, marker)

        tasks = G['S']['nextFlipTasks']
        while len(tasks) > 0:
            task = tasks.pop(0)
            function, arg = task
            win.callOnFlip(function, arg)

        # we flip the screen here - this will take ~ 16.66667 msec.
        win.flip()

        yield From(asyncio.sleep(0))

    eh.send_message('vis_END')

def handle_audio(G):
    '''
    this should handle the audio stimuli, using the async programming style.
    it starts a new clock and depending on timings, will start some audio
    samples, L or R, 40 or 55 Hz.
    '''

    audio_stim_list = G['A']['audio_stim_list']
    astims = G['astims']
    eh = G['eh']

    audioClock = clock.Clock()
    playing = False
    withinAudioBlock = False
    prevWithinAudioBlock = False
    RunAudio = True
    currentTime = audioClock.getTime()

    logging.data('aud_BEGIN')
    eh.send_message('aud_BEGIN')
    # just before going in here -- LOG it.
    # log the beginning...
    while currentTime < 340.:  # currentTime < 340.:
        # print('hello')
        # print(currentTime)

        if not playing:
            # I can safely use this since only one audio is playing at a time.
            withinAudioBlock = False
            for item in audio_stim_list:
                b, e, stim = item
                if b < currentTime < e:
                    currentStim = stim
                    withinAudioBlock = True
                    astims[stim].play()
                    playDuration = astims[stim].getDuration()
                    playing = True
                    playClock = clock.Clock()
                    # print(stim)
                    logging.data(stim)
                    eh.send_message(stim)
                    logging.flush()
        else:
            if playClock.getTime() > playDuration:  # figure out if something is playing
                playing = False

        # try dealing with begin and ending markers:
        if withinAudioBlock and not prevWithinAudioBlock:
            messg = currentStim.replace('_', '_b')
            # print(messg)
            logging.data(messg)
            eh.send_message(messg)
            prevWithinAudioBlock = True
        elif prevWithinAudioBlock and not withinAudioBlock:
            messg = currentStim.replace('_', '_e')
            # print(messg)
            logging.data(messg)
            eh.send_message(messg)
            prevWithinAudioBlock = False

        # this will stop this loop, probably:
        currentTime = audioClock.getTime()
        # if currentTime > 340.:
        #     print('Stopping!')
        #     RunAudio = False

        yield From(asyncio.sleep(0))  # pass control to someone else, while this guy sleeps a bit.

    logging.data('aud_END')
    eh.send_message('aud_END')

def _make_idle(self):
    """
    Watchdog coroutine: if no command has been received for more than two
    seconds, put the driver into its idle state. Runs forever, checking
    twice per second.
    """
    while True:
        now = time.time()
        if now - self.command_time > 2.0:
            self.driver.set_idle()

        yield From(asyncio.sleep(0.5))
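# A self-contained sketch of the idle-watchdog pattern that `_make_idle`
# implements: the watchdog runs as a background task and idles the driver once
# no command has arrived for two seconds. `DummyDriver`, `idle_watchdog` and
# `demo` are illustrative assumptions, not part of the original robot code.
import time
import trollius
from trollius import From


class DummyDriver(object):
    def set_idle(self):
        print('no recent command -- driver set to idle')


@trollius.coroutine
def idle_watchdog(driver, last_command_time):
    # Same shape as `_make_idle` above, without the surrounding class.
    while True:
        if time.time() - last_command_time() > 2.0:
            driver.set_idle()
        yield From(trollius.sleep(0.5))


@trollius.coroutine
def demo():
    started = time.time()
    watchdog = trollius.Task(idle_watchdog(DummyDriver(), lambda: started))
    yield From(trollius.sleep(3.0))  # no commands arrive, so the watchdog fires
    watchdog.cancel()
    yield From(trollius.sleep(0))    # let the cancellation propagate


loop = trollius.get_event_loop()
loop.run_until_complete(demo())
loop.close()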
def _work_checker(self):
    logger.debug("Initializing work checker")
    while self._current_status == BuildServerStatus.RUNNING:
        with database.CloseForLongOperation(app.config):
            yield From(trollius.sleep(WORK_CHECK_TIMEOUT))

        logger.debug(
            "Checking for more work for %d active workers",
            self._lifecycle_manager.num_workers(),
        )

        processing_time = self._lifecycle_manager.overall_setup_time() + SETUP_LEEWAY_SECONDS
        job_item = self._queue.get(processing_time=processing_time,
                                   ordering_required=True)
        if job_item is None:
            logger.debug(
                "No additional work found. Going to sleep for %s seconds",
                WORK_CHECK_TIMEOUT)
            continue

        try:
            build_job = BuildJob(job_item)
        except BuildJobLoadException as irbe:
            logger.warning(
                "[BUILD INCOMPLETE: job load exception] Job data: %s. No retry restore.",
                job_item.body,
            )
            logger.exception(irbe)
            self._queue.incomplete(job_item, restore_retry=False)
            continue

        logger.debug("Checking for an available worker for build job %s",
                     build_job.repo_build.uuid)

        try:
            schedule_success, retry_timeout = yield From(
                self._lifecycle_manager.schedule(build_job))
        except:
            logger.warning(
                "[BUILD INCOMPLETE: scheduling] Build ID: %s. Retry restored.",
                build_job.repo_build.uuid,
            )
            logger.exception("Exception when scheduling job: %s",
                             build_job.repo_build.uuid)
            self._current_status = BuildServerStatus.EXCEPTION
            self._queue.incomplete(job_item,
                                   restore_retry=True,
                                   retry_after=WORK_CHECK_TIMEOUT)
            return

        if schedule_success:
            logger.debug("Marking build %s as scheduled",
                         build_job.repo_build.uuid)
            status_handler = StatusHandler(self._build_logs,
                                           build_job.repo_build.uuid)
            yield From(
                status_handler.set_phase(
                    database.BUILD_PHASE.BUILD_SCHEDULED))

            self._job_count += 1
            logger.debug(
                "Build job %s scheduled. Running: %s",
                build_job.repo_build.uuid,
                self._job_count,
            )
        else:
            logger.warning(
                "[BUILD INCOMPLETE: no schedule] Build ID: %s. Retry restored.",
                build_job.repo_build.uuid,
            )
            logger.debug(
                "All workers are busy for job %s. Requeuing after %s seconds.",
                build_job.repo_build.uuid,
                retry_timeout,
            )
            self._queue.incomplete(job_item,
                                   restore_retry=True,
                                   retry_after=retry_timeout)
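# `_work_checker` above relies on a few module-level names that are not shown in
# this snippet. A hedged sketch of what they look like -- the concrete values
# and the extra enum members are assumptions for illustration, not the project's
# actual settings:
WORK_CHECK_TIMEOUT = 10    # seconds to sleep between polls of the build queue (assumed)
SETUP_LEEWAY_SECONDS = 30  # extra processing time granted on top of worker setup (assumed)


class BuildServerStatus(object):
    STARTING = 'starting'
    RUNNING = 'running'
    SHUTDOWN = 'shutdown'
    EXCEPTION = 'exception'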
def greet_every_two_seconds(): while True: print('Hello World') yield From(trollius.sleep(2))
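# `greet_every_two_seconds` is the canonical trollius example coroutine; it is
# normally driven by an event loop like this (standard trollius usage, shown
# here for completeness):
import trollius

loop = trollius.get_event_loop()
try:
    loop.run_until_complete(greet_every_two_seconds())
finally:
    loop.close()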
def run():
    """
    The main coroutine, which is started below.
    """
    # Parse command line / file input arguments
    conf = parser.parse_args()
    conf.output_directory = "output"
    conf.restore_directory = "restore"
    with open("models/robot_26.yaml", 'r') as yamlfile:
        bot_yaml1 = yamlfile.read()
    with open("models/robot_150.yaml", 'r') as yamlfile:
        bot_yaml2 = yamlfile.read()

    # Create the world; this connects to the Gazebo simulation.
    world = yield From(World.create(conf))

    # These are useful when working with YAML
    body_spec = world.builder.body_builder.spec
    brain_spec = world.builder.brain_builder.spec

    # Create the first robot from YAML
    protobot1 = yaml_to_robot(body_spec=body_spec,
                              nn_spec=brain_spec,
                              yaml=bot_yaml1)

    # Create the second robot from YAML
    protobot2 = yaml_to_robot(body_spec=body_spec,
                              nn_spec=brain_spec,
                              yaml=bot_yaml2)

    # Create a revolve.angle `Tree` representation from each robot, which
    # is what is used in the world manager.
    robot_tree1 = Tree.from_body_brain(body=protobot1.body,
                                       brain=protobot1.brain,
                                       body_spec=body_spec)

    robot_tree2 = Tree.from_body_brain(body=protobot2.body,
                                       brain=protobot2.brain,
                                       body_spec=body_spec)

    # Insert the robots into the world. `insert_robot` resolves when the insert
    # request has been sent; the future it returns resolves when the insert is
    # actually confirmed and a robot manager object has been created.
    pose1 = Pose(position=Vector3(0, 0, 0.05))
    pose2 = Pose(position=Vector3(0, 2, 0.05))
    future1 = yield From(
        world.insert_robot(
            tree=robot_tree1,
            pose=pose1,
            # name="robot_26"
        ))
    future2 = yield From(
        world.insert_robot(
            tree=robot_tree2,
            pose=pose2,
            # name="robot_26"
        ))
    robot_manager1 = yield From(future1)
    robot_manager2 = yield From(future2)

    # I usually start the world paused; un-pause it here. Note that
    # `pause` again returns a future for when the request is sent;
    # that future in turn resolves when a response has been received.
    # This is the general convention for all message actions in the
    # world manager. `wait_for` saves the hassle of grabbing the
    # intermediary future in this case.
    yield From(wait_for(world.pause(False)))

    # Start a run loop to do some stuff
    while True:
        # Print the first robot's fitness every second
        print("Robot fitness is {fitness}".format(
            fitness=robot_manager1.fitness()))
        yield From(trollius.sleep(1.0))
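# The docstring above says the coroutine "is started below"; that startup code
# is not part of this snippet. The usual pattern for these trollius-based
# examples is a small main() that hands `run()` to the event loop -- a hedged
# sketch, not the original file's code:
def main():
    try:
        loop = trollius.get_event_loop()
        loop.run_until_complete(run())
    except KeyboardInterrupt:
        print("Got CTRL+C, shutting down.")


if __name__ == '__main__':
    main()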
def get_uwsgi_stats(agent):
    """
    Periodically poll the uwsgi stats server and push per-worker deltas
    (requests, exceptions, tx) plus memory and status fields to the
    configured database.
    """
    yield From(agent.run_event.wait())
    config = agent.config['uwsgi']
    logger.info('starting "get_uwsgi_stats" task for "%s"', config['hostname'])
    db_config = config['database']
    yield From(agent.async_create_database(**db_config))
    workers_stats = None
    uwsgi_host = config['hostname']
    uwsgi_port = config['port']
    while agent.run_event.is_set():
        yield From(asyncio.sleep(config['frequency']))
        try:
            logger.debug('open connection to uwsgi stats server')
            reader, writer = yield From(
                asyncio.open_connection(uwsgi_host, uwsgi_port))
            data = yield From(reader.read())
            # The stats server sends the whole JSON document and closes;
            # close our side of the connection as well.
            writer.close()
            d = json.loads(data.decode('utf-8'))

            if workers_stats is None:
                logger.debug('first run, no previous statistics in memory')
                workers_stats = dict()
                for worker in d['workers']:
                    workers_stats[worker['id']] = worker
                logger.debug('current statistics: %s', workers_stats)
                continue

            ref_worker = d['workers'][0]
            stored_last_spawn = workers_stats[ref_worker['id']]['last_spawn']
            received_last_spawn = ref_worker['last_spawn']
            if stored_last_spawn != received_last_spawn:
                logger.warning(
                    'a restart of the uwsgi server "%s" '
                    'has been detected', uwsgi_host)
                workers_stats = dict()
                for worker in d['workers']:
                    workers_stats[worker['id']] = worker
                continue

            points = []
            for worker in d['workers']:
                logger.debug('process worker data...')
                stored_wk_data = workers_stats[worker['id']]
                requests = worker['requests'] - stored_wk_data['requests']
                exceptions = worker['exceptions'] - stored_wk_data['exceptions']
                tx = worker['tx'] - stored_wk_data['tx']
                points.append({
                    'measurement': 'uwsgi_stats',
                    'tags': {
                        'host': config['hostname'],
                        'worker': worker['id']
                    },
                    'fields': {
                        'requests': requests,
                        'exceptions': exceptions,
                        'tx': tx,
                        'rss': worker['rss'],
                        'vsz': worker['vsz'],
                        'avg_rt': worker['avg_rt'],
                        'wid': worker['id'],
                        'status': worker['status']
                    }
                })
                workers_stats[worker['id']] = worker
            yield From(agent.async_push(points, db_config['name']))
        except:
            logger.exception('cannot get the uwsgi stats')
    logger.info('get_uwsgi_stats terminated')
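# `get_uwsgi_stats` above expects `agent.config['uwsgi']` to provide the keys it
# reads (hostname, port, frequency, database). A hypothetical configuration
# fragment matching those keys -- the values are placeholders, not taken from an
# actual deployment:
UWSGI_AGENT_CONFIG_EXAMPLE = {
    'uwsgi': {
        'hostname': '127.0.0.1',        # host of the uwsgi stats server
        'port': 1717,                   # port the uwsgi stats server listens on
        'frequency': 10,                # seconds between two polls
        'database': {'name': 'uwsgi'},  # passed to async_create_database()/async_push()
    }
}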