def test_run_write(self):
    """A write command posts the message to the feed and reads nothing back."""
    feed_spy = mock.Mock(wraps=Feed())
    Cli(feed_spy).run(self.josh_hello_cmd)
    feed_spy.post_message.assert_called_once_with(
        username="******", message="Hello!")
    feed_spy.get_messages_of.assert_not_called()
def cli(*args):
    """Run the Atomix CLI, either interactively or for a single command.

    Connection endpoint comes from the ATOMIX_HOST / ATOMIX_PORT
    environment variables (defaults: localhost:5678).

    Keyword arguments:
        *args -- when non-empty, a single command is processed and the
                 function returns; when empty, the interactive loop runs.
    """
    # Fix: the local used to be named `cli` too, shadowing this function.
    client = Cli(os.environ.get('ATOMIX_HOST', 'localhost'),
                 int(os.environ.get('ATOMIX_PORT', 5678)))
    try:
        if args:  # truthiness instead of len(args) > 0
            client._process_command(*args)
        else:
            client.run()
    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C ends the session cleanly with no return value.
        client.set_return_value(None)
def test_e2e(self):
    """End-to-end session: the final 'wall' command returns Josh's wall,
    newest post first, including posts from the user he follows."""
    cli = Cli(Feed())
    script = [
        "Josh -> As Josh used to say",
        "Josh",
        "Josh follows Bob",
        "Bob -> @Josh whats'up?",
        "John -> pancakes",
        "Bob -> @John you cooking pancakes?",
        "Josh -> mamma mia",
        "Josh wall",
    ]
    # Parse everything first, then execute, mirroring real CLI usage.
    parsed_cmds = [cli.parse(line) for line in script]
    outputs = [cli.run(command) for command in parsed_cmds]
    self.assertEqual(
        [
            "mamma mia",
            "@John you cooking pancakes?",
            "@Josh whats'up?",
            "As Josh used to say",
        ],
        outputs[-1],
    )
from feed import Feed
from cli import Cli

if __name__ == "__main__":
    # Interactive read-parse-run loop; only non-empty results are printed.
    shell = Cli(Feed())
    while True:
        command = shell.parse(input(">> "))
        output = shell.run(command)
        if output:
            print(output)
from cli import Cli
from cli.cmds.create_event import CreateEvent
from cli.cmds.delete_event import DeleteEvent
from cli.cmds.filter_sort_event import FilterSortEvent
from cli.cmds.show_events import ShowEvents
from cli.cmds.update_event import UpdateEvent

# Command handlers registered with the CLI, in menu order.
cmds = (
    CreateEvent,
    ShowEvents,
    DeleteEvent,
    UpdateEvent,
    FilterSortEvent,
)

cli = Cli(cmds)
cli.run()
class App:
    """Application shell: owns the display, the event bus, the optional CLI,
    and drives the scene manager's main loop."""

    def __init__(self):
        self.state = MainAppState.INIT
        self.display = Display()
        self.ee = BaseEventEmitter()
        # CLI is only instantiated when enabled by configuration.
        self.cli = Cli() if Config.ENABLE_CLI else None

    def init(self):
        # Start the CLI reader when configured.
        if Config.ENABLE_CLI:
            self.cli.run()

    def handle_cli_command(self, input_str):
        """Translate a CLI token into the matching IO event(s)."""
        button_events = {
            "up": IOEvent.BUTTON_UP,
            "down": IOEvent.BUTTON_DOWN,
            "ok": IOEvent.BUTTON_OK,
        }
        device_events = {
            "isd": IOEvent.INSERT_SD,
            "ihd": IOEvent.INSERT_HDD,
            "esd": IOEvent.EJECT_SD,
            "ehd": IOEvent.EJECT_HDD,
        }
        if input_str in button_events:
            # Any button press raises the generic BUTTON event first,
            # then the specific one.
            self.ee.emit(IOEvent.BUTTON)
            self.ee.emit(button_events[input_str])
        elif input_str in device_events:
            self.ee.emit(device_events[input_str])

    def run(self):
        self.init()
        sm = SceneManager()
        sm.register_scene(DevicesWaiting(self.display, self.ee))
        # sm.go(SceneId.DEVICES_WAITING)
        sm.register_scene(Dummy(self.display))
        # sm.go(SceneId.DUMMY)
        sm.register_scene(Loading(self.display))
        sm.go(SceneId.LOADING)
        while True:
            if Config.ENABLE_CLI:
                token = self.cli.read()
                if token == 'exit':
                    sm.destroy()
                    break
                self.handle_cli_command(token)
            time.sleep(0.01)
def test_run_display_wall_cmd(self):
    """DISPLAY_RELEVANT_POSTS fetches the user's wall, never their own feed.

    Renamed from test_run_follow_cmd: that name collided with the FOLLOW
    test (the later definition silently shadowed this one, so it never
    ran) and mislabeled what is actually exercised here.
    """
    spy = mock.Mock(wraps=Feed())
    cli = Cli(spy)
    cli.run(Command(Action.DISPLAY_RELEVANT_POSTS, "Josh", None))
    spy.get_wall_for.assert_called_once_with("Josh")
    spy.get_messages_of.assert_not_called()
def test_run_follow_cmd(self):
    """A FOLLOW command registers the relationship and reads no feed."""
    feed_spy = mock.Mock(wraps=Feed())
    Cli(feed_spy).run(Command(Action.FOLLOW, "Josh", "Bob"))
    feed_spy.follow.assert_called_once_with(follower="Josh", followed="Bob")
    feed_spy.get_messages_of.assert_not_called()
def test_run_display_own_msg(self):
    """A bare-username command reads that user's own messages exactly once."""
    spy = mock.Mock(wraps=Feed())
    cli = Cli(spy)
    cli.run(self.jonny_display_cmd)
    # assert_called() only proved "at least one call"; pin it to exactly
    # one read, consistent with the other tests' assert_called_once_with.
    spy.get_messages_of.assert_called_once()
class SwitchML(object):
    '''SwitchML controller.

    Programs a Tofino switch (through the BFRT gRPC interface) to perform
    in-network aggregation for SwitchML workers, and exposes a CLI plus a
    gRPC server for session management.
    '''

    def __init__(self):
        super(SwitchML, self).__init__()
        self.log = logging.getLogger(__name__)
        self.log.info('SwitchML controller')

        # CPU PCIe port
        self.cpu_port = 320  # Pipe 2 Quad 16

        # UDP port and mask used to match SwitchML traffic
        self.udp_port = 0xbee0
        self.udp_mask = 0xfff0

        # RDMA partition key
        self.switch_pkey = 0xffff

        # All-ports (flood) multicast group ID and the base replication ID
        self.all_ports_mgid = 0x8000
        self.all_ports_initial_rid = 0x8000

        # Multicast group ID -> replication ID (= node ID) -> port
        self.multicast_groups = {self.all_ports_mgid: {}}

    def critical_error(self, msg):
        # Log, flush logging, and terminate the whole process.
        self.log.critical(msg)
        print(msg, file=sys.stderr)
        logging.shutdown()
        #sys.exit(1)
        # SIGTERM instead of sys.exit so non-main threads are stopped too.
        os.kill(os.getpid(), signal.SIGTERM)

    def setup(
        self,
        program,
        switch_mac,
        switch_ip,
        bfrt_ip,
        bfrt_port,
        ports_file,
    ):
        '''Connect to the BFRT server, bind the P4 program, and set up
        every table, port, multicast group, CLI and gRPC server.

        Keyword arguments:
            program -- P4 program name to bind
            switch_mac, switch_ip -- addresses the switch answers on
            bfrt_ip, bfrt_port -- BFRT server endpoint
            ports_file -- yaml file describing front-panel ports
        '''
        # Device 0
        self.dev = 0
        # Target all pipes
        self.target = gc.Target(self.dev, pipe_id=0xFFFF)
        # Connect to BFRT server
        try:
            interface = gc.ClientInterface('{}:{}'.format(bfrt_ip, bfrt_port),
                                           client_id=0,
                                           device_id=self.dev)
        # NOTE(review): `re` shadows the regex module inside this handler;
        # `re.args[0] % re.args[1]` assumes a (format, args) pair in the
        # RuntimeError — TODO confirm against the bfrt_grpc client.
        except RuntimeError as re:
            msg = re.args[0] % re.args[1]
            self.critical_error(msg)
        else:
            self.log.info('Connected to BFRT server {}:{}'.format(
                bfrt_ip, bfrt_port))
        try:
            interface.bind_pipeline_config(program)
        except gc.BfruntimeForwardingRpcException:
            self.critical_error('P4 program {} not found!'.format(program))
        try:
            # Get all tables for program
            self.bfrt_info = interface.bfrt_info_get(program)

            # Ports table
            self.ports = Ports(self.target, gc, self.bfrt_info)

            # Enable loopback on front panel ports
            loopback_ports = (
                [64] +  # Pipe 0 CPU ethernet port
                # Pipe 0: all 16 front-panel ports
                #list(range( 0, 0+64,4)) +
                # Pipe 1: all 16 front-panel ports
                list(range(128, 128 + 64, 4)) +
                # Pipe 2: all 16 front-panel ports
                list(range(256, 256 + 64, 4)) +
                # Pipe 3: all 16 front-panel ports
                list(range(384, 384 + 64, 4)))
            print('Setting {} front panel ports in loopback mode'.format(
                len(loopback_ports)))
            self.ports.set_loopback_mode(loopback_ports)

            # Enable loopback on PktGen ports; this needs manual steps in
            # the BF ucli, so prompt the operator and re-check.
            pktgen_ports = [192, 448]
            if not self.ports.get_loopback_mode_pktgen(pktgen_ports):
                # Not all PktGen ports are in loopback mode
                print('\nYou must \'remove\' the ports in the BF ucli:\n')
                for p in pktgen_ports:
                    print(' bf-sde> dvm rmv_port 0 {}'.format(p))
                input('\nPress Enter to continue...')
                if not self.ports.set_loopback_mode_pktgen(pktgen_ports):
                    self.critical_error(
                        'Failed setting front panel ports in loopback mode')
                print('\nAdd the ports again:\n')
                for p in pktgen_ports:
                    print(' bf-sde> dvm add_port 0 {} 100 0'.format(p))
                input('\nPress Enter to continue...')
                if not self.ports.get_loopback_mode_pktgen(pktgen_ports):
                    self.critical_error(
                        'Front panel ports are not in loopback mode')

            # Packet Replication Engine table
            self.pre = PRE(self.target, gc, self.bfrt_info, self.cpu_port)

            # Setup tables
            # Forwarder
            self.forwarder = Forwarder(self.target, gc, self.bfrt_info,
                                       self.all_ports_mgid)
            # ARP and ICMP responder
            self.arp_and_icmp = ARPandICMPResponder(self.target, gc,
                                                    self.bfrt_info)
            # Drop simulator
            self.drop_simulator = DropSimulator(self.target, gc,
                                                self.bfrt_info)
            # RDMA receiver
            self.rdma_receiver = RDMAReceiver(self.target, gc, self.bfrt_info)
            # UDP receiver
            self.udp_receiver = UDPReceiver(self.target, gc, self.bfrt_info)
            # Bitmap checker
            self.bitmap_checker = BitmapChecker(self.target, gc,
                                                self.bfrt_info)
            # Workers counter
            self.workers_counter = WorkersCounter(self.target, gc,
                                                  self.bfrt_info)
            # Exponents
            self.exponents = Exponents(self.target, gc, self.bfrt_info)
            # Processors (one per slot; the data plane supports 32 workers)
            self.processors = []
            for i in range(32):
                p = Processor(self.target, gc, self.bfrt_info, i)
                self.processors.append(p)
            # Next step selector
            self.next_step_selector = NextStepSelector(self.target, gc,
                                                       self.bfrt_info)
            # RDMA sender
            self.rdma_sender = RDMASender(self.target, gc, self.bfrt_info)
            # UDP sender
            self.udp_sender = UDPSender(self.target, gc, self.bfrt_info)

            # Add multicast group for flood
            self.pre.add_multicast_group(self.all_ports_mgid)

            # Enable ports
            success, ports = self.load_ports_file(ports_file)
            if not success:
                # On failure `ports` carries the error message
                self.critical_error(ports)

            # Set switch addresses
            self.set_switch_mac_and_ip(switch_mac, switch_ip)

            # CLI setup
            self.cli = Cli()
            self.cli.setup(self, prompt='SwitchML', name='SwitchML controller')

            # Set up gRPC server
            self.grpc_server = GRPCServer(ip='[::]', port=50099)

            # Run event loop for gRPC server in a separate thread
            # limit concurrency to 1 to avoid synchronization problems in
            # the BFRT interface
            self.grpc_executor = futures.ThreadPoolExecutor(max_workers=1)
            self.event_loop = asyncio.get_event_loop()
        except KeyboardInterrupt:
            self.critical_error('Stopping controller.')
        except Exception as e:
            self.log.exception(e)
            self.critical_error('Unexpected error. Stopping controller.')

    def load_ports_file(self, ports_file):
        '''
        Load ports yaml file and enable front panel ports.

        Also populates the forwarding table (MAC -> dev port) and adds all
        ports to the flood multicast group.

        Keyword arguments:
            ports_file -- yaml file name

        Returns:
            (success flag, list of ports or error message)
        '''
        ports = []
        fib = {}  # dev port -> MAC address
        with open(ports_file) as f:
            yaml_ports = yaml.safe_load(f)
        for port, value in yaml_ports['ports'].items():
            # Port keys look like front-panel "<port>/<lane>" names
            re_match = front_panel_regex.match(port)
            if not re_match:
                return (False, 'Invalid port {}'.format(port))
            fp_port = int(re_match.group(1))
            fp_lane = int(re_match.group(2))

            # Convert all keys to lowercase
            value = {k.lower(): v for k, v in value.items()}

            # Speed: optional, defaults to 100G; accepts '100G'-style values
            if 'speed' in value:
                try:
                    speed = int(value['speed'].upper().replace('G',
                                                               '').strip())
                except ValueError:
                    return (False, 'Invalid speed for port {}'.format(port))
                if speed not in [10, 25, 40, 50, 100]:
                    return (
                        False,
                        'Port {} speed must be one of 10G,25G,40G,50G,100G'
                        .format(port))
            else:
                speed = 100
            # FEC: optional, defaults to 'none'
            if 'fec' in value:
                fec = value['fec'].lower().strip()
                if fec not in ['none', 'fc', 'rs']:
                    return (False, 'Port {} fec must be one of none, fc, rs'.
                            format(port))
            else:
                fec = 'none'
            # Autonegotiation: optional, defaults to 'default'
            if 'autoneg' in value:
                an = value['autoneg'].lower().strip()
                if an not in ['default', 'enable', 'disable']:
                    return (
                        False,
                        'Port {} autoneg must be one of default, enable, disable'
                        .format(port))
            else:
                an = 'default'

            # MAC address is mandatory: it feeds the forwarding table
            if 'mac' not in value:
                return (False, 'Missing MAC address for port {}'.format(port))

            success, dev_port = self.ports.get_dev_port(fp_port, fp_lane)
            if success:
                fib[dev_port] = value['mac'].upper()
            else:
                # On failure dev_port carries the error message
                return (False, dev_port)

            ports.append((fp_port, fp_lane, speed, fec, an))

        # Add ports
        success, error_msg = self.ports.add_ports(ports)
        if not success:
            return (False, error_msg)

        # Add forwarding entries
        self.forwarder.add_entries(fib.items())

        # Add ports to flood multicast group
        rids_and_ports = [(self.all_ports_initial_rid + dp, dp)
                          for dp in fib.keys()]
        success, error_msg = self.pre.add_multicast_nodes(
            self.all_ports_mgid, rids_and_ports)
        if not success:
            return (False, error_msg)
        # Mirror the hardware state in the local bookkeeping dict
        for r, p in rids_and_ports:
            self.multicast_groups[self.all_ports_mgid][r] = p

        return (True, ports)

    def set_switch_mac_and_ip(self, switch_mac, switch_ip):
        ''' Set switch MAC and IP on every table that matches on them '''
        self.switch_mac = switch_mac.upper()
        self.switch_ip = switch_ip

        self.arp_and_icmp.set_switch_mac_and_ip(self.switch_mac,
                                                self.switch_ip)
        self.rdma_receiver.set_switch_mac_and_ip(self.switch_mac,
                                                 self.switch_ip)
        self.udp_receiver.set_switch_mac_and_ip(self.switch_mac,
                                                self.switch_ip)
        self.rdma_sender.set_switch_mac_and_ip(self.switch_mac,
                                               self.switch_ip)
        self.udp_sender.set_switch_mac_and_ip(self.switch_mac,
                                              self.switch_ip)

    def get_switch_mac_and_ip(self):
        ''' Get switch MAC and IP '''
        return self.switch_mac, self.switch_ip

    def clear_multicast_group(self, session_id):
        ''' Remove multicast group and nodes for this session '''
        if session_id in self.multicast_groups:
            for node_id in self.multicast_groups[session_id]:
                self.pre.remove_multicast_node(node_id)
            self.pre.remove_multicast_group(session_id)
            del self.multicast_groups[session_id]

    def reset_workers(self):
        '''
        Reset all workers state (UDP and RDMA), all aggregation state,
        all per-session multicast groups, and the gRPC coordination state.
        '''
        #TODO clear counters
        self.udp_receiver._clear()
        self.udp_sender.clear_udp_workers()
        self.rdma_receiver._clear()
        self.rdma_sender.clear_rdma_workers()
        self.bitmap_checker._clear()
        self.workers_counter._clear()
        self.exponents._clear()
        for p in self.processors:
            p._clear()

        # Iterate over a copy: clear_multicast_group mutates the dict.
        # The all-ports flood group is kept.
        for session_id in self.multicast_groups.copy():
            if session_id != self.all_ports_mgid:
                self.clear_multicast_group(session_id)

        # Reset gRPC broadcast/barrier state
        self.grpc_server.reset()

    def clear_rdma_workers(self, session_id):
        ''' Reset RDMA workers state for this session '''
        #TODO selectively remove workers (RDMA or UDP) for
        #this session, clear RDMA sender/receiver counters,
        #clear bitmap/count/exponents/processors
        self.rdma_receiver._clear()
        self.rdma_sender.clear_rdma_workers()
        self.bitmap_checker._clear()
        self.workers_counter._clear()
        self.exponents._clear()
        for p in self.processors:
            p._clear()

        # Multicast groups below 0x8000 are used for sessions
        # (the mgid is the session id)
        #TODO session_id = session_id % 0x8000
        session_id = 0  # Single session supported for now
        self.clear_multicast_group(session_id)

    def add_rdma_worker(self, session_id, worker_id, num_workers, worker_mac,
                        worker_ip, worker_rkey, packet_size, message_size,
                        qpns_and_psns):
        ''' Add SwitchML RDMA worker.

        Keyword arguments:
            session_id -- ID of the session
            worker_id -- worker rank
            num_workers -- number of workers in this session
            worker_mac -- worker MAC address
            worker_ip -- worker IP address
            worker_rkey -- worker remote key
            packet_size -- MTU for this session
            message_size -- RDMA message size for this session
            qpns_and_psns -- list of (QPn, initial psn) tuples

        Returns:
            (success flag, None or error message)
        '''
        # The data plane supports at most 32 workers (one bitmap bit each)
        if worker_id >= 32:
            error_msg = 'Worker ID {} too large; only 32 workers supported'.format(
                worker_id)
            self.log.error(error_msg)
            return (False, error_msg)

        if num_workers > 32:
            error_msg = 'Worker count {} too large; only 32 workers supported'.format(
                num_workers)
            self.log.error(error_msg)
            return (False, error_msg)

        # Get port for node
        success, dev_port = self.forwarder.get_dev_port(worker_mac)
        if not success:
            # On failure dev_port carries the error message
            return (False, dev_port)

        # Multicast groups below 0x8000 are used for sessions
        # (the mgid is the session id)
        #TODO session_id = session_id % 0x8000
        session_id = 0  # Single session supported for now

        # Add RDMA receiver/sender entries
        success, error_msg = self.rdma_receiver.add_rdma_worker(
            worker_id, worker_ip, self.switch_pkey, packet_size, num_workers,
            session_id)
        if not success:
            return (False, error_msg)
        self.rdma_sender.add_rdma_worker(worker_id, worker_mac, worker_ip,
                                         worker_rkey, packet_size,
                                         message_size, qpns_and_psns)

        # Add multicast group if not present
        if session_id not in self.multicast_groups:
            self.pre.add_multicast_group(session_id)
            self.multicast_groups[session_id] = {}

        if worker_id in self.multicast_groups[
                session_id] and self.multicast_groups[session_id][
                    worker_id] != dev_port:
            # Existing node with different port, remove it
            self.pre.remove_multicast_node(worker_id)
            del self.multicast_groups[session_id][worker_id]

        # Add multicast node if not present
        if worker_id not in self.multicast_groups[session_id]:
            # Add new node
            success, error_msg = self.pre.add_multicast_node(
                session_id, worker_id, dev_port)
            if not success:
                return (False, error_msg)
            self.multicast_groups[session_id][worker_id] = dev_port

        self.log.info('Added RDMA worker {}:{} {}'.format(
            worker_id, worker_mac, worker_ip))
        return (True, None)

    def clear_udp_workers(self, session_id):
        ''' Reset UDP workers state for this session '''
        #TODO selectively remove workers (RDMA or UDP) for
        #this session, clear UDP sender/receiver counters,
        #clear bitmap/count/exponents/processors
        self.udp_receiver._clear()
        self.udp_sender.clear_udp_workers()
        self.bitmap_checker._clear()
        self.workers_counter._clear()
        self.exponents._clear()
        for p in self.processors:
            p._clear()

        # Multicast groups below 0x8000 are used for sessions
        # (the mgid is the session id)
        #TODO session_id = session_id % 0x8000
        session_id = 0  # Single session supported for now
        self.clear_multicast_group(session_id)

    def add_udp_worker(self, session_id, worker_id, num_workers, worker_mac,
                       worker_ip):
        ''' Add SwitchML UDP worker.

        Keyword arguments:
            session_id -- ID of the session
            worker_id -- worker rank
            num_workers -- number of workers in this session
            worker_mac -- worker MAC address
            worker_ip -- worker IP address

        Returns:
            (success flag, None or error message)
        '''
        #TODO session packet size
        # The data plane supports at most 32 workers (one bitmap bit each)
        if worker_id >= 32:
            error_msg = 'Worker ID {} too large; only 32 workers supported'.format(
                worker_id)
            self.log.error(error_msg)
            return (False, error_msg)

        if num_workers > 32:
            error_msg = 'Worker count {} too large; only 32 workers supported'.format(
                num_workers)
            self.log.error(error_msg)
            return (False, error_msg)

        # Get port for node
        success, dev_port = self.forwarder.get_dev_port(worker_mac)
        if not success:
            # On failure dev_port carries the error message
            return (False, dev_port)

        # Multicast groups below 0x8000 are used for sessions
        # (the mgid is the session id)
        #TODO session_id = session_id % 0x8000
        session_id = 0  # Single session supported for now

        # Add UDP receiver/sender entries
        success, error_msg = self.udp_receiver.add_udp_worker(
            worker_id, worker_mac, worker_ip, self.udp_port, self.udp_mask,
            num_workers, session_id)
        if not success:
            return (False, error_msg)
        self.udp_sender.add_udp_worker(worker_id, worker_mac, worker_ip)

        # Add multicast group if not present
        if session_id not in self.multicast_groups:
            self.pre.add_multicast_group(session_id)
            self.multicast_groups[session_id] = {}

        if worker_id in self.multicast_groups[
                session_id] and self.multicast_groups[session_id][
                    worker_id] != dev_port:
            # Existing node with different port, remove it
            self.pre.remove_multicast_node(worker_id)
            del self.multicast_groups[session_id][worker_id]

        # Add multicast node if not present
        if worker_id not in self.multicast_groups[session_id]:
            # Add new node
            success, error_msg = self.pre.add_multicast_node(
                session_id, worker_id, dev_port)
            if not success:
                return (False, error_msg)
            self.multicast_groups[session_id][worker_id] = dev_port

        self.log.info('Added UDP worker {}:{} {}'.format(
            worker_id, worker_mac, worker_ip))
        return (True, None)

    def run(self):
        '''Run the gRPC server in the background and block on the CLI;
        when the CLI exits, shut the gRPC server and event loop down.'''
        try:
            # Start listening for RPCs
            self.grpc_future = self.grpc_executor.submit(
                self.grpc_server.run, self.event_loop, self)
            self.log.info('gRPC server started')

            # Start CLI (blocks until the operator quits)
            self.cli.run()

            # Stop gRPC server and event loop
            self.event_loop.call_soon_threadsafe(self.grpc_server.stop)

            # Wait for gRPC thread to end
            self.grpc_future.result()

            # Stop event loop
            self.event_loop.close()

            # Close gRPC executor
            self.grpc_executor.shutdown()
            self.log.info('gRPC server stopped')
        except Exception as e:
            self.log.exception(e)
            self.log.info('Stopping controller')