class RealIo(object):
    """Bridge between logical port numbers and real network interfaces.

    Keeps a bidirectional port<->interface-name mapping and opens one
    frame I/O port per interface so raw frames can be exchanged.
    """

    def __init__(self, iface_map):
        # forward map: logical port number -> interface name
        self.port_to_iface_name = iface_map
        # reverse map for ingress lookups: interface name -> port number
        self.iface_name_to_port = dict((n, p) for p, n in iface_map.items())
        self.frame_io = FrameIOManager()
        self.ponsim = None
        self.io_ports = dict()

    @inlineCallbacks
    def start(self):
        """Start the frame I/O manager and open one port per interface.

        Returns (via returnValue) self, so callers can chain on the
        started instance.
        """
        log.debug('starting')
        yield self.frame_io.start()
        for port, iface_name in self.port_to_iface_name.items():
            # self.ingress is the rx callback invoked per received frame
            io_port = self.frame_io.open_port(iface_name, self.ingress)
            self.io_ports[port] = io_port
        log.info('started')
        returnValue(self)

    @inlineCallbacks
    def stop(self):
        """Tear down all open interfaces, then stop the manager.

        Best-effort: any failure is logged rather than propagated, so
        shutdown of the caller can proceed.
        """
        log.debug('stopping')
        try:
            for port in self.io_ports.values():
                yield self.frame_io.del_interface(port.iface_name)
            yield self.frame_io.stop()
            log.info('stopped')
        # FIX: was `except Exception, e:` — Python-2-only syntax;
        # `as e` is valid in both Py2.6+ and Py3 and matches the
        # exception-handling style used elsewhere in this file.
        except Exception as e:
            log.exception('exception', e=e)
def startup_components(self):
    """Bring up all internal service components in dependency order.

    Uses `yield`, so this relies on twisted's @inlineCallbacks being
    applied at the (unseen) decoration site: each yield waits for the
    component's asynchronous start() before the next one begins.
    """
    try:
        self.log.info('starting-internal-components')
        registry.register('main', self)
        # Cluster coordinator comes up first; it owns membership/leadership
        yield registry.register(
            'coordinator',
            Coordinator(
                internal_host_address=self.args.internal_host_address,
                external_host_address=self.args.external_host_address,
                rest_port=self.args.rest_port,
                instance_id=self.args.instance_id,
                config=self.config,
                consul=self.args.consul)).start()
        init_rest_service(self.args.rest_port)
        yield registry.register('grpc_server', VolthaGrpcServer(
            self.args.grpc_port)).start()
        yield registry.register(
            'kafka_proxy',
            KafkaProxy(self.args.consul, self.args.kafka,
                       config=self.config.get('kafka-proxy', {}))).start()
        yield registry.register(
            'core',
            VolthaCore(instance_id=self.args.instance_id,
                       version=VERSION,
                       log_level=LogLevel.INFO)).start(
            config_backend=load_backend(self.args))
        yield registry.register('frameio', FrameIOManager()).start()
        yield registry.register(
            'adapter_loader',
            AdapterLoader(config=self.config.get('adapter_loader', {}))).start()
        yield registry.register(
            'diag',
            Diagnostics(config=self.config.get('diagnostics', {}))).start()
        if self.args.manhole_port is not None:
            self.start_manhole(self.args.manhole_port)
        self.log.info('started-internal-services')
    except Exception as e:
        # FIX: use structured logging (event name + e=e kwarg) for
        # consistency with the other startup variants in this file;
        # was: 'Failure to start all components {}'.format(e)
        self.log.exception('Failure-to-start-all-components', e=e)
class RealIo(object):
    """Connects the PON simulator to real host interfaces.

    Frames received on a mapped interface are decoded and handed to the
    registered ponsim instance; frames coming from the simulator are
    serialized and transmitted on the matching interface.
    """

    def __init__(self, iface_map):
        # forward map (port number -> iface name) plus its inverse,
        # used for fast lookups on the ingress path
        self.port_to_iface_name = iface_map
        self.iface_name_to_port = {name: num
                                   for num, name in iface_map.items()}
        self.frame_io = FrameIOManager()
        self.ponsim = None
        self.io_ports = {}

    @inlineCallbacks
    def start(self):
        """Start frame I/O and open one receive port per mapped interface."""
        log.debug('starting')
        yield self.frame_io.start()
        for port_no, name in self.port_to_iface_name.items():
            self.io_ports[port_no] = self.frame_io.open_port(
                name, self.ingress)
        log.info('started')
        returnValue(self)

    def stop(self):
        """Tear down every open port, then stop the frame I/O manager."""
        log.debug('stopping')
        for iop in self.io_ports.values():
            self.frame_io.del_interface(iop.iface_name)
        self.frame_io.stop()
        log.info('stopped')

    def register_ponsim(self, ponsim):
        """Attach the simulator instance that will consume ingress frames."""
        self.ponsim = ponsim

    def ingress(self, io_port, frame):
        """Decode a frame arriving on a real interface and feed the sim."""
        port = self.iface_name_to_port.get(io_port.iface_name)
        log.debug('ingress', port=port, iface_name=io_port.iface_name,
                  frame=hexify(frame))
        decoded = Ether(frame)
        # silently drop frames until a simulator has been registered
        if self.ponsim is not None:
            self.ponsim.ingress(port, decoded)

    def egress(self, port, frame):
        """Serialize (if needed) and transmit a frame on the mapped port."""
        if isinstance(frame, Packet):
            frame = str(frame)
        out_port = self.io_ports[port]
        log.debug('sending', port=port, frame=hexify(frame))
        out_port.send(frame)
def startup_components(self):
    # Brings up all internal components in strict dependency order.
    # Uses `yield`, so this must run under twisted's @inlineCallbacks
    # (the decorator is applied at the unseen definition site).
    try:
        self.log.info('starting-internal-components',
                      internal_host=self.args.internal_host_address,
                      external_host=self.args.external_host_address,
                      interface=self.args.interface,
                      consul=self.args.consul,
                      etcd=self.args.etcd)
        registry.register('main', self)
        # The coordinator backend is selectable: consul or etcd.
        if self.args.backend == 'consul':
            yield registry.register(
                'coordinator',
                Coordinator(
                    internal_host_address=self.args.internal_host_address,
                    external_host_address=self.args.external_host_address,
                    rest_port=self.args.rest_port,
                    instance_id=self.instance_id,
                    config=self.config,
                    consul=self.args.consul,
                    container_name_regex=self.args.container_name_regex)
            ).start()
        elif self.args.backend == 'etcd':
            yield registry.register(
                'coordinator',
                CoordinatorEtcd(
                    internal_host_address=self.args.internal_host_address,
                    external_host_address=self.args.external_host_address,
                    rest_port=self.args.rest_port,
                    instance_id=self.instance_id,
                    config=self.config,
                    etcd=self.args.etcd,
                    container_name_regex=self.args.container_name_regex)
            ).start()
        # NOTE(review): if args.backend is neither 'consul' nor 'etcd',
        # no coordinator is registered and registry('coordinator') below
        # will fail -- presumably argparse restricts the choices; confirm.
        self.log.info('waiting-for-config-assignment')
        # Wait until we get a config id before we proceed
        self.core_store_id, store_prefix = \
            yield registry('coordinator').get_core_store_id_and_prefix()
        self.log.info('store-id', core_store_id=self.core_store_id)
        # Update the logger to output the vcore id.
        self.log = update_logging(instance_id=self.instance_id,
                                  vcore_id=self.core_store_id)
        yield registry.register(
            'kafka_proxy',
            KafkaProxy(self.args.consul, self.args.kafka,
                       config=self.config.get('kafka-proxy', {}))).start()
        yield registry.register('openolt_kafka_proxy', OpenoltKafkaProxy(
            self.args.kafka)).start()
        yield registry.register('grpc_server', VolthaGrpcServer(
            self.args.grpc_port)).start()
        # The core is started with a config backend scoped to this
        # instance's assigned store id/prefix.
        yield registry.register(
            'core',
            VolthaCore(instance_id=self.instance_id,
                       core_store_id=self.core_store_id,
                       grpc_port=self.args.grpc_port,
                       version=self.voltha_version,
                       log_level=LogLevel.INFO)
        ).start(config_backend=load_backend(store_id=self.core_store_id,
                                            store_prefix=store_prefix,
                                            args=self.args))
        init_rest_service(self.args.rest_port)
        yield registry.register('frameio', FrameIOManager()).start()
        yield registry.register(
            'adapter_loader',
            AdapterLoader(config=self.config.get('adapter_loader', {}))).start()
        yield registry.register(
            'diag',
            Diagnostics(config=self.config.get('diagnostics', {}))).start()
        if self.args.manhole_port is not None:
            self.start_manhole(self.args.manhole_port)
        # Now that all components are loaded, in the scenario where this
        # voltha instance is picking up an existing set of data (from a
        # voltha instance that dies/stopped) then we need to setup this
        # instance from where the previous one left
        yield registry('core').reconcile_data()
        # Now that the data is in memory and the reconcile process
        # within the core has completed (the reconciliation may still be
        # in progress with the adapters) we expose the NBI of voltha core
        yield registry('core').register_grpc_service()
        self.log.info('started-internal-services')
    except Exception as e:
        self.log.exception('Failure-to-start-all-components', e=e)
def setUp(self):
    """Per-test setup: ensure the veth pairs exist, then start frame I/O.

    Uses `yield`, so this relies on twisted's @inlineCallbacks being
    applied at the (unseen) decoration site.
    """
    yield self.make_veth_pairs_if_needed()
    # NOTE(review): assumes FrameIOManager.start() returns the manager
    # itself (fluent style) rather than a Deferred -- confirm, since
    # self.mgr is used synchronously by the tests.
    self.mgr = FrameIOManager().start()
class TestFrameIO(TestCase):
    """Exercises FrameIOManager send/receive over local veth pairs.

    Requires privileges to run `ip link` (creates veth0/1 and veth2/3
    pairs on demand).
    """

    @inlineCallbacks
    def make_veth_pairs_if_needed(self):

        def has_iface(iface):
            # `ip link show` exits 0 iff the interface exists
            return os.system('ip link show {}'.format(iface)) == 0

        def make_veth(iface):
            # `ip link add type veth` auto-names the new pair vethN/vethN+1
            os.system('ip link add type veth')
            os.system('ip link set {} up'.format(iface))
            peer = iface[:len('veth')] + str(int(iface[len('veth'):]) + 1)
            os.system('ip link set {} up'.format(peer))
            assert has_iface(iface)

        for iface_number in (0, 2):
            iface = 'veth{}'.format(iface_number)
            if not has_iface(iface):
                make_veth(iface)
                # give the kernel a moment to bring the new pair up
                yield asleep(2)

    @inlineCallbacks
    def setUp(self):
        yield self.make_veth_pairs_if_needed()
        # NOTE(review): assumes FrameIOManager.start() returns the manager
        # itself (fluent style) -- confirm.
        self.mgr = FrameIOManager().start()

    def tearDown(self):
        self.mgr.stop()

    @inlineCallbacks
    def test_packet_send_receive(self):
        rcvd = DeferredWithTimeout()
        # NOTE(review): `none` is not a Python builtin -- presumably a
        # no-op rx callback imported elsewhere in this module; confirm
        # (or this should be None).
        p0 = self.mgr.open_port('veth0', none).up()
        p1 = self.mgr.open_port('veth1', lambda p, f: rcvd.callback(
            (p, f))).up()

        # sending to veth0 should result in receiving on veth1 and vice versa
        bogus_frame = 'bogus packet'
        # pad to the minimum frame size so the send is accepted
        bogus_frame_padded = bogus_frame + '\x00' * (FrameIOPort.MIN_PKT_SIZE
                                                     - len(bogus_frame))
        p0.send(bogus_frame_padded)

        # check that we received the packet
        port, frame = yield rcvd
        self.assertEqual(port, p1)
        self.assertEqual(frame, bogus_frame_padded)

    @inlineCallbacks
    def test_packet_send_receive_with_filter(self):
        rcvd = DeferredWithTimeout()
        # BPF filter: only IP packets to this address are delivered
        filter = BpfProgramFilter('ip dst host 123.123.123.123')
        p0 = self.mgr.open_port('veth0', none).up()
        p1 = self.mgr.open_port('veth1',
                                lambda p, f: rcvd.callback((p, f)),
                                filter=filter).up()

        # a matching IP packet must pass the filter and be received
        ip_packet = str(Ether() / IP(dst='123.123.123.123'))
        p0.send(ip_packet)

        # check that we received the packet
        port, frame = yield rcvd
        self.assertEqual(port, p1)
        self.assertEqual(frame, ip_packet)

    @inlineCallbacks
    def test_packet_send_drop_with_filter(self):
        rcvd = DeferredWithTimeout()
        filter = BpfProgramFilter('ip dst host 123.123.123.123')
        p0 = self.mgr.open_port('veth0', none).up()
        self.mgr.open_port('veth1', lambda p, f: rcvd.callback((p, f)),
                           filter=filter).up()

        # sending bogus packet would not be received
        p0.send('bogus packet')

        # the filter must drop it, so the deferred should time out
        # (NOTE(review): TimeOutError is presumably the project's custom
        # timeout exception from DeferredWithTimeout -- confirm spelling)
        try:
            _ = yield rcvd
        except TimeOutError:
            pass
        else:
            self.fail('not timed out')

    @inlineCallbacks
    def test_concurrent_packet_send_receive(self):
        done = Deferred()
        queue1 = []
        queue2 = []
        n = 100

        def append(queue):
            # fire `done` once both receive queues are full
            def _append(_, frame):
                queue.append(frame)
                if len(queue1) == n and len(queue2) == n:
                    done.callback(None)
            return _append

        p1in = self.mgr.open_port('veth0', none).up()
        self.mgr.open_port('veth1', append(queue1)).up()
        p2in = self.mgr.open_port('veth2', none).up()
        self.mgr.open_port('veth3', append(queue2)).up()

        @inlineCallbacks
        def send_packets(port, n):
            for i in xrange(n):
                port.send(str(i))
                yield asleep(0.00001 * random.random())  # to interleave

        # sending two concurrent streams
        send_packets(p1in, n)
        send_packets(p2in, n)

        # verify that both queues got all packets
        yield done

    @inlineCallbacks
    def test_concurrent_packet_send_receive_with_filter(self):
        done = Deferred()
        queue1 = []
        queue2 = []
        n = 100

        def append(queue):
            # only half the packets match the vlan 100 filter, hence n / 2
            def _append(_, frame):
                queue.append(frame)
                if len(queue1) == n / 2 and len(queue2) == n / 2:
                    done.callback(None)
            return _append

        filter = BpfProgramFilter('vlan 100')
        p1in = self.mgr.open_port('veth0', none).up()
        self.mgr.open_port('veth1', append(queue1), filter).up()
        p2in = self.mgr.open_port('veth2', none).up()
        self.mgr.open_port('veth3', append(queue2), filter).up()

        @inlineCallbacks
        def send_packets(port, n):
            for i in xrange(n):
                # packets have alternating VLAN ids 100 and 101
                pkt = Ether() / Dot1Q(vlan=100 + i % 2)
                port.send(str(pkt))
                yield asleep(0.00001 * random.random())  # to interleave

        # sending two concurrent streams
        send_packets(p1in, n)
        send_packets(p2in, n)

        # verify that both queues got all matching packets
        yield done

    @inlineCallbacks
    def test_shared_interface(self):
        queue1 = DeferredQueue()
        queue2 = DeferredQueue()

        # two senders hooked up to the same interface (sharing it)
        # here we test if they can both send
        pin1 = self.mgr.open_port('veth0', none).up()
        pin2 = self.mgr.open_port('veth0', none).up()

        # two receivers share veth1; pout2 additionally filters on dst IP
        pout1 = self.mgr.open_port('veth1', lambda p, f: queue1.put(
            (p, f))).up()
        filter = BpfProgramFilter('ip dst host 123.123.123.123')
        pout2 = self.mgr.open_port('veth1',
                                   lambda p, f: queue2.put((p, f)),
                                   filter=filter).up()

        # sending from pin1, should be received by pout1
        bogus_frame = 'bogus packet'
        bogus_frame_padded = bogus_frame + '\x00' * (FrameIOPort.MIN_PKT_SIZE
                                                     - len(bogus_frame))
        pin1.send(bogus_frame_padded)
        port, frame = yield queue1.get()
        self.assertEqual(port, pout1)
        self.assertEqual(frame, bogus_frame_padded)
        self.assertEqual(len(queue1.pending), 0)
        self.assertEqual(len(queue2.pending), 0)

        # sending from pin2, should be received by pout1
        pin2.send(bogus_frame_padded)
        port, frame = yield queue1.get()
        self.assertEqual(port, pout1)
        self.assertEqual(frame, bogus_frame_padded)
        self.assertEqual(len(queue1.pending), 0)
        self.assertEqual(len(queue2.pending), 0)

        # sending from pin1, should be received by both pouts
        ip_packet = str(Ether() / IP(dst='123.123.123.123'))
        pin1.send(ip_packet)
        port, frame = yield queue1.get()
        self.assertEqual(port, pout1)
        self.assertEqual(frame, ip_packet)
        self.assertEqual(len(queue1.pending), 0)
        port, frame = yield queue2.get()
        self.assertEqual(port, pout2)
        self.assertEqual(frame, ip_packet)
        self.assertEqual(len(queue2.pending), 0)

        # sending from pin2, should (likewise) be received by both pouts
        ip_packet = str(Ether() / IP(dst='123.123.123.123'))
        pin2.send(ip_packet)
        port, frame = yield queue1.get()
        self.assertEqual(port, pout1)
        self.assertEqual(frame, ip_packet)
        self.assertEqual(len(queue1.pending), 0)
        port, frame = yield queue2.get()
        self.assertEqual(port, pout2)
        self.assertEqual(frame, ip_packet)
        self.assertEqual(len(queue2.pending), 0)

        self.mgr.close_port(pin1)
        self.mgr.close_port(pin2)
        self.mgr.close_port(pout1)
        self.mgr.close_port(pout2)
def startup_components(self):
    # Starts all internal components in strict dependency order. Uses
    # `yield`, so this must run under twisted's @inlineCallbacks (the
    # decorator is applied at the unseen definition site).
    try:
        self.log.info('starting-internal-components',
                      internal_host=self.args.internal_host_address,
                      external_host=self.args.external_host_address,
                      interface=self.args.interface,
                      consul=self.args.consul)
        registry.register('main', self)
        # The coordinator owns cluster membership / config assignment.
        yield registry.register(
            'coordinator',
            Coordinator(
                internal_host_address=self.args.internal_host_address,
                external_host_address=self.args.external_host_address,
                rest_port=self.args.rest_port,
                instance_id=self.instance_id,
                config=self.config,
                consul=self.args.consul)).start()
        self.log.info('waiting-for-config-assignment')
        # Wait until we get a config id before we proceed
        self.core_store_id, store_prefix = \
            yield registry('coordinator').get_core_store_id_and_prefix()
        self.log.info('store-id', core_store_id=self.core_store_id)
        yield registry.register('grpc_server', VolthaGrpcServer(
            self.args.grpc_port)).start()
        # The core is started with a config backend scoped to this
        # instance's assigned store id/prefix.
        yield registry.register(
            'core',
            VolthaCore(instance_id=self.instance_id,
                       core_store_id=self.core_store_id,
                       grpc_port=self.args.grpc_port,
                       version=VERSION,
                       log_level=LogLevel.INFO)
        ).start(config_backend=load_backend(store_id=self.core_store_id,
                                            store_prefix=store_prefix,
                                            args=self.args))
        init_rest_service(self.args.rest_port)
        yield registry.register(
            'kafka_proxy',
            KafkaProxy(self.args.consul, self.args.kafka,
                       config=self.config.get('kafka-proxy', {}))).start()
        yield registry.register('frameio', FrameIOManager()).start()
        yield registry.register(
            'adapter_loader',
            AdapterLoader(config=self.config.get('adapter_loader', {}))).start()
        yield registry.register(
            'diag',
            Diagnostics(config=self.config.get('diagnostics', {}))).start()
        if self.args.manhole_port is not None:
            self.start_manhole(self.args.manhole_port)
        # Now that all components are loaded, in the scenario where this
        # voltha instance is picking up an existing set of data (from a
        # voltha instance that dies/stopped) then we need to setup this
        # instance from where the previous one left
        yield registry('core').reconcile_data()
        self.log.info('started-internal-services')
    except Exception as e:
        self.log.exception('Failure-to-start-all-components', e=e)
def __init__(self, iface_map):
    """Record the port<->interface mapping and prepare frame I/O state."""
    # forward map: logical port number -> interface name
    self.port_to_iface_name = iface_map
    # inverse map for ingress-side lookups: interface name -> port number
    self.iface_name_to_port = {name: num for num, name in iface_map.items()}
    self.frame_io = FrameIOManager()
    # no simulator attached yet; set later via register_ponsim
    self.ponsim = None
    # populated on start(): port number -> opened io port
    self.io_ports = {}
class TestFrameIO(TestCase):
    """FrameIOManager integration tests over local veth interface pairs.

    Needs `ip link` privileges; veth0/1 and veth2/3 pairs are created
    lazily by make_veth_pairs_if_needed().
    """

    @inlineCallbacks
    def make_veth_pairs_if_needed(self):

        def has_iface(iface):
            # exit status 0 from `ip link show` means the iface exists
            return os.system('ip link show {}'.format(iface)) == 0

        def make_veth(iface):
            # the kernel auto-names a new pair vethN/vethN+1
            os.system('ip link add type veth')
            os.system('ip link set {} up'.format(iface))
            peer = iface[:len('veth')] + str(int(iface[len('veth'):]) + 1)
            os.system('ip link set {} up'.format(peer))
            assert has_iface(iface)

        for iface_number in (0, 2):
            iface = 'veth{}'.format(iface_number)
            if not has_iface(iface):
                make_veth(iface)
                # brief settle time after creating a pair
                yield asleep(2)

    @inlineCallbacks
    def setUp(self):
        yield self.make_veth_pairs_if_needed()
        # NOTE(review): assumes FrameIOManager.start() returns the
        # manager (fluent style), not a Deferred -- confirm.
        self.mgr = FrameIOManager().start()

    def tearDown(self):
        self.mgr.stop()

    @inlineCallbacks
    def test_packet_send_receive(self):
        rcvd = DeferredWithTimeout()
        # NOTE(review): `none` is not a builtin -- presumably a no-op rx
        # callback imported elsewhere in this module; confirm (or it
        # should be None).
        p0 = self.mgr.open_port('veth0', none).up()
        p1 = self.mgr.open_port('veth1',
                                lambda p, f: rcvd.callback((p, f))).up()

        # sending to veth0 should result in receiving on veth1 and vice versa
        bogus_frame = 'bogus packet'
        # pad up to the minimum frame size
        bogus_frame_padded = bogus_frame + '\x00' * (
            FrameIOPort.MIN_PKT_SIZE - len(bogus_frame))
        p0.send(bogus_frame_padded)

        # check that we received the packet
        port, frame = yield rcvd
        self.assertEqual(port, p1)
        self.assertEqual(frame, bogus_frame_padded)

    @inlineCallbacks
    def test_packet_send_receive_with_filter(self):
        rcvd = DeferredWithTimeout()
        # only IP packets to this destination pass the BPF filter
        filter = BpfProgramFilter('ip dst host 123.123.123.123')
        p0 = self.mgr.open_port('veth0', none).up()
        p1 = self.mgr.open_port('veth1',
                                lambda p, f: rcvd.callback((p, f)),
                                filter=filter).up()

        # a matching IP packet passes the filter and is received
        ip_packet = str(Ether() / IP(dst='123.123.123.123'))
        p0.send(ip_packet)

        # check that we received the packet
        port, frame = yield rcvd
        self.assertEqual(port, p1)
        self.assertEqual(frame, ip_packet)

    @inlineCallbacks
    def test_packet_send_drop_with_filter(self):
        rcvd = DeferredWithTimeout()
        filter = BpfProgramFilter('ip dst host 123.123.123.123')
        p0 = self.mgr.open_port('veth0', none).up()
        self.mgr.open_port('veth1', lambda p, f: rcvd.callback((p, f)),
                           filter=filter).up()

        # sending bogus packet would not be received
        p0.send('bogus packet')

        # the filter drops it, so we expect the deferred to time out
        # (NOTE(review): TimeOutError presumably comes from the project's
        # DeferredWithTimeout helper -- confirm spelling)
        try:
            _ = yield rcvd
        except TimeOutError:
            pass
        else:
            self.fail('not timed out')

    @inlineCallbacks
    def test_concurrent_packet_send_receive(self):
        done = Deferred()
        queue1 = []
        queue2 = []
        n = 100

        def append(queue):
            # completes `done` when both queues have received everything
            def _append(_, frame):
                queue.append(frame)
                if len(queue1) == n and len(queue2) == n:
                    done.callback(None)
            return _append

        p1in = self.mgr.open_port('veth0', none).up()
        self.mgr.open_port('veth1', append(queue1)).up()
        p2in = self.mgr.open_port('veth2', none).up()
        self.mgr.open_port('veth3', append(queue2)).up()

        @inlineCallbacks
        def send_packets(port, n):
            for i in xrange(n):
                port.send(str(i))
                yield asleep(0.00001 * random.random())  # to interleave

        # sending two concurrent streams
        send_packets(p1in, n)
        send_packets(p2in, n)

        # verify that both queues got all packets
        yield done

    @inlineCallbacks
    def test_concurrent_packet_send_receive_with_filter(self):
        done = Deferred()
        queue1 = []
        queue2 = []
        n = 100

        def append(queue):
            # half the traffic matches `vlan 100`, hence n / 2 each
            def _append(_, frame):
                queue.append(frame)
                if len(queue1) == n / 2 and len(queue2) == n / 2:
                    done.callback(None)
            return _append

        filter = BpfProgramFilter('vlan 100')
        p1in = self.mgr.open_port('veth0', none).up()
        self.mgr.open_port('veth1', append(queue1), filter).up()
        p2in = self.mgr.open_port('veth2', none).up()
        self.mgr.open_port('veth3', append(queue2), filter).up()

        @inlineCallbacks
        def send_packets(port, n):
            for i in xrange(n):
                # packets have alternating VLAN ids 100 and 101
                pkt = Ether() / Dot1Q(vlan=100 + i % 2)
                port.send(str(pkt))
                yield asleep(0.00001 * random.random())  # to interleave

        # sending two concurrent streams
        send_packets(p1in, n)
        send_packets(p2in, n)

        # verify that both queues got all matching packets
        yield done

    @inlineCallbacks
    def test_shared_interface(self):
        queue1 = DeferredQueue()
        queue2 = DeferredQueue()

        # two senders hooked up to the same interface (sharing it)
        # here we test if they can both send
        pin1 = self.mgr.open_port('veth0', none).up()
        pin2 = self.mgr.open_port('veth0', none).up()

        # two receivers share veth1; pout2 also applies a dst-IP filter
        pout1 = self.mgr.open_port(
            'veth1', lambda p, f: queue1.put((p, f))).up()
        filter = BpfProgramFilter('ip dst host 123.123.123.123')
        pout2 = self.mgr.open_port(
            'veth1', lambda p, f: queue2.put((p, f)), filter=filter).up()

        # sending from pin1, should be received by pout1
        bogus_frame = 'bogus packet'
        bogus_frame_padded = bogus_frame + '\x00' * (
            FrameIOPort.MIN_PKT_SIZE - len(bogus_frame))
        pin1.send(bogus_frame_padded)
        port, frame = yield queue1.get()
        self.assertEqual(port, pout1)
        self.assertEqual(frame, bogus_frame_padded)
        self.assertEqual(len(queue1.pending), 0)
        self.assertEqual(len(queue2.pending), 0)

        # sending from pin2, should be received by pout1
        pin2.send(bogus_frame_padded)
        port, frame = yield queue1.get()
        self.assertEqual(port, pout1)
        self.assertEqual(frame, bogus_frame_padded)
        self.assertEqual(len(queue1.pending), 0)
        self.assertEqual(len(queue2.pending), 0)

        # sending from pin1, should be received by both pouts
        ip_packet = str(Ether() / IP(dst='123.123.123.123'))
        pin1.send(ip_packet)
        port, frame = yield queue1.get()
        self.assertEqual(port, pout1)
        self.assertEqual(frame, ip_packet)
        self.assertEqual(len(queue1.pending), 0)
        port, frame = yield queue2.get()
        self.assertEqual(port, pout2)
        self.assertEqual(frame, ip_packet)
        self.assertEqual(len(queue2.pending), 0)

        # sending from pin2, should (likewise) be received by both pouts
        ip_packet = str(Ether() / IP(dst='123.123.123.123'))
        pin2.send(ip_packet)
        port, frame = yield queue1.get()
        self.assertEqual(port, pout1)
        self.assertEqual(frame, ip_packet)
        self.assertEqual(len(queue1.pending), 0)
        port, frame = yield queue2.get()
        self.assertEqual(port, pout2)
        self.assertEqual(frame, ip_packet)
        self.assertEqual(len(queue2.pending), 0)

        self.mgr.close_port(pin1)
        self.mgr.close_port(pin2)
        self.mgr.close_port(pout1)
        self.mgr.close_port(pout2)