def _transmit_bpdu(self):
    """Periodic BPDU transmitter for this port (runs in its own green thread).

    Every hello_time seconds: sends a Config BPDU while the port role is
    DESIGNATED_PORT (with the Topology Change flag set while send_tc_flg
    is on), and a Topology Change Notification BPDU while send_tcn_flg
    is on.
    """
    while True:
        # Send config BPDU packet if port role is DESIGNATED_PORT.
        if self.role == DESIGNATED_PORT:
            now = datetime.datetime.today()
            # Topology-change advertisement window has elapsed: stop
            # setting the TC flag in outgoing Config BPDUs.
            if self.send_tc_timer and self.send_tc_timer < now:
                self.send_tc_timer = None
                self.send_tc_flg = False

            if not self.send_tc_flg:
                flags = 0b00000000
                log_msg = '[port=%d] Send Config BPDU.'
            else:
                # Bit 0 of the BPDU flags octet is the Topology Change flag.
                flags = 0b00000001
                log_msg = '[port=%d] Send TopologyChange BPDU.'
            bpdu_data = self._generate_config_bpdu(flags)
            self.ofctl.send_packet_out(self.ofport.port_no, bpdu_data)
            self.logger.debug(log_msg, self.ofport.port_no,
                              extra=self.dpid_str)

        # Send Topology Change Notification BPDU until receive Ack.
        if self.send_tcn_flg:
            bpdu_data = self._generate_tcn_bpdu()
            self.ofctl.send_packet_out(self.ofport.port_no, bpdu_data)
            self.logger.debug('[port=%d] Send TopologyChangeNotify BPDU.',
                              self.ofport.port_no, extra=self.dpid_str)

        hub.sleep(self.port_times.hello_time)
def transact_block(request, connection):
    """Emulate jsonrpc.Connection.transact_block without blocking eventlet.

    Sends *request* on *connection* and polls for completion, yielding to
    the hub instead of blocking the whole process the way the stock OVS
    transact_block() would.

    Returns an (error, reply) tuple; error is falsy on success.
    """
    error = connection.send(request)
    reply = None
    if error:
        # The send itself failed; nothing to wait for.
        return error, reply
    ovs_poller = poller.Poller()
    while not error:
        ovs_poller.immediate_wake()
        error, reply = connection.recv()

        if error != errno.EAGAIN:
            # Either a message arrived (error == 0) or a hard error
            # occurred; stop polling in both cases.
            break

        # NOTE(review): at this point error == EAGAIN, so recv()
        # presumably returned no message and this match check looks
        # unreachable -- confirm against jsonrpc.Connection.recv().
        if (reply and reply.id == request.id and
                reply.type in (jsonrpc.Message.T_REPLY,
                               jsonrpc.Message.T_ERROR)):
            break

        connection.run()
        connection.wait(ovs_poller)
        connection.recv_wait(ovs_poller)
        ovs_poller.block()

    hub.sleep(0)  # yield so other greenthreads get a chance to run
    return error, reply
def lldp_loop(self):
    """Main LLDP transmission loop.

    Each pass: send LLDP immediately on ports never sent to yet
    (timestamp is None), resend on ports whose send period expired, then
    wait until the next port is due (or until lldp_event is set to wake
    the loop early).
    """
    while self.is_active:
        self.lldp_event.clear()

        now = time.time()
        timeout = None
        ports_now = []   # never sent yet -> send immediately
        ports = []       # send period expired -> resend
        # NOTE(review): the early break assumes self.ports iterates in
        # ascending-timestamp order, so the first unexpired entry gives
        # the earliest next deadline -- confirm the container keeps
        # that ordering.
        for (key, data) in self.ports.items():
            if data.timestamp is None:
                ports_now.append(key)
                continue

            expire = data.timestamp + self.LLDP_SEND_PERIOD_PER_PORT
            if expire <= now:
                ports.append(key)
                continue

            # First port that is not yet due: sleep until it expires.
            timeout = expire - now
            break

        for port in ports_now:
            self.send_lldp_packet(port)
        for port in ports:
            self.send_lldp_packet(port)
            hub.sleep(self.LLDP_SEND_GUARD)      # don't burst

        if timeout is not None and ports:
            timeout = 0     # We have already slept
        # LOG.debug('lldp sleep %s', timeout)
        self.lldp_event.wait(timeout=timeout)
def _change_status(self, new_state, thread_switch=True):
    """Move this port to *new_state* and propagate the change.

    Pushes the state to the switch (unless disabling), raises a
    topology-change notification when forwarding starts or stops,
    starts/stops the BPDU sender thread, and publishes an
    EventPortStateChange.

    :param new_state: one of the PORT_STATE_* constants.
    :param thread_switch: when True, yield at the end so other
        greenthreads (e.g. a freshly started BPDU sender) run at once.
    """
    if new_state is not PORT_STATE_DISABLE:
        self.ofctl.set_port_status(self.ofport, new_state)

    # The topology changes when this port starts forwarding, or stops
    # forwarding because it is being disabled/blocked.
    if (new_state is PORT_STATE_FORWARD
            or (self.state is PORT_STATE_FORWARD
                and (new_state is PORT_STATE_DISABLE
                     or new_state is PORT_STATE_BLOCK))):
        self.topology_change_notify(new_state)

    if (new_state is PORT_STATE_DISABLE
            or new_state is PORT_STATE_BLOCK):
        # Leaving the active topology: reset topology-change
        # bookkeeping and stop transmitting BPDUs.
        self.send_tc_flg = False
        self.send_tc_timer = None
        self.send_tcn_flg = False
        self.send_bpdu_thread.stop()
    elif new_state is PORT_STATE_LISTEN:
        self.send_bpdu_thread.start()

    self.state = new_state
    self.send_event(EventPortStateChange(self.dp, self))

    # Wake anything waiting for this state transition.
    if self.state_event is not None:
        self.state_event.set()
        self.state_event = None

    if thread_switch:
        hub.sleep(0)  # For thread switching.
def pause(self, seconds=0):
    """Relinquishes hub for given number of seconds.

    In other words is puts to sleep to give other greenthread a chance
    to run.

    :param seconds: how long to yield; 0 (the default) yields the hub
        once and resumes as soon as it is rescheduled.
    """
    hub.sleep(seconds)
def test_ssl(self): """Tests SSL server functionality.""" # TODO: TLS version enforcement is necessary to avoid # vulnerable versions. Currently, this only tests TLS # connectivity. this_dir = os.path.dirname(sys.modules[__name__].__file__) saved_exception = None try: ssl_version = ssl.PROTOCOL_TLS except AttributeError: # For compatibility with older pythons. ssl_version = ssl.PROTOCOL_TLSv1 for i in range(3): try: # Try a few times as this can fail with EADDRINUSE port = random.randint(5000, 10000) server = hub.spawn(self._test_ssl, this_dir, port) hub.sleep(1) client = hub.StreamClient(("127.0.0.1", port), timeout=5, ssl_version=ssl_version) if client.connect() is not None: break except Exception as e: saved_exception = e continue finally: try: hub.kill(server) except Exception: pass else: self.fail("Failed to connect: " + str(saved_exception))
def _do_timeout_for_leave(self, timeout, datapath, dst, in_port):
    """the process when the QUERY from the switch timeout expired.

    Sleeps *timeout* seconds; if the port still has not answered the
    group-specific query (its 'out' mark is unset) it is removed from
    group *dst*: the flow entry is rebuilt for the remaining member
    ports, or the whole group is torn down when no member is left.
    """
    parser = datapath.ofproto_parser
    dpid = datapath.id

    hub.sleep(timeout)
    outport = self._to_querier[dpid]['port']

    if self._to_hosts[dpid][dst]['ports'][in_port]['out']:
        # A report arrived while sleeping; the port is still a member.
        return

    del self._to_hosts[dpid][dst]['ports'][in_port]
    self._del_flow_entry(datapath, in_port, dst)

    # Rebuild the output actions from the remaining member ports.
    actions = []
    ports = []
    for port in self._to_hosts[dpid][dst]['ports']:
        actions.append(parser.OFPActionOutput(port))
        ports.append(port)
    if len(actions):
        self._send_event(
            EventMulticastGroupStateChanged(
                MG_MEMBER_CHANGED, dst, outport, ports))
        self._set_flow_entry(datapath, actions, outport, dst)
        self._to_hosts[dpid][dst]['leave'] = None
    else:
        # Last member left: drop the whole group.
        self._remove_multicast_group(datapath, outport, dst)
        del self._to_hosts[dpid][dst]
def _service_loop(self):
    """Serve Zebra clients until the service is deactivated.

    Repeatedly creates and starts a ZServer; when start() returns
    (NOTE(review): assumes ZServer.start() blocks until the server
    stops -- confirm), waits retry_interval seconds before starting a
    fresh one. Closes the service on exit.
    """
    while self.is_active:
        self.zserv = ZServer(self)
        self.zserv.start()
        # Server stopped: back off before retrying.
        hub.sleep(CONF.retry_interval)
    self.close()
def _update_wait_bpdu_timer(self):
    """Restart the wait-BPDU timer by waking the thread blocked on it.

    Setting the event releases the waiter; clearing the reference
    ensures the next wait starts with a fresh event object.
    """
    if self.wait_timer_event is not None:
        self.wait_timer_event.set()
        self.wait_timer_event = None
        self.logger.debug('[port=%d] Wait BPDU timer is updated.',
                          self.ofport.port_no, extra=self.dpid_str)
        hub.sleep(0)  # For thread switching.
def test_start(self):
    """Checks if the poller is started."""
    self.poller.send_req = self.fake_send_req
    self.poller.start(mock.Mock(), active=True)
    worker = self.poller.thread
    # Give the poller one full interval (plus slack) to fire at least once.
    hub.sleep(self.interval + 1)
    self.assertTrue(self.send_called)
    self.assertFalse(worker.dead)
def _retry_loop():
    """Register pending client addresses once the OpenFlow handler is up.

    Polls once per second until ofp_handler.controller exists, then
    spawns a client loop for every buffered address.
    """
    # Delays registration if ofp_handler is not started yet
    while ofp_handler.controller is None:
        hub.sleep(1)
    for addr, interval in _TMP_ADDRESSES.items():
        ofp_handler.controller.spawn_client_loop(addr, interval)
    hub.sleep(1)
def _echo_request_loop(self):
    """Send periodic echo requests; close the connection when the peer
    stops answering.

    Runs while the send queue exists and the count of outstanding echo
    requests stays within max_unreplied_echo_requests; once the limit
    is exceeded the loop exits and the datapath is closed. Disabled
    entirely when max_unreplied_echo_requests is falsy.
    """
    if not self.max_unreplied_echo_requests:
        return
    # NOTE(review): '<=' lets the outstanding list grow to max+1 before
    # the loop exits -- confirm whether the off-by-one is intended.
    while (self.send_q and
           (len(self.unreplied_echo_requests)
            <= self.max_unreplied_echo_requests)):
        echo_req = self.ofproto_parser.OFPEchoRequest(self)
        # Presumably the echo-reply handler removes matched xids from
        # this list -- confirm; an unanswered request stays queued here.
        self.unreplied_echo_requests.append(self.set_xid(echo_req))
        self.send_msg(echo_req)
        hub.sleep(self.echo_request_interval)
    self.close()
def _link_request_sync(self, interval):
    """Issue a synchronous EventLinkRequest every *interval* seconds
    and log each link in the reply."""
    while self.is_active:
        req = event.EventLinkRequest()
        LOG.debug('link_request sync %s thread(%s)',
                  req, id(hub.getcurrent()))
        rep = self.send_request(req)
        LOG.debug('link_reply sync %s', rep)
        if rep.links:
            for link in rep.links:
                LOG.debug(' %s', link)
        hub.sleep(interval)
def _switch_request_sync(self, interval):
    """Issue a synchronous EventSwitchRequest every *interval* seconds
    and log each switch in the reply."""
    while self.is_active:
        req = event.EventSwitchRequest()
        LOG.debug('switch_request sync %s thread(%s)',
                  req, id(hub.getcurrent()))
        rep = self.send_request(req)
        LOG.debug('switch_reply sync %s', rep)
        if rep.switches:
            for sw in rep.switches:
                LOG.debug(' %s', sw)
        hub.sleep(interval)
def test_stop(self):
    """Check if a poller can be stopped."""
    self.poller.send_req = self.fake_send_req
    self.poller.start(mock.Mock(), active=True)
    worker = self.poller.thread
    self.poller.stop()
    # After stop(), waiting a full interval must not trigger a request.
    hub.sleep(self.interval + 1)
    self.assertFalse(self.send_called)
    self.assertTrue(worker.dead)
def dictify(row):
    """Convert an OVSDB Row object into a plain dict of python values.

    Returns None when *row* is None.
    """
    if row is None:
        return
    result = {}
    for key, value in row._data.items():
        result[key] = value.to_python(_uuid_to_row)
        # Yield to the hub between columns so converting a wide row
        # does not starve other greenthreads.
        hub.sleep(0)
    return result
def _idl_loop(self):
    """Drive the OVSDB IDL while the service is active.

    Each iteration processes IDL updates, flushes queued transactions
    and yields to the hub. Any exception is logged with its traceback
    and re-raised, ending the loop (and the surrounding green thread).
    """
    while self.is_active:
        try:
            self._idl.run()
            self._transactions()
        except Exception:
            # Fix: pass system_id as a lazy logging argument instead of
            # eager '%' formatting -- same output, but formatting is
            # deferred and cannot itself raise before logging happens.
            self.logger.exception('Error running IDL for system_id %s',
                                  self.system_id)
            raise
        hub.sleep(0)
def __call__(self): """Send request loop. Delays the initial request for a random interval to reduce load. Then sends a request to the datapath, waits the specified interval and checks that a response has been received in a loop.""" # TODO: this should use a deterministic method instead of random hub.sleep(random.randint(1, self.conf.interval)) while True: self.send_req() self.reply_pending = True hub.sleep(self.conf.interval) if self.reply_pending: self.no_response()
def _event_proxy_loop(self):
    """Forward pending IDL events to the application event queue.

    Polls every 100 ms while idle; after draining a batch, yields once
    so other greenthreads may run.
    """
    while self.is_active:
        pending = self._idl.events
        if not pending:
            hub.sleep(0.1)
            continue
        for entry in pending:
            event_cls = entry[0]
            event_args = entry[1]
            self._submit_event(event_cls(self.system_id, *event_args))
        hub.sleep(0)
def _do_timeout_for_query(self, timeout, datapath):
    """the process when the QUERY from the querier timeout expired."""
    dpid = datapath.id

    hub.sleep(timeout)
    outport = self._to_querier[dpid]['port']

    expired = []
    for dst, info in self._to_hosts[dpid].items():
        if not info['replied']:
            # if no REPORT message sent from any members of
            # the group, remove flow entries about the group and
            # send a LEAVE message if exists.
            self._remove_multicast_group(datapath, outport, dst)
            expired.append(dst)
    # Delete afterwards so the dict is not mutated while iterating.
    for dst in expired:
        del self._to_hosts[dpid][dst]
def _send_loop(self):
    """A loop to proceed periodic BFD packet transmission.

    Sleeps one transmit period per iteration, then applies the RFC 5880
    transmission-suppression rules before actually sending.
    """
    while self._enable_send:
        hub.sleep(self._xmit_period)

        # Send BFD packet. (RFC5880 Section 6.8.7.)
        # Passive role with no remote discriminator learned yet:
        # stay silent until the peer talks first.
        if self._remote_discr == 0 and not self._active_role:
            continue
        # Remote Min RX Interval of 0: the peer does not want to
        # receive periodic control packets.
        if self._remote_min_rx_interval == 0:
            continue
        # Demand mode with the session Up on both ends: periodic
        # transmission stops unless a Poll Sequence is in progress.
        if self._remote_demand_mode and \
                self._session_state == bfd.BFD_STATE_UP and \
                self._remote_session_state == bfd.BFD_STATE_UP and \
                not self._is_polling:
            continue
        self._send()
def _switch_request_async(self, interval):
    """Demonstration loop for asynchronous switch requests.

    Sends an EventSwitchRequest, then deliberately busy-waits (without
    yielding) for half the interval to show that the reply handler only
    runs once this thread yields, then sleeps out the remainder.
    """
    while self.is_active:
        request = event.EventSwitchRequest()
        LOG.debug('switch_request async %s thread(%s)',
                  request, id(hub.getcurrent()))
        self.send_event(request.dst, request)

        start = time.time()
        busy = interval / 2
        i = 0
        # Busy-wait: log once per elapsed wall-clock second, never
        # yielding, for 'busy' seconds in total.
        while i < busy:
            if time.time() > start + i:
                i += 1
                LOG.debug(' thread is busy... %s/%s thread(%s)',
                          i, busy, id(hub.getcurrent()))
        LOG.debug(' thread yield to switch_reply handler. thread(%s)',
                  id(hub.getcurrent()))

        # yield
        hub.sleep(0)

        LOG.debug(' thread get back. thread(%s)',
                  id(hub.getcurrent()))
        hub.sleep(interval - busy)
def _thread_jitter(self, period, jitter=2):
    """Reschedule another thread with a random jitter and check for
    dead threads."""
    delay = period + random.random() * jitter
    hub.sleep(delay)
    # At least one thread needs to run to be able to detect that any
    # of the others has died.
    self._check_thread_exception()
def _send_query(self):
    """ send a QUERY message periodically.

    Builds one IGMP general-query packet up front, then loops forever:
    flood the query, wait QUERY_RESPONSE_INTERVAL for reports, prune
    ports/groups that did not answer, and sleep out the rest of the
    60-second query cycle.
    """
    timeout = 60
    ofproto = self._datapath.ofproto
    parser = self._datapath.ofproto_parser
    # OFPP_NONE (OF1.0) vs OFPP_ANY (OF1.2+) as the packet-out in_port.
    if ofproto_v1_0.OFP_VERSION == ofproto.OFP_VERSION:
        send_port = ofproto.OFPP_NONE
    else:
        send_port = ofproto.OFPP_ANY

    # create a general query.
    res_igmp = igmp.igmp(
        msgtype=igmp.IGMP_TYPE_QUERY,
        maxresp=igmp.QUERY_RESPONSE_INTERVAL * 10,
        csum=0,
        address='0.0.0.0')
    res_ipv4 = ipv4.ipv4(
        total_length=len(ipv4.ipv4()) + len(res_igmp),
        proto=inet.IPPROTO_IGMP, ttl=1,
        src='0.0.0.0',
        dst=igmp.MULTICAST_IP_ALL_HOST)
    res_ether = ethernet.ethernet(
        dst=igmp.MULTICAST_MAC_ALL_HOST,
        src=self._datapath.ports[ofproto.OFPP_LOCAL].hw_addr,
        ethertype=ether.ETH_TYPE_IP)
    res_pkt = packet.Packet()
    res_pkt.add_protocol(res_ether)
    res_pkt.add_protocol(res_ipv4)
    res_pkt.add_protocol(res_igmp)
    res_pkt.serialize()

    flood = [parser.OFPActionOutput(ofproto.OFPP_FLOOD)]

    while True:
        # reset reply status.
        for status in self._mcast.values():
            for port in status.keys():
                status[port] = False

        # send a general query to the host that sent this message.
        self._do_packet_out(
            self._datapath, res_pkt.data, send_port, flood)
        hub.sleep(igmp.QUERY_RESPONSE_INTERVAL)

        # QUERY timeout expired.
        del_groups = []
        for group, status in self._mcast.items():
            del_ports = []
            actions = []
            # Ports that answered keep their output action; silent
            # ports are scheduled for removal.
            for port in status.keys():
                if not status[port]:
                    del_ports.append(port)
                else:
                    actions.append(parser.OFPActionOutput(port))
            if len(actions) and len(del_ports):
                # Membership shrank: reinstall the entry with the
                # surviving ports only.
                self._set_flow_entry(
                    self._datapath, actions, self.server_port, group)
            if not len(actions):
                # Nobody answered: tear the whole group down.
                self._del_flow_entry(
                    self._datapath, self.server_port, group)
                del_groups.append(group)
            if len(del_ports):
                for port in del_ports:
                    self._del_flow_entry(self._datapath, port, group)
            for port in del_ports:
                del status[port]
        for group in del_groups:
            del self._mcast[group]

        rest_time = timeout - igmp.QUERY_RESPONSE_INTERVAL
        hub.sleep(rest_time)
def _monitor(self):
    """Poll every known datapath for statistics every 10 seconds.

    Iterates over a snapshot of self.datapaths: the connect/disconnect
    event handlers can add or remove entries whenever
    _request_stats() yields to the hub (socket sends may switch
    greenthreads), and mutating the live dict mid-iteration would
    raise 'dictionary changed size during iteration'.
    """
    while True:
        for dp in list(self.datapaths.values()):
            self._request_stats(dp)
        hub.sleep(10)
def _recv_loop(self):
    """Receive loop for one switch connection.

    Reads OpenFlow messages from the socket, parses them and
    dispatches the resulting events to observers and the handlers
    registered for the current dispatcher state, until the connection
    dies or the state machine reaches DEAD_DISPATCHER.
    """
    buf = bytearray()
    count = 0
    min_read_len = remaining_read_len = ofproto_common.OFP_HEADER_SIZE

    while self.state != DEAD_DISPATCHER:
        try:
            read_len = min_read_len
            if remaining_read_len > min_read_len:
                # We know how much of the current message is still
                # missing; ask for all of it in one recv.
                read_len = remaining_read_len
            ret = self.socket.recv(read_len)
        except SocketTimeout:
            continue
        except ssl.SSLError:
            # eventlet throws SSLError (which is a subclass of IOError)
            # on SSL socket read timeout; re-try the loop in this case.
            continue
        except (EOFError, IOError):
            break

        if not ret:
            # Peer closed the connection.
            break

        buf += ret
        buf_len = len(buf)

        # Consume every complete message currently in the buffer.
        while buf_len >= min_read_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            if msg_len < min_read_len:
                # Someone isn't playing nicely; log it, and try something sane.
                LOG.debug("Message with invalid length %s received from switch at address %s",
                          msg_len, self.address)
                msg_len = min_read_len
            if buf_len < msg_len:
                # Incomplete message: remember how many bytes are
                # still missing and go back to recv().
                remaining_read_len = (msg_len - buf_len)
                break

            msg = ofproto_parser.msg(
                self, version, msg_type, msg_len, xid, buf[:msg_len])
            # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                # Observers first, then state-filtered handlers.
                ev = ofp_event.ofp_msg_to_ev(msg)
                self.ofp_brick.send_event_to_observers(ev, self.state)

                def dispatchers(x):
                    return x.callers[ev.__class__].dispatchers

                handlers = [handler for handler
                            in self.ofp_brick.get_handlers(ev)
                            if self.state in dispatchers(handler)]
                for handler in handlers:
                    handler(ev)

            buf = buf[msg_len:]
            buf_len = len(buf)
            remaining_read_len = min_read_len

            # We need to schedule other greenlets. Otherwise, os_ken
            # can't accept new switches or handle the existing
            # switches. The limit is arbitrary. We need the better
            # approach in the future.
            count += 1
            if count > 2048:
                count = 0
                hub.sleep(0)