def switch_features_handler(self, ev):
    datapath = ev.msg.datapath
    self.datapaths[datapath.id] = datapath
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser

    # install table-miss flow entry
    #
    # We specify NO BUFFER to max_len of the output action due to
    # OVS bug. At this moment, if we specify a lesser number, e.g.,
    # 128, OVS will send Packet-In with invalid buffer_id and
    # truncated packet data. In that case, we cannot output packets
    # correctly. The bug has been fixed in OVS v2.1.0.
    match = parser.OFPMatch()
    actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                      ofproto.OFPCML_NO_BUFFER)]
    self.add_flow(datapath, 0, match, actions)
    self.logger.debug("datapath id %s", datapath.id)

    match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP,
                            ipv4_dst=ipAdd(datapath.id))
    actions = [parser.OFPActionOutput(1)]
    self.add_flow(datapath, 1, match, actions)

    hub.patch(socket=True, thread=True, os=True, select=True)
    if len(self.datapaths) == len(self.topo):
        # All switches have connected.
        # Can call to install new path from here.
        hub.spawn_after(10, self._cyclic_update)
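The handler above calls self.add_flow, which lies outside the snippet. A minimal sketch of a compatible implementation, following the standard Ryu OpenFlow 1.3 pattern; the signature is inferred from the two call sites above, so treat it as an assumption:

def add_flow(self, datapath, priority, match, actions):
    """Install one flow entry that applies `actions` (OpenFlow 1.3 style)."""
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                         actions)]
    mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                            match=match, instructions=inst)
    datapath.send_msg(mod)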
def sync_ovs_from_tunnels(self, mac_to_tunswitch: MACToTunnelAndSwitch) -> None:
    logger.warning(
        "negotiator tunnels are:\n%s\n%s\n%s",
        "-" * 40,
        "\n\n".join(
            f"- {mac} {mac_to_tun_name(mac)} {swi.hostname}:\n |--{tun}\n |--{swi}"
            for mac, (tun, swi) in sorted(mac_to_tunswitch.items())),
        "-" * 40,
    )
    tun_to_tunswitch: TUNToTunnelAndSwitch = {
        mac_to_tun_name(mac): (tunnel, switch)
        for mac, (tunnel, switch) in mac_to_tunswitch.items()
    }
    # TODO clean the dict? Eventually it might eat up a lot of memory.
    self._tun_to_dest_mac.update(
        (tun, tunnel.dst_mac)
        for tun, (tunnel, _) in tun_to_tunswitch.items())
    self._update_relay_mac_in_ovs_sync(mac_to_tunswitch)
    self._add_ports_to_bridge(tun_to_tunswitch)
    self.flow_hysteresis.update(tun_to_tunswitch)
    self._remove_extraneous_ports_in_ovs_sync(tun_to_tunswitch)
    # Create a new eventlet green thread (akin to asyncio.ensure_future).
    hub.spawn_after(
        self.flow_hysteresis.hysteresis_seconds + 0.1,
        self._update_port_flows,
        list(tun_to_tunswitch.keys()),
    )
def switch_features_handler(self, event):
    dp = event.msg.datapath
    print("switch is up with id ", dp.id)
    Collector.datapaths[dp.id] = dp
    Collector.port_info[dp.id] = {}
    Collector.topo[dp.id] = {}
    Collector.flow_entry[dp.id] = {}

    def send_port_desc_stats_request(datapath):
        ofp_parser = datapath.ofproto_parser
        req = ofp_parser.OFPPortDescStatsRequest(datapath, 0)
        datapath.send_msg(req)

    # Postponed for 1 second; wait till all ports go up.
    hub.spawn_after(1, send_port_desc_stats_request, dp)

    ofproto = dp.ofproto
    parser = dp.ofproto_parser
    actions = [parser.OFPActionOutput(port=ofproto.OFPP_CONTROLLER,
                                      max_len=ofproto.OFPCML_NO_BUFFER)]
    inst = [parser.OFPInstructionActions(type_=ofproto.OFPIT_APPLY_ACTIONS,
                                         actions=actions)]
    mod = parser.OFPFlowMod(datapath=dp, priority=0,
                            match=parser.OFPMatch(), instructions=inst)
    dp.send_msg(mod)

    req = parser.OFPSetConfig(dp, ofproto_v1_3.OFPC_FRAG_NORMAL, 1500)
    dp.send_msg(req)
def connection_handler(self, fd):
    self.logger.info(
        "------------------------------in connection handler----------------------"
    )
    while True:
        data = fd.recv(24)
        # self.logger.info("here is the data in connection handler" + str(binascii.hexlify(data)))
        if len(data) == 0:
            fd.close()
            return
        self.start_receiving_update_time = time() * 1000
        msg_len, sending_time, self.sw_to_ctrl_delay = struct.unpack(
            'Ldd', data)
        self.logger.info("length: %d" % msg_len)
        self.logger.debug("length: %d" % msg_len)
        pickle_msg = recv_size(fd, msg_len)
        # self.logger.info("pickle_msg_by_recv_size in conn handler" + str(pickle_msg))
        self.logger.debug("recv length: %d" % len(pickle_msg))
        msg = pickle.loads(pickle_msg)
        # self.logger.info("msg_by_recv_size in conn handler" + str(msg))
        # self.logger.info("Receive message from global ctrl %s ms after sending (at %s)"
        #                  % (str(self.current_time_to_transfer_install_msg), time_in_date))
        self.logger.debug(msg)
        hub.spawn_after(self.sw_to_ctrl_delay / 1000,
                        self.process_install_msg, msg)
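recv_size is an external helper not shown in this snippet; from the call site it presumably loops until exactly msg_len bytes have been read. A hedged sketch under that assumption (only the name comes from the source):

def recv_size(sock, size):
    """Read exactly `size` bytes from `sock`, tolerating short reads."""
    chunks = []
    remaining = size
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            # Peer closed the connection before the full message arrived.
            raise EOFError("socket closed with %d bytes outstanding" % remaining)
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)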
def process_ezsegway_notification_msg(self, pkt, datapath, receiving_time=None):
    # NOTE: a default of time() would be evaluated once at import time,
    # not per call; resolve "now" at call time instead.
    if receiving_time is None:
        receiving_time = time()
    agg_msg = pickle.loads(pkt.protocols[-1])
    receiving_time *= 1000
    agg_msg.receiving_time = receiving_time
    self.logger.info(
        "msg %s is transferred in: %s ms" %
        (str(agg_msg), agg_msg.receiving_time - agg_msg.sending_time))
    # if self.receiving_update_time is not None:
    #     elapsed_time = agg_msg.receiving_time - self.receiving_update_time
    #     self.logger.info("msg %s is received at %s ms since starting" %
    #                      (str(agg_msg), str(elapsed_time)))
    self.logger.debug("receive messages: %s", agg_msg)
    notification_msgs = self.split_msgs(agg_msg)
    self.handler.scheduler.trace.add_trace_for_msgs(notification_msgs)
    update_info_s = []
    finished = 0
    for notification_msg in notification_msgs:
        if finished != 1:
            update_infos, finished = self.handler.do_handle_notification(
                notification_msg)
        else:
            update_infos, _ = self.handler.do_handle_notification(
                notification_msg)
        self.logger.debug("finished: %s for msg: %s" % (finished, notification_msg))
        update_info_s.extend(update_infos)
    self.logger.debug(update_info_s)
    self.install_updates(datapath, update_info_s)
    if finished == 1:
        # hub.spawn_after(global_vars.sw_to_ctrl_delays[self.switch_id]/1000, self.send_finish_msg)
        hub.spawn_after(self.sw_to_ctrl_delay / 1000, self.send_finish_msg)
def start_path_recomputation(self, req, **kwargs):
    multipath_controller = self.mp_instance
    hub.spawn_after(1, multipath_controller.multipath_computation)
    return Response(content_type='text/html',
                    body='Path recomputation started!\n')
def _link_add_handler(self, ev):
    s1 = ev.link.src
    s2 = ev.link.dst
    adjacency[s1.dpid][s2.dpid] = s1.port_no
    adjacency[s2.dpid][s1.dpid] = s2.port_no
    # print s1.dpid, s2.dpid
    hub.spawn_after(1, monitor_link, s1, s2)
def on_port_config(self, evt):
    """
    Process PortConfig event
    evt.msg: instance of pb.PortConfig
    """
    msg = evt.msg
    if fibclog.dump_msg():
        _LOG.debug("%s", msg)

    try:
        if msg.cmd == pb.PortConfig.ADD:
            self._on_port_config_add(msg)
        elif msg.cmd == pb.PortConfig.MODIFY:
            self._on_port_config_add(msg)
        elif msg.cmd == pb.PortConfig.DELETE:
            hub.spawn_after(_PORT_CONFIG_DELETE_DELAY_SEC,
                            self._on_port_config_del, msg)
        else:
            pass
    except KeyError:
        _LOG.warn("vm port not registered. re_id:%s, ifname:%s",
                  msg.re_id, msg.ifname)
    except Exception as expt:
        _LOG.exception(expt)
def do_when_finish(self, encounter_deadlock):
    finished_time = time() * 1000
    finish_time_from_start = finished_time - self.current_start_time
    finish_time_from_last_sending = finished_time - self.current_finish_sending_time
    total_sending_time = self.current_finish_sending_time - self.current_sending_time
    update_only_time = (finish_time_from_start - self.current_dependency_graph_cal)
    max_delay = max(global_vars.sw_to_ctrl_delays)
    self.handler.scheduler.trace.convert_to_time_from_starting(
        self.current_finish_sending_time,
        self.current_sending_time - self.current_start_time + max_delay)
    log.info("test-%d: %f\t%d\t%f\t%f\t%f\t%f\t%d\t%s" %
             (self.test_number - 1, finish_time_from_start, 0,
              self.current_dependency_graph_cal, update_only_time,
              finish_time_from_last_sending, total_sending_time,
              self.handler.message_count * 2, encounter_deadlock))
    log.info("test-%d-new_path: %s" %
             ((self.test_number - 1),
              self.handler.scheduler.trace.times_using_new_path_to_string()))
    # log.info("calculating time: %d ms" % self.current_dependency_graph_cal)
    # log.info("finished after %s ms from sending" % (finish_time_from_sending * 1000))
    if self.test_number < 1:
        hub.spawn_after(1, self._cyclic_update)
    else:
        os.kill(os.getpid(), signal.SIGTERM)
    return
def __init__(self, *args, **kwargs):
    super(LocalController, self).__init__(*args, **kwargs)
    logger.init('./localhapi.log', logging.INFO)
    self.logger = logger.getLogger('local', logging.INFO)
    self.local_id = 1  # remember to change me!
    self.topo = {}
    self.time = 0
    self.datapaths = {}
    self.neighbors = {2: {1: 2}}  # port:dpid
    self.hosts = {2: {"10.0.0.3": 1}}  # ip:port
    # self.flows_final = False
    self.packts_buffed = 0  # temp for update trigger
    self.pool = redis.ConnectionPool(host='localhost', port=6379)
    self.rds = redis.Redis(connection_pool=self.pool)
    self.packets_to_save = []
    # flowdes0 = FlowDes("10.0.0.1","10.0.0.2",5001,[],[0,1,2],consts.BUF,'udp')
    # flowdes0.up_step = consts.BUF_ADD
    # self.flows = {"10.0.0.110.0.0.25001":flowdes0}
    # flow_id(src,dst,dst_port): {src, dst, dst_port, update_type:"BUF", xids=[], doing="BUF_DEL"}
    self.flows = {}
    self.xid_find_flow = {}  # xid:[flow_id]
    self.conn_with_global = socket.socket()
    hub.spawn_after(5, self.conn_with_global.connect, ('127.0.0.1', 9999))
    hub.spawn(self.run_server)
def signal_failure(self, *args):
    self.logger.critical(*args)
    self.change_status('failed')
    try:
        requests.get(CONF['webhook_failed'])
    except requests.ConnectionError:
        pass
    hub.spawn_after(1, TipsyController.do_exit)
def _handle_barrier(self, ev):
    dpid = ev.msg.datapath.id
    # self.logger.info("barrier from switch %d, invoke at time: %s"
    #                  % (dpid, (time() - self.current_start_time) * 1000))
    delay = global_vars.sw_to_ctrl_delays[dpid - 1] * self.rng.uniform(0.95, 1.1)
    latency = 2 * (delay / 1000)
    self.logger.debug("latency: %s ms" % str(latency * 1000))
    # hub.spawn_after(latency, self._progress_update, dpid)
    hub.spawn_after(latency, self.handler.do_handle_barrier_from_sw,
                    dpid - 1, self.call_process_update_info,
                    self.call_to_send_barrier, self.do_when_finish)
def switch_features_handler(self, ev):
    datapath = ev.msg.datapath
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    if datapath.id == 1:
        def delay():
            self.next_experiment(datapath)
        hub.spawn_after(20, delay)
def __init__(self, *args, **kwargs):
    super(MultipathControllerApp, self).__init__(*args, **kwargs)
    now = int(datetime.now().timestamp())
    self.multipath_report_folder = "multipath-%d" % now

    # If True, multipath functions are enabled; otherwise all switches work as L2 switches.
    self.multipath_enabled = True
    # If True, flows generated by this class are reported; used for testing.
    self.watch_generated_flows = False
    # If watch_generated_flows is True, defines the flow statistics save period.
    self.flow_statistics_save_period = 100
    # (sec.) The flow is checked after activation_delay to activate multipath.
    self.activation_delay = 1
    # After activation_delay, multipath starts if the flow packet count is greater than min_packet_in_period.
    self.min_packet_in_period = 10

    # If multipath_enabled is set, these parameters configure the multipath manager.
    self.multipath_params = {
        'forward_with_random_ip': True,   # random ip generation is activated
        'random_ip_for_each_hop': True,   # if False, the first node generates the random ip
        'random_ip_subnet': "10.93.0.0",  # random ip subnet, default mask is 255.255.0.0
        'max_random_paths': 200,          # maximum random paths used in the multipath manager
        'max_installed_path_count': 2,    # maximum flow count installed in a switch for each path
        'max_time_period_in_second': 2,   # random path expire time in seconds
        'lowest_flow_priority': 20000,    # minimum flow priority in random path flows
        'report_folder': self.multipath_report_folder,
    }
    logger.warning("............................................................................")
    logger.warning("SDN CONTROLLER started - multipath enabled: %s" % self.multipath_enabled)
    logger.warning("Watch_generated_flows: %s !!! All flow statistics will be saved." % self.watch_generated_flows)
    if self.multipath_enabled:
        logger.warning("..... multipath starts if activation_delay: %s" % self.activation_delay)
        logger.warning("..... multipath starts if min_packet_in_period: %s" % self.min_packet_in_period)
        multipath_manager_params = json.dumps(self.multipath_params, indent=4, separators=(',', '= '))
        logger.warning("..... multipath manager params: \n%s" % multipath_manager_params)
    logger.warning("............................................................................")
    self.sw_cookie = defaultdict()
    self.unused_cookie = 0x0010000
    self.flow_managers = defaultdict()
    self.datapath_list = {}
    self.flows = defaultdict()
    self.topology = nx.DiGraph()
    self.hosts = {}
    self.host_ip_map = {}
    self.mac_to_port = {}
    self.no_flood_ports = None
    self.lock = RLock()
    self.statistics = defaultdict()
    self.statistics["flows"] = defaultdict()
    self.statistics["created-flow-count"] = 0
    self.statistics["removed-flow-count"] = 0
    if self.watch_generated_flows:
        hub.spawn_after(5, self._save_statistics_periodically)
def run(self):
    '''
    Called to start the monitoring/computation in the controller.
    It can be called with a GET to the API: /multipath/start_path_computation
    '''
    # Network monitor module
    hub.spawn_after(3, self.network_monitor)
    # Multipath computation module
    hub.spawn_after(5, self.multipath_computation)
def run(self):
    '''
    Called to start the monitoring/computation in the controller.
    It can be called with a GET to the API: /multipath/start_path_computation
    '''
    # Network monitor module
    self.monitoringhub = hub.spawn(self.network_monitor)
    # Multipath computation module
    hub.spawn_after(5, self.multipath_computation)
def process_install_msg(self, msg):
    # All switch ids coming from the handler are zero-based.
    self.finish_receiving_update_time = time() * 1000
    self.computation_time_in_ctrl = msg.computation_time_in_ctrl
    update_infos, finished, finishing_computing_time = self.handler.do_install_update(msg)
    self.finishing_computing_time = finishing_computing_time - self.finish_receiving_update_time
    self.logger.debug("update_info: %s in switch having id: %d. Finished = %s"
                      % (update_infos, self.switch_id, finished))
    self.install_updates(self.datapath, update_infos)
    if finished == 1:
        # hub.spawn_after(global_vars.sw_to_ctrl_delays[self.switch_id]/1000, self.send_finish_msg)
        hub.spawn_after(self.sw_to_ctrl_delay / 1000, self.send_finish_msg)
def start(self, interval, now=True):
    """Start running the pre-set function every `interval` seconds."""
    if interval < 0:
        raise ValueError('interval must be >= 0')

    if self._running:
        self.stop()

    self._running = True
    self._interval = interval
    if now:
        self._self_thread = hub.spawn_after(0, self)
    else:
        self._self_thread = hub.spawn_after(self._interval, self)
def _handleTimeout(self):
    self.logger.debug('Timeout occurred for ' + str(self))
    self.state = self.STATE_TIMEOUT_TIME_WAIT
    if self.quietTimer:
        self.quietTimer.cancel()
    self.quietTimer = hub.spawn_after(self.QUIET_TIMER,
                                      self._handleQuietTimerTimeout)
def _handleClosing(self, flags, from_client, p, seq, ack):
    if self.garbageTimer is None:
        self.garbageTimer = hub.spawn_after(self.GARBAGE_TIMER, self._handleGarbage)

    if flags & tcp.TCP_RST:
        self._handleReset()
        return

    if from_client:
        if flags & tcp.TCP_FIN:
            self.client_state = self.CLOSING_FIN_SENT
            self.client_fin_ack = seq + len(p) + 1 if p else seq + 1
        if self.server_state == self.CLOSING_FIN_SENT and ack == self.server_fin_ack and flags & tcp.TCP_ACK:
            self.server_state = self.STATE_CLOSED
            if self.client_state == self.STATE_CLOSED:
                self.state = self.STATE_TIME_WAIT
    else:
        if flags & tcp.TCP_FIN:
            self.server_state = self.CLOSING_FIN_SENT
            self.server_fin_ack = seq + len(p) + 1 if p else seq + 1
        if self.client_state == self.CLOSING_FIN_SENT and ack == self.client_fin_ack and flags & tcp.TCP_ACK:
            self.client_state = self.STATE_CLOSED
            if self.server_state == self.STATE_CLOSED:
                self.state = self.STATE_TIME_WAIT
def __init__(self, *args, **kwargs):
    super(HostDiscovery, self).__init__(*args, **kwargs)
    self.datapaths = {}
    self.is_active = True
    self.threads.append(hub.spawn_after(self.START_AFTER, self._arp_loop))
def __init__(self, *args, **kwargs):
    super(Bgp_sender, self).__init__(*args, **kwargs)
    self.counter = 0
    self.uri_enabled_capability = False
    self.peer = {}
    self.client = MongoClient()
    self.db = self.client.connections
    self.thread = hub.spawn_after(0, self.bgp_sender)
def _handleReset(self):
    self.state = self.STATE_CLOSED_RESET_TIME_WAIT
    if self.timeoutTimer is not None:
        self.timeoutTimer.cancel()
    if self.quietTimer is not None:
        self.quietTimer.cancel()
    self.quietTimer = hub.spawn_after(self.RESET_TIMER,
                                      self._handleQuietTimerTimeout)
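The two TCP-session handlers above lean on the fact that hub.spawn_after returns a handle with a cancel() method (in the eventlet-backed Ryu hub this is an eventlet timer/green thread; cancel() only prevents the call if it has not started running yet). A self-contained sketch of that cancel-and-rearm pattern, with all names hypothetical:

from ryu.lib import hub

class RearmableTimer(object):
    """One-shot timer that can be cancelled and re-armed, as in the
    quietTimer/timeoutTimer handling above."""

    def __init__(self, delay_sec, callback):
        self.delay_sec = delay_sec
        self.callback = callback
        self._handle = None

    def rearm(self):
        if self._handle is not None:
            # Drop the pending call before scheduling a new one.
            self._handle.cancel()
        self._handle = hub.spawn_after(self.delay_sec, self.callback)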
def __init__(self, *args, **kwargs):
    super(LocalController, self).__init__(*args, **kwargs)
    logger.init('./localhapi.log', logging.INFO)
    self.local_id = int(os.environ.get("LOCAL_ID", 0))
    topofile = os.environ.get("TOPO", './data/topo.intra')
    local_dpfile = os.environ.get('LOCAL_DP', './data/local_dp.intra')
    dp_hostfile = os.environ.get('DP_HOST', './data/dp_host.intra')
    self.buf_size = float(os.environ.get('BUF_SIZE', 1))
    # self.logger.info("inited self.buf_size" + str(self.buf_size))
    dp_tcamfile = os.environ.get('TCAM_SIZE', './data/dp_tcam.intra')
    self.redis_port = os.environ.get('REDIS_PORT', 6379)
    self.logger = logger.getLogger('local' + str(self.local_id), logging.INFO)
    self.topo = None
    # self.topo_input = os.environ.get("TOPO_INPUT", 1)
    # self.local_id = 0  # remember change me!
    self.neighbors = get_local_neighbors(topofile, local_dpfile, self.local_id)
    self.hosts = get_local_hosts(self.neighbors, dp_hostfile)
    self.dp_tcam_size = self.read_dp_tcam(dp_tcamfile)
    self.logger.info(self.neighbors)
    self.logger.info(self.hosts)
    self.time = 0
    self.datapaths = {}
    # self.neighbors = {1: {2: 3}}  # dpid:port
    # self.hosts = {1: {"10.0.0.1": 1,
    #                   "10.0.0.2": 2}}  # ip:port
    self.packts_buffed = 0  # temp for update trigger
    self.pool = redis.ConnectionPool(host='localhost', port=self.redis_port)
    self.rds = redis.Redis(connection_pool=self.pool)
    self.packets_to_save = []
    # flowdes0 = FlowDes("10.0.0.1","10.0.0.2",5001,[],[0,1,2],consts.BUF,'udp')
    # flowdes0.up_step = consts.BUF_ADD
    # self.flows = {"10.0.0.110.0.0.25001":flowdes0}
    # flow_id(src,dst,dst_port): {src, dst, dst_port, update_type:"BUF", xids=[], doing="BUF_DEL"}
    self.flows = {}
    self.xid_find_flow = {}  # xid:[flow_id]
    self.conn_with_global = socket.socket()
    hub.spawn_after(5, self.conn_with_global.connect, ('127.0.0.1', 9999))
    hub.spawn(self.run_server)
def __init__(self, **kwargs):
    self.sessions = []
    self.garbageLoop = hub.spawn_after(1, self._garbageCollector)
    super(ServiceEngine, self).__init__(**kwargs)
    self.type = 'se'
    self.handover = None
    self.rsttcp = None
    self.lock = Semaphore()
def _spawn_activity_after(self, seconds, activity, *args, **kwargs):
    self._validate_activity(activity)

    # Schedule a new greenthread to spawn after the requested delay.
    greenthread = hub.spawn_after(seconds, activity.start, *args, **kwargs)
    self._child_thread_map[activity.name] = greenthread
    self._child_activity_map[activity.name] = activity
    return greenthread
def __init__(self, *args, **kwargs):
    super(TopoSpec, self).__init__(*args, **kwargs)
    app_manager.require_app("ryu.topology.switches")
    CONF.register_opt(StrOpt("spec_path", default="spec/mininet.yml"))
    self.graph = nx.Graph()
    self.spec = Spec.from_yaml(CONF.spec_path)
    self._thread = hub.spawn_after(3, self._compare_spec)
def reset(self):
    """Skip the next iteration and reset the timer."""
    if self._self_thread is not None:
        # Cancel the currently scheduled call.
        self._self_thread.cancel()
        self._self_thread = None

    # Schedule a new call.
    self._self_thread = hub.spawn_after(self._interval, self)
def __init__(self, *args, **kwargs):
    super(ShortestForwarding, self).__init__(*args, **kwargs)
    self.name = 'shortest_forwarding'
    self.awareness = kwargs["network_awareness"]
    self.monitor = kwargs["network_monitor"]
    self.delay_detector = kwargs["network_delay_detector"]
    self.datapaths = {}
    self.weight = self.WEIGHT_MODEL[CONF.weight]
    self.port_mac_dic = {}
    self.send_ra_thread = hub.spawn_after(10, self.send_ra)
def set_timeout(self, timeout_sec, switch, barrier_xid, msg_xids):
    """
    Spawn a timeout handler after timeout_sec seconds to clean up any
    state associated with the request.
    """
    def _handle_timeout():
        return self._handle_timeout(switch, barrier_xid, msg_xids)

    # Spawn the timeout func to ensure cleanup occurs.
    self._timeout_thread = hub.spawn_after(timeout_sec, _handle_timeout)
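For comparison, hub.spawn_after forwards extra positional arguments to the callable (several snippets here rely on that), so the same scheduling can be written without the inner closure; an equivalent sketch of the method body under that assumption:

def set_timeout(self, timeout_sec, switch, barrier_xid, msg_xids):
    # Same effect as the closure version: spawn_after passes the extra
    # positional arguments through to self._handle_timeout.
    self._timeout_thread = hub.spawn_after(
        timeout_sec, self._handle_timeout, switch, barrier_xid, msg_xids)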
def __init__(self, *args, **kwargs):
    super(Main, self).__init__(*args, **kwargs)
    print("SNHx is running...")

    # configuration verification error
    if Config.service == 'L2_FABRIC' and Config.forwarding == 'MPLS':
        print('Wrong Configuration: L2_FABRIC + MPLS')
        sys.exit()

    self.thread = {}
    self.thread['cli_thread'] = hub.spawn(self._cli)
    self.thread['routing_thread'] = hub.spawn_after(11, self._routing)
    self.thread['monitoring_thread'] = hub.spawn_after(11, self._stats_request)
    if Config.service == 'MPLS':
        pass
        # self.thread['arp_req'] = hub.spawn_after(10, self._arp_req)

    # run wsgi
    wsgi = kwargs['wsgi']
    wsgi.register(SNHxAPI, {'SNHxAPI': self})
def _garbageCollector(self):
    self.lock.acquire()
    for sess in self.sessions[:]:  # type: TCPSesssion
        if sess.state in [TCPSesssion.STATE_CLOSED,
                          TCPSesssion.STATE_TIMEOUT,
                          TCPSesssion.STATE_CLOSED_RESET,
                          TCPSesssion.STATE_HANDOVERED]:
            self.logger.info('Removing finished session ' + str(sess))
            self.sessions.remove(sess)
    self.lock.release()
    self.garbageLoop = hub.spawn_after(1, self._garbageCollector)
def reset_neighbor(ip_address):
    neighs_conf = CORE_MANAGER.neighbors_conf
    neigh_conf = neighs_conf.get_neighbor_conf(ip_address)
    # Check if we have a neighbor with the given IP.
    if not neigh_conf:
        raise RuntimeConfigError('No neighbor configuration found for given'
                                 ' IP: %s' % ip_address)
    # If the neighbor is enabled, we disable it.
    if neigh_conf.enabled:
        # Disable the neighbor to close the existing session.
        neigh_conf.enabled = False
        # Re-enable the neighbor after NEIGHBOR_RESET_WAIT_TIME.
        # This API works asynchronously, so it is recommended to verify
        # later that the neighbor was really reset.
        def up():
            neigh_conf.enabled = True
        hub.spawn_after(NEIGHBOR_RESET_WAIT_TIME, up)
    else:
        raise RuntimeConfigError('Neighbor %s is not enabled, hence cannot'
                                 ' reset.' % ip_address)
    return True
def _repeat_handover_msg_to_vnf(self, dp, handover, vnf, handover_msg):
    """
    Callback of the message repeat timer, which sends a message to the VNF
    and reschedules itself after REPEAT_TIME.
    :param dp: Switch datapath
    :param handover: Handover in question
    :param vnf: Destination VNF
    :param handover_msg: Message to repeat
    """
    self.send_packet_to_vnf(dp, vnf, handover_msg)
    timer = hub.spawn_after(REPEAT_TIME, self._repeat_handover_msg_to_vnf,
                            dp, handover, vnf, handover_msg)
    handover.repeat_timers[self.position][handover.dst_vnf == vnf] = timer
def __init__(self, *args, **kwargs):
    super(SwitchLinkTrustEvaluator, self).__init__(*args, **kwargs)
    self.name = 'trust_evaluator'
    # Store the connected switches.
    self.datapaths = {}  # TODO: use only datapaths_stats to track alive switches
    # Dict of datapath statistics.
    self.datapaths_stats = {}  # TODO: thread-safe needed?
    # Link list.
    self.link_list = {}
    # Counter for pending statistics requests.
    self.pending_stats_req = 0  # TODO: thread-safe needed?
    # Bool to check if this is the first statistics request.
    self.is_first_stat_req = True
    self.threads.append(
        hub.spawn_after(self.INIT_TIME, self._stats_request_loop))
def __call__(self):
    if self._running:
        # Schedule the next iteration of the call.
        self._self_thread = hub.spawn_after(self._interval, self)

    self._funct(*self._args, **self._kwargs)
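The start, reset, and __call__ snippets above appear to belong to one self-rescheduling timer helper. A minimal sketch of how the pieces fit together; the class name PeriodicTimer and the stop method are assumptions:

from ryu.lib import hub

class PeriodicTimer(object):
    """Calls `funct` every `interval` seconds by re-arming itself."""

    def __init__(self, funct, *args, **kwargs):
        self._funct = funct
        self._args = args
        self._kwargs = kwargs
        self._running = False
        self._interval = 0
        self._self_thread = None

    def __call__(self):
        if self._running:
            # Re-arm first so the period is not skewed by the call itself.
            self._self_thread = hub.spawn_after(self._interval, self)
        self._funct(*self._args, **self._kwargs)

    def start(self, interval, now=True):
        self._running = True
        self._interval = interval
        self._self_thread = hub.spawn_after(0 if now else interval, self)

    def stop(self):
        self._running = False
        if self._self_thread is not None:
            self._self_thread.cancel()
            self._self_thread = None

# Usage: timer = PeriodicTimer(print, "tick"); timer.start(5)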
def _spawn_after(self, name, seconds, callable_, *args, **kwargs):
    self._validate_callable(callable_)
    greenthread = hub.spawn_after(seconds, callable_, *args, **kwargs)
    self._child_thread_map[name] = greenthread
    return greenthread
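Across all of these examples the contract is the same: hub.spawn_after(seconds, callable_, *args, **kwargs) schedules one deferred call on the eventlet hub and returns a cancellable handle. A smallest-possible runnable sketch (the app name DemoApp is illustrative):

from ryu.base import app_manager
from ryu.lib import hub

class DemoApp(app_manager.RyuApp):
    def __init__(self, *args, **kwargs):
        super(DemoApp, self).__init__(*args, **kwargs)
        # Fires once, two seconds after the app starts.
        self._hello = hub.spawn_after(2, self.logger.info, "deferred hello")
        # Scheduled, then cancelled before it can run.
        doomed = hub.spawn_after(60, self.logger.info, "never logged")
        doomed.cancel()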