def __init__(self, xml_network_number):
    """Build scheduler state from the network XML and open the control channel.

    @param xml_network_number: id of the network entry to load; stringified
        before being handed to XMLParser.
    """
    self.gm = GraphMan()
    # The net XML lives under ext/ when this code is not launched as the
    # scheduler process itself (is_scheduler_run is defined outside this view).
    if is_scheduler_run:
        self.xml_parser = XMLParser("net_xmls/net_2p_stwithsingleitr.xml", str(xml_network_number))
    else:
        self.xml_parser = XMLParser( "ext/net_xmls/net_2p_stwithsingleitr.xml", str(xml_network_number))
    self.init_network_from_xml()
    # Useful state variables
    self.last_sch_req_id_given = -1
    # Starts one below base_sport so the first increment presumably yields
    # base_sport itself — TODO confirm against the port-allocation helper.
    self.last_tp_dst_given = info_dict['base_sport'] - 1
    # Scher state dicts
    self.N = 0 #num_activesessions
    self.sessions_beingserved_dict = {}
    self.sessions_pre_served_dict = {}
    self.sid_res_dict = {}
    self.actual_res_dict = self.gm.give_actual_resource_dict()
    # for perf plotting
    self.perf_plotter = PerfPlotter(self.actual_res_dict)
    # for control_comm: register the scheduler<->actuator TCP control pair.
    self.cci = ControlCommIntf()
    self.cci.reg_commpair(sctag='scher-acter',
                          proto='tcp',
                          _recv_callback=self._handle_recvfromacter,
                          s_addr=info_dict['lacter_addr'],
                          c_addr=info_dict['acterl_addr'])
def __init__(self, xml_net_num, sching_logto):
    """Build scheduler state from the net XML and open the control channel.

    @param xml_net_num: id of the network entry to load from the XML file.
    @param sching_logto: scheduling-log destination; stored but not
        validated here — TODO confirm expected values ('console'/'file'?).
    """
    logging.basicConfig(level=logging.DEBUG)
    self.sching_logto = sching_logto
    # The net XML lives under ext/ when this code is not launched as the
    # scheduler process itself (is_scheduler_run is defined outside this view).
    if is_scheduler_run:
        self.xml_parser = XMLParser("net_2p_stwithsingleitr.xml", str(xml_net_num))
    else:
        self.xml_parser = XMLParser("ext/net_2p_stwithsingleitr.xml", str(xml_net_num))
    self.gm = GraphMan()
    self.init_network_from_xml()
    # Useful state variables
    self.last_sching_id_given = -1
    self.last_sch_req_id_given = -1
    # Starts one below base_sport so the first increment presumably yields
    # base_sport itself — TODO confirm against the port-allocation helper.
    self.last_tp_dst_given = info_dict['base_sport']-1
    # Scher state dicts
    self.num_dstusers = 0
    self.users_beingserved_dict = {} #user_ip:{'gw_dpid':<>,'gw_conn_port':<> ...}
    #
    self.N = 0 #num_activesessions
    self.alloc_dict = None
    self.sessions_beingserved_dict = {}
    self.sessions_pre_served_dict = {}
    self.sid_res_dict = {}
    self.actual_res_dict = self.gm.give_actual_resource_dict()
    # for perf plotting
    self.perf_plotter = PerfPlotter(self.actual_res_dict)
    # for control_comm: register the scheduler<->actuator TCP control pair.
    self.cci = ControlCommIntf()
    self.cci.reg_commpair(sctag = 'scher-acter',
                          proto = 'tcp',
                          _recv_callback = self._handle_recvfromacter,
                          s_addr = info_dict['lacter_addr'],
                          c_addr = info_dict['acterl_addr'] )
def _readConfig(self):
    """Read the configuration file into the nested options mapping.

    Parses ``self._configFile`` (if set) with XMLParser and stores every
    <section>/<option> pair as
    ``self._options[section.pname][option.pname] = option.pvalue``.
    A section dict is only created when the section has at least one option,
    matching the original behavior.
    """
    # `is not None` instead of `!= None`; dict.setdefault replaces the
    # deprecated has_key() double-lookup (has_key was removed in Python 3).
    if self._configFile is not None:
        self._config = XMLParser()
        self._config.readfile(self._configFile)
        for section in self._config.xsd7config[0].xsection:
            for option in section.xoption:
                self._options.setdefault(section.pname, {})[option.pname] = option.pvalue
def parse(self, xml):
    """Parse a gml:TimePeriod element into a qgstime.QgsTimePeriod.

    Extracts the values of the beginPosition and endPosition children of
    the element produced by XMLParser.parse.
    """
    document = XMLParser.parse(self, xml)
    _, period_begin = self.searchFirst(document, 'beginPosition')
    _, period_end = self.searchFirst(document, 'endPosition')
    return qgstime.QgsTimePeriod(period_begin, period_end)
def parse(self, xml):
    """Parse a gml:TimePeriod element into a qgstime.QgsTimePeriod.

    Reads the beginPosition and endPosition children of the parsed element
    and wraps them in a QgsTimePeriod.
    """
    parsed = XMLParser.parse(self, xml)
    begin = self.searchFirst(parsed, 'beginPosition')[1]
    end = self.searchFirst(parsed, 'endPosition')[1]
    return qgstime.QgsTimePeriod(begin, end)
def __init__(self, xml_net_num, sching_logto, data_over_tp, act):
    """Build scheduler state, validate options, and wire up comm interfaces.

    @param xml_net_num: id of the network entry to load from the net XML.
    @param sching_logto: 'console' or 'file' — where scheduling logs go.
    @param data_over_tp: 'tcp' or 'udp' — transport for session data.
    @param act: truthy to register the scheduler<->actuator control channel.

    Note: on an invalid sching_logto/data_over_tp this logs an error and
    returns early, leaving the instance only partially initialized.
    """
    # logging.basicConfig(filename='logs/schinglog',filemode='w',level=logging.DEBUG)
    logging.basicConfig(level=logging.DEBUG)
    #
    if not (sching_logto == 'console' or sching_logto == 'file'):
        logging.error('Unexpected sching_logto=%s', sching_logto)
        return
    self.sching_logto = sching_logto
    #
    if not (data_over_tp == 'tcp' or data_over_tp == 'udp'):
        logging.error('Unexpected data_over_tp=%s', data_over_tp)
        return
    self.data_over_tp = data_over_tp
    # Alternative topologies kept for experiments:
    self.net_xml_file_url = "net_xmls/net_simpler.xml" #"net_xmls/net_four_paths.xml" #"net_xmls/net_mesh_topo.xml" #"net_xmls/net_resubmit_exp.xml"
    # The net XML lives under ext/ when this code is not launched as the
    # scheduler process itself (is_scheduler_run is defined outside this view).
    if not is_scheduler_run:
        self.net_xml_file_url = "ext/" + self.net_xml_file_url
    logging.info("Scheduler:: self.net_xml_file_url= %s", self.net_xml_file_url)
    self.xml_parser = XMLParser(self.net_xml_file_url, str(xml_net_num))
    #
    self.gm = GraphMan()
    self.init_network_from_xml()
    # self.gm.print_graph()
    # Useful state variables
    self.last_sching_id_given = -1
    self.last_sch_req_id_given = -1
    # Starts one below base_sport so the first increment presumably yields
    # base_sport itself — TODO confirm against the port-allocation helper.
    self.last_tp_dst_given = info_dict['base_sport']-1
    # Scher state dicts
    self.num_dstusers = 0
    self.users_beingserved_dict = {} # user_ip:{'gw_dpid':<>, 'gw_conn_port':<> ...}
    #
    self.N = 0 #num_activesessions
    self.alloc_dict = None
    self.sessionsbeingserved_dict = {}
    self.sessionspreserved_dict = {}
    self.sid_res_dict = {}
    self.actual_res_dict = self.gm.get_actual_resource_dict()
    # for perf plotting
    # self.perf_plotter = PerfPlotter(self.actual_res_dict)
    # for control_comm: the actuator channel is optional, gated on `act`.
    self.cci = ControlCommIntf()
    self.act = act
    if act:
        self.cci.reg_commpair(sctag = 'scher-acter',
                              proto = 'tcp',
                              _recv_callback = self._handle_recvfromacter,
                              s_addr = info_dict['lacter_addr'],
                              c_addr = info_dict['acterl_addr'] )
    self.dtsuser_intf = DTSUserCommIntf()
    #
    self.couplinginfo_dict = {}
    self.starting_time = time.time()
    #
    self.sid_schregid_dict = {}
    self.schregid_sid_dict = {}
    self.schingid_rescapalloc_dict = {}
    self.geninfo_dict = {}
def main(): """Call the command line and the parser modules.""" print colorize('Pardus Testing Framework\n', 'bold') # Call the clarguments module filename, custompackages, allpackages = arguments_parse() # Check whether the file is valid or not check_file(filename) # Now check the conditions and create the object if custompackages is not None: customparsefile = XMLParser(os.path.abspath(filename), custom_package_parse(custompackages)) print "Custom parsing:\t'{0}'\n".format(os.path.abspath(custompackages)) customparsefile.parser_main() else: parsefile = XMLParser(os.path.abspath(filename), None) if allpackages is not None: parsefile.output_package_list(os.path.abspath(allpackages)) parsefile.parser_main()
def main():
    """Parse CLI arguments, load the XML program source, and interpret it.

    --source FILE : read the XML program from FILE; defaults to stdin.
    --input FILE  : check that the interpreter-input file can be opened.

    Raises OpenInputFileException / OpenSourceFileException (chained to the
    underlying IOError) when the respective file cannot be opened.
    """
    file_flag = 1
    string_flag = 2
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument("--source", nargs=1, required=False)
    argument_parser.add_argument("--input", nargs=1, required=False)
    args = argument_parser.parse_args()

    if args.input is not None:
        input_file_name = args.input[0]
        try:
            input_file = open(input_file_name, "r")
        except IOError as err:
            print("Couldn't open the file", input_file_name, "\n")
            # Chain the cause so the original IOError stays visible.
            raise OpenInputFileException() from err
        else:
            # The handle was only an openability check and was never read
            # in the original code; close it instead of leaking it.
            input_file.close()
    # TODO: add a flag to choose where the interpreter input comes from
    # (currently nothing is done with --input beyond the open check).

    if args.source is not None:
        source_file_name = args.source[0]
        try:
            source_file = open(source_file_name, "r")
            reading_from = file_flag
        except IOError as err:
            print("Couldn't open the file", source_file_name, "\n")
            raise OpenSourceFileException() from err
    else:
        # No --source: the whole program text is read from stdin as a string.
        source_file = sys.stdin.read()
        reading_from = string_flag

    xml_parser = XMLParser()
    instructions = xml_parser.ParseXml(source_file, reading_from)
    # Close the source handle once parsed (it was leaked before).
    if reading_from == file_flag:
        source_file.close()

    interpret = Interpret(instructions)
    interpret.run_interpret()
def parse(self, xml):
    """Dispatch a gml time element to the period/instant parser it matches.

    Falls back to looking the node up by id and deriving the type name from
    its tag when no explicit type attribute is present; returns an empty
    qgstime.QgsTime for unrecognized types.
    """
    tree = XMLParser.parse(self, xml)
    node, kind = self.searchFirst(tree, '*@type')
    if not kind:
        # No type attribute: locate the element by id and synthesize the
        # gml type name from its tag.
        node, _ = self.searchFirst(tree, '*@id')
        kind = 'gml:' + node.tagName() + 'Type'
    if kind == 'gml:TimeInstantType':
        return GMLTimeInstantParser().parse(node)
    if kind == 'gml:TimePeriodType':
        return GMLTimePeriodParser().parse(node)
    return qgstime.QgsTime()
def parse(self, xml):
    """Parse a gml time node into QgsTimePeriod, QgsTimeInstant, or QgsTime.

    Chooses the concrete parser from the node's type attribute (or, failing
    that, from the tag of the node found by id); unknown types yield an
    empty qgstime.QgsTime.
    """
    doc = XMLParser.parse(self, xml)
    time_node, time_type = self.searchFirst(doc, '*@type')
    if not time_type:
        # Fallback: derive the gml type name from the tag of the id-node.
        time_node = self.searchFirst(doc, '*@id')[0]
        time_type = 'gml:' + time_node.tagName() + 'Type'
    dispatch = {
        'gml:TimePeriodType': GMLTimePeriodParser,
        'gml:TimeInstantType': GMLTimeInstantParser,
    }
    parser_cls = dispatch.get(time_type)
    if parser_cls is None:
        return qgstime.QgsTime()
    return parser_cls().parse(time_node)
def parse(self, xml):
    """Parse a gml:TimeInstant element into a qgstime.QgsTimeInstant.

    Reads the timePosition child of the element produced by XMLParser.parse.
    """
    document = XMLParser.parse(self, xml)
    instant = self.searchFirst(document, 'timePosition')[1]
    return qgstime.QgsTimeInstant(instant)
class BootStrap(object):
    """ Main BootStrap Class.

    Detects platform/architecture, configures build paths and environment
    variables, and downloads modules listed in the bootstrap manifest.
    (Python 2 code: has_key, octal 0755, `raise Exc, msg` syntax.)
    """

    _manifestFile = None  #: Path to BootStrap manifest
    # NOTE(review): class-level mutable instances shared by all BootStrap
    # objects — presumably only one instance is ever created; verify.
    _manifest = XMLParser()  #: The manifest data
    _toolDownloadFactory = ToolDownloadFactory()  #: Download factory
    _toolFactory = ToolFactory()  #: Generic tool factory

    def __init__(self, options):
        """ Init

        @param options: A dictionary containing all BootStrap options
        """
        # Order matters: _configurePaths reads self._arch, which
        # _platformDetect sets.
        self._platformDetect(options)
        self._configurePaths(options)
        self._configureEnv(options)
        self._manifestFile = options["global"]["BootStrap.manifest"]
        self._manifest.readfile(self._manifestFile)

    def _configurePaths(self, options):
        """ Configures the applications paths """
        def setPath(path):
            """ Resolve *path* to an absolute path (relative paths are
            anchored at the current working directory).
            @param path: path string from the options dict
            """
            if path.startswith('/'):
                return path
            else:
                return os.getcwd() + '/' + path.strip("./")
        self._downloadPath = \
            setPath(options["global"]["BootStrap.downloadPath"])
        self._outputPath = setPath(options["global"]["BootStrap.outputPath"])
        self._prefix = setPath(options["global"]["BootStrap.prefix"])
        self._patches = setPath(options["global"]["BootStrap.patches"])
        self._moduleStatus = \
            setPath(options["global"]["BootStrap.moduleStatus"])
        self._branch = options["global"]["BootStrap.branch"]
        if not os.path.exists(self._downloadPath):
            os.makedirs(self._downloadPath, 0755)
        if not os.path.exists(self._outputPath):
            os.makedirs(self._outputPath, 0755)
        if not os.path.exists(self._prefix):
            os.makedirs(self._prefix, 0755)
        # On x86_64, make <prefix>/lib64 a symlink to <prefix>/lib so both
        # library-dir conventions resolve to the same directory.
        if not os.path.exists(self._prefix + "/lib64") and \
                self._arch == 'x86_64':
            if not os.path.exists(self._prefix + "/lib"):
                os.makedirs(self._prefix + "/lib", 0755)
            os.symlink(self._prefix + "/lib", self._prefix + "/lib64")

    def _configureEnv(self, options):
        """ Configures the application environment """
        # Environment vars
        prefix = os.environ['PREFIX'] = self._prefix
        version = sys.version_info
        version = str(version[0]) + "." + str(version[1])
        self._python_version = version
        # Extract the gcc major.minor version as a digits-only string,
        # e.g. "4.8" -> "48".
        a, b = os.popen4("gcc -v")
        self._gcc_version = "".join(
            re.search('gcc.* ([0-9]*\.[0-9]*)', b.read()).group(1).split("."))
        os.environ['MY_GCC_VERSION'] = self._gcc_version
        os.environ['PATH'] = prefix + '/bin:' + os.environ['PATH']
        def setEnv(key, val, sep=":"):
            # Prepend val to os.environ[key], keeping any existing value
            # after the separator.
            aux = ""
            if os.environ.has_key(key):
                aux = sep + os.environ[key]
            os.environ[key] = val + aux
        setEnv('LD_LIBRARY_PATH', prefix + '/lib')
        setEnv('PYTHONPATH', os.getcwd() + ":" + prefix + '/lib/python' + \
            version + '/site-packages')
        setEnv('CPPFLAGS', '-I' + prefix + '/include', " ")
        setEnv('LDFLAGS', '-L' + prefix + '/lib', " ")
        setEnv('PKG_CONFIG_PATH', prefix + '/lib/pkgconfig')

    def _platformDetect(self, options):
        """ Detects and sets the current architecture and platform details """
        if options["global"].has_key("BootStrap.platform"):
            self._platform = options["global"]["BootStrap.platform"]
        else:
            if sys.platform == "linux2":
                self._platform = "linux"
            else:
                self._platform = sys.platform
        if options["global"].has_key("BootStrap.arch"):
            self._arch = options["global"]["BootStrap.arch"]
        else:
            if hasattr(os, "uname"):
                # Sanity check: pointer size must match the reported arch.
                import struct
                bytes = struct.calcsize('P')
                self._arch = os.uname()[4]
                if self._arch == 'x86_64' and bytes != 8:
                    raise UnsuportedPlatformError, """ It looks like you are
running a 64 bits kernel, but your user-space applications are compiled as
32 bits. You are attempting to build a 64 binary with a 32 bits compiler
and, as far as I know, that it is not possible. In order to avoid this
warning, you may launch this script under the shell created by 'linux32'.
Check documentation of the 'linux32' or the 'util-linux' Debian/Ubuntu
packages """
            else:
                # Needs to be ported (defaulting to 32 bits)
                self._arch = "i686"
        # End Platform stuff (Move this code to another place)

    def _searchModule(self, moduleName):
        """ Returns the module associated to a Name
        @param moduleName the module
        """
        for module in self._manifest.xbootstrap[0].xmodule:
            # Entries flagged ignore="yes" are skipped entirely.
            if hasattr(module, "pignore") and module.pignore == "yes":
                continue
            if module.pname == moduleName:
                return module
        raise ModuleNotFoundError, moduleName

    def getDefaultTarget(self):
        """ Returns the default target module """
        if hasattr(self._manifest.xbootstrap[0], "pdefault_target"):
            return self._manifest.xbootstrap[0].pdefault_target
        return None

    def _mget(self, module, update=False, redownload=False):
        """ Downloads a module, using the specified method
        @param module: Module node object got from XMLParser
        @param update: If true it will update the module code (only for
            SVN/CVS modules) downloaded code will be unpacked again
        @param redownload: If true it will download again the entire code
            from scratch
        """
        # Architecture/Platform stuff: keep only the sources matching the
        # current platform/arch/python-version/branch.
        wget_sources = []
        svn_sources = []
        for source in module.xsource:
            if hasattr(source, "pplatform") and \
                    source.pplatform != self._platform:
                continue
            if hasattr(source, "parch") and source.parch != self._arch:
                continue
            if hasattr(source, "ppython") and \
                    source.ppython != self._python_version:
                continue
            if hasattr(source, "pbranch") and source.pbranch != self._branch:
                continue
            if source.pmethod == 'wget':
                wget_sources.append(source)
            else:
                svn_sources.append(source)
        if len(wget_sources) == 0 and len(svn_sources) == 0:
            info = sys.platform
            if hasattr(os, "uname"):
                info += " " + " ".join(os.uname())
            info += " Python " + self._python_version
            raise UnsuportedPlatformError, info
        # End architecture/Platform stuff
        # Try wget sources first (picked at random for load spreading);
        # fall back to the svn sources once wget sources are exhausted.
        while len(wget_sources) != 0 or len(svn_sources) != 0:
            if len(wget_sources) == 0:
                wget_sources = svn_sources
                svn_sources = []
            if len(wget_sources) != 0:
                source = wget_sources[int(random.random() * len(wget_sources))]
                wget_sources.remove(source)
            try:
                self._mget_inner(module, source, update, redownload)
            except ToolError, e:
                print e
                print "Cannot download from that source,\
attempting another one"
                continue
            return
        raise DownloadError, "Cannot download from any suitable location"
class Scheduler(object): event_chief = EventChief() def __init__(self, xml_net_num, sching_logto, data_over_tp, act): # logging.basicConfig(filename='logs/schinglog',filemode='w',level=logging.DEBUG) logging.basicConfig(level=logging.DEBUG) # if not (sching_logto == 'console' or sching_logto == 'file'): logging.error('Unexpected sching_logto=%s', sching_logto) return self.sching_logto = sching_logto # if not (data_over_tp == 'tcp' or data_over_tp == 'udp'): logging.error('Unexpected data_over_tp=%s', data_over_tp) return self.data_over_tp = data_over_tp # self.net_xml_file_url = "net_xmls/net_simpler.xml" #"net_xmls/net_four_paths.xml" #"net_xmls/net_mesh_topo.xml" #"net_xmls/net_resubmit_exp.xml" if not is_scheduler_run: self.net_xml_file_url = "ext/" + self.net_xml_file_url logging.info("Scheduler:: self.net_xml_file_url= %s", self.net_xml_file_url) self.xml_parser = XMLParser(self.net_xml_file_url, str(xml_net_num)) # self.gm = GraphMan() self.init_network_from_xml() # self.gm.print_graph() # Useful state variables self.last_sching_id_given = -1 self.last_sch_req_id_given = -1 self.last_tp_dst_given = info_dict['base_sport']-1 # Scher state dicts self.num_dstusers = 0 self.users_beingserved_dict = {} # user_ip:{'gw_dpid':<>, 'gw_conn_port':<> ...} # self.N = 0 #num_activesessions self.alloc_dict = None self.sessionsbeingserved_dict = {} self.sessionspreserved_dict = {} self.sid_res_dict = {} self.actual_res_dict = self.gm.get_actual_resource_dict() # for perf plotting # self.perf_plotter = PerfPlotter(self.actual_res_dict) # for control_comm self.cci = ControlCommIntf() self.act = act if act: self.cci.reg_commpair(sctag = 'scher-acter', proto = 'tcp', _recv_callback = self._handle_recvfromacter, s_addr = info_dict['lacter_addr'], c_addr = info_dict['acterl_addr'] ) self.dtsuser_intf = DTSUserCommIntf() # self.couplinginfo_dict = {} self.starting_time = time.time() # self.sid_schregid_dict = {} self.schregid_sid_dict = {} self.schingid_rescapalloc_dict = {} 
self.geninfo_dict = {} # # self.exp() def recv_from_user(self, userinfo_dict, msg): user_ip = userinfo_dict['user_ip'] #reg everytime, in case the user is new self.dtsuser_intf.reg_user(user_ip = user_ip, userinfo_dict = userinfo_dict, _recv_callback = self._handle_recvfromuser, _send_callback = self._handle_sendtouser ) self.dtsuser_intf.pass_to_dts(user_ip = user_ip, msg = msg ) def get_couplingdoneinfo_dict(self): return self.couplinginfo_dict def get_sessionspreserved_dict(self): return self.sessionspreserved_dict def get_schingid_rescapalloc_dict(self): return self.schingid_rescapalloc_dict def get_geninfo_dict(self): return self.geninfo_dict ################################### _handle_*** methods ########################################## def _handle_recvfromacter(self, msg): [type_, data_] = msg if type_ == 's_sching_reply' or type_ == 'res_sching_reply': reply = data_['reply'] # # s_id = int(data_['s_id']) # sch_req_id = self.sid_schregid_dict[s_id] sch_req_id = int(data_['s_id']) s_id = self.schregid_sid_dict[sch_req_id] s_info = self.sessionsbeingserved_dict[sch_req_id] [p_ip, c_ip] = s_info['p_c_ip_list'] user_info = self.users_beingserved_dict[p_ip] userinfo_dict = {'ip': p_ip, 'mac': user_info['mac'], 'gw_dpid': user_info['gw_dpid'], 'gw_conn_port': user_info['gw_conn_port'] } if reply == 'done': s_info['sching_job_done'] = True s_alloc_info = self.alloc_dict['s-wise'][s_id] type_touser = None if type_ == 's_sching_reply': type_touser = '******' elif type_ == 'res_sching_reply': type_touser = '******' # to consumer if type_ == 's_sching_reply': # no need to resend for resching msg = {'type': type_touser, 'data': {'sch_req_id': sch_req_id, 'tp_dst': s_info['tp_dst'] } } if self.dtsuser_intf.relsend_to_user(user_ip = c_ip, msg = msg ) == 0: logging.error('_handle_recvfromacter:: could not send msg=%s, userinfo_dict= \n%s', pprint.pformat(msg), pprint.pformat(userinfo_dict) ) else: logging.debug('_handle_recvfromacter:: sent msg=%s' % pprint.pformat(msg) 
) # to producer datasize = None try: datasize = s_info['datasize_to_tx_list'][-1] except: pass msg = {'type': type_touser, 'data': {'sch_req_id': sch_req_id, 'bw': s_alloc_info['bw'], 'tp_dst': s_info['tp_dst'], 'datasize': datasize } } if self.dtsuser_intf.relsend_to_user(user_ip = p_ip, msg = msg ) == 0: logging.error('_handle_recvfromacter:: could not send msg= %s, userinfo_dict= \n%s', pprint.pformat(msg), pprint.pformat(userinfo_dict) ) else: logging.debug('_handle_recvfromacter:: sent msg=%s', pprint.pformat(msg) ) else: logging.error('_handle_recvfromacter:: Unexpected reply=%s', reply) msg = {'type':'sching_reply', 'data':'sorry' } if self.dtsuser_intf.relsend_to_user(user_ip = p_ip, msg = msg ) == 0: logging.error('_handle_recvfromacter:: could not send msg=%s, userinfo_dict= \n%s', pprint.pformat(msg), pprint.pformat(userinfo_dict) ) else: logging.debug('_handle_recvfromacter:: sent msg=%s', pprint.pformat(msg) ) # # def _handle_sendtouser(self, userinfo_dict, msg_str): Scheduler.event_chief.raise_event('send_msg_to_user', msg_str, userinfo_dict) def _handle_recvfromuser(self, userinfo_dict, msg_): user_ip = userinfo_dict['user_ip'] [type_, data_] = msg_ if type_ == 'join_req': if self.welcome_user(user_ip = user_ip, user_mac = userinfo_dict['user_mac'], gw_dpid = userinfo_dict['gw_dpid'], gw_conn_port = userinfo_dict['gw_conn_port'] ): msg = {'type':'join_reply', 'data':'welcome' } if self.dtsuser_intf.relsend_to_user(user_ip = user_ip, msg = msg ) == 0: logging.error('_handle_recvfromuser:: could not send msg=%s, \nuserinfo_dict=%s', pprint.pformat(msg), pprint.pformat(userinfo_dict) ) else: logging.debug('_handle_recvfromuser:: sent msg=%s', pprint.pformat(msg) ) else: msg = {'type':'join_reply', 'data':'sorry' } if self.dtsuser_intf.relsend_to_user(user_ip = user_ip, msg = msg ) == 0: logging.error('_handle_recvfromuser:: could not send msg=%s, \nuserinfo_dict=%s', pprint.pformat(msg), pprint.pformat(userinfo_dict) ) else: 
logging.debug('_handle_recvfromuser:: sent msg=%s', pprint.pformat(msg) ) elif type_ == 'sching_req': sch_req_id = self.welcome_session(p_c_ip_list = [user_ip, data_['c_ip']], req_dict = data_['req_dict'], app_pref_dict = data_['app_pref_dict'] ) if sch_req_id != -1: # TODO: for now ... self.do_sching() else: msg = {'type':'sching_reply', 'data':'sorry' } if self.dtsuser_intf.relsend_to_user(user_ip = user_ip, msg = msg ) == 0: logging.error('_handle_recvfromuser:: could not send msg=%s, \nuserinfo_dict=%s', pprint.pformat(msg), pprint.pformat(userinfo_dict) ) else: logging.debug('_handle_recvfromuser:: sent msg=%s', pprint.pformat(msg) ) elif type_ == 'session_done': sch_req_id = int(data_['sch_req_id']) logging.debug('_handle_recvfromuser:: ........................................................') logging.debug('_handle_recvfromuser:: session_done for sch_req_id= %s', sch_req_id) logging.debug('_handle_recvfromuser:: ........................................................') if not sch_req_id in self.couplinginfo_dict: self.couplinginfo_dict[sch_req_id] = {} self.couplinginfo_dict[sch_req_id]['session_done'] = data_ self.bye_session(sch_req_id = sch_req_id ) elif type_ == 'coupling_done': sch_req_id = int(data_['sch_req_id']) logging.debug('_handle_recvfromuser:: OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO') logging.debug('_handle_recvfromuser:: coupling_done for sch_req_id= %s', sch_req_id) logging.debug('_handle_recvfromuser:: OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO') if not sch_req_id in self.couplinginfo_dict: self.couplinginfo_dict[sch_req_id] = {} self.couplinginfo_dict[sch_req_id]['coupling_done'] = data_ #################### scher_state_management methods ####################### def init_network_from_xml(self): [node_list, edge_list] = self.xml_parser.get_node__edge_list() # print 'node_list= %s' % pprint.pformat(node_list) # print 'edge_list= %s' % pprint.pformat(edge_list) self.gm.graph_add_nodes(node_list) 
self.gm.graph_add_edges(edge_list) def print_scher_state(self): print '<---------------------------------------->' print 'is_scheduler_run: ', is_scheduler_run print 'users_beingserved_dict:' pprint.pprint(self.users_beingserved_dict) print 'sessions_beingserved_dict:' pprint.pprint(self.sessionsbeingserved_dict) print 'sessions_pre_served_dict:' pprint.pprint(self.sessionspreserved_dict) print '<---------------------------------------->' def next_sching_id(self): self.last_sching_id_given += 1 return self.last_sching_id_given def next_sch_req_id(self): self.last_sch_req_id_given += 1 return self.last_sch_req_id_given def next_tp_dst(self): self.last_tp_dst_given += 1 return self.last_tp_dst_given def did_user_joindts(self, user_ip): return user_ip in self.users_beingserved_dict def welcome_user(self, user_ip, user_mac, gw_dpid, gw_conn_port): """ if self.did_user_joindts(user_ip): print 'user_ip=%s already joined' % user_ip return False """ # self.users_beingserved_dict.update({user_ip:{'gw_dpid':gw_dpid, 'gw_conn_port':gw_conn_port, 'mac': user_mac } } ) logging.info('welcome user:: ip=%s, mac=%s, gw_dpid=%s, gw_conn_port=%s', user_ip, user_mac, gw_dpid, gw_conn_port) return True # not used now, for future def bye_user(self, user_ip): if not self.did_user_joindts(user_ip): logging.error('bye_user:: user_ip=%s is not joined.', user_ip) return False # del self.users_beingserved_dict[user_ip] logging.info('bye user:: bye ip=%s', user_ip) return True def welcome_session(self, p_c_ip_list, req_dict, app_pref_dict): #sch_req_id: should be unique for every sch_session [p_ip, c_ip] = p_c_ip_list if not (self.did_user_joindts(p_ip) and self.did_user_joindts(c_ip) ): logging.error('welcome_session:: nonjoined user in sching_req.') return -1 # p_c_gwtag_list = ['s'+str(self.users_beingserved_dict[p_ip]['gw_dpid']), 's'+str(self.users_beingserved_dict[c_ip]['gw_dpid']) ] #update global var, list and dicts self.N += 1 sch_req_id = self.next_sch_req_id() 
self.sessionsbeingserved_dict.update( {sch_req_id:{'tp_dst': self.next_tp_dst(), 'p_c_ip_list': p_c_ip_list, 'p_c_gwtag_list': p_c_gwtag_list, 'app_pref_dict': app_pref_dict, 'req_dict': req_dict, 'sching_job_done': False } } ) #print 'self.sessionsbeingserved_dict: ' #pprint.pprint(self.sessionsbeingserved_dict) # return sch_req_id def bye_session(self, sch_req_id): self.N -= 1 # Send sessions whose "sching job" is done is sent to pre_served category self.sessionspreserved_dict[sch_req_id] = self.sessionsbeingserved_dict[sch_req_id] path_info = self.sid_res_dict[sch_req_id]['path_info'] self.gm.rm_user_from_edge__itr_list(path_info['edge_on_path_list'], path_info['itr_on_path_list']) del self.sessionsbeingserved_dict[sch_req_id] del self.sid_res_dict[sch_req_id] # logging.info('bye_session:: bye sch_req_id=%s, session_info=\n%s', sch_req_id, pprint.pformat(self.sessionspreserved_dict[sch_req_id]) ) ################################### Sching rel methods ########################################### def update_sid_res_dict(self): for s_id in self.sessionsbeingserved_dict: if not s_id in self.sid_res_dict: p_c_gwtag_list = self.sessionsbeingserved_dict[s_id]['p_c_gwtag_list'] path_info = self.gm.get_path__edge__itr_on_path_list__fair_bw_dict(p_c_gwtag_list[0], p_c_gwtag_list[1]) self.sid_res_dict[s_id] = {'path_info': copy.copy(path_info) } self.gm.add_user_to_edge__itr_list(path_info['edge_on_path_list'], path_info['itr_on_path_list']) logging.debug('update_sid_res_dict:: s_id=%s, path=\n%s', s_id, path_info['path']) for s_id_ in self.sid_res_dict: path_info_ = self.sid_res_dict[s_id_]['path_info'] [path_bw_, path_fair_bw_] = self.gm.get_path_bw__fair_bw(path_info_['edge_on_path_list']) path_info_['bw'] = path_bw_ path_info_['fair_bw'] = path_fair_bw_ # def give_incintkeyform(self, indict1, indict2): # indict1 and indict2 have same set of keys outdict1, outdict2 = {}, {} i = 0 for k in indict1: outdict1[i] = indict1[k] outdict2[i] = indict2[k] 
self.schregid_sid_dict[k] = i self.sid_schregid_dict[i] = k i += 1 # return [outdict1, outdict2] def do_sching(self): # Currently for active sessions, gets things together to work sching logic and then sends corresponding # walk/itjob rules to correspoding actuator - which is a single actuator right now ! sching_id = self.next_sching_id() if self.sching_logto == 'file': fname = 'ext/sching_decs/sching_' + sching_id + '.log' logging.basicConfig(filename=fname, filemode='w', level=logging.DEBUG) elif self.sching_logto == 'console': logging.basicConfig(level=logging.DEBUG) for sch_req_id, sinfo in self.sessionsbeingserved_dict.items(): if not 'initial_datasize' in sinfo: sinfo['initial_datasize'] = sinfo['req_dict']['datasize'] sinfo['total_txed_datasize'] = 0 if 'sched_time_list' in sinfo: elapsed_time = time.time() - self.starting_time - sinfo['sched_time_list'][-1] sinfo['elapsed_time_list'].append(elapsed_time) elapsed_datasize, txed_datasize = None, None last_txt = sinfo['txt_list'][-1] # if elapsed_time > last_txt: # logging.debug('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') # logging.debug('do_sching:: sch_req_id= %s, elapsed_time= %s > last_txt= %s but session_done is still not received.', sch_req_id, elapsed_time, last_txt) # logging.debug('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') # # self.bye_session(sch_req_id) # sinfo['resching_case_list'].append('elapsed_time= %s > last_txt= %s' % (elapsed_time, last_txt) ) # else: datasize = sinfo['req_dict']['datasize'] try: txed_datasize = sinfo['datasize_to_tx_list'][-1]*elapsed_time/last_txt except: txed_datasize = datasize*elapsed_time/last_txt # txed_datasize = datasize*elapsed_time/last_txt sinfo['total_txed_datasize'] += txed_datasize # for multiple itrs # sinfo['datasize_to_tx_list'].append(datasize - txed_datasize) sinfo['datasize_to_tx_list'].append(sinfo['initial_datasize'] - sinfo['total_txed_datasize']) 
last_resching_case_id = None try: last_resching_case_id = sinfo['resching_case_id_list'][-1] except: last_resching_case_id = 0 # sinfo['req_dict']['datasize'] = sinfo['datasize_to_tx_list'][-1] tobeproced_data_transt = sinfo['tobeproced_data_transt_list'][-1] tobeproced_datasize = sinfo['tobeproced_datasize_list'][-1] if elapsed_time < tobeproced_data_transt: sinfo['resching_case_list'].append('elapsed_time= %s < tobeproced_data_transt= %s' % (elapsed_time, tobeproced_data_transt) ) elapsed_datasize = (0.9)*float(tobeproced_datasize*float(elapsed_time))/tobeproced_data_transt # if tobeproced_data_transt/elapsed_time >= 4: # elapsed_datasize = (0.9)*float(tobeproced_datasize*float(elapsed_time))/tobeproced_data_transt # else: # elapsed_datasize = (0.5)*float(tobeproced_datasize*float(elapsed_time))/tobeproced_data_transt sinfo['req_dict']['datasize'] = max(0.01, datasize - elapsed_datasize) sinfo['resching_case_id_list'].append(0) # if last_resching_case_id == 0: # sinfo['datasize_to_tx_list'].append(datasize - txed_datasize) # elif last_resching_case_id == 1: # sinfo['datasize_to_tx_list'].append(sinfo['initial_datasize'] - sinfo['total_txed_datasize']) else: sinfo['resching_case_list'].append('elapsed_time= %s >= tobeproced_data_transt= %s' % (elapsed_time, tobeproced_data_transt) ) elapsed_datasize = txed_datasize sinfo['req_dict']['datasize'] = max(0.01, datasize - elapsed_datasize) # sinfo['datasize_to_tx_list'][-1] sinfo['resching_case_id_list'].append(1) # if last_resching_case_id == 0: # sinfo['datasize_to_tx_list'].append(datasize - txed_datasize) # elif last_resching_case_id == 1: # sinfo['datasize_to_tx_list'].append(sinfo['initial_datasize'] - sinfo['total_txed_datasize']) sinfo['elapsed_datasize_list'].append(elapsed_datasize) sinfo['req_dict']['slack_metric'] = sinfo['slack_metric_list'][-1] - elapsed_time # logging.info('do_sching:: sching_id=%s started;', sching_id) self.update_sid_res_dict() [sessionsbeingserved_dict_, sid_res_dict_] = 
# NOTE(review): this chunk begins mid-way through do_sching(); the statement
# below is the continuation of an assignment whose left-hand side (presumably
# "[sessionsbeingserved_dict_, sid_res_dict_] = \") and the method head
# (defining sching_id) lie above this chunk -- confirm against the full file.
        self.give_incintkeyform(self.sessionsbeingserved_dict, self.sid_res_dict)
        # Run the optimizer over the int-keyed session/resource dicts.
        sching_opter = SchingOptimizer(sessionsbeingserved_dict_,
                                       self.actual_res_dict,
                                       sid_res_dict_ )
        sching_opter.solve()
        #
        self.alloc_dict = sching_opter.get_sching_result()
        logging.info('do_sching:: alloc_dict=\n%s', pprint.pformat(self.alloc_dict))
        # Book-keep per-session allocation history; lists are created lazily on
        # the first allocation a session receives.
        for s_id, salloc in self.alloc_dict['s-wise'].items():
            sch_req_id = self.sid_schregid_dict[s_id]
            sinfo = self.sessionsbeingserved_dict[sch_req_id]
            if not 'sched_time_list' in sinfo:
                sinfo['sched_time_list'] = []
                sinfo['slack_metric_list'] = []
                sinfo['bw_list'] = []
                sinfo['datasize_list'] = []
                sinfo['txt_list'] = []
                sinfo['walk_list'] = salloc['walk_list']
                sinfo['tobeproced_datasize_list'] = []
                sinfo['tobeproced_data_transt_list'] = []
                sinfo['resching_case_list'] = []
                sinfo['resching_case_id_list'] = []
                sinfo['elapsed_datasize_list'] = []
                sinfo['elapsed_time_list'] = []
                sinfo['datasize_to_tx_list'] = []
            #
            sinfo['sched_time_list'].append(time.time() - self.starting_time)
            sinfo['slack_metric_list'].append(sinfo['req_dict']['slack_metric'])
            sinfo['bw_list'].append(salloc['bw'])
            sinfo['datasize_list'].append(sinfo['req_dict']['datasize'])
            sinfo['txt_list'].append(salloc['s_txt'])
            sinfo['tobeproced_datasize_list'].append(salloc['tobeproced_datasize'])
            sinfo['tobeproced_data_transt_list'].append(salloc['tobeproced_data_transt'])
            sinfo['trans_time'] = salloc['trans_time']
        # Resource capacity allocation distribution over sessions
        self.schingid_rescapalloc_dict[sching_id] = self.alloc_dict['res-wise']
        self.geninfo_dict = self.alloc_dict['general']
        #
        # logging.info('saving sching_dec to figs...')
        # self.perf_plotter.save_sching_result(g_info_dict = self.alloc_dict['general'],
        #                                      s_info_dict = self.alloc_dict['s-wise'],
        #                                      res_info_dict = self.alloc_dict['res-wise'])
        # Convert sching decs to rules
        for s_id in range(self.N):
            s_allocinfo_dict = self.alloc_dict['s-wise'][s_id]
            s_itwalk_dict = s_allocinfo_dict['itwalk_dict']
            s_walk_list = s_allocinfo_dict['walk_list']
            # Only TCP data transport is handled; anything else is logged
            # as an error but processing continues.
            if not self.data_over_tp == 'tcp':
                logging.error("do_sching:: Unexpected data_over_tp= %s", self.data_over_tp)
            #
            [walk_rule_list, itjob_rule_dict] = \
                self.get_overtcp_session_walk_rule_list__itjob_rule_dict(s_id,
                                                                         s_walk_list = s_walk_list,
                                                                         s_itwalk_dict = s_itwalk_dict)
            s_info = self.sessionsbeingserved_dict[self.sid_schregid_dict[s_id]]
            s_info['slack-tt'] = s_allocinfo_dict['slack-tt']
            s_info['slack-transtime'] = abs(s_allocinfo_dict['trans_time']-s_info['req_dict']['slack_metric'])
            #
            # logging.debug('for s_id= %s;', s_id)
            # logging.debug('walk_rule_list= \n%s', pprint.pformat(walk_rule_list) )
            # logging.debug('itjob_rule_dict= \n%s', pprint.pformat(itjob_rule_dict) )
            # Dispatching rule to actuator_actuator
            if s_info['sching_job_done'] == False:
                type_toacter = 's_sching_req'
            else:
                type_toacter = 'res_sching_req'
            msg = json.dumps({'type': type_toacter,
                              'data': {'s_id': self.sid_schregid_dict[s_id],
                                       'walk_rule_list': walk_rule_list,
                                       'itjob_rule_dict': itjob_rule_dict } } )
            self.cci.send_to_client('scher-acter', msg)
        #
        #
        logging.info('do_sching:: sching_id= %s done.', sching_id)

    def get_overtcp_session_walk_rule_list__itjob_rule_dict(self, s_id, s_walk_list, s_itwalk_dict):
        """Translate one session's walk into OpenFlow-style rules.

        Chops the walk into per-TCP-connection segments (one segment per
        intermediate 't' node), then for every segment emits forward and
        backward switch rules plus, for 't' (itr) heads, an it-job entry.
        Returns [walk_rule_list, itjob_rule_dict].
        """
        def get_port_name(dpid, port):
            # e.g. (3, 2) -> 's3-eth2'
            return 's' + str(dpid) + '-eth' + str(port)
        #
        def chop_swalk_into_tcppaths():
            # Split s_walk_list into chops; each 't' node closes the current
            # chop and also opens the next one.  The first chop is prefixed
            # with 'p' (producer) and the last chop is suffixed with 'c'
            # (consumer).
            chopped_swalk_list = []
            cur_chop_id = 0
            #
            l_ = list(enumerate(s_walk_list))
            for i, node_str in l_:
                node = self.gm.get_node(node_str)
                node_type = node['type']
                if i == 0:
                    if node_type != 'sw':
                        logging.error('right after p only sw type is allowed! what is found=(%s, %s)', node_str, node_type)
                        # NOTE(review): 'system' is not a defined name -- this
                        # raises NameError instead of exiting; should be sys.exit(2).
                        system.exit(2)
                    #
                    chopped_swalk_list.append(['p', node_str])
                elif i == len(l_) - 1:
                    if node_type != 'sw':
                        logging.error('right before c only sw type is allowed! what is found=(%s, %s)', node_str, node_type)
                        # NOTE(review): same 'system' / sys.exit problem as above.
                        system.exit(2)
                    #
                    chopped_swalk_list[cur_chop_id].append(node_str)
                    chopped_swalk_list[cur_chop_id].append('c')
                else: # i is pointing to intermediate pwalk_nodes
                    if node_type == 'sw':
                        chopped_swalk_list[cur_chop_id].append(node_str)
                    elif node_type == 't':
                        chopped_swalk_list[cur_chop_id].append(node_str)
                        cur_chop_id += 1
                        chopped_swalk_list.append([node_str])
            #
            return chopped_swalk_list
        #
        chopped_swalk_list = chop_swalk_into_tcppaths()
        #
        # print '---> for s_id= %s' % (s_id)
        # print 's_itwalk_dict= \n%s' % pprint.pformat(s_itwalk_dict)
        # print 's_walk_list= \n%s' % s_walk_list
        # print 'chopped_swalk_list= \n%s' % pprint.pformat(chopped_swalk_list)
        itjob_rule_dict = {}
        walk_rule_list = []
        s_info_dict = self.sessionsbeingserved_dict[self.sid_schregid_dict[s_id]]
        s_tp_dst = s_info_dict['tp_dst']
        p_c_ip_list = s_info_dict['p_c_ip_list']
        duration = 0
        [from_ip, to_ip] = p_c_ip_list
        p_info_dict = self.users_beingserved_dict[from_ip]
        c_info_dict = self.users_beingserved_dict[to_ip]
        [from_mac, to_mac] = [p_info_dict['mac'], c_info_dict['mac']]
        #
        first_itr_done = False
        uptoitrjob_list = []
        #
        for i, swalk_chop in enumerate(chopped_swalk_list):
            chop_walk_rule_list = []
            head_i, tail_i = 0, len(swalk_chop) - 1
            head_str, tail_str = swalk_chop[head_i], swalk_chop[tail_i]
            head_ip, tail_ip = None, None
            # 'p'/'c' endpoints are not graph nodes; KeyError selects the
            # producer/consumer ip+mac instead.
            try:
                chop_head = self.gm.get_node(head_str)
                head_ip, head_mac = chop_head['ip'], chop_head['mac']
            except KeyError: #head_str = 'p'
                head_ip, head_mac = from_ip, from_mac
            try:
                chop_tail = self.gm.get_node(tail_str)
                tail_ip, tail_mac = chop_tail['ip'], chop_tail['mac']
            except KeyError: #tail_str = 'c'
                tail_ip, tail_mac = to_ip, to_mac
            # Extract forward route from head to tail
            # print 'extracting forward route >>>'
            for j in range(head_i+1, tail_i-1): #sws in [head_sw, tail_sw)
                sw_str = swalk_chop[j]
                sw = self.gm.get_node(sw_str)
                forward_edge = self.gm.get_edge((sw_str, swalk_chop[j+1]))
                tail_ip_ = tail_ip
                if head_str == 'p':
                    tail_ip_ = to_ip
                # print 'sw_str= %s, swalk_chop[j+1]= %s, forward_edge= %s' % (sw_str, swalk_chop[j+1], pprint.pformat(forward_edge) )
                chop_walk_rule_list.append({'conn': [sw['dpid'], head_ip],
                                            'typ': 'forward',
                                            'wc': [head_ip, tail_ip_, 6, -1, int(s_tp_dst)],
                                            'rule': [forward_edge['pre_dev'], duration] })
            # Extract backward route from tail to head
            # print 'extracting backward route >>>'
            for j in range(head_i+2, tail_i): #sws in (head_sw-tail_sw]
                sw_str = swalk_chop[j]
                sw = self.gm.get_node(sw_str)
                backward_edge = self.gm.get_edge((swalk_chop[j-1], sw_str))
                # print 'swalk_chop[j-1]= %s, sw_str= %s, backward_edge= %s' % (swalk_chop[j-1], sw_str, pprint.pformat(backward_edge) )
                chop_walk_rule_list.append({'conn': [sw['dpid'], tail_ip],
                                            'typ': 'forward',
                                            'wc': [tail_ip, head_ip, 6, int(s_tp_dst), -1],
                                            'rule': [backward_edge['post_dev'], duration] })
            # Extract forward (if tail is itr) or modify_forward (if tail is c)
            # Tail
            tail_sw_str = swalk_chop[tail_i-1]
            tail_sw = self.gm.get_node(tail_sw_str)
            to_tail_sw_port_name, type_, rule_list = None, None, None
            tail_ip_ = tail_ip
            if tail_str == 'c':
                to_tail_sw_port_name = get_port_name(dpid = c_info_dict['gw_dpid'],
                                                     port = c_info_dict['gw_conn_port'])
                type_ = 'forward'
                rule_list = [to_tail_sw_port_name, duration]
            else: # itr
                to_tail_sw_port_name = self.gm.get_edge((tail_sw_str, tail_str))['pre_dev']
                if first_itr_done:
                    type_ = 'forward'
                    rule_list = [to_tail_sw_port_name, duration]
                else: # first itr
                    # Only the first itr rewrites the destination address.
                    type_ = 'mod_nw_dst__forward'
                    rule_list = [tail_ip, tail_mac, to_tail_sw_port_name, duration]
                    first_itr_done = True
                tail_ip_ = to_ip
            #
            # print 'tail_sw_str= %s, tail_str= %s, to_tail_sw_port_name= %s' % (tail_sw_str, tail_str, to_tail_sw_port_name)
            chop_walk_rule_list.append({ 'conn': [tail_sw['dpid'], head_ip],
                                         'typ': type_,
                                         'wc': [head_ip, tail_ip_, 6, -1, int(s_tp_dst)],
                                         'rule': rule_list })
            # Extract backward (if head is itr) or modify_backward (if head is p) route to head
            head_sw_str = swalk_chop[head_i + 1]
            head_sw = self.gm.get_node(head_sw_str)
            to_head_sw_port_name, type_, rule_list = None, None, None
            if head_str == 'p':
                to_head_sw_port_name = get_port_name(dpid = p_info_dict['gw_dpid'],
                                                     port = p_info_dict['gw_conn_port'])
                type_ = 'mod_nw_src__forward'
                rule_list = [to_ip, to_mac, to_head_sw_port_name, duration]
            else: # head is itr
                head_edge = self.gm.get_edge((head_sw_str, head_str))
                to_head_sw_port_name = head_edge['pre_dev']
                type_ = 'forward'
                rule_list = [to_head_sw_port_name, duration]
                # Fill up it_job_rule for the itr
                assigned_job = s_itwalk_dict['itr_info_dict'][head_str]
                # NOTE(review): the membership test uses tail_sw['dpid'] but the
                # list is created/appended under head_sw['dpid'] -- looks like a
                # copy/paste bug; confirm intent (probably head_sw in both).
                if not (tail_sw['dpid'] in itjob_rule_dict):
                    itjob_rule_dict[head_sw['dpid']] = []
                itjob_rule_dict[head_sw['dpid']].append({ 'proto': 6,
                                                          'itr_ip': head_ip,
                                                          'itr_mac': head_mac,
                                                          'swdev_to_itr': to_head_sw_port_name,
                                                          'assigned_job': assigned_job,
                                                          'uptoitrjob_list': copy.copy(uptoitrjob_list),
                                                          's_tp_dst': int(s_tp_dst),
                                                          'to_ip': tail_ip,
                                                          'datasize': s_itwalk_dict['info']['datasize'],
                                                          'bw': s_itwalk_dict['info']['bw'] })
                #
                uptoitrjob_list.append(assigned_job)
            #
            # print 'head_str= %s, head_sw_str= %s, to_head_sw_port_name= %s' % (head_str, head_sw_str, to_head_sw_port_name)
            chop_walk_rule_list.append({ 'conn': [head_sw['dpid'], tail_ip],
                                         'typ': type_,
                                         'wc': [tail_ip, head_ip, 6, int(s_tp_dst), -1],
                                         'rule': rule_list })
            #
            walk_rule_list += chop_walk_rule_list
        return [walk_rule_list, itjob_rule_dict]

    def exp(self):
        """Manual experiment: register a single hard-coded user with the
        scheduler and with the dts-user interface."""
        print '*** exp::'
        userinfo_list = [ {'user_ip':'10.0.1.0','user_mac':'00:00:00:01:01:00','gw_dpid':12, 'gw_conn_port':2} ]
        # userinfo_list = [ {'user_ip':'10.0.2.0','user_mac':'00:00:00:01:02:00','gw_dpid':1, 'gw_conn_port':3},
        #                   {'user_ip':'10.0.2.1','user_mac':'00:00:00:01:02:01','gw_dpid':1, 'gw_conn_port':4},
        #                   {'user_ip':'10.0.1.0','user_mac':'00:00:00:01:01:00','gw_dpid':2, 'gw_conn_port':3},
        #                   {'user_ip':'10.0.1.1','user_mac':'00:00:00:01:01:01','gw_dpid':2, 'gw_conn_port':4} ]
        for userinfo in userinfo_list:
            self.welcome_user(user_ip = userinfo['user_ip'],
                              user_mac = userinfo['user_mac'],
                              gw_dpid = userinfo['gw_dpid'],
                              gw_conn_port = userinfo['gw_conn_port'] )
            self.dtsuser_intf.reg_user(user_ip = userinfo['user_ip'],
                                       userinfo_dict = userinfo,
                                       _recv_callback = self._handle_recvfromuser,
                                       _send_callback = self._handle_sendtouser )
        print '***'

    def run_sching(self):
        """Refresh per-session resources and run the optimizer once.

        Unlike do_sching, this only solves -- it does not read the result
        back or dispatch any rules.
        """
        self.update_sid_res_dict()
        sching_opter = SchingOptimizer(self.give_incintkeyform(flag=True, indict=self.sessionsbeingserved_dict),
                                       self.actual_res_dict,
                                       self.give_incintkeyform(flag=False, indict=self.sid_res_dict) )
        sching_opter.solve()

    def test(self, num_session):
        """Self-test: welcome a topology-specific set of users, open
        num_session sessions (cycling over 4 canned request profiles),
        then run do_sching.
        """
        userinfo_list = None
        if self.net_xml_file_url == "net_xmls/net_four_paths.xml":
            userinfo_list = [{'user_ip':'10.0.2.0', 'user_mac':'00:00:00:01:02:00', 'gw_dpid':20, 'gw_conn_port':3},
                             {'user_ip':'10.0.2.1', 'user_mac':'00:00:00:01:02:01', 'gw_dpid':21, 'gw_conn_port':4},
                             {'user_ip':'10.0.2.2', 'user_mac':'00:00:00:01:02:02', 'gw_dpid':22, 'gw_conn_port':4},
                             {'user_ip':'10.0.2.3', 'user_mac':'00:00:00:01:02:03', 'gw_dpid':23, 'gw_conn_port':3},
                             {'user_ip':'10.0.1.0', 'user_mac':'00:00:00:01:01:00', 'gw_dpid':10, 'gw_conn_port':3},
                             {'user_ip':'10.0.1.1', 'user_mac':'00:00:00:01:01:01', 'gw_dpid':11, 'gw_conn_port':4},
                             {'user_ip':'10.0.1.2', 'user_mac':'00:00:00:01:01:02', 'gw_dpid':12, 'gw_conn_port':4},
                             {'user_ip':'10.0.1.3', 'user_mac':'00:00:00:01:01:03', 'gw_dpid':13, 'gw_conn_port':3} ]
        elif self.net_xml_file_url == 'net_xmls/net_simpler.xml':
            userinfo_list = [{'user_ip':'10.0.2.0', 'user_mac':'00:00:00:01:02:00', 'gw_dpid':1, 'gw_conn_port':3},
                             {'user_ip':'10.0.1.0', 'user_mac':'00:00:00:01:01:00', 'gw_dpid':2, 'gw_conn_port':3} ]
        elif self.net_xml_file_url == 'net_xmls/net_mesh_topo.xml':
            userinfo_list = [{'user_ip':'10.0.2.0', 'user_mac':'00:00:00:01:02:00', 'gw_dpid':20, 'gw_conn_port':3},
                             {'user_ip':'10.0.2.1', 'user_mac':'00:00:00:01:02:01', 'gw_dpid':20, 'gw_conn_port':4},
                             {'user_ip':'10.0.2.2', 'user_mac':'00:00:00:01:02:02', 'gw_dpid':20, 'gw_conn_port':5},
                             {'user_ip':'10.0.1.0', 'user_mac':'00:00:00:01:01:00', 'gw_dpid':10, 'gw_conn_port':3},
                             {'user_ip':'10.0.1.1', 'user_mac':'00:00:00:01:01:00', 'gw_dpid':10, 'gw_conn_port':4},
                             {'user_ip':'10.0.1.2', 'user_mac':'00:00:00:01:01:01', 'gw_dpid':10, 'gw_conn_port':5} ]
        elif self.net_xml_file_url == 'net_xmls/net_resubmit_exp.xml':
            userinfo_list = [{'user_ip':'10.0.2.0', 'user_mac':'00:00:00:01:02:00', 'gw_dpid':11, 'gw_conn_port':3},
                             {'user_ip':'10.0.2.1', 'user_mac':'00:00:00:01:02:01', 'gw_dpid':11, 'gw_conn_port':4},
                             {'user_ip':'10.0.2.2', 'user_mac':'00:00:00:01:02:02', 'gw_dpid':11, 'gw_conn_port':5},
                             {'user_ip':'10.0.1.0', 'user_mac':'00:00:00:01:01:00', 'gw_dpid':12, 'gw_conn_port':3},
                             {'user_ip':'10.0.1.1', 'user_mac':'00:00:00:01:01:00', 'gw_dpid':12, 'gw_conn_port':4},
                             {'user_ip':'10.0.1.2', 'user_mac':'00:00:00:01:01:01', 'gw_dpid':12, 'gw_conn_port':5} ]
        # NOTE(review): for any other net_xml_file_url, userinfo_list stays
        # None and the loop below raises TypeError.
        #
        for userinfo in userinfo_list:
            self.welcome_user(user_ip = userinfo['user_ip'],
                              user_mac = userinfo['user_mac'],
                              gw_dpid = userinfo['gw_dpid'],
                              gw_conn_port = userinfo['gw_conn_port'] )
        #
        #datasize (MB) slack_metric (ms)
        req_dict_list = [ {'datasize':100, 'slack_metric':100, 'func_list':['fft','upsampleplot']},
                          {'datasize':100, 'slack_metric':100, 'func_list':['fft','upsampleplot']},
                          {'datasize':100, 'slack_metric':100, 'func_list':['fft','upsampleplot']},
                          {'datasize':100, 'slack_metric':100, 'func_list':['fft','upsampleplot']} ]
        app_pref_dict_list = [ {'m_p': 10,'m_u': 0,'x_p': 0,'x_u': 0},
                               {'m_p': 0,'m_u': 50,'x_p': 0,'x_u': 0},
                               {'m_p': 50,'m_u': 0,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0} ]
        p_c_ip_list_list = [ ['10.0.2.0','10.0.1.0'],
                             ['10.0.2.1','10.0.1.1'],
                             ['10.0.2.2','10.0.1.2'],
                             ['10.0.2.3','10.0.1.3'] ]
        for i in range(num_session):
            self.welcome_session(p_c_ip_list = p_c_ip_list_list[int(i%4)],
                                 req_dict = req_dict_list[int(i%4)],
                                 app_pref_dict = app_pref_dict_list[int(i%4)] )
        #
        # self.run_sching()
        self.do_sching()
def parse(self, xml):
    """Parse *xml* and return its time position as a QgsTimeInstant."""
    parsed = XMLParser.parse(self, xml)
    # Only the second element of the searchFirst() pair is used here.
    _, instant = self.searchFirst(parsed, 'timePosition')
    return qgstime.QgsTimeInstant(instant)
def main():
    """Entry point: parse the XML document named by the first CLI argument."""
    xml_path = sys.argv[1]
    xml_parser = XMLParser(xml_path)
    xml_parser.parse()
class Scheduler(object):
    """Central scheduler.

    Tracks joined users and active sessions, runs the SchingOptimizer over
    the current network state, and dispatches the resulting walk / it-job
    rules to the actuator over the 'scher-acter' control channel.
    """
    # Shared event dispatcher used to push messages back to users.
    event_chief = EventChief()

    def __init__(self, xml_net_num, sching_logto):
        """
        xml_net_num -- network number passed (as a string) to XMLParser.
        sching_logto -- 'file' or 'console'; consumed by do_sching().
        """
        logging.basicConfig(level=logging.DEBUG)
        self.sching_logto = sching_logto
        if is_scheduler_run:
            self.xml_parser = XMLParser("net_2p_stwithsingleitr.xml", str(xml_net_num))
        else:
            self.xml_parser = XMLParser("ext/net_2p_stwithsingleitr.xml", str(xml_net_num))
        self.gm = GraphMan()
        self.init_network_from_xml()
        #Useful state variables
        self.last_sching_id_given = -1
        self.last_sch_req_id_given = -1
        self.last_tp_dst_given = info_dict['base_sport']-1
        #Scher state dicts
        self.num_dstusers = 0
        self.users_beingserved_dict = {} #user_ip:{'gw_dpid':<>,'gw_conn_port':<> ...}
        #
        self.N = 0 #num_activesessions
        self.alloc_dict = None
        self.sessions_beingserved_dict = {}
        self.sessions_pre_served_dict = {}
        self.sid_res_dict = {}
        self.actual_res_dict = self.gm.give_actual_resource_dict()
        #for perf plotting
        self.perf_plotter = PerfPlotter(self.actual_res_dict)
        #for control_comm
        self.cci = ControlCommIntf()
        self.cci.reg_commpair(sctag = 'scher-acter',
                              proto = 'tcp',
                              _recv_callback = self._handle_recvfromacter,
                              s_addr = info_dict['lacter_addr'],
                              c_addr = info_dict['acterl_addr'] )

    ######################### _handle_*** methods #######################
    def _handle_recvfromacter(self, msg):
        """Handle a message from the actuator; on 'sp_sching_reply'/'done',
        mark the session-parallel job done and forward the allocation to
        the producer user via event_chief."""
        #msg = [type_, data_]
        [type_, data_] = msg
        if type_ == 'sp_sching_reply':
            s_id, p_id = int(data_['s_id']), int(data_['p_id'])
            reply = data_['reply']
            if reply == 'done':
                #updating global dicts
                self.sessions_beingserved_dict[s_id]['sching_job_done'][p_id] = True
                #get s_alloc_info
                s_alloc_info = self.alloc_dict['s-wise'][s_id]
                s_pl = s_alloc_info['parism_level']
                #get s_info, dtsuserinfo
                s_info = self.sessions_beingserved_dict[s_id]
                p_ip = s_info['p_c_ip_list'][0]
                user_info = self.users_beingserved_dict[p_ip]
                # NOTE(review): this local 'info_dict' shadows the module-level
                # info_dict used elsewhere in this class -- intentional here,
                # but easy to confuse.
                info_dict = {'ip': p_ip,
                             'mac': user_info['mac'],
                             'gw_dpid': user_info['gw_dpid'],
                             'gw_conn_port': user_info['gw_conn_port'] }
                msg = json.dumps({'type':'sching_reply',
                                  'data':{'parism_level':s_pl,
                                          'p_bw':s_alloc_info['p_bw'][0:s_pl],
                                          'p_tp_dst':s_info['tp_dst_list'][0:s_pl] } })
                #fire send_sching_reply to the user
                Scheduler.event_chief.raise_event('send_msg_to_user',msg,info_dict)
            else:
                logging.error('Unexpected reply=%s', reply)

    def _handle_recvfromdtsuser(self, msg, dtsuserinfo_dict):
        """Handle 'join_req' / 'sching_req' messages coming from a dts user
        and answer via event_chief."""
        msg_ = check_msg('recv', 'dts-user', msg)
        if msg_ == None:
            print 'msg is not proto-good'
            return
        #msg_ = [type_, data_]
        [type_, data_] = msg_
        #
        src_ip = dtsuserinfo_dict['src_ip']
        src_mac = dtsuserinfo_dict['src_mac']
        src_gw_dpid = dtsuserinfo_dict['src_gw_dpid']
        src_gw_conn_port = dtsuserinfo_dict['src_gw_conn_port']
        info_dict = {'ip': src_ip,
                     'mac': src_mac,
                     'gw_dpid': src_gw_dpid,
                     'gw_conn_port': src_gw_conn_port}
        if type_ == 'join_req':
            if self.welcome_user(user_ip = src_ip,
                                 user_mac = src_mac,
                                 gw_dpid = src_gw_dpid,
                                 gw_conn_port = src_gw_conn_port ):
                msg = json.dumps({'type':'join_reply',
                                  'data':'welcome' })
                Scheduler.event_chief.raise_event('send_msg_to_user',msg,info_dict)
            else:
                #fire send_neg_reply to the user
                msg = json.dumps({'type':'join_reply',
                                  'data':'sorry' })
                Scheduler.event_chief.raise_event('send_msg_to_user',msg,info_dict)
        elif type_ == 'sching_req':
            if self.welcome_session(p_c_ip_list = [src_ip, data_['c_ip']],
                                    req_dict = data_['req_dict'],
                                    app_pref_dict = data_['app_pref_dict'] ):
                #TODO: for now ...
                self.do_sching()
            else:
                #fire send_neg_reply to the user
                msg = json.dumps({'type':'sching_reply',
                                  'data':'sorry' })
                Scheduler.event_chief.raise_event('send_msg_to_user',msg,info_dict)

    #################### scher_state_management methods #######################
    def print_scher_state(self):
        """Dump user/session state dicts to stdout (debug aid)."""
        print '<---------------------------------------->'
        print 'is_scheduler_run: ', is_scheduler_run
        print 'users_beingserved_dict:'
        pprint.pprint(self.users_beingserved_dict)
        print 'sessions_beingserved_dict:'
        pprint.pprint(self.sessions_beingserved_dict)
        print 'sessions_pre_served_dict:'
        pprint.pprint(self.sessions_pre_served_dict)
        print '<---------------------------------------->'

    def next_sching_id(self):
        """Return the next monotonically-increasing scheduling-run id (int)."""
        self.last_sching_id_given += 1
        return self.last_sching_id_given

    def next_sch_req_id(self):
        """Return the next unique session (sch_req) id (int)."""
        self.last_sch_req_id_given += 1
        return self.last_sch_req_id_given

    def next_tp_dst(self):
        """Return the next transport destination port (starts at base_sport)."""
        self.last_tp_dst_given += 1
        return self.last_tp_dst_given

    def did_user_joindts(self, user_ip):
        """True if user_ip has already joined (is being served)."""
        return user_ip in self.users_beingserved_dict

    def welcome_user(self, user_ip, user_mac, gw_dpid, gw_conn_port):
        """Register a new user; returns False if the ip already joined."""
        if self.did_user_joindts(user_ip):
            print 'user_ip=%s already joined' % user_ip
            return False
        #
        self.users_beingserved_dict.update({user_ip:{'gw_dpid':gw_dpid,
                                                     'gw_conn_port':gw_conn_port,
                                                     'mac': user_mac } })
        print 'welcome user: ip=%s, mac=%s, gw_dpid=%s, gw_conn_port=%s' % (user_ip,user_mac,gw_dpid,gw_conn_port)
        return True

    #not used now, for future
    def bye_user(self, user_ip):
        """Deregister a user; returns False if the ip was not joined."""
        if not self.did_user_joindts(user_ip):
            print 'user_ip=%s is not joined' % user_ip
            return False
        #
        del self.users_beingserved_dict[user_ip]
        print 'bye user: ip=%s' % user_ip
        return True

    def welcome_session(self, p_c_ip_list, req_dict, app_pref_dict):
        ''' Register a new session between joined producer/consumer users.

        sch_req_id: should be unique for every sch_session.
        Returns True on success, False if either endpoint has not joined.
        '''
        [p_ip, c_ip] = p_c_ip_list
        if not (self.did_user_joindts(p_ip) and self.did_user_joindts(c_ip)):
            print 'nonjoined user in sching_req'
            return False
        #
        p_c_gwtag_list = ['s'+str(self.users_beingserved_dict[p_ip]['gw_dpid']),
                          's'+str(self.users_beingserved_dict[c_ip]['gw_dpid']) ]
        #update global var, list and dicts
        self.N += 1
        s_pl = req_dict['parism_level']
        # One tp_dst port per parallel sub-stream.
        s_tp_dst_list = [self.next_tp_dst() for i in range(0,s_pl)]
        sch_req_id = self.next_sch_req_id()
        self.sessions_beingserved_dict.update(
            {sch_req_id:{'tp_dst_list': s_tp_dst_list,
                         'p_c_ip_list': p_c_ip_list,
                         'p_c_gwtag_list': p_c_gwtag_list,
                         'app_pref_dict': app_pref_dict,
                         'req_dict': req_dict,
                         'sching_job_done':[False]*s_pl } } )
        #print 'self.sessions_beingserved_dict: '
        #pprint.pprint(self.sessions_beingserved_dict)
        #
        return True

    def bye_session(self, sch_req_id):
        """Retire a session: move it to the pre_served dict."""
        self.N -= 1
        # Send sessions whose "sching job" is done is sent to pre_served category
        self.sessions_pre_served_dict.update(
            {sch_req_id: self.sessions_beingserved_dict[sch_req_id]})
        del self.sessions_beingserved_dict[sch_req_id]

    def init_network_from_xml(self):
        """Load nodes/edges from the XML topology into the graph manager."""
        node_edge_lst = self.xml_parser.give_node_and_edge_list_from_xml()
        #print 'node_lst:'
        #pprint.pprint(node_edge_lst['node_lst'])
        #print 'edge_lst:'
        #pprint.pprint(node_edge_lst['edge_lst'])
        self.gm.graph_add_nodes(node_edge_lst['node_lst'])
        self.gm.graph_add_edges(node_edge_lst['edge_lst'])

    ######################### sching_rel methods ###############################
    def update_sid_res_dict(self):
        """ Network resources will be only the ones on the session_shortest path.
            It resources need to lie on the session_shortest path.
        """
        logging.info('update_sid_res_dict:')
        #TODO: sessions whose resources are already specified no need for putting them in the loop
        for s_id in self.sessions_beingserved_dict:
            p_c_gwdpid_list = self.sessions_beingserved_dict[s_id]['p_c_gwtag_list']
            s_all_paths = self.gm.give_all_paths(p_c_gwdpid_list[0], p_c_gwdpid_list[1])
            #print out all_paths for debugging
            dict_ = {i:p for i,p in enumerate(s_all_paths)}
            logging.info('s_id=%s, all_paths=\n%s', s_id, pprint.pformat(dict_))
            #
            for i,p in dict_.items():
                p_net_edge_list = self.gm.pathlist_to_netedgelist(p)
                p_itres_list = self.gm.give_itreslist_on_path(p)
                if not (s_id in self.sid_res_dict):
                    self.sid_res_dict[s_id] = {'s_info':{}, 'ps_info':{}}
                self.sid_res_dict[s_id]['ps_info'].update(
                    {i: {'path': p,
                         'net_edge_list': p_net_edge_list,
                         'itres_list': p_itres_list } })

    def do_sching(self):
        ''' Currently for active sessions, gets things together to work sching logic
            and then sends corresponding walk/itjob rules to correspoding actuator
            - which is a single actuator right now !
        '''
        sching_id = self.next_sching_id()
        if self.sching_logto == 'file':
            # NOTE(review): sching_id is an int (see next_sching_id), so this
            # str+int concatenation raises TypeError -- should be str(sching_id).
            # Also, logging.basicConfig is a no-op once the root logger is
            # configured (it is, in __init__), so the file handler is never added.
            fname = 'ext/sching_decs/sching_'+sching_id+'.log'
            logging.basicConfig(filename=fname,filemode='w',level=logging.DEBUG)
        elif self.sching_logto == 'console':
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.error('Unexpected sching_logto=%s', self.sching_logto)
            return
        #
        logging.info('<<< sching_id=%s starts >>>', sching_id)
        self.update_sid_res_dict()
        sching_opter = SchingOptimizer(self.sessions_beingserved_dict,
                                       self.actual_res_dict,
                                       self.sid_res_dict )
        sching_opter.solve()
        self.alloc_dict = sching_opter.get_sching_result()
        logging.info('alloc_dict=\n%s', pprint.pformat(self.alloc_dict))
        #
        """
        logging.info('saving sching_dec to figs...')
        self.perf_plotter.save_sching_result(g_info_dict = self.alloc_dict['general'],
                                             s_info_dict = self.alloc_dict['s-wise'],
                                             res_info_dict = self.alloc_dict['res-wise'])
        """
        #Convert sching decs to rules
        for s_id in range(0,self.N):
            s_allocinfo_dict = self.alloc_dict['s-wise'][s_id]
            #
            itwalkinfo_dict = s_allocinfo_dict['itwalkinfo_dict']
            p_walk_dict = s_allocinfo_dict['pwalk_dict']
            # One rule set per parallel sub-stream of the session.
            for p_id in range(0,s_allocinfo_dict['parism_level']):
                p_walk = p_walk_dict[p_id]
                sp_walk__tprrule = \
                    self.get_spwalkrule__sptprrule(s_id, p_id,
                                                   p_walk = p_walk,
                                                   pitwalkbundle_dict = itwalkinfo_dict[p_id])
                logging.info('for s_id=%s, p_id=%s;', s_id, p_id)
                #print 'walkrule:'
                #pprint.pprint(sp_walk__tprrule['walk_rule'])
                #print 'itjob_rule:'
                #pprint.pprint(sp_walk__tprrule['itjob_rule'])
                #
                #Dispatching rule to actuator_actuator
                msg = json.dumps({'type':'sp_sching_dec',
                                  'data':{'s_id':s_id,
                                          'p_id':p_id,
                                          'walk_rule':sp_walk__tprrule['walk_rule'],
                                          'itjob_rule':sp_walk__tprrule['itjob_rule']} })
                self.cci.send_to_client('scher-acter', msg)
        logging.info('<<< sching_id=%s ends >>>', sching_id)

    def get_spwalkrule__sptprrule(self,s_id,p_id,p_walk,pitwalkbundle_dict):
        """Convert one parallel sub-stream walk into forward/reverse switch
        rules plus per-dpid transcoder (tpr) job rules.

        Returns {'walk_rule': [...], 'itjob_rule': {dpid: [...]}}.
        """
        def get_swportname(dpid, port):
            # e.g. (3, 2) -> 's3-eth2'
            return 's'+str(dpid)+'-eth'+str(port)
        #print '---> for s_id:%i' % s_id
        #print 'pitwalkbundle_dict:'
        #pprint.pprint(pitwalkbundle_dict)
        #print 'p_walk: ', p_walk
        s_info_dict = self.sessions_beingserved_dict[s_id]
        s_tp_dst = s_info_dict['tp_dst_list'][p_id]
        p_c_ip_list = s_info_dict['p_c_ip_list']
        #
        itjob_rule_dict = {}
        #
        walk_rule = []
        cur_from_ip = p_c_ip_list[0]
        cur_to_ip = p_c_ip_list[1]
        duration = 50
        cur_node_str = None
        for i,node_str in list(enumerate(p_walk)):#node = next_hop
            if i == 0:
                cur_node_str = node_str
                #for adding reverse-walk rule for p_gw_sw
                user_info_dict = self.users_beingserved_dict[cur_from_ip]
                swportname = get_swportname(dpid = user_info_dict['gw_dpid'],
                                            port = user_info_dict['gw_conn_port'])
                node = self.gm.get_node(node_str)
                walk_rule.append({'conn':[node['dpid'],cur_to_ip],
                                  'typ':'forward',
                                  'wc':[cur_to_ip,p_c_ip_list[0],int(s_tp_dst)],
                                  'rule':[swportname, duration] })
                #
                continue
            cur_node = self.gm.get_node(cur_node_str)
            # A 't' (transcoder) node emits no rule of its own; skip ahead.
            if cur_node['type'] == 't':
                cur_node_str = node_str
                continue
            #
            node = self.gm.get_node(node_str)
            edge = self.gm.get_edge(cur_node_str, node_str)
            if node['type'] == 't': #sw-t
                walk_rule.append({'conn':[cur_node['dpid'],cur_from_ip],
                                  'typ':'modify_forward',
                                  'wc':[cur_from_ip,cur_to_ip,int(s_tp_dst)],
                                  'rule':[node['ip'],node['mac'],edge['pre_dev'],duration] })
                # NOTE(review): the two branches are inconsistent -- the else
                # branch appends a *list* [{...}] into the dpid's list, nesting
                # it one level deeper than the first entry; confirm intent.
                if not (cur_node['dpid'] in itjob_rule_dict):
                    itjob_rule_dict[cur_node['dpid']] = [{ 'tpr_ip':node['ip'],
                                                           'tpr_mac':node['mac'],
                                                           'swdev_to_tpr':edge['pre_dev'],
                                                           'assigned_job': pitwalkbundle_dict['itbundle'][node_str],
                                                           'session_tp': int(s_tp_dst),
                                                           'consumer_ip': cur_to_ip,
                                                           'datasize': pitwalkbundle_dict['p_info']['datasize'] }]
                else:
                    itjob_rule_dict[cur_node['dpid']].append( [{ 'tpr_ip':node['ip'],
                                                                 'tpr_mac':node['mac'],
                                                                 'swdev_to_tpr':edge['pre_dev'],
                                                                 'assigned_job':pitwalkbundle_dict['itbundle'][node_str],
                                                                 'session_tp': int(s_tp_dst),
                                                                 'consumer_ip': cur_to_ip,
                                                                 'datasize': pitwalkbundle_dict['p_info']['datasize'] }] )
                cur_from_ip = node['ip']
            elif node['type'] == 'sw': #sw-sw
                walk_rule.append({'conn':[cur_node['dpid'],cur_from_ip],
                                  'typ':'forward',
                                  'wc':[cur_from_ip,cur_to_ip,int(s_tp_dst)],
                                  'rule':[edge['pre_dev'], duration] })
                # NOTE(review): bare expression statement -- has no effect;
                # probably a leftover from an edit.
                cur_from_ip
                #for reverse walk: data from c to p
                walk_rule.append({'conn':[node['dpid'],cur_to_ip],
                                  'typ':'forward',
                                  'wc':[cur_to_ip,p_c_ip_list[0],int(s_tp_dst)],
                                  'rule':[edge['post_dev'], duration] })
                '''
                #to deliver sch_response to src
                walk_rule.append({'conn':[node['dpid'],info_dict['scher_vip']],
                                  'typ':'forward',
                                  'wc':[info_dict['scher_vip'],p_c_ip_list[0], info_dict['sching_port']],
                                  'rule':[edge['post_dev'], duration] })
                '''
            else:
                raise KeyError('Unknown node_type')
            cur_node_str = node_str
        #default rule to forward packet to consumer
        user_info_dict = self.users_beingserved_dict[cur_to_ip]
        swportname = get_swportname(dpid = user_info_dict['gw_dpid'],
                                    port = user_info_dict['gw_conn_port'])
        walk_rule.append({'conn':[user_info_dict['gw_dpid'],cur_from_ip],
                          'typ':'forward',
                          'wc':[cur_from_ip,cur_to_ip,int(s_tp_dst)],
                          'rule':[swportname,duration] })
        """
        #default rule to forward sch_response to producer
        walk_rule.append({'conn':[11,info_dict['scher_vip']],
                          'typ':'forward',
                          'wc':[info_dict['scher_vip'],p_c_ip_list[0]],
                          'rule':['s11-eth1',duration] })
        """
        return {'walk_rule':walk_rule, 'itjob_rule':itjob_rule_dict}

    ##############################################################################
    def test(self):
        """Self-test: welcome two hard-coded users, open one session, run
        do_sching."""
        userinfo_list = [ {'user_ip':'10.0.0.2','user_mac':'00:00:00:01:00:02','gw_dpid':11,'gw_conn_port':1},
                          {'user_ip':'10.0.0.1','user_mac':'00:00:00:01:00:01','gw_dpid':12,'gw_conn_port':2} ]
        for userinfo in userinfo_list:
            self.welcome_user(user_ip = userinfo['user_ip'],
                              user_mac = userinfo['user_mac'],
                              gw_dpid = userinfo['gw_dpid'],
                              gw_conn_port = userinfo['gw_conn_port'] )
        #
        num_session = 1
        req_dict_list = [ {'data_size':4, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':1, 'par_share':[1]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                          {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3'], 'parism_level':2, 'par_share':[0.5, 0.5]},
                        ]
        app_pref_dict_list = [ {'m_p': 10,'m_u': 1,'x_p': 0,'x_u': 0},
                               {'m_p': 10,'m_u': 0.5,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 0.5,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0},
                               {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0},
                             ]
        p_c_ip_list_list = [ ['10.0.0.2','10.0.0.1'], ]
        for i in range(0, num_session):
            self.welcome_session(p_c_ip_list = p_c_ip_list_list[0],
                                 req_dict = req_dict_list[i],
                                 app_pref_dict = app_pref_dict_list[i] )
        self.do_sching()
class MainApp(object):
    """ Main BootStrap Application.

    Parses the command line, merges in options from the XML configuration
    file (command-line options win), then dispatches the requested command
    to a BootStrap instance for each listed module.
    """
    _args = [] #: Defines extra args to be appended to the cmd
    _options = {
        'global': {
            'BootStrap.downloadPath': 'downloads',
            'BootStrap.outputPath': 'depends',
            'BootStrap.prefix': 'runtime',
            'BootStrap.patches': 'patches',
            'BootStrap.moduleStatus': 'BootStrap.status.xml',
            'BootStrap.manifest': 'config/bootstrap.xml',
            'BootStrap.branch': 'stable'
        }
    } #: Store all options (init to defaults)
    _configFile = None #: Defines configuration file
    _modules = [] #: The list of modules were bootstrap operates
    _command = None #: The command to run
    _config = None #: Parsed configuration (XMLParser instance)
    _bootStrap = None #: An instance of the BootStrap class

    def __init__(self, args=None):
        """ @param args: App Args """
        self._run(args)

    def _run(self, args=None):
        """ Run it

        @param args: App Args """
        if args is not None:
            self._parseArgs(args)
        self._readConfig()
        self._setCmdConfig()
        #print self._options
        self._bootStrap = BootStrap(self._options)
        self._runcmd()

    def _addCmdOption(self, key, value):
        """ Adds a new command line option """
        if "cmd" not in self._options:
            self._options["cmd"] = {}
        self._options["cmd"][key] = value

    def _parseArgs(self, argv):
        """ Parse configuration args

        @param argv: The args (argv[0] is the program name and is skipped)

        BUGFIX: the original iterated with ``for i in xrange(1, n + 1)`` and
        incremented ``i`` in the body to skip an option's value; rebinding a
        for-loop variable has no effect on the iteration, so option values
        (e.g. the file after ``-c``) were also collected as modules and the
        first of them became the command.  A while-loop makes the skip real.
        """
        n = len(argv) - 1
        i = 1
        while i <= n:
            param = argv[i]
            if param.startswith("-"):
                if param == "-c" and i < n:
                    # -c <configFile>
                    i += 1
                    self._configFile = argv[i]
                elif param == "--prefix" and i < n:
                    # --prefix <path>
                    i += 1
                    self._addCmdOption("BootStrap.prefix", argv[i])
                elif param == "-o" and i < n - 1:
                    # -o <key> <value>
                    i += 1
                    self._addCmdOption(argv[i], argv[i + 1])
                    i += 1
                else:
                    # Unknown dash-option: pass through to the command.
                    self._args.append(param)
            else:
                self._modules.append(param)
            i += 1
        # First non-option token is the command; the rest are modules.
        if len(self._modules) != 0:
            self._command = self._modules.pop(0)

    def _readConfig(self):
        """ Reads the configuration file """
        if self._configFile is not None:
            self._config = XMLParser()
            self._config.readfile(self._configFile)
            for section in self._config.xsd7config[0].xsection:
                for option in section.xoption:
                    if section.pname not in self._options:
                        self._options[section.pname] = {}
                    self._options[section.pname][option.pname] = option.pvalue

    def _setCmdConfig(self):
        """ Command line passed args, are more prioritary """
        if "cmd" in self._options:
            for opt in self._options["cmd"]:
                self._options["global"][opt] = self._options["cmd"][opt]

    def _runcmd(self):
        """ Runs a cmd with args for the specified modules """
        cmd = self._command
        if cmd is None or cmd == "help":
            print("""sd7 BootStrap Application
bootstrap/bootstrap.py [options] command <module1> <module2> <module3>
  get <module1> <module2> ... Downloads the module(s)
      -f : Forces re-download
  update <module1> <module2> ... Updates the module(s)
  remove <module1> <module2> ... Removes the module(s)
  patch <module1> <module2> ... Patches the module(s)
  build <module1> <module2> ... Builds the module(s)
  clean <module1> <module2> ... Cleans the module(s)
  install <module1> <module2> ... Installs the module(s)
  auto <module1> <module2> ... Automatically builds and installs the module(s)
""")
            return
        if len(self._modules) == 0:
            self._modules = [ self._bootStrap.getDefaultTarget(), ]
        for m in self._modules:
            if hasattr(self._bootStrap, cmd):
                getattr(self._bootStrap, cmd)(m, self._args)
            else:
                print("Unknown command %s" % (cmd, ))
class Scheduler(object): #event_chief = EventChief() def __init__(self, xml_network_number): self.gm = GraphMan() if is_scheduler_run: self.xml_parser = XMLParser("net_xmls/net_2p_stwithsingleitr.xml", str(xml_network_number)) else: self.xml_parser = XMLParser( "ext/net_xmls/net_2p_stwithsingleitr.xml", str(xml_network_number)) self.init_network_from_xml() #Useful state variables self.last_sch_req_id_given = -1 self.last_tp_dst_given = info_dict['base_sport'] - 1 #Scher state dicts self.N = 0 #num_activesessions self.sessions_beingserved_dict = {} self.sessions_pre_served_dict = {} self.sid_res_dict = {} self.actual_res_dict = self.gm.give_actual_resource_dict() #for perf plotting self.perf_plotter = PerfPlotter(self.actual_res_dict) #for control_comm self.cci = ControlCommIntf() self.cci.reg_commpair(sctag='scher-acter', proto='tcp', _recv_callback=self._handle_recvfromacter, s_addr=info_dict['lacter_addr'], c_addr=info_dict['acterl_addr']) def _handle_recvfromacter(self, msg): #msg = [type_, data_] print 'recvedfromacter msg=%s' % msg def print_scher_state(self): print '<-------------------H--------------------->' print 'is_scheduler_run: ', is_scheduler_run print 'sessions_beingserved_dict:' pprint.pprint(self.sessions_beingserved_dict) print 'sessions_pre_served_dict:' pprint.pprint(self.sessions_pre_served_dict) print '<-------------------E--------------------->' def next_sch_req_id(self): self.last_sch_req_id_given += 1 return self.last_sch_req_id_given def next_tp_dst(self): self.last_tp_dst_given += 1 return self.last_tp_dst_given def welcome_session(self, p_c_ip_list, p_c_gw_list, req_dict, app_pref_dict): """ sch_req_id: should be unique for every sch_session """ #update global var, list and dicts self.N += 1 s_tp_dst_list = [ self.next_tp_dst() for i in range(0, req_dict['parism_level']) ] sch_req_id = self.next_sch_req_id() self.sessions_beingserved_dict.update({ sch_req_id: { 'tp_dst_list': s_tp_dst_list, 'req_dict': req_dict, 'p_c_ip_list': 
p_c_ip_list, 'p_c_gw_list': p_c_gw_list, 'app_pref_dict': app_pref_dict, 'sch_job_done': False } }) #print 'self.sessions_beingserved_dict: ' #pprint.pprint(self.sessions_beingserved_dict) def bye_session(self, sch_req_id): self.N -= 1 # Send sessions whose "sching job" is done is sent to pre_served category self.sessions_pre_served_dict.update( {sch_req_id: self.sessions_beingserved_dict[sch_req_id]}) del self.sessions_beingserved_dict[sch_req_id] def update_sid_res_dict(self): """ Network resources will be only the ones on the session_shortest path. It resources need to lie on the session_shortest path. """ print '------ update_sid_res_dict ------' #TODO: sessions whose resources are already specified no need for putting them in the loop for s_id in self.sessions_beingserved_dict: p_c_gw_list = self.sessions_beingserved_dict[s_id]['p_c_gw_list'] s_all_paths = self.gm.give_all_paths(p_c_gw_list[0], p_c_gw_list[1]) #print out all_paths for debugging dict_ = {i: p for i, p in enumerate(s_all_paths)} print 's_id:%i, all_paths:' % s_id pprint.pprint(dict_) # for i, p in dict_.items(): p_net_edge_list = self.gm.pathlist_to_netedgelist(p) p_itres_list = self.gm.give_itreslist_on_path(p) if not (s_id in self.sid_res_dict): self.sid_res_dict[s_id] = {'s_info': {}, 'ps_info': {}} self.sid_res_dict[s_id]['ps_info'].update({ i: { 'path': p, 'net_edge_list': p_net_edge_list, 'itres_list': p_itres_list } }) print '---------------- OOO ----------------' def do_sching(self): ''' For currently active sessions, get things together to work sching logic and then send corresponding rules to correspoding actuator (which is a single actuator right now !) 
''' alloc_dict = self.allocate_resources() print '---------------SCHING Started ---------------' print 'alloc_dict:' pprint.pprint(alloc_dict) #''' self.perf_plotter.save_sching_result(alloc_dict['general'], alloc_dict['s-wise'], alloc_dict['res-wise']) #''' #Convert sching decs to rules print '**** self.N: ', self.N for s_id in range(0, self.N): s_allocinfo_dict = alloc_dict['s-wise'][s_id] # itwalkinfo_dict = s_allocinfo_dict['itwalkinfo_dict'] p_walk_dict = s_allocinfo_dict['pwalk_dict'] for p_id in range(0, s_allocinfo_dict['parism_level']): p_walk = p_walk_dict[p_id] #Dispatching rule to actuator_actuator sp_walk__tprrule = \ self.get_spwalkrule__sptprrule(s_id, p_id, p_walk = p_walk, pitwalkbundle_dict = itwalkinfo_dict[p_id]) print 'for s_id:%i, p_id:%i;' % (s_id, p_id) #print 'walkrule:' #pprint.pprint(sp_walk__tprrule['walk_rule']) #print 'itjob_rule:' #pprint.pprint(sp_walk__tprrule['itjob_rule']) msg = json.dumps({ 'type': 'sp_sching_dec', 'data': { 's_id': s_id, 'p_id': p_id, 'walk_rule': sp_walk__tprrule['walk_rule'], 'itjob_rule': sp_walk__tprrule['itjob_rule'] } }) self.cci.send_to_client('scher-acter', msg) print '---------------SCHING End---------------' def get_spwalkrule__sptprrule(self, s_id, p_id, p_walk, pitwalkbundle_dict): #print '---> for s_id:%i' % s_id #print 'pitwalkbundle_dict:' #pprint.pprint(pitwalkbundle_dict) #print 'p_walk: ', p_walk s_info_dict = self.sessions_beingserved_dict[s_id] s_tp_dst = s_info_dict['tp_dst_list'][p_id] p_c_ip_list = s_info_dict['p_c_ip_list'] # itjob_rule_dict = {} # walk_rule = [] cur_from_ip = p_c_ip_list[0] cur_to_ip = p_c_ip_list[1] duration = 50 cur_node_str = None for i, node_str in list(enumerate(p_walk)): #node = next_hop if i == 0: cur_node_str = node_str #for adding reverse-walk rule for gw_sw #TODO: 's11-eth1' must be gotten autonomously node = self.gm.get_node(node_str) walk_rule.append({ 'conn': [node['dpid'], cur_to_ip], 'typ': 'forward', 'wc': [cur_to_ip, p_c_ip_list[0], int(s_tp_dst)], 
'rule': ['s11-eth1', duration] }) # continue cur_node = self.gm.get_node(cur_node_str) if cur_node['type'] == 't': cur_node_str = node_str continue # node = self.gm.get_node(node_str) edge = self.gm.get_edge(cur_node_str, node_str) if node['type'] == 't': #sw-t walk_rule.append({ 'conn': [cur_node['dpid'], cur_from_ip], 'typ': 'modify_forward', 'wc': [cur_from_ip, cur_to_ip, int(s_tp_dst)], 'rule': [node['ip'], node['mac'], edge['pre_dev'], duration] }) if not (cur_node['dpid'] in itjob_rule_dict): itjob_rule_dict[cur_node['dpid']] = [{ 'tpr_ip': node['ip'], 'tpr_mac': node['mac'], 'swdev_to_tpr': edge['pre_dev'], 'assigned_job': pitwalkbundle_dict['itbundle'][node_str], 'session_tp': int(s_tp_dst), 'consumer_ip': cur_to_ip, 'datasize': pitwalkbundle_dict['p_info']['datasize'] }] else: itjob_rule_dict[cur_node['dpid']].append([{ 'tpr_ip': node['ip'], 'tpr_mac': node['mac'], 'swdev_to_tpr': edge['pre_dev'], 'assigned_job': pitwalkbundle_dict['itbundle'][node_str], 'session_tp': int(s_tp_dst), 'consumer_ip': cur_to_ip, 'datasize': pitwalkbundle_dict['p_info']['datasize'] }]) cur_from_ip = node['ip'] elif node['type'] == 'sw': #sw-sw walk_rule.append({ 'conn': [cur_node['dpid'], cur_from_ip], 'typ': 'forward', 'wc': [cur_from_ip, cur_to_ip, int(s_tp_dst)], 'rule': [edge['pre_dev'], duration] }) cur_from_ip #for reverse walk: data from c to p walk_rule.append({ 'conn': [node['dpid'], cur_to_ip], 'typ': 'forward', 'wc': [cur_to_ip, p_c_ip_list[0], int(s_tp_dst)], 'rule': [edge['post_dev'], duration] }) ''' #to deliver sch_response to src walk_rule.append({'conn':[node['dpid'],info_dict['scher_vip']], 'typ':'forward', 'wc':[info_dict['scher_vip'],p_c_ip_list[0], info_dict['sching_tp_dst']], 'rule':[edge['post_dev'], duration] }) ''' else: raise KeyError('Unknown node_type') cur_node_str = node_str #default rule to forward packet to consumer #TODO: 's12-eth2' must be gotten autonomously walk_rule.append({ 'conn': [12, cur_from_ip], 'typ': 'forward', 'wc': [cur_from_ip, 
cur_to_ip, int(s_tp_dst)], 'rule': ['s12-eth2', duration] }) """ #default rule to forward sch_response to producer walk_rule.append({'conn':[11,info_dict['scher_vip']], 'typ':'forward', 'wc':[info_dict['scher_vip'],p_c_ip_list[0]], 'rule':['s11-eth1',duration] }) """ return {'walk_rule': walk_rule, 'itjob_rule': itjob_rule_dict} def allocate_resources(self): #returns alloc_dict self.update_sid_res_dict() sching_opter = SchingOptimizer(self.sessions_beingserved_dict, self.actual_res_dict, self.sid_res_dict) sching_opter.solve() # return sching_opter.get_sching_result() def init_network_from_xml(self): node_edge_lst = self.xml_parser.give_node_and_edge_list_from_xml() #print 'node_lst:' #pprint.pprint(node_edge_lst['node_lst']) #print 'edge_lst:' #pprint.pprint(node_edge_lst['edge_lst']) self.gm.graph_add_nodes(node_edge_lst['node_lst']) self.gm.graph_add_edges(node_edge_lst['edge_lst']) def test(self): num_session = 2 ''' sr1: {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3']} {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0} ''' ''' {'data_size':2, 'slack_metric':60, 'func_list':['f1', 'f2']}, {'data_size':1, 'slack_metric':24, 'func_list':['f1','f2','f3']}, {'data_size':0.1, 'slack_metric':4, 'func_list':['f1']}, ''' req_dict_list = [ { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 1, 'par_share': [1] }, { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, { 'data_size': 1, 'slack_metric': 24, 
'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, { 'data_size': 1, 'slack_metric': 24, 'func_list': ['f1', 'f2', 'f3'], 'parism_level': 2, 'par_share': [0.5, 0.5] }, ] """ app_pref_dict_list = [ {'m_p': 1,'m_u': 0.1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, {'m_p': 1,'m_u': 1,'x_p': 0,'x_u': 0}, ] """ app_pref_dict_list = [ { 'm_p': 10, 'm_u': 1, 'x_p': 0, 'x_u': 0 }, { 'm_p': 10, 'm_u': 0.5, 'x_p': 0, 'x_u': 0 }, { 'm_p': 1, 'm_u': 0.5, 'x_p': 0, 'x_u': 0 }, { 'm_p': 1, 'm_u': 1, 'x_p': 0, 'x_u': 0 }, { 'm_p': 1, 'm_u': 1, 'x_p': 0, 'x_u': 0 }, { 'm_p': 1, 'm_u': 1, 'x_p': 0, 'x_u': 0 }, { 'm_p': 1, 'm_u': 1, 'x_p': 0, 'x_u': 0 }, { 'm_p': 1, 'm_u': 1, 'x_p': 0, 'x_u': 0 }, { 'm_p': 1, 'm_u': 1, 'x_p': 0, 'x_u': 0 }, { 'm_p': 1, 'm_u': 1, 'x_p': 0, 'x_u': 0 }, ] p_c_ip_list_list = [ ['10.0.0.2', '10.0.0.1'], ] p_c_gw_list_list = [ ['s11', 's12'], ] for i in range(0, num_session): self.welcome_session(p_c_ip_list_list[0], p_c_gw_list_list[0], req_dict_list[i], app_pref_dict_list[i]) self.do_sching() """