def dp_join(self, dp, stats):
    """Handle a datapath (switch) join event.

    Records the switch's feature stats, normalizes the reported port
    hardware addresses to ASCII, initializes the per-switch statistic
    polling periods and schedules the periodic table/port statistic
    requests.

    dp    -- datapath id of the joining switch (host byte order int)
    stats -- dict of switch features; mutated in place (a 'dpid' key is
             added and each port's 'hw_addr' is rewritten)

    Returns CONTINUE so other registered handlers also see the event.
    """
    stats['dpid'] = dp
    self.dp_stats[dp] = stats

    # Convert all port hw_addrs to ASCII ('aa-bb-...' form) so they can
    # be registered with the bindings storage.
    for port in self.dp_stats[dp]['ports']:
        port['hw_addr'] = mac_to_str(port['hw_addr']).replace(':', '-')

    # Polling intervals for switch statistics.
    self.dp_poll_period[dp] = {
        'table': DEFAULT_POLL_TABLE_PERIOD,
        'port': DEFAULT_POLL_PORT_PERIOD,
        'aggr': DEFAULT_POLL_AGGREGATE_PERIOD,
    }

    # Switch descriptions do not change while connected, so just send once.
    self.ctxt.send_desc_stats_request(dp)

    # Stagger the timers by one second so the requests don't all fire at
    # the same instant.
    self.post_callback(self.dp_poll_period[dp]['table'],
                       lambda: self.table_timer(dp))
    self.post_callback(self.dp_poll_period[dp]['port'] + 1,
                       lambda: self.port_timer(dp))
    return CONTINUE
def __dpid_request(self):
    """Pop one datapath id off the internal queue and start an
    asynchronous flow fetch for it; failures are logged, not raised."""
    try:
        raw = self._queue.pop()
        dpid = datapathid.from_host(long(raw))
        FFLOG.debug("Request flows for dpid=%s", str(dpid))
        # The lambda closes over 'ff'; the binding completes before the
        # fetch callback can fire.
        ff = self._ffa.fetch(dpid, {},
                             lambda: self.__flows_replay(dpid, ff))
    except Exception as e:
        FFLOG.error(str(e))
def _test_flow_fetcher(self, request, arg):
    """Webservice test hook: parse a flow-stats request body and start
    an asynchronous flow fetch for the named datapath.

    The 'dpid' field of the JSON body is a hex string.  On any parse or
    fetch error an error response is written to the request.
    """
    try:
        flow_stats_request = json_parse_message_body(request)
        dpid = datapathid.from_host(long(flow_stats_request['dpid'], 16))
        # 'ff' is captured by the lambda and is bound before the fetch
        # callback can fire.
        ff = self.ffa.fetch(dpid, flow_stats_request,
                            lambda: report_results(ff))
    # Use the modern 'except' form (the old 'except Exception, e' syntax
    # was inconsistent with the rest of the file); 'e' was never used.
    except Exception:
        return self.err(Failure(), request, "_test_flow_fetcher",
                        "Could not request flows.")
def do_location_lookup(self, dpid, port):
    """Asynchronously resolve the location name(s) bound to (dpid, port).

    dpid -- datapath id (host byte order int)
    port -- port number, or None to match any port on the switch

    Returns a Deferred firing with the list of location names (possibly
    empty).  Directory errors are logged by the errback.
    """
    def err(res):
        lg.error("error on directory query: %s" % str(res))

    def ok(loc_names):
        if (len(loc_names) == 0):
            lg.error("couldn't find loc name for dp = %s port = %s"
                     % (dpid, port))
        return loc_names

    query_dict = {"dpid": datapathid.from_host(dpid)}
    if port != None:
        query_dict["port"] = port
    d = self.dm.search_principals(Directory.LOCATION_PRINCIPAL, query_dict)
    # Fix: 'ok' was defined but never attached, so the empty-result
    # diagnostic never ran; it passes loc_names through unchanged.
    d.addCallback(ok)
    d.addErrback(err)
    return d
def do_location_lookup(self, dpid, port):
    """Asynchronously resolve the location name(s) bound to (dpid, port).

    dpid -- datapath id (host byte order int)
    port -- port number, or None to match any port on the switch

    Returns a Deferred firing with the list of location names (possibly
    empty).  Directory errors are logged by the errback.
    """
    def err(res):
        lg.error("error on directory query: %s" % str(res))

    def ok(loc_names):
        if(len(loc_names) == 0):
            lg.error("couldn't find loc name for dp = %s port = %s"
                     % (dpid, port))
        return loc_names

    query_dict = {"dpid": datapathid.from_host(dpid)}
    if port != None:
        query_dict["port"] = port
    d = self.dm.search_principals(Directory.LOCATION_PRINCIPAL, query_dict)
    # Fix: 'ok' was defined but never attached, so the empty-result
    # diagnostic never ran; it passes loc_names through unchanged.
    d.addCallback(ok)
    d.addErrback(err)
    return d
def __init__(self, request, hitter_list, dirman, num_req):
    """Kick off a switch-name lookup for every dpid in hitter_list and
    arrange for switch_names_resolved to run when all queries finish."""
    self.dm = dirman
    self.request = request
    self.hitter_list = hitter_list
    self.num_requested = num_req
    pending = [
        self.dm.search_principals(
            Directory.SWITCH_PRINCIPAL,
            {'dpid': datapathid.from_host(entry[0])})
        for entry in hitter_list
    ]
    dlist = defer.DeferredList(pending, consumeErrors=True)
    dlist.addCallback(self.switch_names_resolved)
    dlist.addErrback(self.err, self.request, "dp_name_resolve_fsm",
                     "Could not retrieve switch names.")
def __init__(self, request, hitter_list, dirman, num_req):
    """Start a location-name lookup for every (dpid, port, value) entry
    in hitter_list and arrange for loc_search_done to run when all the
    directory queries have finished."""
    self.dm = dirman
    self.request = request
    self.hitter_list = []
    self.num_requested = num_req
    pending = []
    for entry in hitter_list:
        dpid_obj = datapathid.from_host(entry[0])
        lookup = {'dpid': dpid_obj, 'port': entry[1]}
        pending.append(
            self.dm.search_principals(Directory.LOCATION_PRINCIPAL, lookup))
        # Remember the original entry, its value and the dpid object so
        # the completion callback can pair results back up.
        self.hitter_list.append([entry, {"value": entry[2]}, dpid_obj])
    dlist = defer.DeferredList(pending, consumeErrors=True)
    dlist.addCallback(self.loc_search_done)
    dlist.addErrback(self.err, self.request, "loc_name_resolve_fsm",
                     "Could not retrieve location names.")
def __init__(self, request, hitter_list, dirman, num_req):
    """Kick off a switch-name lookup for every dpid in hitter_list and
    arrange for switch_names_resolved to run when all queries finish."""
    self.dm = dirman
    self.request = request
    self.hitter_list = hitter_list
    self.num_requested = num_req
    pending = [
        self.dm.search_principals(
            Directory.SWITCH_PRINCIPAL,
            {'dpid': datapathid.from_host(entry[0])})
        for entry in hitter_list
    ]
    dlist = defer.DeferredList(pending, consumeErrors=True)
    dlist.addCallback(self.switch_names_resolved)
    dlist.addErrback(self.err, self.request, "dp_name_resolve_fsm",
                     "Could not retrieve switch names.")
def dp_join(self, dp, stats):
    """Handle a datapath join: track the dpid in the module-level
    dp_list, record the switch's feature stats, normalize port MACs and
    start the statistics polling timers (staggered so they don't all
    fire at once).

    dp    -- datapath id of the joining switch (host byte order int)
    stats -- dict of switch features; mutated in place

    Returns CONTINUE so later handlers also see the event.
    """
    lg.warn('############### dp_join ###############\n')
    # Keep dp_list sorted and free of duplicates.
    if dp in dp_list:
        dp_list.remove(dp)
    lg.warn(dp)
    lg.warn('#######################################\n')
    dp_list.append(dp)
    dp_list.sort()
    stats['dpid'] = dp
    self.dp_stats[dp] = stats
    # Convert all port hw_addrs to ASCII ('aa-bb-...' form) so they can
    # be registered with the bindings storage.
    for port in self.dp_stats[dp]['ports']:
        port['hw_addr'] = mac_to_str(port['hw_addr']).replace(':', '-')
    # Polling intervals for switch statistics.
    self.dp_poll_period[dp] = {
        'table': DEFAULT_POLL_TABLE_PERIOD,
        'port': DEFAULT_POLL_PORT_PERIOD,
        'aggr': DEFAULT_POLL_AGGREGATE_PERIOD,
        'flow': DEFAULT_POLL_FLOW_PERIOD,
    }
    # Switch descriptions do not change while connected, so just send once
    #self.ctxt.send_desc_stats_request(dp)
    # Stagger the timers so the periodic requests are spread in time.
    self.post_callback(self.dp_poll_period[dp]['table'],
                       lambda: self.table_timer(dp))
    self.post_callback(self.dp_poll_period[dp]['port'] + 0.5,
                       lambda: self.port_timer(dp))
    self.post_callback(self.dp_poll_period[dp]['aggr'] + 1,
                       lambda: self.aggr_timer(dp))
    self.post_callback(self.dp_poll_period[dp]['flow'] + 1.5,
                       lambda: self.flow_timer(dp))
    self.post_callback(DEFAULT_POLL_FILE_PERIOD, lambda: self.file_timer())
    return CONTINUE
def __init__(self, request, hitter_list, dirman, num_req):
    """Start a location-name lookup for every (dpid, port, value) entry
    in hitter_list and arrange for loc_search_done to run when all the
    directory queries have finished."""
    self.dm = dirman
    self.request = request
    self.hitter_list = []
    self.num_requested = num_req
    pending = []
    for entry in hitter_list:
        dpid_obj = datapathid.from_host(entry[0])
        lookup = {'dpid': dpid_obj, 'port': entry[1]}
        pending.append(
            self.dm.search_principals(Directory.LOCATION_PRINCIPAL, lookup))
        # Remember the original entry, its value and the dpid object so
        # the completion callback can pair results back up.
        self.hitter_list.append([entry, {"value": entry[2]}, dpid_obj])
    dlist = defer.DeferredList(pending, consumeErrors=True)
    dlist.addCallback(self.loc_search_done)
    dlist.addErrback(self.err, self.request, "loc_name_resolve_fsm",
                     "Could not retrieve location names.")
def dp_leave(self, dp):
    """Handle a datapath leave event: drop every piece of per-switch
    state kept for dp.

    A leave for an unknown datapath is logged but otherwise ignored.
    Returns CONTINUE so other registered handlers also see the event.
    """
    # dict.has_key() is deprecated; use the 'in' operator throughout
    # (consistent with the other dp_leave handler in this file).
    if dp in self.dp_stats:
        del self.dp_stats[dp]
    else:
        log.err('Unknown datapath leave', system='switchstats')
    if dp in self.dp_poll_period:
        del self.dp_poll_period[dp]
    if dp in self.dp_table_stats:
        del self.dp_table_stats[dp]
    if dp in self.dp_desc_stats:
        del self.dp_desc_stats[dp]
    if dp in self.dp_port_stats:
        del self.dp_port_stats[dp]
    return CONTINUE
def dp_leave(self, dp):
    """Handle a datapath leave event: drop every piece of per-switch
    state kept for dp, including any registered port listeners.

    A leave for an unknown datapath is logged but otherwise ignored.
    Returns CONTINUE so other registered handlers also see the event.
    """
    # dict.has_key() is deprecated; use the 'in' operator throughout
    # (the port_listeners check below already used it).
    if dp in self.dp_stats:
        del self.dp_stats[dp]
    else:
        log.err("Unknown datapath leave", system="switchstats")
    if dp in self.dp_poll_period:
        del self.dp_poll_period[dp]
    if dp in self.dp_table_stats:
        del self.dp_table_stats[dp]
    if dp in self.dp_desc_stats:
        del self.dp_desc_stats[dp]
    if dp in self.dp_port_stats:
        del self.dp_port_stats[dp]
    if dp in self.port_listeners:
        del self.port_listeners[dp]
    return CONTINUE
def _netic_create_path(self, request, arg):
    """Webservice handler: create a provisioned path between two hosts.

    Reads path parameters from the HTTP query arguments.  Mandatory:
    nw_src, nw_dst, duration, bandwidth, set_arp (0/1), bidirectional
    (0/1).  Optional: explicit endpoints (dp_src/dp_dst/first_port/
    last_port), MAC addresses (dl_src/dl_dst) and L4 match fields
    (ip_proto/tp_src/tp_dst); missing endpoint/MAC data is filled in
    from host discovery.  On success the route is installed via
    self.bodrt and a netic JSON response is written; every early-error
    path writes a badRequest and returns NOT_DONE_YET.
    """
    # content = json_parse_message_body(arg)
    content = request.content.read()
    errorCode = {}
    # --- mandatory fields -------------------------------------------
    try:
        nw_src = str(request.args["nw_src"][0])
        nw_dst = str(request.args["nw_dst"][0])
        duration = int(request.args["duration"][0])
        bandwidth = int(request.args["bandwidth"][0])
        set_arp = int(request.args["set_arp"][0])
        bidirectional = int(request.args["bidirectional"][0])
    except:
        errorCode["errorCode"] = NTC_VP_ERR_MISSING_M_FIELD
        webservice.badRequest(request, errcode2str[NTC_VP_ERR_MISSING_M_FIELD], errorCode)
        return NOT_DONE_YET
    # bidirectional and set_arp has to be either 0 or 1:
    if ((set_arp != 0) and (set_arp != 1)) or ((bidirectional != 0) and (bidirectional != 1)):
        errorCode["errorCode"] = NTC_VP_ERR_MISSING_M_FIELD
        webservice.badRequest(request, errcode2str[NTC_VP_ERR_MISSING_M_FIELD], errorCode)
        return NOT_DONE_YET
    # --- optional explicit endpoints --------------------------------
    # If any of dp_src/dp_dst/first_port/last_port is absent or
    # malformed, fall back to discovery-based endpoint lookup below.
    paramsMissing = 0
    try:
        dp_src = datapathid.from_host(int(request.args["dp_src"][0]))
        dp_dst = datapathid.from_host(int(request.args["dp_dst"][0]))
        first_port = int(request.args["first_port"][0])
        last_port = int(request.args["last_port"][0])
    except:
        dp_src = -1
        dp_dst = -1
        first_port = -1
        last_port = -1
        paramsMissing = 1
    info_src = self.discovery.find_host_by_ipstr(nw_src)
    info_dst = self.discovery.find_host_by_ipstr(nw_dst)
    if paramsMissing == 1:
        # Try to find in which port and switch the path will begin and
        # terminate
        if info_src == None or info_dst == None:
            errorCode["errorCode"] = NTC_VP_ERR_MISSING_LOC_INFO
            webservice.badRequest(request, errcode2str[NTC_VP_ERR_MISSING_LOC_INFO], errorCode)
            return NOT_DONE_YET
        else:
            dp_src = datapathid.from_host(info_src["dp"])
            first_port = info_src["port"]
            dp_dst = datapathid.from_host(info_dst["dp"])
            last_port = info_dst["port"]
    else:
        # the dp source and dest and also first and last ports
        # are those specified by the request, lets check them
        if not self.discoveryws.discovery.is_valid_dpid(dp_src.as_host()) or not self.discoveryws.discovery.is_valid_dpid(dp_dst.as_host()):
            errorCode["errorCode"] = NTC_VP_ERR_UNKNOWN_SWITCH
            webservice.badRequest(request, errcode2str[NTC_VP_ERR_UNKNOWN_SWITCH], errorCode)
            return NOT_DONE_YET
        if not self.discoveryws.discovery.is_valid_port_in_dpid(dp_src.as_host(), first_port) or not self.discoveryws.discovery.is_valid_port_in_dpid(dp_dst.as_host(), last_port):
            errorCode["errorCode"] = NTC_VP_ERR_UNKNOWN_PORT
            webservice.badRequest(request, errcode2str[NTC_VP_ERR_UNKNOWN_PORT], errorCode)
            return NOT_DONE_YET
    # At this point we have the mandatory params of the flow and
    # location of the source and destination point of the path
    paramsMissing = 0
    keyError = 0
    with_arp = False  # NOTE(review): set but apparently never read below
    if set_arp == 1:
        with_arp = True
        # MAC addresses: taken from the request when present, otherwise
        # filled in from the discovery info looked up above.
        try:
            dl_src = create_eaddr(str(request.args["dl_src"][0]))
            dl_dst = create_eaddr(str(request.args["dl_dst"][0]))
        except KeyError:
            dl_src = 0
            dl_dst = 0
            keyError = 1
            paramsMissing = 1
        except:
            print "other error"
        if (dl_src == None) | (dl_dst == None):
            paramsMissing = 1
        if paramsMissing == 1:
            if info_src != None:
                dl_src = info_src["dl_addr"]
            if info_dst != None:
                dl_dst = info_dst["dl_addr"]
            if (dl_src == None) | (dl_dst == None):
                if keyError == 1:
                    errorCode["errorCode"] = NTC_VP_ERR_MISSING_MAC_ADDRESS
                    webservice.badRequest(request, errcode2str[NTC_VP_ERR_MISSING_MAC_ADDRESS], errorCode)
                else:
                    # NOTE(review): the error code is BAD_MAC_ADDRESS but
                    # the message text uses MISSING_MAC_ADDRESS -- looks
                    # like a copy-paste slip; confirm intended message.
                    errorCode["errorCode"] = NTC_VP_ERR_BAD_MAC_ADDRESS
                    webservice.badRequest(request, errcode2str[NTC_VP_ERR_MISSING_MAC_ADDRESS], errorCode)
                return NOT_DONE_YET
    # At this point even arp info is ready
    # --- optional L4 granularity ------------------------------------
    # tp_src/tp_dst are only honored when ip_proto is supplied.
    tp_src = 0
    tp_dst = 0
    ip_proto = 255
    granularity = True
    try:
        ip_proto = int(request.args["ip_proto"][0])
    except:
        granularity = False
    if granularity == True:
        try:
            tp_src = int(request.args["tp_src"][0])
        except:
            tp_src = 0
        try:
            tp_dst = int(request.args["tp_dst"][0])
        except:
            tp_dst = 0
    else:
        ip_proto = 255
    # --- build the path-info object and install the route -----------
    npi = Netic_path_info()
    npi.nw_src = nw_src
    npi.nw_dst = nw_dst
    npi.duration = duration
    npi.bandwidth = bandwidth
    if set_arp == True:
        npi.set_arp = True
    else:
        npi.set_arp = False
    if bidirectional == True:
        npi.bidirect = True
    else:
        npi.bidirect = False
    npi.dp_src = dp_src
    npi.dp_dst = dp_dst
    npi.first_port = first_port
    npi.last_port = last_port
    if set_arp == True:
        npi.dl_src = dl_src
        npi.dl_dst = dl_dst
    npi.ip_proto = ip_proto
    npi.tp_src = tp_src
    npi.tp_dst = tp_dst
    res, error = self.bodrt.netic_create_route(npi)
    if error == 0:
        a = {}
        a["directPath"] = res.directPath
        self.discoveryws.add_created_path(res.directPath)
        if res.reversPath >= 0:
            self.discoveryws.add_created_path(res.reversPath)
            a["reversPath"] = res.reversPath
        neticResponse(request, NTC_OK, a)
    else:
        neticResponse(request, NTC_VP_INFO_PATH_NOT_FOUND)
def get_switch_conn_p_s(self, dpid):
    """Delegate to the underlying switch-stats component, converting the
    host-order dpid into a datapathid object first."""
    dpid_obj = datapathid.from_host(dpid)
    return self.cswitchstats.get_switch_conn_p_s(dpid_obj)
def get_flows(self, request, id):
    """Start a flow fetch for the datapath named by request['dpid']
    (hex string); results are handed to report_results on completion."""
    dpid_obj = datapathid.from_host(long(request['dpid'], 16))
    # The lambda closes over 'ff'; the binding completes before the
    # fetch callback can fire.
    ff = self.ffa.fetch(dpid_obj, request,
                        lambda: report_results(ff, id, self.classifier_som))
def get_flows(self, request, id):
    """Start a flow fetch for the datapath named by request['dpid']
    (hex string); results are handed to report_results on completion."""
    dpid_obj = datapathid.from_host(long(request['dpid'], 16))
    # The lambda closes over 'ff'; the binding completes before the
    # fetch callback can fire.
    ff = self.ffa.fetch(dpid_obj, request,
                        lambda: report_results(ff, id, self.classifier_som))
def _netic_provision(self, request, arg):
    """Install a route between the two switches named in *arg* and
    return whatever netic_install_route reports."""
    ingress_dp = datapathid.from_host(int(arg['first_node']))
    egress_dp = datapathid.from_host(int(arg['last_node']))
    return self.pyrt.netic_install_route(
        int(arg['first_port']), int(arg['last_port']),
        ingress_dp, egress_dp,
        str(arg['src_ip']), str(arg['dst_ip']))
def timer(self):
    """Periodic test hook: install a fixed demo route between datapaths
    1 and 3, then re-arm itself every 10 seconds.

    The large block of commented-out routing/topology experiments that
    previously cluttered this method carried no behavior and has been
    removed.
    """
    src = datapathid.from_host(1)
    dst = datapathid.from_host(3)
    self.routing.netic_install_route(1, 1, src, dst,
                                     "4.0.0.10", "6.0.0.10")
    self.post_callback(10, self.timer)
def timer(self): val = datapathid.from_host(0) print self.pytop.is_internal(val, 0) self.post_callback(1, self.timer)