def _timer_func ():
  global start_time, sent_time1, sent_time2, src_dpid, dst_dpid
  if src_dpid != 0:
    sent_time1 = time.time() * 1000 - start_time
    #print "sent_time1:", sent_time1
    # send out a port_stats_request through src_dpid
    core.openflow.getConnection(src_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
    # send a timestamped probe packet (myproto payload) out of port 2 of src_dpid
    f = myproto()
    f.timestamp = int(time.time() * 1000 - start_time)
    #print f.timestamp
    e = pkt.ethernet()
    e.src = EthAddr("0:0:0:0:0:2")
    e.dst = EthAddr("0:1:0:0:0:1")
    e.type = 0x5577
    e.payload = f
    msg = of.ofp_packet_out()
    msg.data = e.pack()
    msg.actions.append(of.ofp_action_output(port=2))
    core.openflow.getConnection(src_dpid).send(msg)
  if dst_dpid != 0:
    sent_time2 = time.time() * 1000 - start_time
    #print "sent_time2:", sent_time2
    # send out a port_stats_request through dst_dpid
    core.openflow.getConnection(dst_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
def RoundRobin():
  pathRead = {}
  for p in monitored_paths:
    pathRead[p] = False
  for p in monitored_paths:
    # Walk through all distinct paths (not individual flows)
    if not pathRead[p]:
      if p not in pathIterator or pathIterator[p] == p.src:
        # Round-robin switch selection
        pathIterator[p] = p.dst
      else:
        pathIterator[p] = p.prev[pathIterator[p]]
      curSwitch = pathIterator[p]
      #log.debug("Sending message to switch %s", util.dpid_to_str(curSwitch))
      msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
      switches[curSwitch].connection.send(msg)
      msg2 = of.ofp_stats_request(body=of.ofp_port_stats_request())
      switches[curSwitch].connection.send(msg2)
      # Avoid polling additional switches for paths whose stats have already been requested
      for pPrime in monitored_pathsBySwitch[curSwitch]:
        pathRead[pPrime] = True
def request_stats(self):
  # Get consumption of each node in the topology
  for i in range(len(info_manager.nodes)):
    self.count_flow_stats_straight += 1
    self.count_port_stats_straight += 1
  for h in info_manager.hosts:
    border_node = info_manager.get_node(h.dpid)
    try:
      core.openflow.getConnection(border_node.id).send(
        of.ofp_stats_request(body=of.ofp_flow_stats_request()))
      self.count_flow_stats_adaptive += 1
    except:
      continue
  nodes_list = self.G.nodes()
  aux = 0
  for node in nodes_list:
    aux += 1
    connection = core.openflow.getConnection(node)
    if connection:
      connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
      self.count_port_stats_adaptive += 1
  if aux == 0:
    self.count_port_stats_adaptive = 0
  update_monitoring_stats(self.count_flow_stats_straight, self.count_flow_stats_adaptive,
                          self.count_port_stats_straight, self.count_port_stats_adaptive)
  info_manager.update_network_consumption()
def _timer_func(): """ handler for timer function that sends the requests to all the switches connected to the controller. """ log.debug("start check stat") for connection in core.openflow._connections.values(): # FlowStatsReceived connection.send( of.ofp_stats_request(body=of.ofp_flow_stats_request(), type=of.ofp_stats_types_rev_map.get("OFPST_FLOW")) ) # AggregateFlowStatsReceived connection.send( of.ofp_stats_request( body=of.ofp_aggregate_stats_request(), type=of.ofp_stats_types_rev_map.get("OFPST_AGGREGATE") ) ) # TableStatsReceived # I don't know which methode to call (it's not of.ofp_flow_stats()) # connection.send(of.ofp_stats_request(body=of.ofp_table_stats())) # PortStatsReceived connection.send( of.ofp_stats_request( body=of.ofp_port_stats_request(port_no=of.OFPP_NONE), type=of.ofp_stats_types_rev_map.get("OFPST_PORT") ) ) # QueueStatsReceived body = of.ofp_queue_stats_request(port_no=of.OFPP_NONE, queue_id=of.OFPQ_ALL) connection.send(of.ofp_stats_request(body=body, type=of.ofp_stats_types_rev_map.get("OFPST_QUEUE")))
def _timer_func():
  global s1_dpid, s2_dpid, s3_dpid, s4_dpid, turn
  core.openflow.getConnection(s1_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  core.openflow.getConnection(s2_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  core.openflow.getConnection(s3_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  if turn == 0:
    # route IPv4 traffic to 10.0.0.2 out of port 3
    msg = of.ofp_flow_mod()
    msg.command = of.OFPFC_MODIFY_STRICT
    msg.priority = 100
    msg.idle_timeout = 0
    msg.hard_timeout = 0
    msg.match.in_port = 1
    msg.match.dl_type = 0x0800
    msg.match.nw_dst = "10.0.0.2"
    msg.actions.append(of.ofp_action_output(port=3))
    core.openflow.getConnection(s1_dpid).send(msg)
    turn = 1
    # route ARP traffic to 10.0.0.2 out of port 3
    msg = of.ofp_flow_mod()
    msg.command = of.OFPFC_MODIFY_STRICT
    msg.priority = 100
    msg.idle_timeout = 0
    msg.hard_timeout = 0
    msg.match.in_port = 1
    msg.match.dl_type = 0x0806
    msg.match.nw_dst = "10.0.0.2"
    msg.actions.append(of.ofp_action_output(port=3))
    core.openflow.getConnection(s1_dpid).send(msg)
    turn = 1
    return
  if turn == 1:
    # route IPv4 traffic to 10.0.0.2 out of port 2
    msg = of.ofp_flow_mod()
    msg.command = of.OFPFC_MODIFY_STRICT
    msg.priority = 100
    msg.idle_timeout = 0
    msg.hard_timeout = 0
    msg.match.in_port = 1
    msg.match.dl_type = 0x0800
    msg.match.nw_dst = "10.0.0.2"
    msg.actions.append(of.ofp_action_output(port=2))
    core.openflow.getConnection(s1_dpid).send(msg)
    turn = 0
    # route ARP traffic to 10.0.0.2 out of port 2
    msg = of.ofp_flow_mod()
    msg.command = of.OFPFC_MODIFY_STRICT
    msg.priority = 100
    msg.idle_timeout = 0
    msg.hard_timeout = 0
    msg.match.in_port = 1
    msg.match.dl_type = 0x0806
    msg.match.nw_dst = "10.0.0.2"
    msg.actions.append(of.ofp_action_output(port=2))
    core.openflow.getConnection(s1_dpid).send(msg)
    turn = 0
    return
def _timer_func():
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  log.debug("Sent %i flow/port stats request(s)", len(core.openflow._connections))
def _on_timer():
  # Send out requests to the network nodes
  for n in nodes:
    core.openflow.getConnection(n.connection.dpid).send(of.ofp_features_request())
    n.connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
    n.connection.send(of.ofp_stats_request(body=of.ofp_aggregate_stats_request()))
    n.connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
def _request_stats():
  log.debug('Number of connections: {}'.format(len(core.openflow.connections)))
  log.info('Sending stats requests')
  for connection in core.openflow.connections:
    log.debug("Sending stats request")
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
def _timer_func():
  for connection in core.openflow._connections.values():
    dpid_str = dpidToStr(connection.dpid)
    # Send a request to the switch behind each connection; an if-check on dpid_str
    # could be added here to choose which switches to poll.
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
def timer_func():
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    # not supported in POX yet
    connection.send(of.ofp_stats_request(body=of.ofp_aggregate_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  log.debug("Sent %i flow/port stats request(s)", len(core.openflow._connections))
def _send_ofp_stats_request(self):
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  log.debug("Sent %i flow/port stats request(s)", len(core.openflow._connections))
  core.flow_stats._raise_StatsEvent()  # invoke event
def send_stats_requests():
  """ Send stats requests to the connected switches """
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  log.debug("Sent %i flow/port stats request(s)", len(core.openflow._connections))
def _timer_func ():
  for connection in core.openflow._connections.values():
    #log.debug("!!!!!!" + dpidToStr(connection.dpid))
    if dpidToStr(connection.dpid) == "00-00-00-00-00-05":
      connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
      connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  log.debug("Sent %i flow/port stats request(s)", len(core.openflow._connections))
def launch_stats_query(self):
  """Sends an OpenFlow FlowStatsRequest and PortStatsRequest to the switch
  associated with this object."""
  if self.is_connected:
    self.connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    self._last_flow_stats_query_send_time = time.time()
    self.connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
    self._last_port_stats_query_send_time = time.time()
    log.debug('Sent flow and port stats requests to switch: ' + dpid_to_str(self.dpid))
def requestForUpdateCost():
  global middleBoxProcess
  for con in core.openflow._connections.values():
    # Derive the 16-character, zero-padded dpid string from the connection name
    nodeName = str(con).split("-")[-1].split(" ")[0]
    for i in range(16 - len(nodeName)):
      nodeName = "0" + nodeName
    if nodeName in middleBoxProcess.keys():
      # just for middleboxes
      con.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
    if nodeName == "0000000000000002" or nodeName == "0000000000000001":
      # for sw1 and sw2
      con.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
def _handle_timer(self):
  for dpid in core.topo.graph.nodes_iter():
    for _, _, (pt, _) in core.topo.graph.out_edges_iter(dpid, data='pt'):
      msg = of.ofp_stats_request()
      msg.body = of.ofp_port_stats_request()
      msg.body.port_no = pt
      core.openflow.getConnection(dpid).send(msg)
  for dpid in core.topo.graph.nodes_iter():
    msg = of.ofp_stats_request()
    msg.body = of.ofp_table_stats_request()
    core.openflow.getConnection(dpid).send(msg)
def _timer_func (self):
  """
  Recurring function to request stats from switches: for each connection
  (which actually represents a switch), request statistics.
  """
  for connection in core.openflow._connections.values():
    # connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_queue_stats_request()))
def req_for_stats():
  tik = 30
  while True:
    time.sleep(1)
    tik = (tik - 1 + 30) % 30
    if tik == 0:
      allCon = core.openflow.connections
      for con in allCon:
        con.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
        con.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
def timer_function ():
  """ Request Flow and Port Stats """
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  global timer_now
  timer_now = datetime.datetime.now()
  log.debug("Sent %i flow/port stats requests", len(core.openflow._connections))
def _handle_timer(message):
  print
  print message
  connections = core.openflow._connections
  print 'connected switch:',
  for connection in connections:
    print connection,
    # print 'connection:', connection.dpid, connection.ports  # to see what a connection consists of
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
    # delete all flows
    connection.send(of.ofp_flow_mod(match=of.ofp_match(), command=of.OFPFC_DELETE))
  print '\n'
def _timer_func():
  global s1_dpid, s2_dpid, s3_dpid, s4_dpid, s5_dpid, turn
  core.openflow.getConnection(s1_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  core.openflow.getConnection(s2_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  core.openflow.getConnection(s3_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  core.openflow.getConnection(s4_dpid).send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  #print getTheTime(), "sent the port stats request to s1_dpid"
  if turn == 0:
    msg = of.ofp_flow_mod()
    msg.command = of.OFPFC_MODIFY_STRICT
    msg.priority = 100
    msg.idle_timeout = 0
    msg.hard_timeout = 0
    msg.match.dl_type = 0x0800
    msg.match.nw_dst = "10.0.0.4"
    msg.actions.append(of.ofp_action_output(port=5))
    core.openflow.getConnection(s1_dpid).send(msg)
    turn = 1
    return
  if turn == 1:
    msg = of.ofp_flow_mod()
    msg.command = of.OFPFC_MODIFY_STRICT
    msg.priority = 100
    msg.idle_timeout = 0
    msg.hard_timeout = 0
    msg.match.dl_type = 0x0800
    msg.match.nw_dst = "10.0.0.4"
    msg.actions.append(of.ofp_action_output(port=6))
    core.openflow.getConnection(s1_dpid).send(msg)
    turn = 2
    return
  if turn == 2:
    msg = of.ofp_flow_mod()
    msg.command = of.OFPFC_MODIFY_STRICT
    msg.priority = 100
    msg.idle_timeout = 0
    msg.hard_timeout = 0
    msg.match.dl_type = 0x0800
    msg.match.nw_dst = "10.0.0.4"
    msg.actions.append(of.ofp_action_output(port=4))
    core.openflow.getConnection(s1_dpid).send(msg)
    turn = 0
    return
def LastSwitch():
  switchRead = {}
  for dpid in switches:
    switchRead[dpid] = False
  # Walk through all distinct paths and select both the last and the first switch
  # to calculate throughput and packet loss.
  for p in monitored_paths:
    if switchRead[p.dst] == False:
      switchRead[p.dst] = True
      msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
      switches[p.dst].connection.send(msg)
    if switchRead[p.src] == False:
      switchRead[p.src] = True
      msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
      switches[p.src].connection.send(msg)
def LastSwitch():
  switchRead = {}
  for dpid in switches:
    switchRead[dpid] = False
  # Walk through all distinct paths and select both the last and the first switch
  # to calculate throughput and packet loss.
  for p in monitored_paths:
    if switchRead[p[-1]] == False:
      switchRead[p[-1]] = True
      msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
      switches[p[-1]].connection.send(msg)
    if switchRead[p[0]] == False:
      switchRead[p[0]] = True
      msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
      switches[p[0]].connection.send(msg)
def SendStatsReq(self):
  # Send port stat requests to every switch that has active flows.
  del self.PolSwitches[:]  # empty the list of switches to be polled so it can be recalculated
  if core.forwarding.ActFlows:
    log.debug("Sending Port Stat Requests to all switches that have active flows")
    # Remove non-active flows in case they exist in the list
    core.forwarding.removeInactiveFlows()
    # log.debug("Before Sending Port Stats messages the ActFlow list is:")
    for fl in core.forwarding.ActFlows:
      # print fl.flow_match.s_ip, fl.flow_match.d_ip, fl.used_path, fl.active
      for r in fl.used_path:
        if fl.used_path[fl.used_path.index(r)][0] not in self.PolSwitches:
          self.PolSwitches.append(fl.used_path[fl.used_path.index(r)][0])
  else:
    log.debug("No active flows at the moment, so not sending any port stat requests")
  self.pol_counter += 1  # increment polling session number
  if self.PolSwitches:
    for con in core.openflow.connections:
      if dpid_to_str(con.dpid) in self.PolSwitches:
        log.debug("Sending Port Stats Request to %s: ", dpid_to_str(con.dpid))
        msg = of.ofp_stats_request(body=of.ofp_port_stats_request())
        con.send(msg)
def _change_port(self, dpid, port_no, down):
  con = core.openflow.getConnection(dpid)
  p = con.ports[port_no]
  log.info('%s' % con.ports)
  if down:
    s = 'down'
  else:
    s = 'up'
  log.info('change_port( dpid:%s, port_no:%s, dir:%s )' % (dpid, port_no, s))
  new_state = down * of.OFPPC_PORT_DOWN
  new_state = of.OFPPC_PORT_DOWN \
              | of.OFPPC_NO_STP \
              | of.OFPPC_NO_RECV \
              | of.OFPPC_NO_RECV_STP \
              | of.OFPPC_NO_FLOOD \
              | of.OFPPC_NO_FWD \
              | of.OFPPC_NO_PACKET_IN
  log.info('change_port: new_state: %d %s ' % (new_state, con.info))
  pm = of.ofp_port_mod(port_no=p.port_no, hw_addr=p.hw_addr,
                       config=new_state, mask=new_state)  # of.OFPPC_PORT_DOWN
  con.send(pm)
  body = of.ofp_port_stats_request()
  body.port_no = of.OFPP_NONE  # request all port statistics
  msg = of.ofp_stats_request(body=body)
  con.send(msg.pack())
def getSwitchAggregate(self, switch_dpid):
  try:
    bodyMsg = of.ofp_aggregate_stats_request()
    bodyMsg.match = of.ofp_match()
    bodyMsg.table_id = of.TABLE_ALL
    bodyMsg.out_port = of.OFPP_NONE
    msg = of.ofp_stats_request(body=bodyMsg)
    msg.type = of.OFPST_AGGREGATE
    data = {}
    aggStats = self.get_switch_stats(switch_dpid, msg, "aggregate flows")
    if aggStats == None:
      self.log.error("Error getting aggregate stats")
      return data
    data = {
      "bytes": aggStats.byte_count,
      "flows": aggStats.flow_count,
      "packets": aggStats.packet_count
    }
    return data
  except BaseException, e:
    self.log.error(e.message)
    data = {}
    return data
def servidorSocketSobrecarga():
  global listaIDS
  global listaIDSold
  global ipBro
  global mensagem
  global pronto
  CAPACIDADE = 50
  listaIDS = []
  inserirMaquina(listaIDS, 'sdn_firewall_dpi_bro', 6)
  porta = 3000
  socketControlador = socket(AF_INET, SOCK_STREAM)
  socketControlador.bind(('', porta))
  socketControlador.listen(1)
  #print 'The server is running'
  while 1:
    conexao, endereco = socketControlador.accept()
    ipBro = None
    listaIDSold = listaIDS[:]
    mensagem = conexao.recv(1024)
    #print 'Alarm received\n'
    if mensagem == "sobrecarga":
      inserirMaquina(listaIDS, 'sdn_firewall_dpi2', 4)
      pronto = False
      while True:
        #print 'waiting'
        if pronto == True:
          break
    elif mensagem == "descarga":
      removerMaquina(listaIDS, 4)
    conexao.close()
    for switchControlado in core.openflow.connections:
      switchControlado.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
  thread.exit()
def global_stats():
  _init()
  log.info("Sending port stats req to all {0} switches".format(num_sw))
  for con in core.openflow.connections:
    handler[con.dpid] = con.addListenerByName("PortStatsReceived", _stats_listener)
    con.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  Timer(wait_timeout, _check)
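The snippet above registers a per-connection "PortStatsReceived" listener, but the body of _stats_listener is not included here. Below is a minimal, hypothetical sketch of what such a handler could look like, assuming a simple module-level dict for bookkeeping; apart from the POX event fields (event.connection.dpid, event.stats, and the ofp_port_stats counters), every name is illustrative rather than taken from the source.

# Hypothetical PortStatsReceived handler sketch (not the original _stats_listener).
port_bytes = {}  # assumed bookkeeping: (dpid, port_no) -> (rx_bytes, tx_bytes)

def _handle_PortStatsReceived(event):
  # event.stats is a list of ofp_port_stats entries from one switch
  for ps in event.stats:
    port_bytes[(event.connection.dpid, ps.port_no)] = (ps.rx_bytes, ps.tx_bytes)

# It would be registered in the same way as above, e.g.:
# con.addListenerByName("PortStatsReceived", _handle_PortStatsReceived)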
def _request_stats(self):
  """ Send OFPT_STATS_REQUEST messages to every known switch. """
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
def handle_congestion_notification(self, dpid):
  """
  Upon reception of a congestion notification, request flow stats
  from the congested switch.
  """
  dpid = dpid[:len(dpid)-1]
  dpid = dpid[len(dpid)-12:]
  #print 'Received dpid: ' + str(dpid)
  # We leave 10% to handle new flows during congestion.
  # Request flow stats from the switch
  #print "dpid parameter: " + str(dpid)
  switch = dict.fromkeys(['dpid', 'flow_stats', 'wait_flag', 'drop_policy', 'bw_policy'])
  switch['drop_policy'] = 'Random'
  switch['bw_policy'] = 'Penalty'
  switch['dpid'] = dpid
  switch['wait_flag'] = 0
  switch['flow_stats'] = []
  switch_states.append(switch)
  for connection in self.myconnections:
    connection_dpid = connection.dpid
    #print "Connection dpid: " + str(connection_dpid)
    dpid_str = dpidToStr(connection_dpid)
    dpid_str = dpid_str.replace("-", "")
    #print 'Real dpid_str: ' + dpid_str
    if dpid == dpid_str:
      connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
      print 'Flow stats request sent to: ' + str(connection)
def SendFlowStatsReq(self):
  # Send flow stat requests to every switch that has active flows.
  del self.PolSwitches[:]  # empty the list of switches to be polled so it can be recalculated
  # Remove old link counter values so that calculations are correct when responses arrive
  core.current_topology.resetLinkCounters()
  if core.forwarding.ActFlows:
    log.debug("Sending Flow Stat Requests to all switches that have active flows")
    # Remove non-active flows
    core.forwarding.removeInactiveFlows()
    # log.debug("Before Sending Flow Stats messages the ActFlow list is:")
    for fl in core.forwarding.ActFlows:
      # print fl.flow_match.s_ip, fl.flow_match.d_ip, fl.used_path, fl.active
      for r in fl.used_path:
        if fl.used_path[fl.used_path.index(r)][0] not in self.PolSwitches:
          self.PolSwitches.append(fl.used_path[fl.used_path.index(r)][0])
  else:
    log.debug("No active flows at the moment, so not sending any flow stat requests")
  self.pol_counter += 1  # increment polling session number
  if self.PolSwitches:
    for con in core.openflow.connections:
      if dpid_to_str(con.dpid) in self.PolSwitches:
        log.debug("SendingFlowStatsRequest to %s: ", dpid_to_str(con.dpid))
        msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
        con.send(msg)
def SendFlowStatsReq(self):
  # Send a flow stat request to the first switch in the path of every active flow.
  del self.PolSwitches[:]  # empty the list of switches to be polled so it can be recalculated
  if core.forwarding.ActFlows:
    log.debug("Sending Flow Stat Request to the first switch in the path of every active flow.")
    # Remove non-active flows
    core.forwarding.removeInactiveFlows()
    # log.debug("Before Sending Flow Stats messages the ActFlow list is:")
    for fl in core.forwarding.ActFlows:
      # print fl.flow_match.s_ip, fl.flow_match.d_ip, fl.used_path, fl.active
      if fl.used_path[0][0] not in self.PolSwitches:
        self.PolSwitches.append(fl.used_path[0][0])
  else:
    log.debug("No active flows at the moment, so not sending any flow stat requests")
  self.pol_counter += 1  # increment polling session number
  if self.PolSwitches:
    for con in core.openflow.connections:
      if dpid_to_str(con.dpid) in self.PolSwitches:
        log.debug("SendingFlowStatsRequest to %s: ", dpid_to_str(con.dpid))
        msg = of.ofp_stats_request(body=of.ofp_flow_stats_request())
        con.send(msg)
def _handle_ConnectionUp(event):
  if dpidToStr(event.dpid) == '00-e0-4c-2a-33-4f':
    nomeswitch = 'Switch UL'
  elif dpidToStr(event.dpid) == '00-08-54-aa-cb-bc':
    nomeswitch = 'Switch DL'
  elif dpidToStr(event.dpid) == '00-06-4f-86-af-ff':
    nomeswitch = 'Switch HW'
  elif dpidToStr(event.dpid) == '00-40-a7-0c-01-75':
    nomeswitch = 'Switch SW'
  else:
    nomeswitch = 'Unknown switch'
  log.info("%s connected.", nomeswitch)
  msg1 = of.ofp_flow_mod()
  msg1.match.in_port = 2
  msg1.priority = 2
  msg1.actions.append(of.ofp_action_output(port=1))
  msg1.hard_timeout = 10
  #msg1.flags |= of.OFPFF_SEND_FLOW_REM
  #event.connection.send(msg1)
  addRegra(event, msg1)
  #msg2 = of.ofp_flow_mod()
  #msg2.match.in_port = 3
  #msg2.priority = 2
  #msg2.actions.append(of.ofp_action_output(port = 2))
  #event.connection.send(msg2)
  event.connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
  log.info("Rules added.")
def _timer_func():
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
  log.info("Sent %i flow stats requests", len(core.openflow._connections))
def enable_flow_stats_retrieval(self, interval_secs):
  """
  Starts a timer that periodically requests the switch to send flow
  statistics. The reply will be received through a FlowStatsReceived event.
  """
  self._stats_req_timer = recoco.Timer(
      interval_secs,
      lambda: self._connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request())),
      recurring=True)
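As a companion to the recurring request above, here is a minimal sketch of consuming the FlowStatsReceived replies in POX, assuming a small standalone component; the handler and launch names and the aggregation are illustrative assumptions, while core.openflow.addListenerByName and the event.stats fields are standard POX API.

# Minimal, assumed-standalone sketch of consuming FlowStatsReceived replies in POX;
# names other than the POX calls themselves are illustrative.
from pox.core import core
from pox.lib.util import dpid_to_str

log = core.getLogger()

def _handle_FlowStatsReceived(event):
  # event.stats is a list of ofp_flow_stats entries for one switch
  total_bytes = sum(f.byte_count for f in event.stats)
  total_packets = sum(f.packet_count for f in event.stats)
  log.debug("Flow stats from %s: %i flows, %i bytes, %i packets",
            dpid_to_str(event.connection.dpid), len(event.stats),
            total_bytes, total_packets)

def launch():
  core.openflow.addListenerByName("FlowStatsReceived", _handle_FlowStatsReceived)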
def _exec_cmd_get_flow_stats (self, event):
  try:
    msg = event.msg
    dpid = strToDPID(msg['dpid'])
    con = core.openflow.getConnection(dpid)
    if con is None:
      raise RuntimeError("No such switch")
    match = event.msg.get('match')
    table_id = event.msg.get('table_id', 0xff)
    out_port = event.msg.get('out_port', of.OFPP_NONE)
    sr = of.ofp_stats_request()
    sr.body = of.ofp_flow_stats_request()
    if match is None:
      match = of.ofp_match()
    else:
      match = dict_to_match(match)
    sr.body.match = match
    sr.body.table_id = table_id
    sr.body.out_port = out_port
    con.send(sr)
    self.reply(event, **{'type': 'set_table', 'xid': sr.xid})
  except:
    #log.exception("Exception in get_flow_stats")
    log.debug("Exception in get_flow_stats - %s:%s",
              sys.exc_info()[0].__name__, sys.exc_info()[1])
    self.reply(event,
               exception="%s: %s" % (sys.exc_info()[0], sys.exc_info()[1]),
               traceback=traceback.format_exc())
def _timer_func ():
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    # We could even request port stats!
    #connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
  print "Sent " + str(len(core.openflow._connections)) + " flowstats request(s)"
  log.info("Sent %i flow/port stats request(s)", len(core.openflow._connections))
def requestStats(self):
  # Requests UDP statistics from the switch that is connected to clients
  self.unblockTried = set()
  for connection in core.openflow.connections:
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
def _timer_func():
  '''
  Handler for the timer function that sends port stats requests to all the
  switches connected to the controller.
  '''
  for connection in core.openflow._connections.values():
    connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
def decider(bool_val, packet):
  global layer1_correct
  global last_packet
  if bool_val:
    last_packet = packet
    print "### Stage 2: Started ###"
    print "Question A: No, Policy does not match actual behaviour"
    print "Searching Question B: 'Does policy match device state?'"
    layer1_correct = False
    # Search deeper on device state (Layer 2)
    #start_spanning_tree()
    con_check = check_switch_connectivity()
    # Request flow stats (Layer 3, Question D)
    if con_check:  # if Yes on question B
      print "### Stage 3: Started ###"
      print "Question B: Yes, Device state matches Policy."
      print "Searching Question D: 'Does device state match hardware?'"
      for con in core.openflow._connections.values():
        print "Requesting flow table entries from device", con
        con.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    else:
      print "### Stage 3: Started"
      print "Question B: No, Device state doesn't match Policy."
      print "Searching Question C: 'Does physical view match Device State?'"
def sendAggregateStatsRequest(self, event):
  sr = of.ofp_stats_request()
  sr.type = of.OFPST_AGGREGATE
  sr.body = of.ofp_aggregate_stats_request()
  event.connection.send(sr)
  print "Sending aggregate stat request message to Switch %s " % event.dpid
def _beginDemandEstimation(self):
  if self.all_switches_up:
    self.demandEstimationLock.acquire()
    # Clear all
    self.flows = []
    self.flowQueryMsg = {}  #set()  # Store responses to be received
    # Ask for outgoing flows from edge switches to hosts
    for sw_name in self.t.layer_nodes(self.t.LAYER_EDGE):
      connected_hosts = self.t.down_nodes(sw_name)
      sw_dpid = self.t.id_gen(name=sw_name).dpid
      self.flowQueryMsg[sw_dpid] = 0
      for host_name in connected_hosts:
        sw_port, host_port = self.t.port(sw_name, host_name)
        msg = of.ofp_stats_request()
        msg.type = of.OFPST_FLOW
        msg.body = of.ofp_flow_stats_request()
        msg.body.out_port = sw_port
        msg.body.match.nw_proto = ipv4.TCP_PROTOCOL
        self.switches[sw_dpid].connection.send(msg)
        # print "request (sw, src_port) = (" + str(sw_dpid) + ", " + str(sw_port) + ")"
        #self.flowQueryMsg.add((sw_dpid, sw_port))
        # NOTE: sw_port doesn't match what we get back from OpenFlow,
        # so the best we can do is to count the requests.
        self.flowQueryMsg[sw_dpid] += 1
    self.demandEstimationLock.release()
  # Reschedule the next demand estimation round
  timer = Timer(2.0, self._beginDemandEstimation)
  timer.start()
def flow_stat_thread(self):
  while True:
    # Send a flow stats request to the switch
    self._of_send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    # Save timing events to file.
    with open(TIMING_EVENT_FILE, 'a') as timing_f:
      for (event_name, queue) in [('pkt_in', self.pkt_in_queue),
                                  ('flow_mod', self.flow_mod_queue)]:
        while not queue.empty():
          try:
            (start_time, packet) = queue.get()
          except Queue.Empty:
            break
          match = of.ofp_match.from_packet(packet)
          print >> timing_f, '%.8f,%s,%s,%s' % (start_time, event_name,
                                                match.tp_src, match.tp_dst)
    # Sleep until the next polling interval (the interval may change under the lock)
    sleep_time = 0
    while True:
      time.sleep(0.5)
      sleep_time += 0.5
      with self.lock:
        if sleep_time >= self.flow_stat_interval:
          break
def act_like_switch (self, packet, packet_in):
  """ Implement switch-like behavior. """
  srcaddr = EthAddr(packet.src)
  if not self.mac_to_port.has_key(srcaddr):
    self.mac_to_port[srcaddr] = packet_in.in_port
  for key, value in dict.items(self.mac_to_port):
    print key, value
  dstaddr = EthAddr(packet.dst)
  #if my_match.dl_dst in mac_to_port:
  if dstaddr in self.mac_to_port:
    # Send packet out the associated port
    out_port = self.mac_to_port[dstaddr]
    match = of.ofp_match()
    msg = of.ofp_flow_mod()  # creates a flow table entry in the switch
    #msg.match.dl_src = srcaddr
    #msg.match.dl_dst = dstaddr
    msg.match.tp_dst = out_port
    for con in core.openflow.connections:
      msg2 = of.ofp_queue_stats_request()
      msg2.port_no = out_port
      msg2.queue_id = of.OFPQ_ALL
      con.send(of.ofp_stats_request(body=msg2))
    # Add an action to send to the specified port
    action = of.ofp_action_output(port=of.OFPP_CONTROLLER)
    msg.actions.append(action)
    queue_action = of.ofp_action_enqueue()
    queue_action.port = out_port
    queue_action.queue_id = 1
    msg.actions.append(queue_action)
    print "printing q stats in act like switch\n"
    #global stats1
    #print ("%s", s.stats)
    for f in s.stats:
      print f
    print "\n"
    # Send message to switch
    self.connection.send(msg)
    self.resend_packet(packet_in, out_port)
  else:
    # Flood the packet out everything but the input port
    # This part looks familiar, right?
    self.resend_packet(packet_in, of.OFPP_FLOOD)
    print "------flood-------"
def flow_stats_request(self, switch):
  sr = of.ofp_stats_request()
  sr.body = of.ofp_flow_stats_request()
  match = of.ofp_match()
  sr.body.match = match
  sr.body.table_id = 0xff
  sr.body.out_port = of.OFPP_NONE
  self.switches[switch]['connection'].send(sr)
def dropping (duration=None):
  msg.priority = 65535
  # rewrite the VLAN id from 830 to 831
  msg.actions.append(of.ofp_action_vlan_vid(vlan_vid=831))
  print "dropping packets from ", IP_attack
  for connection in self.myconnections:
    # send the flow_mod to the switch
    connection.send(msg)
    # request the flow stats
    connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
  print "MSG sent"
def request_stats():
  # Request both port and flow stats for each connection;
  # the event handler will receive the results.
  for conn in get_connection_list():
    conn.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
    # conn.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
  log.debug("{0} connections to check".format(len(get_connection_list())))
def _ping_switches(self):
  log.info("Pinging Switches")
  nodes = self.G.nodes()
  for dpid in nodes:
    con = core.openflow.getConnection(dpid)
    msg = of.ofp_stats_request()
    msg.body = of.ofp_port_stats_request()
    if con is not None and con.connect_time is not None:
      con.send(msg)
def requestStats(self): "Send all switches a flow statistics request" self.flowstats = [] for connection in core.openflow._connections.values(): connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request())) self.log.debug("ravel: sent {0} flow stats requests".format( len(core.openflow._connections))) return True
def __init__ (self, connection):
  self.connection = connection
  # My table
  self.macToPort = {}
  # We want to hear PacketIn messages, so we listen to the connection
  connection.addListeners(self)
  print "stat message is sent to sw_dpid: ", dpid_to_str(self.connection.dpid)
  self.connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
def _handle_PacketIn (self, event):
  """
  Handle packet-in messages from the switch to implement the above algorithm.
  """
  packet = event.parsed
  print "Rxed packet: ", packet
  self.handle_IP_packet(packet)
  for con in core.openflow.connections:
    con.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
def _handle_TopologyConverged(self, event):
  # When this module catches the event that the topology has converged, it registers the
  # listeners that handle the OpenFlow events. In other words, monitoring of the network
  # can start once the topology has converged after the module is brought up for the first time.
  core.openflow.addListeners(self, priority=10)
  # Initialize prev_counter for all links
  for con in core.openflow.connections:
    log.debug("Sending Port Stats Request to all switches for the first time: ")
    msg = of.ofp_stats_request(body=of.ofp_port_stats_request())
    con.send(msg)
    self.PolSwitches.append(dpid_to_str(con.dpid))