def error_msg_handler(self, ev):
    """Log a received OFPErrorMsg in full detail.

    Dumps the OpenFlow header fields and the error type/code (both raw
    and symbolic), then decodes the header of the failed request carried
    in the error's data field when one is present.
    """
    msg = ev.msg
    ofp = msg.datapath.ofproto
    self.logger.debug(
        "EventOFPErrorMsg received.\n"
        "version=%s, msg_type=%s, msg_len=%s, xid=%s\n"
        " `-- msg_type: %s\n"
        "OFPErrorMsg(type=%s, code=%s, data=b'%s')\n"
        " |-- type: %s\n"
        " |-- code: %s",
        hex(msg.version), hex(msg.msg_type), hex(msg.msg_len),
        hex(msg.xid), ofp.ofp_msg_type_to_str(msg.msg_type),
        hex(msg.type), hex(msg.code), utils.binary_str(msg.data),
        ofp.ofp_error_type_to_str(msg.type),
        ofp.ofp_error_code_to_str(msg.type, msg.code))
    if len(msg.data) >= ofp.OFP_HEADER_SIZE:
        # The data field carries (at least) the OpenFlow header of the
        # request that triggered the error; decode and log it too.
        (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
        self.logger.debug(
            " `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s\n"
            " `-- msg_type: %s",
            hex(version), hex(msg_type), hex(msg_len), hex(xid),
            ofp.ofp_msg_type_to_str(msg_type))
    else:
        self.logger.warning(
            "The data field sent from the switch is too short: "
            "len(msg.data) < OFP_HEADER_SIZE\n"
            "The OpenFlow Spec says that the data field should contain "
            "at least 64 bytes of the failed request.\n"
            "Please check the settings or implementation of your switch.")
def SBP_handler(self, ev):
    """Handle a south-bound protocol (SBP) message.

    When the configured SBP protocol is OpenFlow, the payload may carry
    several concatenated OpenFlow messages; each complete one is parsed
    against the fake datapath and re-raised as an OXP event, which is
    then handled in a service or app.
    """
    msg = ev.msg
    domain = msg.domain
    data = msg.data
    if CONF.sbp_proto_type == oxproto_v1_0.OXPS_OPENFLOW:
        buf = bytearray()
        required_len = ofproto_common.OFP_HEADER_SIZE
        if len(data) == 0:
            return
        buf += data
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            # The header gives the full message length; stop if the
            # remaining buffer holds only a partial message.
            required_len = msg_len
            if len(buf) < required_len:
                break
            msg = ofproto_parser.msg(self.network_aware.fake_datapath,
                                     version, msg_type, msg_len, xid, buf)
            if msg:
                ev = oxp_event.sbp_to_oxp_msg_to_ev(msg)
                ev.domain = domain
                self.send_event_to_observers(ev, MAIN_DISPATCHER)
            buf = buf[required_len:]
            required_len = ofproto_common.OFP_HEADER_SIZE
def handle(self):
    """Minimal OpenFlow responder: replies to HELLO and BARRIER_REQUEST,
    records FLOW_MOD messages, and stops after the first barrier."""
    desc = ofproto_protocol.ProtocolDesc()
    residue = b''
    while True:
        if residue:
            data = residue
            residue = b''
        else:
            data = self.request.recv(1024)
            if data == b'':
                # Peer closed the connection.
                break
        if self.verbose:
            print(data)
        h = ofproto_parser.header(data)
        if self.verbose:
            print(h)
        version, msg_type, msg_len, xid = h
        # Anything past this message is the start of the next one.
        residue = data[msg_len:]
        desc.set_version(version=version)
        if msg_type == desc.ofproto.OFPT_HELLO:
            hello = desc.ofproto_parser.OFPHello(desc)
            hello.serialize()
            self.request.send(hello.buf)
        elif msg_type == desc.ofproto.OFPT_FLOW_MOD:
            # NOTE(review): `buf` is not defined in this function;
            # presumably a module-level list collecting flow-mods — confirm.
            buf.append(data[:msg_len])
        elif msg_type == desc.ofproto.OFPT_BARRIER_REQUEST:
            brep = desc.ofproto_parser.OFPBarrierReply(desc)
            brep.xid = xid
            brep.serialize()
            self.request.send(brep.buf)
            break
def blocking_read_sdn_message(self, block_read_written=False):
    '''
    @returns an SDN message
    '''
    # Choose which blocking reader to use on the SDN socket.
    read = (self._sdn_socket.blocking_read_written
            if block_read_written
            else self._sdn_socket.blocking_read)

    # Read the fixed-size OpenFlow header, looping until complete.
    msg_buffer = read(OFP_HEADER_SIZE)
    while len(msg_buffer) != OFP_HEADER_SIZE:
        msg_buffer.extend(read(OFP_HEADER_SIZE - len(msg_buffer)))

    # The header tells us the total message length; read the remainder.
    (version, msg_type, msg_len, xid) = ofproto_parser.header(msg_buffer)
    while len(msg_buffer) != msg_len:
        msg_buffer.extend(read(msg_len - len(msg_buffer)))

    msg = ofproto_parser.msg(_OF_1_0_DATAPATH, version, msg_type,
                             msg_len, xid, str(msg_buffer))
    # Keep the raw bytes around for callers that need them.
    msg.original_buffer = msg_buffer
    return msg
def SBP_handler(self, ev):
    """Parse an SBP message and raise it as an OXP event.

    The event is then handled in a service or app. Only acts when the
    domain's south-bound protocol is OpenFlow; the payload may carry
    several concatenated OpenFlow messages.
    """
    msg = ev.msg
    domain = msg.domain
    data = msg.data
    if domain.sbp_proto_type == oxproto_v1_0.OXPS_OPENFLOW:
        buf = bytearray()
        required_len = ofproto_common.OFP_HEADER_SIZE
        if len(data) == 0:
            return
        buf += data
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            self.logger.debug('ofp msg %s cls %s', msg, msg.__class__)
            # The header gives the total length; stop on a partial tail.
            required_len = msg_len
            if len(buf) < required_len:
                break
            msg = ofproto_parser.msg(self.fake_datapath, version,
                                     msg_type, msg_len, xid, buf)
            if msg:
                ev = oxp_event.sbp_to_oxp_msg_to_ev(msg)
                ev.domain = domain
                self.send_event_to_observers(ev, MAIN_DISPATCHER)
            buf = buf[required_len:]
            required_len = ofproto_common.OFP_HEADER_SIZE
def blocking_read_sdn_message(self, block_read_written=False):
    '''
    Read exactly one OpenFlow message from the SDN socket, blocking
    until it is complete.

    @param block_read_written: use the read-written variant of the
        socket's blocking reader when True.
    @returns an SDN message (with the raw bytes attached as
        ``original_buffer``)
    '''
    reader_method = self._sdn_socket.blocking_read
    if block_read_written:
        reader_method = self._sdn_socket.blocking_read_written

    # first read openflow header (loop until all header bytes arrive)
    msg_buffer = reader_method(OFP_HEADER_SIZE)
    while len(msg_buffer) != OFP_HEADER_SIZE:
        diff = OFP_HEADER_SIZE - len(msg_buffer)
        msg_buffer.extend(reader_method(diff))

    # based on size of openflow header, decide how much more to read
    (version, msg_type, msg_len, xid) = ofproto_parser.header(msg_buffer)
    while len(msg_buffer) != msg_len:
        diff = msg_len - len(msg_buffer)
        msg_buffer.extend(reader_method(diff))

    msg = ofproto_parser.msg(
        _OF_1_0_DATAPATH, version, msg_type, msg_len, xid,
        str(msg_buffer))
    msg.original_buffer = msg_buffer
    return msg
def SBP_handler(self, ev):
    """Turn an incoming SBP payload into OXP events.

    Only domains speaking OpenFlow are handled: the payload may carry
    several concatenated OpenFlow messages, and each complete one is
    parsed against the fake datapath and re-raised as an OXP event for
    services/apps to consume.
    """
    msg = ev.msg
    domain = msg.domain
    data = msg.data
    if domain.sbp_proto_type != oxproto_v1_0.OXPS_OPENFLOW:
        return
    if len(data) == 0:
        return
    buf = bytearray(data)
    need = ofproto_common.OFP_HEADER_SIZE
    while len(buf) >= need:
        (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
        self.logger.debug('ofp msg %s cls %s', msg, msg.__class__)
        need = msg_len
        if len(buf) < need:
            # Partial message at the tail of the buffer; give up.
            break
        msg = ofproto_parser.msg(self.fake_datapath, version, msg_type,
                                 msg_len, xid, buf)
        if msg:
            ev = oxp_event.sbp_to_oxp_msg_to_ev(msg)
            ev.domain = domain
            self.send_event_to_observers(ev, MAIN_DISPATCHER)
        buf = buf[need:]
        need = ofproto_common.OFP_HEADER_SIZE
def _test_msg(self, name, wire_msg, json_str): json_dict = json.loads(json_str) # on-wire -> OFPxxx -> json (version, msg_type, msg_len, xid) = ofproto_parser.header(wire_msg) try: has_parser, has_serializer = implemented[version][msg_type] except KeyError: has_parser = True has_serializer = True dp = DummyDatapath(*self._ofp_versions[version]) if has_parser: msg = ofproto_parser.msg(dp, version, msg_type, msg_len, xid, wire_msg) json_dict2 = self._msg_to_jsondict(msg) # XXXdebug code open(('/tmp/%s.json' % name), 'wb').write(json.dumps(json_dict2)) eq_(json_dict, json_dict2) # json -> OFPxxx -> json msg2 = self._jsondict_to_msg(dp, json_dict) if has_serializer: msg2.serialize() eq_(self._msg_to_jsondict(msg2), json_dict) eq_(wire_msg, msg2.buf)
def SBP_handler(self, ev):
    """Parse the SBP payload and re-raise it as OXP events.

    Handling finishes in a service or app: when the configured SBP
    protocol is OpenFlow, each complete OpenFlow message in the payload
    is parsed and dispatched to observers.
    """
    msg = ev.msg
    domain = msg.domain
    data = msg.data
    if CONF.sbp_proto_type != oxproto_v1_0.OXPS_OPENFLOW:
        return
    if len(data) == 0:
        return
    pending = bytearray(data)
    need = ofproto_common.OFP_HEADER_SIZE
    while len(pending) >= need:
        (version, msg_type, msg_len, xid) = ofproto_parser.header(pending)
        need = msg_len
        if len(pending) < need:
            # Incomplete trailing message.
            break
        msg = ofproto_parser.msg(self.network_aware.fake_datapath,
                                 version, msg_type, msg_len, xid, pending)
        if msg:
            ev = oxp_event.sbp_to_oxp_msg_to_ev(msg)
            ev.domain = domain
            self.send_event_to_observers(ev, MAIN_DISPATCHER)
        pending = pending[need:]
        need = ofproto_common.OFP_HEADER_SIZE
def _recv_loop(self):
    """Receive loop: reassemble OpenFlow messages from the switch
    socket, parse them, and queue them as events."""
    buf = bytearray()
    required_len = ofproto.OFP_HEADER_SIZE
    count = 0
    while self.is_active:
        ret = self.socket.recv(required_len)
        if len(ret) == 0:
            # Peer closed the connection.
            self.is_active = False
            break
        buf += ret
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            # The header gives the total message length; wait for the
            # rest if it has not all arrived yet.
            required_len = msg_len
            if len(buf) < required_len:
                break
            msg = ofproto_parser.msg(self, version, msg_type, msg_len,
                                     xid, buf)
            #LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            self.ev_q.queue(ofp_event.ofp_msg_to_ev(msg))
            buf = buf[required_len:]
            required_len = ofproto.OFP_HEADER_SIZE
        # We need to schedule other greenlets. Otherwise, ryu
        # can't accept new switches or handle the existing
        # switches. The limit is arbitrary. We need the better
        # approach in the future.
        count += 1
        if count > 2048:
            count = 0
            gevent.sleep(0)
def error_msg_handler(self, ev):
    """Log a received OFPErrorMsg verbosely.

    Dumps the OpenFlow header, the error's type/code (raw and symbolic),
    and — when present — the header of the failed request carried in the
    error's data field.
    """
    msg = ev.msg
    ofp = msg.datapath.ofproto
    self.logger.debug(
        "EventOFPErrorMsg received.\n"
        "version=%s, msg_type=%s, msg_len=%s, xid=%s\n"
        " `-- msg_type: %s\n"
        "OFPErrorMsg(type=%s, code=%s, data=b'%s')\n"
        " |-- type: %s\n"
        " |-- code: %s",
        hex(msg.version), hex(msg.msg_type), hex(msg.msg_len),
        hex(msg.xid), ofp.ofp_msg_type_to_str(msg.msg_type),
        hex(msg.type), hex(msg.code), utils.binary_str(msg.data),
        ofp.ofp_error_type_to_str(msg.type),
        ofp.ofp_error_code_to_str(msg.type, msg.code))
    if len(msg.data) >= ofp.OFP_HEADER_SIZE:
        # Decode the embedded header of the request that failed.
        (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
        self.logger.debug(
            " `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s\n"
            " `-- msg_type: %s",
            hex(version), hex(msg_type), hex(msg_len), hex(xid),
            ofp.ofp_msg_type_to_str(msg_type))
    else:
        self.logger.warning(
            "The data field sent from the switch is too short: "
            "len(msg.data) < OFP_HEADER_SIZE\n"
            "The OpenFlow Spec says that the data field should contain "
            "at least 64 bytes of the failed request.\n"
            "Please check the settings or implementation of your switch.")
def test_check_msg_parser(self):
    """Feed the message parser a header with an unsupported version."""
    fields = ofproto_parser.header(self.bufPacketIn)
    (_orig_version, msg_type, msg_len, xid) = fields
    # Override the version with a value no parser is registered for.
    version = 0xff
    ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                       self.bufPacketIn)
def _on_header(self, data):
    """Header-received callback: stash the raw header and schedule a
    read of the message body (total length minus the header)."""
    assert len(data) == ofproto_common.OFP_HEADER_SIZE
    self._msg_header = data
    (version, msg_type, msg_len, xid) = ofproto_parser.header(data)
    assert msg_len >= ofproto_common.OFP_HEADER_SIZE
    if self.stream.closed():
        return
    body_len = msg_len - ofproto_common.OFP_HEADER_SIZE
    self.stream.read_bytes(body_len, stack_context.wrap(self._on_message))
def handle_event(self, header, msg):
    """Parse an OpenFlow message delivered over the NetIDE channel,
    dispatch the resulting event to the owning module (or all registered
    handlers), then send a FENCE back to the Core for non-zero xids.

    :param header: decoded NetIDE header (indexed via NetIDEOps.NetIDE_header)
    :param msg: raw OpenFlow message bytes
    """
    #required_len = self.ofp.OFP_HEADER_SIZE
    ret = bytearray(msg)
    (version, msg_type, msg_len, xid) = ofproto_parser.header(ret)
    self.netide_xid = header[NetIDEOps.NetIDE_header['XID']]
    msg = ofproto_parser.msg(self, version, msg_type, msg_len, xid, ret)
    if msg:
        ev = ofp_event.ofp_msg_to_ev(msg)
        event_observers = self.ofp_brick.get_observers(ev, self.state)
        module_id = header[NetIDEOps.NetIDE_header['MOD_ID']]
        # Deliver the event to the module that owns this NetIDE mod_id.
        for key, value in self.channel.running_modules.iteritems():
            if value == module_id and key in event_observers:
                module_brick = ryu.base.app_manager.lookup_service_brick(key)
                module_brick_handlers = module_brick.get_handlers(ev)
                for handler in module_brick_handlers:
                    handler(ev)
                break
        # Send the FENCE message to the Core only for non-zero xids.
        # BUG FIX: was `self.netide_xid is not 0` — identity comparison
        # with an int literal relies on CPython small-int caching; use
        # equality instead.
        if self.netide_xid != 0:
            msg_to_send = NetIDEOps.netIDE_encode('NETIDE_FENCE',
                                                  self.netide_xid,
                                                  module_id, 0, "")
            self.channel.socket.send(msg_to_send)
        dispatchers = lambda x: x.callers[ev.__class__].dispatchers
        handlers = [handler for handler in self.ofp_brick.get_handlers(ev)
                    if self.state in dispatchers(handler)]
        for handler in handlers:
            handler(ev)
        # Resetting netide_xid to zero
        self.netide_xid = 0
def _test_msg(self, name, wire_msg, json_str): json_dict = json.loads(json_str) # on-wire -> OFPxxx -> json (version, msg_type, msg_len, xid) = ofproto_parser.header(wire_msg) try: has_parser, has_serializer = implemented[version][msg_type] except KeyError: has_parser = True has_serializer = True dp = DummyDatapath(*self._ofp_versions[version]) if has_parser: msg = ofproto_parser.msg(dp, version, msg_type, msg_len, xid, wire_msg) json_dict2 = self._msg_to_jsondict(msg) # XXXdebug code open(('/tmp/%s.json' % name), 'wb').write(json.dumps(json_dict2)) eq_(json_dict, json_dict2) # json -> OFPxxx -> json msg2 = self._jsondict_to_msg(dp, json_dict) if has_serializer: msg2.serialize() eq_(self._msg_to_jsondict(msg2), json_dict) eq_(wire_msg, msg2.buf)
def handle_event(self, header, msg):
    """Dispatch a NetIDE-wrapped OpenFlow message.

    Parses the raw bytes, hands the event to the module matching the
    NetIDE mod_id (falling back to all state-appropriate handlers), and
    fences the transaction back to the Core when the xid is non-zero.
    """
    #required_len = self.ofp.OFP_HEADER_SIZE
    ret = bytearray(msg)
    (version, msg_type, msg_len, xid) = ofproto_parser.header(ret)
    self.netide_xid = header[NetIDEOps.NetIDE_header['XID']]
    msg = ofproto_parser.msg(self, version, msg_type, msg_len, xid, ret)
    if msg:
        ev = ofp_event.ofp_msg_to_ev(msg)
        event_observers = self.ofp_brick.get_observers(ev, self.state)
        module_id = header[NetIDEOps.NetIDE_header['MOD_ID']]
        for key, value in self.channel.running_modules.iteritems():
            if value == module_id and key in event_observers:
                module_brick = ryu.base.app_manager.lookup_service_brick(key)
                module_brick_handlers = module_brick.get_handlers(ev)
                for handler in module_brick_handlers:
                    handler(ev)
                break
        # Send the FENCE message to the Core only for non-zero xids.
        # BUG FIX: replaced `is not 0` (identity test against an int
        # literal, which only works via CPython small-int caching) with
        # the correct `!= 0` equality test.
        if self.netide_xid != 0:
            msg_to_send = NetIDEOps.netIDE_encode('NETIDE_FENCE',
                                                  self.netide_xid,
                                                  module_id, 0, "")
            self.channel.socket.send(msg_to_send)
        dispatchers = lambda x: x.callers[ev.__class__].dispatchers
        handlers = [handler for handler in self.ofp_brick.get_handlers(ev)
                    if self.state in dispatchers(handler)]
        for handler in handlers:
            handler(ev)
        # Resetting netide_xid to zero
        self.netide_xid = 0
def testHello(self):
    """Parsing the canned OFPT_HELLO buffer yields the expected header."""
    parsed = ofproto_parser.header(self.bufHello)
    expected = (1, 0, 8, 1)  # version, msg_type, msg_len, xid
    for actual_field, expected_field in zip(parsed, expected):
        eq_(actual_field, expected_field)
def msg_parser(msg):
    """Decode a NetIDE-wrapped OpenFlow message and classify it.

    Returns a (category, decoded_msg) tuple where category is 1-5 for
    the known stats-reply types and 0 otherwise. Newly seen datapaths
    from OFPSwitchFeatures replies are appended to the module-level
    `datapaths` list.
    """
    msg_decoded = ""
    global datapaths
    (netide_version, netide_msg_type, netide_msg_len, netide_xid,
     netide_mod_id, netide_datapath) = NetIDEOps.netIDE_decode_header(msg)
    payload = bytearray(msg[NetIDEOps.NetIDE_Header_Size:])
    if len(payload) < ofproto_common.OFP_HEADER_SIZE:
        # Too short to contain an OpenFlow message at all.
        return (0, msg_decoded)
    (version, msg_type, msg_len, xid) = ofproto_parser.header(payload)
    msg_decoded = ofproto_parser.msg(netide_datapath, version, msg_type,
                                     msg_len, xid, payload)
    #print(msg_decoded)
    text = str(msg_decoded)
    if "OFPSwitchFeatures" in text:
        datapath = msg_decoded.datapath
        #print(type(datapath))
        if datapath not in datapaths:
            datapaths.append(datapath)
        return (0, msg_decoded)
    # Known stats-reply types, mapped to their category codes.
    categories = (("OFPPortStatsReply", 1),
                  ("OFPFlowStatsReply", 2),
                  ("OFPAggregateStatsReply", 3),
                  ("OFPQueueStatsReply", 4),
                  ("OFPTableStatsReply", 5))
    for marker, category in categories:
        if marker in text:
            return (category, msg_decoded)
    return (0, msg_decoded)
def _recv_loop(self):
    """Receive loop: reassemble OpenFlow messages from the socket and
    forward each parsed message to the principal connection."""
    buf = bytearray()
    required_len = ofproto_common.OFP_HEADER_SIZE
    count = 0
    while self.is_active:
        ret = self.socket.recv(required_len)
        if len(ret) == 0:
            # Peer closed the connection.
            self.is_active = False
            break
        buf += ret
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            # Wait for the full message if only part has arrived.
            required_len = msg_len
            if len(buf) < required_len:
                break
            msg = ofproto_parser.msg(self, version, msg_type, msg_len,
                                     xid, buf)
            # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                self.principal_connection.receive_principal_message(msg)
            buf = buf[required_len:]
            required_len = ofproto_common.OFP_HEADER_SIZE
        # We need to schedule other greenlets. Otherwise, ryu
        # can't accept new switches or handle the existing
        # switches. The limit is arbitrary. We need the better
        # approach in the future.
        count += 1
        if count > 2048:
            count = 0
            hub.sleep(0)
def handle(self):
    """OpenFlow responder: replies to HELLO and BARRIER_REQUEST, records
    FLOW_MOD messages (with their xid zeroed for comparison), and stops
    after the first barrier."""
    desc = ofproto_protocol.ProtocolDesc()
    residue = b''
    while True:
        if residue:
            data = residue
            residue = b''
        else:
            data = self.request.recv(1024)
            if data == b'':
                # Peer closed the connection.
                break
        if self.verbose:
            print(data)
        h = ofproto_parser.header(data)
        if self.verbose:
            print(h)
        version, msg_type, msg_len, xid = h
        # Anything past this message is the start of the next one.
        residue = data[msg_len:]
        desc.set_version(version=version)
        if msg_type == desc.ofproto.OFPT_HELLO:
            hello = desc.ofproto_parser.OFPHello(desc)
            hello.serialize()
            self.request.send(hello.buf)
        elif msg_type == desc.ofproto.OFPT_FLOW_MOD:
            # HACK: Clear xid into zero
            # NOTE(review): `buf` is not defined in this function;
            # presumably a module-level list collecting flow-mods — confirm.
            buf.append(data[:4] + b'\x00\x00\x00\x00' + data[8:msg_len])
        elif msg_type == desc.ofproto.OFPT_BARRIER_REQUEST:
            brep = desc.ofproto_parser.OFPBarrierReply(desc)
            brep.xid = xid
            brep.serialize()
            self.request.send(brep.buf)
            break
def _on_message(self, data):
    """Body-received callback: rebuild the full message from the saved
    header plus body, dispatch it, and schedule the next header read."""
    (version, msg_type, msg_len, xid) = ofproto_parser.header(
        self._msg_header)
    assert len(data) == msg_len - ofproto_common.OFP_HEADER_SIZE
    full_buf = self._msg_header + data
    msg = ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                             full_buf)
    logging.getLogger("openflow").debug("RECV %s %s" % (self.address, msg))
    self.dispatch(msg)
    if self.stream.closed():
        return
    self.stream.read_bytes(ofproto_common.OFP_HEADER_SIZE,
                           stack_context.wrap(self._on_header))
def testPacketIn(self):
    """The canned packet-in buffer parses into an OFPPacketIn."""
    (version, msg_type, msg_len, xid) = ofproto_parser.header(
        self.bufPacketIn)
    parsed = ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                                self.bufPacketIn)
    LOG.debug(parsed)
    ok_(isinstance(parsed, ofproto_v1_0_parser.OFPPacketIn))
def testHello(self):
    """The canned hello buffer decodes to version 1, type 0, len 8, xid 1."""
    header_fields = ofproto_parser.header(self.bufHello)
    version, msg_type, msg_len, xid = header_fields
    eq_(version, 1)
    eq_(msg_type, 0)
    eq_(msg_len, 8)
    eq_(xid, 1)
def _parse(self, buf, parse):
    """Split *buf* into complete OpenFlow packets, invoking *parse* on
    each one; return the unconsumed tail (an incomplete trailing
    packet, if any)."""
    while len(buf) >= ofproto_common.OFP_HEADER_SIZE:
        (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
        if msg_len > len(buf):
            # Only part of the next packet has arrived.
            break
        parse(buf[:msg_len])
        buf = buf[msg_len:]
    return buf
def _recv_loop(self):
    """Receive loop: reassemble OpenFlow messages from the switch
    socket, parse them, and dispatch the resulting events to observers
    and state-appropriate handlers until the connection dies."""
    buf = bytearray()
    required_len = ofproto_common.OFP_HEADER_SIZE
    count = 0
    while self.state != DEAD_DISPATCHER:
        ret = ""
        try:
            ret = self.socket.recv(required_len)
        except SocketTimeout:
            continue
        except ssl.SSLError:
            # eventlet throws SSLError (which is a subclass of IOError)
            # on SSL socket read timeout; re-try the loop in this case.
            continue
        except (EOFError, IOError):
            break
        if len(ret) == 0:
            # Peer closed the connection.
            break
        buf += ret
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            # The header gives the total length; wait for the rest.
            required_len = msg_len
            if len(buf) < required_len:
                break
            msg = ofproto_parser.msg(self, version, msg_type, msg_len,
                                     xid, buf[:msg_len])
            # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                ev = ofp_event.ofp_msg_to_ev(msg)
                self.ofp_brick.send_event_to_observers(ev, self.state)
                dispatchers = lambda x: x.callers[ev.__class__].dispatchers
                handlers = [
                    handler for handler in self.ofp_brick.get_handlers(ev)
                    if self.state in dispatchers(handler)
                ]
                for handler in handlers:
                    handler(ev)
            buf = buf[required_len:]
            required_len = ofproto_common.OFP_HEADER_SIZE
            # We need to schedule other greenlets. Otherwise, ryu
            # can't accept new switches or handle the existing
            # switches. The limit is arbitrary. We need the better
            # approach in the future.
            count += 1
            if count > 2048:
                count = 0
                hub.sleep(0)
def _recv_loop(self):
    """Receive loop: reassemble OpenFlow messages from the socket and
    dispatch them as events, exiting when the peer disconnects or a
    close is requested.

    BUG FIX: the bare ``except:`` around recv() also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception:`` so
    interpreter-exit exceptions propagate as intended.
    """
    buf = bytearray()
    required_len = ofproto_common.OFP_HEADER_SIZE
    count = 0
    while True:
        ret = ""
        try:
            ret = self.socket.recv(required_len)
        except Exception:
            # Hit socket timeout; decide what to do.
            if self.close_requested:
                pass
            else:
                continue
        if (len(ret) == 0) or (self.close_requested):
            self.socket.close()
            break
        buf += ret
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            # Wait until the whole message has arrived.
            required_len = msg_len
            if len(buf) < required_len:
                break
            msg = ofproto_parser.msg(self, version, msg_type, msg_len,
                                     xid, buf[:msg_len])
            # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                ev = ofp_event.ofp_msg_to_ev(msg)
                self.ofp_brick.send_event_to_observers(ev, self.state)
                dispatchers = lambda x: x.callers[ev.__class__].dispatchers
                handlers = [
                    handler for handler in self.ofp_brick.get_handlers(ev)
                    if self.state in dispatchers(handler)
                ]
                for handler in handlers:
                    handler(ev)
            buf = buf[required_len:]
            required_len = ofproto_common.OFP_HEADER_SIZE
        # We need to schedule other greenlets. Otherwise, ryu
        # can't accept new switches or handle the existing
        # switches. The limit is arbitrary. We need the better
        # approach in the future.
        count += 1
        if count > 2048:
            count = 0
            hub.sleep(0)
def _recv_loop(self):
    """Receive loop: read OpenFlow messages off the socket until the
    dispatcher goes DEAD, parsing each one and delivering the resulting
    event to observers and the handlers registered for this state."""
    buf = bytearray()
    required_len = ofproto_common.OFP_HEADER_SIZE
    count = 0
    while self.state != DEAD_DISPATCHER:
        ret = ""
        try:
            ret = self.socket.recv(required_len)
        except SocketTimeout:
            continue
        except ssl.SSLError:
            # eventlet throws SSLError (which is a subclass of IOError)
            # on SSL socket read timeout; re-try the loop in this case.
            continue
        except (EOFError, IOError):
            break
        if len(ret) == 0:
            # Peer closed the connection.
            break
        buf += ret
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            # Wait for the full message when only part has arrived.
            required_len = msg_len
            if len(buf) < required_len:
                break
            msg = ofproto_parser.msg(
                self, version, msg_type, msg_len, xid, buf[:msg_len])
            # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                ev = ofp_event.ofp_msg_to_ev(msg)
                self.ofp_brick.send_event_to_observers(ev, self.state)
                dispatchers = lambda x: x.callers[ev.__class__].dispatchers
                handlers = [handler for handler
                            in self.ofp_brick.get_handlers(ev)
                            if self.state in dispatchers(handler)]
                for handler in handlers:
                    handler(ev)
            buf = buf[required_len:]
            required_len = ofproto_common.OFP_HEADER_SIZE
            # We need to schedule other greenlets. Otherwise, ryu
            # can't accept new switches or handle the existing
            # switches. The limit is arbitrary. We need the better
            # approach in the future.
            count += 1
            if count > 2048:
                count = 0
                hub.sleep(0)
def _recv_loop(self):
    """Receive loop: reassemble and dispatch OpenFlow messages until the
    peer disconnects or a close is requested.

    BUG FIX: narrowed the bare ``except:`` (which silently caught
    SystemExit and KeyboardInterrupt too) to ``except Exception:`` so
    only runtime errors such as socket timeouts are handled here.
    """
    buf = bytearray()
    required_len = ofproto_common.OFP_HEADER_SIZE
    count = 0
    while True:
        ret = ""
        try:
            ret = self.socket.recv(required_len)
        except Exception:
            # Hit socket timeout; decide what to do.
            if self.close_requested:
                pass
            else:
                continue
        if (len(ret) == 0) or (self.close_requested):
            self.socket.close()
            break
        buf += ret
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            # The header gives the total length; wait for the rest.
            required_len = msg_len
            if len(buf) < required_len:
                break
            msg = ofproto_parser.msg(
                self, version, msg_type, msg_len, xid, buf[:msg_len])
            # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                ev = ofp_event.ofp_msg_to_ev(msg)
                self.ofp_brick.send_event_to_observers(ev, self.state)
                dispatchers = lambda x: x.callers[ev.__class__].dispatchers
                handlers = [handler for handler
                            in self.ofp_brick.get_handlers(ev)
                            if self.state in dispatchers(handler)]
                for handler in handlers:
                    handler(ev)
            buf = buf[required_len:]
            required_len = ofproto_common.OFP_HEADER_SIZE
        # We need to schedule other greenlets. Otherwise, ryu
        # can't accept new switches or handle the existing
        # switches. The limit is arbitrary. We need the better
        # approach in the future.
        count += 1
        if count > 2048:
            count = 0
            hub.sleep(0)
def testFeaturesReply(self):
    """The canned features-reply parses into OFPSwitchFeatures whose
    ports dict holds OFPPhyPort entries."""
    (version, msg_type, msg_len, xid) = ofproto_parser.header(
        self.bufFeaturesReply)
    reply = ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                               self.bufFeaturesReply)
    LOG.debug(reply)
    ok_(isinstance(reply, ofproto_v1_0_parser.OFPSwitchFeatures))
    LOG.debug(reply.ports[65534])
    for port_no in (1, 2, 65534):
        ok_(isinstance(reply.ports[port_no],
                       ofproto_v1_0_parser.OFPPhyPort))
def error_msg_handler(self, ev):
    """On a table-full OFPErrorMsg, evict random flow entries and resend
    the failed flow-mod carried in the error's data field.

    BUG FIX: the final send used the undefined name ``error_msg`` (a
    NameError at runtime); it now uses ``err_msg``. The py2 ``print``
    statements were also rewritten as calls, which behave identically
    on both py2 and py3 with a single argument.
    """
    err_msg = ev.msg
    #self.logger.info('OFPErrorMsg received: type=0x%02x code=0x%02x message=%s', msg.type, msg.code, utils.hex_array(msg.data))
    if (err_msg.type == ofproto_v1_3.OFPET_FLOW_MOD_FAILED
            and err_msg.code == ofproto_v1_3.OFPFMFC_TABLE_FULL):
        print('captured full table error message')
        # Free up table space before retrying.
        self.rand_remove(err_msg.datapath)
        #parser = err_msg.datapath.ofproto_parser
        # err_msg.data carries the original failed request; re-parse it
        # so it can be retransmitted.
        (version, msg_type, msg_len, xid) = ofproto_parser.header(
            err_msg.data)
        print('version %s, msg_type %s, msg_len %s, xid %s, len %s'
              % (version, msg_type, msg_len, xid, len(err_msg.data)))
        msg = ofproto_parser.msg(err_msg.datapath, version, msg_type,
                                 msg_len, xid, err_msg.data)
        # controller will assign a new xid
        msg.xid = None
        err_msg.datapath.send_msg(msg)
def test_check_msg_parser(self):
    """Run the parser with a deliberately unsupported version byte."""
    (_real_version, msg_type, msg_len, xid) = ofproto_parser.header(
        self.bufPacketIn)
    bogus_version = 0xff  # no parser is registered for this version
    ofproto_parser.msg(self, bogus_version, msg_type, msg_len, xid,
                       self.bufPacketIn)
def decode_sent_message(msg):
    """Debug helper: print the NetIDE header fields of *msg* and, when
    the payload is long enough to hold one, the decoded OpenFlow
    message it wraps."""
    (netide_version, netide_msg_type, netide_msg_len, netide_xid,
     netide_mod_id, netide_datapath) = NetIDEOps.netIDE_decode_header(msg)
    print(netide_version, netide_msg_type, netide_msg_len, netide_xid,
          netide_mod_id, netide_datapath)
    payload = bytearray(msg[NetIDEOps.NetIDE_Header_Size:])
    #print(len(payload))
    if len(payload) < ofproto_common.OFP_HEADER_SIZE:
        return
    #prrr = struct.unpack('BBHI', payload)
    #print(prrr)
    (version, msg_type, msg_len, xid) = ofproto_parser.header(payload)
    print(version, msg_type, msg_len, xid)
    msg_decoded = ofproto_parser.msg(netide_datapath, version, msg_type,
                                     msg_len, xid, payload)
    print(msg_decoded)
def testPacketIn(self):
    """Parsing the canned buffer produces an OFPPacketIn instance."""
    header_fields = ofproto_parser.header(self.bufPacketIn)
    version, msg_type, msg_len, xid = header_fields
    packet_in = ofproto_parser.msg(self, version, msg_type, msg_len,
                                   xid, self.bufPacketIn)
    LOG.debug(packet_in)
    ok_(isinstance(packet_in, ofproto_v1_0_parser.OFPPacketIn))
def send(self, buf):
    """Log an outgoing OpenFlow message buffer and queue it for
    transmission on the send queue (if one exists).

    BUG FIX: the size-mismatch debug message logged ``required_len``
    and ``msg_len`` — which are always equal — hiding the actual
    shortfall; it now logs the real buffer length.
    """
    # -------------------------- Fujitsu code start -----------------------------
    # For optical enhancing
    (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
    required_len = msg_len
    if len(buf) < required_len:
        LOG.debug("send data error unmatch size (required size: %d / buffer size %d)",
                  required_len, len(buf))
    LOG.debug("send data (dpid=%s) <%s> [%s]",
              str(self.id),
              self.msgtype_dict.setdefault(msg_type,
                                           'unknown(%d)' % msg_type),
              binascii.hexlify(buf))
    # -------------------------- Fujitsu code end -------------------------------
    if self.send_q:
        self.send_q.put(buf)
def recv_ofp(self):
    """Read one complete OpenFlow message from the connection and
    return it parsed, leaving any surplus bytes in self.data."""
    logging.debug("%s", binascii.hexlify(self.data))
    # Ensure the fixed-size header is available, then the full message.
    self.recv(ofproto_common.OFP_HEADER_SIZE)
    version, msg_type, msg_len, xid = ofproto_parser.header(self.data)
    self.recv(msg_len)
    raw = self.data[:msg_len]
    self.data = self.data[msg_len:]
    return ofproto_parser.msg(self, version, msg_type, msg_len, xid, raw)
def OF_error_msg_handler(self, event):
    """Log a received OFPErrorMsg, decoding the failed request carried
    in its data field when possible; fall back to a raw hex dump."""
    msg = event.msg
    try:
        (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
        # NOTE(review): msg_len is reduced by OFP_HEADER_SIZE before
        # being handed to the parser — presumably deliberate for this
        # truncated error payload, but confirm against
        # ofproto_parser.msg()'s expectations.
        ryu_msg = ofproto_parser.msg(
            self._datapath, version, msg_type,
            msg_len - ofproto_common.OFP_HEADER_SIZE, xid, msg.data)
        LOG.error('OFPErrorMsg received: %s', ryu_msg)
    except Exception:
        # Payload could not be parsed as an OpenFlow message.
        LOG.error('Unrecognized OFPErrorMsg received: '
                  'type=0x%(type)02x code=0x%(code)02x '
                  'message=%(msg)s',
                  {'type': msg.type, 'code': msg.code,
                   'msg': utils.hex_array(msg.data)})
def recv_ofp(self):
    """Pull exactly one OpenFlow message off the wire.

    Reads the header to learn the total length, reads the remainder,
    consumes those bytes from self.data and returns the parsed message.
    """
    logging.debug("%s", binascii.hexlify(self.data))
    self.recv(ofproto_common.OFP_HEADER_SIZE)
    version, msg_type, msg_len, xid = ofproto_parser.header(self.data)
    self.recv(msg_len)
    message_bytes = self.data[:msg_len]
    parsed = ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                                message_bytes)
    self.data = self.data[msg_len:]
    return parsed
def _test_msg(self, name, wire_msg, json_str): json_dict = json.loads(json_str) # on-wire -> OFPxxx -> json (version, msg_type, msg_len, xid) = ofproto_parser.header(wire_msg) try: has_parser, has_serializer = implemented[version][msg_type] except KeyError: has_parser = True has_serializer = True dp = DummyDatapath(*self._ofp_versions[version]) if has_parser: msg = ofproto_parser.msg(dp, version, msg_type, msg_len, xid, wire_msg) json_dict2 = self._msg_to_jsondict(msg) # XXXdebug code open(('/tmp/%s.json' % name), 'wb').write(json.dumps(json_dict2)) eq_(json_dict, json_dict2) # json -> OFPxxx -> json msg2 = self._jsondict_to_msg(dp, json_dict) if has_serializer: msg2.serialize() eq_(self._msg_to_jsondict(msg2), json_dict) eq_(wire_msg, msg2.buf) # check if "len" "length" fields can be omitted def _remove(d, names): f = lambda x: _remove(x, names) if isinstance(d, list): return map(f, d) if isinstance(d, dict): d2 = {} for k, v in d.iteritems(): if k in names: continue d2[k] = f(v) return d2 return d json_dict3 = _remove(json_dict, ['len', 'length']) msg3 = self._jsondict_to_msg(dp, json_dict3) msg3.serialize() eq_(wire_msg, msg3.buf) msg2.serialize() eq_(wire_msg, msg2.buf)
def _test_msg(self, name, wire_msg, json_str): json_dict = json.loads(json_str) # on-wire -> OFPxxx -> json (version, msg_type, msg_len, xid) = ofproto_parser.header(wire_msg) try: has_parser, has_serializer = implemented[version][msg_type] except KeyError: has_parser = True has_serializer = True dp = ofproto_protocol.ProtocolDesc(version=version) if has_parser: msg = ofproto_parser.msg(dp, version, msg_type, msg_len, xid, wire_msg) json_dict2 = self._msg_to_jsondict(msg) # XXXdebug code open(('/tmp/%s.json' % name), 'wb').write(json.dumps(json_dict2)) eq_(json_dict, json_dict2) # json -> OFPxxx -> json msg2 = self._jsondict_to_msg(dp, json_dict) if has_serializer: msg2.serialize() eq_(self._msg_to_jsondict(msg2), json_dict) eq_(wire_msg, msg2.buf) # check if "len" "length" fields can be omitted def _remove(d, names): f = lambda x: _remove(x, names) if isinstance(d, list): return map(f, d) if isinstance(d, dict): d2 = {} for k, v in d.iteritems(): if k in names: continue d2[k] = f(v) return d2 return d json_dict3 = _remove(json_dict, ['len', 'length']) msg3 = self._jsondict_to_msg(dp, json_dict3) msg3.serialize() eq_(wire_msg, msg3.buf) msg2.serialize() eq_(wire_msg, msg2.buf)
def handle_event(self, header, msg, of_proto):
    """Parse and dispatch a NetIDE-wrapped OpenFlow message.

    Delivers the event to the module matching the NetIDE mod_id, fences
    the transaction back to the Core for non-zero xids, and gates
    multipart replies (OF1.3+) until a features-reply has been seen
    from the datapath.
    """
    #required_len = self.ofp.OFP_HEADER_SIZE
    ret = bytearray(msg)
    (version, msg_type, msg_len, xid) = ofproto_parser.header(ret)
    self.netide_xid = header[NetIDEOps.NetIDE_header['XID']]
    msg = ofproto_parser.msg(self, version, msg_type, msg_len, xid, ret)
    if msg:
        ev = ofp_event.ofp_msg_to_ev(msg)
        event_observers = self.ofp_brick.get_observers(ev, self.state)
        module_id = header[NetIDEOps.NetIDE_header['MOD_ID']]
        for key, value in self.channel.running_modules.iteritems():
            if value == module_id and key in event_observers:
                module_brick = ryu.base.app_manager.lookup_service_brick(
                    key)
                module_brick_handlers = module_brick.get_handlers(ev)
                for handler in module_brick_handlers:
                    handler(ev)
                break
        # Send the FENCE message to the Core only for non-zero xids.
        # BUG FIX: was `is not 0` (identity comparison with an int
        # literal, which only works via CPython small-int caching);
        # use equality.
        if self.netide_xid != 0:
            msg_to_send = NetIDEOps.netIDE_encode('NETIDE_FENCE',
                                                  self.netide_xid,
                                                  module_id, 0, "")
            self.channel.socket.send(msg_to_send)
        dispatchers = lambda x: x.callers[ev.__class__].dispatchers
        handlers = [
            handler for handler in self.ofp_brick.get_handlers(ev)
            if self.state in dispatchers(handler)
        ]
        for handler in handlers:
            # we record that we received a feature reply from that specific device
            if msg_type == of_proto.OFPT_FEATURES_REPLY:
                self.datapath_init[msg.datapath.id] = True
            # we do not allow multipart messages until the feature_reply has been received (needed for OF1.3 or higher)
            if of_proto.OFP_VERSION >= 0x04:
                if (msg_type == of_proto.OFPT_MULTIPART_REPLY
                        and msg.datapath.id not in self.datapath_init):
                    break
            handler(ev)
        # Resetting netide_xid to zero
        self.netide_xid = 0
def _upstream_parse(self, pkt):
    """Route one upstream (switch -> controller) OpenFlow packet.

    Packets whose xid is the reserved 0xffffffff are diverted to the
    collector; everything else is forwarded to the controller socket.
    """
    (version, msg_type, msg_len, xid) = ofproto_parser.header(pkt)
    if(xid == 0xffffffff):
        self._upstream_collector(pkt)
        # return
    else:
        try:
            self.controller_socket.send(pkt)
        except:
            # Controller connection broke; log with the switch id
            # (hex when known).
            if(self.id != None):
                print(datetime.datetime.utcnow().strftime('%Y/%m/%d %H:%M:%S') + " : ERROR [" + str(hex(self.id)) + "] --- Broken Pipe (Upstream)")
            else:
                print(datetime.datetime.utcnow().strftime('%Y/%m/%d %H:%M:%S') + " : ERROR [" + str(self.id) + "] --- Broken Pipe (Upstream)")
            # self._close()
            # pass
            # NOTE(review): structure inferred from mangled source — the
            # collector call is assumed to be the fallback inside this
            # except block when the controller is unreachable; confirm.
            self._upstream_collector(pkt)
def handle_event(self, header, msg, of_proto):
    """Dispatch a NetIDE-wrapped OpenFlow message to its owning module
    and to state-appropriate handlers, fencing non-zero xids back to
    the Core and holding multipart replies (OF1.3+) until the
    datapath's features-reply has been recorded.
    """
    #required_len = self.ofp.OFP_HEADER_SIZE
    ret = bytearray(msg)
    (version, msg_type, msg_len, xid) = ofproto_parser.header(ret)
    self.netide_xid = header[NetIDEOps.NetIDE_header['XID']]
    msg = ofproto_parser.msg(self, version, msg_type, msg_len, xid, ret)
    if msg:
        ev = ofp_event.ofp_msg_to_ev(msg)
        event_observers = self.ofp_brick.get_observers(ev, self.state)
        module_id = header[NetIDEOps.NetIDE_header['MOD_ID']]
        for key, value in self.channel.running_modules.iteritems():
            if value == module_id and key in event_observers:
                module_brick = ryu.base.app_manager.lookup_service_brick(
                    key)
                module_brick_handlers = module_brick.get_handlers(ev)
                for handler in module_brick_handlers:
                    handler(ev)
                break
        # Send the FENCE message to the Core only for non-zero xids.
        # BUG FIX: replaced `is not 0` (identity test against an int
        # literal — works only through CPython's small-int caching)
        # with the correct `!= 0` equality test.
        if self.netide_xid != 0:
            msg_to_send = NetIDEOps.netIDE_encode('NETIDE_FENCE',
                                                  self.netide_xid,
                                                  module_id, 0, "")
            self.channel.socket.send(msg_to_send)
        dispatchers = lambda x: x.callers[ev.__class__].dispatchers
        handlers = [handler for handler in self.ofp_brick.get_handlers(ev)
                    if self.state in dispatchers(handler)]
        for handler in handlers:
            # we record that we received a feature reply from that specific device
            if msg_type == of_proto.OFPT_FEATURES_REPLY:
                self.datapath_init[msg.datapath.id] = True
            # we do not allow multipart messages until the feature_reply has been received (needed for OF1.3 or higher)
            if of_proto.OFP_VERSION >= 0x04:
                if (msg_type == of_proto.OFPT_MULTIPART_REPLY
                        and msg.datapath.id not in self.datapath_init):
                    break
            handler(ev)
        # Resetting netide_xid to zero
        self.netide_xid = 0
def testFeaturesReply(self):
    """Parse the canned features-reply buffer and verify the port map."""
    version, msg_type, msg_len, xid = ofproto_parser.header(self.bufFeaturesReply)
    parsed = ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                                self.bufFeaturesReply)
    LOG.debug(parsed)
    ok_(isinstance(parsed, ofproto_v1_0_parser.OFPSwitchFeatures))
    LOG.debug(parsed.ports[65534])
    for port_no in (1, 2, 65534):
        ok_(isinstance(parsed.ports[port_no], ofproto_v1_0_parser.OFPPhyPort))
def handle(self):
    """Serve one fake-switch connection.

    Reads OpenFlow messages from ``self.request`` (a socket), replying to
    HELLO and BARRIER_REQUEST, buffering FLOW_MOD payloads via
    ``self._add_msg_to_buf``, and acknowledging OF1.3 (Ext-230) bundle
    control experimenter messages.
    """
    desc = ofproto_protocol.ProtocolDesc()
    residue = b''
    while True:
        if residue:
            # Bytes left over from the previous recv form the next message.
            data = residue
            residue = b''
        else:
            data = self.request.recv(1024)
            if data == b'':
                # Peer closed the connection.
                break
        if self.verbose:
            print(data)
        h = ofproto_parser.header(data)
        if self.verbose:
            print(h)
        version, msg_type, msg_len, xid = h
        # Anything past msg_len belongs to the next iteration.
        residue = data[msg_len:]
        desc.set_version(version=version)
        if msg_type == desc.ofproto.OFPT_HELLO:
            hello = desc.ofproto_parser.OFPHello(desc)
            hello.serialize()
            self.request.send(hello.buf)
        elif msg_type == desc.ofproto.OFPT_FLOW_MOD:
            self._add_msg_to_buf(data, msg_len)
        elif version == 4 and msg_type == desc.ofproto.OFPT_EXPERIMENTER:
            # This is for OF13 Ext-230 bundle
            # TODO: support bundle for OF>1.3
            exp = desc.ofproto_parser.OFPExperimenter.parser(
                object(), version, msg_type, msg_len, xid, data)
            self._add_msg_to_buf(data, msg_len)
            if isinstance(exp, desc.ofproto_parser.ONFBundleCtrlMsg):
                # Acknowledge bundle control: reply type is request type + 1.
                ctrlrep = desc.ofproto_parser.ONFBundleCtrlMsg(
                    desc, exp.bundle_id, exp.type + 1, 0, [])
                ctrlrep.xid = xid
                ctrlrep.serialize()
                self.request.send(ctrlrep.buf)
        elif msg_type == desc.ofproto.OFPT_BARRIER_REQUEST:
            brep = desc.ofproto_parser.OFPBarrierReply(desc)
            brep.xid = xid
            brep.serialize()
            self.request.send(brep.buf)
def _recv_loop(self):
    """Receive OpenFlow messages from the socket and dispatch events.

    Accumulates bytes until a complete message (per the header's msg_len)
    is buffered, parses it, and delivers the resulting event to observers
    and to handlers registered for the current dispatcher state.
    """
    buf = bytearray()
    required_len = ofproto_common.OFP_HEADER_SIZE
    count = 0
    while self.is_active:
        ret = self.socket.recv(required_len)
        if len(ret) == 0:
            # Peer closed the connection.
            self.is_active = False
            break
        buf += ret
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            required_len = msg_len
            if len(buf) < required_len:
                # Incomplete message; wait for more bytes.
                break
            msg = ofproto_parser.msg(self, version, msg_type, msg_len,
                                     xid, buf)
            #LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                ev = ofp_event.ofp_msg_to_ev(msg)
                self.ofp_brick.send_event_to_observers(ev, self.state)
                handlers = [
                    handler for handler in self.ofp_brick.get_handlers(ev)
                    if self.state in handler.dispatchers
                ]
                for handler in handlers:
                    handler(ev)
            buf = buf[required_len:]
            required_len = ofproto_common.OFP_HEADER_SIZE
            # We need to schedule other greenlets. Otherwise, ryu
            # can't accept new switches or handle the existing
            # switches. The limit is arbitrary. We need the better
            # approach in the future.
            count += 1
            if count > 2048:
                count = 0
                hub.sleep(0)
def handle(self):
    """Emulate the switch side of an OpenFlow session.

    Answers HELLO and BARRIER_REQUEST, records FLOW_MOD messages, and
    acknowledges OF1.3 bundle-control experimenter messages.
    """
    desc = ofproto_protocol.ProtocolDesc()
    leftover = b''
    while True:
        if leftover:
            data, leftover = leftover, b''
        else:
            data = self.request.recv(1024)
            if data == b'':
                break
        if self.verbose:
            print(data)
        header = ofproto_parser.header(data)
        if self.verbose:
            print(header)
        version, msg_type, msg_len, xid = header
        leftover = data[msg_len:]
        desc.set_version(version=version)
        ofp = desc.ofproto
        make = desc.ofproto_parser
        if msg_type == ofp.OFPT_HELLO:
            reply = make.OFPHello(desc)
            reply.serialize()
            self.request.send(reply.buf)
        elif msg_type == ofp.OFPT_FLOW_MOD:
            self._add_msg_to_buf(data, msg_len)
        elif version == 4 and msg_type == ofp.OFPT_EXPERIMENTER:
            # OF13 Ext-230 bundle support only.
            # TODO: support bundle for OF>1.3
            exp = make.OFPExperimenter.parser(
                object(), version, msg_type, msg_len, xid, data)
            self._add_msg_to_buf(data, msg_len)
            if isinstance(exp, make.ONFBundleCtrlMsg):
                # Reply type is request type + 1 per the bundle extension.
                ack = make.ONFBundleCtrlMsg(desc, exp.bundle_id,
                                            exp.type + 1, 0, [])
                ack.xid = xid
                ack.serialize()
                self.request.send(ack.buf)
        elif msg_type == ofp.OFPT_BARRIER_REQUEST:
            ack = make.OFPBarrierReply(desc)
            ack.xid = xid
            ack.serialize()
            self.request.send(ack.buf)
def _recv_loop(self):
    """Pump OpenFlow messages off the socket and dispatch them as events."""
    pending = bytearray()
    want = ofproto_common.OFP_HEADER_SIZE
    yield_counter = 0
    while self.is_active:
        chunk = self.socket.recv(want)
        if not chunk:
            self.is_active = False
            break
        pending += chunk
        while len(pending) >= want:
            version, msg_type, msg_len, xid = ofproto_parser.header(pending)
            want = msg_len
            if len(pending) < want:
                break  # need the rest of this message first
            parsed = ofproto_parser.msg(self, version, msg_type,
                                        msg_len, xid, pending)
            if parsed:
                ev = ofp_event.ofp_msg_to_ev(parsed)
                self.ofp_brick.send_event_to_observers(ev, self.state)
                for handler in self.ofp_brick.get_handlers(ev):
                    if self.state in handler.dispatchers:
                        handler(ev)
            pending = pending[want:]
            want = ofproto_common.OFP_HEADER_SIZE
            # Yield periodically so other greenlets (new switches,
            # existing connections) get a chance to run; the threshold
            # is arbitrary.
            yield_counter += 1
            if yield_counter > 2048:
                yield_counter = 0
                hub.sleep(0)
def error_msg_handler(self, ev):
    """Log details of a received OFPErrorMsg, including the failed request.

    Fix: the original parsed ``msg.data`` unconditionally, which raises
    (struct.error) when a misbehaving switch sends fewer than
    OFP_HEADER_SIZE bytes of the failed request; now guarded, matching
    the spec's requirement of at least 64 bytes of data.
    """
    msg = ev.msg
    ofp = msg.datapath.ofproto
    self.logger.debug('EventOFPErrorMsg received.')
    self.logger.debug('version=%s, msg_type=%s, msg_len=%s, xid=%s',
                      hex(msg.version), hex(msg.msg_type),
                      hex(msg.msg_len), hex(msg.xid))
    self.logger.debug(' `-- msg_type: %s',
                      ofp.ofp_msg_type_to_str(msg.msg_type))
    self.logger.debug("OFPErrorMsg(type=%s, code=%s, data=b'%s')",
                      hex(msg.type), hex(msg.code),
                      utils.binary_str(msg.data))
    self.logger.debug(' |-- type: %s',
                      ofp.ofp_error_type_to_str(msg.type))
    self.logger.debug(' |-- code: %s',
                      ofp.ofp_error_code_to_str(msg.type, msg.code))
    if len(msg.data) >= ofp.OFP_HEADER_SIZE:
        (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
        self.logger.debug(
            ' `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s',
            hex(version), hex(msg_type), hex(msg_len), hex(xid))
        self.logger.debug(' `-- msg_type: %s',
                          ofp.ofp_msg_type_to_str(msg_type))
    else:
        self.logger.warning(
            "The data field sent from the switch is too short: "
            "len(msg.data) < OFP_HEADER_SIZE\n"
            "The OpenFlow Spec says that the data field should contain "
            "at least 64 bytes of the failed request.\n"
            "Please check the settings or implementation of your switch.")
def error_msg_handler(self, ev):
    """Log details of a received OFPErrorMsg, including the failed request.

    Fix: guard the header parse of ``msg.data`` — the original called
    ``ofproto_parser.header`` unconditionally and crashed (struct.error)
    when the switch sent fewer than OFP_HEADER_SIZE bytes of data.
    """
    msg = ev.msg
    ofp = msg.datapath.ofproto
    self.logger.debug('EventOFPErrorMsg received.')
    self.logger.debug(
        'version=%s, msg_type=%s, msg_len=%s, xid=%s',
        hex(msg.version), hex(msg.msg_type), hex(msg.msg_len),
        hex(msg.xid))
    self.logger.debug(
        ' `-- msg_type: %s', ofp.ofp_msg_type_to_str(msg.msg_type))
    self.logger.debug(
        "OFPErrorMsg(type=%s, code=%s, data=b'%s')",
        hex(msg.type), hex(msg.code), utils.binary_str(msg.data))
    self.logger.debug(
        ' |-- type: %s', ofp.ofp_error_type_to_str(msg.type))
    self.logger.debug(
        ' |-- code: %s', ofp.ofp_error_code_to_str(msg.type, msg.code))
    if len(msg.data) >= ofp.OFP_HEADER_SIZE:
        (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
        self.logger.debug(
            ' `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s',
            hex(version), hex(msg_type), hex(msg_len), hex(xid))
        self.logger.debug(
            ' `-- msg_type: %s', ofp.ofp_msg_type_to_str(msg_type))
    else:
        # The OpenFlow spec requires at least 64 bytes of the failed
        # request in the error's data field.
        self.logger.warning(
            "The data field sent from the switch is too short: "
            "len(msg.data) < OFP_HEADER_SIZE")
def parser(cls, buf):
    """Parse one OpenFlow message from *buf*.

    :returns: (cls instance wrapping the message, cls, remaining buffer).
              Unknown versions or parse failures are wrapped in
              OFPUnparseableMsg instead of propagating the error.
    """
    from ryu.ofproto import ofproto_parser
    from ryu.ofproto import ofproto_protocol
    (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
    msg_parser = ofproto_parser._MSG_PARSERS.get(version)
    if msg_parser is None:
        # Unknown OpenFlow version: keep the payload unparsed.
        msg = OFPUnparseableMsg(None, version, msg_type, msg_len, xid,
                                buf[cls._MIN_LEN:msg_len])
        return cls(msg), cls, buf[msg_len:]
    datapath = ofproto_protocol.ProtocolDesc(version=version)
    try:
        msg = msg_parser(datapath, version, msg_type, msg_len, xid,
                         buf[:msg_len])
    except Exception:
        # Malformed body: fall back to the raw payload.  (Was a bare
        # ``except:``, which also caught KeyboardInterrupt/SystemExit.)
        msg = OFPUnparseableMsg(
            datapath, version, msg_type, msg_len, xid,
            buf[datapath.ofproto.OFP_HEADER_SIZE:msg_len])
    return cls(msg), cls, buf[msg_len:]
def _downstream_parse(self, pkt):
    """Relay a controller-originated message down to the switch.

    Forwards *pkt* over the switch socket (logging a broken-pipe error on
    failure), then inspects controller command messages of interest.

    :param pkt: raw OpenFlow message bytes (starts with an OFP header).
    """
    try:
        self.switch_socket.send(pkt)
    except Exception:
        # Send failed (typically a broken pipe).  A bare ``except:``
        # here would also swallow KeyboardInterrupt/SystemExit.
        timestamp = datetime.datetime.utcnow().strftime('%Y/%m/%d %H:%M:%S')
        if self.id is not None:
            print(timestamp + " : ERROR [" + str(hex(self.id)) +
                  "] --- Broken Pipe (Downstream)")
        else:
            # id unknown yet: log the raw value ("None").
            print(timestamp + " : ERROR [" + str(self.id) +
                  "] --- Broken Pipe (Downstream)")
        # self._close()
    (version, msg_type, msg_len, xid) = ofproto_parser.header(pkt)
    # Controller command messages
    if msg_type == ofproto_v1_0.OFPT_FLOW_MOD:
        # Parse (and thereby validate) the flow-mod; the result is not
        # used further in this visible chunk.
        msg = ofproto_v1_0_parser.OFPFlowMod.parser(
            self.datapath, version, msg_type, msg_len, xid, pkt)
def _recv_loop(self):
    """Read OpenFlow messages from the socket onto the receive queue.

    Buffers raw bytes until a complete message (per the header length
    field) is available, parses it, and enqueues the parsed message on
    ``self.recv_q``.
    """
    buf = bytearray()
    required_len = ofproto.OFP_HEADER_SIZE
    while self.is_active:
        ret = self.socket.recv(ofproto.OFP_MSG_SIZE_MAX)
        if len(ret) == 0:
            # Connection closed by the peer.
            self.is_active = False
            break
        buf += ret
        while len(buf) >= required_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            required_len = msg_len
            if len(buf) < required_len:
                # Incomplete message; wait for more data.
                break
            msg = ofproto_parser.msg(self, version, msg_type, msg_len,
                                     xid, buf)
            #LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            self.recv_q.put(msg)
            buf = buf[required_len:]
            required_len = ofproto.OFP_HEADER_SIZE
def _recv_loop(self):
    """Continuously parse inbound OpenFlow messages onto the receive queue."""
    pending = bytearray()
    needed = ofproto.OFP_HEADER_SIZE
    while self.is_active:
        chunk = self.socket.recv(ofproto.OFP_MSG_SIZE_MAX)
        if not chunk:
            self.is_active = False
            break
        pending += chunk
        while len(pending) >= needed:
            version, msg_type, msg_len, xid = ofproto_parser.header(pending)
            needed = msg_len
            if len(pending) < needed:
                break  # wait for the remainder of this message
            parsed = ofproto_parser.msg(self, version, msg_type,
                                        msg_len, xid, pending)
            self.recv_q.put(parsed)
            pending = pending[needed:]
            needed = ofproto.OFP_HEADER_SIZE
def parser(cls, buf):
    """Parse one OpenFlow message from *buf*.

    :returns: (cls instance wrapping the message, cls, remaining buffer).
              Unknown versions or parse failures yield OFPUnparseableMsg
              rather than raising.
    """
    from ryu.ofproto import ofproto_parser
    from ryu.ofproto import ofproto_protocol
    (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
    msg_parser = ofproto_parser._MSG_PARSERS.get(version)
    if msg_parser is None:
        # Unknown OpenFlow version: wrap the raw payload.
        msg = OFPUnparseableMsg(
            None, version, msg_type, msg_len, xid,
            buf[cls._MIN_LEN:msg_len])
        return cls(msg), cls, buf[msg_len:]
    datapath = ofproto_protocol.ProtocolDesc(version=version)
    try:
        msg = msg_parser(datapath, version, msg_type, msg_len, xid,
                         buf[:msg_len])
    except Exception:
        # Malformed body: fall back to the unparsed payload.  (Was a bare
        # ``except:``, which also caught KeyboardInterrupt/SystemExit.)
        msg = OFPUnparseableMsg(
            datapath, version, msg_type, msg_len, xid,
            buf[datapath.ofproto.OFP_HEADER_SIZE:msg_len])
    return cls(msg), cls, buf[msg_len:]
def handle_read(self, msg):
    """Handle one NetIDE message received from the Core.

    Performs the HELLO protocol negotiation for new clients and relays
    NETIDE_OPENFLOW payloads to the addressed datapath, rewriting the
    OpenFlow xid so replies can be routed back to the originating module.

    Fix: integer/equality comparisons now use ==/!= — the original used
    ``is``/``is not`` on ints, an identity check that only works for
    CPython's cached small integers and silently misbehaves otherwise.
    (Python 2 code: ``.encode('hex')`` and ``str(bytearray)`` are kept.)
    """
    decoded_header = NetIDEOps.netIDE_decode_header(msg)
    if decoded_header is False:
        return False
    logger.debug("Received from Core: Message header: %s", decoded_header)
    message_length = decoded_header[NetIDEOps.NetIDE_header['LENGTH']]
    message_data = msg[NetIDEOps.NetIDE_Header_Size:NetIDEOps.NetIDE_Header_Size + message_length]
    logger.debug("Received from Core: Message body: %s",
                 ':'.join(x.encode('hex') for x in message_data))
    if decoded_header[NetIDEOps.NetIDE_header['VERSION']] != NetIDEOps.NetIDE_version:
        print ("Attempt to connect from unsupported client")
        return
    else:
        # If new client is connecting
        if decoded_header[NetIDEOps.NetIDE_header['TYPE']] == NetIDEOps.NetIDE_type['NETIDE_HELLO']:
            if message_length == 0:
                print ("WARNING: Client does not support any protocol")
                return
            if self.controller.connection_up is False:
                print ("WARNING: It seems that the server controller is not connected to the switches/Mininet")
                return
            backend_id = decoded_header[NetIDEOps.NetIDE_header['MOD_ID']]
            logger.debug("Received HELLO message from backend: ,%s", backend_id)
            message_data = NetIDEOps.netIDE_decode_handshake(message_data, message_length)
            negotiated_protocols = {}
            # Find the common protocols that client and server support;
            # the handshake body is a flat (protocol, version) pair list.
            count = 0
            while count < message_length:
                protocol = message_data[count]
                version = message_data[count + 1]
                count += 2
                if version in self.controller.supported_protocols[protocol]:
                    if protocol in negotiated_protocols:
                        negotiated_protocols[protocol].append(version)
                    else:
                        negotiated_protocols.update({protocol: [version]})
            # After protocols have been negotiated, send back message to
            # client to notify for common protocols.
            proto_data = NetIDEOps.netIDE_encode_handshake(negotiated_protocols)
            if len(proto_data) == 0:
                # Nothing in common: reject the client.
                msg = NetIDEOps.netIDE_encode('NETIDE_ERROR', None, backend_id, None, None)
                self.socket.send(msg)
            else:
                msg = NetIDEOps.netIDE_encode('NETIDE_HELLO', None, backend_id, None, proto_data)
                self.socket.send(msg)
            # Resend request for features for the new client
            self.controller.send_features_request(backend_id)
        elif decoded_header[NetIDEOps.NetIDE_header['TYPE']] == NetIDEOps.NetIDE_type['NETIDE_OPENFLOW']:
            purge_xid()  # removes the old entries from the xid database
            if message_length == 0:
                return
            if decoded_header[NetIDEOps.NetIDE_header['DPID']] != 0:
                self.datapath = self.controller.switches[int(decoded_header[NetIDEOps.NetIDE_header['DPID']])]
                # Here we set a "fake" xid so that the replies to request
                # messages can be forwarded to the correct module by the core.
                (version, msg_type, msg_len, xid) = ofproto_parser.header(message_data)
                module_id = decoded_header[NetIDEOps.NetIDE_header['MOD_ID']]
                if module_id is not None:
                    new_xid = store_xid(xid, module_id)
                    ret = bytearray(message_data)
                    set_xid(ret, new_xid)
                    message_data = str(ret)
                self.datapath.send(message_data)
            else:
                self.datapath = None
def _packet_in_handler(self, ev):
    """Handle packet-ins whose TCP payload is itself an OpenFlow message.

    Acts as an in-band OpenFlow proxy: the packet-in carries a TCP segment
    from a switch, whose payload is an OpenFlow message; this handler
    crafts the TCP acknowledgements and OpenFlow replies and sends them
    back via packet-out.

    NOTE(review): Python 2 code (print statements).  Contains test
    scaffolding with early returns; the ARP/IPv4 branches after the
    two-port test-switch block appear unreachable -- confirm the intended
    control flow before relying on them.
    """
    # If you hit this you might want to increase
    # the "miss_send_length" of your switch
    if ev.msg.msg_len < ev.msg.total_len:
        self.logger.debug("packet truncated: only %s of %s bytes",
                          ev.msg.msg_len, ev.msg.total_len)
    msg = ev.msg
    #print msg
    datapath = msg.datapath
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    in_port = msg.match['in_port']
    pkt = packet.Packet(msg.data)
    eth = pkt.get_protocols(ethernet.ethernet)[0]
    # Ignore LLDP and ARP frames at this layer.
    if eth.ethertype == ether_types.ETH_TYPE_LLDP:
        return
    if eth.ethertype == ether_types.ETH_TYPE_ARP:
        return
    if pkt.get_protocols(vrrp.vrrp):
        print 'receive vrrp packet'
        print pkt
        return
    # Only traffic addressed to this controller's IP is of interest.
    if pkt.get_protocols(ipv4.ipv4)[0].dst != local_ip:
        return
    if pkt.get_protocols(tcp.tcp):
        p = pkt.get_protocols(tcp.tcp)[0]
        ip = pkt.get_protocols(ipv4.ipv4)[0]
        cur_seq = p.seq
        cur_ack = p.ack
        if p.bits & 0b000010:
            #receive SYN packet
            re_pkt = build_tcp_packet(pkt,TCP_REPLY)
            re_pkt.serialize()
            self.send_packet_out(datapath,in_port,re_pkt.data)
            #self.set_tcp_stat(p.seq,add_seq)
        if p.bits & 0b001000:
            #receive PSH packet
            re_pkt = build_tcp_packet(pkt,TCP_REPLY)
            re_pkt.serialize()
            self.send_packet_out(datapath,in_port,re_pkt.data)
            # The last protocol entry is assumed to be the raw OpenFlow
            # payload bytes -- TODO confirm for non-OF TCP payloads.
            if ofproto_parser.header(pkt.protocols[-1]):
                version, msg_type, msg_len, xid = ofproto_parser.header(pkt.protocols[-1])
                payload_length = len(bytearray(pkt.protocols[-1]))
                cur_seq = cur_seq+payload_length
                #fake datapath
                desc = ofproto_protocol.ProtocolDesc()
                desc.set_version(version=version)
                #print pkt
                if msg_type == desc.ofproto.OFPT_ERROR:
                    print 'error'
                    return
                if msg_type == desc.ofproto.OFPT_PACKET_IN:
                    self.set_managed_switches(port=p.src_port)
                    #find datapath.id
                    #add seq number to match next packet
                    add_seq = len(bytearray(pkt.protocols[-1]))
                    content = desc.ofproto_parser.OFPPacketIn(desc).parser(desc, version, msg_type, msg_len, xid,pkt.protocols[-1])
                    desc_pkt = packet.Packet(content.data)
                    desc_pkt_eth = desc_pkt.get_protocol(ethernet.ethernet)
                    '''test code two port switch'''
                    packet_in_port = content.match['in_port']
                    buffer_id = content.buffer_id
                    #print buffer_id
                    #print type(buffer_id)
                    # Two-port test switch: send out the "other" port.
                    if int(packet_in_port) == 1:
                        re_pkt = build_tcp_packet(re_pkt,OPFMSG)
                        OFP_payload = build_OFP_payload(desc,SEND_PACKET_OUT,port_no=2,data=content.data)
                        re_pkt.add_protocol(OFP_payload)
                        re_pkt.serialize()
                        add_seq = len(bytearray(OFP_payload))
                        self.send_packet_out(datapath,in_port,re_pkt.data)
                    else:
                        re_pkt = build_tcp_packet(re_pkt,OPFMSG)
                        OFP_payload = build_OFP_payload(desc,SEND_PACKET_OUT,port_no=1,data=content.data)
                        re_pkt.add_protocol(OFP_payload)
                        re_pkt.serialize()
                        add_seq = len(bytearray(OFP_payload))
                        self.send_packet_out(datapath,in_port,re_pkt.data)
                    # NOTE(review): this return makes the ARP/IPv4 handling
                    # below unreachable -- presumably leftover test code.
                    return
                    '''process arp packet'''
                    if desc_pkt.get_protocol(arp.arp):
                        print 'process arp'
                        desc_pkt_arp = desc_pkt.get_protocol(arp.arp)
                        packet_in_port = content.match['in_port']
                        msg_type = ARP_DROP_PACKET
                        re_pkt = build_tcp_packet(re_pkt,OPFMSG)
                        OFP_payload = build_OFP_payload(desc,msg_type,port_no=packet_in_port,packet=desc_pkt)
                        re_pkt.add_protocol(OFP_payload)
                        re_pkt.serialize()
                        add_seq = len(bytearray(OFP_payload))
                        #print add_seq
                        self.send_packet_out(datapath,in_port,re_pkt.data)
                        #self.set_tcp_stat(p.seq,add_seq)
                        msg_type = ARP_REPLY
                        re_pkt = build_tcp_packet(re_pkt,OPFMSG,add_seq=add_seq)
                        OFP_payload = build_OFP_payload(desc,msg_type,port_no=packet_in_port,packet=desc_pkt)
                        re_pkt.add_protocol(OFP_payload)
                        re_pkt.serialize()
                        add_seq = len(bytearray(OFP_payload))
                        self.send_packet_out(datapath,in_port,re_pkt.data)
                        msg_type = ARP_HIGH_PRIO
                        re_pkt = build_tcp_packet(re_pkt,OPFMSG,add_seq=add_seq)
                        OFP_payload = build_OFP_payload(desc,msg_type,port_no=packet_in_port,packet=desc_pkt)
                        re_pkt.add_protocol(OFP_payload)
                        re_pkt.serialize()
                        add_seq = len(bytearray(OFP_payload))
                        self.send_packet_out(datapath,in_port,re_pkt.data)
                        if desc_pkt_arp.opcode == arp.ARP_REPLY:
                            msg_type = ARP_REMOVE
                            re_pkt = build_tcp_packet(re_pkt,OPFMSG,add_seq=add_seq)
                            OFP_payload = build_OFP_payload(desc,msg_type,port_no=packet_in_port,packet=desc_pkt)
                            re_pkt.add_protocol(OFP_payload)
                            re_pkt.serialize()
                            add_seq = len(bytearray(OFP_payload))
                            #print add_seq
                            self.send_packet_out(datapath,in_port,re_pkt.data)
                            #self.set_tcp_stat(p.seq,add_seq)
                            msg_type = ADD_LAYER2_RULE
                            re_pkt = build_tcp_packet(re_pkt,OPFMSG,add_seq=add_seq)
                            OFP_payload = build_OFP_payload(desc,msg_type,port_no=packet_in_port,packet=desc_pkt)
                            re_pkt.add_protocol(OFP_payload)
                            re_pkt.serialize()
                            add_seq = len(bytearray(OFP_payload))
                            #print add_seq
                            self.send_packet_out(datapath,in_port,re_pkt.data)
                            #self.set_tcp_stat(p.seq,add_seq)
                    if desc_pkt.get_protocol(ipv4.ipv4):
                        '''process ipv4 packet'''
                        desc_pkt_ip = desc_pkt.get_protocol(ipv4.ipv4)
                        packet_in_port = content.match['in_port']
                        src = str(desc_pkt_ip.src)
                        dst = str(desc_pkt_ip.dst)
                    return
                '''receive feature reply and send port desc stats request'''
                if msg_type == desc.ofproto.OFPT_FEATURES_REPLY:
                    reply = desc.ofproto_parser.OFPSwitchFeatures.parser(desc,version,msg_type,msg_len,xid,pkt.protocols[-1])
                    self.set_tcp_stat(cur_seq=cur_seq,cur_ack=cur_ack,dpid=reply.datapath_id)
                    self.set_port_to_dpid(dpid=reply.datapath_id,ip=ip.src,port=p.src_port)
                    self.set_managed_switches(port=p.src_port)
                    re_pkt = build_tcp_packet(re_pkt,OPFMSG)
                    desc = ofproto_protocol.ProtocolDesc()
                    desc.set_version(version=version)
                    OFP_payload = build_OFP_payload(desc,desc.ofproto.OFPT_FLOW_MOD)
                    re_pkt.add_protocol(OFP_payload)
                    #add PACKETIN event to controller
                    re_pkt.serialize()
                    #print 'add packet in rule'
                    add_seq = len(bytearray(OFP_payload))
                    self.send_packet_out(datapath,in_port,re_pkt.data)
                    cur_ack = cur_ack+add_seq
                    self.set_tcp_stat(cur_seq=cur_seq,cur_ack=cur_ack,pre_seq=p.seq,pre_ack=p.ack)
                    # Follow up with a multipart (port desc) request.
                    re_pkt = build_tcp_packet(re_pkt,OPFMSG,add_seq=add_seq)
                    OFP_payload = build_OFP_payload(desc,desc.ofproto.OFPT_MULTIPART_REQUEST)
                    add_seq = len(bytearray(OFP_payload))
                    re_pkt.add_protocol(OFP_payload)
                    re_pkt.serialize()
                    self.send_packet_out(datapath,in_port,re_pkt.data)
                    cur_ack = cur_ack+add_seq
                    self.set_tcp_stat(cur_seq=cur_seq,cur_ack=cur_ack,pre_seq=p.seq,pre_ack=p.ack)
                    return
                '''receive port desc reply'''
                if msg_type == desc.ofproto.OFPT_MULTIPART_REPLY:
                    print 'OFPT_MULTIPART_REPLY'
                    port = desc.ofproto_parser.OFPMultipartReply.parser(desc,version,msg_type,msg_len,xid,pkt.protocols[-1])
                    self.set_tcp_stat(cur_seq=cur_seq,cur_ack=p.ack,pre_seq=p.seq,pre_ack=p.ack)
                    #print port
                    return
                '''receive echo request'''
                if msg_type == desc.ofproto.OFPT_ECHO_REQUEST:
                    reply = desc.ofproto_parser.OFPEchoRequest.parser(desc,version,msg_type,msg_len,xid,pkt.protocols[-1])
                    #print 'echo request'
                    #print reply
                    #print p.seq,p.ack
                    re_pkt = build_tcp_packet(re_pkt,OPFMSG)
                    OFP_payload = build_OFP_payload(desc,msg_type)
                    re_pkt.add_protocol(OFP_payload)
                    re_pkt.serialize()
                    add_seq = len(bytearray(pkt.protocols[-1]))
                    self.send_packet_out(datapath,in_port,re_pkt.data)
                    cur_ack = cur_ack+add_seq
                    self.set_tcp_stat(cur_seq=cur_seq,cur_ack=cur_ack,pre_seq=p.seq,pre_ack=p.ack)
                    return
                if msg_type == desc.ofproto.OFPT_HELLO:
                    re_pkt = build_tcp_packet(re_pkt,OPFMSG)
                    OFP_payload = build_OFP_payload(desc,msg_type)
                    re_pkt.add_protocol(OFP_payload)
                    '''send OFP HELLO to switch'''
                    re_pkt.serialize()
                    add_seq = len(bytearray(OFP_payload))
                    self.send_packet_out(datapath,in_port,re_pkt.data)
                    # Immediately follow HELLO with a features request.
                    re_pkt = build_tcp_packet(re_pkt,OPFMSG,add_seq=add_seq)
                    OFP_payload = build_OFP_payload(desc,desc.ofproto.OFPT_FEATURES_REQUEST)
                    re_pkt.add_protocol(OFP_payload)
                    re_pkt.serialize()
                    add_seq = len(bytearray(OFP_payload))
                    self.send_packet_out(datapath,in_port,re_pkt.data)
                    return
        if p.bits & 0b000001:
            # FIN received: acknowledge the teardown.
            print 'send fin'
            re_pkt = build_tcp_packet(pkt,TCP_REPLY)
            #print re_pkt
            re_pkt.serialize()
            self.send_packet_out(datapath,in_port,re_pkt.data)
def _recv_loop(self):
    """Receive and dispatch OpenFlow messages, tolerating slow reads.

    Unlike the simpler receive loops, this variant retries on socket and
    SSL read timeouts, and clamps bogus message lengths (smaller than the
    OFP header) so a misbehaving switch cannot wedge the parser.
    """
    buf = bytearray()
    count = 0
    min_read_len = remaining_read_len = ofproto_common.OFP_HEADER_SIZE
    while self.state != DEAD_DISPATCHER:
        try:
            # Ask for exactly what is still missing from the current
            # message, but never less than one header.
            read_len = min_read_len
            if (remaining_read_len > min_read_len):
                read_len = remaining_read_len
            ret = self.socket.recv(read_len)
        except SocketTimeout:
            continue
        except ssl.SSLError:
            # eventlet throws SSLError (which is a subclass of IOError)
            # on SSL socket read timeout; re-try the loop in this case.
            continue
        except (EOFError, IOError):
            break
        if len(ret) == 0:
            # Peer closed the connection.
            break
        buf += ret
        buf_len = len(buf)
        while buf_len >= min_read_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            if (msg_len < min_read_len):
                # Someone isn't playing nicely; log it, and try something sane.
                LOG.debug("Message with invalid length %s received from switch at address %s",
                          msg_len, self.address)
                msg_len = min_read_len
            if buf_len < msg_len:
                # Partial message: remember how much more to read.
                remaining_read_len = (msg_len - buf_len)
                break
            msg = ofproto_parser.msg(
                self, version, msg_type, msg_len, xid, buf[:msg_len])
            # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                ev = ofp_event.ofp_msg_to_ev(msg)
                self.ofp_brick.send_event_to_observers(ev, self.state)
                dispatchers = lambda x: x.callers[ev.__class__].dispatchers
                handlers = [handler for handler
                            in self.ofp_brick.get_handlers(ev)
                            if self.state in dispatchers(handler)]
                for handler in handlers:
                    handler(ev)
            buf = buf[msg_len:]
            buf_len = len(buf)
            remaining_read_len = min_read_len
            # We need to schedule other greenlets. Otherwise, ryu
            # can't accept new switches or handle the existing
            # switches. The limit is arbitrary. We need the better
            # approach in the future.
            count += 1
            if count > 2048:
                count = 0
                hub.sleep(0)
def str2mod(dp, line, cmd):
    """Build an OFPFlowMod for datapath *dp* from its textual form."""
    raw = ofp4.str2mod(line, cmd=cmd)
    version, msg_type, msg_len, xid = ofproto_parser_common.header(raw)
    # The xid decoded from the buffer is deliberately discarded (None).
    return OFPFlowMod.parser(dp, version, msg_type, msg_len, None, raw)
def _test_msg(self, name, wire_msg, json_str):
    """Round-trip *wire_msg* through the parser/serializer against JSON.

    Checks three properties: on-wire -> msg -> json equals *json_str*;
    json -> msg -> serialize reproduces the original wire bytes; and the
    'len'/'length' fields may be omitted from the JSON without changing
    the serialized result.
    """
    def bytes_eq(buf1, buf2):
        # Assert byte-wise equality, reporting the first differing offset.
        if buf1 != buf2:
            msg = 'EOF in either data'
            for i in range(0, min(len(buf1), len(buf2))):
                c1 = six.indexbytes(six.binary_type(buf1), i)
                c2 = six.indexbytes(six.binary_type(buf2), i)
                if c1 != c2:
                    msg = 'differs at chr %d, %d != %d' % (i, c1, c2)
                    break
            assert buf1 == buf2, "%r != %r, %s" % (buf1, buf2, msg)

    json_dict = json.loads(json_str)
    # on-wire -> OFPxxx -> json
    (version, msg_type, msg_len, xid) = ofproto_parser.header(wire_msg)
    try:
        has_parser, has_serializer = implemented[version][msg_type]
    except KeyError:
        # Not listed: assume both directions are implemented.
        has_parser = True
        has_serializer = True

    dp = ofproto_protocol.ProtocolDesc(version=version)
    if has_parser:
        msg = ofproto_parser.msg(dp, version, msg_type, msg_len, xid,
                                 wire_msg)
        json_dict2 = self._msg_to_jsondict(msg)
        # XXXdebug code
        open(('/tmp/%s.json' % name), 'w').write(json.dumps(json_dict2))
        eq_(json_dict, json_dict2)

    # json -> OFPxxx -> json
    # The xid is carried outside the message body in the JSON fixture.
    xid = json_dict[list(json_dict.keys())[0]].pop('xid', None)
    msg2 = self._jsondict_to_msg(dp, json_dict)
    msg2.set_xid(xid)
    if has_serializer:
        msg2.serialize()
        eq_(self._msg_to_jsondict(msg2), json_dict)
        bytes_eq(wire_msg, msg2.buf)

        # check if "len" "length" fields can be omitted
        def _remove(d, names):
            # Recursively strip the given keys from nested dicts/lists.
            f = lambda x: _remove(x, names)
            if isinstance(d, list):
                return list(map(f, d))
            if isinstance(d, dict):
                d2 = {}
                for k, v in d.items():
                    if k in names:
                        continue
                    d2[k] = f(v)
                return d2
            return d

        json_dict3 = _remove(json_dict, ['len', 'length'])
        msg3 = self._jsondict_to_msg(dp, json_dict3)
        msg3.set_xid(xid)
        msg3.serialize()
        bytes_eq(wire_msg, msg3.buf)

        # Serializing again must be idempotent.
        msg2.serialize()
        bytes_eq(wire_msg, msg2.buf)