def testHello(self):
    """Check that the OFP header of a plain HELLO message parses correctly."""
    header_fields = ofproto_parser.header(self.bufHello)
    # expected: (version, msg_type, msg_len, xid)
    for actual, want in zip(header_fields, (1, 0, 8, 1)):
        eq_(actual, want)
def test_check_msg_parser(self):
    """Feed msg() a bogus wire version (0xff) for an otherwise valid
    PacketIn buffer; the version from the header is deliberately ignored."""
    _, msg_type, msg_len, xid = ofproto_parser.header(self.bufPacketIn)
    bogus_version = 0xff
    ofproto_parser.msg(self, bogus_version, msg_type, msg_len, xid,
                       self.bufPacketIn)
def OF_error_msg_handler(self, event):
    """Log an incoming OFPErrorMsg.

    Tries to re-parse the error's embedded payload as an OpenFlow
    message for a readable log line; on any failure, falls back to a
    hex dump of the raw data.
    """
    err = event.msg
    try:
        version, msg_type, msg_len, xid = ofproto_parser.header(err.data)
        # NOTE(review): msg_len is reduced by the header size before the
        # re-parse — presumably matching how the payload was framed.
        parsed = ofproto_parser.msg(
            self._datapath, version, msg_type,
            msg_len - ofproto_common.OFP_HEADER_SIZE, xid, err.data)
        LOG.error('OFPErrorMsg received: %s', parsed)
    except Exception:
        LOG.error('Unrecognized OFPErrorMsg received: '
                  'type=0x%(type)02x code=0x%(code)02x '
                  'message=%(msg)s',
                  {'type': err.type, 'code': err.code,
                   'msg': utils.hex_array(err.data)})
def testPacketIn(self):
    """A PacketIn buffer should parse into an OFPPacketIn instance."""
    hdr = ofproto_parser.header(self.bufPacketIn)
    version, msg_type, msg_len, xid = hdr
    parsed = ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                                self.bufPacketIn)
    LOG.debug(parsed)
    ok_(isinstance(parsed, ofproto_v1_0_parser.OFPPacketIn))
def testFeaturesReply(self):
    """A FeaturesReply buffer should parse into OFPSwitchFeatures whose
    ports 1, 2 and 65534 are OFPPhyPort instances."""
    version, msg_type, msg_len, xid = ofproto_parser.header(
        self.bufFeaturesReply)
    reply = ofproto_parser.msg(self, version, msg_type, msg_len, xid,
                               self.bufFeaturesReply)
    LOG.debug(reply)
    ok_(isinstance(reply, ofproto_v1_0_parser.OFPSwitchFeatures))
    LOG.debug(reply.ports[65534])
    for port_no in (1, 2, 65534):
        ok_(isinstance(reply.ports[port_no],
                       ofproto_v1_0_parser.OFPPhyPort))
def handle(self):
    """Serve one switch-side connection: read OpenFlow messages and reply
    just enough (HELLO, barrier, OF1.3 bundle-control) for the peer's
    handshake and flow-mod buffering to proceed.
    """
    desc = ofproto_protocol.ProtocolDesc()
    # Bytes left over past the first message of the previous recv();
    # consumed before reading from the socket again.
    residue = b''
    while True:
        if residue:
            data = residue
            residue = b''
        else:
            data = self.request.recv(1024)
        # Empty read means the peer closed the connection.
        if data == b'':
            break
        if self.verbose:
            print(data)
        h = ofproto_parser.header(data)
        if self.verbose:
            print(h)
        version, msg_type, msg_len, xid = h
        # Keep anything beyond this message for the next iteration.
        residue = data[msg_len:]
        desc.set_version(version=version)
        if msg_type == desc.ofproto.OFPT_HELLO:
            # Respond with our own HELLO to complete the handshake.
            hello = desc.ofproto_parser.OFPHello(desc)
            hello.serialize()
            self.request.send(hello.buf)
        elif msg_type == desc.ofproto.OFPT_FLOW_MOD:
            self._add_msg_to_buf(data, msg_len)
        elif version == 4 and msg_type == desc.ofproto.OFPT_EXPERIMENTER:
            # This is for OF13 Ext-230 bundle
            # TODO: support bundle for OF>1.3
            exp = desc.ofproto_parser.OFPExperimenter.parser(
                object(), version, msg_type, msg_len, xid, data)
            self._add_msg_to_buf(data, msg_len)
            if isinstance(exp, desc.ofproto_parser.ONFBundleCtrlMsg):
                # Acknowledge bundle control: reply type is request
                # type + 1 per the Ext-230 convention used here.
                ctrlrep = desc.ofproto_parser.ONFBundleCtrlMsg(
                    desc, exp.bundle_id, exp.type + 1, 0, [])
                ctrlrep.xid = xid
                ctrlrep.serialize()
                self.request.send(ctrlrep.buf)
        elif msg_type == desc.ofproto.OFPT_BARRIER_REQUEST:
            # Echo a barrier reply with the request's xid.
            brep = desc.ofproto_parser.OFPBarrierReply(desc)
            brep.xid = xid
            brep.serialize()
            self.request.send(brep.buf)
def error_msg_handler(self, ev):
    """Log a received OFPErrorMsg in human-readable form.

    Emits the OFP header, the error type/code (decoded to strings), and
    — when the error payload is itself an OpenFlow message — the header
    of the failed request it carries.
    """
    msg = ev.msg
    ofp = msg.datapath.ofproto
    self.logger.debug(
        "EventOFPErrorMsg received.\n"
        "version=%s, msg_type=%s, msg_len=%s, xid=%s\n"
        " `-- msg_type: %s",
        hex(msg.version), hex(msg.msg_type), hex(msg.msg_len),
        hex(msg.xid), ofp.ofp_msg_type_to_str(msg.msg_type))
    if msg.type == ofp.OFPET_EXPERIMENTER:
        # Experimenter errors carry extra exp_type/experimenter fields.
        self.logger.debug(
            "OFPErrorExperimenterMsg(type=%s, exp_type=%s,"
            " experimenter=%s, data=b'%s')",
            hex(msg.type), hex(msg.exp_type), hex(msg.experimenter),
            utils.binary_str(msg.data))
    else:
        self.logger.debug(
            "OFPErrorMsg(type=%s, code=%s, data=b'%s')\n"
            " |-- type: %s\n"
            " |-- code: %s",
            hex(msg.type), hex(msg.code), utils.binary_str(msg.data),
            ofp.ofp_error_type_to_str(msg.type),
            ofp.ofp_error_code_to_str(msg.type, msg.code))
    if msg.type == ofp.OFPET_HELLO_FAILED:
        # HELLO_FAILED data is an ASCII text string, not a message.
        self.logger.debug(" `-- data: %s", msg.data.decode('ascii'))
    elif len(msg.data) >= ofp.OFP_HEADER_SIZE:
        # Payload is (at least) an OFP header: decode the failed request.
        (version, msg_type, msg_len, xid) = ofproto_parser.header(msg.data)
        self.logger.debug(
            " `-- data: version=%s, msg_type=%s, msg_len=%s, xid=%s\n"
            " `-- msg_type: %s",
            hex(version), hex(msg_type), hex(msg_len), hex(xid),
            ofp.ofp_msg_type_to_str(msg_type))
    else:
        self.logger.warning(
            "The data field sent from the switch is too short: "
            "len(msg.data) < OFP_HEADER_SIZE\n"
            "The OpenFlow Spec says that the data field should contain "
            "at least 64 bytes of the failed request.\n"
            "Please check the settings or implementation of your switch.")
def parser(cls, buf):
    """Parse one OpenFlow message from the head of *buf*.

    Returns a 3-tuple ``(cls(msg), cls, rest)`` where *msg* is the
    parsed message (or an ``OFPUnparseableMsg`` wrapping the raw bytes
    when the version is unknown or parsing fails) and *rest* is the
    unconsumed tail of *buf*.  Never raises on malformed input.
    """
    from os_ken.ofproto import ofproto_parser
    from os_ken.ofproto import ofproto_protocol

    (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
    msg_parser = ofproto_parser._MSG_PARSERS.get(version)
    if msg_parser is None:
        # Unsupported wire version: keep the payload as opaque bytes.
        msg = OFPUnparseableMsg(
            None, version, msg_type, msg_len, xid,
            buf[cls._MIN_LEN:msg_len])
        return cls(msg), cls, buf[msg_len:]

    datapath = ofproto_protocol.ProtocolDesc(version=version)
    try:
        msg = msg_parser(datapath, version, msg_type, msg_len, xid,
                         buf[:msg_len])
    except Exception:
        # Fix: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt; narrow to Exception so only
        # genuine parse failures fall back to the unparseable wrapper.
        msg = OFPUnparseableMsg(
            datapath, version, msg_type, msg_len, xid,
            buf[datapath.ofproto.OFP_HEADER_SIZE:msg_len])
    return cls(msg), cls, buf[msg_len:]
def _test_msg(self, name, wire_msg, json_str):
    """Round-trip *wire_msg* through parse/serialize and compare results.

    Checks on-wire -> OFPxxx -> json against *json_str*, then
    json -> OFPxxx -> on-wire against *wire_msg*, and finally that the
    "len"/"length" fields may be omitted from the json form.
    """
    def bytes_eq(buf1, buf2):
        # Assert equality with a message pointing at the first
        # differing byte for easier debugging.
        if buf1 != buf2:
            msg = 'EOF in either data'
            for i in range(0, min(len(buf1), len(buf2))):
                c1 = six.indexbytes(six.binary_type(buf1), i)
                c2 = six.indexbytes(six.binary_type(buf2), i)
                if c1 != c2:
                    msg = 'differs at chr %d, %d != %d' % (i, c1, c2)
                    break
            assert buf1 == buf2, "%r != %r, %s" % (buf1, buf2, msg)

    json_dict = json.loads(json_str)
    # on-wire -> OFPxxx -> json
    (version, msg_type, msg_len, xid) = ofproto_parser.header(wire_msg)
    try:
        has_parser, has_serializer = implemented[version][msg_type]
    except KeyError:
        has_parser = True
        has_serializer = True

    dp = ofproto_protocol.ProtocolDesc(version=version)
    if has_parser:
        try:
            msg = ofproto_parser.msg(dp, version, msg_type, msg_len, xid,
                                     wire_msg)
            json_dict2 = self._msg_to_jsondict(msg)
        except exception.OFPTruncatedMessage as e:
            json_dict2 = {
                'OFPTruncatedMessage': self._msg_to_jsondict(e.ofpmsg)
            }
        # XXXdebug code
        # Fix: use a context manager so the debug dump's file handle is
        # closed deterministically instead of leaking until GC.
        with open('/tmp/%s.json' % name, 'w') as f:
            f.write(json.dumps(json_dict2))
        eq_(json_dict, json_dict2)
        if 'OFPTruncatedMessage' in json_dict2:
            return

    # json -> OFPxxx -> json
    xid = json_dict[list(json_dict.keys())[0]].pop('xid', None)
    msg2 = self._jsondict_to_msg(dp, json_dict)
    msg2.set_xid(xid)
    if has_serializer:
        msg2.serialize()
        eq_(self._msg_to_jsondict(msg2), json_dict)
        bytes_eq(wire_msg, msg2.buf)

        # check if "len" "length" fields can be omitted

        def _remove(d, names):
            # Recursively strip keys in *names* from nested dicts/lists.
            f = lambda x: _remove(x, names)
            if isinstance(d, list):
                return list(map(f, d))
            if isinstance(d, dict):
                d2 = {}
                for k, v in d.items():
                    if k in names:
                        continue
                    d2[k] = f(v)
                return d2
            return d

        json_dict3 = _remove(json_dict, ['len', 'length'])
        msg3 = self._jsondict_to_msg(dp, json_dict3)
        msg3.set_xid(xid)
        msg3.serialize()
        bytes_eq(wire_msg, msg3.buf)

        # Serialize msg2 once more to confirm the result is stable.
        msg2.serialize()
        bytes_eq(wire_msg, msg2.buf)
def _recv_loop(self):
    """Read OpenFlow messages from the socket, parse them, and dispatch
    the resulting events to registered handlers until the connection
    dies or the state machine reaches DEAD_DISPATCHER.
    """
    buf = bytearray()
    count = 0
    # Read at least one OFP header; once a header tells us the full
    # message length, remaining_read_len grows to fetch the rest.
    min_read_len = remaining_read_len = ofproto_common.OFP_HEADER_SIZE
    while self.state != DEAD_DISPATCHER:
        try:
            read_len = min_read_len
            if remaining_read_len > min_read_len:
                read_len = remaining_read_len
            ret = self.socket.recv(read_len)
        except SocketTimeout:
            continue
        except ssl.SSLError:
            # eventlet throws SSLError (which is a subclass of IOError)
            # on SSL socket read timeout; re-try the loop in this case.
            continue
        except (EOFError, IOError):
            break

        if not ret:
            # Peer closed the connection.
            break

        buf += ret
        buf_len = len(buf)
        # Drain every complete message currently in the buffer.
        while buf_len >= min_read_len:
            (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
            if msg_len < min_read_len:
                # Someone isn't playing nicely; log it, and try something sane.
                LOG.debug(
                    "Message with invalid length %s received from switch at address %s",
                    msg_len, self.address)
                msg_len = min_read_len
            if buf_len < msg_len:
                # Message is incomplete: remember how much is missing.
                remaining_read_len = (msg_len - buf_len)
                break

            msg = ofproto_parser.msg(
                self, version, msg_type, msg_len, xid, buf[:msg_len])
            # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
            if msg:
                # Fan the event out to observers, then invoke only the
                # handlers registered for the current dispatcher state.
                ev = ofp_event.ofp_msg_to_ev(msg)
                self.ofp_brick.send_event_to_observers(ev, self.state)

                def dispatchers(x):
                    return x.callers[ev.__class__].dispatchers

                handlers = [handler for handler
                            in self.ofp_brick.get_handlers(ev)
                            if self.state in dispatchers(handler)]
                for handler in handlers:
                    handler(ev)

            buf = buf[msg_len:]
            buf_len = len(buf)
            remaining_read_len = min_read_len

            # We need to schedule other greenlets. Otherwise, os_ken
            # can't accept new switches or handle the existing
            # switches. The limit is arbitrary. We need the better
            # approach in the future.
            count += 1
            if count > 2048:
                count = 0
                hub.sleep(0)