-
Notifications
You must be signed in to change notification settings - Fork 2
/
panorama.py
379 lines (340 loc) · 13.4 KB
/
panorama.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
from pox.core import core
from pox.lib.revent import *
from pox.openflow.of_json import *
from pox.lib.recoco import Timer
from pox.lib.util import dpidToStr
import pox.openflow.libopenflow_01 as of
from pox.lib.addresses import IPAddr, EthAddr
from pox.lib.packet.ipv4 import IP_ANY, IP_BROADCAST
from pox.lib.packet.ethernet import ETHER_ANY, ETHER_BROADCAST
from pox.openflow.discovery import Discovery
import BaseHTTPServer
from SocketServer import ThreadingMixIn
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import re
import cgi
import json
import time
import thread
import argparse
import threading
import webbrowser
from os import curdir, sep
from collections import defaultdict
# POX component logger for this module.
log = core.getLogger()
# Datapath ids (ints) of currently connected switches.
switches = []
# hosts[ip_str] -> {'mac', 'switch', 'last_time_seen', 'in_port'}
# (filled by panorama._handle_PacketIn, expired by panorama.host_refresh)
hosts = defaultdict(lambda:defaultdict(lambda:None))
# switch_desc[dpid_str] -> {'hw_desc', 'sw_desc', 'mfr_desc', 'serial_num'}
switch_desc = defaultdict(lambda:defaultdict(lambda:None))
# adjacency[dpid1][dpid2] -> output port on dpid1 towards dpid2
# (None placeholder when there is no link; keys are integer dpids)
adjacency = defaultdict(lambda:defaultdict(lambda:None))
# link_bw[dpid_str][peer_str] -> latest tx-only throughput estimate (bit/s, as
# a formatted string, or 'N/A' where no link exists)
link_bw = defaultdict(lambda:defaultdict(lambda:None))
# link_bw_total[dpid_str][peer_str] -> latest tx+rx throughput estimate
link_bw_total = defaultdict(lambda:defaultdict(lambda:None))
# byte / byte_r [dpid_str][peer_str] -> last sampled tx / rx byte counters
byte = defaultdict(lambda:defaultdict(lambda:None))
byte_r = defaultdict(lambda:defaultdict(lambda:None))
# clock[dpid_str][peer_str] -> wall-clock time of the last counter sample
clock = defaultdict(lambda:defaultdict(lambda:None))
# Per-switch cached OpenFlow statistics, keyed by dpid string.
flow_stats = defaultdict(lambda:None)
aggr_stats = defaultdict(lambda:None)
port_stats = defaultdict(lambda:None)
# Dashboard landing page and HTTP listen port.
FILE = 'index.html'
PORT = 8080
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
return
def do_GET(self):
global flow_stats, port_stats, hosts, switch_desc, aggr_stats
if self.path=='/':
self.path='/index.html'
try:
sendReply = False
success = False
if self.path.endswith('.html'):
mimetype='text/html'
sendReply = True
if self.path.endswith('.png'):
mimetype='image/png'
sendReply = True
if self.path.endswith('.js'):
mimetype='application/javascript'
sendReply = True
if self.path.endswith('.css'):
mimetype='text/css'
sendReply = True
if self.path.endswith('.ttf'):
mimetype='application/ttf'
sendReply = True
if self.path.endswith('.woff'):
mimetype='application/woff'
sendReply = True
if self.path.endswith('.woff2'):
mimetype='application/woff2'
sendReply = True
if self.path.endswith('.ico'):
mimetype='text/html'
sendReply = True
if sendReply == True:
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type',mimetype)
self.end_headers()
self.wfile.write(f.read())
f.close()
success = True
else:
if None != re.search('/switch_info/*', self.path):
#print 'Switch Info ...'
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(switch_desc, sort_keys=True))
success = True
if None != re.search('/host_info/*', self.path):
#print 'Host Info ...'
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(hosts, sort_keys=True))
success = True
if None != re.search('/topo_info/*', self.path):
#print 'Topo Info ...'
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
def transform_key(d):
newd = dict()
for k,v in d.iteritems():
if isinstance(v, dict):
v = transform_key(v)
newd[dpidToStr(k)] = v
return newd
def stripNone(xdata):
if isinstance(xdata, dict):
return {k:stripNone(v) for k, v in xdata.items() if k is not None and v is not None}
else:
return xdata
adj_raw = transform_key(adjacency) # dpidToStr.
adj_raw_tmp = stripNone(adj_raw) # Remove self links.
self.wfile.write(json.dumps(adj_raw_tmp, sort_keys=True))
success = True
if None != re.search('/port_stats/*', self.path):
#print 'Port Stats ...'
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(port_stats, sort_keys=True))
success = True
if None != re.search('/aggr_stats/*', self.path):
#print 'Aggregate Stats ...'
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(aggr_stats, sort_keys=True))
success = True
if None != re.search('/flow_stats/*', self.path):
#print 'Flow Stats ...'
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
def process_fs(flow_stats):
for i in flow_stats:
for f in flow_stats[i]:
ix = 'match'
jx = 'nw_src'
if jx in f[ix]:
ip = f[ix][jx]
f[ix][jx] = str(ip)
jx = 'nw_dst'
if jx in f[ix]:
ip = f[ix][jx]
f[ix][jx] = str(ip)
return flow_stats
processed_fs = process_fs(flow_stats)
self.wfile.write(json.dumps(processed_fs, ensure_ascii=False, sort_keys=True))
success = True
if None != re.search('/bw/*', self.path):
#print 'Throughput ...'
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(link_bw, sort_keys=True))
success = True
if None != re.search('/bandwidth/*', self.path):
#print 'Throughput Bidectional ...'
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(link_bw_total, sort_keys=True))
success = True
if success == False:
print 'Error'
self.send_response(403)
self.end_headers()
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request on its own thread."""

    # Allow immediate rebinding of the port after a restart
    # (avoids "address already in use" during TIME_WAIT).
    allow_reuse_address = True

    def shutdown(self):
        # Close the listening socket first, then run the base-class
        # shutdown to stop the serve_forever loop.
        # NOTE(review): closing the socket before HTTPServer.shutdown()
        # is unusual ordering — confirm it is intentional.
        self.socket.close()
        HTTPServer.shutdown(self)
class SimpleHttpServer():
    """Small wrapper that runs a ThreadedHTTPServer on a background thread."""

    def __init__(self, ip, port):
        """Bind the threaded HTTP server to (ip, port)."""
        self.server = ThreadedHTTPServer((ip, port), HTTPRequestHandler)

    def start(self):
        """Begin serving on a daemon thread so the caller keeps control."""
        worker = threading.Thread(target=self.server.serve_forever)
        worker.daemon = True
        self.server_thread = worker
        worker.start()

    def waitForThread(self):
        """Block until the serving thread terminates."""
        self.server_thread.join()

    def stop(self):
        """Shut the server down, then wait for its thread to finish."""
        self.server.shutdown()
        self.waitForThread()
def _timer_func ():
    """Periodic poll: wipe cached state when no switch is connected,
    otherwise request port/aggregate/flow/description stats from each one."""
    connections = core.openflow._connections.values()
    if not connections:
        # No switch attached any more: reset every cached table.
        del switches[:]
        for table in (hosts, switch_desc, adjacency, link_bw, link_bw_total,
                      byte, byte_r, clock, flow_stats, aggr_stats, port_stats):
            table.clear()
    for connection in connections:
        for body in (of.ofp_port_stats_request(),
                     of.ofp_aggregate_stats_request(),
                     of.ofp_flow_stats_request(),
                     of.ofp_desc_stats_request()):
            connection.send(of.ofp_stats_request(body=body))
def _handle_portstats_received (event):
    """Record a switch's port counters and update link throughput estimates.

    For every physical port (port_no < 65534, i.e. below OFPP_LOCAL) that
    corresponds to a known inter-switch link, computes tx-only (link_bw) and
    tx+rx (link_bw_total) bit rates since the previous sample.  Pairs with no
    link between them (including a switch and itself) are marked 'N/A'.
    """
    global port_stats
    dpid = event.connection.dpid
    sw = dpidToStr(dpid)  # hoisted: was recomputed on every access below
    port_stats[sw] = flow_stats_to_list(event.stats)
    for f in event.stats:
        if int(f.port_no) < 65534:  # skip OFPP_LOCAL and reserved ports
            for p in switches:
                peer = dpidToStr(p)
                if adjacency[dpid][p] != None and adjacency[dpid][p] == f.port_no:
                    # One timestamp for rate computation AND the stored clock,
                    # so the next interval measurement is consistent.
                    now = time.time()
                    prev_tx = byte[sw][peer]
                    # Guard against the defaultdict's None placeholder on the
                    # very first sample (None > 0 is a TypeError on Python 3).
                    if prev_tx is not None and prev_tx > 0:
                        elapsed = now - clock[sw][peer]
                        link_bw[sw][peer] = format(
                            (f.tx_bytes - prev_tx) * 8.0 / elapsed, '.2f')
                        link_bw_total[sw][peer] = format(
                            (f.tx_bytes + f.rx_bytes - prev_tx - byte_r[sw][peer])
                            * 8.0 / elapsed, '.2f')
                    byte[sw][peer] = f.tx_bytes
                    byte_r[sw][peer] = f.rx_bytes
                    clock[sw][peer] = now
                if adjacency[dpid][p] == None:
                    # No link between these switches (includes self-pairs):
                    # no data can transit, so throughput is not applicable.
                    link_bw[sw][peer] = 'N/A'
                    link_bw_total[sw][peer] = 'N/A'
def _handle_aggregate_flowstats_received (event):
    """Cache the aggregate flow statistics reported by one switch."""
    global aggr_stats
    switch_id = dpidToStr(event.connection.dpid)
    # vars(x) is x.__dict__: store the raw stats attributes as a one-item list.
    aggr_stats[switch_id] = [vars(event.stats)]
def _handle_flowstats_received (event):
    """Cache the per-flow statistics reported by one switch."""
    global flow_stats
    switch_id = dpidToStr(event.connection.dpid)
    flow_stats[switch_id] = flow_stats_to_list(event.stats)
def _handle_switchdesc_received (event):
    """Record a switch's description (hardware, software, vendor, serial).

    Fixes: removed a stray no-op statement (`event.connection.dpid` on its
    own line) and hoisted the repeated switch_desc[dpidToStr(...)] lookup.
    """
    global switch_desc
    desc = switch_desc[dpidToStr(event.connection.dpid)]
    desc['hw_desc'] = event.stats.hw_desc
    desc['sw_desc'] = event.stats.sw_desc
    desc['mfr_desc'] = event.stats.mfr_desc
    desc['serial_num'] = event.stats.serial_num
def _handle_ConnectionUp (event):
    """Track a newly connected switch by its datapath id."""
    dpid = event.connection.dpid
    switches.append(dpid)
def _handle_ConnectionDown (event):
    """Purge all cached state belonging to a switch that disconnected."""
    sw = event.connection.dpid
    sw_str = dpidToStr(sw)  # hoisted: was recomputed on every access below
    # Remove links pointing at the departed switch from every other switch.
    for i in switches:
        if sw in adjacency[i]:
            del adjacency[i][sw]
        i_str = dpidToStr(i)
        if sw_str in link_bw[i_str]:
            del link_bw[i_str][sw_str]
        if sw_str in link_bw_total[i_str]:
            del link_bw_total[i_str][sw_str]
    # Remove hosts attached to the departed switch.  Collect first: a dict
    # cannot be modified while it is being iterated.
    expired_hosts = []
    for i in hosts:
        if hosts[i]['switch'] == sw_str:
            expired_hosts.append(i)
    for i in expired_hosts:
        del hosts[i]
    switches.remove(sw)
    # Use pop(key, None) instead of `del`: a switch that disconnects before
    # any stats reply arrived has no entry in these tables, and a bare `del`
    # on a missing key raises KeyError (also for defaultdicts).
    adjacency.pop(sw, None)
    link_bw.pop(sw_str, None)
    link_bw_total.pop(sw_str, None)
    flow_stats.pop(sw_str, None)
    aggr_stats.pop(sw_str, None)
    port_stats.pop(sw_str, None)
    switch_desc.pop(sw_str, None)
class panorama (EventMixin):
    """POX component: discovers hosts from PacketIns and maintains the
    inter-switch adjacency map from discovery LinkEvents."""
    # NOTE(review): `global hosts` at class scope only affects assignments
    # made directly in the class body; none occur, so this is a no-op.
    global hosts

    def __init__ (self):
        def startup ():
            # Time (in seconds) between host-table cleanups; generally it
            # should be greater than expected flow HARD_TIMEOUTs so live
            # hosts are not purged while their traffic is flowing via
            # installed rules instead of reaching the controller.
            self.HOST_TIMEOUT = 15
            Timer(self.HOST_TIMEOUT, self.host_refresh, recurring=True)
            core.openflow.addListeners(self, priority=0)
            core.openflow_discovery.addListeners(self)
        # Defer listener registration until both components are up.
        core.call_when_ready(startup, ('openflow','openflow_discovery'))

    def host_refresh(self):
        """Drop hosts not seen within the last HOST_TIMEOUT seconds."""
        expired_hosts = []
        # Collect first: a dict cannot be modified while iterating it.
        for i in hosts:
            if int(time.time()) - hosts[i]['last_time_seen'] > self.HOST_TIMEOUT:
                expired_hosts.append(i)
        for i in expired_hosts:
            del hosts[i]

    def _handle_PacketIn (self, event):
        """Learn (src IP -> mac / attachment switch / in-port) from traffic.

        Assuming that switches do not have any forwarding rule pre-installed,
        the first packet from a host will travel to the controller.  It is a
        rough logic to discover hosts; it can be improved.
        """
        eth_packet = event.parsed
        in_port = event.port
        if not eth_packet.parsed:
            return
        src_mac = eth_packet.src
        # NOTE(review): `ethernet` is not imported by name at the top of the
        # file; it is presumably brought in by one of the `import *` lines —
        # confirm.
        if eth_packet.type == ethernet.ARP_TYPE:
            arp_packet = eth_packet.payload
            # Ignore ARP whose sender hardware address does not match the
            # Ethernet source (spoofed/inconsistent).
            if not arp_packet.hwsrc == src_mac:
                return
            src_ip = arp_packet.protosrc
            if src_ip == IP_ANY:
                return
        elif eth_packet.type == ethernet.IP_TYPE:
            ip_packet = eth_packet.payload
            src_ip = ip_packet.srcip
            if src_ip == IP_ANY:
                return
        else:
            # Neither ARP nor IPv4: nothing to learn.
            return
        if not hosts[str(src_ip)]:
            # First sighting of this IP: record mac and attachment switch.
            hosts[str(src_ip)]['mac'] = str(src_mac)
            hosts[str(src_ip)]['switch'] = dpidToStr(event.connection.dpid)
        # Refresh liveness/location on every sighting so host_refresh does
        # not expire an active host.
        hosts[str(src_ip)]['last_time_seen'] = int(time.time())
        hosts[str(src_ip)]['in_port'] = in_port

    def _handle_LinkEvent (self, event):
        """Maintain the adjacency map from discovery link add/remove events."""
        def flip (link):
            # Reverse a discovery link (swap the two (dpid, port) endpoints).
            return Discovery.Link(link[2],link[3], link[0],link[1])
        l = event.link
        sw1 = l.dpid1
        sw2 = l.dpid2
        if event.removed:
            # Link went down: forget both directions.
            if sw2 in adjacency[sw1]: del adjacency[sw1][sw2]
            if sw1 in adjacency[sw2]: del adjacency[sw2][sw1]
        else:
            if adjacency[sw1][sw2] is None:
                # Record the link only once discovery has seen it in both
                # directions.
                if flip(l) in core.openflow_discovery.adjacency:
                    adjacency[sw1][sw2] = l.port1
                    adjacency[sw2][sw1] = l.port2
def launch ():
    """POX entry point: start the web dashboard, logging, discovery,
    spanning tree, the panorama component, and the stats-polling timer."""
    def server_launch():
        # Open the dashboard in the local browser, then serve it.
        # waitForThread blocks, keeping this worker thread alive.
        webbrowser.open('http://localhost:%s/%s' % (PORT, FILE))
        server = SimpleHttpServer('127.0.0.1', int(PORT))
        print 'HTTP Server Running...........'
        server.start()
        server.waitForThread()
    thread.start_new(server_launch, ())
    # Silence POX logging below CRITICAL.  Each local `import ... launch`
    # deliberately shadows the previous one within this function.
    from pox.log.level import launch
    launch(CRITICAL=True)
    # Start LLDP-based link discovery.
    from openflow.discovery import launch
    launch()
    # Enable spanning tree so loops do not cause broadcast storms.
    # NOTE(review): a single positional string is passed here; spanning_tree's
    # launch normally takes keyword flags — confirm this actually enables
    # both no-flood and hold-down.
    from openflow.spanning_tree import launch
    launch('--no-flood --hold-down')
    core.registerNew(panorama)
    # Module-level handlers for connection and statistics events.
    core.openflow.addListenerByName('ConnectionUp', _handle_ConnectionUp)
    core.openflow.addListenerByName('ConnectionDown', _handle_ConnectionDown)
    core.openflow.addListenerByName('PortStatsReceived', _handle_portstats_received)
    core.openflow.addListenerByName('AggregateFlowStatsReceived', _handle_aggregate_flowstats_received)
    core.openflow.addListenerByName('FlowStatsReceived', _handle_flowstats_received)
    core.openflow.addListenerByName('SwitchDescReceived', _handle_switchdesc_received)
    # Poll every switch for statistics once per second.
    Timer(1, _timer_func, recurring=True)