-
Notifications
You must be signed in to change notification settings - Fork 1
/
ndn_flow.py
1261 lines (976 loc) · 49.8 KB
/
ndn_flow.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python
from settings import log, ROOT_DIR
import settings
import datetime
import os
import os.path
import sys
import signal
import math
import types
import collections
import time
import threading
import pyndn
from pyndn import _pyndn
PAPER = "ndnflow" #project ndn_name to use FlowConsumer; the log file is <PAPER>.log
IP_HEADER_SIZE = 20  #bytes in an IPv4 header
UDP_HEADER_SIZE = 8  #bytes in a UDP header
TCP_HEADER_SIZE = 20  #bytes in a TCP header
ETH_MTU = 1500  #Ethernet MTU in bytes
DEFAULT_DATA_SIZE = 5448  #chunk payload size used before the estimator has enough samples
CHUNK_HEADER_SIZE = 420 #approximate; if the ndn_name is quite long, the size should be larger (FlowConsumer.__init__ adds len(name))
MAX_DATA_SIZE = 8350 #when chunk size is bigger than 9000, the consumer cannot receive the chunk; 8370 is the max, leave some room for the ndn_name
#8392(more, we use 8350 for max size of ndnx),6920(1%), 5448(2%),3976(3%), 2504(5%), 1032(12%)
#MAX_DATA_SIZE = 10000
RTT_ALPHA = 7.0/8  #weight kept on the previous SRTT in RtoEstimator.update (i.e. 1 - alpha in RFC 6298 terms)
RTT_BELTA = 3.0/4  #weight kept on the previous RTTVAR (1 - beta in RFC 6298 terms; name keeps the original spelling)
DEFAULT_INTEREST_LIFETIME = float(4000.0) #must be float
DEFAULT_NDN_RUN_TIMEOUT = 1000 #microseconds
MAX_WINDOWSN = 256 #upper bound of the congestion window, for the slide window mechanism
ADAPTIVE_MOD_FLAG = "<adaptive>" #embedded in Interest.name to carry the data_size recommended by the consumer (presumably read by the producer — TODO confirm)
class MyData(object):
    """Hold the runtime status while a FlowConsumer is running.

    Attributes:
        beginT: wall-clock time the transfer started (set at construction)
        endT: wall-clock time the transfer finished; None while still running
        next_byte: the byte sequence number that FlowConsumer requests next
        final_byte: filled once a Data carries final_block_id information;
            when next_byte == final_byte, all the data has been received
        accept_bytes: total payload bytes accepted so far
        expected_chunkI: index in chunkInfos of the chunk expected to be satisfied next
        satisfied_chunkN: total number of chunks already satisfied
        unsatisfied_chunks_keyed_by_name: chunks that have been requested
            (including re-expressed Interests) but not satisfied yet; an
            OrderedDict keyed by ndn_name, so the head is the oldest request
    """
    def __init__(self):
        self.beginT = datetime.datetime.now()
        self.endT = None
        self.next_byte = 0
        self.final_byte = None
        self.accept_bytes = 0
        self.expected_chunkI = 0  # chunk index in chunkInfos which is expected to be satisfied next
        self.satisfied_chunkN = 0  # total number of chunks already satisfied
        # insertion-ordered so the first key is always the oldest outstanding request
        self.unsatisfied_chunks_keyed_by_name = collections.OrderedDict()
    def __str__(self):
        temp = self.get_time_cost()
        return "timecost=%s, accept_bytes=%s, next_byte=%s, final_byte=%s, unsatisfied_chunksN=%s" \
            %(temp, self.accept_bytes, self.next_byte, self.final_byte, len(self.unsatisfied_chunks_keyed_by_name))
    def get_time_cost(self):
        """Return the elapsed transfer time in seconds as a float.

        Uses endT when the transfer has finished, otherwise the current time.
        """
        # `is not None` is the idiomatic null test; total_seconds() replaces
        # the original manual days/seconds/microseconds arithmetic and yields
        # an identical result
        t2 = self.endT if self.endT is not None else datetime.datetime.now()
        return (t2 - self.beginT).total_seconds()
class Monitor(object):
    """Base class for runtime statistics collectors.

    Subclasses append sampled values to self.vars / self.vars2 and dump them
    as tab-separated text files under out_dir via _output.

    Attributes:
        Id: identifier used to build output file names
        out_dir: directory where output files are written
        vars: primary sample series
        vars2: secondary sample series (used by some subclasses)
    """
    def __init__(self, Id, out_dir):
        self.Id = Id
        self.out_dir = out_dir
        self.vars = []
        self.vars2 = []
    def sample(self):
        """Hook for subclasses; called when status is updated
        (update_loss, update_receiving or get_optimal_data_size).
        """
        pass
    def _output(self, outpath, li, tags):
        """Write series li to outpath as a tab-separated table.

        The first line is a '#'-prefixed header built from tags; each
        following line is the 1-based index and the value, tab-separated.
        """
        # `with` guarantees the file is closed even if a write fails
        # (the original leaked the handle on exceptions)
        with open(outpath, "w") as fout:
            fout.write("#")
            for tag in tags:
                fout.write(tag+"\t")
            fout.write("\n")
            # enumerate from 1: sample indices are 1-based in the output
            for i, val in enumerate(li, 1):
                fout.write("%s\t%s\n" %(i, val))
    def show(self):
        """Hook for subclasses: write collected samples to out_dir and
        return them.
        """
        pass
class ChunkInfo(object):
    """Bookkeeping for a single requested chunk.

    Attributes:
        beginT: time the chunk was requested (Interest sent)
        begin_byte: first byte of the chunk
        end_byte: last byte of the chunk; chunk_size = end_byte - begin_byte + 1
        packetN: expected number of underlying packets; may differ from the
            real number for the final chunk or an illegal chunk
        retxN: number of transmissions; 1 means no retransmission happened
        status: 1 = being sent, 0 = suppressed and waiting, 2 = satisfied
        endT: time the chunk was received (Data received)
        data_size: size of the application data carried by the chunk
        chunk_size: whole size of the chunk, including header, signature, etc.
    """
    def __init__(self):
        # filled when the instance is created by first_express_interest
        self.beginT = None
        self.begin_byte = None
        self.end_byte = None
        self.packetN = None
        # updated on every (re)transmission by express_interest
        self.retxN = 0
        self.status = 1
        # filled once the content arrives, by do_receive_content
        self.endT = None
        self.data_size = None
        self.chunk_size = 0
    def __str__(self):
        fields = ("beginT", "begin_byte", "endT", "end_byte", "packetN",
                  "retxN", "data_size", "chunk_size", "status")
        return ", ".join("%s=%s" % (f, getattr(self, f)) for f in fields)
class SlideWindow(Monitor):
    """Manage the congestion window.

    Implementation ref to "TCP Congestion Control" p571-580, Computer
    Networks (ed 5), by Andrew S. Tanenbaum and David J. Wetherall.

    packet_on_the_fly accounting:
        increased by: first_express and re_express
        decreased by: nack, in_order_content, out_of_order_content and timeout

    Attributes:
        threshold: slow-start threshold of the congestion window, in packets
        _cwnd: current congestion window, measured in packets of the
            underlying network (UDP/IP/Ethernet); private since it may be a
            float during additive increase. We call the NDN-layer datagram a
            "chunk" and reserve "packet" for the underlying network datagram.
        packet_on_the_fly: number of packets requested but not yet received
        is_fix: None for adaptive behaviour; an integer pins the window size
    """
    def __init__(self, Id, out_dir, enable_monitor, is_fix=None):
        Monitor.__init__(self, Id, out_dir)
        self.threshold = sys.maxint
        self.packet_on_the_fly = 0
        self.enable_monitor = enable_monitor
        if is_fix is not None:
            assert type(is_fix) == types.IntType, "illegal is_fix type: %s" %(str(is_fix))
        self.is_fix = is_fix
        # window starts at a single packet unless pinned to a fixed size
        self._cwnd = self.is_fix if self.is_fix is not None else 1
        self.sample()
    def __str__(self):
        return "threshold:%s, cwnd:%s, packet_on_the_fly:%s" %(self.threshold, self._cwnd, self.packet_on_the_fly)
    #overrides(Monitor)
    def sample(self):
        """Record the current window size (or the pinned size)."""
        if not self.enable_monitor:
            return
        value = self._cwnd if self.is_fix is None else self.is_fix
        self.vars.append(value)
    #overrides(Monitor)
    def show(self):
        """Dump the recorded window sizes to <Id>-winsize.txt and return them."""
        if not self.enable_monitor:
            return
        outpath = os.path.join(self.out_dir, self.Id + "-winsize.txt")
        self._output(outpath, self.vars, ["ChunkReceivedOrder", "CongestionWindowSize"])
        return self.vars
    def get_cwnd(self):
        """Return the usable window size as an integer."""
        if self.is_fix is not None:
            return self.is_fix
        return int(self._cwnd)
    def update_express(self, chunkinfo):
        """An Interest went out: its packets are now in flight."""
        self.packet_on_the_fly += chunkinfo.packetN
    def update_nack(self, chunkinfo):
        """A Nack arrived: its packets left the network.

        The window is deliberately not shrunk/grown here — a Nack usually
        means we are near the end, so enlarging would be pointless.
        """
        self.packet_on_the_fly -= chunkinfo.packetN
    def update_loss(self, chunkinfo):
        """A loss was detected: multiplicative decrease plus fast recovery."""
        self.packet_on_the_fly -= chunkinfo.packetN
        if self.is_fix is None:
            halved = int(self._cwnd / 2.0)  # sawtooth
            self._cwnd = halved if halved >= 1 else 1
            self.threshold = self._cwnd  # fast recovery
            log.debug("change cwnd to %s, threshold to %s, packet_on_the_fly to %s" %(self._cwnd, self.threshold, self.packet_on_the_fly))
        self.sample()
    def update_receiving(self, chunkinfo):
        """Data arrived: slow start below threshold, additive increase above."""
        self.packet_on_the_fly -= chunkinfo.packetN
        if self.is_fix is None:
            if self._cwnd < self.threshold:
                self._cwnd += chunkinfo.packetN  # slow start
            else:
                self._cwnd += chunkinfo.packetN * 1.0 / self._cwnd  # additive increase
            if self._cwnd > MAX_WINDOWSN:
                self._cwnd = MAX_WINDOWSN
        self.sample()
        log.debug("change cwnd to %s, threshold to %s, packet_on_the_fly to %s" %(self._cwnd, self.threshold, self.packet_on_the_fly))
class RtoEstimator(Monitor):
    """Estimate the retransmission timeout (RTO) from RTT samples.

    Uses the classic smoothed estimator (as in RFC 6298): SRTT and RTTVAR
    are exponentially weighted with RTT_ALPHA / RTT_BELTA and
    RTO = SRTT + 4 * RTTVAR. Karn's rule is applied in update():
    retransmitted chunks never contribute an RTT sample.

    Attributes:
        is_fix: None for adaptive behaviour, or a fixed value that get_rto
            returns every time
        _rto: current retransmission timeout
        _srtt: smoothed round-trip time
        _rttvar: round-trip time variance
    """
    def __init__(self, Id, out_dir, is_fix, enable_monitor):
        Monitor.__init__(self, Id, out_dir)
        self.enable_monitor = enable_monitor
        self.is_fix = is_fix
        self._rto = DEFAULT_INTEREST_LIFETIME if self.is_fix is None else self.is_fix
        self._srtt = DEFAULT_INTEREST_LIFETIME if self.is_fix is None else self.is_fix
        self._rttvar = 0
        self.sample()
    def __str__(self):
        # BUG FIX: the original body was `pass`, so str(instance) raised
        # "TypeError: __str__ returned non-string"; return a real summary
        return "rto=%s, srtt=%s, rttvar=%s, is_fix=%s" %(self._rto, self._srtt, self._rttvar, self.is_fix)
    #overrides(Monitor)
    def sample(self):
        """Record the current RTO (or the fixed value)."""
        if not self.enable_monitor:
            return
        if self.is_fix is None:
            self.vars.append(self._rto)
        else:
            self.vars.append(self.is_fix)
    #overrides(Monitor)
    def show(self):
        """Dump the recorded RTO samples to <Id>-rtt.txt and return them."""
        if not self.enable_monitor:
            return
        li = self.vars
        tags = ["InterestGeneratedOrder", "RTT"]
        outpath = os.path.join(self.out_dir, self.Id+"-rtt.txt")
        self._output(outpath, li, tags)
        return self.vars
    def get_rto(self):
        """Return the Interest lifetime to use for the next (re)expression."""
        self.sample()
        if self.is_fix is not None:
            return self.is_fix
        return self._rto
    def update(self, chunkinfo):
        """Fold the RTT of a freshly satisfied chunk into the estimator.

        Karn's rule: skip chunks that were retransmitted, since their RTT
        sample is ambiguous.
        """
        if chunkinfo.retxN > 1:
            return
        # total_seconds() also covers the days component, which the original
        # manual seconds+microseconds arithmetic silently dropped
        R = (chunkinfo.endT - chunkinfo.beginT).total_seconds() #float seconds
        self._srtt = RTT_ALPHA * self._srtt + (1 - RTT_ALPHA) * R
        self._rttvar = RTT_BELTA * self._rttvar + (1 - RTT_BELTA) * abs(self._srtt - R)
        self._rto = self._srtt + 4 * self._rttvar
class ChunkSizeEstimator(Monitor):
    """Estimate the optimal chunk size, according to Xiaoke Jiang's paper.

    Attributes:
        receivedN: number of packets (or chunks, depending on lossratetype) received
        lostN: number of packets (or chunks) lost
        packet_max_data_size: the max data (NDN Layer's Data) size of the packet, typically, 1472, (1500 - 20 - 8)
        is_fix: whether to return a fixed number every time or not. None means not, an integer value means yes
        lossratetype: "packet" or "chunk"; "chunk" is the default and currently
            the sole granularity implemented in get_optimal_data_size
    """
    def __init__(self, Id, out_dir, packet_max_data_size, enable_monitor, is_fix=None, lossratetype="chunk"):
        Monitor.__init__(self, Id, out_dir)
        self.enable_monitor = enable_monitor
        self.receivedN = 0
        self.lostN = 0
        self.packet_max_data_size = packet_max_data_size
        self.lossratetype = lossratetype
        self.T = 0  # last per-packet delivery probability computed by get_optimal_data_size
        self._loss_rate = 0
        if is_fix != None:
            assert type(is_fix) == types.IntType, "illegal is_fix type: %s" %(str(is_fix))
        self.is_fix = is_fix #if is_fix is an integer, then get_optimal_data_size returns that fixed value every time
        self._optimal_size = 0 if self.is_fix == None else self.is_fix
        self.sample()
    def __str__(self):
        return "receivedN=%s, lostN=%s, loss_rate=%s, optimal_size=%s" %(self.receivedN, self.lostN, self._loss_rate, self._optimal_size)
    #overrides(Monitor)
    def sample(self):
        # record both the current optimal size and the current loss rate
        if not self.enable_monitor:
            return
        self.vars.append(self._optimal_size)
        self.vars2.append(self._loss_rate)
    #overrides(Monitor)
    def show(self):
        """Write the optimal-size and loss-rate series to out_dir and return both."""
        if not self.enable_monitor:
            return
        li = self.vars
        tags = ["InterestGeneratedOrder", "OptimalDataSize"]
        outpath = os.path.join(self.out_dir, self.Id+"-chunksize.txt")
        self._output(outpath, li, tags)
        li = self.vars2
        tags = ["InterestGeneratedOrder", "PacketLossRate"]
        outpath = os.path.join(self.out_dir, self.Id+"-lossrate.txt")
        self._output(outpath, li, tags)
        return self.vars, self.vars2
    def get_loss_rate(self):
        """Return the per-packet loss rate derived from the observed
        chunk-level statistics and the current optimal size.
        """
        loss_rate = float(self.lostN)/(self.lostN + self.receivedN)
        # invert the chunk-level success probability across the packets of a chunk
        x = 1 - pow((1 - loss_rate),1 / math.ceil((CHUNK_HEADER_SIZE + self._optimal_size) /float( self.packet_max_data_size )))
        return x
    def get_optimal_data_size(self):
        """Return the optimal payload size: only data, not including the
        chunk header size.

        Return:
            chunk_data_size: the size of data contained in a chunk; chunk
            header/signature etc. are not included
        """
        if (self.receivedN + self.lostN) == 0 or float(self.lostN)/(self.receivedN + self.lostN) == 0:
            # no samples yet, or no loss observed so far
            if self.receivedN > 20: #quite reliable underlying layer protocol
                chunk_data_size = MAX_DATA_SIZE
            else: #just in preliminary stage
                chunk_data_size = DEFAULT_DATA_SIZE
            if (self.receivedN + self.lostN) != 0:
                self._loss_rate = float(self.lostN)/(self.receivedN + self.lostN)
        else:
            #ref to: the paper's optimal-size derivation — presumably Jiang's
            # formula balancing header overhead against per-packet loss; TODO confirm
            loss_rate = float(self.lostN)/(self.receivedN + self.lostN) # 1- \omega
            self._loss_rate = loss_rate
            if loss_rate == 1:
                # everything lost so far: back off by halving, floor at 1000 bytes
                chunk_data_size = self._optimal_size/2
                if chunk_data_size <= 1000:
                    chunk_data_size = 1000
                self._optimal_size = chunk_data_size
                log.debug("optimal chunk data size: %s, loss rate: %s" %(chunk_data_size, self._loss_rate))
                self.sample()
                return chunk_data_size
            if self._optimal_size == 0 :
                self._optimal_size = DEFAULT_DATA_SIZE
            M = self.packet_max_data_size
            # T: probability a single underlying packet gets through, derived
            # from the chunk-level loss rate at the current chunk size
            T = pow((1 - loss_rate),1 / math.ceil((CHUNK_HEADER_SIZE + self._optimal_size) /float( M )))
            self.T = T
            D = CHUNK_HEADER_SIZE #\Delta
            #log.debug("lossrate=%s" %(loss_rate))
            #print "loss_rate is %s" %loss_rate
            # continuous optimum of the paper's objective (quadratic in the data size)
            chunk_data_size = (-1*D*math.log(T) -pow((D*(math.log(T)))**2-4*M*math.log(T)*D, 0.5))/(2*math.log(T)) #data size of the chunk
            # snap to a whole number of packets: compare the objective at k and k+1 packets
            k = int (math.ceil((CHUNK_HEADER_SIZE + chunk_data_size) / M))
            temp = k
            r1 = (T**temp) * (temp * M) / (CHUNK_HEADER_SIZE + temp*M)
            temp = k+1
            r2 = (T**temp) * (temp * M) / (CHUNK_HEADER_SIZE + temp*M)
            if r1 > r2:
                chunk_data_size = k * M - CHUNK_HEADER_SIZE
            else:
                chunk_data_size = (k+1) * M - CHUNK_HEADER_SIZE
            if chunk_data_size > MAX_DATA_SIZE:
                chunk_data_size = MAX_DATA_SIZE
            self._optimal_size = chunk_data_size
        if self.is_fix != None:
            # pinned mode: always report the fixed size
            chunk_data_size = self.is_fix
            self._optimal_size = chunk_data_size
        log.debug("optimal chunk data size: %s, loss rate: %s" %(chunk_data_size, self._loss_rate))
        self.sample()
        return chunk_data_size
    def update_loss(self, chunkinfo):
        """Account a lost chunk in the loss statistics."""
        self.lostN += 1
        #self.lostmaxN += chunkinfo.packetN
        if self.lossratetype == "packet":
            # charge one packet of the chunk as lost, the rest as received
            self.receivedN += chunkinfo.packetN - 1
        elif self.lossratetype == "chunk":
            self.receivedN += 0
        else:
            log.warn("unknow lossratetype: %s" %(self.lossratetype))
        self.sample()
    def update_receiving(self, chunkinfo):
        """Account a successfully received chunk in the loss statistics."""
        if self.lossratetype == "packet":
            self.receivedN += chunkinfo.packetN
        elif self.lossratetype == "chunk":
            self.receivedN += 1
        else:
            log.warn("unknow lossratetype: %s" %(self.lossratetype))
        #self.receivedN += chunkinfo.packetN
class Controller(object):
    """Simple run-state switch shared with FlowConsumer.

    Status values: STATUS_ON (run continuously), STATUS_OFF (stopped),
    STATUS_STEP_ON (run a single step), STATUS_STEP_OFF (paused between steps).
    """
    STATUS_ON = 1
    STATUS_OFF = 2
    STATUS_STEP_ON = 3
    STATUS_STEP_OFF = 4
    def __init__(self):
        # a freshly created controller is in the running state
        self.status = self.STATUS_ON
class FlowConsumer(pyndn.Closure, Controller):
    """Continuously request Data, with a TCP-style congestion control
    mechanism and optimal chunk size estimation.

    Attributes:
        status: run state, see Controller
        ndn_name: ndn name prefix of the data
        fout: the received content is written to fout; make sure fout has a
            "write" method, fout.write("<the content>")
        size_fix: None for an adaptive chunk size, or an integer for a fixed one
        window_fix: None for an adaptive window, or an integer for a fixed one
        packet_max_data_size: the max data (NDN Layer's Data) size of the
            packet, typically 1472, (1500 - 20 - 8)
        is_all: whether all the contents have already been fetched or not
    """
    def __init__(self, Id, name, fout=None, monitor_out_dir=settings.OUT_DATA_DIR, cache_data=True,
            enable_monitor=True, size_fix=None, window_fix=None, rtt_fix=None,
            packet_max_data_size=ETH_MTU-IP_HEADER_SIZE-UDP_HEADER_SIZE):
        """Prepare output files, estimators, window and the NDN handle for one flow.

        Args:
            Id: identifier used in log lines and monitor file names
            name: ndn name (prefix) of the content to fetch
            fout: file-like sink for received content; when None a file under
                ./acquire/ derived from the name is created
            monitor_out_dir: directory for monitor output files
            cache_data: when False, chunk contents are dropped after writing
            enable_monitor: whether to record samples and upcall events
            size_fix / window_fix / rtt_fix: None for adaptive behaviour, or
                fixed values for chunk size / window / RTO respectively
            packet_max_data_size: max payload per underlying network packet
        """
        Controller.__init__(self)
        self.Id = Id
        if monitor_out_dir == None:
            monitor_out_dir = settings.OUT_DATA_DIR
        self.monitor_out_dir = monitor_out_dir
        print self.monitor_out_dir
        log.info("monitor file: %s" %(self.monitor_out_dir))
        if not os.path.exists(self.monitor_out_dir):
            os.makedirs(self.monitor_out_dir)
        # the name travels in every chunk, so it eats into both the header
        # budget and the max payload (mutates the module-level constants)
        global CHUNK_HEADER_SIZE, MAX_DATA_SIZE
        #if len(ndn_name) > 50:
        CHUNK_HEADER_SIZE += len(name)
        MAX_DATA_SIZE -= len(name)
        if not name.startswith("ndnx:") and not name.startswith("/"):
            name = "/" + name
        self.ndn_name = pyndn.Name(name)
        """since there is a "name" field in threading.Thread, we name it as ndn_name
        """
        self.cache_data = cache_data
        self.enable_monitor = enable_monitor
        if self.enable_monitor == True:
            # per-flow upcall event trace, written by log_upcall_events
            self.event_log = os.path.join(self.monitor_out_dir, "upcall_events-%s.log" %(self.Id))
            self.event_log = open(self.event_log, "w")
        self.fout = fout
        if self.fout == None:
            # default sink: ./acquire/<name with '/' replaced by '-'>
            self.fout = os.path.join(".", "acquire")
            if not os.path.exists(self.fout):
                os.makedirs(self.fout)
            self.fout = os.path.join(self.fout, name.replace("/", "-")[1:])
            #self.fout = os.path.join(self.fout, Id)
            self.fout = open(self.fout, "w")
        self.size_fix = size_fix
        self.window_fix = window_fix
        self.packet_max_data_size = packet_max_data_size
        self.is_all = False #already fetched all the chunks
        self.handle = pyndn.NDN()
        self.chunkInfos = []#only insert new elements in first_express_interest
        self.mydata = MyData()
        self.chunkSizeEstimator = ChunkSizeEstimator(Id=self.Id, out_dir=self.monitor_out_dir, packet_max_data_size=self.packet_max_data_size, is_fix=size_fix, enable_monitor=enable_monitor)
        self.window = SlideWindow(Id=self.Id, out_dir=self.monitor_out_dir, is_fix=window_fix, enable_monitor=enable_monitor)
        self.rtoEstimator = RtoEstimator(Id=self.Id, out_dir=self.monitor_out_dir, is_fix=rtt_fix, enable_monitor=enable_monitor)
    def start(self):
        """Run the consumer loop until all content is fetched or stop() is called.

        A big difference with the alternative, self.handle.run(-1), which
        can't catch the signal interrupt at all, even in its parent thread:
        with this while-loop check the parent thread can catch the signal,
        for the whole process won't sink in self.handle.run().
        """
        log.warn("%s begin to request %s" %(self.Id, self.ndn_name))
        self.status = Controller.STATUS_ON
        self.first_express_interest()
        #self.handle.run(-1)
        while self.status != Controller.STATUS_OFF:
            #print "test! status=%s" %(self.status)
            if self.status == Controller.STATUS_ON:
                self.handle.run(DEFAULT_NDN_RUN_TIMEOUT)
            elif self.status == Controller.STATUS_STEP_ON:
                # single-step mode: run one slice, then pause
                self.handle.run(DEFAULT_NDN_RUN_TIMEOUT)
                self.status = Controller.STATUS_STEP_OFF
            elif self.status == Controller.STATUS_STEP_OFF:
                time.sleep(1)
        return self.is_all
    def __str__(self):
        temp = "requestedChunkN=%s" %(len(self.chunkInfos)) +", "+ str(self.mydata) +", "+\
            str(self.window) + ", " + str(self.chunkSizeEstimator)
        return temp
    def stop(self):
        """Switch the consumer off and flush/close the output files."""
        self.status = Controller.STATUS_OFF
        """this is important, since we don't want to call stop twice.
        stop is called implicitly in in_order_content when consuemr acquire all the contents
        thus, when upper layer application call stop, it won't cause any problem, like fout is closed
        meanwhile, we don't suggest upper layer applications change the status
        """
        if _pyndn.is_run_executing(self.handle.ndn_data):
            # nudge the event loop so handle.run() returns quickly
            self.handle.setRunTimeout(1)
        if not self.fout.closed:
            self.mydata.endT = datetime.datetime.now()
            self.fout.flush()
            self.fout.close()
        if self.enable_monitor:
            if not self.event_log.closed:
                self.event_log.flush()
                self.event_log.close()
        # NOTE(review): currentThread().is_alive() is always True for the
        # calling thread, so this guard is effectively a no-op — TODO confirm intent
        if threading.currentThread().is_alive():
            log.info("%s stops!" %(self.Id))
            log.info("requestedChunkN=%s" %(len(self.chunkInfos)))
            log.info(str(self.mydata))
            log.info(str(self.window))
            log.info(str(self.chunkSizeEstimator))
        return 0
    def summary(self):
        """Return the collected series:
        (OptimalChunkSizes, PacketLossRates, CongestionWindowSizes, Rto, [TimeCost]).

        NOTE(review): the show() methods return None when enable_monitor is
        False, so summary presumably is only called with monitoring enabled.
        """
        # NOTE(review): `outpath = id` binds the builtin `id` and is never
        # used — looks like leftover code, TODO confirm and remove
        outpath = id
        OptimalChunkSizes, PacketLossRates = self.chunkSizeEstimator.show()
        CongestionWindowSizes = self.window.show()
        Rto = self.rtoEstimator.show()
        TimeCost = self.mydata.get_time_cost()
        return OptimalChunkSizes, PacketLossRates, CongestionWindowSizes, Rto, [TimeCost]
    def re_express_interest(self, chunkinfo):
        """Re-send the Interest for a chunk that timed out or was suppressed."""
        if self.status == Controller.STATUS_OFF or self.status == Controller.STATUS_STEP_OFF:
            return
        if self.mydata.final_byte !=None and chunkinfo.begin_byte >= self.mydata.final_byte: #suppress the illegal request
            log.debug("illegel request, do not re-express it: %s" %(chunkinfo.ndn_name))
        elif self.is_all:#shouldn't happen, since already checked before re-expressing
            log.error("already get all the requests, do not re-express it: %s" %(chunkinfo.ndn_name))
        else:
            self.window.update_express(chunkinfo)
            # refresh the size estimate even though this chunk keeps its old size
            self.chunkSizeEstimator.get_optimal_data_size()
            self.express_interest(chunkinfo)
    def first_express_interest(self):
        """Build a ChunkInfo for the next byte range and express its first Interest."""
        if self.status == Controller.STATUS_OFF or self.status == Controller.STATUS_STEP_OFF:
            return
        if self.mydata.final_byte != None and self.mydata.next_byte >= self.mydata.final_byte:
            #we do not use is_all to check, since final_byte is more accurate and is_all -> final_byte
            log.debug("illegel request, do not express it, next_byte: %s" %(self.mydata.next_byte))
            return
        chunkinfo = ChunkInfo()
        chunkinfo.beginT = datetime.datetime.now()
        chunkinfo.begin_byte = self.mydata.next_byte
        temp = self.chunkSizeEstimator.get_optimal_data_size()
        chunkinfo.end_byte = chunkinfo.begin_byte + temp - 1
        # expected number of underlying packets for this chunk
        chunkinfo.packetN = math.ceil((temp + CHUNK_HEADER_SIZE)/float(self.packet_max_data_size))
        chunkinfo.status = 1
        name = self.ndn_name
        # name layout: <prefix>/<adaptive>/<data_size>/<begin_byte>
        # NOTE(review): Name.append presumably returns a new Name rather than
        # mutating self.ndn_name — TODO confirm against the pyndn API
        name = self.ndn_name.append(ADAPTIVE_MOD_FLAG).append(str(temp)) #only leave the data size
        name = name.append(chunkinfo.begin_byte)
        chunkinfo.ndn_name = name
        self.chunkInfos.append(chunkinfo)
        self.mydata.unsatisfied_chunks_keyed_by_name[str(name)] = chunkinfo
        #packet_on_the_fly, possible outcomes: illegal, out-of-order, in-order and retransmission
        self.window.update_express(chunkinfo)
        self.express_interest(chunkinfo)
        self.mydata.next_byte = chunkinfo.end_byte + 1
    def express_interest(self, chunkinfo):
        """this method may express illegal Interests; thus, re_express_interest and first_express_interest are in charge of checking;
        even then, there may be illegal Interests, due to unknown final_byte, leading to useless chunkinfo in chunkInfos and illegal Data (Nack) or Interest timeout
        (we do not use is_all to check, since final_byte is more accurate and is_all -> final_byte);
        thus, we need do_receive_content to handle illegal Data
        """
        assert chunkinfo != None, "chunkinfo == None"
        assert chunkinfo.endT == None, "chunkinfo.endT != None"
        selector = pyndn.Interest()
        selector.answerOriginKind = 0#producer generates every time (no cached answer)
        # NOTE(review): "childSelctor" looks like a typo for childSelector —
        # TODO confirm against the pyndn Interest API
        selector.childSelctor = 1
        selector.interestLifetime = self.rtoEstimator.get_rto()
        rst = self.handle.expressInterest(chunkinfo.ndn_name, self, selector)
        if rst != None and rst < 0:
            log.info("fail to express interest=%s with result %s" %(chunkinfo.ndn_name, rst))
            self.window.update_nack(chunkinfo)
            chunkinfo.status = 0
        else:
            chunkinfo.retxN += 1
            log.debug("express interest=%s" %(chunkinfo.ndn_name))
    def log_upcall_events(self, kind, upcallInfo):
        """Append one line per upcall to the per-flow event log.

        Row schema: kind, time since the chunk was requested, chunkinfo index
        in self.chunkInfos, chunkinfo details.
        """
        ist = upcallInfo.Interest
        name = ist.name
        flag_index = None #len(ist_name) - 2 #index of the end component
        # locate the ADAPTIVE_MOD_FLAG component; begin_byte sits two
        # components after it (see first_express_interest's name layout)
        for i in range(len(name)-2):
            sub = name[i]
            if sub == ADAPTIVE_MOD_FLAG:
                flag_index = i
                break
        # NOTE(review): if the flag is absent, flag_index stays None and the
        # next line raises; likewise `index` is unbound if no chunk matches —
        # presumably every logged Interest carries the flag. TODO confirm
        begin_byte = int(name[flag_index+2])
        for j in self.chunkInfos:
            if j.begin_byte == begin_byte:
                index = self.chunkInfos.index(j)
                break
        #fout = open("upcall_events.log", "a")
        if kind == pyndn.UPCALL_INTEREST_TIMED_OUT:
            kindstr = "loss"
        elif kind == pyndn.UPCALL_CONTENT:
            kindstr = "fetch"
        else:
            kindstr = str(kind)
        self.event_log.write("kind=%s, time=%s, index=%i, %s\n" %( kindstr, datetime.datetime.now() - self.chunkInfos[index].beginT, index, self.chunkInfos[index]))
    def upcall(self, kind, upcallInfo):
        """pyndn callback dispatcher: route each upcall kind to its handler."""
        if kind == pyndn.UPCALL_FINAL:#handler is about to be deregistered
            #if self.status and not self.is_all:
                #self.handle.setRunTimeout(DEFAULT_NDN_RUN_TIMEOUT)
                #log.error("handler is about to be deregistered, reset it." )
            return pyndn.RESULT_OK
        if kind in [pyndn.UPCALL_INTEREST, pyndn.UPCALL_CONSUMED_INTEREST]:
            # a consumer should never receive Interest-side upcalls
            log.error("unexpected kind: %s" %kind)
            return pyndn.RESULT_OK
        self.log_upcall_events(kind, upcallInfo)
        if kind == pyndn.UPCALL_INTEREST_TIMED_OUT:
            self.do_meet_accident(kind, upcallInfo)
            return pyndn.RESULT_OK
        if kind in [pyndn.UPCALL_CONTENT_UNVERIFIED, pyndn.UPCALL_CONTENT_BAD]:
            self.do_meet_accident(kind, upcallInfo)
            return pyndn.RESULT_OK
        assert kind == pyndn.UPCALL_CONTENT, "kind: "+str(kind)
        self.do_receive_content(kind, upcallInfo)
        return pyndn.RESULT_OK
    def do_meet_accident(self, kind, upcallInfo):
        """Handle a timeout or bad/unverified content for an outstanding chunk."""
        name = str(upcallInfo.Interest.name)
        if not name in self.mydata.unsatisfied_chunks_keyed_by_name:
            #since it's not possible that two same Interests are on the fly at the same time, it shouldn't happen
            log.error("timeout Interest not in the unsatisfied list, it should not happend: %s!!" %(name))
            return
        chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name[name]
        self.chunkSizeEstimator.update_loss(chunkinfo)
        self.window.update_loss(chunkinfo)
        # kind 4 is presumably pyndn.UPCALL_INTEREST_TIMED_OUT — TODO confirm
        if kind == 4:
            log.debug("timeout, Interest=%s, out packet: %d" \
                %(upcallInfo.Interest.name, self.window.packet_on_the_fly))
            #log.warn("%s" %(upcallInfo))
        else:
            log.warn("-----------------strange accident: kind=%s, Interest=%s------------------" %(kind, upcallInfo.Interest.name))
        #window check here
        if self.window.packet_on_the_fly < self.window.get_cwnd():
            #it's already made sure that the chunk is not satisfied yet, but it could be illegal
            self.re_express_interest(chunkinfo)
        else:
            chunkinfo.status = 0 #wait for re-expressing
    def do_receive_content(self, kind, upcallInfo):
        """receive a content; there are 4 different scenarios: duplicated content, in-order content, out-of-order content, illegal content
        """
        name = str(upcallInfo.Interest.name)
        if not name in self.mydata.unsatisfied_chunks_keyed_by_name:
            log.debug(self.mydata.unsatisfied_chunks_keyed_by_name.keys())
            #the chunkinfo was already satisfied by a previous chunk (retransmission case)
            self.duplicate_content(upcallInfo)
            return
        chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name[name]
        chunkinfo.endT = datetime.datetime.now()
        chunkinfo.data_size = len(upcallInfo.ContentObject.content)
        chunkinfo.chunk_size = len(_pyndn.dump_charbuf(upcallInfo.ContentObject.ndn_data))
        chunkinfo.content = upcallInfo.ContentObject.content
        # real number of underlying packets this chunk occupied
        temp = math.ceil((chunkinfo.chunk_size)/float(self.packet_max_data_size))
        self.rtoEstimator.update(chunkinfo)
        fbi = upcallInfo.ContentObject.signedInfo.finalBlockID
        if fbi != None:
            if isinstance(fbi, str):
                fbi = pyndn.Name.seg2num(fbi)
            #log.info("***************final chunk id: %s" %(fbi))
            if self.mydata.final_byte == None: #the first content carrying the final block id
                self.mydata.final_byte = int(fbi)
            else:
                assert self.mydata.final_byte == int(fbi), "get different final block id, old %s and new %s" %(self.mydata.final_byte, int(fbi))
        si = upcallInfo.ContentObject.signedInfo
        if si.type == pyndn.CONTENT_NACK:
            self.nack_content(upcallInfo)
        elif si.type == pyndn.CONTENT_DATA:
            if chunkinfo.packetN != temp:
                if self.mydata.final_byte != None and chunkinfo.end_byte > self.mydata.final_byte:#final chunk or illegal chunk
                    log.debug("final chunk, thus size is shorter than expected")
                else:
                    log.debug("expected packetN (%s) != real packetN (%s), final_byte (%s), upcallInfo: %s, chunksize:%s" %(chunkinfo.packetN, temp, self.mydata.final_byte, name, chunkinfo.chunk_size))
                #chunkinfo.packetN = temp
            self.chunkSizeEstimator.update_receiving(chunkinfo)
            # if self.mydata.final_byte!=None and chunkinfo.end_byte < self.mydata.final_byte:
            #     assert chunkinfo.data_size > 500, "chukinfo is strange, %s" %(chunkinfo)
            #
            # the head of the ordered dict is the oldest outstanding request
            if name == self.mydata.unsatisfied_chunks_keyed_by_name.keys()[0]:
                self.in_order_content(upcallInfo)
            else:
                self.out_of_order_content(upcallInfo)
            # collect up to two suppressed chunks waiting for re-expression
            retxQ = []
            for chunkinfo in self.mydata.unsatisfied_chunks_keyed_by_name.itervalues():
                if chunkinfo.status == 0:#waiting for re-expressing
                    retxQ.append(chunkinfo)
                    if len(retxQ) == 2:
                        break
            #here we do not check whether the request is legal or not
            for i in [0, 1]:#up to two sends per received chunk
                if self.window.packet_on_the_fly < self.window.get_cwnd():
                    #re-expressing takes priority over requesting new data
                    if len(retxQ) != 0:
                        chunkinfo = retxQ.pop(0)
                        chunkinfo.status = 1
                        self.re_express_interest(chunkinfo)
                        continue
                    if self.mydata.final_byte== None:
                        self.first_express_interest()
                    elif self.mydata.final_byte!= None and self.mydata.next_byte < self.mydata.final_byte:
                        self.first_express_interest()
        else:
            log.critical("unkown Data type: %s" %(upcallInfo.ContentObject))
    def nack_content(self, upcallInfo):
        """Producer explicitly signalled no data for this name: drop the request."""
        name = str(upcallInfo.Interest.name)
        log.info("received Nack: %s" %(name))
        chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name.pop(name)
        self.window.update_nack(chunkinfo)
    def duplicate_content(self, upcallInfo):
        """receive a duplicate content (already satisfied by a retransmission)"""
        log.warn("received duplicated Data: %s" %(upcallInfo.Interest.name))
    def in_order_content(self, upcallInfo):
        """the best scenario: content is received in-order; however, we should check those buffered out-of-order chunks
        """
        name = str(upcallInfo.Interest.name)
        chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name.pop(name)
        if not self.fout.closed:
            self.fout.write(chunkinfo.content)
        else:
            log.critical("fails to write content")
        self.mydata.accept_bytes += chunkinfo.data_size
        if not self.cache_data:
            chunkinfo.content = None #release the payload once written
        chunkinfo.status = 2 #satisfied
        self.mydata.satisfied_chunkN += 1
        self.mydata.expected_chunkI += 1
        self.window.update_receiving(chunkinfo)
        log.debug("received in-order Data: %s, out packet: %s" %(name, self.window.packet_on_the_fly))
        #check the out-of-order contents received before
        # .keys() is a snapshot (a list in Python 2), so popping inside the loop is safe
        for name in self.mydata.unsatisfied_chunks_keyed_by_name.keys():
            chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name[name]
            if chunkinfo.endT == None:
                # head of the queue is still outstanding: stop flushing
                break
            else:
                chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name.pop(name)
                if not self.fout.closed:
                    self.fout.write(chunkinfo.content)
                else:
                    log.critical("fails to write content")
                self.mydata.accept_bytes += chunkinfo.data_size
                if not self.cache_data:
                    chunkinfo.content = None
                self.mydata.expected_chunkI += 1
                chunkinfo.status = 2 #satisfied
        # NOTE(review): final_byte is compared against total accepted bytes
        # here, i.e. it is treated as the content length — TODO confirm
        if self.mydata.final_byte == self.mydata.accept_bytes:
            self.is_all = True
            for chunkinfo in self.mydata.unsatisfied_chunks_keyed_by_name.itervalues():
                log.warn(str(chunkinfo))
            log.warn("------------------------ %s all the contents are received---------------------------" %(self.Id))
            self.stop()
        #
        #    if self.mydata.final_byte != None and not self.is_all: #check already fetch all the chunks or not
        #        if (len(self.mydata.unsatisfied_chunks_keyed_by_name) == 0 and self.mydata.next_byte >= self.mydata.final_byte) or \
        #            (len(self.mydata.unsatisfied_chunks_keyed_by_name) > 0 and \
        #            self.mydata.unsatisfied_chunks_keyed_by_name.itervalues().next().begin_byte >=self.mydata.final_byte):
        #            self.is_all = True
        #            log.warn("---------------------------all the contents are received---------------------------")
        #            for chunkinfo in self.mydata.unsatisfied_chunks_keyed_by_name.itervalues():
        #                log.warn(str(chunkinfo))
        #            self.stop()
        #
    def out_of_order_content(self, upcallInfo):
        """Buffer an out-of-order chunk: it stays in the unsatisfied dict
        (with endT set) until the expected in-order chunk arrives and
        in_order_content flushes it.

        NOTE(review): the original comment said the window is not updated
        here, yet update_receiving IS called below — confirm intent.
        """
        name = str(upcallInfo.Interest.name)
        chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name.get(name)
        self.mydata.satisfied_chunkN += 1
        self.window.update_receiving(chunkinfo)
        log.debug("received out-of-order Data: %s, out packet: %s" %(name, self.window.packet_on_the_fly))
class FlowConsumerThread(FlowConsumer, threading.Thread):
    """A FlowConsumer that runs in its own thread.

    Both base classes define start(), so the overrides below pin down which
    implementation runs where: start() delegates to threading.Thread.start()
    to spawn the thread, and the thread body (run) drives
    FlowConsumer.start().
    """
    def __init__(self, Id, name, fout=None, monitor_out_dir="output", cache_data=True, enable_monitor=True, size_fix=None, window_fix=None, rtt_fix=None,
                 packet_max_data_size=ETH_MTU-IP_HEADER_SIZE-UDP_HEADER_SIZE):
        threading.Thread.__init__(self)
        consumer_kwargs = dict(fout=fout, monitor_out_dir=monitor_out_dir,
                               cache_data=cache_data, enable_monitor=enable_monitor,
                               size_fix=size_fix, window_fix=window_fix, rtt_fix=rtt_fix,
                               packet_max_data_size=packet_max_data_size)
        FlowConsumer.__init__(self, Id, name, **consumer_kwargs)
    def start(self):
        """Spawn the thread (threading.Thread.start wins the name clash)."""
        threading.Thread.start(self)
    def run(self):
        """Thread body: run the consumer loop (FlowConsumer.start)."""
        FlowConsumer.start(self)
    def stop(self):
        """Stop the consumer loop.

        Only FlowConsumer implements stop today; the explicit delegation
        guards against a future threading.Thread.stop shadowing it.
        """
        FlowConsumer.stop(self)
        return 0
class FlowProducer(pyndn.Closure, Controller):
"""to build producer which can response Interest from FlowConsumer, typically, can "understand" the ADAPTIVE_FLAG
Attributes:
ndn_name: the published ndn_name prefix, if
path: the local path of published content(s)
is_dir: whether the path is a directory or not,
if yes, the all the files contained in the directory is published with ndn_name prefix, and files' ndn_name is append to the ndn_name prefix respectively
if no, path is link to file and the file's ndn_name itself is ignored
readers: use to store all the opened file reader and keyed by ndn_name
"""
def __init__(self, name, path, is_dir=True):
"""
"""
Controller.__init__(self)
if not name.startswith("ndnx:") and not name.startswith("/"):
name = "/" + name
self.ndn_name = pyndn.Name(name)
self.path = path
if not os.path.exists(self.path):
log.critical("path %s does not exist" %(self.path))
exit(0)
if is_dir and (not os.path.isdir(self.path)):
log.critical("path %s is not a directory" %(self.path))
exit(0)
if (not is_dir) and (not os.path.isfile(self.path)):
log.critical("path %s is not a file" %(self.path))
exit(0)