def write(self):
    """Write the collected data to the cache file.

    Layout is horizontal: every sublist of self.datass becomes one
    tab-separated row.  When self.headers is set, a comment line of
    the form "#headers: |h1|h2..." is written first.

    Does nothing (beyond a critical log) when self.datass is None.
    """
    if self.datass is None:
        log.critical("datass is None")
        return
    # Validate before touching the output file so a bad datass cannot
    # leave a partially written cache behind.
    assert isinstance(self.datass, list), \
        "self.datass is not a list, %s" % (self.datass)
    # `with` guarantees the handle is flushed and closed even if a
    # write raises (the original used explicit flush()/close()).
    with open(self.cacheout, "w") as fout:
        if self.headers is not None:
            # e.g. "#headers: |col1|col2\n" — join avoids the
            # quadratic string += loop of the original.
            fout.write("#headers: " +
                       "".join("|" + header for header in self.headers) +
                       "\n")
        for row in self.datass:
            for val in row:
                fout.write("%s\t" % (val,))
            fout.write("\n")
    log.info("$ " + self.Id + " cache writing")
def in_order_content(self, upcallInfo):
    """the best scenario, content is received in-order, however, we should
    check those buffered out-of-order chunks

    Writes the chunk named by `upcallInfo` to self.fout, updates the
    receive accounting, then drains any buffered out-of-order chunks
    that now form a contiguous run, and finally stops the transfer
    once accept_bytes reaches final_byte.
    """
    name = str(upcallInfo.Interest.name)
    # Remove the now-satisfied chunk from the pending table.
    chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name.pop(name)
    if not self.fout.closed:
        self.fout.write(chunkinfo.content)
    else:
        log.critical("fails to write content")
    self.mydata.accept_bytes += chunkinfo.data_size
    if not self.cache_data:
        chunkinfo.content = None  # free the payload once written out
    chunkinfo.status = 2  # satisfied
    self.mydata.satisfied_chunkN += 1
    self.mydata.expected_chunkI += 1
    self.window.update_receiving(chunkinfo)
    log.debug("received in-order Data: %s, out packet: %s" % (name, self.window.packet_on_the_fly))
    # check the out-of-order contents received before: walk the pending
    # table in order (presumably an insertion-ordered dict keyed by chunk
    # name — TODO confirm) and flush every chunk that already arrived
    # (endT set), stopping at the first gap.  .keys() returns a snapshot
    # list here (Python 2), so popping inside the loop is safe.
    for name in self.mydata.unsatisfied_chunks_keyed_by_name.keys():
        chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name[name]
        if chunkinfo.endT == None:
            # First still-missing chunk: later ones stay buffered.
            break
        else:
            chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name.pop(name)
            if not self.fout.closed:
                self.fout.write(chunkinfo.content)
            else:
                log.critical("fails to write content")
            self.mydata.accept_bytes += chunkinfo.data_size
            if not self.cache_data:
                chunkinfo.content = None
            self.mydata.expected_chunkI += 1
            chunkinfo.status = 2  # satisfied
            # NOTE(review): unlike the in-order path above, this branch
            # does not increment satisfied_chunkN nor call
            # window.update_receiving — confirm whether that is intended.
    if self.mydata.final_byte == self.mydata.accept_bytes:
        # Everything accounted for: log any stragglers and shut down.
        self.is_all = True
        for chunkinfo in self.mydata.unsatisfied_chunks_keyed_by_name.itervalues():
            log.warn(str(chunkinfo))
        log.warn("------------------------ %s all the contents are received---------------------------" % (self.Id))
        self.stop()
def __init__(self, name, path, is_dir=True):
    """Producer serving the filesystem `path` under the NDN prefix `name`.

    name   -- NDN prefix; "/" is prepended unless it already starts
              with "ndnx:" or "/".
    path   -- filesystem path to publish; must exist and agree with
              is_dir, otherwise the process aborts.
    is_dir -- True when `path` is a directory tree, False for a
              single file.
    """
    Controller.__init__(self)
    if not name.startswith("ndnx:") and not name.startswith("/"):
        name = "/" + name
    self.ndn_name = pyndn.Name(name)
    self.path = path
    # Validate the path up front: a misconfigured producer cannot serve
    # anything, so abort.  Exit with a NON-zero status — the original
    # exit(0) wrongly reported success on these fatal errors.
    if not os.path.exists(self.path):
        log.critical("path %s does not exist" % (self.path))
        exit(1)
    if is_dir and not os.path.isdir(self.path):
        log.critical("path %s is not a directory" % (self.path))
        exit(1)
    if (not is_dir) and not os.path.isfile(self.path):
        log.critical("path %s is not a file" % (self.path))
        exit(1)
    self.handle = pyndn.NDN()
    self.is_dir = is_dir
    self.readers = {}  # Reader instances keyed by stringified ndn_name
def do_receive_content(self, kind, upcallInfo):
    """receive a contents, there are 4 different scenarios: duplicated
    content, in-order content, out-of-order content, illegal content

    Updates the matching chunkinfo's timing/size bookkeeping, records a
    final-block id when present, dispatches on the ContentObject type
    (NACK vs DATA vs unknown), and finally (re-)expresses up to two
    Interests while the congestion window allows it.
    """
    name = str(upcallInfo.Interest.name)
    if not name in self.mydata.unsatisfied_chunks_keyed_by_name:
        log.debug(self.mydata.unsatisfied_chunks_keyed_by_name.keys())
        # the chunkinfo is already satisfied by a previous chunk
        # (a retransmission arrived late)
        self.duplicate_content(upcallInfo)
        return
    chunkinfo = self.mydata.unsatisfied_chunks_keyed_by_name[name]
    chunkinfo.endT = datetime.datetime.now()
    chunkinfo.data_size = len(upcallInfo.ContentObject.content)
    # chunk_size is the wire size of the whole encoded ContentObject
    chunkinfo.chunk_size = len(_pyndn.dump_charbuf(upcallInfo.ContentObject.ndn_data))
    chunkinfo.content = upcallInfo.ContentObject.content
    # number of network packets this chunk should have occupied
    temp = math.ceil((chunkinfo.chunk_size) / float(self.packet_max_data_size))
    self.rtoEstimator.update(chunkinfo)
    fbi = upcallInfo.ContentObject.signedInfo.finalBlockID
    if fbi != None:
        if isinstance(fbi, str):
            # finalBlockID may arrive as an encoded segment component;
            # decode it to a number
            fbi = pyndn.Name.seg2num(fbi)
        #log.info("***************final chunk id: %s" %(fbi))
        if self.mydata.final_byte == None:
            # the first content carrying a final block id
            self.mydata.final_byte = int(fbi)
        else:
            # every later final block id must agree with the first one
            assert self.mydata.final_byte == int(fbi), "get different final block id, old %s and new %s" % (self.mydata.final_byte, int(fbi))
    si = upcallInfo.ContentObject.signedInfo
    if si.type == pyndn.CONTENT_NACK:
        self.nack_content(upcallInfo)
    elif si.type == pyndn.CONTENT_DATA:
        if chunkinfo.packetN != temp:
            if self.mydata.final_byte != None and chunkinfo.end_byte > self.mydata.final_byte:
                # final chunk or illegal chunk
                log.debug("final chunk, thus size is shorter than expected")
            else:
                log.debug("expected packetN (%s) != real packetN (%s), final_byte (%s), upcallInfo: %s, chunksize:%s" % (chunkinfo.packetN, temp, self.mydata.final_byte, name, chunkinfo.chunk_size))
                #chunkinfo.packetN = temp
        self.chunkSizeEstimator.update_receiving(chunkinfo)
        # if self.mydata.final_byte!=None and chunkinfo.end_byte < self.mydata.final_byte:
        #     assert chunkinfo.data_size > 500, "chukinfo is strange, %s" %(chunkinfo)
        # In-order iff this is the oldest outstanding chunk (assumes the
        # pending dict preserves insertion order — TODO confirm).
        if name == self.mydata.unsatisfied_chunks_keyed_by_name.keys()[0]:
            self.in_order_content(upcallInfo)
        else:
            self.out_of_order_content(upcallInfo)
        # Collect up to two chunks waiting for re-expression.
        retxQ = []
        for chunkinfo in self.mydata.unsatisfied_chunks_keyed_by_name.itervalues():
            if chunkinfo.status == 0:  # waiting for re-expressing
                retxQ.append(chunkinfo)
                if len(retxQ) == 2:
                    break
        # here we do not check whether the request is legal or not
        for i in [0, 1]:  # express at most two Interests per received Data
            if self.window.packet_on_the_fly < self.window.get_cwnd():
                # re-expressing is prior to requesting new data
                if len(retxQ) != 0:
                    chunkinfo = retxQ.pop(0)
                    chunkinfo.status = 1
                    self.re_express_interest(chunkinfo)
                    continue
                if self.mydata.final_byte == None:
                    self.first_express_interest()
                elif self.mydata.final_byte != None and self.mydata.next_byte < self.mydata.final_byte:
                    self.first_express_interest()
    else:
        log.critical("unkown Data type: %s" % (upcallInfo.ContentObject))
def prepare(self, upcallInfo):
    """Answer a flow consumer's Interest with a ContentObject (or NACK).

    The Interest name is expected to contain ADAPTIVE_MOD_FLAG followed
    by two components: the expected data size and the begin byte
    offset.  Returns a data ContentObject, a NACK when begin_byte is
    past the end of the file, or None when the Interest is malformed or
    names a nonexistent / non-file path.
    """
    ist = upcallInfo.Interest
    ist_name = ist.name
    # Locate the adaptive-mode flag component; the two components after
    # it carry the expected data size and the begin byte offset.
    flag_index = None
    for i in range(len(ist_name) - 2):
        if ist_name[i] == ADAPTIVE_MOD_FLAG:
            flag_index = i
            break
    if flag_index is None:
        log.error("not a flow consumer's interest, ignore: %s" % (ist_name))
        return None
    expected_data_size = int(ist_name[flag_index + 1])
    begin_byte = int(ist_name[flag_index + 2])
    name = ist_name[:flag_index]  # content name, flag excluded
    name_str = str(name)
    if name_str in self.readers:
        # Reuse the cached Reader for this name.
        reader = self.readers[name_str]
        log.debug("%s in reader" % (name_str))
    else:
        if self.is_dir:
            # Map the Interest components between the registered prefix
            # and the flag onto a relative path below self.path.
            # matchedComps is assumed to be the NUMBER of matched
            # components, not an index — TODO confirm.
            fpath = self.path
            for i in range(upcallInfo.matchedComps, flag_index):
                fpath = os.path.join(fpath, ist_name[i])
            if not os.path.exists(fpath):
                log.critical("path %s from Interest %s does not exist" % (fpath, ist_name))
                return None
            if os.path.isdir(fpath):
                log.critical("path %s from Interest %s is not a file" % (fpath, ist_name))
                return None
        else:
            # Single-file mode: the Interest must name exactly our prefix.
            if upcallInfo.matchedComps != flag_index:
                log.critical("umatched ndn_name: %s, %s" % (ist_name, self.ndn_name))
                return None
            fpath = self.path
        reader = Reader(fpath=fpath)
        self.readers[name_str] = reader
        log.debug("add a new reader: %s" % (name_str))
    data = reader.read(begin_byte, expected_data_size)
    if data is None:
        # Consumer asked for bytes beyond the end of the file.
        log.critical("Interest %s: begin_byte %s > file_size %s" % (ist_name, begin_byte, reader.fsize))
        return self._nack_template(ist_name, reader)
    log.info("Interest: %s, expected_data_size: %s, begin_byte: %s, data_size: %s"
             % (ist.name, expected_data_size, begin_byte, len(data)))
    return self._data_template(ist_name, data, reader.fsize, pyndn.CONTENT_DATA)