async def main():
    """Video-stream producer.

    Registers the camera's prefix (``VIDEO_STREAM_NAME``, i.e.
    /local_manager/building_1/camera_1) with the local NFD, then captures a
    one-second video chunk per iteration, packetizes it, and prunes outdated
    data. Runs until cancelled.
    """
    # Set up face: pump the face's event queue until `running` is cleared.
    async def face_loop():
        nonlocal face, running
        while running:
            face.processEvents()
            await asyncio.sleep(0.001)

    face = Face()
    running = True
    # NOTE(review): `event_loop` is not defined in this function — presumably a
    # module-level asyncio event loop; confirm against the rest of the file.
    face_event = event_loop.create_task(face_loop())

    # register prefix in local NFD with it own name: /local_manager/building_1/camera_1
    keychain = KeyChain()
    face.setCommandSigningInfo(keychain, keychain.getDefaultCertificateName())
    prefix_id = face.registerPrefix(Name(VIDEO_STREAM_NAME), None, on_register_failed)
    filter_id = face.setInterestFilter(Name(VIDEO_STREAM_NAME), on_interest)
    print('Registered prefix ID {}'.format(prefix_id))
    print('Registered filter ID {}'.format(filter_id))

    cap = cv2.VideoCapture(0)
    # Capture a video every second.
    # FIX: the capture loop never exits on its own, so the cleanup below was
    # unreachable. Run the loop under try/finally so the camera is released,
    # the face loop is stopped, and its task is awaited even when this
    # coroutine is cancelled or capture raises.
    try:
        while True:
            cur_time = await capture_video_chunk(duration=1, cap=cap)
            prepare_packets(cur_time=cur_time, keychain=keychain)
            remove_outdated_data(cur_time)
    finally:
        cap.release()
        cv2.destroyAllWindows()
        running = False
        await face_event
class Server:
    """NDN computation server.

    Listens on SERVER_PREFIX for two kinds of interests:
    * command interests (COMMAND_PREFIX): decode a SegmentParameter message,
      record per-frame/per-model status entries, and fetch the input frames;
    * result interests (RESULT_PREFIX): serve (possibly segmented) results
      from storage, or reply with a status/retry code.

    Fetched frames are dispatched to the DeepLab or FST worker; workers call
    back via `on_process_finished`.
    """

    def __init__(self, deeplab_manager, fst_manager, _root_path, storage):
        # type: (DeepLab, Fst, str, IStorage) -> None
        self.face = None
        self.keychain = KeyChain()
        # self.namespace = Namespace(Name(SERVER_PREFIX).append(RESULT_PREFIX), self.keychain)
        # self.namespace.addOnObjectNeeded(self.on_result_interest)
        # Half the max packet size, leaving headroom for name/signature overhead.
        self.segment_size = Face.getMaxNdnPacketSize() // 2
        self.running = False
        self._restart = False  # set by on_register_failed to force a reconnect
        self.deeplab_manager = deeplab_manager
        self.fst_manager = fst_manager
        self.storage = storage
        # Workers report completion back into this server.
        deeplab_manager.on_finished = self.on_process_finished
        fst_manager.on_finished = self.on_process_finished
        self.fetcher = Fetcher(self.keychain, self.on_payload, self.storage, self.on_fetch_fail)
        self.command_filter_id = 0
        self.result_filter_id = 0
        # Supported operations: every FST style-transfer model plus "deeplab".
        self.operations_set = self.fst_manager.get_models() | {"deeplab"}
        # Status set, one item per frame
        # TODO: Start with unfinished tasks
        # self.status_set = {}

    def save_status(self, name, status):
        # type: (Name, ResultStatus) -> None
        """Save status to database"""
        self.storage.put(Name(STATUS_PREFIX).append(name), status.to_bytes())

    def load_status(self, name):
        """Load status from database, or None if absent.

        Strips a trailing "_meta" or segment component first so that all
        variants of a data name map to the same status record.
        """
        # Get exact prefix to data
        if name[-1] == Name.Component("_meta") or name[-1].isSegment():
            name = name[:-1]
        ret = self.storage.get(Name(STATUS_PREFIX).append(name))
        if ret is not None:
            return ResultStatus.from_bytes(ret)
        else:
            return None

    async def _run(self):
        """Main loop: (re)create the face and pump events; on connection
        refusal or registration failure, tear down and retry after
        DISCONN_RETRY_TIME seconds."""
        self.running = True
        while self.running:
            self.face = Face()
            self._restart = False
            try:
                self._network_start()
                logging.info("Starting...")
                while self.running and not self._restart:
                    self.face.processEvents()
                    await asyncio.sleep(0.01)
            except ConnectionRefusedError:
                logging.warning("Connection refused. Retry in %ss.", DISCONN_RETRY_TIME)
            finally:
                self.face.shutdown()
                self._network_stop()
            if self.running:
                time.sleep(DISCONN_RETRY_TIME)

    def run(self):
        """Blocking entry point: run `_run` on the asyncio event loop."""
        event_loop = asyncio.get_event_loop()
        try:
            event_loop.run_until_complete(self._run())
        finally:
            event_loop.close()

    def stop(self):
        """Ask the main loop to exit after the current iteration."""
        self.running = False

    def _network_start(self):
        """Register the server prefix and install command/result filters."""
        self.face.setCommandSigningInfo(self.keychain, self.keychain.getDefaultCertificateName())
        # self.namespace.setFace(self.face, lambda prefix: print("Register failed for"))
        self.face.registerPrefix(Name(SERVER_PREFIX), None, self.on_register_failed)
        self.command_filter_id = self.face.setInterestFilter(
            Name(SERVER_PREFIX).append(COMMAND_PREFIX), self.on_command)
        self.result_filter_id = self.face.setInterestFilter(
            Name(SERVER_PREFIX).append(RESULT_PREFIX), self.on_result_interest)
        self.fetcher.network_start(self.face)

    def _network_stop(self):
        """Detach the fetcher and remove both interest filters."""
        self.fetcher.network_stop()
        self.face.unsetInterestFilter(self.result_filter_id)
        self.face.unsetInterestFilter(self.command_filter_id)

    def on_register_failed(self, prefix):
        # type: (Name) -> None
        """Prefix registration failed: log and request a face restart."""
        logging.error("Register failed for prefix: %s", prefix.toUri())
        self._restart = True

    def on_command(self, _prefix, interest, _face, _interest_filter_id, _filter_obj):
        # type: (Name, Interest, Face, int, InterestFilter) -> None
        """Handle a processing command carried in the interest's last component.

        Validates the requested models, creates a STATUS_FETCHING record per
        (frame, model), then fetches each frame (or processes it immediately
        if already stored). Replies with a ServerResponse status code.
        """
        parameter_msg = SegmentParameterMessage()
        try:
            ProtobufTlv.decode(parameter_msg, interest.name[-1].getValue())
        except ValueError:
            self.nodata_reply(interest.name, RET_MALFORMED_COMMAND)
            return
        parameter = parameter_msg.segment_parameter
        prefix = Name()
        for compo in parameter.name.component:
            prefix.append(compo.decode("utf-8"))
        # Check operations
        for op in parameter.operations.components:
            model_name = op.model.decode("utf-8")
            if model_name not in self.operations_set:
                self.nodata_reply(interest.name, RET_NOT_SUPPORTED)
                return
        # Fetch frames
        for frame_id in range(parameter.start_frame, parameter.end_frame + 1):
            frame_name = Name(prefix).append(str(frame_id))
            for op in parameter.operations.components:
                model_name = op.model.decode("utf-8")
                data_name = Name(frame_name).append(model_name)
                logging.info("Request processed: %s", data_name)
                status = ResultStatus(prefix.toUri(), model_name, Common.getNowMilliseconds())
                status.status = STATUS_FETCHING
                # NOTE(review): "proecess" is a typo, but it matches the field
                # spelling used by ResultStatus elsewhere — do not "fix" one
                # side only.
                status.estimated_time = status.proecess_start_time + 10.0
                self.save_status(data_name, status)
        # Check data existence and trigger fetching process
        for frame_id in range(parameter.start_frame, parameter.end_frame + 1):
            frame_name = Name(prefix).append(str(frame_id))
            if self.storage.exists(frame_name):
                self.on_payload(frame_name)
            else:
                self.fetcher.fetch_data(frame_name)
        self.nodata_reply(interest.name, RET_OK, 10.0)

    # def on_result_interest(self, _namespace, needed_obj, _id):
    #     # type: (Namespace, Namespace, int) -> bool
    def on_result_interest(self, _prefix, interest, face, _interest_filter_id, _filter_obj):
        # type: (Name, Interest, Face, int, InterestFilter) -> bool
        """Serve a result Data packet for an incoming result interest.

        The name may end in a segment component, in "_meta", or in neither.
        Returns False only when the interest does not match our prefix.
        """
        prefix = Name(SERVER_PREFIX).append(RESULT_PREFIX)
        if not prefix.isPrefixOf(interest.name):
            # Wrong prefix
            return False
        data_name = interest.name[prefix.size():]
        logging.info("On result interest: %s", data_name.toUri())
        # key, stat = self._result_set_prefix_match(data_name)
        status = self.load_status(data_name)
        if status is None:
            # No such request
            self.nodata_reply(interest.name, RET_NO_REQUEST)
            return True
        if data_name[-1].isSegment():
            # Segment no suffix
            seg_no = data_name[-1].toSegment()
            result = self.storage.get(data_name.getPrefix(-1))
        elif data_name[-1] == Name("_meta")[0]:
            # MetaInfo suffix
            seg_no = -1
            result = self.storage.get(data_name.getPrefix(-1))
        else:
            # No suffix
            seg_no = None
            result = self.storage.get(data_name)
        if result is not None:
            # There are data
            segment_cnt = (len(result) + self.segment_size - 1) // self.segment_size
            # Note: I don't understand why namespace keep all data in memory
            metainfo = MetaInfo()
            # metainfo.setFinalBlockId(segment_cnt - 1)  # WHY this doesn't work?
            metainfo.setFinalBlockId(Name().appendSegment(segment_cnt - 1)[0])
            if segment_cnt > 1 and seg_no is None:
                # Fetch segmented data with no suffix will get only first segment
                seg_no = 0
                # NOTE(review): appendSequenceNumber (not appendSegment) here —
                # looks inconsistent with the isSegment()/toSegment() parsing
                # above; confirm against the consumer's naming convention.
                data_name.appendSequenceNumber(seg_no)
            data = Data(Name(prefix).append(data_name))
            data.setMetaInfo(metainfo)
            if seg_no == -1:
                # _meta
                data.content = self.storage.get(data_name)
            else:
                # data
                if segment_cnt > 1:
                    # Segmented
                    if seg_no < segment_cnt:
                        start_offset = seg_no * self.segment_size
                        end_offset = start_offset + self.segment_size
                        data.content = Blob(bytearray(result[start_offset:end_offset]))
                    else:
                        data.content = None
                else:
                    # No segmentation
                    data.content = Blob(bytearray(result))
            self.keychain.sign(data)
            face.putData(data)
            return True
        else:
            # Data are not ready
            if status.status == STATUS_NO_INPUT:
                self.nodata_reply(interest.name, RET_NO_INPUT)
            elif status.status == STATUS_FAILED:
                self.nodata_reply(interest.name, RET_EXECUTION_FAILED)
            else:
                self.nodata_reply(interest.name, RET_RETRY_AFTER,
                                  status.estimated_time - Common.getNowMilliseconds())
            return True

    def nodata_reply(self, name, code, retry_after=0.0):
        # type: (Name, int, float) -> None
        """Reply with a ServerResponse status packet (NACK unless RET_OK).

        When retry_after > 0.1 ms the freshness period is shortened so the
        client re-asks; otherwise the reply stays fresh for 600 ms.
        """
        logging.info("Reply with code: %s", code)
        data = Data(name)
        metainfo = MetaInfo()
        msg = ServerResponseMessage()
        msg.server_response.ret_code = code
        if code != RET_OK:
            metainfo.type = ContentType.NACK
        else:
            metainfo.type = ContentType.BLOB
        if retry_after > 0.1:
            metainfo.freshnessPeriod = int(retry_after / 10)
            msg.server_response.retry_after = int(retry_after)
        else:
            metainfo.freshnessPeriod = 600
        data.setMetaInfo(metainfo)
        data.setContent(ProtobufTlv.encode(msg))
        self.keychain.sign(data)
        self.face.putData(data)

    def on_payload(self, frame_name):
        # type: (Name) -> None
        """A frame arrived (or already existed): dispatch every pending
        (frame, model) job to the matching worker and record the outcome."""
        for model in self.operations_set:
            data_name = Name(frame_name).append(model)
            status = self.load_status(data_name)
            if status is None:
                continue  # nobody asked for this model on this frame
            if self.storage.exists(data_name):
                logging.info("Result exists: %s", data_name.toUri())
                continue
            logging.info("Ready to produce: %s", data_name.toUri())
            status.proecess_start_time = Common.getNowMilliseconds()
            # frame_name.toUri()[1:] drops the leading '/' — presumably the
            # workers expect a relative identifier; confirm with DeepLab/Fst.
            if model == "deeplab":
                ret = self.deeplab_manager.send(DeepLabRequest(frame_name.toUri()[1:], frame_name.toUri(), data_name))
            else:
                ret = self.fst_manager.send(FstRequest(frame_name.toUri()[1:], model, frame_name.toUri(), data_name))
            status.status = STATUS_PROCESSING if ret else STATUS_FAILED
            self.save_status(data_name, status)

    def on_process_finished(self, name_str, model_name):
        # type: (str, str) -> None
        """Worker callback: mark the job done and store the "_meta" record."""
        data_name = Name(name_str).append(model_name)
        logging.info("Process finished: %s", data_name.toUri())
        status = self.load_status(data_name)
        if status is None:
            logging.fatal("Database broken.")
            raise RuntimeError()
        status.end_time = Common.getNowMilliseconds()
        status.status = STATUS_SUCCEED
        # NOTE(review): the updated status is never written back via
        # save_status here — verify whether that is intentional.
        meta_name = Name(data_name).append("_meta")
        content_metainfo = ContentMetaInfo()
        content_metainfo.setContentType("png")
        content_metainfo.setTimestamp(status.end_time)
        content_metainfo.setHasSegments(True)
        self.storage.put(meta_name, content_metainfo.wireEncode().toBytes())

    def on_fetch_fail(self, frame_name):
        """Fetcher callback: mark every pending job on this frame NO_INPUT."""
        logging.error("Fail to fetch: {}".format(frame_name))
        for model in self.operations_set:
            data_name = Name(frame_name).append(model)
            status = self.load_status(data_name)
            if status is None:
                continue
            status.status = STATUS_NO_INPUT
            self.save_status(data_name, status)
class Controller_Listener(object):
    """NDN-OpenFlow controller-side listener.

    Registers the controller prefix with NFD and installs interest filters
    for Error, PacketIn, FlowRemoved and (in `ctrl_info_run`) CtrlInfo
    messages, replying on the supplied transport.
    """

    # FlowModDataList layout:
    # [ep(0),face(1),prefix(2),cookie(3),command(4),idle_timeout(5),
    #  hard_timeout(6),priority(7),buffer_id(8),out_face(9),flag(10),action(11)]
    _FLOWMOD_TEMPLATE = '*---*---{}---None---0x0000---3600---36000---1---None---{}---0x0001---0x0000'

    # Demo out-face id per node; anything unknown falls back to 255.
    _NODE_OUT_FACE = {
        'h1': 267, 'h2': 271, 'h3': 269, 'h4': 271, 'h5': 276,
        'h6': 261, 'h7': 260, 'h8': 260, 'h9': 261, 'h10': 255,
        'a': 255, 'b': 255, 'c': 255, 'd': 255, 'e': 255, 'f': 255,
    }
    _DEFAULT_OUT_FACE = 255

    def __init__(self):
        self.keyChain = KeyChain()
        self.isDone = False
        self.ofmsg = OFMSG()
        self.nodeid = OSCommand.getnodeid()
        self.face = Face()
        self.featurereq = FeatureReq()
        self.helloreq_name_list = []
        self.new_CtrlInfo_data = "---Initial CtrlInfo data---"  # used to get new ctrlinfo data and send to nodes.
        self.CtrlInfo_data = ""  # used to record used ctrlinfo data

    def _register_controller_prefix(self):
        """Register the controller prefix (with command signing) on the face."""
        ControllerPrefix = Name('/ndn/ie/tcd/controller01/ofndn/')
        self.face.setCommandSigningInfo(self.keyChain,
                                        self.keyChain.getDefaultCertificateName())
        self.face.registerPrefix(ControllerPrefix, self.onInterest_Mian,
                                 self.onRegisterFailed)
        return ControllerPrefix

    def run(self):
        """Register prefix + Error/PacketIn/FlowRemoved filters, then pump events."""
        self._register_controller_prefix()
        # filters:
        # (HelloReq filter intentionally not installed here.)
        error_msg_prefix = Name('/ndn/ie/tcd/controller01/ofndn/--/n1.0/1/0/0/')
        self.face.setInterestFilter(error_msg_prefix, self.onInterest_ErrorMsg)  # for Error msg
        packetin_msg_prefix = Name('/ndn/ie/tcd/controller01/ofndn/--/n1.0/10/0/0/')
        self.face.setInterestFilter(packetin_msg_prefix, self.onInterest_PacketIn)  # for packetin msg
        FlowRemoved_msg_prefix = Name('/ndn/ie/tcd/controller01/ofndn/--/n1.0/11/0/0/')
        self.face.setInterestFilter(FlowRemoved_msg_prefix, self.onInterest_FlowRemoved)  # for FlowRemoved msg
        # NOTE: the CtrlInfo filter cannot live here — it conflicts with the
        # HelloReq listener, since both occupy the 'listening channel' and
        # never release it. See ctrl_info_run().
        # Run the event loop forever. Use a short sleep to prevent the
        # Producer from using 100% of the CPU.
        while not self.isDone:  # listen hello cannot stop
            self.face.processEvents()
            time.sleep(0.01)

    def ctrl_info_run(self):
        """Register prefix + the CtrlInfo filter only, then pump events."""
        self._register_controller_prefix()
        # filters:
        CtrlInfo_msg_prefix = Name('/ndn/ie/tcd/controller01/ofndn/--/n1.0/36/0/0/')
        self.face.setInterestFilter(CtrlInfo_msg_prefix, self.onInterest_CtrlInfo)  # for CtrlInfo msg
        # Run the event loop forever. Use a short sleep to prevent the
        # Producer from using 100% of the CPU.
        while not self.isDone:  # listen hello cannot stop
            self.face.processEvents()
            time.sleep(0.01)

    def onInterest_PacketIn(self, mainPrefix, interest, transport, registeredPrefixId):
        """Answer a PacketIn interest with a FlowMod Data for the unknown prefix."""
        print("######### Received <<<PacketIn>>> Interest #########\n {0} \n".
              format(interest.getName().toUri()))
        (node_id, unknown_prefix) = NdnFlowTable.parse_Packetin_Interest(interest)
        node_id = node_id.strip('/')
        flowmod_data = self.create_PacketIn_Data(node_id, unknown_prefix)
        data = self.ofmsg.create_flowmod_data(interest, flowmod_data)
        transport.send(data.wireEncode().toBuffer())
        print('===== Send [ FlowMod Msg ] to {0}====='.format(node_id))

    def create_PacketIn_Data(self, node_id, unknown_prefix):
        """Build the demonstration FlowMod payload for `node_id`.

        Replaces the original 17-branch if/elif chain with a table lookup;
        only the out-face field varies between nodes.
        """
        out_face = self._NODE_OUT_FACE.get(node_id, self._DEFAULT_OUT_FACE)
        return self._FLOWMOD_TEMPLATE.format(unknown_prefix, out_face)

    def onInterest_FlowRemoved(self, mainPrefix, interest, transport, registeredPrefixId):
        """Log receipt of a FlowRemoved message (demo: no further action)."""
        print("------Received: <<<FlowRemoved>>> Msg ------")  # for test

    def onInterest_CtrlInfo(self, mainPrefix, interest, transport, registeredPrefixId):
        """Block until fresh control info is available, then send it back."""
        print(
            "******** Received <<<CtrlInfoReq>>> Interest ******** \n {0} \n".
            format(interest.getName().toUri()))
        # Poll every 5 s until new_CtrlInfo_data differs from what was last sent.
        while (self.new_CtrlInfo_data == self.CtrlInfo_data):
            time.sleep(5)
        self.CtrlInfo_data = self.new_CtrlInfo_data
        data = self.ofmsg.create_ctrlinfo_res_data(interest, self.CtrlInfo_data)
        transport.send(data.wireEncode().toBuffer())
        print("******** Sent <<<New CtrlInfo Res>>> Data ******** \n")

    def onInterest_Hello(self, mainPrefix, interest, transport, registeredPrefixId):
        """Reply to a HelloReq and record the node in the prefix table."""
        print("\n --------Received <<<HelloReq>>> Interest --------\n {0} \n".
              format(interest.getName().toUri()))  # for test
        print("--------Sent <<<HelloRes>>> Data -------- \n")
        hello_data = '[This is hello response data]'
        data = self.ofmsg.create_hello_res_data(interest, hello_data)
        transport.send(data.wireEncode().toBuffer())
        NodePrefixTable.updatenodeprefixtable(interest)  # to add NPT and fetch feature

    def onInterest_ErrorMsg(self, mainPrefix, interest, transport, registeredPrefixId):
        """Acknowledge an Error report from a node."""
        print("--------received <<<Error Msg>>> interest:\n" +
              interest.getName().toUri())  # for test
        errormsg_data = 'Error Report Acknowledge'
        data = self.ofmsg.create_errorAck_data(interest, errormsg_data)
        transport.send(data.wireEncode().toBuffer())
        print("--------sent <<<Error Msg ACK>>>---------")
        # parse the errorMsg interest to get error information.

    def onInterest_Mian(self, mainPrefix, interest, transport, registeredPrefixId):
        """Catch-all handler for the registered prefix (intentionally a no-op).

        Name kept as-is ("Mian" typo) because registerPrefix references it.
        """
        pass

    def onRegisterFailed(self, ControllerPrefix):
        """Stop the event loop when prefix registration fails."""
        print("Register failed for prefix", ControllerPrefix.toUri())
        self.isDone = True
class TemperatureProducer(object):
    """
    A temperature sensor that publishes data periodically.
    Temperature Data packets have the name of /<prefix>/<timestamp>
    """

    def __init__(self, prefix: Name, repo_name: Optional[Name]):
        # prefix: name under which temperature Data is published
        # repo_name: repo to send insert commands to (used when `use_repo` is set)
        self.prefix = prefix
        self.repo_name = repo_name
        self.face = Face()
        self.keychain = KeyChain()
        self.running = True
        self.name_str_to_data = dict()  # name URI -> signed Data packet cache
        self.face.setCommandSigningInfo(self.keychain, self.keychain.getDefaultCertificateName())
        self.face.registerPrefix(self.prefix, None,
                                 lambda prefix: logging.error("Prefix registration failed: %s", prefix))
        self.filter_id = self.face.setInterestFilter(self.prefix, self.on_interest)
        event_loop = asyncio.get_event_loop()
        event_loop.create_task(self.face_loop())
        self.latest_tp = 0  # timestamp of the most recently published packet

    async def send_cmd_interest(self):
        """Send a repo insert command covering the latest published packet."""
        event_loop = asyncio.get_event_loop()
        # NOTE(review): this spawns an extra face_loop per command on top of
        # the one started in __init__ — presumably harmless but redundant;
        # confirm before removing.
        face_task = event_loop.create_task(self.face_loop())

        parameter = RepoCommandParameterMessage()
        for compo in self.prefix:
            parameter.repo_command_parameter.name.component.append(compo.getValue().toBytes())
        parameter.repo_command_parameter.start_block_id = self.latest_tp
        parameter.repo_command_parameter.end_block_id = parameter.repo_command_parameter.start_block_id
        param_blob = ProtobufTlv.encode(parameter)

        # Prepare cmd interest
        name = Name(self.repo_name).append("insert").append(Name.Component(param_blob))
        interest = Interest(name)
        interest.canBePrefix = True
        self.face.makeCommandInterest(interest)

        logging.info("Express interest: {}".format(interest.getName()))
        ret = await fetch_data_packet(self.face, interest)
        if not isinstance(ret, Data):
            logging.warning("Insertion failed")
        else:
            # Parse response
            response = RepoCommandResponseMessage()
            try:
                ProtobufTlv.decode(response, ret.content)
                logging.info('Insertion command accepted: status code {}'
                             .format(response.repo_command_response.status_code))
            except RuntimeError as exc:
                # FIX: was logging.warning('Response decoding failed', exc) —
                # the extra arg had no placeholder, so logging raised/dropped it.
                logging.warning('Response decoding failed: %s', exc)

    async def face_loop(self):
        """Pump face events while the producer is running."""
        while self.running:
            self.face.processEvents()
            await asyncio.sleep(0.001)

    def get_temp(self) -> int:
        """Return a simulated temperature reading in [0, 35]."""
        return random.randint(0, 35)

    def publish_temp_packet(self):
        """Create, sign and cache a temperature packet named by a 5s-aligned
        timestamp; optionally trigger a repo insertion."""
        tp = int(time.time())
        tp = tp - (tp % 5)  # align to 5-second boundaries so names are stable
        self.latest_tp = tp
        data_name = Name(self.prefix).append(str(self.latest_tp))
        data = Data(data_name)
        temp = self.get_temp()
        content_blob = Blob(temp.to_bytes(2, byteorder='little'))
        data.setContent(content_blob)
        data.metaInfo.setFreshnessPeriod(1000000)
        logging.info('Publish temp data {}, {} degree'.format(data.getName(), temp))
        self.keychain.sign(data)
        self.name_str_to_data[str(data.getName())] = data
        if use_repo is True:
            event_loop = asyncio.get_event_loop()
            event_loop.create_task(self.send_cmd_interest())
            logging.info("send repo insertion command")

    def on_interest(self, _prefix, interest: Interest, face, _filter_id, _filter):
        """Serve a cached temperature packet on exact-name match."""
        name = str(interest.getName())
        if name in self.name_str_to_data:
            self.face.putData(self.name_str_to_data[name])
            logging.info('Serve data: {}'.format(name))

    async def run(self):
        """
        Need to publish data with period of at least 5 second, otherwise
        Data packets are not immutable
        """
        while self.running:
            self.publish_temp_packet()
            await asyncio.sleep(0.5)
            # Go quiet for the rest of the 5s window, then listen again.
            self.face.unsetInterestFilter(self.filter_id)
            await asyncio.sleep(4.5)
            self.filter_id = self.face.setInterestFilter(self.prefix, self.on_interest)
class PutfileClient(object):
    """
    This client serves random segmented data
    """

    def __init__(self, args):
        # args: parsed CLI namespace with repo_name, file_path and name attributes
        self.repo_name = Name(args.repo_name)
        self.file_path = args.file_path
        self.name_at_repo = Name(args.name)
        self.face = Face()
        self.keychain = KeyChain()
        self.face.setCommandSigningInfo(
            self.keychain, self.keychain.getDefaultCertificateName())
        self.running = True
        self.m_name_str_to_data = dict()  # name URI -> prepared Data segment
        self.n_packets = 0
        self.prepare_data()
        self.face.registerPrefix(self.name_at_repo, None, self.on_register_failed)
        self.face.setInterestFilter(self.name_at_repo, self.on_interest)

    async def face_loop(self):
        """Pump face events while the client is running."""
        while self.running:
            self.face.processEvents()
            await asyncio.sleep(0.001)

    def prepare_data(self):
        """
        Shard file into data packets.
        """
        logging.info('preparing data')
        with open(self.file_path, 'rb') as binary_file:
            b_array = bytearray(binary_file.read())
        if len(b_array) == 0:
            logging.warning("File is 0 bytes")
            return
        # FIX: integer ceiling division instead of float division + int() —
        # avoids float rounding on very large files and is the Python idiom.
        self.n_packets = (len(b_array) - 1) // MAX_BYTES_IN_DATA_PACKET + 1
        logging.info('There are {} packets in total'.format(self.n_packets))
        seq = 0
        for i in range(0, len(b_array), MAX_BYTES_IN_DATA_PACKET):
            data = Data(Name(self.name_at_repo).append(str(seq)))
            data.metaInfo.freshnessPeriod = 100000
            data.setContent(
                b_array[i:min(i + MAX_BYTES_IN_DATA_PACKET, len(b_array))])
            data.metaInfo.setFinalBlockId(
                Name.Component.fromSegment(self.n_packets - 1))
            self.keychain.signWithSha256(data)
            self.m_name_str_to_data[str(data.getName())] = data
            seq += 1

    @staticmethod
    def on_register_failed(prefix):
        """Log a failed prefix registration."""
        logging.error("Prefix registration failed: %s", prefix)

    def on_interest(self, _prefix, interest: Interest, face, _filter_id, _filter):
        """Serve a prepared segment on exact-name match."""
        logging.info('On interest: {}'.format(interest.getName()))
        if str(interest.getName()) in self.m_name_str_to_data:
            self.face.putData(self.m_name_str_to_data[str(interest.getName())])
            logging.info('Serve data: {}'.format(interest.getName()))
        else:
            logging.info('Data does not exist: {}'.format(interest.getName()))

    async def insert_segmented_file(self):
        """Send the repo insert command, then poll until insertion completes."""
        event_loop = asyncio.get_event_loop()
        face_task = event_loop.create_task(self.face_loop())

        parameter = RepoCommandParameterMessage()
        for compo in self.name_at_repo:
            parameter.repo_command_parameter.name.component.append(
                compo.getValue().toBytes())
        parameter.repo_command_parameter.start_block_id = 0
        parameter.repo_command_parameter.end_block_id = self.n_packets - 1
        param_blob = ProtobufTlv.encode(parameter)

        # Prepare cmd interest
        name = Name(self.repo_name).append('insert').append(
            Name.Component(param_blob))
        interest = Interest(name)
        self.face.makeCommandInterest(interest)

        logging.info('Send insert command interest')
        ret = await fetch_data_packet(self.face, interest)
        if not isinstance(ret, Data):
            logging.warning('Insert failed')
            return

        response = RepoCommandResponseMessage()
        try:
            ProtobufTlv.decode(response, ret.content)
        except RuntimeError as exc:
            # FIX: use lazy %s formatting (the old call passed `exc` with no
            # placeholder) and bail out — previously execution fell through
            # and polled the repo with a default-initialized process_id.
            logging.warning('Response decoding failed: %s', exc)
            return
        process_id = response.repo_command_response.process_id
        status_code = response.repo_command_response.status_code
        logging.info('Insertion process {} accepted: status code {}'.format(
            process_id, status_code))

        # Use insert check command to probe if insert process is completed
        checker = CommandChecker(self.face, self.keychain)
        while True:
            response = await checker.check_insert(self.repo_name, process_id)
            if response is None or response.repo_command_response.status_code == 300:
                await asyncio.sleep(1)
            elif response.repo_command_response.status_code == 200:
                logging.info(
                    'Insert process {} status: {}, insert_num: {}'.format(
                        process_id,
                        response.repo_command_response.status_code,
                        response.repo_command_response.insert_num))
                break
            else:
                # FIX: was `assert (False)` — stripped under `python -O`;
                # raise explicitly so an unexpected status never passes silently.
                raise RuntimeError('Unexpected insert-check status code: {}'
                                   .format(response.repo_command_response.status_code))

        self.running = False
        await face_task
class FeatureRes(object):
    """Node-side responder for the NDN-OpenFlow protocol.

    Registers the node's prefix and answers FeatureReq, PacketOut and
    FaceMod interests from the controller. Each handler sets `isDone`,
    though `run` ignores it and loops on a fixed iteration count.
    """

    def __init__(self):
        self.keyChain = KeyChain()
        self.isDone = False
        self.nodeid = OSCommand.getnodeid()
        self.face = Face()
        self.featurereq = FeatureReq()

    def run(self):
        """Register the node prefix and filters, then pump events."""
        NodePrefixString = '/ndn/{}-site/{}/ofndn'.format(
            self.nodeid, self.nodeid)
        NodePrefix = Name(NodePrefixString)
        self.face.setCommandSigningInfo(self.keyChain, \
            self.keyChain.getDefaultCertificateName())
        # self.face.registerPrefix(NodePrefix, self.onInterest, self.onRegisterFailed)  # run prefix
        self.face.registerPrefix(NodePrefix, self.onInterest_Mian,
                                 self.onRegisterFailed)  # run prefix
        # filters:
        feature_msg_prefix = Name('/ndn/{}-site/{}/ofndn/feature'.format(
            self.nodeid, self.nodeid))
        self.face.setInterestFilter(feature_msg_prefix,
                                    self.onInterest_Feature)  # for FeatureReq
        packetout_msg_prefix = Name(
            '/ndn/{}-site/{}/ofndn/--/n1.0/13/0/0'.format(
                self.nodeid, self.nodeid))
        self.face.setInterestFilter(
            packetout_msg_prefix, self.onInterest_PacketOut)  # for PacketOut msg
        facemod_msg_prefix = Name(
            '/ndn/{}-site/{}/ofndn/--/n1.0/16/0/0'.format(
                self.nodeid, self.nodeid))
        self.face.setInterestFilter(facemod_msg_prefix,
                                    self.onInterest_FaceMod)  # for FaceMod msg
        print(NodePrefix.toUri())
        # # Run the event loop forever. Use a short sleep to
        # # prevent the Producer from using 100% of the CPU.
        # while not self.isDone:
        #     self.face.processEvents()
        #     time.sleep(0.01)
        # NOTE(review): bounded busy-poll instead of `while not self.isDone`
        # — at 0.01s per iteration this runs for ~23 days; presumably a
        # stand-in for "forever". The isDone flags set by the handlers are
        # never consulted here.
        countnumber = 0
        while countnumber < 200000000:
            self.face.processEvents()
            time.sleep(0.01)
            countnumber += 1
        # print("10s feature response listening stop")

    def onInterest_PacketOut(self, mainPrefix, interest, transport,
                             registeredPrefixId):
        """Apply a PacketOut message to the local NDN flow table."""
        print("--------Received <<<PacketOut>>> interest ----------")
        PacketOut_suffix = NdnFlowTable.parse_PacketOut_Interest(interest)
        NdnFlowTable.derectly_updatendnflowtable(PacketOut_suffix, self.nodeid)
        print("--------Updated the NdnFlowTable")
        self.isDone = True

    def onInterest_FaceMod(self, mainPrefix, interest, transport,
                           registeredPrefixId):
        """Apply a FaceMod message: modify a local face via OSCommand."""
        print("--------Received <<<FaceMod>>> interest Msg")
        FaceMod_suffix_list = NdnFlowTable.parse_FaceMod_Interest(interest)
        # FaceMod_suffix_list pattern: [faceid, action]
        print(OSCommand.facemod(FaceMod_suffix_list))  # modify the face and print the command output
        self.isDone = True

    def onInterest_Feature(self, mainPrefix, interest, transport,
                           registeredPrefixId):
        """Answer a FeatureReq with this node's id, faces and FIB."""
        print("++++++++ Received <<<FeatureReq>>> interest ++++++++ \n")
        feature_face = OSCommand.getface()
        feature_FIB = OSCommand.getFIB()
        nodeid = bytes(self.nodeid, 'utf-8')
        feature_data = nodeid + b'----' + feature_face + b'----' + feature_FIB
        data = OFMSG().create_feature_res_data(interest, feature_data)
        transport.send(data.wireEncode().toBuffer())
        print("++++++++ Sent <<<FeatureRes>>> Data ++++++++ \n")
        self.isDone = True

    def onInterest_Mian(self, mainPrefix, interest, transport,
                        registeredPrefixId):
        # Catch-all for the registered prefix; intentionally a no-op.
        # (Name keeps the original "Mian" typo — registerPrefix references it.)
        pass
        # print("--------received Main interest:" + interest.getName().toUri())

    def onRegisterFailed(self, ControllerPrefix):
        """Stop listening when prefix registration fails."""
        print("Register failed for prefix", ControllerPrefix.toUri())
        self.isDone = True