class TemperatureProducer(object):
    """
    A temperature sensor that publishes data periodically.

    Temperature Data packets have the name /<prefix>/<timestamp>.
    Optionally pushes each published packet into a repo via the repo
    insertion command protocol.
    """

    def __init__(self, prefix: Name, repo_name: Optional[Name]):
        """
        :param prefix: Name prefix under which temperature data is published.
        :param repo_name: Name of the repo to insert data into (may be None
            when repo insertion is not used).
        """
        self.prefix = prefix
        self.repo_name = repo_name
        self.face = Face()
        self.keychain = KeyChain()
        self.running = True
        # Maps stringified Data names to the signed Data packets they identify.
        self.name_str_to_data = dict()
        self.face.setCommandSigningInfo(
            self.keychain, self.keychain.getDefaultCertificateName())
        self.face.registerPrefix(
            self.prefix, None,
            lambda prefix: logging.error("Prefix registration failed: %s", prefix))
        self.filter_id = self.face.setInterestFilter(self.prefix, self.on_interest)
        # Drive the face's event processing in the background for the
        # lifetime of this producer.
        event_loop = asyncio.get_event_loop()
        event_loop.create_task(self.face_loop())
        # Timestamp (seconds, rounded to 5 s) of the latest published packet.
        self.latest_tp = 0

    async def send_cmd_interest(self):
        """
        Send a repo insertion command covering the latest published packet
        and log whether the repo accepted it.
        """
        # NOTE: __init__ already started face_loop(); the original code
        # spawned a second, redundant face_loop task here and never awaited
        # it -- removed.
        parameter = RepoCommandParameterMessage()
        for compo in self.prefix:
            parameter.repo_command_parameter.name.component.append(
                compo.getValue().toBytes())
        # A single packet: start and end block ids are both the latest
        # publication timestamp.
        parameter.repo_command_parameter.start_block_id = self.latest_tp
        parameter.repo_command_parameter.end_block_id = \
            parameter.repo_command_parameter.start_block_id
        param_blob = ProtobufTlv.encode(parameter)

        # Prepare cmd interest: /<repo_name>/insert/<encoded parameters>
        name = Name(self.repo_name).append("insert").append(Name.Component(param_blob))
        interest = Interest(name)
        interest.canBePrefix = True
        self.face.makeCommandInterest(interest)

        logging.info("Express interest: {}".format(interest.getName()))
        ret = await fetch_data_packet(self.face, interest)
        if not isinstance(ret, Data):
            logging.warning("Insertion failed")
        else:
            # Parse response
            response = RepoCommandResponseMessage()
            try:
                ProtobufTlv.decode(response, ret.content)
                logging.info('Insertion command accepted: status code {}'
                             .format(response.repo_command_response.status_code))
            except RuntimeError as exc:
                # Fixed: the exception must be a %-style lazy argument; the
                # original passed it as a bare extra positional, which
                # produces a logging formatting error.
                logging.warning('Response decoding failed: %s', exc)

    async def face_loop(self):
        """Pump face events until self.running is cleared."""
        while self.running:
            self.face.processEvents()
            await asyncio.sleep(0.001)

    def get_temp(self) -> int:
        """Return a simulated temperature reading in [0, 35] degrees."""
        return random.randint(0, 35)

    def publish_temp_packet(self):
        """
        Create, sign and cache a temperature Data packet named
        /<prefix>/<timestamp>, with the timestamp rounded down to a multiple
        of 5 seconds so the name is stable within each publishing window.
        Triggers repo insertion when the module-level use_repo flag is set.
        """
        tp = int(time.time())
        tp = tp - (tp % 5)  # round down to the 5-second window boundary
        self.latest_tp = tp
        data_name = Name(self.prefix).append(str(self.latest_tp))
        data = Data(data_name)
        temp = self.get_temp()
        content_blob = Blob(temp.to_bytes(2, byteorder='little'))
        data.setContent(content_blob)
        data.metaInfo.setFreshnessPeriod(1000000)
        logging.info('Publish temp data {}, {} degree'.format(data.getName(), temp))
        self.keychain.sign(data)
        self.name_str_to_data[str(data.getName())] = data
        # use_repo is a module-level flag -- presumably set from the CLI;
        # TODO confirm against the module preamble (not visible here).
        if use_repo:
            event_loop = asyncio.get_event_loop()
            event_loop.create_task(self.send_cmd_interest())
            logging.info("send repo insertion command")

    def on_interest(self, _prefix, interest: Interest, face, _filter_id, _filter):
        """Serve a cached Data packet whose name exactly matches the interest."""
        name = str(interest.getName())
        if name in self.name_str_to_data:
            self.face.putData(self.name_str_to_data[name])
            logging.info('Serve data: {}'.format(name))

    async def run(self):
        """
        Need to publish data with period of at least 5 second, otherwise
        Data packets are not immutable.

        The interest filter is lifted for most of each period so interests
        for the stale window are not answered from this cache.
        """
        while self.running:
            self.publish_temp_packet()
            await asyncio.sleep(0.5)
            self.face.unsetInterestFilter(self.filter_id)
            await asyncio.sleep(4.5)
            self.filter_id = self.face.setInterestFilter(self.prefix, self.on_interest)
class Server:
    """
    NDN server that accepts segment-processing commands, fetches the input
    frames, dispatches them to the DeepLab / FST model managers, and serves
    the (possibly segmented) results back under SERVER_PREFIX.
    """

    def __init__(self, deeplab_manager, fst_manager, _root_path, storage):
        # type: (DeepLab, Fst, str, IStorage) -> None
        """
        :param deeplab_manager: manager that runs the DeepLab model.
        :param fst_manager: manager that runs FST style-transfer models.
        :param _root_path: unused.
        :param storage: key-value storage for frames, results and statuses.
        """
        self.face = None
        self.keychain = KeyChain()
        # Half the max packet size leaves headroom for name, metainfo and
        # signature within one NDN packet.
        self.segment_size = Face.getMaxNdnPacketSize() // 2
        self.running = False
        self._restart = False
        self.deeplab_manager = deeplab_manager
        self.fst_manager = fst_manager
        self.storage = storage
        deeplab_manager.on_finished = self.on_process_finished
        fst_manager.on_finished = self.on_process_finished
        self.fetcher = Fetcher(self.keychain, self.on_payload, self.storage,
                               self.on_fetch_fail)
        self.command_filter_id = 0
        self.result_filter_id = 0
        # "deeplab" plus every model the FST manager provides.
        self.operations_set = self.fst_manager.get_models() | {"deeplab"}
        # Status set, one item per frame
        # TODO: Start with unfinished tasks

    def save_status(self, name, status):
        # type: (Name, ResultStatus) -> None
        """Save status to database under STATUS_PREFIX."""
        self.storage.put(Name(STATUS_PREFIX).append(name), status.to_bytes())

    def load_status(self, name):
        # type: (Name) -> Optional[ResultStatus]
        """
        Load status from database, or None when no status exists.

        A trailing "_meta" or segment component is stripped first so every
        variant of a result name maps to the same status entry.
        """
        if name[-1] == Name.Component("_meta") or name[-1].isSegment():
            name = name[:-1]
        ret = self.storage.get(Name(STATUS_PREFIX).append(name))
        if ret is not None:
            return ResultStatus.from_bytes(ret)
        else:
            return None

    async def _run(self):
        """
        Main loop: connect to the forwarder and pump face events until
        stop() is called, reconnecting after DISCONN_RETRY_TIME on failure
        or on a requested restart.
        """
        self.running = True
        while self.running:
            self.face = Face()
            self._restart = False
            try:
                self._network_start()
                logging.info("Starting...")
                while self.running and not self._restart:
                    self.face.processEvents()
                    await asyncio.sleep(0.01)
            except ConnectionRefusedError:
                logging.warning("Connection refused. Retry in %ss.", DISCONN_RETRY_TIME)
            finally:
                self.face.shutdown()
                self._network_stop()
                if self.running:
                    # Fixed: use asyncio.sleep -- the original called
                    # time.sleep inside this coroutine, blocking the whole
                    # event loop for the retry interval.
                    await asyncio.sleep(DISCONN_RETRY_TIME)

    def run(self):
        """Blocking entry point: drive _run() on the asyncio event loop."""
        event_loop = asyncio.get_event_loop()
        try:
            event_loop.run_until_complete(self._run())
        finally:
            event_loop.close()

    def stop(self):
        """Ask the main loop to exit."""
        self.running = False

    def _network_start(self):
        """Register the server prefix and interest filters on the fresh face."""
        self.face.setCommandSigningInfo(
            self.keychain, self.keychain.getDefaultCertificateName())
        self.face.registerPrefix(Name(SERVER_PREFIX), None, self.on_register_failed)
        self.command_filter_id = self.face.setInterestFilter(
            Name(SERVER_PREFIX).append(COMMAND_PREFIX), self.on_command)
        self.result_filter_id = self.face.setInterestFilter(
            Name(SERVER_PREFIX).append(RESULT_PREFIX), self.on_result_interest)
        self.fetcher.network_start(self.face)

    def _network_stop(self):
        """Detach the fetcher and remove both interest filters."""
        self.fetcher.network_stop()
        self.face.unsetInterestFilter(self.result_filter_id)
        self.face.unsetInterestFilter(self.command_filter_id)

    def on_register_failed(self, prefix):
        # type: (Name) -> None
        """Prefix registration failed; request a reconnect of the main loop."""
        logging.error("Register failed for prefix: %s", prefix.toUri())
        self._restart = True

    def on_command(self, _prefix, interest, _face, _interest_filter_id, _filter_obj):
        # type: (Name, Interest, Face, int, InterestFilter) -> None
        """
        Handle a processing command: decode the SegmentParameter from the
        interest name, validate the requested models, record a FETCHING
        status for every (frame, model) pair, then trigger frame fetching.
        """
        parameter_msg = SegmentParameterMessage()
        try:
            ProtobufTlv.decode(parameter_msg, interest.name[-1].getValue())
        except (ValueError, RuntimeError):
            # Fixed: ProtobufTlv.decode raises RuntimeError on malformed
            # TLV (the exception the other command clients in this file
            # catch); catching only ValueError let malformed commands
            # escape this handler.
            self.nodata_reply(interest.name, RET_MALFORMED_COMMAND)
            return
        parameter = parameter_msg.segment_parameter
        prefix = Name()
        for compo in parameter.name.component:
            prefix.append(compo.decode("utf-8"))
        # Reject the whole command if any requested model is unknown.
        for op in parameter.operations.components:
            model_name = op.model.decode("utf-8")
            if model_name not in self.operations_set:
                self.nodata_reply(interest.name, RET_NOT_SUPPORTED)
                return
        # Record a FETCHING status for every (frame, model) pair.
        for frame_id in range(parameter.start_frame, parameter.end_frame + 1):
            frame_name = Name(prefix).append(str(frame_id))
            for op in parameter.operations.components:
                model_name = op.model.decode("utf-8")
                data_name = Name(frame_name).append(model_name)
                logging.info("Request processed: %s", data_name)
                status = ResultStatus(prefix.toUri(), model_name,
                                      Common.getNowMilliseconds())
                status.status = STATUS_FETCHING
                # NOTE: 'proecess_start_time' is the (misspelled) attribute
                # name defined on ResultStatus; kept for compatibility.
                status.estimated_time = status.proecess_start_time + 10.0
                self.save_status(data_name, status)
        # Check data existence and trigger fetching process
        for frame_id in range(parameter.start_frame, parameter.end_frame + 1):
            frame_name = Name(prefix).append(str(frame_id))
            if self.storage.exists(frame_name):
                self.on_payload(frame_name)
            else:
                self.fetcher.fetch_data(frame_name)
        self.nodata_reply(interest.name, RET_OK, 10.0)

    def on_result_interest(self, _prefix, interest, face, _interest_filter_id,
                           _filter_obj):
        # type: (Name, Interest, Face, int, InterestFilter) -> bool
        """
        Serve a result (whole, one segment, or its _meta packet), or reply
        with a status NACK when the result is not ready yet.

        :return: True if the interest was handled, False on a prefix mismatch.
        """
        prefix = Name(SERVER_PREFIX).append(RESULT_PREFIX)
        if not prefix.isPrefixOf(interest.name):
            # Wrong prefix
            return False
        data_name = interest.name[prefix.size():]
        logging.info("On result interest: %s", data_name.toUri())
        status = self.load_status(data_name)
        if status is None:
            # No such request
            self.nodata_reply(interest.name, RET_NO_REQUEST)
            return True
        if data_name[-1].isSegment():
            # Segment number suffix
            seg_no = data_name[-1].toSegment()
            result = self.storage.get(data_name.getPrefix(-1))
        elif data_name[-1] == Name("_meta")[0]:
            # MetaInfo suffix: seg_no == -1 marks the _meta case below.
            seg_no = -1
            result = self.storage.get(data_name.getPrefix(-1))
        else:
            # No suffix
            seg_no = None
            result = self.storage.get(data_name)
        if result is not None:
            # There are data
            segment_cnt = (len(result) + self.segment_size - 1) // self.segment_size
            metainfo = MetaInfo()
            metainfo.setFinalBlockId(Name().appendSegment(segment_cnt - 1)[0])
            if segment_cnt > 1 and seg_no is None:
                # Fetching segmented data with no suffix returns the first
                # segment only.
                seg_no = 0
                # Fixed: append a *segment* component, matching the
                # isSegment()/toSegment() parsing above and the FinalBlockId;
                # the original appended a sequence-number component.
                data_name.appendSegment(seg_no)
            data = Data(Name(prefix).append(data_name))
            data.setMetaInfo(metainfo)
            if seg_no == -1:
                # _meta: the content is the stored ContentMetaInfo blob.
                data.content = self.storage.get(data_name)
            else:
                if segment_cnt > 1:
                    # Segmented
                    if seg_no < segment_cnt:
                        start_offset = seg_no * self.segment_size
                        end_offset = start_offset + self.segment_size
                        data.content = Blob(bytearray(result[start_offset:end_offset]))
                    else:
                        # Out-of-range segment: empty content.
                        data.content = None
                else:
                    # No segmentation
                    data.content = Blob(bytearray(result))
            self.keychain.sign(data)
            face.putData(data)
            return True
        else:
            # Data are not ready
            if status.status == STATUS_NO_INPUT:
                self.nodata_reply(interest.name, RET_NO_INPUT)
            elif status.status == STATUS_FAILED:
                self.nodata_reply(interest.name, RET_EXECUTION_FAILED)
            else:
                self.nodata_reply(interest.name, RET_RETRY_AFTER,
                                  status.estimated_time - Common.getNowMilliseconds())
            return True

    def nodata_reply(self, name, code, retry_after=0.0):
        # type: (Name, int, float) -> None
        """
        Reply with a ServerResponseMessage carrying a return code.

        Non-OK codes are marked as NACK content; a positive retry_after (ms)
        is forwarded to the client and bounds the reply's freshness.
        """
        logging.info("Reply with code: %s", code)
        data = Data(name)
        metainfo = MetaInfo()
        msg = ServerResponseMessage()
        msg.server_response.ret_code = code
        if code != RET_OK:
            metainfo.type = ContentType.NACK
        else:
            metainfo.type = ContentType.BLOB
        if retry_after > 0.1:
            # Keep the reply fresh for a fraction of the retry interval so
            # clients re-ask before retry_after elapses.
            metainfo.freshnessPeriod = int(retry_after / 10)
            msg.server_response.retry_after = int(retry_after)
        else:
            metainfo.freshnessPeriod = 600
        data.setMetaInfo(metainfo)
        data.setContent(ProtobufTlv.encode(msg))
        self.keychain.sign(data)
        self.face.putData(data)

    def on_payload(self, frame_name):
        # type: (Name) -> None
        """An input frame became available: dispatch every pending model on it."""
        for model in self.operations_set:
            data_name = Name(frame_name).append(model)
            status = self.load_status(data_name)
            if status is None:
                # This model was never requested for this frame.
                continue
            if self.storage.exists(data_name):
                logging.info("Result exists: %s", data_name.toUri())
                continue
            logging.info("Ready to produce: %s", data_name.toUri())
            status.proecess_start_time = Common.getNowMilliseconds()
            if model == "deeplab":
                ret = self.deeplab_manager.send(
                    DeepLabRequest(frame_name.toUri()[1:], frame_name.toUri(),
                                   data_name))
            else:
                ret = self.fst_manager.send(
                    FstRequest(frame_name.toUri()[1:], model, frame_name.toUri(),
                               data_name))
            status.status = STATUS_PROCESSING if ret else STATUS_FAILED
            self.save_status(data_name, status)

    def on_process_finished(self, name_str, model_name):
        # type: (str, str) -> None
        """A model run finished: mark the status SUCCEED and store result metainfo."""
        data_name = Name(name_str).append(model_name)
        logging.info("Process finished: %s", data_name.toUri())
        status = self.load_status(data_name)
        if status is None:
            logging.fatal("Database broken.")
            raise RuntimeError()
        status.end_time = Common.getNowMilliseconds()
        status.status = STATUS_SUCCEED
        # Fixed: persist the updated status -- the original mutated it but
        # never called save_status, leaving the stored state stale.
        self.save_status(data_name, status)
        meta_name = Name(data_name).append("_meta")
        content_metainfo = ContentMetaInfo()
        content_metainfo.setContentType("png")
        content_metainfo.setTimestamp(status.end_time)
        content_metainfo.setHasSegments(True)
        self.storage.put(meta_name, content_metainfo.wireEncode().toBytes())

    def on_fetch_fail(self, frame_name):
        # type: (Name) -> None
        """An input frame could not be fetched: mark all pending models NO_INPUT."""
        logging.error("Fail to fetch: {}".format(frame_name))
        for model in self.operations_set:
            data_name = Name(frame_name).append(model)
            status = self.load_status(data_name)
            if status is None:
                continue
            status.status = STATUS_NO_INPUT
            self.save_status(data_name, status)
class PutfileClient(object):
    """
    This client shards a local file into segmented Data packets, serves
    them from memory, and asks a repo to insert the whole segmented file.
    """

    def __init__(self, args):
        """
        :param args: parsed CLI arguments providing repo_name, file_path
            and name (the name under which the file is inserted).
        """
        self.repo_name = Name(args.repo_name)
        self.file_path = args.file_path
        self.name_at_repo = Name(args.name)
        self.face = Face()
        self.keychain = KeyChain()
        self.face.setCommandSigningInfo(
            self.keychain, self.keychain.getDefaultCertificateName())
        self.running = True
        # Maps stringified Data names to the prepared packets.
        self.m_name_str_to_data = dict()
        self.n_packets = 0
        self.prepare_data()
        self.face.registerPrefix(self.name_at_repo, None, self.on_register_failed)
        self.face.setInterestFilter(self.name_at_repo, self.on_interest)

    async def face_loop(self):
        """Pump face events until self.running is cleared."""
        while self.running:
            self.face.processEvents()
            await asyncio.sleep(0.001)

    def prepare_data(self):
        """
        Shard file into data packets.

        Each packet carries at most MAX_BYTES_IN_DATA_PACKET bytes, is
        named /<name_at_repo>/<seq>, and is signed and cached in memory.
        """
        logging.info('preparing data')
        with open(self.file_path, 'rb') as binary_file:
            b_array = bytearray(binary_file.read())
        if len(b_array) == 0:
            logging.warning("File is 0 bytes")
            return
        # Fixed: integer ceiling division -- the original used float
        # division, which can lose precision for very large files.
        self.n_packets = (len(b_array) - 1) // MAX_BYTES_IN_DATA_PACKET + 1
        logging.info('There are {} packets in total'.format(self.n_packets))
        seq = 0
        for i in range(0, len(b_array), MAX_BYTES_IN_DATA_PACKET):
            data = Data(Name(self.name_at_repo).append(str(seq)))
            data.metaInfo.freshnessPeriod = 100000
            data.setContent(
                b_array[i:min(i + MAX_BYTES_IN_DATA_PACKET, len(b_array))])
            data.metaInfo.setFinalBlockId(
                Name.Component.fromSegment(self.n_packets - 1))
            self.keychain.signWithSha256(data)
            self.m_name_str_to_data[str(data.getName())] = data
            seq += 1

    @staticmethod
    def on_register_failed(prefix):
        """Log a failed prefix registration."""
        logging.error("Prefix registration failed: %s", prefix)

    def on_interest(self, _prefix, interest: Interest, face, _filter_id, _filter):
        """Serve a prepared packet whose name exactly matches the interest."""
        logging.info('On interest: {}'.format(interest.getName()))
        if str(interest.getName()) in self.m_name_str_to_data:
            self.face.putData(self.m_name_str_to_data[str(interest.getName())])
            logging.info('Serve data: {}'.format(interest.getName()))
        else:
            logging.info('Data does not exist: {}'.format(interest.getName()))

    async def insert_segmented_file(self):
        """
        Send the repo insert command, then poll with the insert-check
        command until the repo reports completion. Always stops and awaits
        the background face loop before returning.
        """
        event_loop = asyncio.get_event_loop()
        face_task = event_loop.create_task(self.face_loop())
        try:
            parameter = RepoCommandParameterMessage()
            for compo in self.name_at_repo:
                parameter.repo_command_parameter.name.component.append(
                    compo.getValue().toBytes())
            parameter.repo_command_parameter.start_block_id = 0
            parameter.repo_command_parameter.end_block_id = self.n_packets - 1
            param_blob = ProtobufTlv.encode(parameter)

            # Prepare cmd interest: /<repo_name>/insert/<encoded parameters>
            name = Name(self.repo_name).append('insert').append(
                Name.Component(param_blob))
            interest = Interest(name)
            self.face.makeCommandInterest(interest)

            logging.info('Send insert command interest')
            ret = await fetch_data_packet(self.face, interest)
            if not isinstance(ret, Data):
                logging.warning('Insert failed')
                return
            response = RepoCommandResponseMessage()
            try:
                ProtobufTlv.decode(response, ret.content)
            except RuntimeError as exc:
                # Fixed: pass the exception as a lazy %-argument and bail
                # out -- the original fell through and read fields of the
                # undecoded (empty) response.
                logging.warning('Response decoding failed: %s', exc)
                return
            process_id = response.repo_command_response.process_id
            status_code = response.repo_command_response.status_code
            logging.info('Insertion process {} accepted: status code {}'.format(
                process_id, status_code))

            # Use insert check command to probe if insert process is completed
            checker = CommandChecker(self.face, self.keychain)
            while True:
                response = await checker.check_insert(self.repo_name, process_id)
                if response is None or \
                        response.repo_command_response.status_code == 300:
                    # Not finished yet; poll again shortly.
                    await asyncio.sleep(1)
                elif response.repo_command_response.status_code == 200:
                    logging.info(
                        'Insert process {} status: {}, insert_num: {}'.format(
                            process_id,
                            response.repo_command_response.status_code,
                            response.repo_command_response.insert_num))
                    break
                else:
                    # Fixed: raise instead of assert(False), which is
                    # stripped when Python runs with -O.
                    raise RuntimeError(
                        'Unexpected insert check status code: {}'.format(
                            response.repo_command_response.status_code))
        finally:
            # Fixed: always stop and await the face loop, even on early
            # return -- the original leaked the task on failure paths.
            self.running = False
            await face_task