async def fetch_interest(article: str):
    try:
        name = Name.from_str(f'/lvs-test/article/xinyu/{article}')
        print(f'Sending Interest {Name.to_str(name)}')
        data_name, meta_info, content = await app.express_interest(
            name, must_be_fresh=True, can_be_prefix=True, lifetime=6000)
        print(f'Received Data Name: {Name.to_str(data_name)}')
        print(meta_info)
        print(bytes(content).decode() if content else None)
    except InterestNack as e:
        print(f'Nacked with reason={e.reason}')
    except InterestTimeout:
        print('Timeout')
    except InterestCanceled:
        print('Canceled')
    except ValidationFailure:
        print('Data failed to validate')

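# A minimal driver sketch (not part of the original snippet): it assumes
# `app = NDNApp()` exists at module scope, that a producer serves
# /lvs-test/article/xinyu, and uses the hypothetical article name 'day1'.
if __name__ == '__main__':
    async def after_start():
        await fetch_interest('day1')
        app.shutdown()

    app.run_forever(after_start=after_start())
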
def on_sd_ctl_interest(self, name: FormalName, param: InterestParam,
                       app_param: Optional[BinaryStr]):
    logging.info("SD: on interest")
    if app_param is None:
        logging.error("Malformed Interest")
        return
    # Each byte of the application parameter is one requested service ID.
    interested_ids = {sid for sid in app_param}
    result = b''
    cur_time = self.get_time_now_ms()
    for sname, exp_time in self.real_service_list.items():
        # The service ID is the first value byte of the third name component.
        sid = sname[2][2]
        if sid in interested_ids and exp_time > cur_time:
            result += Name.encode(sname)
            result += struct.pack("i", exp_time - cur_time)
    self.app.put_data(name, result, freshness_period=3000,
                      identity=self.system_prefix)
    logging.debug("PutData")
    logging.debug(name)

def __init__(self, app: NDNApp, storage: Storage, read_handle: ReadHandle,
             write_handle: WriteCommandHandle, delete_handle: DeleteCommandHandle,
             tcp_bulk_insert_handle: TcpBulkInsertHandle, config: dict):
    """
    An NDN repo instance.
    """
    self.prefix = Name.from_str(config['repo_config']['repo_name'])
    self.app = app
    self.storage = storage
    self.write_handle = write_handle
    self.read_handle = read_handle
    self.delete_handle = delete_handle
    self.tcp_bulk_insert_handle = tcp_bulk_insert_handle
    self.running = True
    self.register_root = config['repo_config']['register_root']

def remove_data_packet(self, name: NonStrictName) -> bool:
    """
    Remove a data packet named ``name``.

    :param name: NonStrictName. The name of the data packet.
    :return: True if a data packet was removed, False otherwise.
    """
    removed = False
    name = Name.normalize(name)
    try:
        del self.cache[name]
        removed = True
    except KeyError:
        pass
    if self._remove(self._get_name_bytes_wo_tl(name)):
        removed = True
    return removed

async def pubadv(self, dataname, storagetype=None, bundle=None, bundlesize=None,
                 pubprefix=None, redefine=False):
    """
    Coroutine function for issuing publish-advertise requests.

    :param dataname: data name which publications will be made to.
    :type dataname: str
    :param redefine: flag to inform if redefinition is allowed. (False)
    :type redefine: bool
    :return: True if `dataname` was successfully advertised, False otherwise.
    """
    pubadvinfo = PubAdvInfo(storagetype=storagetype, redefine=redefine)
    if storagetype == StorageType.PUBLISHER:
        pubadvinfo["pubprefix"] = pubprefix
    if storagetype is None or storagetype == StorageType.BROKER:
        if bundle is None:
            bundle = self.bundle or (bundlesize is not None)
        if bundle:
            if bundlesize is not None and int(bundlesize) > 1:
                pubadvinfo["bundle"] = True
                pubadvinfo["bundlesize"] = int(bundlesize)
            else:
                self.logger.debug(
                    f"** {dataname} won't be bundled due to an invalid bundle size")
    if self.scope is not None:  # defaults to TopicScope.GLOBAL
        pubadvinfo["topicscope"] = self.scope
    command, interest_param, app_param = \
        self.keeper.make_pubadv_cmd(self.keeper.net_name,
                                    patch_dataname(dataname),
                                    pubadvinfo=pubadvinfo)
    data_name, meta_info, content = await self.app.express_interest(
        Name.from_str(command), interest_param=interest_param, app_param=app_param)
    content = json.loads(bytes(content).decode())
    if 'reason' in content:
        self.logger.debug(f"** PA {dataname} failed: {content['reason']}")
    return content['status'] == 'OK'

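# A usage sketch (hypothetical client instance and data name): advertise a
# broker-stored topic bundled 4 publications at a time; retrying on failure
# is left to the caller.
async def advertise_sensor(client) -> bool:
    ok = await client.pubadv('/office/temp/sensor1',
                             storagetype=StorageType.BROKER,
                             bundlesize=4)
    if not ok:
        client.logger.warning('pubadv failed for /office/temp/sensor1')
    return ok
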
def get_data_packet(self, name: NonStrictName, can_be_prefix: bool = False,
                    must_be_fresh: bool = False) -> Optional[bytes]:
    name = Name.normalize(name)
    try:
        if not can_be_prefix:
            data, expire_time_ms = self.cache[name]
            if not must_be_fresh or expire_time_ms > self.time_ms():
                logging.info('SVSyncStorage: get from cache')
                return data
        else:
            it = self.cache.itervalues(prefix=name, shallow=True)
            while True:
                data, expire_time_ms = next(it)
                if not must_be_fresh or expire_time_ms > self.time_ms():
                    logging.info('SVSyncStorage: get from cache')
                    return data
    except (KeyError, StopIteration):
        return None

async def validate(self, name: FormalName, sig_ptrs: SignaturePtrs) -> bool:
    val = None
    if sig_ptrs.signature_info.signature_type is None:
        return True
    if sig_ptrs.signature_info.signature_type == 1:
        val = ValidatingInfo(ValidatingInfo.get_validator(SignatureType.DIGEST_SHA256))
    else:
        try:
            keyname = Name.to_str(sig_ptrs.signature_info.key_locator.name)
            val = self.dataValDict.get(keyname)
        except AttributeError:
            val = None
    if val:
        return await val.validate(name, sig_ptrs)
    # We do not have the key for this keyname (can't error-check it).
    return True

async def fetch_file(self, name_at_repo: NonStrictName, local_filename: str = None,
                     overwrite=False):
    """
    Fetch a file from remote repo, and write to the current working directory.

    :param name_at_repo: NonStrictName. The name with which this file is stored in the repo.
    :param local_filename: str. The filename of the retrieved file on the local file system.
    :param overwrite: If true, existing files are replaced.
    """
    # If no local filename is provided, store the file under the last name
    # component of the repo filename.
    if local_filename is None:
        local_filename = Name.to_str(name_at_repo)
        local_filename = os.path.basename(local_filename)
    # If the file already exists locally and overwrite=False, retrieving the
    # file makes no sense.
    if os.path.isfile(local_filename) and not overwrite:
        raise FileExistsError("{} already exists".format(local_filename))
    semaphore = aio.Semaphore(10)
    b_array = bytearray()
    async for (_, _, content, _) in concurrent_fetcher(self.app, name_at_repo,
                                                       0, None, semaphore):
        b_array.extend(content)
    if len(b_array) > 0:
        logging.info(f'Fetching completed, writing to file {local_filename}')
        # Create the folder hierarchy
        local_folder = os.path.dirname(local_filename)
        if local_folder:
            os.makedirs(local_folder, exist_ok=True)
        # Write retrieved data to file
        if os.path.isfile(local_filename) and overwrite:
            os.remove(local_filename)
        with open(local_filename, 'wb') as f:
            f.write(b_array)

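# A usage sketch (hypothetical repo object name and local path): fetch a repo
# object into ./downloads, replacing any existing copy on disk.
async def download(client):
    await client.fetch_file('/repo/files/report.pdf',
                            local_filename='downloads/report.pdf',
                            overwrite=True)
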
async def unregister(self, name: NonStrictName) -> bool:
    """
    Unregister a route for a specific prefix.

    :param name: the Name prefix.
    :type name: :any:`NonStrictName`
    """
    name = Name.normalize(name)
    del self._prefix_tree[name]
    try:
        await self.express_interest(
            make_command('rib', 'unregister', name=name), lifetime=1000)
        return True
    except (InterestNack, InterestTimeout, InterestCanceled, ValidationFailure):
        return False

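# A usage sketch (assumes `app` is a connected NDNApp and /example/app was
# registered earlier): unregister reports failure instead of raising.
async def teardown(app: NDNApp):
    if not await app.unregister('/example/app'):
        logging.warning('rib/unregister for /example/app failed')
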
def test_default_4():
    data = bytes.fromhex(
        "0630 0703080145"
        "1400 1500 16031b0100"
        "1720f965ee682c6973c3cbaa7b69e4c7063680f83be93a46be2ccc98686134354b66")
    name, meta_info, content, sig = parse_data(data)
    assert name == Name.from_str("/E")
    assert meta_info.content_type is None
    assert meta_info.freshness_period is None
    assert meta_info.final_block_id is None
    assert sig.signature_info.signature_type == SignatureType.DIGEST_SHA256
    assert content == b''
    algo = hashlib.sha256()
    for part in sig.signature_covered_part:
        algo.update(part)
    assert sig.signature_value_buf == algo.digest()

async def equalize(self, incoming_md: MetaData) -> None:
    self.busy = True
    if (incoming_md.tseqno <= self.table.getMetaData().tseqno) and \
            (incoming_md.nulled <= self.table.getMetaData().nulled):
        self.busy = False
        return
    for i in range(incoming_md.nopcks):
        incoming_sv = await self.getStatePckValue(
            Name.from_str(bytes(incoming_md.source).decode()), i + 1)
        if incoming_sv is None:
            break
        missingList = self.table.processStateVector(incoming_sv, oldData=True)
        if missingList:
            self.updateCallback(missingList)
        self.table.updateMetaData()
        if (incoming_md.tseqno <= self.table.getMetaData().tseqno) and \
                (incoming_md.nulled <= self.table.getMetaData().nulled):
            break
    logging.info(f'MSBalancer: nmeta {bytes(self.table.getMetaData().source).decode()} - '
                 f'{self.table.getMetaData().tseqno} total, '
                 f'{self.table.getMetaData().nulled} null, '
                 f'{self.table.getMetaData().nopcks} pcks')
    logging.info(f'MSBalancer: ntable {self.table.getCompleteStateVector().to_str()}')
    self.busy = False

def test_default_1():
    data = (b"\x06\x42\x07\x14\x08\x05local\x08\x03ndn\x08\x06prefix"
            b"\x14\x03\x18\x01\x00"
            b"\x16\x03\x1b\x01\x00"
            b"\x17 \x7f1\xe4\t\xc5z/\x1d\r\xdaVh8\xfd\xd9\x94"
            b"\xd8\'S\x13[\xd7\x15\xa5\x9d%^\x80\xf2\xab\xf0\xb5")
    name, meta_info, content, sig = parse_data(data)
    assert name == Name.from_str("/local/ndn/prefix")
    assert meta_info.content_type == ContentType.BLOB
    assert meta_info.freshness_period is None
    assert meta_info.final_block_id is None
    assert sig.signature_info.signature_type == SignatureType.DIGEST_SHA256
    assert content is None
    algo = hashlib.sha256()
    for part in sig.signature_covered_part:
        algo.update(part)
    assert sig.signature_value_buf == algo.digest()

def put_data_packet(self, name: NonStrictName, data: bytes):
    """
    Insert a data packet named ``name`` with value ``data``.

    This method will parse ``data`` to get its freshnessPeriod, and compute
    its expiration time by adding the freshnessPeriod to the current time.

    :param name: NonStrictName. The name of the data packet.
    :param data: bytes. The value of the data packet.
    """
    _, meta_info, _, _ = parse_data(data)
    expire_time_ms = self._time_ms()
    if meta_info.freshness_period:
        expire_time_ms += meta_info.freshness_period
    # Write the data packet and its expiration time to the cache
    name = Name.normalize(name)
    self.cache[name] = (data, expire_time_ms)
    logging.info(f'Cache save: {Name.to_str(name)}')

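# A round-trip sketch tying the cache methods above together. It assumes a
# concrete storage class such as the SqliteStorage used elsewhere in these
# snippets, and a pre-encoded Data packet `wire` named /demo/obj.
storage = SqliteStorage()
storage.put_data_packet('/demo/obj', wire)
assert storage.get_data_packet('/demo/obj') == wire
assert storage.remove_data_packet('/demo/obj') is True
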
def _on_insert_interest(self, int_name, _int_param, _app_param):
    logging.info('INSERT Interest Received: {}\n'.format(Name.to_str(int_name)))
    # An invalid Interest whose name is exactly the catalog's prefix.
    if len(int_name) == len(self.node_insert_prefix):
        return
    data_name, hash, desired_copies = self.decode_datainfo(
        int_name, self.node_insert_prefix)
    self.app.put_data(int_name, content=b'ok', freshness_period=1000)
    if desired_copies is None:
        aio.ensure_future(self.command_client.add(data_name, hash))
    else:
        aio.ensure_future(
            self.command_client.add(data_name, hash, desired_copies))

def test_encode_func():
    name = Name.from_str('/a/b/c/d')
    buf = bytearray(20)
    # Not enough room to encode 14 bytes starting at offset 10.
    with pytest.raises(IndexError):
        Name.encode(name, buf, 10)
    assert Name.encode(name, buf, 6) == \
        b'\x00\x00\x00\x00\x00\x00\x07\x0c\x08\x01a\x08\x01b\x08\x01c\x08\x01d'
    # Encoding at offset 0 overwrites only the first 14 bytes; the tail of
    # the previous encoding (the 'c' and 'd' components) remains at the end.
    assert Name.encode(name, buf) == \
        b'\x07\x0c\x08\x01a\x08\x01b\x08\x01c\x08\x01d\x08\x01c\x08\x01d'
    assert Name.encode([]) == b'\x07\x00'

def on_interest(name: FormalName, param: InterestParam, _app_param: Optional[BinaryStr]):
    logging.info(f'>> I: {Name.to_str(name)}, {param}')
    request = Name.to_str(name).split("/")
    print("handle Interest Name", Name.to_str(name))
    if request[-2] == "metadata":
        print("handle Meta data")
        # Reply with the name of the latest I-frame, appended as a number component.
        content = Name.to_str(
            name + [Component.from_number(current_I_frame, 0)]).encode()
        app.put_data(name, content=content, freshness_period=300)
        logging.info("handle to name " + Name.to_str(name))
    elif request[-3] == "frame":
        interest_frame_num = int(request[-1])
        if interest_frame_num in frame_buffer_dict:
            content = frame_buffer_dict[interest_frame_num]
            app.put_data(name + [b'\x08\x02\x00\x00'], content=content,
                         freshness_period=2000,
                         final_block_id=Component.from_segment(0))
            print('handle interest: publish pending interest ' +
                  Name.to_str(name) + "------------/" +
                  str(interest_frame_num) + " length: ", len(content))
        else:
            # The frame is not produced yet; park the Interest until it is.
            interest_buffer.append([interest_frame_num, name])
    else:
        print("handle Request missing ", Name.to_str(name))
    # Serve any parked Interests whose frames have been produced meanwhile.
    while len(interest_buffer) > 0 and len(frame_buffer) > 0 and \
            frame_buffer[-1] >= interest_buffer[0][0]:
        pendingInterest = interest_buffer.popleft()
        pendingFN = pendingInterest[0]
        pendingName = pendingInterest[1]
        if pendingFN in frame_buffer_dict:
            content = frame_buffer_dict[pendingFN]
            app.put_data(pendingName + [b'\x08\x02\x00\x00'], content=content,
                         freshness_period=2000,
                         final_block_id=Component.from_segment(0))
            print('handle interest: publish pending interest ' +
                  Name.to_str(pendingName) + "------------/" +
                  str(pendingFN) + " length: ", len(content))

def test_meta_info():
    data = (
        b"\x06\x4e\x07\x17\x08\x05local\x08\x03ndn\x08\x06prefix\x25\x01\x00"
        b"\x14\x0c\x18\x01\x00\x19\x02\x03\xe8\x1a\x03\x3a\x01\x02"
        b"\x16\x03\x1b\x01\x00"
        b"\x17 \x0f^\xa1\x0c\xa7\xf5Fb\xf0\x9cOT\xe0FeC\x8f92\x04\x9d\xabP\x80o\'\x94\xaa={hQ")
    name, meta_info, content, sig = parse_data(data)
    assert name == Name.from_str("/local/ndn/prefix/37=%00")
    assert meta_info.content_type == ContentType.BLOB
    assert meta_info.freshness_period == 1000
    assert meta_info.final_block_id == Component.from_sequence_num(2)
    assert sig.signature_info.signature_type == SignatureType.DIGEST_SHA256
    assert content is None
    algo = hashlib.sha256()
    for part in sig.signature_covered_part:
        algo.update(part)
    assert sig.signature_value_buf == algo.digest()

def main():
    config = get_yaml()
    logging.info(config)

    app = NDNApp()
    storage = SqliteStorage()
    read_handle = ReadHandle(app, storage)
    write_handle = WriteCommandHandle(app, storage, read_handle)
    delete_handle = DeleteCommandHandle(app, storage)
    tcp_bulk_insert_handle = TcpBulkInsertHandle(storage, read_handle,
                                                 config['tcp_bulk_insert']['addr'],
                                                 config['tcp_bulk_insert']['port'])
    repo = Repo(Name.from_str(config['repo_config']['repo_name']),
                app, storage, read_handle, write_handle, delete_handle,
                tcp_bulk_insert_handle)
    repo.listen()
    app.run_forever()

def on_check_interest(self, int_name, _int_param, _app_param):
    logging.info('on_check_interest(): {}'.format(Name.to_str(int_name)))
    response = None
    process_id = None
    try:
        parameter = self.decode_cmd_param_bytes(int_name)
        process_id = parameter.process_id
    except RuntimeError:
        response = RepoCommandResponse()
        response.status_code = 403
    if response is None and process_id not in self.m_processes:
        response = RepoCommandResponse()
        response.status_code = 404
    if response is None:
        self.reply_to_cmd(int_name, self.m_processes[process_id])
    else:
        self.reply_to_cmd(int_name, response)

def test_default_2():
    data = (b'\x06L\x07\x14\x08\x05local\x08\x03ndn\x08\x06prefix'
            b'\x14\x03\x18\x01\x00'
            b'\x15\x0801020304'
            b'\x16\x03\x1b\x01\x00'
            b'\x17 \x94\xe9\xda\x91\x1a\x11\xfft\x02i:G\x0cO\xdd!'
            b'\xe0\xc7\xb6\xfd\x8f\x9cn\xc5\x93{\x93\x04\xe0\xdf\xa6S')
    name, meta_info, content, sig = parse_data(data)
    assert name == Name.from_str("/local/ndn/prefix")
    assert meta_info.content_type == ContentType.BLOB
    assert meta_info.freshness_period is None
    assert meta_info.final_block_id is None
    assert sig.signature_info.signature_type == SignatureType.DIGEST_SHA256
    assert content == b'01020304'
    algo = hashlib.sha256()
    for part in sig.signature_covered_part:
        algo.update(part)
    assert sig.signature_value_buf == algo.digest()

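# A companion sketch (not from the original suite) for regenerating such a
# test vector with python-ndn's own encoder; the DigestSha256Signer import
# path is an assumption about the installed python-ndn version.
from ndn.encoding import make_data, MetaInfo
from ndn.security import DigestSha256Signer

def make_default_2_vector() -> bytes:
    # DigestSha256 produces the \x16\x03\x1b\x01\x00 SignatureInfo asserted above.
    return make_data("/local/ndn/prefix",
                     MetaInfo(content_type=ContentType.BLOB),
                     b"01020304",
                     signer=DigestSha256Signer())
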
async def fetch_file(self, name_at_repo):
    """
    Fetch a file from remote repo, and write to disk.

    :param name_at_repo: NonStrictName. The name with which this file is stored in the repo.
    """
    semaphore = aio.Semaphore(10)
    b_array = bytearray()
    async for data_bytes in concurrent_fetcher(self.app, name_at_repo,
                                               0, None, semaphore):
        (_, _, content, _) = ndn_format_0_3.parse_data(data_bytes, with_tl=False)
        b_array.extend(content)
    if len(b_array) > 0:
        filename = Name.to_str(name_at_repo)
        filename = filename.strip().split('/')[-1]
        logging.info(f'Fetching completed, writing to file {filename}')
        with open(filename, 'wb') as f:
            f.write(b_array)

def test_meta_info():
    data = (
        b"\x06\x4e\x07\x17\x08\x05local\x08\x03ndn\x08\x06prefix\x25\x01\x00"
        b"\x14\x0c\x18\x01\x00\x19\x02\x03\xe8\x1a\x03\x25\x01\x02"
        b"\x16\x03\x1b\x01\x00"
        b"\x17 \x03\xb8,\x18\xffMw\x84\x86\xa5a\x94e\xcc\xdaQ\x15\xb7\xfb\x19\xab\x9d1lw\'\xdf\xac\x03#\xcad")
    name, meta_info, content, sig = parse_data(data)
    assert name == Name.from_str("/local/ndn/prefix/37=%00")
    assert meta_info.content_type == ContentType.BLOB
    assert meta_info.freshness_period == 1000
    assert meta_info.final_block_id == Component.from_sequence_num(2)
    assert sig.signature_info.signature_type == SignatureType.DIGEST_SHA256
    assert content is None
    algo = hashlib.sha256()
    for part in sig.signature_covered_part:
        algo.update(part)
    assert sig.signature_value_buf == algo.digest()

async def fetch_file(self, name_at_repo: NonStrictName):
    """
    Fetch a file from remote repo, and write to the current working directory.

    :param name_at_repo: NonStrictName. The name with which this file is stored in the repo.
    """
    semaphore = aio.Semaphore(10)
    b_array = bytearray()
    async for (_, _, content, _) in concurrent_fetcher(self.app, name_at_repo,
                                                       0, None, semaphore):
        b_array.extend(content)
    if len(b_array) > 0:
        filename = Name.to_str(name_at_repo)
        filename = filename.strip().split('/')[-1]
        logging.info(f'Fetching completed, writing to file {filename}')
        with open(filename, 'wb') as f:
            f.write(b_array)

def test_verify(self):
    pri_key = Ed25519PrivateKey.generate()
    key = pri_key.private_bytes(
        encoding=serialization.Encoding.Raw,
        format=serialization.PrivateFormat.Raw,
        encryption_algorithm=serialization.NoEncryption(),
    )
    pub_key = pri_key.public_key()
    signer = Ed25519Signer("/K/KEY/x", key)
    pkt = make_data("/test", MetaInfo(), b"test content", signer=signer)
    _, _, _, sig_ptrs = parse_data(pkt)
    pub_bits = pub_key.public_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    validator = Ed25519Checker.from_key("/K/KEY/x", bytes(pub_bits))
    assert aio.run(validator(Name.from_str("/test"), sig_ptrs))

async def _perform_storage_delete(self, prefix, start_block_id: int,
                                  end_block_id: int) -> int:
    """
    Delete items from storage.

    :param prefix: NonStrictName.
    :param start_block_id: int.
    :param end_block_id: int.
    :return: The number of data items deleted.
    """
    delete_num = 0
    for idx in range(start_block_id, end_block_id + 1):
        key = prefix[:]
        key.append(str(idx))
        key = Name.to_str(key)
        if self.storage.exists(key):
            self.storage.remove(key)
            delete_num += 1
        # Temporarily release control to make the process non-blocking
        await aio.sleep(0)
    return delete_num

async def pubunadv(self, dataname, allow_undefined=True):
    """
    Coroutine function for issuing publish-unadvertise requests.

    :param dataname: data name to which no more publications will be allowed.
    :type dataname: str
    :param allow_undefined: flag to inform if an undefined name is allowed. (True)
    :type allow_undefined: bool
    :return: True if `dataname` was successfully unadvertised, False otherwise.
    """
    pubadvinfo = PubAdvInfo(topicscope=self.scope)
    command, interest_param, app_param = \
        self.keeper.make_pubunadv_cmd(self.keeper.net_name,
                                      patch_dataname(dataname),
                                      pubadvinfo=pubadvinfo)
    data_name, meta_info, content = await self.app.express_interest(
        Name.from_str(command), interest_param=interest_param, app_param=app_param)
    status = json.loads(bytes(content).decode())['status']
    return status == 'OK' or (allow_undefined and status == 'ERR')

def main() -> int:
    cmdline_args = process_cmd_opts()
    configuration = process_config(cmdline_args)
    storage = create_storage(configuration['db_config'])
    app = NDNApp()
    read_handle = ReadHandle(app, storage)
    write_handle = WriteHandle(app, storage, read_handle)
    catalog = Catalog(Name.from_str(configuration['catalog_config']['catalog_name']),
                      app, storage, read_handle, write_handle)
    aio.ensure_future(catalog.listen())
    try:
        app.run_forever()
    except FileNotFoundError:
        print('Error: could not connect to NFD.')
    return 0

def check(self, check_prefix: NonStrictName, process_id: bytes) -> Optional[RepoCommandResponse]:
    """
    Check the status of process ``process_id`` published under ``check_prefix``.

    This function returns the in-memory process status, so it returns
    immediately.

    :return: Optional[RepoCommandResponse]. The last known status of
        ``process_id``; the first call for a given process therefore
        returns None.
    """
    # If process_id has not been seen before, subscribe to its status with PubSub
    if process_id not in self.process_id_to_response:
        topic = check_prefix + ['check', Component.from_bytes(process_id)]
        cb = self.make_on_msg(process_id)
        self.pb.subscribe(topic, cb)
        self.process_id_to_response[process_id] = None
        self.process_id_to_check_prefix[process_id] = check_prefix
        logging.info('CommandChecker subscribing to {}'.format(Name.to_str(topic)))
    # Remember when this process was last checked
    self.process_id_to_last_check_tp[process_id] = int(time.time())
    return self.process_id_to_response[process_id]

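# A polling sketch (hypothetical checker instance): the first check()
# subscribes and returns None, so poll until a status is published or the
# timeout elapses.
async def wait_for_status(checker, check_prefix, process_id: bytes,
                          timeout_s: int = 10):
    for _ in range(timeout_s * 10):
        response = checker.check(check_prefix, process_id)
        if response is not None:
            return response
        await aio.sleep(0.1)
    return None
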
async def query_face_id(self, uri):
    query_filter = FaceQueryFilter()
    query_filter.face_query_filter = FaceQueryFilterValue()
    query_filter.face_query_filter.uri = uri
    query_filter_msg = query_filter.encode()
    name = Name.from_str("/localhost/nfd/faces/query") + \
        [Component.from_bytes(query_filter_msg)]
    try:
        _, _, data = await self.app.express_interest(
            name, lifetime=1000, can_be_prefix=True, must_be_fresh=True)
    except (InterestCanceled, InterestTimeout, InterestNack,
            ValidationFailure, NetworkError):
        logging.error('Query failed')
        return None
    msg = FaceStatusMsg.parse(data)
    if len(msg.face_status) <= 0:
        return None
    return msg.face_status[0].face_id

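# A usage sketch (hypothetical host and port; 192.0.2.1 is a documentation
# address): resolve the face for a UDP tunnel before issuing an NFD
# rib/register command against it.
async def face_for_udp_tunnel(self, host: str = '192.0.2.1', port: int = 6363):
    face_id = await self.query_face_id(f'udp4://{host}:{port}')
    if face_id is None:
        logging.warning('no NFD face matches the given URI')
    return face_id
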
def main():
    parser = argparse.ArgumentParser(description='segmented insert client')
    parser.add_argument('-r', '--repo_name', required=True, help='Name of repo')
    parser.add_argument('-p', '--process_id', required=True, help='Process ID')
    args = parser.parse_args()

    logging.basicConfig(format='[%(asctime)s]%(levelname)s:%(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.INFO)

    app = NDNApp()
    try:
        app.run_forever(
            after_start=run_check(app,
                                  repo_name=Name.from_str(args.repo_name),
                                  process_id=int(args.process_id)))
    except FileNotFoundError:
        print('Error: could not connect to NFD.')