def test_handle_wedged_nodes():
    """Exercise handle_wedged_nodes() against duplicate wedge-queue entries."""
    import diskcache as dc
    from node_tools import ctlr_data as ct

    trie = ct.net_trie
    off_q = dc.Deque(directory='/tmp/test-oq')
    wdg_q = dc.Deque(directory='/tmp/test-wq')
    node_id = 'ee2eedb2e1'
    exit_id = 'beefea68e6'
    tail_id = 'ff2ffdb2e1'

    # A wedged node shows up twice in the queue before being handled.
    wdg_q.extend([node_id, node_id])
    handle_wedged_nodes(trie, wdg_q, off_q)
    assert list(wdg_q) == []
    assert list(off_q) == ['beefea68e6']

    wdg_q.extend([tail_id, tail_id])
    handle_wedged_nodes(trie, wdg_q, off_q)
    assert list(wdg_q) == []
    assert list(off_q) == ['beefea68e6', 'ee2eedb2e1']

    # Handling a wedged exit node is expected to trip an assertion.
    wdg_q.extend([exit_id, exit_id])
    with pytest.raises(AssertionError):
        handle_wedged_nodes(trie, wdg_q, off_q)
def setUp(self):
    """Provision disk-backed scratch queues and node IDs for queue tests."""
    super(QueueMsgHandlingTest, self).setUp()
    import diskcache as dc

    self.node1, self.node2, self.node3 = (
        'beef01dead', '02beefdead', 'deadbeef03')
    # One deque per queue role, each backed by its own /tmp dir.
    self.node_q = dc.Deque(directory='/tmp/test-nq')
    self.reg_q = dc.Deque(directory='/tmp/test-rq')
    self.wait_q = dc.Deque(directory='/tmp/test-wq')
def test_run_cleanup_check():
    """run_cleanup_check() should drain the cleanup queue in both cases."""
    import diskcache as dc
    from node_tools import state_data as st

    clean_q = dc.Deque(directory='/tmp/test-clq')
    pub_q = dc.Deque(directory='/tmp/test-pbq')
    for queue in (clean_q, pub_q):
        queue.clear()

    # Node present in both the cleanup and publish queues.
    clean_q.append('beef9f73c6')
    pub_q.append('beef9f73c6')
    run_cleanup_check(clean_q, pub_q)
    assert len(clean_q) == 0

    # Node present only in the cleanup queue.
    clean_q.append('beefea68e6')
    run_cleanup_check(clean_q, pub_q)
    assert len(clean_q) == 0
def setUp(self):
    """Provision queues, node IDs and cfg messages for wait-handling tests."""
    super(WaitForMsgHandlingTest, self).setUp()
    import diskcache as dc

    self.cfg_q = dc.Deque(directory='/tmp/test-aq')
    self.hold_q = dc.Deque(directory='/tmp/test-hq')
    self.reg_q = dc.Deque(directory='/tmp/test-rq')
    self.node1 = 'beef01dead'
    self.node2 = '02beefdead'
    self.node3 = 'deadbeef03'
    # Canned JSON cfg messages matching node1/node2 above.
    self.cfg1 = ('{"node_id": "beef01dead", "networks": '
                 '["7ac4235ec5d3d938", "bb8dead3c63cea29"]}')
    self.cfg2 = '{"node_id": "02beefdead", "networks": ["7ac4235ec5d3d938"]}'
    for cfg in (self.cfg1, self.cfg2):
        self.cfg_q.append(cfg)
def test(status=False):
    """Stress one shared Deque from eight worker processes.

    Skipped on Travis/AppVeyor CI.  With status=True a single-line
    progress display is refreshed once per second while workers run.
    """
    if os.environ.get('TRAVIS') == 'true':
        return
    if os.environ.get('APPVEYOR') == 'True':
        return

    random.seed(SEED)
    deque = dc.Deque(range(SIZE))

    processes = [
        mp.Process(target=stress, args=(SEED + count, deque))
        for count in range(8)
    ]
    for process in processes:
        process.start()

    # Poll once per second until every worker has exited.
    for value in it.count():
        time.sleep(1)
        if status:
            print('\r', value, 's', len(deque), 'items', ' ' * 20, end='')
        if all(not process.is_alive() for process in processes):
            break

    if status:
        print('')

    assert all(process.exitcode == 0 for process in processes)
def wrapper():
    # Run the wrapped test with a fresh disk-backed deque.  `func` and
    # `rmdir` come from the enclosing decorator scope (not visible here).
    # On failure the deque's backing directory is removed before the
    # exception propagates; presumably successful runs clean up inside
    # `func` itself — confirm against the decorator's contract.
    deque = dc.Deque()
    try:
        func(deque)
    except Exception:
        rmdir(deque.directory)
        raise
def setUp(self):
    """Build scratch queues, clear shared state, and wire a Subscriber
    with three message handlers that record into per-test lists."""
    import diskcache as dc
    from node_tools import ctlr_data as ct

    self.node1 = 'deadbeef01'
    self.node2 = '20beefdead'
    self.needs = [False, True]
    self.net_list = ['7ac4235ec5d3d940']
    self.trie = ct.id_trie
    self.node_q = dc.Deque(directory='/tmp/test-nq')
    self.off_q = dc.Deque(directory='/tmp/test-oq')
    self.pub_q = dc.Deque(directory='/tmp/test-pq')
    self.tmp_q = dc.Deque(directory='/tmp/test-tq')
    # Queues and trie persist on disk/module state, so start each
    # test from a clean slate.
    self.node_q.clear()
    self.off_q.clear()
    self.pub_q.clear()
    self.tmp_q.clear()
    self.trie.clear()
    self.addr = '127.0.0.1'
    self.tcp_addr = 'tcp://{}:9442'.format(self.addr)
    self.active_list = []
    self.off_list = []
    self.sub_list = []

    def handle_msg(msg):
        # Record every announce msg seen on the 'handle_node' channel.
        self.sub_list.append(msg)
        return self.sub_list

    def handle_cfg(msg):
        # Record every cfg msg seen on the 'cfg_msgs' channel.
        self.active_list.append(msg)
        return self.active_list

    def offline(msg):
        # Record offline node msgs, de-duplicated.
        if msg not in self.off_list:
            self.off_list.append(msg)
        return self.off_list

    self.service = Subscriber(self.tcp_addr)
    self.service.subscribe('handle_node', handle_msg)
    self.service.subscribe('cfg_msgs', handle_cfg)
    self.service.subscribe('offline', offline)
def test_populate_leaf_list():
    """Populate the leaf-node list and verify lookup/update helpers."""
    import diskcache as dc
    from node_tools import state_data as st

    node_q = dc.Deque(directory='/tmp/test-nq')
    wait_q = dc.Deque(directory='/tmp/test-wq')
    tmp_q = dc.Deque(directory='/tmp/test-tq')
    for queue in (node_q, wait_q, tmp_q):
        queue.clear()

    node_q.append('beef9f73c6')
    peers = get_peer_status(cache)
    leaves = [peer for peer in peers if peer['role'] == 'LEAF']

    # First pass: the node sits in node_q.
    for peer in leaves:
        populate_leaf_list(node_q, wait_q, tmp_q, peer)
    assert len(st.leaf_nodes) == 1
    assert st.leaf_nodes[0]['beef9f73c6'] == '134.47.250.137'

    # Second pass: the node sits in wait_q instead.
    node_q.clear()
    wait_q.append('beef9f73c6')
    for peer in leaves:
        populate_leaf_list(node_q, wait_q, tmp_q, peer)
    assert len(st.leaf_nodes) == 1
    assert st.leaf_nodes[0]['beef9f73c6'] == '134.47.250.137'

    res = lookup_node_id('beef9f73c6', tmp_q)
    assert res['beef9f73c6'] == '134.47.250.137'
    assert len(tmp_q) == 1

    # Updating replaces the queued address in place (no duplicate entry).
    node_update = {'beef9f73c6': '134.47.250.42'}
    avoid_and_update('beef9f73c6', node_update, tmp_q)
    res = lookup_node_id('beef9f73c6', tmp_q)
    assert res['beef9f73c6'] == '134.47.250.42'
    assert len(tmp_q) == 1

    wait_q.clear()
    tmp_q.clear()
    st.leaf_nodes = []
def test_init():
    """Deque construction: default dir, explicit dir, and re-attach.

    FIX: use a private temp dir instead of the fixed path
    '/tmp/diskcache/deque' so parallel or multi-user runs cannot
    collide — consistent with the tempfile-based variant of this
    test elsewhere in the suite.
    """
    import tempfile
    directory = tempfile.mkdtemp()
    sequence = list('abcde')

    # No directory given: the deque picks its own location.
    deque = dc.Deque(sequence, None)
    assert deque == sequence
    rmdir(deque.directory)
    del deque
    rmdir(directory)

    # An explicit directory is honored and the contents match.
    deque = dc.Deque(sequence, directory)
    assert deque.directory == directory
    assert deque == sequence

    # A second Deque on the same directory sees the same items.
    other = dc.Deque(directory=directory)
    assert other == deque

    del deque
    del other
    rmdir(directory)
def test_init():
    """Deque construction: default dir, explicit dir, and re-attach."""
    directory = tempfile.mkdtemp()
    sequence = list('abcde')

    # No directory given: the deque picks its own location.
    dq = dc.Deque(sequence, None)
    assert dq == sequence
    rmdir(dq.directory)
    del dq
    rmdir(directory)

    # An explicit directory is honored and the contents match.
    dq = dc.Deque(sequence, directory)
    assert dq.directory == directory
    assert dq == sequence

    # A second Deque on the same directory sees the same items.
    clone = dc.Deque(directory=directory)
    assert clone == dq

    del dq
    del clone
    rmdir(directory)
def test_gen_netobj_queue():
    """gen_netobj_queue() fills an empty queue once, then is a no-op."""
    import diskcache as dc

    netobj_q = dc.Deque(directory='/tmp/test-oq')
    netobj_q.clear()

    # A /24 carved into /30 subnets yields 64 network objects.
    gen_netobj_queue(netobj_q, ipnet='192.168.0.0/24')
    assert len(netobj_q) == 64

    net = netobj_q.popleft()
    assert isinstance(net, ipaddress.IPv4Network)
    assert len(list(net)) == 4
    assert len(list(net.hosts())) == 2

    # A second call must not refill an already-populated queue.
    gen_netobj_queue(netobj_q)
    assert len(netobj_q) == 63
def network_cruft_cleaner():
    """
    Startup companion to net_id_handler(): for member nodes (no role),
    send a ztcli `leave` for every stale network ID still recorded in
    the net queue, then clear the queue.
    """
    import diskcache as dc
    from node_tools.node_funcs import run_ztcli_cmd

    # Only plain member nodes carry stale fpn networks to leave.
    if NODE_SETTINGS['node_role'] is not None:
        return

    net_q = dc.Deque(directory=get_cachedir('net_queue'))
    for stale_id in list(net_q):
        res = run_ztcli_cmd(action='leave', extra=stale_id)
        logger.debug('run_ztcli_cmd leave result: {}'.format(res))
    net_q.clear()
def test_handle_net_cfg():
    """handle_net_cfg() yields (net, mbr, gw) AttrDict fragments."""
    import diskcache as dc

    netobj_q = dc.Deque(directory='/tmp/test-oq')

    net1, mbr1, gw1 = handle_net_cfg(netobj_q)
    for fragment in [net1, mbr1, gw1]:
        assert isinstance(fragment, AttrDict)

    net2, mbr2, gw2 = handle_net_cfg(netobj_q)
    for fragment in [net2, mbr2, gw2]:
        assert isinstance(fragment, AttrDict)

    # Consecutive calls consume different subnets from the queue.
    assert mbr1 != mbr2
    assert mbr1.ipAssignments == ['192.168.0.6/30']
    assert mbr1.authorized is True

    res = handle_net_cfg(netobj_q)
    # BUG FIX: was `len(res) is 3` — identity comparison with an int
    # literal relies on CPython small-int caching and raises a
    # SyntaxWarning on modern Python; equality is the correct check.
    assert len(res) == 3

    netobj_q.clear()
def net_id_handler(iface, nwid, old=False):
    """
    Net ID handler for, well, handling network IDs when fpn interfaces
    come and go.  We use a deque to store the new ID and then remove it
    on cleanup (or the next startup).
    :param iface: fpn iface ID name <fpn_id0|fpn_id1>
    :param nwid: fpn network ID or None (state in the caller)
    :param old: set old=True to remove `nwid` from the net queue
    """
    import diskcache as dc

    net_q = dc.Deque(directory=get_cachedir('net_queue'))
    queued = list(net_q)
    if old:
        # Cleanup path: drop the ID if it is actually queued.
        if nwid in queued:
            net_q.remove(nwid)
            logger.debug('Removed network id {} from net_q'.format(nwid))
    elif nwid is not None and nwid not in queued:
        # Startup path: remember the new ID exactly once.
        net_q.append(nwid)
        logger.debug('Added network id {} to net_q'.format(nwid))
def test_repr():
    """Deque repr embeds its backing directory.

    FIX: use a unique temp dir instead of the fixed path
    '/tmp/diskcache/deque' so parallel or multi-user runs cannot
    collide — consistent with the tempfile-based repr test.
    """
    import tempfile
    directory = tempfile.mkdtemp()
    deque = dc.Deque(directory=directory)
    assert repr(deque) == 'Deque(directory=%r)' % directory
len(reg_q), list(reg_q))) logger.debug('{} node(s) in wait queue: {}'.format( len(wait_q), list(wait_q))) manage_incoming_nodes(node_q, reg_q, wait_q) if len(reg_q) > 0: drain_msg_queue(reg_q, pub_q, addr='127.0.0.1') logger.debug('{} node(s) in node queue: {}'.format( len(node_q), list(node_q))) logger.debug('{} node(s) in pub queue: {}'.format( len(pub_q), list(pub_q))) logger.debug('{} node(s) in active queue: {}'.format( len(cfg_q), list(cfg_q))) except Exception as exc: logger.error('peerstate exception was: {}'.format(exc)) raise exc cache = dc.Index(get_cachedir()) cfg_q = dc.Deque(directory=get_cachedir('cfg_queue')) node_q = dc.Deque(directory=get_cachedir('node_queue')) off_q = dc.Deque(directory=get_cachedir('off_queue')) wdg_q = dc.Deque(directory=get_cachedir('wedge_queue')) pub_q = dc.Deque(directory=get_cachedir('pub_queue')) reg_q = dc.Deque(directory=get_cachedir('reg_queue')) tmp_q = dc.Deque(directory=get_cachedir('tmp_queue')) wait_q = dc.Deque(directory=get_cachedir('wait_queue')) loop = asyncio.get_event_loop() loop.run_until_complete(main())
def test_repr():
    """Deque repr embeds the backing directory path."""
    tmp_dir = tempfile.mkdtemp()
    dq = dc.Deque(directory=tmp_dir)
    expected = 'Deque(directory=%r)' % tmp_dir
    assert repr(dq) == expected
def deque():
    """Fixture: yield a fresh Deque, then remove its backing directory."""
    dq = dc.Deque()
    yield dq
    rmdir(dq.directory)
len(boot_list), boot_list)) if len(boot_list) != 0: await close_mbr_net(client, node_list, boot_list, min_nodes=3) else: await unwrap_mbr_net(client, node_list, boot_list, min_nodes=3) except Exception as exc: logger.error('netstate exception was: {}'.format(exc)) await cleanup_orphans(client) if list(ct.net_trie) == [] and list(ct.id_trie) != []: ct.id_trie.clear() raise exc cache = dc.Index(get_cachedir()) off_q = dc.Deque(directory=get_cachedir('off_queue')) node_q = dc.Deque(directory=get_cachedir('node_queue')) netobj_q = dc.Deque(directory=get_cachedir('netobj_queue')) staging_q = dc.Deque(directory=get_cachedir('staging_queue')) wdg_q = dc.Deque(directory=get_cachedir('wedge_queue')) loop = asyncio.get_event_loop() loop.run_until_complete(main())
def test():
    """Run the stress mix against collections.deque and dc.Deque."""
    random.seed(SEED)
    model = co.deque(range(SIZE))
    subject = dc.Deque(range(SIZE))
    stress(model, subject)
    # Both containers must end up element-for-element identical.
    assert all(a == b for a, b in zip(model, subject))
import sys
import time
import diskcache
import threading

from caches import cache
from caches import channel

# Module-level disk-backed deque shared by the updater job.
dc = diskcache.Deque()


class CWEThreadUpdater(threading.Thread):
    """Thread that runs its target, then fires an optional callback.

    :param callback: callable invoked after the target finishes, or None
    :param callback_args: argument handed to `callback`
    Remaining args/kwargs go to threading.Thread; `target` is intercepted
    and wrapped so the callback can run after it.
    """

    def __init__(self, callback=None, callback_args=None, *args, **kwargs):
        target = kwargs.pop('target')
        super(CWEThreadUpdater, self).__init__(
            target=self.target_with_callback, *args, **kwargs)
        self.callback = callback
        self.method = target
        self.callback_args = callback_args

    def target_with_callback(self):
        # Run the real target first, then notify the caller.
        self.method()
        if self.callback is not None:
            # BUG FIX: previously called self.callback(self.callback),
            # passing the callback to itself; callback_args was stored
            # but never used — pass it as clearly intended.
            self.callback(self.callback_args)


def update_cwe_job():
    print('start modelling of CWE updater')
    for i in range(0, 10):
        payload = dict(source='cwe',
                       data=dict(id=i, message="tests message N %s" % (i)))
        # Tail of the timing decorator (its def starts outside this
        # chunk): hand back the wrapped call's result, then the wrapper.
        return value
    return function_timer


@timerfunc
def gen_netobj_queue(deque, ipnet='172.16.0.0/12'):
    # Fill `deque` with /30 subnet objects carved from `ipnet`; if the
    # (disk-backed) queue already has entries, reuse it untouched —
    # in that case the decorator's timing data is meaningless.
    if len(deque) > 0:
        print('Using existing queue: {}'.format(deque.directory))
        print('Timing data no longer valid!')
    else:
        print('Generating netobj queue, please be patient...')
        netobjs = list(ipaddress.ip_network(ipnet).subnets(new_prefix=30))
        for net in netobjs:
            deque.append(net)
    print('{} IPv4 network objects in queue: {}'.format(
        len(deque), deque.directory))


netobj_q = dc.Deque(directory=get_cachedir('netobj_queue'))
gen_netobj_queue(netobj_q)

# Sanity-check the first network object without consuming it.
test_net = netobj_q.peekleft()
print('Checking first available network')
print('Network with netmask: {}'.format(test_net))
print('Network has host list: {}'.format(list(test_net.hosts())))
iface = ipaddress.ip_interface(str(list(test_net.hosts())[0]))
print('First host has interface: {}'.format(iface))
def __init__(self, x_rate_policy: str):
    """Attach a disk-backed deque of cached responses for this policy."""
    cache_dir = os.path.join(
        __diskcache_path__, f"x_rate_response/{x_rate_policy}")
    self._deque = diskcache.Deque(directory=cache_dir)
    logging.info(
        f"Found {len(self._deque)} cached responses from {x_rate_policy}")
def do_scheduling():
    """Top-level scheduler entry: set the node role, register jobs,
    then run the schedule loop forever.

    Branches on NODE_SETTINGS mode/role: plain peers wait for moon
    data and start member handlers; controller/moon roles register
    their own periodic jobs; adhoc mode joins a preconfigured network.
    NOTE(review): indentation reconstructed from a collapsed source —
    confirm branch nesting against the original file.
    """
    set_initial_role()
    network_cruft_cleaner()
    schedule.run_all(1, 'base-tasks')

    validate_role()
    node_role = NODE_SETTINGS['node_role']
    mode = NODE_SETTINGS['mode']
    if node_role is None and mode == 'peer':
        # Plain member nodes talk to the local daemon endpoint.
        NODE_SETTINGS['use_localhost'] = True
    if mode == 'peer':
        if node_role is None:
            check_time = 33
            baseCheckJob = schedule.every(check_time).seconds
            baseCheckJob.do(run_net_check).tag('base-tasks', 'route-status')
            try:
                data = wait_for_moon(timeout=45)
            except Exception as exc:
                logger.error('ENODATA exception {}'.format(exc))
                put_state_msg('ERROR')
            try:
                handle_moon_data(data)
                put_state_msg('STARTING')
            except MemberNodeError as exc:
                logger.error('ENODATA exception {}'.format(exc))
                put_state_msg('ERROR')
            str_level = logging.getLevelName(logger.getEffectiveLevel())
            logger.debug('Current log level is: {}'.format(str_level))
            startup_handlers()
        else:
            if node_role == 'controller':
                # Controllers pre-generate the pool of /30 networks and
                # drop member-only cache entries.
                netobj_q = dc.Deque(directory=get_cachedir('netobj_queue'))
                gen_netobj_queue(netobj_q)
                cache = dc.Index(get_cachedir())
                for key_str in ['peer', 'moon', 'mstate']:
                    delete_cache_entry(cache, key_str)
            elif node_role == 'moon':
                # Moons run periodic cleanup plus daemon health checks.
                cln_q = dc.Deque(directory=get_cachedir('clean_queue'))
                pub_q = dc.Deque(directory=get_cachedir('pub_queue'))
                schedule.every(37).seconds.do(run_cleanup_check, cln_q, pub_q).tag(
                    'chk-tasks', 'cleanup')
                schedule.every(15).minutes.do(check_daemon_status).tag(
                    'chk-tasks', 'responder')
                schedule.every(15).minutes.do(check_daemon_status,
                                              script='msg_subscriber.py').tag(
                    'chk-tasks', 'subscriber')
                schedule.run_all(1, 'chk-tasks')
    elif mode == 'adhoc':
        logger.debug('Running in adhoc mode...')
        if NODE_SETTINGS['nwid']:
            logger.debug('ADHOC: found network {}'.format(
                NODE_SETTINGS['nwid']))
            do_startup(NODE_SETTINGS['nwid'])
        else:
            logger.error('No network ID found in NODE_SETTINGS!!')
            logger.error('Have you created a network yet?')

    logger.debug('MODE: startup mode is {} and role is {}'.format(
        mode, node_role))
    logger.info(
        'You are running fpnd/node_tools version {}'.format(fpnd_version))

    # Main scheduler loop; runs until the daemon is stopped.
    while True:
        schedule.run_pending()
        time.sleep(1)
import datetime
from multiprocessing import Process

import diskcache as dc

from daemon import Daemon
from nanoservice import Subscriber

from node_tools.helper_funcs import get_cachedir
from node_tools.msg_queues import valid_announce_msg


# Daemon bookkeeping files for the subscriber service.
pid_file = '/tmp/subscriber.pid'
stdout = '/tmp/subscriber.log'
stderr = '/tmp/subscriber_err.log'

# Disk-backed queue shared with the node-message consumers.
node_q = dc.Deque(directory=get_cachedir('node_queue'))


def print_stats(n, duration):
    # Print simple throughput stats for `n` messages over `duration`
    # seconds (duration must be non-zero).
    pairs = [('Total messages', n),
             ('Total duration (s)', duration),
             ('Throughput (msg/s)', n / duration)]
    for pair in pairs:
        label, value = pair
        print(' * {:<25}: {:10,.2f}'.format(label, value))


def service_runner(addr, n):
    """ Run subscriber service and fill node queue (with stats)"""
    # NOTE(review): body continues beyond this chunk boundary.
    s = Subscriber(addr)