def test_dump_and_load_json():
    """Round-trip node data through the JSON dump/load helpers.

    Dumps the 'node' payload into the test cache directory, reloads it,
    then runs json_check() over the reloaded node data and the remaining
    payloads to confirm each survives a dumps/loads round trip.
    """
    cache_dir = get_cachedir(dir_name='fpn_test', user_dirs=True)
    node_data, peer_data, net_data, moon_data = load_data()

    json_dump_file('node', node_data, cache_dir)
    reloaded_node = json_load_file('node', cache_dir)

    for payload in (reloaded_node, peer_data, net_data, moon_data):
        json_check(payload)
# NOTE(review): this chunk is truncated at the start -- it opens mid-way
# through a logger.debug(...) argument list inside a try block whose
# beginning is not visible here, so it is left byte-identical.
# What IS visible: the tail of a peerstate-style runner that drains the
# registration queue via drain_msg_queue() when reg_q is non-empty, logs
# queue sizes, and re-raises any exception after logging it; followed by
# module-level setup that binds a diskcache Index plus eight diskcache
# Deque queues (cfg/node/off/wedge/pub/reg/tmp/wait) to cache dirs, then
# runs main() to completion on the asyncio event loop.
# Presumably main(), logger, dc, asyncio and get_cachedir are defined or
# imported earlier in the original module -- TODO confirm against source.
len(reg_q), list(reg_q))) logger.debug('{} node(s) in wait queue: {}'.format( len(wait_q), list(wait_q))) manage_incoming_nodes(node_q, reg_q, wait_q) if len(reg_q) > 0: drain_msg_queue(reg_q, pub_q, addr='127.0.0.1') logger.debug('{} node(s) in node queue: {}'.format( len(node_q), list(node_q))) logger.debug('{} node(s) in pub queue: {}'.format( len(pub_q), list(pub_q))) logger.debug('{} node(s) in active queue: {}'.format( len(cfg_q), list(cfg_q))) except Exception as exc: logger.error('peerstate exception was: {}'.format(exc)) raise exc cache = dc.Index(get_cachedir()) cfg_q = dc.Deque(directory=get_cachedir('cfg_queue')) node_q = dc.Deque(directory=get_cachedir('node_queue')) off_q = dc.Deque(directory=get_cachedir('off_queue')) wdg_q = dc.Deque(directory=get_cachedir('wedge_queue')) pub_q = dc.Deque(directory=get_cachedir('pub_queue')) reg_q = dc.Deque(directory=get_cachedir('reg_queue')) tmp_q = dc.Deque(directory=get_cachedir('tmp_queue')) wait_q = dc.Deque(directory=get_cachedir('wait_queue')) loop = asyncio.get_event_loop() loop.run_until_complete(main())
# NOTE(review): chunk starts with the tail of an unseen timing decorator
# ('return value' / 'return function_timer' belong to a timerfunc wrapper
# defined above this view), so the line is left byte-identical.
# Visible content: gen_netobj_queue(deque, ipnet='172.16.0.0/12') fills a
# persistent deque with /30 subnets of the given network (only when the
# deque is empty -- an existing queue is reused and a warning printed that
# timing data is stale), then module-level script code builds the
# 'netobj_queue' diskcache Deque, runs the generator, and peeks the first
# network to print its netmask, host list, and the first host interface.
return value return function_timer @timerfunc def gen_netobj_queue(deque, ipnet='172.16.0.0/12'): if len(deque) > 0: print('Using existing queue: {}'.format(deque.directory)) print('Timing data no longer valid!') else: print('Generating netobj queue, please be patient...') netobjs = list(ipaddress.ip_network(ipnet).subnets(new_prefix=30)) for net in netobjs: deque.append(net) print('{} IPv4 network objects in queue: {}'.format( len(deque), deque.directory)) netobj_q = dc.Deque(directory=get_cachedir('netobj_queue')) gen_netobj_queue(netobj_q) test_net = netobj_q.peekleft() print('Checking first available network') print('Network with netmask: {}'.format(test_net)) print('Network has host list: {}'.format(list(test_net.hosts()))) iface = ipaddress.ip_interface(str(list(test_net.hosts())[0])) print('First host has interface: {}'.format(iface))
# NOTE(review): chunk starts mid dict-literal (the assignment target and
# opening brace of this ZeroTier network-config fixture are not visible),
# so the line is left byte-identical.
# Visible content: tail of a test fixture describing a private network
# with a single ACTION_ACCEPT rule and all v4/v6 auto-assign modes off;
# module-level test setup (cache Index, net_queue Deque, max_cache_age
# from NODE_SETTINGS, a tz-aware timestamp, mock ZT API client); and
# json_check(data), which asserts that `data` survives a
# json.dumps/json.loads round trip unchanged.
'nwid': 'b6079f73ca8129ad', 'objtype': 'network', 'private': True, 'remoteTraceLevel': 0, 'remoteTraceTarget': None, 'revision': 1, 'routes': [], 'rules': [{'not': False, 'or': False, 'type': 'ACTION_ACCEPT'}], 'rulesSource': '', 'tags': [], 'v4AssignMode': {'zt': False}, 'v6AssignMode': {'6plane': False, 'rfc4193': False, 'zt': False}} # has_aging = False cache = Index(get_cachedir(dir_name='fpn_test', user_dirs=True)) net_q = Deque(get_cachedir(dir_name='net_queue', user_dirs=True)) max_age = NODE_SETTINGS['max_cache_age'] utc_stamp = datetime.datetime.now(utc) # use local time for console client = mock_zt_api_client() # special test cases def json_check(data): import json json_dump = json.dumps(data, indent=4, separators=(',', ': ')) json_load = json.loads(json_dump) assert data == json_load
# Top-level scheduler entry point. Runs base tasks, validates the node
# role, then branches on NODE_SETTINGS mode/role:
#  * peer with no role: schedules a 33s net-check job, waits up to 45s
#    for moon data, and runs startup handlers
#  * controller: seeds the netobj queue and purges peer/moon/mstate
#    cache entries
#  * moon: schedules cleanup (37s) and daemon-status checks (15m) for
#    the responder and msg_subscriber.py scripts
#  * adhoc: joins the configured network ID if present
# NOTE(review): the 'You are running fpnd/node_tools version ' string
# literal is split across these two physical lines by chunk extraction
# (original was one wrapped call), and the trailing `while True:
# schedule.run_pending() / time.sleep(1)` loop's nesting level cannot be
# confirmed from the collapsed text -- left byte-identical.
# NOTE(review): if wait_for_moon() raises, `data` is never bound yet
# handle_moon_data(data) is still reached -- looks like a latent
# NameError path; verify against the full module before changing.
def do_scheduling(): set_initial_role() network_cruft_cleaner() schedule.run_all(1, 'base-tasks') validate_role() node_role = NODE_SETTINGS['node_role'] mode = NODE_SETTINGS['mode'] if node_role is None and mode == 'peer': NODE_SETTINGS['use_localhost'] = True if mode == 'peer': if node_role is None: check_time = 33 baseCheckJob = schedule.every(check_time).seconds baseCheckJob.do(run_net_check).tag('base-tasks', 'route-status') try: data = wait_for_moon(timeout=45) except Exception as exc: logger.error('ENODATA exception {}'.format(exc)) put_state_msg('ERROR') try: handle_moon_data(data) put_state_msg('STARTING') except MemberNodeError as exc: logger.error('ENODATA exception {}'.format(exc)) put_state_msg('ERROR') str_level = logging.getLevelName(logger.getEffectiveLevel()) logger.debug('Current log level is: {}'.format(str_level)) startup_handlers() else: if node_role == 'controller': netobj_q = dc.Deque(directory=get_cachedir('netobj_queue')) gen_netobj_queue(netobj_q) cache = dc.Index(get_cachedir()) for key_str in ['peer', 'moon', 'mstate']: delete_cache_entry(cache, key_str) elif node_role == 'moon': cln_q = dc.Deque(directory=get_cachedir('clean_queue')) pub_q = dc.Deque(directory=get_cachedir('pub_queue')) schedule.every(37).seconds.do(run_cleanup_check, cln_q, pub_q).tag( 'chk-tasks', 'cleanup') schedule.every(15).minutes.do(check_daemon_status).tag( 'chk-tasks', 'responder') schedule.every(15).minutes.do(check_daemon_status, script='msg_subscriber.py').tag( 'chk-tasks', 'subscriber') schedule.run_all(1, 'chk-tasks') elif mode == 'adhoc': logger.debug('Running in adhoc mode...') if NODE_SETTINGS['nwid']: logger.debug('ADHOC: found network {}'.format( NODE_SETTINGS['nwid'])) do_startup(NODE_SETTINGS['nwid']) else: logger.error('No network ID found in NODE_SETTINGS!!') logger.error('Have you created a network yet?') logger.debug('MODE: startup mode is {} and role is {}'.format( mode, node_role)) logger.info( 'You are running fpnd/node_tools version 
{}'.format(fpnd_version)) while True: schedule.run_pending() time.sleep(1)
# NOTE(review): chunk is truncated at the start -- it opens inside an
# if/else whose condition is not visible (a network-health check), so it
# is left byte-identical.
# Visible content: tail of a nodestate-style runner. In adhoc mode it
# logs whether NODE_SETTINGS['nwid'] is set, and when netStatus has
# entries it pulls identity/ztaddress/status from the first one and runs
# do_peer_check() against the address; exceptions are logged and
# re-raised. Module-level code binds the cache Index and runs main() on
# the asyncio event loop.
logger.error('HEALTH: network is unreachable!!') put_state_msg('ERROR') else: logger.debug('HEALTH: wait_for_nets is {}'.format(wait_for_nets)) elif NODE_SETTINGS['mode'] == 'adhoc': if not NODE_SETTINGS['nwid']: logger.warning('ADHOC: network ID not set {}'.format(NODE_SETTINGS['nwid'])) else: logger.debug('ADHOC: found network ID {}'.format(NODE_SETTINGS['nwid'])) if netStatus != []: nwid = netStatus[0]['identity'] addr = netStatus[0]['ztaddress'] nwstat = netStatus[0]['status'] logger.debug('ADHOC: found network with ID {}'.format(nwid)) logger.debug('ADHOC: network status is {}'.format(nwstat)) if addr: res = do_peer_check(addr) # elif NODE_SETTINGS['nwid']: # run_ztcli_cmd(action='join', extra=NODE_SETTINGS['nwid']) except Exception as exc: logger.error('nodestate exception was: {}'.format(exc)) raise exc cache = Index(get_cachedir()) loop = asyncio.get_event_loop() loop.run_until_complete(main())
# Module-level setup for the msg_responder daemon: DEBUG-level syslog
# logging via /dev/log (daemon facility) with a module/function/lineno
# formatter, a pid file under the runtime dir, and ten diskcache Deque
# queues (cfg/hold/off/pub/wedge/node/reg/wait/tmp/clean) bound to their
# cache directories.
# NOTE(review): chunk is truncated at the end -- clean_stale_cfgs() is
# cut off immediately after its opening triple-quote, so its body and
# contract are not visible here; left byte-identical.
# set log level and handler/formatter logger.setLevel(logging.DEBUG) logging.getLogger('node_tools.msg_queues').level = logging.DEBUG handler = logging.handlers.SysLogHandler(address='/dev/log', facility='daemon') formatter = logging.Formatter( '%(module)s: %(funcName)s+%(lineno)s: %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) # pid_file = '/tmp/responder.pid' pid_file = os.path.join(get_runtimedir(), '{}.pid'.format('msg_responder')) # stdout = '/tmp/responder.log' # stderr = '/tmp/responder_err.log' cfg_q = dc.Deque(directory=get_cachedir('cfg_queue')) hold_q = dc.Deque(directory=get_cachedir('hold_queue')) off_q = dc.Deque(directory=get_cachedir('off_queue')) pub_q = dc.Deque(directory=get_cachedir('pub_queue')) wdg_q = dc.Deque(directory=get_cachedir('wedge_queue')) node_q = dc.Deque(directory=get_cachedir('node_queue')) reg_q = dc.Deque(directory=get_cachedir('reg_queue')) wait_q = dc.Deque(directory=get_cachedir('wait_queue')) tmp_q = dc.Deque(directory=get_cachedir('tmp_queue')) cln_q = dc.Deque(directory=get_cachedir('clean_queue')) def clean_stale_cfgs(key_str, deque): """
# NOTE(review): chunk is truncated at the start -- it opens mid-way
# through a logging call's argument list inside an unseen try block, so
# it is left byte-identical.
# Visible content: tail of a netstate-style runner that either closes or
# unwraps the member network depending on whether boot_list is empty
# (both with min_nodes=3); on exception it cleans up orphans, clears the
# id trie when the net trie is empty but the id trie is not, and
# re-raises. Module-level code binds the cache Index and five diskcache
# queues (off/node/netobj/staging/wedge), then runs main() on the
# asyncio event loop.
len(boot_list), boot_list)) if len(boot_list) != 0: await close_mbr_net(client, node_list, boot_list, min_nodes=3) else: await unwrap_mbr_net(client, node_list, boot_list, min_nodes=3) except Exception as exc: logger.error('netstate exception was: {}'.format(exc)) await cleanup_orphans(client) if list(ct.net_trie) == [] and list(ct.id_trie) != []: ct.id_trie.clear() raise exc cache = dc.Index(get_cachedir()) off_q = dc.Deque(directory=get_cachedir('off_queue')) node_q = dc.Deque(directory=get_cachedir('node_queue')) netobj_q = dc.Deque(directory=get_cachedir('netobj_queue')) staging_q = dc.Deque(directory=get_cachedir('staging_queue')) wdg_q = dc.Deque(directory=get_cachedir('wedge_queue')) loop = asyncio.get_event_loop() loop.run_until_complete(main())
# Module-level setup for the msg_subscriber daemon: mirrors the
# msg_responder setup (DEBUG syslog logging via /dev/log, pid file under
# the runtime dir) but binds only five queues (cfg/node/off/pub/wedge).
# handle_msg(msg) validates an announce message, appends valid node IDs
# to node_q inside a node_q.transact() block with debug/info logging,
# and logs a warning for invalid messages.
# NOTE(review): once indentation is collapsed, the exact extent of the
# `with node_q.transact():` block (whether the follow-up debug/info
# logging sits inside or after it) cannot be confirmed from this text --
# left byte-identical rather than reconstructed.
# set log level and handler/formatter logger.setLevel(logging.DEBUG) logging.getLogger('node_tools.msg_queues').level = logging.DEBUG handler = logging.handlers.SysLogHandler(address='/dev/log', facility='daemon') formatter = logging.Formatter( '%(module)s: %(funcName)s+%(lineno)s: %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) # pid_file = '/tmp/subscriber.pid' pid_file = os.path.join(get_runtimedir(), '{}.pid'.format('msg_subscriber')) # std_out = '/tmp/subscriber.log' # std_err = '/tmp/subscriber_err.log' cfg_q = dc.Deque(directory=get_cachedir('cfg_queue')) node_q = dc.Deque(directory=get_cachedir('node_queue')) off_q = dc.Deque(directory=get_cachedir('off_queue')) pub_q = dc.Deque(directory=get_cachedir('pub_queue')) wdg_q = dc.Deque(directory=get_cachedir('wedge_queue')) def handle_msg(msg): if valid_announce_msg(msg): logger.debug('Got valid node ID: {}'.format(msg)) with node_q.transact(): node_q.append(msg) logger.debug('Adding node id: {}'.format(msg)) logger.info('{} nodes in node queue'.format(len(node_q))) else: logger.warning('Bad node msg is {}'.format(msg))
# Subscriber benchmark/demo module: imports, /tmp pid/log paths, and the
# node_queue Deque. print_stats(n, duration) prints total messages,
# total duration, and throughput (n / duration) in aligned columns.
# NOTE(review): service_runner() is truncated after its first statement
# (`s = Subscriber(addr)`) -- the rest of its body is outside this view,
# so the line is left byte-identical. Division by zero in print_stats
# when duration == 0 is possible but presumably never occurs in the
# benchmark caller -- verify if reusing.
import datetime from multiprocessing import Process import diskcache as dc from daemon import Daemon from nanoservice import Subscriber from node_tools.helper_funcs import get_cachedir from node_tools.msg_queues import valid_announce_msg pid_file = '/tmp/subscriber.pid' stdout = '/tmp/subscriber.log' stderr = '/tmp/subscriber_err.log' node_q = dc.Deque(directory=get_cachedir('node_queue')) def print_stats(n, duration): pairs = [('Total messages', n), ('Total duration (s)', duration), ('Throughput (msg/s)', n / duration)] for pair in pairs: label, value = pair print(' * {:<25}: {:10,.2f}'.format(label, value)) def service_runner(addr, n): """ Run subscriber service and fill node queue (with stats)""" s = Subscriber(addr)
# NOTE(review): chunk is truncated at the start -- it opens mid-way
# through an async main() whose definition and try-block opening are not
# visible, so it is left byte-identical.
# Visible content: loads peer data into the cache, fetches the
# controller network list via the async client, then fetches and
# collects per-network detail into net_data (the cache-loading of net
# data is commented out); exceptions are re-raised. Module-level code
# binds the 'ctlr_data' cache Index and runs main() on the asyncio
# event loop.
peer_keys = find_keys(cache, 'peer') print('Returned peer keys: {}'.format(peer_keys)) load_cache_by_type(cache, peer_data, 'peer') # get/display all available network data await client.get_data('controller/network') print('{} networks found'.format(len(client.data))) net_list = client.data net_data = [] for net_id in net_list: # print(net_id) # Get details about each network await client.get_data('controller/network/{}'.format(net_id)) # pprint(client.data) net_data.append(client.data) # load_cache_by_type(cache, net_data, 'net') # net_keys = find_keys(cache, 'net') # print('{} network keys found'.format(len(net_list))) # pprint(net_data) except Exception as exc: # print(str(exc)) raise exc cache = Index(get_cachedir(dir_name='ctlr_data')) # cache.clear() loop = asyncio.get_event_loop() loop.run_until_complete(main())