def dht_value_get_v1(node, key, expected_data, record_type='skip_validation', retries=2, fallback_observer=None):
    """Read a DHT record via `dht/value/get/v1` on `node` and validate the response.

    Makes up to ``retries + 1`` attempts, sleeping 2 seconds between failed
    attempts.  On the second-to-last attempt, switches to `fallback_observer`
    when one is given.  When `expected_data` is the sentinel string
    ``'not_exist'``, validates that the read failed and that closest nodes were
    returned; otherwise validates that the stored value's ``data`` is contained
    in `expected_data` and its ``key``/``type`` match `key`/`record_type`.

    Returns the decoded JSON payload of the first successful response.
    Raises AssertionError when every attempt fails validation.
    """
    response = None
    for i in range(retries + 1):
        # second-to-last attempt: optionally ask a different observer node
        if i == retries - 1 and fallback_observer:
            node = fallback_observer
        response = request_get(node, 'dht/value/get/v1?record_type=%s&key=%s' % (record_type, key, ), timeout=20)
        try:
            assert response.status_code == 200
            dbg('dht/value/get/v1 [%s] : %s\n' % (node, pprint.pformat(response.json()), ))
            assert response.json()['status'] == 'OK', response.json()
            assert len(response.json()['result']) > 0, response.json()
            assert response.json()['result']['key'] == key, response.json()
            if expected_data == 'not_exist':
                assert response.json()['result']['read'] == 'failed', response.json()
                assert 'value' not in response.json()['result'], response.json()
                assert len(response.json()['result']['closest_nodes']) > 0, response.json()
            else:
                if response.json()['result']['read'] == 'failed':
                    warn('first request failed, retry one more time')
                    response = request_get(node, 'dht/value/get/v1?record_type=%s&key=%s' % (record_type, key, ), timeout=20)
                    assert response.status_code == 200
                    assert response.json()['status'] == 'OK', response.json()
                assert response.json()['result']['read'] == 'success', response.json()
                assert 'value' in response.json()['result'], response.json()
                assert response.json()['result']['value']['data'] in expected_data, response.json()
                assert response.json()['result']['value']['key'] == key, response.json()
                assert response.json()['result']['value']['type'] == record_type, response.json()
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed; validation failures trigger a retry
            time.sleep(2)
            continue
        # success: return immediately instead of burning the remaining attempts
        return response.json()
    # every attempt failed (previously only checked on the second-to-last
    # attempt, so a failure on the final attempt was silently ignored)
    assert False, f'DHT value read validation failed: {node} {key} {expected_data} : {response.json()}'
def main() -> None:
    """Download a wiki titles dataset and verify the project's own `sort`
    produces the same output as coreutils `sort`, both under a 128MB limit."""
    dataset = download_wiki("en-latest-all-titles-in")
    ensure_dependencies()
    own_sort_exe = find_executable("sort", project_path())
    if own_sort_exe is None:
        warn(f"executable 'sort' not found in {project_path()}")
        sys.exit(1)
    with TemporaryDirectory() as tempdir:
        workdir = Path(tempdir)
        reference_output = workdir.joinpath(dataset.name + ".coreutils-sort")
        candidate_output = workdir.joinpath(dataset.name + ".own-sort")
        with subtest("Run coreutils sort with 128MB limit"):
            with open(dataset) as stdin, open(reference_output, "w") as stdout:
                run_with_ulimit("sort", stdin, stdout)
        with subtest("Run own sort with 128MB limit"):
            with open(dataset) as stdin, open(candidate_output, "w") as stdout:
                run_with_ulimit(own_sort_exe, stdin, stdout)
        with subtest("Check if both results matches"):
            # cmp exits non-zero (failing the subtest) on any byte difference
            run(["cmp", str(reference_output), str(candidate_output)])
def packet_list_v1(node, wait_all_finish=False, attempts=90, delay=2, verbose=False):
    """Fetch `packet/list/v1` from `node`.

    With `wait_all_finish`, polls up to `attempts` times (sleeping `delay`
    seconds in between) until no "real" packets remain in progress; background
    idle pings and rotated-label maintenance traffic are ignored.  Returns the
    decoded JSON payload; raises AssertionError if packets never drain.
    """
    if verbose:
        dbg('packet/list/v1 [%s]\n' % node)

    def _ignorable(item):
        # background pings and rotated-label maintenance transfers do not
        # count as in-flight traffic
        if item.get('packet_id', '').count('idle_ping:'):
            return True
        if item.get('direction') == 'outgoing' and item.get('label', '').count('-rotated'):
            return item.get('command') in ('Retrieve', 'Data', 'Identity')
        return False

    response = None
    for _ in range(attempts):
        response = request_get(node, 'packet/list/v1', timeout=20, verbose=verbose)
        assert response.status_code == 200
        if verbose:
            dbg('packet/list/v1 [%s] : %s\n' % (node, pprint.pformat(response.json()), ))
        assert response.json()['status'] == 'OK', response.json()
        pending = any(not _ignorable(item) for item in response.json()['result'])
        if not pending or not wait_all_finish:
            break
        time.sleep(delay)
    else:
        warn('packet/list/v1 [%s] : %s\n' % (node, pprint.pformat(response.json()), ))
        assert False, 'some packets are still have in/out progress on [%s]' % node
    return response.json()
def sanity_check(output, n_buckets, initial, n_threads):
    """Count items across the first `n_buckets` bucket-dump lines and abort the
    grader if the hashmap holds more entries than `initial + n_threads`.

    Each bucket line is "a-b-c-" style; the number of items equals the number
    of '-' separators (split yields one trailing empty field).
    """
    total_items = sum(output[bucket].count("-") for bucket in range(n_buckets))
    if total_items > initial + n_threads:
        warn("Hashmap has more items than expected ")
        exit(1)
def main() -> None:
    """Smoke-test: run the project binary once, exiting non-zero on failure."""
    # Replace "someprojectbinary" with the executable you want to test
    try:
        info("Run someprojectbinary...")
        run_project_executable("someprojectbinary")
        info("OK")
    except OSError as err:
        warn(f"Failed to run command: {err}")
        sys.exit(1)
def open_all_tunnels(event_loop):
    """Concurrently open one SSH tunnel per node (local ports 9000, 9001, ...)
    and log how long the whole batch took."""
    started = time.time()
    info('\nStarting all SSH tunnels\n')
    tunnel_tasks = [
        tsup.open_one_tunnel_async(node, 9000 + pos, event_loop)
        for pos, node in enumerate(ALL_NODES)
    ]
    event_loop.run_until_complete(asyncio.gather(*tunnel_tasks))
    warn('\nAll SSH tunnels opened in %5.3f seconds\n' % (time.time() - started))
def collect_coverage_all_nodes(event_loop, verbose=False):
    """Concurrently download coverage results from every node; when `verbose`,
    log the total elapsed time."""
    started = time.time()
    if verbose:
        info('\nCollecting coverage from all nodes')
    jobs = [
        tsup.collect_coverage_one_node_async(node, event_loop=event_loop, verbose=verbose)
        for node in ALL_NODES
    ]
    event_loop.run_until_complete(asyncio.gather(*jobs))
    if verbose:
        warn('\n\nAll coverage files received in %5.3f seconds\n' % (time.time() - started))
def clean_all_nodes(event_loop, skip_checks=False, verbose=False):
    """Wipe state on every node, then wipe per-customer data, and log timing.

    NOTE(review): `skip_checks` is accepted for signature compatibility but is
    not referenced in this body.
    """
    started = time.time()
    info('\nCleaning all nodes')
    # first pass: clean every node's generic state
    event_loop.run_until_complete(asyncio.gather(*[
        tsup.clean_one_node_async(node, event_loop=event_loop, verbose=verbose)
        for node in ALL_NODES
    ]))
    # second pass: clean customer-specific data by name
    event_loop.run_until_complete(asyncio.gather(*[
        tsup.clean_one_customer_async(customer['name'], event_loop=event_loop, verbose=verbose)
        for customer in ALL_ROLES['customer']
    ]))
    warn('\n\nAll nodes cleaned in %5.3f seconds\n' % (time.time() - started))
def file_list_all_v1(node, expected_reliable=100, reliable_shares=True, attempts=20, delay=5, verbose=False):
    """Poll `file/list/all/v1` on `node` until every file version is at least
    `expected_reliable` percent reliable.

    When `expected_reliable` is None, performs a single request with no
    reliability polling.  When `reliable_shares` is False, files whose
    remote_path starts with 'share_' are excluded from the reliability check.
    Polls up to `attempts` times, sleeping `delay` seconds in between.

    Returns the decoded JSON payload; raises AssertionError when the target
    reliability is not reached in time.
    """
    if expected_reliable is None:
        # single shot: just fetch and validate the listing
        response = request_get(node, 'file/list/all/v1', timeout=20)
        assert response.status_code == 200
        if verbose:
            dbg('file/list/all/v1 [%s] : %s\n' % (node, pprint.pformat(response.json()), ))
        assert response.json()['status'] == 'OK', response.json()
        return response.json()
    response = None
    latest_reliable = None
    latest_reliable_fileinfo = None
    count = 0
    while latest_reliable is None or latest_reliable < expected_reliable:
        response = request_get(node, 'file/list/all/v1', timeout=20)
        assert response.status_code == 200
        if verbose:
            dbg('file/list/all/v1 [%s] : %s\n' % (node, pprint.pformat(response.json()), ))
        assert response.json()['status'] == 'OK', response.json()
        lowest = 100
        lowest_file = None
        for fil in response.json()['result']:
            if fil['remote_path'].startswith('share_') and not reliable_shares:
                continue
            for ver in fil['versions']:
                # 'reliable' comes back as a percentage string like '100%'
                reliable = int(ver['reliable'].replace('%', ''))
                if reliable < lowest:
                    lowest = reliable
                    lowest_file = fil
        latest_reliable = lowest
        # BUGFIX: previously assigned the loop variable `fil` here — the *last*
        # file iterated (and a NameError on an empty result list) instead of
        # the least reliable one reported in the warn() below
        latest_reliable_fileinfo = lowest_file
        if latest_reliable >= expected_reliable:
            break
        count += 1
        if count >= attempts:
            warn(f' latest reliable item info: {latest_reliable_fileinfo}')
            # dead `return` that followed this assert has been removed
            assert False, f"file {lowest_file} is not {expected_reliable} % reliable after {attempts} attempts"
        time.sleep(delay)
    return response.json()
def main() -> None:
    """Grade the locking hashmap: one correctness run, then timing runs at
    1/2/4 threads; the hashmap must scale by at least 1.4x per doubling."""
    lib = ensure_library("liblockhashmap.so")
    extra_env = {"LD_LIBRARY_PATH": str(os.path.dirname(lib))}
    test_lock_hashmap = test_root().joinpath("lock_hashmap")
    if not test_lock_hashmap.exists():
        run(["make", "-C", str(test_root()), str(test_lock_hashmap)])
    times = []
    with tempfile.TemporaryDirectory() as tmpdir:
        stdout_path = f"{tmpdir}/stdout"
        # correctness run: small workload, 4 threads, single bucket
        with open(stdout_path, "w+") as stdout:
            run_project_executable(
                str(test_lock_hashmap),
                args=["-d20000", "-i10000", "-n4", "-r10000", "-u100", "-b1"],
                stdout=stdout,
                extra_env=extra_env,
            )
        output = open(stdout_path).readlines()
        sanity_check(output[1:], 1, 10000, 4)
        # timing runs: average of 3 at each thread count
        for n_threads in (1, 2, 4):
            with subtest(f"Checking {n_threads} thread time"):
                with open(stdout_path, "w+") as stdout:
                    runtime = 0.0
                    for _ in range(3):
                        run_project_executable(
                            str(test_lock_hashmap),
                            args=["-d2000000", "-i100000", f"-n{n_threads}", "-r100000", "-u10"],
                            stdout=stdout,
                            extra_env=extra_env,
                        )
                        output = open(stdout_path).readlines()
                        runtime += float(output[0].strip())
                        sanity_check(output[1:], 512, 100000, n_threads)
                    times.append(runtime / 3)
    # speedup factors between consecutive thread counts
    f1 = times[0] / times[1]
    f2 = times[1] / times[2]
    if f1 < 1.4 or f2 < 1.4:
        warn("Hashmap is not scaling properly: " + str(times))
        exit(1)
def _validate(obs):
    """Query `supplier/list/dht/v1` on observer `obs` until the published DHT
    family matches expectations, or retries run out.

    Closes over the enclosing scope: `customer_id`, `customer_node`,
    `expected_suppliers_number`, `expected_ecc_map`, `retries`, `delay`,
    `accepted_mistakes`.  Returns True on a match, False when the observer is
    unreachable or the info is still wrong after `retries` attempts.
    """
    response = None
    num_suppliers = 0
    attempt = 0
    while True:
        issues = 0
        if attempt >= retries:
            dbg('DHT info still wrong after %d retries, currently see %d suppliers, but expected %d' % (
                attempt, num_suppliers, expected_suppliers_number))
            return False
        try:
            response = request_get(obs, 'supplier/list/dht/v1?id=%s' % customer_id, timeout=20)
        except requests.exceptions.ConnectionError as exc:
            warn('connection error: %r' % exc)
            return False
        if response.status_code != 200:
            attempt += 1
            time.sleep(delay)
            continue
        dbg('supplier/list/dht/v1?id=%s from %s\n%s\n' % (customer_id, obs, pprint.pformat(response.json())))
        payload = response.json()
        if payload['status'] != 'OK':
            attempt += 1
            time.sleep(delay)
            continue
        if not payload['result']:
            attempt += 1
            time.sleep(delay)
            continue
        if not payload['result']['customer_idurl'].count('%s.xml' % customer_node):
            warn('currently see customer_idurl=%r, but expect family owner to be %r\n' % (
                payload['result']['customer_idurl'], customer_node))
            attempt += 1
            time.sleep(delay)
            continue
        suppliers = payload['result']['suppliers']
        num_suppliers = len(suppliers)
        if num_suppliers != expected_suppliers_number:
            warn('currently see %d suppliers but expected number is %d\n' % (num_suppliers, expected_suppliers_number))
            attempt += 1
            time.sleep(delay)
            continue
        # empty slots in the supplier list count toward the mistake budget
        populated = len(list(filter(None, suppliers)))
        if populated != expected_suppliers_number:
            issues += abs(expected_suppliers_number - populated)
            warn('found missing suppliers\n')
        if payload['result']['ecc_map'] != expected_ecc_map:
            issues += 1
            warn('currently see ecc_map=%r, but expect to be %r\n' % (
                payload['result']['ecc_map'], expected_ecc_map))
        if issues > accepted_mistakes:
            warn('currently see %d mistakes\n' % issues)
            attempt += 1
            time.sleep(delay)
            continue
        break
    return True
def start_all_nodes(event_loop, verbose=False):
    """Boot the whole swarm in dependency order: DHT seeds first, then identity
    servers, STUN servers, proxy servers, suppliers, message brokers and
    finally customers.  Each group is started concurrently, groups run
    sequentially."""
    started = time.time()
    if verbose:
        warn('\nStarting BitDust nodes\n')
    # first seed to be started immediately, all other seeds must wait a bit before start
    for idx, dhtseed in enumerate(ALL_ROLES.get('dht-seed', [])):
        tsup.start_one_dht_seed(dhtseed, wait_seconds=(3 if idx > 0 else 0), verbose=verbose)
    if verbose:
        info('ALL DHT SEEDS STARTED\n')

    def _start_group(coros, done_message):
        # run one group of start coroutines to completion before the next group
        event_loop.run_until_complete(asyncio.gather(*coros))
        if verbose:
            info(done_message)

    _start_group([
        tsup.start_one_identity_server_async(idsrv, event_loop)
        for idsrv in ALL_ROLES.get('identity-server', [])
    ], 'ALL ID SERVERS STARTED\n')
    _start_group([
        tsup.start_one_stun_server_async(stunsrv, event_loop)
        for stunsrv in ALL_ROLES.get('stun-server', [])
    ], 'ALL STUN SERVERS STARTED\n')
    _start_group([
        tsup.start_one_proxy_server_async(proxy_server, event_loop)
        for proxy_server in ALL_ROLES.get('proxy-server', [])
    ], 'ALL PROXY SERVERS STARTED\n')
    _start_group([
        tsup.start_one_supplier_async(supplier, event_loop)
        for supplier in ALL_ROLES.get('supplier', [])
    ], 'ALL SUPPLIERS STARTED\n')
    _start_group([
        tsup.start_one_message_broker_async(message_broker, event_loop)
        for message_broker in ALL_ROLES.get('message-broker', [])
    ], 'ALL MESSAGE BROKERS STARTED\n')
    # customers are staggered 3 seconds apart
    _start_group([
        tsup.start_one_customer_async(customer, event_loop, sleep_before_start=i * 3)
        for i, customer in enumerate(ALL_ROLES.get('customer', []))
    ], 'ALL CUSTOMERS STARTED\n')
    warn('ALL NODES STARTED in %5.3f seconds\n' % (time.time() - started))
def kill_all_nodes():
    """Hard-stop every node by killing its sshd over SSH, then report."""
    for host in ALL_NODES:
        info('Shutdown %s' % host)
        tsup.run_ssh_command_and_wait(host, 'pkill -e sshd')
    warn('All nodes stopped')
def stop_all_nodes(event_loop, verbose=False):
    """Stop daemons on every node, one role group at a time, in reverse
    dependency order (customers first, DHT seeds last); logs total duration."""
    started = time.time()
    if verbose:
        warn('\nstop all nodes\n')
    # (role key, log label, completion message, extra stop kwargs)
    stop_plan = [
        ('customer', 'customers', 'ALL CUSTOMERS STOPPED\n', {'skip_checks': True}),
        ('message-broker', 'message-brokers', 'ALL MESSAGE BROKERS STOPPED\n', {}),
        ('supplier', 'suppliers', 'ALL SUPPLIERS STOPPED\n', {}),
        ('proxy-server', 'proxy-servers', 'ALL PROXY SERVERS STOPPED\n', {}),
        ('stun-server', 'stun-servers', 'ALL STUN SERVERS STOPPED\n', {}),
        ('identity-server', 'identity-servers', 'ALL ID SERVERS STOPPED\n', {}),
        ('dht-seed', 'dht-seeds', 'ALL DHT SEEDS STOPPED\n', {}),
    ]
    for role, label, done_message, extra_kwargs in stop_plan:
        members = ALL_ROLES.get(role, [])
        if verbose:
            info('%s: %r' % (label, members))
        event_loop.run_until_complete(asyncio.gather(*[
            tsup.stop_daemon_async(member['name'], event_loop, verbose=verbose, **extra_kwargs)
            for member in members
        ]))
        if verbose:
            info(done_message)
    warn('\nALL NODES STOPPED in %5.3f seconds\n' % (time.time() - started))
def main() -> None:
    """Grade the lock-free hashmap: correctness run, timing runs at 1/2/4
    threads (must scale >= 1.4x per doubling), and a cleanup-on-remove check."""
    test_lock_hashmap = test_root().joinpath("lockfree_hashmap")
    if not test_lock_hashmap.exists():
        run(["make", "-C", str(test_root()), str(test_lock_hashmap)])
    times = []
    with tempfile.TemporaryDirectory() as tmpdir:
        stdout_path = f"{tmpdir}/stdout"
        with subtest("Checking correctness"):
            with open(stdout_path, "w+") as stdout:
                run_project_executable(
                    str(test_lock_hashmap),
                    args=["-d20000", "-i10000", "-n4", "-r10000", "-u100", "-b1"],
                    stdout=stdout,
                )
            output = open(stdout_path).readlines()
            sanity_check(output[1:], 1, 10000, 4)
        # timing runs: average of 3 at each thread count, single bucket
        for n_threads in (1, 2, 4):
            with subtest(f"Checking {n_threads} thread time"):
                with open(stdout_path, "w+") as stdout:
                    runtime = 0.0
                    for _ in range(3):
                        run_project_executable(
                            str(test_lock_hashmap),
                            args=["-d20000", "-i10000", f"-n{n_threads}", "-r10000", "-u10", "-b1"],
                            stdout=stdout,
                        )
                        output = open(stdout_path).readlines()
                        runtime += float(output[0].strip())
                        sanity_check(output[1:], 1, 10000, n_threads)
                    times.append(runtime / 3)
        # speedup factors between consecutive thread counts
        f1 = times[0] / times[1]
        f2 = times[1] / times[2]
        if f1 < 1.4 or f2 < 1.4:
            warn("Hashmap is not scaling properly: " + str(times))
            exit(1)
        with subtest("Checking if hashmap cleans up items when removing"):
            test_cleanup_lockfree = test_root().joinpath("test_cleanup_lockfree")
            if not test_cleanup_lockfree.exists():
                run(["make", "-C", str(test_root()), str(test_cleanup_lockfree)])
            with open(stdout_path, "w+") as stdout:
                run_project_executable(str(test_cleanup_lockfree), stdout=stdout)
                stdout.seek(0)
                lines = stdout.readlines()
            first = float(lines[0])
            second = float(lines[1])
            # second pass should not be much slower if removed items are freed
            if second / first > 1.5:
                warn(f"Hashmap does not cleanup properly when removing items: {first}, {second}")
                exit(1)