def chutney(reactor, temp_dir):
    chutney_dir = join(temp_dir, 'chutney')
    mkdir(chutney_dir)

    # TODO:
    # check for 'tor' binary explicitly and emit a "skip" if we can't
    # find it

    # XXX yuck! should add a setup.py to chutney so we can at least
    # "pip install <path to tarball>" and/or depend on chutney in "pip
    # install -e .[dev]" (i.e. in the 'dev' extra)
    #
    # https://trac.torproject.org/projects/tor/ticket/20343
    proto = _DumpOutputProtocol(None)
    reactor.spawnProcess(
        proto,
        '/usr/bin/git',
        (
            '/usr/bin/git', 'clone', '--depth=1',
            'https://git.torproject.org/chutney.git',
            chutney_dir,
        )
    )
    pytest.blockon(proto.done)
    return chutney_dir
def introducer(reactor, temp_dir, flog_gatherer, request):
    config = '''
[node]
nickname = introducer0
web.port = 4560
log_gatherer.furl = {log_furl}
'''.format(log_furl=flog_gatherer)

    intro_dir = join(temp_dir, 'introducer')
    print("making introducer", intro_dir)

    if not exists(intro_dir):
        mkdir(intro_dir)
        done_proto = _ProcessExitedProtocol()
        reactor.spawnProcess(
            done_proto,
            sys.executable,
            (
                sys.executable, '-m', 'allmydata.scripts.runner',
                'create-introducer',
                '--listen=tcp',
                '--hostname=localhost',
                intro_dir,
            ),
        )
        pytest.blockon(done_proto.done)

    # over-write the config file with our stuff
    with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
        f.write(config)

    # on windows, "tahoe start" means: run forever in the foreground,
    # but on linux it means daemonize. "tahoe run" is consistent
    # between platforms.
    protocol = _MagicTextProtocol('introducer running')
    process = reactor.spawnProcess(
        protocol,
        sys.executable,
        (
            sys.executable, '-m', 'allmydata.scripts.runner',
            'run',
            intro_dir,
        ),
    )

    def cleanup():
        try:
            process.signalProcess('TERM')
            pytest.blockon(protocol.exited)
        except ProcessExitedAlready:
            pass
    request.addfinalizer(cleanup)

    pytest.blockon(protocol.magic_seen)
    return process
def _create_virtualenv(request, python=sys.executable):
    fixed_venv = request.config.getoption('venv', None)
    if python != sys.executable:
        fixed_venv = None  # won't re-use venvs for specific Pythons
    do_coverage = request.config.getoption('coverage', False)

    tmpdir = fixed_venv
    if tmpdir is None:
        tmpdir = mkdtemp()
        print("Creating virtualenv", tmpdir)
    else:
        print("re-using virtualenv", tmpdir)

    if default_requirements is None:
        raise RuntimeError(
            "You must set default_requirements in {}".format(__file__))

    # XXX "the python exe" should be a fixture too -- and then vary it
    # over python, pypy etc not sys.executable
    # Could make the different package-options parametrized too
    reqs = default_requirements
    if do_coverage and 'coverage' not in reqs:
        reqs.append('coverage')  # py.test wants coverage < 4.0

    if fixed_venv is None or not request.config.getoption('no_install', False):
        # actual venv creation
        pytest.blockon(
            ensureDeferred(
                create_virtualenv(python, tmpdir, environ, reqs, logging=False)))

    # need to ensure venv has sitecustomize.py for coverage, and set
    # up env-var and config-file for "coverage" program
    if do_coverage:
        print(" enabling coverage in Crossbar.io + subprocesses")
        coveragerc = _write_coverage_files(tmpdir)
        environ['COVERAGE_PROCESS_START'] = coveragerc

    def cleanup():
        # if we did coverage analysis, save the .coverage* files
        # XXX do they go to temp_dir too/instead?
        if do_coverage:
            for covfile in listdir(tmpdir):
                if covfile.startswith('.coverage'):
                    p = path.join(tmpdir, covfile)
                    print(' saved "{}" to "{}".'.format(p, path.curdir))
                    shutil.move(p, path.curdir)

        if fixed_venv is None:
            if request.config.getoption('keep', False):
                print('Preserving {}'.format(tmpdir))
            else:
                try:
                    shutil.rmtree(tmpdir)
                except Exception as e:
                    print("Failed to remove tmpdir: {}".format(e))
    request.addfinalizer(cleanup)

    return tmpdir
def tor_introducer(reactor, temp_dir, flog_gatherer, request):
    config = '''
[node]
nickname = introducer_tor
web.port = 4561
log_gatherer.furl = {log_furl}
'''.format(log_furl=flog_gatherer)

    intro_dir = join(temp_dir, 'introducer_tor')
    print("making introducer", intro_dir)

    if not exists(intro_dir):
        mkdir(intro_dir)
        done_proto = _ProcessExitedProtocol()
        reactor.spawnProcess(
            done_proto,
            sys.executable,
            (
                sys.executable, '-m', 'allmydata.scripts.runner',
                'create-introducer',
                '--tor-control-port', 'tcp:localhost:8010',
                '--listen=tor',
                intro_dir,
            ),
        )
        pytest.blockon(done_proto.done)

    # over-write the config file with our stuff
    with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
        f.write(config)

    # on windows, "tahoe start" means: run forever in the foreground,
    # but on linux it means daemonize. "tahoe run" is consistent
    # between platforms.
    protocol = _MagicTextProtocol('introducer running')
    process = reactor.spawnProcess(
        protocol,
        sys.executable,
        (
            sys.executable, '-m', 'allmydata.scripts.runner',
            'run',
            intro_dir,
        ),
    )

    def cleanup():
        try:
            process.signalProcess('TERM')
            pytest.blockon(protocol.exited)
        except ProcessExitedAlready:
            pass
    request.addfinalizer(cleanup)

    pytest.blockon(protocol.magic_seen)
    return process
def flog_gatherer(reactor, temp_dir, flog_binary, request):
    out_protocol = _CollectOutputProtocol()
    gather_dir = join(temp_dir, 'flog_gather')
    process = reactor.spawnProcess(
        out_protocol,
        flog_binary,
        (
            'flogtool', 'create-gatherer',
            '--location', 'tcp:localhost:3117',
            '--port', '3117',
            gather_dir,
        )
    )
    pytest.blockon(out_protocol.done)

    twistd_protocol = _MagicTextProtocol("Gatherer waiting at")
    twistd_process = reactor.spawnProcess(
        twistd_protocol,
        which('twistd')[0],
        (
            'twistd', '--nodaemon', '--python',
            join(gather_dir, 'gatherer.tac'),
        ),
        path=gather_dir,
    )
    pytest.blockon(twistd_protocol.magic_seen)

    def cleanup():
        try:
            twistd_process.signalProcess('TERM')
            pytest.blockon(twistd_protocol.exited)
        except ProcessExitedAlready:
            pass

        flog_file = mktemp('.flog_dump')
        flog_protocol = _DumpOutputProtocol(open(flog_file, 'w'))
        flog_dir = join(temp_dir, 'flog_gather')
        flogs = [x for x in listdir(flog_dir) if x.endswith('.flog')]

        print("Dumping {} flogtool logfiles to '{}'".format(
            len(flogs), flog_file))
        reactor.spawnProcess(
            flog_protocol,
            flog_binary,
            ('flogtool', 'dump', join(temp_dir, 'flog_gather', flogs[0])),
        )
        pytest.blockon(flog_protocol.done)
    request.addfinalizer(cleanup)

    with open(join(gather_dir, 'log_gatherer.furl'), 'r') as f:
        furl = f.read().strip()
    return furl
def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer,
                 name, web_port, storage=True, magic_text=None):
    """
    Helper to create a single node, run it and return the instance
    spawnProcess returned (ITransport)
    """
    node_dir = join(temp_dir, name)
    if web_port is None:
        web_port = ''
    if not exists(node_dir):
        print("creating", node_dir)
        mkdir(node_dir)
        done_proto = _ProcessExitedProtocol()
        args = [
            sys.executable, '-m', 'allmydata.scripts.runner',
            'create-node',
            '--nickname', name,
            '--introducer', introducer_furl,
            '--hostname', 'localhost',
            '--listen', 'tcp',
        ]
        if not storage:
            args.append('--no-storage')
        args.append(node_dir)

        reactor.spawnProcess(
            done_proto,
            sys.executable,
            args,
        )
        pytest.blockon(done_proto.done)

        with open(join(node_dir, 'tahoe.cfg'), 'w') as f:
            f.write('''
[node]
nickname = %(name)s
web.port = %(web_port)s
web.static = public_html
log_gatherer.furl = %(log_furl)s

[client]
# Which services should this client connect to?
introducer.furl = %(furl)s
shares.needed = 2
shares.happy = 3
shares.total = 4
''' % {
                'name': name,
                'furl': introducer_furl,
                'web_port': web_port,
                'log_furl': flog_gatherer,
            })
    return _run_node(reactor, node_dir, request, magic_text)
def flog_gatherer(reactor, temp_dir, flog_binary, request):
    out_protocol = _CollectOutputProtocol()
    gather_dir = join(temp_dir, 'flog_gather')
    process = reactor.spawnProcess(
        out_protocol,
        flog_binary,
        (
            'flogtool', 'create-gatherer',
            '--location', 'tcp:localhost:3117',
            '--port', '3117',
            gather_dir,
        )
    )
    pytest.blockon(out_protocol.done)

    twistd_protocol = _MagicTextProtocol("Gatherer waiting at")
    twistd_process = reactor.spawnProcess(
        twistd_protocol,
        which('twistd'),
        (
            'twistd', '--nodaemon', '--python',
            join(gather_dir, 'gatherer.tac'),
        ),
        path=gather_dir,
    )
    pytest.blockon(twistd_protocol.magic_seen)

    def cleanup():
        try:
            twistd_process.signalProcess('TERM')
            pytest.blockon(twistd_protocol.exited)
        except ProcessExitedAlready:
            pass

        flog_file = mktemp('.flog_dump')
        flog_protocol = _DumpOutputProtocol(open(flog_file, 'w'))
        flog_dir = join(temp_dir, 'flog_gather')
        flogs = [x for x in listdir(flog_dir) if x.endswith('.flog')]

        print("Dumping {} flogtool logfiles to '{}'".format(len(flogs), flog_file))
        reactor.spawnProcess(
            flog_protocol,
            flog_binary,
            (
                'flogtool', 'dump',
                join(temp_dir, 'flog_gather', flogs[0])
            ),
        )
        pytest.blockon(flog_protocol.done)
    request.addfinalizer(cleanup)

    with open(join(gather_dir, 'log_gatherer.furl'), 'r') as f:
        furl = f.read().strip()
    return furl
def _cleanup_crossbar(protocol):
    print("Running Crossbar.io cleanup")
    try:
        # if this is KILL we won't get coverage data written
        protocol.transport.signalProcess('TERM')
        pytest.blockon(sleep(1))
        # protocol.transport.signalProcess('KILL')
    except ProcessExitedAlready:
        print(" crossbar already exited.")
def connect(uri):
    """
    Constructs a CloseTestFactory, connects to the desired WebSocket
    endpoint URI, waits for the CloseTestProtocol to be constructed,
    and then returns the protocol instance.
    """
    factory = CloseTestFactory(uri)
    factory.setProtocolOptions(failByDrop=False, openHandshakeTimeout=1)

    ws.connectWS(factory, timeout=1)

    protocol = pytest.blockon(factory.connected)
    pytest.blockon(protocol.opened)

    return protocol
def teardown():
    print("#####TEARDOWN")
    all_stopped = []
    for ntypes in nodes.values():
        for n in ntypes:
            cb, d = create_callback(timeout=10)
            n.storage.stop(cb=cb)
            all_stopped.append(d)
    dl = defer.DeferredList(all_stopped)
    try:
        pytest.blockon(dl)
    except:
        print("### Some storage plugins might have failed to stop ###")
        traceback.print_exc()
def cleanup():
    print("Tearing down Chutney Tor network")
    proto = _CollectOutputProtocol()
    reactor.spawnProcess(
        proto,
        sys.executable,
        (
            sys.executable, '-m', 'chutney.TorNet', 'stop',
            join(chutney_dir, 'networks', 'basic'),
        ),
        path=join(chutney_dir),
        env={"PYTHONPATH": join(chutney_dir, "lib")},
    )
    pytest.blockon(proto.done)
def connect(uri):
    """
    Constructs a MessageTestFactory, connects to the desired WebSocket
    endpoint URI, waits for the MessageTestProtocol to be constructed,
    and then returns the protocol instance.
    """
    factory = MessageTestFactory(uri)
    factory.setProtocolOptions(failByDrop=False, openHandshakeTimeout=1)

    ws.connectWS(factory, timeout=1)

    protocol = pytest.blockon(factory.connected)
    pytest.blockon(protocol.opened)

    return protocol
def _cfx_edge(request, reactor):
    cbdir = os.path.join(os.path.dirname(__file__), '../{}/.crossbar'.format(node))

    class WaitForTransport(object):
        """
        Super hacky, but ... other suggestions? Could busy-wait for
        ports to become connect()-able? Better text to search for?
        """
        def __init__(self, done):
            self.data = ''
            self.done = done

        def write(self, data):
            print(data, end='')
            if self.done.called:
                return

            # in case it's not line-buffered for some crazy reason
            self.data = self.data + data
            if "Skipping any local node configuration (on_start_apply_config is off)" in self.data or \
               "MrealmController initialized" in self.data or \
               "Domain controller ready" in self.data or \
               "Connected to Crossbar.io FX Master" in self.data:
                print("Detected crossbar node is up!")
                self.done.callback(None)

    listening = Deferred()
    protocol = pytest.blockon(
        start_cfx(
            reactor, personality, cbdir, config=None,
            stdout=WaitForTransport(listening),
            stderr=WaitForTransport(listening),
            log_level='info'))

    request.addfinalizer(partial(_cleanup_crossbar, protocol))

    timeout = sleep(crossbar_startup_timeout)
    pytest.blockon(
        DeferredList([timeout, listening], fireOnOneErrback=True, fireOnOneCallback=True))
    if timeout.called:
        raise RuntimeError("Timeout waiting for crossbar to start")

    return protocol
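# Hedged sketch, not from the source: the WaitForTransport docstring above
# wonders whether one could busy-wait for ports to become connect()-able
# instead of grepping log output.  A stdlib-only version of that idea might
# look like this; the timeout/interval values are illustrative, and inside
# these Twisted-driven fixtures the blocking sleep would typically be pushed
# to a thread (cf. threads.defer_to_thread(time.sleep, ...) used elsewhere
# in this collection).
import socket
import time


def wait_for_port(host, port, timeout=30.0, interval=0.2):
    """Poll until a TCP connect() to (host, port) succeeds, or raise."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            sock = socket.create_connection((host, port), timeout=interval)
        except socket.error:
            time.sleep(interval)
        else:
            sock.close()
            return  # something is accepting connections
    raise RuntimeError("Timed out waiting for {}:{}".format(host, port))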
def invalid_version_response(agent, request):
    """
    A fixture that performs a bad handshake with a prohibited WebSocket
    version.
    """
    response = pytest.blockon(make_request(agent, version=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def bad_protocol_response(agent, request):
    """
    A fixture that performs a bad handshake with an invalid
    Sec-WebSocket-Protocol header.
    """
    response = pytest.blockon(make_request(agent, protocol=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def increment_response(agent, request):
    """
    A fixture that connects to the dumb-increment plugin with the given
    subprotocol list.
    """
    response = pytest.blockon(make_request(agent, path='/dumb-increment',
                                           protocol=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def trusted_origin_response(agent, request):
    """
    A fixture that performs a handshake using one of the explicitly
    trusted test Origins.
    """
    response = pytest.blockon(make_request(agent, path='/origin-whitelist',
                                           origin=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def cleanup():
    try:
        twistd_process.signalProcess('TERM')
        pytest.blockon(twistd_protocol.exited)
    except ProcessExitedAlready:
        pass

    flog_file = mktemp('.flog_dump')
    flog_protocol = _DumpOutputProtocol(open(flog_file, 'w'))
    flog_dir = join(temp_dir, 'flog_gather')
    flogs = [x for x in listdir(flog_dir) if x.endswith('.flog')]

    print("Dumping {} flogtool logfiles to '{}'".format(
        len(flogs), flog_file))
    reactor.spawnProcess(
        flog_protocol,
        flog_binary,
        ('flogtool', 'dump', join(temp_dir, 'flog_gather', flogs[0])),
    )
    pytest.blockon(flog_protocol.done)
def alice_invite(reactor, alice, temp_dir, request):
    node_dir = join(temp_dir, 'alice')

    # FIXME XXX by the time we see "client running" in the logs, the
    # storage servers aren't "really" ready to roll yet (uploads
    # fairly consistently fail if we don't hack in this pause...)
    import time
    time.sleep(5)

    proto = _CollectOutputProtocol()
    reactor.spawnProcess(
        proto,
        sys.executable,
        [
            sys.executable, '-m', 'allmydata.scripts.runner',
            'magic-folder', 'create',
            '--poll-interval', '2',
            '--basedir', node_dir,
            'magik:', 'alice',
            join(temp_dir, 'magic-alice'),
        ]
    )
    pytest.blockon(proto.done)

    proto = _CollectOutputProtocol()
    reactor.spawnProcess(
        proto,
        sys.executable,
        [
            sys.executable, '-m', 'allmydata.scripts.runner',
            'magic-folder', 'invite',
            '--basedir', node_dir,
            'magik:', 'bob',
        ]
    )
    pytest.blockon(proto.done)
    invite = proto.output.getvalue()
    print("invite from alice", invite)

    # before magic-folder works, we have to stop and restart (this is
    # crappy for the tests -- can we fix it in magic-folder?)
    try:
        alice.signalProcess('TERM')
        pytest.blockon(alice.exited)
    except ProcessExitedAlready:
        pass

    magic_text = 'Completed initial Magic Folder scan successfully'
    pytest.blockon(_run_node(reactor, node_dir, request, magic_text))
    return invite
def good_origin_response(agent, request):
    """
    A fixture that performs a handshake with an Origin that matches the
    server.
    """
    host = make_authority(host=request.param[0])
    origin = make_root(host=request.param[0])
    version = request.param[1]

    response = pytest.blockon(
        make_request(agent, origin=origin, host=host, version=version))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def magic_folder(reactor, alice_invite, alice, bob, temp_dir, request):
    print("pairing magic-folder")
    bob_dir = join(temp_dir, 'bob')
    proto = _CollectOutputProtocol()
    transport = reactor.spawnProcess(
        proto,
        sys.executable,
        [
            sys.executable, '-m', 'allmydata.scripts.runner',
            'magic-folder', 'join',
            '--poll-interval', '2',
            '--basedir', bob_dir,
            alice_invite,
            join(temp_dir, 'magic-bob'),
        ]
    )
    pytest.blockon(proto.done)

    # before magic-folder works, we have to stop and restart (this is
    # crappy for the tests -- can we fix it in magic-folder?)
    try:
        print("Sending TERM to Bob")
        bob.signalProcess('TERM')
        pytest.blockon(bob.exited)
    except ProcessExitedAlready:
        pass

    magic_text = 'Completed initial Magic Folder scan successfully'
    pytest.blockon(_run_node(reactor, bob_dir, request, magic_text))
    return (join(temp_dir, 'magic-alice'), join(temp_dir, 'magic-bob'))
def bad_origin_response(agent, request):
    """
    A fixture that performs a good handshake, but with an Origin that
    does not match the server.
    """
    origin = request.param[0]
    host = request.param[1]
    version = request.param[2]

    response = pytest.blockon(
        make_request(agent, origin=origin, host=host, version=version))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def bob(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
    try:
        mkdir(join(temp_dir, 'magic-bob'))
    except OSError:
        pass
    process = pytest.blockon(
        _create_node(
            reactor, request, temp_dir, introducer_furl, flog_gatherer, "bob",
            web_port="tcp:9981:interface=localhost",
            storage=False,
        )
    )
    return process
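# Hedged sketch, not part of this excerpt: the 'alice' fixture that
# alice_invite and magic_folder above depend on presumably mirrors 'bob'
# and is built on the same _create_node() helper.  The web port and the
# 'magic-alice' directory name here are illustrative assumptions.
def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
    try:
        mkdir(join(temp_dir, 'magic-alice'))
    except OSError:
        pass
    process = pytest.blockon(
        _create_node(
            reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice",
            web_port="tcp:9980:interface=localhost",  # assumed port, one below bob's
            storage=False,
        )
    )
    return process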
def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer, request):
    nodes = []
    # start all 5 nodes in parallel
    for x in range(5):
        name = 'node{}'.format(x)
        # tub_port = 9900 + x
        nodes.append(
            pytest.blockon(
                _create_node(
                    reactor, request, temp_dir, introducer_furl, flog_gatherer, name,
                    web_port=None, storage=True,
                )
            )
        )
    #nodes = pytest.blockon(DeferredList(nodes))
    return nodes
def server(request, reactor):
    logger = Logger(FakeMemoryLog())
    app = DownloadEFolder(
        logger=logger,
        download_database=DemoMemoryDownloadDatabase(),
        storage_path=None,
        fernet=None,
        vbms_client=None,
        queue=None,
        env_name="testing",
    )
    endpoint = TCP4ServerEndpoint(reactor, 0)
    d = endpoint.listen(Site(app.app.resource(), logPath="/dev/null"))

    def addfinalizer(port):
        # Add a callback so that the server is shut down at the end of the test.
        request.addfinalizer(port.stopListening)
        return port

    d.addCallback(addfinalizer)
    return pytest.blockon(d)
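# Hedged usage sketch, not from the source: a test that talks to the
# listening port returned by the server fixture above.  getHost() and
# IResponse.code are Twisted APIs; the use of treq and the request path
# are illustrative assumptions, not something this project necessarily uses.
import treq  # assumed to be available; any Twisted HTTP client would do


def test_server_is_reachable(server):
    port_number = server.getHost().port  # IListeningPort -> IPv4Address.port
    response = pytest.blockon(
        treq.get("http://127.0.0.1:{}/".format(port_number)))
    # only assert reachability; the app's routes are not assumed here
    assert 100 <= response.code < 600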
def bad_key_response(agent, request):
    """A fixture that performs a bad handshake with an invalid key."""
    response = pytest.blockon(make_request(agent, key=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def cleanup():
    # shut the transport down before the assert, for when/if assert fails
    pytest.blockon(
        manage.call(
            "crossbar.node.functestee.worker.testee.stop_router_transport",
            "foo"))
def setup(request):
    # Create 3 test nodes per storage type
    print("###SETUP")
    tunnels = {}
    nodes = {}
    for ti in range(len(storage_types)):
        nodes[storage_types[ti]] = [
            calvin.tests.TestNode(["calvinip://127.0.0.1:50{}{}".format(ti, i)])
            for i in range(3)]

    def prep_node(stype, n):
        n.storage = storage.Storage(n)
        if stype == "proxy":
            try:
                n.network = DummyNetwork(n)
                n.proto = DummyProto(n)
            except:
                traceback.print_exc()
        if stype != "notstarted":
            cb, d = create_callback(timeout=10, test_part=stype + " start")
            n.storage.start(cb=cb)
            return d

    all_started = []
    for ti in range(len(storage_types)):
        stype = storage_types[ti]
        if stype == "dht":
            try:
                _conf.set('global', 'storage_type', 'dht')
                all_started.extend(map(partial(prep_node, stype), nodes[stype]))
            except:
                traceback.print_exc()
        elif stype == "securedht":
            try:
                homefolder = get_home()
                credentials_testdir = os.path.join(homefolder, ".calvin", "test_all_storage_dir")
                runtimesdir = os.path.join(credentials_testdir, "runtimes")
                security_testdir = os.path.join(os.path.dirname(__file__), "security_test")
                domain_name = "test_security_domain"
                code_signer_name = "test_signer"
                orig_identity_provider_path = os.path.join(security_testdir, "identity_provider")
                identity_provider_path = os.path.join(credentials_testdir, "identity_provider")
                policy_storage_path = os.path.join(security_testdir, "policies")
                try:
                    shutil.rmtree(credentials_testdir)
                except Exception as err:
                    print("Failed to remove old testdir, err={}".format(err))
                try:
                    shutil.copytree(orig_identity_provider_path, identity_provider_path)
                except Exception as err:
                    _log.error("Failed to copy the identity provider files, err={}".format(err))
                    raise
                runtimes = helpers.create_CA_and_generate_runtime_certs(domain_name, credentials_testdir, 3)
                for r, n in zip(runtimes, nodes[stype]):
                    n.attributes = attribute_resolver.AttributeResolver(r['attributes'])
                    n.enrollment_password = r['enrollment_password']
                    n.id = r['id']
                    n.runtime_credentials = r['credentials']
                    n.node_name = r['node_name']
                # print("###RUNTIMES", runtimes)
                _conf.add_section("security")
                _conf.set('security', 'security_dir', credentials_testdir)
                _conf.set('global', 'storage_type', 'securedht')
                all_started.extend(map(partial(prep_node, stype), nodes[stype]))
                _conf.remove_section("security")
            except:
                traceback.print_exc()
        elif stype == "notstarted":
            _conf.set('global', 'storage_type', 'dht')
            map(partial(prep_node, stype), nodes[stype])
        elif stype == "sql":
            _conf.set('global', 'storage_type', 'sql')
            # Use the default, i.e. local passwd-less root mysql DB
            _conf.set('global', 'storage_sql', {})
            all_started.extend(map(partial(prep_node, stype), nodes[stype]))
        elif stype == "proxy":
            # Setting up proxy storage for testing is a bit complicated
            # We short circuit so that fake tunnels' send directly calls peer's receive-method
            # The master (0) is setup as local and the others (1, 2) as proxy

            # Give the master node ref to the proxies (to be used in fake network, proto & tunnel)
            nodes["proxy"][1].master_node = nodes["proxy"][0]
            nodes["proxy"][2].master_node = nodes["proxy"][0]

            # Create fake tunnels
            for n2 in nodes["proxy"]:
                tt = {}
                for n1 in nodes["proxy"]:
                    if n1 != n2:
                        tt[n1.id] = FakeTunnel(
                            DummyNetwork(n1),
                            tt,
                            n1.id,
                            'storage',
                            None,
                            rt_id=n2.id,
                            id=calvinuuid.uuid("TUNNEL"))
                tunnels[n2.id] = tt
                n2.tunnels = tunnels

            # Give a tunnel its peers tunnel
            for n2 in nodes["proxy"]:
                for n1 in nodes["proxy"]:
                    if n1 != n2:
                        tunnels[n2.id][n1.id]._peers_fake_tunnel = tunnels[n1.id][n2.id]

            # Start master
            _conf.set('global', 'storage_type', 'local')
            prep_node(stype, nodes[stype][0])

            # Inform master it has 2 proxy storage clients
            [nodes[stype][0].storage.tunnel_request_handles(t)
             for t in tunnels[nodes[stype][0].id].values()]

            # Start proxies
            _conf.set('global', 'storage_type', 'proxy')
            _conf.set('global', 'storage_proxy', nodes[stype][0].uris[0])
            all_started.extend(map(partial(prep_node, stype), nodes[stype][1:]))

            # Inform proxy that it is connected, first wait until up_handler set
            count = 0
            while (tunnels[nodes[stype][1].id][nodes[stype][0].id].up_handler is None or
                   tunnels[nodes[stype][2].id][nodes[stype][0].id].up_handler is None) and count < 100:
                pytest.blockon(threads.defer_to_thread(time.sleep, 0.1))
                count += 1
            tunnels[nodes[stype][1].id][nodes[stype][0].id].up_handler()
            tunnels[nodes[stype][2].id][nodes[stype][0].id].up_handler()

    dl = defer.DeferredList(all_started)
    print(time.time())
    try:
        pytest.blockon(dl)
    except:
        print("### Some storage plugins might have failed to start! ###")
        traceback.print_exc()
    print(time.time())

    def teardown():
        print("#####TEARDOWN")
        all_stopped = []
        for ntypes in nodes.values():
            for n in ntypes:
                cb, d = create_callback(timeout=10)
                n.storage.stop(cb=cb)
                all_stopped.append(d)
        dl = defer.DeferredList(all_stopped)
        try:
            pytest.blockon(dl)
        except:
            print("### Some storage plugins might have failed to stop ###")
            traceback.print_exc()

    request.addfinalizer(teardown)
    return {"nodes": nodes}
def tor_network(reactor, temp_dir, chutney, request):
    # this is the actual "chutney" script at the root of a chutney checkout
    chutney_dir = chutney
    chut = join(chutney_dir, 'chutney')

    # now, as per Chutney's README, we have to create the network
    # ./chutney configure networks/basic
    # ./chutney start networks/basic
    proto = _DumpOutputProtocol(None)
    reactor.spawnProcess(
        proto,
        sys.executable,
        (
            sys.executable, '-m', 'chutney.TorNet', 'configure',
            join(chutney_dir, 'networks', 'basic'),
        ),
        path=join(chutney_dir),
        env={"PYTHONPATH": join(chutney_dir, "lib")},
    )
    pytest.blockon(proto.done)

    proto = _DumpOutputProtocol(None)
    reactor.spawnProcess(
        proto,
        sys.executable,
        (
            sys.executable, '-m', 'chutney.TorNet', 'start',
            join(chutney_dir, 'networks', 'basic'),
        ),
        path=join(chutney_dir),
        env={"PYTHONPATH": join(chutney_dir, "lib")},
    )
    pytest.blockon(proto.done)

    # print some useful stuff
    proto = _CollectOutputProtocol()
    reactor.spawnProcess(
        proto,
        sys.executable,
        (
            sys.executable, '-m', 'chutney.TorNet', 'status',
            join(chutney_dir, 'networks', 'basic'),
        ),
        path=join(chutney_dir),
        env={"PYTHONPATH": join(chutney_dir, "lib")},
    )
    pytest.blockon(proto.done)

    def cleanup():
        print("Tearing down Chutney Tor network")
        proto = _CollectOutputProtocol()
        reactor.spawnProcess(
            proto,
            sys.executable,
            (
                sys.executable, '-m', 'chutney.TorNet', 'stop',
                join(chutney_dir, 'networks', 'basic'),
            ),
            path=join(chutney_dir),
            env={"PYTHONPATH": join(chutney_dir, "lib")},
        )
        pytest.blockon(proto.done)
    request.addfinalizer(cleanup)

    return chut
def success_response(agent, request):
    """A fixture that performs a correct handshake with the given version."""
    response = pytest.blockon(make_request(agent, version=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
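# Hedged usage sketch, not from the source: how a test might consume the
# success_response fixture above.  IResponse.code is Twisted's HTTP status
# attribute, and 101 "Switching Protocols" is what a successful WebSocket
# handshake returns; the test name is illustrative.
def test_handshake_switches_protocols(success_response):
    assert success_response.code == 101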
def bad_method_response(agent, request):
    """A fixture that performs a bad handshake with a disallowed HTTP method."""
    response = pytest.blockon(make_request(agent, method=request.param))
    yield response
    client.readBody(response).cancel()  # immediately close the connection
def crypto_crossbar(reactor, request, virtualenv, session_temp, self_signed_cert):
    """
    Similar to the global "crossbar" fixture, but provides more
    configuration so we can do self-signed SSL certificates as well.

    XXX reduce dupes between auth_crossbar + crossbar

    Note that this means there are *three* crossbar instances active at
    once, so mind those port-numbers ;)
    """
    (privkey, certfile) = self_signed_cert
    crossbar_config = {
        "version": 2,
        "controller": {},
        "workers": [{
            "id": "ssl_testee",
            "type": "router",
            "realms": [{
                "name": "auth_realm",
                "roles": [{
                    "name": "authenticated",
                    "permissions": [{
                        "uri": "*",
                        "allow": {
                            "publish": True,
                            "subscribe": True,
                            "call": True,
                            "register": True
                        },
                        "cache": True,
                        "disclose": {
                            "caller": True,
                            "publisher": True,
                        }
                    }]
                }, {
                    "name": "anonymous",
                    "type": "static",
                    "permissions": [{
                        "uri": "*",
                        "allow": {
                            "subscribe": True,
                            "call": True,
                        },
                        "cache": True,
                        "disclose": {
                            "caller": True,
                            "publisher": True,
                        }
                    }]
                }]
            }],
            "transports": [{
                "type": "web",
                "id": "test_ssl_0",
                "endpoint": {
                    "type": "tcp",
                    "port": 6464,
                    "tls": {
                        "key": privkey,
                        "certificate": certfile,
                    }
                },
                "paths": {
                    "/": {
                        "type": "static",
                        "directory": "../web"
                    },
                    "tls_ws": {
                        "type": "websocket",
                        "auth": {
                            "wampcra": {
                                "type": "static",
                                "users": {
                                    "username": {
                                        "secret": "p4ssw0rd",
                                        "role": "authenticated"
                                    },
                                }
                            },
                            "anonymous": {
                                "type": "static",
                                "role": "anonymous"
                            }
                        }
                    }
                }
            }],
        }]
    }

    class WaitForTransport(object):
        """
        Super hacky, but ... other suggestions? Could busy-wait for
        ports to become connect()-able? Better text to search for?
        """
        def __init__(self, done):
            self.data = ''
            self.done = done

        def write(self, data):
            print(data, end='')
            if self.done.called:
                return

            # in case it's not line-buffered for some crazy reason
            self.data = self.data + data
            if "started Transport test_ssl_0" in self.data:
                print("Detected transport starting up")
                self.done.callback(None)
            if "Address already in use" in self.data:
                self.done.errback(RuntimeError("Address already in use"))

    tempdir = _create_temp(request, prefix="cts_auth")
    listening = Deferred()
    protocol = pytest.blockon(
        start_crossbar(
            reactor, virtualenv, tempdir, crossbar_config,
            stdout=WaitForTransport(listening),
            stderr=WaitForTransport(listening),
            log_level='debug' if request.config.getoption('logdebug', False) else False,
        ))

    request.addfinalizer(partial(_cleanup_crossbar, protocol))

    timeout = sleep(crossbar_startup_timeout)
    pytest.blockon(
        DeferredList([timeout, listening], fireOnOneErrback=True, fireOnOneCallback=True))
    if timeout.called:
        raise RuntimeError("Timeout waiting for crossbar to start")

    return protocol
def crossbar(reactor, request, virtualenv, session_temp):
    """
    A fixture which runs a Crossbar instance in a tempdir. This
    crossbar will have minimal configuration -- the expectation is
    that tests themselves would do any additional configuration needed
    or provide their own fixture.

    This fixture is **session** scoped, so there will just be a single
    Crossbar instance created per test-run. Tests should take care not
    to do anything catastrophic to the instance, or use their own
    instance in that case. Or, we could make it scope='function'...
    """
    print("Starting Crossbar.io. scope='{0}'".format(request.scope))

    # XXX could pytest.mark.parametrize on transports, for example, to
    # test both websocket and rawsocket -- but then would need to
    # provide the configuration onwards somehow...
    crossbar_config = {
        "version": 2,
        "controller": {},
        "workers": [{
            "id": "testee",
            "type": "router",
            # "options": {
            #     "auto_realms": True,
            # },
            "realms": [{
                "name": "functest_realm1",
                "roles": [
                    {
                        "name": "anonymous",
                        "permissions": [{
                            "uri": "*",
                            "allow": {
                                "publish": True,
                                "subscribe": True,
                                "call": True,
                                "register": True
                            },
                            "cache": True,
                            "disclose": {
                                "caller": True,
                                "publisher": True,
                            }
                        }]
                    },
                ]
            }],
            "transports": [{
                "type": "websocket",
                "id": "ws_test_0",
                "endpoint": {
                    "type": "tcp",
                    "port": 6565,
                },
                "url": u"ws://localhost:6565/ws",
            }, {
                "type": "rawsocket",
                "id": "ws_test_1",
                "endpoint": {
                    "type": "tcp",
                    "port": 6564,
                },
            }],
        }]
    }

    class WaitForTransport(object):
        """
        Super hacky, but ... other suggestions? Could busy-wait for
        ports to become connect()-able? Better text to search for?
        """
        def __init__(self, done):
            self.data = ''
            self.done = done

        def write(self, data):
            print(data, end='')
            if self.done.called:
                return

            # in case it's not line-buffered for some crazy reason
            self.data = self.data + data
            if "started Transport ws_test_0" in self.data:
                print("Detected transport starting up")
                self.done.callback(None)
            if "Address already in use" in self.data:
                self.done.errback(RuntimeError("Address already in use"))

    listening = Deferred()
    protocol = pytest.blockon(
        start_crossbar(
            reactor, virtualenv, session_temp, crossbar_config,
            stdout=WaitForTransport(listening),
            stderr=WaitForTransport(listening),
            log_level='debug' if request.config.getoption('logdebug', False) else False,
        ))

    request.addfinalizer(partial(_cleanup_crossbar, protocol))

    timeout = sleep(crossbar_startup_timeout)
    pytest.blockon(
        DeferredList([timeout, listening], fireOnOneErrback=True, fireOnOneCallback=True))
    if timeout.called:
        raise RuntimeError("Timeout waiting for crossbar to start")

    return protocol
def abstract_inline_callbacks(simple_handler, request):
    method, args = eval(request.param)
    d = getattr(simple_handler, method)(*args)
    assert isinstance(d, Deferred)
    reactor.callLater(0.0, d.callback, "Test Abstract Callback")
    return pytest.blockon(d)
def cleanup():
    try:
        process.signalProcess('TERM')
        pytest.blockon(protocol.exited)
    except ProcessExitedAlready:
        pass
def dynamic_authorize_crossbar(reactor, request, virtualenv, session_temp):
    """
    Provides a 'slow' dynamic authorizer that takes 2 seconds to
    authorize a call
    """
    crossbar_config = {
        "version": 2,
        "controller": {},
        "workers": [{
            "id": "testee",
            "type": "router",
            "realms": [{
                "name": "realm-auth",
                "roles": [{
                    "name": "role",
                    "permissions": [{
                        "uri": "test.authenticate",
                        "allow": {
                            "register": True
                        }
                    }]
                }]
            }, {
                "name": "slow_authentication",
                "roles": [{
                    "name": "role0",
                    "permissions": [{
                        "uri": "*",
                        "allow": {
                            "publish": True,
                            "subscribe": True,
                            "call": True,
                            "register": True
                        },
                        "cache": True,
                        "disclose": {
                            "caller": True,
                            "publisher": True,
                        }
                    }]
                }]
            }],
            "transports": [{
                "type": "web",
                "id": "ws_test_0",
                "endpoint": {
                    "type": "tcp",
                    "port": 7979
                },
                "paths": {
                    "/": {
                        "type": "static",
                        "directory": "../web"
                    },
                    "test_dyn_cryptosign": {
                        "type": "websocket",
                        "auth": {
                            "cryptosign": {
                                "type": "dynamic",
                                "authenticator": "test.authenticate",
                                "authenticator-realm": "realm-auth"
                            }
                        }
                    }
                }
            }],
            "components": [{
                "type": "function",
                "realm": "realm-auth",
                "role": "role",
                "callbacks": {
                    "join": "crossbar.functest_helpers.auth.setup_auth"
                },
            }]
        }]
    }

    class WaitForTransport(object):
        """
        Super hacky, but ... other suggestions? Could busy-wait for
        ports to become connect()-able? Better text to search for?
        """
        def __init__(self, done):
            self.data = ''
            self.done = done

        def write(self, data):
            print(data, end='')
            if self.done.called:
                return

            # in case it's not line-buffered for some crazy reason
            self.data = self.data + data
            if "started Transport ws_test_0" in self.data:
                print("Detected transport starting up")
                self.done.callback(None)
            if "Address already in use" in self.data:
                self.done.errback(RuntimeError("Address already in use"))

    tempdir = _create_temp(request, prefix="cts_auth")
    listening = Deferred()
    protocol = pytest.blockon(
        start_crossbar(
            reactor, virtualenv, tempdir, crossbar_config,
            stdout=WaitForTransport(listening),
            stderr=WaitForTransport(listening),
            log_level='debug' if request.config.getoption('logdebug', False) else False,
        ))

    request.addfinalizer(partial(_cleanup_crossbar, protocol))

    timeout = sleep(40)
    pytest.blockon(
        DeferredList([timeout, listening], fireOnOneErrback=True, fireOnOneCallback=True))
    if timeout.called:
        raise RuntimeError("Timeout waiting for crossbar to start")

    return protocol
def component_crossbar(reactor, request, virtualenv, session_temp):
    crossbar_config = {
        "version": 2,
        "controller": {},
        "workers": [{
            "id": "testee",
            "type": "router",
            "realms": [{
                "name": "auth_realm",
                "roles": [{
                    "name": "authenticated",
                    "permissions": [{
                        "uri": "*",
                        "allow": {
                            "publish": True,
                            "subscribe": True,
                            "call": True,
                            "register": True
                        },
                        "cache": True,
                        "disclose": {
                            "caller": True,
                            "publisher": True,
                        }
                    }]
                }, {
                    "name": "anonymous",
                    "permissions": [{
                        "uri": "*",
                        "allow": {
                            "subscribe": True,
                            "call": True,
                        },
                        "cache": True,
                        "disclose": {
                            "caller": True,
                            "publisher": True,
                        }
                    }]
                }, {
                    "name": "role0",
                    "permissions": [{
                        "uri": "*",
                        "allow": {
                            "publish": True,
                            "subscribe": True,
                            "call": True,
                            "register": True
                        },
                        "cache": True,
                        "disclose": {
                            "caller": True,
                            "publisher": True,
                        }
                    }]
                }, {
                    "name": "role1",
                    "permissions": [{
                        "uri": "*",
                        "allow": {
                            "publish": True,
                            "subscribe": True,
                            "call": True,
                            "register": True
                        },
                        "cache": True,
                        "disclose": {
                            "caller": True,
                            "publisher": True,
                        }
                    }]
                }]
            }],
            "transports": [{
                "type": "web",
                "id": "auth_ws_99",
                "endpoint": {
                    "type": "tcp",
                    "port": 7171
                },
                "paths": {
                    "/": {
                        "type": "static",
                        "directory": "../web"
                    },
                    "auth_ws": {
                        "type": "websocket",
                        "auth": {
                            "cryptosign": {
                                "type": "static",
                                "principals": {
                                    "*****@*****.**": {
                                        "realm": "auth_realm",
                                        "role": "authenticated",
                                        "authorized_keys": [
                                            "545efb0a2192db8d43f118e9bf9aee081466e1ef36c708b96ee6f62dddad9122"
                                        ]
                                    }
                                }
                            }
                        }
                    }
                }
            }],
        }]
    }

    class WaitForTransport(object):
        """
        Super hacky, but ... other suggestions? Could busy-wait for
        ports to become connect()-able? Better text to search for?
        """
        def __init__(self, done):
            self.data = ''
            self.done = done

        def write(self, data):
            print(data, end='')
            if self.done.called:
                return

            # in case it's not line-buffered for some crazy reason
            self.data = self.data + data
            if "started Transport auth_ws_99" in self.data:
                print("Detected transport starting up")
                self.done.callback(None)
            if "Address already in use" in self.data:
                self.done.errback(RuntimeError("Address already in use"))

    tempdir = _create_temp(request, prefix="cts_auth")
    listening = Deferred()
    from cts.functional_tests.helpers import start_crossbar
    protocol = pytest.blockon(
        start_crossbar(
            reactor, virtualenv, tempdir, crossbar_config,
            stdout=WaitForTransport(listening),
            stderr=WaitForTransport(listening),
            log_level='debug' if request.config.getoption('logdebug', False) else False,
        ))

    request.addfinalizer(partial(_cleanup_crossbar, protocol))

    timeout = sleep(10)
    pytest.blockon(
        DeferredList([timeout, listening], fireOnOneErrback=True, fireOnOneCallback=True))
    if timeout.called:
        raise RuntimeError("Timeout waiting for crossbar to start")

    return protocol