def run_servers(conns):
    """Launch the sharding-poc server on every node, restarting any old instance.

    For each connection index, builds a batch command that kills a running
    ``exe_name`` process, waits a second, then relaunches the binary in the
    background (wrapped in ``script`` so its output lands in
    ``poc_<index>.out``).  All per-node commands are dispatched concurrently
    via a ThreadingGroup using fabric's ``custom_kwargs`` mapping.

    NOTE(review): removed a block of commented-out experimental launch
    commands (dead code).  Also: ``commands`` is keyed by the indices of the
    ``conns`` parameter, but the group is built from the module-level
    ``node_conns`` — confirm these are meant to be the same collection.
    """
    commands = {}
    for node_index, _ in enumerate(conns):
        program_cmd = make_batch_cmd([
            f"killall -9 {exe_name}",
            "sleep 1",
            f"nohup script -c './sharding-poc -seed={node_index} 2>&1 1>poc_{node_index}.out &' /dev/null",
        ])
        node_cmd = make_and_cmd([
            "cd {}".format(make_repo_src_path(poc_repo)),
            program_cmd,
        ])
        commands[node_index] = node_cmd
    print(commands)
    g = ThreadingGroup.from_connections(node_conns).run(custom_kwargs=commands)
    print(g)
def executes_arguments_on_contents_run_via_threading(
    self, Thread, Queue,
):
    """Assert ThreadingGroup.run() fans out via one worker thread per connection.

    ``Thread`` and ``Queue`` are mock-patched classes injected by the test
    framework; we verify each connection got its own thread_worker thread and
    that start/join were called once per connection.
    """
    queue = Queue.return_value
    g = ThreadingGroup.from_connections(self.cxns)
    # Make sure .exception() doesn't yield truthy Mocks. Otherwise we
    # end up with 'exceptions' that cause errors due to all being the
    # same.
    Thread.return_value.exception.return_value = None
    g.run(*self.args, **self.kwargs)
    # Testing that threads were used the way we expect is mediocre but
    # I honestly can't think of another good way to assert "threading
    # was used & concurrency occurred"...
    instantiations = [
        call(
            target=thread_worker,
            kwargs=dict(
                cxn=cxn,
                queue=queue,
                args=self.args,
                kwargs=self.kwargs,
            ),
        )
        for cxn in self.cxns
    ]
    Thread.assert_has_calls(instantiations, any_order=True)
    # These ought to work as by default a Mock.return_value is a
    # singleton mock object
    expected = len(self.cxns)
    for name, got in (
        ('start', Thread.return_value.start.call_count),
        ('join', Thread.return_value.join.call_count),
    ):
        err = "Expected {} calls to ExceptionHandlingThread.{}, got {}"  # noqa
        err = err.format(expected, name, got)
        # BUG FIX: the original `assert expected, got == err` only asserted
        # the truthiness of `expected` (always true for non-empty cxns) with
        # the boolean `got == err` as the message — it could never fail.
        # The intended check compares expected vs actual call counts:
        assert expected == got, err
def bubbles_up_errors_within_threads(self):
    # TODO: a raw ThreadException may deserve presentation tweaks here,
    # since these are user-visible "middle-ground" threads rather than
    # truly background IO/tunnel workers, and failures are expected.
    hostnames = ("host1", "host2", "host3")
    cxns = [Mock(host=name) for name in hostnames]

    class OhNoz(Exception):
        pass

    boom = OhNoz()
    cxns[1].run.side_effect = boom
    group = ThreadingGroup.from_connections(cxns)
    try:
        group.run(*self.args, **self.kwargs)
    except GroupException as exc:
        result = exc.result
    else:
        assert False, "Did not raise GroupException!"
    ok = {
        cxns[0]: cxns[0].run.return_value,
        cxns[2]: cxns[2].run.return_value,
    }
    bad = {cxns[1]: boom}
    combined = dict(ok)
    combined.update(bad)
    assert result == combined
    assert result.succeeded == ok
    assert result.failed == bad
def multipleHosts(self):
    """Build a ThreadingGroup with one Connection per entry in self.servers_list.

    Each entry is expected to provide 'username', 'hostname' and 'password'
    keys (password auth via connect_kwargs).
    """
    conns = []
    for host in self.servers_list:
        address = "%s@%s" % (host['username'], host['hostname'])
        conns.append(
            Connection(address, connect_kwargs={"password": host['password']})
        )
    return ThreadingGroup.from_connections(conns)
def returns_results_mapping(self, method):
    """The named group method returns a GroupResult mapping cxn -> result."""
    mocks = [Mock(name=label) for label in ("host1", "host2", "host3")]
    group = ThreadingGroup.from_connections(mocks)
    outcome = getattr(group, method)("whatever", hide=True)
    assert isinstance(outcome, GroupResult)
    wanted = {}
    for mock_cxn in mocks:
        wanted[mock_cxn] = getattr(mock_cxn, method).return_value
    assert outcome == wanted
    assert outcome.succeeded == wanted
    assert outcome.failed == {}
def configure_group(hosts):
    """Return a fabric Group connecting to each host with the AWS key file.

    Authenticates as the module-level ``user`` using the local .pem key.
    """
    key_file = 'C:\\Users\\Evan.Evan-Desktop\\.ssh\\dht-aws-key.pem'
    connections = [
        Connection(
            host=host,
            user=user,
            connect_kwargs={'key_filename': key_file},
        )
        for host in hosts
    ]
    return Group.from_connections(connections)
def returns_results_mapping(self):
    # TODO: update if/when we implement ResultSet
    mock_cxns = [Mock(name=label) for label in ("host1", "host2", "host3")]
    group = ThreadingGroup.from_connections(mock_cxns)
    outcome = group.run("whatever", hide=True)
    assert isinstance(outcome, GroupResult)
    wanted = dict((c, c.run.return_value) for c in mock_cxns)
    assert outcome == wanted
    assert outcome.succeeded == wanted
    assert outcome.failed == {}
def broadcastcollation(conns):
    """Run the client's ``broadcastcollation`` command on the relevant nodes.

    Only nodes with an entry in the module-level ``node_send_collation``
    mapping participate; each runs the client binary with its collation
    arguments, dispatched concurrently via a ThreadingGroup with
    ``custom_kwargs``.

    FIX: the membership guard is now checked before any command-string work
    (the original built ``program_cmd`` and then discarded it on
    ``continue``) — behavior is unchanged, wasted work removed.
    """
    commands = {}
    exact_conns = []
    for node_index, conn in enumerate(conns):
        # Skip nodes that have nothing to broadcast.
        if node_index not in node_send_collation:
            continue
        program_cmd = make_batch_cmd(
            [f"./{exe_name} -seed={node_index} -client "])
        program_cmd += "broadcastcollation {}".format(" ".join(
            map(str, node_send_collation[node_index])))
        node_cmd = make_and_cmd([
            "cd {}".format(make_repo_src_path(poc_repo)),
            program_cmd,
        ])
        exact_conns.append(conn)
        commands[node_index] = node_cmd
    print(commands)
    g = ThreadingGroup.from_connections(exact_conns).run(
        custom_kwargs=commands)
    print(g)
def update_build_poc(conns):
    """Pull and rebuild the poc repos on every connection; raise on failure.

    Runs setup+pull for libp2p, pubsub and poc repos plus a final build of
    the poc repo as one batch command across all connections, then checks
    each connection's result and raises ValueError on the first failure.
    """
    steps = []
    for repo in (libp2p_repo, pubsub_repo, poc_repo):
        steps.append(make_cmd_setup(repo))
        steps.append(make_cmd_pull(repo))
    steps.append(make_cmd_build(poc_repo))
    g = ThreadingGroup.from_connections(conns).run(make_batch_cmd(steps))
    for conn in conns:
        outcome = g[conn]
        if outcome.ok:
            continue
        raise ValueError(
            "building failed in conn {}, stdout=\"{}\", stderr=\"{}\"".format(
                conn,
                outcome.stdout,
                outcome.stderr,
            ))
def queue_used_to_return_results(self, Queue):
    """ThreadingGroup.run() drains the worker queue to build its results."""
    # Regular, explicit, mocks for Connections
    cxns = [Mock(host=label) for label in ("host1", "host2", "host3")]
    # Give the patched Queue just enough behavior to drive the drain loop:
    # three non-empty polls, then True terminates the while-not-empty loop.
    queue = Queue.return_value
    queue.empty.side_effect = (False, False, False, True)
    pairs = [(cxn, cxn.run.return_value) for cxn in cxns]
    queue.get.side_effect = list(pairs)
    # Execute & inspect results
    group = ThreadingGroup.from_connections(cxns)
    results = group.run(*self.args, **self.kwargs)
    assert results == {cxn: cxn.run.return_value for cxn in cxns}
    # Verify the queue was exercised as expected by the worker and by
    # ThreadingGroup.run() itself.
    queue.put.assert_has_calls(
        [call(pair) for pair in pairs], any_order=True,
    )
    assert queue.empty.called
    queue.get.assert_has_calls([call(block=False) for _ in cxns])
def executes_arguments_on_contents_run_via_threading(
    self, Thread, Queue
):
    """Assert ThreadingGroup.run() fans out via one worker thread per connection.

    ``Thread`` and ``Queue`` are mock-patched classes injected by the test
    framework; we verify each connection got its own thread_worker thread and
    that start/join were called once per connection.
    """
    queue = Queue.return_value
    g = ThreadingGroup.from_connections(self.cxns)
    # Make sure .exception() doesn't yield truthy Mocks. Otherwise we
    # end up with 'exceptions' that cause errors due to all being the
    # same.
    Thread.return_value.exception.return_value = None
    g.run(*self.args, **self.kwargs)
    # Testing that threads were used the way we expect is mediocre but
    # I honestly can't think of another good way to assert "threading
    # was used & concurrency occurred"...
    instantiations = [
        call(
            target=thread_worker,
            kwargs=dict(
                cxn=cxn,
                queue=queue,
                args=self.args,
                kwargs=self.kwargs,
            ),
        )
        for cxn in self.cxns
    ]
    Thread.assert_has_calls(instantiations, any_order=True)
    # These ought to work as by default a Mock.return_value is a
    # singleton mock object
    expected = len(self.cxns)
    for name, got in (
        ("start", Thread.return_value.start.call_count),
        ("join", Thread.return_value.join.call_count),
    ):
        err = (
            "Expected {} calls to ExceptionHandlingThread.{}, got {}"
        )  # noqa
        err = err.format(expected, name, got)
        # BUG FIX: the original `assert expected, got == err` only asserted
        # the truthiness of `expected` (always true for non-empty cxns) with
        # the boolean `got == err` as the message — it could never fail.
        # The intended check compares expected vs actual call counts:
        assert expected == got, err
def addpeer(conns):
    """Issue the client's ``addpeer`` command on every node with a target.

    Only nodes present in the module-level ``node_target`` mapping
    participate; each connects its node to the target node's host IP and
    index, dispatched concurrently via a ThreadingGroup with
    ``custom_kwargs``.

    FIX: the membership guard is now checked before any command-string work
    (the original built the client prefix and then discarded it on
    ``continue``) — behavior is unchanged, wasted work removed.
    """
    commands = {}
    exact_conns = []
    for node_index, conn in enumerate(conns):
        # Skip nodes without a configured peer target.
        if node_index not in node_target:
            continue
        program_cmd = f"./{exe_name} -seed={node_index} -client "
        target_node_index = node_target[node_index]
        target_node_host_index = node_host_index_map[target_node_index]
        program_cmd += "addpeer {} {} ".format(
            hosts[target_node_host_index].ip,
            target_node_index,
        )
        node_cmd = make_and_cmd([
            "cd {}".format(make_repo_src_path(poc_repo)),
            program_cmd,
        ])
        exact_conns.append(conn)
        commands[node_index] = node_cmd
    print(commands)
    g = ThreadingGroup.from_connections(exact_conns).run(
        custom_kwargs=commands)
    print(g)
def run_servers(conns):
    """Restart the sharding-poc server on every node inside a detached screen.

    For each connection index this kills any running ``exe_name`` process,
    sleeps one second, then relaunches the binary in a detached ``screen``
    session.  The launch line branches on ``uname`` at runtime so the same
    command works on both macOS (Darwin) and Linux, with output captured to
    ``poc_<index>.out`` via ``script``.  Commands run concurrently through a
    ThreadingGroup with fabric's ``custom_kwargs`` mapping; ``echo=True``
    prints each command as it runs.

    NOTE(review): ``commands`` is keyed by the indices of the ``conns``
    parameter, but the group is built from the module-level ``node_conns`` —
    confirm these are the same collection.
    """
    commands = {}
    for node_index, _ in enumerate(conns):
        # Linux: `script -f` flushes output continuously to the log file.
        linux_command = f"script -f -c './{exe_name} -seed={node_index}' poc_{node_index}.out"
        # macOS: BSD `script` has a different CLI; redirect stdout to the log.
        osx_command = f"script -q /dev/null ./{exe_name} -seed={node_index} 2>&1 1>poc_{node_index}.out /dev/null"
        program_cmd = make_batch_cmd([
            f"killall -9 {exe_name}",
            "sleep 1",
            # Detached screen session picks the right variant at runtime.
            f"screen -d -m bash -c \"if [ \"$(uname)\" == \"Darwin\" ]; then {osx_command}; else {linux_command}; fi\"",
        ])
        node_cmd = make_and_cmd([
            cmd_set_env,
            "cd {}".format(make_repo_src_path(poc_repo)),
            program_cmd,
        ])
        commands[node_index] = node_cmd
    g = ThreadingGroup.from_connections(node_conns).run(custom_kwargs=commands, echo=True)
    print(g)