def matrix_server_starter(
        *,
        count: int = 1,
        config_generator: ContextManager = None,
        log_context: str = None,
        initial_port: int = 8500,
) -> ContextManager:
    """Start ``count`` local synapse homeservers and yield their URLs.

    Generator-style context manager: all started servers, generated config
    files and opened log files are registered on an ``ExitStack`` and are
    torn down together when the context exits.

    Args:
        count: number of homeservers to start.
        config_generator: callable mapping a port to ``(server_name,
            config_file)``; when ``None``, one is created via
            ``generate_synapse_config()`` and cleaned up on exit.
        log_context: optional label written into the log-file header.
        initial_port: first port handed to ``get_free_port``.

    Yields:
        List of ``ParsedURL`` objects, one per started server.
    """
    with ExitStack() as exit_stack:
        if config_generator is None:
            config_generator = exit_stack.enter_context(generate_synapse_config())
        server_urls = []
        # zip with range(count) limits the (unbounded) free-port generator
        # to exactly `count` ports.
        for _, port in zip(range(count), get_free_port(initial_port=initial_port)):
            server_name, config_file = config_generator(port)
            server_url = ParsedURL(f'https://{server_name}')
            server_urls.append(server_url)
            # Default: discard synapse output entirely.
            synapse_io = subprocess.DEVNULL
            # Used in CI to capture the logs for failure analysis
            if _SYNAPSE_LOGS_PATH:
                log_file_path = Path(_SYNAPSE_LOGS_PATH).joinpath(f'{server_name}.log')
                log_file_path.parent.mkdir(parents=True, exist_ok=True)
                # Append mode: repeated runs accumulate into the same file,
                # separated by the header written below.
                log_file = exit_stack.enter_context(log_file_path.open('at'))
                # Preface log with header
                header = datetime.utcnow().isoformat()
                if log_context:
                    header = f'{header}: {log_context}'
                header = f' {header} '
                log_file.write(f'{header:=^100}\n')
                log_file.flush()
                # (stdin, stdout, stderr): stderr is merged into the log file.
                synapse_io = subprocess.DEVNULL, log_file, subprocess.STDOUT
            # HTTPExecutor blocks until the /versions endpoint answers (or
            # the 30s timeout elapses) and stops the process on stack exit.
            exit_stack.enter_context(
                HTTPExecutor(
                    [
                        sys.executable,
                        '-m',
                        'synapse.app.homeserver',
                        f'--server-name={server_name}',
                        f'--config-path={config_file!s}',
                    ],
                    url=urljoin(server_url, '/_matrix/client/versions'),
                    method='GET',
                    timeout=30,
                    cwd=config_file.parent,
                    # Synapse serves a self-signed certificate here.
                    verify_tls=False,
                    io=synapse_io,
                ),
            )
        yield server_urls
def local_matrix_server(transport_config):
    """Yield a Matrix server URL for ``transport_config``, starting one if needed.

    Generator-style fixture/context manager:

    * yields ``None`` when the transport is not Matrix;
    * yields the configured server URL unchanged when no start command is
      given (server assumed to be already running);
    * otherwise starts the configured command via ``HTTPExecutor``, waits
      for ``/_matrix/client/versions`` to respond, yields the URL, and
      stops the process on teardown.
    """
    if transport_config.protocol != TransportProtocol.MATRIX:
        yield None
        return

    server = transport_config.parameters.server

    # if command is none, assume server is already running
    if transport_config.parameters.command in (None, 'none'):
        yield server
        return

    # otherwise, run our own local server
    matrix = HTTPExecutor(
        transport_config.parameters.command,
        url=urljoin(server, '/_matrix/client/versions'),
        method='GET',
        timeout=30,
        shell=True,
    )
    matrix.start()
    try:
        yield server
    finally:
        # Without try/finally the server process would leak whenever the
        # consumer raises (the exception is thrown into this generator at
        # the yield and would skip the stop() call).
        matrix.stop()
def matrix_server_starter(
        free_port_generator: Iterator[int],
        *,
        count: int = 1,
        config_generator: ContextManager = None,
        log_context: str = None,
) -> ContextManager:
    """Start ``count`` local synapse homeservers and yield their URLs.

    Generator-style context manager: servers, generated configs and opened
    log files are registered on an ``ExitStack`` and torn down together
    when the context exits.

    Args:
        free_port_generator: iterator of free port numbers; one port is
            consumed per server.
        count: number of homeservers to start.
        config_generator: callable mapping a port to ``(server_name,
            config_file)``; when ``None``, one is created via
            ``generate_synapse_config()`` and cleaned up on exit.
        log_context: optional label written into the log-file header.

    Yields:
        List of ``ParsedURL`` objects, one per started server.
    """
    with ExitStack() as exit_stack:
        if config_generator is None:
            config_generator = exit_stack.enter_context(generate_synapse_config())
        server_urls = []
        # zip with range(count) takes exactly `count` ports from the generator.
        for _, port in zip(range(count), free_port_generator):
            server_name, config_file = config_generator(port)
            server_url = ParsedURL(f"https://{server_name}")
            server_urls.append(server_url)
            # Default: discard synapse output entirely.
            synapse_io = subprocess.DEVNULL
            # Used in CI to capture the logs for failure analysis
            if _SYNAPSE_LOGS_PATH:
                log_file_path = Path(_SYNAPSE_LOGS_PATH).joinpath(f"{server_name}.log")
                log_file_path.parent.mkdir(parents=True, exist_ok=True)
                # Append mode: repeated runs accumulate into the same file,
                # separated by the header written below.
                log_file = exit_stack.enter_context(log_file_path.open("at"))
                # Preface log with header
                header = datetime.utcnow().isoformat()
                if log_context:
                    header = f"{header}: {log_context}"
                header = f" {header} "
                log_file.write(f"{header:=^100}\n")
                log_file.flush()
                # (stdin, stdout, stderr): stderr is merged into the log file.
                synapse_io = subprocess.DEVNULL, log_file, subprocess.STDOUT
            # HTTPExecutor blocks until the /versions endpoint answers (or
            # the 30s timeout elapses) and stops the process on stack exit.
            exit_stack.enter_context(
                HTTPExecutor(
                    [
                        sys.executable,
                        "-m",
                        "synapse.app.homeserver",
                        f"--server-name={server_name}",
                        f"--config-path={config_file!s}",
                    ],
                    url=urljoin(server_url, "/_matrix/client/versions"),
                    method="GET",
                    timeout=30,
                    cwd=config_file.parent,
                    # Synapse serves a self-signed certificate here.
                    verify_tls=False,
                    io=synapse_io,
                ))
        yield server_urls
def smoketest(ctx, debug, local_matrix, **kwargs):  # pylint: disable=unused-argument
    """ Test, that the raiden installation is sane. """
    # Imported lazily so the CLI can load without pulling in the full
    # test/runtime machinery.
    from raiden.api.python import RaidenAPI
    from raiden.tests.utils.smoketest import (
        TEST_PARTNER_ADDRESS,
        TEST_DEPOSIT_AMOUNT,
        load_smoketest_config,
        run_smoketests,
        setup_testchain_and_raiden,
    )

    # All output (Raiden DEBUG log plus the sections appended below) is
    # collected into a single report file for post-mortem analysis.
    report_file = tempfile.mktemp(suffix='.log')
    configure_logging({'': 'DEBUG'}, log_file=report_file)

    def append_report(subject, data):
        # Append a `==== SUBJECT ====`-style section to the report file.
        with open(report_file, 'a', encoding='UTF-8') as handler:
            handler.write(f'{f" {subject.upper()} ":=^80}{os.linesep}')
            if data is not None:
                if isinstance(data, bytes):
                    data = data.decode()
                handler.writelines([data + os.linesep])

    append_report('Raiden version', json.dumps(get_system_spec()))
    append_report('Raiden log', None)

    # Matrix runs have one extra setup step ("Starting Matrix transport").
    step_count = 7
    if ctx.parent.params['transport'] == 'matrix':
        step_count = 8
    step = 0

    def print_step(description, error=False):
        # Print a colored `[n/total] description` progress line.
        nonlocal step
        step += 1
        click.echo(
            '{} {}'.format(
                click.style(f'[{step}/{step_count}]', fg='blue'),
                click.style(description, fg='green' if not error else 'red'),
            ),
        )

    print_step('Getting smoketest configuration')
    smoketest_config = load_smoketest_config()
    if not smoketest_config:
        # Recorded in the report only; execution continues regardless.
        append_report(
            'Smoketest configuration',
            'Could not load the smoketest genesis configuration file.',
        )

    # Spins up a private test chain (geth) and prepares Raiden arguments.
    result = setup_testchain_and_raiden(
        smoketest_config,
        ctx.parent.params['transport'],
        ctx.parent.params['matrix_server'],
        print_step,
    )
    args = result['args']
    contract_addresses = result['contract_addresses']
    token = result['token']
    ethereum = result['ethereum']
    ethereum_config = result['ethereum_config']

    smoketest_config['transport'] = args['transport']

    # Run the generated args through the `run` command's option processing
    # so they get the same validation/defaults as real CLI invocations.
    for option_ in run.params:
        if option_.name in args.keys():
            args[option_.name] = option_.process_value(ctx, args[option_.name])
        else:
            args[option_.name] = option_.default

    port = next(get_free_port('127.0.0.1', 5001))
    args['api_address'] = 'localhost:' + str(port)

    def _run_smoketest():
        print_step('Starting Raiden')

        # invoke the raiden app
        app = run_app(**args)

        raiden_api = RaidenAPI(app.raiden)
        rest_api = RestAPI(raiden_api)
        api_server = APIServer(rest_api)
        (api_host, api_port) = split_endpoint(args['api_address'])
        api_server.start(api_host, api_port)

        # Open and fund a channel with the well-known test partner so the
        # smoketests have something to operate on.
        raiden_api.channel_open(
            contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY],
            to_canonical_address(token.contract.address),
            to_canonical_address(TEST_PARTNER_ADDRESS),
            None,
            None,
        )
        raiden_api.set_total_channel_deposit(
            contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY],
            to_canonical_address(token.contract.address),
            to_canonical_address(TEST_PARTNER_ADDRESS),
            TEST_DEPOSIT_AMOUNT,
        )

        # Record the deployed contract addresses in the config handed to
        # the smoketests (checksummed for readability).
        smoketest_config['contracts']['registry_address'] = to_checksum_address(
            contract_addresses[CONTRACT_TOKEN_NETWORK_REGISTRY],
        )
        smoketest_config['contracts']['secret_registry_address'] = to_checksum_address(
            contract_addresses[CONTRACT_SECRET_REGISTRY],
        )
        smoketest_config['contracts']['discovery_address'] = to_checksum_address(
            contract_addresses[CONTRACT_ENDPOINT_REGISTRY],
        )
        smoketest_config['contracts']['token_address'] = to_checksum_address(
            token.contract.address,
        )

        success = False
        try:
            print_step('Running smoketest')
            error = run_smoketests(app.raiden, smoketest_config, debug=debug)
            if error is not None:
                append_report('Smoketest assertion error', error)
            else:
                success = True
        finally:
            # Teardown always runs: stop the app, signal the geth process
            # (signal 2 == SIGINT) and capture all of its output into the
            # report.
            app.stop()
            ethereum.send_signal(2)
            err, out = ethereum.communicate()
            append_report('Ethereum init stdout', ethereum_config['init_log_out'].decode('utf-8'))
            append_report('Ethereum init stderr', ethereum_config['init_log_err'].decode('utf-8'))
            append_report('Ethereum stdout', out)
            append_report('Ethereum stderr', err)
            append_report('Smoketest configuration', json.dumps(smoketest_config))
        if success:
            print_step(f'Smoketest successful, report was written to {report_file}')
        else:
            print_step(f'Smoketest had errors, report was written to {report_file}', error=True)
        return success

    if args['transport'] == 'udp':
        # UDP transport needs a mapped socket (NAT strategy disabled).
        with SocketFactory('127.0.0.1', port, strategy='none') as mapped_socket:
            args['mapped_socket'] = mapped_socket
            success = _run_smoketest()
    elif args['transport'] == 'matrix' and local_matrix.lower() != 'none':
        # Start a local Matrix server with the user-supplied command and
        # point Raiden at it.
        args['mapped_socket'] = None
        print_step('Starting Matrix transport')
        try:
            with HTTPExecutor(
                local_matrix,
                url=urljoin(args['matrix_server'], '/_matrix/client/versions'),
                method='GET',
                timeout=30,
                shell=True,
            ):
                args['extra_config'] = {
                    'transport': {
                        'matrix': {
                            'discovery_room': {'server': 'matrix.local.raiden'},
                            'server_name': 'matrix.local.raiden',
                        },
                    },
                }
                success = _run_smoketest()
        except (PermissionError, ProcessExitedWithError):
            append_report('Matrix server start exception', traceback.format_exc())
            print_step(
                f'Error during smoketest setup, report was written to {report_file}',
                error=True,
            )
            success = False
    elif args['transport'] == 'matrix' and local_matrix.lower() == "none":
        # Matrix transport against an externally managed server.
        args['mapped_socket'] = None
        success = _run_smoketest()
    else:
        # Shouldn't happen
        raise RuntimeError(f"Invalid transport type '{args['transport']}'")

    if not success:
        sys.exit(1)
def matrix_server_starter(
        free_port_generator: Iterable[Port],
        broadcast_rooms_aliases: Iterable[str],
        *,
        count: int = 1,
        config_generator: SynapseConfigGenerator = None,
        log_context: str = None,
) -> Iterator[List[Tuple[ParsedURL, HTTPExecutor]]]:
    """Start ``count`` local synapse homeservers and set up broadcast rooms.

    Generator-style context manager: servers, generated configs and opened
    log files are registered on an ``ExitStack`` and torn down together
    when the context exits.

    Args:
        free_port_generator: iterable of free port numbers; one port is
            consumed per server.
        broadcast_rooms_aliases: room aliases to create on the started
            servers via ``setup_broadcast_room``.
        count: number of homeservers to start.
        config_generator: callable mapping a port to ``(server_name,
            config_file)``; when ``None``, one is created via
            ``generate_synapse_config()`` and cleaned up on exit.
        log_context: optional label written into the log-file header.

    Yields:
        List of ``(server_url, executor)`` tuples, one per started server.
    """
    with ExitStack() as exit_stack:
        if config_generator is None:
            config_generator = exit_stack.enter_context(generate_synapse_config())

        servers: List[Tuple[ParsedURL, HTTPExecutor]] = []
        # zip with range(count) takes exactly `count` ports from the generator.
        for _, port in zip(range(count), free_port_generator):
            server_name, config_file = config_generator(port)
            server_url = ParsedURL(f"http://{server_name}")

            synapse_cmd = [
                sys.executable,
                "-m",
                "synapse.app.homeserver",
                f"--server-name={server_name}",
                f"--config-path={config_file!s}",
            ]

            # Default: discard synapse output entirely.
            synapse_io: EXECUTOR_IO = DEVNULL
            # Used in CI to capture the logs for failure analysis
            if _SYNAPSE_LOGS_PATH is not None:
                log_file_path = Path(_SYNAPSE_LOGS_PATH).joinpath(f"{server_name}.log")
                log_file_path.parent.mkdir(parents=True, exist_ok=True)
                # Append mode: repeated runs accumulate into the same file,
                # separated by the header written below.
                log_file = exit_stack.enter_context(log_file_path.open("at"))

                # Preface log with header
                header = datetime.utcnow().isoformat()
                if log_context:
                    header = f"{header}: {log_context}"
                header = f" {header} "
                log_file.write(f"{header:=^100}\n")
                log_file.write(f"Cmd: `{' '.join(synapse_cmd)}`\n")
                log_file.flush()

                # (stdin, stdout, stderr): stderr is merged into the log file.
                synapse_io = DEVNULL, log_file, STDOUT

            log.debug("Synapse command", command=synapse_cmd)

            startup_timeout = 10
            sleep = 0.1

            # HTTPExecutor blocks until /versions answers (or the startup
            # timeout elapses) and stops the process on stack exit.
            executor = HTTPExecutor(
                synapse_cmd,
                url=urljoin(server_url, "/_matrix/client/versions"),
                method="GET",
                timeout=startup_timeout,
                sleep=sleep,
                cwd=config_file.parent,
                verify_tls=False,
                io=synapse_io,
            )
            exit_stack.enter_context(executor)

            # The timeout_limit_teardown is necessary to prevent the build
            # being killed because of the lack of output, at the same time the
            # timeout must never happen, because if it does, not all finalizers
            # are executed, leaving dirty state behind and resulting in test
            # flakiness.
            #
            # Because of this, this value is arbitrarily smaller than the
            # teardown timeout, forcing the subprocess to be killed on a timely
            # manner, which should allow the teardown to proceed and finish
            # before the timeout elapses.
            teardown_timeout = 0.5

            # The timeout values for the startup and teardown must be
            # different, however the library doesn't support it. So here we
            # must poke at the private member and overwrite it.
            executor._timeout = teardown_timeout

            servers.append((server_url, executor))

        log.debug("Setting up broadcast rooms", aliases=broadcast_rooms_aliases)
        for broadcast_room_alias in broadcast_rooms_aliases:
            setup_broadcast_room([url for url, _ in servers], broadcast_room_alias)

        yield servers