def test_send_file_to_directory(
    self,
    dcos_node: Node,
    tmpdir: local,
) -> None:
    """
    It is possible to send a file to a cluster node to a directory that
    is mounted as tmpfs.

    See ``DockerExecTransport.send_file`` for details.
    """
    expected_content = str(uuid.uuid4())
    file_name = 'example_file.txt'
    source_file = tmpdir.join(file_name)
    source_file.write(expected_content)

    # Use a fresh, random directory so the test cannot collide with an
    # existing file on the node.
    destination_dir = Path(
        '/etc/{random}'.format(random=uuid.uuid4().hex),
    )
    dcos_node.run(args=['mkdir', '--parent', str(destination_dir)])
    dcos_node.send_file(
        local_path=Path(str(source_file)),
        remote_path=destination_dir,
    )
    cat_result = dcos_node.run(
        args=['cat', str(destination_dir / file_name)],
    )
    assert cat_result.stdout.decode() == expected_content
def test_tty(
    self,
    dcos_node: Node,
    tty: bool,
) -> None:
    """
    If the ``tty`` parameter is set to ``True``, a TTY is created.

    The script writes whether file descriptor 1 is a TTY to a file on
    the node, and the file's contents are then compared with the
    expectation.
    """
    filename = uuid.uuid4().hex
    # The redirect targets were corrupted; ``.format(filename=filename)``
    # shows they must be ``{filename}`` so the later ``cat`` can read
    # the result back.
    script = textwrap.dedent(
        """
        if [ -t 1 ]
        then
        echo True > {filename}
        else
        echo False > {filename}
        fi
        """,
    ).format(filename=filename)

    echo_result = dcos_node.run(
        args=[script],
        tty=tty,
        shell=True,
    )
    assert echo_result.returncode == 0
    run_result = dcos_node.run(args=['cat', filename])
    assert run_result.stdout.strip().decode() == str(tty)
def test_file_to_file(
    self,
    dcos_node: Node,
    tmp_path: Path,
) -> None:
    """
    It is possible to download a file from a node to a file path.
    """
    expected_content = str(uuid.uuid4())
    suffix = uuid.uuid4().hex

    local_file = tmp_path / 'local_file_{random}.txt'.format(random=suffix)
    remote_file_path = (
        Path('/etc/') / 'remote_file_{random}.txt'.format(random=suffix)
    )
    downloaded_file_path = (
        tmp_path / 'downloaded_file_{random}.txt'.format(random=suffix)
    )

    # Round-trip: upload a local file, then download it to a new path.
    local_file.write_text(expected_content)
    dcos_node.send_file(
        local_path=local_file,
        remote_path=remote_file_path,
    )
    dcos_node.download_file(
        remote_path=remote_file_path,
        local_path=downloaded_file_path,
    )
    assert downloaded_file_path.read_text() == expected_content
def test_local_file_already_exists(
    self,
    dcos_node: Node,
    tmp_path: Path,
) -> None:
    """
    Downloading a file raises a ``ValueError`` if the local file path
    already exists.
    """
    suffix = uuid.uuid4().hex
    local_file_path = tmp_path / 'local_file_{random}.txt'.format(
        random=suffix,
    )
    local_file_path.write_text(str(uuid.uuid4()))

    remote_file_path = (
        Path('/etc/') / 'remote_file_{random}.txt'.format(random=suffix)
    )
    dcos_node.send_file(
        local_path=local_file_path,
        remote_path=remote_file_path,
    )

    # Downloading on top of the existing local file must be refused.
    message = ('Failed to download a file to "{file}". '
               'A file already exists in that location.').format(
                   file=local_file_path)
    with pytest.raises(ValueError) as exc:
        dcos_node.download_file(
            remote_path=remote_file_path,
            local_path=local_file_path,
        )
    assert str(exc.value) == message
def _do_backup(master: Node, backup_local_path: Path) -> None:
    """
    Automated ZooKeeper backup procedure.
    Intended to be consistent with the documentation.

    https://jira.mesosphere.com/browse/DCOS-51647
    """
    # Exhibitor is stopped while the backup is taken, then restarted.
    master.run(args=['systemctl', 'stop', 'dcos-exhibitor'])

    # This must be an existing directory on the remote server.
    backup_remote_path = Path('/etc/') / backup_local_path.name
    backup_args = [
        '/opt/mesosphere/bin/dcos-shell',
        'dcos-zk',
        'backup',
        str(backup_remote_path),
        '-v',
    ]
    master.run(args=backup_args, output=Output.LOG_AND_CAPTURE)

    master.run(args=['systemctl', 'start', 'dcos-exhibitor'])

    # Fetch the backup to the local path and clean up the remote copy.
    master.download_file(
        remote_path=backup_remote_path,
        local_path=backup_local_path,
    )
    master.run(args=['rm', str(backup_remote_path)])
def test_not_installed(self, dcos_node: Node) -> None:
    """
    Retrieving the DC/OS version of a cluster which does not have DC/OS
    installed raises a ``DCOSNotInstalledError``.
    """
    with pytest.raises(DCOSNotInstalledError):
        dcos_node.dcos_build_info()
def test_eq(self, tmpdir: local) -> None:
    """
    Two nodes are equal iff their IP addresses are equal.
    """
    # Two distinct key files with identical contents, so that equality
    # cannot depend on the key file contents.
    key_content = str(uuid.uuid4())
    key_paths = []
    for key_filename in ('foo.key', 'bar.key'):
        key_file = tmpdir.join(key_filename)
        key_file.write(key_content)
        key_paths.append(Path(str(key_file)))
    node_ssh_key_path, other_ssh_key_path = key_paths

    node_public_ip_address = IPv4Address('172.0.0.1')
    node_private_ip_address = IPv4Address('172.0.0.3')
    other_ip_address = IPv4Address('172.0.0.4')
    node_user = '******'
    other_user = '******'
    node_transport = Transport.DOCKER_EXEC
    other_transport = Transport.SSH

    node = Node(
        public_ip_address=node_public_ip_address,
        private_ip_address=node_private_ip_address,
        ssh_key_path=node_ssh_key_path,
        default_user=node_user,
        default_transport=node_transport,
    )

    # Every combination of matching / non-matching attributes.
    combinations = [
        (transport, public_ip, private_ip, key_path, user)
        for transport in (node_transport, other_transport)
        for public_ip in (node_public_ip_address, other_ip_address)
        for private_ip in (node_private_ip_address, other_ip_address)
        for key_path in (node_ssh_key_path, other_ssh_key_path)
        for user in (node_user, other_user)
    ]
    for transport, public_ip, private_ip, key_path, user in combinations:
        other_node = Node(
            public_ip_address=public_ip,
            private_ip_address=private_ip,
            ssh_key_path=key_path,
            default_user=user,
            default_transport=transport,
        )
        # Only the pair of IP addresses determines equality.
        should_match = bool(
            (public_ip, private_ip) == (
                node_public_ip_address,
                node_private_ip_address,
            ),
        )
        do_match = bool(node == other_node)
        assert should_match == do_match
def check_bootstrap(node: Node) -> None:
    """
    Check that bootstrap works.

    ``dcos-cluster-id`` checks the cluster id, which demonstrates that
    consensus checking is working.
    """
    bootstrap_args = [
        '/opt/mesosphere/bin/dcos-shell',
        '/opt/mesosphere/bin/bootstrap',
        'dcos-cluster-id',
    ]
    node.run(bootstrap_args, output=Output.LOG_AND_CAPTURE)
def test_error(self, dcos_node: Node) -> None:
    """
    Commands which return a non-0 code raise a ``CalledProcessError``.
    """
    with pytest.raises(CalledProcessError) as excinfo:
        dcos_node.run(args=['rm', 'does_not_exist'])

    assert excinfo.value.returncode == 1
def test_errors(self, dcos_node: Node, output: Output) -> None:
    """
    The ``stderr`` of a failed command is available in the raised
    ``subprocess.CalledProcessError``.
    """
    with pytest.raises(subprocess.CalledProcessError) as excinfo:
        dcos_node.run(
            args=['rm', 'does_not_exist'],
            shell=True,
            output=output,
        )

    assert b'No such file or directory' in excinfo.value.stderr
def test_custom_user(
    self,
    dcos_node: Node,
    tmpdir: local,
) -> None:
    """
    It is possible to send a file to a cluster node as a custom user.
    """
    new_user = str(uuid.uuid4().hex)
    dcos_node.run(args=['useradd', new_user])
    # Give the new user the default user's authorized SSH keys so the
    # transport can authenticate as that user.
    dcos_node.run(
        args=['cp', '-R', '$HOME/.ssh', '/home/{}/'.format(new_user)],
        shell=True,
    )

    random_value = str(uuid.uuid4())
    source_file = tmpdir.join('example_file.txt')
    source_file.write(random_value)
    destination_dir = '/home/{testuser}/{random}'.format(
        testuser=new_user,
        random=random_value,
    )
    destination_path = Path(destination_dir) / 'file.txt'
    dcos_node.send_file(
        local_path=Path(str(source_file)),
        remote_path=destination_path,
        user=new_user,
    )

    # The created file must be owned by the custom user.
    stat_result = dcos_node.run(
        args=['stat', '-c', '"%U"', str(destination_path)],
        shell=True,
    )
    assert stat_result.stdout.decode().strip() == new_user

    # Implicitly asserts SSH connection closed by ``send_file``.
    dcos_node.run(args=['userdel', '-r', new_user])
def test_log_output_live_and_tty(self, dcos_node: Node) -> None:
    """
    A ``ValueError`` is raised if ``tty`` is ``True`` and
    ``log_output_live`` is ``True``.
    """
    with pytest.raises(ValueError) as excinfo:
        dcos_node.run(
            args=['echo', '1'],
            log_output_live=True,
            tty=True,
        )

    expected = '`log_output_live` and `tty` cannot both be `True`.'
    assert str(excinfo.value) == expected
def _dump_stdout_to_file(node: Node, cmd: List[str], file_path: Path) -> None:
    """
    Dump ``stdout`` of the given command to ``file_path``.

    Args:
        node: The node to run ``cmd`` on.
        cmd: The command to run.
        file_path: Local file path to stream the command's stdout to.

    Raises:
        CalledProcessError: If an error occurs when running the given
            command.
    """
    chunk_size = 2048
    proc = node.popen(args=cmd)
    with open(str(file_path), 'wb') as dumpfile:
        # Stream in chunks so large outputs do not need to fit in memory.
        for chunk in iter(lambda: proc.stdout.read(chunk_size), b''):
            dumpfile.write(chunk)
    proc.wait()
    if proc.returncode != 0:
        # ``bytes(proc.stdout)`` in the previous version called
        # ``bytes()`` on stream objects, which raises ``TypeError``.
        # stdout has already been drained into ``file_path``, so pass
        # empty output; read what remains of stderr if it is piped.
        stderr = proc.stderr.read() if proc.stderr else b''
        exception = CalledProcessError(
            returncode=proc.returncode,
            cmd=cmd,
            output=b'',
            stderr=stderr,
        )
        message = (
            'Failed to complete "{cmd}": {exception}'
        ).format(
            cmd=cmd,
            exception=exception,
        )
        # ``Logger.warn`` is deprecated in favor of ``Logger.warning``.
        LOGGER.warning(message)
        raise exception
def test_log_and_capture_stderr(
    self,
    caplog: LogCaptureFixture,
    dcos_node: Node,
    message: str,
) -> None:
    """
    When using ``Output.LOG_AND_CAPTURE``, stderr is logged and captured.
    """
    result = dcos_node.run(
        args=['>&2', 'echo', message],
        shell=True,
        output=Output.LOG_AND_CAPTURE,
    )
    assert result.stderr.strip().decode() == message

    expected_command = (
        'Running command `/bin/sh -c >&2 echo {message}` on a node '
        '`{node}`').format(
            message=message,
            node=str(dcos_node),
        )
    # The first record logs the command; the second logs its stderr.
    command_log, first_log = caplog.records
    assert command_log.message == expected_command
    assert first_log.levelno == logging.WARN
    assert message == first_log.message
def _dump_stdout_to_file(node: Node, cmd: List[str], file_path: Path) -> None:
    """
    Dump ``stdout`` of the given command to ``file_path``.

    Args:
        node: The node to run ``cmd`` on.
        cmd: The command to run.
        file_path: Local file path to stream the command's stdout to.

    Raises:
        CalledProcessError: If an error occurs when running the given
            command.
    """
    chunk_size = 2048
    proc = node.popen(args=cmd)
    with open(str(file_path), 'wb') as dumpfile:
        # Stream in chunks so large outputs do not need to fit in memory.
        for chunk in iter(lambda: proc.stdout.read(chunk_size), b''):
            dumpfile.write(chunk)
    proc.wait()
    if proc.returncode != 0:
        # ``bytes(proc.stdout)`` in the previous version called
        # ``bytes()`` on stream objects, which raises ``TypeError``.
        # stdout has already been drained into ``file_path``, so pass
        # empty output; read what remains of stderr if it is piped.
        stderr = proc.stderr.read() if proc.stderr else b''
        exception = CalledProcessError(
            returncode=proc.returncode,
            cmd=cmd,
            output=b'',
            stderr=stderr,
        )
        message = ('Failed to complete "{cmd}": {exception}').format(
            cmd=cmd,
            exception=exception,
        )
        # ``Logger.warn`` is deprecated in favor of ``Logger.warning``.
        LOGGER.warning(message)
        raise exception
def test_log_and_capture(
    self,
    caplog: LogCaptureFixture,
    dcos_node: Node,
    stdout_message: str,
    stderr_message: str,
) -> None:
    """
    When given ``Output.LOG_AND_CAPTURE``, stderr and stdout are captured
    in the output as stdout.

    stdout and stderr are logged.
    """
    result = dcos_node.run(
        args=['echo', stdout_message, '&&', '>&2', 'echo', stderr_message],
        shell=True,
        output=Output.LOG_AND_CAPTURE,
    )

    # stderr is merged into stdout.
    # This is not ideal but for now it is the case.
    # The order is not necessarily preserved.
    expected_messages = set([stdout_message, stderr_message])
    captured_lines = set(result.stdout.strip().decode().split('\n'))
    assert captured_lines == expected_messages

    first_log, second_log = caplog.records
    assert first_log.levelno == logging.DEBUG
    assert second_log.levelno == logging.DEBUG
    logged_messages = set([first_log.message, second_log.message])
    assert logged_messages == expected_messages
def test_capture(
    self,
    caplog: LogCaptureFixture,
    dcos_node: Node,
    stdout_message: str,
    stderr_message: str,
) -> None:
    """
    When given ``Output.CAPTURE``, stderr and stdout are captured in the
    output.

    stderr is logged.
    """
    result = dcos_node.run(
        args=['echo', stdout_message, '&&', '>&2', 'echo', stderr_message],
        output=Output.CAPTURE,
        shell=True,
    )
    assert result.stdout.strip().decode() == stdout_message
    assert result.stderr.strip().decode() == stderr_message

    args_log, result_log = caplog.records
    # The command itself is logged at the warning level.
    assert args_log.levelno == logging.WARNING
    for expected_part in (stdout_message, stderr_message, 'echo'):
        assert expected_part in args_log.message
    # stderr is logged at the warning level.
    assert result_log.levelno == logging.WARNING
    assert result_log.message == stderr_message
def test_default(
    self,
    caplog: LogCaptureFixture,
    dcos_node: Node,
) -> None:
    """
    By default, stderr and stdout are captured in the output.

    stderr is logged.
    """
    stdout_message = uuid.uuid4().hex
    stderr_message = uuid.uuid4().hex
    result = dcos_node.run(
        args=['echo', stdout_message, '&&', '>&2', 'echo', stderr_message],
        shell=True,
    )
    assert result.stdout.strip().decode() == stdout_message
    assert result.stderr.strip().decode() == stderr_message

    args_log, result_log = caplog.records
    # The command itself is logged at the warning level.
    assert args_log.levelno == logging.WARNING
    for expected_part in (stdout_message, stderr_message, 'echo'):
        assert expected_part in args_log.message
    # stderr is logged at the warning level.
    assert result_log.levelno == logging.WARNING
    assert result_log.message == stderr_message
def test_tty(
    self,
    dcos_node: Node,
    tty: bool,
) -> None:
    """
    If the ``tty`` parameter is set to ``True``, a TTY is created.

    The result of the ``-t 1`` TTY check is echoed to stdout and
    compared against the expectation.
    """
    # The previous version created an unused ``filename`` local and
    # called ``.format(filename=filename)`` on a template with no
    # placeholders; both were dead code and are removed.
    script = textwrap.dedent(
        """
        if [ -t 1 ]
        then
        echo True
        else
        echo False
        fi
        """,
    )
    echo_result = dcos_node.run(
        args=[script],
        tty=tty,
        shell=True,
    )
    if not sys.stdout.isatty():  # pragma: no cover
        reason = (
            'For this test to be valid, stdout must be a TTY. '
            'Use ``--capture=no / -s`` to run this test.'
        )
        raise pytest.skip(reason)
    else:  # pragma: no cover
        assert echo_result.returncode == 0
        assert echo_result.stdout.strip().decode() == str(tty)
def _get_node_distribution(node: Node) -> Distribution:
    """
    Given a ``Node``, return the ``Distribution`` on that node.

    Raises:
        KeyError: If the node's (ID, VERSION_ID) pair is not a known
            distribution.
    """
    cat_cmd = node.run(
        args=['cat /etc/*-release'],
        shell=True,
    )
    version_info = cat_cmd.stdout

    # Map keys such as ``ID`` and ``VERSION_ID`` to their (still quoted)
    # values.  Split on the first ``=`` only: values may themselves
    # contain ``=``, which would make ``dict()`` raise a ``ValueError``.
    version_data = dict(
        line.split('=', 1)
        for line in version_info.decode().split('\n')
        if '=' in line
    )

    distributions = {
        ('"centos"', '"7"'): Distribution.CENTOS_7,
        ('"rhel"', '"7.4"'): Distribution.RHEL_7,
        ('coreos', '1911.3.0'): Distribution.COREOS,
        ('coreos', '1632.3.0'): Distribution.COREOS,
    }

    distro_id = version_data['ID'].strip()
    distro_version_id = version_data['VERSION_ID'].strip()
    return distributions[(distro_id, distro_version_id)]
def _nodes(self, container_base_name: str) -> Set[Node]:
    """
    Args:
        container_base_name: The start of the container names.

    Returns:
        ``Node``s corresponding to containers with names starting with
        ``container_base_name``.
    """
    client = docker.from_env(version='auto')
    containers = client.containers.list(
        filters={'name': container_base_name},
    )

    nodes = set()
    for container in containers:
        networks = container.attrs['NetworkSettings']['Networks']
        if len(networks) == 1:
            network_name = 'bridge'
        else:
            # With multiple networks attached, use the one that is not
            # the default bridge.
            [network_name] = list(networks.keys() - set(['bridge']))
        ip_address = IPv4Address(networks[network_name]['IPAddress'])
        nodes.add(
            Node(
                public_ip_address=ip_address,
                private_ip_address=ip_address,
                default_user=self._default_user,
                ssh_key_path=self._path / 'include' / 'ssh' / 'id_rsa',
                default_transport=self._default_transport,
            ),
        )
    return nodes
def test_not_utf_8_log_and_capture(
    self,
    caplog: LogCaptureFixture,
    dcos_node: Node,
) -> None:
    """
    It is possible to see output of commands which output non-utf-8
    bytes using ``output.LOG_AND_CAPTURE``.
    """
    # We expect that this will trigger a UnicodeDecodeError when run on a
    # node, if the result is meant to be decoded with utf-8.
    # It also is not so long that it will kill our terminal.
    dcos_node.run(
        args=['head', '-c', '100', '/bin/cat'],
        output=Output.LOG_AND_CAPTURE,
    )

    # We do not test the output, but we at least test its length for now.
    [log] = caplog.records
    assert len(log.message) >= 100
def test_stderr(self, dcos_node: Node) -> None:
    """
    ``stderr`` is sent to the result's ``stderr`` property.
    """
    echo_result = dcos_node.run(args=['echo', '1', '1>&2'], shell=True)
    assert echo_result.returncode == 0
    assert echo_result.stderr.strip() == b'1'
    assert echo_result.stdout.strip() == b''
def test_errors(
    self,
    caplog: LogCaptureFixture,
    dcos_node: Node,
    output: Output,
) -> None:
    """
    Errors are always logged at the error level.
    """
    # NOTE(review): the parametrized ``output`` argument is immediately
    # overridden here, so only ``Output.CAPTURE`` is exercised — confirm
    # whether this shadowing is intentional.
    output = Output.CAPTURE
    with pytest.raises(subprocess.CalledProcessError):
        dcos_node.run(
            args=['rm', 'does_not_exist'],
            shell=True,
            output=output,
        )

    [record] = caplog.records
    assert record.levelno == logging.ERROR
    assert 'No such file or directory' in record.message
def test_custom_user(
    self,
    dcos_node: Node,
) -> None:
    """
    Commands can be run as a custom user.
    """
    new_user = str(uuid.uuid4().hex)
    dcos_node.run(args=['useradd', new_user])
    # Give the new user the default user's authorized SSH keys so the
    # transport can authenticate as that user.
    dcos_node.run(
        args=['cp', '-R', '$HOME/.ssh', '/home/{}/'.format(new_user)],
        shell=True,
    )

    proc = dcos_node.popen(
        args=['echo', '$HOME'],
        user=new_user,
        shell=True,
    )
    stdout, stderr = proc.communicate()
    assert proc.returncode == 0
    assert stdout.strip().decode() == '/home/' + new_user
    assert stderr.strip().decode() == ''

    dcos_node.run(args=['userdel', '-r', new_user])
def test_stderr(self, dcos_node: Node) -> None:
    """
    ``stderr`` is sent to the result's ``stderr`` property.
    """
    proc = dcos_node.popen(args=['echo', '1', '1>&2'], shell=True)
    stdout, stderr = proc.communicate()
    assert proc.returncode == 0
    assert stderr.strip().decode() == '1'
    assert stdout.strip().decode() == ''
def test_send_file(
    self,
    dcos_node: Node,
    tmpdir: local,
) -> None:
    """
    It is possible to send a file to a cluster node as the default user.
    """
    expected_content = str(uuid.uuid4())
    source_file = tmpdir.join('example_file.txt')
    source_file.write(expected_content)

    destination_path = Path('/etc/new_dir/on_master_node.txt')
    dcos_node.send_file(
        local_path=Path(str(source_file)),
        remote_path=destination_path,
    )
    cat_result = dcos_node.run(args=['cat', str(destination_path)])
    assert cat_result.stdout.decode() == expected_content
def test_remote_file_does_not_exist(
    self,
    dcos_node: Node,
) -> None:
    """
    Downloading a file raises a ``ValueError`` if the remote file path
    does not exist.
    """
    remote_file_path = Path('/etc/') / uuid.uuid4().hex
    expected_message = (
        'Failed to download file from remote location "{location}". '
        'File does not exist.').format(location=remote_file_path)

    with pytest.raises(ValueError) as exc:
        dcos_node.download_file(
            remote_path=remote_file_path,
            local_path=Path('./blub'),
        )

    assert str(exc.value) == expected_message
def test_async(self, dcos_node: Node) -> None:
    """
    It is possible to run commands asynchronously.

    Two processes communicate over a named pipe: the first blocks
    reading from the pipe until the second writes ``$HOME`` to it, which
    only works if both processes run concurrently.
    """
    # Reader: create the pipe (``| true`` ignores failure if it already
    # exists) and block reading from it.
    proc_1 = dcos_node.popen(
        args=['(mkfifo /tmp/pipe | true)', '&&', '(cat /tmp/pipe)'],
        shell=True,
    )
    # Writer: create the pipe the same way, then write ``$HOME`` to it,
    # unblocking the reader.
    proc_2 = dcos_node.popen(
        args=[
            '(mkfifo /tmp/pipe | true)',
            '&&',
            '(echo $HOME > /tmp/pipe)',
        ],
        shell=True,
    )
    try:
        # An arbitrary timeout to avoid infinite wait times.
        stdout, _ = proc_1.communicate(timeout=15)
    except TimeoutExpired:  # pragma: no cover
        proc_1.kill()
        stdout, _ = proc_1.communicate()

    return_code_1 = proc_1.poll()

    # Needed to cleanly terminate second subprocess
    try:
        # An arbitrary timeout to avoid infinite wait times.
        proc_2.communicate(timeout=15)
    except TimeoutExpired:  # pragma: no cover
        proc_2.kill()
        proc_2.communicate()
        raise

    return_code_2 = proc_2.poll()

    # The reader received the writer's ``$HOME``; assumes the home
    # directory is ``/<default_user>`` — e.g. ``/root`` for ``root``.
    assert stdout.strip().decode() == '/' + dcos_node.default_user
    assert return_code_1 == 0
    assert return_code_2 == 0

    # Clean up the pipe for subsequent tests.
    dcos_node.run(['rm', '-f', '/tmp/pipe'])
def test_error(
    self,
    caplog: LogCaptureFixture,
    dcos_node: Node,
    shell: bool,
    log_output_live: bool,
) -> None:
    """
    Commands which return a non-0 code raise a ``CalledProcessError``.

    The location of the error message (stdout vs stderr) and whether it
    appears in the debug log both depend on ``log_output_live``.
    """
    with pytest.raises(CalledProcessError) as excinfo:
        dcos_node.run(
            args=['rm', 'does_not_exist'],
            shell=shell,
            log_output_live=log_output_live,
        )

    exception = excinfo.value
    assert exception.returncode == 1
    error_message = (
        'rm: cannot remove ‘does_not_exist’: No such file or directory'
    )
    # With ``log_output_live``, stderr is merged into stdout; otherwise
    # the error message stays on stderr.
    if log_output_live:
        assert exception.stderr.strip() == b''
        assert exception.stdout.decode().strip() == error_message
    else:
        assert exception.stdout.strip() == b''
        assert exception.stderr.decode().strip() == error_message

    # The stderr output is not in the debug log output.
    debug_messages = set(
        filter(
            lambda record: record.levelno == logging.DEBUG,
            caplog.records,
        ),
    )
    matching_messages = set(
        filter(
            lambda record: 'No such file' in record.getMessage(),
            caplog.records,
        ),
    )
    # The error appears among DEBUG records exactly when output was
    # being logged live.
    assert bool(len(debug_messages & matching_messages)) is log_output_live
def _get_storage_driver(
    self,
    node: Node,
) -> DockerStorageDriver:
    """
    Given a `Node`, return the `DockerStorageDriver` on that node.
    """
    _wait_for_docker(node=node)
    info_result = node.run(
        args=['docker', 'info', '--format', '{{.Driver}}'],
    )
    driver_name = info_result.stdout.decode().strip()
    return self.DOCKER_STORAGE_DRIVERS[driver_name]
def _dcos_systemd_units(node: Node) -> List[str]:
    """
    Return all systemd services that are started up by DC/OS.
    """
    # ``systemctl show -p Wants`` prints ``Wants=<unit> <unit> ...``;
    # piping through ``cut`` keeps only the unit list.
    show_cmd = [
        'sudo',
        'systemctl',
        'show',
        '-p',
        'Wants',
        'dcos.target',
        '|',
        'cut',
        '-d=',
        '-f2',
    ]
    result = node.run(args=show_cmd, shell=True)
    unit_names = result.stdout.strip().decode()
    return str(unit_names).split(' ')