def create_keystore_truststore(cn: str, task: str):
    """Generate a Java keystore and truststore inside the given task.

    Converts the previously generated PEM keypair ({cn}_pub.crt /
    {cn}_priv.key) into a PKCS12 bundle, imports it into a JKS keystore,
    and imports the DC/OS CA bundle into a JKS truststore.

    Args:
        cn: Common name used to derive the on-task file names.
        task: Task id of the container in which the commands run.
    """
    pub_path = "{}_pub.crt".format(cn)
    priv_path = "{}_priv.key".format(cn)
    keystore_path = "{}_keystore.jks".format(cn)
    truststore_path = "{}_truststore.jks".format(cn)

    log.info("Generating keystore and truststore, task:{}".format(task))
    dcos_ca_bundle = fetch_dcos_ca_bundle(task)

    # Convert to a PKCS12 key
    output = sdk_cmd.task_exec(
        task,
        'bash -c "export RANDFILE=/mnt/mesos/sandbox/.rnd && '
        'openssl pkcs12 -export -in {} -inkey {} '
        '-out keypair.p12 -name keypair -passout pass:export '
        '-CAfile {} -caname root"'.format(pub_path, priv_path, dcos_ca_bundle))
    log.info(output)
    # Compare with == rather than `is`: identity comparison against an int
    # literal relies on CPython's small-int caching, not guaranteed semantics.
    assert output[0] == 0

    log.info("Generating certificate: importing into keystore and truststore")
    # Import into the keystore and truststore
    output = sdk_cmd.task_exec(
        task,
        "keytool -importkeystore "
        "-deststorepass changeit -destkeypass changeit -destkeystore {} "
        "-srckeystore keypair.p12 -srcstoretype PKCS12 -srcstorepass export "
        "-alias keypair".format(keystore_path))
    log.info(output)
    assert output[0] == 0

    output = sdk_cmd.task_exec(
        task,
        "keytool -import -trustcacerts -noprompt "
        "-file {} -storepass changeit "
        "-keystore {}".format(dcos_ca_bundle, truststore_path))
    log.info(output)
    assert output[0] == 0
def create_remote_keytab(self, keytab_id: str, principals: list = None) -> str:
    """Create a remote keytab for the specified list of principals.

    Falls back to ``self.principals`` when no principals are given. Returns
    the agent-local absolute path of the generated keytab, or ``None`` when
    no principals are available at all.

    Args:
        keytab_id: Prefix used to build a unique keytab file name.
        principals: Principals to export; defaults to ``self.principals``.
    """
    # Use a None sentinel instead of the previous mutable default (`[]`),
    # which is created once and shared across all calls.
    if principals is None:
        principals = []

    name = "{}.{}.keytab".format(keytab_id, str(uuid.uuid4()))
    log.info("Creating keytab: %s", name)

    if not principals:
        log.info("Using predefined principals")
        principals = self.principals

    if not principals:
        log.error("No principals specified not creating keytab")
        return None

    log.info("Deleting any previous keytab just in case (kadmin will append to it)")
    sdk_cmd.task_exec(self.task_id, "rm {}".format(name))

    kadmin_options = ["-l"]
    kadmin_cmd = "ext"
    kadmin_args = ["-k", name]
    kadmin_args.extend(principals)

    self.__run_kadmin(kadmin_options, kadmin_cmd, kadmin_args)

    # The keytab lands in the task sandbox; reconstruct its absolute path on
    # the agent host from the framework/executor/task identifiers.
    keytab_absolute_path = os.path.join("/var/lib/mesos/slave/slaves",
                                        self.kdc_host_id,
                                        "frameworks", self.framework_id,
                                        "executors", self.task_id,
                                        "runs/latest", name)
    return keytab_absolute_path
def _load_discovery_sans(task_id, label):
    """Fetch server.crt from the task and return its SAN values (list of str).

    `label` is only used to keep the original "first"/"second" log messages.
    """
    _, stdout, _ = sdk_cmd.task_exec(task_id, 'cat server.crt')
    log.info('{} server.crt: {}'.format(label, stdout))

    ascii_cert = stdout.encode('ascii')
    log.info('{} server.crt ascii encoded: {}'.format(label, ascii_cert))
    cert = x509.load_pem_x509_certificate(ascii_cert, DEFAULT_BACKEND)

    san_extension = cert.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    # NOTE(review): reaches into cryptography's private _general_names API;
    # works but may break across library versions.
    return [
        san.value for san in san_extension.value._general_names._general_names
    ]


def test_changing_discovery_replaces_certificate_sans():
    """
    Update service configuration to change discovery prefix of a task.
    Scheduler should update task and new SANs should be generated.
    """
    original_tasks = sdk_tasks.get_task_ids(config.PACKAGE_NAME, 'discovery')
    assert len(original_tasks) == 1, 'Expecting exactly one task ID'
    task_id = original_tasks[0]
    assert task_id

    # Load end-entity certificate from PEM encoded file
    sans = _load_discovery_sans(task_id, 'first')
    expected_san = (
        '{name}-0.{service_name}.autoip.dcos.thisdcos.directory'.format(
            name=DISCOVERY_TASK_PREFIX,
            service_name=config.SERVICE_NAME))
    assert expected_san in sans

    # Run task update with new discovery prefix
    marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
    marathon_config['env'][
        'DISCOVERY_TASK_PREFIX'] = DISCOVERY_TASK_PREFIX + '-new'
    sdk_marathon.update_app(config.SERVICE_NAME, marathon_config)
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

    # NOTE(review): the first lookup above uses config.PACKAGE_NAME while this
    # one uses config.SERVICE_NAME -- confirm whether that is intentional.
    task_id = sdk_tasks.get_task_ids(config.SERVICE_NAME, "discovery")[0]

    sans = _load_discovery_sans(task_id, 'second')
    expected_san = (
        '{name}-0.{service_name}.autoip.dcos.thisdcos.directory'.format(
            name=DISCOVERY_TASK_PREFIX + '-new',
            service_name=config.SERVICE_NAME))
    assert expected_san in sans
def test_user_can_auth_and_write_and_read(hdfs_client, kerberos):
    """A kinit'd client can write a small file to HDFS and read it back."""
    client_id = hdfs_client["id"]
    sdk_auth.kinit(client_id,
                   keytab=config.KEYTAB,
                   principal=kerberos.get_principal("hdfs"))

    # Unique file name so this test does not collide with other suite runs.
    test_filename = "test_auth_write_read-{}".format(str(uuid.uuid4()))

    write_cmd = "/bin/bash -c '{}'".format(
        config.hdfs_write_command(config.TEST_CONTENT_SMALL, test_filename))
    sdk_cmd.task_exec(client_id, write_cmd)

    read_cmd = "/bin/bash -c '{}'".format(
        config.hdfs_read_command(test_filename))
    _, stdout, _ = sdk_cmd.task_exec(client_id, read_cmd)
    assert stdout == config.TEST_CONTENT_SMALL
def fetch_dcos_ca_bundle(task: str) -> str:
    """Fetch the DC/OS CA bundle from the leading Mesos master"""
    local_bundle_file = "dcos-ca.crt"
    # Download into the task sandbox; -L follows redirects, --insecure is
    # needed because the CA being fetched is what would validate the cert.
    curl_cmd = "curl -L --insecure -v leader.mesos/ca/dcos-ca.crt -o {}".format(
        local_bundle_file)
    sdk_cmd.task_exec(task, curl_cmd)
    return local_bundle_file
def test_user_can_auth_and_write_and_read(kerberized_hdfs_client):
    """After kinit, the client can round-trip a small file through HDFS."""
    sdk_auth.kinit(kerberized_hdfs_client,
                   keytab=config.KEYTAB,
                   principal=config.CLIENT_PRINCIPALS["hdfs"])

    test_filename = "test_auth_write_read"  # must be unique among tests in this suite

    write_cmd = "/bin/bash -c '{}'".format(
        config.hdfs_write_command(config.TEST_CONTENT_SMALL, test_filename))
    sdk_cmd.task_exec(kerberized_hdfs_client, write_cmd)

    read_cmd = "/bin/bash -c '{}'".format(
        config.hdfs_read_command(test_filename))
    _, stdout, _ = sdk_cmd.task_exec(kerberized_hdfs_client, read_cmd)
    assert stdout == config.TEST_CONTENT_SMALL
def resolve_hosts(task_id: str, hosts: list) -> bool:
    """
    Use bootstrap to resolve the specified list of hosts.

    Returns True when bootstrap reports overall success; on failure, logs
    each individual host that did not resolve and returns False.
    """
    bootstrap_cmd = [
        './bootstrap', '-print-env=false', '-hivemq=false',
        '-install-certs=false', '-self-resolve=false',
        '-resolve-hosts', ','.join(hosts)
    ]
    LOG.info("Running bootstrap to wait for DNS resolution of %s\n\t%s",
             hosts, bootstrap_cmd)
    rc, out, err = sdk_cmd.task_exec(task_id, ' '.join(bootstrap_cmd))
    LOG.info("bootstrap return code: %s", rc)
    LOG.info("bootstrap STDOUT: %s", out)
    LOG.info("bootstrap STDERR: %s", err)

    # Note that bootstrap returns its output in STDERR
    resolved = 'SDK Bootstrap successful.' in err
    if not resolved:
        # NOTE(review): the per-host markers are searched in STDOUT even
        # though the success marker lives in STDERR -- confirm intentional.
        for host in hosts:
            if "Resolved '{host}' =>".format(host=host) not in out:
                LOG.error("Could not resolve: %s", host)
    return resolved
def _export_cert_from_task_keystore(task, keystore_path, alias, password=KEYSTORE_PASS):
    """
    Export a certificate from a keystore inside a running container.

    Runs keytool in the task's context and parses the PEM output in memory.

    Args:
        task (str): Task id of container that contains the keystore
        keystore_path (str): Path inside container to keystore containing
            the certificate
        alias (str): Alias of the certificate in the keystore

    Returns:
        x509.Certificate object
    """
    keytool_args = ['-rfc']
    if password:
        keytool_args.append('-storepass "{password}"'.format(password=password))

    export_cmd = _keystore_export_command(keystore_path, alias, ' '.join(keytool_args))
    # task_exec returns (rc, stdout, stderr); stdout carries the PEM text.
    pem_text = sdk_cmd.task_exec(task, export_cmd)[1]
    return x509.load_pem_x509_certificate(pem_text.encode('ascii'), DEFAULT_BACKEND)
def test_java_keystore():
    """
    Java `keystore-app` presents itself with provided TLS certificate
    from keystore.
    """
    task_id = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'artifacts')[0]
    assert task_id

    # Make a curl request from artifacts container to `keystore-app`
    # and make sure that mesos curl can verify certificate served by app
    app_url = 'https://' + sdk_hosts.vip_host(
        config.SERVICE_NAME, KEYSTORE_TASK_HTTPS_PORT_NAME) + '/hello-world'
    curl = 'curl -v -i --cacert secure-tls-pod.ca ' + app_url
    _, output = sdk_cmd.task_exec(task_id, curl, return_stderr_in_stdout=True)

    # Check that HTTP request was successful with response 200 and make sure
    # that curl with pre-configured cert was used and that task was matched
    # by SAN in certificate.
    assert 'HTTP/1.1 200 OK' in output
    assert 'CAfile: secure-tls-pod.ca' in output
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"')
    assert tls_verification_msg in output
def test_tls_basic_artifacts():
    """End-entity cert matches the service identity and the truststore's root
    CA matches the cluster CA."""
    task_id = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'artifacts')[0]
    assert task_id

    # Load end-entity certificate from keystore and root CA cert from truststore
    pem_bytes = sdk_cmd.task_exec(
        task_id, 'cat secure-tls-pod.crt')[1].encode('ascii')
    end_entity_cert = x509.load_pem_x509_certificate(pem_bytes, DEFAULT_BACKEND)

    root_ca_cert_in_truststore = _export_cert_from_task_keystore(
        task_id, 'keystore.truststore', 'dcos-root')

    # Check that certificate subject maches the service name
    common_name = end_entity_cert.subject.get_attributes_for_oid(
        NameOID.COMMON_NAME)[0].value
    assert common_name in sdk_hosts.autoip_host(
        config.SERVICE_NAME, 'artifacts-0-node')

    sans = end_entity_cert.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME).value._general_names._general_names
    assert len(sans) == 1

    cluster_root_ca_cert = x509.load_pem_x509_certificate(
        sdk_cmd.cluster_request('GET', '/ca/dcos-ca.crt').content,
        DEFAULT_BACKEND)
    assert root_ca_cert_in_truststore.signature == cluster_root_ca_cert.signature
def kdestroy(task_id: str):
    """
    Performs a kdestroy command to erase an auth session for a principal.
    :param task_id: The task in whose environment the kinit will run.
    """
    log.info("Erasing auth session:")
    exit_code, out, err = sdk_cmd.task_exec(task_id, "kdestroy")
    if exit_code == 0:
        return
    raise RuntimeError(
        "Failed ({}) to erase auth session\nstdout: {}\nstderr: {}".format(
            exit_code, out, err))
def run_openssl_command() -> str:
    # Probe the endpoint's TLS cipher support with `openssl s_client`;
    # openssl_timeout / cipher / endpoint / service_name / task_name come
    # from the enclosing scope.
    command = 'timeout {} openssl s_client -cipher {} -connect {}'.format(
        openssl_timeout, cipher, endpoint)
    task_id = sdk_tasks.get_task_ids(service_name, task_name)[0]
    _, output = sdk_cmd.task_exec(task_id, command, True)
    return output
def verify_shared_executor(pod_name,
                           expected_files=('essential', 'nonessential'),
                           delete_files=True):
    '''verify that both tasks share the same executor:
    - matching ExecutorInfo
    - both 'essential' and 'nonessential' present in shared-volume/ across
      both tasks

    Args:
        pod_name: pod whose tasks are inspected via `pod info`
        expected_files: file names expected in the shared volume
            (immutable tuple default -- the previous list default was a
            mutable default argument shared across calls)
        delete_files: when True, delete the files to prepare a task relaunch
    '''
    tasks = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME,
                            'pod info {}'.format(pod_name), json=True)
    assert len(tasks) == 2

    # check that the task executors all match
    executor = tasks[0]['info']['executor']
    for task in tasks[1:]:
        assert executor == task['info']['executor']

    # for each task, check shared volume content matches what's expected
    task_names = [task['info']['name'] for task in tasks]
    for task_name in task_names:
        # 1.9 just uses the host filesystem in 'task exec', so use 'task ls' across the board instead
        filenames = sdk_cmd.run_cli(
            'task ls {} shared-volume/'.format(task_name)).strip().split()
        assert set(expected_files) == set(filenames)

    # delete files from volume in preparation for a following task relaunch
    if delete_files:
        if sdk_utils.dcos_version_less_than("1.10"):
            # 1.9 just uses the host filesystem in 'task exec', so figure out the absolute volume path manually
            expected_file_path = sdk_cmd.task_exec(
                task_names[0],
                'find /var/lib/mesos/slave/volumes -iname ' + filenames[0])[1].strip()
            # volume dir is parent of the expected file path.
            volume_dir = os.path.dirname(expected_file_path)
        else:
            # 1.10+ works correctly: path is relative to sandbox
            volume_dir = 'shared-volume/'
        sdk_cmd.task_exec(
            task_names[0],
            'rm ' + ' '.join([os.path.join(volume_dir, name) for name in filenames]))
def kinit(task_id: str, keytab: str, principal: str):
    """
    Performs a kinit command to authenticate the specified principal.
    :param task_id: The task in whose environment the kinit will run.
    :param keytab: The keytab used by kinit to authenticate.
    :param principal: The name of the principal the user wants to authenticate as.
    """
    kinit_cmd = "kinit -kt {keytab} {principal}".format(
        keytab=keytab, principal=principal)
    log.info("Authenticating principal=%s with keytab=%s: %s",
             principal, keytab, kinit_cmd)
    exit_code, out, err = sdk_cmd.task_exec(task_id, kinit_cmd)
    if exit_code == 0:
        return
    raise RuntimeError(
        "Failed ({}) to authenticate with keytab={} principal={}\nstdout: {}\nstderr: {}".format(
            exit_code, keytab, principal, out, err))
def wait_for_brokers(client: str, brokers: list):
    """
    Run bootstrap on the specified client to resolve the list of brokers
    """
    LOG.info("Running bootstrap to wait for DNS resolution")
    bootstrap_cmd = ' '.join([
        '/opt/bootstrap', '-print-env=false', '-template=false',
        '-install-certs=false', '-resolve-hosts', ','.join(brokers)
    ])
    bootstrap_output = sdk_cmd.task_exec(client, bootstrap_cmd)
    LOG.info(bootstrap_output)
    # Flatten the (rc, stdout, stderr) tuple into one string before matching.
    combined = ' '.join(str(part) for part in bootstrap_output)
    assert "SDK Bootstrap successful" in combined
def create_tls_artifacts(cn: str, task: str) -> str:
    """Generate a key/cert pair signed by the DC/OS CA inside the given task.

    Creates a private key and CSR, has the cluster CA sign it, writes the
    public cert into the task, then builds the matching keystore/truststore.

    Args:
        cn: Common name for the certificate and derived file names.
        task: Task id of the container in which the commands run.

    Returns:
        The certificate's distinguished name string.
    """
    pub_path = "{}_pub.crt".format(cn)
    priv_path = "{}_priv.key".format(cn)
    log.info("Generating certificate. cn={}, task={}".format(cn, task))

    output = sdk_cmd.task_exec(
        task,
        'openssl req -nodes -newkey rsa:2048 -keyout {} -out request.csr '
        '-subj "/C=US/ST=CA/L=SF/O=Mesosphere/OU=Mesosphere/CN={}"'.format(
            priv_path, cn))
    log.info(output)
    # Compare with == rather than `is`: identity comparison against an int
    # literal relies on CPython's small-int caching, not guaranteed semantics.
    assert output[0] == 0

    rc, raw_csr, _ = sdk_cmd.task_exec(task, 'cat request.csr')
    assert rc == 0

    request = {"certificate_request": raw_csr}
    token = sdk_cmd.run_cli("config show core.dcos_acs_token")

    output = sdk_cmd.task_exec(
        task,
        "curl --insecure -L -X POST "
        "-H 'Authorization: token={}' "
        "leader.mesos/ca/api/v2/sign "
        "-d '{}'".format(token, json.dumps(request)))
    log.info(output)
    assert output[0] == 0

    # Write the public cert to the client
    certificate = json.loads(output[1])["result"]["certificate"]
    output = sdk_cmd.task_exec(
        task, "bash -c \"echo '{}' > {}\"".format(certificate, pub_path))
    log.info(output)
    assert output[0] == 0

    create_keystore_truststore(cn, task)
    return "CN={},OU=Mesosphere,O=Mesosphere,L=SF,ST=CA,C=US".format(cn)
def __run_kadmin(self, options: list, cmd: str, args: list):
    """
    Invokes Kerberos' kadmin binary inside the container to run some command.
    :param options (list): A list of options given to kadmin.
    :param cmd (str): The name of the sub command to run.
    :param args (list): A list of arguments passed to the sub command. This
                        should also include any flags needed to be set for
                        the sub command.
    :raises a generic Exception if the invocation fails.
    """
    kadmin_cmd = "/usr/sbin/kadmin {options} {cmd} {args}".format(
        options=' '.join(options), cmd=cmd, args=' '.join(args))
    log.info("Running kadmin: {}".format(kadmin_cmd))
    exit_code, out, err = sdk_cmd.task_exec(self.task_id, kadmin_cmd)
    if exit_code == 0:
        return
    raise RuntimeError(
        "Failed ({}) to invoke kadmin: {}\nstdout: {}\nstderr: {}".
        format(exit_code, kadmin_cmd, out, err))
def _add_role_acls(role: str, user: str, task: str, topic: str, zookeeper_endpoint: str, env_str=None):
    # Grants `user` the given Kafka ACL role (e.g. --producer / --consumer)
    # on `topic` by running kafka-acls inside the task container, optionally
    # prefixed by an environment-setup command (env_str).
    #
    # NOTE(review): the literal "\ " sequences inside the command string look
    # like collapsed shell line-continuations from the original formatting --
    # confirm the command renders as intended before relying on it.
    cmd = "bash -c \"{setup_env}kafka-acls \ --topic {topic_name} \ --authorizer-properties zookeeper.connect={zookeeper_endpoint} \ --add \ --allow-principal User:{user} \ --{role}\"".format( setup_env="{} && ".format(env_str) if env_str else "", topic_name=topic, zookeeper_endpoint=zookeeper_endpoint, user=user, role=role)
    LOG.info("Running: %s", cmd)
    output = sdk_cmd.task_exec(task, cmd)
    LOG.info(output)
def _curl_query(service_name, method, endpoint,
                json_data=None, role="master", https=False, return_json=True):
    """Issue a curl request against the service's HTTP API from inside a task.

    Args:
        service_name: Name of the deployed service.
        method: HTTP method (GET/POST/PUT/...).
        endpoint: Path portion of the URL (no leading slash needed).
        json_data: Optional dict sent as a JSON request body.
        role: Node role used to build the target host name.
        https: Use https:// instead of http:// when True.
        return_json: Parse stdout as JSON when True; return raw stdout otherwise.

    Returns:
        Parsed JSON (or raw stdout), or None when the command or parsing fails.
    """
    protocol = 'https' if https else 'http'
    host = sdk_hosts.autoip_host(service_name, "{}-0-node".format(role),
                                 _master_zero_http_port(service_name))
    curl_cmd = "/opt/mesosphere/bin/curl -sS -u elastic:changeme -X{} '{}://{}/{}'".format(
        method, protocol, host, endpoint)
    if json_data:
        curl_cmd += " -H 'Content-type: application/json' -d '{}'".format(
            json.dumps(json_data))

    # NOTE(review): the command always executes inside master-0-node even when
    # `role` points the URL at a different node type -- confirm intentional.
    task_name = "master-0-node"
    exit_code, stdout, stderr = sdk_cmd.task_exec(task_name, curl_cmd)

    def build_errmsg(msg):
        return "{}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}".format(
            msg, curl_cmd, stdout, stderr)

    if exit_code:
        log.warning(
            build_errmsg(
                "Failed to run command on {}, retrying or giving up.".format(
                    task_name)))
        return None

    if not return_json:
        return stdout

    try:
        return json.loads(stdout)
    except ValueError:
        # json.JSONDecodeError subclasses ValueError; the previous bare
        # `except:` swallowed everything, including SystemExit and
        # KeyboardInterrupt.
        log.warning(
            build_errmsg(
                "Failed to parse stdout as JSON, retrying or giving up."))
        return None
def test_verify_https_ports(hdfs_client, node_type, port):
    """
    Verify that HTTPS port is open name, journal and data node types.
    """
    task_id = "{}-0-node".format(node_type)
    host = sdk_hosts.autoip_host(config.SERVICE_NAME, task_id, port)

    curl_cmd = "curl -v --cacert {} https://{}".format(
        hdfs_client["dcos_ca_bundle"], host)
    rc, stdout, stderr = sdk_cmd.task_exec(hdfs_client["id"], curl_cmd)
    assert not rc

    # curl writes the TLS handshake details to stderr.
    assert "SSL connection using TLS1.2 / ECDHE_RSA_AES_128_GCM_SHA256" in stderr
    assert "server certificate verification OK" in stderr
    assert "common name: {}.{} (matched)".format(task_id, config.SERVICE_NAME) in stderr

    # In the Kerberos case we expect a 401 error
    assert "401 Authentication required" in stdout
def test_java_truststore():
    """
    Make an HTTP request from CLI to nginx exposed service.
    Test that CLI reads and uses truststore to verify HTTPS connection.
    """
    task_id = sdk_tasks.get_task_ids(config.SERVICE_NAME, "keystore")[0]
    assert task_id

    # Make an http request from a CLI app using configured keystore to the
    # service itself exposed via VIP.
    # This will test whether the service is serving correct end-entity
    # certificate from keystore and if CLI client can verify certificate
    # with custom truststore configuration.
    target = 'https://' + sdk_hosts.vip_host(config.SERVICE_NAME,
                                             NGINX_TASK_HTTPS_PORT_NAME)
    command = _java_command(
        'java -jar ' + KEYSTORE_APP_JAR_NAME + ' truststoretest '
        'integration-test.yml ' + target)
    _, output, _ = sdk_cmd.task_exec(task_id, command)

    # Unfortunately the `dcos task exec` doesn't respect the return code
    # from executed command in container so we need to manually assert for
    # expected output.
    assert 'status=200' in output
def test_tls_nginx():
    """
    Checks that NGINX exposes TLS service with correct PEM encoded end-entity
    certificate.
    """
    # Use keystore-app `truststoretest` CLI command to run request against
    # the NGINX container to verify that nginx presents itself with end-entity
    # certificate that can be verified by with truststore.
    task_id = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'keystore')[0]
    assert task_id

    nginx_url = 'https://' + sdk_hosts.vip_host(
        config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME) + '/'
    command = _java_command(
        'java -jar ' + KEYSTORE_APP_JAR_NAME + ' truststoretest '
        'integration-test.yml ' + nginx_url)
    _, output, _ = sdk_cmd.task_exec(task_id, command)

    # Unfortunately the `dcos task exec` doesn't respect the return code
    # from executed command in container so we need to manually assert for
    # expected output.
    assert 'status=200' in output
def test_users_have_appropriate_permissions(kerberized_hdfs_client):
    """Superuser provisions a home dir for alice; alice can read/write it,
    bob cannot.

    Fixes a typo in the first "Bob tries to ..." log message
    ("wrtie" -> "write").
    """
    # "hdfs" is a superuser
    sdk_auth.kinit(kerberized_hdfs_client, keytab=config.KEYTAB,
                   principal=config.CLIENT_PRINCIPALS["hdfs"])

    log.info("Creating directory for alice")
    make_user_directory_cmd = config.hdfs_command("mkdir -p /users/alice")
    sdk_cmd.task_exec(kerberized_hdfs_client, make_user_directory_cmd)

    change_ownership_cmd = config.hdfs_command(
        "chown alice:users /users/alice")
    sdk_cmd.task_exec(kerberized_hdfs_client, change_ownership_cmd)

    change_permissions_cmd = config.hdfs_command("chmod 700 /users/alice")
    sdk_cmd.task_exec(kerberized_hdfs_client, change_permissions_cmd)

    test_filename = "test_user_permissions"  # must be unique among tests in this suite

    # alice has read/write access to her directory
    sdk_auth.kdestroy(kerberized_hdfs_client)
    sdk_auth.kinit(kerberized_hdfs_client, keytab=config.KEYTAB,
                   principal=config.CLIENT_PRINCIPALS["alice"])

    write_access_cmd = "/bin/bash -c \"{}\"".format(
        config.hdfs_write_command(config.TEST_CONTENT_SMALL,
                                  "/users/alice/{}".format(test_filename)))
    log.info("Alice can write: {}".format(write_access_cmd))
    rc, stdout, _ = sdk_cmd.task_exec(kerberized_hdfs_client, write_access_cmd)
    assert stdout == '' and rc == 0

    read_access_cmd = config.hdfs_read_command(
        "/users/alice/{}".format(test_filename))
    log.info("Alice can read: {}".format(read_access_cmd))
    _, stdout, _ = sdk_cmd.task_exec(kerberized_hdfs_client, read_access_cmd)
    assert stdout == config.TEST_CONTENT_SMALL

    ls_cmd = config.hdfs_command("ls /users/alice")
    _, stdout, _ = sdk_cmd.task_exec(kerberized_hdfs_client, ls_cmd)
    assert "/users/alice/{}".format(test_filename) in stdout

    # bob doesn't have read/write access to alice's directory
    sdk_auth.kdestroy(kerberized_hdfs_client)
    sdk_auth.kinit(kerberized_hdfs_client, keytab=config.KEYTAB,
                   principal=config.CLIENT_PRINCIPALS["bob"])

    log.info(
        "Bob tries to write to alice's directory: {}".format(write_access_cmd))
    _, _, stderr = sdk_cmd.task_exec(kerberized_hdfs_client, write_access_cmd)
    log.info(
        "Bob can't write to alice's directory: {}".format(write_access_cmd))
    assert "put: Permission denied: user=bob" in stderr

    log.info(
        "Bob tries to read from alice's directory: {}".format(read_access_cmd))
    _, _, stderr = sdk_cmd.task_exec(kerberized_hdfs_client, read_access_cmd)
    log.info(
        "Bob can't read from alice's directory: {}".format(read_access_cmd))
    assert "cat: Permission denied: user=bob" in stderr
def cmd(pod_name, command):
    # Run a nodetool subcommand inside the pod's server task; JAVA_HOME is
    # resolved at runtime from the bundled JRE in the sandbox.
    nodetool_invocation = (
        "bash -c 'JAVA_HOME=$(ls -d jre*/) apache-cassandra-*/bin/nodetool {}'"
        .format(command))
    return sdk_cmd.task_exec('{}-server'.format(pod_name), nodetool_invocation)
def test_users_have_appropriate_permissions(hdfs_client, kerberos):
    """Superuser provisions a home dir for alice; alice can read/write it,
    bob cannot.

    Fixes a typo in the first "Bob tries to ..." log message
    ("wrtie" -> "write").
    """
    # "hdfs" is a superuser
    sdk_auth.kinit(hdfs_client["id"], keytab=config.KEYTAB,
                   principal=kerberos.get_principal("hdfs"))

    log.info("Creating directory for alice")
    make_user_directory_cmd = config.hdfs_command("mkdir -p /users/alice")
    sdk_cmd.task_exec(hdfs_client["id"], make_user_directory_cmd)

    change_ownership_cmd = config.hdfs_command("chown alice:users /users/alice")
    sdk_cmd.task_exec(hdfs_client["id"], change_ownership_cmd)

    change_permissions_cmd = config.hdfs_command("chmod 700 /users/alice")
    sdk_cmd.task_exec(hdfs_client["id"], change_permissions_cmd)

    test_filename = "test_user_permissions-{}".format(str(uuid.uuid4()))

    # alice has read/write access to her directory
    sdk_auth.kdestroy(hdfs_client["id"])
    sdk_auth.kinit(hdfs_client["id"], keytab=config.KEYTAB,
                   principal=kerberos.get_principal("alice"))

    write_access_cmd = "/bin/bash -c \"{}\"".format(config.hdfs_write_command(
        config.TEST_CONTENT_SMALL,
        "/users/alice/{}".format(test_filename)))
    log.info("Alice can write: %s", write_access_cmd)
    rc, stdout, _ = sdk_cmd.task_exec(hdfs_client["id"], write_access_cmd)
    assert stdout == '' and rc == 0

    read_access_cmd = config.hdfs_read_command(
        "/users/alice/{}".format(test_filename))
    log.info("Alice can read: %s", read_access_cmd)
    _, stdout, _ = sdk_cmd.task_exec(hdfs_client["id"], read_access_cmd)
    assert stdout == config.TEST_CONTENT_SMALL

    ls_cmd = config.hdfs_command("ls /users/alice")
    _, stdout, _ = sdk_cmd.task_exec(hdfs_client["id"], ls_cmd)
    assert "/users/alice/{}".format(test_filename) in stdout

    # bob doesn't have read/write access to alice's directory
    sdk_auth.kdestroy(hdfs_client["id"])
    sdk_auth.kinit(hdfs_client["id"], keytab=config.KEYTAB,
                   principal=kerberos.get_principal("bob"))

    log.info("Bob tries to write to alice's directory: %s", write_access_cmd)
    _, _, stderr = sdk_cmd.task_exec(hdfs_client["id"], write_access_cmd)
    log.info("Bob can't write to alice's directory: %s", write_access_cmd)
    assert "put: Permission denied: user=bob" in stderr

    log.info("Bob tries to read from alice's directory: %s", read_access_cmd)
    _, _, stderr = sdk_cmd.task_exec(hdfs_client["id"], read_access_cmd)
    log.info("Bob can't read from alice's directory: %s", read_access_cmd)
    assert "cat: Permission denied: user=bob" in stderr
def read_wrapper():
    # Executes `read_cmd` inside `task` (both from the enclosing scope),
    # logging the invocation and its full result before returning it.
    LOG.info("Running: %s", read_cmd)
    exit_code, out, err = sdk_cmd.task_exec(task, read_cmd)
    LOG.info("rc=%s\nstdout=%s\nstderr=%s\n", exit_code, out, err)
    return exit_code, out, err
def check_kibana_plugin_installed(plugin_name, service_name=SERVICE_NAME):
    """Return True when `plugin_name` appears in the task's installed Kibana
    plugin list."""
    list_cmd = "bash -c '$MESOS_SANDBOX/kibana-$ELASTIC_VERSION-linux-x86_64/bin/kibana-plugin list'"
    _, stdout, _ = sdk_cmd.task_exec(service_name, list_cmd)
    return plugin_name in stdout