async def test_misc_indy_1720(pool_handler, wallet_handler, get_default_trustee):
    # Regression test (INDY-1720): demote the current primary, promote it
    # back, then stop/start the new primary's service. Every step must
    # trigger a view change (new primary elected) and the pool must stay
    # in sync and writable at the end.
    trustee_did, _ = get_default_trustee
    primary1, alias1, target_did1 = await get_primary(pool_handler, wallet_handler, trustee_did)
    await demote_node(pool_handler, wallet_handler, trustee_did, alias1, target_did1)
    primary2 = await wait_until_vc_is_done(primary1, pool_handler, wallet_handler, trustee_did)
    assert primary2 != primary1
    # Generate write load between reconfigurations.
    for i in range(100):
        await send_nym(pool_handler, wallet_handler, trustee_did, random_did_and_json()[0], None, None, None)
    await promote_node(pool_handler, wallet_handler, trustee_did, alias1, target_did1)
    primary3 = await wait_until_vc_is_done(primary2, pool_handler, wallet_handler, trustee_did)
    assert primary3 != primary2
    # Stop the indy-node service on the newly elected primary to force
    # yet another view change.
    output = testinfra.get_host('ssh://node{}'.format(primary3)).check_output(
        'systemctl stop indy-node')
    print(output)
    primary4 = await wait_until_vc_is_done(primary3, pool_handler, wallet_handler, trustee_did)
    assert primary4 != primary3
    for i in range(100):
        await send_nym(pool_handler, wallet_handler, trustee_did, random_did_and_json()[0], None, None, None)
    # Bring the stopped node back and verify the pool catches up.
    output = testinfra.get_host('ssh://node{}'.format(primary3)).check_output(
        'systemctl start indy-node')
    print(output)
    await check_pool_is_in_sync()
    await send_and_get_nym(pool_handler, wallet_handler, trustee_did, random_did_and_json()[0])
def host():
    """Return a testinfra host for the Test Kitchen instance under test.

    Backend selection, in order: local (macOS GitHub runner), WinRM
    (Windows instances), paramiko/SSH (vagrant instances), docker
    (everything else). Connection details come from KITCHEN_* env vars.
    """
    if os.environ.get("RUNNER_OS", "") == "macOS" and os.environ.get(
            "KITCHEN_LOCAL_YAML", "") == "kitchen.macos.yml":
        # Adjust the `PATH` so that the `salt-call` executable can be found
        os.environ["PATH"] = "/opt/salt/bin{}{}".format(
            os.pathsep, os.environ["PATH"])
        return testinfra.get_host("local://", sudo=True)
    # Default KITCHEN_INSTANCE to "" so the `in` checks below cannot raise
    # TypeError when the variable is unset (os.environ.get returns None).
    kitchen_instance = os.environ.get("KITCHEN_INSTANCE", "")
    if os.environ.get(
            "KITCHEN_USERNAME") == "vagrant" or "windows" in kitchen_instance:
        if "windows" in kitchen_instance:
            return testinfra.get_host(
                "winrm://{KITCHEN_USERNAME}:{KITCHEN_PASSWORD}@{KITCHEN_HOSTNAME}:{KITCHEN_PORT}"
                .format(**os.environ),
                no_ssl=True,
            )
        return testinfra.get_host(
            "paramiko://{KITCHEN_USERNAME}@{KITCHEN_HOSTNAME}:{KITCHEN_PORT}".
            format(**os.environ),
            ssh_identity_file=os.environ.get("KITCHEN_SSH_KEY"),
        )
    return testinfra.get_host(
        "docker://{KITCHEN_USERNAME}@{KITCHEN_CONTAINER_ID}".format(
            **os.environ))
async def test_vc_by_degradation():
    # Raise the monitoring DELTA on all 25 nodes so the performance
    # degradation check fires more easily, push 250 NYM txns, and verify
    # a view change happened (primary differs before/after) while the
    # pool keeps accepting and serving writes. DELTA is restored at the end.
    for i in range(1, 26):
        host = testinfra.get_host('ssh://ubuntu@perf_node' + str(i),
                                  ssh_config='/home/indy/.ssh/config')
        with host.sudo():
            host.run('echo "\nDELTA=1.2" >> /etc/indy/indy_config.py')
            host.run('systemctl restart indy-node')
    await pool.set_protocol_version(2)
    pool_handle = await pool_helper()
    wallet_handle, _, _ = await wallet_helper()
    trustee_did, trustee_vk = await did.create_and_store_my_did(
        wallet_handle, json.dumps({'seed': '000000000000000000000000Trustee1'}))
    random_did = random_did_and_json()[0]
    another_random_did = random_did_and_json()[0]
    # Baseline: a write and a read must succeed before the load.
    add_before = json.loads(await nym_helper(pool_handle, wallet_handle, trustee_did, random_did))
    get_before = json.loads(await get_nym_helper(pool_handle, wallet_handle, trustee_did, random_did))
    req1 = await ledger.build_get_validator_info_request(trustee_did)
    results1 = json.loads(await ledger.sign_and_submit_request(
        pool_handle, wallet_handle, trustee_did, req1))
    result1 = json.loads(results1['Node1'])
    # Extract the primary's node number, e.g. 'Node3:0' -> '3'.
    primary_before =\
        result1['result']['data']['Node_info']['Replicas_status']['Node1:0']['Primary'][len('Node'):-len(':0')]
    # Load phase: enough txns to trip the degradation-based view change.
    for i in range(250):
        await nym_helper(pool_handle, wallet_handle, trustee_did, random_did_and_json()[0])
    req2 = await ledger.build_get_validator_info_request(trustee_did)
    results2 = json.loads(await ledger.sign_and_submit_request(
        pool_handle, wallet_handle, trustee_did, req2))
    result2 = json.loads(results2['Node1'])
    primary_after =\
        result2['result']['data']['Node_info']['Replicas_status']['Node1:0']['Primary'][len('Node'):-len(':0')]
    # Pool must still be writable/readable after the view change.
    add_after = json.loads(await nym_helper(pool_handle, wallet_handle, trustee_did, another_random_did))
    get_after = json.loads(await get_nym_helper(pool_handle, wallet_handle, trustee_did, another_random_did))
    # Restore the default DELTA on all nodes.
    for i in range(1, 26):
        host = testinfra.get_host('ssh://ubuntu@perf_node' + str(i),
                                  ssh_config='/home/indy/.ssh/config')
        with host.sudo():
            host.run('echo "\nDELTA=0.4" >> /etc/indy/indy_config.py')
            host.run('systemctl restart indy-node')
    assert primary_before != primary_after
    assert add_before['op'] == 'REPLY'
    assert get_before['result']['seqNo'] is not None
    assert add_after['op'] == 'REPLY'
    assert get_after['result']['seqNo'] is not None
def get_ansible_host(config, inventory, host,
                     ssh_config=None, ssh_identity_file=None):
    """Map an Ansible inventory entry to a native testinfra host.

    Returns None when the host cannot be handled natively (unknown
    connection type, or a non-localhost host with an empty inventory);
    the caller must then fall back to force_ansible=True.
    """
    if is_empty_inventory(inventory):
        # 'localhost' works even without any inventory data.
        if host == 'localhost':
            return testinfra.get_host('local://')
        return None
    hostvars = inventory['_meta'].get('hostvars', {}).get(host, {})
    connection = hostvars.get('ansible_connection', 'ssh')
    if connection not in (
        'smart', 'ssh', 'paramiko_ssh', 'local', 'docker', 'lxc', 'lxd',
    ):
        # unhandled connection type, must use force_ansible=True
        return None
    # Normalize Ansible connection names to testinfra backend names.
    connection = {
        'lxd': 'lxc',
        'paramiko_ssh': 'paramiko',
        'smart': 'ssh',
    }.get(connection, connection)
    testinfra_host = hostvars.get('ansible_host', host)
    user = hostvars.get('ansible_user')
    port = hostvars.get('ansible_port')
    kwargs = {}
    if hostvars.get('ansible_become', False):
        kwargs['sudo'] = True
        kwargs['sudo_user'] = hostvars.get('ansible_become_user')
    if ssh_config is not None:
        kwargs['ssh_config'] = ssh_config
    if ssh_identity_file is not None:
        kwargs['ssh_identity_file'] = ssh_identity_file
    # Support both keys as advertised by Ansible; inventory-provided keys
    # override the explicit ssh_identity_file argument.
    if 'ansible_ssh_private_key_file' in hostvars:
        kwargs['ssh_identity_file'] = hostvars['ansible_ssh_private_key_file']
    elif 'ansible_private_key_file' in hostvars:
        kwargs['ssh_identity_file'] = hostvars['ansible_private_key_file']
    kwargs['ssh_extra_args'] = '{} {}'.format(
        hostvars.get('ansible_ssh_common_args', ''),
        hostvars.get('ansible_ssh_extra_args', '')).strip()
    # Assemble the backend spec: connection://[user@]host[:port]
    spec = '{}://'.format(connection)
    if user:
        spec += '{}@'.format(user)
    # IPv6 addresses must be bracketed in the host spec.
    if check_ip_address(testinfra_host) == 6:
        spec += '[' + testinfra_host + ']'
    else:
        spec += testinfra_host
    if port:
        spec += ':{}'.format(port)
    return testinfra.get_host(spec, **kwargs)
def host():
    """Return a testinfra host for the Test Kitchen instance under test.

    WinRM for Windows Vagrant instances, paramiko/SSH for other Vagrant
    instances, docker otherwise. Connection details come from the
    KITCHEN_* environment variables.
    """
    if os.environ.get('KITCHEN_USERNAME') == 'vagrant':
        # Default to '' so a missing KITCHEN_INSTANCE cannot make the
        # `in` test raise TypeError (os.environ.get would return None).
        if 'windows' in os.environ.get('KITCHEN_INSTANCE', ''):
            return testinfra.get_host(
                'winrm://{KITCHEN_USERNAME}:{KITCHEN_PASSWORD}@{KITCHEN_HOSTNAME}:{KITCHEN_PORT}'.format(**os.environ),
                no_ssl=True)
        return testinfra.get_host(
            'paramiko://{KITCHEN_USERNAME}@{KITCHEN_HOSTNAME}:{KITCHEN_PORT}'.format(**os.environ),
            ssh_identity_file=os.environ.get('KITCHEN_SSH_KEY'))
    return testinfra.get_host('docker://{KITCHEN_USERNAME}@{KITCHEN_CONTAINER_ID}'.format(**os.environ))
def test_libindy():
    """Build an indy pool and client image from the RC channel, then run the
    Java and Python SDK samples inside the client container and assert each
    sample reports completion.
    """
    indy_plenum_ver = '1.10.0~rc1'
    indy_node_ver = '1.10.0~rc1'
    pyzmq_ver = '18.1.0'
    indy_sdk_deb_path = 'https://repo.sovrin.org/sdk/lib/apt/xenial/rc/'
    indy_sdk_deb_ver = 'libindy_1.12.0~96_amd64.deb'
    indy_sdk_ver = '1.12.0-rc-96'
    os.chdir('/home/indy/indy-sdk')
    subprocess.check_call(['git', 'stash'])
    subprocess.check_call(['git', 'fetch'])
    subprocess.check_call(['git', 'checkout', 'origin/rc'])
    # Pin stream and package versions inside the pool dockerfile.
    subprocess.check_call(['sed', '-i', '22c\\ARG indy_stream=rc',
                           './ci/indy-pool.dockerfile'])
    subprocess.check_call(['sed', '-i',
                           '27c\\ARG indy_plenum_ver={}'.format(indy_plenum_ver),
                           './ci/indy-pool.dockerfile'])
    subprocess.check_call(['sed', '-i',
                           '28c\\ARG indy_node_ver={}'.format(indy_node_ver),
                           './ci/indy-pool.dockerfile'])
    subprocess.check_call(['sed', '-i',
                           '31c\\ARG python3_pyzmq_ver={}'.format(pyzmq_ver),
                           './ci/indy-pool.dockerfile'])
    # set version of `indy` dependency in `pom.xml` to libindy version
    subprocess.check_call(['sed', '-i',
                           '112c\\\t\t\t<version>{}</version>'.format(indy_sdk_ver),
                           './samples/java/pom.xml'])
    # set version of `python3-indy` dependency in `setup.py` to libindy version
    subprocess.check_call(['sed', '-i',
                           '11c\\ install_requires=[\'python3-indy=={}\']'.format(indy_sdk_ver),
                           './samples/python/setup.py'])
    subprocess.check_call(['docker', 'build', '-f', 'ci/indy-pool.dockerfile',
                           '-t', 'indy_pool', '.'])
    subprocess.check_call(['docker', 'run', '-itd', '-p', '9701-9709:9701-9709',
                           'indy_pool'])
    # [-13:-1] slices the image id out of docker build's trailing output line.
    pool_id = subprocess.check_output(
        ['docker', 'build', '--build-arg',
         'indy_sdk_deb={}'.format(indy_sdk_deb_path + indy_sdk_deb_ver),
         '-f', 'ci/acceptance/ubuntu_acceptance.dockerfile',
         '.'])[-13:-1].decode().strip()
    print(pool_id)
    client_id = subprocess.check_output(
        ['docker', 'run', '-itd',
         '-v', '/home/indy/indy-sdk/samples:/home/indy/samples',
         '--network=host', pool_id]).decode().strip()
    print(client_id)
    host = testinfra.get_host("docker://{}".format(client_id))
    # test java
    java_res = host.run(
        'cd /home/indy/samples/java && TEST_POOL_IP=127.0.0.1 mvn clean compile exec:java -Dexec.mainClass="Main"'
    )
    print(java_res)
    java_checks = [
        'Anoncreds sample -> completed',
        'Anoncreds Revocation sample -> completed',
        'Ledger sample -> completed',
        'Crypto sample -> completed',
        'BUILD SUCCESS'
    ]
    for check in java_checks:
        # BUGFIX: `is not -1` compared identity against an int literal
        # (implementation-defined, SyntaxWarning on 3.8+); use `!= -1`.
        assert java_res.stdout.find(check) != -1
    host.run('rm -rf /home/indy/.indy_client')
    # test python
    host.run('cd /home/indy/samples/python && python3.5 -m pip install --user -e .')
    python_res = host.run(
        'cd /home/indy/samples/python && TEST_POOL_IP=127.0.0.1 python3.5 src/main.py')
    print(python_res)
    python_checks = [
        'Getting started -> done',
        'Anoncreds Revocation sample -> completed',
        'Anoncreds sample -> completed',
        'Crypto sample -> completed',
        'Ledger sample -> completed',
        'Transaction Author Agreement sample -> completed'
    ]
    for check in python_checks:
        # Python samples log to stderr, hence stderr here vs stdout above.
        assert python_res.stderr.find(check) != -1
    host.run('rm -rf /home/indy/.indy_client')
def _function():
    """Assert that /etc/app.id on *server* exists and carries the expected id."""
    target = testinfra.get_host("paramiko://root@" + server,
                                ssh_config="/root/.ssh/config")
    app_id = target.file('/etc/app.id')
    assert app_id.exists
    expected = f"CIC_WEBAPP:{app_env}:{server}"
    assert app_id.content_string.strip() == expected
def container(request):
    """Start the parametrized docker image and yield its ContainerData."""
    run_cmd = ["docker", "run", "-d", "-it", request.param[1], "/bin/sh"]
    docker_id = subprocess.check_output(run_cmd).decode().strip()
    yield ContainerData(*request.param,
                        testinfra.get_host("docker://" + docker_id))
    # teardown: remove the container after the test is done with it
    subprocess.check_call(["docker", "rm", "-f", docker_id])
def container(request):
    """Run a podman container for the (optionally parametrized) image,
    install the update-ca-certificates helper into it, and yield a
    testinfra host connected to it."""
    container_url = getattr(request, "param", TUMBLEWEED_CONTAINER[0])
    raw_id = subprocess.check_output(
        ["podman", "run", "-d", "-it", container_url, "/bin/sh"]
    )
    container_id = raw_id.decode().strip()
    # Copy the helper script shipped next to this test into the container.
    helper = path.abspath(
        path.join(path.dirname(__file__), "update-ca-certificates")
    )
    subprocess.check_call(
        ["podman", "cp", helper,
         container_id + ":" + "/bin/update-ca-certificates"],
    )
    yield testinfra.get_host(f"podman://{container_id}")
    subprocess.check_call(["podman", "rm", "-f", container_id])
def test_lxc_container():
    """Verify the canary file inside the LXC container via the ansible backend."""
    # testinfra uses a plain file for Ansible inventory: create it
    updated_inventory = '%s.new.yml' % os.environ['MOLECULE_INVENTORY_FILE']
    lxc_hosts = {'testboot.lxc': {'ansible_user': '******'}}
    inventory = {
        'all': {
            'children': {
                'lxc': {'hosts': lxc_hosts},
                'ungrouped': {},
            }
        },
    }
    with open(updated_inventory, 'w') as output:
        output.write(yaml.dump(inventory))
    host = testinfra.get_host(
        'ansible://testboot.lxc?ansible_inventory=%s' % updated_inventory)
    canary = host.file('/srv/tests/canary')
    assert canary.exists
    assert canary.contains("inception")
    assert not canary.contains("not there")
def test_platform_is_supported(self):
    """Check that the container's distribution and version are declared
    supported in self.platforms."""
    host = testinfra.get_host(f'docker://{self.container_name}')
    # Map testinfra distribution ids to the platform names used in metadata.
    platform_names = {
        'centos': 'EL',
        'debian': 'Debian',
        'fedora': 'Fedora',
        'ubuntu': 'Ubuntu'
    }
    distribution = host.system_info.distribution
    self.assertIn(distribution, platform_names, 'Unknown distribution.')
    platform_name = platform_names[distribution]
    # BUGFIX: previously an unmatched platform left `platform = None` and
    # the subscript below raised TypeError; fail with a clear message instead.
    platform = next(
        (p for p in self.platforms if p['name'] == platform_name), None)
    self.assertIsNotNone(
        platform,
        'Platform {} is not declared in the metadata.'.format(platform_name))
    supported_versions = platform['versions']
    # EL/Fedora use numeric major versions; the others use codenames.
    if platform_name in ('EL', 'Fedora'):
        version = int(host.system_info.release.split('.')[0])
    else:
        version = host.system_info.codename
    self.assertIn(version, supported_versions)
def host(pytestconfig, request):
    # Override the TestinfraBackend fixture,
    # all testinfra fixtures (i.e. modules) depend on it.
    cmd = ["docker", "run", "-d"]
    for dist in ("centos7", "jessie"):
        if dist in request.param:
            # Systemd require privileged container
            cmd.append("--privileged")
            break
    postgres_id = None
    if request.scope == "function":
        # Optional linked postgres container, requested by marking the
        # test function with `use_postgres`.
        if hasattr(request.function, "use_postgres"):
            postgres_id = subprocess.check_output([
                "docker", "run", "-d",
                pytestconfig.getoption('postgres_image'),
            ]).strip()
            cmd.extend(["--link", "{0}:postgres".format(postgres_id)])
        # Extra `docker run` options via the `docker_addopts` marker.
        if hasattr(request.function, "docker_addopts"):
            cmd.extend(list(request.function.docker_addopts.args))
    cmd.append(request.param)
    docker_id = subprocess.check_output(cmd).strip()
    yield testinfra.get_host("docker://" + docker_id)
    # teardown docker image
    subprocess.check_output(["docker", "rm", "-f", docker_id])
    if postgres_id is not None:
        subprocess.check_output(["docker", "rm", "-f", postgres_id])
async def stop_primary(pool_handle, wallet_handle, trustee_did):
    # Find the current primary via a VALIDATOR_INFO request and stop its
    # indy-node service, returning the primary's node number (string).
    # The nested try/except ladders retry two distinct failure modes:
    #  - JSONDecodeError: a randomly sampled node returned unparsable
    #    output, so reshuffle and sample again (up to three attempts);
    #  - TypeError: the reply structure is not ready yet, so back off
    #    (120s, then 240s) and repeat the whole request.
    try:
        req = await ledger.build_get_validator_info_request(trustee_did)
        results = json.loads(await ledger.sign_and_submit_request(
            pool_handle, wallet_handle, trustee_did, req))
        try:
            result = json.loads(sample(results.items(), 1)[0][1])
        except JSONDecodeError:
            try:
                shuffle(list(results.keys()))
                result = json.loads(sample(results.items(), 1)[0][1])
            except JSONDecodeError:
                shuffle(list(results.keys()))
                result = json.loads(sample(results.items(), 1)[0][1])
        name_before = result['result']['data']['Node_info']['Name']
        # Extract the primary's node number, e.g. 'Node3:0' -> '3'.
        primary_before =\
            result['result']['data']['Node_info']['Replicas_status'][name_before+':0']['Primary'][len('Node'): -len(':0')]
    except TypeError:
        try:
            time.sleep(120)
            req = await ledger.build_get_validator_info_request(trustee_did)
            results = json.loads(await ledger.sign_and_submit_request(
                pool_handle, wallet_handle, trustee_did, req))
            try:
                result = json.loads(sample(results.items(), 1)[0][1])
            except JSONDecodeError:
                try:
                    shuffle(list(results.keys()))
                    result = json.loads(sample(results.items(), 1)[0][1])
                except JSONDecodeError:
                    shuffle(list(results.keys()))
                    result = json.loads(sample(results.items(), 1)[0][1])
            name_before = result['result']['data']['Node_info']['Name']
            primary_before = \
                result['result']['data']['Node_info']['Replicas_status'][name_before + ':0']['Primary'][len('Node'): -len(':0')]
        except TypeError:
            # Last resort: wait longer and try once more without catching.
            time.sleep(240)
            req = await ledger.build_get_validator_info_request(trustee_did)
            results = json.loads(await ledger.sign_and_submit_request(
                pool_handle, wallet_handle, trustee_did, req))
            try:
                result = json.loads(sample(results.items(), 1)[0][1])
            except JSONDecodeError:
                try:
                    shuffle(list(results.keys()))
                    result = json.loads(sample(results.items(), 1)[0][1])
                except JSONDecodeError:
                    shuffle(list(results.keys()))
                    result = json.loads(sample(results.items(), 1)[0][1])
            name_before = result['result']['data']['Node_info']['Name']
            primary_before = \
                result['result']['data']['Node_info']['Replicas_status'][name_before + ':0']['Primary'][len('Node'): -len(':0')]
    # Stop the service on the primary's container and report it.
    host = testinfra.get_host('docker://node' + primary_before)
    host.run('systemctl stop indy-node')
    print('\nPRIMARY NODE {} HAS BEEN STOPPED!'.format(primary_before))
    return primary_before
def host(request):
    """Build the landtech/ci-kubernetes image and yield a testinfra host
    for a throwaway container running it."""
    image = "landtech/ci-kubernetes"
    build_cmd = [
        "docker", "build",
        "--build-arg=KUBECTL_VERSION=" + kubectl_version,
        "--build-arg=HELM_VERSION=" + helm_version,
        "--build-arg=AWS_IAM_AUTHENTICATOR_VERSION=" + aws_iam_auth_version,
        "--build-arg=ARGO_VERSION=" + argo_version,
        "--build-arg=TILT_VERSION=" + tilt_version,
        "--build-arg=RENDER_VERSION=" + render_version,
        "-t", image,
        "-f", "Dockerfile_kubernetes",
        ".",
    ]
    subprocess.check_call(build_cmd)
    run_cmd = ["docker", "run", "--rm", "--detach", "--tty", image]
    container = subprocess.check_output(run_cmd).decode().strip()
    yield testinfra.get_host("docker://" + container)
    subprocess.check_call(["docker", "rm", "-f", container])
def host(request):
    """Build Docker containers with Testinfra by overloading the host fixture."""
    username = os.environ['DOCKER_USERNAME']
    image_name = username + '/alpine:' + DOCKER_TAG
    # build local ./Dockerfile
    build_cmd = [
        'docker', 'build',
        '-f', 'Dockerfile',
        '-t', image_name,
        '--build-arg', 'TAG=' + DOCKER_TAG,
        '.',
    ]
    subprocess.check_call(build_cmd)
    # run a container that idles until torn down
    run_cmd = ['docker', 'run', '-d', image_name, 'tail', '-f', '/dev/null']
    docker_id = subprocess.check_output(run_cmd).decode().strip()
    # return a testinfra connection to the container
    yield testinfra.get_host("docker://" + docker_id)
    # at the end of the test suite, destroy the container
    subprocess.check_call(['docker', 'rm', '-f', docker_id])
def houston_api(request):
    """This is the host fixture for testinfra. To read more, please see the
    testinfra documentation:
    https://testinfra.readthedocs.io/en/latest/examples.html#test-docker-images
    """
    namespace = os.environ.get("NAMESPACE")
    release_name = os.environ.get("RELEASE_NAME")
    if not namespace:
        print("NAMESPACE env var is not present, using 'astronomer' namespace")
        namespace = "astronomer"
    if not release_name:
        print(
            "RELEASE_NAME env var is not present, assuming 'astronomer' is the release name"
        )
        release_name = "astronomer"
    kube = create_kube_client()
    houston_pods = kube.list_namespaced_pod(
        namespace, label_selector="component=houston").items
    assert (
        len(houston_pods) > 0
    ), "Expected to find at least one pod with label 'component: houston'"
    pod = houston_pods[0]
    yield testinfra.get_host(
        f"kubectl://{pod.metadata.name}?container=houston&namespace={namespace}"
    )
def __init__(self, host, username):
    """Open a paramiko-backed testinfra connection to *host* as *username*
    and resolve the host's canonical name and IP address."""
    self.host = host
    self.username = username
    spec = 'paramiko://{}@{}'.format(self.username, self.host)
    self.__connection = testinfra.get_host(spec)
    # Reverse then forward DNS lookups for the target host.
    self.__realname = socket.gethostbyaddr(self.host)[0]
    self.__ipaddress = socket.gethostbyname(self.host)
def host(dateimage):
    """Run *dateimage* with all ports published and yield a testinfra host."""
    client = docker.from_env()
    container = client.containers.run(
        dateimage, publish_all_ports=True, detach=True)
    yield testinfra.get_host("docker://" + container.id)
    # teardown: force-remove the container
    container.remove(force=True)
def host(dateimage, timeimage, webimage):
    """Start date/time/web containers on a private docker network and yield
    a testinfra host for the web container."""
    suffix = str(os.getpid())
    networkname = 'datetimeweb' + suffix
    dateimagename = 'date' + suffix
    timeimagename = 'time' + suffix
    webimagename = 'web' + suffix
    client = docker.from_env()
    network = client.networks.create(networkname)
    datecontainer = client.containers.run(
        dateimage, name=dateimagename, detach=True, network=network.name)
    timecontainer = client.containers.run(
        timeimage, name=timeimagename, detach=True, network=network.name)
    # The web container reaches the other two by container name.
    endpoints = {
        'DATEENDPOINT': '%s:7001' % (dateimagename),
        'TIMEENDPOINT': '%s:7002' % (timeimagename),
    }
    webcontainer = client.containers.run(
        webimage, name=webimagename, detach=True, network=network.name,
        publish_all_ports=True, environment=endpoints)
    yield testinfra.get_host('docker://' + webcontainer.id)
    # teardown: remove all three containers, then the network
    for container in (datecontainer, timecontainer, webcontainer):
        container.remove(force=True)
    network.remove()
async def promote_node(pool_handle, wallet_handle, trustee_did, alias, target_did):
    """Send a NODE txn restoring VALIDATOR service for *alias*, then restart
    indy-node on that host."""
    data = json.dumps({'alias': alias, 'services': ['VALIDATOR']})
    request = await ledger.build_node_request(trustee_did, target_did, data)
    reply = json.loads(await ledger.sign_and_submit_request(
        pool_handle, wallet_handle, trustee_did, request))
    assert reply['op'] == 'REPLY'
    # alias looks like 'NodeN' -> ssh target 'nodeN'
    node_host = testinfra.get_host('ssh://node' + alias[4:])
    node_host.run('systemctl restart indy-node')
def host():
    """Return a testinfra host for the Test Kitchen instance under test.

    WinRM for Windows Vagrant instances, paramiko/SSH for other Vagrant
    instances, docker otherwise.
    """
    if os.environ.get("KITCHEN_USERNAME") == "vagrant":
        # Default KITCHEN_INSTANCE to "" so the `in` check cannot raise
        # TypeError when the variable is unset (os.environ.get returns None).
        if "windows" in os.environ.get("KITCHEN_INSTANCE", ""):
            return testinfra.get_host(
                "winrm://{KITCHEN_USERNAME}:{KITCHEN_PASSWORD}@{KITCHEN_HOSTNAME}:{KITCHEN_PORT}"
                .format(**os.environ),
                no_ssl=True,
            )
        return testinfra.get_host(
            "paramiko://{KITCHEN_USERNAME}@{KITCHEN_HOSTNAME}:{KITCHEN_PORT}".
            format(**os.environ),
            ssh_identity_file=os.environ.get("KITCHEN_SSH_KEY"),
        )
    return testinfra.get_host(
        "docker://{KITCHEN_USERNAME}@{KITCHEN_CONTAINER_ID}".format(
            **os.environ))
def host():
    # NOTE(review): the MYSQL_ROOT_PASSWORD value was redacted from this
    # source ('******'); restore a real secret before running this fixture.
    docker_id = subprocess.check_output(
        ['docker', 'run', '--name', container_name, '-e',
         'MYSQL_ROOT_PASSWORD='******'-d', docker_image]).decode().strip()
    # The tests need net-tools (e.g. netstat) inside the container.
    subprocess.check_call(['docker', 'exec', '--user', 'root', container_name,
                           'yum', 'install', '-y', 'net-tools'])
    # Give mysqld time to initialise before tests connect.
    time.sleep(20)
    yield testinfra.get_host("docker://root@" + docker_id)
    # teardown: remove the container
    subprocess.check_call(['docker', 'rm', '-f', docker_id])
def __init__(self, node_name, bootstrap_node=False):
    # Start a PXC cluster node container. The first (bootstrap) node creates
    # the cluster; subsequent nodes join it via CLUSTER_JOIN. For PXC 8.0
    # the bootstrap node's TLS material is copied out so joiners can mount it.
    # NOTE(review): the MYSQL_ROOT_PASSWORD values were redacted from this
    # source ('******'); restore real secrets before running.
    self.node_name = node_name
    self.bootstrap_node = bootstrap_node
    if bootstrap_node:
        self.docker_id = subprocess.check_output([
            'docker', 'run', '--name', node_name, '-e',
            'MYSQL_ROOT_PASSWORD='******'-e',
            'CLUSTER_NAME=' + cluster_name,
            '--net=' + docker_network,
            '-d', docker_image
        ]).decode().strip()
        # Wait for mysqld to come up before extracting certificates.
        time.sleep(20)
        if pxc_version_major == "8.0":
            # Export the bootstrap node's TLS certificates/keys so joining
            # nodes can reuse them (8.0 enforces consistent SSL material).
            subprocess.check_call(['mkdir', '-p', test_pwd + '/cert'])
            subprocess.check_call([
                'docker', 'cp', node_name + ':/var/lib/mysql/ca.pem',
                test_pwd + '/cert'
            ])
            subprocess.check_call([
                'docker', 'cp', node_name + ':/var/lib/mysql/server-cert.pem',
                test_pwd + '/cert'
            ])
            subprocess.check_call([
                'docker', 'cp', node_name + ':/var/lib/mysql/server-key.pem',
                test_pwd + '/cert'
            ])
            subprocess.check_call([
                'docker', 'cp', node_name + ':/var/lib/mysql/client-cert.pem',
                test_pwd + '/cert'
            ])
            subprocess.check_call([
                'docker', 'cp', node_name + ':/var/lib/mysql/client-key.pem',
                test_pwd + '/cert'
            ])
            subprocess.check_call(
                ['chmod', '-R', 'a+r', test_pwd + '/cert'])
    else:
        if pxc_version_major == "8.0":
            # Joiner on 8.0: mount custom config and the shared certificates.
            self.docker_id = subprocess.check_output([
                'docker', 'run', '--name', node_name, '-e',
                'MYSQL_ROOT_PASSWORD='******'-e',
                'CLUSTER_NAME=' + cluster_name, '-e',
                'CLUSTER_JOIN=' + base_node_name + '1',
                '--net=' + docker_network,
                '-v', test_pwd + '/config:/etc/percona-xtradb-cluster.conf.d',
                '-v', test_pwd + '/cert:/cert',
                '-d', docker_image
            ]).decode().strip()
        else:
            self.docker_id = subprocess.check_output([
                'docker', 'run', '--name', node_name, '-e',
                'MYSQL_ROOT_PASSWORD='******'-e',
                'CLUSTER_NAME=' + cluster_name, '-e',
                'CLUSTER_JOIN=' + base_node_name + '1',
                '--net=' + docker_network,
                '-d', docker_image
            ]).decode().strip()
    # testinfra handle used by the test suite to inspect this node.
    self.ti_host = testinfra.get_host("docker://root@" + self.docker_id)
def resolve_hostname(nodename, ssh_config):
    """Resolve a node name (from SSH config) to a real hostname."""
    if ssh_config is None:
        # Without an SSH config the only node we can be talking about is
        # the bootstrap node, whose name needs no resolution.
        assert nodename == 'bootstrap'
        return nodename
    node = testinfra.get_host(nodename, ssh_config=ssh_config)
    return node.check_output('hostname')
def host(request):
    """Start a throwaway container from *image* and yield a testinfra host."""
    # run a container that sleeps long enough for the whole test session
    run_cmd = ['docker', 'run', '-d', image, '/bin/sleep', '300']
    docker_id = subprocess.check_output(run_cmd).decode().strip()
    # return a testinfra connection to the container
    yield testinfra.get_host("docker://" + docker_id)
    # at the end of the test suite, destroy the container
    subprocess.check_call(['docker', 'rm', '-f', docker_id])
def webserver(request):
    """
    This is the host fixture for testinfra. To read more, please see the
    testinfra documentation:
    https://testinfra.readthedocs.io/en/latest/examples.html#test-docker-images
    """
    namespace = os.environ.get('NAMESPACE')
    pod = os.environ.get('WEBSERVER_POD')
    spec = f'kubectl://{pod}?container=webserver&namespace={namespace}'
    yield testinfra.get_host(spec)
def get_node_ip(hostname, ssh_config, bootstrap_config):
    """Return the IP of the node `hostname`.

    We have to jump through hoops because `testinfra` does not provide a
    simple way to get this information…
    """
    node = testinfra.get_host(hostname, ssh_config=ssh_config)
    cidr = bootstrap_config['networks']['controlPlane']
    return utils.get_ip_from_cidr(node, cidr)
def host(request):
    """Build the slurm test image, run it, and yield a testinfra host."""
    test_image = "docker-centos7-slurm:spec-test"
    subprocess.check_call(["docker", "build", "-t", test_image, "."])
    run_cmd = ["docker", "run", "-d", "-it", "-h", "ernie", test_image]
    docker_id = subprocess.check_output(run_cmd).decode().strip()
    yield testinfra.get_host("docker://" + docker_id)
    # teardown: remove the container
    subprocess.check_call(["docker", "rm", "-f", docker_id])
def get_host():
    """Build a testinfra host object for every host listed in the config file."""
    specs = get_value_from_config('testinfra', 'hosts')
    return [testinfra.get_host(spec) for spec in specs]
def host(docker_compose, request):
    """Yield a testinfra host for the 'pg' service of the compose project."""
    compose_file = request.config.getoption("--docker-compose-file")
    ps_cmd = ['docker-compose', '-f', compose_file, 'ps', '-q', 'pg']
    docker_id = subprocess.check_output(ps_cmd).decode().strip()
    yield testinfra.get_host("docker://" + docker_id)
import functools
import os

import pytest
import testinfra

# Shared host connection for the Test Kitchen instance under test.
# KITCHEN_INSTANCE may be unset, so default it to '' — previously the
# bare os.environ.get() could return None and make `in` raise TypeError.
if os.environ.get('KITCHEN_USERNAME') == 'vagrant':
    if 'windows' in os.environ.get('KITCHEN_INSTANCE', ''):
        test_host = testinfra.get_host(
            'winrm://{KITCHEN_USERNAME}:{KITCHEN_PASSWORD}@{KITCHEN_HOSTNAME}:{KITCHEN_PORT}'.format(**os.environ),
            no_ssl=True)
    else:
        test_host = testinfra.get_host(
            'paramiko://{KITCHEN_USERNAME}@{KITCHEN_HOSTNAME}:{KITCHEN_PORT}'.format(**os.environ),
            ssh_identity_file=os.environ.get('KITCHEN_SSH_KEY'))
else:
    test_host = testinfra.get_host(
        'docker://{KITCHEN_USERNAME}@{KITCHEN_CONTAINER_ID}'.format(**os.environ))


@pytest.fixture
def host():
    """The shared testinfra host connected to the Kitchen instance."""
    return test_host


@pytest.fixture
def salt():
    """`host.salt` partially applied with the minion config path of the
    kitchen-provisioned salt tree (Windows vs POSIX locations)."""
    if 'windows' in os.environ.get('KITCHEN_INSTANCE', ''):
        tmpconf = r'c:\Users\vagrant\AppData\Local\Temp\kitchen\etc\salt'
    else:
        # Make the kitchen tree readable by the test user before using it.
        test_host.run('sudo chown -R {0} /tmp/kitchen'.format(os.environ.get('KITCHEN_USERNAME')))
        tmpconf = '/tmp/kitchen/etc/salt'
    return functools.partial(test_host.salt, config=tmpconf)
def host(request, tmpdir_factory):
    # Build a testinfra host connected (via the backend named in the
    # parametrized hostspec: docker/ssh/paramiko/safe-ssh/ansible) to a
    # docker container provided by the `_docker_container_*` fixtures.
    if not has_docker():
        pytest.skip()
        return
    image, kw = parse_hostspec(request.param)
    spec = BaseBackend.parse_hostspec(image)
    # Destructive tests get a fresh function-scoped container; others
    # share a session-scoped one.
    if getattr(request.function, "destructive", None) is not None:
        scope = "function"
    else:
        scope = "session"
    fname = "_docker_container_%s_%s" % (spec.name, scope)
    docker_id, docker_host, port = request.getfixturevalue(fname)
    if kw["connection"] == "docker":
        hostname = docker_id
    elif kw["connection"] in ("ansible", "ssh", "paramiko", "safe-ssh"):
        hostname = spec.name
        # SSH-style backends need a private key with 0600 permissions.
        tmpdir = tmpdir_factory.mktemp(str(id(request)))
        key = tmpdir.join("ssh_key")
        key.write(open(os.path.join(BASETESTDIR, "ssh_key")).read())
        key.chmod(384)  # octal 600
        if kw["connection"] == "ansible":
            if ansible is None:
                pytest.skip()
                return
            setup_ansible_config(
                tmpdir, hostname, docker_host, spec.user or "root", port,
                str(key))
            os.environ["ANSIBLE_CONFIG"] = str(tmpdir.join("ansible.cfg"))
            # this force backend cache reloading
            kw["ansible_inventory"] = str(tmpdir.join("inventory"))
        else:
            # Point the ssh backend at the container via a generated config.
            ssh_config = tmpdir.join("ssh_config")
            ssh_config.write((
                "Host {}\n"
                " Hostname {}\n"
                " Port {}\n"
                " UserKnownHostsFile /dev/null\n"
                " StrictHostKeyChecking no\n"
                " IdentityFile {}\n"
                " IdentitiesOnly yes\n"
                " LogLevel FATAL\n"
            ).format(hostname, docker_host, port, str(key)))
            kw["ssh_config"] = str(ssh_config)
    # Wait ssh to be up
    service = testinfra.get_host(
        docker_id, connection='docker').service
    # Service name differs per distro image.
    images_with_sshd = (
        "centos_6",
        "centos_7",
        "alpine_38",
        "archlinux"
    )
    if image in images_with_sshd:
        service_name = "sshd"
    else:
        service_name = "ssh"
    while not service(service_name).is_running:
        time.sleep(.5)
    if kw["connection"] != "ansible":
        hostspec = (spec.user or "root") + "@" + hostname
    else:
        hostspec = spec.name
    b = testinfra.host.get_host(hostspec, **kw)
    # Report the image name as the hostname for nicer test ids.
    b.backend.get_hostname = lambda: image
    return b
# Optional dependency: ansible-backend tests are skipped when unavailable.
try:
    import ansible
except ImportError:
    ansible = None

import testinfra
from testinfra.backend.base import BaseBackend
from testinfra.backend import parse_hostspec

# Directory of this test module and the repository root above it.
BASETESTDIR = os.path.abspath(os.path.dirname(__file__))
BASEDIR = os.path.abspath(os.path.join(BASETESTDIR, os.pardir))
# Cache for has_docker(); None means "not yet probed".
_HAS_DOCKER = None

# Use testinfra to get a handy function to run commands locally
local_host = testinfra.get_host('local://')
check_output = local_host.check_output


def has_docker():
    # Lazily detect (and cache) whether a `docker` client binary exists.
    global _HAS_DOCKER
    if _HAS_DOCKER is None:
        _HAS_DOCKER = local_host.exists("docker")
    return _HAS_DOCKER


# Generated with
# $ echo myhostvar: bar > hostvars.yml
# $ echo polichinelle > vault-pass.txt
# $ ansible-vault encrypt --vault-password-file vault-pass.txt hostvars.yml
# $ cat hostvars.yml