def build_jump_host(self, external, image, flavor, user,
                    password=None, **kwargs):
    keyname = self.context["user"]["keypair"]["name"]
    LOG.info("Building Jump Host with key : {}".format(keyname))
    jump_host, jump_host_ip = self._boot_server_with_fip(
        image, flavor, True, None, key_name=keyname, **kwargs)
    # Wait for ping
    self._wait_for_ping(jump_host_ip["ip"])
    # Open SSH connection
    jump_ssh = sshutils.SSH(user, jump_host_ip["ip"], 22,
                            self.context["user"]["keypair"]["private"],
                            password)
    # Check for connectivity
    self._wait_for_ssh(jump_ssh)
    # Write id_rsa (private key) to get to guests
    self._run_command_over_ssh(jump_ssh, {"remote_path": "rm -rf ~/.ssh"})
    self._run_command_over_ssh(jump_ssh, {"remote_path": "mkdir ~/.ssh"})
    jump_ssh.run("cat > ~/.ssh/id_rsa",
                 stdin=self.context["user"]["keypair"]["private"])
    jump_ssh.execute("chmod 0600 ~/.ssh/id_rsa")
    return jump_ssh, jump_host_ip, jump_host
def get_ssh_from_credential(cred):
    sshcli = sshutils.SSH(cred["user"], cred["host"],
                          port=cred["port"],
                          key_filename=cred["key"],
                          password=cred["password"])
    return sshcli
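# A minimal usage sketch for get_ssh_from_credential above (not part of the
# original source): the credential values are illustrative placeholders, and
# the helper only builds an sshutils.SSH client, so wait()/execute() still
# have to be called by the caller.
cred = {"user": "cloud-user", "host": "192.0.2.10", "port": 22,
        "key": "/home/stack/.ssh/id_rsa", "password": None}
cli = get_ssh_from_credential(cred)
cli.wait()  # block until sshd on the host accepts connections
exit_code, stdout, stderr = cli.execute("hostname")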
def build_host(self, external, image, flavor, user, password=None, **kwargs):
    keyname = self.context["user"]["keypair"]["name"]
    host, host_ip = self._boot_server_with_fip(
        image, flavor, use_floating_ip=True,
        floating_network=external['name'],
        key_name=keyname, **kwargs)
    # Wait for ping
    self._wait_for_ping(host_ip['ip'])
    # Open SSH connection
    host_ssh = sshutils.SSH(user, host_ip['ip'], 22,
                            self.context["user"]["keypair"]["private"],
                            password)
    # Check for connectivity
    self._wait_for_ssh(host_ssh)
    return host_ssh, host_ip, host
def _run_job_ssh(self, job_idx, server_ip, username, private_key, command,
                 retry_count=3, interval=1):
    ssh = sshutils.SSH(username, server_ip, port=22,
                       pkey=private_key, password=None)
    ssh.wait()

    @atomic.action_timer("vm.job_execution_%s" % job_idx)
    def run(self):
        retry = retry_count
        success_filter = "completed successfully"
        while True:
            try:
                LOG.debug("Running job: %s" % command)
                code, out, err = ssh.execute(command)
                LOG.debug(err)
                if err.find(success_filter) < 0:
                    return False
                LOG.debug("Job completed successfully #%d" % job_idx)
                return True
            except Exception:
                if retry == 0:
                    raise
                retry = retry - 1
                time.sleep(interval)

    run(self)
def build_jump_host(self, external, image, flavor, user,
                    password=None, **kwargs):
    keyname = self.context["user"]["keypair"]["name"]
    jump_host, jump_host_ip = self._boot_server_with_fip(
        image, flavor, use_floating_ip=True,
        floating_network=external['name'],
        key_name=keyname, **kwargs)
    # Wait for ping
    self._wait_for_ping(jump_host_ip['ip'])
    # Open SSH connection
    jump_ssh = sshutils.SSH(user, jump_host_ip['ip'], 22,
                            self.context["user"]["keypair"]["private"],
                            password)
    # Check for connectivity
    self._wait_for_ssh(jump_ssh)
    # Write id_rsa to get to guests.
    self._run_command_over_ssh(jump_ssh, {'remote_path': "rm -rf ~/.ssh"})
    self._run_command_over_ssh(jump_ssh, {'remote_path': "mkdir ~/.ssh"})
    jump_ssh.run("cat > ~/.ssh/id_rsa",
                 stdin=self.context["user"]["keypair"]["private"])
    jump_ssh.execute("chmod 0600 ~/.ssh/id_rsa")
    return jump_ssh, jump_host_ip, jump_host
def run(self, flavor, username, ssh_timeout, image=None,
        floating_network=None, port=22, use_floating_ip=True, **kwargs):
    """Boot a server to test metadata.

    :param image: image to be used to boot an instance
    :param flavor: flavor to be used to boot an instance
    :param username: ssh username on server, str
    :param ssh_timeout: wait for ssh timeout. Default is 120 seconds
    :param floating_network: external network name, for floating ip
    :param port: ssh port for SSH connection
    :param use_floating_ip: bool, floating or fixed IP for SSH connection
    :param kwargs: Optional additional arguments for server creation
    """
    if not image:
        image = self.context["tenant"]["custom_image"]["id"]
    server, fip = self._boot_server_with_fip(
        image, flavor, use_floating_ip=use_floating_ip,
        floating_network=floating_network,
        key_name=self.context["user"]["keypair"]["name"],
        **kwargs)
    ssh = sshutils.SSH(username, fip["ip"], port=port,
                       pkey=self.context["user"]["keypair"]["private"])
    self._wait_for_ssh(ssh, timeout=ssh_timeout)
def runcommand_heat(self, workload, template, files, parameters):
    """Run workload on stack deployed by heat.

    Workload can be either file or resource:

        {"file": "/path/to/file.sh"}
        {"resource": ["package.module", "workload.py"]}

    Also it should contain "username" key.

    Given file will be uploaded to `gate_node` and started. This script
    should print `key` `value` pairs separated by colon. These pairs will
    be presented in results.

    Gate node should be accessible via ssh with keypair `key_name`, so
    heat template should accept parameter `key_name`.

    :param workload: workload to run
    :param template: path to heat template file
    :param files: additional template files
    :param parameters: parameters for heat template
    """
    keypair = self.context["user"]["keypair"]
    parameters["key_name"] = keypair["name"]
    network = self.context["tenant"]["networks"][0]
    parameters["router_id"] = network["router_id"]
    self.stack = heat.main.Stack(self, self.task, template, files=files,
                                 parameters=parameters)
    self.stack.create()
    for output in self.stack.stack.outputs:
        if output["output_key"] == "gate_node":
            ip = output["output_value"]
            break
    ssh = sshutils.SSH(workload["username"], ip, pkey=keypair["private"])
    ssh.wait()
    script = workload.get("resource")
    if script:
        script = pkgutil.get_data(*script)
    else:
        script = open(workload["file"]).read()
    ssh.execute("cat > /tmp/.rally-workload", stdin=script)
    ssh.execute("chmod +x /tmp/.rally-workload")
    with atomic.ActionTimer(self, "runcommand_heat.workload"):
        status, out, err = ssh.execute(
            "/tmp/.rally-workload",
            stdin=json.dumps(self.stack.stack.outputs))
    rows = []
    for line in out.splitlines():
        row = line.split(":")
        if len(row) != 2:
            raise exceptions.ScriptError("Invalid data '%s'" % line)
        rows.append(row)
    if not rows:
        raise exceptions.ScriptError("No data returned")
    self.add_output(
        complete={"title": "Workload summary",
                  "description": "Data generated by workload",
                  "chart_plugin": "Table",
                  "data": {"cols": ["key", "value"],
                           "rows": rows}})
def boot_and_delete_server_test_ssh(self, image, flavor, username,
                                    password=None, command=None, port=22,
                                    use_floating_ip=True,
                                    floating_network=None,
                                    force_delete=False, **kwargs):
    server, fip = self._boot_server_with_fip(
        image, flavor, use_floating_ip=use_floating_ip,
        floating_network=floating_network,
        key_name=self.context["user"]["keypair"]["name"],
        **kwargs)
    pkey = self.context["user"]["keypair"]["private"]
    ssh = sshutils.SSH(username, fip["ip"], port=port,
                       pkey=pkey, password=password)
    self._wait_for_ssh(ssh)
    self._delete_server_with_fip(server, fip, force_delete=force_delete)
def _run_command(self, server_ip, port, username, password, command,
                 pkey=None):
    """Run command via SSH on server.

    Create SSH connection for server, wait for server to become available
    (there is a delay between server being set to ACTIVE and sshd being
    available). Then call run_command_over_ssh to actually execute the
    command.

    :param server_ip: server ip address
    :param port: ssh port for SSH connection
    :param username: str. ssh username for server
    :param password: Password for SSH authentication
    :param command: Dictionary specifying command to execute.
        See `rally info find VMTasks.boot_runcommand_delete' parameter
        `command' docstring for explanation.
    :param pkey: key for SSH authentication

    :returns: tuple (exit_status, stdout, stderr)
    """
    pkey = pkey if pkey else self.context["user"]["keypair"]["private"]
    ssh = sshutils.SSH(username, server_ip, port=port,
                       pkey=pkey, password=password)
    self._wait_for_ssh(ssh)
    return self._run_command_over_ssh(ssh, command)
def __init__(self, host, user, key=None, password=None, port=22):
    self.host = host
    self.port = port
    self.user = user
    self.key = key
    self.password = password
    self.ssh = sshutils.SSH(user, host, key_filename=key,
                            port=port, password=password)
    super(Server, self).__init__()
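# A hedged usage sketch for the Server wrapper above (not part of the
# original source): the host, user and key path are placeholders, the rest
# of the Server class is not shown here, and the wrapped sshutils.SSH client
# is reached through the `ssh` attribute set in __init__.
server = Server("192.0.2.10", "heat-admin", key="/home/stack/.ssh/id_rsa")
server.ssh.wait()  # wait until the host accepts SSH connections
exit_code, stdout, stderr = server.ssh.execute("uptime")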
def test__get_pkey_rsa(self):
    private_rsa_key = six.StringIO()
    private_rsa_key_obj = paramiko.RSAKey.generate(1024)
    private_rsa_key_obj.write_private_key(private_rsa_key)
    private_rsa_key.seek(0)
    ssh = sshutils.SSH("root", "example.net")
    self.assertIsInstance(ssh._get_pkey(private_rsa_key), paramiko.RSAKey)
    private_rsa_key.seek(0)
    self.assertIsInstance(ssh._get_pkey(private_rsa_key.getvalue()),
                          paramiko.RSAKey)
def test_construct(self, mock_ssh__get_pkey):
    mock_ssh__get_pkey.return_value = "pkey"
    ssh = sshutils.SSH("root", "example.net", port=33, pkey="key",
                       key_filename="kf", password="secret")
    mock_ssh__get_pkey.assert_called_once_with("key")
    self.assertEqual("root", ssh.user)
    self.assertEqual("example.net", ssh.host)
    self.assertEqual(33, ssh.port)
    self.assertEqual("pkey", ssh.pkey)
    self.assertEqual("kf", ssh.key_filename)
    self.assertEqual("secret", ssh.password)
def _run_command(self, server_ip, port, username, password, interpreter,
                 script, pkey=None):
    """Run command via SSH on server.

    Create SSH connection for server, wait for server to become available
    (there is a delay between server being set to ACTIVE and sshd being
    available). Then call run_command_over_ssh to actually execute the
    command.
    """
    pkey = pkey if pkey else self.context["user"]["keypair"]["private"]
    ssh = sshutils.SSH(username, server_ip, port=port,
                       pkey=pkey, password=password)
    self._wait_for_ssh(ssh)
    return self._run_command_over_ssh(ssh, interpreter, script)
def test__get_client(self, mock_paramiko, mock_ssh__get_pkey):
    mock_ssh__get_pkey.return_value = "key"
    fake_client = mock.Mock()
    mock_paramiko.SSHClient.return_value = fake_client
    mock_paramiko.AutoAddPolicy.return_value = "autoadd"

    ssh = sshutils.SSH("admin", "example.net", pkey="key")
    client = ssh._get_client()

    self.assertEqual(fake_client, client)
    client_calls = [
        mock.call.set_missing_host_key_policy("autoadd"),
        mock.call.connect("example.net", username="admin",
                          port=22, pkey="key", key_filename=None,
                          password=None, timeout=1),
    ]
    self.assertEqual(client_calls, client.mock_calls)
def wait_for_hadoop_on_ubuntu(self, server_ip, username, pkey, port):
    command = ("sudo sed -i '0,/localhost/c\\127.0.0.1 "
               "localhost %(hostname)s' /etc/hosts;"
               "~/hadoop-2.7.1/bin/hdfs namenode -format;"
               "~/hadoop-2.7.1/sbin/start-all.sh;"
               "~/hadoop-2.7.1/bin/hdfs dfsadmin -safemode wait")
    ssh = sshutils.SSH(username, server_ip, port=port,
                       pkey=pkey, password=None)
    ssh.wait()
    _, hostname, _ = ssh.execute('hostname')
    code, out, err = ssh.execute(command % {'hostname': hostname})
    if code:
        raise Exception("Command failed! Check error: %s" % err)
def setUp(self):
    super(SSHRunTestCase, self).setUp()
    self.fake_client = mock.Mock()
    self.fake_session = mock.Mock()
    self.fake_transport = mock.Mock()
    self.fake_transport.open_session.return_value = self.fake_session
    self.fake_client.get_transport.return_value = self.fake_transport
    self.fake_session.recv_ready.return_value = False
    self.fake_session.recv_stderr_ready.return_value = False
    self.fake_session.send_ready.return_value = False
    self.fake_session.exit_status_ready.return_value = True
    self.fake_session.recv_exit_status.return_value = 0
    self.ssh = sshutils.SSH("admin", "example.net")
    self.ssh._get_client = mock.Mock(return_value=self.fake_client)
def bind_port(self, port, subnet, neutron_client):
    port_id = port['port']['id']
    port_ip = port['port']['fixed_ips'][0]['ip_address']
    port_mac = port['port']['mac_address']
    gw_ip = subnet['subnet']['gateway_ip']
    mask = subnet['subnet']['cidr'].split('/')[1]
    name = "b_%s" % port_id[:8]
    param_dict = {
        'name': name,
        'port_id': port_id,
        'port_ip': port_ip,
        'port_mac': port_mac,
        'gw_ip': gw_ip,
        'mask': mask
    }
    # Open SSH connection
    ssh = sshutils.SSH('heat-admin', RemoteScurityGroup.COMPUTE_IP)
    # Check for connectivity
    ssh.wait(120, 1)
    commands = [
        "sudo ovs-vsctl add-port br-int %(name)s -- set Interface %(name)s type=internal -- set Interface %(name)s external_ids:iface-id=%(port_id)s external_ids:iface-status='active' external_ids:attached-mac=%(port_mac)s",
        "sudo ip netns add %(name)s",
        "sudo ip link set %(name)s netns %(name)s",
        "sudo ip netns exec %(name)s ip link set %(name)s address %(port_mac)s",
        "sudo ip netns exec %(name)s ip addr add %(port_ip)s/%(mask)s dev %(name)s",
        "sudo ip netns exec %(name)s ip link set %(name)s up",
        "sudo ip netns exec %(name)s ip route add default via %(gw_ip)s"
    ]
    for c in commands:
        ssh.run(c % param_dict)
    self._wait_for_port_active(neutron_client, port_id)
def _run_command(self, server_ip, port, username, password, interpreter,
                 script, pkey=None, is_file=True):
    """Run command via SSH on server.

    Create SSH connection for server, wait for server to become available
    (there is a delay between server being set to ACTIVE and sshd being
    available). Then call run_command_over_ssh to actually execute the
    command.

    :param server_ip: server ip address
    :param port: ssh port for SSH connection
    :param username: str. ssh username for server
    :param password: Password for SSH authentication
    :param interpreter: server's interpreter to execute the script
    :param script: script to run on server
    :param pkey: key for SSH authentication
    :param is_file: if True, script represents a path; otherwise, script
        contains an inline script.
    """
    pkey = pkey if pkey else self.context["user"]["keypair"]["private"]
    ssh = sshutils.SSH(username, server_ip, port=port,
                       pkey=pkey, password=password)
    self._wait_for_ssh(ssh)
    return self._run_command_over_ssh(ssh, interpreter, script, is_file)
def setUp(self):
    super(SSHTestCase, self).setUp()
    self.ssh = sshutils.SSH("root", "example.net")
def run(self, image, flavor, user, lb_algorithm, protocol, protocol_port,
        jump_host_ip, num_pools, num_clients, vip_subnet_id, user_data_file,
        router_create_args=None, network_create_args=None,
        subnet_create_args=None, **kwargs):
    network = self._create_network(network_create_args or {})
    subnet = self._create_subnet(network, subnet_create_args or {})
    kwargs["nics"] = [{"net-id": network['network']['id']}]
    subnet_id = subnet['subnet']['id']
    _clients = self.create_clients(num_clients, image, flavor, user,
                                   user_data_file, **kwargs)
    max_attempts = 10

    LOG.info("Creating a load balancer")
    lb = self.octavia.load_balancer_create(subnet_id=vip_subnet_id,
                                           admin_state=True)
    lb_id = lb["id"]
    LOG.info("Waiting for the lb {} to be active".format(lb_id))
    self.octavia.wait_for_loadbalancer_prov_status(lb)
    time.sleep(90)

    for _ in range(num_pools):
        listener_args = {
            "name": self.generate_random_name(),
            "loadbalancer_id": lb_id,
            "protocol": protocol,
            "protocol_port": protocol_port,
            "connection_limit": -1,
            "admin_state_up": True,
        }
        LOG.info("Creating a listener for lb {}".format(lb_id))
        attempts = 0
        # Retry to avoid HTTP 409 errors like "Load Balancer
        # is immutable and cannot be updated"
        while attempts < max_attempts:
            try:
                listener = self.octavia.listener_create(
                    json={"listener": listener_args})
                break
            except exceptions.OctaviaClientException as e:
                # retry for 409 return code
                if e.code == 409:
                    attempts += 1
                    time.sleep(120)
                    self.octavia.wait_for_loadbalancer_prov_status(lb)
                    continue
                break
        LOG.info(listener)
        time.sleep(30)
        LOG.info("Waiting for the lb {} to be active, after "
                 "listener_create".format(lb_id))
        self.octavia.wait_for_loadbalancer_prov_status(lb)

        LOG.info("Creating a pool for lb {}".format(lb_id))
        attempts = 0
        # Retry to avoid HTTP 409 errors like "Load Balancer
        # is immutable and cannot be updated"
        while attempts < max_attempts:
            try:
                # internally pool_create will wait for active state
                pool = self.octavia.pool_create(
                    lb_id=lb["id"],
                    protocol=protocol,
                    lb_algorithm=lb_algorithm,
                    listener_id=listener["listener"]["id"],
                    admin_state_up=True)
                break
            except exceptions.OctaviaClientException as e:
                # retry for 409 return code
                if e.code == 409:
                    attempts += 1
                    time.sleep(120)
                    continue
                break
        time.sleep(60)

        for client_ip in _clients:
            member_args = {
                "address": client_ip,
                "protocol_port": protocol_port,
                "subnet_id": subnet_id,
                "admin_state_up": True,
                "name": self.generate_random_name(),
            }
            LOG.info("Adding member : {} to the pool {} lb {}".format(
                client_ip, pool["id"], lb_id))
            attempts = 0
            # Retry to avoid "Load Balancer is immutable and
            # cannot be updated"
            while attempts < max_attempts:
                try:
                    self.octavia.member_create(
                        pool["id"], json={"member": member_args})
                    break
                except exceptions.OctaviaClientException as e:
                    # retry for 409 return code
                    if e.code == 409:
                        attempts += 1
                        time.sleep(120)
                        self.octavia.wait_for_loadbalancer_prov_status(lb)
                        LOG.info("member_create exception: Waiting for the "
                                 "lb {} to be active".format(lb_id))
                        continue
                    break
            time.sleep(30)
            LOG.info("Waiting for the lb {} to be active, after "
                     "member_create".format(lb_id))
            self.octavia.wait_for_loadbalancer_prov_status(lb)
        protocol_port = protocol_port + 1

    # ssh and ping the vip
    lb_ip = lb["vip_address"]
    LOG.info("Load balancer IP: {}".format(lb_ip))
    port = 80
    jump_ssh = sshutils.SSH(user, jump_host_ip, 22, None, None)
    # check for connectivity
    self._wait_for_ssh(jump_ssh)
    for i in range(num_pools):
        for j in range(num_clients):
            cmd = "curl -s {}:{}".format(lb_ip, port)
            attempts = 0
            while attempts < max_attempts:
                test_exitcode, stdout_test, stderr = jump_ssh.execute(
                    cmd, timeout=60)
                LOG.info("cmd: {}, stdout:{}".format(cmd, stdout_test))
                if test_exitcode != 0 and stdout_test != 1:
                    LOG.error("ERROR with HTTP response {}".format(cmd))
                    attempts += 1
                    time.sleep(30)
                else:
                    LOG.info("cmd: {} successful".format(cmd))
                    break
        port = port + 1
def run(self, image, flavor, ext_net_id, router_create_args=None,
        network_create_args=None, subnet_create_args=None, **kwargs):
    # Guard against the default None so the name assignment below
    # does not fail.
    router_create_args = router_create_args or {}
    router_create_args["name"] = self.generate_random_name()
    router_create_args.setdefault("external_gateway_info",
                                  {"network_id": ext_net_id,
                                   "enable_snat": True})
    router = self._create_router(router_create_args)

    network = self._create_network(network_create_args or {})
    if not subnet_create_args:
        subnet_create_args = {'enable_dhcp': False}
    else:
        subnet_create_args.setdefault('enable_dhcp', False)
    subnet = self._create_subnet(network, subnet_create_args or {})
    self._add_interface_router(subnet['subnet'], router['router'])
    kwargs["nics"] = [{'net-id': network['network']['id']}]

    # HACK_START
    host_id, host_ip = self._schedule()
    # ext_net = self.admin_clients("neutron").show_network(ext_net_id)
    fip = self._create_floatingip(ext_net_id)

    port_args = {'binding:host_id': host_id}
    secgroup = self.context.get("user", {}).get("secgroup")
    if secgroup:
        port_args["security_groups"] = [secgroup["id"]]
    # port_args["security_groups"] = None
    # port_args["port_security_enabled"] = False
    port = self._create_port(network, port_args)
    self._associate_fip(floatingip=fip['floatingip'], port=port['port'])

    port_id = port['port']['id']
    port_ip = port['port']['fixed_ips'][0]['ip_address']
    port_mac = port['port']['mac_address']
    gw_ip = subnet['subnet']['gateway_ip']
    mask = subnet['subnet']['cidr'].split('/')[1]
    name = "b_%s" % port_id[:8]
    param_dict = {'name': name,
                  'port_id': port_id,
                  'port_ip': port_ip,
                  'port_mac': port_mac,
                  'gw_ip': gw_ip,
                  'mask': mask}
    # Open SSH connection
    ssh = sshutils.SSH('heat-admin', host_ip)
    # Check for connectivity
    ssh.wait(120, 1)
    commands = [
        "sudo ovs-vsctl add-port br-int %(name)s -- set Interface %(name)s type=internal -- set Interface %(name)s external_ids:iface-id=%(port_id)s external_ids:iface-status='active' external_ids:attached-mac=%(port_mac)s",
        "sudo ip netns add %(name)s",
        "sudo ip link set %(name)s netns %(name)s",
        "sudo ip netns exec %(name)s ip link set %(name)s address %(port_mac)s",
        "sudo ip netns exec %(name)s ip addr add %(port_ip)s/%(mask)s dev %(name)s",
        "sudo ip netns exec %(name)s ip link set %(name)s up",
        "sudo ip netns exec %(name)s ip route add default via %(gw_ip)s"]
    for c in commands:
        ssh.run(c % param_dict)
    self._wait_for_port_active(port_id)
    self._wait_for_ping(fip['floatingip']['floating_ip_address'])
def run(self, image, flavor, vip_subnet_id, num_lb, user_data_file,
        jump_host_ip, user, password=None, **kwargs):
    project_id = self.context["tenant"]["id"]
    subnet_address = self.create_client(image, flavor, num_lb, project_id,
                                        user_data_file)
    loadbalancers = []
    protocol = "HTTP"
    protocol_port = 80
    # https://docs.openstack.org/octavia/
    # latest/_modules/octavia/api/v2/controllers/load_balancer.html
    for subnet_id, mem_addr in subnet_address.items():
        lb_name = self.generate_random_name()
        listener_name = self.generate_random_name()
        pool_name = self.generate_random_name()
        LOG.info("Creating load balancer %s", lb_name)
        pool_args = {
            "name": pool_name,
            "protocol": protocol,
            "lb_algorithm": "ROUND_ROBIN",
            "members": [{
                "address": mem_addr,
                "subnet_id": subnet_id,
                "protocol_port": 80
            }]
        }
        listener_args = {
            "name": listener_name,
            "protocol": protocol,
            "protocol_port": protocol_port,
            "default_pool": pool_args
        }
        lb_args = {
            "name": lb_name,
            "description": None,
            "listeners": [listener_args],
            "provider": None,
            "admin_state_up": True,
            "project_id": project_id,
            "vip_subnet_id": vip_subnet_id,
            "vip_qos_policy_id": None,
        }
        lb = self.octavia._clients.octavia().load_balancer_create(
            json={"loadbalancer": lb_args})["loadbalancer"]
        loadbalancers.append(lb)

    for loadbalancer in loadbalancers:
        LOG.info("Waiting for the load balancer to be active")
        self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
        LOG.info("Loadbalancer %s is active", loadbalancer)
        time.sleep(90)
        # ssh and ping the vip
        lb_ip = loadbalancer["vip_address"]
        LOG.info("Load balancer IP: {}".format(lb_ip))
        jump_ssh = sshutils.SSH(user, jump_host_ip, 22, None, None)
        # check for connectivity
        self._wait_for_ssh(jump_ssh)
        cmd = "curl -s {}:{}".format(lb_ip, 80)
        max_attempts = 10
        attempts = 0
        while attempts < max_attempts:
            test_exitcode, stdout_test, stderr = jump_ssh.execute(
                cmd, timeout=60)
            LOG.info("cmd: {}, stdout:{}".format(cmd, stdout_test))
            if test_exitcode != 0 and stdout_test != 1:
                LOG.error("ERROR with HTTP response {}".format(cmd))
                attempts += 1
                time.sleep(30)
            else:
                LOG.info("cmd: {} successful".format(cmd))
                break
def create_verify_and_delete_cluster(self, image, flavor, network_id=None,
                                     server_name="rally_vm",
                                     cluster_name=None,
                                     cluster_flavor="8795", size=1,
                                     cluster_volume_size=0,
                                     cluster_timeout=300,
                                     cluster_check_interval=1, **kwargs):
    """Boot a nova instance, create a cue cluster, ssh from the nova
    instance to the cue cluster and run a command on it.

    :param image: str, image name for server creation
    :param flavor: str, flavor for server creation
    :param network_id: str, network id for server creation
    :param server_name: str, server name
    :param cluster_name: str, cluster name
    :param cluster_flavor: str, cluster flavor
    :param size: int, cluster size
    :param cluster_volume_size: int, cluster volume size
    :param cluster_timeout: int, timeout in seconds for cluster to go active
    :param cluster_check_interval: int, check interval in seconds
    :param kwargs: other optional parameters to initialize the server
    """
    server_name = self.generate_random_name()
    nova_server_boot_timeout = 60 * 5
    network_name = "rally_network"
    sec_group_name = "rally_sec_group"
    key_name = "rally_keypair"
    key_file_name = '/tmp/' + key_name
    cluster = None
    server = None
    # Initialize so the cleanup below works even when an existing
    # network_id is passed in.
    test_network = None
    neutron_client = self.clients("neutron")
    nova_client = self.clients("nova")

    try:
        # create a key-pair
        LOG.info("Adding new keypair")
        keypair = nova_client.keypairs.create(key_name)
        f = open(key_file_name, 'w')
        os.chmod(key_file_name, stat.S_IREAD | stat.S_IWRITE)
        f.write(keypair.private_key)
        f.close()

        if not network_id:
            # create new network
            test_network = self._create_network(neutron_client,
                                                network_name)
            network_id = test_network[1]["network"]["id"]

        rabbitmq_username = "******"
        rabbitmq_password = "******"
        # create cue_cluster
        cluster = self._create_cue_cluster(
            cluster_name, size, network_id, cluster_flavor,
            cluster_volume_size, cluster_timeout, cluster_check_interval,
            'plain', rabbitmq_username, rabbitmq_password)

        # assign network_id argument
        kwargs["nics"] = [{"net-id": network_id}]
        server = self._create_nova_vm(nova_client, flavor, image, keypair,
                                      server_name, sec_group_name,
                                      nova_server_boot_timeout, **kwargs)

        LOG.info("Adding floating ip")
        floating_ip = self._add_floating_ip(nova_client, server)

        # ssh instance
        LOG.info("SSHing to instance")
        ssh = sshutils.SSH("ubuntu", floating_ip,
                           key_filename=key_file_name)
        ssh.wait()

        # run rabbitmq_test script
        endpoint = cluster.endpoints[0]
        uri = endpoint['uri'].split(':')
        rabbitmq_file = "/opt/rabbitmq_test.py"

        LOG.info("Running rabbitmq-test script")
        LOG.info('Testing for error when using invalid password')
        status, stdout, stderr = ssh.execute(
            "python {0} -H {1} -P {2} -u {3} -p {4}".format(
                rabbitmq_file, uri[0], uri[1], rabbitmq_username,
                "invalid"))
        assert (status != 0), "Expected an error due to invalid password"

        LOG.info('Testing using valid rabbitmq credentials')
        status, stdout, stderr = ssh.execute(
            "python {0} -H {1} -P {2} -u {3} -p {4}".format(
                rabbitmq_file, uri[0], uri[1], rabbitmq_username,
                rabbitmq_password))
        assert (status == 0), "Expected success"

    except Exception as err:
        LOG.exception(err)

    # cleanup - delete cluster, server, network and key file
    finally:
        if cluster is not None:
            self._delete_cluster(cluster['id'])
            self._wait_for_cluster_delete(cluster['id'], cluster_timeout,
                                          cluster_check_interval)
        if server is not None:
            self._delete_server(server.id)
        if test_network is not None:
            self._delete_network(test_network)
        self._delete_key_file(key_file_name)