def test_guest(self):
    ws = resource.Websocket()
    g = resource.Server().create({
        "cpu": 1000,
        "name": "",
        "mem": 256 * 1024**2,
        "vnc_password": "******"
    })
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri, (g['resource_uri'], resource.Server),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'stopped')
    resource.Server().start(g['uuid'])
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri, (g['resource_uri'], resource.Server),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'running')
    resource.Server().stop(g['uuid'])
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri, (g['resource_uri'], resource.Server),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'stopped')
    resource.Server().delete(g['uuid'])
    # After the delete, waiting on the object should eventually fail
    # with a 404; anything else is a real error.
    try:
        g = ws.wait_obj_wrapper(
            ws.wait_obj_uri, (ret['resource_uri'], resource.Server),
            timeout=30,
            extra_filter=lambda x: False)
    except errors.ClientError as e:
        if e.args[0] != 404:
            raise
def test_guest_drive(self):
    ws = resource.Websocket()
    g = resource.Server().create({
        "cpu": 1000,
        "name": "",
        "mem": 256 * 1024**2,
        "vnc_password": "******"
    })
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri, (g['resource_uri'], resource.Server),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'stopped')
    d = resource.Drive().create({
        "size": 1000**3,
        "name": "",
        "media": "disk"
    })
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri, (d['resource_uri'], resource.Drive),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'unmounted')
    resource.Server().update(
        g['uuid'], {
            "cpu": 1000,
            "name": "",
            "mem": 256 * 1024**2,
            "vnc_password": "******",
            "drives": [{
                "dev_channel": "0:0",
                "device": "virtio",
                "drive": d['uuid']
            }]
        })
    ws.wait_obj_uri(g['resource_uri'], resource.Server)
    resource.Drive().delete(d['uuid'])
    resource.Server().delete(g['uuid'])
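# The two tests above lean on resource.Websocket().wait_obj_wrapper() to
# block until a resource reaches a given state. As a rough mental model
# (not the library's actual implementation), an equivalent REST-polling
# fallback could look like the sketch below; wait_for_state() and its
# parameters are hypothetical.
import time


def wait_for_state(client, uuid, extra_filter, timeout=30, poll_interval=1):
    # Re-fetch the object until extra_filter() accepts it or time runs out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        obj = client.get(uuid)
        if extra_filter(obj):
            return obj
        time.sleep(poll_interval)
    raise AssertionError('{} did not reach the expected state'.format(uuid))


# Usage, mirroring the first wait in test_guest:
# wait_for_state(resource.Server(), g['uuid'],
#                lambda x: x['status'] == 'stopped', timeout=30)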
def test_server_fw_rules(self):
    policy = self.client.create(self.base_policy)
    server_def = {
        'name': 'FirewalledServer',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': '******',
        "nics": [{
            "firewall_policy": policy['uuid'],
            "ip_v4_conf": {
                "ip": None,
                "conf": "dhcp"
            },
            "model": "virtio",
        }],
    }
    server_client = resource.Server()
    with DumpResponse(clients=[server_client])("fwpolicy_server_attach"):
        server = server_client.create(server_def)
    self.assertEqual(server['nics'][0]['firewall_policy']['uuid'],
                     policy['uuid'])
    self.client.delete(policy['uuid'])
    server = server_client.get(server['uuid'])
    self.assertIsNone(server['nics'][0]['firewall_policy'])
    server_client.delete(server['uuid'])
def _clean_policies(self):
    policies = self.client.list_detail()
    server_client = resource.Server()
    deleted_servers = []
    for policy in policies:
        for server in policy['servers']:
            if server['uuid'] not in deleted_servers:
                deleted_servers.append(server['uuid'])
                server_client.delete(server['uuid'])
        self.client.delete(policy['uuid'])
def get_per_server_usage(start_time, end_time):
    server_client = cr.Server()
    server_list = server_client.list_detail()
    # Map every server and attached drive uuid back to the owning server.
    server_resources = {}
    for server in server_list:
        server_resources[server['uuid']] = server['uuid']
        for drive in server['drives']:
            server_resources[drive['drive']['uuid']] = server['uuid']
    usage_client = cr.Usage()
    ledger_client = cr.Ledger()
    server_billing = defaultdict(int)
    interval = (end_time - start_time).days
    ledger_list = ledger_client.list(
        dict(time__gt=end_time - timedelta(days=interval),
             time__lt=end_time))
    # Fetch usage records in weekly chunks to keep each response small.
    usage_list = []
    i = 0
    for i in range(7, interval, 7):
        usage_list.extend(
            usage_client.list(
                dict(poll_time__gt=end_time - timedelta(days=i),
                     poll_time__lt=end_time - timedelta(days=i - 7))))
    if interval % 7 != 0:
        usage_list.extend(
            usage_client.list(
                dict(poll_time__gt=end_time - timedelta(days=interval),
                     poll_time__lt=end_time - timedelta(days=i))))
    usage_list = list(sorted(usage_list, key=lambda x: x['poll_time']))
    bisect_list = [dateutil.parser.parse(u['poll_time']) for u in usage_list]
    for ledger in ledger_list:
        if not ledger['billing_cycle']:
            continue
        # Only burst charges carry the resource name in the reason text.
        match = re.search('Burst: .* of ([^ ]*) .*', ledger['reason'])
        if not match:
            continue
        ledger['resource'] = match.group(1)
        poll_time = dateutil.parser.parse(ledger['time'])
        start_date = poll_time - timedelta(seconds=ledger['interval'] - 1)
        usages = get_usages(usage_client, ledger['time'], start_date,
                            usage_list, bisect_list)
        for usage in usages:
            if usage['resource'] != ledger['resource']:
                continue
            server = server_resources.get(usage['uuid'])
            if server:
                # Charge the server its proportional share of the burst.
                server_billing[server] += old_div(
                    Decimal(usage['amount']),
                    Decimal(ledger['resource_amount'])) * Decimal(
                        ledger['amount'])
    return server_billing
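# get_per_server_usage() calls a get_usages() helper that is not shown
# here. Judging from the pre-sorted usage_list and the parallel
# bisect_list of parsed poll times, it plausibly selects the usage
# records falling inside the ledger interval via binary search. The
# sketch below is an assumption-based reconstruction, not the original.
import bisect

import dateutil.parser


def get_usages(usage_client, end_time_str, start_date, usage_list,
               bisect_list):
    # usage_client is kept for signature compatibility; this sketch only
    # slices the already-fetched, sorted usage_list.
    end_date = dateutil.parser.parse(end_time_str)
    lo = bisect.bisect_left(bisect_list, start_date)
    hi = bisect.bisect_right(bisect_list, end_date)
    return usage_list[lo:hi]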
def test_guest(self):
    ws = resource.Websocket()
    g = resource.Server().create({
        "cpu": 1000,
        "name": "",
        "mem": 256 * 1024**2,
        "vnc_password": "******"
    })
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri, (g['resource_uri'], resource.Server),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'stopped')
    resource.Server().start(g['uuid'])
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri, (g['resource_uri'], resource.Server),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'running')
    resource.Server().stop(g['uuid'])
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri, (g['resource_uri'], resource.Server),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'stopped')
    resource.Server().delete(g['uuid'])
def test_attaching_preinstalled(self):
    server_client = cr.Server()
    found = None
    for drive in self.client.list():
        if drive['media'] == 'disk':
            found = drive
            break
    if found is None:
        raise unittest.SkipTest(
            'Cannot find a preinstalled drive in the drives library')
    guest_def = self._gen_server_definition(drives=[found['uuid']])
    with self.assertRaises(errors.PermissionError):
        server_client.create(guest_def)
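# _gen_server_definition() is defined elsewhere in the test base class.
# A minimal sketch of what it presumably builds (all field values are
# illustrative assumptions, not the real helper):
def _gen_server_definition(self, drives=(), **kwargs):
    definition = {
        'name': 'test_server',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': 'testserver',  # placeholder, not a real credential
    }
    # Attach each drive uuid on its own virtio channel.
    definition['drives'] = [{
        'device': 'virtio',
        'dev_channel': '0:{}'.format(index),
        'drive': drive_uuid,
    } for index, drive_uuid in enumerate(drives)]
    definition.update(kwargs)
    return definition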
def _clean_servers(self):
    """
    Remove all servers in the acceptance test account whose name
    contains 'test' or 'stress_drive_iops'.
    """
    server_client = cr.Server()
    stopping = []
    deleting = []
    inter = []
    for server in server_client.list_detail():
        if 'test' in server['name'] or 'stress_drive_iops' in server['name']:
            status = server['status']
            if status == 'running':
                server_client.stop(server['uuid'])
                stopping.append(server['uuid'])
            elif status == 'stopped':
                server_client.delete(server['uuid'])
                deleting.append(server['uuid'])
            else:
                inter.append(server['uuid'])
    for uuid in stopping:
        try:
            self._wait_for_status(uuid, 'stopped', client=server_client)
        except Exception:
            LOG.exception("Server {} did not stop in time".format(uuid))
        else:
            server_client.delete(uuid)
            deleting.append(uuid)
    for uuid in deleting:
        try:
            self._wait_deleted(uuid, client=server_client)
        except Exception:
            LOG.exception("Server {} did not delete in time".format(uuid))
    if inter:
        LOG.error('The servers {} are stuck in intermediate states. '
                  'Cannot remove them.'.format(inter))
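# _wait_for_status() and _wait_deleted() are polling helpers used
# throughout these tests. Minimal sketches, under the assumption that
# deletion surfaces as a 404 ClientError on GET:
import time


def _wait_for_status(self, uuid, status, client=None, timeout=60):
    client = client or self.client
    deadline = time.time() + timeout
    while time.time() < deadline:
        if client.get(uuid)['status'] == status:
            return
        time.sleep(1)
    raise AssertionError('{} did not reach status {!r} within {}s'.format(
        uuid, status, timeout))


def _wait_deleted(self, uuid, client=None, timeout=60):
    client = client or self.client
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            client.get(uuid)
        except errors.ClientError as exc:
            if exc.args[0] == 404:  # gone for good
                return
            raise
        time.sleep(1)
    raise AssertionError('{} was not deleted within {}s'.format(uuid, timeout))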
def test_attaching_cdrom(self):
    server_client = cr.Server()
    found = None
    for drive in self.client.list():
        if drive['media'] == 'cdrom':
            found = drive
            break
    if found is None:
        raise unittest.SkipTest(
            'Cannot find a cdrom drive in the drives library')
    guest_def = self._gen_server_definition(drives=[found['uuid']])
    new_guest = server_client.create(guest_def)
    server_client.delete(new_guest['uuid'])
    self._wait_deleted(new_guest['uuid'], client=server_client)
def setUp(self):
    super(ServerStressTest, self).setUp()
    self.server_client = cr.Server()
    self.drive_client = cr.Drive()
def setUp(self):
    unittest.TestCase.setUp(self)
    # Create a resource handle object.
    self.client = cr.Server()
    self.dump_response = DumpResponse(clients=[self.client])
def test_firewall(self):
    dc = cr.Drive()
    sc = cr.Server()
    fwp = cr.FirewallPolicy()
    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
    self._wait_for_status(d1['uuid'],
                          status='unmounted',
                          timeout=self.TIMEOUT_DRIVE_CLONING,
                          client=dc)
    fw_policy = fwp.create({})
    g_def = {
        "name": "testFirewallServer",
        "cpu": 1000,
        "mem": 1024**3,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [
            {
                "firewall_policy": fw_policy['uuid'],
                "ip_v4_conf": {
                    "ip": None,
                    "conf": "dhcp"
                },
                "model": "virtio",
            },
        ],
    }
    guest = sc.create(g_def)
    self._wait_for_status(d1['uuid'], 'mounted', client=dc)
    sc.start(guest['uuid'])
    self._wait_for_status(guest['uuid'], 'running', client=sc)
    guest = sc.get(guest['uuid'])
    ip1 = guest['nics'][0]['runtime']['ip_v4']["uuid"]
    self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)

    # Add a rule blocking SSH and verify the port closes.
    fw_policy['rules'] = [{
        "ip_proto": "tcp",
        "dst_port": 22,
        "direction": "in",
        "action": "drop",
        "comment": "Block SSH traffic"
    }]
    fwp.update(fw_policy['uuid'], fw_policy)
    self._wait_socket_close(ip1, 22)

    # Remove the rule and verify SSH becomes reachable again.
    fw_policy['rules'] = []
    fwp.update(fw_policy['uuid'], fw_policy)
    self._wait_for_open_socket(ip1, 22)

    sc.stop(guest['uuid'])
    self._wait_for_status(guest['uuid'], 'stopped', client=sc)
    sc.delete(guest['uuid'])
    fwp.delete(fw_policy['uuid'])
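# _wait_for_open_socket() / _wait_socket_close() are also base-class
# helpers. Assumed behavior: retry a plain TCP connect until it succeeds
# (or, for the close variant, until it fails). A sketch:
import socket
import time


def _wait_for_open_socket(self, host, port, timeout=60,
                          close_on_success=False):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            sock = socket.create_connection((host, port), timeout=5)
        except OSError:
            time.sleep(1)
            continue
        if close_on_success:
            sock.close()
        return
    raise AssertionError('{}:{} did not open within {}s'.format(
        host, port, timeout))


def _wait_socket_close(self, host, port, timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            sock = socket.create_connection((host, port), timeout=5)
        except OSError:
            return  # connect refused or timed out: the port is closed
        sock.close()
        time.sleep(1)
    raise AssertionError('{}:{} did not close within {}s'.format(
        host, port, timeout))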
def test_guest_context(self):
    dc = cr.Drive()
    sc = cr.Server()
    gcc = cr.GlobalContext()
    dump_response = DumpResponse(clients=[sc, dc, gcc])
    # Ensure an empty global context.
    gcc.update({})
    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_clone_1'})

    from uuid import uuid4
    g_def = {
        "name": "test_server",
        "cpu": 1000,
        "mem": 1024**3,
        'vnc_password': str(uuid4())[:18].replace('-', ''),
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [
            {
                "ip_v4_conf": {
                    "ip": None,
                    "conf": "dhcp"
                },
                "model": "virtio",
            },
        ],
        "meta": {
            "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy4XpmD3kEfRZ+LCwFh3Xmqrkm7rSiDu8v+ZCTOA3vlNjmy/ZOc3vy9Zr+IhWPP4yipiApkGRsBM63tTgnxqUUn/WU7qkbBNktZBcs5p7Mj/zO4ZHkk4VoTczFzHlPGwuak2P4wTftEj7sU8IRutaAbMoKj4AMuFF50j4sIgF7P5b5FtTIM2b5HSW8BlDz10b67+xsj6s3Jv05xxbBs+RWj+v7D5yjMVeeErXoSui8dlHpUu6QOVKn8LLmdpxvehc6ns8yW7cbQvWmLjOICMnm6BXdVtOKWBncDq9FGLmKF3fUeZZPbv79Z7dyZs+xGZGMHbpaNHpuY9QhNS/hQ5D5 dave@hal"
        }
    }

    LOG.debug('Creating guest with drive')
    with dump_response('guest_for_context'):
        g1 = sc.create(g_def)
    self._wait_for_status(d1['uuid'], 'mounted', client=dc)
    sc.start(g1['uuid'])
    self._wait_for_status(g1['uuid'], 'running', client=sc)

    LOG.debug('Refetch guest configurations')
    g1 = sc.get(g1['uuid'])

    LOG.debug('Get the assigned ips')
    ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]
    self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)

    from fabric.api import settings as fabric_settings
    from fabric import tasks, api
    fab_kwargs = {
        "warn_only": True,
        "abort_on_prompts": True,
        "use_ssh_config": p_pass is None
    }
    LOG.debug('Using fabric config {}'.format(fab_kwargs))
    if p_pass is not None:
        fab_kwargs['password'] = p_pass
        LOG.debug('Using a password to SSH to the servers '
                  '(not using ssh config)')

    dump_path = dump_response.response_dump.dump_path
    # Write the context request to the serial port in the background, then
    # read the reply; the timeouts guard against a wedged guest agent.
    self.command_template = (
        r'v=$(read -t 13 READVALUE < /dev/ttyS1 && echo $READVALUE & '
        r'sleep 1; echo -en "<\n{}\n>" > /dev/ttyS1; wait %1); echo $v')

    LOG.debug('Test the guest context')
    LOG.debug('Check single value retrieval')
    self.check_key_retrieval(g_def, 'context_single_value', 'name',
                             dump_path, fab_kwargs, ip1, fabric_settings,
                             tasks, api)

    LOG.debug('Check key retrieval')
    self.check_key_retrieval(g_def, 'context_single_value_ssh_key',
                             '/meta/ssh_public_key', dump_path, fab_kwargs,
                             ip1, fabric_settings, tasks, api)

    LOG.debug('Check complete context retrieval')
    self.check_all_retrieval(g_def, 'context_all', dump_path, fab_kwargs,
                             ip1, fabric_settings, tasks, api)

    LOG.debug('Check context dynamic update')
    g_def['name'] += '_renamed'
    g_def['meta']['another_key'] = 'a value or something'
    upd_res = sc.update(g1['uuid'], g_def)
    self.assertEqual(g_def['name'], upd_res['name'])

    LOG.debug('Check single value retrieval')
    self.check_key_retrieval(g_def, 'context_single_value_dynamic', 'name',
                             dump_path, fab_kwargs, ip1, fabric_settings,
                             tasks, api)

    LOG.debug('Check key retrieval')
    self.check_key_retrieval(g_def,
                             'context_single_value_another_key_dynamic',
                             '/meta/another_key', dump_path, fab_kwargs, ip1,
                             fabric_settings, tasks, api)

    LOG.debug('Check complete context retrieval')
    self.check_all_retrieval(g_def, 'context_all_dynamic', dump_path,
                             fab_kwargs, ip1, fabric_settings, tasks, api)

    with dump_response('update_global_context'):
        gcc.update({'new_global_key': 'new_global_val'})

    LOG.debug('Check global context retrieval')
    command = self.command_template.format('/global_context/new_global_key')
    expected_val = 'new_global_val'
    res_string = self.get_single_ctx_val(command, expected_val, fab_kwargs,
                                         ip1, fabric_settings, tasks, api)
    self.assertEqual(res_string, expected_val)
    self.dump_ctx_command(command, res_string,
                          'global_context_single_value', dump_path)
    self.check_all_retrieval(g_def, 'global_context_all', dump_path,
                             fab_kwargs, ip1, fabric_settings, tasks, api)

    LOG.debug('Stopping guest')
    sc.stop(g1['uuid'])
    self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)
    LOG.debug('Delete guest')
    sc.delete(g1['uuid'])
    LOG.debug('Delete drive')
    dc.delete(d1['uuid'])
    self._wait_deleted(d1['uuid'], client=dc)
def test_servers_operations(self):
    dc = cr.Drive()
    sc = cr.Server()
    vc = cr.VLAN()
    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Get a vlan from the account')
    all_vlans = vc.list()
    if not all_vlans:
        raise SkipTest('There is no vlan in the acceptance test account')
    vlan = all_vlans[0]

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
    self._wait_for_status(d1['uuid'],
                          status='unmounted',
                          timeout=self.TIMEOUT_DRIVE_CLONING,
                          client=dc)
    g_def = {
        "name": "test_server",
        "cpu": 1000,
        "mem": 1024**3,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [
            {
                "ip_v4_conf": {
                    "ip": None,
                    "conf": "dhcp"
                },
                "model": "virtio",
            },
            {
                "model": "virtio",
                "vlan": vlan['uuid'],
            },
        ],
    }

    LOG.debug('Creating guest with drive')
    g1 = sc.create(g_def)
    self._wait_for_status(d1['uuid'], 'mounted', client=dc)

    LOG.debug('Clone the guest')
    g2 = sc.clone(g1['uuid'])
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Check if the drive is active (mounted)')
    d2_uuid = g2['drives'][0]['drive']['uuid']
    self._wait_for_status(d2_uuid, 'mounted', client=dc)

    LOG.debug('Start both guests')
    sc.start(g1['uuid'])
    sc.start(g2['uuid'])
    self._wait_for_status(g1['uuid'], 'running', client=sc)
    self._wait_for_status(g2['uuid'], 'running', client=sc)

    LOG.debug('Refetch guest configurations')
    g1 = sc.get(g1['uuid'])
    g2 = sc.get(g2['uuid'])

    LOG.debug('Get the assigned ips')
    ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]
    ip2 = g2['nics'][0]['runtime']['ip_v4']["uuid"]
    self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)
    self._wait_for_open_socket(ip2, 22, timeout=40, close_on_success=True)

    from fabric.api import settings as fabric_settings
    from fabric import tasks, api
    fab_kwargs = {
        "warn_only": True,
        "abort_on_prompts": True,
        "use_ssh_config": p_pass is None
    }
    LOG.debug('Using fabric config {}'.format(fab_kwargs))
    if p_pass is not None:
        fab_kwargs['password'] = p_pass
        LOG.debug('Using a password to SSH to the servers '
                  '(not using ssh config)')

    with fabric_settings(**fab_kwargs):
        LOG.debug('Changing hostnames and restarting avahi on guest 1')
        set_hostname = 'hostname {} && service avahi-daemon restart'
        tasks.execute(api.run,
                      set_hostname.format("atom1"),
                      hosts=["root@%s" % ip1])

        LOG.debug('Changing hostnames and restarting avahi on guest 2')
        tasks.execute(api.run,
                      set_hostname.format("atom2"),
                      hosts=["root@%s" % ip2])

        LOG.debug('Ping the two hosts via private network')
        ping_res = tasks.execute(api.run,
                                 "ping atom2.local -c 1",
                                 hosts=["root@%s" % ip1])
        self.assertEqual(list(ping_res.values())[0].return_code, 0,
                         'Could not ping host atom2 from atom1')

        LOG.debug('Halt both servers')
        tasks.execute(api.run,
                      "halt",
                      hosts=["root@%s" % ip1, "root@%s" % ip2])

    LOG.debug('Wait for complete shutdown')
    self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Deleting both guests')
    sc.delete(g1['uuid'])
    sc.delete(g2['uuid'])

    LOG.debug('Deleting both drives')
    dc.delete(d1['uuid'])
    dc.delete(d2_uuid)
    self._wait_deleted(d1['uuid'], client=dc)
    self._wait_deleted(d2_uuid, client=dc)
def test_tags(self):
    with self.dump_response('tags_schema'):
        self.client.get_schema()

    sc = cr.Server()
    server1 = sc.create({
        'name': 'test_server1',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': '******'
    })
    server2 = sc.create({
        'name': 'test_server2',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': '******'
    })

    dc = cr.Drive()
    drive = dc.create({
        'name': 'test_drive',
        'size': 1000**3,
        'media': 'disk'
    })

    ip = cr.IP().list()[0]
    vlan = cr.VLAN().list()[0]

    with self.dump_response('tags_create'):
        tag1 = self.client.create({'name': 'MyGroupOfThings'})

    with self.dump_response('tags_create_with_resource'):
        tag2 = self.client.create({
            'name': 'TagCreatedWithResource',
            'resources': [
                server1['uuid'], server2['uuid'], drive['uuid'],
                ip['uuid'], vlan['uuid']
            ]
        })

    with self.dump_response('tags_list'):
        self.client.list()

    with self.dump_response('tags_list_detail'):
        self.client.list_detail()

    with self.dump_response('tags_get'):
        self.client.get(tag2['uuid'])

    with self.dump_response('tags_update_resources'):
        self.client.update(
            tag2['uuid'], {
                'name': 'TagCreatedWithResource',
                'resources': [server1['uuid'], drive['uuid']]
            })

    server2['tags'] = [tag1['uuid'], tag2['uuid']]
    with DumpResponse(clients=[sc], name='tags_update_tag_from_resource'):
        sc.update(server2['uuid'], server2)

    with self.dump_response('tags_list_resource'):
        self.client.servers(tag1['uuid'])

    time.sleep(30)
    dc.delete(drive['uuid'])
    sc.delete(server1['uuid'])
    sc.delete(server2['uuid'])

    with self.dump_response('tags_delete'):
        self.client.delete(tag1['uuid'])
    self.client.delete(tag2['uuid'])
def test_servers_operations(self):
    dc = cr.Drive()
    sc = cr.Server()
    vc = cr.VLAN()
    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Get a vlan from the account')
    all_vlans = vc.list()
    if not all_vlans:
        raise SkipTest('There is no vlan in the acceptance test account')
    vlan = all_vlans[0]

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
    self._wait_for_status(d1['uuid'],
                          status='unmounted',
                          timeout=self.TIMEOUT_DRIVE_CLONING,
                          client=dc)
    g_def = {
        "name": "test_server",
        "cpu": 1000,
        "mem": 1024**3,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [
            {
                "ip_v4_conf": {
                    "ip": None,
                    "conf": "dhcp"
                },
                "model": "virtio",
            },
            {
                "model": "virtio",
                "vlan": vlan['uuid'],
            },
        ],
    }

    LOG.debug('Creating guest with drive')
    g1 = sc.create(g_def)
    self._wait_for_status(d1['uuid'], 'mounted', client=dc)

    LOG.debug('Clone the guest')
    g2 = sc.clone(g1['uuid'])
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Check if the drive is active (mounted)')
    d2_uuid = g2['drives'][0]['drive']['uuid']
    self._wait_for_status(d2_uuid, 'mounted', client=dc)

    LOG.debug('Start both guests')
    sc.start(g1['uuid'])
    sc.start(g2['uuid'])
    self._wait_for_status(g1['uuid'], 'running', client=sc)
    self._wait_for_status(g2['uuid'], 'running', client=sc)

    LOG.debug('Refetch guest configurations')
    g1 = sc.get(g1['uuid'])
    g2 = sc.get(g2['uuid'])

    LOG.debug('Get the assigned ips')
    ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]
    ip2 = g2['nics'][0]['runtime']['ip_v4']["uuid"]
    self._wait_for_open_socket(ip1, 22, timeout=300, close_on_success=True)
    self._wait_for_open_socket(ip2, 22, timeout=300, close_on_success=True)

    from fabric import Connection
    LOG.debug('Using a password to SSH to the servers '
              '(not using ssh config)')
    self.ssh_host1 = "root@" + ip1
    connection_host1 = Connection(host=self.ssh_host1,
                                  connect_kwargs={"password": p_pass})
    self.ssh_host2 = "root@" + ip2
    connection_host2 = Connection(host=self.ssh_host2,
                                  connect_kwargs={"password": p_pass})

    LOG.debug('Changing hostnames and restarting avahi on guest 1')
    self.run_command = 'hostname {} && service avahi-daemon restart'.format(
        "atom1")
    self.cmd_exec_res = connection_host1.run(
        self.run_command).stderr.replace('\n', '')
    print(self.cmd_exec_res)

    LOG.debug('Changing hostnames and restarting avahi on guest 2')
    self.run_command = 'hostname {} && service avahi-daemon restart'.format(
        "atom2")
    self.cmd_exec_res = connection_host2.run(
        self.run_command).stderr.replace('\n', '')
    print(self.cmd_exec_res)

    # LOG.debug('Ping the two hosts via private network')
    # self.run_command = 'ping atom2.local -c 1'
    # cmd_exec_res = connection_host1.run(self.run_command).stdout.replace('\n', '')
    # self.assertEqual(cmd_exec_res, 0, 'Could not ping host atom2 from atom1')

    LOG.debug('poweroff both servers')
    # 'poweroff' drops the SSH session, so fabric raises UnexpectedExit.
    self.run_command = 'poweroff'
    with self.assertRaises(UnexpectedExit):
        self.cmd_exec_res = connection_host1.run(self.run_command)
        print(self.cmd_exec_res)
    with self.assertRaises(UnexpectedExit):
        self.cmd_exec_res = connection_host2.run(self.run_command)
        print(self.cmd_exec_res)

    LOG.debug('Wait for complete shutdown')
    self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Deleting both guests')
    sc.delete(g1['uuid'])
    sc.delete(g2['uuid'])

    LOG.debug('Deleting both drives')
    dc.delete(d1['uuid'])
    dc.delete(d2_uuid)
    self._wait_deleted(d1['uuid'], client=dc)
    self._wait_deleted(d2_uuid, client=dc)