def test_drive(self):
    """Create a drive over the API, wait for it to settle via websocket,
    delete it, and verify a subsequent websocket wait fails with 404.
    """
    ws = resource.Websocket()
    d = resource.Drive().create(
        {
            "size": 1000 ** 3,  # 1 GB (decimal, as the API expects)
            "name": "",
            "media": "disk"
        }
    )
    # Block until the freshly created drive reports 'unmounted'.
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri,
        (d['resource_uri'], resource.Drive),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'unmounted'
    )
    resource.Drive().delete(d['uuid'])
    # Waiting on a deleted drive must surface a 404; anything else is a
    # real failure and is re-raised. extra_filter=False forces the wait
    # to keep polling until the error arrives.
    try:
        ret = ws.wait_obj_wrapper(
            ws.wait_obj_uri,
            (d['resource_uri'], resource.Drive),
            timeout=30,
            extra_filter=lambda x: False
        )
    except errors.ClientError as e:
        if e.status_code != 404:
            raise
def _get_persistent_image_uuid_and_pass(self):
    """Return ``(uuid, ssh_password)`` of the configured persistent image.

    Looks for the image first among the account drives, then in the
    drive library (cloning it into the account when found there).
    Raises SkipTest when the image is not configured or cannot be found.
    """
    # Get a good persistent test image
    p_name = config.get('persistent_drive_name')
    p_pass = config.get('persistent_drive_ssh_password')
    if p_name is None:
        raise SkipTest('A persistent_drive_name must be stated in the '
                       'client configuration to execute this test')

    def _filter_drives(av_drives):
        # Pick the first usable drive whose name contains p_name.
        for drive in av_drives:
            if p_name in drive['name'] and drive['status'] in \
                    ('mounted', 'unmounted', 'cloning_src',):
                return drive['uuid']
        return None

    puuid = _filter_drives(cr.Drive().list_detail())
    if puuid is None:
        # Not in the account — fall back to the public drive library.
        puuid = _filter_drives(cr.LibDrive().list_detail())
        if puuid is not None:
            # Library drives cannot be attached directly; clone into the
            # account and wait for the clone to become usable.
            client_drives = cr.Drive()
            clone_drive_def = {
                'name': p_name,
            }
            cloned_drive = client_drives.clone(puuid, clone_drive_def)
            self._wait_for_status(cloned_drive['uuid'], 'unmounted',
                                  timeout=self.TIMEOUT_DRIVE_CLONING,
                                  client=client_drives)
            puuid = cloned_drive['uuid']
    if puuid is None:
        raise SkipTest("There is no drive matching {}".format(p_name))
    return puuid, p_pass
def test_guest_drive(self):
    """Create a stopped server and a drive, attach the drive to the
    server, then tear both down, tracking state over the websocket.
    """
    ws = resource.Websocket()
    g = resource.Server().create(
        {
            "cpu": 1000,
            "name": "",
            "mem": 256 * 1024 ** 2,  # 256 MiB
            "vnc_password": "******"
        }
    )
    # New servers settle into 'stopped' before they can be reconfigured.
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri,
        (g['resource_uri'], resource.Server),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'stopped'
    )
    d = resource.Drive().create(
        {
            "size": 1000 ** 3,
            "name": "",
            "media": "disk"
        }
    )
    ret = ws.wait_obj_wrapper(
        ws.wait_obj_uri,
        (d['resource_uri'], resource.Drive),
        timeout=30,
        extra_filter=lambda x: x['status'] == 'unmounted'
    )
    # Attach the drive: server update must repeat the full definition.
    resource.Server().update(
        g['uuid'],
        {
            "cpu": 1000,
            "name": "",
            "mem": 256 * 1024 ** 2,
            "vnc_password": "******",
            "drives": [
                {
                    "dev_channel": "0:0",
                    "device": "virtio",
                    "drive": d['uuid']
                }
            ]
        }
    )
    # Wait for the update to be reflected, then clean up both resources.
    ws.wait_obj_uri(g['resource_uri'], resource.Server)
    resource.Drive().delete(d['uuid'])
    resource.Server().delete(g['uuid'])
def test_drive(self):
    """Create a drive, give it time to settle, then delete it.

    Uses a fixed grace period instead of a websocket status wait; the
    websocket wrapper proved unreliable for this fast transition, so the
    dead commented-out wait has been removed in favor of the sleep.
    """
    # NOTE(review): the Websocket object is still instantiated even though
    # no wait is performed — presumably subscribing has side effects worth
    # keeping; confirm before removing.
    ws = resource.Websocket()
    d = resource.Drive().create({
        "size": 1000**3,  # 1 GB (decimal)
        "name": "",
        "media": "disk"
    })
    # Fixed grace period for the drive to reach a deletable state.
    time.sleep(30)
    resource.Drive().delete(d['uuid'])
def setUp(self):
    """Build the snapshot/drive API clients and a shared response dumper."""
    super(SnapshotsTest, self).setUp()
    snap = cr.Snapshot()
    drive = cr.Drive()
    self.snap_client = snap
    self.drive_client = drive
    self.dump_response = DumpResponse(clients=[snap, drive])
def setUp(self):
    """Prepare a ~10 MiB local file and a drive client for upload tests."""
    super(TestUpload, self).setUp()
    # 10 MiB plus a random sub-KiB tail, so consecutive runs upload
    # files of slightly different sizes.
    self.file_size = 10 * 1024 ** 2 + random.randrange(0, 1024)
    self.file_path = self.generate_file()
    self.dc = cr.Drive()
def test_server_edit(self):
    """Exercise server update: a simple field edit, then a drive attach,
    dumping both responses for the API documentation.
    """
    server_def = {
        'name': 'test_server_1',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': '******',
    }
    server = self._create_a_server(server_req=server_def)

    # Test simple update
    server_def['name'] = 'test_server_updated'
    server_def['cpu'] = 2000
    server_def['vnc_password'] = '******'

    with self.dump_response('server_edit_minimal'):
        updated_server = self.client.update(server['uuid'], server_def)

    # Every edited field must come back in the server representation.
    self.assertDictContainsSubset(server_def, updated_server)

    dv = cr.Drive()
    drive_def_1 = {
        'name': 'test_drive_1',
        'size': '1024000000',
        'media': 'disk',
    }
    drive = dv.create(drive_def_1)
    self._wait_for_status(drive['uuid'], 'unmounted', client=dv)

    # Test attach drive
    server_def['drives'] = [
        {
            "device": "virtio",
            "dev_channel": "0:0",
            "drive": drive['uuid'],
            "boot_order": 1
        },
    ]

    with self.dump_response('server_attach_drive'):
        updated_server = self.client.update(server['uuid'], server_def)

    self.assertEqual(
        server_def['drives'][0]['drive'],
        updated_server['drives'][0]['drive']['uuid'],
        'The updated server and the update definition do not match')

    # Clean up: server first, then the (now detached) drive.
    self.client.delete(updated_server['uuid'])
    dv.delete(drive['uuid'])
    self._wait_deleted(drive['uuid'], client=dv)
def _get_persistent_image_uuid_and_pass(self):
    """Return ``(uuid, ssh_password)`` of the configured persistent image.

    Raises SkipTest when no image name is configured or no account drive
    matches the configured name.
    """
    # Get a good persistent test image
    p_name = config.get('persistent_drive_name')
    p_pass = config.get('persistent_drive_ssh_password')
    if p_name is None:
        raise SkipTest(
            'A persistent_drive_name must be stated in the client configuration to execute this test'
        )

    # First account drive whose name contains the configured name, or None.
    puuid = next(
        (drive['uuid'] for drive in cr.Drive().list_detail()
         if p_name in drive['name']),
        None)
    if puuid is None:
        raise SkipTest("There is no drive matching {}".format(p_name))
    return puuid, p_pass
def test_libdrive_listing(self):
    """List library drives, fetch one (preferring a licensed drive), and
    verify the same drive is reachable through the /drives/ endpoint.
    """
    with self.dump_response('libdrive_list'):
        libdrives = self.client.list(query_params={'limit': 5})

    # Select the lib drive with most interesting attributes
    libdrive_uuid = libdrives[0]['uuid']  # by default use the first possible
    for d in libdrives:
        if len(d['licenses']) > 0:  # pick a drive with licenses
            libdrive_uuid = d['uuid']
            break

    with self.dump_response('libdrive_get'):
        libdrive = self.client.get(libdrive_uuid)

    dc = cr.Drive()
    # NOTE(review): the dump label below looks misspelled
    # ('librdrive' vs 'libdrive') — left unchanged since downstream
    # documentation tooling may key on the exact name; confirm.
    with DumpResponse(clients=[dc])('librdrive_get_through_drives'):
        libdrive_from_drive_url = dc.get(libdrive_uuid)

    # Library drives have no owner and must match across both endpoints.
    self.assertIsNone(libdrive_from_drive_url['owner'])
    self.assertEqual(libdrive['uuid'], libdrive_from_drive_url['uuid'])
    self.assertEqual(libdrive['name'], libdrive_from_drive_url['name'])
def _clean_drives(self):
    """
    Removes all the drives in the acceptance test account
    ( containing 'test' or 'stress_atom_clone' keyword )

    Drives are bucketed by status: deletable ones are deleted and
    awaited; mounted or in-transition drives are only reported.

    :return:
    """
    drive_client = cr.Drive()
    mounted = []    # cannot be deleted while attached to a server
    deleting = []   # delete issued, waiting for it to complete
    inter = []      # stuck in an intermediate state (e.g. cloning)
    for drive in drive_client.list_detail():
        if 'test' in drive['name'] or 'stress_atom_clone' in drive['name']:
            status = drive['status']
            if status == 'mounted':
                mounted.append(drive['uuid'])
            elif status in ('unmounted', 'uploading'):
                drive_client.delete(drive['uuid'])
                deleting.append(drive['uuid'])
            else:
                inter.append(drive['uuid'])
    for uuid in deleting:
        try:
            self._wait_deleted(uuid, client=drive_client)
        except Exception:
            # Narrowed from a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit; cleanup stays best-effort.
            LOG.exception("Drive {} did not delete in time".format(uuid))
    if mounted:
        LOG.error(
            'The drives {} are still mounted and cannot be deleted'.format(
                mounted))
    if inter:
        LOG.error(
            'The drives {} are stuck in intermediate states and cannot be deleted.'
            .format(inter))
def setUp(self):
    """Create the server and drive API clients used by the stress tests."""
    super(ServerStressTest, self).setUp()
    self.server_client, self.drive_client = cr.Server(), cr.Drive()
def test_server_clone_with_avoid_drive(self):
    """Clone a two-drive server with ``avoid`` on its own uuid and check
    that the disk is duplicated while the cdrom stays shared.
    """
    dv = cr.Drive()
    drive_def_1 = {
        'name': 'test_drive_1',
        'size': '1024000000',
        'media': 'disk',
    }
    drive1 = dv.create(drive_def_1)
    self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)

    # NOTE(review): a second Drive client is created here although the
    # first would do — kept as-is to preserve behavior.
    dv = cr.Drive()
    drive_def_2 = {
        'name': 'test_drive_2',
        'size': '1024000000',
        'media': 'cdrom',
    }
    drive2 = dv.create(drive_def_2)
    self._wait_for_status(drive2['uuid'], 'unmounted', client=dv)

    server_def = {
        'name': 'testServerAcc',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": drive1['uuid'],
                "boot_order": 1
            },
            {
                "device": "virtio",
                "dev_channel": "0:1",
                "drive": drive2['uuid'],
                "boot_order": 2
            },
        ],
        "nics": [{
            "ip_v4_conf": {
                "ip": None,
                "conf": "dhcp"
            },
            "model": "virtio",
        }],
    }

    server = self.client.create(server_def)
    # avoid= steers the cloned disk away from the source server's host.
    clone = self.client.clone(server['uuid'], {
        'name': 'cloned server name',
        'random_vnc_password': True
    }, avoid=[server['uuid']])

    for mount in clone['drives']:
        drive_uuid = mount['drive']['uuid']
        self._wait_for_status(drive_uuid, 'mounted', client=dv)

    # The disk is copied (new uuid); the cdrom is shared (same uuid).
    self.assertNotEqual(clone['drives'][0]['drive']['uuid'],
                        server['drives'][0]['drive']['uuid'])
    self.assertEqual(clone['drives'][1]['drive']['uuid'],
                     server['drives'][1]['drive']['uuid'])

    self.client.delete_with_all_drives(server['uuid'])
    self.client.delete_with_disks(clone['uuid'])

    self._wait_deleted(server['drives'][0]['drive']['uuid'], client=dv)
    self._wait_deleted(server['drives'][1]['drive']['uuid'], client=dv)
    self._wait_deleted(clone['drives'][0]['drive']['uuid'], client=dv)
def test_create_full_server(self):
    """Create a server with every commonly-used field populated (disk +
    cdrom, nic, meta), dump the response, then tear everything down.
    """
    dv = cr.Drive()
    dump_response = DumpResponse(clients=[self.client])

    drive_def_1 = {
        'name': 'test_drive_1',
        'size': '1024000000',
        'media': 'disk',
    }
    drive_def_2 = {
        'name': 'test_drive_2',
        'size': '1024000000',
        'media': 'cdrom',
    }

    drive1 = dv.create(drive_def_1)
    drive2 = dv.create(drive_def_2)
    self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)
    self._wait_for_status(drive2['uuid'], 'unmounted', client=dv)

    server_definition = {
        "requirements": [],
        "name": "test_acc_full_server",
        "cpus_instead_of_cores": False,
        "tags": [],
        "mem": 256 * 1024**2,
        "nics": [{
            "ip_v4_conf": {
                "conf": "dhcp"
            },
        }],
        "enable_numa": False,
        "cpu": 1000,
        "drives": [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": drive1['uuid'],
                "boot_order": 1
            },
            {
                "device": "ide",
                "dev_channel": "0:0",
                "drive": drive2['uuid'],
            },
        ],
        "smp": 1,
        "hv_relaxed": False,
        "hv_tsc": False,
        "meta": {
            "description": "A full server with description"
        },
        "vnc_password": "******",
    }

    with dump_response('server_create_full'):
        server = self.client.create(server_definition)

    # TODO: Uncomment this when the guest_drive definition order changes reach production
    #self._verify_list(server, True)

    self.client.delete(server['uuid'])
    self._verify_list(server, False)

    dv.delete(drive1['uuid'])
    dv.delete(drive2['uuid'])
    self._wait_deleted(drive1['uuid'], client=dv)
    self._wait_deleted(drive2['uuid'], client=dv)
def test_server_runtime(self):
    """Start a one-drive server and verify runtime nic information is
    exposed both on the server object and via the runtime call.
    """
    dv = cr.Drive()
    drive_def_1 = {
        'name': 'test_drive_1',
        'size': '1024000000',
        'media': 'disk',
    }
    drive1 = dv.create(drive_def_1)
    self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)

    server_def = {
        'name': 'testServerAcc',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": drive1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [{
            "ip_v4_conf": {
                "ip": None,
                "conf": "dhcp"
            },
            "model": "virtio",
        }],
    }

    server = self.client.create(server_def)
    self._verify_list(server, True)

    self.client.start(server['uuid'])
    self._wait_for_status(server['uuid'], 'running')

    with DumpResponse(clients=[self.client], name='server_get_running'):
        server_def = self.client.get(server['uuid'])

    # A running dhcp nic must report a public interface with an IPv4.
    self.assertEqual(server_def['runtime']['nics'][0]['interface_type'],
                     'public')
    self.assertIsNotNone(server_def['runtime']['nics'][0]['ip_v4'])

    # check runtime call
    runtime = self.client.runtime(server['uuid'])
    self.assertEqual(runtime['nics'][0]['interface_type'], 'public')
    self.assertIsNotNone(runtime['nics'][0]['ip_v4'])

    self.client.stop(server['uuid'])
    self._wait_for_status(server['uuid'], 'stopped')
    self.client.delete(server['uuid'])
    self._verify_list(server, False)

    dv.delete(drive1['uuid'])
    self._wait_deleted(drive1['uuid'], client=dv)
def test_tags(self):
    """Walk the whole tag API: schema, create (with and without attached
    resources), list/get, resource updates from both sides, and delete.
    """
    with self.dump_response('tags_schema'):
        self.client.get_schema()

    # Resources of several types to attach tags to.
    sc = cr.Server()
    server1 = sc.create({
        'name': 'test_server1',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': '******'
    })
    server2 = sc.create({
        'name': 'test_server2',
        'cpu': 1000,
        'mem': 512 * 1024**2,
        'vnc_password': '******'
    })

    dc = cr.Drive()
    drive = dc.create({
        'name': 'test_drive',
        'size': 1000**3,
        'media': 'disk'
    })

    ip = cr.IP().list()[0]
    vlan = cr.VLAN().list()[0]

    with self.dump_response('tags_create'):
        tag1 = self.client.create({'name': 'MyGroupOfThings'})

    with self.dump_response('tags_create_with_resource'):
        tag2 = self.client.create({
            'name': 'TagCreatedWithResource',
            'resources': [
                server1['uuid'], server2['uuid'], drive['uuid'],
                ip['uuid'], vlan['uuid']
            ]
        })

    with self.dump_response('tags_list'):
        self.client.list()

    with self.dump_response('tags_list_detail'):
        self.client.list_detail()

    with self.dump_response('tags_get'):
        self.client.get(tag2['uuid'])

    # Shrink tag2's resource list through the tag endpoint...
    with self.dump_response('tags_update_resources'):
        self.client.update(
            tag2['uuid'], {
                'name': 'TagCreatedWithResource',
                'resources': [server1['uuid'], drive['uuid']]
            })

    # ...and assign tags from the resource (server) side.
    server2['tags'] = [tag1['uuid'], tag2['uuid']]
    with DumpResponse(clients=[sc], name='tags_update_tag_from_resource'):
        sc.update(server2['uuid'], server2)

    with self.dump_response('tags_list_resource'):
        self.client.servers(tag1['uuid'])

    # Grace period before cleanup so the tag updates fully propagate.
    time.sleep(30)
    dc.delete(drive['uuid'])
    sc.delete(server1['uuid'])
    sc.delete(server2['uuid'])

    with self.dump_response('tags_delete'):
        self.client.delete(tag1['uuid'])
    self.client.delete(tag2['uuid'])
def test_servers_operations(self):
    """End-to-end scenario (fabric 1.x): clone the persistent image, boot
    a guest and a clone of it, verify guest-to-guest connectivity over
    the private vlan via SSH, then halt and tear everything down.
    """
    dc = cr.Drive()
    sc = cr.Server()
    vc = cr.VLAN()

    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Get a vlan from the account')
    all_vlans = vc.list()
    if not all_vlans:
        raise SkipTest('There is no vlan in the acceptance test account')
    vlan = all_vlans[0]

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
    self._wait_for_status(d1['uuid'], status='unmounted',
                          timeout=self.TIMEOUT_DRIVE_CLONING, client=dc)

    # Guest definition: bootable clone, one dhcp nic, one private vlan nic.
    g_def = {
        "name": "test_server",
        "cpu": 1000,
        "mem": 1024**3,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [{
            "ip_v4_conf": {
                "ip": None,
                "conf": "dhcp"
            },
            "model": "virtio",
        }, {
            "model": "virtio",
            "vlan": vlan['uuid'],
        }],
    }

    LOG.debug('Creating guest with drive')
    g1 = sc.create(g_def)
    self._wait_for_status(d1['uuid'], 'mounted', client=dc)

    LOG.debug('Clone the guest')
    g2 = sc.clone(g1['uuid'])
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Check if the drive is active ( mounted )')
    d2_uuid = g2['drives'][0]['drive']['uuid']
    self._wait_for_status(d2_uuid, 'mounted', client=dc)

    LOG.debug('Start both guests')
    sc.start(g1['uuid'])
    sc.start(g2['uuid'])
    self._wait_for_status(g1['uuid'], 'running', client=sc)
    self._wait_for_status(g2['uuid'], 'running', client=sc)

    LOG.debug('Refetch guest configurations')
    g1 = sc.get(g1['uuid'])
    g2 = sc.get(g2['uuid'])

    LOG.debug('Get the assigned ips')
    ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]
    ip2 = g2['nics'][0]['runtime']['ip_v4']["uuid"]

    # SSH must come up on both guests before we drive them via fabric.
    self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)
    self._wait_for_open_socket(ip2, 22, timeout=40, close_on_success=True)

    from fabric.api import settings as fabric_settings
    from fabric import tasks, api

    fab_kwargs = {
        "warn_only": True,
        "abort_on_prompts": True,
        "use_ssh_config": p_pass is None
    }
    LOG.debug('Using fabric config {}'.format(fab_kwargs))
    if p_pass is not None:
        fab_kwargs['password'] = p_pass
        LOG.debug(
            'Using a password to SSH to the servers ( not using ssh config )'
        )

    with fabric_settings(**fab_kwargs):
        LOG.debug('Changing hostnames and restarting avahi on guest 1')
        set_hostname = 'hostname {} && service avahi-daemon restart'
        tasks.execute(api.run, set_hostname.format("atom1"),
                      hosts=["root@%s" % ip1])

        LOG.debug('Changing hostnames and restarting avahi on guest 2')
        tasks.execute(api.run, set_hostname.format("atom2"),
                      hosts=["root@%s" % ip2])

        LOG.debug('Ping the two hosts via private network')
        ping_res = tasks.execute(api.run, "ping atom2.local -c 1",
                                 hosts=["root@%s" % ip1])
        # return_code 0 == ping succeeded over the private vlan.
        self.assertEqual(ping_res.values()[0].return_code, 0,
                         'Could not ping host atom2 from atom1')

        LOG.debug('Halt both servers')
        tasks.execute(api.run, "halt",
                      hosts=["root@%s" % ip1, "root@%s" % ip2])

    LOG.debug('Wait for complete shutdown')
    self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Deleting both guests')
    sc.delete(g1['uuid'])
    sc.delete(g2['uuid'])

    LOG.debug('Deleting both drives')
    dc.delete(d1['uuid'])
    dc.delete(d2_uuid)

    self._wait_deleted(d1['uuid'], client=dc)
    self._wait_deleted(d2_uuid, client=dc)
def setUp(self):
    """Instantiate the drive client exercised by the stress tests."""
    super(DriveStressTest, self).setUp()
    drive_client = cr.Drive()
    self.client = drive_client
def setUp(self):
    """Create the drive client and wire it into a response dumper."""
    super(DriveBasicTest, self).setUp()
    client = cr.Drive()
    self.client = client
    self.dump_response = DumpResponse(clients=[client])
def test_servers_operations(self):
    """End-to-end scenario (fabric 2.x ``Connection``): clone the
    persistent image, boot a guest plus a clone of it, reconfigure both
    over SSH, power them off, and tear everything down.
    """
    dc = cr.Drive()
    sc = cr.Server()
    vc = cr.VLAN()

    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Get a vlan from the account')
    all_vlans = vc.list()
    if not all_vlans:
        raise SkipTest('There is no vlan in the acceptance test account')
    vlan = all_vlans[0]

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
    self._wait_for_status(d1['uuid'], status='unmounted',
                          timeout=self.TIMEOUT_DRIVE_CLONING, client=dc)

    # Guest definition: bootable clone, one dhcp nic, one private vlan nic.
    g_def = {
        "name": "test_server",
        "cpu": 1000,
        "mem": 1024**3,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [{
            "ip_v4_conf": {
                "ip": None,
                "conf": "dhcp"
            },
            "model": "virtio",
        }, {
            "model": "virtio",
            "vlan": vlan['uuid'],
        }],
    }

    LOG.debug('Creating guest with drive')
    g1 = sc.create(g_def)
    self._wait_for_status(d1['uuid'], 'mounted', client=dc)

    LOG.debug('Clone the guest')
    g2 = sc.clone(g1['uuid'])
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Check if the drive is active ( mounted )')
    d2_uuid = g2['drives'][0]['drive']['uuid']
    self._wait_for_status(d2_uuid, 'mounted', client=dc)

    LOG.debug('Start both guests')
    sc.start(g1['uuid'])
    sc.start(g2['uuid'])
    self._wait_for_status(g1['uuid'], 'running', client=sc)
    self._wait_for_status(g2['uuid'], 'running', client=sc)

    LOG.debug('Refetch guest configurations')
    g1 = sc.get(g1['uuid'])
    g2 = sc.get(g2['uuid'])

    LOG.debug('Get the assigned ips')
    ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]
    ip2 = g2['nics'][0]['runtime']['ip_v4']["uuid"]

    # SSH must come up on both guests before we connect.
    self._wait_for_open_socket(ip1, 22, timeout=300, close_on_success=True)
    self._wait_for_open_socket(ip2, 22, timeout=300, close_on_success=True)

    from fabric import Connection

    LOG.debug(
        'Using a password to SSH to the servers ( not using ssh config )')
    self.ssh_host1 = "root@" + ip1
    connection_host1 = Connection(host=self.ssh_host1,
                                  connect_kwargs={"password": p_pass})
    self.ssh_host2 = "root@" + ip2
    connection_host2 = Connection(host=self.ssh_host2,
                                  connect_kwargs={"password": p_pass})

    LOG.debug('Changing hostnames and restarting avahi on guest 1')
    self.run_command = 'hostname {} && service avahi-daemon restart'.format(
        "atom1")
    self.cmd_exec_res = connection_host1.run(
        self.run_command).stderr.replace('\n', '')
    print(self.cmd_exec_res)

    LOG.debug('Changing hostnames and restarting avahi on guest 2')
    self.run_command = 'hostname {} && service avahi-daemon restart'.format(
        "atom2")
    self.cmd_exec_res = connection_host2.run(
        self.run_command).stderr.replace('\n', '')
    print(self.cmd_exec_res)

    #LOG.debug('Ping the two hosts via private network')
    #self.run_command = 'ping atom2.local -c 1'
    #cmd_exec_res = connection_host1.run(self.run_command).stdout.replace('\n', '')
    #self.assertEqual(cmd_exec_res, 0, 'Could not ping host atom2 from atom1')

    # 'poweroff' drops the SSH connection mid-command, which fabric 2.x
    # reports as UnexpectedExit — expected here, hence assertRaises.
    LOG.debug('poweroff both servers')
    self.run_command = 'poweroff'
    with self.assertRaises(UnexpectedExit):
        self.cmd_exec_res = connection_host1.run(self.run_command)
    print(self.cmd_exec_res)

    self.run_command = 'poweroff'
    with self.assertRaises(UnexpectedExit):
        self.cmd_exec_res = connection_host2.run(self.run_command)
    print(self.cmd_exec_res)

    LOG.debug('Wait for complete shutdown')
    self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Deleting both guests')
    sc.delete(g1['uuid'])
    sc.delete(g2['uuid'])

    LOG.debug('Deleting both drives')
    dc.delete(d1['uuid'])
    dc.delete(d2_uuid)

    self._wait_deleted(d1['uuid'], client=dc)
    self._wait_deleted(d2_uuid, client=dc)
def test_firewall(self):
    """Attach a firewall policy to a guest nic, then verify that adding a
    drop rule closes port 22 and removing it reopens the port.
    """
    dc = cr.Drive()
    sc = cr.Server()
    fwp = cr.FirewallPolicy()

    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
    self._wait_for_status(d1['uuid'], status='unmounted',
                          timeout=self.TIMEOUT_DRIVE_CLONING, client=dc)

    # Empty policy first; rules are added/removed later in the test.
    fw_policy = fwp.create({})

    g_def = {
        "name": "testFirewallServer",
        "cpu": 1000,
        "mem": 1024**3,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [
            {
                "firewall_policy": fw_policy['uuid'],
                "ip_v4_conf": {
                    "ip": None,
                    "conf": "dhcp"
                },
                "model": "virtio",
            },
        ],
    }

    guest = sc.create(g_def)
    self._wait_for_status(d1['uuid'], 'mounted', client=dc)

    sc.start(guest['uuid'])
    self._wait_for_status(guest['uuid'], 'running', client=sc)
    guest = sc.get(guest['uuid'])
    ip1 = guest['nics'][0]['runtime']['ip_v4']["uuid"]

    # Baseline: SSH reachable with the empty policy.
    self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)

    # Add a drop rule for inbound tcp/22 and expect the port to close.
    fw_policy['rules'] = [{
        "ip_proto": "tcp",
        "dst_port": 22,
        "direction": "in",
        "action": "drop",
        "comment": "Block SSH traffic"
    }]
    fwp.update(fw_policy['uuid'], fw_policy)
    self._wait_socket_close(ip1, 22)

    # Clear the rules and expect the port to open again.
    fw_policy['rules'] = []
    fwp.update(fw_policy['uuid'], fw_policy)
    self._wait_for_open_socket(ip1, 22)

    sc.stop(guest['uuid'])
    self._wait_for_status(guest['uuid'], 'stopped', client=sc)
    sc.delete(guest['uuid'])
    fwp.delete(fw_policy['uuid'])
def test_guest_context(self):
    """Exercise the guest context served over the guest's serial port
    (/dev/ttyS1): single-key reads, full-context reads, dynamic updates,
    and the global context, dumping each exchange for documentation.
    """
    dc = cr.Drive()
    sc = cr.Server()
    gcc = cr.GlobalContext()
    dump_response = DumpResponse(clients=[sc, dc, gcc])

    # ensure empty global context
    gcc.update({})

    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_clone_1'})

    from uuid import uuid4
    g_def = {
        "name": "test_server",
        "cpu": 1000,
        "mem": 1024**3,
        # Random password, trimmed to the API's length limit.
        'vnc_password': str(uuid4())[:18].replace('-', ''),
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [
            {
                "ip_v4_conf": {
                    "ip": None,
                    "conf": "dhcp"
                },
                "model": "virtio",
            },
        ],
        "meta": {
            "ssh_public_key":
                "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy4XpmD3kEfRZ+LCwFh3Xmqrkm7rSiDu8v+ZCTOA3vlNjmy/ZOc3vy9Zr+IhWPP4yipiApkGRsBM63tTgnxqUUn/WU7qkbBNktZBcs5p7Mj/zO4ZHkk4VoTczFzHlPGwuak2P4wTftEj7sU8IRutaAbMoKj4AMuFF50j4sIgF7P5b5FtTIM2b5HSW8BlDz10b67+xsj6s3Jv05xxbBs+RWj+v7D5yjMVeeErXoSui8dlHpUu6QOVKn8LLmdpxvehc6ns8yW7cbQvWmLjOICMnm6BXdVtOKWBncDq9FGLmKF3fUeZZPbv79Z7dyZs+xGZGMHbpaNHpuY9QhNS/hQ5D5 dave@hal"
        }
    }

    LOG.debug('Creating guest with drive')
    with dump_response('guest_for_context'):
        g1 = sc.create(g_def)

    self._wait_for_status(d1['uuid'], 'mounted', client=dc)
    sc.start(g1['uuid'])
    self._wait_for_status(g1['uuid'], 'running', client=sc)

    LOG.debug('Refetch guest configurations')
    g1 = sc.get(g1['uuid'])

    LOG.debug('Get the assigned ips')
    ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]
    self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)

    from fabric.api import settings as fabric_settings
    from fabric import tasks, api

    fab_kwargs = {
        "warn_only": True,
        "abort_on_prompts": True,
        "use_ssh_config": p_pass is None
    }
    LOG.debug('Using fabric config {}'.format(fab_kwargs))
    if p_pass is not None:
        fab_kwargs['password'] = p_pass
        LOG.debug(
            'Using a password to SSH to the servers ( not using ssh config )'
        )

    dump_path = dump_response.response_dump.dump_path

    #command_template = r"read -t 1 -d $'\004' DISCARD < /dev/ttyS1; " \
    #                   r'echo -en "<\n{}\n>" > /dev/ttyS1 && read -t 3 READVALUE < /dev/ttyS1 && echo $READVALUE'

    # Shell snippet executed on the guest: starts a background read on
    # the serial port, writes the "<\n{key}\n>" request, then echoes the
    # answer the context service sent back.
    self.command_template = r'v=$(read -t 13 READVALUE < /dev/ttyS1 && echo $READVALUE & sleep 1; echo -en "<\n{}\n>" > /dev/ttyS1; wait %1); echo $v'

    LOG.debug('Test the guest context')
    LOG.debug('Check single value retrieval')
    self.check_key_retrieval(g_def, 'context_single_value', 'name',
                             dump_path, fab_kwargs, ip1, fabric_settings,
                             tasks, api)

    ##########################################
    LOG.debug('Check key retrieval')
    self.check_key_retrieval(g_def, 'context_single_value_ssh_key',
                             '/meta/ssh_public_key', dump_path, fab_kwargs,
                             ip1, fabric_settings, tasks, api)

    ##########################################
    LOG.debug('Check complete context retrieval')
    self.check_all_retrieval(g_def, 'context_all', dump_path, fab_kwargs,
                             ip1, fabric_settings, tasks, api)

    ##########################################
    ##########################################
    ##########################################
    ##########################################

    # The context is live: a server update must be visible from inside
    # the guest without a restart.
    LOG.debug('Check context dynamic update')
    g_def['name'] += '_renamed'
    g_def['meta']['another_key'] = 'a value or something'

    upd_res = sc.update(g1['uuid'], g_def)
    self.assertEqual(g_def['name'], upd_res['name'])

    LOG.debug('Check single value retrieval')
    self.check_key_retrieval(g_def, 'context_single_value_dynamic', 'name',
                             dump_path, fab_kwargs, ip1, fabric_settings,
                             tasks, api)

    ##########################################
    LOG.debug('Check key retrieval')
    self.check_key_retrieval(g_def,
                             'context_single_value_another_key_dynamic',
                             '/meta/another_key', dump_path, fab_kwargs,
                             ip1, fabric_settings, tasks, api)

    ##########################################
    LOG.debug('Check complete context retrieval')
    self.check_all_retrieval(g_def, 'context_all_dynamic', dump_path,
                             fab_kwargs, ip1, fabric_settings, tasks, api)

    ###########################################
    ###########################################
    ###########################################

    # Global context values are exposed under /global_context/ on every
    # guest in the account.
    with dump_response('update_global_context'):
        gcc.update({'new_global_key': 'new_global_val'})

    LOG.debug('Check global context retrieval')
    command = self.command_template.format(
        '/global_context/new_global_key')
    expected_val = 'new_global_val'
    res_string = self.get_single_ctx_val(command, expected_val, fab_kwargs,
                                         ip1, fabric_settings, tasks, api)
    self.assertEqual(res_string, expected_val)
    self.dump_ctx_command(command, res_string,
                          'global_context_single_value', dump_path)

    self.check_all_retrieval(g_def, 'global_context_all', dump_path,
                             fab_kwargs, ip1, fabric_settings, tasks, api)

    LOG.debug('Stopping guest')
    sc.stop(g1['uuid'])
    self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)

    LOG.debug('Delete guest')
    sc.delete(g1['uuid'])

    LOG.debug('Delete drive')
    dc.delete(d1['uuid'])
    self._wait_deleted(d1['uuid'], client=dc)