Example #1
 def test_host_name_is_same_as_server_name(self):
     # Verify the instance host name is the same as the server name
     validation_resources = self.get_class_validation_resources(
         self.os_primary)
     linux_client = remote_client.RemoteClient(
         self.get_server_ip(self.server, validation_resources),
         self.ssh_user,
         self.password,
         validation_resources['keypair']['private_key'],
         server=self.server,
         servers_client=self.client)
     hostname = linux_client.exec_command("hostname").rstrip()
     msg = ('Failed while verifying servername equals hostname. Expected '
            'hostname "%s" but got "%s".' %
            (self.name, hostname.split(".")[0]))
     # NOTE(zhufl): Some images will add postfix for the hostname, e.g.,
     # if hostname is "aaa", postfix ".novalocal" may be added to it, and
     # the hostname will be "aaa.novalocal" then, so we should ignore the
     # postfix when checking whether hostname equals self.name.
     self.assertEqual(self.name.lower(), hostname.split(".")[0], msg)
Example #2
 def test_create_server_with_personality(self):
     file_contents = 'This is a test file.'
     file_path = '/test.txt'
     personality = [{
         'path': file_path,
         'contents': base64.b64encode(file_contents)
     }]
     password = data_utils.rand_password()
     created_server = self.create_test_server(personality=personality,
                                              adminPass=password,
                                              wait_until='ACTIVE',
                                              validatable=True)
     server = self.client.show_server(created_server['id'])['server']
     if CONF.validation.run_validation:
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(server), self.ssh_user, password,
             self.validation_resources['keypair']['private_key'])
         self.assertEqual(
             file_contents,
             linux_client.exec_command('sudo cat %s' % file_path))
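The personality test above base64-encodes the injected file before passing it to the API (`base64.b64encode` expects bytes on Python 3; later examples use oslo's helpers instead). A minimal, self-contained sketch of that encode/decode round trip, assuming oslo_serialization is available:

    from oslo_serialization import base64

    file_contents = 'This is a test file.'
    # What goes into the 'contents' field of a personality entry.
    encoded = base64.encode_as_text(file_contents)
    # What the test compares against the output of `sudo cat /test.txt`.
    assert base64.decode_as_text(encoded) == file_contents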
Example #3
 def test_200_check_connectivity(self):
     """Check inside and outside connectivities"""
     web_ip = self.ctx.web_instance.ip_address
     db_ip = self.ctx.db_instance.private_ip_address
     ssh = remote_client.RemoteClient(web_ip,
                                      self.ssh_user,
                                      pkey=self.keypair.material)
     ssh_conn = ssh.ssh_client._get_ssh_connection()
     sftp = ssh_conn.open_sftp()
     fr = sftp.file("key.pem", 'wb')
     fr.set_pipelined(True)
     fr.write(self.keypair.material)
     fr.close()
     ssh_conn.close()
     ssh.exec_command('chmod 400 key.pem')
     ssh.exec_command(
         "ssh -i key.pem -o UserKnownHostsFile=/dev/null "
         "-o StrictHostKeyChecking=no %(user)s@%(ip)s "
         "curl -s http://google.com" %
         {"user": self.ssh_user, "ip": db_ip})
Example #4
    def test_change_server_password(self):
        # Since this test messes with the password and makes the
        # server unreachable, it should create its own server
        newserver = self.create_test_server(validatable=True,
                                            wait_until='ACTIVE')
        # The server's password should be set to the provided password
        new_password = '******'
        self.client.change_password(newserver['id'], adminPass=new_password)
        waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')

        if CONF.validation.run_validation:
            # Verify that the user can authenticate with the new password
            server = self.client.show_server(newserver['id'])['server']
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server),
                self.ssh_user,
                new_password,
                server=server,
                servers_client=self.client)
            linux_client.validate_authentication()
Example #5
    def get_remote_client(self, ip_address, username=None, private_key=None):
        """Get a SSH client to a remote server

        @param ip_address the server floating or fixed IP address to use
                          for ssh validation
        @param username name of the Linux account on the remote server
        @param private_key the SSH private key to use
        @return a RemoteClient object
        """

        if username is None:
            username = CONF.validation.image_ssh_user
        # Set this with 'keypair' or others to log in with keypair or
        # username/password.
        if CONF.validation.auth_method == 'keypair':
            password = None
            if private_key is None:
                private_key = self.keypair['private_key']
        else:
            password = CONF.validation.image_ssh_password
            private_key = None
        linux_client = remote_client.RemoteClient(ip_address,
                                                  username,
                                                  pkey=private_key,
                                                  password=password)
        try:
            linux_client.validate_authentication()
        except Exception as e:
            message = ('Initializing SSH connection to %(ip)s failed. '
                       'Error: %(error)s' % {
                           'ip': ip_address,
                           'error': e
                       })
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            LOG.exception(message)
            self._log_console_output()
            raise

        return linux_client
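As the docstring above explains, get_remote_client picks key-based or password-based authentication from CONF.validation.auth_method. A hedged sketch of the underlying RemoteClient call for each mode; the IP address, username, and key path are placeholders, not values from the example:

    from tempest.common.utils.linux import remote_client

    ip_address = '203.0.113.10'                      # placeholder
    username = 'cirros'                              # placeholder
    private_key = open('/path/to/key.pem').read()    # placeholder PEM key

    # Key-based login (CONF.validation.auth_method == 'keypair').
    ssh_client = remote_client.RemoteClient(
        ip_address, username, password=None, pkey=private_key)

    # Password-based login (any other auth_method) would instead pass
    # password=CONF.validation.image_ssh_password and pkey=None.

    # Either way, authentication is checked before the client is used.
    ssh_client.validate_authentication()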
Example #6
File: compute.py, Project: sapcc/tempest
def wait_for_ssh_or_ping(server, clients, tenant_network,
                         validatable, validation_resources, wait_until,
                         set_floatingip):
    """Wait for the server for SSH or Ping as requested.

    :param server: The server dict as returned by the API
    :param clients: Client manager which provides OpenStack Tempest clients.
    :param tenant_network: Tenant network to be used for creating a server.
    :param validatable: Whether the server will be pingable or sshable.
    :param validation_resources: Resources created for the connection to the
        server. Include a keypair, a security group and an IP.
    :param wait_until: Server status to wait for the server to reach.
        It can be PINGABLE and SSHABLE states when the server is both
        validatable and has the required validation_resources provided.
    :param set_floatingip: If FIP needs to be associated to server
    """
    if set_floatingip and CONF.validation.connect_method == 'floating':
        _setup_validation_fip(
            server, clients, tenant_network, validation_resources)

    server_ip = get_server_ip(
        server, validation_resources=validation_resources)
    if wait_until == 'PINGABLE':
        waiters.wait_for_ping(
            server_ip,
            clients.servers_client.build_timeout,
            clients.servers_client.build_interval
        )
    if wait_until == 'SSHABLE':
        pkey = validation_resources['keypair']['private_key']
        ssh_client = remote_client.RemoteClient(
            server_ip,
            CONF.validation.image_ssh_user,
            pkey=pkey,
            server=server,
            servers_client=clients.servers_client
        )
        waiters.wait_for_ssh(
            ssh_client,
            clients.servers_client.build_timeout
        )
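A hedged sketch of how a test helper might call wait_for_ssh_or_ping once a server is built; the server, clients, tenant_network, and validation_resources arguments are assumed to come from the surrounding fixture and are not defined here:

    def wait_until_sshable(server, clients, tenant_network, validation_resources):
        # Hypothetical wrapper (not in the original source): block until the
        # server answers SSH, associating a floating IP first when the
        # deployment uses the 'floating' connect method.
        wait_for_ssh_or_ping(
            server, clients, tenant_network,
            validatable=True,
            validation_resources=validation_resources,
            wait_until='SSHABLE',
            set_floatingip=True)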
Example #7
    def test_can_log_into_created_server(self):

        sid = self.stack_identifier
        rid = 'SmokeServer'

        # wait for create to complete.
        self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')

        resp, body = self.client.get_resource(sid, rid)
        self.assertEqual('CREATE_COMPLETE', body['resource_status'])

        # fetch the IP address from servers client, since we can't get it
        # from the stack until stack create is complete
        resp, server = self.servers_client.get_server(
            body['physical_resource_id'])

        # Check that the user can authenticate with the generated password
        linux_client = remote_client.RemoteClient(server, 'ec2-user',
                                                  pkey=self.keypair[
                                                      'private_key'])
        linux_client.validate_authentication()
Example #8
    def test_102_tune_nat_instance(self):
        """Tune NAT in NAT instance"""
        instance = self.ctx.nat_instance
        address = instance.ip_address
        ssh = remote_client.RemoteClient(address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)

        # NOTE(ft): We must use tty mode, because some images (like Amazon
        # Linux) have restrictions (requiretty flag in /etc/sudoers)
        ssh_conn = ssh.ssh_client._get_ssh_connection()
        chan = ssh_conn.get_transport().open_session()
        chan.get_pty()
        chan.exec_command("sudo iptables -t nat -A POSTROUTING -s %s "
                          "-o eth0 -j MASQUERADE" % str(self.vpc_cidr))
        chan.close()
        chan = ssh_conn.get_transport().open_session()
        chan.get_pty()
        chan.exec_command("sudo sysctl -w net.ipv4.ip_forward=1")
        chan.close()
        ssh_conn.close()
Example #9
    def test_rebuild_server(self):
        # The server should be rebuilt using the provided image and data
        meta = {'rebuild': 'server'}
        new_name = data_utils.rand_name('server')
        file_contents = 'Test server rebuild.'
        personality = [{
            'path': 'rebuild.txt',
            'contents': base64.b64encode(file_contents)
        }]
        password = '******'
        rebuilt_server = self.client.rebuild(self.server_id,
                                             self.image_ref_alt,
                                             name=new_name,
                                             metadata=meta,
                                             personality=personality,
                                             adminPass=password)

        # If the server was rebuilt on a different image, restore it to the
        # original image once the test ends
        if self.image_ref_alt != self.image_ref:
            self.addCleanup(self._rebuild_server_and_check, self.image_ref)

        # Verify the properties in the initial response are correct
        self.assertEqual(self.server_id, rebuilt_server['id'])
        rebuilt_image_id = rebuilt_server['image']['id']
        self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
        self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])

        # Verify the server properties after the rebuild completes
        self.client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE')
        server = self.client.show_server(rebuilt_server['id'])
        rebuilt_image_id = server['image']['id']
        self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
        self.assertEqual(new_name, server['name'])

        if self.run_ssh:
            # Verify that the user can authenticate with the provided password
            linux_client = remote_client.RemoteClient(self.ip_addr,
                                                      self.ssh_user, password)
            linux_client.validate_authentication()
Example #10
    def test_rebuild_server(self):
        # The server should be rebuilt using the provided image and data
        meta = {'rebuild': 'server'}
        new_name = data_utils.rand_name('server')
        password = '******'
        rebuilt_server = self.client.rebuild_server(
            self.server_id,
            self.image_ref_alt,
            name=new_name,
            metadata=meta,
            adminPass=password)['server']

        # If the server was rebuilt on a different image, restore it to the
        # original image once the test ends
        if self.image_ref_alt != self.image_ref:
            self.addCleanup(self._rebuild_server_and_check, self.image_ref)

        # Verify the properties in the initial response are correct
        self.assertEqual(self.server_id, rebuilt_server['id'])
        rebuilt_image_id = rebuilt_server['image']['id']
        self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
        self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])

        # Verify the server properties after the rebuild completes
        waiters.wait_for_server_status(self.client,
                                       rebuilt_server['id'], 'ACTIVE')
        server = self.client.show_server(rebuilt_server['id'])['server']
        rebuilt_image_id = server['image']['id']
        self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
        self.assertEqual(new_name, server['name'])

        if CONF.validation.run_validation:
            # TODO(jlanoux) add authentication with the provided password
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(rebuilt_server),
                self.ssh_user,
                self.password,
                self.validation_resources['keypair']['private_key'])
            linux_client.validate_authentication()
Example #11
 def dhcp_121_metadata_hostroutes_check_on_vm_nsxv(self, vm_env):
     self.serv_fip = vm_env['fip1']['floating_ip_address']
     username, password = self.get_image_userpass()
     # Connect to the launched instance using the ssh library
     client = remote_client.RemoteClient(self.serv_fip,
                                         username=username,
                                         password=password)
     # Execute the route command on the launched instance
     cmd = ('/sbin/route -n')
     out_data = client.exec_command(cmd)
     self.assertIn(Metadataserver_ip, out_data)
     LOG.info("Metadata routes available on vm")
     cmd = ('wget  http://169.254.169.254 -O sample.txt')
     client.exec_command(cmd)
     cmd = ('cat sample.txt')
     out_data = client.exec_command(cmd)
     # Check whether the metadata server information is available
     self.assertIn('latest', out_data)
     LOG.info("Metadata server is accessible")
     # Fetch DHCP edge info from NSX-v
     exc_edge = self.vsm.get_dhcp_edge_info()
     self.assertIsNotNone(exc_edge)
     # Fetch host-route and metadata info from nsx-v
     dhcp_options_info = {}
     dhcp_options_info = \
         exc_edge['staticBindings']['staticBindings'][0]['dhcpOptions']
     # Check that the host route information is available at the backend
     self.assertIn(
         Metadataserver_ip, dhcp_options_info['option121']['staticRoutes']
         [0]['destinationSubnet'])
     # Storing sec-group, network, subnet, router, server info in dict
     project_dict = dict(security_group=vm_env['security_group'],
                         network=vm_env['network'],
                         subnet=vm_env['subnet'],
                         router=vm_env['router'],
                         client_mgr=vm_env['client_mgr'],
                         serv1=vm_env['serv1'],
                         fip1=vm_env['fip1'])
     return project_dict
Example #12
 def test_resize_volume_backed_server_confirm(self):
     # We have to create a new server that is volume-backed since the one
     # from setUp is not volume-backed.
     server = self.create_test_server(
         volume_backed=True, wait_until='ACTIVE')
     self._test_resize_server_confirm(server['id'])
     if CONF.compute_feature_enabled.console_output:
         # Now do something interactive with the guest like get its console
         # output; we don't actually care about the output,
         # just that it doesn't raise an error.
         self.client.get_console_output(server['id'])
     if CONF.validation.run_validation:
         validation_resources = self.get_class_validation_resources(
             self.os_primary)
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(server, validation_resources),
             self.ssh_user,
             password=None,
             pkey=validation_resources['keypair']['private_key'],
             server=server,
             servers_client=self.client)
         linux_client.validate_authentication()
Example #13
 def test_021_check_traffic_visibility(self):
     """Are other VMs visible?"""
     if self.ctx.instance3 is None:
         self.skipTest("Instance 3 was not initialized")
     with self.TcpDumpRunner(self.ctx.instance3,
                             self.ssh_user,
                             self.keypair,
                             "ip proto \\\\icmp") as tdump:
         ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                          self.ssh_user,
                                          pkey=self.keypair.material)
         ssh.exec_command("ping -c 1 %s" %
                          self.instance2.private_ip_address)
         if not tdump.stop():
             raise base.TestCasePreparationError()
         resp = tdump.get_result()
     captured = ""
     for line in resp.splitlines():
         if line.endswith("packets captured"):
             captured = line
             break
     tokens = captured.split()
     packets = int(tokens[0])
     self.assertEqual(0, packets)
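Both tcpdump-based checks (this one and Example #17 below) parse the trailing "N packets captured" summary line of the tcpdump output. A small self-contained sketch of that parsing with a made-up output string:

    resp = ("3 packets captured\n"
            "3 packets received by filter\n"
            "0 packets dropped by kernel")
    captured = ""
    for line in resp.splitlines():
        if line.endswith("packets captured"):
            captured = line
            break
    # Fall back to 0 if tcpdump printed no summary line.
    packets = int(captured.split()[0]) if captured else 0
    assert packets == 3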
Example #14
    def setUp(self):
        super(TestRemoteClientWithServer, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.patchobject(config, 'TempestConfigPrivate',
                         fake_config.FakePrivate)
        cfg.CONF.set_default('ip_version_for_ssh', 4, group='validation')
        cfg.CONF.set_default('network_for_ssh', 'public',
                             group='validation')
        cfg.CONF.set_default('connect_timeout', 1, group='validation')
        cfg.CONF.set_default('console_output', True,
                             group='compute-feature-enabled')

        self.conn = remote_client.RemoteClient(
            '127.0.0.1', 'user', 'pass',
            server=self.server, servers_client=FakeServersClient())
        self.useFixture(fixtures.MockPatch(
            'tempest.lib.common.ssh.Client._get_ssh_connection',
            side_effect=lib_exc.SSHTimeout(host='127.0.0.1',
                                           user='******',
                                           password='******')))
        self.log = self.useFixture(fixtures.FakeLogger(
            name='tempest.common.utils.linux.remote_client',
            level='DEBUG'))
Example #15
    def setUpClass(cls):
        super(VolumeBenchmarkTest, cls).setUpClass()

        cls._load_benchmark_data("VolumeBenchmarkTest")

        cfg = cls.config.cloudscaling
        image_name = cfg.general_image_name
        cls.ssh_user = cfg.general_ssh_user_name
        cls.volume_size = cfg.volume_benchmark_volume_size_gb
        cls.volume_fill = cfg.volume_benchmark_volume_fill_percent
        cls.volume_attach_name = "sdh"
        cls.ctx = cls.Context()

        cls.image_id = cls._prepare_image_id(image_name)

        cls.keypair = cls._prepare_key_pair()
        sg = cls._prepare_security_group()
        cls.sec_group_name = sg.name

        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = cls.ec2_client.run_instances(
            cls.image_id,
            instance_type=cls.instance_type,
            key_name=cls.keypair.name,
            security_groups=(cls.sec_group_name, ))
        cls.addResourceCleanUp(cls.destroy_reservation, reservation)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        cls._wait_instance_state(instance, "running")
        cls.ctx.instance = instance

        ip_address = cls._prepare_public_ip(instance)
        ssh = remote_client.RemoteClient(ip_address,
                                         cls.ssh_user,
                                         pkey=cls.keypair.material)
        cls.ctx.ssh = ssh
Example #16
 def test_can_create_server_with_max_number_personality_files(self):
     # Server should be created successfully if maximum allowed number of
     # files is injected into the server during creation.
     file_contents = 'This is a test file.'
     limits = self.user_client.show_limits()['limits']
     max_file_limit = limits['absolute']['maxPersonality']
     if max_file_limit == -1:
         raise self.skipException("No limit for personality files")
     person = []
     for i in range(0, max_file_limit):
         # NOTE(andreaf) The cirros disk image is blank before boot
         # so we can only inject safely to /
         path = '/test' + str(i) + '.txt'
         person.append({
             'path':
             path,
             'contents':
             base64.encode_as_text(file_contents + str(i)),
         })
     password = data_utils.rand_password()
     created_server = self.create_test_server(personality=person,
                                              adminPass=password,
                                              wait_until='ACTIVE',
                                              validatable=True)
     server = self.client.show_server(created_server['id'])['server']
     if CONF.validation.run_validation:
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(server),
             self.ssh_user,
             password,
             self.validation_resources['keypair']['private_key'],
             server=server,
             servers_client=self.client)
         for i in person:
             self.assertEqual(
                 base64.decode_as_text(i['contents']),
                 linux_client.exec_command('sudo cat %s' % i['path']))
Example #17
 def test_023_check_multicast_visible(self):
     """Is multicast traffic visible?"""
     if self.ctx.instance3 is None:
         self.skipTest("Instance 3 was not initialized")
     with self.TcpDumpRunner(self.ctx.instance3,
                             self.ssh_user,
                             self.keypair,
                             "ip multicast") as tdump:
         ssh = remote_client.RemoteClient(self.instance1.ip_address,
                                          self.ssh_user,
                                          pkey=self.keypair.material)
         ssh.exec_command("echo ping |"
                          "socat - UDP4-DATAGRAM:239.1.1.1:6666")
         if not tdump.stop():
             raise base.TestCasePreparationError()
         resp = tdump.get_result()
     captured = ""
     for line in resp.splitlines():
         if line.endswith(" captured"):
             captured = line
             break
     tokens = captured.split()
     packets = int(tokens[0])
     self.assertEqual(0, packets)
Example #18
    def test_verify_created_server_ephemeral_disk(self):
        """Verify that the ephemeral disk is created when creating server"""
        flavor_base = self.flavors_client.show_flavor(
            self.flavor_ref)['flavor']

        def create_flavor_with_ephemeral(ephem_disk):
            name = 'flavor_with_ephemeral_%s' % ephem_disk
            flavor_name = data_utils.rand_name(name)

            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']

            # Create a flavor with ephemeral disk
            flavor = self.create_flavor(name=flavor_name,
                                        ram=ram,
                                        vcpus=vcpus,
                                        disk=disk,
                                        ephemeral=ephem_disk)

            # Set extra specs same as self.flavor_ref for the created flavor,
            # because the environment may need some special extra specs to
            # create server which should have been contained in
            # self.flavor_ref.
            extra_spec_keys = \
                self.admin_flavors_client.list_flavor_extra_specs(
                    self.flavor_ref)['extra_specs']
            if extra_spec_keys:
                self.admin_flavors_client.set_flavor_extra_spec(
                    flavor['id'], **extra_spec_keys)

            return flavor['id']

        flavor_with_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=1)
        flavor_no_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=0)

        admin_pass = self.image_ssh_password

        validation_resources = self.get_test_validation_resources(
            self.os_primary)
        server_no_eph_disk = self.create_test_server(
            validatable=True,
            validation_resources=validation_resources,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id)

        self.addCleanup(waiters.wait_for_server_termination,
                        self.servers_client, server_no_eph_disk['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.servers_client.delete_server,
                        server_no_eph_disk['id'])

        # Get partition number of server without ephemeral disk.
        server_no_eph_disk = self.client.show_server(
            server_no_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_no_eph_disk, validation_resources),
            self.ssh_user,
            admin_pass,
            validation_resources['keypair']['private_key'],
            server=server_no_eph_disk,
            servers_client=self.client)
        disks_num = len(linux_client.get_disks().split('\n'))

        # Explicit server deletion necessary for Juno compatibility
        self.client.delete_server(server_no_eph_disk['id'])

        server_with_eph_disk = self.create_test_server(
            validatable=True,
            validation_resources=validation_resources,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id)

        self.addCleanup(waiters.wait_for_server_termination,
                        self.servers_client, server_with_eph_disk['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.servers_client.delete_server,
                        server_with_eph_disk['id'])

        server_with_eph_disk = self.client.show_server(
            server_with_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_with_eph_disk, validation_resources),
            self.ssh_user,
            admin_pass,
            validation_resources['keypair']['private_key'],
            server=server_with_eph_disk,
            servers_client=self.client)
        disks_num_eph = len(linux_client.get_disks().split('\n'))
        self.assertEqual(disks_num + 1, disks_num_eph)
Example #19
    def test_attach_detach_volume(self):
        """Test attaching and detaching volume from server

        Stop and Start a server with an attached volume, ensuring that
        the volume remains attached.
        """
        server, validation_resources = self._create_server()

        # NOTE(andreaf) Create one remote client used throughout the test.
        if CONF.validation.run_validation:
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server, validation_resources),
                self.image_ssh_user,
                self.image_ssh_password,
                validation_resources['keypair']['private_key'],
                server=server,
                servers_client=self.servers_client)
            # NOTE(andreaf) We need to ensure the ssh key has been
            # injected in the guest before we power cycle
            linux_client.validate_authentication()
            disks_before_attach = linux_client.list_disks()

        volume = self.create_volume()

        # NOTE: As of the 12.0.0 Liberty release, the Nova libvirt driver
        # no longer honors a user-supplied device name, and there can be a
        # mismatch between the disk name libvirt reports and the actual disk
        # name on the instance. Hence we no longer validate against the
        # supplied device name; instead we count the number of disks before
        # and after the attach/detach to validate the test case.

        attachment = self.attach_volume(server, volume)

        self.servers_client.stop_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'SHUTOFF')

        self.servers_client.start_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')

        if CONF.validation.run_validation:
            disks_after_attach = linux_client.list_disks()
            self.assertGreater(len(disks_after_attach),
                               len(disks_before_attach))

        self.servers_client.detach_volume(server['id'], attachment['volumeId'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                attachment['volumeId'],
                                                'available')

        self.servers_client.stop_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'SHUTOFF')

        self.servers_client.start_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')

        if CONF.validation.run_validation:
            disks_after_detach = linux_client.list_disks()
            self.assertEqual(len(disks_before_attach), len(disks_after_detach))
Example #20
    def test_verify_created_server_ephemeral_disk(self):
        # Verify that the ephemeral disk is created when creating server
        flavor_base = self.flavors_client.show_flavor(
            self.flavor_ref)['flavor']

        def create_flavor_with_ephemeral(ephem_disk):
            name = 'flavor_with_ephemeral_%s' % ephem_disk
            flavor_name = data_utils.rand_name(name)

            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']

            # Create a flavor with ephemeral disk
            flavor = self.create_flavor(name=flavor_name,
                                        ram=ram,
                                        vcpus=vcpus,
                                        disk=disk,
                                        ephemeral=ephem_disk)
            return flavor['id']

        flavor_with_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=1)
        flavor_no_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=0)

        admin_pass = self.image_ssh_password

        server_no_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id)

        # Get partition number of server without ephemeral disk.
        server_no_eph_disk = self.client.show_server(
            server_no_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_no_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'],
            server=server_no_eph_disk,
            servers_client=self.client)
        disks_num = len(linux_client.get_disks().split('\n'))

        # Explicit server deletion necessary for Juno compatibility
        self.client.delete_server(server_no_eph_disk['id'])

        server_with_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id)

        server_with_eph_disk = self.client.show_server(
            server_with_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_with_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'],
            server=server_with_eph_disk,
            servers_client=self.client)
        disks_num_eph = len(linux_client.get_disks().split('\n'))
        self.assertEqual(disks_num + 1, disks_num_eph)
Example #21
    def test_verify_created_server_ephemeral_disk(self):
        # Verify that the ephemeral disk is created when creating server

        def create_flavor_with_extra_specs():
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = 64
            vcpus = 1
            disk = 0

            # Create a flavor with extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_with_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_with_eph_disk_id,
                                    ephemeral=1))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])

            return flavor['id']

        def create_flavor_without_extra_specs():
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)

            ram = 64
            vcpus = 1
            disk = 0

            # Create a flavor without extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_no_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_no_eph_disk_id))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])

            return flavor['id']

        def flavor_clean_up(flavor_id):
            self.flavor_client.delete_flavor(flavor_id)
            self.flavor_client.wait_for_resource_deletion(flavor_id)

        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        flavor_no_eph_disk_id = create_flavor_without_extra_specs()

        admin_pass = self.image_ssh_password

        server_no_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id)

        # Get partition number of server without extra specs.
        server_no_eph_disk = self.client.show_server(
            server_no_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_no_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'])
        partition_num = len(linux_client.get_partitions().split('\n'))

        # Explicit server deletion necessary for Juno compatibility
        self.client.delete_server(server_no_eph_disk['id'])

        server_with_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id)

        server_with_eph_disk = self.client.show_server(
            server_with_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_with_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'])
        partition_num_emph = len(linux_client.get_partitions().split('\n'))
        self.assertEqual(partition_num + 1, partition_num_emph)
Example #22
    def test_verify_created_server_ephemeral_disk(self):
        # Verify that the ephemeral disk is created when creating server

        def create_flavor_with_extra_specs():
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = 64
            vcpus = 1
            disk = 0

            # Create a flavor with extra specs
            resp, flavor = (self.flavor_client.create_flavor(
                flavor_with_eph_disk_name,
                ram,
                vcpus,
                disk,
                flavor_with_eph_disk_id,
                ephemeral=1))
            self.addCleanup(flavor_clean_up, flavor['id'])
            self.assertEqual(200, resp.status)

            return flavor['id']

        def create_flavor_without_extra_specs():
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)

            ram = 64
            vcpus = 1
            disk = 0

            # Create a flavor without extra specs
            resp, flavor = (self.flavor_client.create_flavor(
                flavor_no_eph_disk_name, ram, vcpus, disk,
                flavor_no_eph_disk_id))
            self.addCleanup(flavor_clean_up, flavor['id'])
            self.assertEqual(200, resp.status)

            return flavor['id']

        def flavor_clean_up(flavor_id):
            resp, body = self.flavor_client.delete_flavor(flavor_id)
            self.assertEqual(resp.status, 202)
            self.flavor_client.wait_for_resource_deletion(flavor_id)

        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        flavor_no_eph_disk_id = create_flavor_without_extra_specs()

        admin_pass = self.image_ssh_password

        resp, server_no_eph_disk = (self.create_test_server(
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id))
        resp, server_with_eph_disk = (self.create_test_server(
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id))
        # Get partition number of server without extra specs.
        _, server_no_eph_disk = self.client.get_server(
            server_no_eph_disk['id'])
        linux_client = remote_client.RemoteClient(server_no_eph_disk,
                                                  self.ssh_user, admin_pass)
        partition_num = len(linux_client.get_partitions().split('\n'))

        _, server_with_eph_disk = self.client.get_server(
            server_with_eph_disk['id'])
        linux_client = remote_client.RemoteClient(server_with_eph_disk,
                                                  self.ssh_user, admin_pass)
        partition_num_emph = len(linux_client.get_partitions().split('\n'))
        self.assertEqual(partition_num + 1, partition_num_emph)
Example #23
 def test_host_name_is_same_as_server_name(self):
     # Verify the instance host name is the same as the server name
     linux_client = remote_client.RemoteClient(self.server, self.ssh_user,
                                               self.password)
     self.assertTrue(linux_client.hostname_equals_servername(self.name))
Example #24
 def dhcp_121_hostroutes_clear(self, vm_env):
     # Fetch next hop information from tempest.conf
     next_hop = CONF.network.project_network_cidr
     self.nexthop_host_route = next_hop.rsplit('.', 1)[0]
     # Floating-ip of VM
     self.serv_fip = vm_env['fip1']['floating_ip_address']
     username, password = self.get_image_userpass()
     subnet_id = vm_env['subnet']['id']
     subnet_info = self.subnets_client.show_subnet(subnet_id)
     self.nexthop1 = subnet_info['subnet']['gateway_ip']
     # Update subnet with host routes
     public_net_cidr = CONF.network.public_network_cidr
     _subnet_data = {
         'host_routes': [{
             'destination': '10.20.0.0/32',
             'nexthop': '10.100.1.1'
         }],
         'new_host_routes': [{
             'destination': public_net_cidr,
             'nexthop': self.nexthop1
         }]
     }
     new_host_routes = _subnet_data['new_host_routes']
     kwargs = {'host_routes': new_host_routes}
     new_name = "New_subnet"
     # Update subnet with host-route info
     self.subnets_client.update_subnet(subnet_id, name=new_name, **kwargs)
     # Connect to the launched instance using the ssh library
     client = remote_client.RemoteClient(self.serv_fip,
                                         username=username,
                                         password=password)
     # Execute the route command on the launched instance
     fixed_ip = vm_env['fip1']['fixed_ip_address']
     client._renew_lease_udhcpc(fixed_ip)
     cmd = ('/sbin/route -n')
     out_data = client.exec_command(cmd)
     self.assertIn(_subnet_data['new_host_routes'][0]['nexthop'], out_data)
     self.assertIn(self.nexthop_host_route, out_data)
     LOG.info("Host routes available on vm")
     # Check the host route info at the backend
     exc_edge = self.vsm.get_dhcp_edge_info()
     self.assertIsNotNone(exc_edge)
     # Fetch host-route and metadata info from nsx-v
     dhcp_options_info = {}
     dhcp_options_info = exc_edge['staticBindings']['staticBindings'][0][
         'dhcpOptions']['option121']['staticRoutes']
     # Check that the host route information is available at the backend
     for destination_net in dhcp_options_info:
         dest = _subnet_data['new_host_routes'][0]['destination']
         dest_subnet = destination_net['destinationSubnet']
         dest_router = destination_net['router']
         if (dest in dest_subnet and self.nexthop1 in dest_router):
             LOG.info("Host routes available on nsxv")
     # Update subnet with no host-routes
     _subnet_data1 = {'new_host_routes': []}
     new_host_routes = _subnet_data1['new_host_routes']
     kwargs = {'host_routes': new_host_routes}
     new_name = "New_subnet"
     self.subnets_client.update_subnet(subnet_id, name=new_name, **kwargs)
     # Execute the route command on the launched instance
     fixed_ip = vm_env['fip1']['fixed_ip_address']
     client._renew_lease_udhcpc(fixed_ip)
     cmd = ('/sbin/route -n')
     out_data = client.exec_command(cmd)
     self.assertIsNotNone(out_data)
     # Host routes should no longer be available on the VM
     self.assertNotIn(_subnet_data['new_host_routes'][0]['destination'],
                      out_data)
     # Check the host routes at the backend after deletion
     exc_edge = self.vsm.get_dhcp_edge_info()
     self.assertIsNotNone(exc_edge)
     dhcp_options_info = []
     dhcp_options_info = exc_edge['staticBindings']['staticBindings'][0][
         'dhcpOptions']['option121']['staticRoutes']
     # Check that the host route information is available at the backend
     for destination_net in dhcp_options_info:
         if (_subnet_data['new_host_routes'][0]['destination']
                 not in destination_net['destinationSubnet']):
             LOG.info("Host routes not available on nsxv")
     project_dict = dict(security_group=vm_env['security_group'],
                         network=vm_env['network'],
                         subnet=vm_env['subnet'],
                         router=vm_env['router'],
                         client_mgr=vm_env['client_mgr'],
                         serv1=vm_env['serv1'],
                         fip1=vm_env['fip1'])
     return project_dict
Example #25
    def test_compute_with_volumes(self):
        # EC2 1. integration test (not strict)
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup-")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(
            sec_group_name, group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        self.assertTrue(
            self.ec2_client.authorize_security_group(sec_group_name,
                                                     ip_protocol="icmp",
                                                     cidr_ip="0.0.0.0/0",
                                                     from_port=-1,
                                                     to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(sec_group_name,
                                                     ip_protocol="tcp",
                                                     cidr_ip="0.0.0.0/0",
                                                     from_port=22,
                                                     to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name, ))

        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)

        self.addResourceCleanUp(self.destroy_reservation, reservation)
        volume = self.ec2_client.create_volume(1, self.zone)
        LOG.debug("Volume created - status: %s", volume.status)

        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may report available before it is actually available

        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output -")
        resp = ssh.write_to_console(text)
        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        wait.re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status

        wait.re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently, so here we just test whether the partition number
        # increased or decreased

        def _part_state():
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            if current > part_lines:
                return 'INCREASE'
            if current < part_lines:
                return 'DECREASE'
            return 'EQUAL'

        wait.state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(volume, "available")

        wait.state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
Example #26
 def test_host_routes_between_vms(self):
     client_mgr = self.manager
     next_hop = CONF.network.project_network_cidr
     ip = next_hop.rsplit('/', 1)[0]
     ip2int = lambda ipstr: struct.unpack('!I', socket.inet_aton(ipstr))[0]
     ss = (ip2int(ip))
     int2ip = lambda n: socket.inet_ntoa(struct.pack('!I', n))
     new_network_cidr = (int2ip(ss + 256))
     net_mask = str(CONF.network.project_network_mask_bits)
     new_network_cidr = new_network_cidr + '/' + net_mask
     cidr = netaddr.IPNetwork(new_network_cidr)
     self.green = self.setup_vm_enviornment(self.manager, 'green', True)
     network, subnet =\
         self.create_project_network_subnet_with_cidr('dhcp121-tenant',
                                                      cidr=cidr)
     net_id = network['id']
     # Create Port
     port = self.create_port(net_id)
     HELO.router_add_port_interface(self,
                                    net_router=self.green['router'],
                                    net_port=port,
                                    client_mgr=client_mgr)
     t_security_group = self._create_security_group(
         security_groups_client=self.security_groups_client,
         security_group_rules_client=self.security_group_rules_client,
         namestart='adm')
     username, password = self.get_image_userpass()
     security_groups = [{'name': t_security_group['name']}]
     _subnet_data = {
         'host_routes': [{
             'destination': '10.20.0.0/32',
             'nexthop': '10.100.1.1'
         }],
         'new_host_routes': [{
             'destination': CONF.network.public_network_cidr,
             'nexthop': port['fixed_ips'][0]['ip_address']
         }]
     }
     subnet_client = client_mgr.subnets_client
     subnet_id = subnet['id']
     new_name = "New_subnet"
     new_host_routes = _subnet_data['new_host_routes']
     kwargs = {'host_routes': new_host_routes}
     # Update subnet with host-route info
     subnet_client.update_subnet(subnet_id, name=new_name, **kwargs)
     # Launch the destination vm
     t_serv2 = self.create_server_on_network(
         network,
         security_groups,
         image=self.get_server_image(),
         flavor=self.get_server_flavor(),
         name=network['name'])
     self.check_server_connected(t_serv2)
     time.sleep(dmgr.WAITTIME_FOR_CONNECTIVITY)
     # Connect to the launched instance using the ssh library
     self.serv_fip = self.green['fip1']['floating_ip_address']
     username, password = self.get_image_userpass()
     client = remote_client.RemoteClient(self.serv_fip,
                                         username=username,
                                         password=password)
     network_name = network['name']
     dest_ip = t_serv2['addresses'][network_name][0]['addr']
     # Ping dest vm from source vm
     cmd = ('ping %s -c 3' % dest_ip)
     out_data = client.exec_command(cmd)
     desired_output = "64 bytes from %s" % dest_ip
     self.assertIn(desired_output, out_data)
Example #27
    def _test_dscp_rule(self, vm_env, dscp_value):
        """To verify if traffic is being marked according to dscp_value"""
        src_client = remote_client.RemoteClient(
            vm_env['src_public_ip'], username='******', password='******')
        dst_client = remote_client.RemoteClient(
            vm_env['dst_public_ip'], username='******', password='******')
        dscp_filename = 'dscp_' + str(dscp_value) + '.pcap'
        # To capture packets from eth0
        cmd = ('nohup tcpdump -ni eth0 -w %s > /dev/null 2>&1 &'
               % dscp_filename)
        dst_client.exec_command(cmd)
        # Iperf server on destination VM
        cmd = ('iperf -p 49162 -s -u > /dev/null 2>&1 &')
        dst_client.exec_command(cmd)
        # Iperf client on source VM
        cmd = ('iperf -p 49162 -c %s -b 1M -t 1 -u | grep %%'
               % (unicode(vm_env['dst_private_ip'])))
        output = src_client.exec_command(cmd)
        loss_prcnt = output.split()[13].strip('()%')
        loss_val = float(loss_prcnt) if '.' in loss_prcnt else int(loss_prcnt)
        if (loss_val > 50.0):
            raise Exception('Huge packet loss at the destination VM')
        # Kill iperf process on destination VM
        cmd = ('ps -ef | grep iperf ')
        output = dst_client.exec_command(cmd)
        for line in output.splitlines():
            if 'iperf -p 49162 -s -u' not in line:
                continue
            else:
                iperf_process_id = line.split()[1]
                cmd = ('kill %s' % (unicode(iperf_process_id)))
                dst_client.exec_command(cmd)
        # kill tcpdump process on destination VM
        cmd = ('ps -ef | grep tcpdump')
        output = dst_client.exec_command(cmd)
        for line in output.splitlines():
            if 'tcpdump -ni eth0 -w' not in line:
                continue
            else:
                tcpdump_process_id = line.split()[1]
                cmd = ('kill %s' % (unicode(tcpdump_process_id)))
                dst_client.exec_command(cmd)
        # To copy pcap (packet capture) file from destination VM to external VM
        cmd = ('sshpass -p  \"nicira\" scp -o StrictHostKeyChecking=no'
               ' root@%s:/root/%s .'
               % (unicode(vm_env['dst_public_ip']), unicode(dscp_filename)))
        try:
            subprocess.check_call(cmd, shell=True, executable='/bin/bash',
                                  stderr=subprocess.STDOUT)
        except Exception as e:
            message = ('Failed to copy file from VM.'
                       'Error: %(error)s' % {'error': e})
            LOG.exception(message)
            raise

        """Check the entire file to see if any UDP packets are sent without configured
        dscp value.Example capture all UDP packets with DSCP value !=12"""

        filter_string = (
            'ip.dsfield.dscp != %s && udp.dstport == 49162 '
            '&& ip.src == %s && ip.dst == %s' %
            (str(dscp_value), (unicode(
                vm_env['src_private_ip'])), (unicode(
                    vm_env['dst_private_ip']))))
        capture = pyshark.FileCapture(dscp_filename,
                                      display_filter=filter_string)
        # capture file includes all packets that match the filter criteria
        if len(capture) > 0:
            raise Exception('Traffic is being marked with incorrect DSCP')
Example #28
 def test_host_name_is_same_as_server_name(self):
     # Verify the instance host name is the same as the server name
     linux_client = remote_client.RemoteClient(
         self.get_server_ip(self.server), self.ssh_user, self.password,
         self.validation_resources['keypair']['private_key'])
     self.assertTrue(linux_client.hostname_equals_servername(self.name))
Example #29
    def test_device_tagging(self):
        # Create volumes
        # The create_volume method waits for the volumes to be available and
        # the base class will clean them up on tearDown.
        boot_volume = self.create_volume(CONF.compute.image_ref)
        other_volume = self.create_volume()
        untagged_volume = self.create_volume()

        # Create networks
        net1 = self.networks_client.create_network(
            name=data_utils.rand_name('device-tagging-net1'))['network']
        self.addCleanup(self.networks_client.delete_network, net1['id'])

        net2 = self.networks_client.create_network(
            name=data_utils.rand_name('device-tagging-net2'))['network']
        self.addCleanup(self.networks_client.delete_network, net2['id'])

        # Create subnets
        subnet1 = self.subnets_client.create_subnet(network_id=net1['id'],
                                                    cidr='10.1.1.0/24',
                                                    ip_version=4)['subnet']
        self.addCleanup(self.subnets_client.delete_subnet, subnet1['id'])

        subnet2 = self.subnets_client.create_subnet(network_id=net2['id'],
                                                    cidr='10.2.2.0/24',
                                                    ip_version=4)['subnet']
        self.addCleanup(self.subnets_client.delete_subnet, subnet2['id'])

        # Create ports
        self.port1 = self.ports_client.create_port(network_id=net1['id'],
                                                   fixed_ips=[{
                                                       'subnet_id':
                                                       subnet1['id']
                                                   }])['port']
        self.addCleanup(self.ports_client.delete_port, self.port1['id'])

        self.port2 = self.ports_client.create_port(network_id=net1['id'],
                                                   fixed_ips=[{
                                                       'subnet_id':
                                                       subnet1['id']
                                                   }])['port']
        self.addCleanup(self.ports_client.delete_port, self.port2['id'])

        # Create server
        admin_pass = data_utils.rand_password()
        config_drive_enabled = CONF.compute_feature_enabled.config_drive

        server = self.create_test_server(
            validatable=True,
            config_drive=config_drive_enabled,
            adminPass=admin_pass,
            name=data_utils.rand_name('device-tagging-server'),
            networks=[
                # Validation network for ssh
                {
                    'uuid': self.get_tenant_network()['id']
                },
                # Different tags for different ports
                {
                    'port': self.port1['id'],
                    'tag': 'port-1'
                },
                {
                    'port': self.port2['id'],
                    'tag': 'port-2'
                },
                # Two nics on same net, one tagged one not
                {
                    'uuid': net1['id'],
                    'tag': 'net-1'
                },
                {
                    'uuid': net1['id']
                },
                # Two nics on same net, different IP
                {
                    'uuid': net2['id'],
                    'fixed_ip': '10.2.2.100',
                    'tag': 'net-2-100'
                },
                {
                    'uuid': net2['id'],
                    'fixed_ip': '10.2.2.200',
                    'tag': 'net-2-200'
                }
            ],
            block_device_mapping_v2=[
                # Boot volume
                {
                    'uuid': boot_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 0,
                    'tag': 'boot'
                },
                # Other volume
                {
                    'uuid': other_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 1,
                    'tag': 'other'
                },
                # Untagged volume
                {
                    'uuid': untagged_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 2
                }
            ])

        self.addCleanup(self.delete_server, server['id'])

        self.ssh_client = remote_client.RemoteClient(
            self.get_server_ip(server),
            CONF.validation.image_ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.servers_client)

        # Find the MAC addresses of our fixed IPs
        self.net_2_100_mac = None
        self.net_2_200_mac = None
        ifaces = self.interfaces_client.list_interfaces(server['id'])
        for iface in ifaces['interfaceAttachments']:
            if 'fixed_ips' in iface:
                for ip in iface['fixed_ips']:
                    if ip['ip_address'] == '10.2.2.100':
                        self.net_2_100_mac = iface['mac_addr']
                    if ip['ip_address'] == '10.2.2.200':
                        self.net_2_200_mac = iface['mac_addr']
        # Make sure we have the MACs we need, there's no reason for some to be
        # missing
        self.assertTrue(self.net_2_100_mac)
        self.assertTrue(self.net_2_200_mac)

        # Verify metadata from metadata service
        if CONF.compute_feature_enabled.metadata_service:
            md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
            LOG.info(
                'Attempting to verify tagged devices in server %s via '
                'the metadata service: %s', server['id'], md_url)

            def get_and_verify_metadata():
                try:
                    self.ssh_client.exec_command('curl -V')
                except exceptions.SSHExecCommandFailed:
                    if not CONF.compute_feature_enabled.config_drive:
                        raise self.skipException('curl not found in guest '
                                                 'and config drive is '
                                                 'disabled')
                    LOG.warning('curl was not found in the guest, device '
                                'tagging metadata was not checked in the '
                                'metadata API')
                    return True
                cmd = 'curl %s' % md_url
                md_json = self.ssh_client.exec_command(cmd)
                self.verify_device_metadata(md_json)
                return True

            if not test_utils.call_until_true(get_and_verify_metadata,
                                              CONF.compute.build_timeout,
                                              CONF.compute.build_interval):
                raise exceptions.TimeoutException('Timeout while verifying '
                                                  'metadata on server.')

        # Verify metadata on config drive
        if CONF.compute_feature_enabled.config_drive:
            cmd_blkid = 'blkid -t LABEL=config-2 -o device'
            LOG.info(
                'Attempting to verify tagged devices in server %s via '
                'the config drive.', server['id'])
            dev_name = self.ssh_client.exec_command(cmd_blkid)
            dev_name = dev_name.rstrip()
            try:
                self.ssh_client.exec_command('sudo mount %s /mnt' % dev_name)
            except exceptions.SSHExecCommandFailed:
                # So the command failed, let's try to know why and print some
                # useful information.
                lsblk = self.ssh_client.exec_command('sudo lsblk --fs --ascii')
                LOG.error(
                    "Mounting %s on /mnt failed. Right after the "
                    "failure 'lsblk' in the guest reported:\n%s", dev_name,
                    lsblk)
                raise

            cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
            md_json = self.ssh_client.exec_command(cmd_md)
            self.verify_device_metadata(md_json)
Example #30
def get_remote_client_by_password(client_ip, username, password):
    ssh_client = remote_client.RemoteClient(client_ip, username, password)
    return ssh_client
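A hedged usage sketch of the helper above; the IP address, username, and password are placeholders, not values from any example:

    # Placeholder connection details.
    client = get_remote_client_by_password('203.0.113.10', 'cirros', 'gocubsgo')
    client.validate_authentication()
    print(client.exec_command('uname -a').rstrip())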