Example #1
def wait_for_volume_migration(client, volume_id, new_host):
    """Waits for a Volume to move to a new host."""
    body = client.show_volume(volume_id)['volume']
    host = body['os-vol-host-attr:host']
    migration_status = body['migration_status']
    start = int(time.time())

    # new_host is hostname@backend while current_host is hostname@backend#type
    while migration_status != 'success' or new_host not in host:
        time.sleep(client.build_interval)
        body = client.show_volume(volume_id)['volume']
        host = body['os-vol-host-attr:host']
        migration_status = body['migration_status']

        if migration_status == 'error':
            message = ('Volume %s failed to migrate.' % volume_id)
            raise lib_exc.TempestException(message)

        if int(time.time()) - start >= client.build_timeout:
            message = ('Volume %s failed to migrate to %s (current %s) '
                       'within the required time (%s s).' %
                       (volume_id, new_host, host, client.build_timeout))
            raise lib_exc.TimeoutException(message)
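
All of the waiters in this listing follow the same poll/sleep/timeout skeleton. A minimal distilled sketch of that shared pattern (not part of any original example; the exception class mirrors the tempest-style lib_exc used above):

import time

from tempest.lib import exceptions as lib_exc


def wait_for_condition(check, interval, timeout):
    """Poll check() until it returns True or `timeout` seconds elapse."""
    start = int(time.time())
    while not check():
        if int(time.time()) - start >= timeout:
            raise lib_exc.TimeoutException(
                'Condition not met within the required time (%s s).' %
                timeout)
        time.sleep(interval)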
Example #2
    def wait_execution(self, ex_body, timeout=180, url='executions',
                       target_state='SUCCESS'):
        start_time = time.time()

        expected_states = [target_state, 'RUNNING']

        while ex_body['state'] != target_state:
            if time.time() - start_time > timeout:
                msg = ("Execution exceeds timeout {0} "
                       "to change state to {1}. "
                       "Execution: {2}".format(timeout, target_state, ex_body))
                raise exceptions.TimeoutException(msg)

            _, ex_body = self.get_object(url, ex_body['id'])

            if ex_body['state'] not in expected_states:
                msg = ("Execution state %s is not in expected "
                       "states: %s" % (ex_body['state'], expected_states))
                raise exceptions.TempestException(msg)

            time.sleep(1)

        return ex_body
Example #3
def wait_for_interface_status(client, server_id, port_id, status):
    """Waits for an interface to reach a given status."""
    body = (client.show_interface(server_id, port_id)
            ['interfaceAttachment'])
    interface_status = body['port_state']
    start = int(time.time())

    while interface_status != status:
        time.sleep(client.build_interval)
        body = (client.show_interface(server_id, port_id)
                ['interfaceAttachment'])
        interface_status = body['port_state']

        timed_out = int(time.time()) - start >= client.build_timeout

        if interface_status != status and timed_out:
            message = ('Interface %s failed to reach %s status '
                       '(current %s) within the required time (%s s).' %
                       (port_id, status, interface_status,
                        client.build_timeout))
            raise lib_exc.TimeoutException(message)

    return body
Example #4
    def test_nova_datasource_driver_flavors(self):
        @helper.retry_on_exception
        def _check_data_table_nova_flavors():
            # Fetch data from nova each time, because this test may start
            # before nova has all the flavors.
            flavors = self.flavors_client.list_flavors(detail=True)
            flavor_id_map = {}
            for flavor in flavors['flavors']:
                flavor_id_map[flavor['id']] = flavor

            results = (self.os_admin.congress_client.list_datasource_rows(
                self.datasource_id, 'flavors'))
            # TODO(alexsyip): Not sure what the following OS-FLV-EXT-DATA:
            # prefix is for.
            keys = [
                'id', 'name', 'vcpus', 'ram', 'disk',
                'OS-FLV-EXT-DATA:ephemeral', 'rxtx_factor'
            ]
            for row in results['results']:
                match = True
                try:
                    flavor_row = flavor_id_map[row['data'][0]]
                except KeyError:
                    return False
                for index in range(len(keys)):
                    if row['data'][index] != flavor_row[keys[index]]:
                        match = False
                        break
                if match:
                    return True
            return False

        if not test_utils.call_until_true(func=_check_data_table_nova_flavors,
                                          duration=100,
                                          sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Example #5
def wait_for_volume_attachment_remove_from_server(client, server_id,
                                                  volume_id):
    """Waits for a volume to be removed from a given server.

    This waiter checks the compute API if the volume attachment is removed.
    """
    start = int(time.time())

    try:
        volumes = client.list_volume_attachments(
            server_id)['volumeAttachments']
    except lib_exc.NotFound:
        # Ignore 404s on detach in case the server is deleted or the volume
        # is already detached.
        return

    while any(volume for volume in volumes if volume['volumeId'] == volume_id):
        time.sleep(client.build_interval)

        timed_out = int(time.time()) - start >= client.build_timeout
        if timed_out:
            console_output = client.get_console_output(server_id)['output']
            LOG.debug('Console output for %s\nbody=\n%s', server_id,
                      console_output)
            message = ('Volume %s failed to detach from server %s within '
                       'the required time (%s s) from the compute API '
                       'perspective' %
                       (volume_id, server_id, client.build_timeout))
            raise lib_exc.TimeoutException(message)
        try:
            volumes = client.list_volume_attachments(
                server_id)['volumeAttachments']
        except lib_exc.NotFound:
            # Ignore 404s on detach in case the server is deleted or the volume
            # is already detached.
            return
Example #6
    def wait_for_server_volume_swap(self, server_id, old_volume_id,
                                    new_volume_id):
        """Waits for a server to swap the old volume to a new one."""
        volume_attachments = self.servers_client.list_volume_attachments(
            server_id)['volumeAttachments']
        attached_volume_ids = [
            attachment['volumeId'] for attachment in volume_attachments
        ]
        start = int(time.time())

        while (old_volume_id in attached_volume_ids) \
                or (new_volume_id not in attached_volume_ids):
            time.sleep(self.servers_client.build_interval)
            volume_attachments = self.servers_client.list_volume_attachments(
                server_id)['volumeAttachments']
            attached_volume_ids = [
                attachment['volumeId'] for attachment in volume_attachments
            ]

            if int(time.time()) - start >= self.servers_client.build_timeout:
                old_vol_bdm_status = 'in BDM' \
                    if old_volume_id in attached_volume_ids else 'not in BDM'
                new_vol_bdm_status = 'in BDM' \
                    if new_volume_id in attached_volume_ids else 'not in BDM'
                message = ('Failed to swap old volume %(old_volume_id)s '
                           '(current %(old_vol_bdm_status)s) to new volume '
                           '%(new_volume_id)s (current %(new_vol_bdm_status)s)'
                           ' on server %(server_id)s within the required time '
                           '(%(timeout)s s)' % {
                               'old_volume_id': old_volume_id,
                               'old_vol_bdm_status': old_vol_bdm_status,
                               'new_volume_id': new_volume_id,
                               'new_vol_bdm_status': new_vol_bdm_status,
                               'server_id': server_id,
                               'timeout': self.servers_client.build_timeout
                           })
                raise lib_exc.TimeoutException(message)
Example #7
    def share_shrink_retry_until_success(self,
                                         share_id,
                                         share_size,
                                         status_attr='status'):
        """Try share reset, followed by shrink, until timeout"""

        check_interval = CONF.share.build_interval * 2
        body = self.shares_v2_client.get_share(share_id)
        share_status = body[status_attr]
        start = int(time.time())

        while share_status != constants.STATUS_AVAILABLE:
            if share_status != constants.STATUS_SHRINKING:
                self.shares_admin_v2_client.reset_state(
                    share_id, status=constants.STATUS_AVAILABLE)
                try:
                    self.shares_v2_client.shrink_share(share_id,
                                                       new_size=share_size)
                except exceptions.BadRequest as e:
                    if ('New size for shrink must be less '
                            'than current size') in six.text_type(e):
                        break
            time.sleep(check_interval)
            body = self.shares_v2_client.get_share(share_id)
            share_status = body[status_attr]
            if share_status == constants.STATUS_AVAILABLE:
                return

            if int(time.time()) - start >= CONF.share.build_timeout:
                message = ("Share's %(status_attr)s failed to transition to "
                           "%(status)s within the required time %(seconds)s." %
                           {
                               "status_attr": status_attr,
                               "status": constants.STATUS_AVAILABLE,
                               "seconds": CONF.share.build_timeout
                           })
                raise exceptions.TimeoutException(message)
Example #8
    def _wait_for_http_service(self, check_ip, port=80):
        def try_connect(check_ip, port):
            try:
                LOG.info(('checking connection to ip: {0} port: {1}'.format(
                    check_ip, port)))
                resp = urllib2.urlopen("http://{0}:{1}/".format(check_ip,
                                                                port))
                if resp.getcode() == 200:
                    return True
                return False
            except error.HTTPError as e:
                # HTTPError is a subclass of IOError, so it must be caught
                # first; otherwise this handler is unreachable.
                LOG.info(('Got HTTPError in check connection: {0}'.format(e)))
                return False
            except IOError as e:
                LOG.info(('Got IOError in check connection: {0}'.format(e)))
                return False

        timeout = config.validation.ping_timeout
        start = time.time()
        while not try_connect(check_ip, port):
            if (time.time() - start) > timeout:
                message = "Timed out trying to connect to %s" % check_ip
                raise lib_exc.TimeoutException(message)
            time.sleep(1)
Example #9
def wait_for_volume_attachment_remove_from_server(client, server_id,
                                                  volume_id):
    """Waits for a volume to be removed from a given server.

    This waiter checks the compute API if the volume attachment is removed.
    """
    start = int(time.time())
    volumes = client.list_volume_attachments(server_id)['volumeAttachments']

    while any(volume for volume in volumes if volume['volumeId'] == volume_id):
        time.sleep(client.build_interval)

        timed_out = int(time.time()) - start >= client.build_timeout
        if timed_out:
            message = ('Volume %s failed to detach from server %s within '
                       'the required time (%s s) from the compute API '
                       'perspective' %
                       (volume_id, server_id, client.build_timeout))
            raise lib_exc.TimeoutException(message)

        volumes = client.list_volume_attachments(
            server_id)['volumeAttachments']

    return volumes
Example #10
def wait_for_allocation(client,
                        allocation_ident,
                        timeout=15,
                        interval=1,
                        expect_error=False):
    """Wait for the allocation to become active.

    :param client: an instance of tempest plugin BaremetalClient.
    :param allocation_ident: UUID or name of the allocation.
    :param timeout: the timeout after which the allocation is considered as
        failed. Defaults to 15 seconds.
    :param interval: an interval between show_allocation calls.
        Defaults to 1 second.
    :param expect_error: if True, return successfully even in case of an error.
    """
    result = [None]  # a mutable object to modify in the closure

    def check():
        result[0] = client.show_allocation(allocation_ident)
        allocation = result[0][1]

        if allocation['state'] == 'error' and not expect_error:
            raise lib_exc.TempestException(
                "Allocation %(ident)s failed: %(error)s" % {
                    'ident': allocation_ident,
                    'error': allocation.get('last_error')
                })
        else:
            return allocation['state'] != 'allocating'

    if not test_utils.call_until_true(check, timeout, interval):
        msg = ('Timed out waiting for the allocation %s to become active' %
               allocation_ident)
        raise lib_exc.TimeoutException(msg)

    return result[0]
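
Several examples (4, 10, 17, 19, 20, 22, and 25) delegate their polling loops to test_utils.call_until_true. A sketch of its contract as used here, calling func until it returns True and giving up after `duration` seconds (the real tempest helper also forwards extra arguments and logs the timeout):

import time


def call_until_true(func, duration, sleep_for):
    """Call func() until it returns True or `duration` seconds elapse.

    Sleeps `sleep_for` seconds between attempts; returns False on timeout.
    """
    now = time.time()
    begin_time = now
    while now < begin_time + duration:
        if func():
            return True
        time.sleep(sleep_for)
        now = time.time()
    return False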
Example #11
def wait_for_image_tasks_status(client, image_id, status):
    """Waits for an image tasks to reach a given status."""
    pending_tasks = []
    start = int(time.time())
    while int(time.time()) - start < client.build_timeout:
        tasks = client.show_image_tasks(image_id)['tasks']

        pending_tasks = [task for task in tasks if task['status'] != status]
        if not pending_tasks:
            return tasks
        time.sleep(client.build_interval)

    message = ('Image %(image_id)s tasks: %(pending_tasks)s '
               'failed to reach %(status)s state within the required '
               'time (%(timeout)s s).' % {
                   'image_id': image_id,
                   'pending_tasks': pending_tasks,
                   'status': status,
                   'timeout': client.build_timeout
               })
    caller = test_utils.find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    raise lib_exc.TimeoutException(message)
Example #12
def wait_for_recordset_status(client, zone_id, recordset_id, status):
    """Waits for a recordset to reach the given status."""
    LOG.info('Waiting for recordset %s to reach %s',
             recordset_id, status)

    _, recordset = client.show_recordset(zone_id, recordset_id)
    start = int(time.time())

    while recordset['status'] != status:
        time.sleep(client.build_interval)
        _, recordset = client.show_recordset(zone_id, recordset_id)
        status_curr = recordset['status']
        if status_curr == status:
            LOG.info('Recordset %s reached %s', recordset_id, status)
            return

        if recordset['status'] == const.ERROR:
            raise exceptions.InvalidStatusError('Recordset', recordset_id,
                                                recordset['status'])

        if int(time.time()) - start >= client.build_timeout:
            message = ('Recordset %(recordset_id)s failed to reach '
                       'status=%(status)s within the required time '
                       '(%(timeout)s s). Current '
                       'status: %(status_curr)s' %
                       {'recordset_id': recordset_id,
                        'status': status,
                        'status_curr': status_curr,
                        'timeout': client.build_timeout})

            caller = test_utils.find_test_caller()

            if caller:
                message = '(%s) %s' % (caller, message)

            raise lib_exc.TimeoutException(message)
Example #13
    def _dump_flows_on_br_sec_for_icmp_type(self, vapp_ipadd, protocol, vlan,
                                            mac, icmp_type, net_id):
        HOST = self.vapp_username + "@" + vapp_ipadd
        time.sleep(self.build_interval)
        # Note: the spaces around the bridge name and before 'table=0' are
        # required to build a valid ovs-ofctl command line.
        if "vlan" == self.tenant_network_type:
            cmd = ('sudo ovs-ofctl dump-flows ' + self.br_inf + ' table=0' +
                   ',' + str(protocol) + ',dl_dst=' + str(mac) + ',dl_vlan=' +
                   str(vlan) + ',icmp_type=' + str(icmp_type))
        else:
            segment_id = self._fetch_segment_id_from_db(str(net_id))
            cmd = ('sudo ovs-ofctl dump-flows ' + self.br_inf + ' table=0' +
                   ',' + str(protocol) + ',dl_dst=' + str(mac) + ',dl_vlan=' +
                   str(segment_id) + ',icmp_type=' + str(icmp_type))
        ssh = subprocess.Popen(["ssh", "%s" % HOST, cmd],
                               shell=False,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        output = ssh.stdout.readlines()
        if not output[1:]:
            error = ssh.stderr.readlines()
            raise lib_exc.TimeoutException(error)
        for output_list in output[1:]:
            self.assertIn('icmp_type=' + str(icmp_type), output_list)
Example #14
    def test_extend_attached_volume(self):
        """This is a happy path test which does the following:

        * Create a volume at the configured volume_size.
        * Create a server instance.
        * Attach the volume to the server.
        * Wait for the volume status to be "in-use".
        * Extend the size of the volume and wait for the volume status to go
          back to "in-use".
        * Assert the volume size change is reflected in the volume API.
        * Wait for the "compute_extend_volume" instance action event to show
          up in the compute API with the success or failure status. We fail
          if we timeout waiting for the instance action event to show up, or
          if the action on the server fails.
        """
        # Create a test volume. Will be automatically cleaned up on teardown.
        volume = self.create_volume()
        # Create a test server. Will be automatically cleaned up on teardown.
        server = self.create_server()
        # Attach the volume to the server and wait for the volume status to be
        # "in-use".
        self.attach_volume(server['id'], volume['id'])
        # Extend the size of the volume. If this is successful, the volume API
        # will change the status on the volume to "extending" before doing an
        # RPC cast to the volume manager on the backend. Note that we multiply
        # the size of the volume since certain Cinder backends, e.g. ScaleIO,
        # require multiples of 8GB.
        extend_size = volume['size'] * 2
        self.volumes_client.extend_volume(volume['id'], new_size=extend_size)
        # The volume status should go back to in-use since it is still attached
        # to the server instance.
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'in-use')
        # Assert that the volume size has changed in the volume API.
        volume = self.volumes_client.show_volume(volume['id'])['volume']
        self.assertEqual(extend_size, volume['size'])
        # Now we wait for the "compute_extend_volume" instance action event
        # to show up for the server instance. This is our indication that the
        # asynchronous operation is complete on the compute side.
        start_time = int(time.time())
        timeout = self.servers_client.build_timeout
        action = self._find_extend_volume_instance_action(server['id'])
        while action is None and int(time.time()) - start_time < timeout:
            time.sleep(self.servers_client.build_interval)
            action = self._find_extend_volume_instance_action(server['id'])

        if action is None:
            msg = ("Timed out waiting to get 'extend_volume' instance action "
                   "record for server %(server)s after %(timeout)s seconds." %
                   {
                       'server': server['id'],
                       'timeout': timeout
                   })
            raise lib_exc.TimeoutException(msg)

        # Now that we found the extend_volume instance action, we can wait for
        # the compute_extend_volume instance action event to show up to
        # indicate the operation is complete.
        start_time = int(time.time())
        event = self._find_extend_volume_instance_action_finish_event(action)
        while event is None and int(time.time()) - start_time < timeout:
            time.sleep(self.servers_client.build_interval)
            event = self._find_extend_volume_instance_action_finish_event(
                action)

        if event is None:
            msg = ("Timed out waiting to get 'compute_extend_volume' instance "
                   "action event record for server %(server)s and request "
                   "%(request_id)s after %(timeout)s seconds." % {
                       'server': server['id'],
                       'request_id': action['request_id'],
                       'timeout': timeout
                   })
            raise lib_exc.TimeoutException(msg)

        # Finally, assert that the action completed successfully.
        self.assertTrue(
            event['result'].lower() == 'success',
            "Unexpected compute_extend_volume result '%(result)s' for request "
            "%(request_id)s." % {
                'result': event['result'],
                'request_id': action['request_id']
            })
Example #15
    def exec_command(self, cmd, encoding="utf-8"):
        """Execute the specified command on the server

        Note that this method is reading whole command outputs to memory, thus
        shouldn't be used for large outputs.

        :param str cmd: Command to run at remote server.
        :param str encoding: Encoding for result from paramiko.
                             Result will not be decoded if None.
        :returns: data read from standard output of the command.
        :raises: SSHExecCommandFailed if command returns nonzero
                 status. The exception contains command status stderr content.
        :raises: TimeoutException if cmd doesn't end when timeout expires.
        """
        ssh = self._get_ssh_connection()
        transport = ssh.get_transport()
        with transport.open_session() as channel:
            channel.fileno()  # Register event pipe
            channel.exec_command(cmd)
            channel.shutdown_write()

            # If the executing host is linux-based, poll the channel
            if self._can_system_poll():
                out_data_chunks = []
                err_data_chunks = []
                poll = select.poll()
                poll.register(channel, select.POLLIN)
                start_time = time.time()

                while True:
                    ready = poll.poll(self.channel_timeout)
                    if not any(ready):
                        if not self._is_timed_out(start_time):
                            continue
                        raise exceptions.TimeoutException(
                            "Command: '{0}' executed on host '{1}'.".format(
                                cmd, self.host))
                    if not ready[0]:  # If there is nothing to read.
                        continue
                    out_chunk = err_chunk = None
                    if channel.recv_ready():
                        out_chunk = channel.recv(self.buf_size)
                        out_data_chunks += out_chunk,
                    if channel.recv_stderr_ready():
                        err_chunk = channel.recv_stderr(self.buf_size)
                        err_data_chunks += err_chunk,
                    if not err_chunk and not out_chunk:
                        break
                out_data = b''.join(out_data_chunks)
                err_data = b''.join(err_data_chunks)
            # Just read from the channels
            else:
                out_file = channel.makefile('rb', self.buf_size)
                err_file = channel.makefile_stderr('rb', self.buf_size)
                out_data = out_file.read()
                err_data = err_file.read()
            if encoding:
                out_data = out_data.decode(encoding)
                err_data = err_data.decode(encoding)

            exit_status = channel.recv_exit_status()

        ssh.close()

        if 0 != exit_status:
            raise exceptions.SSHExecCommandFailed(command=cmd,
                                                  exit_status=exit_status,
                                                  stderr=err_data,
                                                  stdout=out_data)
        return out_data
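
Example 15 relies on two small helpers that are not shown: _can_system_poll, which selects between the poll-based and file-based read paths, and _is_timed_out, which enforces the channel timeout. Plausible sketches consistent with their use above (the self.timeout attribute name is an assumption):

import select
import time


class SSHClientHelpers(object):
    def _can_system_poll(self):
        # select.poll() is unavailable on some platforms (e.g. Windows),
        # hence the makefile()-based fallback in exec_command.
        return hasattr(select, 'poll')

    def _is_timed_out(self, start_time):
        # True once more than self.timeout seconds have elapsed since
        # start_time.
        return (time.time() - self.timeout) > start_time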
Example #16
    def test_active_standby_vrrp_failover(self):
        """Tests active/standby VRRP failover

        * Test the load balancer to make sure it is functioning
        * Identifies the Master and Backup amphora
        * Deletes the Master amphora
        * Sends traffic through the load balancer
        * Validates that the Backup has assumed the Master role
        """
        # We have to do this here as the api_version and clients are not
        # set up in time to use a decorator or the skip_checks mixin
        if not self.mem_listener_client.is_version_supported(
                self.api_version, '2.4'):
            raise self.skipException(
                'Active/Standby VRRP failover tests require '
                'Octavia API version 2.4 or newer.')

        session = requests.Session()

        # Send some traffic
        self.check_members_balanced(self.lb_vip_address)

        # Get the amphorae associated with this load balancer
        amphorae = self.os_admin.amphora_client.list_amphorae(
            query_params='{loadbalancer_id}={lb_id}'.format(
                loadbalancer_id=const.LOADBALANCER_ID,
                lb_id=self.lb_id))

        # TODO(johnsom): Fix when LB flavors support act/stdby
        if len(amphorae) < 2:
            self.skipTest('Load balancer must be using active/standby '
                          'topology for the VRRP failover test.')

        # Generate traffic on the LB so we can identify the current Master
        r = session.get('http://{0}'.format(self.lb_vip_address), timeout=2)

        # Cycle through the amps to find the master
        master_amp = None
        backup_amp = None
        start = int(time.time())
        while True:
            for amp in amphorae:
                amphora_stats = self.os_admin.amphora_client.get_amphora_stats(
                    amp[const.ID])
                for listener in amphora_stats:
                    if listener[const.TOTAL_CONNECTIONS] > 0:
                        master_amp = amp
                        break
                # check if we left the listener for loop by finding the master
                if master_amp:
                    break
            # If we found the master and broke out of the amp for loop, break
            # out of the while loop too.
            if master_amp:
                break
            if int(time.time()) - start >= CONF.load_balancer.check_timeout:
                message = ('Unable to find Master amphora in {timeout} '
                           'seconds.'.format(
                               timeout=CONF.load_balancer.check_timeout))
                raise exceptions.TimeoutException(message)
            time.sleep(CONF.load_balancer.check_interval)

        # Find the backup amphora and check it is ready for the test
        for amp in amphorae:
            if amp[const.ID] == master_amp[const.ID]:
                continue
            else:
                backup_amp = amp
        self.assertIsNotNone(backup_amp)
        amphora_stats = self.os_admin.amphora_client.get_amphora_stats(
            backup_amp[const.ID])
        for listener in amphora_stats:
            self.assertEqual(0, listener[const.TOTAL_CONNECTIONS])

        # Delete the master amphora compute instance
        self.os_admin_servers_client.delete_server(
            master_amp[const.COMPUTE_ID])

        # Pass some traffic through the LB
        # Note: We want this to loop for longer than the heartbeat interval
        #       to make sure a stats update has come in to the HM
        for x in range(0, 20):
            try:
                r = session.get('http://{0}'.format(self.lb_vip_address),
                                timeout=1)
                LOG.info('Got response: %s', r.text)
            except Exception:
                LOG.info('Load balancer request failed. Looping')
            time.sleep(1)

        # Check that the Backup amphora is now Master
        amphora_stats = self.os_admin.amphora_client.get_amphora_stats(
            backup_amp[const.ID])
        connections = 0
        for listener in amphora_stats:
            connections += listener[const.TOTAL_CONNECTIONS]
        self.assertGreater(connections, 0)
        LOG.info('Backup amphora is now Master.')
        # Wait for the amphora failover to start
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            self.lb_id, const.PROVISIONING_STATUS,
            const.PENDING_UPDATE, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
        # Wait for the load balancer to return to ACTIVE so the
        # cleanup steps will pass
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            self.lb_id, const.PROVISIONING_STATUS,
            const.ACTIVE, CONF.load_balancer.lb_build_interval,
            CONF.load_balancer.lb_build_timeout)
Example #17
    def test_minimum_basic_scenario(self):
        image = self.glance_image_create()
        keypair = self.create_keypair()

        server = self.create_server(image_id=image, key_name=keypair['name'])
        servers = self.servers_client.list_servers()['servers']
        self.assertIn(server['id'], [x['id'] for x in servers])

        self.nova_show(server)

        volume = self.create_volume()
        volumes = self.volumes_client.list_volumes()['volumes']
        self.assertIn(volume['id'], [x['id'] for x in volumes])

        self.cinder_show(volume)

        volume = self.nova_volume_attach(server, volume)
        self.addCleanup(self.nova_volume_detach, server, volume)
        self.cinder_show(volume)

        floating_ip = None
        server = self.servers_client.show_server(server['id'])['server']
        if (CONF.network_feature_enabled.floating_ips and
            CONF.network.floating_network_name):
            floating_ip = self.create_floating_ip(server)
            # fetch the server again to make sure the addresses were refreshed
            # after associating the floating IP
            server = self.servers_client.show_server(server['id'])['server']
            address = self._get_floating_ip_in_server_addresses(
                floating_ip, server)
            self.assertIsNotNone(
                address,
                "Failed to find floating IP '%s' in server addresses: %s" %
                (floating_ip['ip'], server['addresses']))
            ssh_ip = floating_ip['ip']
        else:
            ssh_ip = self.get_server_ip(server)

        self.create_and_add_security_group_to_server(server)

        # check that we can SSH to the server before reboot
        self.linux_client = self.get_remote_client(
            ssh_ip, private_key=keypair['private_key'],
            server=server)

        self.nova_reboot(server)

        # check that we can SSH to the server after reboot
        # (both connections are part of the scenario)
        self.linux_client = self.get_remote_client(
            ssh_ip, private_key=keypair['private_key'],
            server=server)

        self.check_disks()

        if floating_ip:
            # delete the floating IP, this should refresh the server addresses
            self.compute_floating_ips_client.delete_floating_ip(
                floating_ip['id'])

            def is_floating_ip_detached_from_server():
                server_info = self.servers_client.show_server(
                    server['id'])['server']
                address = self._get_floating_ip_in_server_addresses(
                    floating_ip, server_info)
                return (not address)

            if not test_utils.call_until_true(
                is_floating_ip_detached_from_server,
                CONF.compute.build_timeout,
                CONF.compute.build_interval):
                msg = ("Floating IP '%s' should not be in server addresses: %s"
                       % (floating_ip['ip'], server['addresses']))
                raise exceptions.TimeoutException(msg)
Example #18
def validate_URL_response(URL,
                          expected_status_code=200,
                          expected_body=None,
                          HTTPS_verify=True,
                          client_cert_path=None,
                          CA_certs_path=None,
                          request_interval=CONF.load_balancer.build_interval,
                          request_timeout=CONF.load_balancer.build_timeout):
    """Check a URL response (HTTP or HTTPS).

    :param URL: The URL to query.
    :param expected_status_code: The expected HTTP status code.
    :param expected_body: The expected response text, None will not compare.
    :param HTTPS_verify: Should we verify the HTTPS server.
    :param client_cert_path: Filesystem path to a file with the client private
                             key and certificate.
    :param CA_certs_path: Filesystem path to a file containing CA certificates
                          to use for HTTPS validation.
    :param request_interval: Time, in seconds, after which an individual
                             request times out.
    :param request_timeout: The maximum time, in seconds, to attempt requests.
                            Failed validation of expected results does not
                            result in a retry.
    :raises InvalidHttpSuccessCode: The expected_status_code did not match.
    :raises InvalidHTTPResponseBody: The response body did not match the
                                     expected content.
    :raises TimeoutException: The request timed out.
    :returns: None
    """
    with requests.Session() as session:
        session_kwargs = {}
        if not HTTPS_verify:
            session_kwargs['verify'] = False
        if CA_certs_path:
            session_kwargs['verify'] = CA_certs_path
        if client_cert_path:
            session_kwargs['cert'] = client_cert_path
        session_kwargs['timeout'] = request_interval
        start = time.time()
        while time.time() - start < request_timeout:
            try:
                response = session.get(URL, **session_kwargs)
                if response.status_code != expected_status_code:
                    raise exceptions.InvalidHttpSuccessCode(
                        '{0} is not the expected code {1}'.format(
                            response.status_code, expected_status_code))
                if expected_body and response.text != expected_body:
                    details = '{} does not match expected {}'.format(
                        response.text, expected_body)
                    raise exceptions.InvalidHTTPResponseBody(resp_body=details)
                return
            except requests.exceptions.Timeout:
                # Don't sleep as we have already waited the interval.
                LOG.info('Request for {} timed out. Retrying.'.format(URL))
            except (exceptions.InvalidHttpSuccessCode,
                    exceptions.InvalidHTTPResponseBody,
                    requests.exceptions.SSLError):
                raise
            except Exception as e:
                LOG.info('Validate URL got exception: {0}. '
                         'Retrying.'.format(e))
                time.sleep(request_interval)
        raise exceptions.TimeoutException()
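
A typical call, using placeholder values for the VIP address and expected page body (hypothetical; not part of the original):

# Poll a (hypothetical) load balancer VIP until it serves the expected
# page, retrying for up to the configured build_timeout.
validate_URL_response(
    'http://192.0.2.10/',
    expected_status_code=200,
    expected_body='pool-member-1',
    HTTPS_verify=False)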
Example #19
    def test_add_remove_fixed_ip(self):
        # NOTE(zhufl) By default only project that is admin or network owner
        # or project with role advsvc is authorised to add interfaces with
        # fixed-ip, so if we don't create network for each project, do not
        # test
        if not (CONF.auth.use_dynamic_credentials and
                CONF.auth.create_isolated_networks and
                not CONF.network.shared_physical_network):
            raise self.skipException("Only owner network supports "
                                     "creating interface by fixed ip.")

        # Add and Remove the fixed IP to server.
        server, ifs = self._create_server_get_interfaces()
        original_interface_count = len(ifs)  # This is the number of ports.
        self.assertGreater(original_interface_count, 0)
        # Get the starting list of IPs on the server.
        addresses = self.os_primary.servers_client.list_addresses(
            server['id'])['addresses']
        # There should be one entry for the single network mapped to a list of
        # addresses, which at this point should have at least one entry.
        # Note that we could start with two addresses depending on how tempest
        # is configured for using floating IPs.
        self.assertEqual(1, len(addresses), addresses)  # number of networks
        # Keep track of the original addresses so we can know which IP is new.
        original_ips = [addr['addr'] for addr in list(addresses.values())[0]]
        original_ip_count = len(original_ips)
        self.assertGreater(original_ip_count, 0, addresses)  # at least 1
        network_id = ifs[0]['net_id']
        # Add another fixed IP to the server. This should result in another
        # fixed IP on the same network (and same port since we only have one
        # port).
        self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
        # Wait for the IP count to increase by one.

        def _get_server_floating_ips():
            _floating_ips = []
            _server = self.os_primary.servers_client.show_server(
                server['id'])['server']
            for _ip_set in _server['addresses']:
                for _ip in _server['addresses'][_ip_set]:
                    if _ip['OS-EXT-IPS:type'] == 'floating':
                        _floating_ips.append(_ip['addr'])
            return _floating_ips

        def _wait_for_ip_increase():
            _addresses = self.os_primary.servers_client.list_addresses(
                server['id'])['addresses']
            _ips = [addr['addr'] for addr in list(_addresses.values())[0]]
            LOG.debug("Wait for IP increase. All IPs still associated to "
                      "the server %(id)s: %(ips)s",
                      {'id': server['id'], 'ips': _ips})
            if len(_ips) == original_ip_count + 1:
                return True
            elif len(_ips) == original_ip_count:
                return False
            # If not, let's remove any floating IP from the list and check again
            _fips = _get_server_floating_ips()
            _ips = [_ip for _ip in _ips if _ip not in _fips]
            LOG.debug("Wait for IP increase. Fixed IPs still associated to "
                      "the server %(id)s: %(ips)s",
                      {'id': server['id'], 'ips': _ips})
            return len(_ips) == original_ip_count + 1

        if not test_utils.call_until_true(
                _wait_for_ip_increase, CONF.compute.build_timeout,
                CONF.compute.build_interval):
            raise lib_exc.TimeoutException(
                'Timed out while waiting for IP count to increase.')

        # Remove the fixed IP that we just added.
        server_detail = self.os_primary.servers_client.show_server(
            server['id'])['server']
        # Get the Fixed IP from server.
        fixed_ip = None
        for ip_set in server_detail['addresses']:
            for ip in server_detail['addresses'][ip_set]:
                if (ip['OS-EXT-IPS:type'] == 'fixed' and
                        ip['addr'] not in original_ips):
                    fixed_ip = ip['addr']
                    break
            if fixed_ip is not None:
                break
        self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
        # Wait for the IP count to decrease by one.

        def _wait_for_ip_decrease():
            _addresses = self.os_primary.servers_client.list_addresses(
                server['id'])['addresses']
            _ips = [addr['addr'] for addr in list(_addresses.values())[0]]
            LOG.debug("Wait for IP decrease. All IPs still associated to "
                      "the server %(id)s: %(ips)s",
                      {'id': server['id'], 'ips': _ips})
            if len(_ips) == original_ip_count:
                return True
            # If not, let's remove any floating IP from the list and check again
            _fips = _get_server_floating_ips()
            _ips = [_ip for _ip in _ips if _ip not in _fips]
            LOG.debug("Wait for IP decrease. Fixed IPs still associated to "
                      "the server %(id)s: %(ips)s",
                      {'id': server['id'], 'ips': _ips})
            return len(_ips) == original_ip_count

        if not test_utils.call_until_true(
                _wait_for_ip_decrease, CONF.compute.build_timeout,
                CONF.compute.build_interval):
            raise lib_exc.TimeoutException(
                'Timed out while waiting for IP count to decrease.')
Example #20
    def test_neutronv2_ports_tables(self):
        port_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'ports')['columns'])

        port_sec_binding_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'security_group_port_bindings')['columns'])

        fixed_ips_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'fixed_ips')['columns'])

        @helper.retry_on_exception
        def _check_data():
            ports_from_neutron = self.ports_client.list_ports()
            port_map = {}
            for port in ports_from_neutron['ports']:
                port_map[port['id']] = port

            client = self.os_admin.congress_client
            client.request_refresh(self.datasource_id)
            time.sleep(1)

            ports = (client.list_datasource_rows(self.datasource_id, 'ports'))
            security_group_port_bindings = (
                client.list_datasource_rows(
                    self.datasource_id, 'security_group_port_bindings'))
            fixed_ips = (
                client.list_datasource_rows(self.datasource_id, 'fixed_ips'))

            # Validate ports table
            for row in ports['results']:
                port_row = port_map[row['data'][0]]
                for index in range(len(port_schema)):
                    if (str(row['data'][index]) !=
                            str(port_row[port_schema[index]['name']])):
                        return False

            # validate security_group_port_bindings table
            for row in security_group_port_bindings['results']:
                port_row = port_map[row['data'][0]]
                for index in range(len(port_sec_binding_schema)):
                    row_index = port_sec_binding_schema[index]['name']
                    # Translate port_id -> id
                    if row_index == 'port_id':
                        if (str(row['data'][index]) !=
                                str(port_row['id'])):
                            return False
                    elif row_index == 'security_group_id':
                        if (str(row['data'][index]) not in
                                port_row['security_groups']):
                            return False

            # validate fixed_ips
            for row in fixed_ips['results']:
                port_row = port_map[row['data'][0]]
                for index in range(len(fixed_ips_schema)):
                    row_index = fixed_ips_schema[index]['name']
                    if row_index in ['subnet_id', 'ip_address']:
                        if not port_row['fixed_ips']:
                            continue
                        for fixed_ip in port_row['fixed_ips']:
                            if row['data'][index] == fixed_ip[row_index]:
                                break
                        else:
                            # no subnet_id/ip_address match found
                            return False
            return True

        if not test_utils.call_until_true(func=_check_data,
                                          duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Example #21
    def test_list_notification_methods_with_offset_limit(self):
        name1 = data_utils.rand_name('notification')
        name2 = data_utils.rand_name('notification')
        name3 = data_utils.rand_name('notification')
        name4 = data_utils.rand_name('notification')
        notification1 = helpers.create_notification(name=name1)
        notification2 = helpers.create_notification(name=name2)
        notification3 = helpers.create_notification(name=name3)
        notification4 = helpers.create_notification(name=name4)

        resp, response_body = self.monasca_client.create_notifications(
            notification1)
        id1 = response_body['id']
        self.assertEqual(201, resp.status)
        resp, response_body = self.monasca_client.create_notifications(
            notification2)
        id2 = response_body['id']
        self.assertEqual(201, resp.status)
        resp, response_body = self.monasca_client.create_notifications(
            notification3)
        id3 = response_body['id']
        self.assertEqual(201, resp.status)
        resp, response_body = self.monasca_client.create_notifications(
            notification4)
        id4 = response_body['id']
        self.assertEqual(201, resp.status)

        resp, response_body = self.monasca_client.list_notification_methods()
        elements = response_body['elements']

        first_element = elements[0]
        last_element = elements[3]

        query_parms = '?limit=4'
        resp, response_body = self.monasca_client.\
            list_notification_methods(query_parms)
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        self.assertEqual(4, len(elements))
        self.assertEqual(first_element, elements[0])

        timeout = time.time() + 60 * 1  # 1 minute timeout
        for limit in range(1, 5):
            next_element = elements[limit - 1]
            while True:
                if time.time() < timeout:
                    query_parms = '?offset=' + str(next_element['id']) + \
                                  '&limit=' + str(limit)
                    resp, response_body = self.monasca_client.\
                        list_notification_methods(query_parms)
                    self.assertEqual(200, resp.status)
                    new_elements = response_body['elements']
                    if len(new_elements) > limit - 1:
                        self.assertEqual(limit, len(new_elements))
                        next_element = new_elements[limit - 1]
                    elif 0 < len(new_elements) <= limit - 1:
                        self.assertEqual(last_element, new_elements[0])
                        break
                    else:
                        self.assertEqual(last_element, next_element)
                        break
                else:
                    msg = "Failed " \
                          "test_list_notification_methods_with_offset_limit:" \
                          " one minute timeout on offset limit test loop."
                    raise exceptions.TimeoutException(msg)

        resp, response_body = self.monasca_client.\
            delete_notification_method(id1)
        self.assertEqual(204, resp.status)

        resp, response_body = self.monasca_client.\
            delete_notification_method(id2)
        self.assertEqual(204, resp.status)

        resp, response_body = self.monasca_client.\
            delete_notification_method(id3)
        self.assertEqual(204, resp.status)
        resp, response_body = self.monasca_client.\
            delete_notification_method(id4)
        self.assertEqual(204, resp.status)
Example #22
    def test_update_no_error(self):
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('aodh'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')
Example #23
def wait_for_server_status(client,
                           server_id,
                           status,
                           ready_wait=True,
                           extra_timeout=0,
                           raise_on_error=True):
    """Waits for a server to reach a given status."""

    # NOTE(afazekas): The UNKNOWN status is possible on ERROR
    # or at a very early stage.
    time.sleep(1)
    body = client.show_server(server_id)['server']
    old_status = server_status = body['status']
    old_task_state = task_state = _get_task_state(body)
    start_time = int(time.time())
    timeout = client.build_timeout + extra_timeout
    while True:
        # NOTE(afazekas): Currently the BUILD status is only reached
        # during the UNKNOWN->ACTIVE transition.
        # TODO(afazekas): enumerate and validate the stable status set
        if status == 'BUILD' and server_status != 'UNKNOWN':
            return
        if server_status == status:
            if ready_wait:
                if status == 'BUILD':
                    return
                # NOTE(afazekas): The instance is in "ready for action state"
                # when no task in progress
                if task_state is None:
                    # without the state API extension, 3 seconds is
                    # usually enough
                    time.sleep(CONF.compute.ready_wait)
                    return
            else:
                return

        time.sleep(client.build_interval)
        body = client.show_server(server_id)['server']
        server_status = body['status']
        task_state = _get_task_state(body)
        if (server_status != old_status) or (task_state != old_task_state):
            LOG.info('State transition "%s" ==> "%s" after %d second wait',
                     '/'.join((old_status, str(old_task_state))), '/'.join(
                         (server_status, str(task_state))),
                     time.time() - start_time)
        if (server_status == 'ERROR') and raise_on_error:
            if 'fault' in body:
                raise exceptions.BuildErrorException(body['fault'],
                                                     server_id=server_id)
            else:
                raise exceptions.BuildErrorException(server_id=server_id)

        timed_out = int(time.time()) - start_time >= timeout

        if timed_out:
            expected_task_state = 'None' if ready_wait else 'n/a'
            message = ('Server %(server_id)s failed to reach %(status)s '
                       'status and task state "%(expected_task_state)s" '
                       'within the required time (%(timeout)s s).' % {
                           'server_id': server_id,
                           'status': status,
                           'expected_task_state': expected_task_state,
                           'timeout': timeout
                       })
            message += ' Current status: %s.' % server_status
            message += ' Current task state: %s.' % task_state
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
        old_status = server_status
        old_task_state = task_state
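
The _get_task_state helper referenced above is not shown. A sketch based on the compute API's extended-status attributes (the exact key name is an assumption drawn from the standard Nova OS-EXT-STS extension):

def _get_task_state(body):
    # The task state is exposed by Nova's extended status attributes;
    # returns None when no task is in progress.
    return body.get('OS-EXT-STS:task_state', None)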
Example #24
def wait_for_resource_status(client,
                             resource_id,
                             status,
                             resource_name='share',
                             rule_id=None,
                             status_attr='status',
                             raise_rule_in_error_state=True,
                             version=LATEST_MICROVERSION):
    """Waits for a resource to reach a given status."""

    get_resource_action = {
        'share': 'get_share',
        'snapshot': 'get_snapshot',
        'share_server': 'show_share_server',
        'share_instance': 'get_share_instance',
        'snapshot_instance': 'get_snapshot_instance',
        'access_rule': 'list_access_rules',
        'snapshot_access': 'list_snapshot_access_rules',
        'share_group': 'get_share_group',
        'share_group_snapshot': 'get_share_group_snapshot',
        'share_replica': 'get_share_replica',
    }

    action_name = get_resource_action[resource_name]
    # This code snippet is intended to set the dictionary key of the returned
    # response for share access rule and for snapshot access rule.
    if 'access' in resource_name:
        rn = '_'.join(action_name.split('_')[1:-1]) + '_list'
    else:
        rn = resource_name

    # Since API v2 requests require an additional parameter for micro-versions,
    # it's necessary to pass the required parameters according to the version.
    resource_action = getattr(client, action_name)
    method_args = [resource_id]
    method_kwargs = {}
    if isinstance(client, shares_client.SharesV2Client):
        method_kwargs.update({'version': version})
    body = resource_action(*method_args, **method_kwargs)[rn]

    if 'access' in resource_name:
        status_attr = 'state'
        body = _get_access_rule(body, rule_id)

    resource_status = body[status_attr]
    start = int(time.time())

    exp_status = status if isinstance(status, list) else [status]
    while resource_status not in exp_status:
        time.sleep(client.build_interval)
        body = resource_action(*method_args, **method_kwargs)[rn]

        if 'access' in resource_name:
            status_attr = 'state'
            body = _get_access_rule(body, rule_id)

        resource_status = body[status_attr]

        if resource_status in exp_status:
            return
        elif 'error' in resource_status.lower() and raise_rule_in_error_state:
            raise_method = _get_name_of_raise_method(resource_name)
            resource_exception = getattr(share_exceptions, raise_method)
            raise resource_exception(resource_id=resource_id)
        if int(time.time()) - start >= client.build_timeout:
            message = ('%s %s failed to reach %s status (current %s) '
                       'within the required time (%s s).' %
                       (resource_name.replace('_', ' '), resource_id, status,
                        resource_status, client.build_timeout))
            raise exceptions.TimeoutException(message)
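
Example 24 references two helpers that are not shown, _get_access_rule and _get_name_of_raise_method. A plausible sketch of the first, selecting the matching rule out of the list returned by the list_*_access_rules calls (the real manila helper may differ):

def _get_access_rule(body, rule_id):
    # Pick the access rule with the given id out of the rule list; the
    # waiter then reads its 'state' attribute.
    return next((rule for rule in body if rule['id'] == rule_id), None)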
Example #25
    def _hotplug_server(self):
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip['floating_ip_address']
        private_key = self._get_server_key(server)
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self.os_admin.ports_client.list_ports(
            device_id=server['id'])['ports']
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        interface = self.interface_client.create_interface(
            server_id=server['id'],
            net_id=self.new_net['id'])['interfaceAttachment']
        self.addCleanup(self.ports_client.wait_for_resource_deletion,
                        interface['port_id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.interface_client.delete_interface, server['id'],
                        interface['port_id'])

        def check_ports():
            self.new_port_list = [
                port for port in self.os_admin.ports_client.list_ports(
                    device_id=server['id'])['ports']
                if port['id'] != old_port['id']
            ]
            return len(self.new_port_list) == 1

        if not test_utils.call_until_true(check_ports,
                                          CONF.network.build_timeout,
                                          CONF.network.build_interval):
            raise exceptions.TimeoutException(
                "No new port attached to the server in time (%s sec)! "
                "Old port: %s. Number of new ports: %d" %
                (CONF.network.build_timeout, old_port, len(
                    self.new_port_list)))
        new_port = self.new_port_list[0]

        def check_new_nic():
            new_nic_list = self._get_server_nics(ssh_client)
            self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
            return len(self.diff_list) == 1

        if not test_utils.call_until_true(check_new_nic,
                                          CONF.network.build_timeout,
                                          CONF.network.build_interval):
            raise exceptions.TimeoutException("Interface not visible on the "
                                              "guest after %s sec" %
                                              CONF.network.build_timeout)

        _, new_nic = self.diff_list[0]
        ip_output = ssh_client.exec_command('ip a')
        ip_address = new_port['fixed_ips'][0]['ip_address']
        ip_mask = CONF.network.project_network_mask_bits
        # If the guest has not already configured this address, set it now.
        if ' ' + ip_address + '/' + str(ip_mask) not in ip_output:
            try:
                ssh_client.exec_command("sudo ip addr add %s/%s dev %s" %
                                        (ip_address, ip_mask, new_nic))
                ssh_client.exec_command("sudo ip link set %s up" % new_nic)
            except exceptions.SSHExecCommandFailed as exc:
                if 'RTNETLINK answers: File exists' in str(exc):
                    LOG.debug(
                        'IP address %(ip_address)s is already set in device '
                        '%(device)s\nPrevious "ip a" output: %(ip_output)s', {
                            'ip_address': ip_address,
                            'device': new_nic,
                            'ip_output': ip_output
                        })
                else:
                    raise exc
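
The _get_server_nics helper used above is not shown in this excerpt. A
plausible implementation, consistent with the (index, name) tuple unpacking
in `_, new_nic = self.diff_list[0]`, parses the guest's `ip address` output:

import re

def _get_server_nics(self, ssh_client):
    # Match lines such as "2: eth0@if3: <BROADCAST,...>" and return
    # ('2', 'eth0')-style tuples; the caller only uses the name.
    reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+)[@]?.*:')
    ipatxt = ssh_client.exec_command('ip address')
    return reg.findall(ipatxt)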
Example #26
    def test_datasource_db_sync_add_remove(self):
        # Verify that a replica adds a datasource when a datasource
        # appears in the database.
        replica_server = False
        try:
            # Create the fake datasource if it does not already exist.
            fake_id = self.create_fake(self.client)

            # Start replica
            self.start_replica(CONF.congressha.replica_port)
            replica_client = self.create_client(CONF.congressha.replica_type)

            # Check replica server status
            if not test.call_until_true(
                    func=lambda: self._check_replica_server_status(
                        replica_client),
                    duration=60,
                    sleep_for=1):
                raise exceptions.TimeoutException("Replica Server not ready")
            # Replica server is up
            replica_server = True

            # The primary server may sync later than the replica because
            # their datasource sync intervals differ (primary: 30 s,
            # replica: 5 s), so check the replica first.

            # Verify that replica server synced fake dataservice and policy
            if not test.call_until_true(
                    func=lambda: self._check_resource_exists(
                        replica_client, 'datasource'),
                    duration=60,
                    sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica doesn't have fake dataservice, data sync failed")
            if not test.call_until_true(
                    func=lambda: self._check_resource_exists(
                        replica_client, 'policy'),
                    duration=60,
                    sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica doesn't have fake policy, policy sync failed")

            # Verify that primary server synced fake dataservice and policy
            if not test.call_until_true(
                    func=lambda: self._check_resource_exists(
                        self.client, 'datasource'),
                    duration=90,
                    sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary doesn't have fake dataservice, data sync failed")
            if not test.call_until_true(
                    func=lambda: self._check_resource_exists(
                        self.client, 'policy'),
                    duration=90,
                    sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary doesn't have fake policy, policy sync failed")

            # Remove fake from primary server instance.
            LOG.debug("removing fake datasource %s", str(fake_id))
            self.client.delete_datasource(fake_id)

            # Verify that the replica server dropped the fake datasource
            # and policy
            if not test.call_until_true(
                    func=lambda: self._check_resource_missing(
                        replica_client, 'datasource'),
                    duration=60,
                    sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica still has fake dataservice, sync failed")
            if not test.call_until_true(
                    func=lambda: self._check_resource_missing(
                        replica_client, 'policy'),
                    duration=60,
                    sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica still fake policy, policy synchronizer failed")

            LOG.debug("removed fake datasource from replica instance")

            # Verify that the primary server dropped the fake datasource
            # and policy
            if not test.call_until_true(
                    func=lambda: self._check_resource_missing(
                        self.client, 'datasource'),
                    duration=90,
                    sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary still has fake dataservice, sync failed")
            if not test.call_until_true(
                    func=lambda: self._check_resource_missing(
                        self.client, 'policy'),
                    duration=90,
                    sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary still fake policy, policy synchronizer failed")

            LOG.debug("removed fake datasource from primary instance")

        finally:
            if replica_server:
                self.stop_replica(CONF.congressha.replica_port)
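
The _check_resource_exists and _check_resource_missing predicates polled
above are not shown. A sketch, assuming the Congress client exposes
list_datasources() and list_policies() returning a 'results' list:

def _check_resource_exists(self, client, resource='datasource'):
    # Hypothetical predicate: is the fake resource visible on this server?
    try:
        if resource == 'datasource':
            body = client.list_datasources()
        else:
            body = client.list_policies()
    except Exception:
        # The server may still be starting; let call_until_true retry.
        return False
    return any(item['name'].startswith('fake') for item in body['results'])

def _check_resource_missing(self, client, resource='datasource'):
    return not self._check_resource_exists(client, resource)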
Example #27
    def test_device_tagging(self):
        # Create volumes
        # The create_volume method waits for each volume to become available,
        # and the base class cleans them up on tearDown.
        boot_volume = self.create_volume(CONF.compute.image_ref)
        other_volume = self.create_volume()
        untagged_volume = self.create_volume()

        # Create networks
        net1 = self.networks_client.create_network(
            name=data_utils.rand_name('device-tagging-net1'))['network']
        self.addCleanup(self.networks_client.delete_network, net1['id'])

        net2 = self.networks_client.create_network(
            name=data_utils.rand_name('device-tagging-net2'))['network']
        self.addCleanup(self.networks_client.delete_network, net2['id'])

        # Create subnets
        subnet1 = self.subnets_client.create_subnet(network_id=net1['id'],
                                                    cidr='10.1.1.0/24',
                                                    ip_version=4)['subnet']
        self.addCleanup(self.subnets_client.delete_subnet, subnet1['id'])

        subnet2 = self.subnets_client.create_subnet(network_id=net2['id'],
                                                    cidr='10.2.2.0/24',
                                                    ip_version=4)['subnet']
        self.addCleanup(self.subnets_client.delete_subnet, subnet2['id'])

        # Create ports
        self.port1 = self.ports_client.create_port(network_id=net1['id'],
                                                   fixed_ips=[{
                                                       'subnet_id':
                                                       subnet1['id']
                                                   }])['port']
        self.addCleanup(self.ports_client.delete_port, self.port1['id'])

        self.port2 = self.ports_client.create_port(network_id=net1['id'],
                                                   fixed_ips=[{
                                                       'subnet_id':
                                                       subnet1['id']
                                                   }])['port']
        self.addCleanup(self.ports_client.delete_port, self.port2['id'])

        # Create server
        admin_pass = data_utils.rand_password()
        config_drive_enabled = CONF.compute_feature_enabled.config_drive

        server = self.create_test_server(
            validatable=True,
            config_drive=config_drive_enabled,
            adminPass=admin_pass,
            name=data_utils.rand_name('device-tagging-server'),
            networks=[
                # Validation network for ssh
                {
                    'uuid': self.get_tenant_network()['id']
                },
                # Different tags for different ports
                {
                    'port': self.port1['id'],
                    'tag': 'port-1'
                },
                {
                    'port': self.port2['id'],
                    'tag': 'port-2'
                },
                # Two nics on same net, one tagged one not
                {
                    'uuid': net1['id'],
                    'tag': 'net-1'
                },
                {
                    'uuid': net1['id']
                },
                # Two nics on same net, different IP
                {
                    'uuid': net2['id'],
                    'fixed_ip': '10.2.2.100',
                    'tag': 'net-2-100'
                },
                {
                    'uuid': net2['id'],
                    'fixed_ip': '10.2.2.200',
                    'tag': 'net-2-200'
                }
            ],
            block_device_mapping_v2=[
                # Boot volume
                {
                    'uuid': boot_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 0,
                    'tag': 'boot'
                },
                # Other volume
                {
                    'uuid': other_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 1,
                    'tag': 'other'
                },
                # Untagged volume
                {
                    'uuid': untagged_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 2
                }
            ])

        self.addCleanup(self.delete_server, server['id'])

        self.ssh_client = remote_client.RemoteClient(
            self.get_server_ip(server),
            CONF.validation.image_ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.servers_client)

        # Find the MAC addresses of our fixed IPs
        self.net_2_100_mac = None
        self.net_2_200_mac = None
        ifaces = self.interfaces_client.list_interfaces(server['id'])
        for iface in ifaces['interfaceAttachments']:
            if 'fixed_ips' in iface:
                for ip in iface['fixed_ips']:
                    if ip['ip_address'] == '10.2.2.100':
                        self.net_2_100_mac = iface['mac_addr']
                    if ip['ip_address'] == '10.2.2.200':
                        self.net_2_200_mac = iface['mac_addr']
        # Make sure we found both MACs; there is no reason for either to be
        # missing.
        self.assertTrue(self.net_2_100_mac)
        self.assertTrue(self.net_2_200_mac)

        # Verify metadata from metadata service
        if CONF.compute_feature_enabled.metadata_service:
            md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
            LOG.info(
                'Attempting to verify tagged devices in server %s via '
                'the metadata service: %s', server['id'], md_url)

            def get_and_verify_metadata():
                try:
                    self.ssh_client.exec_command('curl -V')
                except exceptions.SSHExecCommandFailed:
                    if not CONF.compute_feature_enabled.config_drive:
                        raise self.skipException('curl not found in guest '
                                                 'and config drive is '
                                                 'disabled')
                    LOG.warning('curl was not found in the guest, device '
                                'tagging metadata was not checked in the '
                                'metadata API')
                    return True
                cmd = 'curl %s' % md_url
                md_json = self.ssh_client.exec_command(cmd)
                self.verify_device_metadata(md_json)
                return True

            if not test_utils.call_until_true(get_and_verify_metadata,
                                              CONF.compute.build_timeout,
                                              CONF.compute.build_interval):
                raise exceptions.TimeoutException('Timeout while verifying '
                                                  'metadata on server.')

        # Verify metadata on config drive
        if CONF.compute_feature_enabled.config_drive:
            cmd_blkid = 'blkid -t LABEL=config-2 -o device'
            LOG.info(
                'Attempting to verify tagged devices in server %s via '
                'the config drive.', server['id'])
            dev_name = self.ssh_client.exec_command(cmd_blkid)
            dev_name = dev_name.rstrip()
            try:
                self.ssh_client.exec_command('sudo mount %s /mnt' % dev_name)
            except exceptions.SSHExecCommandFailed:
                # The mount failed; collect and log some useful debugging
                # information before re-raising.
                lsblk = self.ssh_client.exec_command('sudo lsblk --fs --ascii')
                LOG.error(
                    "Mounting %s on /mnt failed. Right after the "
                    "failure 'lsblk' in the guest reported:\n%s", dev_name,
                    lsblk)
                raise

            cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
            md_json = self.ssh_client.exec_command(cmd_md)
            self.verify_device_metadata(md_json)
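
verify_device_metadata is defined elsewhere in the test class; conceptually
it decodes meta_data.json and asserts that every tagged device appears in
the 'devices' list with its expected tag. A condensed sketch of that idea:

import json

def verify_device_metadata(self, md_json):
    # Hypothetical condensed check: map each NIC's MAC address to the tag
    # it was booted with and assert the metadata agrees.
    md_dict = json.loads(md_json)
    expected = {
        self.port1['mac_address']: 'port-1',
        self.port2['mac_address']: 'port-2',
        self.net_2_100_mac: 'net-2-100',
        self.net_2_200_mac: 'net-2-200',
    }
    found = {d['mac']: d.get('tags', []) for d in md_dict['devices']
             if d['type'] == 'nic'}
    for mac, tag in expected.items():
        self.assertIn(tag, found.get(mac, []))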
Example #28
def wait_for_status(show_client,
                    id,
                    status_key,
                    status,
                    check_interval,
                    check_timeout,
                    root_tag=None,
                    error_ok=False,
                    **kwargs):
    """Waits for an object to reach a specific status.

    :param show_client: The tempest service client show method.
                        Ex. cls.os_primary.servers_client.show_server
    :param id: The id of the object to query.
    :param status_key: The key of the status field in the response.
                       Ex. provisioning_status
    :param status: The status to wait for. Ex. "ACTIVE"
    :param check_interval: How often to check the status, in seconds.
    :param check_timeout: The maximum time, in seconds, to check the status.
    :param root_tag: The root tag on the response to remove, if any.
    :param error_ok: When true, ERROR status will not raise an exception.
    :raises UnexpectedResponseCode: Raised if the object goes into ERROR and
                                    ERROR was not the desired status.
    :raises TimeoutException: Raised if the object reaches neither the desired
                              status nor ERROR within the check_timeout period.
    :returns: The object details from the show client.
    """
    start = int(time.time())
    LOG.info('Waiting for {name} status to update to {status}'.format(
        name=show_client.__name__, status=status))
    while True:
        if status == const.DELETED:
            try:
                response = show_client(id, **kwargs)
            except exceptions.NotFound:
                return
        else:
            response = show_client(id, **kwargs)

        if root_tag:
            object_details = response[root_tag]
        else:
            object_details = response

        if object_details[status_key] == status:
            LOG.info('{name}\'s status updated to {status}.'.format(
                name=show_client.__name__, status=status))
            return object_details
        elif object_details[status_key] == 'ERROR':
            message = ('{name} {field} updated to an invalid state of '
                       'ERROR'.format(name=show_client.__name__,
                                      field=status_key))
            caller = test_utils.find_test_caller()
            if caller:
                message = '({caller}) {message}'.format(caller=caller,
                                                        message=message)
            if not error_ok:
                raise exceptions.UnexpectedResponseCode(message)
        elif int(time.time()) - start >= check_timeout:
            message = (
                '{name} {field} failed to update to {expected_status} within '
                'the required time {timeout}. Current status of {name}: '
                '{status}'.format(name=show_client.__name__,
                                  timeout=check_timeout,
                                  status=object_details[status_key],
                                  expected_status=status,
                                  field=status_key))
            caller = test_utils.find_test_caller()
            if caller:
                message = '({caller}) {message}'.format(caller=caller,
                                                        message=message)
            raise exceptions.TimeoutException(message)

        time.sleep(check_interval)
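
An illustrative invocation of the waiter; the load-balancer client, ID and
timeouts are assumptions, only the signature comes from the code above:

# Block until the load balancer reaches ACTIVE, unwrapping the
# 'loadbalancer' root tag from the show response.
lb = wait_for_status(lb_client.show_loadbalancer, lb_id,
                     'provisioning_status', 'ACTIVE',
                     check_interval=5, check_timeout=300,
                     root_tag='loadbalancer')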
Example #29
    def test_add_remove_fixed_ip(self):
        # Add a fixed IP to the server, then remove it.
        server, ifs = self._create_server_get_interfaces()
        original_interface_count = len(ifs)  # This is the number of ports.
        self.assertGreater(original_interface_count, 0)
        # Get the starting list of IPs on the server.
        addresses = self.os_primary.servers_client.list_addresses(
            server['id'])['addresses']
        # There should be one entry for the single network mapped to a list of
        # addresses, which at this point should have at least one entry.
        # Note that we could start with two addresses depending on how tempest
        # is configured for using floating IPs.
        self.assertEqual(1, len(addresses), addresses)  # number of networks
        # Keep track of the original addresses so we can know which IP is new.
        original_ips = [addr['addr'] for addr in list(addresses.values())[0]]
        original_ip_count = len(original_ips)
        self.assertGreater(original_ip_count, 0, addresses)  # at least 1
        network_id = ifs[0]['net_id']
        # Add another fixed IP to the server. This should result in another
        # fixed IP on the same network (and same port since we only have one
        # port).
        self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
        # Wait for the IP count to increase by one.

        def _wait_for_ip_increase():
            _addresses = self.os_primary.servers_client.list_addresses(
                server['id'])['addresses']
            return len(list(_addresses.values())[0]) == original_ip_count + 1

        if not test_utils.call_until_true(
                _wait_for_ip_increase, CONF.compute.build_timeout,
                CONF.compute.build_interval):
            raise lib_exc.TimeoutException(
                'Timed out while waiting for IP count to increase.')

        # Remove the fixed IP that we just added.
        server_detail = self.os_primary.servers_client.show_server(
            server['id'])['server']
        # Get the Fixed IP from server.
        fixed_ip = None
        for ip_set in server_detail['addresses']:
            for ip in server_detail['addresses'][ip_set]:
                if (ip['OS-EXT-IPS:type'] == 'fixed' and
                        ip['addr'] not in original_ips):
                    fixed_ip = ip['addr']
                    break
            if fixed_ip is not None:
                break
        self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
        # Wait for the IP count to decrease by one.

        def _wait_for_ip_decrease():
            _addresses = self.os_primary.servers_client.list_addresses(
                server['id'])['addresses']
            return len(list(_addresses.values())[0]) == original_ip_count

        if not test_utils.call_until_true(
                _wait_for_ip_decrease, CONF.compute.build_timeout,
                CONF.compute.build_interval):
            raise lib_exc.TimeoutException(
                'Timed out while waiting for IP count to decrease.')
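
The _create_server_get_interfaces helper is not shown; a sketch consistent
with how its return value is used above (a server dict plus its interface
attachments) might be:

def _create_server_get_interfaces(self):
    # Hypothetical helper: boot a server, then return it together with
    # its current interface attachments.
    server = self.create_test_server(wait_until='ACTIVE')
    ifs = self.interfaces_client.list_interfaces(
        server['id'])['interfaceAttachments']
    return server, ifs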
Example #30
    def test_neutronv2_subnets_tables(self):
        subnet_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'subnets')['columns'])

        host_routes_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'host_routes')['columns'])

        dns_nameservers_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'dns_nameservers')['columns'])

        allocation_pools_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'allocation_pools')['columns'])

        @helper.retry_on_exception
        def _check_data():
            subnets_from_neutron = self.subnets_client.list_subnets()
            subnet_map = {}
            for subnet in subnets_from_neutron['subnets']:
                subnet_map[subnet['id']] = subnet

            client = self.os_admin.congress_client
            client.request_refresh(self.datasource_id)
            time.sleep(1)

            subnets = (
                client.list_datasource_rows(self.datasource_id, 'subnets'))
            host_routes = (
                client.list_datasource_rows(self.datasource_id, 'host_routes'))
            dns_nameservers = (
                client.list_datasource_rows(
                    self.datasource_id, 'dns_nameservers'))
            allocation_pools = (
                client.list_datasource_rows(
                    self.datasource_id, 'allocation_pools'))
            # Validate subnets table
            for row in subnets['results']:
                subnet_row = subnet_map[row['data'][0]]
                for index in range(len(subnet_schema)):
                    if (str(row['data'][index]) !=
                            str(subnet_row[subnet_schema[index]['name']])):
                        return False

            # validate dns_nameservers
            for row in dns_nameservers['results']:
                subnet_row = subnet_map[row['data'][0]]
                for index in range(len(dns_nameservers_schema)):
                    row_index = dns_nameservers_schema[index]['name']
                    if row_index in ['dns_nameserver']:
                        if (row['data'][index]
                                not in subnet_row['dns_nameservers']):
                            return False

            # validate host_routes
            for row in host_routes['results']:
                subnet_row = subnet_map[row['data'][0]]
                for index in range(len(host_routes_schema)):
                    row_index = host_routes_schema[index]['name']
                    if row_index in ['destination', 'nexthop']:
                        if not subnet_row['host_routes']:
                            continue
                        for host_route in subnet_row['host_routes']:
                            if row['data'][index] == host_route[row_index]:
                                break
                        else:
                            # no destination/nexthop match found
                            return False

            # validate allocation_pools
            for row in allocation_pools['results']:
                subnet_row = subnet_map[row['data'][0]]
                for index in range(len(allocation_pools_schema)):
                    row_index = allocation_pools_schema[index]['name']
                    if row_index in ['start', 'end']:
                        if not subnet_row['allocation_pools']:
                            continue
                        for allocation_pool in subnet_row['allocation_pools']:
                            if (row['data'][index] ==
                                    allocation_pool[row_index]):
                                break
                        else:
                            # no start/end match found
                            return False
            return True

        if not test_utils.call_until_true(func=_check_data,
                                          duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")