Example #1
 def wait_for_broken_connection(self):
     """Wait until our connection breaks."""
     if not USE_IP:
         return
     if not hasattr(self, "connection"):
         return
     poll_until(self.connection.is_connected, lambda connected: not connected, time_out=TIME_OUT_TIME)
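
Note: every snippet on this page waits on some condition through a poll_until helper (trove.common.utils.poll_until in most of them), which the listing itself never shows. The real implementation is eventlet/oslo based; the sketch below only illustrates the contract the examples rely on, covering the two call styles seen here (a single boolean predicate, or a retriever plus a condition) and a PollTimeOut raised on expiry. The names and defaults in the sketch are assumptions, not the actual Trove code.

import time


class PollTimeOut(Exception):
    """Raised when the condition is still false after time_out seconds."""


def poll_until(retriever, condition=lambda value: value,
               sleep_time=1, time_out=None, initial_delay=0):
    """Call retriever() every sleep_time seconds until condition() applied
    to its result is truthy, then return that result."""
    if initial_delay:
        time.sleep(initial_delay)
    start = time.time()
    while True:
        obj = retriever()
        if condition(obj):
            return obj
        if time_out is not None and time.time() - start > time_out:
            raise PollTimeOut()
        time.sleep(sleep_time)

With a helper of this shape, both forms used below work: poll_until(_check_instance_status) as well as volume = poll_until(get_volume, lambda v: v.status == 'available', sleep_time=2, time_out=60), where get_volume is whatever retriever the caller supplies.
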
Example #2
    def metadata(self):
        """pg_basebackup may complete, and we arrive here before the
        history file is written to the wal archive. So we need to
        handle two possibilities:
        - this is the first backup, and no history file exists yet
        - this isn't the first backup, and so the history file we retrieve
        isn't the one we just ran!
         """
        def _metadata_found():
            LOG.debug("Polling for backup metadata... ")
            self.mrb = self.most_recent_backup_file()
            if not self.mrb:
                LOG.debug("No history files found!")
                return False
            metadata = self.base_backup_metadata(
                os.path.join(WAL_ARCHIVE_DIR, self.mrb))
            LOG.debug("Label to pg_basebackup: %s label found: %s" %
                      (self.base_filename, metadata['label']))
            LOG.info(_("Metadata for backup: %s.") % str(metadata))
            return metadata['label'] == self.base_filename

        try:
            utils.poll_until(_metadata_found, sleep_time=5, time_out=60)
        except exception.PollTimeOut:
            raise RuntimeError(_("Timeout waiting for backup metadata for"
                                 " backup %s") % self.base_filename)

        return self.base_backup_metadata(
            os.path.join(WAL_ARCHIVE_DIR, self.mrb))
Example #3
    def _do_resize(self, new_size):
        try:
            self.volume_client.volumes.extend(self.volume_id, new_size)
        except cinder_exceptions.ClientException:
            LOG.exception(_("Error encountered trying to rescan or resize the "
                            "attached volume filesystem for volume: "
                            "%s") % self.volume_id)
            raise

        try:
            volume = self.volume_client.volumes.get(self.volume_id)
            if not volume:
                raise (cinder_exceptions.
                       ClientException(_('Failed to get volume with '
                                       'id: %(id)s') %
                                       {'id': self.volume_id}))
            utils.poll_until(
                lambda: self.volume_client.volumes.get(self.volume_id),
                lambda volume: volume.size == int(new_size),
                sleep_time=2,
                time_out=CONF.volume_time_out)
            self.update_db(volume_size=new_size)
        except PollTimeOut:
            LOG.error(_("Timeout trying to rescan or resize the attached "
                      "volume filesystem for volume %(vol_id)s of "
                      "instance: %(id)s") %
                      {'vol_id': self.volume_id, 'id': self.id})
        except Exception as e:
            LOG.exception(_("Error encountered trying to rescan or resize the "
                          "attached volume filesystem of volume %(vol_id)s of "
                          "instance %(id)s: %(e)s") %
                          {'vol_id': self.volume_id, 'id': self.id, 'e': e})
        finally:
            self.update_db(task_status=inst_models.InstanceTasks.NONE)
Example #4
def poll_until_then_raise(event, exc):
    try:
        utils.poll_until(event,
                         sleep_time=RESET_ROOT_SLEEP_INTERVAL,
                         time_out=RESET_ROOT_RETRY_TIMEOUT)
    except exception.PollTimeOut:
        # Re-raise the caller-supplied exception instead of the timeout.
        # Naming the parameter 'exception' would shadow the exception
        # module referenced on the line above.
        raise exc
Example #5
    def test_unassign_configuration_from_instances(self):
        # test to unassign configuration from instance
        instance_info.dbaas.instances.modify(configuration_instance.id,
                                             configuration="")
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
        instance_info.dbaas.instances.get(configuration_instance.id)
        # test that config group is not removed
        instance_info.dbaas.instances.modify(instance_info.id,
                                             configuration=None)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
        instance_info.dbaas.instances.get(instance_info.id)

        def result_has_no_configuration():
            instance = instance_info.dbaas.instances.get(inst_info.id)
            if hasattr(instance, 'configuration'):
                return False
            else:
                return True
        inst_info = instance_info
        poll_until(result_has_no_configuration)
        inst_info = configuration_instance
        poll_until(result_has_no_configuration)
        instance = instance_info.dbaas.instances.get(instance_info.id)
        assert_equal('RESTART_REQUIRED', instance.status)
Example #6
    def _resize_active_volume(self, new_size):
        try:
            LOG.debug("Instance %s calling stop_db..." % self.server.id)
            self.guest.stop_db()

            LOG.debug("Detach volume %s from instance %s" % (self.volume_id,
                                                             self.server.id))
            self.volume_client.volumes.detach(self.volume_id)

            utils.poll_until(
                lambda: self.volume_client.volumes.get(self.volume_id),
                lambda volume: volume.status == 'available',
                sleep_time=2,
                time_out=CONF.volume_time_out)

            LOG.debug("Successfully detach volume %s" % self.volume_id)
        except Exception as e:
            LOG.error("Failed to detach volume %s instance %s: %s" % (
                self.volume_id, self.server.id, str(e)))
            self.restart()
            raise

        self._do_resize(new_size)
        self.volume_client.volumes.attach(self.server.id, self.volume_id)

        self.restart()
Example #7
    def _wait_for_slave_status(self, status, client, max_time):

        def verify_slave_status():
            actual_status = client.execute(
                "SHOW GLOBAL STATUS like 'slave_running'").first()
            if actual_status:
                return actual_status[1].upper() == status.upper()
            # The slave_running status is no longer available in MySql 5.7
            # Need to query the performance_schema instead.
            LOG.debug("slave_running global status doesn't exist, checking "
                      "service_state in performance_schema instead.")
            q = sql_query.Query()
            q.columns = ["a.service_state", "c.service_state"]
            q.tables = ["performance_schema.replication_applier_status a",
                        "performance_schema.replication_connection_status c"]
            q.where = ["a.channel_name = ''", "c.channel_name = ''"]
            t = text(str(q))
            actual_status = client.execute(t).first()
            if (actual_status and actual_status[0].upper() == 'ON' and
                    actual_status[1].upper() == 'ON'):
                actual_status_str = 'ON'
            else:
                actual_status_str = 'OFF'
            return actual_status_str == status.upper()

        LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status)
        try:
            utils.poll_until(verify_slave_status, sleep_time=3,
                             time_out=max_time)
            LOG.info(_("Replication is now %s.") % status.lower())
        except PollTimeOut:
            raise RuntimeError(
                _("Replication is not %(status)s after %(max)d seconds.") % {
                    'status': status.lower(), 'max': max_time})
Example #8
 def resize_volume(self, new_size):
     old_volume_size = self.volume_size
     new_size = int(new_size)
     LOG.debug("%s: Resizing volume for instance: %s from %s to %r GB"
               % (greenthread.getcurrent(), self.server.id,
                  old_volume_size, new_size))
     self.volume_client.volumes.resize(self.volume_id, new_size)
     try:
         utils.poll_until(
             lambda: self.volume_client.volumes.get(self.volume_id),
             lambda volume: volume.status == 'in-use',
             sleep_time=2,
             time_out=CONF.volume_time_out)
         volume = self.volume_client.volumes.get(self.volume_id)
         self.update_db(volume_size=volume.size)
         self.nova_client.volumes.rescan_server_volume(self.server,
                                                       self.volume_id)
         self.send_usage_event('modify_volume',
                               old_volume_size=old_volume_size,
                               launched_at=timeutils.isotime(),
                               modify_at=timeutils.isotime(),
                               volume_size=new_size)
     except PollTimeOut as pto:
         LOG.error("Timeout trying to rescan or resize the attached volume "
                   "filesystem for volume: %s" % self.volume_id)
     except Exception as e:
         LOG.error(e)
         LOG.error("Error encountered trying to rescan or resize the "
                   "attached volume filesystem for volume: %s"
                   % self.volume_id)
     finally:
         self.update_db(task_status=inst_models.InstanceTasks.NONE)
Example #9
    def test_instance_resize_flavor(self):
        """Tests the resize instance/flavor API."""

        # Check that the instance exists before dereferencing it.
        if not getattr(self, 'instance', None):
            raise SkipTest(
                "Skipping this test since instance is not available.")

        flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
                                        'm1.medium')
        flavors = self.instance.dbaas.find_flavors_by_name(flavor_name)
        new_flavor = flavors[0]

        asserts.assert_true(new_flavor is not None,
                            "Flavor '%s' not found!" % flavor_name)

        self.rd_client = create_dbaas_client(self.instance.user)
        self.rd_client.instances.resize_instance(self.instance.id,
                                                 new_flavor.id)

        asserts.assert_equal(202, self.rd_client.last_http_code)
        test_instance = self.rd_client.instances.get(self.instance.id)
        asserts.assert_equal("RESIZE", test_instance.status)

        poll_until(lambda: self._find_status(self.rd_client,
                                             self.instance.id, "ACTIVE"),
                   sleep_time=SLEEP_TIME, time_out=TIMEOUT)

        test_instance = self.rd_client.instances.get(self.instance.id)
        asserts.assert_equal(int(test_instance.flavor['id']), new_flavor.id)
        self.report.log("Resized Flavor for Instance ID: %s to %s." % (
            self.instance.id, new_flavor.id))
Example #10
    def test_nova_resize_timeout(self):
        self._stop_db()
        self.server.resize(NEW_FLAVOR_ID)

        self.mock.StubOutWithMock(utils, 'poll_until')
        utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)\
            .AndRaise(PollTimeOut)
Example #11
    def test_bad_change_user_password(self):
        password = ""
        users = [{"name": password}]

        def _check_instance_status():
            inst = self.dbaas.instances.get(self.instance)
            if inst.status == "ACTIVE":
                return True
            else:
                return False

        poll_until(_check_instance_status)
        try:
            self.dbaas.users.change_passwords(self.instance, users)
        except Exception as e:
            resp, body = self.dbaas.client.last_response
            httpCode = resp.status
            assert_equal(httpCode, 400,
                         "Change usr/passwd failed with code %s, exception %s"
                         % (httpCode, e))
            if not isinstance(self.dbaas.client,
                              troveclient.compat.xml.TroveXmlClient):
                password = "******"
                assert_equal(e.message,
                             "Validation error: "
                             "users[0] 'password' is a required property; "
                             "users[0]['name'] %s is too short; "
                             "users[0]['name'] %s does not match "
                             "'^.*[0-9a-zA-Z]+.*$'" %
                             (password, password))
Example #12
    def test_wait_until_cluster_is_active(self):
        if not getattr(self, 'cluster', None):
            raise SkipTest(
                "Skipping this test since cluster is not available.")

        def result_is_active():
            cluster = self.rd_client.clusters.get(self.cluster.id)
            cluster_instances = [
                self.rd_client.instances.get(instance['id'])
                for instance in cluster.instances]
            self.report.log("Cluster info %s." % cluster._info)
            self.report.log("Cluster instances info %s." % cluster_instances)
            if cluster.task['name'] == "NONE":

                if ["ERROR"] * len(cluster_instances) == [
                   str(instance.status) for instance in cluster_instances]:
                    self.report.log("Cluster provisioning failed.")
                    asserts.fail("Cluster provisioning failed.")

                if ["ACTIVE"] * len(cluster_instances) == [
                   str(instance.status) for instance in cluster_instances]:
                    self.report.log("Cluster is ready.")
                    return True
            else:
                asserts.assert_not_equal(
                    ["ERROR"] * len(cluster_instances),
                    [instance.status
                     for instance in cluster_instances])
            self.report.log("Continue polling, cluster is not ready yet.")

        poll_until(result_is_active, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
        self.report.log("Created cluster, ID = %s." % self.cluster.id)
Example #13
    def _wait_for_nova_action(self):
        # Wait for the flavor to change.
        def update_server_info():
            self.instance._refresh_compute_server_info()
            return self.instance.server.status != "RESIZE"

        utils.poll_until(update_server_info, sleep_time=2, time_out=RESIZE_TIME_OUT)
Example #14
    def _wait_for_revert_nova_action(self):
        # Wait for the server to return to ACTIVE after revert.
        def update_server_info():
            self.instance._refresh_compute_server_info()
            return self.instance.server.status == "ACTIVE"

        utils.poll_until(update_server_info, sleep_time=2, time_out=REVERT_TIME_OUT)
Example #15
    def _do_resize(self, new_size):
        try:
            self.volume_client.volumes.extend(self.volume_id, new_size)
        except cinder_exceptions.ClientException:
            LOG.exception(
                "Error encountered trying to rescan or resize the "
                "attached volume filesystem for volume: "
                "%s" % self.volume_id
            )
            raise

        try:
            volume = self.volume_client.volumes.get(self.volume_id)
            utils.poll_until(
                lambda: self.volume_client.volumes.get(self.volume_id),
                lambda volume: volume.size == int(new_size),
                sleep_time=2,
                time_out=CONF.volume_time_out,
            )
            self.update_db(volume_size=new_size)
        except PollTimeOut as pto:
            LOG.error(
                "Timeout trying to rescan or resize the attached volume " "filesystem for volume: %s" % self.volume_id
            )
        except Exception as e:
            LOG.error(e)
            LOG.error(
                "Error encountered trying to rescan or resize the "
                "attached volume filesystem for volume: %s" % self.volume_id
            )
        finally:
            self.update_db(task_status=inst_models.InstanceTasks.NONE)
Example #16
    def _create_dns_entry(self):
        LOG.debug("%s: Creating dns entry for instance: %s" % (greenthread.getcurrent(), self.id))
        dns_support = CONF.trove_dns_support
        LOG.debug(_("trove dns support = %s") % dns_support)

        if dns_support:
            dns_client = create_dns_client(self.context)

            def get_server():
                c_id = self.db_info.compute_instance_id
                return self.nova_client.servers.get(c_id)

            def ip_is_available(server):
                LOG.info("Polling for ip addresses: $%s " % server.addresses)
                if server.addresses != {}:
                    return True
                elif server.addresses == {} and server.status != InstanceStatus.ERROR:
                    return False
                elif server.addresses == {} and server.status == InstanceStatus.ERROR:
                    msg = _("Instance IP not available, instance (%s): " "server had status (%s).")
                    LOG.error(msg % (self.id, server.status))
                    raise TroveError(status=server.status)

            poll_until(get_server, ip_is_available, sleep_time=1, time_out=DNS_TIME_OUT)
            server = self.nova_client.servers.get(self.db_info.compute_instance_id)
            LOG.info("Creating dns entry...")
            dns_client.create_instance_entry(self.id, get_ip_address(server.addresses))
        else:
            LOG.debug("%s: DNS not enabled for instance: %s" % (greenthread.getcurrent(), self.id))
Example #17
    def _create_server_volume_heat(self, flavor, image_id, security_groups, service_type, volume_size):
        client = create_heat_client(self.context)
        novaclient = create_nova_client(self.context)
        cinderclient = create_cinder_client(self.context)
        heat_template = template.HeatTemplate().template()
        parameters = {
            "KeyName": "heatkey",
            "Flavor": flavor["name"],
            "VolumeSize": volume_size,
            "ServiceType": "mysql",
            "InstanceId": self.id,
        }
        stack_name = "trove-%s" % self.id
        stack = client.stacks.create(stack_name=stack_name, template=heat_template, parameters=parameters)
        stack = client.stacks.get(stack_name)

        utils.poll_until(
            lambda: client.stacks.get(stack_name),
            lambda stack: stack.stack_status in ["CREATE_COMPLETE", "CREATE_FAILED"],
            sleep_time=2,
            time_out=HEAT_TIME_OUT,
        )

        resource = client.resources.get(stack.id, "BaseInstance")
        server = novaclient.servers.get(resource.physical_resource_id)

        resource = client.resources.get(stack.id, "DataVolume")
        volume = cinderclient.volumes.get(resource.physical_resource_id)
        volume_info = self._build_volume(volume)

        self.update_db(compute_instance_id=server.id, volume_id=volume.id)

        return server, volume_info
Example #18
    def test_bad_change_user_password(self):
        password = ""
        users = [{"name": password}]

        def _check_instance_status():
            inst = self.dbaas.instances.get(self.instance)
            if inst.status == "ACTIVE":
                return True
            else:
                return False

        poll_until(_check_instance_status)
        try:
            self.dbaas.users.change_passwords(self.instance, users)
        except Exception as e:
            resp, body = self.dbaas.client.last_response
            httpCode = resp.status
            asserts.assert_equal(httpCode, 400,
                                 "Change usr/passwd failed with code %s, "
                                 "exception %s" % (httpCode, e))
            password = "******"
            assert_contains(
                e.message,
                ["Validation error: users[0] 'password' "
                 "is a required property",
                 "users[0]['name'] %s is too short" % password,
                 "users[0]['name'] %s does not match "
                 "'^.*[0-9a-zA-Z]+.*$'" % password])
Example #19
    def reboot(self):
        try:
            LOG.debug(_("Instance %s calling stop_db...") % self.id)
            self.guest.stop_db()
            LOG.debug(_("Rebooting instance %s") % self.id)
            self.server.reboot()

            # Poll nova until instance is active
            reboot_time_out = CONF.reboot_time_out

            def update_server_info():
                self._refresh_compute_server_info()
                return self.server.status == 'ACTIVE'

            utils.poll_until(
                update_server_info,
                sleep_time=2,
                time_out=reboot_time_out)

            # Set the status to PAUSED. The guest agent will reset the status
            # when the reboot completes and MySQL is running.
            self._set_service_status_to_paused()
            LOG.debug(_("Successfully rebooted instance %s") % self.id)
        except Exception as e:
            LOG.error(_("Failed to reboot instance %(id)s: %(e)s") %
                      {'id': self.id, 'e': str(e)})
        finally:
            LOG.debug(_("Rebooting FINALLY  %s") % self.id)
            self.update_db(task_status=inst_models.InstanceTasks.NONE)
Example #20
    def assert_all_instance_states(self, instance_ids, expected_states,
                                   fast_fail_status=None,
                                   require_all_states=False):
        self.report.log("Waiting for states (%s) for instances: %s" %
                        (expected_states, instance_ids))

        def _make_fn(inst_id):
            return lambda: self._assert_instance_states(
                inst_id, expected_states,
                fast_fail_status=fast_fail_status,
                require_all_states=require_all_states)

        tasks = [
            build_polling_task(
                _make_fn(instance_id),
                sleep_time=self.def_sleep_time,
                time_out=self.def_timeout) for instance_id in instance_ids]
        poll_until(lambda: all(poll_task.ready() for poll_task in tasks),
                   sleep_time=self.def_sleep_time, time_out=self.def_timeout)

        for task in tasks:
            if task.has_result():
                self.assert_true(
                    task.poll_result(),
                    "Some instances failed to acquire all expected states.")
            elif task.has_exception():
                self.fail(str(task.poll_exception()))
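
The _make_fn(inst_id) factory in the example above is what keeps each polling task bound to its own instance id: a lambda written directly inside the list comprehension would close over the loop variable, and every task would end up polling only the last id. A small, dependency-free sketch of the difference (plain Python, unrelated to the Trove code itself):

ids = ["a", "b", "c"]

# Late binding: every lambda shares the comprehension variable, so by the
# time the callables run they all see its final value.
late = [lambda: i for i in ids]
print([f() for f in late])       # ['c', 'c', 'c']


# Early binding through a factory, as _make_fn does above (a default
# argument, lambda i=i: i, achieves the same thing).
def make_fn(bound_id):
    return lambda: bound_id


early = [make_fn(i) for i in ids]
print([f() for f in early])      # ['a', 'b', 'c']
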
Example #21
    def test_bad_resize_vol_data(self):
        def _check_instance_status():
            inst = self.dbaas.instances.get(self.instance)
            if inst.status == "ACTIVE":
                return True
            else:
                return False

        poll_until(_check_instance_status)
        data = "bad data"
        try:
            self.dbaas.instances.resize_volume(self.instance.id, data)
        except Exception as e:
            resp, body = self.dbaas.client.last_response
            httpCode = resp.status
            asserts.assert_equal(httpCode, 400,
                                 "Resize instance failed with code %s, "
                                 "exception %s" % (httpCode, e))
            data = "u'bad data'"
            assert_contains(
                e.message,
                ["Validation error:",
                 "resize['volume']['size'] %s is not valid under "
                 "any of the given schemas" % data,
                 "%s is not of type 'integer'" % data,
                 "%s does not match '^[0-9]+$'" % data])
Example #22
    def test_instance_delete(self):
        """Tests the instance delete."""
        if not getattr(self, 'instance', None):
            raise SkipTest(
                "Skipping this test since instance is not available.")

        self.rd_client = create_dbaas_client(self.instance.user)
        self.rd_client.instances.delete(self.instance.id)

        asserts.assert_equal(202, self.rd_client.last_http_code)
        test_instance = self.rd_client.instances.get(self.instance.id)
        asserts.assert_equal("SHUTDOWN", test_instance.status)

        def _poll():
            try:
                instance = self.rd_client.instances.get(self.instance.id)
                self.report.log("Instance info %s" % instance._info)
                asserts.assert_equal("SHUTDOWN", instance.status)
                return False
            except exceptions.NotFound:
                self.report.log("Instance has gone.")
                asserts.assert_equal(404, self.rd_client.last_http_code)
                return True

        poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
        self.report.log("Deleted Instance ID: %s " % self.instance.id)
Example #23
    def test_backup_delete(self):
        """test delete"""

        # Test to make sure that user in other tenant is not able
        # to DELETE this backup
        reqs = Requirements(is_admin=False)
        other_user = CONFIG.users.find_user(
            reqs,
            black_list=[instance_info.user.auth_user])
        other_client = create_dbaas_client(other_user)
        assert_raises(exceptions.NotFound, other_client.backups.delete,
                      backup_info.id)

        instance_info.dbaas.backups.delete(backup_info.id)
        assert_equal(202, instance_info.dbaas.last_http_code)

        def backup_is_gone():
            result = instance_info.dbaas.instances.backups(instance_info.id)
            if len(result) == 0:
                return True
            else:
                return False
        poll_until(backup_is_gone)
        assert_raises(exceptions.NotFound, instance_info.dbaas.backups.get,
                      backup_info.id)
Example #24
    def _resize_active_volume(self, new_size):
        try:
            LOG.debug(_("Instance %s calling stop_db...") % self.server.id)
            self.guest.stop_db()

            LOG.debug(_("Detach volume %(vol_id)s from instance %(id)s") %
                      {'vol_id': self.volume_id, 'id': self.server.id})
            self.volume_client.volumes.detach(self.volume_id)

            utils.poll_until(
                lambda: self.volume_client.volumes.get(self.volume_id),
                lambda volume: volume.status == 'available',
                sleep_time=2,
                time_out=CONF.volume_time_out)

            LOG.debug(_("Successfully detach volume %s") % self.volume_id)
        except Exception as e:
            LOG.debug(_("end _resize_active_volume for id: %s") %
                      self.server.id)
            LOG.exception(_("Failed to detach volume %(volume_id)s "
                            "instance %(id)s: %(e)s" %
                          {'volume_id': self.volume_id, 'id':
                            self.server.id, 'e': str(e)}))
            self.restart()
            raise

        self._do_resize(new_size)
        self.volume_client.volumes.attach(self.server.id, self.volume_id)
        LOG.debug(_("end _resize_active_volume for id: %s") % self.server.id)
        self.restart()
Example #25
    def create_instance(self, flavor, image_id, databases, users,
                        service_type, volume_size, security_groups,
                        backup_id, availability_zone, root_password):
        if use_heat:
            server, volume_info = self._create_server_volume_heat(
                flavor,
                image_id,
                security_groups,
                service_type,
                volume_size,
                availability_zone)
        elif use_nova_server_volume:
            server, volume_info = self._create_server_volume(
                flavor['id'],
                image_id,
                security_groups,
                service_type,
                volume_size,
                availability_zone)
        else:
            server, volume_info = self._create_server_volume_individually(
                flavor['id'],
                image_id,
                security_groups,
                service_type,
                volume_size,
                availability_zone)

        try:
            self._create_dns_entry()
        except Exception as e:
            msg = "Error creating DNS entry for instance: %s" % self.id
            err = inst_models.InstanceTasks.BUILDING_ERROR_DNS
            self._log_and_raise(e, msg, err)

        config = self._render_config(service_type, flavor, self.id)

        if server:
            self._guest_prepare(server, flavor['ram'], volume_info,
                                databases, users, backup_id,
                                config.config_contents, root_password)

        if not self.db_info.task_status.is_error:
            self.update_db(task_status=inst_models.InstanceTasks.NONE)

        # Make sure the service becomes active before sending a usage
        # record to avoid over billing a customer for an instance that
        # fails to build properly.
        try:
            utils.poll_until(self._service_is_active,
                             sleep_time=USAGE_SLEEP_TIME,
                             time_out=USAGE_TIMEOUT)
            self.send_usage_event('create', instance_size=flavor['ram'])
        except PollTimeOut:
            LOG.error("Timeout for service changing to active. "
                      "No usage create-event sent.")
            self.update_statuses_on_time_out()
        except Exception:
            LOG.exception("Error during create-event call.")
Example #26
    def add_members(self, members):
        """
        This method is used by a replica-set member instance.
        """
        def clean_json(val):
            """
            This method removes from json, values that are functions like
            ISODate(), TimeStamp().
            """
            return re.sub(r':\s*\w+\(\"?(.*?)\"?\)', r': "\1"', val)

        def check_initiate_status():
            """
            This method is used to verify replica-set status.
            """
            out, err = self.do_mongo("rs.status()")
            response = clean_json(out.strip())
            json_data = json.loads(response)

            if ((json_data["ok"] == 1) and
                    (json_data["members"][0]["stateStr"] == "PRIMARY") and
                    (json_data["myState"] == 1)):
                return True
            else:
                return False

        def check_rs_status():
            """
            This method is used to verify replica-set status.
            """
            out, err = self.do_mongo("rs.status()")
            response = clean_json(out.strip())
            json_data = json.loads(response)
            primary_count = 0

            if json_data["ok"] != 1:
                return False
            if len(json_data["members"]) != (len(members) + 1):
                return False
            for rs_member in json_data["members"]:
                if rs_member["state"] not in [1, 2, 7]:
                    return False
                if rs_member["health"] != 1:
                    return False
                if rs_member["state"] == 1:
                    primary_count += 1

            return primary_count == 1

        # initiate replica-set
        self.do_mongo("rs.initiate()")
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_initiate_status, sleep_time=60, time_out=100)

        # add replica-set members
        for member in members:
            self.do_mongo('rs.add("' + member + '")')
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_rs_status, sleep_time=60, time_out=100)
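
The clean_json helper above rewrites Mongo shell constructs such as ISODate(...) or Timestamp(...) into plain quoted strings so that json.loads can parse the rs.status() output. A standalone illustration of that substitution (the sample document is made up for the demonstration):

import json
import re

raw = '{ "date" : ISODate("2014-01-01T00:00:00Z"), "ok" : 1 }'
cleaned = re.sub(r':\s*\w+\(\"?(.*?)\"?\)', r': "\1"', raw)
print(cleaned)                    # { "date" : "2014-01-01T00:00:00Z", "ok" : 1 }
print(json.loads(cleaned)["ok"])  # 1
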
Example #27
    def test_slave_user_removed(self):
        if CONFIG.fake_mode:
            raise SkipTest("Test not_read_only not supported in fake mode")

        def _slave_user_deleted():
            return _get_user_count(instance_info) == 0

        poll_until(_slave_user_deleted)
Example #28
 def wait_for_resize(self):
     def is_finished_resizing():
         instance = self.instance
         if instance.status == "RESIZE":
             return False
         asserts.assert_equal("ACTIVE", instance.status)
         return True
     poll_until(is_finished_resizing, time_out=TIME_OUT_TIME)
Example #29
    def test_detach_replica(self):
        if CONFIG.fake_mode:
            raise SkipTest("Detach replica not supported in fake mode")

        instance_info.dbaas.instances.edit(slave_instance.id, detach_replica_source=True)
        assert_equal(202, instance_info.dbaas.last_http_code)

        poll_until(slave_is_running(False))
Example #30
 def _assert_guest_is_ok(self):
     # The guest will never set the status to PAUSED.
     self.instance._set_service_status_to_paused()
     # Now we wait until it sets it to anything at all,
     # so we know it's alive.
     utils.poll_until(
         self._guest_is_awake,
         sleep_time=2,
         time_out=RESIZE_TIME_OUT)
Example #31
    def detach_slave(self, service, for_failover):
        """Promote replica and wait for its running.

        Running on replica, detach from the primary.
        """
        service.adm.query("select pg_promote()")

        def _wait_for_failover():
            """Wait until slave has switched out of recovery mode"""
            return not service.is_replica()

        try:
            utils.poll_until(_wait_for_failover, time_out=60)
        except exception.PollTimeOut:
            raise exception.TroveError(
                "Timeout occurred waiting for replica to exit standby mode")
Example #32
    def _wait_for_user_create(self, instance_id, expected_user_defs):
        expected_user_names = {user_def['name']
                               for user_def in expected_user_defs}
        self.report.log("Waiting for all created users to appear in the "
                        "listing: %s" % expected_user_names)

        def _all_exist():
            all_users = self._get_user_names(instance_id)
            return all(usr in all_users for usr in expected_user_names)

        try:
            poll_until(_all_exist, time_out=self.GUEST_CAST_WAIT_TIMEOUT_SEC)
            self.report.log("All users now exist on the instance.")
        except exception.PollTimeOut:
            self.fail("Some users were not created within the poll "
                      "timeout: %ds" % self.GUEST_CAST_WAIT_TIMEOUT_SEC)
Example #33
    def test_delete_configuration_instance(self):
        # test that we can delete the instance even though there is a
        # configuration applied to the instance
        instance_info.dbaas.instances.delete(configuration_instance.id)
        assert_equal(202, instance_info.dbaas.last_http_code)

        def instance_is_gone():
            try:
                instance_info.dbaas.instances.get(configuration_instance.id)
                return False
            except exceptions.NotFound:
                return True

        poll_until(instance_is_gone)
        assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get,
                      configuration_instance.id)
Example #34
    def verify_instance_is_active(self, instance_id):
        # This version just checks the REST API status.
        def result_is_active():
            instance = instance_info.dbaas.instances.get(instance_id)
            if instance.status in CONFIG.running_status:
                return True
            else:
                # If its not ACTIVE, anything but BUILD must be
                # an error.
                assert_equal("BUILD", instance.status)
                if instance_info.volume is not None:
                    assert_equal(instance.volume.get('used', None), None)
                return False

        poll_until(result_is_active, sleep_time=5,
                   time_out=TIMEOUT_INSTANCE_CREATE)
Example #35
    def _wait_for_slave_status(self, status, client, max_time):

        def verify_slave_status():
            actual_status = client.execute(
                "SHOW GLOBAL STATUS like 'slave_running'").first()[1]
            return actual_status.upper() == status.upper()

        LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status)
        try:
            utils.poll_until(verify_slave_status, sleep_time=3,
                             time_out=max_time)
            LOG.info("Replication is now %s.", status.lower())
        except PollTimeOut:
            raise RuntimeError(
                _("Replication is not %(status)s after %(max)d seconds.") % {
                    'status': status.lower(), 'max': max_time})
Example #36
 def test_api_get(self):
     """Wait until the volume is finished provisioning."""
     volume = poll_until(lambda: self.story.get_volume(),
                         lambda volume: volume["status"] != "creating")
     self.assertEqual(volume["status"], "available")
     self.assert_volume_as_expected(volume)
     self.assertTrue(volume["attach_status"], "detached")
Example #37
    def test_restart_service_after_unassign_return_active(self):
        def result_is_not_active():
            instance = instance_info.dbaas.instances.get(instance_info.id)
            if instance.status in CONFIG.running_status:
                return False
            else:
                return True

        poll_until(result_is_not_active)

        config = instance_info.dbaas.configurations.list()
        print(config)
        instance = instance_info.dbaas.instances.get(instance_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
        assert_equal('RESTART_REQUIRED', instance.status)
Example #38
    def wait_for_database_create(self, instance_id, expected_database_defs):
        expected_db_names = {db_def['name']
                             for db_def in expected_database_defs}
        self.report.log("Waiting for all created databases to appear in the "
                        "listing: %s" % expected_db_names)

        def _all_exist():
            all_dbs = self.get_db_names(instance_id)
            return all(db in all_dbs for db in expected_db_names)

        try:
            poll_until(_all_exist, time_out=self.GUEST_CAST_WAIT_TIMEOUT_SEC)
            self.report.log("All databases now exist on the instance.")
        except exception.PollTimeOut:
            self.fail("Some databases were not created within the poll "
                      "timeout: %ds" % self.GUEST_CAST_WAIT_TIMEOUT_SEC)
Example #39
    def _delete_resources(self, deleted_at):
        server_id = self.db_info.compute_instance_id
        old_server = self.nova_client.servers.get(server_id)
        try:
            if use_heat:
                # Delete the server via heat
                heatclient = create_heat_client(self.context)
                name = 'trove-%s' % self.id
                heatclient.stacks.delete(name)
            else:
                self.server.delete()
        except Exception as ex:
            LOG.error("Error during delete compute server %s " %
                      self.server.id)
            LOG.error(ex)
        try:
            dns_support = CONF.trove_dns_support
            LOG.debug(_("trove dns support = %s") % dns_support)
            if dns_support:
                dns_api = create_dns_client(self.context)
                dns_api.delete_instance_entry(instance_id=self.db_info.id)
        except Exception as ex:
            LOG.error("Error during dns entry for instance %s " %
                      self.db_info.id)
            LOG.error(ex)
        # Poll until the server is gone.

        def server_is_finished():
            try:
                server = self.nova_client.servers.get(server_id)
                if server.status not in ['SHUTDOWN', 'ACTIVE']:
                    msg = "Server %s got into ERROR status during delete " \
                          "of instance %s!" % (server.id, self.id)
                    LOG.error(msg)
                return False
            except nova_exceptions.NotFound:
                return True

        try:
            utils.poll_until(server_is_finished,
                             sleep_time=2,
                             time_out=CONF.server_delete_time_out)
        except PollTimeOut:
            LOG.exception("Timout during nova server delete.")
        self.send_usage_event('delete',
                              deleted_at=timeutils.isotime(deleted_at),
                              server=old_server)
Example #40
    def _start_mysqld_safe_with_init_file(self, init_file, err_log_file):
        child = pexpect.spawn(
            "sudo mysqld_safe --init-file=%s --log-error=%s" %
            (init_file.name, err_log_file.name))
        try:
            index = child.expect(['Starting mysqld daemon'])
            if index == 0:
                LOG.info(_("Starting MySQL"))
        except pexpect.TIMEOUT:
            LOG.exception(_("Got a timeout launching mysqld_safe"))
        finally:
            # There is a race condition here where we kill mysqld before
            # the init file has been executed. We need to ensure mysqld is up.
            #
            # mysqld_safe will start even if init-file statement(s) fail.
            # We therefore also check for errors in the log file.
            self.poll_until_then_raise(
                self.mysql_is_running,
                base.RestoreError("Reset root password failed:"
                                  " mysqld did not start!"))
            first_err_message = self._find_first_error_message(err_log_file)
            if first_err_message:
                raise base.RestoreError("Reset root password failed: %s" %
                                        first_err_message)

            LOG.info(_("Root password reset successfully."))
            LOG.debug("Cleaning up the temp mysqld process.")
            utils.execute_with_timeout("mysqladmin", "-uroot",
                                       "--protocol=tcp", "shutdown")
            LOG.debug("Polling for shutdown to complete.")
            try:
                utils.poll_until(self.mysql_is_not_running,
                                 sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
                                 time_out=self.RESET_ROOT_RETRY_TIMEOUT)
                LOG.debug("Database successfully shutdown")
            except exception.PollTimeOut:
                LOG.debug("Timeout shutting down database "
                          "- performing killall on mysqld_safe.")
                utils.execute_with_timeout("killall",
                                           "mysqld_safe",
                                           root_helper="sudo",
                                           run_as_root=True)
                self.poll_until_then_raise(
                    self.mysql_is_not_running,
                    base.RestoreError("Reset root password failed: "
                                      "mysqld did not stop!"))
Example #41
    def detach_slave(self, service, for_failover):
        """Touch trigger file in to disable recovery mode"""
        LOG.info(_("Detaching slave, use trigger to disable recovery mode"))
        operating_system.write_file(TRIGGER_FILE, '')
        operating_system.chown(TRIGGER_FILE, user=service.pgsql_owner,
                               group=service.pgsql_owner, as_root=True)

        def _wait_for_failover():
            """Wait until slave has switched out of recovery mode"""
            return not service.pg_is_in_recovery()

        try:
            utils.poll_until(_wait_for_failover, time_out=120)

        except exception.PollTimeOut:
            raise RuntimeError(_("Timeout occurred waiting for slave to exit"
                                 "recovery mode"))
Example #42
    def test_instance_restart(self):
        """Tests the restart API."""
        if not getattr(self, 'instance', None):
            raise SkipTest(
                "Skipping this test since instance is not available.")

        self.rd_client = create_dbaas_client(self.instance.user)
        self.rd_client.instances.restart(self.instance.id)

        asserts.assert_equal(202, self.rd_client.last_http_code)
        test_instance = self.rd_client.instances.get(self.instance.id)
        asserts.assert_equal("REBOOT", test_instance.status)

        poll_until(lambda: self._find_status(self.rd_client,
                                             self.instance.id, "ACTIVE"),
                   sleep_time=SLEEP_TIME, time_out=TIMEOUT)
        self.report.log("Restarted Instance: %s." % self.instance.id)
Example #43
    def _assert_instance_states(self,
                                instance_id,
                                expected_states,
                                fast_fail_status=None,
                                require_all_states=False):
        """Keep polling for the expected instance states until the instance
        acquires either the last or fast-fail state.

        If the instance state does not match the state expected at the time of
        polling (and 'require_all_states' is not set), the code assumes the
        instance has already passed through that state and moves on to the
        next expected state.
        """

        self.report.log("Waiting for states (%s) for instance: %s" %
                        (expected_states, instance_id))

        if fast_fail_status is None:
            fast_fail_status = ['ERROR', 'FAILED']
        found = False
        for status in expected_states:
            if require_all_states or found or self._has_status(
                    instance_id, status, fast_fail_status=fast_fail_status):
                found = True
                start_time = timer.time()
                try:
                    poll_until(lambda: self._has_status(
                        instance_id, status, fast_fail_status=fast_fail_status
                    ),
                               sleep_time=self.def_sleep_time,
                               time_out=self.def_timeout)
                    self.report.log(
                        "Instance '%s' has gone '%s' in %s." %
                        (instance_id, status, self._time_since(start_time)))
                except exception.PollTimeOut:
                    self.report.log(
                        "Status of instance '%s' did not change to '%s' "
                        "after %s." %
                        (instance_id, status, self._time_since(start_time)))
                    return False
            else:
                self.report.log(
                    "Instance state was not '%s', moving to the next expected "
                    "state." % status)

        return found
Example #44
    def test_bad_resize_instance_data(self):
        def _check_instance_status():
            inst = self.dbaas.instances.get(self.instance)
            if inst.status == "ACTIVE":
                return True
            else:
                return False

        poll_until(_check_instance_status)
        try:
            self.dbaas.instances.resize_instance(self.instance.id, "")
        except Exception as e:
            resp, body = self.dbaas.client.last_response
            httpCode = resp.status
            asserts.assert_equal(
                httpCode, 400, "Resize instance failed with code %s, "
                "exception %s" % (httpCode, e))
Example #45
        def _shrink_cluster():
            def all_instances_marked_deleted():
                non_deleted_instances = DBInstance.find_all(
                    cluster_id=cluster_id, deleted=False).all()
                non_deleted_ids = [
                    db_instance.id for db_instance in non_deleted_instances
                ]
                return not bool(
                    set(instance_ids).intersection(set(non_deleted_ids)))

            try:
                utils.poll_until(all_instances_marked_deleted,
                                 sleep_time=2,
                                 time_out=CONF.cluster_delete_time_out)
            except PollTimeOut:
                LOG.error(_("timeout for instances to be marked as deleted."))
                return
Example #46
    def wait_for_txn(self, context, txn):
        if not self.app.pg_is_in_recovery():
            raise RuntimeError(
                _("Attempting to wait for a txn on a server "
                  "not in recovery mode!"))

        def _wait_for_txn():
            lsn = self.app.pg_last_xlog_replay_location()
            LOG.info("Last xlog location found: %s" % lsn)
            return lsn >= txn

        try:
            utils.poll_until(_wait_for_txn, time_out=120)
        except exception.PollTimeOut:
            raise RuntimeError(
                _("Timeout occurred waiting for xlog "
                  "offset to change to '%s'.") % txn)
Example #47
 def test_get_missing_volume(self):
     try:
         volume = poll_until(lambda: self.story.api.get(self.story.context,
                                                     self.story.volume_id),
                             lambda volume: volume["status"] != "deleted")
         self.assertEqual(volume["deleted"], False)
     except exception.VolumeNotFound:
         pass
Example #48
    def test_volume_resize_success(self):
        """test_volume_resize_success"""
        def check_resize_status():
            instance = instance_info.dbaas.instances.get(instance_info.id)
            if instance.status in CONFIG.running_status:
                return True
            elif instance.status in ["RESIZE", "SHUTDOWN"]:
                return False
            else:
                asserts.fail("Status should not be %s" % instance.status)

        poll_until(check_resize_status,
                   sleep_time=5,
                   time_out=300,
                   initial_delay=5)
        instance = instance_info.dbaas.instances.get(instance_info.id)
        asserts.assert_equal(instance.volume['size'], self.new_volume_size)
Example #49
    def test_delete_restored_instance(self):
        """test delete restored instance"""
        if test_config.auth_strategy == "fake":
            raise SkipTest("Skipping delete restored instance for fake mode.")
        instance_info.dbaas.instances.delete(restore_instance_id)
        assert_equal(202, instance_info.dbaas.last_http_code)

        def instance_is_gone():
            try:
                instance_info.dbaas.instances.get(restore_instance_id)
                return False
            except exceptions.NotFound:
                return True

        poll_until(instance_is_gone)
        assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get,
                      restore_instance_id)
Example #50
    def add_members(self, members):
        """
        This method is used by a replica-set member instance.
        """
        def check_initiate_status():
            """
            This method is used to verify replica-set status.
            """
            status = MongoDBAdmin().get_repl_status()

            if ((status["ok"] == 1)
                    and (status["members"][0]["stateStr"] == "PRIMARY")
                    and (status["myState"] == 1)):
                return True
            else:
                return False

        def check_rs_status():
            """
            This method is used to verify replica-set status.
            """
            status = MongoDBAdmin().get_repl_status()
            primary_count = 0

            if status["ok"] != 1:
                return False
            if len(status["members"]) != (len(members) + 1):
                return False
            for rs_member in status["members"]:
                if rs_member["state"] not in [1, 2, 7]:
                    return False
                if rs_member["health"] != 1:
                    return False
                if rs_member["state"] == 1:
                    primary_count += 1

            return primary_count == 1

        MongoDBAdmin().rs_initiate()
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_initiate_status, sleep_time=30, time_out=100)

        # add replica-set members
        MongoDBAdmin().rs_add_members(members)
        # TODO(ramashri) see if hardcoded values can be removed
        utils.poll_until(check_rs_status, sleep_time=10, time_out=100)
Example #51
    def test_assign_config_and_name_to_instance_using_patch(self):
        # test assigning a configuration and name to an instance
        new_name = 'new_name'
        report = CONFIG.get_report()
        report.log("instance_info.id: %s" % instance_info.id)
        report.log("configuration_info: %s" % configuration_info)
        report.log("configuration_info.id: %s" % configuration_info.id)
        report.log("instance name:%s" % instance_info.name)
        report.log("instance new name:%s" % new_name)
        saved_name = instance_info.name
        config_id = configuration_info.id
        instance_info.dbaas.instances.edit(instance_info.id,
                                           configuration=config_id,
                                           name=new_name)
        assert_equal(202, instance_info.dbaas.last_http_code)
        check = instance_info.dbaas.instances.get(instance_info.id)
        assert_equal(200, instance_info.dbaas.last_http_code)
        assert_equal(check.name, new_name)

        # restore instance name
        instance_info.dbaas.instances.edit(instance_info.id, name=saved_name)
        assert_equal(202, instance_info.dbaas.last_http_code)

        instance = instance_info.dbaas.instances.get(instance_info.id)
        assert_equal('RESTART_REQUIRED', instance.status)
        # restart to be sure configuration is applied
        instance_info.dbaas.instances.restart(instance_info.id)
        assert_equal(202, instance_info.dbaas.last_http_code)
        sleep(2)

        def result_is_active():
            instance = instance_info.dbaas.instances.get(instance_info.id)
            if instance.status == "ACTIVE":
                return True
            else:
                assert_equal("REBOOT", instance.status)
                return False

        poll_until(result_is_active)
        # test assigning a configuration to an instance that
        # already has an assigned configuration with patch
        config_id = configuration_info.id
        assert_raises(exceptions.BadRequest,
                      instance_info.dbaas.instances.edit,
                      instance_info.id,
                      configuration=config_id)
Example #52
    def wait_for_instance_status(self,
                                 instance_id,
                                 status="ACTIVE",
                                 acceptable_states=None):
        if acceptable_states:
            acceptable_states.append(status)

        def assert_state(instance):
            if acceptable_states:
                assert_true(instance.status in acceptable_states,
                            "Invalid status: %s" % instance.status)
            return instance

        poll_until(lambda: self.dbaas.instances.get(instance_id),
                   lambda instance: assert_state(instance).status == status,
                   time_out=30,
                   sleep_time=1)
Example #53
    def _create_server_volume_heat(self, flavor, image_id, datastore_manager,
                                   volume_size, availability_zone):
        LOG.debug(_("begin _create_server_volume_heat for id: %s") % self.id)
        client = create_heat_client(self.context)
        novaclient = create_nova_client(self.context)
        cinderclient = create_cinder_client(self.context)

        template_obj = template.load_heat_template(datastore_manager)
        heat_template_unicode = template_obj.render()
        try:
            heat_template = heat_template_unicode.encode('ascii')
        except UnicodeEncodeError:
            LOG.error(_("heat template ascii encode issue"))
            raise TroveError("heat template ascii encode issue")

        parameters = {
            "Flavor": flavor["name"],
            "VolumeSize": volume_size,
            "InstanceId": self.id,
            "ImageId": image_id,
            "DatastoreManager": datastore_manager,
            "AvailabilityZone": availability_zone
        }
        stack_name = 'trove-%s' % self.id
        client.stacks.create(stack_name=stack_name,
                             template=heat_template,
                             parameters=parameters)
        stack = client.stacks.get(stack_name)

        utils.poll_until(lambda: client.stacks.get(stack_name),
                         lambda stack: stack.stack_status in
                         ['CREATE_COMPLETE', 'CREATE_FAILED'],
                         sleep_time=2,
                         time_out=HEAT_TIME_OUT)

        resource = client.resources.get(stack.id, 'BaseInstance')
        server = novaclient.servers.get(resource.physical_resource_id)

        resource = client.resources.get(stack.id, 'DataVolume')
        volume = cinderclient.volumes.get(resource.physical_resource_id)
        volume_info = self._build_volume(volume)

        self.update_db(compute_instance_id=server.id, volume_id=volume.id)
        LOG.debug(_("end _create_server_volume_heat for id: %s") % self.id)
        return server, volume_info
Example #54
    def _poll(cls, instance_id_to_poll):
        """Shared "instance restored" test logic."""

        # This version just checks the REST API status.
        def result_is_active():
            instance = instance_info.dbaas.instances.get(instance_id_to_poll)
            if instance.status in CONFIG.running_status:
                return True
            else:
                # If its not ACTIVE, anything but BUILD must be
                # an error.
                assert_equal("BUILD", instance.status)
                if instance_info.volume is not None:
                    assert_equal(instance.volume.get('used', None), None)
                return False

        poll_until(result_is_active, time_out=TIMEOUT_INSTANCE_RESTORE,
                   sleep_time=10)
Example #55
    def _assert_cluster_states(self, cluster_id, expected_states,
                               fast_fail_status=None):
        for status in expected_states:
            start_time = timer.time()
            try:
                poll_until(lambda: self._has_task(
                    cluster_id, status, fast_fail_status=fast_fail_status),
                    sleep_time=self.def_sleep_time,
                    time_out=self.def_timeout)
                self.report.log("Cluster has gone '%s' in %s." %
                                (status, self._time_since(start_time)))
            except exception.PollTimeOut:
                self.report.log(
                    "Status of cluster '%s' did not change to '%s' after %s."
                    % (cluster_id, status, self._time_since(start_time)))
                return False

        return True
Example #56
    def test_slave_is_not_read_only(self):
        if CONFIG.fake_mode:
            raise SkipTest("Test not_read_only not supported in fake mode")

        # wait until replica is no longer read only
        def check_not_read_only():
            cmd = "mysql -BNq -e \\\'select @@read_only\\\'"
            server = create_server_connection(slave_instance.id)

            try:
                stdout = server.execute(cmd)
                stdout = int(stdout)
            except Exception:
                return False

            return stdout == 0

        poll_until(check_not_read_only)
Example #57
    def test_instance_restored(self):
        if test_config.auth_strategy == "fake":
            raise SkipTest("Skipping restore tests for fake mode.")

        # This version just checks the REST API status.
        def result_is_active():
            instance = instance_info.dbaas.instances.get(restore_instance_id)
            if instance.status == "ACTIVE":
                return True
            else:
                # If its not ACTIVE, anything but BUILD must be
                # an error.
                assert_equal("BUILD", instance.status)
                if instance_info.volume is not None:
                    assert_equal(instance.volume.get('used', None), None)
                return False

        poll_until(result_is_active)
Example #58
    def test_resize_down(self):
        expected_dbaas_flavor = self.expected_dbaas_flavor

        def is_active():
            return self.instance.status == 'ACTIVE'
        poll_until(is_active, time_out=TIME_OUT_TIME)
        assert_equal(self.instance.status, 'ACTIVE')

        old_flavor_href = self.get_flavor_href(
            flavor_id=self.expected_old_flavor_id)

        self.dbaas.instances.resize_instance(self.instance_id, old_flavor_href)
        assert_equal(202, self.dbaas.last_http_code)
        self.old_dbaas_flavor = instance_info.dbaas_flavor
        instance_info.dbaas_flavor = expected_dbaas_flavor
        self.wait_for_resize()
        assert_equal(str(self.instance.flavor['id']),
                     str(self.expected_old_flavor_id))
Example #59
    def _wait_all_deleted(self, instance_ids, expected_last_status):
        tasks = [
            build_polling_task(
                # Bind instance_id per task; a bare closure over the loop
                # variable would leave every task polling the last id.
                lambda instance_id=instance_id: self._wait_for_delete(
                    instance_id, expected_last_status),
                sleep_time=self.def_sleep_time,
                time_out=self.def_timeout)
            for instance_id in instance_ids
        ]
        poll_until(lambda: all(poll_task.ready() for poll_task in tasks),
                   sleep_time=self.def_sleep_time,
                   time_out=self.def_timeout)

        for task in tasks:
            if task.has_result():
                self.assert_true(task.poll_result(),
                                 "Some instances were not removed.")
            elif task.has_exception():
                self.fail(str(task.poll_exception()))
Example #60
    def test_resize(self):
        self.story.api.resize(self.story.context, self.story.volume_id,
                              self.story.resize_volume_size)

        volume = poll_until(lambda: self.story.get_volume(),
                            lambda volume: volume["status"] == "resized")
        self.assertEqual(volume["status"], "resized")
        self.assertTrue(volume["attach_status"], "attached")
        self.assertTrue(volume['size'], self.story.resize_volume_size)