Esempio n. 1
0
    def _allow_access_share(self, share, access_type, access, access_level):
        """Allow access to a share and wait for the rule to become active.

        :param share: :class:`Share`
        :param access_type: represents the access type (e.g: 'ip', 'domain'...)
        :param access: represents the object (e.g: '127.0.0.1'...)
        :param access_level: access level to the share (e.g: 'rw', 'ro')
        :returns: dict returned by the allow call (contains the rule "id")
        """
        access_result = share.allow(access_type, access, access_level)

        # Look the freshly-created rule up in the share's access list.
        # Use a distinct name so the ``access`` parameter is not shadowed.
        access_rule = next(rule for rule in share.access_list()
                           if rule.id == access_result["id"])

        fn = self._update_resource_in_allow_access_share(share,
                                                         access_result["id"])

        # We check if the access in that access_list has the active state
        utils.wait_for_status(
            access_rule,
            ready_statuses=["active"],
            update_resource=fn,
            check_interval=CONF.openstack.manila_access_create_poll_interval,
            timeout=CONF.openstack.manila_access_create_timeout)

        return access_result
Esempio n. 2
0
    def _live_migrate(self, server, block_migration=False,
                      disk_over_commit=False, skip_host_check=False):
        """Run live migration of the given server.

        :param server: Server object
        :param block_migration: Specifies the migration type
        :param disk_over_commit: Specifies whether to overcommit migrated
                                 instance or not
        :param skip_host_check: Specifies whether to verify the targeted host
                                availability
        :raises exceptions.RallyException: if the check is enabled and the
            instance is still on its original host after migration
        """
        server_admin = self.admin_clients("nova").servers.get(server.id)
        host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
        server_admin.live_migrate(block_migration=block_migration,
                                  disk_over_commit=disk_over_commit)
        utils.wait_for_status(
            server,
            ready_statuses=["ACTIVE"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.nova_server_live_migrate_timeout,
            check_interval=(
                CONF.openstack.nova_server_live_migrate_poll_interval)
        )
        # Only fetch the post-migration host when the check is requested;
        # this avoids a needless admin API call and matches the structure
        # of the companion ``_migrate`` helper.
        if not skip_host_check:
            server_admin = self.admin_clients("nova").servers.get(server.id)
            host_after_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
            if host_pre_migrate == host_after_migrate:
                raise exceptions.RallyException(
                    "Live Migration failed: Migration complete "
                    "but instance did not change host: %s" % host_pre_migrate)
Esempio n. 3
0
    def _migrate(self, server, skip_host_check=False):
        """Cold-migrate the given server and optionally verify the move.

        :param server: Server object
        :param skip_host_check: Specifies whether to verify the targeted host
                                availability
        """
        admin_server = self.admin_clients("nova").servers.get(server.id)
        source_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
        admin_server.migrate()
        utils.wait_for_status(
            server,
            ready_statuses=["VERIFY_RESIZE"],
            update_resource=utils.get_from_manager(),
            check_interval=(
                CONF.openstack.nova_server_migrate_poll_interval),
            timeout=CONF.openstack.nova_server_migrate_timeout)
        if skip_host_check:
            return
        # Re-read the server through the admin API and make sure the
        # migration actually moved it to a different host.
        admin_server = self.admin_clients("nova").servers.get(server.id)
        if source_host == getattr(admin_server, "OS-EXT-SRV-ATTR:host"):
            raise exceptions.RallyException(
                "Migration failed: Migration complete but instance"
                " did not change host: %s" % source_host)
Esempio n. 4
0
 def delete_image(self, image):
     """Delete *image* and block until the deletion is confirmed."""
     self.client.images.delete(image.id)
     utils.wait_for_status(
         image, ["deleted"],
         check_deletion=True,
         update_resource=self._update_image,
         check_interval=CONF.benchmark.glance_image_delete_poll_interval,
         timeout=CONF.benchmark.glance_image_delete_timeout)
Esempio n. 5
0
 def delete_image(self, image):
     """Delete *image* via its own manager and wait until it is gone."""
     image.delete()
     utils.wait_for_status(
         image, ["deleted"],
         check_deletion=True,
         update_resource=utils.get_from_manager(),
         check_interval=CONF.benchmark.glance_image_delete_poll_interval,
         timeout=CONF.benchmark.glance_image_delete_timeout)
Esempio n. 6
0
 def _wait_for_ping(self, server_ip):
     """Block until the host at *server_ip* answers ICMP pings."""
     host = Host(server_ip)
     utils.wait_for_status(
         host,
         ready_statuses=[Host.ICMP_UP_STATUS],
         update_resource=Host.update_status,
         check_interval=CONF.benchmark.vm_ping_poll_interval,
         timeout=CONF.benchmark.vm_ping_timeout,
     )
Esempio n. 7
0
 def delete(self):
     """Delete the wrapped image and wait until it actually disappears."""
     glance = self._client()
     glance().images.delete(self.raw_resource.id)
     task_utils.wait_for_status(
         self.raw_resource, ["deleted"],
         check_deletion=True,
         update_resource=self._wrapper().get_image,
         check_interval=CONF.benchmark.glance_image_delete_poll_interval,
         timeout=CONF.benchmark.glance_image_delete_timeout)
Esempio n. 8
0
 def _resize(self, server, flavor):
     """Resize *server* to *flavor* and wait for VERIFY_RESIZE."""
     server.resize(flavor)
     utils.wait_for_status(
         server,
         ready_statuses=["VERIFY_RESIZE"],
         update_resource=utils.get_from_manager(),
         check_interval=CONF.openstack.nova_server_resize_poll_interval,
         timeout=CONF.openstack.nova_server_resize_timeout,
     )
Esempio n. 9
0
 def _resize_revert(self, server, status="ACTIVE"):
     """Revert a pending resize and wait for the expected status.

     :param server: server whose resize is being reverted
     :param status: status to wait for after the revert (default "ACTIVE")
     """
     server.revert_resize()
     utils.wait_for_status(
         server,
         ready_statuses=[status],
         update_resource=utils.get_from_manager(),
         check_interval=(
             CONF.openstack.nova_server_resize_revert_poll_interval),
         timeout=CONF.openstack.nova_server_resize_revert_timeout,
     )
Esempio n. 10
0
 def _do_server_reboot(self, server, reboottype):
     """Reboot *server* with the given reboot type and wait for ACTIVE.

     NOTE(review): the pre-poll sleep reuses the *pause* option
     (``nova_server_pause_prepoll_delay``) — confirm this is intended.
     """
     server.reboot(reboot_type=reboottype)
     self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)
     utils.wait_for_status(
         server,
         ready_statuses=["ACTIVE"],
         update_resource=utils.get_from_manager(),
         check_interval=CONF.openstack.nova_server_reboot_poll_interval,
         timeout=CONF.openstack.nova_server_reboot_timeout,
     )
Esempio n. 11
0
 def delete(self):
     """Delete the image resource, reactivating it first if necessary.

     A deactivated image cannot be deleted directly, so it is
     reactivated before the delete call.
     """
     client = self._client()
     if self.raw_resource.status == "deactivated":
         glancev2 = glance_v2.GlanceV2Service(self.admin or self.user)
         glancev2.reactivate_image(self.raw_resource.id)
     client.delete_image(self.raw_resource.id)
     task_utils.wait_for_status(
         self.raw_resource, ["deleted"],
         check_deletion=True,
         # Reuse the client built above instead of constructing a second
         # one via another self._client() call.
         update_resource=client.get_image,
         timeout=CONF.openstack.glance_image_delete_timeout,
         check_interval=CONF.openstack.glance_image_delete_poll_interval)
Esempio n. 12
0
    def _shrink_share(self, share, new_size):
        """Shrink the given share and wait until it is available again.

        :param share: :class:`Share`
        :param new_size: new size of the share
        """
        share.shrink(new_size)
        utils.wait_for_status(
            share,
            ready_statuses=["available"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.openstack.manila_share_create_poll_interval,
            timeout=CONF.openstack.manila_share_create_timeout)
Esempio n. 13
0
    def _delete_share_network(self, share_network):
        """Delete the given share network and wait until it is gone.

        :param share_network: instance of :class:`ShareNetwork`.
        """
        share_network.delete()
        utils.wait_for_status(
            share_network,
            check_deletion=True,
            ready_statuses=["deleted"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.openstack.manila_share_delete_poll_interval,
            timeout=CONF.openstack.manila_share_delete_timeout)
Esempio n. 14
0
    def _delete_security_service(self, security_service):
        """Delete the given security service and wait for the deletion.

        :param security_service: instance of :class:`SecurityService`.
        """
        security_service.delete()
        utils.wait_for_status(
            security_service,
            check_deletion=True,
            ready_statuses=["deleted"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.openstack.manila_share_delete_poll_interval,
            timeout=CONF.openstack.manila_share_delete_timeout)
Esempio n. 15
0
 def _cleanup_images(self):
     """Delete every image this context created and wait for each one.

     Each deleted image id is also removed from the "compute" section of
     the config.
     """
     glance_wrapper = glance.wrap(self.clients.glance, self)
     for image in self._created_images:
         # Lazy %-args: the message is only rendered when DEBUG is enabled.
         LOG.debug("Deleting image '%s'", image.name)
         self.clients.glance().images.delete(image.id)
         task_utils.wait_for_status(
             image, ["deleted", "pending_delete"],
             check_deletion=True,
             update_resource=glance_wrapper.get_image,
             timeout=CONF.benchmark.glance_image_delete_timeout,
             check_interval=CONF.benchmark.
             glance_image_delete_poll_interval)
         self._remove_opt_value_from_config("compute", image.id)
Esempio n. 16
0
    def create_image(self, container_format, image_location,
                     disk_format, **kwargs):
        """Create a Glance v2 image and upload its data.

        :param container_format: container format of the image
        :param image_location: local file path or URL of the image data
        :param disk_format: disk format of the image
        :param kwargs: extra image properties; a "name" is generated when
                       absent and the deprecated "is_public" flag is
                       translated to "visibility"
        :returns: the image resource once it reaches "active"
        """
        kw = {
            "container_format": container_format,
            "disk_format": disk_format,
        }
        kw.update(kwargs)
        if "name" not in kw:
            kw["name"] = self.owner.generate_random_name()
        if "is_public" in kw:
            LOG.warning("is_public is not supported by Glance v2, and is "
                        "deprecated in Rally v0.8.0")
            kw["visibility"] = "public" if kw.pop("is_public") else "private"

        image_location = os.path.expanduser(image_location)

        image = self.client.images.create(**kw)

        rutils.interruptable_sleep(CONF.openstack.
                                   glance_image_create_prepoll_delay)

        start = time.time()
        image = utils.wait_for_status(
            image, ["queued"],
            update_resource=self.get_image,
            timeout=CONF.openstack.glance_image_create_timeout,
            check_interval=CONF.openstack.
            glance_image_create_poll_interval)
        # Track how long the first wait consumed so the final wait gets
        # the *remaining* budget, not the elapsed time itself.
        elapsed = time.time() - start

        image_data = None
        response = None
        try:
            if os.path.isfile(image_location):
                # Image payloads are binary: open in "rb" so upload()
                # receives bytes rather than decoded text.
                image_data = open(image_location, "rb")
            else:
                response = requests.get(image_location, stream=True)
                image_data = response.raw
            self.client.images.upload(image.id, image_data)
        finally:
            if image_data is not None:
                image_data.close()
            if response is not None:
                response.close()

        return utils.wait_for_status(
            image, ["active"],
            update_resource=self.get_image,
            timeout=CONF.openstack.glance_image_create_timeout - elapsed,
            check_interval=CONF.openstack.
            glance_image_create_poll_interval)
Esempio n. 17
0
 def _attach_volume(self, server, volume, device=None):
     """Attach *volume* to *server* and wait until it is "in-use".

     NOTE(review): the timeout/poll options reuse the resize-revert
     settings — confirm that is intentional.

     :returns: the attachment returned by the Nova volumes API
     """
     attachment = self.clients("nova").volumes.create_server_volume(
         server.id, volume.id, device)
     utils.wait_for_status(
         volume,
         ready_statuses=["in-use"],
         update_resource=self._update_volume_resource,
         check_interval=(
             CONF.openstack.nova_server_resize_revert_poll_interval),
         timeout=CONF.openstack.nova_server_resize_revert_timeout,
     )
     return attachment
Esempio n. 18
0
    def _resume_stack(self, stack):
        """Resume the given stack and wait for RESUME_COMPLETE.

        :param stack: stack that needs to be resumed
        """
        self.clients("heat").actions.resume(stack.id)
        utils.wait_for_status(
            stack,
            ready_statuses=["RESUME_COMPLETE"],
            failure_statuses=["RESUME_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.openstack.heat_stack_resume_poll_interval,
            timeout=CONF.openstack.heat_stack_resume_timeout)
Esempio n. 19
0
    def delete_floating_ip(self, fip_id, wait=False):
        """Delete floating IP.

        :param fip_id: int floating IP id
        :param wait: if True then wait to return until floating ip is deleted
        """
        self.client.floating_ips.delete(fip_id)
        if not wait:
            return
        # The raw id is passed as the waited "resource"; update_resource
        # re-fetches the floating IP from it, and with do_raise=True a
        # missing IP raises instead of returning None, which together with
        # check_deletion=True signals the deletion is complete.
        # NOTE(review): no explicit timeout/check_interval is given here,
        # so wait_for_status's defaults apply — confirm that is intended.
        task_utils.wait_for_status(
            fip_id,
            ready_statuses=["deleted"],
            check_deletion=True,
            update_resource=lambda i: self._get_floating_ip(i, do_raise=True))
Esempio n. 20
0
    def _suspend_stack(self, stack):
        """Suspend the given stack and wait for SUSPEND_COMPLETE.

        :param stack: stack that needs to be suspended
        """
        self.clients("heat").actions.suspend(stack.id)
        utils.wait_for_status(
            stack,
            ready_statuses=["SUSPEND_COMPLETE"],
            failure_statuses=["SUSPEND_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.openstack.heat_stack_suspend_poll_interval,
            timeout=CONF.openstack.heat_stack_suspend_timeout)
Esempio n. 21
0
 def _cleanup_images(self):
     """Delete every image this context created and wait for each one.

     Each deleted image id is also removed from the "compute" section of
     the config.
     """
     image_service = image.Image(self.clients)
     for image_obj in self._created_images:
         # Lazy %-args: messages are only rendered when DEBUG is enabled.
         LOG.debug("Deleting image '%s'.", image_obj.name)
         self.clients.glance().images.delete(image_obj.id)
         task_utils.wait_for_status(
             image_obj, ["deleted", "pending_delete"],
             check_deletion=True,
             update_resource=image_service.get_image,
             timeout=conf.CONF.openstack.glance_image_delete_timeout,
             check_interval=conf.CONF.openstack.
             glance_image_delete_poll_interval)
         LOG.debug("Image '%s' has been deleted.", image_obj.name)
         self._remove_opt_value_from_config("compute", image_obj.id)
Esempio n. 22
0
    def _delete_share(self, share):
        """Delete the given share and wait for the deletion to finish.

        :param share: :class:`Share`
        """
        share.delete()
        # "error_deleting" is treated as a failure state while waiting.
        utils.wait_for_status(
            share,
            check_deletion=True,
            ready_statuses=["deleted"],
            update_resource=utils.get_from_manager(("error_deleting",)),
            check_interval=CONF.openstack.manila_share_delete_poll_interval,
            timeout=CONF.openstack.manila_share_delete_timeout)
Esempio n. 23
0
    def _delete_cluster(self, cluster):
        """Delete the given Senlin cluster.

        Returns after the cluster is successfully deleted.

        :param cluster: cluster object to delete
        """
        self.admin_clients("senlin").delete_cluster(cluster)
        # No check_interval: the waiter's default poll interval applies.
        utils.wait_for_status(
            cluster,
            check_deletion=True,
            ready_statuses=["DELETED"],
            failure_statuses=["ERROR"],
            update_resource=self._get_cluster,
            timeout=CONF.benchmark.senlin_action_timeout)
Esempio n. 24
0
File: utils.py Project: rvbaz/rally
    def _delete_stack(self, stack):
        """Delete the given stack and wait until it is actually gone.

        :param stack: stack object
        """
        stack.delete()
        utils.wait_for_status(
            stack,
            check_deletion=True,
            ready_statuses=["deleted"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.benchmark.heat_stack_delete_poll_interval,
            timeout=CONF.benchmark.heat_stack_delete_timeout)
Esempio n. 25
0
    def _delete_image(self, image):
        """Delete the given image and wait for the deletion to complete.

        :param image: Image object
        """
        self.clients("glance").images.delete(image.id)
        glance_wrap = glance_wrapper.wrap(self._clients.glance, self)
        utils.wait_for_status(
            image, ["deleted", "pending_delete"],
            check_deletion=True,
            update_resource=glance_wrap.get_image,
            check_interval=CONF.openstack.glance_image_delete_poll_interval,
            timeout=CONF.openstack.glance_image_delete_timeout)
Esempio n. 26
0
    def _check_stack(self, stack):
        """Run a check on the given stack and its resources.

        :param stack: stack that needs to be checked
        """
        self.clients("heat").actions.check(stack.id)
        # "CHECK_FAILED" is also passed to the manager as an error state.
        utils.wait_for_status(
            stack,
            ready_statuses=["CHECK_COMPLETE"],
            failure_statuses=["CHECK_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(["CHECK_FAILED"]),
            check_interval=CONF.openstack.heat_stack_check_poll_interval,
            timeout=CONF.openstack.heat_stack_check_timeout)
Esempio n. 27
0
    def _delete_image(self, image):
        """Delete the given image and block until it disappears.

        :param image: Image object
        """
        image.delete()
        utils.wait_for_status(
            image,
            check_deletion=True,
            ready_statuses=["deleted"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.benchmark.glance_image_delete_poll_interval,
            timeout=CONF.benchmark.glance_image_delete_timeout)
Esempio n. 28
0
 def _create_audit(self, audit_template_uuid):
     """Launch a ONESHOT Watcher audit and wait for it to succeed.

     :param audit_template_uuid: uuid of the audit template to run
     :returns: the created audit object
     """
     watcher = self.admin_clients("watcher")
     audit = watcher.audit.create(
         audit_template_uuid=audit_template_uuid,
         audit_type="ONESHOT")
     # Watcher audits report their state via the "state" attribute and
     # are identified by "uuid" rather than "id".
     utils.wait_for_status(
         audit,
         ready_statuses=["SUCCEEDED"],
         failure_statuses=["FAILED"],
         status_attr="state",
         id_attr="uuid",
         update_resource=utils.get_from_manager(),
         check_interval=CONF.benchmark.watcher_audit_launch_poll_interval,
         timeout=CONF.benchmark.watcher_audit_launch_timeout,
     )
     return audit
Esempio n. 29
0
    def _restore_stack(self, stack, snapshot_id):
        """Restore the given stack from a snapshot.

        :param stack: stack that will be restored from snapshot
        :param snapshot_id: id of given snapshot
        """
        self.clients("heat").stacks.restore(stack.id, snapshot_id)
        utils.wait_for_status(
            stack,
            ready_statuses=["RESTORE_COMPLETE"],
            failure_statuses=["RESTORE_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.openstack.heat_stack_restore_poll_interval,
            timeout=CONF.openstack.heat_stack_restore_timeout,
        )
Esempio n. 30
0
 def cleanup(self):
     """Delete all per-tenant images and wait for each deletion."""
     api_info = self.context["config"].get("api_versions")
     for user, tenant_id in rutils.iterate_per_tenants(
             self.context["users"]):
         clients = osclients.Clients(user["credential"], api_info=api_info)
         glance_wrap = glance_wrapper.wrap(clients.glance, self)
         tenant = self.context["tenants"][tenant_id]
         for image_id in tenant.get("images", []):
             clients.glance().images.delete(image_id)
             utils.wait_for_status(
                 clients.glance().images.get(image_id), ["deleted"],
                 check_deletion=True,
                 update_resource=glance_wrap.get_image,
                 timeout=CONF.benchmark.glance_image_delete_timeout,
                 check_interval=CONF.benchmark.
                 glance_image_delete_poll_interval)
Esempio n. 31
0
    def _live_migrate(self, server, block_migration=False,
                      disk_over_commit=False, skip_compute_nodes_check=False,
                      skip_host_check=False):
        """Live-migrate the given server and optionally verify the move.

        :param server: Server object
        :param block_migration: Specifies the migration type
        :param disk_over_commit: Specifies whether to overcommit migrated
                                 instance or not
        :param skip_compute_nodes_check: Specifies whether to verify the number
                                         of compute nodes
        :param skip_host_check: Specifies whether to verify the targeted host
                                availability
        """
        if not skip_compute_nodes_check:
            # Live migration needs at least one other host to move to.
            if len(self._list_hypervisors()) < 2:
                raise exceptions.RallyException("Less than 2 compute nodes,"
                                                " skipping Live Migration")

        admin_server = self.admin_clients("nova").servers.get(server.id)
        source_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
        admin_server.live_migrate(block_migration=block_migration,
                                  disk_over_commit=disk_over_commit)
        utils.wait_for_status(
            server,
            ready_statuses=["ACTIVE"],
            update_resource=utils.get_from_manager(),
            check_interval=(
                CONF.openstack.nova_server_live_migrate_poll_interval),
            timeout=CONF.openstack.nova_server_live_migrate_timeout,
        )
        if skip_host_check:
            return
        admin_server = self.admin_clients("nova").servers.get(server.id)
        if source_host == getattr(admin_server, "OS-EXT-SRV-ATTR:host"):
            raise exceptions.RallyException(
                "Live Migration failed: Migration complete "
                "but instance did not change host: %s" % source_host)
Esempio n. 32
0
    def _delete_image(self, image):
        """Delete the given image via GlanceUtils and wait for the deletion.

        :param image: Image object
        """
        LOG.warning("Method '_delete_image' of NovaScenario class is "
                    "deprecated since Rally 0.10.0. Use GlanceUtils instead.")
        glance = image_service.Image(self._clients,
                                     atomic_inst=self.atomic_actions())
        glance.delete_image(image.id)
        with atomic.ActionTimer(self, "glance.wait_for_delete"):
            utils.wait_for_status(
                image,
                ready_statuses=["deleted", "pending_delete"],
                check_deletion=True,
                update_resource=glance.get_image,
                check_interval=(
                    CONF.openstack.nova_server_image_delete_poll_interval),
                timeout=CONF.openstack.nova_server_image_delete_timeout,
            )
Esempio n. 33
0
    def _delete_server(self, server, force=False):
        """Delete the given server.

        Returns when the server is actually deleted.

        :param server: Server object
        :param force: If True, force_delete will be used instead of delete.
        """
        atomic_name = "nova.%sdelete_server" % ("force_" if force else "")
        with atomic.ActionTimer(self, atomic_name):
            # Pick the appropriate deletion call up front.
            delete = server.force_delete if force else server.delete
            delete()

            utils.wait_for_status(
                server,
                check_deletion=True,
                ready_statuses=["deleted"],
                update_resource=utils.get_from_manager(),
                check_interval=(
                    CONF.benchmark.nova_server_delete_poll_interval),
                timeout=CONF.benchmark.nova_server_delete_timeout)
Esempio n. 34
0
    def _boot_server(self, image, flavor,
                     auto_assign_nic=False, **kwargs):
        """Boot a server.

        Returns when the server is actually booted and in "ACTIVE" state.

        If multiple networks created by Network context are present, the first
        network found that isn't associated with a floating IP pool is used.

        :param image: image ID or instance for server creation
        :param flavor: int, flavor ID or instance for server creation
        :param auto_assign_nic: bool, whether or not to auto assign NICs
        :param kwargs: other optional parameters to initialize the server
        :returns: nova Server instance
        """
        server_name = self.generate_random_name()

        # Make sure the user's default security group is always applied.
        secgroup = self.context.get("user", {}).get("secgroup")
        if secgroup:
            groups = kwargs.setdefault("security_groups", [secgroup["name"]])
            if secgroup["name"] not in groups:
                groups.append(secgroup["name"])

        if auto_assign_nic and not kwargs.get("nics", False):
            nic = self._pick_random_nic()
            if nic:
                kwargs["nics"] = nic

        if ("nics" not in kwargs
                and "tenant" in self.context
                and "networks" in self.context["tenant"]):
            first_net = self.context["tenant"]["networks"][0]
            kwargs["nics"] = [{"net-id": first_net["id"]}]

        # Resolve NICs given by network name to their network ids.
        for nic in kwargs.get("nics", []):
            if not nic.get("net-id") and nic.get("net-name"):
                nic["net-id"] = self._get_network_id(nic["net-name"])

        with atomic.ActionTimer(self, "nova.boot_server"):
            server = self.clients("nova").servers.create(
                server_name, image, flavor, **kwargs)

            self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)
            server = utils.wait_for_status(
                server,
                ready_statuses=["ACTIVE"],
                update_resource=utils.get_from_manager(),
                check_interval=CONF.openstack.nova_server_boot_poll_interval,
                timeout=CONF.openstack.nova_server_boot_timeout,
            )
        return server
Esempio n. 35
0
    def _upload_volume_to_image(self, volume, force=False,
                                container_format="bare", disk_format="raw"):
        """Upload the given volume to image.

        Returns created image.

        :param volume: volume object
        :param force: flag to indicate whether to snapshot a volume even if
                      it's attached to an instance
        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
        :returns: Returns created image object
        """
        resp, img = volume.upload_to_image(force, self.generate_random_name(),
                                           container_format, disk_format)
        # upload_to_image switches the volume to "uploading", so wait for
        # it to come back to "available" before touching the image.
        volume = bench_utils.wait_for_status(
            volume,
            ready_statuses=["available"],
            update_resource=bench_utils.get_from_manager(),
            check_interval=CONF.openstack.cinder_volume_create_poll_interval,
            timeout=CONF.openstack.cinder_volume_create_timeout,
        )
        image_id = img["os-volume_upload_image"]["image_id"]
        image = self.clients("glance").images.get(image_id)
        wrapper = glance_wrapper.wrap(self._clients.glance, self)
        return bench_utils.wait_for_status(
            image,
            ready_statuses=["active"],
            update_resource=wrapper.get_image,
            check_interval=CONF.openstack.glance_image_create_poll_interval,
            timeout=CONF.openstack.glance_image_create_timeout,
        )
Esempio n. 36
0
    def _delete_servers(self, servers, force=False):
        """Delete multiple servers.

        :param servers: A list of servers to delete
        :param force: If True, force_delete will be used instead of delete.
        """
        atomic_name = "nova.%sdelete_servers" % ("force_" if force else "")
        with atomic.ActionTimer(self, atomic_name):
            # Fire off every deletion first, then wait on each server.
            for server in servers:
                (server.force_delete if force else server.delete)()

            for server in servers:
                utils.wait_for_status(
                    server,
                    check_deletion=True,
                    ready_statuses=["deleted"],
                    update_resource=utils.get_from_manager(),
                    check_interval=CONF.benchmark.
                    nova_server_delete_poll_interval,
                    timeout=CONF.benchmark.nova_server_delete_timeout)
Esempio n. 37
0
    def _migrate(self, server, skip_host_check=False):
        """Cold-migrate the given server and optionally verify the move.

        :param server: Server object
        :param skip_host_check: Specifies whether to verify the targeted host
                                availability
        """
        admin_server = self.admin_clients("nova").servers.get(server.id)
        source_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
        admin_server.migrate()
        utils.wait_for_status(
            server,
            ready_statuses=["VERIFY_RESIZE"],
            update_resource=utils.get_from_manager(),
            check_interval=CONF.benchmark.nova_server_migrate_poll_interval,
            timeout=CONF.benchmark.nova_server_migrate_timeout)
        if skip_host_check:
            return
        # Confirm the migration actually moved the instance.
        admin_server = self.admin_clients("nova").servers.get(server.id)
        if source_host == getattr(admin_server, "OS-EXT-SRV-ATTR:host"):
            raise exceptions.RallyException(
                _("Migration failed: Migration complete but instance"
                  " did not change host: %s") % source_host)
Esempio n. 38
0
    def _create_backup(self, volume_id, **kwargs):
        """Create a volume backup of the given volume.

        :param volume_id: The ID of the volume to backup.
        :param kwargs: Other optional parameters
        :returns: the backup once it becomes "available"
        """
        cinder = self.clients("cinder")
        backup = cinder.backups.create(volume_id, **kwargs)
        return bench_utils.wait_for_status(
            backup,
            ready_statuses=["available"],
            update_resource=bench_utils.get_from_manager(),
            check_interval=CONF.openstack.cinder_volume_create_poll_interval,
            timeout=CONF.openstack.cinder_volume_create_timeout,
        )
Esempio n. 39
0
    def create_image(self, image_name=None, container_format=None,
                     image_location=None, disk_format=None,
                     is_public=True, min_disk=0, min_ram=0,
                     properties=None):
        """Creates new image.

        :param image_name: Image name for which need to be created
        :param container_format: Container format
        :param image_location: The new image's location
        :param disk_format: Disk format
        :param is_public: The created image's public status
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: Dict of image properties
        :returns: the created image once it reaches "active"
        """
        image_location = os.path.expanduser(image_location)
        image_name = image_name or self.generate_random_name()
        kwargs = {}

        try:
            if os.path.isfile(image_location):
                # Image payloads are binary: open in "rb" so the client
                # streams bytes rather than decoded text.
                kwargs["data"] = open(image_location, "rb")
            else:
                # Remote location: let Glance fetch the data itself.
                kwargs["copy_from"] = image_location

            image_obj = self._clients.glance("1").images.create(
                name=image_name,
                container_format=container_format,
                disk_format=disk_format,
                is_public=is_public,
                min_disk=min_disk,
                min_ram=min_ram,
                properties=properties,
                **kwargs)

            rutils.interruptable_sleep(CONF.openstack.
                                       glance_image_create_prepoll_delay)

            image_obj = utils.wait_for_status(
                image_obj, ["active"],
                update_resource=self.get_image,
                timeout=CONF.openstack.glance_image_create_timeout,
                check_interval=CONF.openstack.glance_image_create_poll_interval
            )

        finally:
            # Always close the local file handle, even on failure.
            if "data" in kwargs:
                kwargs["data"].close()

        return image_obj
Esempio n. 40
0
    def _detach_volume(self, server, volume, attachment=None):
        """Detach a volume from a server and wait until it is free.

        :param server: server object the volume is attached to
        :param volume: volume object to detach
        :param attachment: DEPRECATED, kept only for backward compatibility
        """
        if attachment:
            LOG.warning("An argument `attachment` of `_detach_volume` is "
                        "deprecated in favor of `volume` argument since "
                        "Rally 0.10.0")

        self.clients("nova").volumes.delete_server_volume(server.id,
                                                          volume.id)
        # The detach is complete once cinder reports the volume "available".
        utils.wait_for_status(
            volume,
            ready_statuses=["available"],
            update_resource=self._update_volume_resource,
            timeout=CONF.openstack.nova_detach_volume_timeout,
            check_interval=CONF.openstack.nova_detach_volume_poll_interval
        )
Esempio n. 41
0
    def _restore_backup(self, backup_id, volume_id=None):
        """Restore a backup, waiting for both backup and volume readiness.

        :param backup_id: ID of the backup to restore
        :param volume_id: optional ID of the volume to restore into
        :returns: the restored volume object, once it is "available"
        """
        restore = self.clients("cinder").restores.restore(backup_id, volume_id)
        target_volume = self.clients("cinder").volumes.get(restore.volume_id)
        source_backup = self.clients("cinder").backups.get(backup_id)

        # First wait until the backup itself goes back to "available"...
        bench_utils.wait_for_status(
            source_backup,
            ready_statuses=["available"],
            update_resource=bench_utils.get_from_manager(),
            timeout=CONF.openstack.cinder_backup_restore_timeout,
            check_interval=CONF.openstack.cinder_backup_restore_poll_interval
        )
        # ...then wait for the restored volume and hand it to the caller.
        return bench_utils.wait_for_status(
            target_volume,
            ready_statuses=["available"],
            update_resource=bench_utils.get_from_manager(),
            timeout=CONF.openstack.cinder_volume_create_timeout,
            check_interval=CONF.openstack.cinder_volume_create_poll_interval
        )
Esempio n. 42
0
    def setup(self):
        """Boot a server for the context and optionally attach a floating IP.

        On success ``self.context['server']`` holds the booted server as a
        dict; on failure the error is logged and the context is left without
        a usable server entry.
        """
        try:
            credential = self.context['users'][0]['credential']
            client = osclients.Clients(credential).nova()
            server = client.servers.create(
                name=self.config.get('name', 'test'),
                image=None,
                userdata=self.config.get('userdata'),
                flavor=client.flavors.get(self.config.get('flavor', 2)),
                block_device_mapping_v2=list(self.config['block_device']),
                nics=list(self.config['nics']))
            self.context['server'] = server
            utils.wait_for_status(
                server, update_resource=client.servers.get,
                ready_statuses=['ACTIVE'], timeout=180, check_interval=5)

            server = client.servers.get(server.id)
            if self.context.get('floatingip'):
                neutron = osclients.Clients(credential).neutron()
                # Fix: dict.values() is a non-subscriptable view on
                # Python 3; materialize it before indexing.
                server_ip = list(server.networks.values())[0][0]
                # Find the neutron port carrying the server's first fixed
                # IP; the for/else + break pattern exits both loops once
                # a matching port is found.
                for port in neutron.list_ports()['ports']:
                    for ips in port['fixed_ips']:
                        if ips['ip_address'] == server_ip:
                            break
                    else:
                        continue
                    break

                neutron.update_floatingip(
                    self.context['floatingip']['id'],
                    {"floatingip": {"port_id": port['id']}})

            self.context['server'] = server.to_dict()
            LOG.debug("Server with id '%s'" % server.id)
        except Exception as e:
            # Best-effort setup: log (with traceback in debug mode) and
            # continue rather than aborting the whole run.
            msg = "Can't create server: %s" % e
            if logging.is_debug():
                LOG.exception(msg)
            else:
                LOG.warning(msg)
Esempio n. 43
0
    def _boot_servers(self, image_id, flavor_id, requests, instances_amount=1,
                      auto_assign_nic=False, **kwargs):
        """Boot multiple servers and wait until all of them are ACTIVE.

        :param image_id: ID of the image used for server creation
        :param flavor_id: ID of the flavor used for server creation
        :param requests: number of boot requests to issue
        :param instances_amount: instances booted per request
        :param auto_assign_nic: bool, whether or not to auto assign NICs
        :param kwargs: other optional parameters to initialize the servers
        :returns: list of created server objects
        """
        if auto_assign_nic and not kwargs.get("nics", False):
            nic = self._pick_random_nic()
            if nic:
                kwargs["nics"] = nic

        # Resolve network names to IDs where only a name was supplied.
        for nic in kwargs.get("nics", []):
            if not nic.get("net-id") and nic.get("net-name"):
                nic["net-id"] = self._get_network_id(nic["net-name"])

        name_prefix = self.generate_random_name()
        with atomic.ActionTimer(self, "nova.boot_servers"):
            for i in range(requests):
                self.clients("nova").servers.create(
                    "%s_%d" % (name_prefix, i),
                    image_id, flavor_id,
                    min_count=instances_amount,
                    max_count=instances_amount,
                    **kwargs)
            # The nova client returns a single server even when
            # min_count > 1, so rediscover every created server via the
            # shared name prefix.
            servers = [s for s in self.clients("nova").servers.list()
                       if s.name.startswith(name_prefix)]
            self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)
            servers = [utils.wait_for_status(
                server,
                ready_statuses=["ACTIVE"],
                update_resource=utils.get_from_manager(),
                timeout=CONF.openstack.nova_server_boot_timeout,
                check_interval=CONF.openstack.nova_server_boot_poll_interval
            ) for server in servers]
        return servers
Esempio n. 44
0
 def _metric_from_instance(self, seed, image, flavor, monitor_vip,
                           pushgateway_port, job_name):
     """Boot a server whose userdata pushes a metric to the pushgateway.

     :param seed: unique value used both as server name and in the metric
     :param image: image for the server
     :param flavor: flavor for the server
     :param monitor_vip: address of the monitoring VIP
     :param pushgateway_port: pushgateway service port
     :param job_name: pushgateway job name to publish under
     """
     push_cmd = (
         "echo %(seed)s 12345 | curl --data-binary "
         "@- http://%(monitor_vip)s:%(pgtw_port)s/metrics/job"
         "/%(job_name)s" % {"seed": seed,
                            "monitor_vip": monitor_vip,
                            "pgtw_port": pushgateway_port,
                            "job_name": job_name})
     # The push command runs once at boot via cloud-init userdata.
     userdata = "#!/bin/bash\n%s" % push_cmd
     server = self.clients("nova").servers.create(seed, image, flavor,
                                                  userdata=userdata)
     LOG.info("Server %s create started" % seed)
     self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)
     utils.wait_for_status(
         server,
         ready_statuses=["ACTIVE"],
         update_resource=utils.get_from_manager(),
         timeout=CONF.openstack.nova_server_boot_timeout,
         check_interval=CONF.openstack.nova_server_boot_poll_interval
     )
     LOG.info("Server %s with pushing metric script (metric exporter) is "
              "active" % seed)
Esempio n. 45
0
    def _set_alarm_state(self, alarm, state, timeout):
        """Transition an alarm into the given state and wait for it.

        :param alarm: alarm instance to update
        :param state: target alarm state
        :param timeout: seconds to keep polling for the state change
        :returns: the alarm object once it reports the requested state
        """
        ceilometer = self.clients("ceilometer")
        ceilometer.alarms.set_state(alarm.alarm_id, state)
        # Poll until the alarm reports the state we just requested.
        return bench_utils.wait_for_status(
            alarm,
            ready_statuses=[state],
            update_resource=bench_utils.get_from_manager(),
            timeout=timeout,
            check_interval=1)
Esempio n. 46
0
 def _detach_replica(self, instance):
     """Detach a replica instance from its replication source.

     :param instance: trove instance object, or its ID as a string
     :returns: the instance object once it is ACTIVE again
     """
     # Fix: the original checked ``isinstance(instance, unicode)``, which
     # raises NameError on Python 3 where ``unicode`` no longer exists.
     if isinstance(instance, str):
         instance = self.clients("trove").instances.get(instance)
     LOG.debug("Detach replica: [%s: %s]" % (instance.name, instance.id))
     instance.detach_replica()
     return utils.wait_for_status(
         instance,
         ready_statuses=["ACTIVE"],
         update_resource=utils.get_from_manager(),
         timeout=300,
         check_interval=1
     )
Esempio n. 47
0
 def _restart_instance(self, instance):
     """Restart a trove instance and wait until it is ACTIVE again.

     :param instance: trove instance object, or its ID as a string
     :returns: the restarted instance object
     """
     # Fix: the original checked ``isinstance(instance, unicode)``, which
     # raises NameError on Python 3 where ``unicode`` no longer exists.
     if isinstance(instance, str):
         instance = self.clients("trove").instances.get(instance)
     LOG.debug("Restart instance: [%s: %s]"
               % (instance.name, instance.id))
     instance.restart()
     return utils.wait_for_status(
         instance,
         ready_statuses=["ACTIVE"],
         update_resource=utils.get_from_manager(),
         timeout=120,
         check_interval=1
     )
Esempio n. 48
0
    def pool_create(self,
                    lb_id,
                    protocol,
                    lb_algorithm,
                    listener_id=None,
                    description=None,
                    admin_state_up=True,
                    project_id=None,
                    session_persistence=None):
        """Create an octavia pool and wait until it is ACTIVE.

        :param lb_id: ID of the loadbalancer
        :param protocol: protocol of the resource
        :param lb_algorithm: loadbalancing algorithm of the pool
        :param listener_id: ID of the listener
        :param description: a human readable description of the pool
        :param admin_state_up: administrative state of the resource
        :param project_id: project ID of the resource
        :param session_persistence: a json object specifying the session
            persistence of the pool
        :returns: a dict of the created pool's settings
        """
        request_body = {
            "name": self.generate_random_name(),
            "loadbalancer_id": lb_id,
            "protocol": protocol,
            "lb_algorithm": lb_algorithm,
            "listener_id": listener_id,
            "description": description,
            "admin_state_up": admin_state_up,
            "project_id": project_id,
            "session_persistence": session_persistence
        }
        response = self._clients.octavia().pool_create(
            json={"pool": request_body})
        # Octavia reports readiness via the provisioning_status attribute.
        return utils.wait_for_status(
            response["pool"],
            ready_statuses=["ACTIVE"],
            status_attr="provisioning_status",
            update_resource=self.update_pool_resource,
            timeout=CONF.openstack.octavia_create_loadbalancer_timeout,
            check_interval=(
                CONF.openstack.octavia_create_loadbalancer_poll_interval))
Esempio n. 49
0
    def _create_execution(self, workflow_identifier):
        """Start a workflow execution and wait for it to succeed.

        :param workflow_identifier: name or id of the workflow to execute
        :returns: the execution object in SUCCESS state
        """
        execution = self.clients("mistral").executions.create(
            workflow_identifier)
        # Fail fast if the workflow ends in ERROR instead of timing out.
        return utils.wait_for_status(
            execution,
            ready_statuses=["SUCCESS"],
            failure_statuses=["ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.benchmark.mistral_execution_timeout)
Esempio n. 50
0
    def _create_execution(self, workflow_identifier, wf_input=None, **params):
        """Start a workflow execution and wait for it to succeed.

        :param workflow_identifier: name or id of the workflow to execute
        :param wf_input: json string of mistral workflow input
        :param params: optional mistral params (this is the place to pass
                       environment).
        :returns: the execution object in SUCCESS state
        """
        execution = self.clients("mistral").executions.create(
            workflow_identifier, workflow_input=wf_input, **params)
        # Fail fast if the workflow ends in ERROR instead of timing out.
        return utils.wait_for_status(
            execution, ready_statuses=["SUCCESS"], failure_statuses=["ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.mistral_execution_timeout)
Esempio n. 51
0
    def _create_image(self, server):
        """Snapshot a server into an image named after the server.

        Blocks until the new image reaches the ACTIVE state.

        :param server: server object for which the image will be created
        :returns: the created image object
        """
        nova = self.clients("nova")
        image_uuid = nova.servers.create_image(server, server.name)
        image = nova.images.get(image_uuid)
        return utils.wait_for_status(
            image,
            ready_statuses=["ACTIVE"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.benchmark.nova_server_image_create_timeout,
            check_interval=(
                CONF.benchmark.nova_server_image_create_poll_interval))
Esempio n. 52
0
    def upload_volume_to_image(self,
                               volume,
                               force=False,
                               container_format="bare",
                               disk_format="raw"):
        """Upload a volume to glance and wait for the image to be active.

        :param volume: volume object
        :param force: flag to indicate whether to snapshot a volume even if
                      it's attached to an instance
        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
        :returns: the created image object
        """
        aname = "cinder_v%s.upload_volume_to_image" % self.version
        with atomic.ActionTimer(self, aname):
            resp, img = self._get_client().volumes.upload_to_image(
                volume, force, self.generate_random_name(), container_format,
                disk_format)
            # upload_to_image flips the volume into "uploading"; block
            # until it is available again before touching the image.
            volume = self._wait_available_volume(volume)

            glance = image.Image(self._clients)
            image_id = img["os-volume_upload_image"]["image_id"]
            image_inst = glance.get_image(image_id)
            return bench_utils.wait_for_status(
                image_inst,
                ready_statuses=["active"],
                update_resource=glance.get_image,
                timeout=CONF.benchmark.glance_image_create_timeout,
                check_interval=(
                    CONF.benchmark.glance_image_create_poll_interval))
Esempio n. 53
0
    def _create_stack(self,
                      template,
                      parameters=None,
                      files=None,
                      environment=None):
        """Create a heat stack and wait until creation completes.

        :param template: template with stack description.
        :param parameters: template parameters used during stack creation
        :param files: additional files used in template
        :param environment: stack environment definition
        :returns: the created stack object in CREATE_COMPLETE state
        """
        create_kwargs = {
            "stack_name": self.generate_random_name(),
            "disable_rollback": True,
            "parameters": parameters or {},
            "template": template,
            "files": files or {},
            "environment": environment or {}
        }

        # The heat client returns a plain body instead of a manager
        # object, so re-fetch the stack through the manager by its id.
        stack_id = self.clients("heat").stacks.create(
            **create_kwargs)["stack"]["id"]
        stack = self.clients("heat").stacks.get(stack_id)

        self.sleep_between(CONF.openstack.heat_stack_create_prepoll_delay)

        return utils.wait_for_status(
            stack,
            ready_statuses=["CREATE_COMPLETE"],
            failure_statuses=["CREATE_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_create_timeout,
            check_interval=CONF.openstack.heat_stack_create_poll_interval)
Esempio n. 54
0
    def _create_instance(self, flavor, datastore, datastore_version,
                         nics, volume, name=None, **kwargs):
        """Create a trove instance and wait until it becomes ACTIVE.

        :param flavor: flavor for the instance
        :param datastore: datastore type
        :param datastore_version: datastore version
        :param nics: NICs for the instance
        :param volume: volume definition for the instance
        :param name: optional instance name; random when omitted
        :param kwargs: extra arguments forwarded to instances.create()
        :returns: the created instance object
        """
        instance_name = name if name else self.generate_random_name()

        LOG.debug("Creating Instance with name %s" % instance_name)
        instance = self.clients("trove").instances.create(
            instance_name,
            flavor, datastore=datastore, datastore_version=datastore_version,
            nics=nics, volume=volume, **kwargs)
        # Short grace period before polling for the ACTIVE state.
        self.sleep_between(10)
        return utils.wait_for_status(
            instance,
            ready_statuses=["ACTIVE"],
            update_resource=utils.get_from_manager(),
            timeout=300,
            check_interval=1
        )
Esempio n. 55
0
    def _create_cluster(self,
                        profile_id,
                        desired_capacity=0,
                        min_size=0,
                        max_size=-1,
                        timeout=60,
                        metadata=None):
        """Create a senlin cluster and wait until it becomes ACTIVE.

        :param profile_id: ID of profile used to create cluster
        :param desired_capacity: The capacity or initial number of nodes
                                 owned by the cluster
        :param min_size: The minimum number of nodes owned by the cluster
        :param max_size: The maximum number of nodes owned by the cluster.
                         -1 means no limit
        :param timeout: The timeout value in minutes for cluster creation
        :param metadata: A set of key value pairs to associate with the cluster
        :returns: object of cluster created.
        """
        cluster = self.admin_clients("senlin").create_cluster(
            profile_id=profile_id,
            name=self.generate_random_name(),
            desired_capacity=desired_capacity,
            min_size=min_size,
            max_size=max_size,
            metadata=metadata,
            timeout=timeout)
        return utils.wait_for_status(
            cluster,
            ready_statuses=["ACTIVE"],
            failure_statuses=["ERROR"],
            update_resource=self._get_cluster,
            timeout=CONF.openstack.senlin_action_timeout)
Esempio n. 56
0
    def _create_share(self, share_proto, size=1, **kwargs):
        """Create a manila share and wait until it is available.

        :param share_proto: share protocol for new share,
            available values are NFS, CIFS, GlusterFS, HDFS and CEPHFS.
        :param size: size of a share in GB
        :param kwargs: optional arguments such as snapshot_id, name,
            description, metadata, share_network, share_type, is_public
        :returns: instance of :class:`Share`
        """
        if self.context:
            # Round-robin over the share networks prepared by the context,
            # unless the caller supplied one explicitly.
            share_networks = self.context.get("tenant", {}).get(
                consts.SHARE_NETWORKS_CONTEXT_NAME,
                {}).get("share_networks", [])
            if share_networks and not kwargs.get("share_network"):
                index = self.context["iteration"] % len(share_networks)
                kwargs["share_network"] = share_networks[index]["id"]

        if not kwargs.get("name"):
            kwargs["name"] = self.generate_random_name()

        share = self.clients("manila").shares.create(share_proto, size,
                                                     **kwargs)

        self.sleep_between(CONF.openstack.manila_share_create_prepoll_delay)
        return utils.wait_for_status(
            share,
            ready_statuses=["available"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.manila_share_create_timeout,
            check_interval=CONF.openstack.manila_share_create_poll_interval,
        )
Esempio n. 57
0
    def _create_lbaasv2_loadbalancer(self, subnet_id, **lb_create_args):
        """Create an LBaaS v2 loadbalancer and wait until it is ACTIVE.

        :param subnet_id: str, neutron subnet-id
        :param lb_create_args: dict, POST /lbaas/loadbalancers request options
        :returns: dict, neutron lb
        """
        # Caller-provided args intentionally override the generated
        # defaults (name, vip_subnet_id).
        body = {"name": self.generate_random_name(),
                "vip_subnet_id": subnet_id}
        body.update(lb_create_args)
        response = self.clients("neutron").create_loadbalancer(
            {"loadbalancer": body})
        # Readiness is reported via the provisioning_status attribute.
        return utils.wait_for_status(
            response["loadbalancer"],
            ready_statuses=["ACTIVE"],
            status_attr="provisioning_status",
            update_resource=self.update_loadbalancer_resource,
            timeout=CONF.openstack.neutron_create_loadbalancer_timeout,
            check_interval=(
                CONF.openstack.neutron_create_loadbalancer_poll_interval)
        )
Esempio n. 58
0
    def _extend_volume(self, volume, new_size):
        """Extend the given volume and wait until it is available again.

        :param volume: volume object
        :param new_size: new volume size in GB, or
                         dictionary, must contain two values:
                             min - minimum size volumes will be created as;
                             max - maximum size volumes will be created as.
                        Notice: should be bigger volume size
        :returns: the refreshed volume object once it is "available"
        """
        if isinstance(new_size, dict):
            new_size = random.randint(new_size["min"], new_size["max"])

        volume.extend(volume, new_size)
        # Fix: the refreshed volume was previously assigned to a local and
        # dropped; return it so callers can observe the new size.
        return bench_utils.wait_for_status(
            volume,
            ready_statuses=["available"],
            update_resource=bench_utils.get_from_manager(),
            timeout=CONF.openstack.cinder_volume_create_timeout,
            check_interval=CONF.openstack.cinder_volume_create_poll_interval)
Esempio n. 59
0
    def _update_stack(self,
                      stack,
                      template,
                      parameters=None,
                      files=None,
                      environment=None):
        """Update an existing heat stack and wait for completion.

        :param stack: stack that need to be updated
        :param template: Updated template
        :param parameters: template parameters for stack update
        :param files: additional files used in template
        :param environment: stack environment definition
        :returns: the updated stack object in UPDATE_COMPLETE state
        """
        update_kwargs = {
            "stack_name": stack.stack_name,
            "disable_rollback": True,
            "parameters": parameters or {},
            "template": template,
            "files": files or {},
            "environment": environment or {}
        }
        self.clients("heat").stacks.update(stack.id, **update_kwargs)

        self.sleep_between(CONF.openstack.heat_stack_update_prepoll_delay)

        return utils.wait_for_status(
            stack,
            ready_statuses=["UPDATE_COMPLETE"],
            failure_statuses=["UPDATE_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_update_timeout,
            check_interval=CONF.openstack.heat_stack_update_poll_interval)
Esempio n. 60
0
    def _create_image(self, server):
        """Snapshot a server into an image named after the server.

        Blocks until the new image reaches the ACTIVE state.

        :param server: Server object for which the image will be created
        :returns: Created image object
        """
        image_uuid = self.clients("nova").servers.create_image(
            server, server.name)
        glance = image_service.Image(self._clients,
                                     atomic_inst=self.atomic_actions())
        image = glance.get_image(image_uuid)
        # Time the wait separately so the snapshot call and the wait show
        # up as distinct atomic actions.
        with atomic.ActionTimer(self, "glance.wait_for_image"):
            image = utils.wait_for_status(
                image,
                ready_statuses=["ACTIVE"],
                update_resource=glance.get_image,
                timeout=CONF.benchmark.nova_server_image_create_timeout,
                check_interval=(
                    CONF.benchmark.nova_server_image_create_poll_interval))
        return image