def _live_migrate(self, server, target_host, block_migration=False,
                  disk_over_commit=False, skip_host_check=False):
    """Live-migrate the server and wait until it is ACTIVE again.

    :param server: Server object
    :param target_host: target compute node for the migration
    :param block_migration: use block migration for the move
    :param disk_over_commit: allow disk over-commit on the target host
    :param skip_host_check: do not verify that the server changed host
    """
    admin_server = self.admin_clients("nova").servers.get(server.id)
    source_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    admin_server.live_migrate(target_host,
                              block_migration=block_migration,
                              disk_over_commit=disk_over_commit)
    utils.wait_for(
        server,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("ACTIVE"),
        timeout=CONF.benchmark.nova_server_live_migrate_timeout,
        check_interval=(
            CONF.benchmark.nova_server_live_migrate_poll_interval))
    # Re-read the server as admin to learn where it ended up.
    admin_server = self.admin_clients("nova").servers.get(server.id)
    current_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    if not skip_host_check and source_host == current_host:
        raise exceptions.LiveMigrateException(
            "Migration complete but instance did not change host: %s"
            % source_host)
def _migrate(self, server, skip_host_check=False):
    """Cold-migrate the server and wait for VERIFY_RESIZE.

    :param server: Server object
    :param skip_host_check: do not verify that the server changed host
    """
    admin_server = self.admin_clients("nova").servers.get(server.id)
    source_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    admin_server.migrate()
    utils.wait_for(
        server,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("VERIFY_RESIZE"),
        timeout=CONF.benchmark.nova_server_migrate_timeout,
        check_interval=CONF.benchmark.nova_server_migrate_poll_interval)
    if skip_host_check:
        return
    admin_server = self.admin_clients("nova").servers.get(server.id)
    if source_host == getattr(admin_server, "OS-EXT-SRV-ATTR:host"):
        raise exceptions.MigrateException(
            "Migration complete but instance did not change host: %s"
            % source_host)
def _boot_servers(self, image_id, flavor_name, instance_num=1, **kwargs):
    """Boot several EC2 servers and wait until all are RUNNING.

    :param image_id: ID of the image used for server creation
    :param flavor_name: name of the flavor used for server creation
    :param instance_num: how many instances to boot
    :param kwargs: other optional parameters passed to run_instances
    :returns: list of created server objects
    """
    reservation = self.clients("ec2").run_instances(
        image_id=image_id,
        instance_type=flavor_name,
        min_count=instance_num,
        max_count=instance_num,
        **kwargs)
    booted = list(reservation.instances)
    # Short grace period before the first poll => fewer API calls.
    time.sleep(CONF.benchmark.ec2_server_boot_prepoll_delay)
    return [
        utils.wait_for(
            instance,
            is_ready=utils.resource_is("RUNNING"),
            update_resource=self._update_resource,
            timeout=CONF.benchmark.ec2_server_boot_timeout,
            check_interval=CONF.benchmark.ec2_server_boot_poll_interval)
        for instance in booted
    ]
def _update_stack(self, stack, template, parameters=None, files=None,
                  environment=None):
    """Update an existing heat stack and wait for UPDATE_COMPLETE.

    :param stack: stack that needs to be updated
    :param template: new template body
    :param parameters: template parameters for the update
    :param files: additional files referenced by the template
    :param environment: stack environment definition
    :returns: the updated stack object
    """
    update_args = dict(
        stack_name=stack.stack_name,
        disable_rollback=True,
        parameters=parameters or {},
        template=template,
        files=files or {},
        environment=environment or {},
    )
    self.clients("heat").stacks.update(stack.id, **update_args)
    # Give heat a moment to start the update before polling.
    time.sleep(CONF.benchmark.heat_stack_update_prepoll_delay)
    return utils.wait_for(
        stack,
        update_resource=utils.get_from_manager(["UPDATE_FAILED"]),
        is_ready=utils.resource_is("UPDATE_COMPLETE"),
        timeout=CONF.benchmark.heat_stack_update_timeout,
        check_interval=CONF.benchmark.heat_stack_update_poll_interval)
def _create_snapshot(self, volume_id, force=False, **kwargs):
    """Create a volume snapshot and wait until it is available.

    :param volume_id: uuid of the volume to snapshot
    :param force: snapshot the volume even if it is attached to
                  an instance
    :param kwargs: other optional parameters for the snapshot
    :returns: created snapshot object
    """
    kwargs.setdefault("display_name", self._generate_random_name())
    kwargs["force"] = force
    snapshot = self.clients("cinder").volume_snapshots.create(volume_id,
                                                              **kwargs)
    time.sleep(CONF.benchmark.cinder_volume_create_prepoll_delay)
    return bench_utils.wait_for(
        snapshot,
        update_resource=bench_utils.get_from_manager(),
        is_ready=bench_utils.resource_is("available"),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval)
def _create_volume(self, size, **kwargs):
    """Create a cinder volume and wait until it is available.

    :param size: volume size in GB (int), or a dict with "min"/"max"
                 keys from which a random size is drawn
    :param kwargs: other optional parameters for the volume
    :returns: created volume object
    """
    kwargs.setdefault("display_name", self._generate_random_name())
    if isinstance(size, dict):
        size = random.randint(size["min"], size["max"])
    volume = self.clients("cinder").volumes.create(size, **kwargs)
    # NOTE(msdubov): Waiting a few seconds before the first status
    #                check means fewer API calls overall.
    time.sleep(CONF.benchmark.cinder_volume_create_prepoll_delay)
    return bench_utils.wait_for(
        volume,
        update_resource=bench_utils.get_from_manager(),
        is_ready=bench_utils.resource_is("available"),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval)
def get_master_agent(self, router_id):
    """Return the L3 agent hosting *router_id* with ha_state "active".

    Waits until exactly one agent reports the router as active, then
    returns that agent's dict.

    :param router_id: id of the HA router to inspect
    :returns: agent dict of the active (master) L3 agent
    """
    net_admin = self._admin_clients.neutron()

    def get_actives(r):
        agents = net_admin.list_l3_agent_hosting_routers(r)
        # NOTE: materialize into a list — on Python 3 filter() returns
        # a lazy iterator, so len() and [0] below would raise TypeError.
        active_agents = [a for a in agents.get("agents", [])
                         if a.get("ha_state") == "active"]
        LOG.info("Router %s is ACTIVE on: %s"
                 % (r, [(a["id"], a["host"]) for a in active_agents]))
        return active_agents

    utils.wait_is_ready(
        router_id,
        is_ready=utils.resource_is(str(1),
                                   lambda x: str(len(get_actives(x)))),
        timeout=vmutils.CONF.benchmark.vm_ping_timeout,
        check_interval=vmutils.CONF.benchmark.vm_ping_poll_interval
    )
    masters = get_actives(router_id)
    LOG.info("Found router %s master on agent %s"
             % (router_id, (masters[0]["id"], masters[0]["host"])))
    return masters[0]
def _wait_for_ping(self, server_ip):
    """Block until *server_ip* answers ICMP pings or the timeout hits.

    :param server_ip: IP address of the server to ping
    """
    address = netaddr.IPAddress(server_ip)
    utils.wait_for(
        address,
        is_ready=utils.resource_is(ICMP_UP_STATUS,
                                   self._ping_ip_address),
        timeout=CONF.benchmark.vm_ping_timeout,
        check_interval=CONF.benchmark.vm_ping_poll_interval)
def _create_snapshot(self, volume_id, force=False, **kwargs):
    """Snapshot the given volume and wait for "available" status.

    :param volume_id: uuid of the volume to snapshot
    :param force: snapshot the volume even if it is attached to
                  an instance
    :param kwargs: other optional parameters for the snapshot
    :returns: created snapshot object
    """
    kwargs["force"] = force
    kwargs.setdefault("display_name", self._generate_random_name())
    cinder = self.clients("cinder")
    new_snapshot = cinder.volume_snapshots.create(volume_id, **kwargs)
    time.sleep(CONF.benchmark.cinder_volume_create_prepoll_delay)
    return bench_utils.wait_for(
        new_snapshot,
        is_ready=bench_utils.resource_is("available"),
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval)
def _create_share(self, share_proto, size=1, **kwargs):
    """Create a manila share and wait until it is available.

    :param share_proto: share protocol for the new share; available
                        values are NFS, CIFS, GlusterFS and HDFS
    :param size: size of the share in GB
    :param kwargs: optional extras such as snapshot_id, name,
                   description, metadata, share_network, share_type
                   and is_public
    :returns: instance of :class:`Share`
    """
    if not kwargs.get("name"):
        kwargs["name"] = self._generate_random_name()
    new_share = self.clients("manila").shares.create(share_proto, size,
                                                     **kwargs)
    time.sleep(CONF.benchmark.manila_share_create_prepoll_delay)
    return utils.wait_for(
        new_share,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("available"),
        timeout=CONF.benchmark.manila_share_create_timeout,
        check_interval=CONF.benchmark.manila_share_create_poll_interval)
def _boot_servers(self, image_id, flavor_name, instance_num=1, **kwargs):
    """Boot EC2 servers and block until every one is RUNNING.

    :param image_id: ID of the image used for server creation
    :param flavor_name: name of the flavor used for server creation
    :param instance_num: number of instances to boot
    :param kwargs: other optional parameters passed to run_instances
    :returns: list of created server objects
    """
    reservation = self.clients("ec2").run_instances(
        image_id=image_id, instance_type=flavor_name,
        min_count=instance_num, max_count=instance_num, **kwargs)
    # Let the instances settle before the first status poll.
    time.sleep(CONF.benchmark.ec2_server_boot_prepoll_delay)

    def _wait_running(instance):
        return utils.wait_for(
            instance,
            is_ready=utils.resource_is("RUNNING"),
            update_resource=self._update_resource,
            timeout=CONF.benchmark.ec2_server_boot_timeout,
            check_interval=CONF.benchmark.ec2_server_boot_poll_interval)

    return [_wait_running(instance) for instance in
            list(reservation.instances)]
def _create_volume(self, size, **kwargs):
    """Create one cinder volume and wait for "available" status.

    :param size: int size of the volume in GB, or a dict with "min"
                 and "max" bounds for a randomly chosen size
    :param kwargs: other optional parameters for the volume
    :returns: created volume object
    """
    kwargs.setdefault("display_name", self._generate_random_name())
    if isinstance(size, dict):
        size = random.randint(size["min"], size["max"])
    cinder = self.clients("cinder")
    new_volume = cinder.volumes.create(size, **kwargs)
    # NOTE(msdubov): A short pre-poll sleep keeps the number of status
    #                API calls down while the volume is building.
    time.sleep(CONF.benchmark.cinder_volume_create_prepoll_delay)
    return bench_utils.wait_for(
        new_volume,
        is_ready=bench_utils.resource_is("available"),
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval)
def _resize(self, server, flavor):
    """Resize the server to *flavor* and wait for VERIFY_RESIZE."""
    server.resize(flavor)
    utils.wait_for(
        server,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("VERIFY_RESIZE"),
        timeout=CONF.benchmark.nova_server_resize_timeout,
        check_interval=CONF.benchmark.nova_server_resize_poll_interval)
def _wait_for_ping(self, server_ip):
    """Wait until the given IP responds to ICMP ping.

    :param server_ip: IP address of the server to ping
    """
    target = netaddr.IPAddress(server_ip)
    ready_check = utils.resource_is(ICMP_UP_STATUS,
                                    self._ping_ip_address)
    utils.wait_for(target,
                   is_ready=ready_check,
                   timeout=CONF.benchmark.vm_ping_timeout,
                   check_interval=CONF.benchmark.vm_ping_poll_interval)
def _wait_for_ping(self, server_ip):
    """Wait until the given IP responds to ICMP ping.

    :param server_ip: IP address of the server to ping
    """
    server_ip = netaddr.IPAddress(server_ip)
    utils.wait_for(
        server_ip,
        is_ready=utils.resource_is(ICMP_UP_STATUS,
                                   self._ping_ip_address),
        # Use the configurable timeout/poll interval instead of a
        # hard-coded 120s so this helper behaves like its siblings
        # and deployments can tune the wait.
        timeout=CONF.benchmark.vm_ping_timeout,
        check_interval=CONF.benchmark.vm_ping_poll_interval)
def _resize_revert(self, server, status="ACTIVE"):
    """Revert a pending resize and wait for the expected status.

    :param server: server whose resize is reverted
    :param status: status to wait for after the revert
    """
    server.revert_resize()
    utils.wait_for(
        server,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is(status),
        timeout=CONF.benchmark.nova_server_resize_revert_timeout,
        check_interval=(
            CONF.benchmark.nova_server_resize_revert_poll_interval))
def _create_image(self, container_format, image_location, disk_format,
                  name=None, prefix=None, length=None, **kwargs):
    """Create a new glance image.

    :param container_format: container format of image. Acceptable
                             formats: ami, ari, aki, bare, and ovf
    :param image_location: image file location (local path or URL)
    :param disk_format: disk format of image. Acceptable formats: ami,
                        ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
    :param name: string used to name the image
    :param prefix: prefix of generated image name if name not specified
                   ignored if name specified
    :param length: length of automatically generated part of image name
                   ignored if name specified
    :param kwargs: optional parameters to create image
    :returns: image object
    """
    name = name or self._generate_random_name(prefix, length)
    kw = {
        "name": name,
        "container_format": container_format,
        "disk_format": disk_format,
    }
    kw.update(kwargs)
    image_location = os.path.expanduser(image_location)
    try:
        if os.path.isfile(image_location):
            # Open in binary mode: image payloads are binary data, and
            # text mode would fail/corrupt the upload on Python 3.
            kw["data"] = open(image_location, "rb")
        else:
            kw["copy_from"] = image_location
        image = self.clients("glance").images.create(**kw)
        time.sleep(CONF.benchmark.glance_image_create_prepoll_delay)
        image = utils.wait_for(
            image,
            is_ready=utils.resource_is("active"),
            update_resource=utils.get_from_manager(),
            timeout=CONF.benchmark.glance_image_create_timeout,
            check_interval=CONF.benchmark.glance_image_create_poll_interval
        )
    finally:
        # Always release the file handle, even if creation failed.
        if "data" in kw:
            kw["data"].close()
    return image
def _do_server_reboot(self, server, reboottype):
    """Reboot the server with the given type and wait until ACTIVE.

    :param server: server to reboot
    :param reboottype: reboot type passed to nova (e.g. SOFT/HARD)
    """
    server.reboot(reboot_type=reboottype)
    # Give the reboot a head start before polling the status.
    time.sleep(CONF.benchmark.nova_server_reboot_prepoll_delay)
    utils.wait_for(
        server,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("ACTIVE"),
        timeout=CONF.benchmark.nova_server_reboot_timeout,
        check_interval=CONF.benchmark.nova_server_reboot_poll_interval)
def _wait_for_swarm_ping(self, swarm_connection):
    """Wait until the swarm connection reports "UP ALL" status."""
    ready_check = utils.resource_is("UP ALL", swarm_connection.status)
    utils.wait_for(
        None,
        is_ready=ready_check,
        timeout=CONF.benchmark.vm_swarm_ping_timeout,
        check_interval=CONF.benchmark.vm_swarm_ping_poll_interval)
def _resize(self, server, flavor):
    """Ask nova to resize *server* to *flavor*, then wait for
    VERIFY_RESIZE."""
    server.resize(flavor)
    wait_timeout = CONF.benchmark.nova_server_resize_timeout
    wait_interval = CONF.benchmark.nova_server_resize_poll_interval
    utils.wait_for(server,
                   is_ready=utils.resource_is("VERIFY_RESIZE"),
                   update_resource=utils.get_from_manager(),
                   timeout=wait_timeout,
                   check_interval=wait_interval)
def _detach_volume(self, server, volume):
    """Detach *volume* from *server* and wait until it is available.

    :param server: server the volume is attached to
    :param volume: volume to detach
    """
    self.clients("nova").volumes.delete_server_volume(server.id,
                                                      volume.id)
    utils.wait_for(
        volume,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("available"),
        timeout=CONF.benchmark.nova_detach_volume_timeout,
        check_interval=CONF.benchmark.nova_detach_volume_poll_interval)
def _resize_revert(self, server, status="ACTIVE"):
    """Revert the server's resize and wait for *status*.

    :param server: server whose resize is reverted
    :param status: status expected once the revert completes
    """
    server.revert_resize()
    revert_timeout = CONF.benchmark.nova_server_resize_revert_timeout
    revert_interval = (
        CONF.benchmark.nova_server_resize_revert_poll_interval)
    utils.wait_for(server,
                   is_ready=utils.resource_is(status),
                   update_resource=utils.get_from_manager(),
                   timeout=revert_timeout,
                   check_interval=revert_interval)
def _boot_servers(self, image_id, flavor_id, requests, name_prefix=None,
                  instances_amount=1, auto_assign_nic=False, **kwargs):
    """Boot multiple nova servers and wait until all are ACTIVE.

    :param image_id: ID of the image used for server creation
    :param flavor_id: ID of the flavor used for server creation
    :param requests: number of booting requests to perform
    :param name_prefix: prefix for the created server names; each
                        server name gets a "_<number>" suffix
    :param instances_amount: number of instances booted per request
    :param auto_assign_nic: bool, whether or not to auto assign NICs
    :param kwargs: other optional parameters to initialize the servers
    :returns: list of created server objects
    """
    name_prefix = name_prefix or self._generate_random_name()
    if auto_assign_nic and not kwargs.get("nics", False):
        nic = self._pick_random_nic()
        if nic:
            kwargs["nics"] = nic
    for idx in range(requests):
        self.clients("nova").servers.create(
            "%s_%d" % (name_prefix, idx),
            image_id, flavor_id,
            min_count=instances_amount,
            max_count=instances_amount,
            **kwargs)
    # NOTE(msdubov): The nova python client returns only one server
    #                even when min_count > 1, so rediscover all the
    #                created servers by their name prefix.
    servers = [s for s in self.clients("nova").servers.list()
               if s.name.startswith(name_prefix)]
    time.sleep(CONF.benchmark.nova_server_boot_prepoll_delay)
    return [
        utils.wait_for(
            srv,
            is_ready=utils.resource_is("ACTIVE"),
            update_resource=utils.get_from_manager(),
            timeout=CONF.benchmark.nova_server_boot_timeout,
            check_interval=CONF.benchmark.nova_server_boot_poll_interval)
        for srv in servers
    ]
def _create_nova_vm(self, nova_client, flavor, image, keypair,
                    server_name, sec_group_name,
                    nova_server_boot_timeout, **kwargs):
    """Boot a nova instance behind an SSH-enabled security group.

    :param nova_client: nova client
    :param flavor: flavor for the VM instance
    :param image: image name/id for the new instance
    :param keypair: key-pair to allow ssh
    :param server_name: name for the VM instance
    :param sec_group_name: security group to reuse or create
    :param nova_server_boot_timeout: max seconds for the instance to
                                     go active
    :returns: new nova instance
    """
    secgroup = None
    for candidate in nova_client.security_groups.list():
        if candidate.name == sec_group_name:
            secgroup = candidate
            LOG.info("Security group already present")
            break
    if secgroup is None:
        LOG.info("Adding new security group")
        secgroup = nova_client.security_groups.create(sec_group_name,
                                                      sec_group_name)
        # open tcp/22 so the instance is reachable over ssh
        nova_client.security_group_rules.create(secgroup.id,
                                                from_port=22,
                                                to_port=22,
                                                ip_protocol="tcp",
                                                cidr="0.0.0.0/0")
    LOG.info("Booting new instance: %s", server_name)
    server = nova_client.servers.create(
        server_name,
        image=image,
        flavor=flavor,
        key_name=keypair.name,
        security_groups=[secgroup.id if secgroup else None],
        **kwargs)
    LOG.info("Waiting for instance to become active")
    task_utils.wait_for(server,
                        is_ready=task_utils.resource_is("ACTIVE"),
                        update_resource=task_utils.get_from_manager(),
                        timeout=nova_server_boot_timeout)
    # NOTE(review): assert is stripped under "python -O"; kept to
    # preserve the original behavior.
    assert ('ACTIVE' == server.status), (
        "The instance is not in ACTIVE state")
    return server
def _detach_volume(self, server, volume):
    """Detach a volume from the server and wait for "available".

    :param server: server the volume is attached to
    :param volume: volume to detach
    """
    server_id = server.id
    volume_id = volume.id
    nova = self.clients("nova")
    nova.volumes.delete_server_volume(server_id, volume_id)
    utils.wait_for(
        volume,
        is_ready=utils.resource_is("available"),
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_detach_volume_timeout,
        check_interval=CONF.benchmark.nova_detach_volume_poll_interval)
def assert_server_status(server, **kwargs):
    """Wait for *server* to go ACTIVE and assert that it did.

    :param server: nova server
    :param kwargs: must contain "nova_server_boot_timeout"
    """
    LOG.debug("WAITING FOR SERVER TO GO ACTIVE")
    server = task_utils.wait_for(
        server,
        update_resource=task_utils.get_from_manager(),
        is_ready=task_utils.resource_is("ACTIVE"),
        timeout=kwargs["nova_server_boot_timeout"],
        check_interval=5)
    LOG.debug("SERVER STATUS: %s", server.status)
    assert "ACTIVE" == server.status, "THE INSTANCE IS NOT IN ACTIVE STATE"
def _attach_volume(self, server, volume, device=None):
    """Attach *volume* to *server* and wait until it is in-use.

    :param server: server to attach the volume to
    :param volume: volume to attach
    :param device: optional device name for the attachment
    """
    self.clients("nova").volumes.create_server_volume(server.id,
                                                      volume.id,
                                                      device)
    # NOTE: polling reuses the resize-revert timeout settings.
    utils.wait_for(
        volume,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("in-use"),
        timeout=CONF.benchmark.nova_server_resize_revert_timeout,
        check_interval=(
            CONF.benchmark.nova_server_resize_revert_poll_interval))
def _restore_stack(self, stack, snapshot_id):
    """Restore the stack from a snapshot and wait for RESTORE_COMPLETE.

    :param stack: stack that will be restored from the snapshot
    :param snapshot_id: id of the snapshot to restore from
    """
    self.clients("heat").stacks.restore(stack.id, snapshot_id)
    utils.wait_for(
        stack,
        update_resource=utils.get_from_manager(["RESTORE_FAILED"]),
        is_ready=utils.resource_is("RESTORE_COMPLETE"),
        timeout=CONF.benchmark.heat_stack_restore_timeout,
        check_interval=CONF.benchmark.heat_stack_restore_poll_interval)
def _upload_volume_to_image(self, volume, force=False,
                            container_format="bare", disk_format="raw"):
    """Upload the volume to a glance image and wait until it is active.

    :param volume: volume object
    :param force: upload even if the volume is attached to an instance
    :param container_format: container format of image. Acceptable
                             formats: ami, ari, aki, bare, and ovf
    :param disk_format: disk format of image. Acceptable formats: ami,
                        ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
    :returns: created image object
    """
    _resp, upload_info = volume.upload_to_image(
        force, self._generate_random_name(), container_format,
        disk_format)
    # NOTE (e0ne): upload_to_image flips the volume status to
    # "uploading", so wait until it is available again.
    bench_utils.wait_for(
        volume,
        update_resource=bench_utils.get_from_manager(),
        is_ready=bench_utils.resource_is("available"),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval)
    image_id = upload_info["os-volume_upload_image"]["image_id"]
    image = self.clients("glance").images.get(image_id)
    return bench_utils.wait_for(
        image,
        update_resource=bench_utils.get_from_manager(),
        is_ready=bench_utils.resource_is("active"),
        timeout=CONF.benchmark.glance_image_create_timeout,
        check_interval=CONF.benchmark.glance_image_create_poll_interval)
def _suspend_stack(self, stack):
    """Suspend the stack and wait for SUSPEND_COMPLETE.

    :param stack: stack that needs to be suspended
    """
    self.clients("heat").actions.suspend(stack.id)
    utils.wait_for(
        stack,
        update_resource=utils.get_from_manager(["SUSPEND_FAILED"]),
        is_ready=utils.resource_is("SUSPEND_COMPLETE"),
        timeout=CONF.benchmark.heat_stack_suspend_timeout,
        check_interval=CONF.benchmark.heat_stack_suspend_poll_interval)
def _resume_stack(self, stack):
    """Resume the stack and wait for RESUME_COMPLETE.

    :param stack: stack that needs to be resumed
    """
    self.clients("heat").actions.resume(stack.id)
    utils.wait_for(
        stack,
        update_resource=utils.get_from_manager(["RESUME_FAILED"]),
        is_ready=utils.resource_is("RESUME_COMPLETE"),
        timeout=CONF.benchmark.heat_stack_resume_timeout,
        check_interval=CONF.benchmark.heat_stack_resume_poll_interval)
def _deploy_environment(self, environment, session):
    """Deploy a murano environment and wait until it is READY.

    :param environment: Environment instance
    :param session: Session instance
    """
    self.clients("murano").sessions.deploy(environment.id, session.id)
    utils.wait_for(
        environment,
        update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
        is_ready=utils.resource_is("READY"),
        timeout=CONF.benchmark.deploy_environment_timeout,
        check_interval=CONF.benchmark.deploy_environment_check_interval)
def _create_backup(self, volume_id, **kwargs):
    """Back up the given volume and wait until the backup is available.

    :param volume_id: the ID of the volume to back up
    :param kwargs: other optional parameters for the backup
    :returns: the finished backup object
    """
    new_backup = self.clients("cinder").backups.create(volume_id,
                                                       **kwargs)
    return bench_utils.wait_for(
        new_backup,
        update_resource=bench_utils.get_from_manager(),
        is_ready=bench_utils.resource_is("available"),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval)
def _create_image(self, container_format, image_location, disk_format,
                  name=None, prefix=None, length=None, **kwargs):
    """Create a new glance image.

    :param container_format: container format of image. Acceptable
                             formats: ami, ari, aki, bare, and ovf
    :param image_location: image file location (local path or URL)
    :param disk_format: disk format of image. Acceptable formats: ami,
                        ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
    :param name: string used to name the image
    :param prefix: prefix of generated image name if name not specified
                   ignored if name specified
    :param length: length of automatically generated part of image name
                   ignored if name specified
    :param kwargs: optional parameters to create image
    :returns: image object
    """
    name = name or self._generate_random_name(prefix, length)
    kw = {
        "name": name,
        "container_format": container_format,
        "disk_format": disk_format,
    }
    kw.update(kwargs)
    image_location = os.path.expanduser(image_location)
    try:
        if os.path.isfile(image_location):
            # Open in binary mode: image payloads are binary data, and
            # text mode would fail/corrupt the upload on Python 3.
            kw["data"] = open(image_location, "rb")
        else:
            kw["copy_from"] = image_location
        image = self.clients("glance").images.create(**kw)
        time.sleep(CONF.benchmark.glance_image_create_prepoll_delay)
        image = utils.wait_for(
            image,
            is_ready=utils.resource_is("active"),
            update_resource=utils.get_from_manager(),
            timeout=CONF.benchmark.glance_image_create_timeout,
            check_interval=CONF.benchmark.
            glance_image_create_poll_interval)
    finally:
        # Always release the file handle, even if creation failed.
        if "data" in kw:
            kw["data"].close()
    return image
def _upload_volume_to_image(self, volume, force=False,
                            container_format="bare", disk_format="raw"):
    """Upload the given volume to a glance image. Returns the image.

    :param volume: volume object
    :param force: upload even if the volume is attached to an instance
    :param container_format: container format of image. Acceptable
                             formats: ami, ari, aki, bare, and ovf
    :param disk_format: disk format of image. Acceptable formats: ami,
                        ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
    :returns: created image object
    """
    image_name = self._generate_random_name()
    _response, upload_result = volume.upload_to_image(
        force, image_name, container_format, disk_format)
    # NOTE (e0ne): upload_to_image changes the volume status to
    # "uploading", so wait until the volume is available again.
    bench_utils.wait_for(
        volume,
        is_ready=bench_utils.resource_is("available"),
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval
    )
    new_image_id = upload_result["os-volume_upload_image"]["image_id"]
    new_image = self.clients("glance").images.get(new_image_id)
    return bench_utils.wait_for(
        new_image,
        is_ready=bench_utils.resource_is("active"),
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.benchmark.glance_image_create_timeout,
        check_interval=CONF.benchmark.glance_image_create_poll_interval
    )
def _create_nova_vm(self, nova_client, keypair, **kwargs):
    """Boot a nova instance with fresh ssh/icmp security-group rules.

    :param nova_client: nova client
    :param keypair: key-pair to allow ssh
    :param kwargs: must contain sec_group_suffix, server_suffix, image,
                   flavor, nics and nova_server_boot_timeout
    :returns: new nova instance
    """
    sec_group_name = "rally_secgroup_" + kwargs["sec_group_suffix"]
    LOG.debug("ADDING NEW SECURITY GROUP %s", sec_group_name)
    secgroup = nova_client.security_groups.create(sec_group_name,
                                                  sec_group_name)
    # allow inbound SSH (tcp/22) and ICMP from anywhere
    for protocol, port_low, port_high in (("tcp", 22, 22),
                                          ("icmp", -1, -1)):
        nova_client.security_group_rules.create(secgroup.id,
                                                from_port=port_low,
                                                to_port=port_high,
                                                ip_protocol=protocol,
                                                cidr="0.0.0.0/0")
    server_name = "rally_server_" + kwargs["server_suffix"]
    LOG.debug("BOOTING NEW INSTANCE: %s", server_name)
    server = nova_client.servers.create(server_name,
                                        image=kwargs["image"],
                                        flavor=kwargs["flavor"],
                                        key_name=keypair.name,
                                        security_groups=[secgroup.id],
                                        nics=kwargs["nics"])
    LOG.debug("WAITING FOR INSTANCE TO BECOME ACTIVE")
    server = task_utils.wait_for(
        server,
        update_resource=task_utils.get_from_manager(),
        is_ready=task_utils.resource_is("ACTIVE"),
        timeout=kwargs["nova_server_boot_timeout"],
        check_interval=5)
    LOG.debug("SERVER STATUS: %s", server.status)
    assert ('ACTIVE' == server.status), (
        "THE INSTANCE IS NOT IN ACTIVE STATE")
    return server
def assert_server_status(server, **kwargs):
    """Assert that the server reaches ACTIVE within the boot timeout.

    :param server: nova server
    :param kwargs: must contain "nova_server_boot_timeout"
    """
    LOG.debug("WAITING FOR SERVER TO GO ACTIVE")
    server = task_utils.wait_for(
        server,
        is_ready=task_utils.resource_is("ACTIVE"),
        update_resource=task_utils.get_from_manager(),
        timeout=kwargs["nova_server_boot_timeout"],
        check_interval=5)
    LOG.debug("SERVER STATUS: %s", server.status)
    assert "ACTIVE" == server.status, (
        "THE INSTANCE IS NOT IN ACTIVE STATE")
def _deploy_environment(self, environment, session):
    """Trigger deployment of a murano environment and wait for READY.

    :param environment: Environment instance
    :param session: Session instance
    """
    murano = self.clients("murano")
    murano.sessions.deploy(environment.id, session.id)
    utils.wait_for(
        environment,
        is_ready=utils.resource_is("READY"),
        update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
        timeout=CONF.benchmark.deploy_environment_timeout,
        check_interval=CONF.benchmark.deploy_environment_check_interval
    )
def _create_backup(self, volume_id, **kwargs):
    """Create a volume backup and wait until it is available.

    :param volume_id: the ID of the volume to back up
    :param kwargs: other optional parameters for the backup
    :returns: the finished backup object
    """
    cinder = self.clients("cinder")
    backup = cinder.backups.create(volume_id, **kwargs)
    return bench_utils.wait_for(
        backup,
        is_ready=bench_utils.resource_is("available"),
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval
    )
def _suspend_stack(self, stack):
    """Ask heat to suspend the stack and wait for SUSPEND_COMPLETE.

    :param stack: stack that needs to be suspended
    """
    heat = self.clients("heat")
    heat.actions.suspend(stack.id)
    utils.wait_for(
        stack,
        is_ready=utils.resource_is("SUSPEND_COMPLETE"),
        update_resource=utils.get_from_manager(["SUSPEND_FAILED"]),
        timeout=CONF.benchmark.heat_stack_suspend_timeout,
        check_interval=CONF.benchmark.heat_stack_suspend_poll_interval)
def _snapshot_stack(self, stack):
    """Create a snapshot of the stack and wait for SNAPSHOT_COMPLETE.

    :param stack: stack used as the base for the snapshot
    :returns: snapshot created for the given stack
    """
    snapshot = self.clients("heat").stacks.snapshot(stack.id)
    utils.wait_for(
        stack,
        update_resource=utils.get_from_manager(["SNAPSHOT_FAILED"]),
        is_ready=utils.resource_is("SNAPSHOT_COMPLETE"),
        timeout=CONF.benchmark.heat_stack_snapshot_timeout,
        check_interval=CONF.benchmark.heat_stack_snapshot_poll_interval)
    return snapshot
def _resume_stack(self, stack):
    """Ask heat to resume the stack and wait for RESUME_COMPLETE.

    :param stack: stack that needs to be resumed
    """
    heat = self.clients("heat")
    heat.actions.resume(stack.id)
    utils.wait_for(
        stack,
        is_ready=utils.resource_is("RESUME_COMPLETE"),
        update_resource=utils.get_from_manager(["RESUME_FAILED"]),
        timeout=CONF.benchmark.heat_stack_resume_timeout,
        check_interval=CONF.benchmark.heat_stack_resume_poll_interval)
def _restore_backup(self, backup_id, volume_id=None):
    """Restore a cinder backup and wait for the volume to be available.

    :param backup_id: the ID of the backup to restore
    :param volume_id: optional ID of the volume to restore the backup to
    :returns: the restored volume object
    """
    cinder = self.clients("cinder")
    restore = cinder.restores.restore(backup_id, volume_id)
    restored_volume = cinder.volumes.get(restore.volume_id)
    return bench_utils.wait_for(
        restored_volume,
        update_resource=bench_utils.get_from_manager(),
        is_ready=bench_utils.resource_is("available"),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval)
def _check_stack(self, stack):
    """Run heat's check action on the stack and its resources.

    :param stack: stack that needs to be checked
    """
    self.clients("heat").actions.check(stack.id)
    utils.wait_for(
        stack,
        update_resource=utils.get_from_manager(["CHECK_FAILED"]),
        is_ready=utils.resource_is("CHECK_COMPLETE"),
        timeout=CONF.benchmark.heat_stack_check_timeout,
        check_interval=CONF.benchmark.heat_stack_check_poll_interval)
def _restore_stack(self, stack, snapshot_id):
    """Restore a stack from the given snapshot.

    :param stack: stack that will be restored from the snapshot
    :param snapshot_id: id of the snapshot to restore from
    """
    heat = self.clients("heat")
    heat.stacks.restore(stack.id, snapshot_id)
    utils.wait_for(
        stack,
        is_ready=utils.resource_is("RESTORE_COMPLETE"),
        update_resource=utils.get_from_manager(["RESTORE_FAILED"]),
        timeout=CONF.benchmark.heat_stack_restore_timeout,
        check_interval=CONF.benchmark.heat_stack_restore_poll_interval,
    )
def _attach_volume(self, server, volume, device=None):
    """Attach a volume to the server and wait for "in-use" status.

    :param server: server to attach the volume to
    :param volume: volume to attach
    :param device: optional device name for the attachment
    """
    server_id = server.id
    volume_id = volume.id
    nova = self.clients("nova")
    nova.volumes.create_server_volume(server_id, volume_id, device)
    # NOTE: polling reuses the resize-revert timeout settings.
    attach_timeout = CONF.benchmark.nova_server_resize_revert_timeout
    attach_interval = (
        CONF.benchmark.nova_server_resize_revert_poll_interval)
    utils.wait_for(volume,
                   is_ready=utils.resource_is("in-use"),
                   update_resource=utils.get_from_manager(),
                   timeout=attach_timeout,
                   check_interval=attach_interval)
def assert_server_status(server, **kwargs):
    """Wait for the server to go ACTIVE and fail loudly if it does not.

    :param server: nova server
    :param kwargs: must contain "nova_server_boot_timeout"
    """
    LOG.debug("WAITING FOR SERVER TO GO ACTIVE")
    server = task_utils.wait_for(
        server,
        update_resource=task_utils.get_from_manager(),
        is_ready=task_utils.resource_is("ACTIVE"),
        timeout=kwargs["nova_server_boot_timeout"],
        check_interval=5)
    LOG.debug("SERVER STATUS: %s", server.status)
    assert "ACTIVE" == server.status, (
        "THE INSTANCE IS NOT IN ACTIVE STATE")
def _set_alarm_state(self, alarm, state, timeout):
    """Set the alarm state and wait until the alarm reports it.

    :param alarm: alarm instance
    :param state: alarm state to be set
    :param timeout: seconds to keep checking for the state change
    :returns: alarm in the requested state
    """
    self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state)
    return bench_utils.wait_for(
        alarm,
        is_ready=bench_utils.resource_is(state),
        update_resource=bench_utils.get_from_manager(),
        timeout=timeout,
        check_interval=1)
def _start_server(self, server):
    """Start the server and wait for it to become ACTIVE.

    :param server: the server to start and wait on
    """
    server.start()
    utils.wait_for(
        server,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("ACTIVE"),
        timeout=CONF.benchmark.nova_server_start_timeout,
        check_interval=CONF.benchmark.nova_server_start_poll_interval)
def _stop_server(self, server):
    """Stop the server and wait for it to reach SHUTOFF.

    :param server: the server to stop
    """
    server.stop()
    utils.wait_for(
        server,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("SHUTOFF"),
        timeout=CONF.benchmark.nova_server_stop_timeout,
        check_interval=CONF.benchmark.nova_server_stop_poll_interval)
def _unshelve_server(self, server):
    """Unshelve the server and wait for it to become ACTIVE.

    :param server: Server object
    """
    server.unshelve()
    # Give nova a head start before the first status poll.
    time.sleep(CONF.benchmark.nova_server_unshelve_prepoll_delay)
    utils.wait_for(
        server,
        update_resource=utils.get_from_manager(),
        is_ready=utils.resource_is("ACTIVE"),
        timeout=CONF.benchmark.nova_server_unshelve_timeout,
        check_interval=CONF.benchmark.nova_server_unshelve_poll_interval)