def get_plug_point_class_instances():
    """Instances of classes that implement pre/post stack operation methods.

    Get the list of instances of classes that (may) implement pre and post
    stack operation methods.

    The list of class instances is sorted using get_ordinal methods
    on the plug point classes. If class1.ordinal() < class2.ordinal(),
    then class1 will be before class2 in the list.
    """
    global pp_class_instances
    if pp_class_instances is None:
        pp_class_instances = []
        pp_classes = []
        try:
            slps = resources.global_env().get_stack_lifecycle_plugins()
            pp_classes = [cls for name, cls in slps]
        except Exception:
            LOG.exception(_LE("failed to get lifecycle plug point classes"))

        for ppc in pp_classes:
            try:
                pp_class_instances.append(ppc())
            except Exception:
                LOG.exception(
                    _LE("failed to instantiate stack lifecycle class %s"),
                    ppc)
        try:
            pp_class_instances = sorted(pp_class_instances,
                                        key=lambda ppci: ppci.get_ordinal())
        except Exception:
            LOG.exception(_LE("failed to sort lifecycle plug point classes"))
    return pp_class_instances
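# A minimal sketch of a lifecycle plug point class that the loader above can
# instantiate and sort. The method names do_pre_op/do_post_op/get_ordinal
# follow the pre/post operation and ordinal conventions referenced above;
# this class itself is hypothetical and only illustrates the expected shape.
class ExampleLifecyclePlugin(object):
    def get_ordinal(self):
        # Lower ordinals sort earlier in get_plug_point_class_instances().
        return 100

    def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
        LOG.debug("pre-op for stack %s, action %s", stack.id, action)

    def do_post_op(self, cnxt, stack, current_stack=None, action=None,
                   is_stack_failure=False):
        LOG.debug("post-op for stack %s, action %s", stack.id, action)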
def read_global_environment(env, env_dir=None):
    if env_dir is None:
        cfg.CONF.import_opt('environment_dir',
                            'conveyor.conveyorheat.common.config')
        env_dir = cfg.CONF.environment_dir

    try:
        env_files = glob.glob(os.path.join(env_dir, '*'))
    except OSError as osex:
        LOG.error(_LE('Failed to read %s'), env_dir)
        LOG.exception(osex)
        return

    for file_path in env_files:
        try:
            with open(file_path) as env_fd:
                LOG.info(_LI('Loading %s'), file_path)
                env_body = env_fmt.parse(env_fd.read())
                env_fmt.default_for_missing(env_body)
                env.load(env_body)
        except ValueError as vex:
            LOG.error(_LE('Failed to parse %(file_path)s'),
                      {'file_path': file_path})
            LOG.exception(vex)
        except IOError as ioex:
            LOG.error(_LE('Failed to read %(file_path)s'),
                      {'file_path': file_path})
            LOG.exception(ioex)
def _v3_client_init(self):
    client = kc_v3.Client(session=self.session,
                          auth=self.context.auth_plugin,
                          interface='public')

    if hasattr(self.context.auth_plugin, 'get_access'):
        # NOTE(jamielennox): get_access returns the current token without
        # reauthenticating if it's present and valid.
        try:
            auth_ref = self.context.auth_plugin.get_access(self.session)
        except kc_exception.Unauthorized:
            LOG.error(_LE("Keystone client authentication failed"))
            raise exception.AuthorizationFailure()

        if self.context.trust_id:
            # Sanity check
            if not auth_ref.trust_scoped:
                LOG.error(_LE("trust token re-scoping failed!"))
                raise exception.AuthorizationFailure()
            # Sanity check that impersonation is effective
            if self.context.trustor_user_id != auth_ref.user_id:
                LOG.error(_LE("Trust impersonation failed"))
                raise exception.AuthorizationFailure()

    return client
def interface_detach(self, context, server_id, port_id):
    LOG.debug("Nova client detach interface from %s start", server_id)
    LOG.debug("Nova client query server %s start", server_id)
    client = novaclient(context, admin=True)
    server = client.servers.get(server_id)
    LOG.debug("Nova client query server %s end", str(server))
    return client.servers.interface_detach(server, port_id)
def get_server(self, context, server_id, is_dict=True):
    client = novaclient(context, admin=True)
    LOG.debug("Nova client query server %s start", server_id)
    server = client.servers.get(server_id)
    if is_dict:
        server = self._dict_server(server)
    LOG.debug("Nova client query server %s end", str(server))
    return server
def interface_attach(self, context, server_id, net_id, port_id=None,
                     fixed_ip=None):
    LOG.debug("Nova client attach an interface to %s start", server_id)
    LOG.debug("Nova client query server %s start", server_id)
    client = novaclient(context, admin=True)
    server = client.servers.get(server_id)
    LOG.debug("Nova client query server %s end", str(server))
    obj = client.servers.interface_attach(server, port_id, net_id, fixed_ip)
    return obj
def get_network(self, context, network_id, timeout=None, **_params):
    network = None
    try:
        network = self.call('get_network')
    except exc.HTTPNotFound:
        LOG.error(_LE('Can not find network %s info'), network_id)
        raise neutronclient_exceptions.NotFound
    except Exception as e:
        LOG.error(_LE('Query network %(id)s info error: %(err)s'),
                  {'id': network_id, 'err': e})
        raise exception.V2vException
    return network
def get_router(self, context, router_id, **_params):
    router = None
    try:
        router = self.call('get_router')
    except exc.HTTPNotFound:
        LOG.error(_LE('Can not find router %s info'), router_id)
        raise neutronclient_exceptions.NotFound
    except Exception as e:
        LOG.error(_LE('Query router %(id)s info error: %(err)s'),
                  {'id': router_id, 'err': e})
        raise exception.V2vException
    return router
def get_floatingip(self, context, floatingip, **_params):
    floatingip_info = None
    try:
        floatingip_info = self.call('get_floatingip')
    except exc.HTTPNotFound:
        LOG.error(_LE('Can not find floatingip %s info'), floatingip)
        raise neutronclient_exceptions.NotFound
    except Exception as e:
        LOG.error(_LE('Query floatingip %(id)s info error: %(err)s'),
                  {'id': floatingip, 'err': e})
        raise exception.V2vException
    return floatingip_info
def get_subnet(self, context, subnet_id, **_params):
    subnet = None
    try:
        subnet = self.call('get_subnet')
    except exc.HTTPNotFound:
        LOG.error(_LE('Can not find subnet %s info'), subnet_id)
        raise neutronclient_exceptions.NotFound
    except Exception as e:
        LOG.error(_LE('Query subnet %(id)s info error: %(err)s'),
                  {'id': subnet_id, 'err': e})
        raise exception.V2vException
    return subnet
def get_security_group(self, context, security_group_id, **_params):
    security_group = None
    try:
        security_group = self.call('get_security_group')
    except exc.HTTPNotFound:
        LOG.error(_LE('Can not find secgroup %s info'), security_group_id)
        raise neutronclient_exceptions.NotFound
    except Exception as e:
        LOG.error(_LE('Query security group %(id)s info error: %(err)s'),
                  {'id': security_group_id, 'err': e})
        raise exception.ResourceNotFound(resource_type='Network',
                                         resource_id=security_group_id)
    return security_group
def get_volume_type(self, context, volume_type_id, trans_map=True):
    volume_type = None
    try:
        volume_type = self.call('get_volume_type')
    except exc.HTTPNotFound:
        LOG.error(_LE('Can not find volume type %s info'), volume_type_id)
        raise cinderclient_exceptions.NotFound
    except Exception as e:
        LOG.error(_LE('Query volumetype %(id)s info error: %(err)s'),
                  {'id': volume_type_id, 'err': e})
        raise exception.V2vException
    return volume_type
def get_all_servers(self, context, detailed=True, search_opts=None,
                    marker=None, limit=None, is_dict=True):
    LOG.debug("Nova client query all servers start")
    client = novaclient(context, admin=True)
    server_list = client.servers.list(detailed=detailed,
                                      search_opts=search_opts,
                                      marker=marker,
                                      limit=limit)
    LOG.debug("Nova client query all servers end")
    if server_list and is_dict:
        server_dict_list = []
        for server in server_list:
            server = self._dict_server(server)
            server_dict_list.append(server)
        return server_dict_list
    return server_list
def attach_volume(self, context, server_id, volume_id, device):
    try:
        self.call('attach_volume')
    except Exception as e:
        LOG.error(_LE('Attach volume %(vol_id)s to server %(vm)s: %(e)s'),
                  {'vol_id': volume_id, 'vm': server_id, 'e': e})
        raise exception.V2vException
def detach_volume(self, context, server_id, attachment_id):
    try:
        self.call('detach_volume')
    except Exception as e:
        LOG.error(_LE('Detach volume %(vol_id)s from server %(vm)s: %(e)s'),
                  {'vol_id': attachment_id, 'vm': server_id, 'e': e})
        raise exception.V2vException
def _create_auth_plugin(self):
    if self.auth_token_info:
        auth_ref = access.AccessInfo.factory(body=self.auth_token_info,
                                             auth_token=self.auth_token)
        return access_plugin.AccessInfoPlugin(
            auth_url=self.keystone_v3_endpoint,
            auth_ref=auth_ref)

    if self.auth_token:
        # FIXME(jamielennox): This is broken but consistent. If you only
        # have a token but don't load a service catalog then url_for won't
        # work. Stub with the keystone endpoint so at least it might be
        # right.
        return token_endpoint.Token(endpoint=self.keystone_v3_endpoint,
                                    token=self.auth_token)

    if self.password:
        return v3.Password(username=self.username,
                           password=self.password,
                           project_id=self.tenant_id,
                           user_domain_id=self.user_domain,
                           auth_url=self.keystone_v3_endpoint)

    LOG.error(_LE("Keystone v3 API connection failed: no password, "
                  "trust or auth_token!"))
    raise exception.AuthorizationFailure()
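# A minimal usage sketch (assumed caller, not part of the original class):
# build a keystoneauth session from the plugin returned above. The import
# and the _create_session method name are illustrative assumptions; TLS
# options (verify/cert) would normally come from configuration.
from keystoneauth1 import session as ks_session


def _create_session(self):
    auth = self._create_auth_plugin()
    return ks_session.Session(auth=auth)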
def create_stack_user(self, username, password=''):
    """Create a user defined as part of a stack.

    The user is defined either via template or created internally by a
    resource. This user will be added to the heat_stack_user_role as
    defined in the config.

    Returns the keystone ID of the resulting user.
    """
    # FIXME(shardy): There's duplicated logic between here and
    # create_stack_domain_user, but this function is expected to
    # be removed after the transition of all resources to domain
    # users has been completed.
    stack_user_role = self.client.roles.list(
        name=cfg.CONF.heat_stack_user_role)
    if len(stack_user_role) == 1:
        role_id = stack_user_role[0].id
        # Create the user
        user = self.client.users.create(
            name=self._get_username(username), password=password,
            default_project=self.context.tenant_id)
        # Add user to heat_stack_user_role
        LOG.debug("Adding user %(user)s to role %(role)s",
                  {'user': user.id, 'role': role_id})
        self.client.roles.grant(role=role_id, user=user.id,
                                project=self.context.tenant_id)
    else:
        LOG.error(_LE("Failed to add user %(user)s to role %(role)s, "
                      "check role exists!"),
                  {'user': username,
                   'role': cfg.CONF.heat_stack_user_role})
        raise exception.Error(_("Can't find role %s")
                              % cfg.CONF.heat_stack_user_role)

    return user.id
def _handle_volume_for_stack_after_clone(self, context, template):
    try:
        resources = template.get('resources')
        for key, res in resources.items():
            res_type = res.get('type')
            if res_type == 'OS::Cinder::Volume':
                try:
                    copy_data = res.get('extra_properties', {}) \
                        .get('copy_data')
                    if not copy_data:
                        continue
                    attachments = res.get('extra_properties', {}) \
                        .get('attachments')
                    volume_id = res.get('extra_properties', {}).get('id')
                    vgw_id = res.get('extra_properties').get('gw_id')
                    self._detach_volume(context, vgw_id, volume_id)
                    if attachments:
                        for attachment in attachments:
                            server_id = attachment.get('server_id')
                            device = attachment.get('device')
                            self.compute_api.attach_volume(context,
                                                           server_id,
                                                           volume_id,
                                                           device)
                except Exception as e:
                    LOG.error(_LE('Error from handle volume of stack after '
                                  'clone. Error=%(e)s'), {'e': e})
    except Exception as e:
        LOG.warning('Failed to handle volumes of stack after clone: %s', e)
def _do_ops(cinstances, opname, cnxt, stack, current_stack=None, action=None,
            is_stack_failure=None):
    success_count = 0
    failure = False
    failure_exception_message = None
    for ci in cinstances:
        op = getattr(ci, opname, None)
        if callable(op):
            try:
                if is_stack_failure is not None:
                    op(cnxt, stack, current_stack, action, is_stack_failure)
                else:
                    op(cnxt, stack, current_stack, action)
                success_count += 1
            except Exception as ex:
                LOG.exception(_LE(
                    "%(opname)s %(ci)s failed for %(a)s on %(sid)s"),
                    {'opname': opname, 'ci': type(ci),
                     'a': action, 'sid': stack.id})
                failure = True
                failure_exception_message = ex.args[0] if ex.args else str(ex)
                break
            LOG.info(_LI("done with class=%(c)s, stackid=%(sid)s, "
                         "action=%(a)s"),
                     {'c': type(ci), 'sid': stack.id, 'a': action})
    return (failure, failure_exception_message, success_count)
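# A usage sketch of the assumed call site: run every plug point's do_pre_op
# before a stack operation, using the instances loaded by
# get_plug_point_class_instances(). The opname values and the rollback of
# already-succeeded pre-ops via do_post_op are illustrative of how the
# helper above is typically driven, not a verbatim copy of engine code.
def do_pre_ops(cnxt, stack, current_stack=None, action=None):
    cinstances = get_plug_point_class_instances()
    if action is None:
        action = stack.action
    failure, message, success_count = _do_ops(cinstances, 'do_pre_op', cnxt,
                                              stack, current_stack, action,
                                              None)
    if failure:
        # Undo the pre-ops that already succeeded before re-raising.
        _do_ops(cinstances[:success_count], 'do_post_op', cnxt, stack,
                current_stack, action, True)
        raise Exception(message)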
def _handle_volume_for_stack_after_clone(self, context, template):
    try:
        resources = template.get('resources')
        for key, res in resources.items():
            res_type = res.get('type')
            if res_type == 'OS::Cinder::Volume':
                try:
                    if res.get('extra_properties', {}).get('is_deacidized'):
                        set_shareable = res.get('extra_properties', {}) \
                            .get('set_shareable')
                        volume_id = res.get('extra_properties', {}).get('id')
                        vgw_id = res.get('extra_properties').get('gw_id')
                        self._detach_volume(context, vgw_id, volume_id)
                        if set_shareable:
                            self.volume_api.set_volume_shareable(context,
                                                                 volume_id,
                                                                 False)
                except Exception as e:
                    LOG.error(_LE('Error from handle volume of stack after '
                                  'clone. Error=%(e)s'), {'e': e})
            elif res_type and res_type.startswith('file://'):
                son_template = json.loads(res.get('content'))
                self._handle_volume_for_stack_after_clone(context,
                                                          son_template)
    except Exception as e:
        LOG.warning('Failed to handle volumes of stack after clone: %s', e)
def validate(self):
    super(RemoteStack, self).validate()
    try:
        self.heat()
    except Exception as ex:
        exc_info = dict(region=self._region_name, exc=six.text_type(ex))
        msg = _('Cannot establish connection to Heat endpoint at region '
                '"%(region)s" due to "%(exc)s"') % exc_info
        raise exception.StackValidationFailed(message=msg)

    try:
        params = self.properties[self.PARAMETERS]
        env = environment.get_child_environment(self.stack.env, params)
        tmpl = template_format.parse(self.properties[self.TEMPLATE])
        args = {
            'template': tmpl,
            'files': self.stack.t.files,
            'environment': env.user_env_as_dict(),
        }
        self.heat().stacks.validate(**args)
    except Exception as ex:
        exc_info = dict(region=self._region_name, exc=six.text_type(ex))
        LOG.error(_LE('exception: %s'), type(ex))
        msg = _('Failed validating stack template using Heat endpoint at '
                'region "%(region)s" due to "%(exc)s"') % exc_info
        raise exception.StackValidationFailed(message=msg)
def keypair_list(self, context):
    keypairs = []
    try:
        keypairs = self.call('list_keypair')
    except Exception as e:
        LOG.error(_LE('Query all keypairs info error: %s'), e)
        raise exception.V2vException
    return keypairs
def secgroup_list(self, context, **_params):
    security_groups = []
    try:
        security_groups = self.call('list_security_groups')
    except Exception as e:
        LOG.error(_LE('Query security groups info error: %s'), e)
        raise exception.V2vException
    return security_groups
def subnet_list(self, context, **_params):
    subnets = []
    try:
        subnets = self.call('list_subnets')
    except Exception as e:
        LOG.error(_LE('Query subnets info error: %s'), e)
        raise exception.V2vException
    return subnets
def network_list(self, context, **_params):
    networks = []
    try:
        networks = self.call('list_networks')
    except Exception as e:
        LOG.error(_LE('Query networks info error: %s'), e)
        raise exception.V2vException
    return networks
def flavor_list(self, context, detailed=True, is_public=True):
    flavors = []
    try:
        flavors = self.call('list_flavor')
    except Exception as e:
        LOG.error(_LE('Query all flavors info error: %s'), e)
        raise exception.V2vException
    return flavors
def router_list(self, context, **_params):
    routers = []
    try:
        routers = self.call('list_routers')
    except Exception as e:
        LOG.error(_LE('Query routers info error: %s'), e)
        raise exception.V2vException
    return routers
def availability_zone_list(self, context, detailed=True):
    availability_zones = []
    try:
        availability_zones = self.call('list_availability_zone')
    except Exception as e:
        LOG.error(_LE('Query all availability zone info error: %s'), e)
        raise exception.V2vException
    return availability_zones
def floatingip_list(self, context, **_params):
    floatingips = []
    try:
        floatingips = self.call('list_floatingips')
    except Exception as e:
        LOG.error(_LE('Query floatingips info error: %s'), e)
        raise exception.V2vException
    return floatingips
def get_all_servers(self, context, detailed=True, search_opts=None,
                    marker=None, limit=None):
    servers = []
    try:
        servers = self.call('list_instances')
    except Exception as e:
        LOG.error(_LE('Query server list info error: %s'), e)
    return servers
def novaclient(context, admin=False):
    # FIXME: the novaclient ServiceCatalog object is mis-named.
    # It actually contains the entire access blob.
    # Only needed parts of the service catalog are passed in, see
    # nova/context.py.
    compat_catalog = {
        'access': {'serviceCatalog': context.service_catalog or []}
    }
    sc = service_catalog.ServiceCatalog(compat_catalog)

    nova_endpoint_template = CONF.nova_endpoint_template
    nova_catalog_info = CONF.nova_catalog_info
    if admin:
        nova_endpoint_template = CONF.nova_endpoint_admin_template
        nova_catalog_info = CONF.nova_catalog_admin_info

    if nova_endpoint_template:
        url = nova_endpoint_template % context.to_dict()
    else:
        info = nova_catalog_info
        service_type, service_name, endpoint_type = info.split(':')
        # extract the region if set in configuration
        if CONF.os_region_name:
            attr = 'region'
            filter_value = CONF.os_region_name
        else:
            attr = None
            filter_value = None
        try:
            url = sc.url_for(attr=attr,
                             filter_value=filter_value,
                             service_type=service_type,
                             service_name=service_name,
                             endpoint_type=endpoint_type)
        except Exception as e:
            LOG.error(_LE("Novaclient get URL from service catalog "
                          "error: %s"), e)
            cs = url_client.Client()
            url = cs.get_service_endpoint(
                context, 'compute', region_name=CONF.os_region_name)
            LOG.debug("Novaclient get URL from common function: %s", url)
            if not url:
                url = CONF.nova_url + '/' + context.project_id

    LOG.debug('Novaclient connection created using URL: %s', url)

    extensions = [assisted_volume_snapshots]
    c = nova_client.Client(context.user_id,
                           context.auth_token,
                           context.project_id,
                           auth_url=url,
                           insecure=CONF.nova_api_insecure,
                           cacert=CONF.nova_ca_certificates_file,
                           extensions=extensions)
    # noauth extracts user_id:project_id from auth_token
    c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
                                                           context.project_id)
    c.client.management_url = url
    return c
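# A minimal usage sketch, assuming a RequestContext carrying an auth token
# and service catalog; the helper name list_servers_for_project and the
# search_opts shown are illustrative, not part of the original module.
def list_servers_for_project(context):
    client = novaclient(context, admin=True)
    return client.servers.list(detailed=True,
                               search_opts={'all_tenants': True})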
def delete_server(self, context, server_id):
    client = novaclient(context, admin=True)
    LOG.debug("Nova client delete server %s start", server_id)
    server = client.servers.delete(server_id)
    LOG.debug("Nova client delete server %s end", str(server))
def add_extra_properties_for_server(self, context, resource, resource_map,
                                    sys_clone, copy_data, undo_mgr):
    migrate_net_map = CONF.migrate_net_map
    server_properties = resource.properties
    server_id = resource.id
    server_extra_properties = resource.extra_properties
    server_az = server_properties.get('availability_zone')
    vm_state = server_extra_properties.get('vm_state')
    gw_url = server_extra_properties.get('gw_url')
    if not gw_url:
        if vm_state == 'stopped':
            gw_id, gw_ip = utils.get_next_vgw(server_az)
            if not gw_id or not gw_ip:
                raise exception.V2vException(message='no vgw host found')
            gw_url = gw_ip + ':' + str(CONF.v2vgateway_api_listen_port)
            resource.extra_properties.update({"gw_url": gw_url,
                                              "gw_id": gw_id})
            resource.extra_properties['sys_clone'] = sys_clone
            resource.extra_properties['is_deacidized'] = True
            block_device_mapping = server_properties.get(
                'block_device_mapping_v2')
            if block_device_mapping:
                for block_device in block_device_mapping:
                    volume_name = block_device.get('volume_id').get(
                        'get_resource')
                    volume_resource = resource_map.get(volume_name)
                    volume_resource.extra_properties['gw_url'] = gw_url
                    volume_resource.extra_properties['is_deacidized'] = True
                    boot_index = block_device.get('boot_index')
                    dev_name = block_device.get('device_name')
                    if boot_index == 0 or boot_index == '0':
                        volume_resource.extra_properties['sys_clone'] = \
                            sys_clone
                        if sys_clone:
                            self._handle_dv_for_svm(context, volume_resource,
                                                    server_id, dev_name,
                                                    gw_id, gw_ip, undo_mgr)
                    else:
                        d_copy = copy_data and volume_resource. \
                            extra_properties['copy_data']
                        volume_resource.extra_properties['copy_data'] = \
                            d_copy
                        if not d_copy:
                            continue
                        self._handle_dv_for_svm(context, volume_resource,
                                                server_id, dev_name,
                                                gw_id, gw_ip, undo_mgr)
        else:
            if migrate_net_map:
                # get the availability_zone of server
                server_az = server_properties.get('availability_zone')
                if not server_az:
                    LOG.error(_LE('Can not get the availability_zone '
                                  'of server %s'), resource.id)
                    raise exception.AvailabilityZoneNotFound(
                        server_uuid=resource.id)
                migrate_net_id = migrate_net_map.get(server_az)
                if not migrate_net_id:
                    LOG.error(_LE('Can not get the migrate net of '
                                  'server %s'), resource.id)
                    raise exception.NoMigrateNetProvided(
                        server_uuid=resource.id)
                # attach interface
                LOG.debug('Attach a port of net %s to server %s',
                          migrate_net_id, server_id)
                obj = self.compute_api.interface_attach(context, server_id,
                                                        migrate_net_id,
                                                        None, None)
                interface_attachment = obj._info
                if interface_attachment:
                    LOG.debug('The interface attachment info is %s',
                              str(interface_attachment))
                    migrate_fix_ip = interface_attachment.get(
                        'fixed_ips')[0].get('ip_address')
                    migrate_port_id = interface_attachment.get('port_id')
                    undo_mgr.undo_with(functools.partial(
                        self.compute_api.interface_detach,
                        context, server_id, migrate_port_id))
                    gw_url = migrate_fix_ip + ':' + str(
                        CONF.v2vgateway_api_listen_port)
                    extra_properties = {}
                    extra_properties['gw_url'] = gw_url
                    extra_properties['is_deacidized'] = True
                    extra_properties['migrate_port_id'] = migrate_port_id
                    extra_properties['sys_clone'] = sys_clone
                    resource.extra_properties.update(extra_properties)
                    # wait until the port attach finishes and the vm can be
                    # pinged
                    self._await_port_status(context, migrate_port_id,
                                            migrate_fix_ip)
            # else:
            #     interfaces = self.neutron_api.port_list(
            #         context, device_id=server_id)
            #     host_ip = None
            #     for infa in interfaces:
            #         if host_ip:
            #             break
            #         binding_profile = infa.get("binding:profile", [])
            #         if binding_profile:
            #             host_ip = binding_profile.get('host_ip')
            #     if not host_ip:
            #         LOG.error(_LE('Can not find the clone data ip for '
            #                       'server'))
            #         raise exception.NoMigrateNetProvided(
            #             server_uuid=resource.id)
            #     gw_url = host_ip + ':' + str(
            #         CONF.v2vgateway_api_listen_port)
            #     extra_properties = {}
            #     extra_properties['gw_url'] = gw_url
            #     extra_properties['sys_clone'] = sys_clone
            #     resource.extra_properties.update(extra_properties)
            block_device_mapping = server_properties.get(
                'block_device_mapping_v2')
            if block_device_mapping:
                client = None
                if gw_url:
                    gw_urls = gw_url.split(':')
                    client = birdiegatewayclient.get_birdiegateway_client(
                        gw_urls[0], gw_urls[1])
                for block_device in block_device_mapping:
                    device_name = block_device.get('device_name')
                    volume_name = block_device.get('volume_id').get(
                        'get_resource')
                    volume_resource = resource_map.get(volume_name)
                    boot_index = block_device.get('boot_index')
                    if boot_index == 0 or boot_index == '0':
                        volume_resource.extra_properties['sys_clone'] = \
                            sys_clone
                        if not sys_clone:
                            continue
                    else:
                        d_copy = copy_data and volume_resource. \
                            extra_properties['copy_data']
                        volume_resource.extra_properties['copy_data'] = \
                            d_copy
                        if not d_copy:
                            continue
                    # need to check the vm disk name
                    if not client:
                        continue
                    src_dev_format = client.vservices. \
                        get_disk_format(device_name).get('disk_format')
                    src_mount_point = client.vservices. \
                        get_disk_mount_point(device_name).get('mount_point')
                    volume_resource.extra_properties['guest_format'] = \
                        src_dev_format
                    volume_resource.extra_properties['mount_point'] = \
                        src_mount_point
                    volume_resource.extra_properties['gw_url'] = gw_url
                    volume_resource.extra_properties['is_deacidized'] = True
                    sys_dev_name = client.vservices. \
                        get_disk_name(volume_resource.id).get('dev_name')
                    if not sys_dev_name:
                        sys_dev_name = device_name
                    volume_resource.extra_properties['sys_dev_name'] = \
                        sys_dev_name
def create_trust_context(self):
    """Create a trust using the trustor identity in the current context.

    The trust is created with the trustee as the heat service user.

    If the current context already contains a trust_id, we do nothing
    and return the current context.

    Returns a context containing the new trust_id.
    """
    if self.context.trust_id:
        return self.context

    # We need the service admin user ID (not name), as the trustor user
    # can't look up the ID in keystoneclient unless they're admin.
    # Work around this by getting the user_id from admin_client.
    try:
        trustee_user_id = self.context.trusts_auth_plugin.get_user_id(
            self.session)
    except kc_exception.Unauthorized:
        LOG.error(_LE("Domain admin client authentication failed"))
        raise exception.AuthorizationFailure()

    trustor_user_id = self.context.auth_plugin.get_user_id(self.session)
    trustor_proj_id = self.context.auth_plugin.get_project_id(self.session)

    # inherit the roles of the trustor, unless trusts_delegated_roles is set
    if cfg.CONF.trusts_delegated_roles:
        roles = cfg.CONF.trusts_delegated_roles
    else:
        roles = self.context.roles

    matching_roles = [rol for rol in self.context.roles if rol in roles]
    if 0 == len(matching_roles):
        matching_roles = cfg.CONF.trusts_delegated_roles
        LOG.error(_LE("None of the user's roles can be delegated to the "
                      "trust: %s"), self.context.roles)

    trust_client = (self.admin_client if cfg.CONF.FusionSphere.pubcloud
                    else self.client)
    try:
        trust = trust_client.trusts.create(trustor_user=trustor_user_id,
                                           trustee_user=trustee_user_id,
                                           project=trustor_proj_id,
                                           impersonation=True,
                                           role_names=matching_roles)
    except kc_exception.NotFound:
        LOG.debug("Failed to find roles %s for user %s",
                  roles, trustor_user_id)
        raise exception.MissingCredentialError(
            required=_("roles %s") % roles)
    except kc_exception.EmptyCatalog:
        base_url = self.v3_endpoint + '/OS-TRUST'
        LOG.debug("The service catalog is empty, use %s to create trusts",
                  base_url)
        trust = self.client.trusts.create(trustor_user=trustor_user_id,
                                          trustee_user=trustee_user_id,
                                          project=trustor_proj_id,
                                          impersonation=True,
                                          role_names=matching_roles,
                                          base_url=base_url)

    context_data = self.context.to_dict()
    context_data['overwrite'] = False
    trust_context = context.RequestContext.from_dict(context_data)
    trust_context.trust_id = trust.id
    trust_context.trustor_user_id = trustor_user_id
    return trust_context
def _handle_volume_for_svm_after_clone(self, context, server_resource,
                                       resources):
    bdms = server_resource['properties'].get('block_device_mapping_v2', [])
    vgw_id = server_resource.get('extra_properties', {}).get('gw_id')
    for bdm in bdms:
        volume_key = bdm.get('volume_id', {}).get('get_resource')
        boot_index = bdm.get('boot_index')
        device_name = bdm.get('device_name')
        volume_res = resources.get(volume_key)
        try:
            if volume_res.get('extra_properties', {}).get('is_deacidized'):
                d_copy = volume_res.get('extra_properties', {}) \
                    .get('copy_data')
                if not d_copy:
                    continue
                volume_id = volume_res.get('extra_properties', {}).get('id')
                vgw_url = volume_res.get('extra_properties', {}) \
                    .get('gw_url')
                sys_clone = volume_res.get('extra_properties', {}) \
                    .get('sys_clone')
                vgw_ip = vgw_url.split(':')[0]
                client = birdiegatewayclient.get_birdiegateway_client(
                    vgw_ip, str(CONF.v2vgateway_api_listen_port))
                if boot_index not in ['0', 0] or sys_clone:
                    client.vservices._force_umount_disk("/opt/" + volume_id)

                # if the provider cloud can not detach a volume in active
                # status, stop the vgw first
                if not CONF.is_active_detach_volume:
                    resouce_common = common.ResourceCommon()
                    self.compute_api.stop_server(context, vgw_id)
                    resouce_common._await_instance_status(context, vgw_id,
                                                          'SHUTOFF')
                if boot_index in ['0', 0]:
                    if sys_clone:
                        self.compute_api.detach_volume(context, vgw_id,
                                                       volume_id)
                        self._wait_for_volume_status(context, volume_id,
                                                     vgw_id, 'available')
                        self.volume_api.set_volume_shareable(context,
                                                             volume_id,
                                                             False)
                else:
                    self.compute_api.detach_volume(context, vgw_id,
                                                   volume_id)
                    self._wait_for_volume_status(context, volume_id,
                                                 vgw_id, 'available')
                    server_id = server_resource.get('extra_properties',
                                                    {}).get('id')
                    self.compute_api.attach_volume(context, server_id,
                                                   volume_id, device_name)
                    self._wait_for_volume_status(context, volume_id,
                                                 server_id, 'in-use')

                if not CONF.is_active_detach_volume:
                    self.compute_api.start_server(context, vgw_id)
                    resouce_common._await_instance_status(context, vgw_id,
                                                          'ACTIVE')
        except Exception as e:
            LOG.error(_LE('Error from handle volume of vm after clone. '
                          'Error=%(e)s'), {'e': e})