def test_api_version(self):
    # The function manager.return_api_version has two versions:
    # when called with api version 3.1 it should return the
    # string '3.1', and when called with api version 3.2 or higher
    # it should return the string '3.2'.
    version = api_versions.APIVersion('3.1')
    api = client.Client(api_version=version)
    manager = test_utils.FakeManagerWithApi(api)
    self.assertEqual('3.1', manager.return_api_version())

    version = api_versions.APIVersion('3.2')
    api = client.Client(api_version=version)
    manager = test_utils.FakeManagerWithApi(api)
    self.assertEqual('3.2', manager.return_api_version())

    # pick up the highest version
    version = api_versions.APIVersion('3.3')
    api = client.Client(api_version=version)
    manager = test_utils.FakeManagerWithApi(api)
    self.assertEqual('3.2', manager.return_api_version())

    version = api_versions.APIVersion('3.0')
    api = client.Client(api_version=version)
    manager = test_utils.FakeManagerWithApi(api)
    # An exception will be raised here because the function
    # return_api_version doesn't support version 3.0
    self.assertRaises(exceptions.VersionNotFoundForAPIMethod,
                      manager.return_api_version)
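# A hedged sketch of how a manager with versioned methods such as
# FakeManagerWithApi.return_api_version above might be defined; the
# class body is an assumption modeled on cinderclient's
# api_versions.wraps decorator, which dispatches to the variant whose
# version range matches the client's negotiated microversion.
from cinderclient import api_versions
from cinderclient import base


class FakeManagerWithApi(base.Manager):

    @api_versions.wraps('3.1', '3.1')
    def return_api_version(self):
        # Called only when the client was built with api_version 3.1.
        return '3.1'

    @api_versions.wraps('3.2')  # 3.2 and every later microversion
    def return_api_version(self):  # noqa: F811
        return '3.2'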
def get_cinder_client(self): """ cinder_client.limits cinder_client.volumes cinder_client.volume_snapshots cinder_client.volume_types cinder_client.volume_type_access cinder_client.volume_encryption_types cinder_client.qos_specs cinder_client.quota_classes cinder_client.quotas cinder_client.backups cinder_client.restores cinder_client.transfers cinder_client.services cinder_client.consistencygroups cinder_client.cgsnapshots cinder_client.availability_zones cinder_client.pools cinder_client.capabilities """ session = self._get_session_for_service() cinder_client = cinder_v3.Client(session=session) return cinder_client
def __init__(self, conf, endpoint_override=None):
    self.session = None
    self.endpoint_override = endpoint_override
    self.project_domain_name = conf.project_domain_name
    self.username_domain_name = conf.user_domain_name
    self.username = conf.username
    self.password = conf.password
    self.project_name = conf.project_name
    self.auth_url = conf.auth_url
    self.region_name = conf.region_name
    self.endpoint_type = conf.endpoint_type
    self.init_session()
    self._volumes = cinderclient.Client(
        self.username, self.password, self.project_name,
        auth_url=self.auth_url,
        region_name=self.region_name,
        endpoint_override=self.endpoint_override,
        insecure=True)
    self._compute = nova_client.Client(
        api_versions.get_api_version('2.53'),
        session=self.session,
        endpoint_type=self.endpoint_type,
        endpoint_override=self.endpoint_override,
        region_name=self.region_name)
    self._placement = PlacementClient(self.session)
def get_client(context):
    """Get a cinder client connection.

    :param context: request context,
                    instance of ironic.common.context.RequestContext
    :returns: A cinder client.
    """
    service_auth = keystone.get_auth('cinder')
    session = _get_cinder_session()

    # TODO(pas-ha) remove in Rocky
    adapter_opts = {}
    # NOTE(pas-ha) new option must always win if set
    if CONF.cinder.url and not CONF.cinder.endpoint_override:
        adapter_opts['endpoint_override'] = CONF.cinder.url
    if CONF.keystone.region_name and not CONF.cinder.region_name:
        adapter_opts['region_name'] = CONF.keystone.region_name

    adapter = keystone.get_adapter('cinder', session=session,
                                   auth=service_auth, **adapter_opts)
    # TODO(pas-ha) use versioned endpoint data to select required
    # cinder api version
    cinder_url = adapter.get_endpoint()
    # TODO(pas-ha) investigate possibility of passing a user context here,
    # similar to what neutron/glance-related code does
    # NOTE(pas-ha) cinderclient has both 'connect_retries' (passed to
    # ksa.Adapter) and 'retries' (used in its subclass of ksa.Adapter)
    # options. The first governs retries on establishing the HTTP
    # connection, the second governs retries on OverLimit exceptions
    # from the API. The description of [cinder]/retries fits the first,
    # so this is what we pass.
    return client.Client(session=session, auth=service_auth,
                         endpoint_override=cinder_url,
                         connect_retries=CONF.cinder.retries,
                         global_request_id=context.global_id)
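# Hedged usage sketch for the helper above; the caller, context object
# and volume ID are illustrative, not from the original code.
def show_volume_status(context, volume_id):
    cinder = get_client(context)
    # volumes.get() issues GET /v3/{project_id}/volumes/{volume_id};
    # the request carries context.global_id for cross-service tracing.
    volume = cinder.volumes.get(volume_id)
    return volume.status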
def get_cinderclient(self, context=None, legacy_update=False):
    # NOTE: For legacy image update from single store to multiple
    # stores we need to use admin context rather than user provided
    # credentials
    if legacy_update:
        user_overriden = False
        context = context.elevated()
    else:
        user_overriden = self.is_user_overriden()

    if user_overriden:
        username = self.store_conf.cinder_store_user_name
        password = self.store_conf.cinder_store_password
        project = self.store_conf.cinder_store_project_name
        url = self.store_conf.cinder_store_auth_address
    else:
        username = context.user_id
        password = context.auth_token
        project = context.project_id

        if self.store_conf.cinder_endpoint_template:
            template = self.store_conf.cinder_endpoint_template
            url = template % context.to_dict()
        else:
            info = self.store_conf.cinder_catalog_info
            service_type, service_name, interface = info.split(':')
            try:
                catalog = keystone_sc.ServiceCatalogV2(
                    context.service_catalog)
                url = catalog.url_for(
                    region_name=self.store_conf.cinder_os_region_name,
                    service_type=service_type,
                    service_name=service_name,
                    interface=interface)
            except keystone_exc.EndpointNotFound:
                reason = _("Failed to find Cinder from a service catalog.")
                raise exceptions.BadStoreConfiguration(store_name="cinder",
                                                       reason=reason)

    c = cinderclient.Client(
        username, password, project, auth_url=url,
        region_name=self.store_conf.cinder_os_region_name,
        insecure=self.store_conf.cinder_api_insecure,
        retries=self.store_conf.cinder_http_retries,
        cacert=self.store_conf.cinder_ca_certificates_file)

    LOG.debug(
        'Cinderclient connection created for user %(user)s using URL: '
        '%(url)s.', {'user': username, 'url': url})

    # noauth extracts user_id:project_id from auth_token
    if not user_overriden:
        c.client.auth_token = context.auth_token or '%s:%s' % (username,
                                                               project)
        c.client.management_url = url
    return c
def _setup(self):
    auth = v3.Password(auth_url=self.auth_url,
                       user_domain_name=self.user_domain_name,
                       username=self.username,
                       password=self.password,
                       project_domain_name=self.project_domain_name,
                       project_name=self.project_name)
    sess = session.Session(auth=auth)
    self.client = client.Client(self.client_version, session=sess)
def __init__(self, session):
    """Initializes the factory.

    :param session: an initialized OpenStack session to use for the
                    various component clients
    """
    self._nova = novaClient.Client(2, session=session,
                                   endpoint_type="public")
    self._cinder = cinderClient.Client(2, session=session,
                                       endpoint_type="public")
    self._neutron = neutronClient.Client(session=session,
                                         endpoint_type="public")
def _check_cinder(self):
    cinder = cinder_client.Client(session=self._get_session())
    try:
        for service in cinder.services.list(host=self.CONF.host):
            if service.state == 'up':
                return 0
            else:
                logger.error("Agent %s is down, commencing suicide",
                             service.host)
                return 1
        logger.warning("Agent hostname %s not registered", self.CONF.host)
    except ClientException as e:
        # Keystone/Cinder down; cannot determine liveness, assume alive.
        logger.warning("Keystone or Cinder down, cannot determine "
                       "liveness: %s", e)
        return 0
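# A minimal sketch of wiring the liveness probe above into a script
# exit code; HealthCheck is a hypothetical name for the owning class,
# and its constructor arguments are assumptions.
if __name__ == '__main__':
    import sys
    # 0 = agent up (or state unknown), 1 = agent registered but down.
    sys.exit(HealthCheck()._check_cinder())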
def get_cinder_client(self):
    if not self.cinder_client:
        auth = v3.Password(auth_url=self.conf['auth_url'],
                           username=self.conf['admin_user'],
                           password=self.conf['admin_pwd'],
                           user_domain_name=self.conf['user_domain'],
                           project_name=self.conf['admin_tenant'],
                           project_domain_name=self.conf['project_domain'])
        self.cinder_client = cinder_client_v3.Client(
            session=ksc_session.Session(auth=auth),
            auth_url=self.conf['auth_url'],
            endpoint_type='internalURL',
            region_name="RegionOne")
    return self.cinder_client
def client():
    session = sessions.cache().get_session(sessions.SESSION_TYPE_CINDER)
    auth = keystone.auth()
    if CONF.cinder.api_version == 2:
        cinder = cinder_client_v2.Client(
            session=session, auth=auth,
            endpoint_type=CONF.cinder.endpoint_type,
            region_name=CONF.os_region_name)
    else:
        cinder = cinder_client_v3.Client(
            session=session, auth=auth,
            endpoint_type=CONF.cinder.endpoint_type,
            region_name=CONF.os_region_name)
    return cinder
def get_client():
    """Get a cinder client connection.

    :returns: A cinder client.
    """
    params = {'connect_retries': CONF.cinder.retries}
    # TODO(jtaryma): Add support for noauth
    # NOTE(TheJulia): If a URL is provided for cinder, we will pass
    # along the URL to python-cinderclient. Otherwise the library
    # handles keystone url autodetection.
    if CONF.cinder.url:
        params['endpoint_override'] = CONF.cinder.url
    if CONF.keystone.region_name:
        params['region_name'] = CONF.keystone.region_name
    params['session'] = _get_cinder_session()
    return client.Client(**params)
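# A minimal sketch of the _get_cinder_session() helper used above,
# assuming the module-level caching pattern common in ironic, where
# keystone.get_session loads a keystoneauth1 session from the [cinder]
# config group. The cached session lets every get_client() call reuse
# one authenticated HTTP session.
_CINDER_SESSION = None


def _get_cinder_session():
    global _CINDER_SESSION
    if not _CINDER_SESSION:
        _CINDER_SESSION = keystone.get_session('cinder')
    return _CINDER_SESSION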
def cinder(self):
    if self._cinder:
        return self._cinder

    endpoint_type = self._get_client_option('cinder', 'endpoint_type')
    region_name = self._get_client_option('cinder', 'region_name')
    cinderclient_version = self._get_client_option('cinder', 'api_version')
    endpoint = self.url_for(service_type='block-storage',
                            interface=endpoint_type,
                            region_name=region_name)
    args = {
        'cacert': self._get_client_option('cinder', 'ca_file'),
        'insecure': self._get_client_option('cinder', 'insecure')
    }

    session = self.keystone().session
    self._cinder = cinder_client.Client(cinderclient_version,
                                        session=session,
                                        endpoint_override=endpoint,
                                        **args)
    return self._cinder
def __init__(self, conf):
    self.interface = conf.endpoint_type
    self.project_domain_name = 'Default'
    self.username_domain_name = "Default"
    self.username = conf.username
    self.password = conf.password
    self.project_name = conf.project_name
    self.auth_url = conf.auth_url
    self.region_name = conf.region_name
    self.init_session()
    self._volumes = cinderclient.Client(self.username,
                                        self.password,
                                        self.project_name,
                                        auth_url=self.auth_url,
                                        region_name=self.region_name,
                                        insecure=True,
                                        endpoint_type=self.interface)
    self._images = glanceclient.Client(session=self.session,
                                       region_name=self.region_name,
                                       interface=self.interface)
def cinder_client(context, region_name=None):
    if CONF.cinder_url:
        url = '%(cinder_url)s%(tenant)s' % {
            'cinder_url': normalize_url(CONF.cinder_url),
            'tenant': context.project_id}
    else:
        region = region_name or CONF.service_credentials.region_name
        url = get_endpoint(context.service_catalog,
                           service_type=CONF.cinder_service_type,
                           endpoint_region=region,
                           endpoint_type=CONF.cinder_endpoint_type)

    client = CinderClient.Client(context.user, context.auth_token,
                                 project_id=context.project_id,
                                 auth_url=CONF.service_credentials.auth_url,
                                 insecure=CONF.cinder_api_insecure)
    client.client.auth_token = context.auth_token
    client.client.management_url = url
    return client
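# Hypothetical sketch of the normalize_url helper referenced above; it
# only needs to guarantee a trailing slash so the tenant id can be
# appended safely when building the endpoint URL.
def normalize_url(url):
    return url if url.endswith('/') else url + '/'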
def test_api_version(self):
    version = api_versions.APIVersion('3.1')
    api = client.Client(api_version=version)
    manager = test_utils.FakeManagerWithApi(api)
    r1 = base.Resource(manager, {'id': 1})
    self.assertEqual(version, r1.api_version)
def create_conn(cred=None):
    '''Create a client connection from a credentials dict.'''
    nt = client.Client(username=cred['username'],
                       api_key=cred['password'],
                       project_id=cred['project_id'],
                       auth_url=cred['auth_url'],
                       endpoint_type=cred['endpoint_type'],
                       cacert=cred['certificate'],
                       region_name=cred['region_name'])
    return nt
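# Illustrative call, assuming a credentials dict of this shape (all
# values are placeholders, not from the original code):
cred = {
    'username': 'demo',
    'password': 'secret',
    'project_id': 'demo',
    'auth_url': 'http://controller:5000/v3',
    'endpoint_type': 'publicURL',
    'certificate': '/etc/ssl/certs/ca.pem',
    'region_name': 'RegionOne',
}
nt = create_conn(cred)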
def cinder_api(self):
    """Connect to cinder."""
    return cinder_client.Client('3', session=self.sess)
def setUp(self):
    super(ClientTestBase, self).setUp()

    test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
    try:
        test_timeout = int(test_timeout)
    except ValueError:
        test_timeout = 0
    if test_timeout > 0:
        self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

    if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
            os.environ.get('OS_STDOUT_CAPTURE') == '1'):
        stdout = self.useFixture(fixtures.StringStream('stdout')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
    if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
            os.environ.get('OS_STDERR_CAPTURE') == '1'):
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

    if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
            os.environ.get('OS_LOG_CAPTURE') != '0'):
        self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                               format=self.log_format,
                                               level=None))

    # Collecting of credentials:
    #
    # Grab the cloud config from a user's clouds.yaml file.
    # First look for a functional_admin cloud, as this is a cloud
    # that the user may have defined for functional testing that has
    # admin credentials.
    # If that is not found, get the devstack config and override the
    # username and project_name to be admin so that admin credentials
    # will be used.
    #
    # Finally, fall back to looking for environment variables to support
    # existing users running these tests the old way. We should deprecate
    # that as tox 2.0 blanks out the environment.
    #
    # TODO(sdague): while we collect this information in
    # tempest-lib, we do it in a way that's not available for top
    # level tests. Long term this probably needs to be in the base
    # class.
    openstack_config = openstack.config.OpenStackConfig()
    try:
        cloud_config = openstack_config.get_one_cloud('functional_admin')
    except openstack.config.exceptions.OpenStackConfigException:
        try:
            cloud_config = openstack_config.get_one_cloud(
                'devstack', auth=dict(username='admin',
                                      project_name='admin'))
        except openstack.config.exceptions.OpenStackConfigException:
            try:
                cloud_config = openstack_config.get_one_cloud('envvars')
            except openstack.config.exceptions.OpenStackConfigException:
                cloud_config = None

    if cloud_config is None:
        raise NoCloudConfigException(
            "Could not find a cloud named functional_admin or a cloud"
            " named devstack. Please check your clouds.yaml file and"
            " try again.")
    auth_info = cloud_config.config['auth']

    user = auth_info['username']
    passwd = auth_info['password']
    self.project_name = auth_info['project_name']
    auth_url = auth_info['auth_url']
    user_domain_id = auth_info['user_domain_id']
    self.project_domain_id = auth_info['project_domain_id']

    if 'insecure' in cloud_config.config:
        self.insecure = cloud_config.config['insecure']
    else:
        self.insecure = False
    self.cacert = cloud_config.config['cacert']
    self.cert = cloud_config.config['cert']

    auth = identity.Password(username=user,
                             password=passwd,
                             project_name=self.project_name,
                             auth_url=auth_url,
                             project_domain_id=self.project_domain_id,
                             user_domain_id=user_domain_id)
    session = ksession.Session(cert=self.cert,
                               auth=auth,
                               verify=(self.cacert or not self.insecure))

    self.client = self._get_novaclient(session)
    self.glance = glanceclient.Client('2', session=session)

    # pick some reasonable flavor / image combo
    if "flavor" not in CACHE:
        CACHE["flavor"] = pick_flavor(self.client.flavors.list())
    if "image" not in CACHE:
        CACHE["image"] = pick_image(self.glance.images.list())
    self.flavor = CACHE["flavor"]
    self.image = CACHE["image"]

    if "network" not in CACHE:
        # Get the networks from neutron.
        neutron = neutronclient.Client(session=session)
        neutron_networks = neutron.list_networks()['networks']
        # Convert the neutron dicts to Network objects.
        nets = []
        for network in neutron_networks:
            nets.append(networks.Network(networks.NeutronManager, network))
        # Keep track of whether or not there are multiple networks
        # available to the given tenant because if so, a specific
        # network ID has to be passed in on server create requests
        # otherwise the server POST will fail with a 409.
        CACHE['multiple_networks'] = len(nets) > 1
        CACHE["network"] = pick_network(nets)
    self.network = CACHE["network"]
    self.multiple_networks = CACHE['multiple_networks']

    # create a CLI client in case we'd like to do CLI
    # testing. tempest.lib does this really weird thing where it
    # builds a giant factory of all the CLIs that it knows
    # about. Eventually that should really be unwound into
    # something more sensible.
    cli_dir = os.environ.get(
        'OS_NOVACLIENT_EXEC_DIR',
        os.path.join(os.path.abspath('.'), '.tox/functional/bin'))

    self.cli_clients = tempest.lib.cli.base.CLIClient(
        username=user,
        password=passwd,
        tenant_name=self.project_name,
        uri=auth_url,
        cli_dir=cli_dir,
        insecure=self.insecure)

    self.keystone = keystoneclient.Client(session=session,
                                          username=user,
                                          password=passwd)
    self.cinder = cinderclient.Client(auth=auth, session=session)
def _migrate_project(region, username=None, charge_code=None,
                     access_token=None):
    sess = admin_session(region)
    glance = glanceclient.Client('2', session=sess)
    keystone = admin_ks_client(region=region)

    init_progress = 0.1

    ks_legacy_user = get_user(keystone, username)
    if not ks_legacy_user:
        yield 1.0, f'User "{username}" not found in region "{region}", skipping'
        return

    ks_legacy_project = next(iter([
        ks_p for ks_p in keystone.projects.list(user=ks_legacy_user,
                                                domain='default')
        if getattr(ks_p, 'charge_code', ks_p.name) == charge_code
    ]), None)
    if not ks_legacy_project:
        yield 1.0, f'Project {charge_code} not found in region "{region}", skipping'
        return

    # Perform login, which populates projects based on current memberships
    _do_federated_login(region, access_token)

    federated_domain = next(iter([ks_d for ks_d in keystone.domains.list()
                                  if ks_d.name == 'chameleon']))
    if not federated_domain:
        raise ValueError('Could not find federated domain')

    ks_federated_project = next(iter([
        ks_p for ks_p in keystone.projects.list(name=charge_code,
                                                domain=federated_domain)
    ]), None)
    if not ks_federated_project:
        raise ValueError('Could not find corresponding federated project')

    images_to_migrate = [
        img for img in glance.images.list(owner=ks_legacy_project)
        if img.owner == ks_legacy_project.id
    ]

    volumes_to_migrate = []
    if region == 'KVM@TACC':
        unscoped_session = unscoped_user_session(
            region, access_token=access_token)
        scoped_user_session = project_scoped_session(
            project_id=ks_federated_project.id,
            unscoped_token=unscoped_session.get_token(),
            region=region)
        admin_cinder = cinder_client.Client(session=sess)
        user_cinder = cinder_client.Client(session=scoped_user_session)
        volumes_to_migrate = [v for v in admin_cinder.volumes.list(
            search_opts={
                'all_tenants': 1,
                'project_id': ks_legacy_project.id,
            }
        )]

    num_images = len(images_to_migrate)
    num_volumes = len(volumes_to_migrate)
    migrations_count = num_images + num_volumes

    if migrations_count:
        # Increment the bar slightly to show there is work being done
        progress = init_progress
        yield progress, (
            f'Will migrate {num_images} disk images and {num_volumes} '
            f'volumes for project "{charge_code}"')
    else:
        progress = 0.9
        yield progress, (
            f'No images or volumes left to migrate for project '
            f'"{charge_code}"')

    for image in images_to_migrate:
        yield progress, f'Migrating disk image "{image.name}"...'
        # Preserve already-public images
        visibility = 'public' if image.visibility == 'public' else 'shared'
        glance.images.update(image.id, owner=ks_federated_project.id,
                             visibility=visibility)
        glance.image_members.create(image.id, ks_legacy_project.id)
        glance.image_members.update(image.id, ks_legacy_project.id,
                                    'accepted')
        progress += ((1.0 - init_progress) / migrations_count)

    for volume in volumes_to_migrate:
        yield progress, f'Migrating volume "{volume.name}"...'
        if volume.status == 'available':
            transfer = admin_cinder.transfers.create(volume.id)
            user_cinder.transfers.accept(transfer.id, transfer.auth_key)
        else:
            yield progress, (
                f'Volume {volume.name} is not available to transfer. '
                f'Please detach the volume from any instances and re-run '
                f'the migration.')
        progress += ((1.0 - init_progress) / migrations_count)

    keystone.projects.update(ks_legacy_project,
                             migrated_at=datetime.now(tz=timezone.utc),
                             migrated_by=username)

    progress = 1.0
    yield progress, f'Finished migration of region "{region}"'
def get_cinder_client():
    return cinclient.Client(interface='internal', session=sess)
def capi(self):
    if not self._capi:
        self._capi = cinderclient.Client(session=self.session)
    return self._capi