def setUp(self):
    """Create and configure two cache regions for the tests: one using the
    default key generator and one using the kwarg-aware generator.
    """
    super().setUp()
    self.conf = self.config_fixture.conf
    # Region keyed by the default function key generator.
    self.region = cache.create_region()
    cache.configure_cache_region(self.conf, self.region)
    # Region whose keys also account for keyword arguments.
    self.region_kwargs = cache.create_region(
        function=cache.kwarg_function_key_generator)
    cache.configure_cache_region(self.conf, self.region_kwargs)
def setUp(self):
    """Set up two proxied cache regions plus a value used by the tests."""
    super(CacheRegionTest, self).setUp()
    conf = self.config_fixture.conf
    # Default-keyed region, wrapped in the test proxy.
    self.region = cache.create_region()
    cache.configure_cache_region(conf, self.region)
    self.region.wrap(TestProxy)
    # Kwarg-keyed region, also proxied.
    self.region_kwargs = cache.create_region(
        function=cache.kwarg_function_key_generator)
    cache.configure_cache_region(conf, self.region_kwargs)
    self.region_kwargs.wrap(TestProxy)
    self.test_value = TestProxyValue('Decorator Test')
def setUp(self):
    """Prepare two proxy-wrapped cache regions and a sample proxy value."""
    super(CacheRegionTest, self).setUp()
    self.region = cache.create_region()
    self.region_kwargs = cache.create_region(
        function=cache.kwarg_function_key_generator)
    # Both regions get the same configuration and proxy wrapping.
    for region in (self.region, self.region_kwargs):
        cache.configure_cache_region(self.config_fixture.conf, region)
        region.wrap(TestProxy)
    self.test_value = TestProxyValue('Decorator Test')
def _get_cache_region_for_legacy(url):
    """Build a dict-backed cache region from a legacy ``memory://`` URL.

    :param url: legacy cache URL; only the ``memory`` scheme is accepted
    :returns: a configured dogpile cache region
    :raises RuntimeError: for any scheme other than ``memory``
    """
    parsed = parse.urlparse(url)
    if parsed.scheme != 'memory':
        raise RuntimeError(
            _('Old style configuration can use only memory '
              '(dict) backend'))
    query = parsed.query
    # NOTE(fangzhen): The following NOTE and code is from legacy
    # oslo-incubator cache module. Previously reside in neutron at
    # neutron/openstack/common/cache/cache.py:78
    # NOTE(flaper87): We need the following hack
    # for python versions < 2.7.5. Previous versions
    # of python parsed query params just for 'known'
    # schemes. This was changed in this patch:
    # http://hg.python.org/cpython/rev/79e6ff3d9afd
    if not query and '?' in parsed.path:
        query = parsed.path.split('?', 1)[-1]
    params = parse.parse_qs(query)
    ttl = int(params.get('default_ttl', [0])[0])
    region = cache.create_region()
    region.configure('oslo_cache.dict', expiration_time=ttl)
    return region
def _get_custom_cache_region(expiration_time=WEEK, backend=None, url=None):
    """Create instance of oslo_cache client.

    For backends you can pass specific parameters by kwargs.
    For 'dogpile.cache.memcached' backend 'url' parameter must be specified.

    :param backend: backend name
    :param expiration_time: interval in seconds to indicate maximum
        time-to-live value for each key
    :param url: memcached url(s)
    :raises RuntimeError: if the backend is not dict or memcached
    """
    if backend == 'oslo_cache.dict':
        backend_arguments = {'expiration_time': expiration_time}
    elif backend == 'dogpile.cache.memcached':
        backend_arguments = {'url': url}
    else:
        raise RuntimeError(_('old style configuration can use '
                             'only dictionary or memcached backends'))
    region_params = {'arguments': backend_arguments}
    # A zero TTL means "no region-level expiration" — omit the parameter.
    if expiration_time != 0:
        region_params['expiration_time'] = expiration_time
    region = cache.create_region()
    region.configure(backend, **region_params)
    return region
def _get_cache_region_for_legacy(url):
    """Translate a legacy ``memory://`` cache URL into a dict-backed region.

    :param url: old-style cache URL
    :returns: configured dogpile cache region
    :raises RuntimeError: if the URL scheme is not ``memory``
    """
    url_parts = parse.urlparse(url)
    if url_parts.scheme == 'memory':
        query_string = url_parts.query
        # NOTE(fangzhen): The following NOTE and code is from legacy
        # oslo-incubator cache module. Previously reside in neutron at
        # neutron/openstack/common/cache/cache.py:78
        # NOTE(flaper87): We need the following hack
        # for python versions < 2.7.5. Previous versions
        # of python parsed query params just for 'known'
        # schemes. This was changed in this patch:
        # http://hg.python.org/cpython/rev/79e6ff3d9afd
        if not query_string and '?' in url_parts.path:
            query_string = url_parts.path.split('?', 1)[-1]
        options = parse.parse_qs(query_string)
        default_ttl = int(options.get('default_ttl', [0])[0])
        legacy_region = cache.create_region()
        legacy_region.configure('oslo_cache.dict',
                                expiration_time=default_ttl)
        return legacy_region
    raise RuntimeError(_('Old style configuration can use only memory '
                         '(dict) backend'))
def get_cache_region():
    """Return the process-wide cache region, creating it on first use."""
    global _REGION
    if _REGION:
        return _REGION
    # First call: register the cache options and build the region.
    conf = register_cache_configurations(cfg.CONF)
    _REGION = core.configure_cache_region(conf=conf,
                                          region=core.create_region())
    return _REGION
def _get_custom_cache_region(expiration_time=WEEK, backend=None, url=None):
    """Build and configure an oslo.cache region for the given backend.

    Only the dictionary ('oslo_cache.dict') and memcached
    ('dogpile.cache.memcached') backends are supported; memcached
    additionally requires ``url``.

    :param expiration_time: maximum time-to-live, in seconds, for each key
    :param backend: dogpile backend name
    :param url: memcached url(s)
    :raises RuntimeError: for any unsupported backend
    """
    new_region = cache.create_region()
    # Dispatch table of per-backend argument dictionaries.
    per_backend_args = {
        'oslo_cache.dict': {'expiration_time': expiration_time},
        'dogpile.cache.memcached': {'url': url},
    }
    if backend not in per_backend_args:
        raise RuntimeError(_('old style configuration can use '
                             'only dictionary or memcached backends'))
    kwargs = {'arguments': per_backend_args[backend]}
    if expiration_time != 0:
        kwargs['expiration_time'] = expiration_time
    new_region.configure(backend, **kwargs)
    return new_region
def get_cache_region():
    """Return the singleton cache region, initializing it lazily.

    The region uses the in-memory dict backend with a one-week TTL and is
    then configured from the registered cache options.
    """
    global _REGION
    if _REGION:
        return _REGION
    _REGION = core.create_region()
    _REGION.configure('oslo_cache.dict',
                      arguments={'expiration_time': WEEK})
    core.configure_cache_region(
        conf=register_cache_configurations(cfg.CONF), region=_REGION)
    return _REGION
def create_region(name):
    """Create a dogpile region.

    Wraps oslo_cache.core.create_region. This is used to ensure that the
    Region is properly patched and allows us to more easily specify a
    region name.

    :param str name: The region name
    :returns: The new region.
    :rtype: :class:`dogpile.cache.region.CacheRegion`
    """
    region = cache.create_region()
    region.name = name  # oslo.cache doesn't allow this yet
    return region
def __init__(self):
    """Initialize the metadata cache region from the global configuration."""
    # Caching may be disabled in configuration; warn so operators know
    # responses will not actually be cached.
    if not CONF.cache.enabled:
        LOG.warning("Metadata doesn't use cache. Configure "
                    "cache options to use cache.")
    self.cache_region = cache_core.create_region()
    cache_core.configure_cache_region(CONF, self.cache_region)
def _get_default_cache_region(expiration_time):
    """Create a cache region configured from the global [cache] options.

    NOTE(review): a non-zero ``expiration_time`` is written back into the
    global CONF before configuring, so it also affects every region
    configured afterwards — confirm this side effect is intended.
    """
    new_region = cache.create_region()
    if expiration_time != 0:
        CONF.cache.expiration_time = expiration_time
    cache.configure_cache_region(CONF, new_region)
    return new_region
# Register the proxy and keystone option groups on the global config object.
CONF.register_group(proxy_group)
CONF.register_opts(proxy_opts, proxy_group)
CONF.register_group(keystone_group)
CONF.register_opts(keystone_opts, keystone_group)

# Logging
log.register_options(CONF)

# Caching
cache.configure(CONF)
MEMOIZE_SESSION = None
session_cache_region = cache.create_region()
# Memoization decorator for session lookups, backed by the region above and
# controlled by the [proxy] caching options.
MEMOIZE_SESSION = cache.get_memoization_decorator(
    CONF, session_cache_region, group="proxy")


def load_config():
    """Load parameters from the proxy's config file."""
    # Candidate config locations, in priority order; keep only those that
    # actually exist on disk.
    conf_files = [f for f in ['k2k-proxy.conf', 'etc/k2k-proxy.conf',
                              '/etc/k2k-proxy.conf'] if path.isfile(f)]
    # NOTE(review): ``conf_files is not []`` is an identity comparison
    # against a fresh list literal and is therefore always True; the intended
    # check was probably ``if conf_files:``. Flagging, not changing, here.
    if conf_files is not []:
        CONF(default_config_files=conf_files)


def more_config():
def _get_default_cache_region(expiration_time):
    """Build a region using the default [cache] configuration.

    :param expiration_time: TTL in seconds; when non-zero it overrides
        ``CONF.cache.expiration_time`` globally before configuration
    :returns: the configured cache region
    """
    if expiration_time != 0:
        # Mutates global config — later regions pick this TTL up as well.
        CONF.cache.expiration_time = expiration_time
    region = cache.create_region()
    cache.configure_cache_region(CONF, region)
    return region
def get_cache(conf):
    """Create and configure a cache region from *conf*.

    :param conf: an oslo.config configuration object
    :returns: the configured dogpile cache region
    """
    return core.configure_cache_region(conf, core.create_region())
    constants.K8S_OPERATOR_IN, constants.K8S_OPERATOR_NOT_IN
]

LOG = log.getLogger(__name__)
CONF = cfg.CONF

# Options controlling memoization of pod-IP lookups.
pod_ip_caching_opts = [
    cfg.BoolOpt('caching', default=True),
    cfg.IntOpt('cache_time', default=3600),
]

CONF.register_opts(pod_ip_caching_opts, "pod_ip_caching")

# Set up the oslo.cache region and its memoization decorator before any
# decorated function is defined.
cache.configure(CONF)
pod_ip_cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(CONF, pod_ip_cache_region,
                                          "pod_ip_caching")
cache.configure_cache_region(CONF, pod_ip_cache_region)


def get_network_id(subnets):
    """Return the single neutron network id behind *subnets*.

    Raises IntegrityError unless the mapping resolves to exactly one
    unique network.
    """
    ids = ovu.osvif_to_neutron_network_ids(subnets)
    if len(ids) != 1:
        raise k_exc.IntegrityError(
            "Subnet mapping %(subnets)s is not valid: "
            "%(num_networks)s unique networks found" %
            {'subnets': subnets, 'num_networks': len(ids)})
def __init__(self):
    """Build this object's cache region from CONF, warning when disabled."""
    cache_enabled = CONF.cache.enabled
    if not cache_enabled:
        # The region is still created below; with caching disabled it
        # falls back to the oslo.cache no-op backend.
        LOG.warning("Metadata doesn't use cache. "
                    "Configure cache options to use cache.")
    self.cache_region = cache_core.create_region()
    cache_core.configure_cache_region(CONF, self.cache_region)
cfg.CONF.register_opts([caching, cache_time], "group1")
cache.configure(CONF)


def my_key_generator(namespace, fn, **kw):
    """Return a key generator that builds keys from the function name and
    the stringified positional arguments.
    """
    fname = fn.__name__

    def generate_key(*arg):
        # Key = function name followed by its arguments joined with "_".
        key = fname + "_".join(str(s) for s in arg)
        print(key)
        return key
    return generate_key


# Region keyed by the custom generator above, with a memoization decorator
# controlled by the [group1] options.
example_cache_region = cache.create_region(function=my_key_generator)
MEMOIZE = cache.get_memoization_decorator(CONF, example_cache_region,
                                          "group1")
backend = cfg.CONF.cache.backend
print("------------------------------------------------------")
print("Used backend: {}".format(backend))
print("------------------------------------------------------")

# Load config file here
cache.configure_cache_region(CONF, example_cache_region)
print("Cache configuration done")


@MEMOIZE
    'admin_bind_host',
    'compute_host',
    'admin_port',
    'public_port',
    'public_endpoint',
    'admin_endpoint',
]

# This is a general cache region for catalog administration (CRUD operations).
MEMOIZE = cache.get_memoization_decorator(group='catalog')

# This builds a discrete cache region dedicated to complete service catalogs
# computed for a given user + project pair. Any write operation to create,
# modify or delete elements of the service catalog should invalidate this
# entire cache region.
COMPUTED_CATALOG_REGION = oslo_cache.create_region()
MEMOIZE_COMPUTED_CATALOG = cache.get_memoization_decorator(
    group='catalog', region=COMPUTED_CATALOG_REGION)


def format_url(url, substitutions, silent_keyerror_failures=None):
    """Formats a user-defined URL with the given substitutions.

    :param string url: the URL to be formatted
    :param dict substitutions: the dictionary used for substitution
    :param list silent_keyerror_failures: keys for which we should be silent
        if there is a KeyError exception on substitution attempt
    :returns: a formatted URL
    """
    substitutions = utils.WhiteListedItemFilter(WHITELISTED_PROPERTIES,
from keystone.i18n import _
from keystone import notifications

CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)

# This is a general cache region for catalog administration (CRUD operations).
MEMOIZE = cache.get_memoization_decorator(group='catalog')

# This builds a discrete cache region dedicated to complete service catalogs
# computed for a given user + project pair. Any write operation to create,
# modify or delete elements of the service catalog should invalidate this
# entire cache region.
COMPUTED_CATALOG_REGION = oslo_cache.create_region()
MEMOIZE_COMPUTED_CATALOG = cache.get_memoization_decorator(
    group='catalog', region=COMPUTED_CATALOG_REGION)


@dependency.provider('catalog_api')
@dependency.requires('resource_api')
class Manager(manager.Manager):
    """Default pivot point for the Catalog backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """
def setup_cache(conf):
    """Initialize the global cache region from *conf*."""
    global CACHE_REGION
    core.configure(conf)
    CACHE_REGION = core.configure_cache_region(conf, core.create_region())
def _init_cache_region(self):
    """Create and configure this instance's cache region from CONF."""
    region = cache_core.create_region()
    self.cache_region = region
    cache_core.configure_cache_region(CONF, region)
def _get_cache_region(conf):
    """Create a cache region configured from *conf* and return it."""
    new_region = cache.create_region()
    cache.configure_cache_region(conf, new_region)
    return new_region
def setUp(self):
    """Build one proxy-wrapped cache region and the test's proxy value."""
    super(CacheRegionTest, self).setUp()
    conf = self.config_fixture.conf
    self.region = cache.create_region()
    cache.configure_cache_region(conf, self.region)
    self.region.wrap(TestProxy)
    self.test_value = TestProxyValue('Decorator Test')
from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LE
from keystone.models import token_model
from keystone import notifications
from keystone.token import persistence
from keystone.token import providers
from keystone.token import utils

CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)

# Dedicated cache region for issued tokens, memoized via the [token] options.
TOKENS_REGION = oslo_cache.create_region()
MEMOIZE_TOKENS = cache.get_memoization_decorator(group='token',
                                                 region=TOKENS_REGION)

# NOTE(morganfainberg): This is for compatibility in case someone was relying
# on the old location of the UnsupportedTokenVersionException for their code.
UnsupportedTokenVersionException = exception.UnsupportedTokenVersionException

# supported token versions
V2 = token_model.V2
V3 = token_model.V3
VERSIONS = token_model.VERSIONS


def base64_encode(s):
    """Encode a URL-safe string.
# -*- encoding : utf-8 -*- """ @File : __init__.py.py @Time :2021/1/26 18:53 @Author :kuang congxian @Contact :[email protected] @Description : null """ from oslo_cache import core as cache from oslo_config import cfg from mall.common.load_config import CONF caching = cfg.BoolOpt('caching', default=True) cache_time = cfg.IntOpt('cache_time', default=3600) CONF.register_opts([caching, cache_time], "feature-name") cache.configure(CONF) example_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator(CONF, example_cache_region, "feature-name") # Load config file here cache.configure_cache_region(CONF, example_cache_region)
def setUp(self):
    """Create a single configured, proxied region for the test case."""
    super(CacheRegionTest, self).setUp()
    region = cache.create_region()
    cache.configure_cache_region(self.config_fixture.conf, region)
    region.wrap(TestProxy)
    self.region = region
    self.test_value = TestProxyValue('Decorator Test')
# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_cache import core as oslo_cache from oslo_config import cfg from capstone import conf user_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, user_region) token_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, token_region) # Ideally, this would be set to just under 24 hours (such as 23.5 hours), so # that we cache tokens for as long as possible without returning expired # tokens. token_region.expiration_time = 60 token_map_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, token_map_region) # Ideally, this would be set to just over 24 hours (such as 25 hours), so that # the cache invalidator can more confidently purge revoked token data from the # token_region.
def _get_cache_region(conf):
    """Return a new dogpile cache region configured via oslo.cache.

    :param conf: oslo.config object carrying the [cache] options
    :returns: the configured region
    """
    region = cache.create_region()
    cache.configure_cache_region(conf, region)
    return region
from kuryr_kubernetes import utils
from neutronclient.common import exceptions as n_exc

LOG = logging.getLogger(__name__)

# Memoization options for this handler, under [namespace_handler_caching].
namespace_handler_caching_opts = [
    oslo_cfg.BoolOpt('caching', default=True),
    oslo_cfg.IntOpt('cache_time', default=120),
]

oslo_cfg.CONF.register_opts(namespace_handler_caching_opts,
                            "namespace_handler_caching")

# Build the cache region and the MEMOIZE decorator used by handler methods.
cache.configure(oslo_cfg.CONF)
namespace_handler_cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(
    oslo_cfg.CONF, namespace_handler_cache_region,
    "namespace_handler_caching")
cache.configure_cache_region(oslo_cfg.CONF, namespace_handler_cache_region)


class NamespaceHandler(k8s_base.ResourceEventHandler):
    # Watches Kubernetes Namespace objects.
    OBJECT_KIND = constants.K8S_OBJ_NAMESPACE
    OBJECT_WATCH_PATH = "%s/%s" % (constants.K8S_API_BASE, "namespaces")

    def __init__(self):
        """Resolve the project, subnet, and security-group drivers."""
        super(NamespaceHandler, self).__init__()
        self._drv_project = drivers.NamespaceProjectDriver.get_instance()
        self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
        self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
    'sriov'],
    'neutron': ['neutron-vif'],
    'nested': ['nested-vlan'],
}

DEFAULT_TIMEOUT = 180
DEFAULT_INTERVAL = 3

# Memoization options for subnet lookups, under [subnet_caching].
subnet_caching_opts = [
    cfg.BoolOpt('caching', default=True),
    cfg.IntOpt('cache_time', default=3600),
]

CONF.register_opts(subnet_caching_opts, "subnet_caching")

# Region + decorator for memoizing subnet lookups.
cache.configure(CONF)
subnet_cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(
    CONF, subnet_cache_region, "subnet_caching")
cache.configure_cache_region(CONF, subnet_cache_region)


def utf8_json_decoder(byte_data):
    """Deserializes the bytes into UTF-8 encoded JSON.

    :param byte_data: The bytes to be converted into the UTF-8 encoded JSON.
    :returns: The UTF-8 encoded JSON represented by Python dictionary format.
    """
    return jsonutils.loads(byte_data.decode('utf8'))
# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_cache import core as oslo_cache from oslo_config import cfg from capstone import conf user_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, user_region) token_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, token_region) # Ideally, this would be set to just under 24 hours (such as 23.5 hours), so # that we cache tokens for as long as possible without returning expired # tokens. token_region.expiration_time = 60 token_map_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, token_map_region) # Ideally, this would be set to just over 24 hours (such as 25 hours), so that # the cache invalidator can more confidently purge revoked token data from the # token_region.
def setup_cache(conf):
    """Configure oslo.cache and store the resulting region globally.

    :param conf: oslo.config configuration holding the [cache] options
    """
    global CACHE_REGION
    # Register/validate the cache options before building the region.
    core.configure(conf)
    new_region = core.create_region()
    CACHE_REGION = core.configure_cache_region(conf, new_region)
from keystone.common import dependency
from keystone.common import manager
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LE
from keystone.models import token_model
from keystone import notifications
from keystone.token import persistence
from keystone.token import providers
from keystone.token import utils

CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)

# Dedicated cache region for issued tokens, memoized via the [token] options.
TOKENS_REGION = oslo_cache.create_region()
MEMOIZE_TOKENS = cache.get_memoization_decorator(
    group='token', region=TOKENS_REGION)

# NOTE(morganfainberg): This is for compatibility in case someone was relying
# on the old location of the UnsupportedTokenVersionException for their code.
UnsupportedTokenVersionException = exception.UnsupportedTokenVersionException

# supported token versions
V2 = token_model.V2
V3 = token_model.V3
VERSIONS = token_model.VERSIONS


def base64_encode(s):
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import objects
from kuryr_kubernetes import utils

LOG = logging.getLogger(__name__)

# Memoization options for this handler, under [vif_handler_caching].
vif_handler_caching_opts = [
    oslo_cfg.BoolOpt('caching', default=True),
    oslo_cfg.IntOpt('cache_time', default=120),
]

oslo_cfg.CONF.register_opts(vif_handler_caching_opts,
                            "vif_handler_caching")

# Build the cache region and the MEMOIZE decorator used by handler methods.
cache.configure(oslo_cfg.CONF)
vif_handler_cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(oslo_cfg.CONF,
                                          vif_handler_cache_region,
                                          "vif_handler_caching")
cache.configure_cache_region(oslo_cfg.CONF, vif_handler_cache_region)


class VIFHandler(k8s_base.ResourceEventHandler):
    """Controller side of VIF binding process for Kubernetes pods.

    `VIFHandler` runs on the Kuryr-Kubernetes controller and together with
    the CNI driver (that runs on 'kubelet' nodes) is responsible for
    providing networking to Kubernetes pods. `VIFHandler` relies on a set
    of drivers (which are responsible for managing Neutron resources) to
    define the VIF objects and pass them to the CNI driver in form of the
    Kubernetes pod
from oslo_log import log as logging

from octavia.db import api as db_apis
from octavia.i18n import _
from octavia.network import base
from octavia.network.drivers.neutron import allowed_address_pairs as aap
from octavia.network.drivers.neutron import utils
from octavia_f5.common import constants
from octavia_f5.db import repositories

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
PROJECT_ID_ALIAS = 'project-id'

# Cache region + memoization decorator for networking lookups.
cache.configure(CONF)
cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(CONF, cache_region, "networking")
cache.configure_cache_region(CONF, cache_region)


class HierachicalPortBindingDriver(aap.AllowedAddressPairsDriver):
    # NOTE(review): class name is misspelled ("Hierachical"); renaming would
    # break importers, so it is left as-is.

    def __init__(self):
        super(HierachicalPortBindingDriver, self).__init__()
        self.amp_repo = repositories.AmphoraRepository()

    def allocate_vip(self, load_balancer):
        """Return a VIP for *load_balancer*, reusing its port if present."""
        port_id = load_balancer.vip.port_id
        if port_id:
            # Port already allocated by a previous run — just wrap it.
            LOG.info('Port %s already exists. Nothing to be done.', port_id)
            port = self.get_port(port_id)
            return self._port_to_vip(port, load_balancer)
def register_region(name):
    """Create a fresh cache region and store it under *name*.

    :param name: key under which the region is kept in ``_REGIONS``
    """
    new_region = core.create_region()
    _REGIONS[name] = new_region
def _init_cache_region(self):
    """Set up ``self.cache_region`` using the global configuration."""
    self.cache_region = cache_core.create_region()
    # Apply the [cache] options (backend, TTL, ...) to the new region.
    cache_core.configure_cache_region(CONF, self.cache_region)
"drivers respectively"), default={}), ] oslo_cfg.CONF.register_opts(vif_pool_driver_opts, "vif_pool") node_vif_driver_caching_opts = [ oslo_cfg.BoolOpt('caching', default=True), oslo_cfg.IntOpt('cache_time', default=3600), ] oslo_cfg.CONF.register_opts(node_vif_driver_caching_opts, "node_driver_caching") cache.configure(oslo_cfg.CONF) node_driver_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator(oslo_cfg.CONF, node_driver_cache_region, "node_driver_caching") cache.configure_cache_region(oslo_cfg.CONF, node_driver_cache_region) class NoopVIFPool(base.VIFPoolDriver): """No pool VIFs for Kubernetes Pods""" def set_vif_driver(self, driver): self._drv_vif = driver def request_vif(self, pod, project_id, subnets, security_groups): return self._drv_vif.request_vif(pod, project_id, subnets, security_groups)
] CONF.register_group(proxy_group) CONF.register_opts(proxy_opts, proxy_group) CONF.register_group(keystone_group) CONF.register_opts(keystone_opts, keystone_group) # Logging log.register_options(CONF) # Caching cache.configure(CONF) MEMOIZE_SESSION = None session_cache_region = cache.create_region() MEMOIZE_SESSION = cache.get_memoization_decorator(CONF, session_cache_region, group="proxy") def load_config(): """Load parameters from the proxy's config file.""" conf_files = [ f for f in ['k2k-proxy.conf', 'etc/k2k-proxy.conf', '/etc/k2k-proxy.conf'] if path.isfile(f) ] if conf_files is not []: CONF(default_config_files=conf_files)