def get_memoization_decorator(group, expiration_group=None, region=None):
    """Build a memoization decorator bound to the given config *group*.

    :param group: oslo.config group whose caching options drive the decorator
    :param expiration_group: optional group consulted for expiration options
    :param region: cache region to use; the module-level CACHE_REGION is
        used when no region is supplied (only an explicit ``None`` triggers
        the fallback)
    :returns: a decorator produced by ``cache.get_memoization_decorator``
    """
    target_region = CACHE_REGION if region is None else region
    return cache.get_memoization_decorator(
        CONF, target_region, group, expiration_group=expiration_group)
def _get_cacheable_function(self):
    """Return an identity function memoized via the fixture's cache region."""
    decorator = cache.get_memoization_decorator(
        self.config_fixture.conf, self.region, group='cache')

    def cacheable_function(value):
        # Pass the argument straight through; on a cache hit the decorator
        # returns the previously stored value instead of re-entering here.
        return value

    # Apply the decorator explicitly rather than with @-syntax.
    return decorator(cacheable_function)
def _get_cacheable_function(self):
    """Build a memoized pass-through function for cache tests."""
    memoized = cache.get_memoization_decorator(self.config_fixture.conf,
                                               self.region,
                                               group='cache')

    @memoized
    def cacheable_function(value):
        """Echo *value*; repeated calls are served from the cache."""
        return value

    return cacheable_function
def _get_cacheable_function(self, region=None):
    """Build a memoized identity function.

    :param region: optional cache region; any falsy value (not just
        ``None``) falls back to ``self.region``, matching the original
        truthiness check.
    """
    target = region or self.region
    decorator = cache.get_memoization_decorator(
        self.config_fixture.conf, target, group='cache')

    @decorator
    def cacheable_function(value=0, **kw):
        # Extra keyword args are accepted so tests can vary the cache key.
        return value

    return cacheable_function
def _get_cacheable_function(self):
    """Build a memoized identity function against the default region.

    The module-level ``cache.REGION`` backend is temporarily redirected to
    this fixture's region while the decorator is created and applied.
    """
    patcher = mock.patch.object(cache.REGION, 'cache_on_arguments',
                                self.region.cache_on_arguments)
    with patcher:
        memoize = cache.get_memoization_decorator(self.config_fixture.conf,
                                                  group='cache')

        @memoize
        def cacheable_function(value):
            return value

        return cacheable_function
def _get_cache_fallthrough_fn(self, cache_time):
    """Build a test driver proving expiration falls through to TEST_GROUP2.

    :param cache_time: expiration (seconds) expected from the fallthrough
        expiration group; used to fast-forward past it
    :returns: a function taking a TestProxyValue that runs the assertions
    """
    memoize = cache.get_memoization_decorator(
        self.config_fixture.conf,
        self.region,
        group='cache',
        expiration_group=TEST_GROUP2)

    # Minimal object whose accessor is memoized, so cache hits can be told
    # apart from live reads of ``test_value``.
    class _test_obj(object):
        def __init__(self, value):
            self.test_value = value

        @memoize
        def get_test_value(self):
            return self.test_value

    def _do_test(value):
        test_obj = _test_obj(value)
        # Ensure the value has been cached
        test_obj.get_test_value()
        # Get the now cached value
        cached_value = test_obj.get_test_value()
        self.assertTrue(cached_value.cached)
        self.assertEqual(value.value, cached_value.value)
        self.assertEqual(cached_value.value, test_obj.test_value.value)
        # Change the underlying value on the test object.
        test_obj.test_value = TestProxyValue(
            uuidutils.generate_uuid(dashed=False))
        # Still within the expiration window: the stale cached value wins.
        self.assertEqual(cached_value.value,
                         test_obj.get_test_value().value)
        # override the system time to ensure the non-cached new value
        # is returned
        new_time = time.time() + (cache_time * 2)
        with mock.patch.object(time, 'time', return_value=new_time):
            overriden_cache_value = test_obj.get_test_value()
        # After expiry the fresh (uncached) value must be returned.
        self.assertNotEqual(cached_value.value,
                            overriden_cache_value.value)
        self.assertEqual(test_obj.test_value.value,
                         overriden_cache_value.value)

    return _do_test
CONF.register_opts(proxy_opts, proxy_group) CONF.register_group(keystone_group) CONF.register_opts(keystone_opts, keystone_group) # Logging log.register_options(CONF) # Caching cache.configure(CONF) MEMOIZE_SESSION = None session_cache_region = cache.create_region() MEMOIZE_SESSION = cache.get_memoization_decorator(CONF, session_cache_region, group="proxy") def load_config(): """Load parameters from the proxy's config file.""" conf_files = [ f for f in ['k2k-proxy.conf', 'etc/k2k-proxy.conf', '/etc/k2k-proxy.conf'] if path.isfile(f) ] if conf_files is not []: CONF(default_config_files=conf_files) def more_config():
# under the License. from oslo_cache import core as oslo_cache from oslo_config import cfg from capstone import conf user_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, user_region) token_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, token_region) # Ideally, this would be set to just under 24 hours (such as 23.5 hours), so # that we cache tokens for as long as possible without returning expired # tokens. token_region.expiration_time = 60 token_map_region = oslo_cache.create_region() oslo_cache.configure_cache_region(cfg.CONF, token_map_region) # Ideally, this would be set to just over 24 hours (such as 25 hours), so that # the cache invalidator can more confidently purge revoked token data from the # token_region. token_map_region.expiration_time = 90 config_group = 'cache' memoize_user = oslo_cache.get_memoization_decorator(conf.CONF, user_region, config_group) memoize_token = oslo_cache.get_memoization_decorator(conf.CONF, token_region, config_group)
from oslo_cache import core from oslo_config import cfg from oslo_log import log from oslo_utils import reflection from oslo_utils import strutils import six from heat.common import cache from heat.common import exception from heat.common.i18n import _ from heat.engine import resources # decorator that allows to cache the value # of the function based on input arguments MEMOIZE = core.get_memoization_decorator(conf=cfg.CONF, region=cache.get_cache_region(), group="constraint_validation_cache") LOG = log.getLogger(__name__) class Schema(collections.Mapping): """Schema base class for validating properties or parameters. Schema objects are serializable to dictionaries following a superset of the HOT input Parameter schema using dict(). Serialises to JSON in the form:: { 'type': 'list',
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_cache import core from oslo_config import cfg from conveyor.conveyorheat.common import cache MEMOIZE_EXTENSIONS = core.get_memoization_decorator( conf=cfg.CONF, region=cache.get_cache_region(), group="service_extension_cache") MEMOIZE_FINDER = core.get_memoization_decorator( conf=cfg.CONF, region=cache.get_cache_region(), group="resource_finder_cache")
# -*- encoding : utf-8 -*- """ @File : __init__.py.py @Time :2021/1/26 18:53 @Author :kuang congxian @Contact :[email protected] @Description : null """ from oslo_cache import core as cache from oslo_config import cfg from mall.common.load_config import CONF caching = cfg.BoolOpt('caching', default=True) cache_time = cfg.IntOpt('cache_time', default=3600) CONF.register_opts([caching, cache_time], "feature-name") cache.configure(CONF) example_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator(CONF, example_cache_region, "feature-name") # Load config file here cache.configure_cache_region(CONF, example_cache_region)
CONF.register_group(proxy_group) CONF.register_opts(proxy_opts, proxy_group) CONF.register_group(keystone_group) CONF.register_opts(keystone_opts, keystone_group) # Logging log.register_options(CONF) # Caching cache.configure(CONF) MEMOIZE_SESSION = None session_cache_region = cache.create_region() MEMOIZE_SESSION = cache.get_memoization_decorator( CONF, session_cache_region, group="proxy") def load_config(): """Load parameters from the proxy's config file.""" conf_files = [f for f in ['k2k-proxy.conf', 'etc/k2k-proxy.conf', '/etc/k2k-proxy.conf'] if path.isfile(f)] if conf_files is not []: CONF(default_config_files=conf_files) def more_config(): """Perform configuration that must be delayed until after import time. This code must be delayed until the config files have been loaded. They
from oslo_config import cfg

CONF = cfg.CONF
_opts.configure(CONF)
# Load cache settings from app.ini before reading CONF.cache.* below.
CONF(default_config_files=['app.ini'])

print(CONF.cache.backend)
print(CONF.cache.config_prefix)

cache.configure(CONF)

region = cache.create_region()
cache.configure_cache_region(CONF, region)
# Memoization decorator driven by the [cache] config group.
mem_decorator = cache.get_memoization_decorator(CONF, region, "cache")


@mem_decorator
def func(arg1, arg2):
    # Identity pair; the result is cached keyed on the arguments.
    return (arg1, arg2)


func('1', '2')
# NOTE(review): region.set uses a raw key -- it is not necessarily the same
# key the memoization decorator generated for func('1', '2'); confirm which
# entry the get below is expected to hit.
region.set('1', '2223')
print(region.get('1'))
def get_cache_decorator(provider):
    """Return a memoization decorator scoped to *provider*'s config group.

    :param provider: non-empty provider name, used as the oslo.config group
        that supplies the caching options for the decorator
    :returns: a decorator from ``cache.get_memoization_decorator``
    :raises exception.CoriolisException: if *provider* is not a non-empty
        string
    """
    # isinstance() is the idiomatic type check (type(x) is str rejects
    # str subclasses); "not provider" covers the empty string.
    if not isinstance(provider, str) or not provider:
        raise exception.CoriolisException("Invalid provider name")
    return cache.get_memoization_decorator(CONF, cache_region, provider)
from octavia.db import api as db_apis from octavia.i18n import _ from octavia.network import base from octavia.network.drivers.neutron import allowed_address_pairs as aap from octavia.network.drivers.neutron import utils from octavia_f5.common import constants from octavia_f5.db import repositories LOG = logging.getLogger(__name__) CONF = cfg.CONF PROJECT_ID_ALIAS = 'project-id' cache.configure(CONF) cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator(CONF, cache_region, "networking") cache.configure_cache_region(CONF, cache_region) class HierachicalPortBindingDriver(aap.AllowedAddressPairsDriver): def __init__(self): super(HierachicalPortBindingDriver, self).__init__() self.amp_repo = repositories.AmphoraRepository() def allocate_vip(self, load_balancer): port_id = load_balancer.vip.port_id if port_id: LOG.info('Port %s already exists. Nothing to be done.', port_id) port = self.get_port(port_id) return self._port_to_vip(port, load_balancer)
# under the License. from neutronclient.common import exceptions from neutronclient.neutron import v2_0 as neutronV20 from neutronclient.v2_0 import client as nc from oslo_cache import core from oslo_config import cfg from oslo_utils import uuidutils from heat.common import cache from heat.common import exception from heat.engine.clients import client_plugin MEMOIZE = core.get_memoization_decorator(conf=cfg.CONF, region=cache.get_cache_region(), group="service_extension_cache") class NeutronClientPlugin(client_plugin.ClientPlugin): exceptions_module = exceptions service_types = [NETWORK] = ['network'] def _create(self): con = self.context endpoint_type = self._get_client_option('neutron', 'endpoint_type') endpoint = self.url_for(service_type=self.NETWORK,
# Per-feature cache toggles; each group gets its own enable flag and TTL.
subnet_caching_opts = [
    cfg.BoolOpt('caching', default=True),
    cfg.IntOpt('cache_time', default=3600),
]

nodes_caching_opts = [
    cfg.BoolOpt('caching', default=True),
    cfg.IntOpt('cache_time', default=3600),
]

CONF.register_opts(subnet_caching_opts, "subnet_caching")
CONF.register_opts(nodes_caching_opts, "nodes_caching")

cache.configure(CONF)

# One region per feature, each wired to its own config group.
subnet_cache_region = cache.create_region()
MEMOIZE = cache.get_memoization_decorator(CONF, subnet_cache_region,
                                          "subnet_caching")
cache.configure_cache_region(CONF, subnet_cache_region)

nodes_cache_region = cache.create_region()
MEMOIZE_NODE = cache.get_memoization_decorator(CONF, nodes_cache_region,
                                               "nodes_caching")
cache.configure_cache_region(CONF, nodes_cache_region)


def utf8_json_decoder(byte_data):
    """Deserializes the bytes into UTF-8 encoded JSON.

    :param byte_data: The bytes to be converted into the UTF-8 encoded JSON.
    :returns: The UTF-8 encoded JSON represented by Python dictionary format.
    """
    return jsonutils.loads(byte_data.decode('utf8'))
from neutronclient.common import exceptions as neutron_exceptions from trove.common import cache from trove.common import cfg from trove.common import clients from trove.common import exception CONF = cfg.CONF LOG = logging.getLogger(__name__) MGMT_NETWORKS = None MGMT_CIDRS = None NEUTRON_EXTENSION_CACHE = {} PROJECT_ID_EXT_ALIAS = 'project-id' MEMOIZE_PORTS = core.get_memoization_decorator(conf=CONF, region=cache.get_cache_region(), group="instance_ports_cache") def check_extension_enabled(client, extension_alias): """Check if an extension is enabled in Neutron.""" global NEUTRON_EXTENSION_CACHE if extension_alias in NEUTRON_EXTENSION_CACHE: status = NEUTRON_EXTENSION_CACHE[extension_alias] LOG.debug(f"Neutron extension {extension_alias} cached as " f"{'enabled' if status else 'disabled'}") else: try: client.show_extension(extension_alias) LOG.debug(f'Neutron extension {extension_alias} found enabled')
] LOG = log.getLogger(__name__) CONF = cfg.CONF pod_ip_caching_opts = [ cfg.BoolOpt('caching', default=True), cfg.IntOpt('cache_time', default=3600), ] CONF.register_opts(pod_ip_caching_opts, "pod_ip_caching") cache.configure(CONF) pod_ip_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator(CONF, pod_ip_cache_region, "pod_ip_caching") cache.configure_cache_region(CONF, pod_ip_cache_region) def get_network_id(subnets): ids = ovu.osvif_to_neutron_network_ids(subnets) if len(ids) != 1: raise k_exc.IntegrityError("Subnet mapping %(subnets)s is not valid: " "%(num_networks)s unique networks found" % { 'subnets': subnets, 'num_networks': len(ids) }) return ids[0]
cache.configure(CONF) def my_key_generator(namespace, fn, **kw): fname = fn.__name__ def generate_key(*arg): key = fname + "_".join(str(s) for s in arg) print(key) return key return generate_key example_cache_region = cache.create_region(function=my_key_generator) MEMOIZE = cache.get_memoization_decorator(CONF, example_cache_region, "group1") backend = cfg.CONF.cache.backend print("------------------------------------------------------") print("Used backend: {}".format(backend)) print("------------------------------------------------------") # Load config file here cache.configure_cache_region(CONF, example_cache_region) print("Cache configuration done") @MEMOIZE def boom(x):
def get_cache_decorator(group):
    """Return a memoization decorator bound to the given config *group*.

    :param group: oslo.config group name supplying the caching options
    :returns: a decorator from ``cache.get_memoization_decorator``
    """
    return cache.get_memoization_decorator(CONF, cache_region, group)
'neutron': ['neutron-vif'], 'nested': ['nested-vlan'], } DEFAULT_TIMEOUT = 180 DEFAULT_INTERVAL = 3 subnet_caching_opts = [ cfg.BoolOpt('caching', default=True), cfg.IntOpt('cache_time', default=3600), ] CONF.register_opts(subnet_caching_opts, "subnet_caching") cache.configure(CONF) subnet_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator( CONF, subnet_cache_region, "subnet_caching") cache.configure_cache_region(CONF, subnet_cache_region) def utf8_json_decoder(byte_data): """Deserializes the bytes into UTF-8 encoded JSON. :param byte_data: The bytes to be converted into the UTF-8 encoded JSON. :returns: The UTF-8 encoded JSON represented by Python dictionary format. """ return jsonutils.loads(byte_data.decode('utf8')) def convert_netns(netns): """Convert /proc based netns path to Docker-friendly path.
from oslo_cache import core as oslo_cache
from oslo_config import cfg

from capstone import conf

# Region for caching user lookups; options come from oslo.config.
user_region = oslo_cache.create_region()
oslo_cache.configure_cache_region(cfg.CONF, user_region)

token_region = oslo_cache.create_region()
oslo_cache.configure_cache_region(cfg.CONF, token_region)
# Ideally, this would be set to just under 24 hours (such as 23.5 hours), so
# that we cache tokens for as long as possible without returning expired
# tokens.
# NOTE(review): the value below is 60, far shorter than the documented
# ideal -- confirm whether this is a deliberate short TTL or a leftover
# development setting.
token_region.expiration_time = 60

token_map_region = oslo_cache.create_region()
oslo_cache.configure_cache_region(cfg.CONF, token_map_region)
# Ideally, this would be set to just over 24 hours (such as 25 hours), so that
# the cache invalidator can more confidently purge revoked token data from the
# token_region.
token_map_region.expiration_time = 90

# Both memoization decorators read their options from the same config group.
config_group = 'cache'
memoize_user = oslo_cache.get_memoization_decorator(
    conf.CONF, user_region, config_group)
memoize_token = oslo_cache.get_memoization_decorator(
    conf.CONF, token_region, config_group)
from kuryr_kubernetes import objects from kuryr_kubernetes import utils LOG = logging.getLogger(__name__) vif_handler_caching_opts = [ oslo_cfg.BoolOpt('caching', default=True), oslo_cfg.IntOpt('cache_time', default=120), ] oslo_cfg.CONF.register_opts(vif_handler_caching_opts, "vif_handler_caching") cache.configure(oslo_cfg.CONF) vif_handler_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator(oslo_cfg.CONF, vif_handler_cache_region, "vif_handler_caching") cache.configure_cache_region(oslo_cfg.CONF, vif_handler_cache_region) class VIFHandler(k8s_base.ResourceEventHandler): """Controller side of VIF binding process for Kubernetes pods. `VIFHandler` runs on the Kuryr-Kubernetes controller and together with the CNI driver (that runs on 'kubelet' nodes) is responsible for providing networking to Kubernetes pods. `VIFHandler` relies on a set of drivers (which are responsible for managing Neutron resources) to define the VIF objects and pass them to the CNI driver in form of the Kubernetes pod annotation. """
from neutronclient.common import exceptions as n_exc LOG = logging.getLogger(__name__) namespace_handler_caching_opts = [ oslo_cfg.BoolOpt('caching', default=True), oslo_cfg.IntOpt('cache_time', default=120), ] oslo_cfg.CONF.register_opts(namespace_handler_caching_opts, "namespace_handler_caching") cache.configure(oslo_cfg.CONF) namespace_handler_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator( oslo_cfg.CONF, namespace_handler_cache_region, "namespace_handler_caching") cache.configure_cache_region(oslo_cfg.CONF, namespace_handler_cache_region) class NamespaceHandler(k8s_base.ResourceEventHandler): OBJECT_KIND = constants.K8S_OBJ_NAMESPACE OBJECT_WATCH_PATH = "%s/%s" % (constants.K8S_API_BASE, "namespaces") def __init__(self): super(NamespaceHandler, self).__init__() self._drv_project = drivers.NamespaceProjectDriver.get_instance() self._drv_subnets = drivers.PodSubnetsDriver.get_instance() self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance() self._drv_vif_pool = drivers.VIFPoolDriver.get_instance( specific_driver='multi_pool')
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from oslo_cache import core from oslo_config import cfg from heat.common import cache MEMOIZE_EXTENSIONS = core.get_memoization_decorator( conf=cfg.CONF, region=cache.get_cache_region(), group="service_extension_cache") MEMOIZE_FINDER = core.get_memoization_decorator( conf=cfg.CONF, region=cache.get_cache_region(), group="resource_finder_cache") @six.add_metaclass(abc.ABCMeta) class ExtensionMixin(object): def __init__(self, *args, **kwargs): super(ExtensionMixin, self).__init__(*args, **kwargs) self._extensions = None @abc.abstractmethod
def get_decorator(conf, name, group):
    """Look up the named cache region and wrap it as a memoization decorator.

    :param conf: oslo.config configuration object
    :param name: key into the module's _REGIONS registry
    :param group: config group passed through to oslo.cache
    :returns: a decorator from ``core.get_memoization_decorator``
    """
    region = _REGIONS[name]
    return core.get_memoization_decorator(conf, region, group=group)
] oslo_cfg.CONF.register_opts(vif_pool_driver_opts, "vif_pool") node_vif_driver_caching_opts = [ oslo_cfg.BoolOpt('caching', default=True), oslo_cfg.IntOpt('cache_time', default=3600), ] oslo_cfg.CONF.register_opts(node_vif_driver_caching_opts, "node_driver_caching") cache.configure(oslo_cfg.CONF) node_driver_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator(oslo_cfg.CONF, node_driver_cache_region, "node_driver_caching") cache.configure_cache_region(oslo_cfg.CONF, node_driver_cache_region) class NoopVIFPool(base.VIFPoolDriver): """No pool VIFs for Kubernetes Pods""" def set_vif_driver(self, driver): self._drv_vif = driver def request_vif(self, pod, project_id, subnets, security_groups): return self._drv_vif.request_vif(pod, project_id, subnets, security_groups) def release_vif(self, pod, vif, *argv):
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_cache import core from oslo_config import cfg from heat.common import cache MEMOIZE = core.get_memoization_decorator(conf=cfg.CONF, region=cache.get_cache_region(), group="service_extension_cache")