def register_cache_configurations(conf):
    """Register all configurations required for oslo.cache.

    The procedure registers all configurations required for oslo.cache.
    It should be called before configuring of cache region

    :param conf: instance of heat configuration
    :returns: updated heat configuration
    """
    # register global configurations for caching in heat
    core.configure(conf)

    # register heat specific configurations
    constraint_cache_group = cfg.OptGroup('constraint_validation_cache')
    constraint_cache_opts = [
        cfg.IntOpt('expiration_time', default=60,
                   help=_(
                       'TTL, in seconds, for any cached item in the '
                       'dogpile.cache region used for caching of validation '
                       'constraints.')),
        cfg.BoolOpt("caching", default=True,
                    help=_(
                        'Toggle to enable/disable caching when Orchestration '
                        # NOTE: trailing space after "stack." restored so the
                        # concatenated help text does not read "stack.During"
                        'Engine validates property constraints of stack. '
                        'During property validation with constraints '
                        'Orchestration Engine caches requests to other '
                        'OpenStack services. Please note that the global '
                        'toggle for oslo.cache(enabled=True in [cache] group) '
                        'must be enabled to use this feature.'))
    ]
    conf.register_group(constraint_cache_group)
    conf.register_opts(constraint_cache_opts, group=constraint_cache_group)
    return conf
def configure(conf=None):
    """Register every known configuration module's options on *conf*.

    Falls back to the global CONF object when no configuration instance
    is supplied, then also wires in the oslo.cache option definitions.
    """
    target = CONF if conf is None else conf
    for mod in conf_modules:
        mod.register_opts(target)
    # oslo.cache brings its own config options along
    cache.configure(target)
def parse_args(argv, default_config_files=None, configure_db=True):
    """Parse nova's command line and initialise global configuration.

    :param argv: full argv, including the program name at index 0
    :param default_config_files: optional list of config files to load
    :param configure_db: when True, also configure the DB API from CONF

    NOTE: statement order matters here — logging, SQL, RPC and cache
    defaults/options must all be registered before CONF(...) parses argv.
    """
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='nova.sqlite')
    rpc.set_defaults(control_exchange='nova')
    # register oslo.cache options so they can be set from config files
    cache.configure(CONF)
    debugger.register_cli_opts()
    # actually parse the command line and the config files
    CONF(argv[1:], project='nova',
         version=version.version_string(),
         default_config_files=default_config_files)
    rpc.init(CONF)
    if configure_db:
        sqlalchemy_api.configure(CONF)
def configure(conf=None):
    """Register keystone's CLI options and module options on *conf*.

    Uses the global CONF object when no configuration instance is given.
    Also sets up authentication plugins and oslo.cache options.
    """
    if conf is None:
        conf = CONF

    debug_cli_opts = (
        cfg.BoolOpt('standard-threads', default=False,
                    help='Do not monkey-patch threading system modules.'),
        cfg.StrOpt('pydev-debug-host',
                   help='Host to connect to for remote debugger.'),
        cfg.PortOpt('pydev-debug-port',
                    help='Port to connect to for remote debugger.'),
    )
    for cli_opt in debug_cli_opts:
        conf.register_cli_opt(cli_opt)

    for module in conf_modules:
        module.register_opts(conf)

    # register any non-default auth methods here (used by extensions, etc)
    auth.setup_authentication()

    # add oslo.cache related config options
    cache.configure(conf)
def parse_args(argv, default_config_files=None, configure_db=True, init_rpc=True): log.register_options(CONF) # We use the oslo.log default log levels which includes suds=INFO # and add only the extra levels that Nova needs log.set_defaults(default_log_levels=log.get_default_log_levels() + _EXTRA_DEFAULT_LOG_LEVELS) options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION, sqlite_db='nova.sqlite') rpc.set_defaults(control_exchange='nova') cache.configure(CONF) config.set_middleware_defaults() CONF(argv[1:], project='nova', version=version.version_string(), default_config_files=default_config_files) if init_rpc: rpc.init(CONF) if configure_db: sqlalchemy_api.configure(CONF)
def register_oslo_configs(conf):
    """Register cache-related options on the given config object.

    Project-specific cache options are registered first, then the
    generic oslo.cache option definitions.
    """
    cache_utils_config.register_cache_opts(conf)
    cache.configure(conf)
cfg.StrOpt('metadata_proxy_shared_secret', default='', help=_('Shared secret to sign instance-id request'), secret=True), cfg.IntOpt("cache_expiration", default=15, min=0, help=_('This option is the time (in seconds) to cache metadata. ' 'Increasing this setting should improve response times of the ' 'metadata API when under heavy load. Higher values may ' 'increase memory usage, and result in longer times for host ' 'metadata changes to take effect.')) ] CONF.register_opts(metadata_opts, group='metadata') cache_core.configure(CONF) class MetadataRequestHandler(wsgi.Application): """Serve metadata.""" def __init__(self): if not CONF.cache.enabled: LOG.warning("Metadata doesn't use cache. " "Configure cache options to use cache.") self.cache_region = cache_core.create_region() cache_core.configure_cache_region(CONF, self.cache_region) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): LOG.debug('Request: %s', req)
def register_config(conf):
    """Make the oslo.cache configuration options known to *conf*."""
    core.configure(conf)
def register_oslo_configs(conf):
    """Register this module's cache options plus oslo.cache's own."""
    conf.register_opts(cache_opts)
    cache.configure(conf)
def register_opts(conf):
    """Register the oslo.cache configuration options on *conf*."""
    core.configure(conf)
from kuryr_kubernetes.handlers import k8s_base from kuryr_kubernetes import objects from kuryr_kubernetes import utils LOG = logging.getLogger(__name__) vif_handler_caching_opts = [ oslo_cfg.BoolOpt('caching', default=True), oslo_cfg.IntOpt('cache_time', default=120), ] oslo_cfg.CONF.register_opts(vif_handler_caching_opts, "vif_handler_caching") cache.configure(oslo_cfg.CONF) vif_handler_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator( oslo_cfg.CONF, vif_handler_cache_region, "vif_handler_caching") cache.configure_cache_region(oslo_cfg.CONF, vif_handler_cache_region) class VIFHandler(k8s_base.ResourceEventHandler): """Controller side of VIF binding process for Kubernetes pods. `VIFHandler` runs on the Kuryr-Kubernetes controller and together with the CNI driver (that runs on 'kubelet' nodes) is responsible for providing networking to Kubernetes pods. `VIFHandler` relies on a set of drivers (which are responsible for managing Neutron resources) to define the VIF objects and pass them to the CNI driver in form of the Kubernetes pod
OPERATORS_WITH_VALUES = [ constants.K8S_OPERATOR_IN, constants.K8S_OPERATOR_NOT_IN ] LOG = log.getLogger(__name__) CONF = cfg.CONF pod_ip_caching_opts = [ cfg.BoolOpt('caching', default=True), cfg.IntOpt('cache_time', default=3600), ] CONF.register_opts(pod_ip_caching_opts, "pod_ip_caching") cache.configure(CONF) pod_ip_cache_region = cache.create_region() MEMOIZE = cache.get_memoization_decorator(CONF, pod_ip_cache_region, "pod_ip_caching") cache.configure_cache_region(CONF, pod_ip_cache_region) def get_network_id(subnets): ids = ovu.osvif_to_neutron_network_ids(subnets) if len(ids) != 1: raise k_exc.IntegrityError("Subnet mapping %(subnets)s is not valid: " "%(num_networks)s unique networks found" % { 'subnets': subnets, 'num_networks': len(ids)
def register_cache_configurations(conf):
    """Register all configurations required for oslo.cache.

    The procedure registers all configurations required for oslo.cache.
    It should be called before configuring of cache region

    :param conf: instance of heat configuration
    :returns: updated heat configuration
    """
    # register global configurations for caching in heat
    core.configure(conf)

    # register heat specific configurations; every heat cache region gets
    # an 'expiration_time' option and a 'caching' toggle, differing only
    # in group name, TTL default and help text
    region_definitions = (
        ('constraint_validation_cache',
         60,
         _('TTL, in seconds, for any cached item in the '
           'dogpile.cache region used for caching of validation '
           'constraints.'),
         _('Toggle to enable/disable caching when Orchestration '
           'Engine validates property constraints of stack. '
           'During property validation with constraints '
           'Orchestration Engine caches requests to other '
           'OpenStack services. Please note that the global '
           'toggle for oslo.cache(enabled=True in [cache] group) '
           'must be enabled to use this feature.')),
        ('service_extension_cache',
         3600,
         _('TTL, in seconds, for any cached item in the '
           'dogpile.cache region used for caching of service '
           'extensions.'),
         _('Toggle to enable/disable caching when Orchestration '
           'Engine retrieves extensions from other OpenStack '
           'services. Please note that the global toggle for '
           'oslo.cache(enabled=True in [cache] group) must be '
           'enabled to use this feature.')),
        ('resource_finder_cache',
         3600,
         _('TTL, in seconds, for any cached item in the '
           'dogpile.cache region used for caching of OpenStack '
           'service finder functions.'),
         _('Toggle to enable/disable caching when Orchestration '
           'Engine looks for other OpenStack service resources '
           'using name or id. Please note that the global '
           'toggle for oslo.cache(enabled=True in [cache] group) '
           'must be enabled to use this feature.')),
    )
    for group_name, ttl_default, ttl_help, toggle_help in region_definitions:
        group = cfg.OptGroup(group_name)
        group_opts = [
            cfg.IntOpt('expiration_time', default=ttl_default,
                       help=ttl_help),
            cfg.BoolOpt('caching', default=True, help=toggle_help),
        ]
        conf.register_group(group)
        conf.register_opts(group_opts, group=group)
    return conf
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_cache import core from oslo_config import cfg core.configure(cfg.CONF)
def setup_cache(conf):
    """Initialise the module-level cache region from *conf*.

    Registers the oslo.cache options, creates a dogpile cache region and
    stores the configured region in the CACHE_REGION global.
    """
    global CACHE_REGION
    core.configure(conf)
    CACHE_REGION = core.configure_cache_region(conf, core.create_region())
def register_oslo_configs(conf):
    """Expose oslo.cache's configuration options via *conf*."""
    cache.configure(conf)
from nova.network.security_group import openstack_driver from nova import objects from nova.objects import base as objects_base from nova.tests import fixtures as nova_fixtures from nova.tests.unit import conf_fixture from nova.tests.unit import policy_fixture from nova.tests import uuidsentinel as uuids from nova import utils CONF = cfg.CONF logging.register_options(CONF) CONF.set_override('use_stderr', False) logging.setup(CONF, 'nova') cache.configure(CONF) _TRUE_VALUES = ('True', 'true', '1', 'yes') CELL1_NAME = 'cell1' if six.PY2: nested = contextlib.nested else: @contextlib.contextmanager def nested(*contexts): with contextlib.ExitStack() as stack: yield [stack.enter_context(c) for c in contexts] class SampleNetworks(fixtures.Fixture):