def __init__(self, *args, **kwargs):
    super(DateraDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(d_opts)
    self.username = self.configuration.san_login
    self.password = self.configuration.san_password
    self.cluster_stats = {}
    self.datera_api_token = None
    self.interval = self.configuration.datera_503_interval
    self.retry_attempts = (self.configuration.datera_503_timeout /
                           self.interval)
    self.driver_prefix = str(uuid.uuid4())[:4]
    self.datera_debug = self.configuration.datera_debug
    self.datera_api_versions = []

    if self.datera_debug:
        utils.setup_tracing(['method'])
    self.tenant_id = self.configuration.datera_tenant_id
    if self.tenant_id and self.tenant_id.lower() == 'none':
        self.tenant_id = None
    self.api_check = time.time()
    self.api_cache = []
    self.api_timeout = 0
    self.do_profile = not self.configuration.datera_disable_profiler
    self.thread_local = threading.local()

    backend_name = self.configuration.safe_get('volume_backend_name')
    self.backend_name = backend_name or 'Datera'

    datc.register_driver(self)
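# A minimal sketch (hypothetical helper, not the driver's actual code) of
# how the interval / retry_attempts pair computed above is typically
# consumed: poll until the backend stops answering 503 or the attempts
# are exhausted.
import time

def retry_on_503(request_fn, interval, retry_attempts):
    for _ in range(int(retry_attempts)):
        resp = request_fn()
        if resp.status_code != 503:      # not "Service Unavailable"
            return resp
        time.sleep(interval)             # wait one interval, then retry
    raise RuntimeError('backend still returning 503 after retries')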
def __init__(self, san_ip, username, password, port, tenant,
             replica_count, placement_mode, chunk_size,
             default_image_size, retry_attempts, retry_interval,
             max_timeout, ssl=True, client_cert=None,
             client_cert_key=None):
    self.san_ip = san_ip
    self.username = username
    self.password = password
    self.san_port = port
    self.tenant_id = tenant
    self.replica_count = replica_count
    self.placement_mode = placement_mode
    self.chunk_size = chunk_size
    self.use_ssl = ssl
    self.datera_api_token = None
    self.thread_local = threading.local()
    self.client_cert = client_cert
    self.client_cert_key = client_cert_key
    self.do_profile = True
    self.retry_attempts = retry_attempts
    self.interval = retry_interval
    self.default_size = default_image_size
    self.max_timeout = max_timeout

    if not all((self.san_ip, self.username, self.password)):
        raise exceptions.MissingCredentialError(required=[
            'datera_san_ip', 'datera_san_login', 'datera_san_password'])

    for apiv in reversed(API_VERSIONS):
        try:
            api = dfs_sdk.get_api(self.san_ip,
                                  self.username,
                                  self.password,
                                  'v{}'.format(apiv),
                                  disable_log=True)
            system = api.system.get()
            LOG.debug('Connected successfully to cluster: %s', system.name)
            self.api = api
            self.apiv = apiv
            break
        except Exception as e:
            LOG.warning(e)
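# A self-contained sketch of the same newest-first API version negotiation
# pattern used in the loop above: try each version in descending order and
# keep the first one the cluster accepts. The connect_fn callable and the
# default versions tuple here are illustrative assumptions, not the
# driver's actual API.
def negotiate_api_version(connect_fn, versions=('2.1', '2.2')):
    """Return (client, version) for the newest version that connects."""
    last_err = None
    for ver in reversed(versions):       # newest version first
        try:
            client = connect_fn(ver)     # raises on an unsupported version
            return client, ver
        except Exception as e:           # fall back to the next version
            last_err = e
    raise RuntimeError('no supported API version: %s' % last_err)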
class LogAdapter(object):
    """
    A Logger like object which performs some reformatting on calls to
    :meth:`exception`. Can be used to store a threadlocal transaction id.
    """

    _txn_id = threading.local()

    def __init__(self, logger):
        self.logger = logger
        for proxied_method in ('debug', 'log', 'warn', 'warning', 'error',
                               'critical', 'info'):
            setattr(self, proxied_method, getattr(logger, proxied_method))

    @property
    def txn_id(self):
        if hasattr(self._txn_id, 'value'):
            return self._txn_id.value

    @txn_id.setter
    def txn_id(self, value):
        self._txn_id.value = value

    def getEffectiveLevel(self):
        return self.logger.getEffectiveLevel()

    def exception(self, msg, *args):
        _junk, exc, _junk = sys.exc_info()
        call = self.logger.error
        emsg = ''
        if isinstance(exc, OSError):
            if exc.errno in (errno.EIO, errno.ENOSPC):
                emsg = str(exc)
            else:
                call = self.logger.exception
        elif isinstance(exc, socket.error):
            if exc.errno == errno.ECONNREFUSED:
                emsg = _('Connection refused')
            elif exc.errno == errno.EHOSTUNREACH:
                emsg = _('Host unreachable')
            elif exc.errno == errno.ETIMEDOUT:
                emsg = _('Connection timeout')
            else:
                call = self.logger.exception
        elif isinstance(exc, eventlet.Timeout):
            emsg = exc.__class__.__name__
            if hasattr(exc, 'seconds'):
                emsg += ' (%ss)' % exc.seconds
            if isinstance(exc, MessageTimeout):
                if exc.msg:
                    emsg += ' %s' % exc.msg
        else:
            call = self.logger.exception
        call('%s: %s' % (msg, emsg), *args)
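# A minimal sketch (names are illustrative) showing why the class-level
# threading.local() above gives every thread its own txn_id even though
# all LogAdapter instances share the same _txn_id attribute. Standard
# threading is used here; the source module uses the eventlet green
# equivalent.
import logging
import threading

adapter = LogAdapter(logging.getLogger('demo'))

def worker(txn):
    adapter.txn_id = txn                 # stored in this thread's local slot
    assert adapter.txn_id == txn         # other threads never see this value

threads = [threading.Thread(target=worker, args=('txn-%d' % i,))
           for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()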
def __init__(self, context, db_driver=None):
    # Ensure we have room for offset headers
    chunk_size = CONF.backup_datera_chunk_size * units.Gi - TOTAL_OFFSET
    # We don't care about chunks any smaller than our normal chunk size
    sha_size = chunk_size
    container_name = "replace-me"
    super(DateraBackupDriver, self).__init__(context, chunk_size, sha_size,
                                             container_name, db_driver)
    self.ctxt = context
    self.db_driver = db_driver
    self.support_force_delete = True
    self._backup = None
    self.san_ip = CONF.backup_datera_san_ip
    self.username = CONF.backup_datera_san_login
    self.password = CONF.backup_datera_san_password
    self.api_port = CONF.backup_datera_api_port

    self.driver_use_ssl = CONF.backup_driver_use_ssl
    self.driver_client_cert = CONF.backup_driver_client_cert
    self.driver_client_cert_key = CONF.backup_driver_client_cert_key
    self.replica_count = CONF.backup_datera_replica_count
    self.placement_mode = CONF.backup_datera_placement_mode
    self.driver_strs = CONF.backup_datera_secondary_backup_drivers
    self.driver = None
    self.drivers = {}
    self.type = 'datera'
    self.cluster_stats = {}
    self.datera_api_token = None
    self.interval = CONF.backup_datera_503_interval
    self.retry_attempts = (CONF.backup_datera_503_timeout / self.interval)
    self.driver_prefix = str(uuid.uuid4())[:4]
    self.datera_debug = CONF.backup_datera_debug
    self.datera_api_versions = []

    if self.datera_debug:
        utils.setup_tracing(['method'])
    self.tenant_id = CONF.backup_datera_tenant_id
    if self.tenant_id and self.tenant_id.lower() == 'none':
        self.tenant_id = None
    self.api_check = time.time()
    self.api_cache = []
    self.api_timeout = 0
    self.do_profile = not CONF.backup_datera_disable_profiler
    self.thread_local = threading.local()
    self.thread_local.trace_id = ""
    self._populate_secondary_drivers()

    datc.register_driver(self)
    self._check_options()
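# A short worked example of the chunk-size arithmetic above, assuming a
# hypothetical 1 GiB configured chunk and a small TOTAL_OFFSET header
# reserve; the real values come from config options and driver constants.
units_Gi = 1024 ** 3                      # units.Gi: bytes per GiB
TOTAL_OFFSET = 4096                       # hypothetical header reserve
chunk_size = 1 * units_Gi - TOTAL_OFFSET  # 1073737728 usable bytes per chunk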
def __init__(self, *args, **kwargs):
    super(DateraDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(d_opts)
    self.username = self.configuration.san_login
    self.password = self.configuration.san_password
    self.ldap = self.configuration.datera_ldap_server
    self.cluster_stats = {}
    self.datera_api_token = None
    self.interval = self.configuration.datera_503_interval
    self.retry_attempts = (self.configuration.datera_503_timeout /
                           self.interval)
    self.driver_prefix = str(uuid.uuid4())[:4]
    self.datera_debug = self.configuration.datera_debug

    if self.datera_debug:
        volume_utils.setup_tracing(['method'])
    self.tenant_id = self.configuration.datera_tenant_id
    if self.tenant_id is None:
        self.tenant_id = ''
    self.defaults = self.configuration.datera_volume_type_defaults
    if self.tenant_id and self.tenant_id.lower() == 'none':
        self.tenant_id = None
    self.template_override = (
        not self.configuration.datera_disable_template_override)
    self.api_check = time.time()
    self.api_cache = []
    self.api_timeout = 0
    self.do_profile = not self.configuration.datera_disable_profiler
    self.do_metadata = (
        not self.configuration.datera_disable_extended_metadata)
    self.image_cache = self.configuration.datera_enable_image_cache
    self.image_type = self.configuration.datera_image_cache_volume_type_id
    self.thread_local = threading.local()  # pylint: disable=no-member
    self.datera_version = None
    self.apiv = None
    self.api = None
    self.filterf = self.get_filter_function()
    self.goodnessf = self.get_goodness_function()

    self.use_chap_auth = self.configuration.use_chap_auth
    self.chap_username = self.configuration.chap_username
    self.chap_password = self.configuration.chap_password

    backend_name = self.configuration.safe_get('volume_backend_name')
    self.backend_name = backend_name or 'Datera'

    datc.register_driver(self)
            'username': self.username,
            'tenant_name': self.tenant_name,
            'is_admin': self.is_admin,
            'roles': self.roles,
        }

    def is_auth_capable(self):
        return self.service_catalog and self.token and self.tenant_id and \
            self.user_id


def get_admin_context():
    return Context(is_admin=True)


_CTX_STORE = threading.local()
_CTX_KEY = 'current_ctx'


def has_ctx():
    return hasattr(_CTX_STORE, _CTX_KEY)


def ctx():
    if not has_ctx():
        raise ex.IncorrectStateError("Context isn't available here")
    return getattr(_CTX_STORE, _CTX_KEY)


def current():
    return ctx()
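# A minimal sketch of the write side of a thread-local context store like
# _CTX_STORE above; set_ctx() here is a plausible counterpart to the read
# helpers shown in the source, not necessarily its verbatim code.
def set_ctx(new_ctx):
    if new_ctx is None and has_ctx():
        delattr(_CTX_STORE, _CTX_KEY)    # clear this thread's context
    elif new_ctx is not None:
        setattr(_CTX_STORE, _CTX_KEY, new_ctx)

# set_ctx(get_admin_context())  -> ctx() now works, in this thread only
# set_ctx(None)                 -> ctx() raises again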
import os
import sys
import time
import struct
import socket
import random

from eventlet.green import threading
from eventlet.zipkin._thrift.zipkinCore import ttypes
from eventlet.zipkin._thrift.zipkinCore.constants import SERVER_SEND


client = None
_tls = threading.local()  # thread local storage


def put_annotation(msg, endpoint=None):
    """ This is the annotation API.
    You can add your own annotation from within your code; the annotation
    is recorded with a timestamp automatically.
    e.g.) put_annotation('cache hit for %s' % request)

    :param msg: String message
    :param endpoint: host info
    """
    if is_sample():
        a = ZipkinDataBuilder.build_annotation(msg, endpoint)
        trace_data = get_trace_data()
        trace_data.add_annotation(a)
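# A minimal sketch of the per-greenthread trace storage that
# put_annotation() relies on. These helper names mirror the ones referenced
# above, but the bodies are illustrative assumptions, not the module's
# actual code.
def get_trace_data():
    return getattr(_tls, 'trace_data', None)

def set_trace_data(trace_data):
    _tls.trace_data = trace_data         # visible only to this greenthread

def is_sample():
    trace_data = get_trace_data()
    return trace_data is not None and trace_data.sampled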
class LogAdapter(logging.LoggerAdapter, object):
    """
    A Logger like object which performs some reformatting on calls to
    :meth:`exception`. Can be used to store a threadlocal transaction id
    and client ip.
    """

    _cls_thread_local = threading.local()

    def __init__(self, logger, server):
        logging.LoggerAdapter.__init__(self, logger, {})
        self.server = server
        setattr(self, 'warn', self.warning)

    @property
    def txn_id(self):
        if hasattr(self._cls_thread_local, 'txn_id'):
            return self._cls_thread_local.txn_id

    @txn_id.setter
    def txn_id(self, value):
        self._cls_thread_local.txn_id = value

    @property
    def client_ip(self):
        if hasattr(self._cls_thread_local, 'client_ip'):
            return self._cls_thread_local.client_ip

    @client_ip.setter
    def client_ip(self, value):
        self._cls_thread_local.client_ip = value

    @property
    def thread_locals(self):
        return (self.txn_id, self.client_ip)

    @thread_locals.setter
    def thread_locals(self, value):
        self.txn_id, self.client_ip = value

    def getEffectiveLevel(self):
        return self.logger.getEffectiveLevel()

    def process(self, msg, kwargs):
        """
        Add extra info to message
        """
        kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id,
                           'client_ip': self.client_ip}
        return msg, kwargs

    def notice(self, msg, *args, **kwargs):
        """
        Convenience function for syslog priority LOG_NOTICE. The python
        logging lvl is set to 25, just above info. SysLogHandler is
        monkey patched to map this log lvl to the LOG_NOTICE syslog
        priority.
        """
        self.log(NOTICE, msg, *args, **kwargs)

    def _exception(self, msg, *args, **kwargs):
        logging.LoggerAdapter.exception(self, msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        _junk, exc, _junk = sys.exc_info()
        call = self.error
        emsg = ''
        if isinstance(exc, OSError):
            if exc.errno in (errno.EIO, errno.ENOSPC):
                emsg = str(exc)
            else:
                call = self._exception
        elif isinstance(exc, socket.error):
            if exc.errno == errno.ECONNREFUSED:
                emsg = _('Connection refused')
            elif exc.errno == errno.EHOSTUNREACH:
                emsg = _('Host unreachable')
            elif exc.errno == errno.ETIMEDOUT:
                emsg = _('Connection timeout')
            else:
                call = self._exception
        elif isinstance(exc, eventlet.Timeout):
            emsg = exc.__class__.__name__
            if hasattr(exc, 'seconds'):
                emsg += ' (%ss)' % exc.seconds
            if isinstance(exc, MessageTimeout):
                if exc.msg:
                    emsg += ' %s' % exc.msg
        else:
            call = self._exception
        call('%s: %s' % (msg, emsg), *args, **kwargs)

    def set_statsd_prefix(self, prefix):
        """
        The StatsD client prefix defaults to the "name" of the logger. This
        method may override that default with a specific value. Currently
        used in the proxy-server to differentiate the Account, Container,
        and Object controllers.
        """
        if self.logger.statsd_client:
            self.logger.statsd_client.set_prefix(prefix)

    def statsd_delegate(statsd_func_name):
        """
        Factory which creates methods which delegate to methods on
        self.logger.statsd_client (an instance of StatsdClient). The
        created methods conditionally delegate to a method whose name is
        given in 'statsd_func_name'. The created delegate methods are a
        no-op when StatsD logging is not configured. The created delegate
        methods also handle the defaulting of sample_rate (to either the
        default specified in the config with
        'log_statsd_default_sample_rate' or the value passed into the
        delegate function).

        :param statsd_func_name: the name of a method on StatsdClient.
        """
        func = getattr(StatsdClient, statsd_func_name)

        @functools.wraps(func)
        def wrapped(self, *a, **kw):
            if getattr(self.logger, 'statsd_client'):
                return func(self.logger.statsd_client, *a, **kw)
        return wrapped

    update_stats = statsd_delegate('update_stats')
    increment = statsd_delegate('increment')
    decrement = statsd_delegate('decrement')
    timing = statsd_delegate('timing')
    timing_since = statsd_delegate('timing_since')
            'username': self.username,
            'tenant_name': self.tenant_name,
            'is_admin': self.is_admin,
            'roles': self.roles,
        }

    def is_auth_capable(self):
        return self.service_catalog and self.token and self.tenant_id and \
            self.user_id


def get_admin_context():
    return Context(is_admin=True)


_CTX_STORE = threading.local()
_CTX_KEY = 'current_ctx'


def has_ctx():
    return hasattr(_CTX_STORE, _CTX_KEY)


def ctx():
    if not has_ctx():
        # TODO(slukjanov): replace with specific error
        raise RuntimeError("Context isn't available here")
    return getattr(_CTX_STORE, _CTX_KEY)


def current():
    return ctx()
class LogAdapter(logging.LoggerAdapter, object):
    """
    A Logger like object which performs some reformatting on calls to
    :meth:`exception`. Can be used to store a threadlocal transaction id.
    """

    _txn_id = threading.local()

    def __init__(self, logger, server):
        logging.LoggerAdapter.__init__(self, logger, {})
        self.server = server
        setattr(self, 'warn', self.warning)

    @property
    def txn_id(self):
        if hasattr(self._txn_id, 'value'):
            return self._txn_id.value

    @txn_id.setter
    def txn_id(self, value):
        self._txn_id.value = value

    def getEffectiveLevel(self):
        return self.logger.getEffectiveLevel()

    def process(self, msg, kwargs):
        """
        Add extra info to message
        """
        kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id}
        return msg, kwargs

    def notice(self, msg, *args, **kwargs):
        """
        Convenience function for syslog priority LOG_NOTICE. The python
        logging lvl is set to 25, just above info. SysLogHandler is
        monkey patched to map this log lvl to the LOG_NOTICE syslog
        priority.
        """
        self.log(NOTICE, msg, *args, **kwargs)

    def _exception(self, msg, *args, **kwargs):
        logging.LoggerAdapter.exception(self, msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        _junk, exc, _junk = sys.exc_info()
        call = self.error
        emsg = ''
        if isinstance(exc, OSError):
            if exc.errno in (errno.EIO, errno.ENOSPC):
                emsg = str(exc)
            else:
                call = self._exception
        elif isinstance(exc, socket.error):
            if exc.errno == errno.ECONNREFUSED:
                emsg = _('Connection refused')
            elif exc.errno == errno.EHOSTUNREACH:
                emsg = _('Host unreachable')
            elif exc.errno == errno.ETIMEDOUT:
                emsg = _('Connection timeout')
            else:
                call = self._exception
        elif isinstance(exc, eventlet.Timeout):
            emsg = exc.__class__.__name__
            if hasattr(exc, 'seconds'):
                emsg += ' (%ss)' % exc.seconds
            if isinstance(exc, MessageTimeout):
                if exc.msg:
                    emsg += ' %s' % exc.msg
        else:
            call = self._exception
        call('%s: %s' % (msg, emsg), *args, **kwargs)
RES_NOT_FOUND = REST_NOT_FOUND
RES_CONFLICT = REST_CONFLICT
RES_EXISTS_INTERNAL_ERR_CODE = REST_EXISTS_INTERNAL_ERR_CODE
VSD_NO_ATTR_CHANGES_TO_MODIFY_ERR_CODE = \
    REST_NO_ATTR_CHANGES_TO_MODIFY_ERR_CODE
VSD_RESP_OBJ = 3

LIST_L2DOMAINS = re.compile(r'.*/l2domains(\?.*)?$')
LIST_SUBNETS = re.compile(r'.*/subnets(\?.*)?$')
GET_L2DOMAIN = re.compile(r'/l2domains/([0-9a-fA-F\-]+?)(\?.*)?$')
GET_SUBNET = re.compile(r'/subnets/([0-9a-fA-F\-]+?)(\?.*)?$')

NUAGE_AUTH = None
NUAGE_AUTH_SEMAPHORE = threading.Semaphore()
THREAD_LOCAL_DATA = threading.local()


class RESTProxyBaseException(Exception):
    message = _("An unknown exception occurred.")

    def __init__(self, **kwargs):
        try:
            super(RESTProxyBaseException, self).__init__(self.message
                                                         % kwargs)
            self.msg = self.message % kwargs
        except Exception:
            if self.use_fatal_exceptions():
                raise
            else:
                super(RESTProxyBaseException, self).__init__(self.message)
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import eventlet
requests = eventlet.import_patched('requests.__init__')
from eventlet.green import threading
from py_zipkin import transport

# Shut up some pretty-verbose logging from requests' urllib3
import logging
logging.getLogger("requests.packages.urllib3").setLevel(logging.WARNING)


_tls = threading.local()  # thread local storage for GreenHttpTransport

global_green_http_transport = None


class GreenHttpTransport(transport.BaseTransportHandler):
    """
    We'll keep one global instance of this class to send the v2 API JSON
    payloads to the server with a greened `requests` module connection
    pool.
    """

    def __init__(self, logger, address, port,
                 flush_threshold_size=2**20,
                 flush_threshold_sec=2.0):
        self.logger = logger
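# For contrast with the batching GreenHttpTransport above, here is a
# minimal sketch of a py_zipkin transport handler: subclasses of
# BaseTransportHandler implement get_max_payload_bytes() and send(). The
# collector URL below is a placeholder assumption, not from the source.
class SimpleHttpTransport(transport.BaseTransportHandler):
    def get_max_payload_bytes(self):
        return None                      # no payload size limit

    def send(self, payload):
        # POST one encoded span batch per call; no batching or flushing
        requests.post('http://127.0.0.1:9411/api/v2/spans',
                      data=payload,
                      headers={'Content-Type': 'application/json'})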
VIF_UNPLUGGED = 'network-vif-unplugged'
VIF_PLUGGED = 'network-vif-plugged'
VIF_DELETED = 'network-vif-deleted'
NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed',
                                 constants.PORT_STATUS_ERROR: 'failed',
                                 constants.PORT_STATUS_DOWN: 'completed'}
NOVA_API_VERSION = "2.1"

NOTIFIER_ENABLE_DEFAULT = True
# NOTE(ralonsoh): the Nova notifier can be called simultaneously by several
# RPC callbacks from the agents (DHCP, L2), trying to update the
# provisioning status of a port. In order to handle each context notifier
# enable flag, a thread local variable is used.
_notifier_store = threading.local()


@registry.has_registry_receivers
class Notifier(object):

    _instance = None

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        auth = ks_loading.load_auth_from_conf_options(cfg.CONF, 'nova')
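# A minimal sketch (helper names are illustrative, not Neutron's actual
# code) of the per-thread enable flag that _notifier_store exists for:
# each RPC callback thread can suppress notifications independently
# without affecting the others.
def _is_enabled():
    return getattr(_notifier_store, 'enable', NOTIFIER_ENABLE_DEFAULT)

def _set_enabled(enable):
    _notifier_store.enable = enable      # affects only the calling thread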
from swift_zipkin import transport

# Convenience imports so other places don't have to import py_zipkin stuff
from py_zipkin.zipkin import (
    create_http_headers_for_new_span,
    ZipkinAttrs,
)
from py_zipkin.request_helpers import extract_zipkin_attrs_from_headers

# shut up linter
create_http_headers_for_new_span = create_http_headers_for_new_span
ZipkinAttrs = ZipkinAttrs
extract_zipkin_attrs_from_headers = extract_zipkin_attrs_from_headers


sample_rate_pct = 100

_tls = threading.local()  # thread local storage for a SpanSavingTracer


# TODO: see if we can get this into the upstream Tracer, including the
# weakref storage for zipkin_span._tracer
class SpanSavingTracer(Tracer):
    """
    Like py-zipkin's Tracer, but supports accessing the "current" zipkin
    span context object. If a popped weakref's referent is None (the
    referent has been garbage-collected), it is discarded, and another
    value popped.
    """

    def __init__(self):
        super(SpanSavingTracer, self).__init__()
        self._span_ctx_stack = Stack()
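# A plausible sketch (not the module's verbatim code) of how the _tls slot
# above would be used to hand out one SpanSavingTracer per greenthread.
def get_tracer():
    if not hasattr(_tls, 'tracer'):
        _tls.tracer = SpanSavingTracer()  # lazily created per thread
    return _tls.tracer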