def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
             protocol=eventlet.wsgi.HttpProtocol, backlog=128):
    """Initialize, but do not start, a WSGI server.

    :param name: Pretty name for logging.
    :param app: The WSGI application to serve.
    :param host: IP address to serve the application.
    :param port: Port number to serve the application.
    :param pool_size: Maximum number of eventlets to spawn concurrently.
    :param backlog: Maximum number of queued connections.
    :returns: None
    :raises: nova.exception.InvalidInput
    """
    self.name = name
    self.app = app
    self._server = None
    self._protocol = protocol
    self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
    self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
    self._wsgi_logger = logging.WritableLogger(self._logger)

    if backlog < 1:
        raise exception.InvalidInput(
            reason='The backlog must be at least 1')

    # Pick the socket family: IPv6 if the host is an IPv6 literal, else IPv4.
    try:
        socket.inet_pton(socket.AF_INET6, host)
        family = socket.AF_INET6
    except Exception:
        family = socket.AF_INET

    self._socket = eventlet.listen((host, port), family, backlog=backlog)
    (self.host, self.port) = self._socket.getsockname()[0:2]
    LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)
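# The IPv4/IPv6 branch above relies on socket.inet_pton() raising when the
# host is not an IPv6 literal. A minimal, self-contained sketch of the same
# family-selection check, independent of eventlet and the Server class:
import socket

def _address_family(host):
    """Return AF_INET6 if host parses as an IPv6 literal, else AF_INET."""
    try:
        socket.inet_pton(socket.AF_INET6, host)
        return socket.AF_INET6
    except (socket.error, ValueError):
        return socket.AF_INET

assert _address_family("::1") == socket.AF_INET6
assert _address_family("0.0.0.0") == socket.AF_INET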
def main():
    """Parse environment and arguments and call the appropriate action."""
    try:
        config_file = os.environ['CONFIG_FILE']
    except KeyError:
        config_file = os.environ['FLAGFILE']

    config.parse_args(sys.argv,
                      default_config_files=jsonutils.loads(config_file))

    logging.setup("nova")
    global LOG
    LOG = logging.getLogger('nova.dhcpbridge')

    if CONF.action.name in ['add', 'del', 'old']:
        msg = (_("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") %
               {"action": CONF.action.name,
                "mac": CONF.action.mac,
                "ip": CONF.action.ip})
        LOG.debug(msg)
        CONF.action.func(CONF.action.mac, CONF.action.ip)
    else:
        try:
            network_id = int(os.environ.get('NETWORK_ID'))
        except TypeError:
            LOG.error(_("Environment variable 'NETWORK_ID' must be set."))
            return 1
        print init_leases(network_id)
    rpc.cleanup()
def main():
    """Parse environment and arguments and call the appropriate action."""
    config.parse_args(sys.argv,
                      default_config_files=jsonutils.loads(os.environ['CONFIG_FILE']))

    logging.setup("nova")
    global LOG
    LOG = logging.getLogger('nova.dhcpbridge')

    objects.register_all()

    if not CONF.conductor.use_local:
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()

    if CONF.action.name in ['add', 'del', 'old']:
        msg = (_("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") %
               {"action": CONF.action.name,
                "mac": CONF.action.mac,
                "ip": CONF.action.ip})
        LOG.debug(msg)
        CONF.action.func(CONF.action.mac, CONF.action.ip)
    else:
        try:
            network_id = int(os.environ.get('NETWORK_ID'))
        except TypeError:
            LOG.error(_("Environment variable 'NETWORK_ID' must be set."))
            return 1
        print(init_leases(network_id))
    rpc.cleanup()
def init():
    from oslo.config import cfg
    CONF = cfg.CONF

    # NOTE(markmc): gracefully handle the CLI options not being registered
    if 'remote_debug' not in CONF:
        return

    if not (CONF.remote_debug.host and CONF.remote_debug.port):
        return

    from nova.i18n import _
    from nova.openstack.common import log as logging
    LOG = logging.getLogger(__name__)

    LOG.debug('Listening on %(host)s:%(port)s for debug connection',
              {'host': CONF.remote_debug.host,
               'port': CONF.remote_debug.port})

    try:
        from pydev import pydevd
    except ImportError:
        import pydevd

    pydevd.settrace(host=CONF.remote_debug.host,
                    port=CONF.remote_debug.port,
                    stdoutToServer=False,
                    stderrToServer=False)

    LOG.warn(_('WARNING: Using the remote debug option changes how '
               'Nova uses the eventlet library to support async IO. This '
               'could result in failures that do not occur under normal '
               'operation. Use at your own risk.'))
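# init() above assumes a 'remote_debug' option group exposing 'host' and
# 'port'. The real option definitions are not part of this excerpt; a
# registration along the following lines would satisfy the checks above
# (names of the list variable and help strings are illustrative only):
from oslo.config import cfg

_remote_debug_opts = [
    cfg.StrOpt('host',
               help='Debug host (IP or name) to connect back to.'),
    cfg.IntOpt('port',
               help='Debug port to connect back to.'),
]

cfg.CONF.register_cli_opts(_remote_debug_opts, group='remote_debug')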
def main():
    config.parse_args(sys.argv)
    logging.setup("nova")
    LOG = logging.getLogger('nova.virt.baremetal.deploy_helper')

    app = BareMetalDeploy()
    srv = simple_server.make_server('', 10000, app)
    srv.serve_forever()
def wrapper(*args, **kwds):
    LOG = logging.getLogger(f.__module__)
    # isEnabledFor not introduced until 2.7
    #logging_dbg = LOG.isEnabledFor(DEBUG)
    logging_dbg = LOG.logger.isEnabledFor(DEBUG)
    #logging_dbg = True
    if logging_dbg:
        if dump_parms:
            d_args, d_kwds = ((args, kwds) if filter_ is None
                              else filter_(*args, **kwds))
            LOG.debug("Entering args:%s kwds:%s '%s' %s" %
                      (d_args, d_kwds, f.__name__, f.__module__))
        else:
            LOG.debug("Entering '%s' %s" % (f.__name__, f.__module__))
    r = f(*args, **kwds)
    if logging_dbg:
        if dump_parms:
            LOG.debug("Exiting: return '%s' '%s' %s" %
                      (r, f.__name__, f.__module__))
        else:
            LOG.debug("Exiting: return '%s' %s" % (f.__name__, f.__module__))
    return r
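# wrapper() closes over f, dump_parms, and filter_, so it is presumably the
# inner function of a decorator factory. A hedged sketch of what such a
# factory could look like; the name trace_calls is an assumption, and this
# version uses the stdlib logger directly rather than nova's logging adapter:
import functools
from logging import DEBUG, getLogger

def trace_calls(dump_parms=False, filter_=None):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwds):
            log = getLogger(f.__module__)
            if log.isEnabledFor(DEBUG):
                if dump_parms:
                    d_args, d_kwds = ((args, kwds) if filter_ is None
                                      else filter_(*args, **kwds))
                    log.debug("Entering args:%s kwds:%s '%s' %s",
                              d_args, d_kwds, f.__name__, f.__module__)
                else:
                    log.debug("Entering '%s' %s", f.__name__, f.__module__)
            r = f(*args, **kwds)
            if log.isEnabledFor(DEBUG):
                log.debug("Exiting: return '%s' '%s' %s",
                          r, f.__name__, f.__module__)
            return r
        return wrapper
    return decorator

@trace_calls(dump_parms=True)
def _traced_add(a, b):
    return a + b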
def main():
    objects.register_all()
    config.parse_args(sys.argv)
    logging.setup("service_monitor")
    utils.monkey_patch()

    LOG = logging.getLogger('service_monitor')

    monitor = ServiceMonitor()
    monitor.monitor(True)
def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
             protocol=eventlet.wsgi.HttpProtocol, backlog=128,
             use_ssl=False, max_url_len=None):
    """Initialize, but do not start, a WSGI server.

    :param name: Pretty name for logging.
    :param app: The WSGI application to serve.
    :param host: IP address to serve the application.
    :param port: Port number to serve the application.
    :param pool_size: Maximum number of eventlets to spawn concurrently.
    :param backlog: Maximum number of queued connections.
    :param max_url_len: Maximum length of permitted URLs.
    :returns: None
    :raises: nova.exception.InvalidInput
    """
    # Allow operators to customize the maximum HTTP request header line size.
    eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
    self.name = name
    self.app = app
    self._server = None
    self._protocol = protocol
    self.pool_size = pool_size or self.default_pool_size
    self._pool = eventlet.GreenPool(self.pool_size)
    self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
    self._wsgi_logger = logging.WritableLogger(self._logger)
    self._use_ssl = use_ssl
    self._max_url_len = max_url_len
    self.client_socket_timeout = CONF.client_socket_timeout or None

    if backlog < 1:
        raise exception.InvalidInput(
            reason='The backlog must be at least 1')

    bind_addr = (host, port)
    # TODO(dims): eventlet's green dns/socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix.
    try:
        info = socket.getaddrinfo(bind_addr[0],
                                  bind_addr[1],
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]
    except Exception:
        family = socket.AF_INET

    try:
        self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
    except EnvironmentError:
        LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
                  {'host': host, 'port': port})
        raise

    (self.host, self.port) = self._socket.getsockname()[0:2]
    LOG.info(_LI("%(name)s listening on %(host)s:%(port)s"),
             {'name': self.name, 'host': self.host, 'port': self.port})
def main():
    config.parse_args(sys.argv)
    logging.setup("nova")
    global LOG
    LOG = logging.getLogger("nova.virt.baremetal.deploy_helper")
    objects.register_all()
    app = BareMetalDeploy()
    srv = simple_server.make_server("", 10000, app)
    srv.serve_forever()
def _intercept_log_messages(self):
    try:
        mylog = logging.getLogger('nova')
        stream = cStringIO.StringIO()
        handler = logging.logging.StreamHandler(stream)
        handler.setFormatter(logging.LegacyFormatter())
        mylog.logger.addHandler(handler)
        yield stream
    finally:
        mylog.logger.removeHandler(handler)
def intercept_log_messages():
    try:
        mylog = logging.getLogger("nova")
        stream = cStringIO.StringIO()
        handler = logging.logging.StreamHandler(stream)
        handler.setFormatter(logging.ContextFormatter())
        mylog.logger.addHandler(handler)
        yield stream
    finally:
        mylog.logger.removeHandler(handler)
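# intercept_log_messages() is a generator with a try/finally around its
# yield, so it is presumably decorated with @contextlib.contextmanager in the
# original module (the decorator is not shown in this excerpt). Assuming
# that, a test can capture nova log output like this (hedged usage sketch):
with intercept_log_messages() as stream:
    logging.getLogger('nova').logger.error('something went wrong')
    assert 'something went wrong' in stream.getvalue()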
def notify(_context, message):
    """Notify the recipient of the desired event given the model.

    Log notifications using OpenStack's default logging system.
    """
    priority = message.get('priority', CONF.default_notification_level)
    priority = priority.lower()
    logger = logging.getLogger(
        'nova.openstack.common.notification.%s' % message['event_type'])
    getattr(logger, priority)(json.dumps(message))
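# The driver above only depends on the 'event_type' key and an optional
# 'priority' key (falling back to CONF.default_notification_level). A hedged
# illustration of a message it would accept; every field value here is made
# up for the example, not prescribed by the driver:
example_message = {
    'event_type': 'compute.instance.create.end',  # selects the child logger
    'priority': 'INFO',                           # selects the log method
    'payload': {'instance_id': 'fake-uuid'},
}
notify(None, example_message)
# -> json.dumps(example_message) is logged at INFO level on the logger
#    'nova.openstack.common.notification.compute.instance.create.end'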
def create(cls, host=None, binary=None, topic=None, manager=None,
           report_interval=None, periodic_enable=None,
           periodic_fuzzy_delay=None, periodic_interval_max=None,
           db_allowed=True):
    """Instantiates class and passes back application object.

    :param host: defaults to CONF.host
    :param binary: defaults to basename of executable
    :param topic: defaults to bin_name - 'nova-' part
    :param manager: defaults to CONF.<topic>_manager
    :param report_interval: defaults to CONF.report_interval
    :param periodic_enable: defaults to CONF.periodic_enable
    :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
    :param periodic_interval_max: if set, the max time to wait between runs
    """
    if not host:
        host = CONF.host
    if not binary:
        binary = os.path.basename(sys.argv[0])
    if not topic:
        topic = binary.rpartition('nova-')[2]
    if not manager:
        manager_cls = ('%s_manager' % binary.rpartition('nova-')[2])
        manager = CONF.get(manager_cls, None)
    if report_interval is None:
        report_interval = CONF.report_interval
    if periodic_enable is None:
        periodic_enable = CONF.periodic_enable
    if periodic_fuzzy_delay is None:
        periodic_fuzzy_delay = CONF.periodic_fuzzy_delay

    if CONF.remote_debug.host and CONF.remote_debug.port:
        from pydev import pydevd

        LOG = logging.getLogger('nova')
        LOG.debug(_('Listening on %(host)s:%(port)s for debug connection'),
                  {'host': CONF.remote_debug.host,
                   'port': CONF.remote_debug.port})

        pydevd.settrace(host=CONF.remote_debug.host,
                        port=CONF.remote_debug.port,
                        stdoutToServer=False,
                        stderrToServer=False)

        LOG.warn(_('WARNING: Using the remote debug option changes how '
                   'Nova uses the eventlet library to support async IO. This '
                   'could result in failures that do not occur under normal '
                   'operation. Use at your own risk.'))

    service_obj = cls(host, binary, topic, manager,
                      report_interval=report_interval,
                      periodic_enable=periodic_enable,
                      periodic_fuzzy_delay=periodic_fuzzy_delay,
                      periodic_interval_max=periodic_interval_max,
                      db_allowed=db_allowed)

    return service_obj
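# Hedged usage sketch: callers such as the nova-all main() further down in
# this collection pass only binary/topic/manager and let everything else fall
# back to the CONF defaults documented above:
#
#     service_obj = service.Service.create(binary='nova-compute')
#     # topic falls back to 'compute', manager to CONF.compute_manager,
#     # report/periodic settings to their CONF.* values.
#     launcher.launch_service(service_obj)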
def __init__(self, host, port, cli=False):
    if cli:
        import logging
        self.LOG = logging.getLogger("vifinfo_client")
    else:
        from nova.openstack.common import log
        self.LOG = log.getLogger(__name__)
    self.server = "%s:%s" % (host, port)
    self.LOG.debug("quantum server: %s" % self.server)
def __init__(self, app, conf):
    super(NovaMoon, self).__init__(app)
    self.LOG = logging.getLogger(__name__)
    self.conf = conf
    self.app = app
    self.password = self.conf.get("moon_server_password")
    self.moon_server_ip = self.conf.get("moon_server_ip")
    self.moon_server_port = self.conf.get("moon_server_port", 8080)
    self.LOG.info('Starting moon middleware to {}'.format(self.moon_server_ip))
    self._moon_client = get_moon_client(
        self.moon_server_ip,
        self.moon_server_port,
        password=self.password)
def __init__(self, name, app, host="0.0.0.0", port=0, pool_size=None,
             protocol=eventlet.wsgi.HttpProtocol, backlog=128,
             use_ssl=False, max_url_len=None):
    """Initialize, but do not start, a WSGI server.

    :param name: Pretty name for logging.
    :param app: The WSGI application to serve.
    :param host: IP address to serve the application.
    :param port: Port number to serve the application.
    :param pool_size: Maximum number of eventlets to spawn concurrently.
    :param backlog: Maximum number of queued connections.
    :param max_url_len: Maximum length of permitted URLs.
    :returns: None
    :raises: nova.exception.InvalidInput
    """
    self.name = name
    self.app = app
    self._server = None
    self._protocol = protocol
    self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
    self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
    self._wsgi_logger = logging.WritableLogger(self._logger)
    self._use_ssl = use_ssl
    self._max_url_len = max_url_len

    if backlog < 1:
        raise exception.InvalidInput(reason="The backlog must be at least 1")

    bind_addr = (host, port)
    # TODO(dims): eventlet's green dns/socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix.
    try:
        info = socket.getaddrinfo(bind_addr[0],
                                  bind_addr[1],
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]
    except Exception:
        family = socket.AF_INET

    self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
    (self.host, self.port) = self._socket.getsockname()[0:2]
    LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)
def main():
    config.parse_args(sys.argv)
    logging.setup("nova")
    LOG = logging.getLogger('nova.all')
    utils.monkey_patch()
    objects.register_all()
    launcher = service.process_launcher()

    # nova-api
    for api in CONF.enabled_apis:
        try:
            should_use_ssl = api in CONF.enabled_ssl_apis
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            launcher.launch_service(server, workers=server.workers or 1)
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s-api'), api)

    for mod in [s3server, xvp_proxy]:
        try:
            launcher.launch_service(mod.get_wsgi_server())
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), mod.__name__)

    for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
                   'nova-cert', 'nova-conductor']:

        # FIXME(sirp): Most service configs are defined in nova/service.py,
        # but conductor has set a new precedent of storing these configs in
        # nova/<service>/api.py.
        #
        # We should update the existing services to use this new approach so
        # we don't have to treat conductor differently here.
        if binary == 'nova-conductor':
            topic = CONF.conductor.topic
            manager = CONF.conductor.manager
        else:
            topic = None
            manager = None

        try:
            launcher.launch_service(service.Service.create(
                binary=binary, topic=topic, manager=manager))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)
    launcher.wait()
def test_error_notification(self):
    self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
                   'nova.notifier.rabbit_notifier')
    self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True)
    LOG = log.getLogger('nova')
    log.setup('nova')
    msgs = []

    def mock_notify(context, topic, data):
        msgs.append(data)

    self.stubs.Set(nova.openstack.common.rpc, 'notify', mock_notify)
    LOG.error('foo')
    self.assertEqual(1, len(msgs))
    msg = msgs[0]
    self.assertEqual(msg['event_type'], 'error_notification')
    self.assertEqual(msg['priority'], 'ERROR')
    self.assertEqual(msg['payload']['error'], 'foo')
def setUp(self):
    super(DriverTestCase, self).setUp()
    self.flags(volume_driver=self.driver_name,
               logging_default_format_string="%(message)s")
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    self.output = ""

    def _fake_execute(_command, *_args, **_kwargs):
        """Fake _execute."""
        return self.output, None

    self.volume.driver.set_execute(_fake_execute)

    log = logging.getLogger('nova')
    self.stream = cStringIO.StringIO()
    log.logger.addHandler(logging.logging.StreamHandler(self.stream))

    inst = {}
    instance = db.instance_create(self.context, {})
    self.instance_id = instance['id']
    self.instance_uuid = instance['uuid']
def _load_config():
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    import ConfigParser

    from oslo.config import cfg

    from nova.openstack.common import log as logging

    global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
    if loaded:
        return

    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        cfg = ConfigParser.RawConfigParser()
        cfg.read(cfgfile)

        if cfg.has_option("Nova", "vendor"):
            NOVA_VENDOR = cfg.get("Nova", "vendor")

        if cfg.has_option("Nova", "product"):
            NOVA_PRODUCT = cfg.get("Nova", "product")

        if cfg.has_option("Nova", "package"):
            NOVA_PACKAGE = cfg.get("Nova", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error(_("Failed to load %(cfgfile)s: %(ex)s"),
                  {'cfgfile': cfgfile, 'ex': ex})
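# Hedged illustration of the "release" file this loader expects: an INI-style
# file with a [Nova] section, located via cfg.CONF.find_file("release"). The
# values below are placeholders, not real packaging metadata:
#
#     [Nova]
#     vendor = ExampleVendor
#     product = OpenStack Nova
#     package = 1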
def setUp(self):
    super(NotifierListTestCase, self).setUp()
    list_notifier._reset_drivers()

    # Mock log to add one to exception_count when log.exception is called
    def mock_exception(cls, *args):
        self.exception_count += 1

    self.exception_count = 0
    list_notifier_log = logging.getLogger("nova.notifier.list_notifier")
    self.stubs.Set(list_notifier_log, "exception", mock_exception)

    # Mock no_op notifier to add one to notify_count when called.
    def mock_notify(cls, *args):
        self.notify_count += 1

    self.notify_count = 0
    self.stubs.Set(nova.notifier.no_op_notifier, "notify", mock_notify)

    # Mock log_notifier to raise RuntimeError when called.
    def mock_notify2(cls, *args):
        raise RuntimeError("Bad notifier.")

    self.stubs.Set(nova.notifier.log_notifier, "notify", mock_notify2)
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.openstack.common import log as logging
from sqlalchemy import Column
from sqlalchemy import Enum
from sqlalchemy import MetaData
from sqlalchemy import Table

LOG = logging.getLogger()


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    shadow_table = Table('shadow_instances', meta, autoload=True)
    locked_by_column = getattr(shadow_table.c, 'locked_by')
    if "SHADOW_INSTANCES0LOCKED_BY" in str(locked_by_column.type):
        LOG.info("the shadow_instances table needs to be converted.")
        shadow_table.drop()
        table = Table('instances', meta, autoload=True)
        columns = []
        for column in table.columns:
            if column.name == 'locked_by':
from nova.conductor import api as conductor_api
from nova.conductor.tasks import live_migrate
import nova.context
from nova import exception
from nova import manager
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova.openstack.common.rpc import common as rpc_common
from nova import quota
from nova.scheduler import utils as scheduler_utils

LOG = logging.getLogger(__name__)

scheduler_driver_opt = cfg.StrOpt(
    'scheduler_driver',
    default='nova.scheduler.filter_scheduler.FilterScheduler',
    help='Default driver to use for the scheduler')

CONF = cfg.CONF
CONF.register_opt(scheduler_driver_opt)

QUOTAS = quota.QUOTAS


class SchedulerManager(manager.Manager):
    """Chooses a host to run instances on."""

    RPC_API_VERSION = '2.7'
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import sys

import zmq

from nova.openstack.common import cfg
from nova.openstack.common import log as logging

from common import config
import msg_handler as message
from eswitch_handler import eSwitchHandler

LOG = logging.getLogger('mlnx_daemon')


class MlxEswitchDaemon(object):
    def __init__(self):
        self.max_polling_count = cfg.CONF.DAEMON.max_polling_count
        self.default_timeout = cfg.CONF.DAEMON.default_timeout
        fabrics = self._parse_physical_mapping()
        self.eswitch_handler = eSwitchHandler(fabrics)
        self.dispatcher = message.MessageDispatch(self.eswitch_handler)

    def start(self):
        self._init_connections()

    def _parse_physical_mapping(self):
        fabrics = []
        for entry in cfg.CONF.DAEMON.fabrics:
"""Handles all requests relating to GridCentric functionality.""" import random from nova import compute from nova.compute import task_states from nova.compute import vm_states from nova import exception from nova.db import base from nova import quota from nova.openstack.common import log as logging from nova.openstack.common import rpc from nova import utils from oslo.config import cfg LOG = logging.getLogger('nova.gridcentric.api') CONF = cfg.CONF gridcentric_api_opts = [ cfg.StrOpt('gridcentric_topic', default='gridcentric', help='the topic gridcentric nodes listen on') ] CONF.register_opts(gridcentric_api_opts) class API(base.Base): """API for interacting with the gridcentric manager.""" def __init__(self, **kwargs): super(API, self).__init__(**kwargs) self.compute_api = compute.API()
import os
import pwd
import stat
import time
import glob
import threading
import tempfile

import nova
from nova import exception
from nova import flags
from nova.virt import images
from nova.compute import utils as compute_utils
from nova.openstack.common import cfg
from nova.openstack.common import log as logging

LOG = logging.getLogger('nova.gridcentric.vmsconn')
FLAGS = flags.FLAGS

vmsconn_opts = [
    cfg.StrOpt('libvirt_user',
               default='libvirt-qemu',
               help='The user that libvirt runs qemu as.'),
    cfg.StrOpt('openstack_user',
               default='',
               help='The openstack user')]
FLAGS.register_opts(vmsconn_opts)

from eventlet import tpool

import vms.commands as commands
import vms.logger as logger
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Tests For Compute w/ Cells
"""

import functools

from nova.compute import cells_api as compute_cells_api
from nova import db
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.tests.compute import test_compute

LOG = logging.getLogger('nova.tests.test_compute_cells')

ORIG_COMPUTE_API = None


def stub_call_to_cells(context, instance, method, *args, **kwargs):
    fn = getattr(ORIG_COMPUTE_API, method)
    original_instance = kwargs.pop('original_instance', None)
    if original_instance:
        instance = original_instance
        # Restore this in 'child cell DB'
        db.instance_update(context, instance['uuid'],
                           dict(vm_state=instance['vm_state'],
                                task_state=instance['task_state']))

    return fn(context, instance, *args, **kwargs)
def __call__(self, *args, **kwargs):
    stacktrace = "".join(traceback.format_stack())
    LOG = logging.getLogger('nova.compute')
    LOG.error(_LE('No db access allowed in nova-compute: %s'), stacktrace)
    raise exception.DBNotAllowed('nova-compute')
This routine can be used to find and delete stale lock files from nova's
interprocess synchronization. It can be used safely while services are
running.

"""

import logging
import optparse

from nova import flags
from nova.openstack.common import log
from nova import utils

LOG = log.getLogger('nova.utils')
FLAGS = flags.FLAGS


def parse_options():
    """Process command line options."""
    parser = optparse.OptionParser('usage: %prog [options]')
    parser.add_option('--verbose', action='store_true',
                      help='List lock files found and deleted')

    options, args = parser.parse_args()
    return options, args
from nova import exception
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt.imagecache import get_cache_fname
from nova.virt.libvirt import utils as libvirt_utils
from nova.compute import utils as compute_utils
from nova.openstack.common import log as logging
from oslo.config import cfg

from .. import image as co_image

from nova.openstack.common.gettextutils import _

LOG = logging.getLogger('nova.cobalt.vmsconn')
CONF = cfg.CONF

vmsconn_opts = [
    cfg.BoolOpt('cobalt_use_image_service',
                deprecated_name='gridcentric_use_image_service',
                default=False,
                help='Cobalt should use the image service to store disk '
                     'copies and descriptors.'),
    cfg.StrOpt('openstack_user',
               default='',
               help='The openstack user'),
    cfg.BoolOpt('cobalt_clean_unused_symlinks',
                default=True,
                help='Cobalt should clean up symlinks that it creates and '
                     'that are discovered to be unused.')
from urlparse import urlparse, parse_qs, urlunparse

import webob
from eventlet.green import urllib2

from nova import context
from nova import flags
from nova.consoleauth import rpcapi
from nova.openstack.common import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import version
from nova import wsgi

LOG = logging.getLogger("nova.ajaxterm.ajaxterm_proxy")

ajaxterm_proxy_opts = [
    cfg.IntOpt('ajaxterm_proxy_port',
               default=8022,
               help='Port that the AjaxTerm console proxy should bind to'),
    cfg.StrOpt('ajaxterm_proxy_host',
               default='0.0.0.0',
               help='Address that the AjaxTerm console proxy should bind to'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(ajaxterm_proxy_opts)
flags.DECLARE('consoleauth_topic', 'nova.consoleauth')
"""Nova common internal object model""" import collections from nova import context from nova import exception from nova.objects import utils as obj_utils from nova.openstack.common import log as logging from nova.openstack.common.rpc import common as rpc_common import nova.openstack.common.rpc.dispatcher import nova.openstack.common.rpc.proxy import nova.openstack.common.rpc.serializer LOG = logging.getLogger('object') def get_attrname(name): """Return the mangled name of the attribute's underlying storage.""" return '_%s' % name def make_class_properties(cls): # NOTE(danms): Inherit NovaObject's base fields only cls.fields.update(NovaObject.fields) for name, typefn in cls.fields.iteritems(): def getter(self, name=name, typefn=typefn): attrname = get_attrname(name) if not hasattr(self, attrname):
import functools

import netaddr
from oslo import messaging
import six

from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import fields
from nova.openstack.common import log as logging
from nova.openstack.common import versionutils

LOG = logging.getLogger('object')


class NotSpecifiedSentinel:
    pass


def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    return '_%s' % name


def make_class_properties(cls):
    # NOTE(danms/comstud): Inherit fields from super classes.
    # mro() returns the current class first and returns 'object' last, so
    # those can be skipped. Also be careful to not overwrite any fields
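# A minimal, self-contained sketch of the property-mangling idea that
# get_attrname() supports above: field values live on '_<name>' attributes
# and a generated property mediates access. The Field class here is a
# stand-in for illustration, not the real nova.objects.fields implementation.
class _SketchField(object):
    def __init__(self, coerce=str):
        self.coerce = coerce


def _make_property(name, field):
    attrname = '_%s' % name          # same convention as get_attrname()

    def getter(self):
        return getattr(self, attrname)

    def setter(self, value):
        setattr(self, attrname, field.coerce(value))

    return property(getter, setter)


class _SketchObject(object):
    fields = {'host': _SketchField(str)}


for _name, _field in _SketchObject.fields.items():
    setattr(_SketchObject, _name, _make_property(_name, _field))

_demo = _SketchObject()
_demo.host = 123
assert _demo.host == '123' and _demo._host == '123'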
def __call__(self, *args, **kwargs):
    stacktrace = "".join(traceback.format_stack())
    LOG = logging.getLogger('nova.compute')
    LOG.error(_('No db access allowed in nova-compute: %s'), stacktrace)
    raise exception.DBNotAllowed('nova-compute')
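# The __call__ above reads like part of a "no DB" stub that block_db_access()
# (called from the dhcpbridge main() earlier in this collection) installs in
# place of the DB API. A hedged sketch of how such a guard could be wired up;
# the class name and the patched attribute are assumptions, not taken from
# these excerpts:
import traceback


def block_db_access():
    class NoDBStub(object):
        def __getattr__(self, attr):
            return self

        def __call__(self, *args, **kwargs):
            stacktrace = "".join(traceback.format_stack())
            LOG = logging.getLogger('nova.compute')
            LOG.error(_('No db access allowed in nova-compute: %s'),
                      stacktrace)
            raise exception.DBNotAllowed('nova-compute')

    nova.db.api.IMPL = NoDBStub()  # assumed patch point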
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
    def setUp(self):
        super(TestGlanceStore, self).setUp()
        self.store = glance.GlanceStore()

        self.flags(host='1.1.1.1',
                   port=123,
                   api_insecure=False, group='glance')
        self.flags(connection_url='test_url',
                   connection_password='******',
                   group='xenserver')

        self.context = context.RequestContext(
            'user', 'project', auth_token='foobar')

        fake.reset()
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session

        self.stubs.Set(
            vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')

        self.instance = {'uuid': 'blah',
                         'system_metadata': [],
                         'auto_disk_config': True,
                         'os_type': 'default',
                         'xenapi_use_agent': 'true'}

    def _get_params(self):
        return {'image_id': 'fake_image_uuid',
                'glance_host': '1.1.1.1',
                'glance_port': 123,
                'glance_use_ssl': False,
                'sr_path': '/fake/sr/path',
                'extra_headers': {'X-Service-Catalog': '[]',
                                  'X-Auth-Token': 'foobar',
                                  'X-Roles': '',
                                  'X-Tenant-Id': 'project',
                                  'X-User-Id': 'user',
                                  'X-Identity-Status': 'Confirmed'}}

    def _get_download_params(self):
        params = self._get_params()
        params['uuid_stack'] = ['uuid1']
        return params

    def test_download_image(self):
        params = self._get_download_params()

        self.stubs.Set(vm_utils, '_make_uuid_stack',
                       lambda *a, **kw: ['uuid1'])

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'download_vhd', **params)
        self.mox.ReplayAll()

        self.store.download_image(self.context, self.session,
                                  self.instance, 'fake_image_uuid')

        self.mox.VerifyAll()

    @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
    @mock.patch.object(random, 'shuffle')
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    @mock.patch.object(logging.getLogger('nova.virt.xenapi.client.session'),
                       'debug')
    def test_download_image_retry(self, mock_log_debug, mock_fault,
                                  mock_sleep, mock_shuffle,
                                  mock_make_uuid_stack):
        params = self._get_download_params()
        self.flags(num_retries=2, group='glance')

        params.pop("glance_port")
        params.pop("glance_host")
        calls = [mock.call('glance', 'download_vhd', glance_port=9292,
                           glance_host='10.0.1.1', **params),
                 mock.call('glance', 'download_vhd', glance_port=9293,
                           glance_host='10.0.0.1', **params)]
        log_calls = [mock.call(mock.ANY, {'callback_result': '10.0.1.1',
                                          'attempts': 3, 'attempt': 1,
                                          'fn': 'download_vhd',
                                          'plugin': 'glance'}),
                     mock.call(mock.ANY, {'callback_result': '10.0.0.1',
                                          'attempts': 3, 'attempt': 2,
                                          'fn': 'download_vhd',
                                          'plugin': 'glance'})]

        glance_api_servers = ['10.0.1.1:9292', 'http://10.0.0.1:9293']
        self.flags(api_servers=glance_api_servers, group='glance')

        with (mock.patch.object(self.session, 'call_plugin_serialized')
              ) as mock_call_plugin_serialized:
            error_details = ["", "", "RetryableError", ""]
            error = self.session.XenAPI.Failure(details=error_details)
            mock_call_plugin_serialized.side_effect = [error, "success"]

            self.store.download_image(self.context, self.session,
                                      self.instance, 'fake_image_uuid')

            mock_call_plugin_serialized.assert_has_calls(calls)
            mock_log_debug.assert_has_calls(log_calls, any_order=True)

            self.assertEqual(1, mock_fault.call_count)

    def _get_upload_params(self, auto_disk_config=True,
                           expected_os_type='default'):
        params = self._get_params()
        params['vdi_uuids'] = ['fake_vdi_uuid']
        params['properties'] = {'auto_disk_config': auto_disk_config,
                                'os_type': expected_os_type}
        return params

    def _test_upload_image(self, auto_disk_config,
                           expected_os_type='default'):
        params = self._get_upload_params(auto_disk_config, expected_os_type)
        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'upload_vhd', **params)
        self.mox.ReplayAll()

        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])

        self.mox.VerifyAll()

    def test_upload_image(self):
        self._test_upload_image(True)

    def test_upload_image_None_os_type(self):
        self.instance['os_type'] = None
        self._test_upload_image(True, 'linux')

    def test_upload_image_no_os_type(self):
        del self.instance['os_type']
        self._test_upload_image(True, 'linux')

    def test_upload_image_auto_config_disk_disabled(self):
        sys_meta = [{"key": "image_auto_disk_config",
                     "value": "Disabled"}]
        self.instance["system_metadata"] = sys_meta
        self._test_upload_image("disabled")

    def test_upload_image_raises_exception(self):
        params = self._get_upload_params()

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(RuntimeError)
        self.mox.ReplayAll()

        self.assertRaises(RuntimeError, self.store.upload_image,
                          self.context, self.session, self.instance,
                          'fake_image_uuid', ['fake_vdi_uuid'])

        self.mox.VerifyAll()

    def test_upload_image_retries_then_raises_exception(self):
        self.flags(num_retries=2, group='glance')
        params = self._get_upload_params()

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.mox.StubOutWithMock(time, 'sleep')
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')

        error_details = ["", "", "RetryableError", ""]
        error = self.session.XenAPI.Failure(details=error_details)

        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(
            self.context, self.instance, error,
            (fake.Failure, error, mox.IgnoreArg()))
        time.sleep(0.5)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(
            self.context, self.instance, error,
            (fake.Failure, error, mox.IgnoreArg()))
        time.sleep(1)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(
            self.context, self.instance, error,
            (fake.Failure, error, mox.IgnoreArg()))
        self.mox.ReplayAll()

        self.assertRaises(exception.CouldNotUploadImage,
                          self.store.upload_image,
                          self.context, self.session, self.instance,
                          'fake_image_uuid', ['fake_vdi_uuid'])

        self.mox.VerifyAll()

    def test_upload_image_retries_on_signal_exception(self):
        self.flags(num_retries=2, group='glance')
        params = self._get_upload_params()

        self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
        self.mox.StubOutWithMock(time, 'sleep')
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')

        error_details = ["", "task signaled", "", ""]
        error = self.session.XenAPI.Failure(details=error_details)

        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(
            self.context, self.instance, error,
            (fake.Failure, error, mox.IgnoreArg()))
        time.sleep(0.5)
        # Note(johngarbutt) XenServer 6.1 and later has this error
        error_details = ["", "signal: SIGTERM", "", ""]
        error = self.session.XenAPI.Failure(details=error_details)
        self.session.call_plugin_serialized('glance', 'upload_vhd',
                                            **params).AndRaise(error)
        compute_utils.add_instance_fault_from_exc(
            self.context, self.instance, error,
            (fake.Failure, error, mox.IgnoreArg()))
        time.sleep(1)
        self.session.call_plugin_serialized('glance', 'upload_vhd', **params)
        self.mox.ReplayAll()

        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])

        self.mox.VerifyAll()
""" from eventlet import tpool from nova import exception from nova.openstack.common import log as logging import vms import vms.commands as commands import vms.config as config import vms.control as control import vms.logger as logger import vms.virt as virt import vms.vmsrun as vmsrun LOG = logging.getLogger("nova.gridcentric.vmsapi") class VmsApi(object): """ The interface into the vms commands. This will be versioned whenever the vms interface changes. """ def __init__(self, version="2.5"): self.version = version def configure_logger(self): logger.setup_for_library() def select_hypervisor(self, hypervisor):
sys.modules['ceilometer.' + name] = sys.modules['nova.' + name]

from nova.conductor import api
from oslo.config import cfg

from ceilometer import extension_manager
from ceilometer.compute.virt import inspector
from ceilometer.openstack.common.gettextutils import _

# This module runs inside the nova compute
# agent, which only configures the "nova" logger.
# We use a fake logger name in that namespace
# so that messages from this module appear
# in the log file.
LOG = logging.getLogger('nova.ceilometer.notifier')

_gatherer = None
instance_info_source = api.API()


class DeletedInstanceStatsGatherer(object):

    def __init__(self, extensions):
        self.mgr = extensions
        self.inspector = inspector.get_hypervisor_inspector()

    def _get_counters_from_plugin(self, ext, instance, *args, **kwds):
        """Used with the extension manager map() method."""
        return ext.obj.get_counters(self, instance)

    def __call__(self, instance):
from webob import exc

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.objects import block_device as block_device_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import uuidutils
from nova import volume

LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer("compute", "volumes")
authorize_attach = extensions.extension_authorizer("compute",
                                                   "volume_attachments")


def _translate_volume_detail_view(context, vol):
    """Maps keys for volumes details view."""
    d = _translate_volume_summary_view(context, vol)

    # No additional data / lookups at the moment
    return d
from nova.network.quantumv2 import _get_auth_token
from nova.openstack.common import log as logging
from nova import flags, exception
from nova.network import quantumv2
from nova.network.quantumv2.api import API, _ensure_requested_network_ordering

from ermak.udpclient import QuantumUdpClient

LOG = logging.getLogger("nova.network.api")
FLAGS = flags.FLAGS


class QuantumUdpApi(API):

    def __init__(self, *args, **kwargs):
        super(API, self).__init__(*args, **kwargs)

    def ext_client(self, context):
        token = context.auth_token
        if not token:
            if FLAGS.quantum_auth_strategy:
                token = _get_auth_token()
        if token:
            my_client = QuantumUdpClient(
                endpoint_url=FLAGS.quantum_url,
                token=token,
                timeout=FLAGS.quantum_url_timeout)
        else:
            my_client = QuantumUdpClient(
                endpoint_url=FLAGS.quantum_url,
                auth_strategy=None,
                timeout=FLAGS.quantum_url_timeout)
        return my_client