def _get_global_flags(self):
    """Get global flags."""
    # Good location to add_argument() global options like --verbose
    self.parser.add_argument('--json',
                             action='store_true',
                             help='JSON formatted output')
    self.parser.add_argument('-k', '--insecure',
                             action='store_true',
                             help='Explicitly allow the client to perform'
                                  ' "insecure SSL" (https) requests.'
                                  ' The server\'s certificate will not be'
                                  ' verified against any certificate'
                                  ' authorities. This option should be'
                                  ' used with caution.')
    self.parser.add_argument('-d', '--debug',
                             action='store_true',
                             help='Print out request and response '
                                  'details.')
    args, _ = self.parser.parse_known_args()
    if args.json:
        self.json_output = True
    if args.insecure:
        self.verify = False
    if args.debug:
        logging.basicConfig(
            format="%(levelname)s (%(module)s) %(message)s",
            level=logging.DEBUG)
        logging.getLogger('iso8601').setLevel(logging.WARNING)
        urllibpool = 'urllib3.connectionpool'
        logging.getLogger(urllibpool).setLevel(logging.WARNING)
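
# A minimal standalone sketch (stdlib only, hypothetical flag names) of the
# pattern above: peek at global options with parse_known_args() before the
# full parser is built, so --debug can configure logging early.
import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
args, _unparsed = parser.parse_known_args()
if args.debug:
    logging.basicConfig(
        format="%(levelname)s (%(module)s) %(message)s",
        level=logging.DEBUG)
    # Keep chatty third-party loggers quiet even in debug mode.
    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)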
def test_screen_logs(self):
    with fixtures.FakeLogger() as logger:
        with tools.ScreeningLogger(log_name='ec2api.api'):
            LOG1 = logging.getLogger('ec2api.api')
            LOG1.error('error message')
            LOG2 = logging.getLogger('ec2api.api.vpc')
            LOG2.warning('warning message')
        self.assertIn('warning message', logger.output)
        self.assertNotIn('error message', logger.output)
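
# A sketch of how a screening context like the one exercised above could be
# built with stdlib logging alone; ec2api's real ScreeningLogger may differ.
# A logging.Filter that drops records from one exact logger name reproduces
# what the assertions expect: records emitted directly on 'ec2api.api' are
# screened, while records from the child 'ec2api.api.vpc' pass through.
import logging

class ScreenByName(logging.Filter):
    def __init__(self, log_name):
        super(ScreenByName, self).__init__()
        self._log_name = log_name

    def filter(self, record):
        # Drop records emitted directly on the screened logger only;
        # child loggers keep a different record.name and are kept.
        return record.name != self._log_name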
def setup(product_name, version="unknown"):
    dbg_color = handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG]
    handlers.ColorHandler.LEVEL_COLORS[logging.RDEBUG] = dbg_color

    oslogging.setup(CONF, product_name, version)

    if CONF.rally_debug:
        oslogging.getLogger(
            project=product_name).logger.setLevel(logging.RDEBUG)
def _setup_logging(self, debug):
    # Output the logs to the command-line interface
    color_handler = handlers.ColorHandler(sys.stdout)
    logger_root = logging.getLogger(None).logger
    logger_root.level = logging.DEBUG if debug else logging.WARNING
    logger_root.addHandler(color_handler)

    # Set the logger level of noisy libraries
    logging.getLogger("iso8601").logger.setLevel(logging.WARNING)
    logging.getLogger("urllib3.connectionpool").logger.setLevel(
        logging.WARNING)
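
# The same setup pattern with plain stdlib logging, for contrast with the
# oslo wrapper above (setup_logging is a hypothetical helper name):
import logging
import sys

def setup_logging(debug=False):
    handler = logging.StreamHandler(sys.stdout)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG if debug else logging.WARNING)
    root.addHandler(handler)
    # Pin noisy libraries to WARNING regardless of the root level.
    for noisy in ("iso8601", "urllib3.connectionpool"):
        logging.getLogger(noisy).setLevel(logging.WARNING)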
def setUp(self):
    super(LogLevelTestCase, self).setUp()
    levels = self.CONF.default_log_levels
    levels.append("nova-test=INFO")
    levels.append("nova-not-debug=WARN")
    self.config(default_log_levels=levels, verbose=True)
    log.setup(self.CONF, 'testing')
    self.log = log.getLogger('nova-test')
    self.log_no_debug = log.getLogger('nova-not-debug')
def test_debug_logging(self):
    self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))

    stdlog = self.useFixture(fixtures.StandardLogging())
    root = logging.getLogger()
    # there should no longer be a null handler
    self.assertEqual(1, len(root.handlers), root.handlers)
    log = logging.getLogger(__name__)
    log.info("at info")
    log.debug("at debug")
    self.assertIn("at info", stdlog.logger.output)
    self.assertIn("at debug", stdlog.logger.output)
def setup(product_name, logfile=None):
    dbg_color = handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG]
    handlers.ColorHandler.LEVEL_COLORS[logging.KBDEBUG] = dbg_color

    if logfile:
        if os.path.exists(logfile):
            os.remove(logfile)
        CONF.log_file = logfile

    oslogging.setup(CONF, product_name)

    if CONF.kb_debug:
        oslogging.getLogger(
            project=product_name).logger.setLevel(logging.KBDEBUG)
def _load_all_resources():
    from oslo_log import log as logging
    logging.getLogger('heat.engine.environment').logger.setLevel(
        logging.ERROR)
    manager = plugin_manager.PluginManager('heat.engine.resources')
    resource_mapping = plugin_manager.PluginMapping('resource')
    res_plugin_mappings = resource_mapping.load_all(manager)
    for mapping in res_plugin_mappings:
        name, cls = mapping
        if all_resources.get(name) is not None:
            all_resources[name].append(cls)
        else:
            all_resources[name] = [cls]
def main():
    log.register_options(CONF)
    config.set_middleware_defaults()
    CONF(sys.argv[1:], project='manila',
         version=version.version_string())
    log.setup(CONF, "manila")
    LOG = log.getLogger('manila.all')
    msg = _('manila-all is deprecated in Newton and '
            'will be removed in Ocata.')
    versionutils.report_deprecated_feature(LOG, msg)
    utils.monkey_patch()
    launcher = service.process_launcher()

    # manila-api
    try:
        server = service.WSGIService('osapi_share')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_share'))

    for binary in ['manila-share', 'manila-scheduler', 'manila-api',
                   'manila-data']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)

    launcher.wait()
def __init__(self, threads=1000, initialize_glance_store=False):
    os.umask(0o27)  # ensure files are created with the correct privileges
    self._logger = logging.getLogger("eventlet.wsgi.server")
    self.threads = threads
    self.children = set()
    self.stale_children = set()
    self.running = True
    # NOTE(abhishek): Allows us to only re-initialize glance_store when
    # the API's configuration reloads.
    self.initialize_glance_store = initialize_glance_store
    self.pgid = os.getpid()
    try:
        # NOTE(flaper87): Make sure this process
        # runs in its own process group.
        os.setpgid(self.pgid, self.pgid)
    except OSError:
        # NOTE(flaper87): When running glance-control,
        # (glance's functional tests, for example)
        # setpgid fails with EPERM as glance-control
        # creates a fresh session, of which the newly
        # launched service becomes the leader (session
        # leaders may not change process groups)
        #
        # Running glance-(api|registry) is safe and
        # shouldn't raise any error here.
        self.pgid = 0
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    if not CONF.enabled_backends:
        LOG.error('Configuration for cinder-volume does not specify '
                  '"enabled_backends". Using DEFAULT section to configure '
                  'drivers is not supported since Ocata.')
        sys.exit(1)

    if os.name == 'nt':
        # We cannot use oslo.service to spawn multiple services on Windows.
        # It relies on forking, which is not available on Windows.
        # Furthermore, service objects are unmarshallable objects that are
        # passed to subprocesses.
        _launch_services_win32()
    else:
        _launch_services_posix()
def load_paste_app(app_name=None):
    """Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file.

    :param app_name: name of the application to load

    :raises RuntimeError: when config file cannot be located or
        application cannot be loaded from config file
    """
    if app_name is None:
        app_name = cfg.CONF.prog

    conf_file = _get_deployment_config_file()
    if conf_file is None:
        raise RuntimeError(_("Unable to locate config file"))

    try:
        app = paste_deploy_app(conf_file, app_name, cfg.CONF)

        # Log the options used when starting if we're in debug mode...
        if cfg.CONF.debug:
            cfg.CONF.log_opt_values(logging.getLogger(app_name),
                                    std_logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        raise RuntimeError(_("Unable to load %(app_name)s from "
                             "configuration file %(conf_file)s."
                             "\nGot: %(e)r") % {'app_name': app_name,
                                                'conf_file': conf_file,
                                                'e': e})
def trace_logging_wrapper(*args, **kwargs):
    if len(args) > 0:
        maybe_self = args[0]
    else:
        maybe_self = kwargs.get("self", None)

    if maybe_self and hasattr(maybe_self, "__module__"):
        logger = logging.getLogger(maybe_self.__module__)
    else:
        logger = LOG

    # NOTE(ameade): Don't bother going any further if DEBUG log level
    # is not enabled for the logger.
    if not logger.isEnabledFor(py_logging.DEBUG):
        return f(*args, **kwargs)

    all_args = inspect.getcallargs(f, *args, **kwargs)
    logger.debug("==> %(func)s: call %(all_args)r",
                 {"func": func_name, "all_args": all_args})

    start_time = time.time() * 1000
    try:
        result = f(*args, **kwargs)
    except Exception as exc:
        total_time = int(round(time.time() * 1000)) - start_time
        logger.debug("<== %(func)s: exception (%(time)dms) %(exc)r",
                     {"func": func_name, "time": total_time, "exc": exc})
        raise

    total_time = int(round(time.time() * 1000)) - start_time
    logger.debug("<== %(func)s: return (%(time)dms) %(result)r",
                 {"func": func_name, "time": total_time, "result": result})
    return result
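
# A condensed stdlib-only sketch of the same tracing-decorator pattern (the
# name `trace` is illustrative): skip all work unless DEBUG is enabled, then
# log the call arguments and wall-clock duration on return or exception.
import functools
import inspect
import logging
import time

def trace(f):
    logger = logging.getLogger(f.__module__)

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not logger.isEnabledFor(logging.DEBUG):
            return f(*args, **kwargs)
        all_args = inspect.getcallargs(f, *args, **kwargs)
        logger.debug("==> %s: call %r", f.__qualname__, all_args)
        start = time.monotonic()
        try:
            result = f(*args, **kwargs)
        except Exception as exc:
            logger.debug("<== %s: exception (%dms) %r", f.__qualname__,
                         (time.monotonic() - start) * 1000, exc)
            raise
        logger.debug("<== %s: return (%dms) %r", f.__qualname__,
                     (time.monotonic() - start) * 1000, result)
        return result

    return wrapper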
def __init__(self, conf):
    """Initialize a nova client object."""
    creds = conf.service_credentials

    logger = None
    if conf.nova_http_log_debug:
        logger = log.getLogger("novaclient-debug")
        logger.logger.setLevel(log.DEBUG)

    ks_session = keystone_client.get_session(conf)
    self.nova_client = nova_client.Client(
        version=api_versions.APIVersion('2.1'),
        session=ks_session,
        # nova adapter options
        region_name=creds.region_name,
        endpoint_type=creds.interface,
        service_type=conf.service_types.nova,
        logger=logger)
    self.glance_client = glanceclient.Client(
        version='2',
        session=ks_session,
        region_name=creds.region_name,
        interface=creds.interface,
        service_type=conf.service_types.glance)
def setup():
    global logger

    CONF(sys.argv[1:], project='example_rpc_server')
    log.setup(CONF, 'example_rpc_server')
    logger = log.getLogger(__name__)
def setUp(self):
    super(NetAppFileStorageLibraryTestCase, self).setUp()

    self.mock_object(na_utils, 'validate_driver_instantiation')

    # Mock loggers as themselves to allow logger arg validation
    mock_logger = log.getLogger('mock_logger')
    self.mock_object(lib_multi_svm.LOG, 'warning',
                     mock.Mock(side_effect=mock_logger.warning))
    self.mock_object(lib_multi_svm.LOG, 'error',
                     mock.Mock(side_effect=mock_logger.error))

    kwargs = {
        'configuration': fake.get_config_cmode(),
        'private_storage': mock.Mock(),
        'app_version': fake.APP_VERSION,
    }

    self.library = lib_multi_svm.NetAppCmodeMultiSVMFileStorageLibrary(
        fake.DRIVER_NAME, **kwargs)
    self.library._client = mock.Mock()
    self.library._client.get_ontapi_version.return_value = (1, 21)
    self.client = self.library._client
def load_paste_app(app_name=None):
    """Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file.

    :param app_name: name of the application to load

    :raises RuntimeError: when config file cannot be located or
        application cannot be loaded from config file
    """
    if app_name is None:
        app_name = cfg.CONF.prog

    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor()

    conf_file = _get_deployment_config_file()
    if conf_file is None:
        raise RuntimeError(_("Unable to locate config file [%s]") %
                           cfg.CONF.paste_deploy["api_paste_config"])

    try:
        app = wsgi.paste_deploy_app(conf_file, app_name, cfg.CONF)

        # Log the options used when starting if we're in debug mode...
        if cfg.CONF.debug:
            cfg.CONF.log_opt_values(logging.getLogger(app_name),
                                    sys_logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        raise RuntimeError(_("Unable to load %(app_name)s from "
                             "configuration file %(conf_file)s."
                             "\nGot: %(e)r") % {'app_name': app_name,
                                                'conf_file': conf_file,
                                                'e': e})
def setUp(self):
    super(DeprecatedDecorators, self).setUp()

    # The only reason this is here is because report_deprecated_feature()
    # registers the fatal_deprecations option which these tests use.
    versionutils.report_deprecated_feature(
        log.getLogger(__name__), 'ignore this message')
def main():
    # Parse config file and command line options, then start logging
    ironic_service.prepare_service(sys.argv)

    # Enable object backporting via the conductor
    base.IronicObject.indirection_api = base.IronicObjectIndirectionAPI()

    # Build and start the WSGI app
    host = CONF.api.host_ip
    port = CONF.api.port
    wsgi = simple_server.make_server(
        host, port,
        app.VersionSelectorApplication(),
        server_class=ThreadedSimpleServer)

    LOG = log.getLogger(__name__)
    LOG.info(_LI("Serving on http://%(host)s:%(port)s"),
             {'host': host, 'port': port})
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    try:
        wsgi.serve_forever()
    except KeyboardInterrupt:
        pass
def setUp(self):
    super(NetAppFileStorageLibraryTestCase, self).setUp()

    self.mock_object(na_utils, 'validate_driver_instantiation')

    # Mock loggers as themselves to allow logger arg validation
    mock_logger = log.getLogger('mock_logger')
    self.mock_object(lib_single_svm.LOG, 'info',
                     mock.Mock(side_effect=mock_logger.info))

    config = fake.get_config_cmode()
    config.netapp_vserver = fake.VSERVER1

    kwargs = {
        'configuration': config,
        'private_storage': mock.Mock(),
        'app_version': fake.APP_VERSION,
    }

    self.library = lib_single_svm.NetAppCmodeSingleSVMFileStorageLibrary(
        fake.DRIVER_NAME, **kwargs)
    self.library._client = mock.Mock()
    self.client = self.library._client
def load_paste_app(app_name=None):
    """Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file.

    :param app_name: name of the application to load

    :raises RuntimeError: when config file cannot be located or
        application cannot be loaded from config file
    """
    if app_name is None:
        app_name = CONF.prog

    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor()

    conf_file = _get_deployment_config_file()

    try:
        logger = logging.getLogger(__name__)
        logger.debug("Loading {app_name} from {conf_file}".format(
            conf_file=conf_file, app_name=app_name))

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)
        return app
    except (LookupError, ImportError) as e:
        msg = (_("Unable to load %(app_name)s from configuration file"
                 " %(conf_file)s. \nGot: %(e)r") % {'conf_file': conf_file,
                                                    'app_name': app_name,
                                                    'e': e})
        logger.error(msg)
        raise RuntimeError(msg)
def __init__(self, threads=1000, workers=0):
    os.umask(0o27)  # ensure files are created with the correct privileges
    self._logger = logging.getLogger("eventlet.wsgi.server")
    self._wsgi_logger = loggers.WritableLogger(self._logger)
    self.threads = threads
    self.children = set()
    self.stale_children = set()
    self.running = True
    self.pgid = os.getpid()
    self.workers = workers
    try:
        # NOTE(flaper87): Make sure this process
        # runs in its own process group.
        os.setpgid(self.pgid, self.pgid)
    except OSError:
        # NOTE(flaper87): When running searchlight-control,
        # (searchlight's functional tests, for example)
        # setpgid fails with EPERM as searchlight-control
        # creates a fresh session, of which the newly
        # launched service becomes the leader (session
        # leaders may not change process groups)
        #
        # Running searchlight-api is safe and
        # shouldn't raise any error here.
        self.pgid = 0
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='guts',
         version=version.version_string())
    logging.setup(CONF, "guts")
    LOG = logging.getLogger('guts.all')
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version)
    rpc.init(CONF)
    launcher = service.process_launcher()

    # guts-api
    try:
        server = service.WSGIService('osapi_migration')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_migration'))

    # guts-migration
    try:
        launcher.launch_service(
            service.Service.create(binary='guts-migration'))
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load guts-migration'))

    launcher.wait()
def _run(self, application, socket):
    """Start a WSGI server in a new green thread."""
    logger = logging.getLogger('eventlet.wsgi')
    eventlet.wsgi.server(socket,
                         application,
                         custom_pool=self.tg.pool,
                         log=loggers.WritableLogger(logger))
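
# WritableLogger above wraps a logger in a file-like object so servers that
# expect a stream with .write() can log through the logging tree instead of
# stderr. A minimal equivalent (hypothetical name, stdlib only):
import logging

class LoggerWriter(object):
    def __init__(self, logger, level=logging.DEBUG):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Servers often write trailing newlines; strip before logging.
        msg = msg.rstrip()
        if msg:
            self.logger.log(self.level, msg)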
def setUp(self):
    super(DefaultAuthPluginTests, self).setUp()

    self.stream = six.StringIO()
    self.logger = logging.getLogger(__name__)
    self.session = session.Session()
    self.requests_mock = self.useFixture(rm_fixture.Fixture())
def main():
    """Parse environment and arguments and call the appropriate action."""
    config.parse_args(sys.argv,
                      default_config_files=jsonutils.loads(
                          os.environ['CONFIG_FILE']))

    logging.setup(CONF, "nova")
    global LOG
    LOG = logging.getLogger('nova.dhcpbridge')
    objects.register_all()

    if not CONF.conductor.use_local:
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()

    if CONF.action.name in ['add', 'del', 'old']:
        LOG.debug("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'",
                  {"action": CONF.action.name,
                   "mac": CONF.action.mac,
                   "ip": CONF.action.ip})
        CONF.action.func(CONF.action.mac, CONF.action.ip)
    else:
        try:
            network_id = int(os.environ.get('NETWORK_ID'))
        except TypeError:
            LOG.error(_LE("Environment variable 'NETWORK_ID' must be set."))
            return 1

        print(init_leases(network_id))

    rpc.cleanup()
def __init__(self, parse_conf=True, config_path=None):
    """Initialize a configuration from a conf directory and conf file."""
    super(TempestConfigPrivate, self).__init__()
    config_files = []
    failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

    if config_path:
        path = config_path
    else:
        # Environment variables override defaults...
        conf_dir = os.environ.get("TEMPEST_CONFIG_DIR",
                                  self.DEFAULT_CONFIG_DIR)
        conf_file = os.environ.get("TEMPEST_CONFIG",
                                   self.DEFAULT_CONFIG_FILE)

        path = os.path.join(conf_dir, conf_file)

    if not os.path.isfile(path):
        path = failsafe_path

    # only parse the config file if we expect one to exist. This is needed
    # to remove an issue with the config file up to date checker.
    if parse_conf:
        config_files.append(path)
    logging.register_options(_CONF)
    if os.path.isfile(path):
        _CONF([], project="tempest", default_config_files=config_files)
    else:
        _CONF([], project="tempest")
    logging.setup(_CONF, "tempest")
    LOG = logging.getLogger("tempest")
    LOG.info("Using tempest config file %s", path)
    register_opts()
    self._set_attrs()
    if parse_conf:
        _CONF.log_opt_values(LOG, std_logging.DEBUG)
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.set_defaults(
        default_log_levels=logging.get_default_log_levels() +
        _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    if CONF.backup_workers > 1:
        LOG.info('Backup running with %s processes.', CONF.backup_workers)
        launcher = service.get_launcher()

        for i in range(CONF.backup_workers):
            _launch_backup_process(launcher, i)

        launcher.wait()
    else:
        LOG.info('Backup running in single process mode.')
        server = service.Service.create(binary='cinder-backup',
                                        coordination=True,
                                        process_number=1)
        service.serve(server)
        service.wait()
def init():
    from oslo_config import cfg
    CONF = cfg.CONF

    # NOTE(markmc): gracefully handle the CLI options not being registered
    if 'remote_debug' not in CONF:
        return

    if not (CONF.remote_debug.host and CONF.remote_debug.port):
        return

    from conveyor.i18n import _
    from oslo_log import log as logging
    LOG = logging.getLogger(__name__)

    LOG.debug('Listening on %(host)s:%(port)s for debug connection',
              {'host': CONF.remote_debug.host,
               'port': CONF.remote_debug.port})

    try:
        from pydev import pydevd
    except ImportError:
        import pydevd

    pydevd.settrace(host=CONF.remote_debug.host,
                    port=CONF.remote_debug.port,
                    stdoutToServer=False,
                    stderrToServer=False)

    LOG.warning(_('WARNING: Using the remote debug option changes how '
                  'Nova uses the eventlet library to support async IO. This '
                  'could result in failures that do not occur under normal '
                  'operation. Use at your own risk.'))
def __init__(self):
    """Init ONCE in the parent process."""
    super(Service, self).__init__()
    self.is_child = False

    # paste
    paste_config = CONF.wsgi.api_paste_config
    app = CONF.wsgi.app
    self._app = deploy.loadapp("config:%s" % paste_config, name=app)

    # pool
    self._pool = eventlet.GreenPool(CONF.wsgi.default_pool_size)

    # logger
    self._logger = logging.getLogger("pyingx.wsgi.server")

    # socket
    info = socket.getaddrinfo('0.0.0.0', '1314',
                              socket.AF_UNSPEC, socket.SOCK_STREAM)
    family = info[0][0]
    bind_addr = info[0][-1]
    self._socket = eventlet.listen(bind_addr, family=family, backlog=128)
    (self.host, self.port) = self._socket.getsockname()[0:2]
    LOG.info("pyingx listening on %(host)s:%(port)s",
             {'host': self.host, 'port': self.port})
    global CONTAINER_IMAGES_DEFAULTS
    CONTAINER_IMAGES_DEFAULTS = CONTAINER_IMAGE_PREPARE_PARAM[0]['set']


DEFAULT_TEMPLATE_FILE = os.path.join(sys.prefix, 'share', 'tripleo-common',
                                     'container-images',
                                     'overcloud_containers.yaml.j2')

DEFAULT_PREPARE_FILE = os.path.join(sys.prefix, 'share', 'tripleo-common',
                                    'container-images',
                                    'container_image_prepare_defaults.yaml')

if os.path.isfile(DEFAULT_PREPARE_FILE):
    init_prepare_defaults(DEFAULT_PREPARE_FILE)

LOG = logging.getLogger(__name__ + '.KollaImageBuilder')


def get_enabled_services(environment, roles_data):
    """Build list of enabled services

    :param environment: Heat environment for deployment
    :param roles_data: Roles file data used to filter services
    :returns: set of resource types representing enabled services
    """
    enabled_services = set()
    parameter_defaults = environment.get('parameter_defaults', {})
    for role in roles_data:
        count = parameter_defaults.get('%sCount' % role['name'],
                                       role.get('CountDefault', 0))
        try:
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import datetime
import json

from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.profiler
import osprofiler.web

PROFILER_LOG = logging.getLogger(cfg.CONF.profiler.profiler_log_name)


def log_to_file(info, context=None):
    attrs = [
        str(info['timestamp']),
        info['base_id'],
        info['parent_id'],
        info['trace_id'],
        info['name'],
    ]

    if 'info' in info and 'db' in info['info']:
        db_info = copy.deepcopy(info['info']['db'])
        db_info['params'] = {
            k: str(v) if isinstance(v, datetime.datetime) else v
            for k, v in db_info.get('params', {}).items()
        }
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections

from oslo_log import log

from ironic_python_agent import errors
from ironic_python_agent.extensions import base
from ironic_python_agent import hardware

LOG = log.getLogger()


class CleanExtension(base.BaseAgentExtension):
    @base.sync_command('get_clean_steps')
    def get_clean_steps(self, node, ports):
        """Get the list of clean steps supported for the node and ports

        :param node: A dict representation of a node
        :param ports: A dict representation of ports attached to node

        :returns: A list of clean steps with keys step, priority, and
            reboot_requested
        """
        LOG.debug('Getting clean steps, called with node: %(node)s, '
from unittest import mock

from alembic import script
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
from oslo_log import log
from oslotest import base as test_base
from sqlalchemy.sql import text

from manila.db.migrations.alembic import migration
from manila.tests.db.migrations.alembic import migrations_data_checks
from manila.tests import utils as test_utils

LOG = log.getLogger('manila.tests.test_migrations')


class ManilaMigrationsCheckers(test_migrations.WalkVersionsMixin,
                               migrations_data_checks.DbMigrationsData):
    """Test alembic migrations."""

    def setUp(self):
        super().setUp()
        self.engine = enginefacade.writer.get_engine()

    @property
    def snake_walk(self):
        return True

    @property
    def downgrade(self):
from oslo_db.sqlalchemy import session

import neutron.plugins.ml2.models as ml2_db
from neutron_fwaas.db.firewall import firewall_db
from neutron_fwaas.db.firewall import firewall_router_insertion_db as fw_rt_db
from neutron_lib import constants as cst

sys.path.append(r'/home/xiongjun/work/networking-tn/')

ROUTER_INTF = l3_db.DEVICE_OWNER_ROUTER_INTF
ROUTER_GW = l3_db.DEVICE_OWNER_ROUTER_GW

# streamlog = handlers.ColorHandler()
LOG = logging.getLogger(None).logger
# LOG.addHandler(streamlog)
LOG.setLevel(logging.DEBUG)
LOG.debug('trace')

CFG_ARGS = [
    '--config-file',
    '/etc/neutron/neutron.conf',
    '--config-file',
    '/etc/neutron/plugins/ml2/ml2_conf.ini',
]

CFG_KWARGS = {}

SUPPORTED_DR = ['vlan']
from oslo_log import log as logging

from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
import nova.db.api
from nova import exception
from nova.i18n import _LE, _LW
from nova import objects
from nova.objects import base as objects_base
from nova import service
from nova import utils

CONF = cfg.CONF
CONF.import_opt('network_topic', 'nova.network.rpcapi')
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')

LOG = logging.getLogger('nova.network')


def block_db_access():
    class NoDB(object):
        def __getattr__(self, attr):
            return self

        def __call__(self, *args, **kwargs):
            stacktrace = "".join(traceback.format_stack())
            LOG.error(_LE('No db access allowed in nova-network: %s'),
                      stacktrace)
            raise exception.DBNotAllowed('nova-network')

    nova.db.api.IMPL = NoDB()
See the License for the specific language governing permissions and
limitations under the License.

Freezer Backup modes related functions
"""

import os

from oslo_config import cfg
from oslo_log import log

from freezer.snapshot import lvm
from freezer.snapshot import vss
from freezer.utils import winutils

CONF = cfg.CONF
logging = log.getLogger(__name__)
home = os.path.expanduser("~")


def snapshot_create(backup_opt_dict):
    """Calls the code to take fs snapshots, depending on the platform

    :param backup_opt_dict:
    :return: boolean value, True if snapshot has been taken, false otherwise
    """
    if not backup_opt_dict.snapshot:
        return False

    if winutils.is_windows():
        if backup_opt_dict.snapshot:
def __init__(self, name, app, host=None, port=None, pool_size=None,
             protocol=eventlet.wsgi.HttpProtocol, backlog=128):
    """Initialize, but do not start, a WSGI server.

    :param name: Pretty name for logging.
    :param app: The WSGI application to serve.
    :param host: IP address to serve the application.
    :param port: Port number to serve the application.
    :param pool_size: Maximum number of eventlets to spawn concurrently.
    :returns: None
    """
    # Allow operators to customize http requests max header line size.
    eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
    self.client_socket_timeout = CONF.client_socket_timeout or None
    self.name = name
    self.app = app
    self._host = host or "0.0.0.0"
    self._port = port or 0
    self._server = None
    self._socket = None
    self._protocol = protocol
    self.pool_size = pool_size or self.default_pool_size
    self._pool = eventlet.GreenPool(self.pool_size)
    self._logger = logging.getLogger("eventlet.wsgi.server")

    if backlog < 1:
        raise exception.InvalidInput(
            reason='The backlog must be more than 1')

    bind_addr = (host, port)
    # TODO(dims): eventlet's green dns/socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    try:
        info = socket.getaddrinfo(bind_addr[0],
                                  bind_addr[1],
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]
    except Exception:
        family = socket.AF_INET

    cert_file = CONF.ssl_cert_file
    key_file = CONF.ssl_key_file
    ca_file = CONF.ssl_ca_file
    self._use_ssl = cert_file or key_file

    if cert_file and not os.path.exists(cert_file):
        raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

    if ca_file and not os.path.exists(ca_file):
        raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

    if key_file and not os.path.exists(key_file):
        raise RuntimeError(_("Unable to find key_file : %s") % key_file)

    if self._use_ssl and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you "
                             "must specify both a cert_file and "
                             "key_file option value in your "
                             "configuration file."))

    retry_until = time.time() + 30
    while not self._socket and time.time() < retry_until:
        try:
            self._socket = eventlet.listen(bind_addr, backlog=backlog,
                                           family=family)
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            eventlet.sleep(0.1)

    if not self._socket:
        raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                             "after trying for 30 seconds") %
                           {'host': host, 'port': port})

    (self._host, self._port) = self._socket.getsockname()[0:2]
    LOG.info(_LI("%(name)s listening on %(_host)s:%(_port)s"),
             {'name': self.name, '_host': self._host, '_port': self._port})
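
# The bind-with-retry loop above, reduced to plain sockets (no eventlet,
# hypothetical helper name) to show the core idea: keep retrying while the
# address is still in use, e.g. across quick service restarts, and give up
# after a fixed deadline.
import errno
import socket
import time

def bind_with_retry(host, port, backlog=128, timeout=30):
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind((host, port))
            sock.listen(backlog)
            return sock
        except OSError as err:
            sock.close()
            if err.errno != errno.EADDRINUSE:
                raise
            time.sleep(0.1)
    raise RuntimeError("Could not bind to %s:%s after trying for %ss"
                       % (host, port, timeout))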
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_log import log as logging

from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes import exceptions
from kuryr_kubernetes.handlers import k8s_base
from neutronclient.common import exceptions as n_exc

LOG = logging.getLogger(__name__)


class NamespaceHandler(k8s_base.ResourceEventHandler):
    OBJECT_KIND = constants.K8S_OBJ_NAMESPACE
    OBJECT_WATCH_PATH = "%s/%s" % (constants.K8S_API_BASE, "namespaces")

    def __init__(self):
        super(NamespaceHandler, self).__init__()
        self._drv_project = drivers.NamespaceProjectDriver.get_instance()
        self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
        self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
            specific_driver='multi_pool')
        self._drv_vif_pool.set_vif_driver()
def __init__(self, conf, name, app, host='0.0.0.0', port=0,  # nosec
             pool_size=None, protocol=eventlet.wsgi.HttpProtocol,
             backlog=128, use_ssl=False, max_url_len=None,
             logger_name='eventlet.wsgi.server',
             socket_family=None, socket_file=None, socket_mode=None):
    """Initialize, but do not start, a WSGI server.

    :param conf: Instance of ConfigOpts.
    :param name: Pretty name for logging.
    :param app: The WSGI application to serve.
    :param host: IP address to serve the application.
    :param port: Port number to serve the application.
    :param pool_size: Maximum number of eventlets to spawn concurrently.
    :param protocol: Protocol class.
    :param backlog: Maximum number of queued connections.
    :param use_ssl: Wraps the socket in an SSL context if True.
    :param max_url_len: Maximum length of permitted URLs.
    :param logger_name: The name for the logger.
    :param socket_family: Socket family.
    :param socket_file: location of UNIX socket.
    :param socket_mode: UNIX socket mode.
    :returns: None
    :raises: InvalidInput
    :raises: EnvironmentError
    """
    self.conf = conf
    self.conf.register_opts(_options.wsgi_opts)

    self.default_pool_size = self.conf.wsgi_default_pool_size

    # Allow operators to customize http requests max header line size.
    eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line
    self.name = name
    self.app = app
    self._server = None
    self._protocol = protocol
    self.pool_size = pool_size or self.default_pool_size
    self._pool = eventlet.GreenPool(self.pool_size)
    self._logger = logging.getLogger(logger_name)
    self._use_ssl = use_ssl
    self._max_url_len = max_url_len
    self.client_socket_timeout = conf.client_socket_timeout or None

    if backlog < 1:
        raise InvalidInput(reason=_('The backlog must be more than 0'))

    if not socket_family or socket_family in [socket.AF_INET,
                                              socket.AF_INET6]:
        self.socket = self._get_socket(host, port, backlog)
    elif hasattr(socket, "AF_UNIX") and socket_family == socket.AF_UNIX:
        self.socket = self._get_unix_socket(socket_file, socket_mode,
                                            backlog)
    else:
        raise ValueError(_("Unsupported socket family: %s") % socket_family)

    (self.host, self.port) = self.socket.getsockname()[0:2]

    if self._use_ssl:
        sslutils.is_enabled(conf)
def setUp(self):
    super(TestLoggingFixture, self).setUp()
    self.log = logging.getLogger(__name__)
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Switch managed_* column types from Unicode to String

Bug #276448
"""
from oslo_log import log as logging
from sqlalchemy.schema import MetaData, Table
from sqlalchemy import String

LOG = logging.getLogger()
meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records = Table('records', meta, autoload=True)

    records.columns.managed_extra.alter(type=String(100))
    records.columns.managed_plugin_type.alter(type=String(50))
    records.columns.managed_plugin_name.alter(type=String(50))
    records.columns.managed_resource_type.alter(type=String(50))
    records.columns.managed_resource_region.alter(type=String(100))
    records.columns.managed_tenant_id.alter(type=String(36))
from networking_bambuk._i18n import _LE
from networking_bambuk.common import config
from networking_bambuk.common import port_infos
from networking_bambuk.db.bambuk import bambuk_db

from oslo_log import log as o_log
from oslo_serialization import jsonutils

# Defined in neutron_lib.constants
ROUTER_INTERFACE_OWNERS = {
    'network:router_gateway',                # External net port
    'network:router_interface_distributed',  # Internal net port distributed
}

LOG = o_log.getLogger(__name__)


def _extend_dict_std_attr_id(self, res, db_obj):
    res['standard_attr_id'] = db_obj['standard_attr_id']


def _port_model_hook(ctx, original_model, query):
    port_id_col = securitygroups_db.SecurityGroupPortBinding.port_id
    query = query.outerjoin(securitygroups_db.SecurityGroupPortBinding,
                            original_model.id == port_id_col)
    return query


def _port_result_filter_hook(query, filters):
    val = filters and filters.get('security_group_id')
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import exception as ovoo_exc
import six

from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import utils

LOG = logging.getLogger('object')


def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    # FIXME(danms): This is just until we use o.vo's class properties
    # and object base.
    return '_obj_' + name


class NovaObjectRegistry(ovoo_base.VersionedObjectRegistry):
    def registration_hook(self, cls, index):
        # NOTE(danms): This is called when an object is registered,
        # and is responsible for maintaining nova.objects.$OBJECT
        # as the highest-versioned implementation of a given object.
        version = utils.convert_version_to_tuple(cls.VERSION)
# License for the specific language governing permissions and limitations
# under the License.

import ctypes

from oslo_log import log as oslo_logging
from six.moves import winreg

from cloudbaseinit import exception
from cloudbaseinit.utils.windows import kernel32
from cloudbaseinit.utils.windows.storage import base
from cloudbaseinit.utils.windows import wmi_loader

wmi = wmi_loader.wmi()

LOG = oslo_logging.getLogger(__name__)


class WSMStorageManager(base.BaseStorageManager):
    def __init__(self):
        self._conn = wmi.WMI(moniker='//./Root/Microsoft/Windows/Storage')

    def extend_volumes(self, volume_indexes=None):
        volumes = self._conn.MSFT_Volume()

        for idx, volume in enumerate(volumes, 1):
            # TODO(alexpilotti): don't rely on the volumes WMI query order
            if volume_indexes and idx not in volume_indexes:
                continue

            partitions = volume.associators(wmi_result_class='MSFT_Partition')
def setUpClass(cls):
    cls.LOG = logging.getLogger(cls._get_full_case_name())
    super(BaseTestCase, cls).setUpClass()
def setUp(self):
    super(JSONFormatterTestCase, self).setUp()
    self.log = log.getLogger('test-json')
    self._add_handler_with_cleanup(self.log,
                                   formatter=formatters.JSONFormatter)
    self._set_log_level_with_cleanup(self.log, logging.DEBUG)
def setUp(self):
    super(LoggerTestCase, self).setUp()
    self.log = log.getLogger(None)
class CreateJobTemplate(show.ShowOne):
    """Creates job template"""

    log = logging.getLogger(__name__ + ".CreateJobTemplate")

    def get_parser(self, prog_name):
        parser = super(CreateJobTemplate, self).get_parser(prog_name)

        parser.add_argument(
            '--name',
            metavar="<name>",
            help="Name of the job template [REQUIRED if JSON is not "
                 "provided]",
        )
        parser.add_argument(
            '--type',
            metavar="<type>",
            choices=JOB_TYPES_CHOICES,
            help="Type of the job (%s) "
                 "[REQUIRED if JSON is not provided]"
                 % ', '.join(JOB_TYPES_CHOICES)
        )
        parser.add_argument(
            '--mains',
            metavar="<main>",
            nargs='+',
            help="Name(s) or ID(s) for job's main job binary(s)",
        )
        parser.add_argument(
            '--libs',
            metavar="<lib>",
            nargs='+',
            help="Name(s) or ID(s) for job's lib job binary(s)",
        )
        parser.add_argument(
            '--description',
            metavar="<description>",
            help="Description of the job template")
        parser.add_argument(
            '--public',
            action='store_true',
            default=False,
            help='Make the job template public',
        )
        parser.add_argument(
            '--protected',
            action='store_true',
            default=False,
            help='Make the job template protected',
        )
        parser.add_argument(
            '--interface',
            metavar='<filename>',
            help='JSON representation of the interface')
        parser.add_argument(
            '--json',
            metavar='<filename>',
            help='JSON representation of the job template')

        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        if parsed_args.json:
            blob = osc_utils.read_blob_file_contents(parsed_args.json)
            try:
                template = jsonutils.loads(blob)
            except ValueError as e:
                raise exceptions.CommandError(
                    'An error occurred when reading '
                    'template from file %s: %s' % (parsed_args.json, e))
            data = client.jobs.create(**template).to_dict()
        else:
            if parsed_args.interface:
                blob = osc_utils.read_blob_file_contents(
                    parsed_args.interface)
                try:
                    parsed_args.interface = jsonutils.loads(blob)
                except ValueError as e:
                    raise exceptions.CommandError(
                        'An error occurred when reading '
                        'interface from file %s: %s'
                        % (parsed_args.interface, e))

            mains_ids = ([utils.get_resource_id(client.job_binaries, m)
                          for m in parsed_args.mains]
                         if parsed_args.mains else None)
            libs_ids = ([utils.get_resource_id(client.job_binaries, m)
                         for m in parsed_args.libs]
                        if parsed_args.libs else None)

            data = client.jobs.create(
                name=parsed_args.name,
                type=parsed_args.type,
                mains=mains_ids,
                libs=libs_ids,
                description=parsed_args.description,
                interface=parsed_args.interface,
                is_public=parsed_args.public,
                is_protected=parsed_args.protected).to_dict()

        _format_job_template_output(data)
        data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)

        return self.dict2columns(data)
def test_child_log_has_level_of_parent_flag(self):
    l = log.getLogger('nova-test.foo')
    self.assertEqual(logging.INFO, l.logger.getEffectiveLevel())
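
# Stdlib illustration of the inheritance the test above relies on: a child
# logger with no explicit level reports its parent's effective level.
import logging

logging.getLogger('nova-test').setLevel(logging.INFO)
child = logging.getLogger('nova-test.foo')
assert child.getEffectiveLevel() == logging.INFO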
class FIPtoUnderlayTestNuage(base_nuage_fip_underlay.NuageFipUnderlayBase):
    LOG = logging.getLogger(__name__)

    # use order of tests as in this file to avoid unnecessary neutron restart
    # unittest.TestLoader.sortTestMethodsUsing(None)

    @classmethod
    def resource_setup(cls):
        super(FIPtoUnderlayTestNuage, cls).resource_setup()

    @nuage_test.header()
    def test_create_external_subnet_without_underlay(self):
        """Create an external FIP subnet without underlay

        without nuage_fip+underlay in .ini
        Response must include underlay = False
        """
        self._verify_create_delete_external_subnet_without_underlay()

    @nuage_test.header()
    def test_create_external_subnet_with_underlay_default_none(self):
        """Create an external FIP subnet with underlay

        without nuage_fip+underlay in .ini
        Response must include the same underlay status as used in creation
        """
        self._verify_create_external_fip_subnet_with_underlay()

    @nuage_test.header()
    def test_show_external_subnet_without_underlay(self):
        """Show an external fip subnet created without underlay

        without nuage_fip_underlay in .ini
        Response must include underlay = False
        """
        self._verify_show_external_subnet_without_underlay()

    @nuage_test.header()
    def test_show_external_subnet_with_underlay(self):
        """Show an external fip subnet created with underlay

        without nuage_fip_underlay in .ini file
        Response must include underlay = False
        """
        self._verify_show_external_subnet_with_underlay()

    @nuage_test.header()
    def test_list_external_subnets_underlay(self):
        """List external fip subnets with underlay

        without nuage_fip_underlay in .ini file
        Response must include underlay True for those subnets created
        with underlay True and False otherwise
        """
        self._verify_list_external_subnets_underlay()

    #
    # Negative test cases
    #
    @nuage_test.header()
    @test.attr(type=['negative'])
    def test_create_external_subnet_with_underlay_invalid_values_neg(self):
        """Try to create an external FIP subnet with invalid values

        for underlay=True/False
        Must fail with proper reason
        """
        ext_network = self._create_network(external=True)
        invalid_underlay_values = ['Ttrue', 'Treu', 'Tru', 'Truet', 'Trrue',
                                   'Truue', 'Truee',
                                   'Flase', 'Falsche', 'Fales', 'Flaes',
                                   'FFalse', 'fFalse']
        subnet_name = data_utils.rand_name('subnet-invalid-underlay-value')
        for underlay in invalid_underlay_values:
            kvargs = {
                'network_id': ext_network['id'],
                'cidr': '135.99.99.0/24',
                'ip_version': self._ip_version,
                'name': subnet_name,
                'underlay': underlay,
            }
            self.assertRaises(exceptions.BadRequest,
                              self.admin_subnets_client.create_subnet,
                              **kvargs)

    @nuage_test.header()
    @test.attr(type=['negative'])
    def test_create_internal_subnet_with_underlay_neg(self):
        """Try to create an internal subnet while specifying underlay

        Must fail
        """
        int_network = self.create_network()
        underlay_states = [False, True]
        for underlay in underlay_states:
            subnet_name = data_utils.rand_name(
                'internal-fip-subnet-with-underlay-neg')
            kvargs = {
                'network_id': int_network['id'],
                'cidr': '135.66.66.0/24',
                'ip_version': self._ip_version,
                'name': subnet_name,
                'underlay': underlay,
            }
            self.assertRaises(exceptions.BadRequest,
                              self.admin_subnets_client.create_subnet,
                              **kvargs)

    @nuage_test.header()
    @test.attr(type=['negative'])
    def test_update_internal_subnet_with_underlay_neg(self):
        """Try to update an internal subnet while specifying underlay

        Must fail: verifies OPENSTACK-722
        """
        int_network = self.create_network()
        subnet_name = data_utils.rand_name(
            'underlay-update-internal-subnet-not-allowed')
        create_body = self.admin_subnets_client.create_subnet(
            network_id=int_network['id'],
            cidr="99.97.95.0/24",
            ip_version=self._ip_version,
            name=subnet_name)
        subnet = create_body['subnet']
        new_name = subnet_name + '-updated'
        kvargs = {
            'name': new_name,
            'underlay': True,
        }
        self.assertRaises(exceptions.BadRequest,
                          self.admin_subnets_client.update_subnet,
                          subnet['id'],
                          **kvargs)
        self.admin_subnets_client.delete_subnet(subnet['id'])

    @nuage_test.header()
    @test.attr(type=['negative'])
    def test_create_external_subnet_with_underlay_invalid_syntax_neg(self):
        """Try to create an external FIP subnet with an invalid underlay key

        Must fail with proper reason
        """
        ext_network = self._create_network(external=True)
        underlay_invalid_syntax = ['Underley', 'Overlay', 'under1ay',
                                   'inderlay', 'overlay', 'ollekenbolleke',
                                   'undarlay', 'anderluy', 'etcetera',
                                   '...', '***']
        subnet_name = data_utils.rand_name('subnet-invalid-underlay-syntax')
        for underlay in underlay_invalid_syntax:
            kvargs = {
                'network_id': ext_network['id'],
                'cidr': '135.99.99.0/24',
                'ip_version': self._ip_version,
                'name': subnet_name,
                underlay: True,
            }
            self.assertRaises(exceptions.BadRequest,
                              self.admin_subnets_client.create_subnet,
                              **kvargs)

    @nuage_test.header()
    @test.attr(type=['negative'])
    def test_create_external_subnet_with_gw_nuage_pat_underlay_true_neg(self):
        """Try to create an external subnet with gateway

        while nuage_pat_underlay = True in .ini file
        Must fail
        """
        self.needs_ini_nuage_fip_underlay(True)
        # TODO: check validity of this test case; not clear as of time
        # of writing
        print("to be completed")

    @nuage_test.header()
    @test.attr(type=['negative'])
    def test_update_external_subnet_with_snat_neg(self):
        self._verify_update_external_subnet_with_underlay_neg()
class UpdateJobTemplate(show.ShowOne):
    """Updates job template"""

    log = logging.getLogger(__name__ + ".UpdateJobTemplate")

    def get_parser(self, prog_name):
        parser = super(UpdateJobTemplate, self).get_parser(prog_name)

        parser.add_argument(
            'job_template',
            metavar="<job-template>",
            help="Name or ID of the job template",
        )
        parser.add_argument(
            '--name',
            metavar="<name>",
            help="New name of the job template",
        )
        parser.add_argument(
            '--description',
            metavar="<description>",
            help='Description of the job template')
        public = parser.add_mutually_exclusive_group()
        public.add_argument(
            '--public',
            action='store_true',
            help='Make the job template public (Visible from other tenants)',
            dest='is_public')
        public.add_argument(
            '--private',
            action='store_false',
            help='Make the job_template private (Visible only from '
                 'this tenant)',
            dest='is_public')
        protected = parser.add_mutually_exclusive_group()
        protected.add_argument(
            '--protected',
            action='store_true',
            help='Make the job template protected',
            dest='is_protected')
        protected.add_argument(
            '--unprotected',
            action='store_false',
            help='Make the job template unprotected',
            dest='is_protected')
        parser.set_defaults(is_public=None, is_protected=None)

        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.data_processing

        jt_id = utils.get_resource_id(client.jobs, parsed_args.job_template)

        update_data = utils.create_dict_from_kwargs(
            name=parsed_args.name,
            description=parsed_args.description,
            is_public=parsed_args.is_public,
            is_protected=parsed_args.is_protected)

        data = client.jobs.update(jt_id, **update_data).job

        _format_job_template_output(data)
        data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)

        return self.dict2columns(data)