def test_calls__captureWarnings(self, _logging):
    """
    Verify that start_logging() turns on warning capture via captureWarnings(True).
    """
    logs.start_logging()

    _logging.captureWarnings.assert_called_once_with(True)
def test_root_logger_configured_console(self, getLogger, get):
    """
    Verify the root logger gets a console (stream) handler carrying the
    task log formatter when console logging is configured.
    """
    fake_root = mock.MagicMock(spec=logging.Logger)
    fake_root.manager = mock.MagicMock()
    fake_root.manager.loggerDict = {}

    def fake_getLogger(name=None):
        # No name means the root logger; otherwise hand back (and cache) a
        # per-name mock, mimicking logging's manager behaviour.
        if name is None:
            return fake_root
        return fake_root.manager.loggerDict.setdefault(name, mock.MagicMock())

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # Exactly one handler was attached, and it is a console StreamHandler.
    self.assertEqual(fake_root.addHandler.call_count, 1)
    handler = fake_root.addHandler.mock_calls[0][1][0]
    self.assertTrue(isinstance(handler, logging.StreamHandler))
    # The handler must carry our task log formatter.
    self.assertTrue(isinstance(handler.formatter, logs.TaskLogFormatter))
def test_root_logger_configured_default(self, getLogger, get):
    """
    Verify that by default the root logger gets a syslog handler pointed at
    /dev/log with the daemon facility and the task log formatter.
    """
    fake_root = mock.MagicMock(spec=logging.Logger)
    fake_root.manager = mock.MagicMock()
    fake_root.manager.loggerDict = {}

    def fake_getLogger(name=None):
        # Root logger when unnamed; cached per-name mocks otherwise.
        if name is None:
            return fake_root
        return fake_root.manager.loggerDict.setdefault(name, mock.MagicMock())

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # Exactly one handler must have been attached to the root logger.
    self.assertEqual(fake_root.addHandler.call_count, 1)
    handler = fake_root.addHandler.mock_calls[0][1][0]
    self.assertTrue(isinstance(handler, logs.CompliantSysLogHandler))
    self.assertEqual(handler.address, os.path.join('/', 'dev', 'log'))
    self.assertEqual(handler.facility, logs.CompliantSysLogHandler.LOG_DAEMON)
    # The handler must carry our task log formatter.
    self.assertTrue(isinstance(handler.formatter, logs.TaskLogFormatter))
def test_root_logger_configured_console(self, getLogger, get):
    """
    When console logging is configured, the root logger must receive a
    single StreamHandler whose formatter is the task log formatter.
    """
    root = mock.MagicMock(spec=logging.Logger)
    root.manager = mock.MagicMock()
    root.manager.loggerDict = {}

    def patched_getLogger(name=None):
        if name is None:
            return root
        if name not in root.manager.loggerDict:
            # Cache one mock per logger name, like the real manager.
            root.manager.loggerDict[name] = mock.MagicMock()
        return root.manager.loggerDict[name]

    getLogger.side_effect = patched_getLogger

    logs.start_logging()

    # One handler, of console type, with our formatter.
    self.assertEqual(root.addHandler.call_count, 1)
    installed = root.addHandler.mock_calls[0][1][0]
    self.assertTrue(isinstance(installed, logging.StreamHandler))
    self.assertTrue(isinstance(installed.formatter, logs.TaskLogFormatter))
def test_calls__blacklist_loggers(self, getLogger, get, _blacklist_loggers):
    """
    Verify that start_logging() invokes _blacklist_loggers() exactly once,
    with no arguments.
    """
    logs.start_logging()

    _blacklist_loggers.assert_called_once_with()
def _start_logging():
    """
    Ask Pulp to configure logging, then create the logger this module uses.
    """
    global logger

    logs.start_logging()
    logger = logging.getLogger(__name__)
def test_root_logger_configured(self, getLogger, get):
    """
    Verify the root logger gets a syslog handler at /dev/log (daemon
    facility) whose formatter uses LOG_FORMAT_STRING.
    """
    fake_root = mock.MagicMock(spec=logging.Logger)
    fake_root.manager = mock.MagicMock()
    fake_root.manager.loggerDict = {}

    def fake_getLogger(name=None):
        # Root logger when unnamed; cached per-name mocks otherwise.
        if name is None:
            return fake_root
        return fake_root.manager.loggerDict.setdefault(name, mock.MagicMock())

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # Exactly one syslog handler must have been attached.
    self.assertEqual(fake_root.addHandler.call_count, 1)
    handler = fake_root.addHandler.mock_calls[0][1][0]
    self.assertTrue(isinstance(handler, logs.CompliantSysLogHandler))
    self.assertEqual(handler.address, os.path.join("/", "dev", "log"))
    self.assertEqual(handler.facility, logs.CompliantSysLogHandler.LOG_DAEMON)
    # The formatter must use our format string.
    self.assertEqual(handler.formatter._fmt, logs.LOG_FORMAT_STRING)
def test_calls_simplefilter(self, _warnings, mock_sys):
    """
    Verify start_logging() installs an 'ignore' filter for
    DeprecationWarning when no -W options were given on the command line.
    """
    # Empty warnoptions means the user passed no -W flags.
    mock_sys.warnoptions = False

    logs.start_logging()

    _warnings.simplefilter.assert_called_once_with("ignore", DeprecationWarning)
def setUpClass(cls):
    if not os.path.exists('/tmp/pulp'):
        os.makedirs('/tmp/pulp')
    # Logging must be stopped while the override config is read, then
    # restarted so handlers pick up the test settings.
    stop_logging()
    config.config.read(os.path.join(TEST_DATA_DIR, 'test-override-pulp.conf'))
    start_logging()
    manager_factory.initialize()
    constants.DISTRIBUTION_STORAGE_PATH = TEMP_DISTRO_STORAGE_DIR
def _start_logging():
    """
    Start Pulp's logging, create this module's _logger, and attach an
    INFO-level console handler to the root logger.
    """
    global _logger

    logs.start_logging()
    _logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    _logger.root.addHandler(handler)
def setUpClass(cls):
    if not os.path.exists('/tmp/pulp'):
        os.makedirs('/tmp/pulp')
    # Restart logging around the config read so handlers use test settings.
    stop_logging()
    data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
    config.config.read(os.path.join(data_dir, 'test-override-pulp.conf'))
    start_logging()
    connection.initialize(config.config.get('database', 'name'))
    manager_factory.initialize()
def _start_logging():
    """
    Start Pulp's logging, create this module's logger, and attach an
    INFO-level console handler to the root logger.
    """
    global logger

    logs.start_logging()
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    logger.root.addHandler(handler)
def test_calls__captureWarnings_with_attribute_error(self, _logging):
    """
    start_logging() must swallow AttributeError from captureWarnings()
    (absent on old Pythons); surviving the call is the real assertion here.
    """
    _logging.captureWarnings.side_effect = AttributeError

    # Must not raise even though captureWarnings() blows up.
    logs.start_logging()

    _logging.captureWarnings.assert_called_once_with(True)
def setUpClass(cls):
    if not os.path.exists('/tmp/pulp'):
        os.makedirs('/tmp/pulp')
    # Restart logging around the config read so handlers use test settings.
    stop_logging()
    override = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            '../../../pulp_rpm/test/unit/data',
                            'test-override-pulp.conf')
    config.config.read(override)
    start_logging()
    connection.initialize(config.config.get('database', 'name'))
    manager_factory.initialize()
    constants.DISTRIBUTION_STORAGE_PATH = TEMP_DISTRO_STORAGE_DIR
def load_test_config():
    """
    Register the test override config file (once) and return the config,
    restarting logging around the registration.
    """
    if not os.path.exists('/tmp/pulp'):
        os.makedirs('/tmp/pulp')
    stop_logging()
    try:
        config.add_config_file(os.path.join(DATA_DIR, 'test-override-pulp.conf'))
    except RuntimeError:
        # Already registered by an earlier call; that's fine.
        pass
    start_logging()
    return config.config
def load_test_config():
    """
    Register the test override config files (once) and return the config,
    restarting logging around the registration.
    """
    data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../data')
    if not os.path.exists('/tmp/pulp'):
        os.makedirs('/tmp/pulp')
    stop_logging()
    try:
        config.add_config_file(os.path.join(data_dir, 'test-override-pulp.conf'))
        config.add_config_file(os.path.join(data_dir, 'test-override-repoauth.conf'))
    except RuntimeError:
        # Files already registered by an earlier call.
        pass
    start_logging()
    return config.config
def _start_logging():
    """
    Start Pulp logging, attach an INFO console handler to the root logger,
    and pre-seed sys.warnoptions so Django keeps DeprecationWarning ignored.
    """
    global _logger

    logs.start_logging()
    _logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    _logger.root.addHandler(handler)
    # Django re-enables DeprecationWarning unless sys.warnoptions is
    # non-empty, so emulate '-W ignore::DeprecationWarning' here. The
    # ordering matters: after logs.start_logging() (which also checks
    # warnoptions) but before Django is initialized.
    sys.warnoptions.append('ignore::DeprecationWarning')
def load_test_config():
    """
    Register the test override config files (once) and return the config,
    restarting logging around the registration.
    """
    data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
    if not os.path.exists('/tmp/pulp'):
        os.makedirs('/tmp/pulp')
    stop_logging()
    try:
        config.add_config_file(os.path.join(data_dir, 'test-override-pulp.conf'))
        config.add_config_file(os.path.join(data_dir, 'test-override-repoauth.conf'))
    except RuntimeError:
        # Files already registered by an earlier call.
        pass
    start_logging()
    return config.config
def setUpClass(cls):
    """
    Read the test pulp.conf, restart logging, reset the storage directory
    and initialize the database connection and managers.
    """
    if not os.path.exists(cls.TMP_ROOT):
        os.makedirs(cls.TMP_ROOT)
    stop_logging()
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data', 'pulp.conf')
    pulp_conf.read(path)
    start_logging()
    storage_dir = pulp_conf.get('server', 'storage_dir')
    # BUGFIX: shutil.rmtree() does not expand shell globs, so the previous
    # rmtree(storage_dir + '/*', ignore_errors=True) silently removed
    # nothing. Remove the whole tree and recreate it empty instead.
    shutil.rmtree(storage_dir, ignore_errors=True)
    os.makedirs(storage_dir)
    name = pulp_conf.get('database', 'name')
    connection.initialize(name)
    managers.initialize()
def _load_test_config():
    """
    Point the configuration at the unit-test database and storage path,
    then freeze it so tests cannot accidentally make global changes.
    """
    stop_logging()
    config.config.set('database', 'name', 'pulp_unittest')
    config.config.set('server', 'storage_dir', '/tmp/pulp')
    # Freeze the config: from here on any attempt to mutate it is routed
    # through _enforce_config instead of silently changing global state.
    config.config.set = _enforce_config
    config.load_configuration = _enforce_config
    config.__setattr__ = _enforce_config
    config.config.__setattr__ = _enforce_config
    start_logging()
def setUpClass(cls):
    """
    Make Celery synchronous, read the test pulp.conf, restart logging,
    reset the storage directory and initialize the managers.
    """
    # This will make Celery tasks run synchronously
    celery_instance.celery.conf.CELERY_ALWAYS_EAGER = True
    if not os.path.exists(cls.TMP_ROOT):
        os.makedirs(cls.TMP_ROOT)
    stop_logging()
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data', 'pulp.conf')
    pulp_conf.read(path)
    start_logging()
    storage_dir = pulp_conf.get('server', 'storage_dir')
    # BUGFIX: shutil.rmtree() does not expand shell globs, so the previous
    # rmtree(storage_dir + '/*', ignore_errors=True) silently removed
    # nothing. Remove the whole tree and recreate it empty instead.
    shutil.rmtree(storage_dir, ignore_errors=True)
    os.makedirs(storage_dir)
    managers.initialize()
def wsgi_application():
    """
    Application factory to create, configure, and return a WSGI application using the django framework

    :return: wsgi application callable
    """
    try:
        # Create the logger first so the except clause can use it even if
        # start_logging()/initialize() fail.
        logger = logging.getLogger(__name__)
        logs.start_logging()
        initialization.initialize()
    # NOTE: Python 2 'except Class, name' syntax — not valid in Python 3.
    except initialization.InitializationException, e:
        logger.fatal('*************************************************************')
        logger.fatal('The Pulp Puppet Forge server failed to start due to the following reasons:')
        # logger.exception() also records the active traceback.
        logger.exception(' ' + e.message)
        logger.fatal('*************************************************************')
        raise e
def setUpClass(cls):
    """
    Read the test pulp.conf, restart logging, reset the storage directory
    and initialize the database connection and managers.
    """
    if not os.path.exists(cls.TMP_ROOT):
        os.makedirs(cls.TMP_ROOT)
    stop_logging()
    path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'data',
        'pulp.conf')
    pulp_conf.read(path)
    start_logging()
    storage_dir = pulp_conf.get('server', 'storage_dir')
    # BUGFIX: shutil.rmtree() does not expand shell globs, so the previous
    # rmtree(storage_dir + '/*', ignore_errors=True) silently removed
    # nothing. Remove the whole tree and recreate it empty instead.
    shutil.rmtree(storage_dir, ignore_errors=True)
    os.makedirs(storage_dir)
    name = pulp_conf.get('database', 'name')
    connection.initialize(name)
    managers.initialize()
def setUpClass(cls):
    """
    Make Celery synchronous, read the test pulp.conf, restart logging,
    reset the storage directory and initialize the managers.
    """
    # This will make Celery tasks run synchronously
    celery_instance.celery.conf.CELERY_ALWAYS_EAGER = True
    if not os.path.exists(cls.TMP_ROOT):
        os.makedirs(cls.TMP_ROOT)
    stop_logging()
    path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'data',
        'pulp.conf')
    pulp_conf.read(path)
    start_logging()
    storage_dir = pulp_conf.get('server', 'storage_dir')
    # BUGFIX: shutil.rmtree() does not expand shell globs, so the previous
    # rmtree(storage_dir + '/*', ignore_errors=True) silently removed
    # nothing. Remove the whole tree and recreate it empty instead.
    shutil.rmtree(storage_dir, ignore_errors=True)
    os.makedirs(storage_dir)
    managers.initialize()
def test_deprecation_warnings_suppressed(self, mock_sys):
    """
    A DeprecationWarning raised before start_logging() is recorded; one
    raised afterwards is suppressed by the filter start_logging() installs.
    """
    with warnings.catch_warnings(record=True) as caught:
        # Reset the filter so DeprecationWarnings are visible again:
        # https://docs.python.org/2/library/warnings.html#updating-code-for-new-versions-of-python
        warnings.simplefilter('default')
        # Emitted before start_logging(): must be recorded.
        warnings.warn("caught", DeprecationWarning)
        mock_sys.warnoptions = False
        logs.start_logging()
        # Emitted after start_logging(): must be ignored.
        warnings.warn("ignored", DeprecationWarning)
        # Exactly one warning recorded, and it is the expected one.
        self.assertEqual(len(caught), 1)
        self.assertEqual(repr(caught[0].message),
                         repr(DeprecationWarning("caught")))
def _initialize_web_services(): """ This function initializes Pulp for webservices. """ # This initialization order is very sensitive, and each touches a number of # sub-systems in pulp. If you get this wrong, you will have pulp tripping # over itself on start up. global _IS_INITIALIZED, STACK_TRACER if _IS_INITIALIZED: return logs.start_logging() # Run the common initialization code that all processes should share. This will start the # database connection, initialize plugins, and initialize the manager factory. initialization.initialize() # configure agent services AgentServices.init() # Verify the database has been migrated to the correct version. This is # very likely a reason the server will fail to start. try: migration_models.check_package_versions() except Exception: msg = "The database has not been migrated to the current version. " msg += "Run pulp-manage-db and restart the application." raise initialization.InitializationException(msg), None, sys.exc_info()[2] # There's a significantly smaller chance the following calls will fail. # The previous two are likely user errors, but the remainder represent # something gone horribly wrong. As such, I'm not going to account for each # and instead simply let the exception itself bubble up. # start agent services AgentServices.start() # If we got this far, it was successful, so flip the flag _IS_INITIALIZED = True
def _initialize_web_services(): """ This function initializes Pulp for webservices. """ # This initialization order is very sensitive, and each touches a number of # sub-systems in pulp. If you get this wrong, you will have pulp tripping # over itself on start up. global _IS_INITIALIZED, STACK_TRACER if _IS_INITIALIZED: return logs.start_logging() # Run the common initialization code that all processes should share. This will start the # database connection, initialize plugins, and initialize the manager factory. initialization.initialize() # configure agent services AgentServices.init() # Verify the database has been migrated to the correct version. This is # very likely a reason the server will fail to start. try: migration_models.check_package_versions() except Exception: msg = 'The database has not been migrated to the current version. ' msg += 'Run pulp-manage-db and restart the application.' raise initialization.InitializationException(msg), None, sys.exc_info()[2] # There's a significantly smaller chance the following calls will fail. # The previous two are likely user errors, but the remainder represent # something gone horribly wrong. As such, I'm not going to account for each # and instead simply let the exception itself bubble up. # start agent services AgentServices.start() # If we got this far, it was successful, so flip the flag _IS_INITIALIZED = True
def test_log_level_invalid(self, getLogger, get):
    """
    Test that we still default to INFO if the user sets some non-existing log level.
    """
    root_logger = mock.MagicMock(spec=logging.Logger)
    root_logger.manager = mock.MagicMock()
    root_logger.manager.loggerDict = {}

    def fake_getLogger(name=None):
        if name is None:
            return root_logger
        root_logger.manager.loggerDict[name] = mock.MagicMock()
        return root_logger.manager.loggerDict[name]

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # BUGFIX: assert_has_calls() expects a list of calls; passing a bare
    # mock.call object makes the assertion vacuous.
    get.assert_has_calls([mock.call('server', 'log_level')])
    # We should have defaulted
    root_logger.setLevel.assert_called_once_with(logging.INFO)
def test_log_level_set(self, getLogger, get):
    """
    Verify that a user-configured log level is applied to the root logger.
    """
    fake_root = mock.MagicMock(spec=logging.Logger)
    fake_root.manager = mock.MagicMock()
    fake_root.manager.loggerDict = {}

    def fake_getLogger(name=None):
        if name is None:
            return fake_root
        fake_root.manager.loggerDict[name] = mock.MagicMock()
        return fake_root.manager.loggerDict[name]

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # The config was consulted for the log level exactly once...
    get.assert_called_once_with('server', 'log_level')
    # ...and the user's choice (ERROR, via the patched config) was applied.
    fake_root.setLevel.assert_called_once_with(logging.ERROR)
def test_log_level_set(self, getLogger, get):
    """
    Test that we correctly allow users to set their log level.
    """
    root_logger = mock.MagicMock(spec=logging.Logger)
    root_logger.manager = mock.MagicMock()
    root_logger.manager.loggerDict = {}

    def fake_getLogger(name=None):
        if name is None:
            return root_logger
        root_logger.manager.loggerDict[name] = mock.MagicMock()
        return root_logger.manager.loggerDict[name]

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # BUGFIX: assert_has_calls() expects a list of calls; passing a bare
    # mock.call object makes the assertion vacuous.
    get.assert_has_calls([mock.call('server', 'log_level')])
    # We should have used the user's setting
    root_logger.setLevel.assert_called_once_with(logging.ERROR)
def wsgi_application():
    """
    Application factory to create, configure, and return a WSGI application using the django framework

    :return: wsgi application callable
    """
    try:
        # Create the logger first so the except clause can use it even if
        # start_logging()/initialize() fail.
        logger = logging.getLogger(__name__)
        logs.start_logging()
        initialization.initialize()
    # NOTE: Python 2 'except Class, name' syntax — not valid in Python 3.
    except initialization.InitializationException, e:
        logger.fatal(
            '*************************************************************')
        logger.fatal(
            'The Pulp Puppet Forge server failed to start due to the following reasons:'
        )
        # logger.exception() also records the active traceback.
        logger.exception(' ' + e.message)
        logger.fatal(
            '*************************************************************')
        raise e
def test_log_level_invalid(self, getLogger, get):
    """
    A nonsense configured log level must fall back to the INFO default.
    """
    fake_root = mock.MagicMock(spec=logging.Logger)
    fake_root.manager = mock.MagicMock()
    fake_root.manager.loggerDict = {}

    def fake_getLogger(name=None):
        if name is None:
            return fake_root
        fake_root.manager.loggerDict[name] = mock.MagicMock()
        return fake_root.manager.loggerDict[name]

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # The config was consulted for the log level exactly once...
    get.assert_called_once_with('server', 'log_level')
    # ...and the invalid value was replaced by the INFO default.
    fake_root.setLevel.assert_called_once_with(logging.INFO)
def test_log_level_unset(self, getLogger, get):
    """
    With no configured log level, the default level must be applied.
    """
    fake_root = mock.MagicMock(spec=logging.Logger)
    fake_root.manager = mock.MagicMock()
    fake_root.manager.loggerDict = {}

    def fake_getLogger(name=None):
        # Root logger when unnamed; cached per-name mocks otherwise.
        if name is None:
            return fake_root
        return fake_root.manager.loggerDict.setdefault(name, mock.MagicMock())

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # The config was consulted for the log level exactly once...
    get.assert_called_once_with('server', 'log_level')
    # ...and the documented default was applied.
    fake_root.setLevel.assert_called_once_with(logs.DEFAULT_LOG_LEVEL)
def _load_test_config():
    """
    Load the test configuration, reconfigure logging, and block any further
    config changes for the duration of the test run.
    """
    # Never read the real server.conf during tests.
    block_load_conf()
    # Temporarily re-allow config mutation so the defaults can be loaded.
    restore_config_attrs()
    # Reload configuration from scratch.
    config.load_configuration()
    # Point at the unit-test database and storage location.
    config.config.set('database', 'name', 'pulp_unittest')
    config.config.set('server', 'storage_dir', '/tmp/pulp')
    # Restart logging so handlers reflect the test configuration.
    stop_logging()
    start_logging()
    # Re-freeze the config against in-place modification.
    override_config_attrs()
def test_log_type_invalid(self, getLogger, get):
    """
    Test that we still default to syslog if the user sets some non-existing log type.
    """
    root_logger = mock.MagicMock(spec=logging.Logger)
    root_logger.manager = mock.MagicMock()
    root_logger.manager.loggerDict = {}

    def fake_getLogger(name=None):
        if name is None:
            return root_logger
        root_logger.manager.loggerDict[name] = mock.MagicMock()
        return root_logger.manager.loggerDict[name]

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # BUGFIX: assert_has_calls() expects a list of calls; passing a bare
    # mock.call object makes the assertion vacuous.
    get.assert_has_calls([mock.call('server', 'log_type')])
    # BUGFIX: assert_called_once() does not exist on older mock releases
    # (the call is an auto-created Mock attribute and silently passes);
    # check call_count explicitly, matching the other tests.
    self.assertEqual(root_logger.addHandler.call_count, 1)
    # We should have defaulted to the syslog handler.
    root_handler = root_logger.addHandler.mock_calls[0][1][0]
    self.assertTrue(isinstance(root_handler, logs.CompliantSysLogHandler))
def setUpClass(cls):
    """
    Make Celery synchronous, stub resource reservation, read the test
    pulp.conf, restart logging, reset storage, and initialize db/managers.
    """
    # This will make Celery tasks run synchronously
    celery_instance.celery.conf.CELERY_ALWAYS_EAGER = True
    cls.reserve_resources_patch = mock.patch(
        'pulp.server.async.tasks._reserve_resource.'
        'apply_async')
    mock_patch = cls.reserve_resources_patch.start()
    mock_patch.return_value.get.return_value = 'some_queue'
    if not os.path.exists(cls.TMP_ROOT):
        os.makedirs(cls.TMP_ROOT)
    stop_logging()
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data', 'pulp.conf')
    pulp_conf.read(path)
    start_logging()
    storage_dir = pulp_conf.get('server', 'storage_dir')
    # BUGFIX: shutil.rmtree() does not expand shell globs, so the previous
    # rmtree(storage_dir + '/*', ignore_errors=True) silently removed
    # nothing. Remove the whole tree and recreate it empty instead.
    shutil.rmtree(storage_dir, ignore_errors=True)
    os.makedirs(storage_dir)
    name = pulp_conf.get('database', 'name')
    connection.initialize(name)
    managers.initialize()
def test_log_type_set(self, getLogger, get):
    """
    Test that we correctly allow users to set their log type.
    """
    root_logger = mock.MagicMock(spec=logging.Logger)
    root_logger.manager = mock.MagicMock()
    root_logger.manager.loggerDict = {}

    def fake_getLogger(name=None):
        if name is None:
            return root_logger
        root_logger.manager.loggerDict[name] = mock.MagicMock()
        return root_logger.manager.loggerDict[name]

    getLogger.side_effect = fake_getLogger

    logs.start_logging()

    # BUGFIX: assert_has_calls() expects a list of calls; passing a bare
    # mock.call object makes the assertion vacuous.
    get.assert_has_calls([mock.call('server', 'log_type')])
    # BUGFIX: assert_called_once() does not exist on older mock releases
    # (the call is an auto-created Mock attribute and silently passes);
    # check call_count explicitly, matching the other tests.
    self.assertEqual(root_logger.addHandler.call_count, 1)
    # We should have used the user's setting (console/stream handler).
    root_handler = root_logger.addHandler.mock_calls[0][1][0]
    self.assertTrue(isinstance(root_handler, logging.StreamHandler))
def setUpClass(cls):
    """
    Make Celery synchronous, stub resource reservation, read the test
    pulp.conf, restart logging, reset storage, and initialize db/managers.
    """
    # This will make Celery tasks run synchronously
    celery_instance.celery.conf.CELERY_ALWAYS_EAGER = True
    cls.reserve_resources_patch = mock.patch('pulp.server.async.tasks._reserve_resource.'
                                             'apply_async')
    mock_patch = cls.reserve_resources_patch.start()
    mock_patch.return_value.get.return_value = 'some_queue'
    if not os.path.exists(cls.TMP_ROOT):
        os.makedirs(cls.TMP_ROOT)
    stop_logging()
    path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'data',
        'pulp.conf')
    pulp_conf.read(path)
    start_logging()
    storage_dir = pulp_conf.get('server', 'storage_dir')
    # BUGFIX: shutil.rmtree() does not expand shell globs, so the previous
    # rmtree(storage_dir + '/*', ignore_errors=True) silently removed
    # nothing. Remove the whole tree and recreate it empty instead.
    shutil.rmtree(storage_dir, ignore_errors=True)
    os.makedirs(storage_dir)
    name = pulp_conf.get('database', 'name')
    connection.initialize(name)
    managers.initialize()
else: return self.handle() return process(self.processors) import web web.application.handle_with_processors = _handle_with_processors from pulp.server import config # automatically loads config from pulp.server import logs # We need to read the config, start the logging, and initialize the db # connection prior to any other imports, since some of the imports will invoke # setup methods. logs.start_logging() from pulp.server import initialization from pulp.server.agent.direct.services import Services as AgentServices from pulp.server.debugging import StacktraceDumper from pulp.server.db.migrate import models as migration_models from pulp.server.webservices.controllers import ( consumer_groups, consumers, contents, dispatch, events, permissions, plugins, repo_groups, repositories, roles, root_actions, status, users) from pulp.server.webservices.middleware.exception import ExceptionHandlerMiddleware from pulp.server.webservices.middleware.postponed import PostponedOperationMiddleware # constants and application globals -------------------------------------------- URLS = ( # Please keep the following in alphabetical order.
return self.handle() return process(self.processors) import web web.application.handle_with_processors = _handle_with_processors from pulp.server import config # automatically loads config from pulp.server import logs # We need to read the config, start the logging, and initialize the db # connection prior to any other imports, since some of the imports will invoke # setup methods. logs.start_logging() from pulp.server import initialization from pulp.server.agent.direct.services import Services as AgentServices from pulp.server.debugging import StacktraceDumper from pulp.server.db.migrate import models as migration_models from pulp.server.webservices.controllers import (consumer_groups, consumers, contents, repo_groups, repositories) from pulp.server.webservices.middleware.exception import ExceptionHandlerMiddleware from pulp.server.webservices.middleware.postponed import PostponedOperationMiddleware from pulp.server.webservices.middleware.framework_router import FrameworkRoutingMiddleware from pulp.server.webservices.wsgi import application as django_application # constants and application globals --------------------------------------------