def test_collect_all_no_change_softwareconfig(self):
    """A second collect with software-config fixtures reports no changes."""
    collector_kwargs = {
        'ec2': {'requests_impl': test_ec2.FakeRequests},
        'cfn': {
            'requests_impl': test_cfn.FakeRequestsSoftwareConfig(self)},
        'heat': {
            'keystoneclient': test_heat.FakeKeystoneClient(self),
            'heatclient': test_heat.FakeHeatClient(self)
        },
        'request': {'requests_impl': test_request.FakeRequests},
        'zaqar': {
            'keystoneclient': test_zaqar.FakeKeystoneClient(self),
            'zaqarclient': test_zaqar.FakeZaqarClient(self)
        },
    }
    first_changed, first_paths = self._call_collect_all(
        store=True, collector_kwargs_map=collector_kwargs)

    # The software-config collectors surface three extra deployment keys
    # beyond the configured collectors themselves.
    expected = set(cfg.CONF.collectors)
    expected.update(('dep-name1', 'dep-name2', 'dep-name3'))
    self.assertEqual(expected, first_changed)

    # Commit everything so the next collect compares against this state.
    for key in first_changed:
        cache.commit(key)

    second_changed, second_paths = self._call_collect_all(
        store=True, collector_kwargs_map=collector_kwargs)
    self.assertEqual(set(), second_changed)
    self.assertEqual(first_paths, second_paths)
def __main__(args=sys.argv, requests_impl_map=None):
    """Entry point: collect config and either run CONF.command or print it.

    With a command configured, polls forever (unless one_time), running the
    command whenever collected data changes; otherwise dumps the collected
    config as JSON once and exits.
    """
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config")
    log.setup("os-collect-config")

    # Reject collector names that we do not know how to run.
    unknown_collectors = set(CONF.collectors) - set(DEFAULT_COLLECTORS)
    if unknown_collectors:
        raise exc.InvalidArguments(
            'Unknown collectors %s. Valid collectors are: %s' % (
                list(unknown_collectors), DEFAULT_COLLECTORS))

    while True:
        changed, content = collect_all(
            cfg.CONF.collectors,
            store=bool(CONF.command),
            requests_impl_map=requests_impl_map)

        if not CONF.command:
            # No command configured: print the collected config and stop.
            print(json.dumps(content, indent=1))
            break

        if changed:
            env = dict(os.environ)
            env["OS_CONFIG_FILES"] = ':'.join(content)
            logger.info("Executing %s" % CONF.command)
            subprocess.call(CONF.command, env=env, shell=True)
            # Mark every collector's data as seen only after the command ran.
            for collector in cfg.CONF.collectors:
                cache.commit(collector)
        else:
            logger.debug("No changes detected.")

        if CONF.one_time:
            break
        logger.info("Sleeping %.2f seconds.", CONF.polling_interval)
        time.sleep(CONF.polling_interval)
def test_collect_all_no_change(self):
    """Collecting again after committing every key reports nothing changed."""
    first_changed, first_paths = self._call_collect_all(store=True)
    self.assertEqual(set(cfg.CONF.collectors), first_changed)

    # Persist the collected state so the next run compares against it.
    for key in first_changed:
        cache.commit(key)

    second_changed, second_paths = self._call_collect_all(store=True)
    self.assertEqual(set(), second_changed)
    self.assertEqual(first_paths, second_paths)
def test_cache_ignores_json_inequality(self):
    """Key order in the source JSON text must not register as a change."""
    json_a = u'{"a": "value-a", "b": "value-b"}'
    json_b = u'{"b": "value-b", "a": "value-a"}'
    parsed_a = json.loads(json_a)
    parsed_b = json.loads(json_b)
    # Sanity check: the two texts decode to equal dicts.
    self.assertEqual(parsed_a, parsed_b)

    changed, _path = cache.store('content', parsed_a)
    self.assertTrue(changed)
    cache.commit('content')

    # Storing either equivalent value again is a no-op after the commit.
    changed, _path = cache.store('content', parsed_a)
    self.assertFalse(changed)
    changed, _path = cache.store('content', parsed_b)
    self.assertFalse(changed)
def test_collect_all_no_change_softwareconfig(self):
    """Second collect is a no-op, even if the ec2 collector starts failing."""
    collector_kwargs = {
        'ec2': {
            'requests_impl': test_ec2.FakeRequests
        },
        'gcore': {
            'client': test_gcore.GcoreTestHeatResourceClient.from_conf()
        },
        'cfn': {
            'requests_impl': test_cfn.FakeRequestsSoftwareConfig(self)
        },
        'heat': {
            'keystoneclient': test_heat.FakeKeystoneClient(self),
            'heatclient': test_heat.FakeHeatClient(self),
            'discover_class': test_heat.FakeKeystoneDiscover
        },
        'request': {
            'requests_impl': test_request.FakeRequests
        },
        'zaqar': {
            'keystoneclient': test_zaqar.FakeKeystoneClient(self),
            'zaqarclient': test_zaqar.FakeZaqarClient(self),
            'discover_class': test_heat.FakeKeystoneDiscover
        },
    }
    first_changed, first_paths = self._call_collect_all(
        store=True, collector_kwargs_map=collector_kwargs)

    # Software-config collectors add three deployment keys on top of the
    # configured collector names.
    expected = set(cfg.CONF.collectors)
    expected.update(('dep-name1', 'dep-name2', 'dep-name3'))
    self.assertEqual(expected, first_changed)

    # Commit so the next collect compares against this state.
    for key in first_changed:
        cache.commit(key)

    # Swap in a failing ec2 requests implementation to simulate a transient
    # network failure during the second collect.
    collector_kwargs['ec2'] = {'requests_impl': test_ec2.FakeFailRequests}
    second_changed, second_paths = self._call_collect_all(
        store=True, collector_kwargs_map=collector_kwargs)
    self.assertEqual(set(), second_changed)
    # The second collect still includes cached ec2 data despite the network
    # failure.
    self.assertEqual(first_paths, second_paths)
def __main__(args=sys.argv, requests_impl_map=None):
    """Collect config once; run CONF.command on changes or print the result.

    :param args: command line arguments; args[0] (the program name) is
        dropped before parsing.
    :param requests_impl_map: optional mapping of collector name to a
        requests implementation, used by tests to inject fakes.
    """
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config")
    log.setup("os-collect-config")
    (any_changed, content) = collect_all(COLLECTORS,
                                         store=bool(CONF.command),
                                         requests_impl_map=requests_impl_map)
    if CONF.command:
        if any_changed:
            env = dict(os.environ)
            # Expose the collected config file paths to the command.
            env["OS_CONFIG_FILES"] = ':'.join(content)
            logger.info("Executing %s" % CONF.command)
            subprocess.call(CONF.command, env=env, shell=True)
            # Only mark the collected data as seen after the command ran.
            for collector in COLLECTORS:
                cache.commit(collector.name)
        else:
            logger.debug("No changes detected.")
    else:
        # Bug fix: the py2-style `print json.dumps(...)` statement is a
        # SyntaxError on Python 3; use the print() function like the rest
        # of the codebase.
        print(json.dumps(content, indent=1))
def test_cache(self):
    """Walk the store/commit lifecycle: .orig, .last and the meta list."""
    # First store of an unseen key always reports a change.
    changed, path = cache.store('foo', {'a': 1})
    self.assertTrue(changed)
    self.assertTrue(os.path.exists(self.cache_dir))
    self.assertTrue(os.path.exists(path))
    self.assertTrue(os.path.exists('%s.orig' % path))
    self.assertFalse(os.path.exists('%s.last' % path))

    # .orig exists but .last does not yet, so any difference from .orig
    # short-circuits straight to "changed".
    changed, path = cache.store('foo', {'a': 2})
    self.assertTrue(changed)
    with open(path) as current:
        with open('%s.orig' % path) as original:
            self.assertNotEqual(current.read(), original.read())

    # commit snapshots the current contents as .last.
    cache.commit('foo')
    self.assertTrue(os.path.exists('%s.last' % path))

    # Storing identical data after a commit is not a change.
    changed, path = cache.store('foo', {'a': 2})
    self.assertFalse(changed)
    cache.commit('foo')

    # With .last present, the full comparison path runs and spots changes.
    changed, path = cache.store('foo', {'a': 3})
    self.assertTrue(changed)
    self.assertTrue(os.path.exists(path))

    # The meta list records the cached paths as a JSON list.
    list_path = cache.store_meta_list('foo_list', ['foo'])
    self.assertTrue(os.path.exists(list_path))
    with open(list_path) as list_file:
        recorded = json.loads(list_file.read())
    self.assertThat(recorded, matchers.IsInstance(list))
    self.assertIn(path, recorded)
def test_cache(self):
    """Exercise the cache.store/commit lifecycle against a temp cache dir."""
    cache_root = self.useFixture(fixtures.TempDir())
    cache_dir = os.path.join(cache_root.path, 'cache')
    collect.setup_conf()
    cfg.CONF(['--cachedir', cache_dir])
    # Never seen, so changed is expected.
    (changed, path) = cache.store('foo', {'a': 1})
    self.assertTrue(changed)
    self.assertTrue(os.path.exists(cache_dir))
    self.assertTrue(os.path.exists(path))
    orig_path = '%s.orig' % path
    self.assertTrue(os.path.exists(orig_path))
    last_path = '%s.last' % path
    self.assertFalse(os.path.exists(last_path))
    # .orig exists now but not .last so this will shortcut to changed
    (changed, path) = cache.store('foo', {'a': 2})
    self.assertTrue(changed)
    orig_path = '%s.orig' % path
    with open(path) as now:
        with open(orig_path) as then:
            # Bug fix: assertNotEquals is a deprecated alias (removed in
            # Python 3.12); use assertNotEqual like the other tests here.
            self.assertNotEqual(now.read(), then.read())
    # Saves the current copy as .last
    cache.commit('foo')
    last_path = '%s.last' % path
    self.assertTrue(os.path.exists(last_path))
    # We committed this already, so we should have no changes
    (changed, path) = cache.store('foo', {'a': 2})
    self.assertFalse(changed)
    cache.commit('foo')
    # Fully exercising the line-by-line matching now that a .last exists
    (changed, path) = cache.store('foo', {'a': 3})
    self.assertTrue(changed)
    self.assertTrue(os.path.exists(path))
def __main__(args=sys.argv, collector_kwargs_map=None):
    """Daemon entry point: poll collectors and run CONF.command on change.

    Polls with an exponential backoff (reset to 1s whenever changes are
    seen) capped at CONF.polling_interval, and re-execs itself after a
    successful command run so code/config updates take effect.

    :param args: command line arguments; args[0] is dropped before parsing.
    :param collector_kwargs_map: optional mapping of collector name to
        keyword arguments, used by tests to inject fakes.
    :returns: the return code of the last failed command invocation, or 0.
    """
    # Re-exec on SIGHUP so a reload signal picks up new code and config.
    signal.signal(signal.SIGHUP, reexec_self)
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config",
         version=version.version_info.version_string())
    # This resets the logging infrastructure which prevents capturing log
    # output in tests cleanly, so should only be called if there isn't already
    # handlers defined i.e. not in unit tests
    if not log.getLogger(None).logger.handlers:
        log.setup("os-collect-config")
    if CONF.print_cachedir:
        print(CONF.cachedir)
        return
    # Reject collector names we do not know how to run.
    unknown_collectors = set(CONF.collectors) - set(COLLECTORS.keys())
    if unknown_collectors:
        raise exc.InvalidArguments(
            'Unknown collectors %s. Valid collectors are: %s' % (
                list(unknown_collectors), DEFAULT_COLLECTORS))
    if CONF.force:
        # --force implies a single run of the command.
        CONF.set_override('one_time', True)
    exitval = 0
    config_files = CONF.config_file
    # Hash our own config files so we can detect when they change underneath
    # us and re-exec instead of looping on a failing command.
    config_hash = getfilehash(config_files)
    sleep_time = 1
    while True:
        store_and_run = bool(CONF.command and not CONF.print_only)
        (changed_keys, content) = collect_all(
            cfg.CONF.collectors,
            store=store_and_run,
            collector_kwargs_map=collector_kwargs_map)
        if store_and_run:
            if changed_keys or CONF.force:
                # shorter sleeps while changes are detected allows for faster
                # software deployment dependency processing
                sleep_time = 1
                # ignore HUP now since we will reexec after commit anyway
                signal.signal(signal.SIGHUP, signal.SIG_IGN)
                try:
                    call_command(content, CONF.command)
                except subprocess.CalledProcessError as e:
                    exitval = e.returncode
                    logger.error(
                        'Command failed, will not cache new data. %s' % e)
                    if not CONF.one_time:
                        new_config_hash = getfilehash(config_files)
                        if config_hash == new_config_hash:
                            logger.warn(
                                'Sleeping %.2f seconds before re-exec.' %
                                sleep_time)
                            time.sleep(sleep_time)
                        else:
                            # The command failed but the config file has
                            # changed re-exec now as the config file change
                            # may have fixed things.
                            logger.warn('Config changed, re-execing now')
                            config_hash = new_config_hash
                else:
                    # Command succeeded: mark the collected data as seen.
                    for changed in changed_keys:
                        cache.commit(changed)
                    if not CONF.one_time:
                        reexec_self()
            else:
                logger.debug("No changes detected.")
            if CONF.one_time:
                break
            else:
                logger.info("Sleeping %.2f seconds.", sleep_time)
                time.sleep(sleep_time)
                # Exponential backoff, capped at the configured interval.
                sleep_time *= 2
                if sleep_time > CONF.polling_interval:
                    sleep_time = CONF.polling_interval
        else:
            # Print-only mode: dump the collected config as JSON and exit.
            print(json.dumps(content, indent=1))
            break
    return exitval
def __main__(args=sys.argv, collector_kwargs_map=None):
    """Daemon entry point: poll collectors and run CONF.command on change.

    Sleeps with an exponential backoff between CONF.min_polling_interval
    and CONF.polling_interval, optionally splays the first run, and
    re-execs itself if its own config files change.

    :param args: command line arguments; args[0] is dropped before parsing.
    :param collector_kwargs_map: optional mapping of collector name to
        keyword arguments, used by tests to inject fakes.
    :returns: the return code of the last failed command invocation, or 0.
    """
    # Re-exec on SIGHUP so a reload signal picks up new code and config.
    signal.signal(signal.SIGHUP, reexec_self)
    # NOTE(bnemec): We need to exit on SIGPIPEs so systemd can restart us.
    # See lp 1795030
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config",
         version=version.version_info.version_string())
    # This resets the logging infrastructure which prevents capturing log
    # output in tests cleanly, so should only be called if there isn't already
    # handlers defined i.e. not in unit tests
    if not log.getLogger(None).logger.handlers:
        log.setup(CONF, "os-collect-config")
    if CONF.print_cachedir:
        print(CONF.cachedir)
        return
    # Reject collector names we do not know how to run.
    unknown_collectors = set(CONF.collectors) - set(COLLECTORS.keys())
    if unknown_collectors:
        raise exc.InvalidArguments(
            'Unknown collectors %s. Valid collectors are: %s' % (
                list(unknown_collectors), DEFAULT_COLLECTORS))
    if CONF.force:
        # --force implies a single run of the command.
        CONF.set_override('one_time', True)
    if CONF.splay > 0 and not CONF.one_time:
        # sleep splay seconds in the beginning to prevent multiple collect
        # processes from all running at the same time
        time.sleep(random.randrange(0, CONF.splay))
    exitval = 0
    config_files = CONF.config_file
    # Hash our own config files so we can detect external edits and re-exec.
    config_hash = getfilehash(config_files)
    exponential_sleep_time = CONF.min_polling_interval
    while True:
        # shorter sleeps while changes are detected allows for faster
        # software deployment dependency processing
        store_and_run = bool(CONF.command and not CONF.print_only)
        (changed_keys, content) = collect_all(
            cfg.CONF.collectors,
            store=store_and_run,
            collector_kwargs_map=collector_kwargs_map)
        if store_and_run:
            if changed_keys or CONF.force:
                # ignore HUP now since we will reexec after commit anyway
                signal.signal(signal.SIGHUP, signal.SIG_IGN)
                try:
                    call_command(content, CONF.command)
                except subprocess.CalledProcessError as e:
                    exitval = e.returncode
                    logger.error('Command failed, will not cache new data. %s'
                                 % e)
                else:
                    # Command succeeded: mark the collected data as seen.
                    for changed in changed_keys:
                        cache.commit(changed)
                if not CONF.one_time:
                    # Re-exec if our own config files changed underneath us.
                    new_config_hash = getfilehash(config_files)
                    if config_hash != new_config_hash:
                        reexec_self()
            else:
                logger.debug("No changes detected.")
            if CONF.one_time:
                break
            else:
                logger.info("Sleeping %.2f seconds.", exponential_sleep_time)
                time.sleep(exponential_sleep_time)
                # Exponential backoff, capped at the configured interval.
                exponential_sleep_time *= 2
                if exponential_sleep_time > CONF.polling_interval:
                    exponential_sleep_time = CONF.polling_interval
        else:
            # Print-only mode: dump the collected config as JSON and exit.
            print(json.dumps(content, indent=1))
            break
    return exitval
def test_commit_no_cache(self):
    """Committing a key that was never stored is a no-op returning None."""
    result = cache.commit('neversaved')
    self.assertIsNone(result)
def __main__(args=sys.argv, requests_impl_map=None):
    """Daemon entry point: poll collectors and run CONF.command on change.

    Polls at CONF.polling_interval, re-execs itself after a successful
    command run, and re-execs early if its own config files change while
    the command is failing.

    :param args: command line arguments; args[0] is dropped before parsing.
    :param requests_impl_map: optional mapping of collector name to a
        requests implementation, used by tests to inject fakes.
    """
    # Re-exec on SIGHUP so a reload signal picks up new code and config.
    signal.signal(signal.SIGHUP, reexec_self)
    setup_conf()
    CONF(args=args[1:], prog="os-collect-config",
         version=version.version_info.version_string())
    # This resets the logging infrastructure which prevents capturing log
    # output in tests cleanly, so should only be called if there isn't already
    # handlers defined i.e. not in unit tests
    if not log.getLogger(None).logger.handlers:
        log.setup("os-collect-config")
    if CONF.print_cachedir:
        print(CONF.cachedir)
        return
    # Reject collector names we do not know how to run.
    unknown_collectors = set(CONF.collectors) - set(DEFAULT_COLLECTORS)
    if unknown_collectors:
        raise exc.InvalidArguments(
            "Unknown collectors %s. Valid collectors are: %s" % (
                list(unknown_collectors), DEFAULT_COLLECTORS)
        )
    if CONF.force:
        # --force implies a single run of the command.
        CONF.set_override("one_time", True)
    config_files = CONF.config_file
    # Hash our own config files so we can detect external edits.
    config_hash = getfilehash(config_files)
    while True:
        store_and_run = bool(CONF.command and not CONF.print_only)
        (any_changed, content) = collect_all(
            cfg.CONF.collectors,
            store=store_and_run,
            requests_impl_map=requests_impl_map
        )
        if store_and_run:
            if any_changed or CONF.force:
                # ignore HUP now since we will reexec after commit anyway
                signal.signal(signal.SIGHUP, signal.SIG_IGN)
                try:
                    call_command(content, CONF.command)
                except subprocess.CalledProcessError as e:
                    logger.error(
                        "Command failed, will not cache new data. %s" % e)
                    if not CONF.one_time:
                        new_config_hash = getfilehash(config_files)
                        if config_hash == new_config_hash:
                            logger.warn(
                                "Sleeping %.2f seconds before re-exec." %
                                CONF.polling_interval)
                            time.sleep(CONF.polling_interval)
                        else:
                            # The command failed but the config file has
                            # changed re-exec now as the config file change
                            # may have fixed things.
                            logger.warn("Config changed, re-execing now")
                            config_hash = new_config_hash
                else:
                    # Command succeeded: mark every collector's data as seen.
                    for collector in cfg.CONF.collectors:
                        cache.commit(collector)
                    if not CONF.one_time:
                        reexec_self()
            else:
                logger.debug("No changes detected.")
            if CONF.one_time:
                break
            else:
                logger.info("Sleeping %.2f seconds.", CONF.polling_interval)
                time.sleep(CONF.polling_interval)
        else:
            # Print-only mode: dump the collected config as JSON and exit.
            print(json.dumps(content, indent=1))
            break
def test_commit_no_cache(self):
    """Committing a key that was never stored is a no-op returning None."""
    # assertIsNone is the idiomatic None check (matches the sibling test
    # suite) and produces a clearer failure message than
    # assertEqual(None, ...).
    self.assertIsNone(cache.commit('neversaved'))