def test_test_run_time(self):
    """Config test exceeding max_test_duration should disable run_config_test and emit an event."""
    container = NginxManager()
    container._discover_objects()
    nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]]
    cfg_collector = nginx_obj.collectors[0]
    assert_that(nginx_obj.run_config_test, equal_to(True))

    # set maximum run time for test to 0.0
    context.app_config['containers']['nginx']['max_test_duration'] = 0.0

    # running collect won't do anything until the config changes
    cfg_collector.collect(no_delay=True)
    assert_that(nginx_obj.run_config_test, equal_to(True))

    # change the collector's previous files record so that it will call full_parse
    cfg_collector.previous['files'] = {}
    # avoid restarting the object for testing
    cfg_collector.previous['checksum'] = None

    # running collect should now cause the run_time to exceed 0.0, rendering run_config_test False
    cfg_collector.collect(no_delay=True)
    assert_that(nginx_obj.run_config_test, equal_to(False))

    events = nginx_obj.eventd.current.values()
    messages = []
    for event in events:
        messages.append(event.message)
    assert_that(messages, has_item(starts_with('/usr/sbin/nginx -t -c /etc/nginx/nginx.conf took')))
def test_plus_status(self): time.sleep(1) # Give N+ some time to start container = NginxManager() container._discover_objects() assert_that(container.objects.objects_by_type[container.type], has_length(1)) # get nginx object nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]] # get metrics collector - the second from the list collectors = nginx_obj.collectors metrics_collector = collectors[1] # run plus status - twice, because counters will appear only on the second run metrics_collector.plus_status() time.sleep(1) metrics_collector.plus_status() # check counters metrics = nginx_obj.statsd.current assert_that(metrics, has_item('counter')) counters = metrics['counter'] assert_that(counters, has_item('nginx.http.conn.accepted')) assert_that(counters, has_item('nginx.http.request.count')) assert_that(counters, has_item('nginx.http.conn.dropped')) # check gauges assert_that(metrics, has_item('gauge')) gauges = metrics['gauge'] assert_that(gauges, has_item('nginx.http.conn.active')) assert_that(gauges, has_item('nginx.http.conn.current')) assert_that(gauges, has_item('nginx.http.conn.idle')) assert_that(gauges, has_item('nginx.http.request.current'))
def test_bad_plus_status_discovery_with_config(self):
    """Configured plus/stub status urls should be probed in the documented order."""
    amplify.agent.common.context.context.app_config['nginx']['plus_status'] = '/foo_plus'
    amplify.agent.common.context.context.app_config['nginx']['stub_status'] = '/foo_basic'
    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_bad_status.conf')
    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.objects_by_type[container.type], has_length(1))

    # self.http_request should look like this
    # [
    #     first - internal plus statuses
    #     'http://127.0.0.1:82/plus_status', 'https://127.0.0.1:82/plus_status',
    #     'http://127.0.0.1/foo_plus', 'https://127.0.0.1/foo_plus',
    #
    #     then external plus statuses
    #     'http://bad.status.naas.nginx.com:82/plus_status', 'https://bad.status.naas.nginx.com:82/plus_status',
    #
    #     finally - stub statuses
    #     'http://127.0.0.1:82/basic_status', 'https://127.0.0.1:82/basic_status',
    #     'http://127.0.0.1/foo_basic', 'https://127.0.0.1/foo_basic'
    # ]
    assert_that(self.http_requests[2], equal_to('http://127.0.0.1/foo_plus'))
    assert_that(self.http_requests[-2], equal_to('http://127.0.0.1/foo_basic'))
def test_plus_status_priority(self):
    """
    Checks that if we can reach plus status then we don't use stub_status
    """
    time.sleep(1)  # Give N+ some time to start
    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.objects_by_type[container.type], has_length(1))

    # get nginx object
    nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]]

    # check that it has n+ status and stub_status enabled
    assert_that(nginx_obj.plus_status_enabled, equal_to(True))
    assert_that(nginx_obj.stub_status_enabled, equal_to(True))

    # get metrics collector - the second from the list
    collectors = nginx_obj.collectors
    metrics_collector = collectors[1]

    # run status twice
    metrics_collector.status()
    time.sleep(1)
    metrics_collector.status()

    # check gauges - we shouldn't see request.writing/reading here, because n+ status doesn't have those
    metrics = nginx_obj.statsd.current
    assert_that(metrics, has_item('gauge'))
    gauges = metrics['gauge']
    assert_that(gauges, not_(has_item('nginx.http.request.writing')))
    assert_that(gauges, not_(has_item('nginx.http.request.reading')))
def test_collect(self):
    """Running the config collector should populate the object's configd payload."""
    manager = NginxManager()
    manager._discover_objects()
    object_ids = manager.objects.objects_by_type[manager.type]
    nginx_obj = manager.objects.objects[object_ids[0]]
    config_collector = nginx_obj.collectors[0]
    # run collect
    config_collector.collect()
    assert_that(nginx_obj.configd.current, not_(empty()))
def test_bad_stub_status_discovery_with_config(self):
    """A stub_status path set in app_config should be probed over http and https."""
    amplify.agent.common.context.context.app_config['nginx']['stub_status'] = '/foo_basic'
    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_bad_status.conf')

    manager = NginxManager()
    manager._discover_objects()
    discovered = manager.objects.objects_by_type[manager.type]
    assert_that(discovered, has_length(1))

    # the last two requests are the configured stub_status url, http then https
    assert_that(self.http_requests[-2], equal_to('http://127.0.0.1/foo_basic'))
    assert_that(self.http_requests[-1], equal_to('https://127.0.0.1/foo_basic'))
def test_two_instances(self):
    """Starting a second nginx should add a second object while keeping the first."""
    manager = NginxManager()
    manager._discover_objects()
    first_obj = manager.objects.find_all(types=manager.types)[0]

    self.start_second_nginx()
    manager._discover_objects()

    found = manager.objects.find_all(types=manager.types)
    assert_that(found, has_length(2))
    local_ids = [nginx.local_id for nginx in found]
    assert_that(local_ids, has_item(first_obj.local_id))
def test_collect_meta(self):
    """Meta collector should gather the full set of nginx meta keys.

    Bug fix: the original ``assert_that(not_(collector.in_container))`` passed
    vacuously — with a single argument, hamcrest's ``assert_that`` only
    truth-tests it, and a ``not_`` matcher object is always truthy.  The intent
    (verify we are not running in a container) is asserted explicitly below.
    """
    container = NginxManager()
    container._discover_objects()
    nginx_obj = container.objects.find_all(types=container.types)[0]
    collector = NginxMetaCollector(object=nginx_obj, interval=nginx_obj.intervals['meta'])
    assert_that(collector.in_container, equal_to(False))
    collector.collect()
    assert_that(nginx_obj.metad.current, contains_inanyorder(
        'type', 'local_id', 'root_uuid', 'running', 'stub_status_enabled',
        'status_module_enabled', 'ssl', 'stub_status_url', 'plus_status_url',
        'version', 'plus', 'configure', 'packages', 'path',
        'built_from_source', 'parent_hostname', 'start_time', 'pid'
    ))
def test_bad_plus_status_discovery(self):
    """With an unreachable internal status url, only the external url should be kept."""
    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_bad_status.conf')
    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.objects_by_type[container.type], has_length(1))

    # get nginx object
    nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]]

    # check all plus status urls
    assert_that(nginx_obj.plus_status_enabled, equal_to(True))
    assert_that(nginx_obj.plus_status_internal_url, equal_to(None))
    assert_that(nginx_obj.plus_status_external_url, equal_to('http://bad.status.naas.nginx.com:82/plus_status'))
def test_ssl_config_doesnt_work_if_ssl_disabled(self):
    """With upload_ssl disabled, no ssl certificates should be collected."""
    # disable ssl upload (original comment said "set upload_ssl to True", but the code sets False)
    context.app_config['containers']['nginx']['upload_ssl'] = False
    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.objects_by_type[container.type], has_length(1))

    # get nginx object
    nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]]
    cfg_collector = nginx_obj.collectors[0]
    cfg_collector.collect()
    config = nginx_obj.configd.current
    assert_that(config['data']['ssl_certificates'], has_length(0))
def test_plus_status_discovery(self):
    """
    Checks that for plus nginx we collect two status urls:
    - one for web link (with server name)
    - one for agent purposes (local url)
    """
    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.objects_by_type[container.type], has_length(1))

    # get nginx object
    nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]]

    # check all plus status urls
    assert_that(nginx_obj.plus_status_enabled, equal_to(True))
    assert_that(nginx_obj.plus_status_internal_url, equal_to('https://127.0.0.1:443/plus_status'))
    assert_that(nginx_obj.plus_status_external_url, equal_to('http://status.naas.nginx.com:443/plus_status_bad'))
def test_plus_status_cache_limit(self): time.sleep(1) # Give N+ some time to start container = NginxManager() container._discover_objects() assert_that(container.objects.objects_by_type[container.type], has_length(1)) # get nginx object nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]] # get metrics collector - the third in the list metrics_collector = nginx_obj.collectors[2] # run plus status - 4 times for x in xrange(4): metrics_collector.plus_status() time.sleep(1) assert_that(context.plus_cache['https://127.0.0.1:443/plus_status'], has_length(3))
def test_plus_status_cache(self): time.sleep(1) # Give N+ some time to start container = NginxManager() container._discover_objects() assert_that(container.objects.objects_by_type[container.type], has_length(1)) # get nginx object nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]] # get metrics collector - the third in the list metrics_collector = nginx_obj.collectors[2] # run plus status - twice, because counters will appear only on the second run metrics_collector.plus_status() time.sleep(1) metrics_collector.plus_status() assert_that(context.plus_cache['https://127.0.0.1:443/plus_status'], not_(has_length(0)))
def test_discover(self):
    """After plus status runs, PlusManager should discover the plus objects."""
    nginx_manager = NginxManager()
    nginx_manager._discover_objects()
    assert_that(nginx_manager.objects.objects_by_type[nginx_manager.type], has_length(1))

    # get nginx object
    nginx_obj = nginx_manager.objects.objects[nginx_manager.objects.objects_by_type[nginx_manager.type][0]]

    # get metrics collector - the third in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus status - twice, because counters will appear only on the second run
    metrics_collector.plus_status()
    time.sleep(1)
    metrics_collector.plus_status()

    plus_manager = PlusManager()
    plus_manager._discover_objects()
    assert_that(plus_manager.objects.find_all(types=plus_manager.types), has_length(2))
def test_reload(self):
    """After a reload the object keeps its master pid but gets new workers."""
    old_master, old_workers = self.get_master_workers()

    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.find_all(types=container.types), has_length(1))
    obj = container.objects.find_all(types=container.types)[0]
    # The following assertion is unreliable for some reason.
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, equal_to(old_workers))

    self.reload_nginx()
    new_master, new_workers = self.get_master_workers()
    # reload keeps the same master process
    assert_that(new_master, equal_to(old_master))

    container._discover_objects()
    obj = container.objects.find_all(types=container.types)[0]
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, not_(equal_to(old_workers)))
    assert_that(obj.workers, equal_to(new_workers))
def test_restart(self):
    """After a restart the object gets a new master pid and new workers."""
    old_master, old_workers = self.get_master_workers()

    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.find_all(types=container.types), has_length(1))
    obj = container.objects.find_all(types=container.types)[0]
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, equal_to(old_workers))

    self.restart_nginx()
    new_master, new_workers = self.get_master_workers()

    container._discover_objects()
    assert_that(container.objects.find_all(types=container.types), has_length(1))
    obj = container.objects.find_all(types=container.types)[0]
    assert_that(obj.pid, not_(equal_to(old_master)))
    assert_that(obj.pid, equal_to(new_master))
    assert_that(obj.workers, not_(equal_to(old_workers)))
    assert_that(obj.workers, equal_to(new_workers))
def test_test_run_time(self):
    """Config test exceeding max_test_duration should disable run_config_test and emit an event."""
    container = NginxManager()
    container._discover_objects()
    nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]]
    collectors = nginx_obj.collectors
    # NOTE(review): the config collector is taken at index 2 here, while sibling
    # suites use index 0 — confirm collector ordering for this object type.
    cfg_collector = collectors[2]
    assert_that(nginx_obj.run_config_test, equal_to(True))

    # set maximum run time for test to 0.0
    context.app_config['containers']['nginx']['max_test_duration'] = 0.0

    # run collect
    cfg_collector.collect()
    assert_that(nginx_obj.run_config_test, equal_to(False))

    events = nginx_obj.eventd.current.values()
    messages = []
    for event in events:
        messages.append(event.message)
    assert_that(messages, has_item(starts_with('/usr/sbin/nginx -t -c /etc/nginx/nginx.conf took')))
def test_api_discovery(self):
    """
    Checks that for plus nginx we collect two status urls:
    - one for web link (with server name)
    - one for agent purposes (local url)
    """
    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # get nginx object
    nginx_obj = manager.objects.objects[manager.objects.objects_by_type[manager.type][0]]

    # check all plus status urls
    assert_that(nginx_obj.api_enabled, equal_to(True))
    assert_that(nginx_obj.api_internal_url, equal_to('https://127.0.0.1:443/api'))
    assert_that(nginx_obj.api_external_url, equal_to('https://status.naas.nginx.com:443/api_bad'))
def test_reload(self):
    """After a reload the object keeps its master pid but gets new workers."""
    old_master, old_workers = self.get_master_workers()

    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.find_all(types=container.types), has_length(1))
    obj = container.objects.find_all(types=container.types)[0]
    # The following assertion is unreliable for some reason.
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, equal_to(old_workers))

    self.reload_nginx()
    new_master, new_workers = self.get_master_workers()
    # reload keeps the same master process
    assert_that(new_master, equal_to(old_master))

    container._discover_objects()
    obj = container.objects.find_all(types=container.types)[0]
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, not_(equal_to(old_workers)))
    assert_that(obj.workers, equal_to(new_workers))
def test_plus_api_cache(self):
    """Running plus_api should populate the shared plus cache for the api url."""
    time.sleep(1)
    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # get nginx object
    nginx_obj = manager.objects.objects[manager.objects.objects_by_type[manager.type][0]]

    # get metrics collector - the third in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus api - twice, because counters will appear only on the second run
    metrics_collector.plus_api()
    time.sleep(1)
    metrics_collector.plus_api()

    assert_that(context.plus_cache['https://127.0.0.1:443/api'], not_(has_length(0)))
def test_plus_status_cache(self): time.sleep(1) # Give N+ some time to start container = NginxManager() container._discover_objects() assert_that(container.objects.objects_by_type[container.type], has_length(1)) # get nginx object nginx_obj = container.objects.objects[ container.objects.objects_by_type[container.type][0]] # get metrics collector - the third in the list metrics_collector = nginx_obj.collectors[2] # run plus status - twice, because counters will appear only on the second run metrics_collector.plus_status() time.sleep(1) metrics_collector.plus_status() assert_that(context.plus_cache['https://127.0.0.1:443/plus_status'], not_(has_length(0)))
def test_plus_status_cache_limit(self): time.sleep(1) # Give N+ some time to start container = NginxManager() container._discover_objects() assert_that(container.objects.objects_by_type[container.type], has_length(1)) # get nginx object nginx_obj = container.objects.objects[ container.objects.objects_by_type[container.type][0]] # get metrics collector - the third in the list metrics_collector = nginx_obj.collectors[2] # run plus status - 4 times for x in xrange(4): metrics_collector.plus_status() time.sleep(1) assert_that(context.plus_cache['https://127.0.0.1:443/plus_status'], has_length(3))
def test_add_config_error_then_reload(self):
    """A broken config reload must not restart the object, but NginxConfig re-parses."""
    manager = NginxManager()
    manager._discover_objects()
    nginx_objects = manager.objects.find_all(types=manager.types)
    assert_that(nginx_objects, has_length(0))

    # write the initial good config and start running nginx (should work)
    self.write_config(
        'events {}'
        'http {'
        ' server {'
        ' location /status {stub_status on;}'
        ' }'
        '}'
    )
    self.start_nginx(check=True)

    # check that nginx object was created and then run its config collector
    manager._discover_objects()
    nginx_objects = manager.objects.find_all(types=manager.types)
    assert_that(nginx_objects, has_length(1))
    nginx_obj = nginx_objects[0]

    # store some values from before the bad reload for testing purposes later
    before_bad_reload_config_subtree = nginx_obj.config.subtree
    before_bad_reload_config_stub_status_urls = nginx_obj.config.stub_status_urls
    before_bad_reload_object_workers = nginx_obj.workers
    before_bad_reload_object_stub_status_url = nginx_obj.stub_status_url
    before_bad_reload_object_api_endpoints_to_skip = nginx_obj.api_endpoints_to_skip

    # introduce an error to the config and try to reload nginx (should not work)
    self.write_config(
        'events {{{{{{{{{}'
        'http {'
        ' server {'
        ' location /status {stub_status on;}'
        ' }'
        '}'
    )
    self.reload_nginx(check=False)

    # run the config collector again now that the config has errors in it
    # collect manually because the nginx object shouldn't have restarted so it wouldn't parse on restore
    manager._discover_objects()
    nginx_objects = manager.objects.find_all(types=manager.types)
    assert_that(nginx_objects, has_length(1))
    nginx_obj = nginx_objects[0]
    nginx_obj.collectors[0].collect(no_delay=True)

    # check that the nginx process did not reload but the NginxConfig object did parse and update
    assert_that(before_bad_reload_object_workers, equal_to(nginx_obj.workers))
    assert_that(before_bad_reload_config_subtree, has_length(2))
    assert_that(before_bad_reload_config_stub_status_urls, has_length(2))
    # when too many "{" in file subtree is empty
    assert_that(nginx_obj.config.subtree, has_length(0))
    assert_that(nginx_obj.config.stub_status_urls, has_length(0))

    # check that although the NginxConfig parsed and updated, the NginxObject kept its cached data
    assert_that(before_bad_reload_object_stub_status_url, equal_to(nginx_obj.stub_status_url))
    assert_that(before_bad_reload_object_api_endpoints_to_skip, equal_to(nginx_obj.api_endpoints_to_skip))
def test_skip_parse_on_reload(self):
    """full_parse should run only at startup and when the config files change."""
    # wrap NginxConfig.full_parse with a method that counts how many times it's been called
    NginxConfig.full_parse = count_calls(NginxConfig.full_parse)
    assert_that(NginxConfig.full_parse.call_count, equal_to(0))

    manager = NginxManager()
    manager._discover_objects()

    # check that the config has only been parsed once (at startup)
    nginx_obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(NginxConfig.full_parse.call_count, equal_to(1))

    # reload nginx and discover objects again so manager will recognize it
    self.reload_nginx()
    time.sleep(2)
    manager._discover_objects()

    # metrics collector will cause the nginx object to need a restart because pids have changed
    metrics_collector = nginx_obj.collectors[2]
    metrics_collector.collect(no_delay=True)
    manager._discover_objects()

    # check that the config was not parsed again after the restart
    nginx_obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(NginxConfig.full_parse.call_count, equal_to(1))

    # check that the new nginx object's config collector won't call full_parse
    config_collector = nginx_obj.collectors[0]
    config_collector.collect(no_delay=True)
    assert_that(NginxConfig.full_parse.call_count, equal_to(1))

    # check that the config collector will still call full parse if config changes
    config_collector.previous['files'] = {}
    config_collector.collect(no_delay=True)
    assert_that(NginxConfig.full_parse.call_count, equal_to(2))
def test_restart(self):
    """After a restart the object gets a new master pid and new workers."""
    old_master, old_workers = self.get_master_workers()

    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.find_all(types=container.types), has_length(1))
    obj = container.objects.find_all(types=container.types)[0]
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, equal_to(old_workers))

    self.restart_nginx()
    new_master, new_workers = self.get_master_workers()

    container._discover_objects()
    assert_that(container.objects.find_all(types=container.types), has_length(1))
    obj = container.objects.find_all(types=container.types)[0]
    assert_that(obj.pid, not_(equal_to(old_master)))
    assert_that(obj.pid, equal_to(new_master))
    assert_that(obj.workers, not_(equal_to(old_workers)))
    assert_that(obj.workers, equal_to(new_workers))
def test_discover(self):
    """After plus status runs, PlusManager should discover the plus objects."""
    nginx_manager = NginxManager()
    nginx_manager._discover_objects()
    assert_that(nginx_manager.objects.objects_by_type[nginx_manager.type], has_length(1))

    # get nginx object
    nginx_obj = nginx_manager.objects.objects[nginx_manager.objects.objects_by_type[nginx_manager.type][0]]

    # get metrics collector - the third in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus status - twice, because counters will appear only on the second run
    metrics_collector.plus_status()
    time.sleep(1)
    metrics_collector.plus_status()

    plus_manager = PlusManager()
    plus_manager._discover_objects()
    assert_that(plus_manager.objects.find_all(types=plus_manager.types), has_length(3))
def test_discover_ignore_api_objects(self):
    """StatusManager must not pick up objects that belong to the ApiManager."""
    if self.plus_manager.__name__ != 'StatusManager':
        # this test only applies to old status manager
        return

    nginx_manager = NginxManager()
    nginx_manager._discover_objects()
    assert_that(nginx_manager.objects.objects_by_type[nginx_manager.type], has_length(1))

    # get nginx object
    nginx_obj = nginx_manager.objects.objects[nginx_manager.objects.objects_by_type[nginx_manager.type][0]]

    # get metrics collector - the third in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus status/api - twice, because counters will appear only on the second run
    getattr(metrics_collector, self.collector_method)()
    metrics_collector.plus_api()
    time.sleep(1)
    getattr(metrics_collector, self.collector_method)()
    metrics_collector.plus_api()

    plus_manager = self.plus_manager()

    # since objects are stored in the same object tank,
    # make sure there is no overlap
    api_manager = ApiManager()
    api_manager._discover_objects()
    assert_that(api_manager._api_objects(), has_length(10))
    assert_that(plus_manager._status_objects(), has_length(0))

    nginx_obj.api_enabled = self.api
    plus_manager._discover_objects()
    api_manager._discover_objects()
    assert_that(api_manager._api_objects(), has_length(0))
    assert_that(plus_manager._status_objects(), has_length(10))
def test_find_packages_nginx_from_package(self):
    """A dpkg-style listing should mark nginx as installed from a package."""
    manager = NginxManager()
    manager._discover_objects()
    nginx_obj = manager.objects.find_all(types=manager.types)[0]
    collector = self.meta_collector_class(object=nginx_obj, interval=nginx_obj.intervals['meta'])
    collector.meta = collector.default_meta

    # fake dpkg -l output for the package listing subprocess call
    self.push_subp_result(stdout_lines=[
        'ii nginx 1.4.6-1ubuntu3.8 all small, powerful, scalable web/proxy server',
        'ii nginx-common 1.4.6-1ubuntu3.8 all small, powerful, scalable web/proxy server - common files',
        'ii nginx-core 1.4.6-1ubuntu3.8 amd64 nginx web/proxy server (core version)',
        ''
    ])
    self.push_subp_result(stdout_lines=self.from_package_subp_result[0], stderr_lines=self.from_package_subp_result[1])

    collector.find_packages()
    assert_that(collector.meta['packages'], has_key('nginx-core'))
    assert_that(collector.meta['built_from_source'], equal_to(False))
def test_plus_api(self):
    """N+ api collection should produce the expected counters and gauges."""
    time.sleep(1)
    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.objects_by_type[container.type], has_length(1))

    # get nginx object
    nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]]

    # get metrics collector - the third in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus api - twice, because counters will appear only on the second run
    metrics_collector.plus_api()
    time.sleep(1)
    metrics_collector.plus_api()

    # check counters
    metrics = nginx_obj.statsd.current
    assert_that(metrics, has_item('counter'))
    counters = metrics['counter']
    assert_that(counters, has_item('nginx.http.conn.accepted'))
    assert_that(counters, has_item('nginx.http.request.count'))
    assert_that(counters, has_item('nginx.http.conn.dropped'))
    assert_that(counters, has_item('plus.http.ssl.handshakes'))
    assert_that(counters, has_item('plus.http.ssl.failed'))
    assert_that(counters, has_item('plus.http.ssl.reuses'))
    assert_that(counters, has_item('plus.proc.respawned'))

    # check gauges
    assert_that(metrics, has_item('gauge'))
    gauges = metrics['gauge']
    assert_that(gauges, has_item('nginx.http.conn.active'))
    assert_that(gauges, has_item('nginx.http.conn.current'))
    assert_that(gauges, has_item('nginx.http.conn.idle'))
    assert_that(gauges, has_item('nginx.http.request.current'))
def test_skip_parse_until_change(self): manager = NginxManager() # wrap NginxConfig.full_parse with a method that counts how many times it's been called from amplify.agent.objects.nginx.config.config import NginxConfig def count_full_parse_calls(config_obj): NginxConfig.__full_parse_calls += 1 config_obj.__full_parse() NginxConfig.__full_parse_calls = 0 NginxConfig.__full_parse = NginxConfig.full_parse NginxConfig.full_parse = count_full_parse_calls # discover the NGINX object and check that the config has been fully parsed once manager._discover_objects() nginx_obj = manager.objects.objects[manager.objects.objects_by_type[manager.type][0]] assert_that(NginxConfig.__full_parse_calls, equal_to(1)) # get the NginxConfig collector cfg_collector = nginx_obj.collectors[0] # check that NginxConfig.full_parse is not called again during collect cfg_collector.collect(no_delay=True) assert_that(NginxConfig.__full_parse_calls, equal_to(1)) cfg_collector.collect(no_delay=True) cfg_collector.collect(no_delay=True) assert_that(NginxConfig.__full_parse_calls, equal_to(1)) # change the collector's previous files record so that it will call full_parse cfg_collector.previous['files'] = {} cfg_collector.collect(no_delay=True) assert_that(NginxConfig.__full_parse_calls, equal_to(2)) cfg_collector.collect(no_delay=True) cfg_collector.collect(no_delay=True) assert_that(NginxConfig.__full_parse_calls, equal_to(2))
def test_test_run_time(self):
    """Config test exceeding max_test_duration should disable run_config_test and emit an event."""
    manager = NginxManager()
    manager._discover_objects()
    nginx_obj = manager.objects.objects[manager.objects.objects_by_type[manager.type][0]]
    cfg_collector = nginx_obj.collectors[0]
    assert_that(nginx_obj.run_config_test, equal_to(True))

    # set maximum run time for test to 0.0
    context.app_config['containers']['nginx']['max_test_duration'] = 0.0

    # running collect won't do anything until the config changes
    cfg_collector.collect(no_delay=True)
    assert_that(nginx_obj.run_config_test, equal_to(True))

    # change the collector's previous files record so that it will call full_parse
    cfg_collector.previous['files'] = {}
    # avoid restarting the object for testing
    cfg_collector.previous['checksum'] = None

    # running collect should now cause the run_time to exceed 0.0, rendering run_config_test False
    cfg_collector.collect(no_delay=True)
    assert_that(nginx_obj.run_config_test, equal_to(False))

    events = nginx_obj.eventd.current.values()
    messages = []
    for event in events:
        messages.append(event.message)
    assert_that(messages, has_item(starts_with('/usr/sbin/nginx -t -c /etc/nginx/nginx.conf took')))
def test_discover(self):
    """The parametrized plus manager should discover all 10 plus objects."""
    nginx_manager = NginxManager()
    nginx_manager._discover_objects()
    assert_that(nginx_manager.objects.objects_by_type[nginx_manager.type], has_length(1))

    # get nginx object
    nginx_obj = nginx_manager.objects.objects[nginx_manager.objects.objects_by_type[nginx_manager.type][0]]

    # dont want manager to skip this nginx object
    nginx_obj.api_enabled = self.api

    # get metrics collector - the third in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus status - twice, because counters will appear only on the second run
    getattr(metrics_collector, self.collector_method)()
    time.sleep(1)
    getattr(metrics_collector, self.collector_method)()

    plus_manager = self.plus_manager()
    plus_manager._discover_objects()
    assert_that(plus_manager.objects.find_all(types=plus_manager.types), has_length(10))
def test_stub_status(self):
    """Stub status collection should produce counters and all stub gauges."""
    container = NginxManager()
    container._discover_objects()
    assert_that(container.objects.objects_by_type[container.type], has_length(1))

    # get nginx object
    nginx_obj = container.objects.objects[container.objects.objects_by_type[container.type][0]]

    # get metrics collector - the second from the list
    collectors = nginx_obj.collectors
    metrics_collector = collectors[1]

    # run plus status - twice, because counters will appear only on the second run
    metrics_collector.stub_status()
    time.sleep(1)
    metrics_collector.stub_status()

    # check counters
    metrics = nginx_obj.statsd.current
    assert_that(metrics, has_item('counter'))
    counters = metrics['counter']
    assert_that(counters, has_item('nginx.http.conn.accepted'))
    assert_that(counters, has_item('nginx.http.request.count'))
    assert_that(counters, has_item('nginx.http.conn.dropped'))

    # check gauges
    assert_that(metrics, has_item('gauge'))
    gauges = metrics['gauge']
    assert_that(gauges, has_item('nginx.http.conn.active'))
    assert_that(gauges, has_item('nginx.http.conn.current'))
    assert_that(gauges, has_item('nginx.http.conn.idle'))
    assert_that(gauges, has_item('nginx.http.request.current'))
    assert_that(gauges, has_item('nginx.http.request.writing'))
    assert_that(gauges, has_item('nginx.http.request.reading'))
def test_skip_parse_on_reload(self):
    """full_parse should not re-run on an object restart, only on config change."""
    # wrap NginxConfig.full_parse with a method that counts how many times it's been called
    from amplify.agent.objects.nginx.config.config import NginxConfig

    # NOTE(review): the double-underscore attributes below are name-mangled with
    # this test class's name; consistent within the class body, but fragile.
    def count_full_parse_calls(config_obj):
        NginxConfig.__full_parse_calls += 1
        config_obj.__full_parse()

    NginxConfig.__full_parse_calls = 0
    NginxConfig.__full_parse = NginxConfig.full_parse
    NginxConfig.full_parse = count_full_parse_calls

    manager = NginxManager()
    manager._discover_objects()

    # check that the config has only been parsed once (at startup)
    nginx_obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(NginxConfig.__full_parse_calls, equal_to(1))

    # reload nginx and discover objects again so manager will recognize it
    self.reload_nginx()
    time.sleep(2)
    manager._discover_objects()

    # metrics collector will cause the nginx object to need a restart because pids have changed
    metrics_collector = nginx_obj.collectors[2]
    metrics_collector.collect(no_delay=True)
    manager._discover_objects()

    # check that the config was not parsed again after the restart
    nginx_obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(NginxConfig.__full_parse_calls, equal_to(1))

    # check that the new nginx object's config collector won't call full_parse
    config_collector = nginx_obj.collectors[0]
    config_collector.collect(no_delay=True)
    assert_that(NginxConfig.__full_parse_calls, equal_to(1))

    # check that the config collector will still call full parse if config changes
    config_collector.previous['files'] = {}
    config_collector.collect(no_delay=True)
    assert_that(NginxConfig.__full_parse_calls, equal_to(2))
def test_nginx_master_reloads(self):
    """Repeated reloads should surface the nginx.master.reloads counter."""
    # make sure we have a fresh nginx object
    self.stop_first_nginx()
    self.start_first_nginx()
    time.sleep(1)  # let it breathe for a while

    manager = NginxManager()
    manager._discover_objects()

    # get nginx object
    nginx_obj = manager.objects.objects[manager.objects.objects_by_type[manager.type][0]]

    # reload nginx a few times.. in this case 5 to test gauge value/behavior
    for _ in range(5):
        manager._discover_objects()
        self.reload_nginx()
        time.sleep(1)
    manager._discover_objects()

    metrics_collector = None
    for collector in nginx_obj.collectors:
        if isinstance(collector, NginxMetricsCollector):
            metrics_collector = collector
            break
    assert_that(metrics_collector, not_(equal_to(None)))

    metrics_collector.reloads_and_restarts_count()
    metrics = nginx_obj.statsd.current
    assert_that(metrics, has_item('counter'))

    # ensure nginx.master.reloads is present as a gauge
    counter = metrics['counter']
    assert_that(counter, has_item('nginx.master.reloads'))
class PsutilsTestCase(RealNginxTestCase):
    """
    Overall tests for verifying that our calls to psutil work without raising.
    """
    def setup_method(self, method):
        # discover system and nginx objects and grab their metrics collectors
        super(PsutilsTestCase, self).setup_method(method)
        self.system_manager = SystemManager()
        self.system_manager._discover_objects()
        self.nginx_manager = NginxManager()
        self.nginx_manager._discover_objects()
        self.system_obj = self.system_manager.objects.objects[
            self.system_manager.objects.objects_by_type[self.system_manager.type][0]
        ]
        self.system_metrics_collector = self.system_obj.collectors[1]
        self.nginx_obj = self.nginx_manager.objects.objects[
            self.nginx_manager.objects.objects_by_type[self.nginx_manager.type][0]
        ]
        self.nginx_metrics_collector = self.nginx_obj.collectors[1]

    def teardown_method(self, method):
        # drop manager references so each test starts from a clean discovery
        self.system_manager = None
        self.nginx_manager = None
        super(PsutilsTestCase, self).teardown_method(method)

    def test_system_virtual_memory(self):
        assert_that(calling(self.system_metrics_collector.virtual_memory), not_(raises(Exception)))

    def test_system_swap(self):
        assert_that(calling(self.system_metrics_collector.swap), not_(raises(Exception)))

    def test_system_cpu(self):
        assert_that(calling(self.system_metrics_collector.cpu), not_(raises(Exception)))

    def test_system_disk_partitions(self):
        assert_that(calling(self.system_metrics_collector.disk_partitions), not_(raises(Exception)))

    def test_system_disk_io_counters(self):
        assert_that(calling(self.system_metrics_collector.disk_io_counters), not_(raises(Exception)))

    def test_system_net_io_counters(self):
        assert_that(calling(self.system_metrics_collector.net_io_counters), not_(raises(Exception)))

    def test_nginx_memory_info(self):
        assert_that(calling(self.nginx_metrics_collector.memory_info), not_(raises(Exception)))

    def test_nginx_workers_fds_count(self):
        assert_that(calling(self.nginx_metrics_collector.workers_fds_count), not_(raises(Exception)))

    # These next two tests have to be skipped due to calls to .handle_zombie() which raises a hamcrest exception.
    @future_test
    def test_nginx_workers_rlimit_nofile(self):
        assert_that(calling(self.nginx_metrics_collector.workers_rlimit_nofile), not_(raises(Exception)))

    @future_test
    def test_nginx_workers_io(self):
        assert_that(calling(self.nginx_metrics_collector.workers_io), not_(raises(Exception)))

    def test_nginx_workers_cpu(self):
        assert_that(calling(self.nginx_metrics_collector.workers_cpu), not_(raises(Exception)))
def test_restore_config_collector(self):
    """
    Verify that a restarted nginx object restores its config collector state
    (re-running parse_config but skipping the expensive full_parse) instead of
    building the collector from scratch.
    """
    # keep references to the unwrapped callables so they can be restored below;
    # the original test patched these class attributes and never undid it,
    # leaking count_calls wrappers into every subsequent test
    originals = (
        NginxObject._setup_config_collector,
        NginxObject._restore_config_collector,
        NginxConfigCollector.parse_config,
        NginxConfig.full_parse,
    )
    # begin counting the number of calls of these methods
    NginxObject._setup_config_collector = count_calls(
        NginxObject._setup_config_collector)
    NginxObject._restore_config_collector = count_calls(
        NginxObject._restore_config_collector)
    NginxConfigCollector.parse_config = count_calls(
        NginxConfigCollector.parse_config)
    NginxConfig.full_parse = count_calls(NginxConfig.full_parse)
    try:
        # confirm that all the call counts start at zero
        assert_that(NginxObject._setup_config_collector.call_count, equal_to(0))
        assert_that(NginxObject._restore_config_collector.call_count, equal_to(0))
        assert_that(NginxConfigCollector.parse_config.call_count, equal_to(0))
        assert_that(NginxConfig.full_parse.call_count, equal_to(0))

        manager = NginxManager()
        manager._discover_objects()

        # check that nginx object was initialized but not from restart
        nginx_obj = manager.objects.find_all(types=manager.types)[0]
        assert_that(nginx_obj.need_restart, equal_to(False))
        assert_that(nginx_obj.collectors[0].short_name, equal_to('nginx_config'))
        assert_that(nginx_obj.data, not_(has_key('config_data')))  # no previous config data

        # check that _setup_config_collector and not _restore_config_collector
        assert_that(NginxObject._setup_config_collector.call_count, equal_to(1))
        assert_that(NginxObject._restore_config_collector.call_count, equal_to(0))

        # check parse_config was called inside _setup_config_collector and that full_parse was run
        assert_that(NginxConfigCollector.parse_config.call_count, equal_to(1))
        assert_that(NginxConfig.full_parse.call_count, equal_to(1))

        # restart nginx and discover objects again
        nginx_obj.need_restart = True
        manager._discover_objects()

        # check that nginx object was initialized by restarting nginx that
        # already had run parse_config once
        nginx_obj = manager.objects.find_all(types=manager.types)[0]
        assert_that(nginx_obj.need_restart, equal_to(False))
        assert_that(nginx_obj.collectors[0].short_name, equal_to('nginx_config'))
        assert_that(
            nginx_obj.data,
            has_entries({
                'config_data': has_entries({
                    'previous': has_entries({
                        'files': instance_of(dict)  # found by NginxConfigCollector.parse_config
                    })
                })
            }))

        # check that _restore_config_collector was called instead of _setup_config_collector
        assert_that(NginxObject._setup_config_collector.call_count, equal_to(1))
        assert_that(NginxObject._restore_config_collector.call_count, equal_to(1))

        # check parse_config was called inside _restore_config_collector but full_parse was skipped
        assert_that(NginxConfigCollector.parse_config.call_count, equal_to(2))
        assert_that(NginxConfig.full_parse.call_count, equal_to(1))
    finally:
        # undo the patching so the wrappers do not leak into other tests
        (NginxObject._setup_config_collector,
         NginxObject._restore_config_collector,
         NginxConfigCollector.parse_config,
         NginxConfig.full_parse) = originals
class PsutilsTestCase(RealNginxTestCase):
    """
    Smoke tests for the psutil-backed collector calls.

    Each test invokes a single collector method and asserts only that the call
    completes without raising.
    """

    def setup_method(self, method):
        super(PsutilsTestCase, self).setup_method(method)

        # discover the live system and nginx objects so their collectors exist
        self.system_manager = SystemManager()
        self.system_manager._discover_objects()
        self.nginx_manager = NginxManager()
        self.nginx_manager._discover_objects()

        sys_objects = self.system_manager.objects
        sys_id = sys_objects.objects_by_type[self.system_manager.type][0]
        self.system_obj = sys_objects.objects[sys_id]
        self.system_metrics_collector = self.system_obj.collectors[1]

        ngx_objects = self.nginx_manager.objects
        ngx_id = ngx_objects.objects_by_type[self.nginx_manager.type][0]
        self.nginx_obj = ngx_objects.objects[ngx_id]
        self.nginx_metrics_collector = self.nginx_obj.collectors[2]

    def teardown_method(self, method):
        # release the managers so every test re-discovers fresh objects
        self.system_manager = None
        self.nginx_manager = None
        super(PsutilsTestCase, self).teardown_method(method)

    def test_system_virtual_memory(self):
        call = calling(self.system_metrics_collector.virtual_memory)
        assert_that(call, not_(raises(Exception)))

    def test_system_swap(self):
        call = calling(self.system_metrics_collector.swap)
        assert_that(call, not_(raises(Exception)))

    def test_system_cpu(self):
        call = calling(self.system_metrics_collector.cpu)
        assert_that(call, not_(raises(Exception)))

    def test_system_disk_partitions(self):
        call = calling(self.system_metrics_collector.disk_partitions)
        assert_that(call, not_(raises(Exception)))

    def test_system_disk_io_counters(self):
        call = calling(self.system_metrics_collector.disk_io_counters)
        assert_that(call, not_(raises(Exception)))

    def test_system_net_io_counters(self):
        call = calling(self.system_metrics_collector.net_io_counters)
        assert_that(call, not_(raises(Exception)))

    def test_nginx_memory_info(self):
        call = calling(self.nginx_metrics_collector.memory_info)
        assert_that(call, not_(raises(Exception)))

    def test_nginx_workers_fds_count(self):
        call = calling(self.nginx_metrics_collector.workers_fds_count)
        assert_that(call, not_(raises(Exception)))

    # The next two tests have to be skipped due to calls to .handle_zombie()
    # which raises a hamcrest exception.
    @future_test
    def test_nginx_workers_rlimit_nofile(self):
        call = calling(self.nginx_metrics_collector.workers_rlimit_nofile)
        assert_that(call, not_(raises(Exception)))

    @future_test
    def test_nginx_workers_io(self):
        call = calling(self.nginx_metrics_collector.workers_io)
        assert_that(call, not_(raises(Exception)))

    def test_nginx_workers_cpu(self):
        call = calling(self.nginx_metrics_collector.workers_cpu)
        assert_that(call, not_(raises(Exception)))