def test_plus_status(self):
    """plus_status fills statsd with connection/request counters and gauges."""
    time.sleep(1)  # Give N+ some time to start

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # grab the single discovered nginx object
    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # the metrics collector is the second collector in the list
    collector = nginx.collectors[1]

    # run plus status twice - counters only appear on the second run
    collector.plus_status()
    time.sleep(1)
    collector.plus_status()

    stats = nginx.statsd.current

    # check counters
    assert_that(stats, has_item('counter'))
    for name in ('nginx.http.conn.accepted',
                 'nginx.http.request.count',
                 'nginx.http.conn.dropped'):
        assert_that(stats['counter'], has_item(name))

    # check gauges
    assert_that(stats, has_item('gauge'))
    for name in ('nginx.http.conn.active',
                 'nginx.http.conn.current',
                 'nginx.http.conn.idle',
                 'nginx.http.request.current'):
        assert_that(stats['gauge'], has_item(name))
def test_collect_meta_in_container(self):
    """Meta collector running inside a container reports the expected key set."""
    manager = NginxManager()
    manager._discover_objects()
    nginx = manager.objects.find_all(types=manager.types)[0]

    collector = NginxMetaCollector(
        object=nginx, interval=nginx.intervals['meta'])
    assert_that(collector.in_container)

    collector.collect()

    # note: no 'start_time'/'pid' here - those are host-only meta keys
    expected_keys = (
        'type', 'local_id', 'root_uuid', 'running',
        'stub_status_enabled', 'status_module_enabled', 'ssl',
        'stub_status_url', 'plus_status_url', 'version', 'plus',
        'configure', 'packages', 'path', 'built_from_source',
        'parent_hostname',
    )
    assert_that(nginx.metad.current, contains_inanyorder(*expected_keys))
def test_test_run_time(self):
    """run_config_test flips to False once `nginx -t` exceeds the allowed time."""
    manager = NginxManager()
    manager._discover_objects()
    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]
    cfg_collector = nginx.collectors[0]

    assert_that(nginx.run_config_test, equal_to(True))

    # force any config test to exceed its allowed run time
    context.app_config['containers']['nginx']['max_test_duration'] = 0.0

    # collect is a no-op until the config actually changes
    cfg_collector.collect(no_delay=True)
    assert_that(nginx.run_config_test, equal_to(True))

    # wipe the previous files record so full_parse gets triggered,
    # and clear the checksum so the object is not restarted during the test
    cfg_collector.previous['files'] = {}
    cfg_collector.previous['checksum'] = None

    # now the run time exceeds 0.0, rendering run_config_test False
    cfg_collector.collect(no_delay=True)
    assert_that(nginx.run_config_test, equal_to(False))

    messages = [event.message for event in nginx.eventd.current.values()]
    assert_that(messages, has_item(
        starts_with('/usr/sbin/nginx -t -c /etc/nginx/nginx.conf took')))
def test_plus_status_priority(self):
    """If plus status is reachable, stub_status must not be used."""
    time.sleep(1)  # Give N+ some time to start

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # both status flavors should be enabled on this object
    assert_that(nginx.plus_status_enabled, equal_to(True))
    assert_that(nginx.stub_status_enabled, equal_to(True))

    # metrics collector is the second collector in the list
    collector = nginx.collectors[1]

    # run status twice so counters have a previous sample
    collector.status()
    time.sleep(1)
    collector.status()

    # request.writing/reading exist only in stub_status output, so we
    # shouldn't see them if metrics came from the N+ status endpoint
    stats = nginx.statsd.current
    assert_that(stats, has_item('gauge'))
    gauges = stats['gauge']
    assert_that(gauges, not_(has_item('nginx.http.request.writing')))
    assert_that(gauges, not_(has_item('nginx.http.request.reading')))
def test_plus_ssl_metrics(self):
    """SSL handshake counters are collected from plus status."""
    time.sleep(1)  # Give N+ some time to start

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # metrics collector is the third collector in the list
    collector = nginx.collectors[2]

    # run status twice so counters have a previous sample to diff against
    collector.status()
    time.sleep(1)
    collector.status()

    # check ssl counters
    stats = nginx.statsd.current
    assert_that(stats, has_item('counter'))
    for name in ('plus.http.ssl.handshakes',
                 'plus.http.ssl.failed',
                 'plus.http.ssl.reuses'):
        assert_that(stats['counter'], has_item(name))
def test_find_packages_nginx_from_source(self):
    """find_packages detects a from-source build whether rpm prints to stdout or stderr."""
    manager = NginxManager()
    manager._discover_objects()
    nginx = manager.objects.find_all(types=manager.types)[0]

    collector = CentosNginxMetaCollector(
        object=nginx, interval=nginx.intervals['meta'])

    # case 1: rpm output arriving on stdout
    collector.meta = collector.default_meta
    self.push_subp_result(
        stdout_lines=self.from_source_subp_result[0],
        stderr_lines=self.from_source_subp_result[1]
    )
    collector.find_packages()
    assert_that(collector.meta['packages'], equal_to({}))
    assert_that(collector.meta['built_from_source'], equal_to(True))

    # case 2: rpm output arriving on stderr - some Centos / RPM versions
    # do this, AMPDEV-1995
    collector.meta = collector.default_meta
    self.push_subp_result(
        stdout_lines=self.from_source_subp_result[1],
        stderr_lines=self.from_source_subp_result[0]
    )
    collector.find_packages()
    assert_that(collector.meta['packages'], equal_to({}))
    assert_that(collector.meta['built_from_source'], equal_to(True))
def test_global_metrics_priority_api_enabled(self):
    """With the N+ api enabled, metrics come from the api (not status/stub_status)."""
    time.sleep(1)  # Give N+ some time to start

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # api, plus status and stub_status should all be enabled
    assert_that(nginx.api_enabled, equal_to(True))
    assert_that(nginx.plus_status_enabled, equal_to(True))
    assert_that(nginx.stub_status_enabled, equal_to(True))

    collector = nginx.collectors[2]  # metrics collector - third in the list

    # run twice so counters have a previous sample
    collector.global_metrics()
    time.sleep(1)
    collector.global_metrics()

    # plus.proc.respawned exists only in the N+ api payload (not in status
    # nor stub_status), so seeing it proves the api was preferred
    stats = nginx.statsd.current
    assert_that(stats, has_item('counter'))
    assert_that(stats['counter'], has_item('plus.proc.respawned'))
def test_plus_status_priority(self):
    """If plus status is reachable we must not fall back to stub_status."""
    time.sleep(1)  # Give N+ some time to start

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # both status flavors are enabled on this object
    assert_that(nginx.plus_status_enabled, equal_to(True))
    assert_that(nginx.stub_status_enabled, equal_to(True))

    collector = nginx.collectors[2]  # metrics collector - third in the list

    collector.status()
    time.sleep(1)
    collector.status()

    # request.writing/reading are stub_status-only gauges, so we shouldn't
    # see them when metrics came from the N+ status endpoint
    stats = nginx.statsd.current
    assert_that(stats, has_item('gauge'))
    gauges = stats['gauge']
    assert_that(gauges, not_(has_item('nginx.http.request.writing')))
    assert_that(gauges, not_(has_item('nginx.http.request.reading')))
def test_find_all(self):
    """_find_all should report the supervisord-managed nginx master and its workers.

    The expected process tree is read from `ps` and compared against the
    manager's own discovery result.
    """
    def pids_of(lines, marker):
        # parse "pid ppid command" rows, keeping only rows matching marker;
        # a list comprehension (vs the old py2-only list-returning map())
        # behaves the same on python 2 and 3
        return [[int(field) for field in line.strip().split()[:2]]
                for line in lines if marker in line]

    out = subp.call('ps xao pid,ppid,command | grep "supervisor[d]" | tr -s " "')[0]
    supervisors = pids_of(out, 'supervisord')
    assert_that(supervisors, has_length(1))
    supervisor_pid, supervisor_ppid = supervisors[0]
    assert_that(supervisor_ppid, equal_to(1))  # supervisord runs under init

    time.sleep(2)

    out = subp.call('ps xao pid,ppid,command | grep "nginx[:]" | tr -s " "')[0]
    masters = pids_of(out, 'nginx: master process')
    assert_that(masters, has_length(1))
    master_pid, master_ppid = masters[0]
    assert_that(master_ppid, equal_to(supervisor_pid))

    # every worker must be a child of the master
    worker_pids = []
    for worker_pid, worker_ppid in pids_of(out, 'nginx: worker process'):
        worker_pids.append(worker_pid)
        assert_that(worker_ppid, equal_to(master_pid))

    container = NginxManager()
    nginxes = container._find_all()
    assert_that(nginxes, has_length(1))

    definition, data = nginxes.pop(0)
    assert_that(data, has_key('pid'))
    assert_that(data, has_key('workers'))
    assert_that(master_pid, equal_to(data['pid']))
    assert_that(worker_pids, equal_to(data['workers']))
def test_bad_plus_status_discovery_with_config(self):
    """Configured plus/stub status urls are probed in the documented order."""
    app_config = amplify.agent.common.context.context.app_config
    app_config['nginx']['plus_status'] = '/foo_plus'
    app_config['nginx']['stub_status'] = '/foo_basic'

    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_bad_status.conf')

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # self.http_requests should look like this:
    #   first - internal plus statuses:
    #     'http://127.0.0.1:82/plus_status', 'https://127.0.0.1:82/plus_status',
    #     'http://127.0.0.1/foo_plus', 'https://127.0.0.1/foo_plus',
    #   then external plus statuses:
    #     'http://bad.status.naas.nginx.com:82/plus_status',
    #     'https://bad.status.naas.nginx.com:82/plus_status',
    #   finally - stub statuses:
    #     'http://127.0.0.1:82/basic_status', 'https://127.0.0.1:82/basic_status',
    #     'http://127.0.0.1/foo_basic', 'https://127.0.0.1/foo_basic'
    assert_that(self.http_requests[2], equal_to('http://127.0.0.1/foo_plus'))
    assert_that(self.http_requests[-2], equal_to('http://127.0.0.1/foo_basic'))
def test_bad_plus_status_discovery_with_config(self):
    """Configured plus/stub status urls are probed in the documented order."""
    cfg = amplify.agent.common.context.context.app_config['nginx']
    cfg['plus_status'] = '/foo_plus'
    cfg['stub_status'] = '/foo_basic'

    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_bad_status.conf')

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # self.http_requests should look like this:
    #   first - internal plus statuses:
    #     'http://127.0.0.1:82/plus_status', 'https://127.0.0.1:82/plus_status',
    #     'http://127.0.0.1/foo_plus', 'https://127.0.0.1/foo_plus',
    #   then external plus statuses:
    #     'http://bad.status.naas.nginx.com:82/plus_status',
    #     'https://bad.status.naas.nginx.com:82/plus_status',
    #   finally - stub statuses:
    #     'http://127.0.0.1:82/basic_status', 'https://127.0.0.1:82/basic_status',
    #     'http://127.0.0.1/foo_basic', 'https://127.0.0.1/foo_basic'
    assert_that(self.http_requests[2], equal_to('http://127.0.0.1/foo_plus'))
    assert_that(self.http_requests[-2], equal_to('http://127.0.0.1/foo_basic'))
def test_test_run_time(self):
    """run_config_test goes False when `nginx -t` exceeds max_test_duration."""
    manager = NginxManager()
    manager._discover_objects()
    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    cfg_collector = nginx.collectors[2]
    assert_that(nginx.run_config_test, equal_to(True))

    # set maximum run time for the config test to 0.0
    context.app_config['containers']['nginx']['max_test_duration'] = 0.0

    # run collect - the measured run time now always exceeds the limit
    cfg_collector.collect()
    assert_that(nginx.run_config_test, equal_to(False))

    # a "took too long" event must have been emitted
    messages = [event.message for event in nginx.eventd.current.values()]
    assert_that(messages, has_item(
        starts_with('/usr/sbin/nginx -t -c /etc/nginx/nginx.conf took')))
def test_plus_status(self):
    """plus_status populates connection/request counters and gauges."""
    time.sleep(1)  # Give N+ some time to start

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    collector = nginx.collectors[2]  # metrics collector - third in the list

    # counters only appear on the second run, so poll twice
    collector.plus_status()
    time.sleep(1)
    collector.plus_status()

    stats = nginx.statsd.current

    # check counters
    assert_that(stats, has_item('counter'))
    for name in ('nginx.http.conn.accepted',
                 'nginx.http.request.count',
                 'nginx.http.conn.dropped'):
        assert_that(stats['counter'], has_item(name))

    # check gauges
    assert_that(stats, has_item('gauge'))
    for name in ('nginx.http.conn.active',
                 'nginx.http.conn.current',
                 'nginx.http.conn.idle',
                 'nginx.http.request.current'):
        assert_that(stats['gauge'], has_item(name))
def test_find_packages_nginx_from_package(self):
    """find_packages reports dpkg-installed nginx packages (not built from source)."""
    manager = NginxManager()
    manager._discover_objects()
    nginx = manager.objects.find_all(types=manager.types)[0]

    collector = self.meta_collector_class(
        object=nginx, interval=nginx.intervals['meta'])
    collector.meta = collector.default_meta

    # canned `dpkg -l` style output listing the nginx packages
    self.push_subp_result(
        stdout_lines=[
            'ii nginx 1.4.6-1ubuntu3.8 all small, powerful, scalable web/proxy server',
            'ii nginx-common 1.4.6-1ubuntu3.8 all small, powerful, scalable web/proxy server - common files',
            'ii nginx-core 1.4.6-1ubuntu3.8 amd64 nginx web/proxy server (core version)',
            ''
        ]
    )
    self.push_subp_result(
        stdout_lines=self.from_package_subp_result[0],
        stderr_lines=self.from_package_subp_result[1]
    )

    collector.find_packages()
    assert_that(collector.meta['packages'], has_key('nginx-core'))
    assert_that(collector.meta['built_from_source'], equal_to(False))
def test_skip_upload_ssl(self):
    """With upload_ssl disabled no ssl certificates are parsed."""
    context.app_config['containers']['nginx']['upload_ssl'] = False

    manager = NginxManager()
    manager._discover_objects()

    nginx = manager.objects.find_all(types=manager.types)[0]
    assert_that(nginx.upload_ssl, equal_to(False))
    assert_that(nginx.config.ssl_certificates, has_length(0))
def test_collect(self):
    """Running the config collector populates configd."""
    manager = NginxManager()
    manager._discover_objects()
    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # run the config collector (first in the list)
    cfg_collector = nginx.collectors[0]
    cfg_collector.collect()

    assert_that(nginx.configd.current, not_(empty()))
def test_bad_stub_status_discovery_with_config(self):
    """A configured stub_status url is the last thing probed (http then https)."""
    amplify.agent.common.context.context.app_config['nginx']['stub_status'] = '/foo_basic'

    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_bad_status.conf')

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # the configured url is probed last, http before https
    assert_that(self.http_requests[-1], equal_to('https://127.0.0.1/foo_basic'))
    assert_that(self.http_requests[-2], equal_to('http://127.0.0.1/foo_basic'))
def test_plus_api(self):
    """Collect metrics via the N+ api and sanity-check their names and sample types.

    Counter/gauge samples are (timestamp, value) pairs of ints; timer
    samples are floats.
    """
    time.sleep(1)
    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # get nginx object
    nginx_obj = manager.objects.objects[
        manager.objects.objects_by_type[manager.type][0]]

    # get metrics collector - the third in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus api - twice, because counters will appear only on the second run
    metrics_collector.plus_api()
    time.sleep(1)
    metrics_collector.plus_api()

    metrics = nginx_obj.statsd.current

    # check counters
    assert_that(metrics, has_item('counter'))
    counters = metrics['counter']
    assert_that(counters, has_item('nginx.http.conn.accepted'))
    assert_that(counters, has_item('nginx.http.request.count'))
    assert_that(counters, has_item('nginx.http.conn.dropped'))
    assert_that(counters, has_item('plus.http.ssl.handshakes'))
    assert_that(counters, has_item('plus.http.ssl.failed'))
    assert_that(counters, has_item('plus.http.ssl.reuses'))
    assert_that(counters, has_item('plus.proc.respawned'))
    # FIX: the old loops iterated .iteritems() but ignored the bound value
    # and re-indexed counters[key] - iterate the values directly instead
    for samples in counters.itervalues():
        for metric in samples:
            assert_that(isinstance(metric[0], int))
            assert_that(isinstance(metric[1], int))

    # check gauges
    assert_that(metrics, has_item('gauge'))
    gauges = metrics['gauge']
    assert_that(gauges, has_item('nginx.http.conn.active'))
    assert_that(gauges, has_item('nginx.http.conn.current'))
    assert_that(gauges, has_item('nginx.http.conn.idle'))
    assert_that(gauges, has_item('nginx.http.request.current'))
    for samples in gauges.itervalues():
        for metric in samples:
            assert_that(isinstance(metric[0], int))
            assert_that(isinstance(metric[1], int))

    # check timers
    assert_that(metrics, has_item('timer'))
    timers = metrics['timer']
    for samples in timers.itervalues():
        for metric in samples:
            assert_that(isinstance(metric, float))
def test_with_default_logs(self):
    """Discovery succeeds when nginx runs with the stock log configuration."""
    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # just check that the nginx object came up fine
    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]
    assert_that(nginx, not_none())
def test_logs_path(self):
    """A config without log directives falls back to the default log paths."""
    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_no_logs.conf')

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # exactly one access log and one error log, both at the default paths
    assert_that(nginx.config.access_logs, has_length(1))
    assert_that(nginx.config.access_logs, has_key('/var/log/nginx/access.log'))
    assert_that(nginx.config.error_logs, has_length(1))
    assert_that(nginx.config.error_logs, has_key('/var/log/nginx/error.log'))
def test_two_instances(self):
    """Starting a second nginx yields two managed objects; the first survives.

    The originally discovered object's local_id must still be among the
    re-discovered objects.
    """
    container = NginxManager()
    container._discover_objects()
    obj = container.objects.find_all(types=container.types)[0]

    self.start_second_nginx()

    container._discover_objects()
    found = container.objects.find_all(types=container.types)
    assert_that(found, has_length(2))

    # list comprehension instead of map(lambda ...) - clearer, and avoids
    # a redundant second find_all() call
    local_ids = [nginx.local_id for nginx in found]
    assert_that(local_ids, has_item(obj.local_id))
def test_find_all(self):
    """_find_all reports the running master pid and its worker pids."""
    manager = NginxManager()
    nginxes = manager._find_all()
    assert_that(nginxes, has_length(1))

    definition, data = nginxes.pop(0)
    assert_that(data, has_key('pid'))
    assert_that(data, has_key('workers'))

    # compare with what ps reports
    master, workers = self.get_master_workers()
    assert_that(master, equal_to(data['pid']))
    assert_that(workers, equal_to(data['workers']))
def test_skip_parse_on_reload(self):
    """After an nginx reload/restart the cached config must NOT be re-parsed
    unless the config files actually changed.
    """
    # wrap NginxConfig.full_parse with a method that counts how many times it's been called
    NginxConfig.full_parse = count_calls(NginxConfig.full_parse)
    assert_that(NginxConfig.full_parse.call_count, equal_to(0))

    manager = NginxManager()
    manager._discover_objects()

    # check that the config has only been parsed once (at startup)
    nginx_obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(NginxConfig.full_parse.call_count, equal_to(1))

    # reload nginx and discover objects again so manager will recognize it
    self.reload_nginx()
    time.sleep(2)
    manager._discover_objects()

    # metrics collector will cause the nginx object to need a restart because pids have changed
    metrics_collector = nginx_obj.collectors[2]
    metrics_collector.collect(no_delay=True)
    manager._discover_objects()

    # check that the config was not parsed again after the restart
    nginx_obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(NginxConfig.full_parse.call_count, equal_to(1))

    # check that the new nginx object's config collector won't call full_parse
    config_collector = nginx_obj.collectors[0]
    config_collector.collect(no_delay=True)
    assert_that(NginxConfig.full_parse.call_count, equal_to(1))

    # check that the config collector will still call full parse if config changes
    config_collector.previous['files'] = {}
    config_collector.collect(no_delay=True)
    assert_that(NginxConfig.full_parse.call_count, equal_to(2))
def test_bad_stub_status_discovery_with_config(self):
    """A configured stub_status url is probed last (http before https)."""
    context.app_config['nginx']['stub_status'] = '/foo_basic'

    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_bad_status.conf')

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    # the last two probes are the configured stub_status url
    assert_that(self.http_requests[-1], equal_to('https://127.0.0.1/foo_basic'))
    assert_that(self.http_requests[-2], equal_to('http://127.0.0.1/foo_basic'))
def test_add_config_error_then_reload(self):
    """A broken config reload must not restart the nginx object: the
    NginxConfig re-parses (and comes back empty), but the object keeps
    its cached status urls and worker data.
    """
    manager = NginxManager()
    manager._discover_objects()
    nginx_objects = manager.objects.find_all(types=manager.types)
    assert_that(nginx_objects, has_length(0))

    # write the initial good config and start running nginx (should work)
    self.write_config('events {}'
                      'http {'
                      ' server {'
                      ' location /status {stub_status on;}'
                      ' }'
                      '}')
    self.start_nginx(check=True)

    # check that nginx object was created and then run its config collector
    manager._discover_objects()
    nginx_objects = manager.objects.find_all(types=manager.types)
    assert_that(nginx_objects, has_length(1))
    nginx_obj = nginx_objects[0]

    # store some values from before the bad reload for testing purposes later
    before_bad_reload_config_subtree = nginx_obj.config.subtree
    before_bad_reload_config_stub_status_urls = nginx_obj.config.stub_status_urls
    before_bad_reload_object_workers = nginx_obj.workers
    before_bad_reload_object_stub_status_url = nginx_obj.stub_status_url
    before_bad_reload_object_api_endpoints_to_skip = nginx_obj.api_endpoints_to_skip

    # introduce an error to the config and try to reload nginx (should not work)
    self.write_config('events {{{{{{{{{}'
                      'http {'
                      ' server {'
                      ' location /status {stub_status on;}'
                      ' }'
                      '}')
    self.reload_nginx(check=False)

    # run the config collector again now that the config has errors in it
    # collect manually because the nginx object shouldn't have restarted so it wouldn't parse on restore
    manager._discover_objects()
    nginx_objects = manager.objects.find_all(types=manager.types)
    assert_that(nginx_objects, has_length(1))
    nginx_obj = nginx_objects[0]
    nginx_obj.collectors[0].collect(no_delay=True)

    # check that the nginx process did not reload but the NginxConfig object did parse and update
    assert_that(before_bad_reload_object_workers, equal_to(nginx_obj.workers))
    assert_that(before_bad_reload_config_subtree, has_length(2))
    assert_that(before_bad_reload_config_stub_status_urls, has_length(2))
    # when too many "{" in file, subtree is empty
    assert_that(nginx_obj.config.subtree, has_length(0))
    assert_that(nginx_obj.config.stub_status_urls, has_length(0))

    # check that although the NginxConfig parsed and updated, the NginxObject kept its cached data
    assert_that(before_bad_reload_object_stub_status_url, equal_to(nginx_obj.stub_status_url))
    assert_that(before_bad_reload_object_api_endpoints_to_skip, equal_to(nginx_obj.api_endpoints_to_skip))
def test_find_packages_nginx_from_source(self):
    """find_packages recognizes an nginx built from source (no packages)."""
    manager = NginxManager()
    manager._discover_objects()
    nginx = manager.objects.find_all(types=manager.types)[0]

    collector = self.meta_collector_class(
        object=nginx, interval=nginx.intervals['meta'])
    collector.meta = collector.default_meta

    # canned package-manager output for a from-source build
    self.push_subp_result(
        stdout_lines=self.from_source_subp_result[0],
        stderr_lines=self.from_source_subp_result[1])

    collector.find_packages()
    assert_that(collector.meta['packages'], equal_to({}))
    assert_that(collector.meta['built_from_source'], equal_to(True))
def test_collect_meta(self):
    """Meta collector outside a container reports the full key set
    (including the host-only keys 'start_time' and 'pid').
    """
    container = NginxManager()
    container._discover_objects()
    nginx_obj = container.objects.find_all(types=container.types)[0]

    collector = NginxMetaCollector(
        object=nginx_obj, interval=nginx_obj.intervals['meta'])

    # BUG FIX: the old `assert_that(not_(collector.in_container))` passed a
    # hamcrest matcher object as the sole argument; a matcher object is
    # always truthy, so that assertion could never fail. Assert the actual
    # value instead.
    assert_that(collector.in_container, equal_to(False))

    collector.collect()
    assert_that(nginx_obj.metad.current, contains_inanyorder(
        'type', 'local_id', 'root_uuid', 'running',
        'stub_status_enabled', 'status_module_enabled', 'ssl',
        'stub_status_url', 'plus_status_url', 'version', 'plus',
        'configure', 'packages', 'path', 'built_from_source',
        'parent_hostname', 'start_time', 'pid'
    ))
def test_global_metrics_priority_api_disabled(self):
    """With the api disabled, metrics fall back to the N+ status endpoint."""
    time.sleep(1)  # Give N+ some time to start

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]
    nginx.api_enabled = False

    # plus status and stub_status are still enabled
    assert_that(nginx.plus_status_enabled, equal_to(True))
    assert_that(nginx.stub_status_enabled, equal_to(True))

    collector = nginx.collectors[2]  # metrics collector - third in the list

    collector.global_metrics()
    time.sleep(1)
    collector.global_metrics()

    stats = nginx.statsd.current

    # request.writing/reading are stub_status-only gauges, so we shouldn't
    # see them when the N+ status endpoint was used
    assert_that(stats, has_item('gauge'))
    gauges = stats['gauge']
    assert_that(gauges, not_(has_item('nginx.http.request.writing')))
    assert_that(gauges, not_(has_item('nginx.http.request.reading')))

    # plus.proc.respawned is new in the plus api; plus status should not have it
    assert_that(stats, has_item('counter'))
    counters = stats['counter']
    assert_that(counters, not_(has_item('plus.proc.respawned')))

    # the usual status counters must be present
    for name in ('nginx.http.conn.accepted',
                 'nginx.http.request.count',
                 'nginx.http.conn.dropped',
                 'plus.http.ssl.handshakes',
                 'plus.http.ssl.failed',
                 'plus.http.ssl.reuses'):
        assert_that(counters, has_item(name))

    # ...and the usual gauges too
    for name in ('nginx.http.conn.active',
                 'nginx.http.conn.current',
                 'nginx.http.conn.idle',
                 'nginx.http.request.current'):
        assert_that(gauges, has_item(name))
def test_bad_plus_status_discovery(self):
    """When the internal plus status url is unreachable only the external one remains."""
    self.stop_first_nginx()
    self.start_second_nginx(conf='nginx_bad_status.conf')

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # plus status stays enabled, but only via the external url
    assert_that(nginx.plus_status_enabled, equal_to(True))
    assert_that(nginx.plus_status_internal_url, equal_to(None))
    assert_that(nginx.plus_status_external_url,
                equal_to('http://bad.status.naas.nginx.com:82/plus_status'))
def test_discover(self):
    """Discovery creates exactly one nginx object and one nginx config."""
    assert_that(context.nginx_configs, has_length(0))

    # a fresh manager starts with no managed objects
    manager = NginxManager()
    assert_that(context.objects.find_all(types=manager.types), has_length(0))

    # after discovery there is one managed nginx object...
    manager._discover_objects()
    assert_that(context.objects.find_all(types=manager.types), has_length(1))

    # ...and one parsed nginx config
    assert_that(context.nginx_configs, has_length(1))
def test_ssl_config_doesnt_work_if_ssl_disabled(self):
    """With upload_ssl off, the collected config carries no ssl certificates."""
    # disable ssl upload (the old comment here wrongly said "set to True")
    context.app_config['containers']['nginx']['upload_ssl'] = False

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    cfg_collector = nginx.collectors[0]
    cfg_collector.collect()

    config = nginx.configd.current
    assert_that(config['data']['ssl_certificates'], has_length(0))
def test_find_all(self):
    """_find_all matches the supervisord/nginx process tree reported by ps."""
    out = subp.call(
        'ps xao pid,ppid,command | grep "supervisor[d]" | tr -s " "')[0]
    supervisors = [map(int, line.strip().split()[:2])
                   for line in out if 'supervisord' in line]
    assert_that(supervisors, has_length(1))
    supervisor_pid, supervisor_ppid = supervisors[0]
    assert_that(supervisor_ppid, equal_to(1))  # supervisord runs under init

    time.sleep(2)

    out = subp.call(
        'ps xao pid,ppid,command | grep "nginx[:]" | tr -s " "')[0]
    masters = [map(int, line.strip().split()[:2])
               for line in out if 'nginx: master process' in line]
    assert_that(masters, has_length(1))
    master_pid, master_ppid = masters[0]
    assert_that(master_ppid, equal_to(supervisor_pid))

    # every worker must be a child of the master
    worker_pids = []
    workers = [map(int, line.strip().split()[:2])
               for line in out if 'nginx: worker process' in line]
    for worker_pid, worker_ppid in workers:
        worker_pids.append(worker_pid)
        assert_that(worker_ppid, equal_to(master_pid))

    manager = NginxManager()
    nginxes = manager._find_all()
    assert_that(nginxes, has_length(1))

    definition, data = nginxes.pop(0)
    assert_that(data, has_key('pid'))
    assert_that(data, has_key('workers'))
    assert_that(master_pid, equal_to(data['pid']))
    assert_that(worker_pids, equal_to(data['workers']))
def setup_method(self, method):
    """Discover system and nginx objects and cache their metrics collectors."""
    super(PsutilsTestCase, self).setup_method(method)

    self.system_manager = SystemManager()
    self.system_manager._discover_objects()

    self.nginx_manager = NginxManager()
    self.nginx_manager._discover_objects()

    # system metrics collector is the second collector on the system object
    system_id = self.system_manager.objects.objects_by_type[
        self.system_manager.type][0]
    self.system_obj = self.system_manager.objects.objects[system_id]
    self.system_metrics_collector = self.system_obj.collectors[1]

    # nginx metrics collector is the third collector on the nginx object
    nginx_id = self.nginx_manager.objects.objects_by_type[
        self.nginx_manager.type][0]
    self.nginx_obj = self.nginx_manager.objects.objects[nginx_id]
    self.nginx_metrics_collector = self.nginx_obj.collectors[2]
def test_plus_api_unsupported_and_fallback_to_status(self):
    """api_enabled goes False when no supported API version is found,
    and metrics fall back to the old plus status endpoint.
    """
    plus.SUPPORTED_API_VERSIONS = [0]

    time.sleep(1)
    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # api_enabled should be set to false in metrics collector __init__
    assert_that(nginx.api_enabled, equal_to(False))
    assert_that(nginx.plus_status_enabled, equal_to(True))

    collector = nginx.collectors[2]

    # run twice - counters only appear on the second run
    collector.global_metrics()
    time.sleep(1)
    collector.global_metrics()

    stats = nginx.statsd.current

    # check counters
    assert_that(stats, has_item('counter'))
    counters = stats['counter']
    for name in ('nginx.http.conn.accepted',
                 'nginx.http.request.count',
                 'nginx.http.conn.dropped',
                 'plus.http.ssl.handshakes',
                 'plus.http.ssl.failed',
                 'plus.http.ssl.reuses'):
        assert_that(counters, has_item(name))

    # plus.proc.respawned is api-only - its absence proves the fallback
    # to the old plus status actually happened
    assert_that(counters, not_(has_item('plus.proc.respawned')))

    # check gauges
    assert_that(stats, has_item('gauge'))
    gauges = stats['gauge']
    for name in ('nginx.http.conn.active',
                 'nginx.http.conn.current',
                 'nginx.http.conn.idle',
                 'nginx.http.request.current'):
        assert_that(gauges, has_item(name))
def test_plus_status_discovery(self):
    """Plus nginx exposes two status urls: an external one (server name,
    for web links) and an internal one (local, used by the agent).
    """
    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.objects_by_type[manager.type], has_length(1))

    obj_id = manager.objects.objects_by_type[manager.type][0]
    nginx = manager.objects.objects[obj_id]

    # check all plus status urls
    assert_that(nginx.plus_status_enabled, equal_to(True))
    assert_that(nginx.plus_status_internal_url,
                equal_to('https://127.0.0.1:443/plus_status'))
    assert_that(nginx.plus_status_external_url,
                equal_to('http://status.naas.nginx.com:443/plus_status_bad'))
def test_skip_parse_until_change(self):
    """
    Checks that NginxConfig.full_parse() runs only when the set of config
    files actually changes; a change in the directories record alone must
    not trigger a re-parse.
    """
    manager = NginxManager()

    # wrap NginxConfig.full_parse with a method that counts how many times it's been called
    from amplify.agent.objects.nginx.config.config import NginxConfig

    def count_full_parse_calls(config_obj):
        NginxConfig.__full_parse_calls += 1
        config_obj.__full_parse()

    # NOTE: the double-underscore names are mangled against the enclosing
    # test class at compile time, so the wrapper above and the assertions
    # below all agree on the same attribute name.
    NginxConfig.__full_parse_calls = 0
    NginxConfig.__full_parse = NginxConfig.full_parse
    NginxConfig.full_parse = count_full_parse_calls

    try:
        # discover the NGINX object and check that the config has been fully parsed once
        manager._discover_objects()
        nginx_obj = manager.objects.objects[manager.objects.objects_by_type[manager.type][0]]
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))

        # get the NginxConfig collector
        cfg_collector = nginx_obj.collectors[0]

        # check that NginxConfig.full_parse is not called again during collect
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))
        cfg_collector.collect(no_delay=True)
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))

        # change the collector's previous files record so that it will call full_parse
        cfg_collector.previous['files'] = {}
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(2))
        cfg_collector.collect(no_delay=True)
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(2))

        # change the collector's previous directories record and check that it does not call full_parse
        cfg_collector.previous['directories'] = {}
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(2))
        cfg_collector.collect(no_delay=True)
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(2))
    finally:
        # un-patch full_parse so the counting wrapper does not leak into
        # other tests in this module
        NginxConfig.full_parse = NginxConfig.__full_parse
def test_plus_status_cache_limit(self):
    time.sleep(1)  # Give N+ some time to start
    manager = NginxManager()
    manager._discover_objects()
    discovered_ids = manager.objects.objects_by_type[manager.type]
    assert_that(discovered_ids, has_length(1))

    # the single discovered nginx object
    nginx_obj = manager.objects.objects[discovered_ids[0]]

    # the metrics collector is the third collector in the list
    metrics_collector = nginx_obj.collectors[2]

    # poll plus status four times in a row
    for _ in xrange(4):
        metrics_collector.plus_status()
        time.sleep(1)

    # the cache must keep no more than three entries per status url
    assert_that(context.plus_cache['https://127.0.0.1:443/plus_status'], has_length(3))
def test_collect_object_status(self):
    manager = NginxManager()
    manager._discover_objects()
    object_ids = manager.objects.objects_by_type[manager.type]
    assert_that(object_ids, has_length(1))
    nginx_obj = manager.objects.objects[object_ids[0]]

    # the metrics collector is the third collector in the list
    metrics_collector = nginx_obj.collectors[2]
    metrics_collector.collect()

    # every reported nginx.status gauge value should be 1 (running)
    metrics = nginx_obj.statsd.current
    gauges = metrics['gauge']
    assert_that(gauges, has_item('nginx.status'))
    for timestamp_value in gauges['nginx.status']:
        assert_that(timestamp_value[1], equal_to(1))
def test_find_none(self):
    # Kill running NGINX so that it finds None
    subp.call('pgrep nginx |sudo xargs kill -SIGKILL', check=False)
    self.running = False

    # Setup dummy object
    context.objects.register(DummyRootObject())

    manager = NginxManager()
    assert_that(manager._find_all(), has_length(0))

    # failing to find nginx should be reported as an event on the root object
    assert_that(context.objects.root_object.eventd.current, has_length(1))

    # Reset objects...
    context.objects = None
    context._setup_object_tank()
def test_plus_status_cache(self):
    time.sleep(1)  # Give N+ some time to start
    manager = NginxManager()
    manager._discover_objects()
    discovered_ids = manager.objects.objects_by_type[manager.type]
    assert_that(discovered_ids, has_length(1))

    # the single discovered nginx object
    nginx_obj = manager.objects.objects[discovered_ids[0]]

    # the metrics collector is the third collector in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus status - twice, because counters will appear only on the second run
    metrics_collector.plus_status()
    time.sleep(1)
    metrics_collector.plus_status()

    # after polling, the cache for the status url must not be empty
    assert_that(context.plus_cache['https://127.0.0.1:443/plus_status'], not_(has_length(0)))
def test_discover(self):
    nginx_manager = NginxManager()
    nginx_manager._discover_objects()
    nginx_ids = nginx_manager.objects.objects_by_type[nginx_manager.type]
    assert_that(nginx_ids, has_length(1))

    # the single discovered nginx object
    nginx_obj = nginx_manager.objects.objects[nginx_ids[0]]

    # the metrics collector is the third collector in the list
    metrics_collector = nginx_obj.collectors[2]

    # run plus status - twice, because counters will appear only on the second run
    metrics_collector.plus_status()
    time.sleep(1)
    metrics_collector.plus_status()

    # the plus manager should now discover the plus objects fed by the cached status data
    plus_manager = PlusManager()
    plus_manager._discover_objects()
    assert_that(plus_manager.objects.find_all(types=plus_manager.types), has_length(2))
def test_reload(self):
    old_master, old_workers = self.get_master_workers()

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.find_all(types=manager.types), has_length(1))

    obj = manager.objects.find_all(types=manager.types)[0]
    # The following assertion is unreliable for some reason.
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, equal_to(old_workers))

    self.reload_nginx()
    new_master, new_workers = self.get_master_workers()

    # a reload keeps the master process but replaces the workers
    assert_that(new_master, equal_to(old_master))

    manager._discover_objects()
    obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, not_(equal_to(old_workers)))
    assert_that(obj.workers, equal_to(new_workers))
def test_restart(self):
    old_master, old_workers = self.get_master_workers()

    manager = NginxManager()
    manager._discover_objects()
    assert_that(manager.objects.find_all(types=manager.types), has_length(1))

    obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(obj.pid, equal_to(old_master))
    assert_that(obj.workers, equal_to(old_workers))

    self.restart_nginx()
    new_master, new_workers = self.get_master_workers()

    # a full restart replaces both the master process and the workers
    manager._discover_objects()
    assert_that(manager.objects.find_all(types=manager.types), has_length(1))

    obj = manager.objects.find_all(types=manager.types)[0]
    assert_that(obj.pid, not_(equal_to(old_master)))
    assert_that(obj.pid, equal_to(new_master))
    assert_that(obj.workers, not_(equal_to(old_workers)))
    assert_that(obj.workers, equal_to(new_workers))
def test_test_run_time(self):
    manager = NginxManager()
    manager._discover_objects()
    nginx_obj = manager.objects.objects[manager.objects.objects_by_type[manager.type][0]]
    cfg_collector = nginx_obj.collectors[2]
    assert_that(nginx_obj.run_config_test, equal_to(True))

    # set maximum run time for test to 0.0
    context.app_config['containers']['nginx']['max_test_duration'] = 0.0

    # run collect; the config test now always exceeds the allowed duration,
    # so further config tests get disabled
    cfg_collector.collect()
    assert_that(nginx_obj.run_config_test, equal_to(False))

    # the slow config test should have been reported as an event
    messages = [event.message for event in nginx_obj.eventd.current.values()]
    assert_that(messages, has_item(starts_with('/usr/sbin/nginx -t -c /etc/nginx/nginx.conf took')))
def test_skip_parse_until_change(self):
    """
    Checks that NginxConfig.full_parse() runs only once at discovery and
    again only after the collector's files record changes.
    """
    manager = NginxManager()

    # wrap NginxConfig.full_parse with a method that counts how many times it's been called
    from amplify.agent.objects.nginx.config.config import NginxConfig

    def count_full_parse_calls(config_obj):
        NginxConfig.__full_parse_calls += 1
        config_obj.__full_parse()

    # NOTE: the double-underscore names are mangled against the enclosing
    # test class at compile time, so the wrapper above and the assertions
    # below all agree on the same attribute name.
    NginxConfig.__full_parse_calls = 0
    NginxConfig.__full_parse = NginxConfig.full_parse
    NginxConfig.full_parse = count_full_parse_calls

    try:
        # discover the NGINX object and check that the config has been fully parsed once
        manager._discover_objects()
        nginx_obj = manager.objects.objects[manager.objects.objects_by_type[manager.type][0]]
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))

        # get the NginxConfig collector
        cfg_collector = nginx_obj.collectors[0]

        # check that NginxConfig.full_parse is not called again during collect
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))
        cfg_collector.collect(no_delay=True)
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))

        # change the collector's previous files record so that it will call full_parse
        cfg_collector.previous['files'] = {}
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(2))
        cfg_collector.collect(no_delay=True)
        cfg_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(2))
    finally:
        # un-patch full_parse so the counting wrapper does not leak into
        # other tests in this module
        NginxConfig.full_parse = NginxConfig.__full_parse
def setup_method(self, method):
    super(PsutilsTestCase, self).setup_method(method)

    def primary_object(manager):
        # the single object a manager discovered for its own type
        return manager.objects.objects[manager.objects.objects_by_type[manager.type][0]]

    self.system_manager = SystemManager()
    self.system_manager._discover_objects()
    self.nginx_manager = NginxManager()
    self.nginx_manager._discover_objects()

    # cache the discovered objects and their metrics collectors (index 1)
    self.system_obj = primary_object(self.system_manager)
    self.system_metrics_collector = self.system_obj.collectors[1]

    self.nginx_obj = primary_object(self.nginx_manager)
    self.nginx_metrics_collector = self.nginx_obj.collectors[1]
def test_skip_parse_on_reload(self):
    """
    Checks that a reload-triggered object restart does not cause a config
    re-parse unless the config files themselves changed.
    """
    # wrap NginxConfig.full_parse with a method that counts how many times it's been called
    from amplify.agent.objects.nginx.config.config import NginxConfig

    def count_full_parse_calls(config_obj):
        NginxConfig.__full_parse_calls += 1
        config_obj.__full_parse()

    # NOTE: the double-underscore names are mangled against the enclosing
    # test class at compile time, so the wrapper above and the assertions
    # below all agree on the same attribute name.
    NginxConfig.__full_parse_calls = 0
    NginxConfig.__full_parse = NginxConfig.full_parse
    NginxConfig.full_parse = count_full_parse_calls

    try:
        manager = NginxManager()
        manager._discover_objects()

        # check that the config has only been parsed once (at startup)
        nginx_obj = manager.objects.find_all(types=manager.types)[0]
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))

        # reload nginx and discover objects again so manager will recognize it
        self.reload_nginx()
        time.sleep(2)
        manager._discover_objects()

        # metrics collector will cause the nginx object to need a restart because pids have changed
        metrics_collector = nginx_obj.collectors[2]
        metrics_collector.collect(no_delay=True)
        manager._discover_objects()

        # check that the config was not parsed again after the restart
        nginx_obj = manager.objects.find_all(types=manager.types)[0]
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))

        # check that the new nginx object's config collector won't call full_parse
        config_collector = nginx_obj.collectors[0]
        config_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(1))

        # check that the config collector will still call full parse if config changes
        config_collector.previous['files'] = {}
        config_collector.collect(no_delay=True)
        assert_that(NginxConfig.__full_parse_calls, equal_to(2))
    finally:
        # un-patch full_parse so the counting wrapper does not leak into
        # other tests in this module
        NginxConfig.full_parse = NginxConfig.__full_parse
class PsutilsTestCase(RealNginxTestCase):
    """
    Overall tests for our calls to psutil, making sure they work.

    Each test asserts that a psutil-backed collector method can be called
    without raising; no metric values are checked here.
    """
    def setup_method(self, method):
        # Discover the system and nginx objects once per test and cache
        # their metrics collectors (index 1 in each collector list).
        super(PsutilsTestCase, self).setup_method(method)
        self.system_manager = SystemManager()
        self.system_manager._discover_objects()
        self.nginx_manager = NginxManager()
        self.nginx_manager._discover_objects()
        self.system_obj = self.system_manager.objects.objects[
            self.system_manager.objects.objects_by_type[self.system_manager.type][0]
        ]
        self.system_metrics_collector = self.system_obj.collectors[1]
        self.nginx_obj = self.nginx_manager.objects.objects[
            self.nginx_manager.objects.objects_by_type[self.nginx_manager.type][0]
        ]
        self.nginx_metrics_collector = self.nginx_obj.collectors[1]

    def teardown_method(self, method):
        # Drop manager references so each test starts from a clean slate.
        self.system_manager = None
        self.nginx_manager = None
        super(PsutilsTestCase, self).teardown_method(method)

    def test_system_virtual_memory(self):
        assert_that(calling(self.system_metrics_collector.virtual_memory), not_(raises(Exception)))

    def test_system_swap(self):
        assert_that(calling(self.system_metrics_collector.swap), not_(raises(Exception)))

    def test_system_cpu(self):
        assert_that(calling(self.system_metrics_collector.cpu), not_(raises(Exception)))

    def test_system_disk_partitions(self):
        assert_that(calling(self.system_metrics_collector.disk_partitions), not_(raises(Exception)))

    def test_system_disk_io_counters(self):
        assert_that(calling(self.system_metrics_collector.disk_io_counters), not_(raises(Exception)))

    def test_system_net_io_counters(self):
        assert_that(calling(self.system_metrics_collector.net_io_counters), not_(raises(Exception)))

    def test_nginx_memory_info(self):
        assert_that(calling(self.nginx_metrics_collector.memory_info), not_(raises(Exception)))

    def test_nginx_workers_fds_count(self):
        assert_that(calling(self.nginx_metrics_collector.workers_fds_count), not_(raises(Exception)))

    # These next two tests have to be skipped due to calls to .handle_zombie() which raises a hamcrest exception.
    @future_test
    def test_nginx_workers_rlimit_nofile(self):
        assert_that(calling(self.nginx_metrics_collector.workers_rlimit_nofile), not_(raises(Exception)))

    @future_test
    def test_nginx_workers_io(self):
        assert_that(calling(self.nginx_metrics_collector.workers_io), not_(raises(Exception)))

    def test_nginx_workers_cpu(self):
        assert_that(calling(self.nginx_metrics_collector.workers_cpu), not_(raises(Exception)))