import json
from unittest import mock

import pytest

from powerfulseal.k8s.metrics_server_client import MetricsServerClient


def test_parse_memory_string():
    metrics_server = MetricsServerClient(None)
    assert metrics_server.parse_memory_string('68896') == 68896
    assert metrics_server.parse_memory_string('68896Mi') == 68896000000
    assert metrics_server.parse_memory_string('68896Ki') == 68896000
    assert metrics_server.parse_memory_string('68Gi') == 68000000000
    assert metrics_server.parse_memory_string('2Ti') == 2000000000000
    assert metrics_server.parse_memory_string('1') == 1
    assert metrics_server.parse_memory_string('0') == 0
    assert metrics_server.parse_memory_string('16') == 16
    with pytest.raises(NotImplementedError):
        metrics_server.parse_memory_string('6889N')
    with pytest.raises(ValueError):
        metrics_server.parse_memory_string('688N9')
    with pytest.raises(KeyError):
        metrics_server.parse_memory_string('688Ei')
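

# The assertions above pin down the parsing rules: bare digits are returned
# as-is, two-letter suffixes use decimal multipliers (Ki -> 1e3, Mi -> 1e6,
# Gi -> 1e9, Ti -> 1e12), single-letter suffixes are unsupported, and unknown
# two-letter units fail on dictionary lookup. The sketch below is only an
# illustration consistent with those assertions -- the names are hypothetical
# and it is not the actual MetricsServerClient implementation.
_MEMORY_MULTIPLIERS = {
    'Ki': 10 ** 3,
    'Mi': 10 ** 6,
    'Gi': 10 ** 9,
    'Ti': 10 ** 12,
}


def _reference_parse_memory_string(memory_string):
    if memory_string[-1].isdigit():
        # No unit suffix: a plain byte count ('688N9' raises ValueError here)
        return int(memory_string)
    if memory_string[-2].isdigit():
        # Single-letter suffixes such as 'N' are not supported
        raise NotImplementedError(memory_string)
    # Two-letter suffix: unknown units such as 'Ei' raise KeyError on lookup
    return int(memory_string[:-2]) * _MEMORY_MULTIPLIERS[memory_string[-2:]]

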
def test_parse_cpu_string():
    metrics_server = MetricsServerClient(None)
    assert metrics_server.parse_cpu_string('1') == pytest.approx(1)
    assert metrics_server.parse_cpu_string('100') == pytest.approx(100)
    assert metrics_server.parse_cpu_string('1n') == pytest.approx(0.000000001)
    assert metrics_server.parse_cpu_string('10n') == pytest.approx(0.00000001)
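

# Likewise, a minimal illustrative sketch (hypothetical name, not the library's
# code) of CPU parsing consistent with the assertions above: bare numbers are
# cores, an 'n' suffix means nanocores.
def _reference_parse_cpu_string(cpu_string):
    if cpu_string.endswith('n'):
        # nanocores -> cores
        return int(cpu_string[:-1]) / 1000000000
    return float(cpu_string)

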
def test_get_pod_metrics():
    metrics_server = MetricsServerClient(None)

    def mocked_response(*args):
        mocked = mock.MagicMock()
        mocked.status_code = 200
        mocked.json = lambda: json.loads("""
            {
                "metadata": {},
                "items": [
                    {
                        "metadata": {
                            "name": "abc",
                            "namespace": "default"
                        },
                        "containers": [
                            {
                                "name": "container-1a",
                                "usage": {
                                    "cpu": "2000n",
                                    "memory": "68896Ki"
                                }
                            },
                            {
                                "name": "container-1b",
                                "usage": {
                                    "cpu": "300n",
                                    "memory": "2Ki"
                                }
                            }
                        ]
                    },
                    {
                        "metadata": {
                            "name": "def",
                            "namespace": "default"
                        },
                        "containers": [
                            {
                                "name": "container-2a",
                                "usage": {
                                    "cpu": "207691n",
                                    "memory": "36840Ki"
                                }
                            }
                        ]
                    }
                ]
            }
        """)
        return mocked

    with mock.patch(
        'powerfulseal.k8s.metrics_server_client.requests.get',
        side_effect=mocked_response,
    ):
        result = metrics_server.get_pod_metrics()

    assert len(result) == 1 and 'default' in result
    assert len(result['default']) == 2
    assert result == {
        'default': {
            'abc': {
                'cpu': 0.0000023,
                'memory': 68898000
            },
            'def': {
                'cpu': 0.000207691,
                'memory': 36840000
            }
        }
    }
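

# The expected result above implies that get_pod_metrics sums container usage
# per pod and groups pods by namespace. The helper below sketches that
# aggregation for a metrics.k8s.io PodMetricsList payload; it is illustrative
# only (hypothetical name, reusing the reference parsers above), not the
# actual client code.
def _reference_aggregate_pod_metrics(pod_metrics):
    pods = {}
    for item in pod_metrics['items']:
        namespace = item['metadata']['namespace']
        name = item['metadata']['name']
        usage = {'cpu': 0, 'memory': 0}
        for container in item['containers']:
            usage['cpu'] += _reference_parse_cpu_string(container['usage']['cpu'])
            usage['memory'] += _reference_parse_memory_string(container['usage']['memory'])
        pods.setdefault(namespace, {})[name] = usage
    return pods

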
def main(argv):
    """
        The main function to invoke the powerfulseal cli
    """
    args = parse_args(args=argv)
    if args.mode is None:
        return parse_args(['--help'])

    ##########################################################################
    # VALIDATE POLICY MODE
    ##########################################################################
    if args.mode == 'validate':
        policy = PolicyRunner.load_file(args.policy_file)
        if PolicyRunner.is_policy_valid(policy):
            return print('OK')
        print("Policy not valid. See log output above.")
        return sys.exit(1)

    ##########################################################################
    # LOGGING
    ##########################################################################
    # Ensure the logger config propagates from the root module of this package
    logger = logging.getLogger(__name__.split('.')[0])

    # The default level should be set to logging.DEBUG to ensure that the stdout
    # stream handler can filter to the user-specified verbosity level while the
    # server logging handler can receive all logs
    logger.setLevel(logging.DEBUG)

    # Configure logging for stdout
    if not args.verbose:
        log_level = logging.ERROR
    elif args.verbose == 1:
        log_level = logging.WARNING
    elif args.verbose == 2:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG

    stdout_handler = logging.StreamHandler()
    stdout_handler.setLevel(log_level)
    logger.addHandler(stdout_handler)
    coloredlogs.install(logger=logger)

    my_verb = args.verbose
    logger.info(
        "modules %s : verbosity %s : log level %s : handler level %s",
        __name__,
        my_verb,
        logging.getLevelName(logger.getEffectiveLevel()),
        logging.getLevelName(log_level),
    )

    ##########################################################################
    # KUBERNETES
    ##########################################################################
    kube_config = parse_kubeconfig(args)
    k8s_client = K8sClient(kube_config=kube_config)
    k8s_inventory = K8sInventory(
        k8s_client=k8s_client,
        delete_pods=args.use_pod_delete_instead_of_ssh_kill,
    )

    ##########################################################################
    # CLOUD DRIVER
    ##########################################################################
    if args.openstack:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.openstack_cloud_name)
    elif args.aws:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    elif args.azure:
        logger.info("Building Azure driver")
        driver = AzureDriver(
            cluster_rg_name=args.azure_resource_group_name,
            cluster_node_rg_name=args.azure_node_resource_group_name,
        )
    elif args.gcp:
        logger.info("Building GCP driver")
        driver = GCPDriver(config=args.gcp_config_file)
    else:
        logger.info("No driver - some functionality disabled")
        driver = NoCloudDriver()

    ##########################################################################
    # INVENTORY
    ##########################################################################
    if args.inventory_file:
        logger.info("Reading inventory from %s", args.inventory_file)
        groups_to_restrict_to = read_inventory_file_to_dict(args.inventory_file)
    else:
        logger.info("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s", groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    ##########################################################################
    # SSH EXECUTOR
    ##########################################################################
    if args.use_private_ip:
        logger.info("Using each node's private IP address")
    executor = RemoteExecutor(
        user=args.remote_user,
        ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
        ssh_path_to_private_key=args.ssh_path_to_private_key,
        override_host=args.override_ssh_host,
        ssh_password=args.ssh_password,
        use_private_ip=args.use_private_ip,
        ssh_kill_command=args.ssh_kill_command,
    )

    ##########################################################################
    # INTERACTIVE MODE
    ##########################################################################
    if args.mode == 'interactive':
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        logger.info("STARTING INTERACTIVE MODE")
        while True:
            try:
                cmd.cmdloop()
            except GeneratorExit:
                print("Exiting")
                sys.exit(0)
            except KeyboardInterrupt:
                print()
                print("Ctrl-c again to quit")
                try:
                    six.moves.input()
                except KeyboardInterrupt:
                    sys.exit(0)
        return

    ##########################################################################
    # METRICS
    ##########################################################################
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        flask_debug = os.environ.get("FLASK_DEBUG")
        flask_env = os.environ.get("FLASK_ENVIROMENT")
        if flask_debug is not None or (
                flask_env is not None and flask_env != "production"):
            logger.error(
                "PROMETHEUS METRICS NOT SUPPORTED WHEN USING FLASK RELOAD. NOT STARTING THE SERVER"
            )
        else:
            logger.info("Starting prometheus metrics server on %s", args.prometheus_port)
            start_http_server(args.prometheus_port, args.prometheus_host)
            metric_collector = PrometheusCollector()
    elif args.datadog_collector:
        logger.info("Starting datadog collector")
        metric_collector = DatadogCollector()
    else:
        logger.info("Using stdout metrics collector")

    ##########################################################################
    # AUTONOMOUS MODE
    ##########################################################################
    if args.mode == 'autonomous':
        # read and validate the policy
        policy = PolicyRunner.load_file(args.policy_file)
        if not PolicyRunner.is_policy_valid(policy):
            logger.error("Policy not valid. See log output above.")
See log output above.") return sys.exit(1) # run the metrics server if requested if not args.headless: # Create an instance of the singleton server state, ensuring all logs # for retrieval from the web interface state = ServerState( policy, inventory, k8s_inventory, driver, executor, args.host, args.port, args.policy_file, metric_collector=metric_collector, ) server_log_handler = ServerStateLogHandler() server_log_handler.setLevel(log_level) logger.addHandler(server_log_handler) state.start_policy_runner() # start the server logger.info("Starting the UI server") start_server(args.host, args.port, args.accept_proxy_headers) else: logger.info("NOT starting the UI server") logger.info("STARTING AUTONOMOUS MODE") PolicyRunner.run(policy, inventory, k8s_inventory, driver, executor, metric_collector=metric_collector) ########################################################################## # LABEL MODE ########################################################################## elif args.mode == 'label': label_runner = LabelRunner( inventory, k8s_inventory, driver, executor, min_seconds_between_runs=args.min_seconds_between_runs, max_seconds_between_runs=args.max_seconds_between_runs, namespace=args.kubernetes_namespace, metric_collector=metric_collector, ) logger.info("STARTING LABEL MODE") label_runner.run() ########################################################################## # DEMO MODE ########################################################################## elif args.mode == 'demo': aggressiveness = int(args.aggressiveness) if not 1 <= aggressiveness <= 5: print("Aggressiveness must be between 1 and 5 inclusive") sys.exit(1) metrics_server_client = MetricsServerClient(args.metrics_server_path) demo_runner = DemoRunner( inventory, k8s_inventory, driver, executor, metrics_server_client, aggressiveness=aggressiveness, min_seconds_between_runs=args.min_seconds_between_runs, max_seconds_between_runs=args.max_seconds_between_runs, namespace=args.kubernetes_namespace, metric_collector=metric_collector, ) logger.info("STARTING DEMO MODE") demo_runner.run()