def test_policy_runner_lifecycle():
    """Integration test: the policy runner starts, reports running, and stops."""
    chaos_policy = {
        'config': {
            'minSecondsBetweenRuns': 0,
            'maxSecondsBetweenRuns': 2,
        },
        'nodeScenarios': [{'name': 'Node Test'}],
        'podScenarios': [{'name': 'Pod Test'}],
    }
    fake_inventory = MagicMock()
    fake_inventory.sync = MagicMock(return_value=None)
    state = ServerState(chaos_policy, fake_inventory, None, None,
                        RemoteExecutor(), None, None, None)

    # Runner is idle until explicitly started
    assert state.is_policy_runner_running() is False

    state.start_policy_runner()
    time.sleep(1)
    assert state.is_policy_runner_running() is True

    state.stop_policy_runner()
    time.sleep(1)
    assert state.is_policy_runner_running() is False
def test_update_policy():
    """update_policy persists the new policy to its file and swaps it in memory."""
    policy_path = 'test.yml'
    replacement = {'test': 'test'}
    state = ServerState({}, None, None, None, None, None, None, policy_path)

    opener = mock_open()
    with mock.patch('powerfulseal.web.server.open', opener, create=True):
        state.update_policy(replacement)

    # Exactly one write-mode open of the policy file, and the in-memory copy updated
    opener.assert_called_once_with(policy_path, 'w')
    assert state.policy == replacement
def test_kill_pod():
    """kill_pod resolves the pod's node and docker-kills its container over SSH."""
    state = ServerState(None, NodeInventory(None), None, None,
                        RemoteExecutor(), None, None, None)

    # Stub out node lookup so the kill command targets a known node
    victim_node = Node(1)
    state.inventory.get_node_by_ip = MagicMock(return_value=victim_node)

    # Stub out the remote execution so we can inspect the command
    run_command = MagicMock()
    state.executor.execute = run_command

    pod = MagicMock()
    pod.container_ids = ["docker://container1"]
    state.kill_pod(pod, True)

    run_command.assert_called_once_with(
        "sudo docker kill -s SIGKILL container1", nodes=[victim_node])
def test_autonomous_mode_integration(client):
    """Exercise the autonomous-mode API: status, start, double-start, stop, double-stop."""
    chaos_policy = {
        'config': {
            'minSecondsBetweenRuns': 0,
            'maxSecondsBetweenRuns': 2,
        },
        'nodeScenarios': [{'name': 'Node Test'}],
        'podScenarios': [{'name': 'Pod Test'}],
    }
    fake_inventory = MagicMock()
    fake_inventory.sync = MagicMock(return_value=None)
    server_state = ServerState(chaos_policy, fake_inventory, None, None,
                               RemoteExecutor(), None, None, None)

    def is_started():
        # Read the current autonomous-mode status flag from the API
        response = client.get('/api/autonomous-mode')
        return json.loads(response.data.decode("utf-8"))['isStarted']

    def post_action(action):
        # Issue a start/stop action against the API
        return client.post('/api/autonomous-mode',
                           data=json.dumps({'action': action}),
                           content_type='application/json')

    # Not started yet, so stopping is a precondition failure
    assert is_started() is False
    assert post_action('stop').status_code == 412

    # Starting succeeds exactly once; a second start is rejected
    assert post_action('start').status_code == 200
    assert is_started() is True
    assert post_action('start').status_code == 412

    # Stopping succeeds and returns to the idle state
    assert post_action('stop').status_code == 200
    assert is_started() is False
def test_is_policy_valid_validates():
    """is_policy_valid accepts the bundled example policy and rejects a bad one."""
    good_path = pkg_resources.resource_filename(
        "tests.policy", "example_config.yml")
    good_policy = PolicyRunner.load_file(good_path)
    bad_policy = {'config': {'minSecondsBetweenRuns': 'invalid'}}

    state = ServerState(good_policy, None, None, None, None, None, None, None)
    assert state.is_policy_valid()

    state = ServerState(bad_policy, None, None, None, None, None, None, None)
    assert not state.is_policy_valid()
def main(argv):
    """
    The main function to invoke the powerfulseal cli

    Dispatches on ``args.mode`` (validate / interactive / autonomous /
    label / demo) after building the pieces shared by all modes:
    logging, the kubernetes client + inventory, a cloud driver, the
    node inventory and the SSH executor.
    """
    args = parse_args(args=argv)
    if args.mode is None:
        # No subcommand given: show the usage text and return.
        return parse_args(['--help'])

    ##########################################################################
    # VALIDATE POLICY MODE
    ##########################################################################
    if args.mode == 'validate':
        policy = PolicyRunner.load_file(args.policy_file)
        if PolicyRunner.is_policy_valid(policy):
            return print('OK')
        print("Policy not valid. See log output above.")
        # sys.exit raises SystemExit, so the surrounding `return` never runs
        return sys.exit(1)

    ##########################################################################
    # LOGGING
    ##########################################################################
    # Ensure the logger config propagates from the root module of this package
    logger = logging.getLogger(__name__.split('.')[0])
    # The default level should be set to logging.DEBUG to ensure that the stdout
    # stream handler can filter to the user-specified verbosity level while the
    # server logging handler can receive all logs
    logger.setLevel(logging.DEBUG)

    # Configure logging for stdout: each extra -v raises verbosity
    if not args.verbose:
        log_level = logging.ERROR
    elif args.verbose == 1:
        log_level = logging.WARNING
    elif args.verbose == 2:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    stdout_handler = logging.StreamHandler()
    stdout_handler.setLevel(log_level)
    logger.addHandler(stdout_handler)
    coloredlogs.install(logger=logger)
    my_verb = args.verbose
    logger.info("modules %s : verbosity %s : log level %s : handler level %s ",
                __name__, my_verb,
                logging.getLevelName(logger.getEffectiveLevel()),
                logging.getLevelName(log_level))

    ##########################################################################
    # KUBERNETES
    ##########################################################################
    kube_config = parse_kubeconfig(args)
    k8s_client = K8sClient(kube_config=kube_config)
    # delete_pods switches pod-killing from SSH docker-kill to the k8s API
    k8s_inventory = K8sInventory(
        k8s_client=k8s_client,
        delete_pods=args.use_pod_delete_instead_of_ssh_kill)

    ##########################################################################
    # CLOUD DRIVER
    ##########################################################################
    if args.openstack:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.openstack_cloud_name, )
    elif args.aws:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    elif args.azure:
        logger.info("Building Azure driver")
        driver = AzureDriver(
            cluster_rg_name=args.azure_resource_group_name,
            cluster_node_rg_name=args.azure_node_resource_group_name,
        )
    elif args.gcp:
        logger.info("Building GCP driver")
        driver = GCPDriver(config=args.gcp_config_file)
    else:
        # No cloud credentials: node start/stop operations won't work
        logger.info("No driver - some functionality disabled")
        driver = NoCloudDriver()

    ##########################################################################
    # INVENTORY
    ##########################################################################
    if args.inventory_file:
        logger.info("Reading inventory from %s", args.inventory_file)
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file)
    else:
        logger.info("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    ##########################################################################
    # SSH EXECUTOR
    ##########################################################################
    # NOTE(review): the flag only logs here; the executor below is created
    # unconditionally and receives use_private_ip itself — confirm scope.
    if args.use_private_ip:
        logger.info("Using each node's private IP address")

    executor = RemoteExecutor(
        user=args.remote_user,
        ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
        ssh_path_to_private_key=args.ssh_path_to_private_key,
        override_host=args.override_ssh_host,
        ssh_password=args.ssh_password,
        use_private_ip=args.use_private_ip,
        ssh_kill_command=args.ssh_kill_command,
    )

    ##########################################################################
    # INTERACTIVE MODE
    ##########################################################################
    if args.mode == 'interactive':
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        logger.info("STARTING INTERACTIVE MODE")
        while True:
            try:
                cmd.cmdloop()
            except GeneratorExit:
                print("Exiting")
                sys.exit(0)
            except KeyboardInterrupt:
                # First Ctrl-C warns; a second one during input() quits
                print()
                print("Ctrl-c again to quit")
                try:
                    six.moves.input()
                except KeyboardInterrupt:
                    sys.exit(0)
        return

    ##########################################################################
    # METRICS
    ##########################################################################
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        flask_debug = os.environ.get("FLASK_DEBUG")
        # NOTE(review): "FLASK_ENVIROMENT" is likely a misspelling of
        # FLASK_ENVIRONMENT/FLASK_ENV — verify which variable deployments set.
        flask_env = os.environ.get("FLASK_ENVIROMENT")
        if flask_debug is not None or (flask_env is not None
                                       and flask_env != "production"):
            # The Flask reloader forks the process, which breaks the
            # prometheus_client HTTP server, so refuse to start it
            logger.error(
                "PROMETHEUS METRICS NOT SUPPORTED WHEN USING FLASK RELOAD. NOT STARTING THE SERVER"
            )
        else:
            logger.info("Starting prometheus metrics server on %s",
                        args.prometheus_port)
            start_http_server(args.prometheus_port, args.prometheus_host)
            metric_collector = PrometheusCollector()
    elif args.datadog_collector:
        logger.info("Starting datadog collector")
        metric_collector = DatadogCollector()
    else:
        logger.info("Using stdout metrics collector")

    ##########################################################################
    # AUTONOMOUS MODE
    ##########################################################################
    if args.mode == 'autonomous':
        # read and validate the policy
        policy = PolicyRunner.load_file(args.policy_file)
        if not PolicyRunner.is_policy_valid(policy):
            logger.error("Policy not valid. See log output above.")
            # sys.exit raises SystemExit; the `return` is never reached
            return sys.exit(1)

        # run the metrics server if requested
        if not args.headless:
            # Create an instance of the singleton server state, ensuring all logs
            # for retrieval from the web interface
            state = ServerState(
                policy,
                inventory,
                k8s_inventory,
                driver,
                executor,
                args.host,
                args.port,
                args.policy_file,
                metric_collector=metric_collector,
            )
            server_log_handler = ServerStateLogHandler()
            server_log_handler.setLevel(log_level)
            logger.addHandler(server_log_handler)
            # The runner executes in a background thread; the UI server blocks
            state.start_policy_runner()
            # start the server
            logger.info("Starting the UI server")
            start_server(args.host, args.port, args.accept_proxy_headers)
        else:
            logger.info("NOT starting the UI server")
            logger.info("STARTING AUTONOMOUS MODE")
            PolicyRunner.run(policy, inventory, k8s_inventory, driver,
                             executor, metric_collector=metric_collector)

    ##########################################################################
    # LABEL MODE
    ##########################################################################
    elif args.mode == 'label':
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING LABEL MODE")
        label_runner.run()

    ##########################################################################
    # DEMO MODE
    ##########################################################################
    elif args.mode == 'demo':
        aggressiveness = int(args.aggressiveness)
        if not 1 <= aggressiveness <= 5:
            print("Aggressiveness must be between 1 and 5 inclusive")
            sys.exit(1)

        metrics_server_client = MetricsServerClient(args.metrics_server_path)
        demo_runner = DemoRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            metrics_server_client,
            aggressiveness=aggressiveness,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING DEMO MODE")
        demo_runner.run()
def main(argv):
    """
    The main function to invoke the powerfulseal cli

    Dispatches on ``args.mode`` (validate / interactive / autonomous /
    label / demo) after building the shared machinery: logging, the
    kubernetes client + inventory, a cloud driver, the node inventory
    and the SSH executor.

    FIX: the original called ``os.exit(1)`` in three places; ``os`` has
    no ``exit`` attribute, so instead of exiting with status 1 the
    process died with an AttributeError. Replaced with ``sys.exit(1)``.
    """
    args = parse_args(args=argv)
    if args.mode is None:
        # No subcommand given: show the usage text and return.
        return parse_args(['--help'])

    ##########################################################################
    # VALIDATE POLICY MODE
    ##########################################################################
    if args.mode == 'validate':
        policy = PolicyRunner.load_file(args.policy_file)
        if PolicyRunner.is_policy_valid(policy):
            return print('OK')
        print("Policy not valid. See log output above.")
        # was `os.exit(1)` — AttributeError, not an exit
        sys.exit(1)

    ##########################################################################
    # LOGGING
    ##########################################################################
    # Ensure the logger config propagates from the root module of this package
    logger = logging.getLogger(__name__.split('.')[0])
    # The default level should be set to logging.DEBUG to ensure that the stdout
    # stream handler can filter to the user-specified verbosity level while the
    # server logging handler can receive all logs
    logger.setLevel(logging.DEBUG)

    # Configure logging for stdout: each extra -v raises verbosity
    if not args.verbose:
        log_level = logging.ERROR
    elif args.verbose == 1:
        log_level = logging.WARNING
    elif args.verbose == 2:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    stdout_handler = logging.StreamHandler()
    stdout_handler.setLevel(log_level)
    logger.addHandler(stdout_handler)
    coloredlogs.install(logger=logger)

    ##########################################################################
    # KUBERNETES
    ##########################################################################
    kube_config = args.kubeconfig
    logger.info("Creating kubernetes client with config %s", kube_config)
    k8s_client = K8sClient(kube_config=kube_config)
    k8s_inventory = K8sInventory(k8s_client=k8s_client)

    ##########################################################################
    # CLOUD DRIVER
    ##########################################################################
    if args.openstack:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.openstack_cloud_name, )
    elif args.aws:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    else:
        # No cloud credentials: node start/stop operations won't work
        logger.info("No driver - some functionality disabled")
        driver = NoCloudDriver()

    ##########################################################################
    # INVENTORY
    ##########################################################################
    if args.inventory_file:
        logger.info("Reading inventory from %s", args.inventory_file)
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file)
    else:
        logger.info("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    ##########################################################################
    # SSH EXECUTOR
    ##########################################################################
    executor = RemoteExecutor(
        user=args.remote_user,
        ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
        ssh_path_to_private_key=args.ssh_path_to_private_key,
        override_host=args.override_ssh_host,
    )

    ##########################################################################
    # INTERACTIVE MODE
    ##########################################################################
    if args.mode == 'interactive':
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        logger.info("STARTING INTERACTIVE MODE")
        while True:
            try:
                cmd.cmdloop()
            except KeyboardInterrupt:
                # First Ctrl-C warns; a second one during input() quits
                print()
                print("Ctrl-c again to quit")
                try:
                    input()
                except KeyboardInterrupt:
                    sys.exit(0)
        return

    ##########################################################################
    # METRICS
    ##########################################################################
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        logger.info("Starting prometheus metrics server on %s",
                    args.prometheus_port)
        start_http_server(args.prometheus_port, args.prometheus_host)
        metric_collector = PrometheusCollector()
    else:
        logger.info("Not starting prometheus collector")

    ##########################################################################
    # AUTONOMOUS MODE
    ##########################################################################
    if args.mode == 'autonomous':
        # read and validate the policy
        policy = PolicyRunner.load_file(args.policy_file)
        if not PolicyRunner.is_policy_valid(policy):
            # raised from logger.info to logger.error for consistency with
            # the other failure paths
            logger.error("Policy not valid. See log output above.")
            # was `os.exit(1)` — AttributeError, not an exit
            sys.exit(1)

        # run the metrics server if requested
        if not args.headless:
            # Create an instance of the singleton server state, ensuring all logs
            # for retrieval from the web interface
            state = ServerState(
                policy,
                inventory,
                k8s_inventory,
                driver,
                executor,
                args.host,
                args.port,
                args.policy_file,
                metric_collector=metric_collector,
            )
            server_log_handler = ServerStateLogHandler()
            server_log_handler.setLevel(log_level)
            logger.addHandler(server_log_handler)
            # The runner executes in a background thread; the UI server blocks
            state.start_policy_runner()
            # start the server
            logger.info("Starting the UI server")
            start_server(args.host, args.port)
        else:
            logger.info("NOT starting the UI server")
            logger.info("STARTING AUTONOMOUS MODE")
            PolicyRunner.run(policy, inventory, k8s_inventory, driver,
                             executor, metric_collector=metric_collector)

    ##########################################################################
    # LABEL MODE
    ##########################################################################
    elif args.mode == 'label':
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING LABEL MODE")
        label_runner.run()

    ##########################################################################
    # DEMO MODE
    ##########################################################################
    elif args.mode == 'demo':
        aggressiveness = int(args.aggressiveness)
        if not 1 <= aggressiveness <= 5:
            print("Aggressiveness must be between 1 and 5 inclusive")
            # was `os.exit(1)` — AttributeError, not an exit
            sys.exit(1)

        heapster_client = HeapsterClient(args.heapster_path)
        demo_runner = DemoRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            heapster_client,
            aggressiveness=aggressiveness,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING DEMO MODE")
        demo_runner.run()
def main(argv):
    """
    The main function to invoke the powerfulseal cli

    Configures logging, builds the cloud driver, kubernetes client,
    node inventory and SSH executor, then dispatches on the mutually
    exclusive flags: --server / --interactive / --demo / --label /
    --validate-policy-file / --run-policy-file.

    Fixes over the previous revision:
    * the kubeconfig debug log used ``%d`` with a string path, which
      made the logging call raise a formatting error — now ``%s``;
    * the aggressiveness check used bare ``exit()`` (exit status 0,
      and unavailable under ``python -S``) — now ``sys.exit(1)``;
    * ``--run-policy-file`` logged "Policy not valid" but then ran the
      invalid policy anyway — now returns without running it.
    """
    args = parse_args(args=argv)

    # Configure logging
    # Ensure the logger config propagates from the root module of this package
    logger = logging.getLogger(__name__.split('.')[0])
    # The default level should be set to logging.DEBUG to ensure that the stdout
    # stream handler can filter to the user-specified verbosity level while the
    # server logging handler can receive all logs
    logger.setLevel(logging.DEBUG)

    # Configure logging for stdout: each extra -v raises verbosity
    if not args.verbose:
        log_level = logging.ERROR
    elif args.verbose == 1:
        log_level = logging.WARNING
    elif args.verbose == 2:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    stdout_handler = logging.StreamHandler()
    stdout_handler.setLevel(log_level)
    logger.addHandler(stdout_handler)

    # build cloud provider driver
    logger.debug("Building the driver")
    if args.open_stack_cloud:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.open_stack_cloud_name, )
    elif args.aws_cloud:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    else:
        # No cloud credentials: node start/stop operations won't work
        logger.info("No driver - some functionality disabled")
        driver = NoCloudDriver()

    # build a k8s client
    kube_config = args.kube_config
    # FIX: was "%d" — kube_config is a path string, so the lazy %-format
    # raised a logging formatting error
    logger.debug("Creating kubernetes client with config %s", kube_config)
    k8s_client = K8sClient(kube_config=kube_config)
    k8s_inventory = K8sInventory(k8s_client=k8s_client)

    # read the local inventory
    logger.debug("Fetching the inventory")
    if args.inventory_file:
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file)
    else:
        logger.info("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    # create an executor
    executor = RemoteExecutor(
        user=args.remote_user,
        ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
        ssh_path_to_private_key=args.ssh_path_to_private_key,
    )

    # create the collector which defaults to StdoutCollector()
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        if not args.prometheus_host:
            raise argparse.ArgumentTypeError(
                "The Prometheus host must be specified with --prometheus-host")
        if not args.prometheus_port:
            raise argparse.ArgumentTypeError(
                "The Prometheus port must be specified with --prometheus-port")
        start_http_server(args.prometheus_port, args.prometheus_host)
        metric_collector = PrometheusCollector()

    if args.server:
        # If the policy file already exists, then it must be valid. Otherwise,
        # create the policy file and write a default, empty policy to it.
        try:
            if not (os.path.exists(args.run_policy_file)
                    and os.path.isfile(args.run_policy_file)):
                # Create a new policy file
                with open(args.run_policy_file, "w") as f:
                    policy = PolicyRunner.DEFAULT_POLICY
                    f.write(yaml.dump(policy, default_flow_style=False))
            else:
                policy = PolicyRunner.load_file(args.run_policy_file)
                if not PolicyRunner.is_policy_valid(policy):
                    print("Policy file exists but is not valid. Exiting.")
                    sys.exit(-1)
        except IOError:
            print("Unable to perform file operations. Exiting.")
            sys.exit(-1)

        # Create an instance of the singleton server state, ensuring all logs
        # for retrieval from the web interface
        ServerState(policy, inventory, k8s_inventory, driver, executor,
                    args.server_host, args.server_port, args.run_policy_file)
        server_log_handler = ServerStateLogHandler()
        server_log_handler.setLevel(logging.DEBUG)
        logger.addHandler(server_log_handler)
        start_server(args.server_host, int(args.server_port))

    elif args.interactive:
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        while True:
            try:
                cmd.cmdloop()
            except KeyboardInterrupt:
                # First Ctrl-C warns; a second one during input() quits
                print()
                print("Ctrl-c again to quit")
                try:
                    input()
                except KeyboardInterrupt:
                    sys.exit(0)

    elif args.demo:
        aggressiveness = int(args.aggressiveness)
        if not 1 <= aggressiveness <= 5:
            print("Aggressiveness must be between 1 and 5 inclusive")
            # FIX: was bare exit() — status 0 and unavailable under -S
            sys.exit(1)

        heapster_client = HeapsterClient(args.heapster_path)
        demo_runner = DemoRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            heapster_client,
            aggressiveness=aggressiveness,
            min_seconds_between_runs=int(args.min_seconds_between_runs),
            max_seconds_between_runs=int(args.max_seconds_between_runs),
            namespace=args.namespace)
        demo_runner.run()

    elif args.label:
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=int(args.min_seconds_between_runs),
            max_seconds_between_runs=int(args.max_seconds_between_runs),
            namespace=args.namespace)
        label_runner.run()

    elif args.validate_policy_file:
        policy = PolicyRunner.load_file(args.validate_policy_file)
        if PolicyRunner.is_policy_valid(policy):
            logger.info("All good, captain")
        else:
            logger.error("Policy not valid. See log output above.")

    elif args.run_policy_file:
        policy = PolicyRunner.load_file(args.run_policy_file)
        if not PolicyRunner.is_policy_valid(policy):
            logger.error("Policy not valid. See log output above.")
            # FIX: previously fell through and ran the invalid policy anyway
            return
        PolicyRunner.run(policy, inventory, k8s_inventory, driver, executor,
                         metric_collector=metric_collector)