def test_filter_day_time():
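    # filter_day_time should keep a pod only when the given timestamp falls inside
    # the window defined by its seal/start-time, seal/end-time and seal/days labels.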
    label_runner = LabelRunner(None, None, None, None)

    pod = Pod(name="test", namespace="test")
    pod.labels = {
        "seal/start-time": "10-00-00",
        "seal/end-time": "17-30-00",
        "seal/days": "mon,tue,wed,thu,fri,sat,sun"
    }

    for day in range(7):
        # days 1-7 of the current month cover every day of the week listed above;
        # datetime.replace() returns a new object, so the result must be assigned
        now = datetime.now().replace(day=day + 1)
        # zero the microseconds so the inclusive-start case passes
        now = now.replace(microsecond=0)
        test_cases = [(now.replace(hour=8, minute=0,
                                   second=0), False, "far too early"),
                      (now.replace(hour=9, minute=59,
                                   second=59), False, "just too early"),
                      (now.replace(hour=10, minute=0,
                                   second=0), True, "inclusive start"),
                      (now.replace(hour=13, minute=0,
                                   second=0), True, "within"),
                      (now.replace(hour=17, minute=30,
                                   second=0), False, "exclusive end"),
                      (now.replace(hour=17, minute=30,
                                   second=1), False, "just too late"),
                      (now.replace(hour=20, minute=0,
                                   second=0), False, "far too late")]

        for test_case in test_cases:
            assert (len(label_runner.filter_day_time(
                [pod], test_case[0])) == 1) == test_case[1], test_case[2]
def test_filter_is_enabled():
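    # filter_is_enabled keeps pods marked seal/enabled=true via label or annotation;
    # when both are present, the label takes precedence (see the asserts below).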
    label_runner = LabelRunner(None, None, None, None)

    pods = [
        Pod(name='test', namespace='test', labels={'seal/enabled': 'true'}),
        Pod(name='test', namespace='test', labels={'seal/enabled': 'false'}),
        Pod(name='test', namespace='test', labels={'seal/enabled': 'asdf'}),
        Pod(name='test', namespace='test', labels={'bla': 'bla'}),
        Pod(name='test',
            namespace='test',
            annotations={'seal/enabled': 'true'}),
        Pod(name='test',
            namespace='test',
            annotations={'seal/enabled': 'false'}),
        Pod(name='test',
            namespace='test',
            annotations={'seal/enabled': 'true'},
            labels={'seal/enabled': 'false'}),
        Pod(name='test',
            namespace='test',
            annotations={'seal/enabled': 'false'},
            labels={'seal/enabled': 'true'})
    ]

    filtered_pods = label_runner.filter_is_enabled(pods)

    assert len(filtered_pods) == 3
    assert filtered_pods[0] is pods[0]
    assert filtered_pods[1] is pods[4]
    assert filtered_pods[2] is pods[7]
@pytest.mark.parametrize("proba", [0.0, 0.5, 1.0])  # assumed example probabilities
def test_filter_kill_probability(proba):
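    # Each pod should survive filter_kill_probability at the rate given by its
    # seal/kill-probability label, within a 1% relative tolerance.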
    random.seed(7)  # make the tests deterministic
    SAMPLES = 100000

    label_runner = LabelRunner(None, None, None, None)
    pod = Pod(name='test', namespace='test')
    pod.labels = {'seal/kill-probability': str(proba)}

    agg_len = 0.0
    for _ in range(SAMPLES):
        agg_len += len(label_runner.filter_kill_probability([pod]))
    assert float(agg_len) / SAMPLES == pytest.approx(proba, 0.01)
def test_kill_pod_SSHing():
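    # Without seal/force-kill=true, kill_pod should SIGTERM the container over SSH.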
    label_runner = LabelRunner(NodeInventory(None), None, None, SSHExecutor())

    # patch metrics collector
    label_runner.metric_collector = MagicMock()

    # Patch action of switching to SSHing mode
    k8s_inventory = MagicMock()
    label_runner.k8s_inventory = k8s_inventory

    # Patch action of getting nodes to execute kill command on
    test_node = Node(1)
    get_node_by_ip_mock = MagicMock(return_value=test_node)
    label_runner.inventory.get_node_by_ip = get_node_by_ip_mock

    # Patch the executor so the kill command can be inspected
    execute_mock = MagicMock()
    label_runner.executor.execute = execute_mock

    mock_pod = Pod(name='test', namespace='test')
    mock_pod.container_ids = ["docker://container1"]
    mock_pod.labels = {"seal/force-kill": "false"}
    label_runner.kill_pod(mock_pod)

    mock_pod = Pod(name='test', namespace='test')
    mock_pod.container_ids = ["docker://container1"]
    mock_pod.labels = {}
    label_runner.kill_pod(mock_pod)

    execute_mock.assert_called_with("sudo docker kill -s SIGTERM container1",
                                    nodes=[test_node])
def test_filter_is_enabled():
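    # Only the pod labelled seal/enabled=true should survive the filter.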
    label_runner = LabelRunner(None, None, None, None)

    enabled_pod = MagicMock()
    enabled_pod.labels = {'seal/enabled': 'true'}
    disabled_pod_1 = MagicMock()
    disabled_pod_1.labels = {'seal/enabled': 'false'}
    disabled_pod_2 = MagicMock()
    disabled_pod_2.labels = {'seal/enabled': 'asdf'}
    disabled_pod_3 = MagicMock()
    disabled_pod_3.labels = {'bla': 'bla'}

    filtered_pods = label_runner.filter_is_enabled(
        [enabled_pod, disabled_pod_1, disabled_pod_2, disabled_pod_3])
    assert len(filtered_pods) == 1
    assert filtered_pods[0] == enabled_pod
def test_process_time_label():
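    # process_time_label parses HH-MM-SS strings and rejects malformed or out-of-range values.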
    label_runner = LabelRunner(None, None, None, None)

    with pytest.raises(ValueError):
        label_runner.process_time_label("10-00-0")

    with pytest.raises(ValueError):
        label_runner.process_time_label("-1-00-00")

    with pytest.raises(ValueError):
        label_runner.process_time_label("24-00-00")

    assert (1, 22, 21) == label_runner.process_time_label("01-22-21")
def test_kill_pod_forced():
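    # With seal/force-kill=true, kill_pod should SIGKILL the container.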
    label_runner = LabelRunner(NodeInventory(None), None, None, RemoteExecutor())

    # Patch action of getting nodes to execute kill command on
    test_node = Node(1)
    get_node_by_ip_mock = MagicMock(return_value=test_node)
    label_runner.inventory.get_node_by_ip = get_node_by_ip_mock

    # Patch the executor so the kill command can be inspected
    execute_mock = MagicMock()
    label_runner.executor.execute = execute_mock

    mock_pod = MagicMock()
    mock_pod.container_ids = ["docker://container1"]
    mock_pod.labels = {"seal/force-kill": "true"}
    label_runner.kill_pod(mock_pod)
    execute_mock.assert_called_once_with(
        "sudo docker kill -s SIGKILL container1", nodes=[test_node])
def test_filter_day_time():
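    # Time-window filtering with only start/end time labels (no seal/days restriction).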
    label_runner = LabelRunner(None, None, None, None)

    pod = MagicMock()
    pod.labels = {"seal/start-time": "10-00-00", "seal/end-time": "17-30-00"}

    now = datetime.now()
    test_cases = [
        (now.replace(hour=8, minute=0, second=0), False, "far too early"),
        (now.replace(hour=9, minute=59, second=59), False, "just too early"),
        (now.replace(hour=10, minute=0, second=0), True, "inclusive start"),
        (now.replace(hour=13, minute=0, second=0), True, "within"),
        (now.replace(hour=17, minute=30, second=0), False, "exclusive end"),
        (now.replace(hour=17, minute=30, second=1), False, "just too late"),
        (now.replace(hour=20, minute=0, second=0), False, "far too late")
    ]

    for test_case in test_cases:
        assert (len(label_runner.filter_day_time(
            [pod], test_case[0])) == 1) == test_case[1], test_case[2]
def test_kill_pod_APIcalling():
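    # With delete_pods enabled on the inventory, kill_pod should delete the pod
    # through the Kubernetes API and record a pod-killed metric.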

    label_runner = LabelRunner(NodeInventory(None), None, None,
                               RemoteExecutor())

    # Patch action of getting nodes to execute kill command on
    test_node = Node(1)
    get_node_by_ip_mock = MagicMock(return_value=test_node)
    label_runner.inventory.get_node_by_ip = get_node_by_ip_mock

    # Patch the action of switching to API-calling mode
    k8s_inventory = MagicMock()
    k8s_inventory.delete_pods = True
    label_runner.k8s_inventory = k8s_inventory

    # Patch the k8s client used to delete pods
    k8s_client_mock = MagicMock()
    label_runner.k8s_inventory.k8s_client = k8s_client_mock

    delete_pods_mock = MagicMock()
    label_runner.k8s_inventory.k8s_client.delete_pods = delete_pods_mock

    metric_collector = MagicMock()
    label_runner.metric_collector = metric_collector

    add_pod_killed_metric_mock = MagicMock()
    label_runner.metric_collector.add_pod_killed_metric = add_pod_killed_metric_mock

    mock_pod = Pod(name='test', namespace='test')
    mock_pod.container_ids = ["docker://container1"]
    label_runner.kill_pod(mock_pod)

    delete_pods_mock.assert_called_with([mock_pod])
    add_pod_killed_metric_mock.assert_called_with(mock_pod)
def test_get_integer_days_from_days_label():
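    # Valid day names map to 0 (mon) through 6 (sun); unrecognised tokens add nothing.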
    label_runner = LabelRunner(None, None, None, None)
    integer_days = label_runner.get_integer_days_from_days_label(
        "mon,abc,tue,wed,thu,thur,fri,sat,sun,bla")
    assert integer_days == [0, 1, 2, 3, 4, 5, 6]
Example #11
def main(argv):
    """
        The main function to invoke the powerfulseal cli
    """
    args = parse_args(args=argv)

    if args.mode is None:
        return parse_args(['--help'])

    ##########################################################################
    # VALIDATE POLICY MODE
    ##########################################################################
    if args.mode == 'validate':
        policy = PolicyRunner.load_file(args.policy_file)
        if PolicyRunner.is_policy_valid(policy):
            return print('OK')
        print("Policy not valid. See log output above.")
        return sys.exit(1)

    ##########################################################################
    # LOGGING
    ##########################################################################
    # calm down the werkzeug logger (flask's request logging on stdout)
    logging.getLogger("werkzeug").setLevel(logging.ERROR)
    try:
        import click

        def echo(*args, **kwargs):
            pass

        click.echo = echo
        click.secho = echo
    except Exception:  # click is optional; ignore if it is missing
        pass

    # parse the verbosity flags
    if args.silent == 1:
        log_level = logging.WARNING
    elif args.silent == 2:
        log_level = logging.ERROR
    elif not args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG

    server_log_handler = ServerStateLogHandler()
    server_log_handler.setLevel(log_level)

    # do a basic config with the server log handler
    logging.basicConfig(level=log_level, handlers=[server_log_handler])
    # this installs a stdout handler by default to the root
    coloredlogs.install(level=log_level,
                        fmt='%(asctime)s %(levelname)s %(name)s %(message)s')

    # the main cli handler
    logger = makeLogger(__name__)
    logger.setLevel(log_level)
    logger.info("verbosity: %s; log level: %s; handler level: %s",
                args.verbose, logging.getLevelName(logger.getEffectiveLevel()),
                logging.getLevelName(log_level))

    ##########################################################################
    # KUBERNETES
    ##########################################################################
    kube_config = parse_kubeconfig(args)
    k8s_client = K8sClient(kube_config=kube_config)
    operation_mode = args.execution_mode
    # backwards compatibility
    if args.use_pod_delete_instead_of_ssh_kill:
        operation_mode = "kubernetes"
    k8s_inventory = K8sInventory(k8s_client=k8s_client, )

    ##########################################################################
    # CLOUD DRIVER
    ##########################################################################
    if args.openstack:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.openstack_cloud_name, )
    elif args.aws:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    elif args.azure:
        logger.info("Building Azure driver")
        driver = AzureDriver(
            cluster_rg_name=args.azure_resource_group_name,
            cluster_node_rg_name=args.azure_node_resource_group_name,
        )
    elif args.gcp:
        logger.info("Building GCP driver")
        driver = GCPDriver(config=args.gcp_config_file)
    else:
        logger.info("No cloud driver - some functionality disabled")
        driver = NoCloudDriver()

    ##########################################################################
    # INVENTORY
    ##########################################################################
    if args.inventory_file:
        logger.info("Reading inventory from %s", args.inventory_file)
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file)
    else:
        logger.debug("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    ##########################################################################
    # SSH EXECUTOR
    ##########################################################################
    if operation_mode == "kubernetes":
        executor = KubernetesExecutor(k8s_client=k8s_client, )
    else:
        if args.use_private_ip:
            logger.info("Using each node's private IP address")
        if args.override_ssh_host:
            logger.info("Using each overriten host: %s",
                        args.override_ssh_host)
        executor = SSHExecutor(
            user=args.remote_user,
            ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
            ssh_path_to_private_key=args.ssh_path_to_private_key,
            override_host=args.override_ssh_host,
            ssh_password=args.ssh_password,
            use_private_ip=args.use_private_ip,
            ssh_kill_command=args.ssh_kill_command,
        )

    ##########################################################################
    # INTERACTIVE MODE
    ##########################################################################
    if args.mode == 'interactive':
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        logger.info("STARTING INTERACTIVE MODE")
        while True:
            try:
                cmd.cmdloop()
            except GeneratorExit:
                print("Exiting")
                sys.exit(0)
            except KeyboardInterrupt:
                print()
                print("Ctrl-c again to quit")
            try:
                input()
            except KeyboardInterrupt:
                sys.exit(0)
        return

    ##########################################################################
    # METRICS
    ##########################################################################
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        flask_debug = os.environ.get("FLASK_DEBUG")
        flask_env = os.environ.get("FLASK_ENVIROMENT")
        if flask_debug is not None or (flask_env is not None
                                       and flask_env != "production"):
            logger.error(
                "PROMETHEUS METRICS NOT SUPPORTED WHEN USING FLASK RELOAD. NOT STARTING THE SERVER"
            )
        else:
            logger.info("Starting prometheus metrics server (%s:%s)",
                        args.prometheus_host, args.prometheus_port)
            start_http_server(args.prometheus_port, args.prometheus_host)
            metric_collector = PrometheusCollector()
    elif args.datadog_collector:
        logger.info("Starting datadog collector")
        metric_collector = DatadogCollector()
    else:
        logger.info("Using stdout metrics collector")

    ##########################################################################
    # AUTONOMOUS MODE
    ##########################################################################
    if args.mode == 'autonomous':

        runner = PolicyRunner(args.policy_file, k8s_client, logger)

        # run the metrics server if requested
        if not args.headless:
            # start the server
            logger.info("Starting the UI server (%s:%s)", args.host, args.port)
            start_server(
                host=args.host,
                port=args.port,
                read_policy_fn=runner.read_policy,
                accept_proxy_headers=args.accept_proxy_headers,
                logger=server_log_handler,
            )
        else:
            logger.info("NOT starting the UI server")

        logger.info("STARTING AUTONOMOUS MODE")
        success = runner.run(inventory,
                             k8s_inventory,
                             driver,
                             executor,
                             metric_collector=metric_collector)
        if not success:
            logger.error("Policy runner finishes with an error")
            return sys.exit(1)
        return sys.exit(0)

    ##########################################################################
    # LABEL MODE
    ##########################################################################
    elif args.mode == 'label':
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING LABEL MODE")
        label_runner.run()
Example #12
def main(argv):
    """
        The main function to invoke the powerfulseal cli
    """
    args = parse_args(args=argv)

    if args.mode is None:
        return parse_args(['--help'])

    ##########################################################################
    # VALIDATE POLICY MODE
    ##########################################################################
    if args.mode == 'validate':
        policy = PolicyRunner.load_file(args.policy_file)
        if PolicyRunner.is_policy_valid(policy):
            return print('OK')
        print("Policy not valid. See log output above.")
        return sys.exit(1)

    ##########################################################################
    # LOGGING
    ##########################################################################
    # Ensure the logger config propagates from the root module of this package
    logger = logging.getLogger(__name__.split('.')[0])

    # The default level should be set to logging.DEBUG to ensure that the stdout
    # stream handler can filter to the user-specified verbosity level while the
    # server logging handler can receive all logs
    logger.setLevel(logging.DEBUG)

    # Configure logging for stdout
    if not args.verbose:
        log_level = logging.ERROR
    elif args.verbose == 1:
        log_level = logging.WARNING
    elif args.verbose == 2:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG

    stdout_handler = logging.StreamHandler()
    stdout_handler.setLevel(log_level)
    logger.addHandler(stdout_handler)
    coloredlogs.install(logger=logger)

    logger.info("module %s: verbosity %s: log level %s: handler level %s",
                __name__, args.verbose,
                logging.getLevelName(logger.getEffectiveLevel()),
                logging.getLevelName(log_level))

    ##########################################################################
    # KUBERNETES
    ##########################################################################
    kube_config = parse_kubeconfig(args)
    k8s_client = K8sClient(kube_config=kube_config)
    k8s_inventory = K8sInventory(
        k8s_client=k8s_client,
        delete_pods=args.use_pod_delete_instead_of_ssh_kill)

    ##########################################################################
    # CLOUD DRIVER
    ##########################################################################
    if args.openstack:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.openstack_cloud_name, )
    elif args.aws:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    elif args.azure:
        logger.info("Building Azure driver")
        driver = AzureDriver(
            cluster_rg_name=args.azure_resource_group_name,
            cluster_node_rg_name=args.azure_node_resource_group_name,
        )
    elif args.gcp:
        logger.info("Building GCP driver")
        driver = GCPDriver(config=args.gcp_config_file)
    else:
        logger.info("No driver - some functionality disabled")
        driver = NoCloudDriver()

    ##########################################################################
    # INVENTORY
    ##########################################################################
    if args.inventory_file:
        logger.info("Reading inventory from %s", args.inventory_file)
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file)
    else:
        logger.info("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    ##########################################################################
    # SSH EXECUTOR
    ##########################################################################
    if args.use_private_ip:
        logger.info("Using each node's private IP address")
    executor = RemoteExecutor(
        user=args.remote_user,
        ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
        ssh_path_to_private_key=args.ssh_path_to_private_key,
        override_host=args.override_ssh_host,
        ssh_password=args.ssh_password,
        use_private_ip=args.use_private_ip,
        ssh_kill_command=args.ssh_kill_command,
    )

    ##########################################################################
    # INTERACTIVE MODE
    ##########################################################################
    if args.mode == 'interactive':
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        logger.info("STARTING INTERACTIVE MODE")
        while True:
            try:
                cmd.cmdloop()
            except GeneratorExit:
                print("Exiting")
                sys.exit(0)
            except KeyboardInterrupt:
                print()
                print("Ctrl-c again to quit")
            try:
                six.moves.input()
            except KeyboardInterrupt:
                sys.exit(0)
        return

    ##########################################################################
    # METRICS
    ##########################################################################
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        flask_debug = os.environ.get("FLASK_DEBUG")
        flask_env = os.environ.get("FLASK_ENVIROMENT")
        if flask_debug is not None or (flask_env is not None
                                       and flask_env != "production"):
            logger.error(
                "PROMETHEUS METRICS NOT SUPPORTED WHEN USING FLASK RELOAD. NOT STARTING THE SERVER"
            )
        else:
            logger.info("Starting prometheus metrics server on %s",
                        args.prometheus_port)
            start_http_server(args.prometheus_port, args.prometheus_host)
            metric_collector = PrometheusCollector()
    elif args.datadog_collector:
        logger.info("Starting datadog collector")
        metric_collector = DatadogCollector()
    else:
        logger.info("Using stdout metrics collector")

    ##########################################################################
    # AUTONOMOUS MODE
    ##########################################################################
    if args.mode == 'autonomous':

        # read and validate the policy
        policy = PolicyRunner.load_file(args.policy_file)
        if not PolicyRunner.is_policy_valid(policy):
            logger.error("Policy not valid. See log output above.")
            return sys.exit(1)

        # run the metrics server if requested
        if not args.headless:
            # Create an instance of the singleton server state so that all logs
            # are available for retrieval from the web interface
            state = ServerState(
                policy,
                inventory,
                k8s_inventory,
                driver,
                executor,
                args.host,
                args.port,
                args.policy_file,
                metric_collector=metric_collector,
            )
            server_log_handler = ServerStateLogHandler()
            server_log_handler.setLevel(log_level)
            logger.addHandler(server_log_handler)
            state.start_policy_runner()
            # start the server
            logger.info("Starting the UI server")
            start_server(args.host, args.port, args.accept_proxy_headers)
        else:
            logger.info("NOT starting the UI server")

            logger.info("STARTING AUTONOMOUS MODE")
            PolicyRunner.run(policy,
                             inventory,
                             k8s_inventory,
                             driver,
                             executor,
                             metric_collector=metric_collector)

    ##########################################################################
    # LABEL MODE
    ##########################################################################
    elif args.mode == 'label':
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING LABEL MODE")
        label_runner.run()

    ##########################################################################
    # DEMO MODE
    ##########################################################################
    elif args.mode == 'demo':
        aggressiveness = int(args.aggressiveness)
        if not 1 <= aggressiveness <= 5:
            print("Aggressiveness must be between 1 and 5 inclusive")
            sys.exit(1)

        metrics_server_client = MetricsServerClient(args.metrics_server_path)
        demo_runner = DemoRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            metrics_server_client,
            aggressiveness=aggressiveness,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING DEMO MODE")
        demo_runner.run()
Example #13
def main(argv):
    """
        The main function to invoke the powerfulseal cli
    """
    args = parse_args(args=argv)

    if args.mode is None:
        return parse_args(['--help'])

    ##########################################################################
    # VALIDATE POLICY MODE
    ##########################################################################
    if args.mode == 'validate':
        policy = PolicyRunner.load_file(args.policy_file)
        if PolicyRunner.is_policy_valid(policy):
            return print('OK')
        print("Policy not valid. See log output above.")
        return sys.exit(1)

    ##########################################################################
    # LOGGING
    ##########################################################################
    # Ensure the logger config propagates from the root module of this package
    logger = logging.getLogger(__name__.split('.')[0])

    # The default level should be set to logging.DEBUG to ensure that the stdout
    # stream handler can filter to the user-specified verbosity level while the
    # server logging handler can receive all logs
    logger.setLevel(logging.DEBUG)

    # Configure logging for stdout
    if not args.verbose:
        log_level = logging.ERROR
    elif args.verbose == 1:
        log_level = logging.WARNING
    elif args.verbose == 2:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG

    stdout_handler = logging.StreamHandler()
    stdout_handler.setLevel(log_level)
    logger.addHandler(stdout_handler)
    coloredlogs.install(logger=logger)

    ##########################################################################
    # KUBERNETES
    ##########################################################################
    kube_config = args.kubeconfig
    logger.info("Creating kubernetes client with config %s", kube_config)
    k8s_client = K8sClient(kube_config=kube_config)
    k8s_inventory = K8sInventory(k8s_client=k8s_client)

    ##########################################################################
    # CLOUD DRIVER
    ##########################################################################
    if args.openstack:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.openstack_cloud_name, )
    elif args.aws:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    else:
        logger.info("No driver - some functionality disabled")
        driver = NoCloudDriver()

    ##########################################################################
    # INVENTORY
    ##########################################################################
    if args.inventory_file:
        logger.info("Reading inventory from %s", args.inventory_file)
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file)
    else:
        logger.info("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    ##########################################################################
    # SSH EXECUTOR
    ##########################################################################
    executor = RemoteExecutor(
        user=args.remote_user,
        ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
        ssh_path_to_private_key=args.ssh_path_to_private_key,
        override_host=args.override_ssh_host,
    )

    ##########################################################################
    # INTERACTIVE MODE
    ##########################################################################
    if args.mode == 'interactive':
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        logger.info("STARTING INTERACTIVE MODE")
        while True:
            try:
                cmd.cmdloop()
            except KeyboardInterrupt:
                print()
                print("Ctrl-c again to quit")
            try:
                input()
            except KeyboardInterrupt:
                sys.exit(0)
        return

    ##########################################################################
    # METRICS
    ##########################################################################
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        logger.info("Starting prometheus metrics server on %s",
                    args.prometheus_port)
        start_http_server(args.prometheus_port, args.prometheus_host)
        metric_collector = PrometheusCollector()
    else:
        logger.info("Not starting prometheus collector")

    ##########################################################################
    # AUTONOMOUS MODE
    ##########################################################################
    if args.mode == 'autonomous':

        # read and validate the policy
        policy = PolicyRunner.load_file(args.policy_file)
        if not PolicyRunner.is_policy_valid(policy):
            logger.info("Policy not valid. See log output above.")
            return sys.exit(1)

        # run the metrics server if requested
        if not args.headless:
            # Create an instance of the singleton server state so that all logs
            # are available for retrieval from the web interface
            state = ServerState(
                policy,
                inventory,
                k8s_inventory,
                driver,
                executor,
                args.host,
                args.port,
                args.policy_file,
                metric_collector=metric_collector,
            )
            server_log_handler = ServerStateLogHandler()
            server_log_handler.setLevel(log_level)
            logger.addHandler(server_log_handler)
            state.start_policy_runner()
            # start the server
            logger.info("Starting the UI server")
            start_server(args.host, args.port)
        else:
            logger.info("NOT starting the UI server")

            logger.info("STARTING AUTONOMOUS MODE")
            PolicyRunner.run(policy,
                             inventory,
                             k8s_inventory,
                             driver,
                             executor,
                             metric_collector=metric_collector)

    ##########################################################################
    # LABEL MODE
    ##########################################################################
    elif args.mode == 'label':
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING LABEL MODE")
        label_runner.run()

    ##########################################################################
    # DEMO MODE
    ##########################################################################
    elif args.mode == 'demo':
        aggressiveness = int(args.aggressiveness)
        if not 1 <= aggressiveness <= 5:
            print("Aggressiveness must be between 1 and 5 inclusive")
            sys.exit(1)

        heapster_client = HeapsterClient(args.heapster_path)
        demo_runner = DemoRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            heapster_client,
            aggressiveness=aggressiveness,
            min_seconds_between_runs=args.min_seconds_between_runs,
            max_seconds_between_runs=args.max_seconds_between_runs,
            namespace=args.kubernetes_namespace,
            metric_collector=metric_collector,
        )
        logger.info("STARTING DEMO MODE")
        demo_runner.run()
Example #14
def main(argv):
    """
        The main function to invoke the powerfulseal cli
    """
    args = parse_args(args=argv)

    # Configure logging

    # Ensure the logger config propagates from the root module of this package
    logger = logging.getLogger(__name__.split('.')[0])

    # The default level should be set to logging.DEBUG to ensure that the stdout
    # stream handler can filter to the user-specified verbosity level while the
    # server logging handler can receive all logs
    logger.setLevel(logging.DEBUG)

    # Configure logging for stdout
    if not args.verbose:
        log_level = logging.ERROR
    elif args.verbose == 1:
        log_level = logging.WARNING
    elif args.verbose == 2:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG

    stdout_handler = logging.StreamHandler()
    stdout_handler.setLevel(log_level)
    logger.addHandler(stdout_handler)

    # build cloud provider driver
    logger.debug("Building the driver")
    if args.open_stack_cloud:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.open_stack_cloud_name, )
    elif args.aws_cloud:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    else:
        logger.info("No driver - some functionality disabled")
        driver = NoCloudDriver()

    # build a k8s client
    kube_config = args.kube_config
    logger.debug("Creating kubernetes client with config %d", kube_config)
    k8s_client = K8sClient(kube_config=kube_config)
    k8s_inventory = K8sInventory(k8s_client=k8s_client)

    # read the local inventory
    logger.debug("Fetching the inventory")
    if args.inventory_file:
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file)
    else:
        logger.info("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    # create an executor
    executor = RemoteExecutor(
        user=args.remote_user,
        ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
        ssh_path_to_private_key=args.ssh_path_to_private_key,
    )

    # create the collector which defaults to StdoutCollector()
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        if not args.prometheus_host:
            raise argparse.ArgumentTypeError(
                "The Prometheus host must be specified with --prometheus-host")
        if not args.prometheus_port:
            raise argparse.ArgumentTypeError(
                "The Prometheus port must be specified with --prometheus-port")
        start_http_server(args.prometheus_port, args.prometheus_host)
        metric_collector = PrometheusCollector()

    if args.server:
        # If the policy file already exists, then it must be valid. Otherwise,
        # create the policy file and write a default, empty policy to it.
        try:
            if not (os.path.exists(args.run_policy_file)
                    and os.path.isfile(args.run_policy_file)):
                # Create a new policy file
                with open(args.run_policy_file, "w") as f:
                    policy = PolicyRunner.DEFAULT_POLICY
                    f.write(yaml.dump(policy, default_flow_style=False))
            else:
                policy = PolicyRunner.load_file(args.run_policy_file)
                if not PolicyRunner.is_policy_valid(policy):
                    print("Policy file exists but is not valid. Exiting.")
                    sys.exit(-1)
        except IOError:
            print("Unable to perform file operations. Exiting.")
            sys.exit(-1)

        # Create an instance of the singleton server state so that all logs
        # are available for retrieval from the web interface
        ServerState(policy, inventory, k8s_inventory, driver, executor,
                    args.server_host, args.server_port, args.run_policy_file)
        server_log_handler = ServerStateLogHandler()
        server_log_handler.setLevel(logging.DEBUG)
        logger.addHandler(server_log_handler)
        start_server(args.server_host, int(args.server_port))
    elif args.interactive:
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        while True:
            try:
                cmd.cmdloop()
            except KeyboardInterrupt:
                print()
                print("Ctrl-c again to quit")
            try:
                input()
            except KeyboardInterrupt:
                sys.exit(0)
    elif args.demo:
        aggressiveness = int(args.aggressiveness)
        if not 1 <= aggressiveness <= 5:
            print("Aggressiveness must be between 1 and 5 inclusive")
            sys.exit(1)

        heapster_client = HeapsterClient(args.heapster_path)
        demo_runner = DemoRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            heapster_client,
            aggressiveness=aggressiveness,
            min_seconds_between_runs=int(args.min_seconds_between_runs),
            max_seconds_between_runs=int(args.max_seconds_between_runs),
            namespace=args.namespace)
        demo_runner.run()
    elif args.label:
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=int(args.min_seconds_between_runs),
            max_seconds_between_runs=int(args.max_seconds_between_runs),
            namespace=args.namespace)
        label_runner.run()
    elif args.validate_policy_file:
        policy = PolicyRunner.load_file(args.validate_policy_file)
        if PolicyRunner.is_policy_valid(policy):
            logger.info("All good, captain")
        else:
            logger.error("Policy not valid. See log output above.")
    elif args.run_policy_file:
        policy = PolicyRunner.load_file(args.run_policy_file)
        if not PolicyRunner.is_policy_valid(policy):
            logger.error("Policy not valid. See log output above.")
            sys.exit(1)
        PolicyRunner.run(policy,
                         inventory,
                         k8s_inventory,
                         driver,
                         executor,
                         metric_collector=metric_collector)
Example #15
def main(argv):
    """
        The main function to invoke the powerfulseal cli
    """
    args = parse_args(args=argv)

    # Configure logging
    if not args.verbose:
        log_level = logging.ERROR
    elif args.verbose == 1:
        log_level = logging.WARNING
    elif args.verbose == 2:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    logging.basicConfig(stream=sys.stdout, level=log_level)
    logger = logging.getLogger(__name__)
    logger.setLevel(log_level)

    # build cloud provider driver
    logger.debug("Building the driver")
    if args.open_stack_cloud:
        logger.info("Building OpenStack driver")
        driver = OpenStackDriver(cloud=args.open_stack_cloud_name, )
    elif args.aws_cloud:
        logger.info("Building AWS driver")
        driver = AWSDriver()
    else:
        logger.info("No driver - some functionality disabled")
        driver = NoCloudDriver()

    # build a k8s client
    kube_config = args.kube_config
    logger.debug("Creating kubernetes client with config %d", kube_config)
    k8s_client = K8sClient(kube_config=kube_config)
    k8s_inventory = K8sInventory(k8s_client=k8s_client)

    # read the local inventory
    logger.debug("Fetching the inventory")
    if args.inventory_file:
        groups_to_restrict_to = read_inventory_file_to_dict(
            args.inventory_file)
    else:
        logger.info("Attempting to read the inventory from kubernetes")
        groups_to_restrict_to = k8s_client.get_nodes_groups()

    logger.debug("Restricting inventory to %s" % groups_to_restrict_to)

    inventory = NodeInventory(
        driver=driver,
        restrict_to_groups=groups_to_restrict_to,
    )
    inventory.sync()

    # create an executor
    executor = RemoteExecutor(
        user=args.remote_user,
        ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
        ssh_path_to_private_key=args.ssh_path_to_private_key,
    )

    # create the collector which defaults to StdoutCollector()
    metric_collector = StdoutCollector()
    if args.prometheus_collector:
        if not args.prometheus_host:
            raise argparse.ArgumentTypeError(
                "The Prometheus host must be specified with --prometheus-host")
        if not args.prometheus_port:
            raise argparse.ArgumentTypeError(
                "The Prometheus port must be specified with --prometheus-port")
        start_http_server(args.prometheus_port, args.prometheus_host)
        metric_collector = PrometheusCollector()

    if args.interactive:
        # create a command parser
        cmd = PSCmd(
            inventory=inventory,
            driver=driver,
            executor=executor,
            k8s_inventory=k8s_inventory,
        )
        while True:
            try:
                cmd.cmdloop()
            except KeyboardInterrupt:
                print()
                print("Ctrl-c again to quit")
            try:
                input()
            except KeyboardInterrupt:
                sys.exit(0)
    elif args.demo:
        aggressiveness = int(args.aggressiveness)
        if not 1 <= aggressiveness <= 5:
            print("Aggressiveness must be between 1 and 5 inclusive")
            sys.exit(1)

        heapster_client = HeapsterClient(args.heapster_path)
        demo_runner = DemoRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            heapster_client,
            aggressiveness=aggressiveness,
            min_seconds_between_runs=int(args.min_seconds_between_runs),
            max_seconds_between_runs=int(args.max_seconds_between_runs),
            namespace=args.namespace)
        demo_runner.run()
    elif args.label:
        label_runner = LabelRunner(
            inventory,
            k8s_inventory,
            driver,
            executor,
            min_seconds_between_runs=int(args.min_seconds_between_runs),
            max_seconds_between_runs=int(args.max_seconds_between_runs),
            namespace=args.namespace)
        label_runner.run()
    elif args.validate_policy_file:
        PolicyRunner.validate_file(args.validate_policy_file)
        print("All good, captain")
    elif args.run_policy_file:
        policy = PolicyRunner.validate_file(args.run_policy_file)
        PolicyRunner.run(policy,
                         inventory,
                         k8s_inventory,
                         driver,
                         executor,
                         metric_collector=metric_collector)