# Example 1
def configure_control(config: Configuration, secrets: Secrets = None):
    """
    Control setup hook: cache the "dummy-key" configuration value.

    Reads "dummy-key" from the experiment configuration (falling back to
    "default") and stores it in the module-level ``value_from_config`` so
    the other hooks of this control can read it later.
    """
    # NOTE: the original signature read `secrets=Secrets`, using the type
    # itself as a default value instead of an annotation; fixed to match
    # the convention used by the other controls in this file.
    global value_from_config
    value_from_config = config.get("dummy-key", "default")
# Example 2
def signed_api_call(service: str, path: str = "/", method: str = 'GET',
                    configuration: Configuration = None,
                    secrets: Secrets = None,
                    params: Dict[str, Any] = None) -> requests.Response:
    """
    Perform an API call against an AWS service.

    This should only be used when boto does not already implement the service
    itself. See https://boto3.readthedocs.io/en/latest/reference/services/index.html

    for a list of supported services by boto. This function does not claim
    being generic enough to support the whole range of AWS API.

    The `configuration` object should look like this:

    ```json
    {
        "aws_region": "us-east-1",
        "aws_host": "amazonaws.com"
    }
    ```

    While both are optional, and default to the values shown in this snippet,
    you should make sure to be explicit about them to avoid confusion.

    The endpoint being called is built from the given `service` name, the
    given region and host as well as the `path` of the action being called on
    the service. By default, the call is made over `HTTPS` but this can be
    changed by setting `aws_endpoint_scheme` in the configuration dictionary.

    Pass any parameters of the API itself as part of the remaining `params`
    parameter, as a dictionary. It should match the signature of the service
    you are trying to call and will be sent as a query-string when `method` is
    `"GET"` or `"DELETE"`, or as a JSON payload otherwise. Refer to the AWS
    documentation for each service type.

    This function does not support profile names so you must provide the
    credentials in secrets.
    """  # noqa: E501
    configuration = configuration or {}
    # an explicitly falsy region (None/"") collapses to "" so that the
    # '..' -> '.' cleanup below produces "service.host" instead of
    # "service..host"
    region = configuration.get("aws_region", "us-east-1") or ""
    host = configuration.get("aws_host", "amazonaws.com")
    scheme = configuration.get("aws_endpoint_scheme", "https")
    host = "{s}.{r}.{h}".format(s=service, r=region, h=host)
    # `aws_endpoint` may override the whole computed endpoint; in either
    # case double dots (empty-region case) are collapsed
    endpoint = configuration.get(
        "aws_endpoint", '{scheme}://{h}'.format(
            scheme=scheme, h=host)).replace('..', '.')
    endpoint = "{e}{p}".format(e=endpoint, p=path)
    creds = get_credentials(secrets)

    # when creds weren't provided via secrets, we let boto search for them
    # from the process environment

    if creds["aws_access_key_id"] and creds["aws_secret_access_key"]:
        # sign the request with the explicitly-provided credentials
        auth = AWSRequestsAuth(
            aws_access_key=creds["aws_access_key_id"],
            aws_secret_access_key=creds["aws_secret_access_key"],
            aws_host=host,
            aws_region=region,
            aws_service=service)
    else:
        # fall back on boto's own credential resolution chain
        auth = BotoAWSRequestsAuth(
            aws_host=host,
            aws_region=region,
            aws_service=service)

    headers = {
        "Accept": "application/json"
    }

    # GET/DELETE carry parameters as a query-string; everything else as a
    # JSON body
    if method in ('DELETE', 'GET'):
        return requests.request(
            method, endpoint, headers=headers, auth=auth, params=params)

    return requests.request(
        method, endpoint, headers=headers, auth=auth, json=params)
def aws_client(resource_name: str,
               configuration: Configuration = None,
               secrets: Secrets = None):
    """
    Create a boto3 client for the given resource.

    You may pass the `aws_region` key in the `configuration` object to
    be explicit about which region you want to use.

    You may pass `aws_profile_name` value to the `configuration` object so that
    we load the appropriate profile to converse with the AWS services. In that
    case, make sure your local `~/.aws/credentials` config is properly setup, as
    per https://boto3.readthedocs.io/en/latest/guide/configuration.html#aws-config-file

    Also, if you want to assume a role, you should setup that file as per
    https://boto3.readthedocs.io/en/latest/guide/configuration.html#assume-role-provider
    as we do not read those settings from the `secrets` object.

    Raises `InterruptExecution` when no region can be found in either the
    configuration or the `AWS_REGION`/`AWS_DEFAULT_REGION` environment
    variables.
    """  # noqa: E501
    configuration = configuration or {}
    aws_profile_name = configuration.get("aws_profile_name")
    aws_assume_role_arn = configuration.get("aws_assume_role_arn")
    params = get_credentials(secrets)

    region = configuration.get("aws_region")
    if not region:
        logger.debug(
            "The configuration key `aws_region` is not set, looking in the "
            "environment instead for `AWS_REGION` or `AWS_DEFAULT_REGION`")
        region = os.getenv("AWS_REGION", os.getenv("AWS_DEFAULT_REGION"))
        if not region:
            raise InterruptExecution("AWS requires a region to be set!")

    # region is guaranteed to be set here: we raised above otherwise
    logger.debug("Using AWS region '{}'".format(region))
    params["region_name"] = region

    if boto3.DEFAULT_SESSION is None:
        # we must create our own session so that we can populate the profile
        # name when it is provided. Only create the default session once.
        boto3.setup_default_session(profile_name=aws_profile_name, **params)

    if not aws_assume_role_arn:
        logger.debug(
            "Client will be using profile '{}' from boto3 session".format(
                aws_profile_name or "default"))
        return boto3.client(resource_name, **params)

    logger.debug(
        "Fetching credentials dynamically assuming role '{}'".format(
            aws_assume_role_arn))

    aws_assume_role_session_name = configuration.get(
        "aws_assume_role_session_name")
    if not aws_assume_role_session_name:
        aws_assume_role_session_name = "ChaosToolkit"
        logger.debug(
            "You are missing the `aws_assume_role_session_name` "
            "configuration key. A unique one was generated: '{}'".format(
                aws_assume_role_session_name))

    # ask STS for temporary credentials for the role to assume
    sts = boto3.client('sts', **params)
    response = sts.assume_role(
        RoleArn=aws_assume_role_arn,
        RoleSessionName=aws_assume_role_session_name)
    creds = response['Credentials']
    logger.debug("Temporary credentials will expire on {}".format(
        creds["Expiration"].isoformat()))

    # build the final client from the temporary STS credentials
    params = {
        "aws_access_key_id": creds['AccessKeyId'],
        "aws_secret_access_key": creds['SecretAccessKey'],
        "aws_session_token": creds['SessionToken'],
        "region_name": region,
    }

    return boto3.client(resource_name, **params)
# Example 4
def terminate_pods(label_selector: str = None,
                   name_pattern: str = None,
                   all: bool = False,
                   rand: bool = False,
                   ns: str = "default",
                   secrets: Secrets = None,
                   configuration: Configuration = None):
    """
    Terminate a pod gracefully. Select the appropriate pods by label and/or
    name patterns. Whenever a pattern is provided for the name, all pods
    retrieved will be filtered out if their name do not match the given
    pattern.

    If neither `label_selector` nor `name_pattern` are provided, all pods
    in the namespace will be terminated.

    If `all` is set to `True`, all matching pods will be terminated.
    If `rand` is set to `True`, one random pod will be terminated.
    Otherwise, the first retrieved pod will be terminated.
    """
    # the original signature used a shared mutable default (`= {}`);
    # normalize a missing configuration to a fresh dict instead
    configuration = configuration or {}

    api = create_k8s_api_client(secrets)

    v1 = client.CoreV1Api(api)

    ns_to_check = ns
    if ns == "default":
        ns_to_check = get_not_empty_ns(secrets,
                                       configuration.get('ns-ignore-list', []),
                                       label_selector)

    logger.info("Selected '{}' for experiment".format(ns_to_check))
    ret = v1.list_namespaced_pod(ns_to_check, label_selector=label_selector)

    logger.debug("Found {d} pods labelled '{s}'".format(d=len(ret.items),
                                                        s=label_selector))

    pods = []
    if name_pattern:
        pattern = re.compile(name_pattern)
        for p in ret.items:
            if pattern.match(p.metadata.name):
                pods.append(p)
                logger.debug(
                    "Pod '{p}' match pattern".format(p=p.metadata.name))
    else:
        pods = ret.items

    if not pods:
        # nothing matched: bail out rather than crash on random.choice /
        # pods[0] below
        logger.warning("No pods found to terminate")
        return

    if rand:
        pods = [random.choice(pods)]
        logger.debug("Picked pod '{p}' (rand) to be terminated".format(
            p=pods[0].metadata.name))
    elif not all:
        pods = [pods[0]]
        logger.debug("Picked pod '{p}' to be terminated".format(
            p=pods[0].metadata.name))

    body = client.V1DeleteOptions()
    for p in pods:
        logger.warning("Killing pod " + p.metadata.name)
        v1.delete_namespaced_pod(name=p.metadata.name,
                                 namespace=ns_to_check,
                                 body=body)
# Example 5
def load_secrets_from_vault(
    secrets_info: Dict[str, Dict[str, str]],  # noqa: C901
    configuration: Configuration = None,
    extra_vars: Dict[str, Any] = None,
) -> Secrets:
    """
    Load secrets from Vault KV secrets store

    In your experiment:

    ```
    {
        "k8s": {
            "mykey": {
                "type": "vault",
                "path": "foo/bar"
            }
        }
    }
    ```

    This will read the Vault secret at path `secret/foo/bar`
    (or `secret/data/foo/bar` if you use Vault KV version 2) and store its
    entire payload into Chaos Toolkit `mykey`. This means that all keys
    under that path will be available as-is. For instance, this could be:

    ```
    {
        "mypassword": "******",
        "mylogin": "******"
    }
    ```

    If you rather want a single value from that payload, add a `key`
    property to the secret declaration:

    ```
    {
        "k8s": {
            "mykey": {
                "type": "vault",
                "path": "foo/bar",
                "key": "mypassword"
            }
        }
    }
    ```

    In that case, `mykey` will be set to the value at `secret/foo/bar` under
    the Vault secret key `mypassword`.
    """
    secrets = {}

    # the mount point lookups below expect a dict even when the caller
    # passed no configuration
    configuration = configuration or {}

    client = create_vault_client(configuration)

    for (target, keys) in secrets_info.items():
        secrets[target] = {}

        for (key, value) in keys.items():
            if isinstance(value, dict) and value.get("type") == "vault":
                if not HAS_HVAC:
                    logger.error(
                        "Install the `hvac` package to fetch secrets "
                        "from Vault: `pip install chaostoolkit-lib[vault]`.")
                    return {}

                path = value.get("path")
                if path is None:
                    logger.warning(
                        "Missing Vault secret path for '{}'".format(key))
                    continue

                # see https://github.com/chaostoolkit/chaostoolkit/issues/98
                kv = client.secrets.kv
                is_kv1 = kv.default_kv_version == "1"
                if is_kv1:
                    vault_payload = kv.v1.read_secret(
                        path=path,
                        mount_point=configuration.get(
                            "vault_secrets_mount_point", "secret"),
                    )
                else:
                    vault_payload = kv.v2.read_secret_version(
                        path=path,
                        mount_point=configuration.get(
                            "vault_secrets_mount_point", "secret"),
                    )

                if not vault_payload:
                    logger.warning(
                        "No Vault secret found at path: {}".format(path))
                    continue

                # KV v2 nests the payload one level deeper than v1
                if is_kv1:
                    data = vault_payload.get("data")
                else:
                    data = vault_payload.get("data", {}).get("data")

                if "key" in value:
                    vault_key = value["key"]
                    if vault_key not in data:
                        logger.warning(
                            "No Vault key '{}' at secret path '{}'".format(
                                vault_key, path))
                        continue

                    secrets[target][key] = data.get(vault_key)

                else:
                    secrets[target][key] = data

        # drop targets for which nothing was resolved
        if not secrets[target]:
            secrets.pop(target)

    return secrets
def chaosansible_run(
    host_list: list = ("localhost",),
    configuration: Configuration = None,
    facts: bool = False,
    become: bool = False,
    run_once: bool = False,
    ansible: dict = None,
    num_target: str = "all",
    secrets: Secrets = None,
):

    """
    Run a task through ansible and eventually gather facts from host.

    Raises `InvalidActivity` when an `ansible` task is provided without a
    module or args, and `FailedActivity` when the play fails on or cannot
    reach a target. Returns the per-host results serialized as JSON.
    """
    # NOTE: the original default was `("localhost")`, which is a plain
    # string, so joining the "host list" iterated over its characters;
    # a one-element tuple is the intended default.
    ansible = ansible or {}

    # Check for correct inputs
    if ansible:
        if ansible.get("module") is None:
            raise InvalidActivity("No ansible module defined")

        if ansible.get("args") is None:
            raise InvalidActivity("No ansible module args defined")

    configuration = configuration or {}

    # Ansible configuration elements
    module_path = configuration.get("ansible_module_path")
    become_user = configuration.get("ansible_become_user")
    ssh_key_path = configuration.get("ansible_ssh_private_key")
    ansible_user = configuration.get("ansible_user")
    become_ask_pass = configuration.get("become_ask_pass")
    ssh_extra_args = configuration.get("ansible_ssh_extra_args")

    context.CLIARGS = ImmutableDict(
        connection="smart",
        verbosity=0,
        module_path=module_path,
        forks=10,
        become=become,
        become_method="sudo",
        become_user=become_user,
        check=False,
        diff=False,
        private_key_file=ssh_key_path,
        remote_user=ansible_user,
        ssh_extra_args=ssh_extra_args,
    )

    # Update host_list regarding the number of desired targets.
    # We build a new host list because it is used again later.
    if num_target != "all":
        new_host_list = random_host(host_list, int(num_target))
    else:
        new_host_list = host_list[:]

    # Create an inventory; a single host needs a trailing comma so ansible
    # treats the string as a host list rather than an inventory file path
    sources = ",".join(new_host_list)
    if len(new_host_list) == 1:
        sources += ","

    loader = DataLoader()
    inventory = InventoryManager(loader=loader, sources=sources)

    # Instantiate callback for storing results
    results_callback = ResultsCollectorJSONCallback()

    variable_manager = VariableManager(loader=loader, inventory=inventory)
    if become_ask_pass:
        passwords = dict(become_pass=become_ask_pass)
    else:
        passwords = None

    # Ansible taskmanager
    tqm = TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        passwords=passwords,
        stdout_callback=results_callback,
        run_additional_callbacks=False,
    )

    # Ansible playbook
    play_source = dict(
        name="Ansible Play",
        hosts=new_host_list,
        gather_facts=facts,
        tasks=[
            dict(
                name="facts",
                action=dict(module="debug", args=dict(var="ansible_facts")),
            ),
        ],
    )

    # In case a task was requested (not only fact gathering), append it
    if ansible:
        module = ansible.get("module")
        args = ansible.get("args")
        play_source["tasks"].append(
            dict(
                name="task",
                run_once=run_once,
                action=dict(module=module, args=args),
                register="shell_out",
            )
        )

    # Create an ansible playbook
    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    # Run it
    try:
        result = tqm.run(play)
    finally:
        tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()

    # Remove ansible tmpdir
    shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    if len(results_callback.host_failed) > 0:
        print("Ansible error(s): ")
        for error in results_callback.host_failed:
            print(results_callback.host_failed[error].__dict__)

        raise FailedActivity("Failed to run ansible task")

    elif len(results_callback.host_unreachable) > 0:
        print("Unreachable host(s): ")
        for error in results_callback.host_unreachable:
            print(error)

        raise FailedActivity("At least one target is down")

    else:
        results = {}

        for host, result in results_callback.host_ok.items():
            results[host] = result

        return json.dumps(results)
# Example 7
def configure_control(experiment: Experiment, configuration: Configuration,
                      secrets: Secrets, settings: Settings):
    """
    Control setup hook: record the "dummy-key" value on the experiment.

    The configuration takes precedence over the settings; when neither is
    provided, the experiment is left untouched.
    """
    source = configuration if configuration else settings
    if source:
        experiment["control-value"] = source.get("dummy-key", "default")
# Example 8
def configure_control(configuration: Configuration, secrets: Secrets):
    """
    Control setup hook: cache the "dummy-key" configuration value in the
    module-level ``value_from_config`` global for later hooks to read.
    """
    global value_from_config
    fallback = "default"
    value_from_config = configuration.get("dummy-key", fallback)
def load_dynamic_configuration(
    config: Configuration, secrets: Secrets = None
) -> Configuration:
    """
    This is for loading a dynamic configuration if exists.
    The dynamic config is a regular activity (probe) in the configuration
    section. If there's a use-case for setting a configuration dynamically
    right before the experiment is starting. It executes the probe,
    and then the return value of this probe will be the config you wish to set.
    The dictionary needs to have a key named `type` and as a value `probe`,
    alongside the rest of the probe props.
    (No need for the `tolerance` key).

    For example:

    ```json
    "some_dynamic_config": {
      "name": "some config probe",
      "type": "probe",
      "provider": {
        "type": "python",
        "module": "src.probes",
        "func": "config_probe",
        "arguments": {
            "arg1":"arg1"
        }
      }
    }
    ```

    `some_dynamic_config` will be set with the return value
    of the function config_probe.

    Side Note: the probe type can be the same as a regular probe can be,
    python, process or http. The config argument contains all the
    configurations of the experiment including the raw config_probe
    configuration that can be dynamically injected.

    The configurations contain as well all the env vars after they are set in
    `load_configuration`.

    The `secrets` argument contains all the secrets of the experiment.

    For `process` probes, the stdout value (stripped of endlines)
    is stored into the configuration.
    For `http` probes, the `body` value is stored.
    For `python` probes, the output of the function will be stored.

    We do not stop on errors but log a debug message and do not include the
    key into the result dictionary.
    """
    # we delay this so that the configuration module can be imported leanly
    # from elsewhere
    from chaoslib.activity import run_activity

    conf = {}
    secrets = secrets or {}

    had_errors = False
    logger.debug("Loading dynamic configuration...")
    for (key, value) in config.items():
        if not (isinstance(value, dict) and value.get("type") == "probe"):
            # static entry: carry it through unchanged (`key` necessarily
            # maps to `value` since we are iterating config.items())
            conf[key] = value
            continue

        # we have a dynamic config
        name = value.get("name")
        provider_type = value["provider"]["type"]
        # NOTE(review): this mutates the caller's config entry in place;
        # the secrets themselves are deep-copied so the activity cannot
        # alter the originals
        value["provider"]["secrets"] = deepcopy(secrets)
        try:
            output = run_activity(value, conf, secrets)
        except Exception:
            had_errors = True
            logger.debug(f"Failed to load configuration '{name}'", exc_info=True)
            continue

        if provider_type == "python":
            conf[key] = output
        elif provider_type == "process":
            if output["status"] != 0:
                had_errors = True
                logger.debug(
                    f"Failed to load configuration dynamically "
                    f"from probe '{name}': {output['stderr']}"
                )
            else:
                conf[key] = output.get("stdout", "").strip()
        elif provider_type == "http":
            conf[key] = output.get("body")

    if had_errors:
        logger.warning(
            "Some of the dynamic configuration failed to be loaded. "
            "Please review the log file for understanding what happened."
        )

    return conf