def get_health_check_status(check_id: str,
                            configuration: Configuration = None,
                            secrets: Secrets = None) -> AWSResponse:
    """Fetch the current status of a Route53 health check.

    :param check_id: The health check id
    :param configuration: access values used by actions/probes
    :param secrets: values that need to be passed on to actions/probes
    :returns: Dict[str, Any]
    :raises FailedActivity: on AWS client errors or when no observations
        are returned for the given check id
    """
    client = aws_client("route53", configuration, secrets)
    try:
        status = client.get_health_check_status(HealthCheckId=check_id)
    except ClientError as e:
        logger.exception(e.response["Error"]["Message"])
        raise FailedActivity(e.response["Error"]["Message"])
    # an empty observation list means the check id resolved to nothing useful
    if not status.get("HealthCheckObservations"):
        raise FailedActivity("No results found for %s" % check_id)
    return status
# Example 2 — scraped separator, commented out so the file parses
def stop_instances(instance_ids: List[str] = None,
                   az: str = None,
                   filters: List[Dict[str, Any]] = None,
                   force: bool = False,
                   configuration: Configuration = None,
                   secrets: Secrets = None) -> List[AWSResponse]:
    """
    Stop the given EC2 instances or, if none is provided, all instances
    of the given availability zone. If you need more control, you can
    also provide a list of filters following the documentation
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_instances
    """
    if not any([instance_ids, az, filters]):
        raise FailedActivity(
            "To stop EC2 instances, you must specify either the instance ids,"
            " an AZ to pick random instances from, or a set of filters.")

    # only an AZ was given: warn that the blast radius is the whole zone
    if az and not (instance_ids or filters):
        logger.warning('Based on configuration provided I am going to '
                       'stop all instances in AZ %s!' % az)

    client = aws_client('ec2', configuration, secrets)

    if instance_ids:
        instance_types = get_instance_type_by_id(instance_ids, client)
    else:
        # work on a copy so the caller's filter list is never mutated
        search_filters = deepcopy(filters) if filters else []
        if az:
            search_filters.append(
                {'Name': 'availability-zone', 'Values': [az]})
        instance_types = list_instances_by_type(search_filters, client)
        if not instance_types:
            raise FailedActivity(
                "No instances in availability zone: {}".format(az))

    logger.debug("Picked EC2 instances '{}' from AZ '{}' to be stopped".format(
        str(instance_types), az))

    return stop_instances_any_type(instance_types=instance_types,
                                   force=force,
                                   client=client)
# Example 3 — scraped separator, commented out so the file parses
def __random_instance_from(scale_set, configuration,
                           secrets) -> Dict[str, Any]:
    """Pick one instance at random from the given VM scale set.

    :raises FailedActivity: when the scale set has no instances
    """
    candidates = __fetch_vmss_instances(scale_set, configuration, secrets)
    if not candidates:
        raise FailedActivity("No VMSS instances found")
    logger.debug("Found VMSS instances: {}".format(
        [x['name'] for x in candidates]))
    return random.choice(candidates)
# Example 4 — scraped separator, commented out so the file parses
def __fetch_machines(filter, configuration, secrets) -> List[dict]:
    """Fetch all virtual machines matching the given resource filter.

    :param filter: resource filter forwarded to `fetch_resources`
        (NOTE: the name shadows the builtin `filter`; kept for
        backward compatibility with keyword callers)
    :param configuration: access values used by actions/probes
    :param secrets: values that need to be passed on to actions/probes
    :returns: list of virtual machine resource dicts
    :raises FailedActivity: when no virtual machine matches the filter

    The original return annotation was `-> []`, a list *instance* rather
    than a type; replaced with the proper `List[dict]`.
    """
    machines = fetch_resources(filter, RES_TYPE_VM, secrets, configuration)
    if not machines:
        logger.warning("No virtual machines found")
        raise FailedActivity("No virtual machines found")
    logger.debug(
        "Fetched virtual machines: {}".format(
            [x['name'] for x in machines]))
    return machines
# Example 5 — scraped separator, commented out so the file parses
def fetch_vmss(filter, configuration, secrets) -> List[dict]:
    """Fetch all VM scale sets matching the given resource filter.

    :param filter: resource filter forwarded to `fetch_resources`
    :param configuration: access values used by actions/probes
    :param secrets: values that need to be passed on to actions/probes
    :returns: list of VMSS resource dicts
    :raises FailedActivity: when no scale set matches the filter
    """
    vmss = fetch_resources(filter, RES_TYPE_VMSS, secrets, configuration)

    if not vmss:
        raise FailedActivity("No VMSS found")

    # loop variable renamed from `set`, which shadowed the builtin
    logger.debug("Fetched VMSS: {}".format([x['name'] for x in vmss]))

    return vmss
# Example 6 — scraped separator, commented out so the file parses
def fetch_webapps(filter, configuration, secrets):
    """Fetch all web apps matching the given resource filter.

    :raises FailedActivity: when no web app matches the filter
    """
    apps = fetch_resources(filter, RES_TYPE_WEBAPP, secrets, configuration)
    if not apps:
        logger.warning("No web apps found")
        raise FailedActivity("No web apps found")
    logger.debug(
        "Fetched web apps: {}".format([app['name'] for app in apps]))
    return apps
# Example 7 — scraped separator, commented out so the file parses
def set_service_deployment_configuration(
        cluster: str,
        service: str,
        maximum_percent: int = 200,
        minimum_healthy_percent: int = 100,
        configuration: Configuration = None,
        secrets: Secrets = None) -> AWSResponse:
    """
    Sets the maximum healthy count and minimum healthy percentage values for
    a services deployment configuration

    :param cluster: The ECS cluster name or ARN
    :param service: The ECS service name
    :param maximum_percent: The upper limit on the number of tasks a service is
        allowed to have in RUNNING or PENDING during deployment
    :param minimum_healthy_percent: The lower limit on the number of tasks a
        service must keep in RUNNING to be considered healthy during deployment
    :param configuration: access values used by actions/probes
    :param secrets: values that need to be passed on to actions/probes
    :return: Dict[str, Any]
    """
    # sanity-check the pair of bounds before touching AWS at all
    if minimum_healthy_percent > maximum_percent:
        raise FailedActivity('minimum_healthy_percent cannot be larger '
                             'than maximum_percent')

    client = aws_client("ecs", configuration, secrets)

    if not validate_cluster(cluster, client):
        raise FailedActivity('unable to locate cluster: %s' % cluster)
    if not validate_service(cluster, service, client):
        raise FailedActivity('unable to locate service: %s on %s' % (
            service, cluster))

    return client.update_service(
        cluster=cluster,
        service=service,
        deploymentConfiguration={
            'maximumPercent': maximum_percent,
            'minimumHealthyPercent': minimum_healthy_percent
        })
# Example 8 — scraped separator, commented out so the file parses
def get_load_balancer_arns(load_balancer_names: List[str],
                           client: boto3.client) -> Dict[str, List[str]]:
    """
    Returns load balancer arns categorized by the type of load balancer

    return structure:
    {
        'network': ['load balancer arn'],
        'application': ['load balancer arn']
    }

    :raises FailedActivity: when any load balancer is missing or inactive,
        or when the describe call itself fails
    """
    results = {}
    logger.debug('Searching for load balancer name(s): {}.'.format(
        load_balancer_names))

    try:
        response = client.describe_load_balancers(
            Names=load_balancer_names)

        for lb in response['LoadBalancers']:
            if lb['State']['Code'] != 'active':
                raise FailedActivity(
                    'Invalid state for load balancer {}: '
                    '{} is not active'.format(
                        lb['LoadBalancerName'], lb['State']['Code']))
            results.setdefault(lb['Type'], []).append(
                lb['LoadBalancerArn'])
            results.setdefault('Names', []).append(lb['LoadBalancerName'])
    except ClientError as e:
        raise FailedActivity(e.response['Error']['Message'])

    # use .get() with a default: when no load balancer matched, the 'Names'
    # key was never created and results['Names'] raised a KeyError instead
    # of the intended FailedActivity
    missing_lbs = [
        l for l in load_balancer_names
        if l not in results.get('Names', [])]
    if missing_lbs:
        raise FailedActivity(
            'Unable to locate load balancer(s): {}'.format(missing_lbs))

    if not results:
        raise FailedActivity(
            'Unable to find any load balancer(s) matching name(s): {}'.format(
                load_balancer_names))

    return results
# Example 9 — scraped separator, commented out so the file parses
def modify_instance_fleet(cluster_id: str,
                          fleet_id: str,
                          on_demand_capacity: int = None,
                          spot_capacity: int = None,
                          configuration: Configuration = None,
                          secrets: Secrets = None) -> AWSResponse:
    """Modify the on-demand and spot capacities for an instance fleet

    :param cluster_id: The cluster id
    :param fleet_id: The instance fleet id
    :param on_demand_capacity: Target capacity of on-demand units
    :param spot_capacity: Target capacity of spot units
    :param configuration: access values used by actions/probes
    :param secrets: values that need to be passed on to actions/probes
    :return: Dict[str, Any]
    """
    # explicit None checks so a legitimate target capacity of 0 is accepted
    # (truthiness checks previously discarded 0 values)
    if on_demand_capacity is None and spot_capacity is None:
        raise FailedActivity('Must provide at least one of '
                             '["on_demand_capacity", "spot_capacity"]')

    client = aws_client('emr', configuration, secrets)

    fleet_spec = {'InstanceFleetId': fleet_id}
    if spot_capacity is not None:
        fleet_spec['TargetSpotCapacity'] = spot_capacity
    if on_demand_capacity is not None:
        fleet_spec['TargetOnDemandCapacity'] = on_demand_capacity

    params = {
        'ClusterId': cluster_id,
        'InstanceFleet': fleet_spec
    }

    try:
        client.modify_instance_fleet(**params)
        return get_instance_fleet(client, cluster_id, fleet_id)
    except ClientError as e:
        logger.exception(e.response['Error']['Message'])
        raise FailedActivity(e.response['Error']['Message'])
def reboot_db_instance(
    db_instance_identifier: str,
    force_failover: bool = False,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> AWSResponse:
    """
    Forces a reboot of your DB instance.

    :raises FailedActivity: when no identifier is given or the reboot fails
    """
    client = aws_client("rds", configuration, secrets)
    if not db_instance_identifier:
        raise FailedActivity("you must specify the db instance identifier")
    kwargs = dict(
        DBInstanceIdentifier=db_instance_identifier,
        ForceFailover=force_failover,
    )
    try:
        return client.reboot_db_instance(**kwargs)
    except Exception as x:
        raise FailedActivity(
            "failed issuing a reboot of db instance '{}': '{}'".format(
                db_instance_identifier, str(x)))
# Example 11 — scraped separator, commented out so the file parses
def validate_processes(process_names: List[str]):
    """Ensure every entry names a valid ASG scaling process.

    :param process_names: process names to validate
    :raises FailedActivity: when any name is not a known scaling process
    """
    valid_processes = [
        'Launch', 'Terminate', 'HealthCheck', 'AZRebalance',
        'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer',
        'ReplaceUnhealthy'
    ]

    unknown = []
    for name in process_names:
        if name not in valid_processes:
            unknown.append(name)
    if unknown:
        raise FailedActivity('invalid process(es): {} not in {}.'.format(
            unknown, valid_processes))
# Example 12 — scraped separator, commented out so the file parses
def network_latency(instance_ids: List[str] = None,
                    execution_duration: str = "60",
                    device: str = "eth0",
                    delay: str = "1000ms",
                    variance: str = "500ms",
                    ratio: str = "",
                    configuration: Configuration = None,
                    secrets: Secrets = None) -> List[AWSResponse]:
    """
    Increases the response time of the virtual machine.

    Parameters
    ----------
    instance_ids : List[str]
        The EC2 instances to inject latency on. Required in practice:
        no default filtering is applied here and iterating over None fails.
    execution_duration : str, optional
        How long the latency injection lasts, in seconds.
        Defaults to 60 seconds.
    device : str, optional
        default to eth0, or specify the device name, e.g. enps0
    delay : str
        Added delay in ms. Defaults to 1000ms.
    variance : str
        Variance of the delay in ms. Defaults to 500ms.
    ratio: str = "5%", optional
        the specific ratio of how many Variance of the delay in ms.
        Defaults to "".
    configuration : Configuration
        Chaostoolkit Configuration
    secrets : Secrets
        Chaostoolkit Secrets

    Raises
    ------
    FailedActivity
        when the SSM command cannot be issued for any instance (any
        exception raised in the loop is wrapped).
    """
    logger.debug(
        "Start network_latency: configuration='{}', instance_ids='{}'".format(
            configuration, instance_ids))
    response = []
    try:
        for instance in instance_ids:
            # build the tc-style netem parameter string consumed by the
            # NETWORK_UTIL SSM document, e.g. "delay 1000ms 500ms 5%"
            param = dict()
            param["duration"] = execution_duration
            param["instance_id"] = instance
            param["param"] = "delay " + delay + " " + variance + " " + ratio
            param["device"] = device
            response.append(
                __linux_from_default(instance_id=instance,
                                     configuration=configuration,
                                     secrets=secrets,
                                     action=NETWORK_UTIL,
                                     parameters=param))
        return response
    except Exception as x:
        raise FailedActivity(
            "failed issuing a execute of shell script via AWS SSM {}".format(
                str(x)))
# Example 13 — scraped separator, commented out so the file parses
def attach_volume(asg_names: List[str] = None,
                  tags: List[Dict[str, str]] = None,
                  configuration: Configuration = None,
                  secrets: Secrets = None) -> List[AWSResponse]:
    """
    Attaches ebs volumes that have been previously detached by CTK

    Parameters:
        One of:
            asg_names: list: one or more asg names
            tags: list: key/value pairs to identify asgs by

    `tags` are expected as a list of dictionary objects:
        [
            {'Key': 'TagKey1', 'Value': 'TagValue1'},
            {'Key': 'TagKey2', 'Value': 'TagValue2'},
            ...
        ]

    :returns: one attach response per volume re-attached
    :raises FailedActivity: when no CTK-detached volume exists
    """
    validate_asgs(asg_names, tags)
    client = aws_client('autoscaling', configuration, secrets)

    if asg_names:
        asgs = get_asg_by_name(asg_names, client)
    else:
        asgs = get_asg_by_tags(tags, client)

    ec2_client = aws_client('ec2', configuration, secrets)
    volumes = get_detached_volumes(ec2_client)
    if not volumes:
        # fixed typo in the error message ('ChaosTookit')
        raise FailedActivity('No volumes detached by ChaosToolkit found')

    results = []
    for volume in volumes:
        for t in volume['Tags']:
            # the detach action records where the volume came from in a
            # 'ChaosToolkitDetached' tag of the form:
            #   DeviceName=<dev>;InstanceId=<id>;ASG=<name>
            if t['Key'] != 'ChaosToolkitDetached':
                continue
            attach_data = t['Value'].split(';')
            if len(attach_data) != 3:
                continue

            device_name = attach_data[0].split('=')[-1]
            instance_id = attach_data[1].split('=')[-1]
            asg_name = attach_data[2].split('=')[-1]

            # only re-attach volumes that belong to one of the selected ASGs
            if asg_name not in [
                    a['AutoScalingGroupName']
                    for a in asgs['AutoScalingGroups']
            ]:
                continue
            results.append(
                attach_instance_volume(client, instance_id, volume['VolumeId'],
                                       device_name))
    return results
def put_function_concurrency(
    function_name: str,
    concurrent_executions: int,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> AWSResponse:
    """
    Throttles Lambda by setting reserved concurrency amount.

    :raises FailedActivity: when no function name is given or the call fails
    """
    client = aws_client("lambda", configuration, secrets)
    if not function_name:
        raise FailedActivity("you must specify the lambda function name")
    try:
        response = client.put_function_concurrency(
            FunctionName=function_name,
            ReservedConcurrentExecutions=concurrent_executions,
        )
    except Exception as x:
        raise FailedActivity(
            f"failed throttling lambda function '{function_name}': '{str(x)}'")
    return response
# Example 15 — scraped separator, commented out so the file parses
def node_resource_group_query(query, sample, configuration, secrets):
    """Pick a random AKS cluster and format a query for its node resource group.

    :raises FailedActivity: when no AKS cluster matches the query
    """
    aks = fetch_resources(query, RES_TYPE_AKS, secrets, configuration)
    if not aks:
        logger.warning("No AKS clusters found")
        raise FailedActivity("No AKS clusters found")
    logger.debug("Found AKS clusters: {}".format([x['name'] for x in aks]))

    cluster = random.choice(aks)
    return format_query(sample, cluster['properties']['nodeResourceGroup'])
# Example 16 — scraped separator, commented out so the file parses
def service_is_deploying(cluster: str,
                         service: str,
                         configuration: Configuration = None,
                         secrets: Secrets = None) -> bool:
    """Checks to make sure there is not an in progress deployment

    :param cluster: The ECS cluster name or ARN
    :param service: The ECS service name
    :param configuration: access values used by actions/probes
    :param secrets: values that need to be passed on to actions/probes
    :return: True when the service has more than one active deployment
    :raises FailedActivity: when the service cannot be described
    """
    client = aws_client("ecs", configuration, secrets)
    response = client.describe_services(cluster=cluster, services=[service])
    services = response.get('services', [])
    if not services:
        raise FailedActivity('Error retrieving service data from AWS')
    # default to [] — a missing 'deployments' key previously produced
    # len(None) and a TypeError instead of a clean False
    return len(services[0].get('deployments', [])) > 1
# Example 17 — scraped separator, commented out so the file parses
def uncordon_node(name: str = None, label_selector: str = None,
                  secrets: Secrets = None):
    """
    Uncordon nodes matching the given label name, so that pods can be
    scheduled on them again.

    :param name: select a single node by its metadata name
    :param label_selector: select nodes by label when no name is given
    :param secrets: values that need to be passed on to actions/probes
    :raises FailedActivity: when no node matches, or patching a node fails
    """
    api = create_k8s_api_client(secrets)

    v1 = client.CoreV1Api(api)
    if name:
        ret = v1.list_node(field_selector="metadata.name={}".format(name))
        logger.debug("Found {d} node named '{s}'".format(
            d=len(ret.items), s=name))
    else:
        ret = v1.list_node(label_selector=label_selector)
        logger.debug("Found {d} node(s) labelled '{s}'".format(
            d=len(ret.items), s=label_selector))

    # (removed a duplicate debug line here that always logged the label
    # selector, even when the lookup was done by name)

    nodes = ret.items
    if not nodes:
        raise FailedActivity(
            "failed to find a node that matches selector {}".format(
                label_selector))

    # clearing `unschedulable` is what uncordons a node
    body = {
        "spec": {
            "unschedulable": False
        }
    }

    for n in nodes:
        try:
            v1.patch_node(n.metadata.name, body)
        except ApiException as x:
            logger.debug("Scheduling node '{}' failed: {}".format(
                n.metadata.name, x.body))
            raise FailedActivity("Failed to schedule node '{}': {}".format(
                n.metadata.name, x.body))
# Example 18 — scraped separator, commented out so the file parses
def stop_instance(instance_id: str = None,
                  az: str = None,
                  force: bool = False,
                  filters: List[Dict[str, Any]] = None,
                  configuration: Configuration = None,
                  secrets: Secrets = None) -> AWSResponse:
    """
    Stop a single EC2 instance.

    You may provide an instance id explicitly or, if you only specify the AZ,
    a random instance will be selected. If you need more control, you can
    also provide a list of filters following the documentation
    https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_instances
    """

    if not az and not instance_id and not filters:
        raise FailedActivity(
            "To stop an EC2 instance, you must specify either the instance id,"
            " an AZ to pick a random instance from, or a set of filters.")

    client = aws_client('ec2', configuration, secrets)

    if not instance_id:
        filters = deepcopy(filters) if filters else []

        if az:
            filters.append({'Name': 'availability-zone', 'Values': [az]})
        instance_types = pick_random_instance(filters, client)

        if not instance_types:
            raise FailedActivity(
                "No instances in availability zone: {}".format(az))
    else:
        # get_instance_type_by_id expects a list of ids (see stop_instances);
        # passing the bare string made describe_instances fail
        instance_types = get_instance_type_by_id([instance_id], client)

    logger.debug("Picked EC2 instance '{}' from AZ '{}' to be stopped".format(
        instance_types, az))

    return stop_instances_any_type(instance_types=instance_types,
                                   force=force,
                                   client=client)
# Example 19 — scraped separator, commented out so the file parses
def get_asg_by_name(asg_names: List[str], client: boto3.client) -> AWSResponse:
    """Describe the named ASGs, failing when any name cannot be resolved.

    :raises FailedActivity: listing the names that did not match any ASG
    """
    results = {"AutoScalingGroups": []}
    pages = client.get_paginator("describe_auto_scaling_groups").paginate(
        AutoScalingGroupNames=asg_names)
    for page in pages:
        results["AutoScalingGroups"].extend(page["AutoScalingGroups"])

    found = {a["AutoScalingGroupName"] for a in results["AutoScalingGroups"]}
    invalid_asgs = [name for name in asg_names if name not in found]

    if invalid_asgs:
        raise FailedActivity(f"No ASG(s) found matching: {invalid_asgs}")
    return results
# Example 20 — scraped separator, commented out so the file parses
def failover_db_cluster(db_cluster_identifier: str,
                        target_db_instance_identifier: str = None,
                        configuration: Configuration = None,
                        secrets: Secrets = None) -> AWSResponse:
    """
    Forces a failover for a DB cluster.

    :raises FailedActivity: when no identifier is given or the failover fails
    """
    client = aws_client("rds", configuration, secrets)
    if not db_cluster_identifier:
        raise FailedActivity(
            "you must specify the db cluster identifier"
        )
    kwargs = dict(
        DBClusterIdentifier=db_cluster_identifier,
        TargetDBInstanceIdentifier=target_db_instance_identifier,
    )
    try:
        return client.failover_db_cluster(**kwargs)
    except Exception as x:
        raise FailedActivity(
            "failed issuing a failover for DB cluster '{}': '{}'".format(
                db_cluster_identifier, str(x)))
# Example 21 — scraped separator, commented out so the file parses
def toggle_versioning(
    bucket_name: str,
    mfa_delete: str = None,
    status: str = None,
    mfa: str = None,
    owner: str = None,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> None:
    """Toggles versioning on a S3 bucket

    If the "status" parameter is not provided, the bucket will be scanned to
    determine if versioning is enabled. If it is enabled, it will be suspended.
    If it is suspended it will be enabled using basic values unless MFA is provided.

    :param bucket_name: The S3 bucket name
    :param status: "Enabled" to turn on versioning, "Suspended" to disable
    :param mfa: The authentication device serial number, a space, and the value from
        the device (optional)
    :param mfa_delete: Specifies if MFA delete is enabled in the bucket versioning
        (optional)
    :param owner: The account ID of the expected bucket owner (optional)
    :param configuration: access values used by actions/probes (optional)
    :param secrets: values that need to be passed on to actions/probes (optional)
    :return: None
    """
    client = aws_client("s3", configuration, secrets)
    if not validate_bucket_exists(client, bucket_name):
        raise FailedActivity(f'Bucket "{bucket_name}" does not exist!')

    # no explicit status: flip whatever the bucket currently has
    if not status:
        current = get_bucket_versioning(client, bucket_name)
        status = "Enabled" if current == "Suspended" else "Suspended"
    else:
        get_bucket_versioning(client, bucket_name)

    versioning_config = {"Status": status}
    if mfa_delete:
        versioning_config["MFADelete"] = mfa_delete

    params = {
        "Bucket": bucket_name,
        "VersioningConfiguration": versioning_config,
    }
    if mfa:
        params["MFA"] = mfa
    if owner:
        params["ExpectedBucketOwner"] = owner

    client.put_bucket_versioning(**params)
# Example 22 — scraped separator, commented out so the file parses
def kill_process(instance_ids: List[str] = None,
                 execution_duration: str = "1",
                 process: str = None,
                 signal: str = "",
                 configuration: Configuration = None,
                 secrets: Secrets = None) -> List[AWSResponse]:
    """
    kill -s [signal_as_below] [processname]
    HUP INT QUIT ILL TRAP ABRT EMT FPE KILL BUS SEGV SYS PIPE ALRM TERM URG
    STOP TSTP CONT CHLD TTIN TTOU IO XCPU XFSZ VTALRM PROF WINCH INFO USR1 USR2

    Parameters
    ----------
    instance_ids : List[str]
        Filter the virtual machines. If the filter is omitted all machines in
        the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional default to 1 second
        This is not technically not useful as the process usually is killed
        without and delay, however you can set more seconds here to let the
        thread wait for more time to extend your experiment execution in case
        you need to watch more on the observation metrics.
    process : str
        process or pid that kill command accetps
    signal : str , default to ""
        The signal of kill command, use kill -l for help
    configuration : Configuration
        Chaostoolkit Configuration
    secrets : Secrets
        Chaostoolkit Secrets
    """
    # fixed copy-paste: this log line previously said "Start network_latency"
    logger.debug(
        "Start kill_process: configuration='{}', instance_ids='{}'".format(
            configuration, instance_ids))
    response = []
    try:
        for instance in instance_ids:
            param = dict()
            param["duration"] = execution_duration
            param["instance_id"] = instance
            param["process_name"] = process
            param["signal"] = signal
            response.append(
                __linux_from_default(instance_id=instance,
                                     configuration=configuration,
                                     secrets=secrets,
                                     action=KILL_PROCESS,
                                     parameters=param))
        return response
    except Exception as x:
        raise FailedActivity(
            "failed issuing a execute of shell script via AWS SSM {}".format(
                str(x)))
# Example 23 — scraped separator, commented out so the file parses
def describe_replication_groups(group_ids: List[str],
                                client: boto3.client) -> List[AWSResponse]:
    """Describe each ElastiCache replication group by id.

    :raises FailedActivity: when any group id cannot be found
    """
    results = []
    for group_id in group_ids:
        groups = client.describe_replication_groups(
            ReplicationGroupId=group_id)['ReplicationGroups']

        if not groups:
            raise FailedActivity('Replication group %s not found.' % group_id)

        results.extend(groups)
    return results
# Example 24 — scraped separator, commented out so the file parses
def targets_health_count(tg_names: List[str],
                         configuration: Configuration = None,
                         secrets: Secrets = None) -> AWSResponse:
    """
    Count of healthy/unhealthy targets per targetgroup

    :raises FailedActivity: when no target group name is provided
    """
    if not tg_names:
        raise FailedActivity("Non-empty list of target groups is required")

    elbv2 = aws_client("elbv2", configuration, secrets)
    return get_targets_health_count(tg_names=tg_names, client=elbv2)
# Example 25 — scraped separator, commented out so the file parses
def describe_cache_clusters(cluster_ids: List[str],
                            client: boto3.client) -> List[AWSResponse]:
    """Describe each ElastiCache cluster by id, including node info.

    :raises FailedActivity: when any cluster id cannot be found
    """
    results = []
    for cluster_id in cluster_ids:
        clusters = client.describe_cache_clusters(
            CacheClusterId=cluster_id, ShowCacheNodeInfo=True)['CacheClusters']

        if not clusters:
            raise FailedActivity('Cache cluster %s not found.' % cluster_id)

        results.extend(clusters)
    return results
# Example 26 — scraped separator, commented out so the file parses
def get_content(id: str) -> str:
    """Get a script's content

    :param id: the script identifier (note: shadows the builtin `id`;
        kept for backward compatibility with keyword callers)
    :raises FailedActivity: when the content cannot be retrieved
    """
    settings = get_loaded_settings()

    try:
        with client_session(verify_tls=False, settings=settings) as session:
            content = get_script_content(session, id)
    except Exception as ex:
        msg = "Unable to fetch script content. Reason: {}".format(str(ex))
        logger.error(msg)
        logger.debug(ex)
        raise FailedActivity(msg)

    return content
def get_experiment(experiment_id: str,
                   configuration: Configuration = None,
                   secrets: Secrets = None) -> AWSResponse:
    """
    Gets information about the specified experiment.

    :param experiment_id: str representing the id of the experiment to fetch information
        of
    :param configuration: Configuration object representing the CTK Configuration
    :param secrets: Secret object representing the CTK Secrets
    :returns: AWSResponse representing the response from FIS upon retrieving the
        experiment information

    Examples
    --------
    >>> get_experiment(
    ...    experiment_id="EXPTUCK2dxepXgkR38"
    ... )
    {'ResponseMetadata': {'RequestId': '0665fe39-2213-400b-b7ff-5f1ab9b7a5ea',
    'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Fri, 20 Aug 2021 11:08:27 GMT',
    ...
    'experiment': {'id': 'EXPTUCK2dxepXgkR38',
    'experimentTemplateId': 'EXT6oWVA1WrLNy4XS',
    ...
    }
    """
    if not experiment_id:
        raise FailedActivity(
            "You must pass a valid experiment id, id provided was empty")

    fis_client = aws_client(
        resource_name="fis", configuration=configuration, secrets=secrets)

    try:
        experiment = fis_client.get_experiment(id=experiment_id)
    except Exception as ex:
        raise FailedActivity(f"Get Experiment failed, reason was: {ex}")
    return experiment
# Example 28 — scraped separator, commented out so the file parses
def attach_instance_volume(client: boto3.client, instance_id: str,
                           volume_id: str, mount_point: str) -> AWSResponse:
    """Attach an EBS volume to an instance at the given device mount point.

    :raises FailedActivity: when the attach call fails
    """
    try:
        response = client.attach_volume(
            Device=mount_point, InstanceId=instance_id, VolumeId=volume_id)
    except ClientError as e:
        raise FailedActivity(
            'Unable to attach volume %s to instance %s: %s' %
            (volume_id, instance_id, e.response['Error']['Message']))
    logger.debug('Attached volume %s to instance %s' %
                 (volume_id, instance_id))
    return response
# Example 29 — scraped separator, commented out so the file parses
def killall_processes(instance_ids: List[str] = None,
                      execution_duration: str = "1",
                      process_name: str = None,
                      signal: str = "",
                      configuration: Configuration = None,
                      secrets: Secrets = None) -> List[AWSResponse]:
    """
    The killall utility kills processes selected by name
    refer to https://linux.die.net/man/1/killall

    Parameters
    ----------
    instance_ids : List[str]
        Filter the virtual machines. If the filter is omitted all machines in
        the subscription will be selected as potential chaos candidates.
    execution_duration : str, optional default to 1 second
        This is not technically not useful as the process usually is killed
        without and delay, however you can set more seconds here to let the
        thread wait for more time to extend your experiment execution in case
        you need to watch more on the observation metrics.
    process_name : str
        Name of the process to be killed
    signal : str , default to ""
        The signal of killall command, e.g. use -9 to force kill
    configuration : Configuration
        Chaostoolkit Configuration
    secrets : Secrets
        Chaostoolkit Secrets
    """
    # fixed copy-paste: this log line previously said "Start network_latency"
    logger.debug(
        "Start killall_processes: configuration='{}', instance_ids='{}'".format(
            configuration, instance_ids))
    response = []
    try:
        for instance in instance_ids:
            param = dict()
            param["duration"] = execution_duration
            param["instance_id"] = instance
            param["process_name"] = process_name
            param["signal"] = signal
            response.append(
                __linux_from_default(instance_id=instance,
                                     configuration=configuration,
                                     secrets=secrets,
                                     action=KILLALL_PROCESSES,
                                     parameters=param))
        return response
    except Exception as x:
        raise FailedActivity(
            "failed issuing a execute of shell script via AWS SSM {}".format(
                str(x)))
def delete_db_instance(
    db_instance_identifier: str,
    skip_final_snapshot: bool = True,
    db_snapshot_identifier: str = None,
    delete_automated_backups: bool = True,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> AWSResponse:
    """
    Deletes a RDS instance

    - db_instance_identifier: the identifier of the RDS instance to delete
    - skip_final_snapshot: boolean (true): determines whether or not to
        perform a final snapshot of the rds instance before deletion
    - db_snapshot_identifier: the identifier to give the final rds snapshot
    - delete_automated_backups: boolean (true): determines if the automated
        backups of the rds instance are deleted immediately
    """
    client = aws_client("rds", configuration, secrets)

    params = {
        "DBInstanceIdentifier": db_instance_identifier,
        "DeleteAutomatedBackups": delete_automated_backups,
        "SkipFinalSnapshot": skip_final_snapshot,
    }

    # a final snapshot requires a name to store it under
    if not skip_final_snapshot:
        if not db_snapshot_identifier:
            raise FailedActivity("You must provide a snapshot identifier if "
                                 "taking a final DB snapshot")
        params["FinalDBSnapshotIdentifier"] = db_snapshot_identifier

    try:
        return client.delete_db_instance(**params)
    except ClientError as e:
        raise FailedActivity(
            "Failed to delete RDS DB instance %s: %s" %
            (db_instance_identifier, e.response["Error"]["Message"]))