def test_create_client_with_aws_role_arn_and_profile(boto3: object):
    """Assuming a role ARN together with a named profile should call STS
    for temporary credentials before creating the service client."""
    boto3.DEFAULT_SESSION = None
    creds = get_credentials(dict())
    # (removed unused local `finalArgs = {}` left over from an earlier edit)

    # First call: no default session exists yet, STS is hit for role creds.
    aws_client(
        'ecs', configuration=CONFIG_WITH_ARN_AND_PROFILE)
    boto3.client.assert_any_call(
        'sts', region_name="us-east-1", **creds)
    boto3.client.assert_called_with(
            'ecs',
            region_name="us-east-1",
            aws_access_key_id=ANY,
            aws_secret_access_key=ANY,
            aws_session_token=ANY)

    # Second call: the default session is set up from the profile name.
    aws_client(
        'ecs', configuration=CONFIG_WITH_ARN_AND_PROFILE)
    boto3.client.assert_any_call(
        'sts', region_name="us-east-1", **creds)
    boto3.setup_default_session.assert_called_with(
            profile_name="myprofile", region_name="us-east-1", **creds)
    boto3.client.assert_called_with(
            'ecs',
            region_name="us-east-1",
            aws_access_key_id=ANY,
            aws_secret_access_key=ANY,
            aws_session_token=ANY)
# Example #2
def test_create_client_with_aws_role_arn(boto3: object):
    """Assuming a role ARN (no profile) should call STS for temporary
    credentials before creating the service client."""
    boto3.DEFAULT_SESSION = None
    expected_creds = get_credentials(dict())
    sts_issued = dict(
        aws_access_key_id=ANY,
        aws_secret_access_key=ANY,
        aws_session_token=ANY,
    )

    # First call: STS is consulted, then the ecs client is built with the
    # temporary credentials it returned.
    aws_client("ecs", configuration=CONFIG_WITH_ARN)
    boto3.client.assert_any_call(
        "sts", region_name="us-east-1", **expected_creds)
    boto3.client.assert_called_with(
        "ecs", region_name="us-east-1", **sts_issued)

    # Second call: the default session is created without any profile name.
    aws_client("ecs", configuration=CONFIG_WITH_ARN)
    boto3.client.assert_any_call(
        "sts", region_name="us-east-1", **expected_creds)
    boto3.setup_default_session.assert_called_with(
        profile_name=None, region_name="us-east-1", **expected_creds)
    boto3.client.assert_called_with(
        "ecs", region_name="us-east-1", **sts_issued)
# Example #3
def test_region_must_be_set(logger: logging.Logger, boto3: object):
    """aws_client must interrupt the experiment when no region is available."""
    boto3.DEFAULT_SESSION = None

    with pytest.raises(InterruptExecution):
        aws_client("ecs")

    # BUG FIX: the assertion used to sit inside the `with` block AFTER the
    # raising call, so it never executed. It must run after the block.
    logger.debug.assert_any_call(
        "The configuration key `aws_region` is not set, looking in the "
        "environment instead for `AWS_REGION` or `AWS_DEFAULT_REGION`")
def test_region_can_be_set_via_config(
        logger: logging.Logger, boto3: object):
    """The `aws_region` configuration key supplies the client region."""
    boto3.DEFAULT_SESSION = None
    # (removed unused local `creds`: nothing in this test consumed it)

    aws_client('ecs', configuration={"aws_region": "us-west2"})
    logger.debug.assert_any_call("Using AWS region 'us-west2'")
    logger.warning.assert_not_called()
def test_region_must_be_set(logger: logging.Logger, boto3: object):
    """aws_client must interrupt the experiment when no region is available."""
    boto3.DEFAULT_SESSION = None
    # (removed unused local `creds`: nothing in this test consumed it)

    with pytest.raises(InterruptExecution):
        aws_client('ecs')

    # BUG FIX: the assertion used to sit inside the `with` block AFTER the
    # raising call, so it never executed. It must run after the block.
    logger.debug.assert_any_call(
        'The configuration key `aws_region` is not set, looking in the '
        'environment instead for `AWS_REGION` or `AWS_DEFAULT_REGION`')
def attach_volume(
    asg_names: List[str] = None,
    tags: List[Dict[str, str]] = None,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> List[AWSResponse]:
    """
    Attaches ebs volumes that have been previously detached by CTK

    Parameters:
        One of:
            asg_names: list: one or more asg names
            tags: list: key/value pairs to identify asgs by

    `tags` are expected as a list of dictionary objects:
        [
            {'Key': 'TagKey1', 'Value': 'TagValue1'},
            {'Key': 'TagKey2', 'Value': 'TagValue2'},
            ...
        ]
    """
    validate_asgs(asg_names, tags)
    client = aws_client("autoscaling", configuration, secrets)

    if asg_names:
        asgs = get_asg_by_name(asg_names, client)
    else:
        asgs = get_asg_by_tags(tags, client)

    ec2_client = aws_client("ec2", configuration, secrets)
    volumes = get_detached_volumes(ec2_client)
    if not volumes:
        # BUG FIX: message previously read "ChaosTookit" (typo).
        raise FailedActivity("No volumes detached by ChaosToolkit found")

    # Hoist ASG-name membership into a set: the original rebuilt the list
    # comprehension on every tag of every volume (accidental O(n*m)).
    target_asg_names = {
        a["AutoScalingGroupName"] for a in asgs["AutoScalingGroups"]
    }

    results = []
    for volume in volumes:
        # A detached volume carries a "ChaosToolkitDetached" tag whose value
        # encodes three "key=value" fields separated by ';':
        # device name, instance id and owning ASG name (in that order).
        # Use .get so volumes without any Tags key are simply skipped.
        for t in volume.get("Tags", []):
            if t["Key"] != "ChaosToolkitDetached":
                continue
            attach_data = t["Value"].split(";")
            if len(attach_data) != 3:
                continue

            device_name = attach_data[0].split("=")[-1]
            instance_id = attach_data[1].split("=")[-1]
            asg_name = attach_data[2].split("=")[-1]

            # Only re-attach volumes that belong to one of the target ASGs.
            if asg_name not in target_asg_names:
                continue
            results.append(
                attach_instance_volume(
                    client, instance_id, volume["VolumeId"], device_name
                )
            )
    return results
# Example #7
def test_create_client_from_cred_keys(boto3: object):
    """Credential keys taken from the secrets are forwarded to boto3."""
    boto3.DEFAULT_SESSION = None
    expected = get_credentials(SECRETS)

    # First call builds the client directly from the credential keys.
    aws_client("ecs", configuration=CONFIGURATION, secrets=SECRETS)
    boto3.client.assert_called_with(
        "ecs", region_name="us-east-1", **expected)

    # Second call must also set up the default session (no profile name).
    aws_client("ecs", configuration=CONFIGURATION, secrets=SECRETS)
    boto3.setup_default_session.assert_called_with(
        profile_name=None, region_name="us-east-1", **expected)
    boto3.client.assert_called_with(
        "ecs", region_name="us-east-1", **expected)
# Example #8
def test_create_client_from_profile_name(boto3: object):
    """A profile name in the configuration drives the default session."""
    boto3.DEFAULT_SESSION = None
    expected = get_credentials(dict())

    # First call creates the client while no default session exists.
    aws_client("ecs", configuration=CONFIG_WITH_PROFILE)
    boto3.client.assert_called_with(
        "ecs", region_name="us-east-1", **expected)

    # Second call must set up the default session from the profile name.
    aws_client("ecs", configuration=CONFIG_WITH_PROFILE)
    boto3.setup_default_session.assert_called_with(
        profile_name="myprofile", region_name="us-east-1", **expected)
    boto3.client.assert_called_with(
        "ecs", region_name="us-east-1", **expected)
def test_region_can_be_set_as_AWS_REGION(logger: logging.Logger, boto3: object):
    """The region may be supplied via the `AWS_REGION` environment variable."""
    boto3.DEFAULT_SESSION = None
    creds = get_credentials(dict())

    try:
        os.environ["AWS_REGION"] = "us-west-2"
        aws_client('ecs')
        logger.debug.assert_any_call(
            'The configuration key `aws_region` is not set, looking in the '
            'environment instead for `AWS_REGION` or `AWS_DEFAULT_REGION`')
        logger.warning.assert_not_called()
        # BUG FIX: this asserted 'us-west2' while the env var holds
        # "us-west-2"; the mismatch was masked by the old bare `except:`.
        logger.debug.assert_any_call("Using AWS region 'us-west-2'")
    finally:
        # BUG FIX: use `finally` so the env var is always removed. The old
        # `except:` both swallowed assertion failures (hiding test bugs) and
        # only cleaned up on failure, leaking AWS_REGION into later tests.
        os.environ.pop("AWS_REGION", None)
# Example #10
def set_subnets(
    load_balancer_names: List[str],
    subnet_ids: List[str],
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> List[AWSResponse]:
    """
    Changes the subnets for the specified application load balancer(s)
    This action will replace the existing subnets on an application
    load balancer with the specified subnets.

    Parameters:
        - load_balancer_names: a list of load balancer names
        - subnet_ids: a list of subnet ids

    returns:
        [
            {
                'LoadBalancerArn': 'string',
                'AvailabilityZones': {
                    'ZoneName': 'string',
                    'SubnetId': 'string',
                    'LoadBalancerAddresses': [
                        {
                            'IpAddress': 'string',
                            'AllocationId': 'string'
                        }
                    ]
                }
            },
            ...
        ]
    """
    # Resolve/validate the subnet ids against EC2 before touching the ELB.
    subnet_ids = get_subnets(subnet_ids,
                             aws_client("ec2", configuration, secrets))

    client = aws_client("elbv2", configuration, secrets)
    load_balancers = get_load_balancer_arns(load_balancer_names, client)

    # Only application load balancers support changing subnets here.
    if load_balancers.get("network", []):
        raise FailedActivity(
            "Cannot change subnets of network load balancers.")

    results = []
    for load_balancer in load_balancers["application"]:
        response = client.set_subnets(LoadBalancerArn=load_balancer,
                                      Subnets=subnet_ids)
        # Annotate each response with the ARN it applies to for the caller.
        response["LoadBalancerArn"] = load_balancer
        results.append(response)
    return results
def detach_random_volume(
    asg_names: List[str] = None,
    tags: List[Dict[str, str]] = None,
    force: bool = True,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> List[AWSResponse]:
    """
    Detaches a random (non root) ebs volume from ec2 instances
    associated to an ASG

    Parameters:
        One of:
            asg_names: a list of one or more asg names
            tags: a list of key/value pair to identify asg(s) by

        force: force detach volume (default: true)

    `tags` are expected as a list of dictionary objects:
    [
        {'Key': 'TagKey1', 'Value': 'TagValue1'},
        {'Key': 'TagKey2', 'Value': 'TagValue2'},
        ...
    ]
    """
    validate_asgs(asg_names, tags)
    asg_client = aws_client("autoscaling", configuration, secrets)

    # Look up the target ASGs either by explicit name or by tag match.
    asgs = (
        get_asg_by_name(asg_names, asg_client)
        if asg_names
        else get_asg_by_tags(tags, asg_client)
    )

    ec2 = aws_client("ec2", configuration, secrets)
    results = []
    for group in asgs["AutoScalingGroups"]:
        group_name = group["AutoScalingGroupName"]
        instance_ids = [i["InstanceId"] for i in group["Instances"]]
        if not instance_ids:
            raise FailedActivity(
                "no instances found for asg: %s" % (group_name)
            )
        for volume in get_random_instance_volume(ec2, instance_ids):
            results.append(
                detach_instance_volume(ec2, force, group_name, volume)
            )
    return results
# Example #12
def desired_equals_healthy_tags(tags: List[Dict[str, str]],
                                configuration: Configuration = None,
                                secrets: Secrets = None) -> AWSResponse:
    """
    If desired number matches the number of healthy instances

    for each of the auto-scaling groups matching tags provided

    `tags` are expected as:
    [{
        'Key': 'KeyName',
        'Value': 'KeyValue'
    },
    ...
    ]

    Returns: bool
    """
    # NOTE(review): the annotation says AWSResponse while the docstring says
    # bool; the value comes straight from is_desired_equals_healthy -- confirm
    # that helper's return type before tightening the annotation.

    if not tags:
        raise FailedActivity("Non-empty tags is required")

    client = aws_client('autoscaling', configuration, secrets)
    # Resolve the ASG descriptions matching the provided tag filters.
    groups_descr = get_asg_by_tags(tags, client)

    return is_desired_equals_healthy(groups_descr)
# Example #13
def is_scaling_in_progress(tags: List[Dict[str, str]],
                           configuration: Configuration = None,
                           secrets: Secrets = None) -> bool:
    """
    Check if there is any scaling activity in progress for ASG matching tags

    Returns: Boolean
    """

    if not tags:
        raise FailedActivity("Non-empty tags is required")

    client = aws_client('autoscaling', configuration, secrets)
    groups_descr = get_asg_by_tags(tags, client)

    # A group is considered to be scaling when any instance is not yet
    # in service or is not reporting healthy.
    for group_descr in groups_descr['AutoScalingGroups']:
        for instance in group_descr['Instances']:
            if instance['LifecycleState'] != 'InService' \
                    or instance['HealthStatus'] != 'Healthy':

                logger.debug("Scaling activities in progress: {}".format(True))

                return True

    logger.debug("Scaling activities in progress: {}".format(False))

    return False
# Example #14
def delete_replication_groups(group_ids: List[str],
                              final_snapshot_id: str = None,
                              retain_primary_cluster: bool = True,
                              configuration: Configuration = None,
                              secrets: Secrets = None) -> List[AWSResponse]:
    """
    Deletes one or more replication groups and creates a final snapshot

    Parameters:
        group_ids: list: a list of one or more replication group ids
        final_snapshot_id: str: an identifier to give the final snapshot
        retain_primary_cluster: bool (default: True): delete only the read
            replicas associated to the replication group, not the primary
    """
    client = aws_client('elasticache', configuration, secrets)
    groups = describe_replication_groups(group_ids, client)

    results = []
    for group in groups:
        group_id = group['ReplicationGroupId']
        logger.debug('Deleting Replication Group: %s' % (group_id))
        if retain_primary_cluster:
            logger.debug('Deleting only read replicas.')

        params = {
            'ReplicationGroupId': group_id,
            'RetainPrimaryCluster': retain_primary_cluster,
        }
        # The final snapshot is optional; only pass it when provided.
        if final_snapshot_id:
            params['FinalSnapshotIdentifier'] = final_snapshot_id

        response = client.delete_replication_group(**params)
        results.append(response['ReplicationGroup'])
    return results
# Example #15
def detach_random_volume(instance_ids: List[str] = None,
                         filters: List[Dict[str, Any]] = None,
                         force: bool = True,
                         configuration: Configuration = None,
                         secrets: Secrets = None) -> List[AWSResponse]:
    """
    Detaches a random ebs volume (non root) from one or more EC2 instances

    Parameters:
        One of:
            instance_ids: a list of one or more ec2 instance ids
            filters: a list of key/value pairs to pull ec2 instances

        force: force detach volume (default: true)

    Additional filters may be used to narrow the scope:
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_instances
    """
    if not any([instance_ids, filters]):
        raise FailedActivity('To detach volumes, you must specify the '
                             'instance_id or provide a set of filters')

    client = aws_client('ec2', configuration, secrets)

    # Select candidate volumes either by explicit ids or by filter match;
    # copy the filters so the caller's list is never mutated downstream.
    if instance_ids:
        targets = list_instance_volumes(client, instance_ids=instance_ids)
    else:
        targets = list_instance_volumes(
            client, filters=deepcopy(filters) or [])

    return [detach_instance_volume(client, force, t) for t in targets]
# Example #16
def put_parameter(
    parameter: str,
    value: str,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> AWSResponse:
    """
    Puts a parameter in ssm parameter store.

    :param parameter: name of the parameter to create or overwrite
    :param value: the value to store under that name
    :param configuration: AWS configuration data
    :param secrets: AWS secrets
    :return: AWSResponse from SSM ``put_parameter``
    """

    if not parameter or not value:
        raise FailedActivity("you must specify the parameter and the value")

    client = aws_client("ssm", configuration, secrets)

    # BUG FIX: the original assigned the response to a local and fell off the
    # end, returning None despite the -> AWSResponse annotation.
    return client.put_parameter(
        Name=parameter,
        Description="parameter injected with chaostoolkit",
        Value=value,
        Type="String",
        Overwrite=True,
    )
# Example #17
def describe_auto_scaling_groups(
    asg_names: List[str] = None,
    tags: List[Dict[str, Any]] = None,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> AWSResponse:
    """
    Returns AWS descriptions for provided ASG(s)

    Params:
        OneOf:
            - asg_names: a list of asg names to describe
            - tags: a list of key/value pairs to collect ASG(s)

    `tags` are expected as a list of dictionary objects:
    [
        {'Key': 'TagKey1', 'Value': 'TagValue1'},
        {'Key': 'TagKey2', 'Value': 'TagValue2'},
        ...
    ]
    """
    client = aws_client("autoscaling", configuration, secrets)

    # Guard clause first: one of the two selectors must be supplied.
    if not asg_names and not tags:
        raise FailedActivity('Must specify either "asg_names" or "tags"')

    if asg_names:
        return get_asg_by_name(asg_names, client)
    return get_asg_by_tags(tags, client)
# Example #18
def process_is_suspended(
    asg_names: List[str] = None,
    tags: List[Dict[str, str]] = None,
    process_names: List[str] = None,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> bool:
    """
    Determines if one or more processes on an ASG are suspended.

    :returns Boolean
    """
    client = aws_client("autoscaling", configuration, secrets)

    if not any([asg_names, tags]):
        raise FailedActivity(
            "one of the following arguments are required: asg_names or tags"
        )

    if all([asg_names, tags]):
        raise FailedActivity(
            "only one of the following arguments are allowed: asg_names/tags"
        )

    # BUG FIX: process_names defaults to None; the membership loop below
    # previously raised a bare TypeError when it was omitted. Fail with the
    # same explicit exception style used for the other arguments.
    if not process_names:
        raise FailedActivity(
            "the following argument is required: process_names"
        )

    if asg_names:
        asgs = get_asg_by_name(asg_names, client)
    else:
        asgs = get_asg_by_tags(tags, client)

    # Every requested process must be suspended on every matched ASG.
    for a in asgs["AutoScalingGroups"]:
        suspended = [p.get("ProcessName") for p in a["SuspendedProcesses"]]
        if not all(p in suspended for p in process_names):
            return False
    return True
# Example #19
def is_scaling_in_progress(
    tags: List[Dict[str, str]],
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> bool:
    """
    Check if there is any scaling activity in progress for ASG matching tags

    Returns: Boolean
    """
    if not tags:
        raise FailedActivity("Non-empty tags is required")

    client = aws_client("autoscaling", configuration, secrets)
    groups = get_asg_by_tags(tags, client)

    # Scaling is in progress when any instance of any matched group is not
    # yet in service or not reporting healthy.
    in_progress = any(
        instance["LifecycleState"] != "InService"
        or instance["HealthStatus"] != "Healthy"
        for group in groups["AutoScalingGroups"]
        for instance in group["Instances"]
    )
    logger.debug(f"Scaling activities in progress: {in_progress}")
    return in_progress
# Example #20
def list_event_source_mapping(source_arn: str = None,
                              function_name: str = None,
                              configuration: Configuration = None,
                              secrets: Secrets = None) -> AWSResponse:
    """
    List event source mappings for the provided lambda function or ARN of the
    event source

    :param source_arn: The ARN of the event source
    :param function_name: The name of the lambda function
    :param configuration: AWS configuration data
    :param secrets: AWS secrets
    :return: AWSResponse
    """
    if not any([source_arn, function_name]):
        raise FailedActivity(
            'must specify at least one of "source_arn" or "function_name"')

    client = aws_client("lambda", configuration, secrets)

    # Build the request with only the selectors that were actually given.
    params = {}
    if source_arn:
        params['EventSourceArn'] = source_arn
    if function_name:
        params['FunctionName'] = function_name

    try:
        return client.list_event_source_mappings(**params)
    except ClientError as e:
        raise FailedActivity(e.response['Error']['Message'])
# Example #21
def revoke_security_group_ingress(requested_security_group_id: str,
                                  ip_protocol: str,
                                  from_port: int,
                                  to_port: int,
                                  ingress_security_group_id: str = None,
                                  cidr_ip: str = None,
                                  configuration: Configuration = None,
                                  secrets: Secrets = None) -> AWSResponse:
    """
    Remove one ingress rule from a security group
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.revoke_security_group_ingress

    - requested_security_group_id: the id for the security group to update
    - ip_protocol: ip protocol name (tcp, udp, icmp, icmpv6) or -1 to specify all
    - from_port: start of port range
    - to_port: end of port range
    - ingress_security_group_id: id of the security group to allow access to. You can either specify this or cidr_ip.
    - cidr_ip: the CIDR range. You can either specify this or ingress_security_group_id
    """  # noqa: E501
    client = aws_client('ec2', configuration, secrets)

    # Delegate request assembly to the shared helper used by the grant path.
    kwargs = create_ingress_kwargs(
        requested_security_group_id, ip_protocol, from_port, to_port,
        ingress_security_group_id, cidr_ip)

    try:
        return client.revoke_security_group_ingress(**kwargs)
    except ClientError as e:
        raise ActivityFailed(
            'Failed to remove ingress rule: {}'.format(
                e.response["Error"]["Message"]))
def list_emr_clusters(configuration: Configuration = None,
                      secrets: Secrets = None) -> AWSResponse:
    """
    List the EMR clusters visible to the caller.

    :param configuration: AWS configuration data
    :param secrets: AWS secrets
    :return: the raw ``list_clusters`` response
    """
    client = aws_client('emr', configuration, secrets)
    # BUG FIX: removed a leftover debug loop that printed to stdout; it
    # iterated the response dict, which yields its top-level KEYS, not the
    # cluster records, so it was both noisy and misleading.
    return client.list_clusters()
# Example #23
def put_rule(rule_name: str,
             schedule_expression: str = None,
             event_pattern: str = None,
             state: str = None,
             description: str = None,
             role_arn: str = None,
             configuration: Configuration = None,
             secrets: Secrets = None) -> AWSResponse:
    """
    Creates or updates a CloudWatch event rule.

    Please refer to https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/events.html#CloudWatchEvents.Client.put_rule
    for details on input arguments.
    """  # noqa: E501
    client = aws_client("events", configuration, secrets)

    # Map each optional argument to its API field, then keep only the ones
    # that were explicitly provided (None means "not given").
    optional = {
        'ScheduleExpression': schedule_expression,
        'EventPattern': event_pattern,
        'State': state,
        'Description': description,
        'RoleArn': role_arn,
    }
    kwargs = {'Name': rule_name}
    kwargs.update(
        (field, value) for field, value in optional.items()
        if value is not None)
    return client.put_rule(**kwargs)
# Example #24
def instance_state(state: str,
                   instance_ids: List[str] = None,
                   filters: List[Dict[str, Any]] = None,
                   configuration: Configuration = None,
                   secrets: Secrets = None) -> bool:
    """
    Determines if EC2 instances match desired state

    For additional filter options, please refer to the documentation found:
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_instances
    """
    client = aws_client('ec2', configuration, secrets)

    if not any([instance_ids, filters]):
        raise FailedActivity('Probe "instance_state" missing required '
                             'parameter "instance_ids" or "filters"')

    if instance_ids:
        instances = client.describe_instances(InstanceIds=instance_ids)
    else:
        instances = client.describe_instances(Filters=filters)

    # BUG FIX: describe_instances groups instances into multiple
    # Reservations; the original only inspected Reservations[0], silently
    # ignoring every instance in later reservations (and raised IndexError
    # when no reservation matched).
    for reservation in instances['Reservations']:
        for i in reservation['Instances']:
            if i['State']['Name'] != state:
                return False
    return True
# Example #25
def instance_status(
    instance_id: str = None,
    filters: List[Dict[str, Any]] = None,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> Union[str, List[str]]:
    """
    Return the status of an RDS instance, or the distinct statuses of the
    instances matched by `filters`.

    Exactly one of `instance_id` or `filters` must be provided. When every
    matched instance shares the same status a single string is returned
    (e.g. "available"); otherwise a list of the distinct statuses.
    """
    # Both-or-neither is invalid: exactly one selector must be supplied.
    if bool(instance_id) == bool(filters):
        raise FailedActivity("instance_id or filters are required")

    client = aws_client("rds", configuration, secrets)
    found = describe_db_instances(client=client,
                                  instance_id=instance_id,
                                  filters=filters)
    if not found:
        if instance_id:
            raise FailedActivity("no instance found matching %s" % instance_id)
        if filters:
            raise FailedActivity("no instance(s) found matching %s" % filters)

    statuses = list({db["DBInstanceStatus"] for db in found["DBInstances"]})
    return statuses[0] if len(statuses) == 1 else statuses
# Example #26
def change_subnets(subnets: List[str],
                   asg_names: List[str] = None,
                   tags: List[dict] = None,
                   configuration: Configuration = None,
                   secrets: Secrets = None):
    """
    Adds/removes subnets on autoscaling groups

    Parameters:
        One of:
            asg_names: a list of one or more asg names
            tags: a list of key/value pair to identify asg(s) by

        subnets: a list of subnet IDs to associate to the ASG

    `tags` are expected as a list of dictionary objects:
    [
        {'Key': 'TagKey1', 'Value': 'TagValue1'},
        {'Key': 'TagKey2', 'Value': 'TagValue2'},
        ...
    ]
    """
    validate_asgs(asg_names, tags)
    client = aws_client('autoscaling', configuration, secrets)

    # Resolve the target groups either by explicit name or by tag match.
    asgs = (get_asg_by_name(asg_names, client) if asg_names
            else get_asg_by_tags(tags, client))

    # The API expects the subnet ids as a single comma-separated string.
    zone_identifier = ','.join(subnets)
    for group in asgs['AutoScalingGroups']:
        client.update_auto_scaling_group(
            AutoScalingGroupName=group['AutoScalingGroupName'],
            VPCZoneIdentifier=zone_identifier)
# Example #27
def versioning_status(
    bucket_name: str,
    status: str,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> bool:
    """
    Checks the versioning status of a bucket against the provided status

    :param bucket_name: the name of the S3 bucket
    :param status: either 'Enabled' or 'Suspended'
    :param configuration: access values used by actions/probes (optional)
    :param secrets: values that need to be passed on to actions/probes (optional)
    :return: boolean
    """
    if status not in ("Enabled", "Suspended"):
        raise FailedActivity(
            'Parameter "status" not one of "Enabled" or "Suspended"')

    client = aws_client("s3", configuration, secrets)
    if not validate_bucket_exists(client, bucket_name):
        raise FailedActivity(f'Bucket "{bucket_name}" does not exist!')

    # Compare case-insensitively, as the original did.
    actual = get_bucket_versioning(client, bucket_name)
    return actual.lower() == status.lower()
# Example #28
def put_rule(
    rule_name: str,
    schedule_expression: str = None,
    event_pattern: str = None,
    state: str = None,
    description: str = None,
    role_arn: str = None,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> AWSResponse:
    """
    Creates or updates a CloudWatch event rule.

    Please refer to https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/events.html#CloudWatchEvents.Client.put_rule
    for details on input arguments.
    """  # noqa: E501
    client = aws_client("events", configuration, secrets)

    # Only forward the optional fields that were given a truthy value,
    # matching the original truthiness-based filtering.
    kwargs = {"Name": rule_name}
    if schedule_expression:
        kwargs["ScheduleExpression"] = schedule_expression
    if event_pattern:
        kwargs["EventPattern"] = event_pattern
    if state:
        kwargs["State"] = state
    if description:
        kwargs["Description"] = description
    if role_arn:
        kwargs["RoleArn"] = role_arn
    return client.put_rule(**kwargs)
# Example #29
def has_subnets(
    subnets: List[str],
    asg_names: List[str] = None,
    tags: List[Dict[str, str]] = None,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> bool:
    """
    Determines if the provided autoscaling groups are in the provided subnets

    :returns boolean
    """
    client = aws_client("autoscaling", configuration, secrets)

    if not any([asg_names, tags]):
        raise FailedActivity(
            "one of the following arguments are required: asg_names or tags"
        )

    if all([asg_names, tags]):
        raise FailedActivity(
            "only one of the following arguments are allowed: asg_names/tags"
        )

    asgs = (get_asg_by_name(asg_names, client) if asg_names
            else get_asg_by_tags(tags, client))

    # Every matched group's subnet set must equal the requested set
    # (order-insensitive, via sorted comparison as in the original).
    expected = sorted(subnets)
    return all(
        sorted(group["VPCZoneIdentifier"].split(",")) == expected
        for group in asgs["AutoScalingGroups"]
    )
# Example #30
def cluster_status(
    cluster_id: str = None,
    filters: List[Dict[str, Any]] = None,
    configuration: Configuration = None,
    secrets: Secrets = None,
) -> Union[str, List[str]]:
    """
    Return the status of an RDS cluster, or the distinct statuses of the
    clusters matched by `filters`.

    Exactly one of `cluster_id` or `filters` must be provided. When every
    matched cluster shares the same status a single string is returned
    (e.g. "available"); otherwise a list of the distinct statuses.
    """
    # Both-or-neither is invalid: exactly one selector must be supplied.
    if bool(cluster_id) == bool(filters):
        raise FailedActivity("cluster_id or filters are required")

    client = aws_client("rds", configuration, secrets)
    found = describe_db_cluster(client=client,
                                cluster_id=cluster_id,
                                filters=filters)
    if not found:
        if cluster_id:
            raise FailedActivity("no cluster found matching %s" % cluster_id)
        if filters:
            raise FailedActivity("no cluster(s) found matching %s" % filters)

    statuses = list({c["Status"] for c in found["DBClusters"]})
    return statuses[0] if len(statuses) == 1 else statuses