def user_exist(iam: boto3.client, user: str, rights_msg: str) -> bool:
    """Check whether the specified AWS user exists. Requires "iam:GetUser".

    Args:
        iam: (boto3.client.IAM) A low-level client representing AWS IAM.
        user: (str) Cluster.dev user name.
        rights_msg: (str) Notification message about missing rights.

    Returns:
        bool - True if the user exists, False otherwise.
    """
    try:
        iam.get_user(UserName=user)
    except iam.exceptions.NoSuchEntityException:
        logger.debug(f'User "{user}" does not exist yet')
        return False
    except iam.exceptions.ClientError as error:
        logger.error(f'{error}{rights_msg}')
        sys.exit()

    logger.debug(f'User "{user}" exists')
    return True
def create_user(
    iam: boto3.client,
    username: str,
    rights_msg: str,
    path: str = '/cluster.dev/',
):
    """Create an AWS IAM user. Requires "iam:CreateUser".

    Args:
        iam: (boto3.client.IAM) A low-level client representing AWS IAM.
        username: (str) Cluster.dev user name.
        rights_msg: (str) Notification message about missing rights.
        path: (str) The path for the user. Default: '/cluster.dev/'.
    """
    try:
        iam.create_user(
            Path=path,
            UserName=username,
        )
    except iam.exceptions.ClientError as error:
        logger.error(f'{error}{rights_msg}')
        sys.exit()

    logger.debug(f"User '{path}{username}' created")
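
# Usage sketch (hypothetical): ensure the cluster.dev IAM user exists,
# creating it when missing. The user name and rights message below are
# illustrative assumptions, not values taken from the snippets above.
import boto3

iam = boto3.client('iam')
msg = ' Make sure your credentials allow "iam:GetUser" and "iam:CreateUser".'
if not user_exist(iam, 'cluster.dev', msg):
    create_user(iam, 'cluster.dev', msg)
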
def send_to_s3(client: boto3.client, local_file: str, s3_path_key: str,
               content_type: str) -> None:
    """Upload a local file to the bucket named by the S3_BUCKET env var."""
    # Use a context manager so the file handle is closed after the upload.
    with open(local_file, 'rb') as body:
        client.put_object(
            Bucket=os.getenv('S3_BUCKET'),
            Key=s3_path_key,
            Body=body,
            ContentType=content_type,
        )
def get_messages(sqs: boto3.client, qurl: str) -> Any:
    """Get SQS messages from the queue.

    :param sqs: SQS client used to poll the queue
    :type sqs: boto3.client connection
    :param qurl: URL of the SQS queue to listen on for new messages
    :return: List of messages from the queue, or None after
        GLOBAL_VARS["MAX_TRIES"] empty polls
    :rtype: Any
    """
    loop = 0
    messages = sqs.receive_message(
        QueueUrl=qurl,
        MaxNumberOfMessages=1,
        VisibilityTimeout=GLOBAL_VARS["Q_TIMEOUT"],
    )
    while "Messages" not in messages:
        loop += 1
        if loop == GLOBAL_VARS["MAX_TRIES"]:
            return None
        _LOGGER.debug("Waiting ....")
        sleep(GLOBAL_VARS["RETRY_TIME"])
        messages = sqs.receive_message(
            QueueUrl=qurl,
            MaxNumberOfMessages=1,
            VisibilityTimeout=GLOBAL_VARS["Q_TIMEOUT"],
        )
    return messages
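
# Usage sketch (hypothetical): poll a queue until a message arrives or
# MAX_TRIES is exhausted. The queue URL and GLOBAL_VARS values are
# illustrative assumptions; get_messages expects GLOBAL_VARS to be defined
# in its own module.
import boto3

GLOBAL_VARS = {"Q_TIMEOUT": 30, "MAX_TRIES": 5, "RETRY_TIME": 10}
sqs = boto3.client('sqs')
msgs = get_messages(
    sqs, 'https://sqs.us-east-1.amazonaws.com/123456789012/example-queue')
if msgs is None:
    print('no messages received before MAX_TRIES was reached')
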
def detach_instance_volume(
    client: boto3.client, force: bool, asg_name: str, volume_data: Dict[str, str]
) -> AWSResponse:
    try:
        response = client.detach_volume(
            Device=volume_data["DeviceName"],
            InstanceId=volume_data["InstanceId"],
            VolumeId=volume_data["VolumeId"],
            Force=force,
        )
        # tag volume with instance information (to reattach on rollback)
        client.create_tags(
            Resources=[volume_data["VolumeId"]],
            Tags=[
                {
                    "Key": "ChaosToolkitDetached",
                    "Value": "DeviceName=%s;InstanceId=%s;ASG=%s"
                    % (
                        volume_data["DeviceName"],
                        volume_data["InstanceId"],
                        asg_name,
                    ),
                }
            ],
        )
        return response
    except ClientError as e:
        raise FailedActivity(
            "unable to detach volume %s from %s: %s"
            % (
                volume_data["VolumeId"],
                volume_data["InstanceId"],
                e.response["Error"]["Message"],
            )
        )
def stop_instances_any_type(
    instance_types: dict = None, force: bool = False, client: boto3.client = None
) -> List[AWSResponse]:
    """
    Stop instances regardless of the instance type (on demand, spot)
    """
    instance_types = instance_types or {}  # guard against the None default
    response = []
    if "normal" in instance_types:
        logger.debug("Stopping instances: {}".format(instance_types["normal"]))
        response.append(
            client.stop_instances(InstanceIds=instance_types["normal"], Force=force)
        )

    if "spot" in instance_types:
        # TODO: proper support for spot fleets
        # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet.html

        # To properly stop spot instances, their spot requests must be
        # cancelled first
        spot_request_ids = get_spot_request_ids_from_response(
            client.describe_instances(InstanceIds=instance_types["spot"])
        )

        logger.debug(f"Canceling spot requests: {spot_request_ids}")
        client.cancel_spot_instance_requests(SpotInstanceRequestIds=spot_request_ids)

        logger.debug("Terminating spot instances: {}".format(instance_types["spot"]))
        response.append(client.terminate_instances(InstanceIds=instance_types["spot"]))

    if "scheduled" in instance_types:
        # TODO: add support for scheduled instances
        # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-scheduled-instances.html
        raise FailedActivity("Scheduled instances support is not implemented")
    return response
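
# Usage sketch (hypothetical): stop an on-demand instance via the helper
# above. The instance ID is an illustrative assumption.
import boto3

ec2 = boto3.client('ec2')
responses = stop_instances_any_type(
    instance_types={"normal": ["i-0123456789abcdef0"]},
    force=False,
    client=ec2,
)
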
def _overwrite_table(
    client_glue: boto3.client,
    catalog_id: Optional[str],
    database: str,
    table: str,
    table_input: Dict[str, Any],
    transaction_id: Optional[str],
    boto3_session: boto3.Session,
) -> None:
    delete_table_if_exists(
        database=database,
        table=table,
        transaction_id=transaction_id,
        boto3_session=boto3_session,
        catalog_id=catalog_id,
    )
    args: Dict[str, Any] = _catalog_id(
        catalog_id=catalog_id,
        **_transaction_id(
            transaction_id=transaction_id,
            DatabaseName=database,
            TableInput=table_input,
        ),
    )
    client_glue.create_table(**args)
def download_file(client: boto3.client, bucket_name: str, kname: str, dest_file: str,
                  do_md5_check: bool = False, md5sum: str = '',
                  attempts: int = DOWNLOAD_ATTEMPTS) -> bool:
    """Download an S3 object to dest_file, retrying up to `attempts` times.

    Optionally verify the downloaded file against the expected md5 checksum.
    Returns True on success, False once all attempts are exhausted.
    """
    for ntry in range(1, attempts + 1):
        try:
            print(f'Downloading {dest_file} | {md5sum}')
            client.download_file(bucket_name, kname, dest_file)
        except ClientError:
            continue
        if not do_md5_check:
            return True
        if check_md5(dest_file, md5sum):
            print(f'{dest_file} pass md5sum -> DONE!')
            return True
        print(f'{dest_file} fail md5sum -> Trying again...')
    print(f"Try attempt exceeded for s3 key: {kname}")
    return False
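
# Usage sketch (hypothetical): fetch an object and verify its checksum.
# The bucket, key, destination, and md5 value are illustrative assumptions.
import boto3

s3 = boto3.client('s3')
ok = download_file(s3, 'example-bucket', 'data/sample.bin', '/tmp/sample.bin',
                   do_md5_check=True,
                   md5sum='d41d8cd98f00b204e9800998ecf8427e')
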
def detach_instance_volume(client: boto3.client,
                           force: bool,
                           volume: Dict[str, str]) -> AWSResponse:
    """
    Detach volume from an instance
    """
    try:
        response = client.detach_volume(
            Device=volume['DeviceName'],
            InstanceId=volume['InstanceId'],
            VolumeId=volume['VolumeId'],
            Force=force)

        # tag volume with instance information (to reattach on rollback)
        client.create_tags(
            Resources=[volume['VolumeId']],
            Tags=[
                {
                    'Key': 'ChaosToolkitDetached',
                    'Value': 'DeviceName=%s;InstanceId=%s' % (
                        volume['DeviceName'], volume['InstanceId'])
                }])
        return response
    except ClientError as e:
        raise FailedActivity('unable to detach volume %s from %s: %s' % (
            volume['VolumeId'], volume['InstanceId'],
            e.response['Error']['Message']))
def put_object(mock_client: boto3.client, obj_key: str) -> None:
    mock_client.put_object(
        Bucket=BUCKET,
        Key=obj_key,
        Body=json.dumps(OBJ_CONTENT),
        ContentType="application/json",
    )
def discover_scaling_groups(
    client: boto3.client, asgs: List[str] = None, tags: List[Dict[str, Any]] = None
) -> AWSResponse:
    if not any([asgs, tags]):
        raise FailedActivity(
            "missing one of the required parameters: asg_names or tags"
        )

    if not asgs:
        asgs = []

    if tags:
        tag_filter = []
        for t in tags:
            tag_filter.append({"Name": t["Key"], "Values": [t["Value"]]})

        paginator = client.get_paginator("describe_tags")
        for p in paginator.paginate(Filters=tag_filter):
            asgs.extend(
                [t["ResourceId"] for t in p["Tags"] if t["ResourceId"] not in asgs]
            )

    results = {"AutoScalingGroups": []}
    for group in breakup_iterable(asgs, 50):
        response = client.describe_auto_scaling_groups(AutoScalingGroupNames=group)
        results["AutoScalingGroups"].extend(response["AutoScalingGroups"])
    return results
def discover_scaling_groups(client: boto3.client,
                            asgs: List[str] = None,
                            tags: List[Dict[str, Any]] = None) -> AWSResponse:
    if not any([asgs, tags]):
        raise FailedActivity(
            'missing one of the required parameters: asg_names or tags')

    if not asgs:
        asgs = []

    if tags:
        tag_filter = []
        for t in tags:
            tag_filter.append({'Name': t['Key'], 'Values': [t['Value']]})

        paginator = client.get_paginator('describe_tags')
        for p in paginator.paginate(Filters=tag_filter):
            asgs.extend([
                t['ResourceId'] for t in p['Tags']
                if t['ResourceId'] not in asgs
            ])

    results = {'AutoScalingGroups': []}
    for group in breakup_iterable(asgs, 50):
        response = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=group)
        results['AutoScalingGroups'].extend(response['AutoScalingGroups'])
    return results
def list_running_tasks_in_cluster(cluster: str, client: boto3.client,
                                  service: str = None):
    # Build the request once; only include serviceName when a service is given.
    kwargs = dict(cluster=cluster, maxResults=100, desiredStatus='RUNNING')
    if service:
        kwargs['serviceName'] = service

    res = client.list_tasks(**kwargs)
    tasks = res['taskArns'][:]

    # Follow the pagination token until all task ARNs are collected.
    while True:
        next_token = res.get("nextToken")
        if not next_token:
            break
        res = client.list_tasks(nextToken=next_token, **kwargs)
        tasks.extend(res["taskArns"])
    return tasks
def upload_file_to_s3(
    local_path: Path,
    s3_client: boto3.client,
    s3_bucket: str,
    s3_prefix: str,
) -> bool:
    """Upload a file to the given s3 bucket using the given client, if it
    doesn't already exist there.

    There is an option to add a 'prefix' (extra parent dirs) to the s3
    filename, for example if you only want to upload a part of the original
    local data directory. By default this prefix string is empty (see config
    file).
    """
    s3_path = str(Path(s3_prefix) / local_path).replace("\\", "/")

    # Check if file already exists: if yes skip, if no upload
    try:
        s3_client.head_object(Bucket=s3_bucket, Key=s3_path)
        print(f"File found in s3 bucket! Skipping {s3_path}")
        return False
    except ClientError:
        try:
            print(f"Uploading {s3_path} ...")
            s3_client.upload_file(str(local_path), s3_bucket, s3_path)
            return True
        except ClientError as e:
            print(f"Upload failed: {e}")
            raise
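
# Usage sketch (hypothetical): upload every file under a local directory,
# skipping files already present in the bucket. The bucket name, prefix,
# and directory are illustrative assumptions.
from pathlib import Path
import boto3

s3 = boto3.client('s3')
for path in Path('data').rglob('*'):
    if path.is_file():
        upload_file_to_s3(path, s3, 'example-bucket', 'backups')
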
def restart_instances_any_type(instance_types: dict, client: boto3.client):
    """
    Restarts one or more instances regardless of type
    """
    results = []
    for k, v in instance_types.items():
        logger.debug('Restarting %s instance(s): %s' % (k, v))
        # collect each API response so the caller can inspect them
        results.append(client.reboot_instances(InstanceIds=v))
    return results
def get_s3_buckets_data(s3_client: boto3.client, verbose: bool = False) -> object:
    """
    Collects S3 data for an AWS region

    :param s3_client: AWS S3 session client
    :return: Dict object with bucket names as key; each entry has information
        on total files, total file size, creation date and last modified date
    """
    response = s3_client.list_buckets()
    buckets = [bucket['Name'] for bucket in response['Buckets']]
    if len(buckets) == 0:
        print_v('No S3 buckets found', verbose)
        return None
    else:
        print_v('{0} S3 buckets found'.format(len(buckets)), verbose)

    buckets_data: dict = {
        bucket['Name']: bucket for bucket in response['Buckets']
    }
    for bucket in buckets:
        print_v('Working on bucket {0}'.format(bucket), verbose)
        tot_files = 0
        tot_file_size = 0
        g_last_modified_date = datetime.datetime(year=1970, month=1, day=1)
        kwargs = {'Bucket': bucket}
        while True:
            resp = s3_client.list_objects_v2(**kwargs)
            tot_files += resp['KeyCount']
            try:
                obj: dict
                for obj in resp['Contents']:
                    tot_file_size += obj['Size']
                    last_modified_date = obj['LastModified'].replace(
                        tzinfo=None)
                    if last_modified_date > g_last_modified_date:
                        g_last_modified_date = last_modified_date
            except KeyError:
                break
            try:
                kwargs['ContinuationToken'] = resp['NextContinuationToken']
            except KeyError:
                break
        buckets_data[bucket]['Total Files'] = tot_files
        buckets_data[bucket]['Total File Size'] = tot_file_size
        # use the running maximum, not the last object seen
        buckets_data[bucket]['Last Modified Date'] = g_last_modified_date
    return buckets_data
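
# Usage sketch (hypothetical): print a per-bucket summary using the
# collector above.
import boto3

s3 = boto3.client('s3')
data = get_s3_buckets_data(s3, verbose=True)
if data:
    for name, info in data.items():
        print(f"{name}: {info['Total Files']} files, "
              f"{info['Total File Size']} bytes")
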
def validate(client: boto3.client, cluster: str = None, service: str = None):
    """Validate that a service and/or cluster exists"""
    if cluster:
        response = client.describe_clusters(clusters=[cluster])["clusters"]
        if not response:
            raise FailedActivity("unable to locate cluster: %s" % cluster)

    if service:
        response = client.describe_services(cluster=cluster, services=[service])[
            "services"
        ]
        if not response:
            raise FailedActivity(
                f"unable to locate service: {service} on cluster: {cluster}"
            )
def list_tasks(cluster: str, service: str, client: boto3.client) -> List[str]:
    res = client.list_tasks(cluster=cluster, serviceName=service, maxResults=100)
    tasks = res['taskArns'][:]

    # page through the results until no nextToken is returned
    while True:
        next_token = res.get("nextToken")
        if not next_token:
            break
        res = client.list_tasks(cluster=cluster, serviceName=service,
                                nextToken=next_token, maxResults=100)
        tasks.extend(res["taskArns"])
    return tasks
def search_provisioned_products(search_pp_name, client: boto3.client) -> dict:
    """Search for existing Service Catalog Provisioned Products.

    If none is found, search for any in-progress deployments, since Control
    Tower deploys accounts serially.

    Args:
        search_pp_name (str): Service Catalog Provisioned Product Name to search for
        client (boto3.client): Boto3 Client for Service Catalog

    Returns:
        dict: Service Catalog Provisioned Product
    """
    logger.info(f"Searching for {search_pp_name}")
    response = client.search_provisioned_products(
        AccessLevelFilter={
            'Key': 'Account',
            'Value': 'self'
        },
        Filters={'SearchQuery': [f"name:{search_pp_name}"]})
    if len(response['ProvisionedProducts']) > 0:
        provisioned_product = response['ProvisionedProducts'][0]
        logger.info(f"Found {provisioned_product}")

        # Remove CreatedTime since it doesn't serialize to JSON well
        del provisioned_product['CreatedTime']
        return provisioned_product
    else:
        # If the product has not been provisioned yet, check for an existing
        # in-progress deployment (Control Tower deploys accounts serially)
        # and return its provisioned product name / status
        logger.info(
            f"Did not find {search_pp_name}. Searching for any In-Progress Control Tower Deployments"
        )
        return scan_provisioned_products(search_pp_name, client)
def create_provision_product(product_name: str, pp_name: str, pa_id: str,
                             client: boto3.client, params=None, tags=None) -> dict:
    """Creates a Service Catalog Provisioned Product

    Args:
        product_name (str): Service Catalog Product Name
        pp_name (str): Service Catalog Provisioned Product Name
        pa_id (str): Service Catalog Provisioning Artifact Id
        client (boto3.client): Boto3 Client for Service Catalog
        params (list): List of Service Catalog Provisioned Product Parameters
        tags (list): List of tags to add to the Service Catalog Provisioned Product

    Returns:
        dict: boto3.client response for service catalog provision product
    """
    if params is None:
        params = []
    if tags is None:
        tags = []
    logging.info(
        f"Creating pp_id:{pp_name} with ProvisionArtifactId:{pa_id} in ProductName:{product_name}"
    )
    logging.info(f"Parameters used:{params}")
    re = client.provision_product(ProductName=product_name,
                                  ProvisionedProductName=pp_name,
                                  ProvisioningArtifactId=pa_id,
                                  ProvisioningParameters=params,
                                  Tags=tags)
    logging.info(re)
    return re
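
# Usage sketch (hypothetical): look up an existing provisioned product and,
# if nothing is found or in flight, provision a new account. The product
# name, artifact id, and parameters are illustrative assumptions.
import boto3

sc = boto3.client('servicecatalog')
existing = search_provisioned_products('example-account', sc)
if not existing:
    create_provision_product(
        product_name='AWS Control Tower Account Factory',
        pp_name='example-account',
        pa_id='pa-examplearti1234',
        client=sc,
        params=[{'Key': 'AccountEmail', 'Value': 'aws+example@example.com'}],
    )
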
def _get_function_configuration(function_name: str, client: boto3.client,
                                qualifier: str = None) -> AWSResponse:
    # only pass Qualifier when one was provided
    request_kwargs = {'FunctionName': function_name}
    if qualifier is not None:
        request_kwargs['Qualifier'] = qualifier
    return client.get_function_configuration(**request_kwargs)
def get_random_instance_volume(
    client: boto3.client, instance_ids: List[str]
) -> List[Dict[str, str]]:
    results = {}
    try:
        response = client.describe_instances(InstanceIds=instance_ids)["Reservations"]
        for r in response:
            for e in r.get("Instances", []):
                instance_id = e["InstanceId"]
                bdm = e.get("BlockDeviceMappings", [])
                for b in bdm:
                    # skip root devices
                    if b["DeviceName"] in ("/dev/sda1", "/dev/xvda"):
                        continue
                    results.setdefault(instance_id, []).append(
                        {b["DeviceName"]: b["Ebs"]["VolumeId"]}
                    )

        volumes = []
        for r in results:
            # select 1 volume at random
            volume = random.sample(results[r], 1)[0]
            for k, v in volume.items():
                volumes.append({"InstanceId": r, "DeviceName": k, "VolumeId": v})
        return volumes
    except ClientError as e:
        raise FailedActivity(
            "Unable to describe asg instances: %s" % (e.response["Error"]["Message"])
        )
def get_targets_health_description(tg_arns: Dict, client: boto3.client) -> Dict:
    """
    Return TargetHealthDescriptions by target group

    Structure:
    {
        "TargetGroupName": {
            "TargetGroupArn": value,
            "TargetHealthDescriptions": TargetHealthDescriptions[]
        },
        ....
    }
    """
    logger.debug("Target group ARN: {} Getting health descriptions"
                 .format(str(tg_arns)))
    tg_health_descr = {}

    for tg in tg_arns:
        tg_health_descr[tg] = {}
        tg_health_descr[tg]['TargetGroupArn'] = tg_arns[tg]
        tg_health_descr[tg]['TargetHealthDescriptions'] = \
            client.describe_target_health(TargetGroupArn=tg_arns[tg])[
                'TargetHealthDescriptions']
    logger.debug("Health descriptions for target group(s) are: {}"
                 .format(str(tg_health_descr)))
    return tg_health_descr
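
# Usage sketch (hypothetical): fetch health for one target group. The
# target group name and ARN are illustrative assumptions.
import boto3

elbv2 = boto3.client('elbv2')
arns = {'example-tg': 'arn:aws:elasticloadbalancing:us-east-1:'
                      '123456789012:targetgroup/example-tg/abc123'}
health = get_targets_health_description(arns, elbv2)
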
def check_for_s3_bucket(s3_client: boto3.client, s3_bucket: str,
                        s3_region: str = "eu-west-1"):
    """Check if the given bucket already exists in the s3 account.

    If not, create the bucket.
    """
    my_buckets = s3_client.list_buckets()
    if s3_bucket not in [bucket["Name"] for bucket in my_buckets["Buckets"]]:
        try:
            s3_client.create_bucket(
                Bucket=s3_bucket,
                CreateBucketConfiguration={"LocationConstraint": s3_region},
            )
        except ClientError as e:
            print(f"Bucket creation failed: {e}")
            raise
def get_random_instance_volume(
        client: boto3.client,
        instance_ids: List[str]) -> List[Dict[str, str]]:
    results = {}
    try:
        response = client.describe_instances(
            InstanceIds=instance_ids)['Reservations']
        for r in response:
            for e in r.get('Instances', []):
                instance_id = e['InstanceId']
                bdm = e.get('BlockDeviceMappings', [])
                for b in bdm:
                    # skip root devices
                    if b['DeviceName'] in ('/dev/sda1', '/dev/xvda'):
                        continue
                    results.setdefault(instance_id, []).append(
                        {b['DeviceName']: b['Ebs']['VolumeId']})

        volumes = []
        for r in results:
            # select 1 volume at random
            volume = random.sample(results[r], 1)[0]
            for k, v in volume.items():
                volumes.append({
                    'InstanceId': r,
                    'DeviceName': k,
                    'VolumeId': v
                })
        return volumes
    except ClientError as e:
        raise FailedActivity('Unable to describe asg instances: %s' % (
            e.response['Error']['Message']))
def inc_stat_keys(client: boto3.client, guild_id, member_id, stat_name):
    # Atomically increment the named counter inside member_stats and
    # refresh last_update_date in the same update. ClientError propagates
    # to the caller unchanged.
    response = client.update_item(
        TableName=TABLE_NAME,
        Key={
            'guild_id': {'N': str(guild_id)},
            'member_id': {'N': str(member_id)}
        },
        UpdateExpression=(
            'ADD member_stats.#MSG_COUNT :increment '
            'SET last_update_date = :date'
        ),
        ExpressionAttributeNames={'#MSG_COUNT': stat_name},
        ExpressionAttributeValues={
            ':increment': {'N': '1'},
            ':date': {'S': str(datetime.now())}
        })
    return response
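
# Usage sketch (hypothetical): bump a member's message counter. The table
# name (a module-level TABLE_NAME), guild id, member id, and stat name are
# illustrative assumptions.
import boto3

TABLE_NAME = 'member-stats'
dynamodb = boto3.client('dynamodb')
inc_stat_keys(dynamodb, guild_id=123456789, member_id=987654321,
              stat_name='msg_count')
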
def get_ow_instances(client: boto3.client, stack: str):
    """
    Get a list of opsworks instances in stack
    """
    instances = client.describe_instances(StackId=stack)["Instances"]
    return instances
def get_qual_by_name(client: boto3.client, qual_name: str) -> Optional[str]:
    """Find qual by name.

    Search MTurk qualifications for qual with qual_name. Return if found in
    first 100 results.

    NOTE: Only searches quals created/owned by the user's MTurk account.

    Parameters
    ----------
    client : boto3.client
        Boto3 MTurk client.
    qual_name : str
        Name of qualification to search for.

    Returns
    -------
    str or None
        If qual found, return its QualificationTypeId. Else, return None.
    """
    response = client.list_qualification_types(Query=qual_name,
                                               MustBeRequestable=True,
                                               MustBeOwnedByCaller=True,
                                               MaxResults=100)
    for qual in response['QualificationTypes']:
        name = qual.pop('Name')
        if name == qual_name:
            return qual.pop('QualificationTypeId')
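
# Usage sketch (hypothetical): resolve a qualification id by name. The
# qualification name is an illustrative assumption.
import boto3

mturk = boto3.client('mturk', region_name='us-east-1')
qual_id = get_qual_by_name(mturk, 'Example Annotator Qualification')
if qual_id is None:
    print('qualification not found among caller-owned quals')
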
def describe_db_cluster(cluster_id: str, client: boto3.client) -> AWSResponse:
    try:
        return client.describe_db_clusters(
            DBClusterIdentifier=cluster_id)['DBClusters'][0]
    except ClientError as e:
        raise FailedActivity('unable to identify cluster %s: %s' % (
            cluster_id, e.response['Error']['Message']))
def validate(client: boto3.client, cluster: str = None, service: str = None):
    """Validate that a service and/or cluster exists"""
    if cluster:
        response = client.describe_clusters(clusters=[cluster])['clusters']
        if not response:
            raise FailedActivity('unable to locate cluster: %s' % cluster)

    if service:
        response = client.describe_services(
            cluster=cluster, services=[service])['services']
        if not response:
            raise FailedActivity(
                'unable to locate service: %s on cluster: %s' % (
                    service, cluster))