def dispose_load_balancer(self, load_balancer):
    """
    Delete a load balancer together with its listeners and target groups.

    If the ARN is not yet known, the balancer is looked up by name in its
    region; when no such balancer exists the method returns silently.

    @param load_balancer: LoadBalancer object; region and name must be set.
    @return: None
    """
    AWSAccount.set_aws_region(load_balancer.region)

    if load_balancer.arn is None:
        region_lbs = self.get_region_load_balancers(
            load_balancer.region, names=[load_balancer.name])
        if len(region_lbs) > 1:
            # Bug fix: the old message said "Can not find" although the
            # failure is that MULTIPLE balancers matched the name.
            raise ValueError(
                f"Expected a single load_balancer named '{load_balancer.name}': found {len(region_lbs)}"
            )
        if len(region_lbs) == 0:
            return
        load_balancer.update_from_raw_response(region_lbs[0].dict_src)

    # Dependent resources must be removed before the balancer itself.
    lb_listeners = self.get_region_listeners(
        load_balancer.region, load_balancer_arn=load_balancer.arn)
    for listener in lb_listeners:
        self.dispose_listener_raw(listener.generate_dispose_request())

    lb_target_groups = self.get_region_target_groups(
        load_balancer.region, load_balancer_arn=load_balancer.arn)
    for target_group in lb_target_groups:
        self.dispose_target_group_raw(target_group.generate_dispose_request())

    self.dispose_load_balancer_raw(load_balancer.generate_dispose_request())
def yield_log_events(self, log_group, stream):
    """
    Yield raw get_log_events responses for a single log stream.

    Pages forward through the stream, then issues one extra request with the
    final forward token and expects the service to echo the same token back
    (i.e. no further events).

    :param log_group: log group object; its region and name are used.
    :param stream: log stream object; its name is used.
    :return: generator of raw get_log_events response dicts.
    """
    if AWSAccount.get_aws_region() != log_group.region:
        AWSAccount.set_aws_region(log_group.region)

    # get_log_events pages with "nextForwardToken" rather than the usual key.
    self.NEXT_PAGE_RESPONSE_KEY = "nextForwardToken"
    token = None
    for response in self.execute(self.client.get_log_events, "events",
                                 raw_data=True,
                                 filters_req={
                                     "logGroupName": log_group.name,
                                     "logStreamName": stream.name
                                 }):
        token = response["nextForwardToken"]
        yield response

    # todo: refactor
    # NOTE(review): if the stream yielded no responses, token is still None
    # here and is passed as "nextToken" — confirm the API tolerates this.
    for response in self.execute(self.client.get_log_events, "events",
                                 raw_data=True,
                                 filters_req={
                                     "logGroupName": log_group.name,
                                     "logStreamName": stream.name,
                                     "nextToken": token
                                 }):
        if token != response["nextForwardToken"]:
            # Bug fix: raise with context instead of a bare ValueError().
            raise ValueError(
                f"Unexpected new forward token for stream '{stream.name}': "
                f"{response['nextForwardToken']} != {token}")
def get_region_load_balancers(self, region, names=None, full_information=True, get_tags=True):
    """
    Fetch load balancers in a region, optionally restricted to specific names.

    @param region: region to query.
    @param names: optional list of load balancer names to filter by.
    @param full_information: also fetch extended per-balancer information.
    @param get_tags: also fetch tags for the returned balancers.
    @return: list of LoadBalancer objects.
    """
    AWSAccount.set_aws_region(region)

    request_filters = {"Names": names} if names is not None else None

    balancers = []
    for dict_src in self.execute(
            self.client.describe_load_balancers,
            "LoadBalancers",
            filters_req=request_filters,
            exception_ignore_callback=lambda error: "LoadBalancerNotFound" in repr(error)):
        balancer = LoadBalancer(dict_src)
        balancers.append(balancer)
        if full_information:
            self.get_load_balancer_full_inforrmation(balancer)

    if get_tags:
        self.update_tags(balancers)

    return balancers
def provision_bucket(self, bucket):
    """
    Provision a bucket into AWS.

    Checks the bucket's current location first: an existing bucket in a
    different region is an error, while a missing bucket ("NoSuchBucket")
    simply proceeds to creation.

    @param bucket:
    @return:
    """
    location_request = {"Bucket": bucket.name}
    AWSAccount.set_aws_region(bucket.region)

    try:
        logger.info(f"Get bucket location {location_request}")
        for existing_region_mark in self.execute(
                self.client.get_bucket_location,
                "LocationConstraint",
                filters_req=location_request):
            if bucket.region.region_mark != existing_region_mark:
                raise RuntimeError(
                    f"Provisioning bucket {bucket.name} in '{bucket.region.region_mark}' fails. "
                    f"Exists in region '{existing_region_mark}'")
    except Exception as error:
        error_repr = repr(error)
        logger.info(error_repr)
        # Only a missing bucket is acceptable here; anything else propagates.
        if "NoSuchBucket" not in error_repr:
            raise

    bucket.location = self.provision_bucket_raw(bucket.generate_create_request())

    if bucket.policy is not None:
        self.put_bucket_policy_raw(bucket.generate_put_bucket_policy_request())
def get_region_keys(self, region, full_information=True):
    """
    Fetch KMS keys in a region.

    @param region: region to query.
    @param full_information: also fetch key metadata, tags and aliases
        (tag/alias access denials are tolerated and skipped).
    @return: list of KMSKey objects.
    """
    AWSAccount.set_aws_region(region)

    keys = []
    for dict_src in self.execute(self.client.list_keys, "Keys"):
        key = KMSKey(dict_src)
        if full_information:
            filters_req = {"KeyId": key.id}
            for metadata in self.execute(self.client.describe_key,
                                         "KeyMetadata",
                                         filters_req=filters_req):
                key.update_from_describe_response(metadata)

            tags = list(
                self.execute(self.client.list_resource_tags, "Tags",
                             filters_req=filters_req,
                             exception_ignore_callback=lambda error: "AccessDeniedException" in repr(error)))
            key.update_from_list_tags_response({"Tags": tags})

            aliases = list(
                self.execute(self.client.list_aliases, "Aliases",
                             filters_req=filters_req,
                             exception_ignore_callback=lambda error: "AccessDeniedException" in repr(error)))
            key.update_from_list_aliases_response({"Aliases": aliases})

        keys.append(key)

    return keys
def get_region_rules(self, region, full_information=False, listener_arn=None, get_tags=True):
    """
    Fetch listener rules in a region, optionally for a single listener.

    @param region: region to query.
    @param full_information: not implemented; raises if requested.
    @param listener_arn: optional listener ARN to restrict the query to.
    @param get_tags: also fetch tags for the returned rules.
    @return: list of LoadBalancer.Rule objects.
    """
    AWSAccount.set_aws_region(region)

    request_filters = {"ListenerArn": listener_arn} if listener_arn is not None else None

    rules = []
    for dict_src in self.execute(self.client.describe_rules, "Rules",
                                 filters_req=request_filters):
        rules.append(LoadBalancer.Rule(dict_src))
        if full_information:
            raise NotImplementedError()

    if get_tags:
        self.update_tags(rules)

    return rules
def dispose_db_cluster(self, db_cluster: RDSDBCluster):
    """
    Delete an RDS DB cluster: first its member instances, then the cluster,
    then wait until the cluster disappears. Returns silently when the
    cluster does not exist in its region.

    @param db_cluster: cluster to dispose; region and id must be set.
    """
    matching = [region_cluster
                for region_cluster in self.get_region_db_clusters(db_cluster.region)
                if region_cluster.id == db_cluster.id]
    if not matching:
        return
    db_cluster.update_from_raw_response(matching[0].dict_src)

    # Member instances must be removed before the cluster itself.
    instance_filters = [{"Name": "db-cluster-id", "Values": [db_cluster.id]}]
    for db_instance in self.get_region_db_instances(region=db_cluster.region,
                                                    filters=instance_filters):
        db_instance.region = db_cluster.region
        db_instance.skip_final_snapshot = db_cluster.skip_final_snapshot
        self.dispose_db_instance(db_instance)

    AWSAccount.set_aws_region(db_cluster.region)
    db_cluster.update_from_raw_response(
        self.dispose_db_cluster_raw(db_cluster.generate_dispose_request()))

    try:
        self.wait_for_status(
            db_cluster,
            self.update_db_cluster_information,
            [],
            [db_cluster.Status.DELETING, db_cluster.Status.AVAILABLE],
            [],
            timeout=20 * 60)
    except self.ResourceNotFoundError:
        # Cluster vanished while waiting — this is the desired end state.
        pass
def get_region_db_cluster_snapshots(self, region, full_information=True,
                                    custom_filters=None, update_tags=True):
    """
    Fetch RDS DB cluster snapshots in a region.

    @param region: region to query.
    @param full_information: also fetch snapshot attributes into .parameters.
    @param custom_filters: optional raw request filters.
    @param update_tags: also fetch tags for the returned snapshots.
    @return: list of RDSDBClusterSnapshot objects.
    """
    AWSAccount.set_aws_region(region)

    snapshots = []
    for dict_src in self.execute(
            self.client.describe_db_cluster_snapshots,
            "DBClusterSnapshots",
            filters_req=custom_filters,
            exception_ignore_callback=lambda x: "DBClusterSnapshotNotFoundFault" in repr(x)):
        snapshot = RDSDBClusterSnapshot(dict_src)
        snapshots.append(snapshot)
        if full_information:
            snapshot.parameters = []
            attribute_filters = {"DBClusterSnapshotIdentifier": snapshot.id}
            for attribute in self.execute(
                    self.client.describe_db_cluster_snapshot_attributes,
                    "DBClusterSnapshotAttributesResult",
                    filters_req=attribute_filters):
                snapshot.parameters.append(attribute)

    if update_tags:
        self.update_tags(snapshots)

    return snapshots
def create_snapshots(self, instance):
    """
    Create snapshots for an instance and log how long the API call took.

    @param instance: instance object; its region and snapshot request are used.
    @return: raw response from the create-snapshots call.
    """
    started_at = datetime.datetime.now()
    AWSAccount.set_aws_region(instance.region)
    response = self.create_snapshots_raw(instance.generate_create_snapshots_request())
    logger.info(f"Snapshot creation took {datetime.datetime.now() - started_at}")
    return response
def provision_vpc_peering(self, vpc_peering):
    """
    Ensure a VPC peering connection exists and is accepted.

    Looks for an existing peering with the same tag-name (ignoring deleted /
    deleting ones); if one is active or provisioning, nothing more is done.
    Otherwise the peering is created and, while it is awaiting acceptance,
    the accept call is retried on the peer-region side.

    :param vpc_peering: peering object; region, peer_region and tag-name are used.
    """
    region_vpc_peerings = self.get_region_vpc_peerings(vpc_peering.region)
    for region_vpc_peering in region_vpc_peerings:
        # Skip peerings that are being or have been torn down.
        if region_vpc_peering.get_status() in [region_vpc_peering.Status.DELETED,
                                               region_vpc_peering.Status.DELETING]:
            continue
        if region_vpc_peering.get_tagname(ignore_missing_tag=True) != vpc_peering.get_tagname():
            continue
        vpc_peering.update_from_raw_response(region_vpc_peering.dict_src)
        if region_vpc_peering.get_status() in [region_vpc_peering.Status.ACTIVE,
                                               region_vpc_peering.Status.PROVISIONING]:
            # Already in a good state — nothing to provision or accept.
            return
        break

    if vpc_peering.id is None:
        # No usable existing peering was found — create one.
        AWSAccount.set_aws_region(vpc_peering.region)
        response = self.provision_vpc_peering_raw(vpc_peering.generate_create_request())
        vpc_peering.update_from_raw_response(response)

    if vpc_peering.get_status() in [vpc_peering.Status.INITIATING_REQUEST,
                                    vpc_peering.Status.PENDING_ACCEPTANCE]:
        # Acceptance happens on the peer's side, hence the region switch.
        AWSAccount.set_aws_region(vpc_peering.peer_region)
        for counter in range(20):
            try:
                self.accept_vpc_peering_connection_raw(vpc_peering.generate_accept_request())
                break
            except Exception as exception_inst:
                repr_exception_inst = repr(exception_inst)
                # The connection may not be visible in the peer region yet;
                # any other failure propagates immediately.
                if "does not exist" not in repr_exception_inst:
                    raise
                time.sleep(5)
    else:
        # NOTE(review): reconstructed from a flattened source — this `else`
        # is read as belonging to the status `if` (unexpected status after
        # provisioning → fail loudly). Confirm it is not a for/else on the
        # retry loop in the original file.
        raise RuntimeError(vpc_peering.get_status())
def get_region_clusters(self, region, cluster_identifiers=None):
    """
    Describe ECS clusters in a region.

    @param region: region to query.
    @param cluster_identifiers: optional list of cluster names/ARNs; when
        omitted, every cluster ARN in the region is listed first.
    @return: list of ECSCluster objects.
    """
    AWSAccount.set_aws_region(region)

    if cluster_identifiers is None:
        cluster_identifiers = list(self.execute(self.client.list_clusters, "clusterArns"))

    # describe_clusters accepts at most 100 identifiers per call.
    if len(cluster_identifiers) > 100:
        raise NotImplementedError(
            """clusters (list) -- A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) entries. If you do not specify a cluster, the default cluster is assumed."""
        )

    describe_request = {
        "clusters": cluster_identifiers,
        "include": ["ATTACHMENTS", "CONFIGURATIONS", "SETTINGS", "STATISTICS", "TAGS"],
    }

    return [ECSCluster(dict_src)
            for dict_src in self.execute(self.client.describe_clusters, "clusters",
                                         filters_req=describe_request)]
def get_region_target_groups(self, region, full_information=True,
                             target_group_names=None, load_balancer_arn=None):
    """
    Fetch ELBV2 target groups in a region.

    @param region: region to query.
    @param full_information: also fetch per-target health descriptions.
    @param target_group_names: optional list of target group names to filter by.
    @param load_balancer_arn: optional load balancer ARN to filter by.
    @return: list of ELBV2TargetGroup objects.
    @raise Exception: re-raises any describe_target_health failure after logging.
    """
    AWSAccount.set_aws_region(region)

    filters_req = {}
    if target_group_names is not None:
        filters_req["Names"] = target_group_names
    if load_balancer_arn is not None:
        filters_req["LoadBalancerArn"] = load_balancer_arn

    final_result = []
    for response in self.execute(self.client.describe_target_groups,
                                 "TargetGroups", filters_req=filters_req):
        obj = ELBV2TargetGroup(response)
        final_result.append(obj)
        if full_information:
            try:
                for update_info in self.execute(
                        self.client.describe_target_health,
                        "TargetHealthDescriptions",
                        filters_req={"TargetGroupArn": obj.arn}):
                    obj.update_target_health(update_info)
            except Exception as inst:
                # Bug fix: use the module logger instead of bare print()
                # debugging output, consistent with the rest of the file.
                logger.error(
                    f"Failed to describe target health for {response}: {repr(inst)}")
                raise

    return final_result
def get_region_event_source_mappings(self, region):
    """
    List Lambda event source mappings in a region.

    @param region: region to query.
    @return: list of LambdaEventSourceMapping objects.
    """
    AWSAccount.set_aws_region(region)
    return [LambdaEventSourceMapping(dict_src)
            for dict_src in self.execute(self.client.list_event_source_mappings,
                                         "EventSourceMappings")]
def get_secret(self, secret_name, region_name=None):
    """
    Fetch a secret from Secrets Manager.

    @param secret_name: name of the secret to fetch.
    @param region_name: optional region to switch to before fetching.
    @return: SecretsManagerSecret built from the raw secret value.
    """
    if region_name is not None:
        AWSAccount.set_aws_region(region_name)
    return SecretsManagerSecret(self.get_secret_value(secret_name))
def yield_cloud_watch_metrics(self):
    """
    Yields metrics - made to handle large amounts of data, in order to
    prevent the OOM collapse.

    Iterates every region of the current account, switching the active
    region before listing.

    :return: generator of raw list_metrics responses.
    """
    for region in AWSAccount.get_aws_account().regions.values():
        AWSAccount.set_aws_region(region)
        yield from self.execute(self.client.list_metrics, "Metrics")
def get_region_lambdas(self, region, full_information=True):
    """
    List Lambda functions in a region.

    @param region: region to query.
    @param full_information: also fetch extended per-function information.
    @return: list of AWSLambda objects.
    """
    AWSAccount.set_aws_region(region)

    lambdas = []
    for dict_src in self.execute(self.client.list_functions, "Functions"):
        aws_lambda = AWSLambda(dict_src)
        lambdas.append(aws_lambda)
        if full_information:
            self.update_lambda_full_information(aws_lambda)

    return lambdas
def raw_update_elasticsearch_domain_config(self, request, region=None):
    """
    Update an Elasticsearch domain configuration from a raw request dict.

    @param request: raw request passed through to the API call.
    @param region: optional region to switch to first.
    @return: the first "DomainConfig" response, or None when there is none.
    """
    if region is not None:
        AWSAccount.set_aws_region(region)

    responses = self.execute(self.client.update_elasticsearch_domain_config,
                             "DomainConfig",
                             filters_req=request)
    for domain_config in responses:
        return domain_config
def get_region_capacity_providers(self, region):
    """
    Describe ECS capacity providers in a region.

    @param region: region to query.
    @return: list of ECSCapacityProvider objects.
    """
    AWSAccount.set_aws_region(region)
    return [ECSCapacityProvider(dict_src)
            for dict_src in self.execute(self.client.describe_capacity_providers,
                                         "capacityProviders")]
def get_region_db_subnet_groups(self, region):
    """
    Describe RDS DB subnet groups in a region.

    @param region: region to query.
    @return: list of RDSDBSubnetGroup objects.
    """
    AWSAccount.set_aws_region(region)
    return [RDSDBSubnetGroup(dict_src)
            for dict_src in self.execute(self.client.describe_db_subnet_groups,
                                         "DBSubnetGroups")]
def get_connection_id():
    """
    Each connection has a unique id - in order to reuse it. This function
    generates it.

    The id is "<account id>/<region mark>"; the region part is empty when
    no region is currently set.

    :return: connection id string.
    """
    account = AWSAccount.get_aws_account()
    region = AWSAccount.get_aws_region()
    mark = "" if region is None else region.region_mark
    return f"{account.id}/{mark}"
def provision_launch_template(self, launch_template):
    """
    Create an EC2 launch template unless one with the same name exists.

    When a template of the same name is found in the region, the object is
    refreshed from it and no create call is made.

    @param launch_template: template object; region and name must be set.
    """
    for existing in self.get_region_launch_templates(launch_template.region):
        if existing.name == launch_template.name:
            launch_template.update_from_raw_response(existing.dict_src)
            return

    AWSAccount.set_aws_region(launch_template.region)
    response = self.provision_launch_template_raw(launch_template.generate_create_request())
    launch_template.update_from_raw_response(response)
def set_cloudwatch_group_metric_filter(self, metric_filter):
    """
    Put a CloudWatch log group metric filter.

    @param metric_filter: filter object providing the create request,
        region and name.
    @raise RuntimeError: when the API does not answer with HTTP 200.
    """
    request_dict = metric_filter.generate_create_request()
    AWSAccount.set_aws_region(metric_filter.region)
    logger.info(
        f"Creating cloudwatch log group metric filter '{metric_filter.name}' in region '{metric_filter.region}'"
    )
    for metadata in self.execute(self.client.put_metric_filter,
                                 "ResponseMetadata",
                                 filters_req=request_dict):
        if metadata["HTTPStatusCode"] != 200:
            raise RuntimeError(f"{metadata}")
def get_region_template_entities(self, region, full_information=True):
    """
    Describe template entities in a region.

    @param region: region to query.
    @param full_information: not implemented; raises if any entity is found
        while this flag is set.
    @return: list of TemplateEntity objects.
    """
    AWSAccount.set_aws_region(region)

    entities = []
    for dict_src in self.execute(self.client.describe_template_entities,
                                 "template_entities"):
        entities.append(TemplateEntity(dict_src))
        if full_information:
            raise NotImplementedError()

    return entities
def get_region_subnets(self, region, filters=None):
    """
    Describe subnets in a region.

    @param region: region to query.
    @param filters: optional list of EC2 "Filters" entries.
    @return: list of Subnet objects.
    """
    request_filters = {} if filters is None else {"Filters": filters}
    AWSAccount.set_aws_region(region)
    return [Subnet(dict_src)
            for dict_src in self.execute(self.client.describe_subnets, "Subnets",
                                         filters_req=request_filters)]
def provision_cluster(self, cluster):
    """
    Provision an ECS cluster.

    If a cluster with the same id already exists in the region, the object
    is first refreshed from it; the create request is then issued either way
    and the object updated from the response.

    @param cluster: cluster object; region and id must be set.
    """
    # Bug fix: removed a leftover pdb.set_trace() debugging breakpoint that
    # would halt any non-interactive run.
    region_clusters = self.get_region_clusters(cluster.region)
    for region_cluster in region_clusters:
        if cluster.id == region_cluster.id:
            cluster.update_from_raw_response(region_cluster.dict_src)

    AWSAccount.set_aws_region(cluster.region)
    response = self.provision_cluster_raw(
        cluster.generate_create_request())
    cluster.update_from_raw_response(response)
def get_region_clusters(self, region):
    """
    Describe Elasticache cache clusters in a region.

    @param region: region to query.
    @return: list of ElasticacheCluster objects.
    """
    AWSAccount.set_aws_region(region)
    return [ElasticacheCluster(dict_src)
            for dict_src in self.execute(self.client.describe_cache_clusters,
                                         "CacheClusters")]
def get_region_cache_subnet_groups(self, region):
    """
    Describe Elasticache cache subnet groups in a region.

    @param region: region to query.
    @return: list of ElasticacheCacheSubnetGroup objects.
    """
    AWSAccount.set_aws_region(region)
    return [ElasticacheCacheSubnetGroup(dict_src)
            for dict_src in self.execute(self.client.describe_cache_subnet_groups,
                                         "CacheSubnetGroups")]
def provision_repository(self, repository):
    """
    Create an ECR repository unless one with the same name exists.

    @param repository: repository object; region and name must be set.
    @return: result of update_from_raw_create on either the existing or the
        freshly created repository data.
    """
    AWSAccount.set_aws_region(repository.region)

    existing = self.get_region_repositories(repository.region,
                                            repository_names=[repository.name])
    if len(existing) == 1:
        return repository.update_from_raw_create(existing[0].dict_src)

    response = self.provision_repository_raw(repository.generate_create_request())
    return repository.update_from_raw_create(response)
def provision_key(self, key):
    """
    Create a KMS key unless one with the same tag-name already exists.

    @param key: key object; region and tag-name must be set.
    """
    for existing in self.get_region_keys(key.region):
        if existing.get_tagname(ignore_missing_tag=True) == key.get_tagname():
            key.update_from_raw_response(existing.dict_src)
            return

    AWSAccount.set_aws_region(key.region)
    key.update_from_raw_response(self.provision_key_raw(key.generate_create_request()))
def set_cloudwatch_alarm(self, alarm):
    """
    Put a CloudWatch metric alarm.

    @param alarm: alarm object providing the create request, region and name.
    @raise RuntimeError: when the API does not answer with HTTP 200.
    """
    request_dict = alarm.generate_create_request()
    AWSAccount.set_aws_region(alarm.region)
    logger.info(
        f"Creating cloudwatch alarm '{alarm.name}' in region '{alarm.region}'"
    )
    for metadata in self.execute(self.client.put_metric_alarm,
                                 "ResponseMetadata",
                                 filters_req=request_dict):
        if metadata["HTTPStatusCode"] != 200:
            raise RuntimeError(f"{metadata}")