def _set_tag(fleet, config, mw):
    min_instance_count = None
    if "Tags" in mw:
        tags = {}
        for t in mw["Tags"]:
            if t["Key"].startswith(config_tag):
                tags[t["Key"][len(config_tag):]] = t["Value"]
        if fleet is None:
            if "ec2.schedule.min_instance_count" in tags:
                min_instance_count = tags["ec2.schedule.min_instance_count"]
        else:
            tag = f"subfleet.{fleet}.ec2.schedule.min_instance_count"
            if tag in tags:
                min_instance_count = tags[tag]
                del tags[tag]
            tag = "subfleet.__all__.ec2.schedule.min_instance_count"
            if tag in tags:
                min_instance_count = tags[tag]
                del tags[tag]
        for t in tags:
            if not Cfg.is_builtin_key_exist(t):
                 log.warning(f"On SSM MaintenanceWindow objection %s/%s, tag '{config_tag}.{t}' does not refer "
                     "to an existing configuration key!!" % (mw["WindowId"], mw["Name"]))
                 continue
             config[f"override:{t}"] = tags[t]
     return min_instance_count
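
A minimal usage sketch for _set_tag, not taken from the original source: it assumes a hypothetical config_tag prefix of "clonesquad:config:" and a maintenance window dict trimmed to the fields the function reads, and it relies on the module's Cfg and log objects being available.

# Assumption: config_tag == "clonesquad:config:" (hypothetical prefix, for illustration only).
mw = {
    "WindowId": "mw-0123456789abcdef0",  # placeholder window id
    "Name": "example-window",            # placeholder window name
    "Tags": [
        {"Key": "clonesquad:config:subfleet.batch.ec2.schedule.min_instance_count", "Value": "2"},
        {"Key": "clonesquad:config:ec2.schedule.some_key", "Value": "some-value"},
    ],
}
config = {}
# For the 'batch' subfleet, the matching min_instance_count tag is consumed and returned;
# any remaining tag that refers to a registered configuration key is written back into
# config as an 'override:' entry (unknown keys only trigger the warning above).
min_count = _set_tag("batch", config, mw)   # min_count == "2"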
Example #2
    def get_prerequisites(self):
        if "transfer" not in self.context["o_state"].get_resource_services():
            return

        self.resources = self.o_state.get_resources(service="transfer")

        transfer_client = self.context["transfer.client"]
        paginator = transfer_client.get_paginator('list_servers')
        self.servers = list(itertools.chain.from_iterable(
            page['Servers'] for page in paginator.paginate()))

        #self.state_table = self.o_state.get_state_table()
        #self.state_table.register_aggregates([
        #    {
        #        "Prefix": "transferfamily.",
        #        "Compress": True,
        #        "DefaultTTL": Cfg.get_duration_secs("transferfamily.state.default_ttl"),
        #        "Exclude" : []
        #    }
        #    ])

        metric_time_resolution = Cfg.get_int(
            "transferfamily.metrics.time_resolution")
        if metric_time_resolution < 60:
            metric_time_resolution = 1  # Switch to highest resolution
        self.cloudwatch.register_metric([
            {
                "MetricName": "Subfleet.TransferFamily.Size",
                "Unit": "Count",
                "StorageResolution": metric_time_resolution
            },
            {
                "MetricName": "Subfleet.TransferFamily.RunningServers",
                "Unit": "Count",
                "StorageResolution": metric_time_resolution
            },
        ])

        # We need to dynamically register the subfleet configuration keys to avoid a
        #   'key unknown' warning when the user sets them.
        subfleet_names = self.get_subfleet_names()
        for subfleet in subfleet_names:
            key = "subfleet.%s.state" % subfleet
            if not Cfg.is_builtin_key_exist(key):
                Cfg.register({key: ""})
        log.log(log.NOTICE,
                "Detected TransferFamily subfleets '%s'." % subfleet_names)
Example #3
    def get_prerequisites(self, only_if_not_already_done=False):
        if only_if_not_already_done and self.prereqs_done:
            return

        self.state_table = self.o_state.get_state_table()
        client = self.context["ec2.client"]

        # Retrieve list of instances with appropriate tag
        Filters = [{
            'Name': 'tag:clonesquad:group-name',
            'Values': [self.context["GroupName"]]
        }]

        instances = []
        response = None
        while (response is None or "NextToken" in response):
            response = client.describe_instances(
                Filters=Filters,
                MaxResults=Cfg.get_int("ec2.describe_instances.max_results"),
                NextToken=response["NextToken"]
                if response is not None else "")
            for reservation in response["Reservations"]:
                instances.extend(reservation["Instances"])

        # Filter out instances with inappropriate state
        non_terminated_instances = []
        for i in instances:
            if i["State"]["Name"] not in ["shutting-down", "terminated"]:
                non_terminated_instances.append(i)

        self.instances = non_terminated_instances
        self.instance_ids = [i["InstanceId"] for i in self.instances]

        # Enrich describe_instances output with instance type details
        if Cfg.get_int("ec2.describe_instance_types.enabled"):
            self.instance_types = []
            for i in self.instances:
                if i["InstanceType"] not in self.instance_types:
                    self.instance_types.append(i["InstanceType"])
            if len(self.instance_types):
                response = client.describe_instance_types(
                    InstanceTypes=self.instance_types)
                self.instance_type_details = response["InstanceTypes"]
                for i in self.instances:
                    i["_InstanceType"] = next(
                        filter(
                            lambda it: it["InstanceType"] == i["InstanceType"],
                            self.instance_type_details), None)

        # Get instances status
        instance_statuses = []
        response = None
        while response is None or "NextToken" in response:
            q = {"InstanceIds": self.instance_ids}
            if response is not None and "NextToken" in response:
                q["NextToken"] = response["NextToken"]
            response = client.describe_instance_status(**q)
            instance_statuses.extend(response["InstanceStatuses"])
        self.instance_statuses = instance_statuses

        # Get AZ status
        response = client.describe_availability_zones()
        self.availability_zones = response["AvailabilityZones"]
        if len(self.availability_zones) == 0:
            raise Exception("Can't have a region with no AZ...")

        self.az_with_issues = []
        if not Cfg.get_int("ec2.az.statusmgt.disable"):
            for az in self.availability_zones:
                if az["State"] in ["impaired", "unavailable"]:
                    self.az_with_issues.append(az)
                if az["State"] != "available":
                    log.warning(
                        "AZ %s(%s) is marked with status '%s' by EC2.describe_availability_zones() API!"
                        % (az["ZoneName"], az["ZoneId"], az["State"]))
        else:
            log.warning(
                "Automatic AZ issues detection through describe_availability_zones() is DISABLED (ec2.az.statusmgt.disable != 0)..."
            )

        # Use these config keys to simulate an AWS Large Scale Event
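        # For example (hypothetical values): setting ec2.debug.availability_zones_impaired to
        # ["eu-west-1a"] or ec2.az.unavailable_list to ["eu-west-1b"] makes the loop below mark
        # the matching AZs as 'impaired'/'unavailable' and add them to az_with_issues, without
        # any real AWS event taking place.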
        all_az_names = [az["ZoneName"] for az in self.availability_zones]
        all_az_ids = [az["ZoneId"] for az in self.availability_zones]
        for a in Cfg.get_list("ec2.debug.availability_zones_impaired", default=[]):
            if a not in all_az_names and a not in all_az_ids:
                log.warning(
                    "ec2.debug.availability_zones_impaired does not match local AZs! '%s'"
                    % a)
        for a in Cfg.get_list("ec2.az.unavailable_list", default=[]):
            if a not in all_az_names and a not in all_az_ids:
                log.warning(
                    "ec2.az.unavailable_list does not match local AZs! '%s'" % a)
        for az in self.availability_zones:
            zone_name = az["ZoneName"]
            zone_id = az["ZoneId"]
            zone_state = az["State"]
            if zone_name in Cfg.get_list(
                    "ec2.debug.availability_zones_impaired", default=[]):
                zone_state = "impaired"
            if zone_id in Cfg.get_list("ec2.debug.availability_zones_impaired",
                                       default=[]):
                zone_state = "impaired"
            if zone_name in Cfg.get_list("ec2.az.unavailable_list",
                                         default=[]):
                zone_state = "unavailable"
            if zone_id in Cfg.get_list("ec2.az.unavailable_list", default=[]):
                zone_state = "unavailable"
            if zone_state != az["State"] and zone_state in [
                    "impaired", "unavailable"
            ] and az not in self.az_with_issues:
                self.az_with_issues.append(az)
            az["State"] = zone_state
            if zone_state != "available":
                log.warning(
                    "AZ %s(%s) is marked with status '%s' by configuration keys!"
                    % (zone_name, zone_id, zone_state))

        # We need to dynamically register the static subfleet configuration keys to avoid a
        #   'key unknown' warning when the user sets them.
        static_subfleet_names = self.get_static_subfleet_names()
        for static_fleet in static_subfleet_names:
            key = "staticfleet.%s.state" % static_fleet
            if not Cfg.is_builtin_key_exist(key):
                Cfg.register({key: ""})
        log.log(
            log.NOTICE,
            "Detected following static subfleet names across EC2 resources: %s"
            % static_subfleet_names)

        self.prereqs_done = True
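
The manual NextToken loop above is how this code walks DescribeInstances pages; a shorter sketch of the same traversal using the boto3 paginator (the tag filter mirrors the Filters above, while the group name and page size are placeholders):

import boto3

ec2 = boto3.client("ec2")
paginator = ec2.get_paginator("describe_instances")
instances = []
for page in paginator.paginate(
        Filters=[{"Name": "tag:clonesquad:group-name", "Values": ["my-group"]}],  # placeholder group name
        PaginationConfig={"PageSize": 100}):                                      # illustrative page size
    for reservation in page["Reservations"]:
        instances.extend(reservation["Instances"])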
Example #4
    def get_prerequisites(self):
        if "rds" not in self.context["o_state"].get_resource_services():
            return

        rds_client = self.context["rds.client"]
        tagging_client = self.context["resourcegroupstaggingapi.client"]

        self.databases = {"db": [], "cluster": []}
        for db_type in list(self.databases.keys()):
            paginator = tagging_client.get_paginator('get_resources')
            tag_mappings = itertools.chain.from_iterable(
                page['ResourceTagMappingList'] for page in paginator.paginate(
                    ResourceTypeFilters=["rds:%s" % db_type],
                    TagFilters=[{
                        'Key': 'clonesquad:group-name',
                        'Values': [self.context["GroupName"]]
                    }]))
            self.databases["%s.tags" % db_type] = list(tag_mappings)
            if len(self.databases["%s.tags" % db_type]) == 0:
                continue
            if db_type == "cluster":
                func = rds_client.describe_db_clusters
                filter_key = "db-cluster-id"
                response_index = "DBClusters"
            if db_type == "db":
                func = rds_client.describe_db_instances
                filter_key = "db-instance-id"
                response_index = "DBInstances"
            try:
                self.databases[db_type].extend(
                    func(Filters=[{
                        'Name':
                        filter_key,
                        'Values': [
                            t["ResourceARN"]
                            for t in self.databases["%s.tags" % db_type]
                        ]
                    }])[response_index])
            except Exception:
                log.exception("Failed to describe RDS database type '%s'" % db_type)

        #self.state_table = self.o_state.get_state_table()
        #self.state_table.register_aggregates([
        #    {
        #        "Prefix": "rds.",
        #        "Compress": True,
        #        "DefaultTTL": Cfg.get_duration_secs("rds.state.default_ttl"),
        #        "Exclude" : []
        #    }
        #    ])

        metric_time_resolution = Cfg.get_int("rds.metrics.time_resolution")
        if metric_time_resolution < 60:
            metric_time_resolution = 1  # Switch to highest resolution
        self.cloudwatch.register_metric([
            {
                "MetricName": "Subfleet.RDS.Size",
                "Unit": "Count",
                "StorageResolution": metric_time_resolution
            },
            {
                "MetricName": "Subfleet.RDS.AvailableDBs",
                "Unit": "Count",
                "StorageResolution": metric_time_resolution
            },
            {
                "MetricName": "Subfleet.RDS.StoppingDBs",
                "Unit": "Count",
                "StorageResolution": metric_time_resolution
            },
            {
                "MetricName": "Subfleet.RDS.StartingDBs",
                "Unit": "Count",
                "StorageResolution": metric_time_resolution
            },
        ])

        # We need to dynamically register the subfleet configuration keys to avoid a
        #   'key unknown' warning when the user sets them.
        subfleet_names = self.get_rds_subfleet_names()
        for subfleet in subfleet_names:
            key = "subfleet.%s.state" % subfleet
            if not Cfg.is_builtin_key_exist(key):
                Cfg.register({key: ""})
        log.log(
            log.NOTICE,
            "Detected following subfleet names across RDS resources: %s" %
            subfleet_names)
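
For reference, a rough sketch of the shape self.databases ends up with after this method (placeholder ARNs, identifiers and group name; real describe_db_instances entries contain many more fields than shown):

example_databases = {
    "db": [
        {"DBInstanceIdentifier": "db-example", "DBInstanceStatus": "available"}
    ],
    "db.tags": [
        {"ResourceARN": "arn:aws:rds:eu-west-1:111122223333:db:db-example",
         "Tags": [{"Key": "clonesquad:group-name", "Value": "my-group"}]}
    ],
    "cluster": [],       # "cluster"/"cluster.tags" stay empty when no tagged cluster is found
    "cluster.tags": []
}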