Example #1
    def __init__(self,
            hint=None,
            url=None,
            auth_name=None,
            auth_srid=None,
            postgis_srid=None,
            spatialite_srid=None,
            proj4text=None,
            srtext=None,
            units=None,
            filename=None):

        # copy args to attributes
        self.hint = hint
        self.url = url
        self.auth_name = auth_name
        self.auth_srid = auth_srid
        self.postgis_srid = postgis_srid
        self.spatialite_srid = spatialite_srid
        self.proj4text = proj4text
        self.srtext = srtext
        self.units = units
        self.filename = filename

        # hint
        # TODO look for meaning in hint:  filename, 'EPSG:4326'?  proj4text? url? ...

        # if spatialreference.org url, use it!
        if self.url:
            if not self.srtext: self.srtext = misc.get_url(self.url+'esriwkt/')
            if not self.proj4text: self.proj4text = misc.get_url(self.url+'proj4/')
            if (not self.auth_name) or (not self.auth_srid):
                text = misc.get_url(self.url+'json/')
                # fix bad JSON syntax on spatialreference.org
                text = text.replace('\'','"')
                data = json.loads(text)
                self.auth_name = data['type']
                self.auth_srid = data['properties']['code']

        # if filename provided, use it!
        if self.filename and not self.srtext:
            self.srtext = srtext_from_shapefile(self.filename)
            
        # if osr (GDAL) is available, use it!
        import_osr()
        if osr:
            if not self.srtext and self.auth_name=='EPSG' and self.auth_srid:
                self.srtext = EPSG2wkt(self.auth_srid)
            if not self.srtext and self.proj4text: self.srtext = proj42wkt(self.proj4text)
            if not self.proj4text and self.srtext: self.proj4text = wkt2proj4(self.srtext)

        # set db srids from auth_srid
        if self.auth_srid:
            if not self.spatialite_srid: self.spatialite_srid = self.auth_srid%100000 + DB_SRID_OFFSET
            if not self.postgis_srid: self.postgis_srid = self.auth_srid%100000 + DB_SRID_OFFSET

        # default to meters
        if not self.units:
            self.units = 'meters'
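
A minimal usage sketch of the constructor above (the class name 'SRS' is an assumption, as the snippet does not show it; the spatialreference.org URL is illustrative):

# Hypothetical usage; 'SRS' and the URL layout are assumptions, not shown in the snippet.
srs = SRS(url='https://spatialreference.org/ref/epsg/4326/')
print(srs.auth_name, srs.auth_srid)   # expected: 'EPSG', 4326
print(srs.units)                      # 'meters' (the default when unset)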
Example #2
def _overpass_get(query):
    url = 'http://overpass-api.de/api/interpreter'
    parms = {'data':query}
    response_headers = {}
    data = misc.get_url(url,parms=parms,gzip=True,info=response_headers)
    #print 'DEB _overpass_get response_headers:',response_headers
    return data
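
A possible call site for _overpass_get, assuming misc.get_url behaves as used above; the Overpass QL query itself is illustrative, not from the source:

# Fetch cafe nodes in a small bounding box (example query, made up for illustration).
query = '[out:json];node["amenity"="cafe"](48.85,2.29,48.87,2.31);out;'
data = _overpass_get(query)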
Example #3
    def load_dashboard(self):
        dashboard = misc.get_url("internal:standard-dashboard.json")
        content = str(dashboard, "utf-8")
        for k in self.context:
            content = content.replace("{%s}" % k, str(self.context[k]))
        for k in Cfg.keys():
            content = content.replace("{%s}" % k, Cfg.get(k))
        return content
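
str.replace is used for the '{key}' substitution instead of str.format, presumably because the JSON template itself is full of braces; a standalone sketch of the same loop (the template and context values are made up):

# Illustrative reproduction of the substitution above.
template = '{"title": "Dashboard {GroupName} ({Region})"}'
context = {"GroupName": "test", "Region": "eu-west-1"}
content = template
for k in context:
    content = content.replace("{%s}" % k, str(context[k]))
# content == '{"title": "Dashboard test (eu-west-1)"}'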
Example #4
def get_policy_content(url, account_id, region, api_gw_id=None):
    content = misc.get_url(url)
    if content is None:
        raise ValueError("Failed to load specified GWPolicyUrl '%s'!" % url)
    log.info(f"Success for get URL {url} : %s" % str(content, "utf-8"))
    try:
        content = str(content, "utf-8").replace("%(AccountId)",
                                                account_id).replace(
                                                    "%(Region)", region)
        if api_gw_id is not None:
            content = content.replace("%(ApiGWId)", api_gw_id)
        return json.loads(content)
    except Exception:
        log.exception(
            "Failed to parse the API Gateway policy located at '%s'!" % url)
Example #5
    def get_play_from_webpage(self, play):

        page = 1
        findep = False
        seasons = dict()

        while not findep:
            realurl = "{}&page={}".format(play["url"], str(page))
            webpage = get_url(realurl, self.headers)
            soup = BeautifulSoup(webpage, 'html.parser')

            if DmhySite.last_page(str(soup.text)):
                break

            for tr in soup.find_all('tr'):
                for a in tr.find_all('a'):
                    if a.has_attr('target') and a['target'] == '_blank':
                        match = re.search(play["pattern"], str(a.text))
                        if match:
                            ep = int(str(match.group('ep')).strip())
                            s = 1
                            if ep == play["episode"]:
                                findep = True
                                print("pagecount=" + str(page))
                            elif ep > play["episode"]:
                                # print("1080p:" + str(a.text).strip())
                                # print("ep:" + str(ep))
                                link = tr.find('a', href=re.compile('magnet:'))
                                uri = str(link['href'])

                                if s not in seasons:
                                    seasons[s] = dict()
                                seasons[s][ep] = uri

            page += 1
            if not findep:
                sleep(15)

        print("Got all episodes of {}".format(play['name']))

        return seasons
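
The returned structure maps season -> episode -> magnet URI; a sketch of how a caller might walk it (the values are made up):

# seasons as returned above; URIs are placeholders.
seasons = {1: {3: "magnet:?xt=urn:btih:aaa", 4: "magnet:?xt=urn:btih:bbb"}}
for season in sorted(seasons):
    for ep in sorted(seasons[season]):
        print(season, ep, seasons[season][ep])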
Example #6
    def get_plays(self, config_name, user):
        if self.account != "" and self.password != "":
            self.__sign(self.account, self.password)
        playlist = load_config(config_name)
        for play in playlist:
            alluri = ""

            webpage = get_url(play["url"], self.headers)
            seasons = sort_plays(self.__get_play_from_webpage(webpage, play))

            for season in seasons:
                for episode in seasons[season]:
                    uri = seasons[season][episode]
                    alluri += uri + "\n\r"
                    play['season'] = season
                    play['episode'] = episode

            save_uri("zimuzu", alluri, user)
            save_config(playlist, config_name)

            time.sleep(10)
Example #7
    def get_prerequisites(self):
        now = self.context["now"]
        client = self.context["cloudwatch.client"]

        # Read all CloudWatch alarm templates into memory
        alarm_definitions = {}
        for i in range(0, Cfg.get_int("cloudwatch.alarms.max_per_instance")):
            key = "cloudwatch.alarm%02d.configuration_url" % (i)
            r = Cfg.get_extended(key)
            if not r["Success"] or r["Value"] == "":
                continue

            d = misc.parse_line_as_list_of_dict(r["Value"])
            url = d[0]["_"]
            meta = d[0]

            index = "%02d" % i
            alarm_defs = {
                "Index": index,
                "Key": key,
                "Url": url,
                "Definition": r,
                "Metadata": meta
            }

            prefix = "alarmname:"
            if url.startswith(prefix):
                alarm_defs["AlarmName"] = url[len(prefix):]
            else:
                log.log(log.NOTICE, "Read Alarm definition: %s" % r["Value"])
                try:
                    resp = misc.get_url(url.format(**self.context))
                    if resp is None:
                        raise Exception("URL content = <None>")
                    alarm_defs["Content"] = str(resp, "utf-8")
                except Exception as e:
                    log.exception("Failed to load Alarm definition '%s' : %e" %
                                  (r["Value"], e))
                    continue
            alarm_definitions[index] = alarm_defs

        self.alarm_definitions = alarm_definitions

        # Read all existing CloudWatch alarms
        alarms = []
        response = None
        while response is None or "NextToken" in response:
            args = {"MaxRecords": Cfg.get_int("cloudwatch.describe_alarms.max_results")}
            if response is not None:
                args["NextToken"] = response["NextToken"]
            response = client.describe_alarms(**args)
            #log.debug(Dbg.pprint(response))
            for alarm in response["MetricAlarms"]:
                alarm_name = alarm["AlarmName"]
                alarm_def = self.get_alarm_configuration_by_name(alarm_name)
                if alarm_def is not None:
                    # This is an alarm that belongs to this CloneSquad instance
                    alarms.append(alarm)
        #log.debug(Dbg.pprint(alarms))
        self.alarms = alarms

        # Sanity check
        for index in self.alarm_definitions.keys():
            alarm_def = self.alarm_definitions[index]
            if "AlarmName" not in alarm_def:
                continue
            alarm = next(
                filter(lambda a: a["AlarmName"] == alarm_def["AlarmName"],
                       self.alarms), None)
            if alarm is None:
                log.warning(
                    "Alarm definition [%s](%s => %s) doesn't match an existing CloudWatch alarm!"
                    % (alarm_def["Definition"]["Key"],
                       alarm_def["Definition"]["Value"],
                       alarm_def["Definition"]["Status"]))

        # Read all metrics associated with alarms

        # CloudWatch intense polling can be expensive: this algorithm links the CW metric polling rate to the
        #    scale rate => under intense scale-up conditions, polling is aggressive. Otherwise, it falls back
        #    to one poll every 'cloudwatch.metrics.low_rate_polling_interval' seconds.
        # TODO(@jcjorel): Avoid this kind of direct references to an upper level module!!
        integration_period = Cfg.get_duration_secs(
            "ec2.schedule.horizontalscale.integration_period")
        instance_scale_score = self.ec2.get_integrated_float_state(
            "ec2.schedule.scaleout.instance_scale_score", integration_period)

        self.metric_cache = self.get_metric_cache()

        query = {"IdMapping": {}, "Queries": []}

        # Build query for Alarm metrics
        if Cfg.get("ec2.schedule.desired_instance_count") == "-1":
            # Sort by oldest alarms first in cache
            cached_metric_names = [m["_MetricId"] for m in self.metric_cache]
            valid_alarms = []
            for a in alarms:
                alarm_name = a["AlarmName"]
                alarm_def = self.get_alarm_configuration_by_name(alarm_name)
                if alarm_def is None or alarm_def["AlarmDefinition"]["Url"].startswith("alarmname:"):
                    continue
                a["_SamplingTime"] = self.get_metric_by_id(
                    alarm_name
                )["_SamplingTime"] if alarm_name in cached_metric_names else str(
                    misc.epoch())
                valid_alarms.append(a)
            sorted_alarms = sorted(
                valid_alarms, key=lambda a: misc.str2utc(a["_SamplingTime"]))

            # We poll from the oldest to the newest, depending on instance_scale_score, to limit CloudWatch GetMetricData costs
            time_for_full_metric_refresh = max(
                Cfg.get_duration_secs(
                    "cloudwatch.metrics.time_for_full_metric_refresh"), 1)
            app_run_period = Cfg.get_duration_secs("app.run_period")
            minimum_polled_alarms_per_run = Cfg.get_int(
                "cloudwatch.metrics.minimum_polled_alarms_per_run")
            maximum_polled_alarms_per_run = app_run_period / time_for_full_metric_refresh
            maximum_polled_alarms_per_run = min(maximum_polled_alarms_per_run,
                                                1.0)
            weight = min(instance_scale_score, maximum_polled_alarms_per_run)
            max_alarms_for_this_run = max(
                minimum_polled_alarms_per_run,
                int(min(weight, 1.0) * len(sorted_alarms)))
            for alarm in sorted_alarms[:max_alarms_for_this_run]:
                alarm_name = alarm["AlarmName"]
                CloudWatch._format_query(query, alarm_name, alarm)

            # We always poll user supplied alarms
            for alarm in alarms:
                alarm_name = alarm["AlarmName"]
                alarm_def = self.get_alarm_configuration_by_name(alarm_name)
                if alarm_def is None:
                    continue  # Unknown alarm name
                if not alarm_def["AlarmDefinition"]["Url"].startswith(
                        "alarmname:"):
                    continue
                CloudWatch._format_query(query, alarm_name, alarm)

        # Query Metric for Burstable instances
        burstable_instances = self.ec2.get_burstable_instances(
            ScalingState="-error")
        last_collect_date = self.ec2.get_state_date(
            "cloudwatch.metrics.last_burstable_metric_collect_date")
        if last_collect_date is None or (now - last_collect_date) > timedelta(
                minutes=1):
            for i in burstable_instances:
                instance_id = i["InstanceId"]
                if (not self.ec2.is_static_subfleet_instance(instance_id)
                        and self.ec2.get_scaling_state(instance_id) == "excluded"):
                    continue
                CloudWatch._format_query(
                    query, "%s/%s" % ("CPUCreditBalance", instance_id), {
                        "MetricName": "CPUCreditBalance",
                        "Namespace": "AWS/EC2",
                        "Dimensions": [{
                            "Name": "InstanceId",
                            "Value": instance_id
                        }],
                        "Period": 300,
                        "Statistic": "Average"
                    })
            self.ec2.set_state(
                "cloudwatch.metrics.last_burstable_metric_collect_date",
                now,
                TTL=Cfg.get_duration_secs("cloudwatch.default_ttl"))

        # Make request to CloudWatch
        query_counter = self.ec2.get_state_int(
            "cloudwatch.metric.query_counter", default=0)
        queries = query["Queries"]
        metric_results = []
        metric_ids = []
        no_metric_ids = []
        while len(queries) > 0:
            q = queries[:500]
            queries = queries[500:]
            results = []
            response = None
            while response is None or "NextToken" in response:
                args = {
                    "MetricDataQueries": q,
                    "StartTime": now - timedelta(seconds=Cfg.get_duration_secs(
                        "cloudwatch.metrics.data_period")),
                    "EndTime": now
                }
                if response is not None:
                    args["NextToken"] = response["NextToken"]
                response = client.get_metric_data(**args)
                results.extend(response["MetricDataResults"])
                query_counter += len(q)

            for r in results:
                if r["StatusCode"] != "Complete":
                    log.error("Failed to retrieve metrics: %s" % q)
                    continue
                metric_id = query["IdMapping"][r["Id"]]
                if len(r["Timestamps"]) == 0:
                    if metric_id not in no_metric_ids:
                        no_metric_ids.append(metric_id)
                    continue
                if metric_id not in metric_ids: metric_ids.append(metric_id)
                r["_MetricId"] = metric_id
                r["_SamplingTime"] = str(now)
                log.debug(r)
                metric_results.append(r)
        if len(no_metric_ids):
            log.info("No metrics returned for alarms %s" % no_metric_ids)

        # Merge with existing cache metric
        metric_cache = self.metric_cache
        self.metric_cache = metric_results
        for m in metric_cache:
            max_retention_period = Cfg.get_duration_secs(
                "cloudwatch.metrics.cache.max_retention_period")
            if m["_MetricId"] in metric_ids or "_SamplingTime" not in m:
                continue
            if (now - misc.str2utc(m["_SamplingTime"])).total_seconds() < max_retention_period:
                self.metric_cache.append(m)

        self.ec2.set_state("cloudwatch.metric.query_counter",
                           query_counter,
                           TTL=Cfg.get_duration_secs("cloudwatch.default_ttl"))
        self.ec2.set_state_json(
            "cloudwatch.metrics.cache",
            self.metric_cache,
            TTL=Cfg.get_duration_secs("cloudwatch.default_ttl"))
        self.set_metric("Cloudwatch.GetMetricData", query_counter)

        # Augment Alarm definitions and Instances with associated metrics
        for metric in self.metric_cache:
            metric_id = metric["_MetricId"]

            alarm_data = self.get_alarm_data_by_name(metric_id)
            if alarm_data is not None:
                alarm_data["MetricDetails"] = metric
                continue

            instance = next(
                filter(lambda i: "CPUCreditBalance/%s" % i["InstanceId"] == metric_id,
                       burstable_instances), None)
            if instance is not None:
                instance["_Metrics"] = {}
                instance["_Metrics"]["CPUCreditBalance"] = metric
                continue
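
The queries[:500] / queries[500:] slicing above batches requests to the GetMetricData limit of 500 metric data queries per call; the same pattern in isolation (the helper name is illustrative):

def chunks(items, size=500):
    # Yield successive slices, mirroring the batching loop above.
    while items:
        yield items[:size]
        items = items[size:]

for batch in chunks(list(range(1200))):
    print(len(batch))   # 500, 500, 200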
Example #8
def init(context, with_kvtable=True, with_predefined_configuration=True):
    global _init
    _init = {}
    _init["context"] = context
    _init["all_configs"] = [{
        "source": "Built-in defaults",
        "config": {},
        "metas" : {}
        }]
    if with_kvtable:
        _init["configuration_table"] = kvtable.KVTable(context, context["ConfigurationTable"])
        _init["configuration_table"].reread_table()
    _init["with_kvtable"] = with_kvtable
    _init["active_parameter_set"] = None
    register({
             "config.dump_configuration,Stable" : {
                 "DefaultValue": "0",
                 "Format"      : "Bool",
                 "Description" : """Display all relevant configuration parameters in CloudWatch logs.

    Used for debugging purpose.
                 """
             },
             "config.loaded_files,Stable" : {
                 "DefaultValue" : "internal:predefined.config.yaml;internal:custom.config.yaml",
                 "Format"       : "StringList",
                 "Description"  : """A semi-column separated list of URL to load as configuration.

Upon startup, CloneSquad will load the listed files in sequence and stack them allowing override between layers.

The default contains a reference to the empty internal file 'custom.config.yaml'. Users that intend to embed
customization directly inside the Lambda delivery should override this file with their own configuration. See 
[Customizing the Lambda package](#customizing-the-lambda-package).

This key is evaluated again after each URL parsing meaning that a layer can redefine the 'config.loaded_files' to load further
YAML files.
                 """
             },
             "config.max_file_hierarchy_depth" : 10,
             "config.active_parameter_set,Stable": {
                 "DefaultValue": "",
                 "Format"      : "String",
                 "Description" : """Defines the parameter set to activate.

See [Parameter sets](#parameter-sets) documentation.
                 """
             },
             "config.default_ttl": 0
    })

    # Load extra configuration from specified URLs
    xray_recorder.begin_subsegment("config.init:load_files")
    files_to_load = get_list("config.loaded_files", default=[]) if with_predefined_configuration else []
    if "ConfigurationURL" in context:
        files_to_load.extend(context["ConfigurationURL"].split(";"))
    if misc.is_sam_local():
        # For debugging purpose. Ability to override config when in SAM local
        resource_file = "internal:sam.local.config.yaml" 
        log.info("Reading local resource file %s..." % resource_file)
        files_to_load.append(resource_file)


    loaded_files = []
    i = 0
    while i < len(files_to_load):
        f = files_to_load[i]
        if f == "": 
            i += 1
            continue

        try:
            c = yaml.safe_load(misc.get_url(f))
            if c is None: c = {}
            loaded_files.append({
                    "source": f,
                    "config": c
                })
            if "config.loaded_files" in c:
                files_to_load.extend(c["config.loaded_files"].split(";"))
            if i > get_int("config.max_file_hierarchy_depth"):
                log.warning("Too many config file loads (%s)!! Stopping here!" % loaded_files)
                break
        except Exception as e:
            log.warning("Failed to load and/or parse config file '%s'! (Notice: It will be safely ignored!)" % f)
        i += 1
    _init["loaded_files"] = loaded_files
    xray_recorder.end_subsegment()

    builtin_config = _init["all_configs"][0]["config"]
    for cfg in _get_config_layers(reverse=True):
        c = cfg["config"]
        if "config.active_parameter_set" in c:
            if c == builtin_config and isinstance(c, dict):
                _init["active_parameter_set"] = c["config.active_parameter_set"]["DefaultValue"]
            else:
                _init["active_parameter_set"] = c["config.active_parameter_set"]
            break
    _parameterset_sanity_check()

    register({
        "config.ignored_warning_keys,Stable" : {
            "DefaultValue": "",
            "Format"      : "StringList",
            "Description" : """A list of config keys that are generating a warning on usage, to disable them.

Typical usage is to avoid the 'WARNING' Cloudwatch Alarm to trigger when using a non-Stable configuration key.

    Ex: ec2.schedule.key1;ec2.schedule.key2

    Remember that using non-stable configuration keys, is creating risk as semantic and/or existence could change 
    from CloneSquad version to version!
            """
            }
        })
    _init["ignored_warning_keys"] = get_list_of_dict("config.ignored_warning_keys")
Example #9
    def get_prerequisites(self):
        if Cfg.get_int("cron.disable"):
            return

        # Get Timezone related info
        self.timezones = yaml.safe_load(
            misc.get_url("internal:region-timezones.yaml"))
        self.tz = os.getenv("TimeZone")
        self.tz = self.timezones.get(self.context["AWS_DEFAULT_REGION"]) if (
            self.tz is None or self.tz == "") else self.tz
        self.tz = self.tz if self.tz else "UTC"
        self.local_now = arrow.now(
            self.tz)  # Get local time (with local timezone)
        self.utc_offset = self.local_now.utcoffset()
        self.dst_offset = self.local_now.dst()
        log.log(
            log.NOTICE,
            "Current timezone offset to UTC: %s, DST: %s, TimeZone: %s" %
            (self.utc_offset, self.dst_offset, self.tz))

        # Load scheduler KV table
        self.scheduler_table = kvtable.KVTable.create(
            self.context,
            self.context["SchedulerTable"],
            cache_max_age=Cfg.get_duration_secs("scheduler.cache.max_age"))

        # Compute event names
        self.load_event_definitions()

        # Read all existing event rules
        client = self.context["events.client"]
        params = {
            "NamePrefix": "CS-Cron-%s-" % (self.context["GroupName"]),
            "Limit": 10
        }
        self.rules = []
        paginator = client.get_paginator('list_rules')
        response_iterator = paginator.paginate(**params)
        for response in response_iterator:
            if "Rules" in response:
                self.rules.extend(response["Rules"])

        max_rules_per_batch = Cfg.get_int("cron.max_rules_per_batch")
        # Create missing rules
        expected_rule_names = [r["Name"] for r in self.event_names]
        existing_rule_names = [r["Name"] for r in self.rules]
        for r in expected_rule_names:
            if r not in existing_rule_names:
                max_rules_per_batch -= 1
                if max_rules_per_batch <= 0:
                    break
                rule_def = self.get_ruledef_by_name(r)
                schedule_spec = rule_def["Data"][0]["schedule"]
                schedule_expression = self.process_cron_expression(
                    schedule_spec)
                log.log(
                    log.NOTICE,
                    f"Creating {r} {schedule_spec} => {schedule_expression}..."
                )

                # In order to reduce the burden on the user, we perform a sanity check against a well-known
                #    CloudWatch limitation.
                if schedule_expression.startswith("cron("):
                    expr = [i for i in schedule_expression
                            .replace("(", " ").replace(")", " ").split(" ")
                            if i != ""]
                    if len(expr) != 7:
                        log.warn(
                            "Schedule rule '%s' has an invalid cron expression '%s' (wrong number of cron fields)! Ignoring it..."
                            % (rule_def["EventName"], schedule_expression))
                        continue
                    if expr[3] != '?' and expr[5] != '?':
                        log.warn(
                            "Schedule rule '%s' has an invalid cron expression '%s'. "
                            "You can't specify the Day-of-month and Day-of-week fields in the same cron expression. If you specify a value (or a *) in one of the fields, you must use a ? (question mark) in the other."
                            % (rule_def["EventName"], schedule_expression))
                        continue

                # Update Cloudwatch rule
                try:
                    response = client.put_rule(
                        Name=r,
                        Description="Schedule Event '%s': %s" %
                        (rule_def["EventName"], rule_def["Event"]),
                        RoleArn=self.context["CloudWatchEventRoleArn"],
                        ScheduleExpression=schedule_expression,
                        State='ENABLED')
                    log.debug("put_rule: %s" % response)
                except Exception as e:
                    log.exception(
                        "Failed to create scheduler event '%s' (%s) : %s" %
                        (r, schedule_expression, e))

                try:
                    response = client.put_targets(
                        Rule=r,
                        Targets=[{
                            'Arn': self.context["InteractLambdaArn"],
                            'Id': "id%s" % r,
                        }])
                    log.debug("put_targets: %s" % response)
                except Exception as e:
                    log.exception(
                        "Failed to set targets for event rule '%s' : %s" %
                        (r, e))

        # Garbage collect obsolete rules
        for r in existing_rule_names:
            if r not in expected_rule_names:
                max_rules_per_batch -= 1
                if max_rules_per_batch <= 0:
                    break
                try:
                    client.remove_targets(Rule=r, Ids=["id%s" % r])
                    client.delete_rule(Name=r)
                except Exception as e:
                    log.exception("Failed to delete rule '%s' : %s" % (r, e))
Example #10
    def send_commands(self):
        if not Cfg.get_int("ssm.enable"):
            return

        client = self.context["ssm.client"]
        refs   = {
            "Linux": {
                "document": "AWS-RunShellScript",
                "shell": [s.rstrip() for s in io.StringIO(str(misc.get_url("internal:cs-ssm-agent.sh"), "utf-8")).readlines()],
                "ids": [],
            }
        }
        # Purge already replied results
        valid_cmds = []
        for cmd in self.run_cmd_states["Commands"]:
            if cmd.get("Complete") or cmd["Expiration"] < misc.seconds_from_epoch_utc():
                continue
            valid_cmds.append(cmd)
        self.run_cmd_states["Commands"] = valid_cmds
        # Purge outdated former results
        former_results  = self.run_cmd_states["FormerResults"]
        for i in list(former_results.keys()):
            for cmd in list(former_results[i].keys()):
                if former_results[i][cmd]["Expiration"] < misc.seconds_from_epoch_utc():
                    del former_results[i][cmd]
            if len(former_results[i].keys()) == 0:
                del former_results[i]

        # Send commands
        for cmd in self.commands_to_send:
            platforms = {}
            for i in cmd["InstanceIds"]:
                info = self.is_instance_online(i)
                if info is None:
                    continue
                platform_type = info["PlatformType"]
                pltf          = refs.get(platform_type)
                if pltf is None:
                    log.warning("Can't run a command on an unsupported platform : %s" % info["PlatformType"])
                    continue # Unsupported platform
                if platform_type not in platforms:
                    platforms[platform_type] = copy.deepcopy(pltf)
                if i not in platforms[platform_type]["ids"]:
                    platforms[platform_type]["ids"].append(i)

            command = cmd["Command"]
            args    = cmd["CommandArgs"]
            for p in platforms:
                pltf         = platforms[p]
                instance_ids = pltf["ids"]
                if not len(instance_ids):
                    continue
                document     = pltf["document"]
                shell        = pltf["shell"]
                i_ids        = instance_ids
                # Perform string parameter substitutions in the helper script
                shell_input = [l.replace("##Cmd##", command) for l in shell]
                shell_input = [l.replace("##ApiGwUrl##", self.context["InteractAPIGWUrl"]) for l in shell_input]
                if isinstance(args, str):
                    shell_input = [l.replace("##Args##", args) for l in shell_input]
                else:
                    shell_input = [l.replace("##Args##", args["Args"] if "Args" in args else "") for l in shell_input]
                    for s in args:
                        shell_input = [l.replace(f"##{s}##", str(args[s])) for l in shell_input]

                while len(i_ids):
                    log.log(log.NOTICE, f"SSM SendCommand({p}): {command}({args}) to %s." % i_ids[:50])

                    try:
                        response = client.send_command(
                            InstanceIds=i_ids[:50],
                            DocumentName=document,
                            TimeoutSeconds=cmd["Timeout"],
                            Comment=cmd["Comment"],
                            Parameters={
                                'commands': shell_input,
                                'executionTimeout': [str(cmd["Timeout"])]
                            },
                            MaxConcurrency='100%',
                            MaxErrors='100%',
                            CloudWatchOutputConfig={
                                'CloudWatchLogGroupName': self.context["SSMLogGroup"],
                                'CloudWatchOutputEnabled': True
                            }
                        )
                        self.run_cmd_states["Commands"].append({
                            "Id": response["Command"]["CommandId"],
                            "InstanceIds": i_ids[:50],
                            "ReceivedInstanceIds": [],
                            "Command": command,
                            "CommandArgs": args,
                            "Results": {},
                            "Expiration": misc.seconds_from_epoch_utc() + Cfg.get_duration_secs("ssm.state.command.default_ttl")
                        })
                        log.log(log.NOTICE, f"SSM RunCommand (Id:%s) : {command}({args})" % response["Command"]["CommandId"])
                    except Exception as e:
                        # Under rare circumstances, we can receive an Exception while trying to send
                        log.log(log.NOTICE, f"Failed to do SSM SendCommand : {e}, %s" % i_ids[:50])
                    i_ids = i_ids[50:]
        self.o_state.set_state_json("ssm.events.run_commands", self.run_cmd_states, compress=True, TTL=self.ttl)
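
The i_ids[:50] slicing above matches the SSM SendCommand limit of 50 instance ids per call; the same batching in isolation (the instance ids are made up):

instance_ids = ["i-%017x" % n for n in range(120)]   # fake instance ids
while instance_ids:
    batch, instance_ids = instance_ids[:50], instance_ids[50:]
    print("would send_command to %d instances" % len(batch))   # 50, 50, 20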