def solve(self, row):
    """Recursively place queens row by row, printing every complete solution.

    Returns True when a full board has been reached; intermediate recursion
    levels return None (all branches are explored, not just the first).
    """
    if row >= self.N:
        # Every row holds a queen: a complete solution.
        print("#{0} Queen solution found".format(self.N))
        pprint(self.board)
        return True
    # Each candidate column is tried on an independent deep copy so that
    # sibling branches never see each other's placements.
    for col in self.get_available(row):
        candidate = deepcopy(self)
        candidate.place(row, col)
        candidate.solve(row + 1)
def ApiGWParameters_CreateOrUpdate(data, AccountId=None, Region=None, Dummy=None, ApiGWConfiguration=None, ApiGWEndpointConfiguration=None, DefaultGWPolicyURL=None):
    """CloudFormation helper: compute API Gateway parameters.

    Fills 'data' with the gateway type, resource policy content and VPC
    endpoint DNS, applying user overrides parsed from ApiGWConfiguration.
    Raises ValueError on an unknown meta key or an invalid GWType value.
    """
    data["GWType"] = "REGIONAL"
    data["GWPolicy"] = get_policy_content(DefaultGWPolicyURL, AccountId, Region)
    data["VpcEndpointDNS"] = ""
    config = misc.parse_line_as_list_of_dict(ApiGWConfiguration, leading_keyname="GWType")
    CONFIG_KEYS = ["GWPolicy", "GWType", "VpcEndpointDNS"]
    if len(config):
        a = config[0]
        # Validate the user-supplied keys BEFORE merging them into 'data':
        # the original updated first, leaving 'data' partially mutated when
        # a bad configuration raised.
        for kw in a.keys():
            if kw not in CONFIG_KEYS:
                raise ValueError("ApiGWConfiguration: Unknown meta key '%s'!" % kw)
            if kw == "GWType":
                valid_endpoint_configurations = ["REGIONAL", "PRIVATE"]
                if a[kw] not in valid_endpoint_configurations:
                    raise ValueError("Can't set API GW Endpoint to value '%s'! (valid values are %s)" %
                                     (a[kw], valid_endpoint_configurations))
        data.update(a)
        # A non-empty GWPolicy override is a URL: replace it with the fetched content.
        if "GWPolicy" in a and len(a["GWPolicy"]):
            data["GWPolicy"] = get_policy_content(a["GWPolicy"], AccountId, Region)
            log.info(Dbg.pprint(data["GWPolicy"]))
    data["EndpointConfiguration.Type"] = data["GWType"]
def get_targetgroups_info(self):
    """Collect per-targetgroup usability statistics.

    Returns a dict with one entry per target group (useable/unuseable
    counts and ids) plus the deduplicated union of all unuseable target
    ids across groups.
    """
    active_instances = self.ec2.get_running_instances()
    info = {
        "TargetGroupInfo": [],
        "AllUnuseableTargetIds": [],
    }
    all_unuseable_ids = info["AllUnuseableTargetIds"]
    for t in self.targetgroups:
        targetgroup_arn = t["TargetGroupArn"]
        unuseable_ids = self.get_registered_instance_ids(
            targetgroup=targetgroup_arn, state="draining,unavail,unhealthy")
        useable_instances_count = len(active_instances) - len(unuseable_ids)
        # Dedupe into the global list. (The original also tested each id for
        # membership in its own source list — a no-op, removed.)
        for i in unuseable_ids:
            if i not in all_unuseable_ids:
                all_unuseable_ids.append(i)
        info["TargetGroupInfo"].append({
            "TargetGroupArn": targetgroup_arn,
            "UseableInstanceCount": useable_instances_count,
            "UnuseableInstanceCount": len(unuseable_ids),
            "UnuseableInstanceIds": unuseable_ids
        })
    info["MaxUnuseableTargetsOverTargetGroups"] = len(all_unuseable_ids)
    log.debug(Dbg.pprint(info))
    return info
def configuration_dump(self, context, event, response, cacheddata):
    """Dump the current configuration (GET) or import one (POST).

    Supports 'format=yaml', 'raw=true', 'unstable=true' and
    'with_maintenance_window=true' query parameters.
    """
    response["statusCode"] = 200
    is_yaml = event.get("format", "").lower() == "yaml"
    if event.get("with_maintenance_window", "").lower() == "true":
        # We load the EC2 and SSM modules to inherit their override parameters if a SSM Maintenance Window is active
        misc.load_prerequisites(self.context, ["o_ec2", "o_ssm"])
    if event.get("httpMethod") == "POST":
        try:
            doc = yaml.safe_load(event["body"]) if is_yaml else json.loads(event["body"])
            Cfg.import_dict(doc)
            response["body"] = "Ok (%d key(s) processed)" % len(doc.keys())
        except Exception as e:
            response["statusCode"] = 500
            response["body"] = "Can't parse YAML/JSON document : %s " % e
            return False
    else:
        only_stable_keys = event.get("unstable", "").lower() != "true"
        if event.get("raw", "").lower() == "true":
            dump = Cfg.get_dict()
        else:
            dump = config.dumps(only_stable_keys=only_stable_keys)
        response["body"] = yaml.dump(dump) if is_yaml else Dbg.pprint(dump)
    return True
def discovery(self, context, event, response, cacheddata):
    """Return the caller identity together with environment discovery data."""
    payload = {
        "identity": event["requestContext"]["identity"],
        "discovery": misc.discovery(self.context),
    }
    response["statusCode"] = 200
    response["body"] = Dbg.pprint(payload)
    return True
def metadata(self, context, event, response, cacheddata):
    """Return metadata for the calling EC2 instance (AWS_IAM-authenticated API).

    The instance id is derived from the IAM caller identity
    ("accesskey:i-xxxx") or overridden by the 'instanceid' query parameter.
    The instance scaling state is served from the query cache when possible,
    otherwise read fresh from DynamoDB and cached.
    """
    if "requestContext" not in event or "identity" not in event["requestContext"]:
        response["statusCode"] = 403
        response["body"] = "Must call this API with AWS_IAM authentication."
        return False
    identity = event["requestContext"]["identity"]
    caller = identity["caller"]
    try:
        access_key, instance_id = caller.split(":")
        if not instance_id.startswith("i-"):
            response["statusCode"] = 500
            response["body"] = "Can't retrieve requesting InstanceId (%s)." % caller
            return False
    except Exception:
        # Narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt.
        log.exception("Failed to retrieve IAM caller id (%s)!" % caller)
        response["statusCode"] = 500
        response["body"] = "Can't process metadata caller '%s'!" % caller
        return False
    if "instanceid" in event:
        instance_id = event["instanceid"]  # Override from query string
    query_cache.load_cached_data()
    d = next(filter(lambda d: d["InstanceId"] == instance_id, cacheddata), None)
    if d is None:
        response["statusCode"] = 400
        response["body"] = "No information for instance id '%s'!" % instance_id
        return False
    cache = query_cache.get(f"metadata:ec2.instance.scaling.state.{instance_id}")
    if cache is not None:
        state = cache["body"]
    else:
        # Read ultra-fresh state of the instance directly from DynamodDB
        state = self.context["o_ec2"].get_state(
            f"ec2.instance.scaling.state.{instance_id}", direct=True)
        if state is not None and state != "":
            query_cache.put(
                f"metadata:ec2.instance.scaling.state.{instance_id}", {
                    "body": state,
                    "statusCode": 200
                })
        log.info(
            f"Read instance state for {instance_id} directly for state table ({state})"
        )
    d["State"] = state
    response["statusCode"] = 200
    response["body"] = Dbg.pprint(d)
    return True
def call(event, context):
    """Dispatch a CloudFormation custom-resource event to its helper function.

    The target is named '<Helper>_<RequestType>'; if that exact name does not
    exist, the first global matching '<Helper>_.*<RequestType>.*' is used.
    Raises ValueError when 'Helper' is missing or no function matches.
    """
    parameters = event["ResourceProperties"].copy()
    # Validate 'Helper' BEFORE using it: the original read
    # parameters["Helper"] first, raising KeyError instead of the
    # intended ValueError.
    if "Helper" not in parameters:
        raise ValueError("Missing 'Helper' resource property!")
    request_type = event["RequestType"]
    function_name = "%s_%s" % (parameters["Helper"], request_type)
    match_name = "%s_.*%s.*" % (parameters["Helper"], request_type)
    if function_name not in globals():
        function_name = next(
            filter(lambda f: re.match(match_name, f), globals()), None)
        if function_name is None:
            # Report the searched pattern: 'function_name' is None here
            # (the original formatted the None into the message).
            raise ValueError("Unknown helper function '%s'!" % match_name)
    del parameters["Helper"]
    del parameters["ServiceToken"]
    log.debug(Dbg.pprint(parameters))
    log.info("Calling helper function '%s'(%s)..." % (function_name, parameters))
    function = globals()[function_name]
    function(helper.Data, **parameters)
    log.info("Data: %s" % helper.Data)
    print("Data: %s" % Dbg.pprint(helper.Data))
def stop_db(self, arn):
    """Stop an RDS database (cluster or single instance).

    Best-effort: any exception is logged as a warning, never raised.
    """
    try:
        client = self.context["rds.client"]
        db = self.get_rds_db(arn)
        db_type = db["_Meta"]["dbType"]
        log.info("Stopping RDS DB '%s' (type:%s)" % (arn, db_type))
        if db_type == "cluster":
            response = R(lambda args, kwargs, r: "DBCluster" in r,
                         client.stop_db_cluster,
                         DBClusterIdentifier=db["DBClusterIdentifier"])
        elif db_type == "db":
            response = R(lambda args, kwargs, r: "DBInstance" in r,
                         client.stop_db_instance,
                         DBInstanceIdentifier=db["DBInstanceIdentifier"])
        else:
            # The original fell through here and hit a NameError on
            # 'response' (masked by the broad except); fail explicitly.
            raise ValueError("Unknown RDS db type '%s' for '%s'!" % (db_type, arn))
        log.debug(Dbg.pprint(response))
    except Exception as e:
        log.warning("Got exception while stopping DB '%s'! : %s" % (arn, e))
def scheduler_dump(self, context, event, response, cacheddata):
    """Dump the scheduler KV table (GET) or replace its content (POST).

    Honors the 'format=yaml' query parameter for both directions.
    """
    scheduler_table = kvtable.KVTable(self.context, self.context["SchedulerTable"])
    scheduler_table.reread_table()
    is_yaml = event.get("format") == "yaml"
    response["statusCode"] = 200
    if event.get("httpMethod") == "POST":
        try:
            doc = yaml.safe_load(event["body"]) if is_yaml else json.loads(event["body"])
            scheduler_table.set_dict(doc)
            response["body"] = "Ok (%d key(s) processed)" % len(doc.keys())
        except Exception as e:
            response["statusCode"] = 500
            response["body"] = "Can't parse YAML/JSON document : %s " % e
            return False
    else:
        doc = scheduler_table.get_dict()
        response["body"] = yaml.dump(doc) if is_yaml else Dbg.pprint(doc)
    return True
def interact_handler_entrypoint(event, context):
    """Lambda entry point for the Interact function.

    Parameters
    ----------
    event: dict, required
    context: object, required
        Lambda Context runtime methods and attributes
        Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    -------
    dict
        The API Gateway response structure filled by the interact handler.
    """
    global ctx
    ctx["now"] = misc.utc_now()
    ctx["FunctionName"] = "Interact"
    log.info("Processing start (version=%s)" % (ctx.get("CloneSquadVersion")))
    init()
    # We do not want notification and event management in the Interact function
    notify.do_not_notify = True
    # Optionally archive the incoming event to S3 for later debugging.
    if ctx["LoggingS3Path"] != "" and Cfg.get_int("app.archive_interact_events"):
        s3path = "%s/InteractEvents/%s.json" % (ctx["LoggingS3Path"], ctx["now"])
        log.warning("Pushing Interact event in '%s'!" % s3path)
        misc.put_s3_object(s3path, Dbg.pprint(event))
    response = {}
    if ctx["o_interact"].handler(event, context, response):
        log.debug("API Gateway response: %s" % response)
    sqs.process_sqs_records(ctx, event)
    return response
def mark_walked(self):
    """Mark this cell as visited ('x') on the shared maze and show progress."""
    Maze.maze[self.row][self.col] = 'x'
    pprint(Maze.maze)
    # The original ended with a bare 'print' (a Python 2 remnant that is a
    # no-op expression in Python 3); call it to emit the intended blank line.
    print()
def handler(self, event, context):
    """Process CloudWatch alarm state changes delivered through SNS.

    Decodes each SNS record, stores the alarm state transition in the
    per-alarm DynamoDB table, and wakes up the Main Lambda when at least
    one record was recorded.
    """
    # Protect from bad data and keep only SNS messages
    if "Records" not in event:
        log.error("Not a valid SNS event")
        return
    sns_records = []
    for sns_msg in event["Records"]:
        if sns_msg.get("EventSource") == "aws:sns":
            try:
                sns_msg["_decoded_message"] = json.loads(sns_msg["Sns"]["Message"])
                sns_records.append(sns_msg)
            except Exception as e:
                log.exception("Failed to decode message %s : %s" % (sns_msg, e))
    log.debug(Dbg.pprint(sns_records))

    need_main_update = False
    # For each SNS records, we keep track of important data in
    # a DynamoDB table
    for sns_msg in sns_records:
        message = sns_msg["_decoded_message"]
        timestamp = datetime.fromisoformat(
            message["StateChangeTime"].replace("+0000", "")).replace(tzinfo=timezone.utc)
        alarm_name = message["AlarmName"]
        new_state_reason = message["NewStateReason"]
        new_state_value = message["NewStateValue"]
        namespace = message["Trigger"]["Namespace"]
        metric_name = message["Trigger"]["MetricName"]
        dimensions = message["Trigger"]["Dimensions"]
        instance_id = "None"
        try:
            # Alarm dimensions use lowercase 'name'/'value' keys in the
            # SNS notification payload.
            instance_id = next(
                filter(lambda dimension: dimension['name'] == 'InstanceId',
                       message["Trigger"]["Dimensions"]))["value"]
        except Exception as e:
            log.exception("Failed to get InstanceId from dimension %s : %s" %
                          (message["Trigger"]["Dimensions"], e))
            continue
        now = misc.seconds_from_epoch_utc()
        response = self.context["dynamodb.client"].update_item(
            Key={"AlarmName": {'S': alarm_name}},
            UpdateExpression=
                "set InstanceId=:instanceid, %s_LastAlarmTimeStamp=:timestamp, %s_LastNewStateReason=:lastnewstatereason,"
                "%s_LastMetricName=:lastmetricname, %s_LastMetricNamespace=:lastmetricnamespace, "
                "%s_Event=:event,"
                "ExpirationTime=:expirationtime,"
                "LastRecordUpdateTime=:lastrecordupdatetime" %
                (new_state_value, new_state_value, new_state_value,
                 new_state_value, new_state_value),
            ExpressionAttributeValues={
                ':instanceid': {'S': instance_id},
                ':timestamp': {'S': str(timestamp)},
                ':lastnewstatereason': {'S': new_state_reason},
                ':lastmetricname': {'S': metric_name},
                ':lastmetricnamespace': {'S': namespace},
                ':event': {'S': json.dumps(message)},
                ':expirationtime': {'N': str(now + Cfg.get_duration_secs(
                    "snsmgr.record_expiration_delay"))},
                ':lastrecordupdatetime': {'N': str(now)}
            },
            ReturnConsumedCapacity='TOTAL',
            TableName=self.context["AlarmStateEC2Table"],
        )
        need_main_update = True

    if need_main_update:
        # Send a message to wakeup the Main Lambda function that is in
        # charge to take appropriate decision
        sqs.call_me_back_send(self.ec2)
        log.debug("Sent SQS message to Main lambda queue: %s" %
                  self.context["MainSQSQueue"])
def dump_configuration(self, context, event, response):
    """Return the full configuration dump in the API response body."""
    response["statusCode"] = 200
    response["body"] = Dbg.pprint(config.dumps())
def cloudwatch_get_metric_cache(self, context, event, response):
    """Return the CloudWatch metric cache in the API response body."""
    response["statusCode"] = 200
    response["body"] = Dbg.pprint(context["o_cloudwatch"].get_metric_cache())
def fleet_status(self, context, event, response, cacheddata):
    """Return the precomputed fleet status (cached data) as the response body."""
    response["statusCode"] = 200
    response["body"] = Dbg.pprint(cacheddata)
    return True
def allmetadatas(self, context, event, response, cacheddata):
    """Return the precomputed metadata of all instances from the query cache."""
    response["statusCode"] = 200
    query_cache.load_cached_data()
    response["body"] = Dbg.pprint(
        query_cache.interact_precomputed_data["data"]["metadata"])
    return True
def main():
    """Demo driver: walk from (1,1) to (5,5) and print the resulting maze."""
    origin = Point(1, 1)
    destination = Point(5, 5)
    origin.move(destination)
    pprint(Maze.maze)
def control_instances(self, context, event, response, cacheddata):
    """Query or update the 'unstoppable'/'unstartable' instance control states.

    The filter query can come from a POST JSON body and/or from the URL
    query string (instanceids, instancenames, excluded, subfleetnames).
    With 'mode=add|delete' the control state is modified first; the current
    decorated state table is always returned.
    """
    path = event["path"].split("/")[-1:][0]
    if path not in ["unstoppable", "unstartable"]:
        response["statusCode"] = 404
        response["body"] = f"Unknown control state '{path}'!"
        return False
    filter_query = {}
    # The filter query can be sent by POST
    if "httpMethod" in event and event["httpMethod"] == "POST":
        try:
            filter_query = json.loads(event["body"])
        except Exception as e:
            response["statusCode"] = 400
            response["body"] = f"Failed to parse JSON body: {e}!"
            return False
    # instance ids can be specified in the URL query string
    if event.get("instanceids"):
        filter_query["InstanceIds"] = event["instanceids"].split(",")
    # instance names can be specified in the URL query string
    if event.get("instancenames"):
        filter_query["InstanceNames"] = event["instancenames"].split(",")
    # Do we need to excluded the selected instance ids
    if event.get("excluded"):
        filter_query["Excluded"] = event.get("excluded") in ["true", "True"]
    # subfleet names can be specified in the URL query string
    if event.get("subfleetnames"):
        filter_query["SubfleetNames"] = event.get("subfleetnames").split(",") if event.get("subfleetnames") != "" else None
    mode = event.get("mode")
    if mode is not None:
        valid_modes = ["add", "delete"]
        if mode not in valid_modes:
            response["statusCode"] = 400
            response["body"] = f"Invalid mode '{mode}'! (Must be one of {valid_modes})"
            return False
        self.context["o_ec2"].update_instance_control_state(
            path, mode, filter_query, event.get("ttl"))
    ctrl = self.context["o_ec2"].get_instance_control_state()
    # Decorate the structure with current name of instance if any
    instances = self.context["o_ec2"].get_instances()
    for instance_id in ctrl[path].keys():
        instance = next(filter(lambda i: i["InstanceId"] == instance_id, instances))
        name_tag = next(filter(lambda t: t["Key"] == "Name", instance["Tags"]), None)
        ctrl[path][instance_id]["InstanceName"] = name_tag["Value"] if name_tag is not None else None
        subfleet_tag = next(
            filter(lambda t: t["Key"] == "clonesquad:subfleet-name", instance["Tags"]), None)
        ctrl[path][instance_id]["SubfleetName"] = subfleet_tag["Value"] if subfleet_tag is not None else None
    response["statusCode"] = 200
    response["body"] = Dbg.pprint(ctrl[path])
    # NOTE(review): returns False even on the 200 success path — possibly
    # deliberate (e.g. to avoid result caching); confirm against the dispatcher.
    return False
def cloudwatch_sentmetrics(self, context, event, response, cacheddata):
    """Return the cached list of sent CloudWatch metrics as the response body."""
    response["statusCode"] = 200
    response["body"] = Dbg.pprint(cacheddata)
    return True