def run_scheduler_task(task_name, stack_name):
    """
    Runs a configured task by building a select-resources event for each of its account/region sub-tasks
    and passing it to the SelectResourcesHandler
    :param task_name: Name of the task in the task configuration
    :param stack_name: Name of the stack, used in the error message if the task is not configured
    """
    configuration = TaskConfiguration()
    task_item = configuration.get_config_item(task_name)
    if task_item is None:
        raise ValueError("Task \"{}\" is not configured in stack \"{}\"".format(task_name, stack_name))

    print_verbose("Configuration item is\n{}", safe_json(task_item, indent=3))

    task = TaskConfiguration(context=used_context, logger=ConsoleLogger()).configuration_item_to_task(task_item)

    event = {
        handlers.HANDLER_EVENT_ACTION: handlers.HANDLER_ACTION_SELECT_RESOURCES,
        handlers.HANDLER_EVENT_TASK: task,
        handlers.HANDLER_EVENT_SOURCE: sys.argv[0],
        handlers.HANDLER_EVENT_TASK_DT: datetime.now().isoformat()
    }

    for sub_task in ScheduleHandler.task_account_region_sub_tasks(task):
        event[handlers.HANDLER_EVENT_SUB_TASK] = sub_task
        print_verbose("Event is \n{}", safe_json(event, indent=3))

        handler = handlers.create_handler("SelectResourcesHandler", event, used_context)
        result = handler.handle_request()
        print_verbose("(Sub) Task result is\n{}", safe_json(result, indent=3))
def __init__(self, event, context):
    """
    Initializes helper setup class
    :param event: CloudFormation custom resource event
    :param context: Lambda context
    """
    CustomResource.__init__(self, event, context)

    # resource properties without the ServiceToken and Timeout entries
    self.arguments = {a: self.resource_properties[a]
                      for a in self.resource_properties
                      if a not in ["ServiceToken", "Timeout"]}

    self.configuration_bucket = os.getenv(configuration.ENV_CONFIG_BUCKET, None)
    self.automator_role_arn = self.arguments.get("OpsAutomatorLambdaRole")
    self.events_forward_role = self.arguments.get("EventForwardLambdaRole")
    self.ops_automator_topic_arn = self.arguments.get("OpsAutomatorTopicArn")
    self.use_ecs = TaskConfiguration.as_boolean(self.arguments.get("UseEcs", False))
    self.optimize_cross_account_template = TaskConfiguration.as_boolean(
        self.arguments.get("OptimizeCrossAccountTemplate", False))

    self.account = os.getenv(handlers.ENV_OPS_AUTOMATOR_ACCOUNT)
    self.stack_version = self.arguments["StackVersion"]

    # setup logging
    dt = datetime.utcnow()
    classname = self.__class__.__name__
    logstream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day)
    self._logger = QueuedLogger(logstream=logstream, context=context, buffersize=50)
def update_task(name, context=None, **kwargs):
    """
    Updates the specified task. An exception is raised if the task does not exist.
    :param name: Name of the task. This name overwrites the name in kwargs if it is used there
    :param kwargs: Task parameters dictionary, see create_task for details
    :param context: Lambda context
    :return: Updated task item
    """
    with _get_logger(context=context) as logger:
        logger.info("update_task")

        config = TaskConfiguration(context=context, logger=logger)
        if name is None or len(name) == 0:
            raise ValueError(ERR_NO_TASK_NAME)

        item = config.get_config_item(name)
        if item is None:
            raise ValueError(ERR_TASK_DOES_NOT_EXIST.format(name))

        # copy to avoid side effects when modifying arguments
        args = copy.deepcopy(kwargs)
        args[configuration.CONFIG_TASK_NAME] = name

        stack_id = item.get(configuration.CONFIG_STACK_ID)
        if stack_id is not None:
            args[configuration.CONFIG_STACK_ID] = stack_id

        item = config.put_config_item(**args)
        return safe_json(item)
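# Hedged usage sketch for update_task. The task name is hypothetical and must already exist in the
# configuration table; the CONFIG_* constants are the ones documented for create_task further below,
# and the interval value is an illustrative cron expression only.
updated = update_task(
    "HypotheticalTask",
    context=None,
    **{configuration.CONFIG_ENABLED: False,         # suspend execution of the task
       configuration.CONFIG_INTERVAL: "0 3 * * *"   # reschedule to run daily at 03:00
       })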
def delete_external_task_config_stacks(self):
    """
    Deletes external stacks that were used to create configuration items
    :return:
    """
    self._logger.info(INF_DELETING_STACKS)

    stacks = TaskConfiguration(context=self.context, logger=self._logger).get_external_task_configuration_stacks()

    if len(stacks) == 0:
        self._logger.info(INF_NO_STACKS)
        return

    self._logger.info(INF_DELETED_STACKS, ", ".join(stacks))

    cfn = boto3.resource("cloudformation")
    for s in stacks:
        self._logger.info(INF_STACK)
        try:
            stack = cfn.Stack(s)
            add_retry_methods_to_resource(stack, ["delete"], context=self.context)
            stack.delete_with_retries()
        except Exception as ex:
            self._logger.error(ERR_DELETING_STACK, s, str(ex))
def _get_task(name, context=None, logger=None, exception_if_not_exists=True):
    item = TaskConfiguration(context=context, logger=logger).get_config_item(name)
    if item is None and exception_if_not_exists:
        raise ValueError("not found: task with name {} does not exist".format(name))
    return item
def delete_task(name, exception_if_not_exists=False, context=None):
    """
    Deletes the specified task
    :param name: Name of the task to be deleted
    :param exception_if_not_exists: if set to True an exception is raised if the task does not exist
    :param context: Lambda context
    :return: Deleted task item
    """
    with _get_logger(context=context) as logger:
        logger.info("delete_task")

        config = TaskConfiguration(context=context, logger=logger)
        if exception_if_not_exists:
            item = config.get_config_item(name)
            if item is None:
                raise ValueError(ERR_TASK_DOES_NOT_EXIST.format(name))
        else:
            item = {"Name": name}

        config.delete_config_item(name)
        return safe_json(item)
def get_tasks(include_internal=False, context=None):
    """
    Returns all available tasks
    :param include_internal: True if internal tasks must be included
    :param context: Lambda context
    :return: all available tasks
    """
    with _get_logger(context=context) as logger:
        logger.info("get_tasks()")
        tasks = [t for t in TaskConfiguration(context=context, logger=logger).config_items(include_internal)]
        return safe_json(tasks)
def get_task(name, context=None):
    """
    Returns item for specified task
    :param name: Name of the task
    :param context: Lambda context
    :return: Task item, raises exception if task with specified name does not exist
    """
    with _get_logger(context=context) as logger:
        logger.info("get_task")
        item = TaskConfiguration(context=context, logger=logger).get_config_item(name)
        if item is None:
            raise ValueError("not found: task with name {} does not exist".format(name))
        return safe_json(item)
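# Hedged usage sketch of the task configuration API defined above. The task name is hypothetical and
# context=None assumes the calls are made outside of a Lambda invocation.
all_tasks = get_tasks(include_internal=False, context=None)    # JSON with all configured tasks
one_task = get_task("HypotheticalTask", context=None)          # raises ValueError if the task does not exist
deleted = delete_task("HypotheticalTask", exception_if_not_exists=True, context=None)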
# nested helper defined inside a method of the template builder, so it closes over self and its
# _template_parameters and _parameter_labels members
def setup_action_parameter(name, action_parameter):
    # single action parameter setup
    parameter_template = {}

    # parameter type
    parameter_type = action_parameter[actions.PARAM_TYPE]
    if parameter_type in [int, long, float, Decimal]:
        parameter_template["Type"] = "Number"
    elif isinstance([], parameter_type):
        parameter_template["Type"] = "CommaDelimitedList"
    else:
        parameter_template["Type"] = "String"
        if action_parameter.get(actions.PARAM_REQUIRED, False) and actions.PARAM_MIN_LEN not in action_parameter:
            parameter_template[actions.PARAM_MIN_LEN] = 1

    # default allowed values for booleans
    if parameter_type == bool:
        parameter_template["AllowedValues"] = YES_NO

    # for every parameter option...
    for p in PARAM_OPTIONS:
        if p in action_parameter:
            # boolean defaults are stored as their Yes/No string representation
            if p == actions.PARAM_DEFAULT:
                if parameter_type in [bool]:
                    value = TaskConfiguration.as_boolean(action_parameter[actions.PARAM_DEFAULT])
                    parameter_template[p] = YES if value else NO
                    continue
            if isinstance(action_parameter[p], type([])):
                parameter_template[p] = action_parameter[p]
            else:
                parameter_template[p] = str(action_parameter[p])

    # add parameter to template
    self._template_parameters[name] = parameter_template

    # add label
    if actions.PARAM_LABEL in action_parameter:
        self._parameter_labels[name] = {"default": action_parameter[actions.PARAM_LABEL]}
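# Hedged illustration of the mapping performed by setup_action_parameter for a hypothetical boolean
# action parameter. It assumes the actions module defines PARAM_DESCRIPTION and includes it in
# PARAM_OPTIONS, and that the PARAM_* option constants equal the CloudFormation parameter attribute
# names they are written to; the parameter name and texts are illustrative only.
action_parameter = {
    actions.PARAM_TYPE: bool,
    actions.PARAM_DEFAULT: True,
    actions.PARAM_DESCRIPTION: "Copy tags from the source resource",
    actions.PARAM_LABEL: "Copy resource tags"
}

# setup_action_parameter("CopyResourceTags", action_parameter) would then produce approximately:
# self._template_parameters["CopyResourceTags"] = {
#     "Type": "String",
#     "AllowedValues": YES_NO,                                   # e.g. ["Yes", "No"]
#     actions.PARAM_DESCRIPTION: "Copy tags from the source resource",
#     actions.PARAM_DEFAULT: YES                                 # boolean default mapped to Yes/No
# }
# self._parameter_labels["CopyResourceTags"] = {"default": "Copy resource tags"}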
def handle_request(self):
    """
    Handles the cloudwatch rule timer event
    :return: Started tasks, if any, information
    """
    start = datetime.now()

    try:
        task_config = TaskConfiguration(context=self._context, logger=self._logger)
        if not self.execute_task_request:
            result = self.handle_scheduler_tasks(task_config)
        else:
            result = self.handle_execute_task_request(task_config)

        running_time = float((datetime.now() - start).total_seconds())
        self._logger.info(INFO_RESULT, running_time)

        return result
    finally:
        self._logger.flush()
def _delete_request(self):
    try:
        self._logger.info("Deleting Task resource")
        name = self.resource_properties.get(CONFIG_TASK_NAME)
        self._logger.info("Task name is {}, physical resource id is {}", name, self.physical_resource_id)

        # as the task can be part of a different stack than the scheduler stack that owns the configuration table,
        # the table may already have been deleted by that stack, so first check if it still exists
        if TaskConfiguration.config_table_exists():
            delete_task(self.physical_resource_id)
            self._logger.info("Deleted resource {} with physical resource id {}", name, self.physical_resource_id)
        else:
            self._logger.info("Configuration table no longer exists so deletion of the item is skipped")
        return True
    except Exception as ex:
        self.response["Reason"] = str(ex)
        return False
def create_task(context=None, **kwargs):
    """
    Creates a new task

    :param kwargs: Task parameters
    :param context: Lambda context

    Constants can be found in configuration/__init__.py

    -CONFIG_ACTION_NAME: Name of the action executed by the task, an exception is raised if not specified or if the
     action does not exist (mandatory, string)

    -CONFIG_DEBUG: Set to True to log additional debug information for this task (optional, default False, boolean)

    -CONFIG_DESCRIPTION: Task description (optional, default None, string)

    -CONFIG_CROSS_ACCOUNT_ROLES: List of cross account roles for cross account processing. Note that if the account of
     a role has already been found in another role, or if the account of a role is the processed account of the
     scheduler, a warning is generated when executing the task and the role is skipped (optional, default [],
     List<string>)

    -CONFIG_ENABLED: Set to True to enable execution of the task, False to suspend executions (optional, default True,
     boolean)

    -CONFIG_INTERNAL: Flag to indicate the task is used for internal tasks of the scheduler (optional, default False,
     boolean)

    -CONFIG_INTERVAL: Cron expression to schedule time/date based execution of the task (optional, default "", string)

    -CONFIG_TASK_TIMEOUT: Timeout in minutes for the task to complete (optional, default is the action's value or the
     global timeout, number)

    -CONFIG_TASK_NAME: Name of the task, an exception is raised if not specified or if the name already exists
     (mandatory, string)

    -CONFIG_PARAMETERS: Dictionary with names and values passed to the executed action of this task (optional,
     default {}, dictionary)

    -CONFIG_THIS_ACCOUNT: Set to True to run the task for resources in the account of the scheduler (optional,
     default True, boolean)

    -CONFIG_TIMEZONE: Timezone for time/date based scheduling of this task (optional, default UTC, string)

    -CONFIG_TAG_FILTER: Tag filter used to select resources for the task instead of the name of the task in the list
     of values of the automation tag. Only allowed if the selected resources support tags (optional, default "",
     string)

    -CONFIG_REGIONS: Regions in which to run the task. Use "*" for all regions in which the service for this task's
     action is available. If no regions are specified the region in which the scheduler is installed is used as the
     default. Specifying one or more regions for services that are not region specific will generate a warning when
     processing the task (optional, default current region, List<string>)

    -CONFIG_STACK_ID: Id of the stack if the task is created as part of a cloudformation template (optional,
     default None, string)

    -CONFIG_DRYRUN: Dryrun parameter passed to the executed action (optional, default False, boolean)

    -CONFIG_EVENTS: List of resource events that trigger the task to be executed (optional, default, List<string>)

    :return: Item created in the task configuration
    """
    with _get_logger(context=context) as logger:
        logger.info("create_task")

        config = TaskConfiguration(context=context, logger=logger)
        name = kwargs.get(configuration.CONFIG_TASK_NAME)
        if name is None or len(name) == 0:
            raise ValueError(ERR_NO_TASK_NAME)

        item = config.get_config_item(name)
        if item is not None:
            raise ValueError(ERR_TASK_DOES_ALREADY_EXIST.format(name))

        new_item = config.put_config_item(**kwargs)
        return safe_json(new_item)
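# Hedged usage sketch for create_task. The task name, action name and all values are illustrative;
# the CONFIG_* keys are those documented in the docstring above, and the action name must refer to an
# action that actually exists in the installation.
new_task = create_task(
    context=None,
    **{configuration.CONFIG_TASK_NAME: "HypotheticalSnapshotTask",
       configuration.CONFIG_ACTION_NAME: "Ec2CreateSnapshot",    # assumed action name
       configuration.CONFIG_ENABLED: True,
       configuration.CONFIG_INTERVAL: "0 3 * * *",               # run daily at 03:00
       configuration.CONFIG_TIMEZONE: "UTC",
       configuration.CONFIG_REGIONS: ["us-east-1"],
       configuration.CONFIG_PARAMETERS: {}})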
def handle_request(self, use_custom_select=True):
    """
    Handles the CloudWatch rule event
    :return: Started tasks, if any, information
    """
    try:
        self._logger.info("Handling CloudWatch event {}", safe_json(self._event, indent=3))

        result = []
        start = datetime.now()

        dt = self._event_time()
        config_task = None

        source_resource_tags = None

        try:
            # for all event tasks in configuration
            for config_task in TaskConfiguration(context=self._context, logger=self._logger).get_tasks():
                self._logger.debug_enabled = config_task.get(handlers.TASK_DEBUG, False)

                if not self._event_triggers_task(task=config_task):
                    continue

                # tasks that can react to events with a wider resource scope than the actual resource causing the
                # event may have a filter that is used to filter based on the tags of the resource
                event_source_tag_filter = config_task.get(handlers.TASK_EVENT_SOURCE_TAG_FILTER, None)
                if event_source_tag_filter is not None:
                    if source_resource_tags is None:
                        # get the tags for the source resource of the event
                        session = services.get_session(self._role_executing_triggered_task, logger=self._logger)
                        if session is None:
                            self._logger.error(ERR_NO_SESSION_FOR_GETTING_TAGS)
                            continue
                        try:
                            source_resource_tags = self._source_resource_tags(session, config_task)
                        except Exception as ex:
                            self._logger.error(ERR_GETTING_EVENT_SOURCE_RESOURCE_TAGS, ex)
                            continue

                        self._logger.debug("Tags for event source resource are {}", source_resource_tags)

                    # apply filter to source resource tags
                    if not TagFilterExpression(event_source_tag_filter).is_match(source_resource_tags):
                        self._logger.debug("Tags of source resource do not match tag filter {}",
                                           event_source_tag_filter)
                        continue

                task_name = config_task[handlers.TASK_NAME]
                result.append(task_name)

                select_parameters = self._select_parameters(self._event_name(), config_task)
                if select_parameters is None:
                    continue

                self._logger.debug(DEBUG_EVENT, task_name, self._event_name(), select_parameters,
                                   self._event_account(), self._event_region(), safe_json(config_task, indent=3))

                # create an event for lambda function that scans for resources for this task
                lambda_event = {
                    handlers.HANDLER_EVENT_ACTION: handlers.HANDLER_ACTION_SELECT_RESOURCES,
                    handlers.HANDLER_EVENT_CUSTOM_SELECT: use_custom_select,
                    handlers.HANDLER_SELECT_ARGUMENTS: {
                        handlers.HANDLER_EVENT_REGIONS: [self._event_region()],
                        handlers.HANDLER_EVENT_ACCOUNT: self._event_account(),
                        handlers.HANDLER_EVENT_RESOURCE_NAME: config_task[handlers.TASK_RESOURCE_TYPE],
                    },
                    handlers.HANDLER_EVENT_SOURCE: "{}:{}:{}".format(self._handled_event_source,
                                                                     self._handled_detail_type,
                                                                     self._event_name()),
                    handlers.HANDLER_EVENT_TASK: config_task,
                    handlers.HANDLER_EVENT_TASK_DT: dt
                }

                for i in select_parameters:
                    lambda_event[handlers.HANDLER_SELECT_ARGUMENTS][i] = select_parameters[i]

                if self._event_resources() is not None:
                    self._logger.debug(DEBUG_EVENT_RESOURCES, safe_json(self._event_resources(), indent=3))
                    lambda_event[handlers.HANDLER_SELECT_RESOURCES] = self._event_resources()

                if not handlers.running_local(self._context):
                    # start lambda function to scan for task resources
                    payload = str.encode(safe_json(lambda_event))
                    client = get_client_with_retries("lambda", ["invoke"], context=self._context, logger=self._logger)
                    client.invoke_with_retries(FunctionName=self._context.function_name,
                                               InvocationType="Event",
                                               LogType="None",
                                               Payload=payload)
                else:
                    # or if not running in lambda environment pass event to main task handler
                    lambda_handler(lambda_event, None)

            return safe_dict({
                "datetime": datetime.now().isoformat(),
                "running-time": (datetime.now() - start).total_seconds(),
                "event-datetime": dt,
                "started-tasks": result
            })

        except ValueError as ex:
            self._logger.error(ERR_HANDLING_EVENT_IN_BASE_HANDLER, ex, safe_json(config_task, indent=2))

    finally:
        self._logger.flush()
def handle_request(self):
    """
    Handles the CloudWatch EC2 instance state change event
    :return: Started tasks, if any, information
    """

    def is_matching_event_state(event_state, ec2event):
        # "*" matches any state, otherwise the state must be in the comma separated list of states
        return ec2event == "*" or event_state in [s.strip() for s in ec2event.split(",")]

    try:
        result = []
        start = datetime.now()

        self._logger.info("Handler {}", self.__class__.__name__)

        state = self._event.get("detail", {}).get("state")
        if state is not None:
            state = state.lower()

        account = self._event["account"]
        region = self._event["region"]
        instance_id = self._event["detail"]["instance-id"]
        dt = self._event["time"]
        task = None

        try:
            # for all ec2 event tasks in configuration
            for task in [t for t in TaskConfiguration(context=self._context, logger=self._logger).get_tasks()
                         if t.get("events") is not None
                         and EC2_STATE_EVENT in t["events"]
                         and t.get("enabled", True)]:

                task_name = task["name"]
                ec2_event = task["events"][EC2_STATE_EVENT]

                if not is_matching_event_state(state, ec2_event):
                    continue

                result.append(task_name)

                self._logger.info(INFO_EVENT, task_name, state, instance_id, account, region,
                                  safe_json(task, indent=2))

                # create an event for lambda function that scans for resources for this task
                event = {
                    handlers.HANDLER_EVENT_ACTION: handlers.HANDLER_ACTION_SELECT_RESOURCES,
                    handlers.HANDLER_SELECT_ARGUMENTS: {
                        handlers.HANDLER_EVENT_REGIONS: [region],
                        handlers.HANDLER_EVENT_ACCOUNT: account,
                        "InstanceIds": [instance_id]
                    },
                    handlers.HANDLER_EVENT_SOURCE: EC2_STATE_EVENT,
                    handlers.HANDLER_EVENT_TASK: task,
                    handlers.HANDLER_EVENT_TASK_DT: dt
                }

                if self._context is not None:
                    # start lambda function to scan for task resources
                    payload = str.encode(safe_json(event))
                    client = get_client_with_retries("lambda", ["invoke"], context=self._context)
                    client.invoke_with_retries(FunctionName=self._context.function_name,
                                               Qualifier=self._context.function_version,
                                               InvocationType="Event",
                                               LogType="None",
                                               Payload=payload)
                else:
                    # or if not running in lambda environment pass event to main task handler
                    lambda_handler(event, None)

            return safe_dict({
                "datetime": datetime.now().isoformat(),
                "running-time": (datetime.now() - start).total_seconds(),
                "event-datetime": dt,
                "started-tasks": result
            })

        except ValueError as ex:
            self._logger.error("{}\n{}".format(ex, safe_json(task, indent=2)))

    finally:
        self._logger.flush()
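# For reference, a minimal sketch of the CloudWatch EC2 instance state-change event consumed by the
# handler above; only the fields the handler actually reads are shown, and the account id and instance
# id are illustrative placeholders.
example_ec2_state_event = {
    "detail-type": "EC2 Instance State-change Notification",
    "source": "aws.ec2",
    "account": "111122223333",
    "region": "us-east-1",
    "time": "2023-05-01T03:00:00Z",
    "detail": {
        "instance-id": "i-0123456789abcdef0",
        "state": "stopped"
    }
}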
def handle_request(self):
    """
    Handles the cloudwatch rule timer event
    :return: Started tasks, if any, information
    """
    try:
        started_tasks = []
        start = datetime.now()

        last_run_dt = self._get_last_run()

        self._logger.info("Handler {}", self.__class__.__name__)
        self._logger.info(INFO_LAST_SAVED, str(last_run_dt))

        if self.configuration_update:
            self._logger.info(INFO_CONFIG_RUN, self.updated_task)

        # test if we already executed in this minute
        current_dt = self._set_last_run()
        already_ran_this_minute = last_run_dt == current_dt
        if already_ran_this_minute and not self.configuration_update:
            self._logger.info(INFO_TASK_SCHEDULER_ALREADY_RAN)
        else:
            self._logger.info(INFO_CURRENT_SCHEDULING_DT, current_dt)

            task = None
            enabled_tasks = 0

            next_executed_task = None
            utc = pytz.timezone("UTC")

            try:
                for task in [t for t in TaskConfiguration(context=self._context, logger=self._logger).get_tasks()
                             if t.get(handlers.TASK_INTERVAL) is not None
                             and t.get(handlers.TASK_ENABLED, True)]:

                    enabled_tasks += 1

                    self._logger.debug_enabled = task[handlers.TASK_DEBUG]
                    task_name = task[handlers.TASK_NAME]

                    # timezone for specific task
                    task_timezone = pytz.timezone(task[handlers.TASK_TIMEZONE])

                    # create cron expression to test if task needs to be executed
                    task_cron_expression = CronExpression(expression=task[handlers.TASK_INTERVAL])

                    localized_last_run = last_run_dt.astimezone(task_timezone)
                    localized_current_dt = current_dt.astimezone(task_timezone)

                    next_execution = task_cron_expression.first_within_next(timedelta(hours=24), localized_current_dt)
                    next_execution_utc = next_execution.astimezone(utc) if next_execution else None

                    if next_execution_utc is not None:
                        if next_executed_task is None or next_execution_utc < next_executed_task[0]:
                            next_executed_task = (next_execution_utc, task)

                    if already_ran_this_minute:
                        continue

                    # test if task needs to be executed since last run of ops automator
                    execute_dt_since_last = task_cron_expression.last_since(localized_last_run, localized_current_dt)
                    if execute_dt_since_last is None:
                        if next_execution is not None:
                            next_execution = next_execution.astimezone(task_timezone)
                            self._logger.info(INFO_NEXT_EXECUTION, task_name, next_execution.isoformat(),
                                              task_timezone)
                        else:
                            self._logger.info(INFO_NO_NEXT_WITHIN, task_name)
                        continue

                    started_tasks.append(task_name)
                    self._logger.debug(INFO_SCHEDULED_TASK, task_name, execute_dt_since_last, task_timezone,
                                       str(safe_json(task, indent=2)))

                    # create an event for lambda function that starts execution by selecting resources for this task
                    self._execute_task(task, execute_dt_since_last)

                if started_tasks:
                    self._logger.info(INFO_STARTED_TASKS, enabled_tasks, ",".join(started_tasks))
                else:
                    self._logger.info(INFO_NO_TASKS_STARTED, enabled_tasks)

                self._set_next_schedule_event(current_dt, next_executed_task)

                running_time = float((datetime.now() - start).total_seconds())
                self._logger.info(INFO_RESULT, running_time)

                return safe_dict({
                    "datetime": datetime.now().isoformat(),
                    "running-time": running_time,
                    "event-datetime": current_dt.isoformat(),
                    "enabled_tasks": enabled_tasks,
                    "started-tasks": started_tasks
                })

            except ValueError as ex:
                self._logger.error("{}\n{}".format(ex, safe_json(task, indent=2)))

    finally:
        self._logger.flush()
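# The scheduling decision above comes down to: did the task's cron expression fire between the previous
# scheduler run and the current minute, evaluated in the task's own timezone? A minimal sketch of that
# check, using the croniter package as a stand-in for the project's CronExpression class; the expression,
# timezone and timestamps are illustrative only.
from datetime import datetime

import pytz
from croniter import croniter

task_timezone = pytz.timezone("Europe/Amsterdam")
expression = "0 3 * * *"                                     # daily at 03:00 in the task timezone

last_run = task_timezone.localize(datetime(2023, 5, 1, 2, 55))
current = task_timezone.localize(datetime(2023, 5, 1, 3, 10))

# most recent scheduled time before the current scheduler run
previous_fire = croniter(expression, current).get_prev(datetime)

# the task is started when a scheduled time falls in the window (last_run, current]
should_run = last_run < previous_fire <= current
print(should_run)                                            # True: 03:00 lies between 02:55 and 03:10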