# noinspection PyUnusedLocal
def process_and_select_resource(service, logger, resource_name, resource, context, task, task_assumed_role):
    """Resource-selection hook for the resize task.

    In "specified type" mode every instance is resized, so the resource is
    always kept. In step mode an instance is only selected when its tags
    match the scale-up or the scale-down tag filter; otherwise it is
    dropped (None is returned).
    """
    parameters = task.get(TASK_PARAMETERS, {})

    # specified-type mode: no tag based selection applies
    if parameters.get(PARAM_RESIZE_MODE) == RESIZE_BY_SPECIFIED_TYPE:
        return resource

    tags = resource.get("Tags", {})
    scale_up_str = parameters.get(PARAM_TAGFILTER_SCALE_UP)
    scale_down_str = parameters.get(PARAM_TAGFILTER_SCALE_DOWN)

    # the first filter (up, then down) that matches the instance tags selects it
    for filter_str in (scale_up_str, scale_down_str):
        if filter_str is not None and TagFilterExpression(filter_str).is_match(tags):
            return resource

    logger.debug(
        "Instance {} is not selected as tags {} do not match scale-up filter \"{}\" or scale-down filter \"{}\"",
        resource["InstanceId"], tags, scale_up_str, scale_down_str)
    return None
def delete_instance_snapshots_by_tags(self, tag_filter_expression):
    """Delete every RDS instance snapshot in this region whose tags match
    the given tag filter expression."""
    matcher = TagFilterExpression(tag_filter_expression)
    matching_ids = [
        snapshot["DBSnapshotIdentifier"]
        for snapshot in self.rds_service.describe(services.rds_service.DB_SNAPSHOTS,
                                                  region=self.region,
                                                  tags=True)
        if matcher.is_match(snapshot.get("Tags"))
    ]
    self.delete_instance_snapshots(matching_ids)
def delete_snapshots_by_tags(self, tag_filter_expression):
    """Delete every owned EBS snapshot in this region whose tags match the
    given tag filter expression."""
    matcher = TagFilterExpression(tag_filter_expression)
    matching_ids = [
        snapshot["SnapshotId"]
        for snapshot in self.ec2_service.describe(services.ec2_service.SNAPSHOTS,
                                                  region=self.region,
                                                  tags=True,
                                                  OwnerIds=["self"])
        if matcher.is_match(snapshot.get("Tags"))
    ]
    self.delete_snapshots(matching_ids)
def delete_images_by_tags(self, tag_filter_expression):
    """Delete every owned AMI in this region whose tags match the given tag
    filter expression."""
    matcher = TagFilterExpression(tag_filter_expression)
    matching_ids = [
        image["ImageId"]
        for image in self.ec2_service.describe(services.ec2_service.IMAGES,
                                               region=self.region,
                                               tags=True,
                                               Owners=["self"])
        if matcher.is_match(image.get("Tags"))
    ]
    self.delete_images(matching_ids)
def _new_tags_triggers_task(self, task):
    """Decide whether the tag-change event held in self._event should trigger
    the given task.

    With a task tag filter the new tag set must match that filter; without
    one, the Ops Automator task-list tag must be among the changed keys and
    its new value must contain this task's name.
    """
    detail = self._event.get("detail", {})
    changed_tag_keys = set(detail.get("changed-tag-keys", []))
    tags = detail.get("tags", {})

    task_tag_filter_str = task.get(handlers.TASK_TAG_FILTER, None)

    if task_tag_filter_str is not None:
        # a tag filter is configured for resource selection: the new tag set decides
        if TagFilterExpression(task_tag_filter_str).is_match(tags):
            self._logger.debug("Tags {} do match tag filter {}", tags,
                               task_tag_filter_str)
            return True
        self._logger.debug("Tags {} do not match tag filter {}", tags,
                           task_tag_filter_str)
        return False

    # no tag filter: the tag holding the task list must itself have changed
    task_tag_name = os.getenv(handlers.ENV_AUTOMATOR_TAG_NAME, "")
    if task_tag_name not in changed_tag_keys:
        self._logger.debug("Value of task tag {} is not changed", task_tag_name)
        return False

    # and the new value of that tag must include this task's name
    task_tag_value = tags.get(task_tag_name, "")
    if task[handlers.TASK_NAME] not in tagging.split_task_list(task_tag_value):
        self._logger.debug(
            "Task name \"{}\" not in value \"{}\" of task task {}",
            task[handlers.TASK_NAME], task_tag_value, task_tag_name)
        return False

    return True
class Ec2ResizeInstanceAction(ActionEc2EventBase):
    """Ops Automator action that re-sizes EC2 instances.

    Two modes (PARAM_RESIZE_MODE):
    - RESIZE_BY_SPECIFIED_TYPE: set the instance to one of a list of types,
      trying alternatives when capacity for a type is not available.
    - RESIZE_BY_STEP: move the instance one step up or down a scaling range,
      driven by scale-up/scale-down tag filters.

    Running instances are stopped, resized and restarted; stopped instances
    are resized in place.
    """

    # action metadata consumed by the Ops Automator framework
    properties = {
        ACTION_TITLE: "EC2 Resize Instance",
        ACTION_VERSION: "1.0",
        ACTION_DESCRIPTION: "Re-sizes EC2 instances",
        ACTION_AUTHOR: "AWS",
        ACTION_ID: "cd198dac-d7b6-4992-b748-6f2a95a1a041",

        ACTION_SERVICE: "ec2",
        ACTION_RESOURCES: services.ec2_service.INSTANCES,
        ACTION_AGGREGATION: ACTION_AGGREGATION_RESOURCE,

        ACTION_SELECT_SIZE: ACTION_SIZE_ALL_WITH_ECS,
        ACTION_COMPLETION_TIMEOUT_MINUTES: 15,
        ACTION_ALLOW_TAGFILTER_WILDCARD: False,

        # JMESPath expression selecting only running or stopped instances
        ACTION_SELECT_EXPRESSION:
            "Reservations[*].Instances[]." +
            "{State:State.Name,InstanceId:InstanceId, CurrentState:CurrentState,InstanceType:InstanceType, Tags:Tags}" +
            "|[?contains(['running','stopped'],State)]",

        # the action can also be triggered by instance tag-change events
        ACTION_EVENTS: {
            handlers.ec2_tag_event_handler.EC2_TAG_EVENT_SOURCE: {
                handlers.TAG_CHANGE_EVENT: [
                    handlers.ec2_tag_event_handler.EC2_CHANGED_INSTANCE_TAGS_EVENT
                ]
            }
        },

        ACTION_PARAMETERS: {
            PARAM_RESIZED_INSTANCE_TAGS: {
                PARAM_DESCRIPTION: PARAM_DESC_RESIZED_INSTANCE_TAGS,
                PARAM_TYPE: type(""),
                PARAM_REQUIRED: False,
                PARAM_LABEL: PARAM_LABEL_RESIZED_INSTANCE_TAGS
            },
            PARAM_INSTANCE_TYPES: {
                PARAM_DESCRIPTION: PARAM_DESC_INSTANCE_TYPES,
                PARAM_TYPE: type([]),
                PARAM_LABEL: PARAM_LABEL_INSTANCE_TYPES
            },
            PARAM_RESIZE_MODE: {
                PARAM_DESCRIPTION: PARAM_DESC_RESIZE_MODE.format(RESIZE_BY_SPECIFIED_TYPE, RESIZE_BY_STEP),
                PARAM_TYPE: str,
                PARAM_REQUIRED: False,
                PARAM_DEFAULT: RESIZE_BY_SPECIFIED_TYPE,
                PARAM_ALLOWED_VALUES: [RESIZE_BY_SPECIFIED_TYPE, RESIZE_BY_STEP],
                PARAM_LABEL: PARAM_LABEL_RESIZE_MODE
            },
            # type assumed when the current type is not in the scaling range
            PARAM_ASSUMED_TYPE: {
                PARAM_DESCRIPTION: PARAM_DESC_ASSUMED_TYPE,
                PARAM_TYPE: str,
                PARAM_REQUIRED: False,
                PARAM_ALLOWED_VALUES: services.ec2_service.Ec2Service.valid_instance_types(),
                PARAM_LABEL: PARAM_LABEL_ASSUMED_TYPE
            },
            PARAM_TRY_NEXT_IN_RANGE: {
                PARAM_DESCRIPTION: PARAM_DESC_TRY_NEXT_IN_RANGE,
                PARAM_TYPE: bool,
                PARAM_REQUIRED: False,
                PARAM_DEFAULT: True,
                PARAM_LABEL: PARAM_LABEL_TRY_NEXT_IN_RANGE
            },
            PARAM_SCALING_RANGE: {
                PARAM_DESCRIPTION: PARAM_DESC_SCALING_RANGE,
                PARAM_TYPE: list,
                PARAM_REQUIRED: False,
                PARAM_LABEL: PARAM_LABEL_SCALING_RANGE
            },
            PARAM_TAGFILTER_SCALE_UP: {
                PARAM_DESCRIPTION: PARAM_DESC_TAGFILTER_SCALE_UP,
                PARAM_TYPE: str,
                PARAM_REQUIRED: False,
                PARAM_LABEL: PARAM_LABEL_TAGFILTER_SCALE_UP
            },
            PARAM_TAGFILTER_SCALE_DOWN: {
                PARAM_DESCRIPTION: PARAM_DESC_TAGFILTER_SCALE_DOWN,
                PARAM_TYPE: str,
                PARAM_REQUIRED: False,
                PARAM_LABEL: PARAM_LABEL_TAGFILTER_SCALE_DOWN
            },
            PARAM_TEST_UNAVAILABLE_TYPES: {
                # This is a hidden test parameter and is used to simulate situations where instance types are not available
                PARAM_DESCRIPTION: "",
                PARAM_TYPE: type([]),
                PARAM_REQUIRED: False,
                PARAM_LABEL: "",
                PARAM_HIDDEN: True
            }
        },

        ACTION_PARAMETER_GROUPS: [
            {
                ACTION_PARAMETER_GROUP_TITLE: GROUP_TITLE_INSTANCE_OPTIONS,
                ACTION_PARAMETER_GROUP_LIST: [PARAM_RESIZED_INSTANCE_TAGS,
                                              PARAM_RESIZE_MODE],
            },
            {
                ACTION_PARAMETER_GROUP_TITLE: GROUP_TITLE_RESIZE_BY_SPECIFIED_TYPE_OPTIONS,
                ACTION_PARAMETER_GROUP_LIST: [
                    PARAM_INSTANCE_TYPES,
                ],
            },
            {
                ACTION_PARAMETER_GROUP_TITLE: GROUP_TITLE_STEP_RESIZING_OPTIONS,
                ACTION_PARAMETER_GROUP_LIST: [
                    PARAM_SCALING_RANGE,
                    PARAM_ASSUMED_TYPE,
                    PARAM_TRY_NEXT_IN_RANGE,
                    PARAM_TAGFILTER_SCALE_UP,
                    PARAM_TAGFILTER_SCALE_DOWN
                ],
            },
        ],

        ACTION_PERMISSIONS: [
            "ec2:StartInstances",
            "ec2:StopInstances",
            "ec2:DescribeTags",
            "ec2:ModifyInstanceAttribute",
            "ec2:CreateTags",
            "ec2:DeleteTags"
        ],
    }

    def __init__(self, action_arguments, action_parameters):
        """Capture the selected instance and resolve all action parameters."""
        ActionEc2EventBase.__init__(self, action_arguments, action_parameters)

        # the single selected instance resource (ACTION_AGGREGATION_RESOURCE)
        self.instance = self._resources_
        self.instance_id = self.instance["InstanceId"]

        # lazily created boto client/service wrappers
        self._ec2_client = None
        self._ec2_service = None

        self.instance_type_index = -1
        self.result = {
            "account": self._account_,
            "region": self._region_,
            "instance": self.instance_id,
            "task": self._task_
        }

        # instance type, list if alternatives must be retried if the type is not available
        self.new_instance_types = [
            s.strip() for s in self.get(PARAM_INSTANCE_TYPES, [])
        ]
        self.resize_mode = self.get(PARAM_RESIZE_MODE)
        # NOTE(review): instance_type_index and result are re-assigned here with the
        # same values as above; the duplication looks redundant — confirm before removing
        self.instance_type_index = -1
        self.scaling_range = [
            t.strip() for t in self.get(PARAM_SCALING_RANGE, [])
        ]
        # only meaningful in step mode; defaults to True otherwise
        self.next_type_in_range = self.get(
            PARAM_TRY_NEXT_IN_RANGE, True) if self.resize_mode == RESIZE_BY_STEP else True

        self.scale_up_str = self.get(PARAM_TAGFILTER_SCALE_UP)
        self.scale_up_tagfilter = TagFilterExpression(
            self.scale_up_str) if self.scale_up_str is not None else None
        self.scale_down_str = self.get(PARAM_TAGFILTER_SCALE_DOWN)
        self.scale_down_tagfilter = TagFilterExpression(
            self.scale_down_str) if self.scale_down_str is not None else None

        self.assumed_instance_type = self.get(PARAM_ASSUMED_TYPE)

        # state filled in during execution
        self.scaling_range_index = None
        self.scale_up = None
        self.scale_down = None
        self.original_type = None
        self.new_instance_type = None

        self.result = {
            "account": self._account_,
            "region": self._region_,
            "instance": self.instance_id,
            "task": self._task_
        }

    @staticmethod
    def action_logging_subject(arguments, _):
        """Build the log-stream subject string for this action instance."""
        instance = arguments[ACTION_PARAM_RESOURCES]
        instance_id = instance["InstanceId"]
        account = instance["AwsAccount"]
        region = instance["Region"]
        return "{}-{}-{}-{}".format(account, region, instance_id,
                                    log_stream_date())

    @staticmethod
    def action_validate_parameters(parameters, task_settings, logger):
        """Validate the task parameters for the selected resize mode.

        Raises a value error when types/ranges/filters are missing or invalid;
        returns the (unmodified) parameters on success.
        """
        mode = parameters.get(PARAM_RESIZE_MODE)

        if mode == RESIZE_BY_SPECIFIED_TYPE:
            # at least one instance type must be specified, and all must be valid
            instance_types = parameters.get(PARAM_INSTANCE_TYPES, [])
            if len(instance_types) == 0:
                # NOTE(review): formatting PARAM_RESIZE_MODE (a parameter name) with mode
                # looks suspicious — confirm the intended message arguments
                raise_value_error(ERR_NO_TYPE_IN_SPECIFIED_MODE,
                                  PARAM_RESIZE_MODE.format(mode))
            valid_types = services.ec2_service.Ec2Service.valid_instance_types()
            if valid_types not in [None, []]:
                for inst_type in [e.strip() for e in instance_types]:
                    if inst_type not in valid_types:
                        raise_value_error(
                            ERR_INVALID_INSTANCE_TYPE.format(inst_type))
        else:
            # step mode: a scaling range of at least two types is required
            scaling_range = parameters.get(PARAM_SCALING_RANGE, [])
            if len(scaling_range) < 2:
                raise_value_error(
                    ERR_AT_LEAST_TWO_TYPES.format(PARAM_SCALING_RANGE))

            # the assumed type, when set, must be part of the scaling range
            assumed_type = parameters.get(PARAM_ASSUMED_TYPE)
            if assumed_type is not None:
                if assumed_type not in scaling_range:
                    raise_value_error(ERR_ASSUMED_NOT_IN_SCALING_RANGE,
                                      PARAM_ASSUMED_TYPE, PARAM_SCALING_RANGE)

            # at least one of the scale-up/scale-down filters must be present
            scale_up_filter = parameters.get(PARAM_TAGFILTER_SCALE_UP)
            scale_down_filter = parameters.get(PARAM_TAGFILTER_SCALE_DOWN)
            if scale_up_filter is None and scale_down_filter is None:
                raise_value_error(ERR_BOTH_SCALING_FILTERS_EMPTY,
                                  PARAM_TAGFILTER_SCALE_UP,
                                  PARAM_TAGFILTER_SCALE_DOWN, mode)

        ActionEc2EventBase.check_tag_filters_and_tags(
            parameters, task_settings, [PARAM_RESIZED_INSTANCE_TAGS], logger)

        return parameters

    # noinspection PyUnusedLocal
    @staticmethod
    def process_and_select_resource(service, logger, resource_name, resource,
                                    context, task, task_assumed_role):
        """Keep an instance only when it is eligible for resizing.

        Specified-type mode keeps every instance; step mode requires a match
        on the scale-up or scale-down tag filter.
        """
        parameters = task.get(TASK_PARAMETERS, {})

        if parameters.get(PARAM_RESIZE_MODE) == RESIZE_BY_SPECIFIED_TYPE:
            return resource

        tags = resource.get("Tags", {})

        scale_up_str = parameters.get(PARAM_TAGFILTER_SCALE_UP)
        scale_up_filter = TagFilterExpression(
            scale_up_str) if scale_up_str is not None else None
        if scale_up_filter is not None and scale_up_filter.is_match(tags):
            return resource

        scale_down_str = parameters.get(PARAM_TAGFILTER_SCALE_DOWN)
        scale_down_filter = TagFilterExpression(
            scale_down_str) if scale_down_str is not None else None
        if scale_down_filter is not None and scale_down_filter.is_match(tags):
            return resource

        logger.debug(
            "Instance {} is not selected as tags {} do not match scale-up filter \"{}\" or scale-down filter \"{}\"",
            resource["InstanceId"], tags, scale_up_str, scale_down_str)
        return None

    def _get_instance(self):
        """Fetch the current state/type/tags of the instance being resized."""
        return self.ec2_service.get(
            services.ec2_service.INSTANCES,
            InstanceIds=[self.instance_id],
            region=self._region_,
            select="Reservations[*].Instances[].{"
                   "Tags:Tags,"
                   "StateName:State.Name,"
                   "StateCode:State.Code,"
                   "StateStateReasonMessage:StateReason.Message,"
                   "InstanceType:InstanceType,"
                   "InstanceId:InstanceId}")

    @property
    def ec2_client(self):
        """Lazily created EC2 client with retry wrappers for the used methods."""
        if self._ec2_client is None:
            methods = [
                "start_instances",
                "stop_instances",
                "create_tags",
                "delete_tags",
                "describe_instances",
                "modify_instance_attribute"
            ]
            self._ec2_client = get_client_with_retries("ec2",
                                                       methods=methods,
                                                       region=self._region_,
                                                       session=self._session_,
                                                       logger=self._logger_)
        return self._ec2_client

    @property
    def ec2_service(self):
        """Lazily created EC2 service wrapper with the default retry strategy."""
        if self._ec2_service is None:
            self._ec2_service = services.create_service(
                "ec2",
                session=self._session_,
                service_retry_strategy=get_default_retry_strategy(
                    "ec2", context=self._context_))
        return self._ec2_service

    def is_completed(self, start_data):
        """Completion check run after execute().

        Returns the action result when the instance has reached its final
        state (tags applied/removed as needed), or None to check again later.
        """

        def task_is_triggered_by_tag_event():
            # True when this task is (also) triggered by instance tag-change events
            task_change_events = self._events_.get(
                handlers.ec2_tag_event_handler.EC2_TAG_EVENT_SOURCE,
                {}).get(handlers.TAG_CHANGE_EVENT, [])
            return handlers.ec2_tag_event_handler.EC2_CHANGED_INSTANCE_TAGS_EVENT in task_change_events

        def tags_to_delete():
            # instance tags used in the up/down filters must be removed after
            # the resize to avoid re-triggering the task
            tags = {}
            tags_on_instance = self.instance.get("Tags", {})
            for t in list(tags_on_instance.keys()):
                if (self.scale_up_tagfilter and t in self.scale_up_tagfilter.get_filter_keys()) or \
                        (self.scale_down_tagfilter and t in self.scale_down_tagfilter.get_filter_keys()):
                    self._logger_.info(
                        INF_REMOVE_TAG.format({t: tags_on_instance[t]},
                                              self.instance_id))
                    tags[t] = tagging.TAG_DELETE
            return tags

        def delete_up_down_filter_tags():
            # actually delete the tags collected by tags_to_delete
            tags = tags_to_delete()
            if len(tags) > 0:
                tagging.set_ec2_tags(ec2_client=self.ec2_client,
                                     tags=tags,
                                     can_delete=True,
                                     logger=self._logger_,
                                     resource_ids=[self.instance_id])

        def set_tags_on_resized_instance(new_instance_type, original_type):
            # tags set by action
            tags = self.build_tags_from_template(
                parameter_name=PARAM_RESIZED_INSTANCE_TAGS,
                tag_variables={
                    TAG_PLACEHOLDER_NEW_INSTANCE_TYPE: new_instance_type,
                    TAG_PLACEHOLDER_ORG_INSTANCE_TYPE: original_type
                })

            try:
                # if task is triggered by tagging event
                if task_is_triggered_by_tag_event():
                    # up or down tags filters should not match new tags as it would re-trigger execution of the task
                    if self.resize_mode == RESIZE_BY_STEP:
                        for t in list(tags.keys()):
                            # remove tags that match up or down tag filters
                            if (self.scale_up_tagfilter and t in self.scale_up_tagfilter.get_filter_keys()) or \
                                    (self.scale_down_tagfilter and t in self.scale_down_tagfilter.get_filter_keys()):
                                self._logger_.info(
                                    INF_TAGS_NOT_SET_STEP.format(
                                        {t: tags[t]}, self.instance_id))
                                del tags[t]
                    tags.update(tags_to_delete())
                self.set_ec2_instance_tags_with_event_loop_check(
                    client=self.ec2_client,
                    instance_ids=[self.instance_id],
                    tags_to_set=tags)
            except Exception as tag_ex:
                raise_exception(ERR_SET_TAGS, self.instance_id, tag_ex)

        resized = not start_data.get("not-resized", False)
        need_start = start_data.get("instance-running", True)

        # instance was stopped and kept its type: only clean up filter tags
        if not resized and not need_start:
            delete_up_down_filter_tags()
            self._logger_.info(INF_STOPPED_INSTANCE, self.instance_id)
            return self.result

        # instance was stopped and resized in place: tag it and done
        if not need_start and resized:
            set_tags_on_resized_instance(
                start_data["new-instance-type"],
                start_data.get("org-instance-type", ""))
            return self.result

        # get current state of instance
        instance = self._get_instance()
        self._logger_.debug("Instance data is {}", safe_json(instance,
                                                             indent=3))
        # only the low byte of the state code is significant
        state_code = instance["StateCode"] & 0xFF

        # resized instance is running, done...
        if state_code == EC2_STATE_RUNNING:
            # instance is running
            self._logger_.info(INF_INSTANCE_RUNNING, self.instance_id)
            if resized:
                set_tags_on_resized_instance(
                    instance["InstanceType"],
                    start_data.get("org-instance-type", ""))
            else:
                delete_up_down_filter_tags()
            return self.result

        # in pending state, wait for next completion check
        if state_code == EC2_STATE_PENDING:
            return None

        raise_exception(ERR_INSTANCE_NOT_IN_STARTING_STATE, self.instance_id,
                        instance)

    @classmethod
    def is_in_starting_or_running_state(cls, state):
        """True when the (masked) state code is a starting/running state."""
        return (state & 0xFF) in EC2_STARTING_STATES if state is not None else False

    @classmethod
    def is_in_stopping_or_stopped_state(cls, state):
        """True when the (masked) state code is a stopping/stopped state."""
        return (state & 0xFF) in EC2_STOPPING_STATES

    @classmethod
    def insufficient_capacity(cls, ex):
        """True when the exception is a ClientError signalling no capacity
        for the requested instance type."""
        return type(ex).__name__ == "ClientError" and ex.response.get(
            "Error", {}).get("Code", None) == INSUFFICIENT_CAPACITY

    def _set_new_instance_type(self):
        """Compute self.new_instance_type for the current attempt.

        Specified-type mode walks the configured list of types; step mode
        moves one position up or down the scaling range depending on which
        tag filter matches the instance tags.
        """
        if self.resize_mode == RESIZE_BY_SPECIFIED_TYPE:
            # try the next alternative; fall back to the original type when
            # all alternatives are exhausted
            self.instance_type_index += 1
            if self.instance_type_index >= len(self.new_instance_types):
                self.new_instance_type = self.original_type
                return
            self.new_instance_type = self.new_instance_types[
                self.instance_type_index]
            return

        current_type = self.instance["InstanceType"]
        instance_tags = self.instance.get("Tags", {})

        if self.scaling_range_index is None:
            # first attempt: locate the current type in the scaling range
            if current_type not in self.scaling_range:
                self._logger_.info(INF_NOT_IN_SCALING_RANGE, current_type,
                                   ", ".join(self.scaling_range))
                if self.assumed_instance_type is None:
                    # cannot step without a position in the range
                    self._logger_.error(ERR_NOT_IN_RANGE_NO_ASSUMED_TYPE,
                                        current_type,
                                        ", ".join(self.scaling_range))
                    self.new_instance_type = current_type
                    return
                current_type = self.assumed_instance_type
                self._logger_.info(INF_USE_ASSUMED_TYPE,
                                   self.assumed_instance_type)
            instance_tags = self.instance.get("Tags", {})
            # which direction do the tag filters point to?
            self.scale_up = self.scale_up_tagfilter is not None and self.scale_up_tagfilter.is_match(
                instance_tags)
            self.scale_down = self.scale_down_tagfilter is not None and self.scale_down_tagfilter.is_match(
                instance_tags)
            if self.scale_up and self.scale_down:
                # both directions match: ambiguous, keep the current type
                self._logger_.warning(WARN_BOTH_UP_DOWN, self.scale_up_str,
                                      self.scale_down_str, instance_tags,
                                      self.instance_id)
                self.new_instance_type = current_type
                return
            self.scaling_range_index = self.scaling_range.index(current_type)
        elif not self.next_type_in_range:
            # retrying is disabled: keep the current type
            self.new_instance_type = current_type
            return

        if self.scale_up:
            self.scaling_range_index += 1
            if self.scaling_range_index >= len(self.scaling_range):
                # already at the largest type in the range
                self._logger_.warning(WARM_MAX_SIZE, self.instance_id,
                                      current_type,
                                      ", ".join(self.scaling_range))
                self.new_instance_type = current_type
            else:
                self.new_instance_type = self.scaling_range[
                    self.scaling_range_index]
            return

        if self.scale_down:
            self.scaling_range_index -= 1
            if self.scaling_range_index < 0:
                # already at the smallest type in the range
                self._logger_.warning(WARM_MIN_SIZE, self.instance_id,
                                      current_type,
                                      ", ".join(self.scaling_range))
                self.new_instance_type = current_type
            else:
                self.new_instance_type = self.scaling_range[
                    self.scaling_range_index]
            return

        # neither filter matched: nothing to do
        self._logger_.info(INF_NO_TAG_MATCH_NO_REPLACE, self.scale_up_str,
                           self.scale_down_str, instance_tags,
                           self.instance_id, current_type)
        self.new_instance_type = current_type

    def _resize_instance(self):
        """Set the instance type to self.new_instance_type (no-op when the
        instance already has that type)."""
        if self._get_instance()["InstanceType"] != self.new_instance_type:
            self._logger_.info("Setting instance size of instance {} to {}",
                               self.instance_id, self.new_instance_type)
            try:
                self.ec2_client.modify_instance_attribute_with_retries(
                    InstanceId=self.instance_id,
                    InstanceType={"Value": self.new_instance_type})
            except Exception as ex:
                self._logger_.error(ERR_INSTANCE_RESIZING, self.instance_id,
                                    self.new_instance_type, ex)

    def _stop_instance(self):
        """Stop the instance and wait (polling) until it is stopped or the
        action is about to time out."""
        try:
            self._logger_.info(INF_STOPPING, self.instance_id)
            self.ec2_client.stop_instances_with_retries(
                InstanceIds=[self.instance_id])
        except Exception as ex:
            raise_exception(ERR_STOP_RESIZING, self.instance_id, ex)

        # wait for instance to stop, or until is signaled it is about to timeout
        while not self.time_out():
            time.sleep(10)
            state = self._get_instance()["StateCode"] & 0xFF
            if state == EC2_STATE_STOPPED:
                break

        if self.time_out():
            raise_exception(ERR_INSTANCE_STOP_TIMEOUT, self.instance_id)

    def _restart_instance(self):
        """Start the (resized) instance and verify it enters a starting or
        running state."""
        # for testing the parameter PARAM_TEST_UNAVAILABLE_TYPES can be used to simulate a InsufficientInstanceCapacity
        self._test_simulate_insufficient_instance_capacity()

        self.ec2_client.start_instances_with_retries(
            InstanceIds=[self.instance_id])

        with Timer(timeout_seconds=60, start=True) as t:
            started_instance = self._get_instance()
            # get state of started instance
            current_state = started_instance["StateCode"]
            if self.is_in_starting_or_running_state(current_state):
                # instance is starting
                return
            else:
                if t.timeout:
                    self._logger_.info(ERR_INSTANCE_NOT_IN_STARTING_STATE,
                                       self.instance_id, current_state)
                raise_exception(ERR_INSTANCE_NOT_IN_STARTING_STATE,
                                self.instance_id, current_state)

    def _test_simulate_insufficient_instance_capacity(self):
        """Raise a simulated InsufficientInstanceCapacity ClientError when the
        new type is listed in the hidden test parameter."""
        if self.new_instance_type in self.get(PARAM_TEST_UNAVAILABLE_TYPES,
                                              []):
            raise ClientError(
                {
                    "Error": {
                        "Code": INSUFFICIENT_CAPACITY,
                        "Message": "Simulated {} Exception".format(INSUFFICIENT_CAPACITY)
                    }
                },
                operation_name="start_instances")

    def execute(self):
        """Run the resize: stop (if running), change the type, restart,
        retrying alternative types when capacity is unavailable."""

        def should_resize_instance():
            # no resize needed when the computed type equals the current one
            if self.original_type == self.new_instance_type:
                self._logger_.info(INF_INSTANCE_NOT_RESIZED, self.instance_id,
                                   self.original_type)
                self.result["not-resized"] = True
                self.result[METRICS_DATA] = build_action_metrics(
                    action=self, ReplacedInstances=0)
                return False
            return True

        self._logger_.info("{}, version {}", self.properties[ACTION_TITLE],
                           self.properties[ACTION_VERSION])

        # get instance in it's current state
        instance = self._get_instance()
        if instance is None:
            raise_exception(ERR_NOT_LONGER_AVAILABLE, self.instance_id)

        instance_running = not self.is_in_stopping_or_stopped_state(
            instance["StateCode"])
        self.result["instance-running"] = instance_running

        self.original_type = instance["InstanceType"]
        self.result["org-instance-type"] = self.original_type

        self._set_new_instance_type()
        if not should_resize_instance():
            self.result["new-instance-type"] = self.new_instance_type
            return self.result

        self._logger_.info(INF_INSTANCE_RESIZE_ACTION, self.instance_id,
                           self.original_type, self.new_instance_type,
                           self._task_)

        # instance is running, stop it first so it can be resized
        if instance_running:
            self._stop_instance()

        self._resize_instance()

        if instance_running:
            while True:
                try:
                    self._restart_instance()
                    break
                except ClientError as ex:
                    # no capacity for this type
                    if self.insufficient_capacity(ex):
                        # try to set alternative type
                        self._logger_.warning(WARN_NO_TYPE_CAPACITY,
                                              self.new_instance_type)
                        self._set_new_instance_type()
                        if not should_resize_instance():
                            # resize to original type
                            self._resize_instance()
                            self._restart_instance()
                            self.result[
                                "new-instance-type"] = self.new_instance_type
                            return self.result
                        self._resize_instance()
                        self._logger_.info(INF_RETRY_START, self.instance_id,
                                           self.new_instance_type)
                except Exception as ex:
                    # any other start failure: revert to the original type,
                    # restart, and re-raise
                    self.new_instance_type = self.original_type
                    self._resize_instance()
                    self._restart_instance()
                    raise_exception(ERR_STARTING, self.instance_id, str(ex))

        self.result[METRICS_DATA] = build_action_metrics(
            action=self,
            ResizedInstances=1,
            OrgInstanceSize=self.original_type,
            NewInstanceSize=self.new_instance_type)

        self.result["new-instance-type"] = self.new_instance_type
        return self.result
def set_ec2_instance_tags_with_event_loop_check(self, instance_ids, tags_to_set, client=None, region=None):
    """Set tags on EC2 instances, skipping any instance where the new tags
    would re-trigger this task via its tag-change event (preventing an
    endless tagging/execution loop).

    :param instance_ids: ids of the instances to tag
    :param tags_to_set: tags to set; values equal to tagging.TAG_DELETE mark deletions
    :param client: optional pre-built EC2 client; created on demand otherwise
    :param region: optional region; falls back to the action's region
    """

    def get_instances():
        # fetch current tags for all target instances
        ec2 = services.create_service("ec2", session=self._session_,
                                      service_retry_strategy=get_default_retry_strategy("ec2", context=self._context_))

        return list(ec2.describe(services.ec2_service.INSTANCES,
                                 InstanceIds=instance_ids,
                                 region=region if region is not None else self._region_,
                                 tags=True,
                                 select="Reservations[*].Instances[].{Tags:Tags,InstanceId:InstanceId}"))

    def get_ec2_client():
        # use the supplied client when available, otherwise build one
        if client is not None:
            return client

        methods = ["create_tags", "delete_tags"]

        return get_client_with_retries("ec2",
                                       methods=methods,
                                       region=region,
                                       session=self._session_,
                                       logger=self._logger_)

    try:
        if len(tags_to_set) > 0:
            tagged_instances = instance_ids[:]

            # before setting the tags check if these tags won't trigger a new execution of the task causing a loop
            task_events = self.get(ACTION_PARAM_EVENTS, {})
            task_change_events = task_events.get(handlers.ec2_tag_event_handler.EC2_TAG_EVENT_SOURCE, {}).get(
                handlers.TAG_CHANGE_EVENT, [])

            if handlers.ec2_tag_event_handler.EC2_CHANGED_INSTANCE_TAGS_EVENT in task_change_events:

                tag_name = os.getenv(handlers.ENV_AUTOMATOR_TAG_NAME)
                tag_filter_str = self.get(ACTION_PARAM_TAG_FILTER, None)
                tag_filter = TagFilterExpression(tag_filter_str) if tag_filter_str not in ["", None, "None"] else None

                for instance in get_instances():
                    # tags currently on instance
                    instance_tags = instance.get("Tags", {})

                    # tags that have updated values when setting the tags
                    deleted_tags = {t: tags_to_set[t] for t in tags_to_set if
                                    tags_to_set[t] == tagging.TAG_DELETE and t in instance_tags}
                    new_tags = {t: tags_to_set[t] for t in tags_to_set if
                                t not in instance_tags and tags_to_set[t] != tagging.TAG_DELETE}
                    updated_tags = {t: tags_to_set[t] for t in tags_to_set if
                                    tags_to_set[t] != tagging.TAG_DELETE and
                                    t in instance_tags and instance_tags[t] != tags_to_set[t]}
                    updated_tags.update(new_tags)

                    # if there are updates
                    if any([len(t) > 0 for t in [new_tags, updated_tags, deleted_tags]]):

                        # this will be the new set of tags for the instance
                        updated_instance_tags = copy.deepcopy(instance_tags)
                        for t in deleted_tags:
                            del updated_instance_tags[t]
                        for t in updated_tags:
                            updated_instance_tags[t] = updated_tags[t]

                        # test if we have a tag filter and if the filter matches the new tags
                        if tag_filter is not None:
                            updated_tags_used_in_filter = set(updated_tags).intersection(tag_filter.get_filter_keys())
                            # tags updated that are in the tag filter
                            if len(updated_tags_used_in_filter) > 0:
                                # test if updated tags trigger the task
                                if tag_filter.is_match(updated_instance_tags):
                                    self._logger_.warning(WARN_LOOP_TAG_TAGFILTER, tags_to_set, tag_filter_str,
                                                          instance["InstanceId"])
                                    tagged_instances.remove(instance["InstanceId"])

                        # if no tag filter then check if the tag with the Ops Automator tasks does contain the name of the task
                        else:
                            task_list = updated_instance_tags.get(tag_name, "")
                            if tag_name in updated_tags and self._task_ in tagging.split_task_list(task_list):
                                self._logger_.warning(WARN_LOOP_TAG, tags_to_set, task_list, tag_name,
                                                      instance["InstanceId"])
                                tagged_instances.remove(instance["InstanceId"])

            # tag only the instances that passed the loop check
            if len(tagged_instances) > 0:
                tagging.set_ec2_tags(ec2_client=get_ec2_client(),
                                     resource_ids=tagged_instances,
                                     tags=tags_to_set)
    except Exception as ex:
        self._logger_.error(ERR_SET_TAGS, ','.join(instance_ids), str(ex))
class Ec2CreateSnapshotAction(ActionEc2EventBase): properties = { ACTION_TITLE: "EC2 Create Snapshot", ACTION_VERSION: "1.2", ACTION_DESCRIPTION: "Creates snapshots for selected volumes of an EC2 Instance", ACTION_AUTHOR: "AWS", ACTION_ID: "444f070b-9302-4e67-989a-23e224518e87", ACTION_SERVICE: "ec2", ACTION_RESOURCES: services.ec2_service.INSTANCES, ACTION_AGGREGATION: ACTION_AGGREGATION_RESOURCE, ACTION_SELECT_EXPRESSION: "Reservations[*].Instances[].{InstanceId:InstanceId, Tags:Tags," "RootDeviceName:RootDeviceName,BlockDeviceMappings:BlockDeviceMappings, " "State:State.Name}|[?State!='terminated']", ACTION_COMPLETION_TIMEOUT_MINUTES: 60, ACTION_MIN_INTERVAL_MIN: 60, ACTION_EVENTS: { handlers.EC2_EVENT_SOURCE: { handlers.ec2_state_event_handler.EC2_STATE_NOTIFICATION: [handlers.ec2_state_event_handler.EC2_STATE_RUNNING, handlers.ec2_state_event_handler.EC2_STATE_STOPPED] }, handlers.ec2_tag_event_handler.EC2_TAG_EVENT_SOURCE: { handlers.TAG_CHANGE_EVENT: [ handlers.ec2_tag_event_handler.EC2_CHANGED_INSTANCE_TAGS_EVENT] } }, ACTION_SELECT_SIZE: [ACTION_SIZE_MEDIUM, ACTION_SIZE_LARGE, ACTION_SIZE_XLARGE, ACTION_SIZE_XXLARGE, ACTION_SIZE_XXXLARGE] + [ACTION_USE_ECS], ACTION_EXECUTE_SIZE: [ACTION_SIZE_MEDIUM], ACTION_COMPLETION_SIZE: [ACTION_SIZE_MEDIUM], ACTION_PARAMETERS: { PARAM_BACKUP_ROOT_DEVICE: { PARAM_DESCRIPTION: PARAM_DESC_BACKUP_ROOT_VOLUME, PARAM_TYPE: type(True), PARAM_REQUIRED: False, PARAM_DEFAULT: True, PARAM_LABEL: PARAM_LABEL_BACKUP_ROOT_VOLUME }, PARAM_SNAPSHOT_DESCRIPTION: { PARAM_DESCRIPTION: PARAM_DESC_SNAPSHOT_DESCRIPTION, PARAM_LABEL: PARAM_LABEL_SNAPSHOT_DESCRIPTION, PARAM_TYPE: str, PARAM_REQUIRED: False, }, PARAM_BACKUP_DATA_DEVICES: { PARAM_DESCRIPTION: PARAM_DESC_BACKUP_DATA_VOLUMES, PARAM_TYPE: type(True), PARAM_REQUIRED: False, PARAM_DEFAULT: True, PARAM_LABEL: PARAM_LABEL_BACKUP_DATA_VOLUMES }, PARAM_VOLUME_TAG_FILTER: { PARAM_DESCRIPTION: PARAM_DESC_VOLUME_TAG_FILTER, PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_DEFAULT: "", 
PARAM_LABEL: PARAM_LABEL_VOLUME_TAG_FILTER }, PARAM_COPIED_INSTANCE_TAGS: { PARAM_DESCRIPTION: PARAM_DESC_COPIED_INSTANCE_TAGS, PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_LABEL: PARAM_LABEL_COPIED_INSTANCE_TAGS }, PARAM_COPIED_VOLUME_TAGS: { PARAM_DESCRIPTION: PARAM_DESC_COPIED_VOLUME_TAGS, PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_LABEL: PARAM_LABEL_COPIED_VOLUME_TAGS }, PARAM_SNAPSHOT_TAGS: { PARAM_DESCRIPTION: PARAM_DESC_SNAPSHOT_TAGS, PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_LABEL: PARAM_LABEL_SNAPSHOT_TAGS }, PARAM_VOLUME_TAGS: { PARAM_DESCRIPTION: PARAM_DESC_VOLUME_TAGS, PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_LABEL: PARAM_LABEL_VOLUME_TAGS }, PARAM_SET_SNAPSHOT_NAME: { PARAM_DESCRIPTION: PARAM_DESC_SET_SNAPSHOT_NAME, PARAM_TYPE: type(True), PARAM_REQUIRED: False, PARAM_DEFAULT: True, PARAM_LABEL: PARAM_LABEL_SET_SNAPSHOT_NAME }, PARAM_SNAPSHOT_NAME_PREFIX: { PARAM_DESCRIPTION: PARAM_DESC_SNAPSHOT_NAME_PREFIX, PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_LABEL: PARAM_LABEL_SNAPSHOT_NAME_PREFIX }, PARAM_NAME: { PARAM_DESCRIPTION: PARAM_DESC_NAME, PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_LABEL: PARAM_LABEL_NAME }, PARAM_ACCOUNTS_VOLUME_CREATE_PERMISSIONS: { PARAM_DESCRIPTION: PARAM_DESC_ACCOUNTS_VOLUME_CREATE_PERMISSIONS, PARAM_TYPE: type([]), PARAM_REQUIRED: False, PARAM_LABEL: PARAM_LABEL_ACCOUNTS_VOLUME_CREATE_PERMISSIONS }, PARAM_SHARED_ACCOUNT_TAGGING_ROLENAME: { PARAM_DESCRIPTION: PARAM_DESC_SHARED_ACCOUNT_TAGGING_ROLENAME.format(handlers.default_rolename_for_stack()), PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_LABEL: PARAM_LABEL_SHARED_ACCOUNT_TAGGING_ROLENAME }, PARAM_TAG_SHARED_SNAPSHOTS: { PARAM_DESCRIPTION: PARAM_DESC_TAG_SHARED_SNAPSHOTS, PARAM_TYPE: bool, PARAM_REQUIRED: False, PARAM_DEFAULT: False, PARAM_LABEL: PARAM_LABEL_TAG_SHARED_SNAPSHOTS }, PARAM_INSTANCE_TAGS: { PARAM_DESCRIPTION: PARAM_DESC_INSTANCE_TAGS, PARAM_TYPE: type(""), PARAM_REQUIRED: False, PARAM_LABEL: 
PARAM_LABEL_INSTANCE_TAGS
            }
        },
        # Grouping of the action parameters for presentation purposes
        ACTION_PARAMETER_GROUPS: [
            {
                ACTION_PARAMETER_GROUP_TITLE: GROUP_TITLE_SNAPSHOT_OPTIONS,
                ACTION_PARAMETER_GROUP_LIST: [
                    PARAM_BACKUP_ROOT_DEVICE,
                    PARAM_BACKUP_DATA_DEVICES,
                    PARAM_VOLUME_TAG_FILTER
                ],
            },
            {
                ACTION_PARAMETER_GROUP_TITLE: GROUP_TITLE_NAMING,
                ACTION_PARAMETER_GROUP_LIST: [
                    PARAM_SET_SNAPSHOT_NAME,
                    PARAM_SNAPSHOT_NAME_PREFIX,
                    PARAM_NAME,
                    PARAM_SNAPSHOT_DESCRIPTION
                ]
            },
            {
                ACTION_PARAMETER_GROUP_TITLE: GROUP_TITLE_TAGGING,
                ACTION_PARAMETER_GROUP_LIST: [
                    PARAM_COPIED_INSTANCE_TAGS,
                    PARAM_COPIED_VOLUME_TAGS,
                    PARAM_SNAPSHOT_TAGS,
                    PARAM_VOLUME_TAGS,
                    PARAM_INSTANCE_TAGS
                ],
            },
            {
                ACTION_PARAMETER_GROUP_TITLE: GROUP_TITLE_SHARING,
                ACTION_PARAMETER_GROUP_LIST: [
                    PARAM_ACCOUNTS_VOLUME_CREATE_PERMISSIONS,
                    PARAM_TAG_SHARED_SNAPSHOTS,
                    PARAM_SHARED_ACCOUNT_TAGGING_ROLENAME
                ],
            },
        ],
        # IAM permissions required to execute this action
        ACTION_PERMISSIONS: [
            "ec2:CreateSnapshot",
            "ec2:DescribeTags",
            "ec2:DescribeInstances",
            "ec2:DescribeSnapshots",
            "ec2:DescribeVolumes",
            "ec2:ModifySnapshotAttribute",
            "ec2:CreateTags",
            "ec2:DeleteTags"
        ],
    }

    def __init__(self, arguments, action_parameters):
        """Initialize the action for a single EC2 instance.

        Reads the selected instance from self._resources_, builds the
        volume-id -> device-name map from its block device mappings, and
        caches the task parameters used while creating snapshots.
        """
        ActionBase.__init__(self, arguments, action_parameters)

        # the instance this action instance operates on
        self.instance = self._resources_
        self.instance_id = self.instance["InstanceId"]
        # lazily created in the ec2_client property
        self._ec2_client = None

        # tags on the instance
        self.tags_on_instance = self.instance.get("Tags", {})

        # volume-id -> device-name for every attached EBS volume
        self.volumes = {dev["Ebs"]["VolumeId"]: dev["DeviceName"] for dev in self.instance["BlockDeviceMappings"]}

        # find the volume attached as the instance's root device (None if not found)
        self.root_volume = None
        for dev in self.volumes:
            if self.volumes[dev] == self.instance["RootDeviceName"]:
                self.root_volume = dev

        # accounts that will be granted create-volume permission on the snapshots
        self.accounts_with_create_permissions = self.get(PARAM_ACCOUNTS_VOLUME_CREATE_PERMISSIONS, [])
        self.tag_shared_snapshots = self.get(PARAM_TAG_SHARED_SNAPSHOTS, False)
        # tag filters selecting which instance/volume tags are copied to the snapshots
        self.copied_instance_tagfilter = TagFilterSet(self.get(PARAM_COPIED_INSTANCE_TAGS, ""))
        self.copied_volume_tagfilter = TagFilterSet(self.get(PARAM_COPIED_VOLUME_TAGS, ""))
        self.backup_root_device = self.get(PARAM_BACKUP_ROOT_DEVICE, True)
        self.backup_data_devices = self.get(PARAM_BACKUP_DATA_DEVICES, True)
        self.set_snapshot_name = self.get(PARAM_SET_SNAPSHOT_NAME, True)

        # optional tag filter restricting which volumes get a snapshot
        volume_tag_filter = self.get(PARAM_VOLUME_TAG_FILTER, None)
        self.volume_tag_filter = TagFilterExpression(volume_tag_filter) if volume_tag_filter not in ["", None] else None

        # lazily fetched in the all_volume_tags property
        self._all_volume_tags = None

        # result document, filled in by execute/is_completed
        self.result = {
            "account": self._account_,
            "region": self._region_,
            "instance": self.instance_id,
            "task": self._task_,
            "volumes": {},
            "snapshots": {}
        }

    @staticmethod
    def action_logging_subject(arguments, _):
        """Return the log stream subject "account-region-instance-date" for this action."""
        instance = arguments[ACTION_PARAM_RESOURCES]
        instance_id = instance["InstanceId"]
        account = instance["AwsAccount"]
        region = instance["Region"]
        return "{}-{}-{}-{}".format(account, region, instance_id, log_stream_date())

    @property
    def ec2_client(self):
        """EC2 client with retry wrappers, created on first access and cached."""
        if self._ec2_client is None:
            methods = ["create_snapshot",
                       "describe_tags",
                       "delete_tags",
                       "describe_instances",
                       "modify_snapshot_attribute",
                       "create_tags"]
            self._ec2_client = get_client_with_retries("ec2", methods,
                                                       region=self.instance["Region"],
                                                       session=self._session_,
                                                       logger=self._logger_)
        return self._ec2_client

    @property
    def all_volume_tags(self):
        """Tags of all the instance's volumes as {volume-id: {key: value}}.

        Fetched once via paginated describe_tags and cached. In dry-run
        mode a describe failure is logged and every volume gets the
        placeholder tags {"dryrun": ""} instead of raising.
        """
        if self._all_volume_tags is None:
            self._all_volume_tags = {}
            volumes = list(self.volumes.keys())
            describe_tags_args = {
                "DryRun": self._dryrun_,
                "Filters": [
                    {
                        "Name": "resource-id",
                        "Values": volumes
                    }
                ]
            }
            try:
                # paginate through describe_tags using NextToken
                while True:
                    describe_tag_resp = self.ec2_client.describe_tags_with_retries(**describe_tags_args)
                    for tag in describe_tag_resp.get("Tags", []):
                        resource = tag["ResourceId"]
                        if resource not in self._all_volume_tags:
                            self._all_volume_tags[resource] = {}
                        self._all_volume_tags[resource][tag["Key"]] = tag["Value"]
                    if "NextToken" in describe_tag_resp:
                        describe_tags_args["NextToken"] = describe_tag_resp["NextToken"]
                    else:
                        break
            except Exception as ex:
                if self._dryrun_:
                    # expected DryRunOperation error: record it and fake the tags
                    self._logger_.debug(str(ex))
                    self.result["describe_tags"] = str(ex)
                    self._all_volume_tags = {v: {"dryrun": ""} for v in volumes}
                else:
                    raise ex
        return self._all_volume_tags

    def create_volume_snapshot(self, volume):
        """Create a snapshot for a single volume and tag it.

        Builds the snapshot description from the task template (falling
        back to SNAPSHOT_DESCRIPTION), calls CreateSnapshot, then applies
        the tags collected from the instance, the volume and the task's
        snapshot-tag template. Results are recorded in self.result.
        """

        def create_snapshot(vol, snapshot_description):
            # Call CreateSnapshot; returns the new snapshot id, or "" in dry-run mode.
            snapshot_id = ""
            try:
                create_snapshot_resp = self.ec2_client.create_snapshot_with_retries(DryRun=self._dryrun_,
                                                                                    VolumeId=vol,
                                                                                    Description=snapshot_description)
                self.result["volumes"][vol] = {}
                self.result["volumes"][vol]["create_snapshot"] = create_snapshot_resp
                snapshot_id = create_snapshot_resp["SnapshotId"]
                self.result["volumes"][vol]["snapshot"] = snapshot_id
                self._logger_.info(INFO_SNAPSHOT_CREATED, snapshot_id)
            except Exception as ex:
                if self._dryrun_:
                    # NOTE(review): uses closure variable `volume` (== vol here) and assumes
                    # self.result["volumes"][volume] already exists — verify the dry-run path
                    self._logger_.info(str(ex))
                    self.result["volumes"][volume]["create_snapshot"] = str(ex)
                else:
                    raise ex
            return snapshot_id

        def set_snapshot_tags(snap, vol, dev):
            # Build and apply tags (optionally including a generated Name tag) to the snapshot.
            try:
                tags = get_tags_for_volume_snapshot(vol, dev)
                if self.set_snapshot_name:
                    snapshot_name = self.build_str_from_template(parameter_name=PARAM_NAME,
                                                                 tag_variables={
                                                                     TAG_PLACEHOLDER_INSTANCE_ID: self.instance_id,
                                                                     TAG_PLACEHOLDER_VOLUME_ID: volume
                                                                 })
                    if snapshot_name == "":
                        # no name template configured: fall back to volume id + UTC timestamp
                        dt = self._datetime_.utcnow()
                        snapshot_name = SNAPSHOT_NAME.format(volume, dt.year, dt.month, dt.day, dt.hour, dt.minute)

                    prefix = self.build_str_from_template(parameter_name=PARAM_SNAPSHOT_NAME_PREFIX,
                                                          tag_variables={
                                                              TAG_PLACEHOLDER_INSTANCE_ID: self.instance_id,
                                                              TAG_PLACEHOLDER_VOLUME_ID: volume
                                                          })
                    snapshot_name = prefix + snapshot_name
                    tags["Name"] = snapshot_name
                    self._logger_.info(INFO_SNAPSHOT_NAME, snapshot_name)

                if len(tags) > 0:
                    self._logger_.info(INFO_CREATE_TAGS, safe_json(tags, indent=3))
                    tagging.set_ec2_tags(ec2_client=self.ec2_client,
                                         resource_ids=[snap],
                                         tags=tags,
                                         can_delete=False,
                                         logger=self._logger_)
                    if snap not in self.result["snapshots"]:
                        self.result["snapshots"][snap] = {}
                    self.result["snapshots"][snap]["tags"] = tags
                    self._logger_.info(INFO_TAGS_CREATED)
            except Exception as ex:
                if self._dryrun_:
                    self._logger_.debug(str(ex))
                    self.result["volumes"][volume]["create_tags"] = str(ex)
                else:
                    raise ex

        def get_tags_for_volume_snapshot(vol, dev):
            # Merge tags copied from the instance, tags copied from the volume,
            # and tags built from the task's snapshot-tag template; finally add
            # the marker tag recording the source volume id.
            vol_tags = self.copied_instance_tagfilter.pairs_matching_any_filter(self.tags_on_instance)
            tags_on_volume = self.all_volume_tags.get(vol, {})
            vol_tags.update(self.copied_volume_tagfilter.pairs_matching_any_filter(tags_on_volume))
            vol_tags.update(
                self.build_tags_from_template(parameter_name=PARAM_SNAPSHOT_TAGS,
                                              tag_variables={
                                                  TAG_PLACEHOLDER_INSTANCE_ID: self.instance_id,
                                                  TAG_PLACEHOLDER_VOLUME_ID: volume,
                                                  TAG_PLACEHOLDER_DEVICE: dev
                                              }))
            vol_tags[actions.marker_snapshot_tag_source_source_volume_id()] = volume
            return vol_tags

        device = self.volumes[volume]
        self.result[volume] = {"device": device}

        description = self.build_str_from_template(parameter_name=PARAM_SNAPSHOT_DESCRIPTION,
                                                   tag_variables={
                                                       TAG_PLACEHOLDER_INSTANCE_ID: self.instance_id,
                                                       TAG_PLACEHOLDER_VOLUME_ID: volume,
                                                       TAG_PLACEHOLDER_DEVICE: device
                                                   })
        if description == "":
            description = SNAPSHOT_DESCRIPTION.format(self._task_,
                                                      "root " if volume == self.root_volume else "",
                                                      volume,
                                                      device,
                                                      self.instance_id)

        self._logger_.info(INFO_CREATE_SNAPSHOT,
                           volume,
                           "root " if volume == self.root_volume else "",
                           device,
                           self.instance_id)
        snapshot = create_snapshot(volume, description)
        set_snapshot_tags(snapshot, volume, device)

    def is_completed(self, snapshot_create_data):
        """Check whether all snapshots started by execute() have completed.

        Returns None while any snapshot is still pending (or while not all
        snapshots are visible yet within 5 minutes of the start time), a
        result dict describing the snapshots once all are completed, and
        raises when a snapshot failed or went missing. On completion it
        also tags the instance/volumes, grants create-volume permissions
        and tags shared snapshots in the target accounts.
        """

        def grant_create_volume_permissions(snap_ids):
            # Grant the configured accounts CreateVolumePermission on each snapshot.
            if self.accounts_with_create_permissions is not None and len(self.accounts_with_create_permissions) > 0:
                args = {
                    "CreateVolumePermission": {
                        "Add": [{"UserId": a.strip()} for a in self.accounts_with_create_permissions]
                    }
                }
                for snapshot_id in snap_ids:
                    args["SnapshotId"] = snapshot_id
                    try:
                        self.ec2_client.modify_snapshot_attribute_with_retries(**args)
                        self._logger_.info(INFO_SETTING_CREATE_VOLUME_PERMISSIONS,
                                           ", ".join(self.accounts_with_create_permissions))
                        self.result["create-volume-access-accounts"] = [a.strip() for a in
                                                                        self.accounts_with_create_permissions]
                    except Exception as ex:
                        raise_exception(ERR_SETTING_CREATE_VOLUME_PERMISSIONS, self.accounts_with_create_permissions, ex)

        def tag_shared_snapshots(snapshot_data, snap_ids):
            # Replicate the snapshot tags into every account the snapshots are
            # shared with, using a cross-account session/role per account.
            if self.accounts_with_create_permissions not in ["", None] and self.tag_shared_snapshots:
                for account in self.accounts_with_create_permissions:
                    session_for_tagging = self.get_action_session(account=account,
                                                                  param_name=PARAM_SHARED_ACCOUNT_TAGGING_ROLENAME,
                                                                  logger=self._logger_)
                    if session_for_tagging is None:
                        self._logger_.error(ERR_TAGS_NOT_SET_IN_ACCOUNT, account)
                        continue
                    try:
                        ec2_client = get_client_with_retries(service_name="ec2",
                                                             methods=[
                                                                 "create_tags",
                                                                 "delete_tags"
                                                             ],
                                                             context=self._context_,
                                                             region=self._region_,
                                                             session=session_for_tagging,
                                                             logger=self._logger_)
                        for snap_id in snap_ids:
                            tags = snapshot_data.get(snap_id, {}).get("tags", None)
                            if tags is not None:
                                self._logger_.info(INFO_SET_SNAPSHOT_TAGS_SHARED,
                                                   safe_json(tags, indent=3),
                                                   snap_id,
                                                   account,
                                                   self._region_)
                                tagging.set_ec2_tags(ec2_client=ec2_client,
                                                     resource_ids=[snap_id],
                                                     tags=tags,
                                                     logger=self._logger_)
                    except Exception as ex:
                        raise Exception(ERR_SETTING_SHARED_TAGS.format(account, str(ex)))

        def set_volume_tags(volume_id, snap_id):
            # Apply template-built tags (with the snapshot id placeholder) to the source volume.
            tags = self.build_tags_from_template(parameter_name=PARAM_VOLUME_TAGS,
                                                 tag_variables={
                                                     TAG_PLACEHOLDER_VOLUME_SNAPSHOT: snap_id
                                                 })
            if len(tags) > 0:
                try:
                    tagging.set_ec2_tags(ec2_client=self.ec2_client,
                                         resource_ids=[volume_id],
                                         tags=tags,
                                         logger=self._logger_)
                    self._logger_.info(INFO_SET_VOLUME_TAGS, safe_json(tags, indent=3), volume_id)
                except Exception as ex:
                    raise Exception(ERR_SETTING_VOLUME_TAGS.format(self.instance_id, ex))

        def set_instance_tags(snap_ids):
            # Apply template-built tags (with the sorted snapshot-id list placeholder) to the instance.
            tags = self.build_tags_from_template(parameter_name=PARAM_INSTANCE_TAGS,
                                                 tag_variables={
                                                     TAG_PLACEHOLDER_INSTANCE_SNAPSHOTS: ','.join(sorted(snap_ids))
                                                 })
            if len(tags) > 0:
                try:
                    self.set_ec2_instance_tags_with_event_loop_check(instance_ids=[self.instance_id],
                                                                     tags_to_set=tags,
                                                                     client=self.ec2_client,
                                                                     region=self._region_)
                    self._logger_.info(INFO_SET_INSTANCE_TAGS, safe_json(tags, indent=3), self.instance_id)
                except Exception as ex:
                    raise Exception(ERR_SETTING_INSTANCE_TAGS.format(self.instance_id, ex))

        # snapshot ids recorded by create_volume_snapshot in the execute phase
        snapshot_ids = [volume.get("create_snapshot", {}).get("SnapshotId") for volume in
                        list(snapshot_create_data.get("volumes", {}).values())]
        self._logger_.info(INFO_CHECKING_SNAPSHOT_STATUS, ",".join(snapshot_ids))

        if len(snapshot_ids) == 0:
            # nothing was snapshotted for this instance
            return {
                "InstanceId": snapshot_create_data["instance"],
                "Volumes": []
            }

        # create service instance to test if the snapshots are available
        ec2 = services.create_service("ec2", session=self._session_,
                                      service_retry_strategy=get_default_retry_strategy("ec2", context=self._context_))

        # test if the snapshots with the ids that were returned from the CreateSnapshot API call exist and are completed
        snapshots = list(ec2.describe(services.ec2_service.SNAPSHOTS,
                                      OwnerIds=["self"],
                                      region=self.instance["Region"],
                                      Filters=[
                                          {
                                              "Name": "snapshot-id",
                                              "Values": snapshot_ids
                                          }
                                      ]))

        if len(snapshots) != len(snapshot_ids):
            # allow 5 minutes for all snapshots to appear
            start_time = dateutil.parser.parse(snapshot_create_data["start-time"])
            if self._datetime_.now() - start_time < timedelta(minutes=5):
                self._logger_.info(INFO_NOT_ALL_IN_PROGRESS)
                return None

        test_result = {
            "InstanceId": snapshot_create_data["instance"],
            "Volumes": [{
                "VolumeId": s["VolumeId"],
                "SnapshotId": s["SnapshotId"],
                "State": s["State"],
                "Progress": s["Progress"]
            } for s in snapshots]
        }

        self._logger_.info(INFO_STATE_SNAPSHOTS, safe_json(test_result, indent=3))

        # wait until all snapshots are no longer pending
        for volume in test_result["Volumes"]:
            if volume["State"] == SNAPSHOT_STATE_PENDING:
                self._logger_.info(INFO_CREATION_PENDING)
                return None

        # collect possible failed snapshots
        failed = []
        for volume in test_result["Volumes"]:
            if volume["State"] == SNAPSHOT_STATE_ERROR:
                failed.append(volume)

        if len(failed) > 0:
            s = ",".join([ERR_FAILED_SNAPSHOT.format(volume["SnapshotId"], volume["VolumeId"]) for volume in failed])
            raise Exception(s)

        # reached only after the 5-minute grace period above: missing snapshots are now an error
        if len(snapshot_ids) != len(snapshots):
            created_snapshots = [s["SnapshotId"] for s in snapshots]
            raise Exception(ERR_MISSING_SNAPSHOTS.format(",".join([s for s in snapshot_ids if s not in created_snapshots])))

        snapshot_ids = [s["SnapshotId"] for s in snapshots]

        # set tags on source instance
        set_instance_tags(snapshot_ids)

        for s in snapshots:
            set_volume_tags(volume_id=s["VolumeId"], snap_id=s["SnapshotId"])

        # set permissions to create volumes from snapshots
        grant_create_volume_permissions(snapshot_ids)

        # tag resources in accounts the snapshots are shared with
        tag_shared_snapshots(snapshot_create_data.get("snapshots", {}), snapshot_ids)

        self._logger_.info(INFO_COMPLETED)
        return test_result

    def execute(self):
        """Start snapshot creation for the instance's selected volumes.

        Snapshots the root device and/or data devices depending on the
        task parameters, skipping volumes that do not match the volume
        tag filter or that already have a pending snapshot. Returns the
        result dict, including the start time used by is_completed().
        """

        def volume_has_active_snapshots(ec2_service, vol_id):
            # Return True if the volume already has a snapshot in "pending" state.
            volume_snapshots = list(
                ec2_service.describe(services.ec2_service.SNAPSHOTS,
                                     OwnerIds=["self"],
                                     region=self.instance["Region"],
                                     Filters=[
                                         {
                                             "Name": "volume-id",
                                             "Values": [vol_id]
                                         }
                                     ]))
            active = [s["SnapshotId"] for s in volume_snapshots if s.get("State", "") == "pending"]
            if len(active) > 0:
                self._logger_.info(INFO_PENDING_SNAPSHOTS, vol_id, ",".join(active))
                return True
            return False

        self._logger_.info("{}, version {}", self.properties[ACTION_TITLE], self.properties[ACTION_VERSION])
        self._logger_.info(INFO_START_SNAPSHOT_ACTION, self.instance_id, self._account_, self._region_, self._task_)
        self._logger_.debug("Instance block device mappings are {}", self.instance["BlockDeviceMappings"])

        ec2 = services.create_service("ec2", session=self._session_,
                                      service_retry_strategy=get_default_retry_strategy("ec2", context=self._context_))

        # only fetch the volume tags when a volume tag filter is configured
        if self.volume_tag_filter is not None:
            volume_data = ec2.describe(services.ec2_service.VOLUMES,
                                       VolumeIds=list(self.volumes.keys()),
                                       tags=True,
                                       region=self._region_)
            volume_tags = {k["VolumeId"]: k.get("Tags", {}) for k in list(volume_data)}
        else:
            volume_tags = {}

        if self.backup_root_device:
            if self.root_volume is None:
                self._logger_.warning(WARN_ROOT_NOT_FOUND, self.instance_id, ",".join(self.volumes))
            else:
                if self.volume_tag_filter is None or self.volume_tag_filter.is_match(volume_tags.get(self.root_volume, {})):
                    if volume_has_active_snapshots(ec2, self.root_volume):
                        self._logger_.error(ERR_SNAPSHOT_PENDING, self.root_volume)
                    else:
                        self.create_volume_snapshot(self.root_volume)
                else:
                    self._logger_.info(INF_SKIP_VOLUME_TAG_FILTER, self.root_volume, volume_tags.get(self.root_volume, {}))

        if self.backup_data_devices:
            # all non-root volumes
            for volume in [v for v in self.volumes if v != self.root_volume]:
                if self.volume_tag_filter is None or self.volume_tag_filter.is_match(volume_tags.get(volume, {})):
                    if volume_has_active_snapshots(ec2, volume):
                        self._logger_.error(ERR_SNAPSHOT_PENDING, volume)
                    else:
                        self.create_volume_snapshot(volume)
                else:
                    self._logger_.info(INF_SKIP_VOLUME_TAG_FILTER, volume, volume_tags.get(volume, {}))

        # is_completed uses this to allow a grace period for snapshots to appear
        self.result["start-time"] = self._datetime_.now().isoformat()

        # NOTE(review): SnapshotsSizeTotal sums .get("VolumeSize") which may be None
        # if a create_snapshot response lacked VolumeSize — verify against dry-run output
        self.result[METRICS_DATA] = build_action_metrics(
            action=self,
            CreatedSnapshots=len(list(self.result.get("volumes", {}).values())),
            SnapshotsSizeTotal=sum(
                [volume.get("create_snapshot", {}).get("VolumeSize") for volume in
                 list(self.result.get("volumes", {}).values())]))

        return self.result