def test_get_one_id_last_no_executions_in_the_database(self):
    """Requesting output for id "last" must return 400 when no executions exist."""
    # Ensure the executions collection is empty before issuing the request.
    ActionExecution.query().delete()

    response = self.app.get('/v1/executions/last/output', expect_errors=True)

    self.assertEqual(response.status_int, http_client.BAD_REQUEST)
    expected_fault = 'No executions found in the database'
    self.assertEqual(response.json['faultstring'], expected_fault)
def get_descendants(actionexecution_id, descendant_depth=-1, result_fmt=None):
    """
    Returns all descendant executions up to the specified descendant_depth for
    the supplied actionexecution_id.

    :param actionexecution_id: ID of the root execution whose subtree is fetched.
    :param descendant_depth: Maximum depth to descend to; -1 means unlimited.
    :param result_fmt: Key into DESCENDANT_VIEWS selecting the result view;
                       falls back to DFSDescendantView when not recognized.
    :return: The accumulated ``result`` of the chosen descendant view.
    """
    descendants = DESCENDANT_VIEWS.get(result_fmt, DFSDescendantView)()
    children = ActionExecution.query(parent=actionexecution_id,
                                     order_by=['start_timestamp'])
    LOG.debug('Found %s children for id %s.', len(children), actionexecution_id)

    # Work items are (execution, depth); direct children of the root sit at depth 1.
    current_level = [(child, 1) for child in children]

    while current_level:
        parent, level = current_level.pop(0)
        parent_id = str(parent.id)
        descendants.add(parent)
        if not parent.children:
            continue
        # Fixed: the original tested ``level != -1`` which is always true
        # (levels start at 1 and only grow). The intent is to stop descending
        # once the configured depth is reached, unless depth is unlimited (-1).
        if descendant_depth != -1 and level == descendant_depth:
            continue
        children = ActionExecution.query(parent=parent_id,
                                         order_by=['start_timestamp'])
        LOG.debug('Found %s children for id %s.', len(children), parent_id)
        # Prepend children (preserving their relative order) so the traversal
        # proceeds depth-first.
        for idx in range(len(children)):
            current_level.insert(idx, (children[idx], level + 1))
    return descendants.result
def get_descendants(actionexecution_id, descendant_depth=-1):
    """
    Returns all descendant executions up to the specified descendant_depth for
    the supplied actionexecution_id.

    :param actionexecution_id: ID of the root execution.
    :param descendant_depth: Maximum number of levels to descend; -1 means
                             unlimited.
    :return: List of descendant execution objects, collected level by level.
    :raises Exception: If the same child execution is reached twice (cycle).
    """
    descendants = []
    current_level = set([actionexecution_id])
    next_level = set()
    remaining_depth = descendant_depth

    # Keep track of processed ActionExecutions to avoid any cycles. Will raise
    # an exception if a cycle is found.
    processed_action_executions = set()

    while current_level and remaining_depth != 0:
        parent_id = current_level.pop()
        processed_action_executions.add(parent_id)
        children = ActionExecution.query(parent=parent_id)
        LOG.debug('Found %s children for id %s.', len(children), parent_id)
        for child in children:
            if str(child.id) in processed_action_executions:
                # Fixed: the id was passed as a second constructor argument
                # (logging style) so the message was never interpolated;
                # %-format it into the string instead.
                raise Exception('child with id %s appeared multiple times.'
                                % str(child.id))
            if child.children:
                next_level.add(str(child.id))
        descendants.extend(children)
        # Current level is exhausted; start processing the next level down and
        # consume one unit of the remaining depth budget.
        if not current_level:
            current_level.update(next_level)
            next_level.clear()
            remaining_depth = remaining_depth - 1
    return descendants
def _purge_executions(timestamp=None, action_ref=None):
    """
    Delete execution (and related liveaction) models that started before
    ``timestamp``, optionally restricted to a single action reference.

    :param timestamp: Executions which started before this time are purged.
    :param action_ref: When provided, only purge executions of this action.
    """
    if not timestamp:
        print('Specify a valid timestamp to purge.')
        return

    if not action_ref:
        action_ref = ''

    print('Purging executions older than timestamp: %s' %
          timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    def should_delete(execution_db):
        # With an action_ref both the action match and the age check must
        # hold; otherwise age alone decides.
        if action_ref != '':
            return (execution_db.liveaction['action'] == action_ref and
                    execution_db.start_timestamp < timestamp)
        else:
            return execution_db.start_timestamp < timestamp

    # XXX: Think about paginating this call.
    filters = {'start_timestamp__lt': isotime.parse(timestamp)}
    executions = ActionExecution.query(**filters)
    # Fixed: materialize the filter result so both len() and the later loop
    # work under Python 3, where filter() returns a one-shot iterator.
    executions_to_delete = list(filter(should_delete, executions))
    print('#### Total number of executions to delete: %d' % len(executions_to_delete))

    # Purge execution and liveaction models now
    for execution_db in executions_to_delete:
        _purge_action_models(execution_db)

    # Print stats
    print('#### Total execution models deleted: %d' % DELETED_COUNT)
def get_one(self, id, output_type='all', output_format='raw', existing_only=False,
            requester_user=None):
    """
    Return all stored output for one execution as a plain-text response.

    ``id`` may be the literal string "last", which resolves to the most
    recently created execution.
    """
    if id == 'last':
        # Resolve "last" to the newest execution (descending ObjectId order).
        newest = ActionExecution.query().order_by('-id').limit(1).first()
        if not newest:
            raise ValueError('No executions found in the database')
        id = str(newest.id)

    execution_db = self._get_one_by_id(id=id, requester_user=requester_user,
                                       permission_type=PermissionType.EXECUTION_VIEW)
    execution_id = str(execution_db.id)

    # Restrict to a single output type (e.g. stdout) unless "all" is requested.
    query_filters = {}
    if output_type and output_type != 'all':
        query_filters['output_type'] = output_type

    def existing_output_iter():
        # Consume and return all of the existing lines
        # pylint: disable=no-member
        output_dbs = ActionExecutionOutput.query(execution_id=execution_id,
                                                 **query_filters)
        joined = ''.join(output_db.data for output_db in output_dbs)
        yield six.binary_type(joined.encode('utf-8'))

    def make_response():
        return Response(content_type='text/plain', app_iter=existing_output_iter())

    return make_response()
def get_one(self, id, requester_user, exclude_attributes=None, include_attributes=None,
            show_secrets=False):
    """
    Retrieve a single execution.

    Handles requests:
        GET /executions/<id>[?exclude_attributes=result,trigger_instance]

    :param exclude_attributes: List of attributes to exclude from the object.
    :type exclude_attributes: ``list``
    """
    exclude_fields = self._validate_exclude_fields(exclude_fields=exclude_attributes)
    include_fields = self._validate_include_fields(include_fields=include_attributes)

    from_model_kwargs = {
        'mask_secrets': self._get_mask_secrets(requester_user, show_secrets=show_secrets)
    }

    # "last" is a virtual id which maps to the newest execution; only the id
    # field is fetched to keep this lookup cheap.
    if id == 'last':
        newest = ActionExecution.query().order_by('-id').limit(1).only('id').first()
        if not newest:
            raise ValueError('No executions found in the database')
        id = str(newest.id)

    return self._get_one_by_id(id=id,
                               exclude_fields=exclude_fields,
                               include_fields=include_fields,
                               requester_user=requester_user,
                               from_model_kwargs=from_model_kwargs,
                               permission_type=PermissionType.EXECUTION_VIEW)
def get_one(self, id, output_type='all', output_format='raw', existing_only=False,
            requester_user=None):
    """
    Return every stored output record for a single execution as plain text.
    """
    # The virtual id "last" maps to the most recently created execution.
    if id == 'last':
        latest = ActionExecution.query().order_by('-id').limit(1).first()
        if not latest:
            raise ValueError('No executions found in the database')
        id = str(latest.id)

    execution_db = self._get_one_by_id(id=id, requester_user=requester_user,
                                       permission_type=PermissionType.EXECUTION_VIEW)
    execution_id = str(execution_db.id)

    query_filters = {}
    if output_type and output_type != 'all':
        # Limit results to a single output type (stdout / stderr / ...).
        query_filters['output_type'] = output_type

    def existing_output_iter():
        # Consume and return all of the existing lines
        # pylint: disable=no-member
        output_dbs = ActionExecutionOutput.query(execution_id=execution_id,
                                                 **query_filters)
        combined = ''.join([output_db.data for output_db in output_dbs])
        yield six.binary_type(combined.encode('utf-8'))

    return Response(content_type='text/plain', app_iter=existing_output_iter())
def purge_executions(timestamp=None, action_ref=None, purge_incomplete=False):
    """
    Purge execution models older than ``timestamp``.

    :param timestamp: Cut-off time; older executions are removed.
    :param action_ref: Optional action reference to restrict the purge to.
    :param purge_incomplete: When True, also purge executions which have not
                             reached a done state.
    """
    if not timestamp:
        LOG.error("Specify a valid timestamp to purge.")
        return

    LOG.info("Purging executions older than timestamp: %s",
             timestamp.strftime("%Y-%m-%dT%H:%M:%S.%fZ"))

    filters = {}
    if action_ref:
        filters["action__ref"] = action_ref

    if purge_incomplete:
        # Age is judged on start time alone; status does not matter.
        filters["start_timestamp__lt"] = isotime.parse(timestamp)
    else:
        # Only fully completed executions: both start and end must predate the
        # cut-off and the status must be terminal.
        filters["end_timestamp__lt"] = isotime.parse(timestamp)
        filters["start_timestamp__lt"] = isotime.parse(timestamp)
        filters["status"] = {"$in": DONE_STATES}

    # XXX: Think about paginating this call.
    executions = ActionExecution.query(**filters)
    LOG.info("#### Total number of executions to delete: %d", len(executions))

    # Purge execution and liveaction models now
    for execution_db in executions:
        _purge_models(execution_db)

    # Print stats
    LOG.info("#### Total execution models deleted: %d", DELETED_COUNT)
def get_one(self, id, requester_user, exclude_attributes=None, show_secrets=False):
    """
    Retrieve a single execution.

    Handles requests:
        GET /executions/<id>[?exclude_attributes=result,trigger_instance]

    :param exclude_attributes: List of attributes to exclude from the object.
    :type exclude_attributes: ``list``
    """
    exclude_fields = self._validate_exclude_fields(exclude_fields=exclude_attributes)

    from_model_kwargs = {
        'mask_secrets': self._get_mask_secrets(requester_user, show_secrets=show_secrets)
    }

    # "last" is a virtual id resolving to the newest execution; fetch only the
    # id field to keep the lookup cheap.
    if id == 'last':
        newest = ActionExecution.query().order_by('-id').limit(1).only('id').first()
        if not newest:
            raise ValueError('No executions found in the database')
        id = str(newest.id)

    return self._get_one_by_id(id=id,
                               exclude_fields=exclude_fields,
                               requester_user=requester_user,
                               from_model_kwargs=from_model_kwargs,
                               permission_type=PermissionType.EXECUTION_VIEW)
def _purge_executions(timestamp=None, action_ref=None):
    """
    Delete execution (and related liveaction) models that started before
    ``timestamp``, optionally restricted to a single action reference.

    :param timestamp: Executions which started before this time are purged.
    :param action_ref: When provided, only purge executions of this action.
    """
    if not timestamp:
        print('Specify a valid timestamp to purge.')
        return

    if not action_ref:
        action_ref = ''

    print('Purging executions older than timestamp: %s' %
          timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    def should_delete(execution_db):
        # With an action_ref both the action match and the age check must
        # hold; otherwise age alone decides.
        if action_ref != '':
            return (execution_db.liveaction['action'] == action_ref and
                    execution_db.start_timestamp < timestamp)
        else:
            return execution_db.start_timestamp < timestamp

    # XXX: Think about paginating this call.
    filters = {'start_timestamp__lt': isotime.parse(timestamp)}
    executions = ActionExecution.query(**filters)
    # Fixed: materialize the filter result so both len() and the later loop
    # work under Python 3, where filter() returns a one-shot iterator.
    executions_to_delete = list(filter(should_delete, executions))
    print('#### Total number of executions to delete: %d' % len(executions_to_delete))

    # Purge execution and liveaction models now
    for execution_db in executions_to_delete:
        _purge_action_models(execution_db)

    # Print stats
    print('#### Total execution models deleted: %d' % DELETED_COUNT)
def purge_executions(timestamp=None, action_ref=None, purge_incomplete=False):
    """
    Purge execution models older than ``timestamp``.

    :param timestamp: Cut-off time; older executions are removed.
    :param action_ref: Optional action reference to restrict the purge to.
    :param purge_incomplete: When True, also purge executions which have not
                             reached a done state.
    """
    if not timestamp:
        LOG.error('Specify a valid timestamp to purge.')
        return

    LOG.info('Purging executions older than timestamp: %s',
             timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    filters = {}
    if action_ref:
        filters['action__ref'] = action_ref

    if purge_incomplete:
        # Age is judged on start time alone; status does not matter.
        filters['start_timestamp__lt'] = isotime.parse(timestamp)
    else:
        # Only fully completed executions: both start and end must predate the
        # cut-off and the status must be terminal.
        filters['end_timestamp__lt'] = isotime.parse(timestamp)
        filters['start_timestamp__lt'] = isotime.parse(timestamp)
        filters['status'] = {"$in": DONE_STATES}

    # XXX: Think about paginating this call.
    executions = ActionExecution.query(**filters)
    LOG.info('#### Total number of executions to delete: %d', len(executions))

    # Purge execution and liveaction models now
    for execution_db in executions:
        _purge_models(execution_db)

    # Print stats
    LOG.info('#### Total execution models deleted: %d', DELETED_COUNT)
def test_datetime_range(self):
    """A datetime-range query string selects executions inside the window,
    regardless of the order of the range endpoints."""
    base = date_utils.add_utc_tz(datetime.datetime(2014, 12, 25, 0, 0, 0))
    # Seed 60 executions, one per second starting at ``base``.
    for offset in range(60):
        doc = copy.deepcopy(self.fake_history_subtasks[0])
        doc['id'] = str(bson.ObjectId())
        doc['start_timestamp'] = isotime.format(base + datetime.timedelta(seconds=offset))
        ActionExecution.add_or_update(ActionExecutionAPI.to_model(ActionExecutionAPI(**doc)))

    dt_range = '2014-12-25T00:00:10Z..2014-12-25T00:00:19Z'
    self.assertEqual(len(ActionExecution.query(start_timestamp=dt_range)), 10)

    # Reversed endpoints are normalized and select the same 10-second window.
    dt_range = '2014-12-25T00:00:19Z..2014-12-25T00:00:10Z'
    self.assertEqual(len(ActionExecution.query(start_timestamp=dt_range)), 10)
def test_datetime_range(self):
    """A datetime-range query string selects executions inside the window,
    regardless of the order of the range endpoints."""
    base = date_utils.add_utc_tz(datetime.datetime(2014, 12, 25, 0, 0, 0))
    # Seed 60 executions, one per second starting at ``base``.
    for offset in range(60):
        doc = copy.deepcopy(self.fake_history_subtasks[0])
        doc['id'] = str(bson.ObjectId())
        doc['start_timestamp'] = isotime.format(base + datetime.timedelta(seconds=offset))
        ActionExecution.add_or_update(ActionExecutionAPI.to_model(ActionExecutionAPI(**doc)))

    dt_range = '2014-12-25T00:00:10Z..2014-12-25T00:00:19Z'
    self.assertEqual(len(ActionExecution.query(start_timestamp=dt_range)), 10)

    # Reversed endpoints are normalized and select the same 10-second window.
    dt_range = '2014-12-25T00:00:19Z..2014-12-25T00:00:10Z'
    self.assertEqual(len(ActionExecution.query(start_timestamp=dt_range)), 10)
def _get_missed_executions_from_db(self, export_marker=None):
    """
    Fetch executions missed since the last export.

    Without a marker every execution is returned; otherwise only executions
    which ended after the marker are fetched.
    """
    if not export_marker:
        return self._get_all_executions_from_db()

    # XXX: Should adapt this query to get only executions with status
    # in COMPLETION_STATUSES.
    query = {'end_timestamp__gt': export_marker}
    LOG.info('Querying for executions with filters: %s', query)
    return ActionExecution.query(**query)
def purge_inquiries(logger):
    """Purge Inquiries that have exceeded their configured TTL

    At the moment, Inquiries do not have their own database model, so this
    function effectively is another, more specialized GC for executions. It
    will look for executions with a 'pending' status that use the 'inquirer'
    runner, which is the current definition for an Inquiry.

    Then it will mark those that have a nonzero TTL and have existed longer
    than their TTL as "timed out". It will then request that the parent
    workflow(s) resume, where the failure can be handled as the user desires.
    """
    # Get all existing Inquiries
    filters = {'runner__name': 'inquirer', 'status': action_constants.LIVEACTION_STATUS_PENDING}
    inquiries = list(ActionExecution.query(**filters))

    gc_count = 0

    # Inspect each Inquiry, and determine if TTL is expired
    for inquiry in inquiries:
        # Fixed: default to 0 (TTL disabled) when the result carries no "ttl"
        # key; int(None) would otherwise raise a TypeError and abort the whole
        # GC pass.
        ttl = int(inquiry.result.get('ttl') or 0)
        if ttl <= 0:
            logger.debug("Inquiry %s has a TTL of %s. Skipping." % (inquiry.id, ttl))
            continue

        min_since_creation = int(
            (get_datetime_utc_now() - inquiry.start_timestamp).total_seconds() / 60
        )

        logger.debug("Inquiry %s has a TTL of %s and was started %s minute(s) ago" % (
            inquiry.id, ttl, min_since_creation))

        if min_since_creation > ttl:
            gc_count += 1
            logger.info("TTL expired for Inquiry %s. Marking as timed out." % inquiry.id)

            liveaction_db = action_utils.update_liveaction_status(
                status=action_constants.LIVEACTION_STATUS_TIMED_OUT,
                result=inquiry.result,
                liveaction_id=inquiry.liveaction.get('id'))
            executions.update_execution(liveaction_db)

            # Call Inquiry runner's post_run to trigger callback to workflow
            action_db = get_action_by_ref(liveaction_db.action)
            invoke_post_run(liveaction_db=liveaction_db, action_db=action_db)

            if liveaction_db.context.get("parent"):
                # Request that root workflow resumes
                root_liveaction = action_service.get_root_liveaction(liveaction_db)
                action_service.request_resume(
                    root_liveaction,
                    UserDB(cfg.CONF.system_user.user)
                )

    logger.info('Marked %s ttl-expired Inquiries as "timed out".' % (gc_count))
def test_sort_by_start_timestamp(self):
    """order_by on start_timestamp sorts results ascending and descending."""
    base = date_utils.add_utc_tz(datetime.datetime(2014, 12, 25, 0, 0, 0))
    # Seed 60 executions, one per second starting at ``base``.
    for offset in range(60):
        doc = copy.deepcopy(self.fake_history_subtasks[0])
        doc["id"] = str(bson.ObjectId())
        doc["start_timestamp"] = isotime.format(base + datetime.timedelta(seconds=offset))
        ActionExecution.add_or_update(ActionExecutionAPI.to_model(ActionExecutionAPI(**doc)))

    dt_range = "2014-12-25T00:00:10Z..2014-12-25T00:00:19Z"
    ascending = ActionExecution.query(start_timestamp=dt_range, order_by=["start_timestamp"])
    self.assertLess(ascending[0]["start_timestamp"], ascending[9]["start_timestamp"])

    dt_range = "2014-12-25T00:00:19Z..2014-12-25T00:00:10Z"
    descending = ActionExecution.query(start_timestamp=dt_range, order_by=["-start_timestamp"])
    self.assertLess(descending[9]["start_timestamp"], descending[0]["start_timestamp"])
def get_one(
    self,
    id,
    output_type="all",
    output_format="raw",
    existing_only=False,
    requester_user=None,
    show_secrets=False,
):
    """
    Return the stored output of one execution as a plain-text response,
    masking secrets unless the requester may view them.
    """
    # The virtual id "last" resolves to the most recently created execution.
    if id == "last":
        newest = ActionExecution.query().order_by("-id").limit(1).first()
        if not newest:
            raise ValueError("No executions found in the database")
        id = str(newest.id)

    # Fall back to the configured system user for anonymous requests.
    if not requester_user:
        requester_user = UserDB(name=cfg.CONF.system_user.user)

    from_model_kwargs = {
        "mask_secrets": self._get_mask_secrets(requester_user, show_secrets=show_secrets)
    }

    execution_db = self._get_one_by_id(
        id=id,
        requester_user=requester_user,
        from_model_kwargs=from_model_kwargs,
        permission_type=PermissionType.EXECUTION_VIEW,
    )
    execution_id = str(execution_db.id)

    # Restrict to a single output type unless "all" is requested.
    query_filters = {}
    if output_type and output_type != "all":
        query_filters["output_type"] = output_type

    def existing_output_iter():
        # Consume and return all of the existing lines
        # pylint: disable=no-member
        output_dbs = ActionExecutionOutput.query(execution_id=execution_id, **query_filters)
        joined = "".join(output_db.data for output_db in output_dbs)
        yield six.binary_type(joined.encode("utf-8"))

    return Response(content_type="text/plain", app_iter=existing_output_iter())
def test_sort_by_start_timestamp(self):
    """order_by on start_timestamp sorts results ascending and descending."""
    base = isotime.add_utc_tz(datetime.datetime(2014, 12, 25, 0, 0, 0))
    # Seed 60 executions, one per second starting at ``base``.
    for offset in range(60):
        doc = copy.deepcopy(self.fake_history_subtasks[0])
        doc['id'] = str(bson.ObjectId())
        doc['start_timestamp'] = isotime.format(base + datetime.timedelta(seconds=offset))
        ActionExecution.add_or_update(ActionExecutionAPI.to_model(ActionExecutionAPI(**doc)))

    dt_range = '2014-12-25T00:00:10Z..2014-12-25T00:00:19Z'
    ascending = ActionExecution.query(start_timestamp=dt_range, order_by=['start_timestamp'])
    self.assertLess(ascending[0]['start_timestamp'], ascending[9]['start_timestamp'])

    dt_range = '2014-12-25T00:00:19Z..2014-12-25T00:00:10Z'
    descending = ActionExecution.query(start_timestamp=dt_range, order_by=['-start_timestamp'])
    self.assertLess(descending[9]['start_timestamp'], descending[0]['start_timestamp'])
def test_inquiry_garbage_collection(self):
    """End-to-end check that the GC times out only TTL-expired Inquiries."""
    now = date_utils.get_datetime_utc_now()
    three_minutes_ago = now - datetime.timedelta(minutes=3)

    # Inquiries whose TTL (2 min) has already elapsed - should be collected.
    old_inquiry_count = 15
    for _ in range(old_inquiry_count):
        self._create_inquiry(ttl=2, timestamp=three_minutes_ago)

    # Inquiries with TTL 0 ("disabled") - must never be collected.
    disabled_inquiry_count = 3
    for _ in range(disabled_inquiry_count):
        self._create_inquiry(ttl=0, timestamp=three_minutes_ago)

    # Inquiries whose TTL (15 min) has not elapsed yet - must survive.
    new_inquiry_count = 5
    for _ in range(new_inquiry_count):
        self._create_inquiry(ttl=15, timestamp=three_minutes_ago)

    filters = {"status": action_constants.LIVEACTION_STATUS_PENDING}
    pending = list(ActionExecution.query(**filters))
    self.assertEqual(
        len(pending),
        (old_inquiry_count + new_inquiry_count + disabled_inquiry_count),
    )

    # Start garbage collector
    process = self._start_garbage_collector()

    # Give it some time to perform garbage collection and kill it
    concurrency.sleep(15)
    process.send_signal(signal.SIGKILL)
    self.remove_process(process=process)

    # Expired Inquiries should have been garbage collected
    pending = list(ActionExecution.query(**filters))
    self.assertEqual(len(pending), new_inquiry_count + disabled_inquiry_count)
def test_inquiry_garbage_collection(self):
    """End-to-end check that the GC times out only TTL-expired Inquiries."""
    now = date_utils.get_datetime_utc_now()
    three_minutes_ago = now - datetime.timedelta(minutes=3)

    # Inquiries whose TTL (2 min) has already elapsed - should be collected.
    old_inquiry_count = 15
    for _ in range(old_inquiry_count):
        self._create_inquiry(ttl=2, timestamp=three_minutes_ago)

    # Inquiries with TTL 0 ("disabled") - must never be collected.
    disabled_inquiry_count = 3
    for _ in range(disabled_inquiry_count):
        self._create_inquiry(ttl=0, timestamp=three_minutes_ago)

    # Inquiries whose TTL (15 min) has not elapsed yet - must survive.
    new_inquiry_count = 5
    for _ in range(new_inquiry_count):
        self._create_inquiry(ttl=15, timestamp=three_minutes_ago)

    filters = {
        'status': action_constants.LIVEACTION_STATUS_PENDING
    }
    pending = list(ActionExecution.query(**filters))
    self.assertEqual(len(pending),
                     (old_inquiry_count + new_inquiry_count + disabled_inquiry_count))

    # Start garbage collector
    process = self._start_garbage_collector()

    # Give it some time to perform garbage collection and kill it
    eventlet.sleep(15)
    process.send_signal(signal.SIGKILL)
    self.remove_process(process=process)

    # Expired Inquiries should have been garbage collected
    pending = list(ActionExecution.query(**filters))
    self.assertEqual(len(pending), new_inquiry_count + disabled_inquiry_count)
def get_one(
    self,
    id,
    requester_user,
    exclude_attributes=None,
    include_attributes=None,
    show_secrets=False,
    max_result_size=None,
):
    """
    Retrieve a single execution.

    Handles requests:
        GET /executions/<id>[?exclude_attributes=result,trigger_instance]

    :param exclude_attributes: List of attributes to exclude from the object.
    :type exclude_attributes: ``list``
    """
    exclude_fields = self._validate_exclude_fields(exclude_fields=exclude_attributes)
    include_fields = self._validate_include_fields(include_fields=include_attributes)

    from_model_kwargs = {
        "mask_secrets": self._get_mask_secrets(requester_user, show_secrets=show_secrets)
    }

    max_result_size = self._validate_max_result_size(max_result_size=max_result_size)

    # "last" is a virtual id resolving to the newest execution; fetch only the
    # id field to keep the lookup cheap.
    if id == "last":
        newest = ActionExecution.query().order_by("-id").limit(1).only("id").first()
        if not newest:
            raise ValueError("No executions found in the database")
        id = str(newest.id)

    return self._get_one_by_id(
        id=id,
        exclude_fields=exclude_fields,
        include_fields=include_fields,
        requester_user=requester_user,
        from_model_kwargs=from_model_kwargs,
        permission_type=PermissionType.EXECUTION_VIEW,
        get_by_id_kwargs={"max_result_size": max_result_size},
    )
def is_children_active(liveaction_id):
    """
    Return True when the execution for the given liveaction is a workflow with
    at least one child execution still active (not completed and not paused).
    """
    execution_db = ActionExecution.get(liveaction__id=str(liveaction_id))

    # Non-workflow runners have no children by definition.
    if execution_db.runner['name'] not in action_constants.WORKFLOW_RUNNER_TYPES:
        return False

    children_execution_dbs = ActionExecution.query(parent=str(execution_db.id))

    inactive_statuses = (action_constants.LIVEACTION_COMPLETED_STATES +
                         [action_constants.LIVEACTION_STATUS_PAUSED])

    # Active iff any child sits outside the inactive status set.
    return any(child_exec_db.status not in inactive_statuses
               for child_exec_db in children_execution_dbs)
def is_children_active(liveaction_id):
    """
    Return True when the execution for the given liveaction is a workflow with
    at least one child execution still active (not completed, paused, or
    pending).
    """
    execution_db = ActionExecution.get(liveaction__id=str(liveaction_id))

    # Non-workflow runners have no children by definition.
    if execution_db.runner['name'] not in action_constants.WORKFLOW_RUNNER_TYPES:
        return False

    children_execution_dbs = ActionExecution.query(parent=str(execution_db.id))

    inactive_statuses = (
        action_constants.LIVEACTION_COMPLETED_STATES +
        [action_constants.LIVEACTION_STATUS_PAUSED,
         action_constants.LIVEACTION_STATUS_PENDING]
    )

    # Active iff any child sits outside the inactive status set.
    return any(child_exec_db.status not in inactive_statuses
               for child_exec_db in children_execution_dbs)
def get_one(self, id, output_type='all', requester_user=None):
    """
    Stream the output of a single execution as server-sent events
    (text/event-stream): first all already-persisted output records, then any
    new output published while the execution is still running.
    """
    # Special case for id == "last": resolve to the most recent execution.
    if id == 'last':
        execution_db = ActionExecution.query().order_by('-id').limit(1).first()
        if not execution_db:
            raise ValueError('No executions found in the database')
        id = str(execution_db.id)

    execution_db = self._get_one_by_id(
        id=id,
        requester_user=requester_user,
        permission_type=PermissionType.EXECUTION_VIEW)
    execution_id = str(execution_db.id)

    # Optionally restrict the stream to one output type (e.g. stdout).
    query_filters = {}

    if output_type and output_type != 'all':
        query_filters['output_type'] = output_type

    def format_output_object(output_db_or_api):
        # Serialize one output record (DB model or API object) into an SSE
        # "event: ...\ndata: ...\n\n" frame.
        if isinstance(output_db_or_api, ActionExecutionOutputDB):
            data = ActionExecutionOutputAPI.from_model(output_db_or_api)
        elif isinstance(output_db_or_api, ActionExecutionOutputAPI):
            data = output_db_or_api
        else:
            raise ValueError('Unsupported format: %s' % (type(output_db_or_api)))

        event = 'st2.execution.output__create'
        result = 'event: %s\ndata: %s\n\n' % (
            event, json_encode(data, indent=None))
        return result

    def existing_output_iter():
        # Consume and return all of the existing lines
        output_dbs = ActionExecutionOutput.query(execution_id=execution_id,
                                                 **query_filters)

        # Note: We return all at once instead of yield line by line to avoid multiple socket
        # writes and to achieve better performance
        output = [
            format_output_object(output_db) for output_db in output_dbs
        ]
        output = ''.join(output)
        yield six.binary_type(output.encode('utf-8'))

    def new_output_iter():
        def noop_gen():
            # Single NO_MORE_DATA_EVENT frame closing the stream immediately.
            yield six.binary_type(NO_MORE_DATA_EVENT.encode('utf-8'))

        # Bail out if execution has already completed / been paused
        if execution_db.status in self.CLOSE_STREAM_LIVEACTION_STATES:
            return noop_gen()

        # Wait for and return any new line which may come in
        execution_ids = [execution_id]
        listener = get_listener(name='execution_output')  # pylint: disable=no-member
        gen = listener.generator(execution_ids=execution_ids)

        def format(gen):
            for pack in gen:
                if not pack:
                    continue
                else:
                    (_, model_api) = pack

                    # Note: gunicorn wsgi handler expects bytes, not unicode
                    # pylint: disable=no-member
                    if isinstance(model_api, ActionExecutionOutputAPI):
                        # Skip records of a different output type when filtering.
                        if output_type and output_type != 'all' and \
                                model_api.output_type != output_type:
                            continue

                        output = format_output_object(model_api).encode(
                            'utf-8')
                        yield six.binary_type(output)
                    elif isinstance(model_api, ActionExecutionAPI):
                        # Execution reached a terminal/paused state: emit the
                        # closing event and stop streaming.
                        if model_api.status in self.CLOSE_STREAM_LIVEACTION_STATES:
                            yield six.binary_type(
                                NO_MORE_DATA_EVENT.encode('utf-8'))
                            break
                    else:
                        LOG.debug('Unrecognized message type: %s' % (model_api))

        gen = format(gen)
        return gen

    def make_response():
        # Persisted output first, then the live tail.
        app_iter = itertools.chain(existing_output_iter(), new_output_iter())
        res = Response(content_type='text/event-stream', app_iter=app_iter)
        return res

    res = make_response()
    return res
def test_get_one_id_last_no_executions_in_the_database(self):
    """Requesting output for id "last" must return 400 when no executions exist."""
    # Ensure the executions collection is empty before issuing the request.
    ActionExecution.query().delete()

    response = self.app.get('/v1/executions/last/output', expect_errors=True)

    self.assertEqual(response.status_int, http_client.BAD_REQUEST)
    expected_fault = 'No executions found in the database'
    self.assertEqual(response.json['faultstring'], expected_fault)
def purge_executions(logger, timestamp, action_ref=None, purge_incomplete=False):
    """
    Delete execution and liveaction models older than ``timestamp``.

    :param timestamp: Executions older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``

    :param action_ref: Only delete executions for the provided actions.
    :type action_ref: ``str``

    :param purge_incomplete: True to also delete executions which are not in a done state.
    :type purge_incomplete: ``bool``

    :raises ValueError: If no timestamp is provided.
    :raises InvalidQueryError: If the delete query itself is malformed.
    """
    if not timestamp:
        raise ValueError("Specify a valid timestamp to purge.")

    logger.info("Purging executions older than timestamp: %s" %
                timestamp.strftime("%Y-%m-%dT%H:%M:%S.%fZ"))

    filters = {}

    if purge_incomplete:
        # Age is judged on start time alone; status does not matter.
        filters["start_timestamp__lt"] = timestamp
    else:
        # Only purge fully completed executions.
        filters["end_timestamp__lt"] = timestamp
        filters["start_timestamp__lt"] = timestamp
        filters["status"] = {"$in": DONE_STATES}

    exec_filters = copy.copy(filters)
    if action_ref:
        exec_filters["action__ref"] = action_ref

    liveaction_filters = copy.deepcopy(filters)
    if action_ref:
        liveaction_filters["action"] = action_ref

    try:
        deleted_count = ActionExecution.delete_by_query(**exec_filters)
    except InvalidQueryError as e:
        # Fixed: the implicit string concatenation produced
        # "...: %sPlease contact support." with no separator.
        msg = ("Bad query (%s) used to delete execution instances: %s "
               "Please contact support." % (exec_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        # Fixed: a bare "except:" also swallowed SystemExit / KeyboardInterrupt.
        logger.exception("Deletion of execution models failed for query with filters: %s.",
                         exec_filters)
    else:
        logger.info("Deleted %s action execution objects" % (deleted_count))

    try:
        deleted_count = LiveAction.delete_by_query(**liveaction_filters)
    except InvalidQueryError as e:
        msg = ("Bad query (%s) used to delete liveaction instances: %s "
               "Please contact support." % (liveaction_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        logger.exception("Deletion of liveaction models failed for query with filters: %s.",
                         liveaction_filters)
    else:
        logger.info("Deleted %s liveaction objects" % (deleted_count))

    # Anything still matching the filters was not deleted ("zombies").
    zombie_execution_instances = len(ActionExecution.query(**exec_filters))
    zombie_liveaction_instances = len(LiveAction.query(**liveaction_filters))

    if (zombie_execution_instances > 0) or (zombie_liveaction_instances > 0):
        logger.error("Zombie execution instances left: %d.", zombie_execution_instances)
        logger.error("Zombie liveaction instances left: %s.", zombie_liveaction_instances)

    # Print stats
    logger.info("All execution models older than timestamp %s were deleted.", timestamp)
def get_one(self, id, output_type='all', requester_user=None):
    """
    Stream the output of a single execution as server-sent events
    (text/event-stream): first all already-persisted output records, then any
    new output published while the execution is still running.
    """
    # Special case for id == "last": resolve to the most recent execution.
    if id == 'last':
        execution_db = ActionExecution.query().order_by('-id').limit(1).first()
        if not execution_db:
            raise ValueError('No executions found in the database')
        id = str(execution_db.id)

    execution_db = self._get_one_by_id(id=id, requester_user=requester_user,
                                       permission_type=PermissionType.EXECUTION_VIEW)
    execution_id = str(execution_db.id)

    # Optionally restrict the stream to one output type (e.g. stdout).
    query_filters = {}

    if output_type and output_type != 'all':
        query_filters['output_type'] = output_type

    def format_output_object(output_db_or_api):
        # Serialize one output record (DB model or API object) into an SSE
        # "event: ...\ndata: ...\n\n" frame.
        if isinstance(output_db_or_api, ActionExecutionOutputDB):
            data = ActionExecutionOutputAPI.from_model(output_db_or_api)
        elif isinstance(output_db_or_api, ActionExecutionOutputAPI):
            data = output_db_or_api
        else:
            raise ValueError('Unsupported format: %s' % (type(output_db_or_api)))

        event = 'st2.execution.output__create'
        result = 'event: %s\ndata: %s\n\n' % (event, json_encode(data, indent=None))
        return result

    def existing_output_iter():
        # Consume and return all of the existing lines
        output_dbs = ActionExecutionOutput.query(execution_id=execution_id, **query_filters)

        # Note: We return all at once instead of yield line by line to avoid multiple socket
        # writes and to achieve better performance
        output = [format_output_object(output_db) for output_db in output_dbs]
        output = ''.join(output)
        yield six.binary_type(output.encode('utf-8'))

    def new_output_iter():
        def noop_gen():
            # Single NO_MORE_DATA_EVENT frame closing the stream immediately.
            yield six.binary_type(NO_MORE_DATA_EVENT.encode('utf-8'))

        # Bail out if execution has already completed / been paused
        if execution_db.status in self.CLOSE_STREAM_LIVEACTION_STATES:
            return noop_gen()

        # Wait for and return any new line which may come in
        execution_ids = [execution_id]
        listener = get_listener(name='execution_output')  # pylint: disable=no-member
        gen = listener.generator(execution_ids=execution_ids)

        def format(gen):
            for pack in gen:
                if not pack:
                    continue
                else:
                    (_, model_api) = pack

                    # Note: gunicorn wsgi handler expects bytes, not unicode
                    # pylint: disable=no-member
                    if isinstance(model_api, ActionExecutionOutputAPI):
                        # Skip records of a different output type when filtering.
                        if output_type and output_type != 'all' and \
                                model_api.output_type != output_type:
                            continue

                        output = format_output_object(model_api).encode('utf-8')
                        yield six.binary_type(output)
                    elif isinstance(model_api, ActionExecutionAPI):
                        # Execution reached a terminal/paused state: emit the
                        # closing event and stop streaming.
                        if model_api.status in self.CLOSE_STREAM_LIVEACTION_STATES:
                            yield six.binary_type(NO_MORE_DATA_EVENT.encode('utf-8'))
                            break
                    else:
                        LOG.debug('Unrecognized message type: %s' % (model_api))

        gen = format(gen)
        return gen

    def make_response():
        # Persisted output first, then the live tail.
        app_iter = itertools.chain(existing_output_iter(), new_output_iter())
        res = Response(content_type='text/event-stream', app_iter=app_iter)
        return res

    res = make_response()
    return res
def _append_view_properties(self, rule_enforcement_apis):
    """
    Append the corresponding execution (if available) and trigger instance object
    properties to each provided rule enforcement API object.
    """
    # Gather the ids referenced by the enforcement objects so we can fetch the
    # related documents in two bulk queries instead of one query per object.
    trigger_instance_ids = set()
    execution_ids = []

    for enforcement in rule_enforcement_apis:
        trigger_instance_id = enforcement.get('trigger_instance_id', None)
        execution_id = enforcement.get('execution_id', None)

        if trigger_instance_id:
            trigger_instance_ids.add(str(trigger_instance_id))

        if execution_id:
            execution_ids.append(execution_id)

    # 1. Retrieve corresponding execution objects
    # NOTE: Executions contain a lot of field and could contain a lot of data so we only
    # retrieve fields we need
    only_fields = [
        'id',
        'action.ref',
        'action.parameters',
        'runner.name',
        'runner.runner_parameters',
        'parameters',
        'status'
    ]
    execution_dbs = ActionExecution.query(id__in=execution_ids,
                                          only_fields=only_fields)
    execution_dbs_by_id = {str(db.id): db for db in execution_dbs}

    # 2. Retrieve corresponding trigger instance objects
    trigger_instance_dbs = TriggerInstance.query(id__in=list(trigger_instance_ids))
    trigger_instance_dbs_by_id = {str(db.id): db for db in trigger_instance_dbs}

    # Amend rule enforcement objects with additional data
    for enforcement in rule_enforcement_apis:
        enforcement['trigger_instance'] = {}
        enforcement['execution'] = {}

        trigger_instance_db = trigger_instance_dbs_by_id.get(
            enforcement.get('trigger_instance_id', None), None)
        execution_db = execution_dbs_by_id.get(
            enforcement.get('execution_id', None), None)

        if trigger_instance_db:
            enforcement['trigger_instance'] = TriggerInstanceAPI.from_model(trigger_instance_db)

        if execution_db:
            enforcement['execution'] = ActionExecutionAPI.from_model(execution_db)

    return rule_enforcement_apis
def purge_executions(logger, timestamp, action_ref=None, purge_incomplete=False):
    """
    Purge action executions and corresponding live action, execution output objects.

    :param logger: Logger instance used for progress and error reporting.
    :param timestamp: Executions older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``

    :param action_ref: Only delete executions for the provided action.
    :type action_ref: ``str``

    :param purge_incomplete: True to also delete executions which are not in a done state.
    :type purge_incomplete: ``bool``

    :raises ValueError: If no timestamp is provided.
    :raises InvalidQueryError: If one of the delete queries is malformed.
    """
    if not timestamp:
        raise ValueError('Specify a valid timestamp to purge.')

    logger.info('Purging executions older than timestamp: %s' %
                timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    filters = {}

    if purge_incomplete:
        filters['start_timestamp__lt'] = timestamp
    else:
        # Only purge executions which have finished (reached a done state)
        filters['end_timestamp__lt'] = timestamp
        filters['start_timestamp__lt'] = timestamp
        filters['status'] = {'$in': DONE_STATES}

    exec_filters = copy.copy(filters)
    if action_ref:
        exec_filters['action__ref'] = action_ref

    liveaction_filters = copy.deepcopy(filters)
    if action_ref:
        liveaction_filters['action'] = action_ref

    to_delete_execution_dbs = []

    # 1. Delete ActionExecutionDB objects
    try:
        # Note: We call list() on the query set object because it's lazily evaluated otherwise.
        # The ids are captured before deletion so the related output objects can be purged below.
        to_delete_execution_dbs = list(ActionExecution.query(only_fields=['id'],
                                                             no_dereference=True,
                                                             **exec_filters))
        deleted_count = ActionExecution.delete_by_query(**exec_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete execution instances: %s '
               'Please contact support.' % (exec_filters, six.text_type(e)))
        raise InvalidQueryError(msg)
    except Exception:
        # Intentionally best-effort: log and continue with the remaining purge steps.
        # "except Exception" (instead of bare "except:") avoids swallowing
        # SystemExit / KeyboardInterrupt.
        logger.exception('Deletion of execution models failed for query with filters: %s.',
                         exec_filters)
    else:
        logger.info('Deleted %s action execution objects' % (deleted_count))

    # 2. Delete LiveActionDB objects
    try:
        deleted_count = LiveAction.delete_by_query(**liveaction_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete liveaction instances: %s '
               'Please contact support.' % (liveaction_filters, six.text_type(e)))
        raise InvalidQueryError(msg)
    except Exception:
        logger.exception('Deletion of liveaction models failed for query with filters: %s.',
                         liveaction_filters)
    else:
        logger.info('Deleted %s liveaction objects' % (deleted_count))

    # 3. Delete ActionExecutionOutputDB objects
    to_delete_execution_ids = [str(execution_db.id) for execution_db in to_delete_execution_dbs]

    output_dbs_filters = {}
    output_dbs_filters['execution_id'] = {'$in': to_delete_execution_ids}

    try:
        deleted_count = ActionExecutionOutput.delete_by_query(**output_dbs_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete execution output instances: %s '
               'Please contact support.' % (output_dbs_filters, six.text_type(e)))
        raise InvalidQueryError(msg)
    except Exception:
        logger.exception('Deletion of execution output models failed for query with filters: %s.',
                         output_dbs_filters)
    else:
        logger.info('Deleted %s execution output objects' % (deleted_count))

    # Report any objects which matched the filters but survived deletion
    zombie_execution_instances = len(ActionExecution.query(only_fields=['id'],
                                                           no_dereference=True,
                                                           **exec_filters))
    zombie_liveaction_instances = len(LiveAction.query(only_fields=['id'],
                                                       no_dereference=True,
                                                       **liveaction_filters))

    if (zombie_execution_instances > 0) or (zombie_liveaction_instances > 0):
        logger.error('Zombie execution instances left: %d.', zombie_execution_instances)
        logger.error('Zombie liveaction instances left: %s.', zombie_liveaction_instances)

    # Print stats
    logger.info('All execution models older than timestamp %s were deleted.', timestamp)
def test_migrate_executions(self):
    """
    End to end test for the migration of execution / liveaction ``result`` fields from the
    old EscapedDynamicField format to the new binary (binData) format.
    """
    # Allow subclassing of the production models so we can re-declare "result" with the
    # old field type below
    ActionExecutionDB._meta["allow_inheritance"] = True
    LiveActionDB._meta["allow_inheritance"] = True

    # Model classes which use the old (pre-migration) field type for "result"
    class ActionExecutionDB_OldFieldType(ActionExecutionDB):
        result = stormbase.EscapedDynamicField(default={})

    class LiveActionDB_OldFieldType(LiveActionDB):
        result = stormbase.EscapedDynamicField(default={})

    # Sanity check - database starts out empty
    execution_dbs = ActionExecution.query(
        __raw__={"result": {
            "$not": {
                "$type": "binData",
            },
        }})
    self.assertEqual(len(execution_dbs), 0)
    execution_dbs = ActionExecution.query(__raw__={
        "result": {
            "$type": "object",
        },
    })
    self.assertEqual(len(execution_dbs), 0)

    # 1. Insert data in old format
    liveaction_1_db = LiveActionDB_OldFieldType()
    liveaction_1_db.action = "foo.bar"
    liveaction_1_db.status = action_constants.LIVEACTION_STATUS_FAILED
    liveaction_1_db.result = MOCK_RESULT_1
    liveaction_1_db.start_timestamp = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc)
    liveaction_1_db = LiveAction.add_or_update(liveaction_1_db, publish=False)

    execution_1_db = ActionExecutionDB_OldFieldType()
    execution_1_db.action = {"a": 1}
    execution_1_db.runner = {"a": 1}
    execution_1_db.liveaction = {"id": liveaction_1_db.id}
    execution_1_db.status = action_constants.LIVEACTION_STATUS_FAILED
    execution_1_db.result = MOCK_RESULT_1
    execution_1_db.start_timestamp = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc)
    execution_1_db = ActionExecution.add_or_update(execution_1_db, publish=False)

    # This execution is not in a final state yet so it should not be migrated
    liveaction_2_db = LiveActionDB_OldFieldType()
    liveaction_2_db.action = "foo.bar2"
    liveaction_2_db.status = action_constants.LIVEACTION_STATUS_RUNNING
    liveaction_2_db.result = MOCK_RESULT_2
    liveaction_2_db.start_timestamp = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc)
    liveaction_2_db = LiveAction.add_or_update(liveaction_2_db, publish=False)

    execution_2_db = ActionExecutionDB_OldFieldType()
    execution_2_db.action = {"a": 2}
    execution_2_db.runner = {"a": 2}
    execution_2_db.liveaction = {"id": liveaction_2_db.id}
    execution_2_db.status = action_constants.LIVEACTION_STATUS_RUNNING
    execution_2_db.result = MOCK_RESULT_2
    execution_2_db.start_timestamp = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc)
    execution_2_db = ActionExecution.add_or_update(execution_2_db, publish=False)

    # This object is older than the default threshold so it should not be migrated
    execution_3_db = ActionExecutionDB_OldFieldType()
    execution_3_db.action = {"a": 2}
    execution_3_db.runner = {"a": 2}
    execution_3_db.liveaction = {"id": liveaction_2_db.id}
    execution_3_db.status = action_constants.LIVEACTION_STATUS_SUCCEEDED
    execution_3_db.result = MOCK_RESULT_1
    execution_3_db.start_timestamp = datetime.datetime.utcfromtimestamp(
        0).replace(tzinfo=datetime.timezone.utc)
    execution_3_db = ActionExecution.add_or_update(execution_3_db, publish=False)

    # Verify data has been inserted in old format
    execution_dbs = ActionExecution.query(__raw__={
        "result": {
            "$type": "object",
        },
    })
    self.assertEqual(len(execution_dbs), 3)
    execution_dbs = ActionExecution.query(
        __raw__={"result": {
            "$not": {
                "$type": "binData",
            },
        }})
    self.assertEqual(len(execution_dbs), 3)
    execution_dbs = ActionExecution.query(__raw__={
        "result": {
            "$type": "binData",
        },
    })
    self.assertEqual(len(execution_dbs), 0)

    liveaction_dbs = LiveAction.query(__raw__={
        "result": {
            "$type": "object",
        },
    })
    self.assertEqual(len(liveaction_dbs), 2)
    liveaction_dbs = LiveAction.query(
        __raw__={"result": {
            "$not": {
                "$type": "binData",
            },
        }})
    self.assertEqual(len(liveaction_dbs), 2)
    liveaction_dbs = LiveAction.query(__raw__={
        "result": {
            "$type": "binData",
        },
    })
    self.assertEqual(len(liveaction_dbs), 0)

    # Update inserted documents and remove special _cls field added by mongoengine. We need to
    # do that here due to how mongoengine works with subclasses.
    ActionExecution.query(__raw__={
        "result": {
            "$type": "object",
        },
    }).update(set___cls="ActionExecutionDB")
    LiveAction.query(__raw__={
        "result": {
            "$type": "object",
        },
    }).update(set___cls="LiveActionDB")

    # 2. Run migration
    start_dt = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc) - datetime.timedelta(hours=2)
    end_dt = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc)
    migration_module.migrate_executions(start_dt=start_dt, end_dt=end_dt)

    # 3. Verify data has been migrated - only 1 item should have been migrated since it's in a
    # completed state
    execution_dbs = ActionExecution.query(__raw__={
        "result": {
            "$type": "object",
        },
    })
    self.assertEqual(len(execution_dbs), 2)
    execution_dbs = ActionExecution.query(__raw__={
        "result": {
            "$type": "binData",
        },
    })
    self.assertEqual(len(execution_dbs), 1)

    # Retrieved values should deserialize back to the original results regardless of the
    # underlying storage format
    execution_db_1_retrieved = ActionExecution.get_by_id(execution_1_db.id)
    self.assertEqual(execution_db_1_retrieved.result, MOCK_RESULT_1)

    execution_db_2_retrieved = ActionExecution.get_by_id(execution_2_db.id)
    self.assertEqual(execution_db_2_retrieved.result, MOCK_RESULT_2)

    liveaction_db_1_retrieved = LiveAction.get_by_id(liveaction_1_db.id)
    self.assertEqual(liveaction_db_1_retrieved.result, MOCK_RESULT_1)

    liveaction_db_2_retrieved = LiveAction.get_by_id(liveaction_2_db.id)
    self.assertEqual(liveaction_db_2_retrieved.result, MOCK_RESULT_2)
def purge_executions(logger, timestamp, action_ref=None, purge_incomplete=False):
    """
    Purge action execution and corresponding live action objects.

    :param logger: Logger instance used for progress and error reporting.
    :param timestamp: Executions older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``

    :param action_ref: Only delete executions for the provided action.
    :type action_ref: ``str``

    :param purge_incomplete: True to also delete executions which are not in a done state.
    :type purge_incomplete: ``bool``

    :raises ValueError: If no timestamp is provided.
    :raises InvalidQueryError: If one of the delete queries is malformed.
    """
    if not timestamp:
        raise ValueError('Specify a valid timestamp to purge.')

    logger.info('Purging executions older than timestamp: %s' %
                timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    filters = {}

    if purge_incomplete:
        filters['start_timestamp__lt'] = timestamp
    else:
        # Only purge executions which have finished (reached a done state)
        filters['end_timestamp__lt'] = timestamp
        filters['start_timestamp__lt'] = timestamp
        filters['status'] = {'$in': DONE_STATES}

    exec_filters = copy.copy(filters)
    if action_ref:
        exec_filters['action__ref'] = action_ref

    liveaction_filters = copy.deepcopy(filters)
    if action_ref:
        liveaction_filters['action'] = action_ref

    try:
        deleted_count = ActionExecution.delete_by_query(**exec_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete execution instances: %s '
               'Please contact support.' % (exec_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        # Intentionally best-effort: log and continue with the remaining purge steps.
        # "except Exception" (instead of bare "except:") avoids swallowing
        # SystemExit / KeyboardInterrupt.
        logger.exception(
            'Deletion of execution models failed for query with filters: %s.',
            exec_filters)
    else:
        logger.info('Deleted %s action execution objects' % (deleted_count))

    try:
        deleted_count = LiveAction.delete_by_query(**liveaction_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete liveaction instances: %s '
               'Please contact support.' % (liveaction_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        logger.exception(
            'Deletion of liveaction models failed for query with filters: %s.',
            liveaction_filters)
    else:
        logger.info('Deleted %s liveaction objects' % (deleted_count))

    # Report any objects which matched the filters but survived deletion
    zombie_execution_instances = len(ActionExecution.query(**exec_filters))
    zombie_liveaction_instances = len(LiveAction.query(**liveaction_filters))

    if (zombie_execution_instances > 0) or (zombie_liveaction_instances > 0):
        logger.error('Zombie execution instances left: %d.', zombie_execution_instances)
        logger.error('Zombie liveaction instances left: %s.', zombie_liveaction_instances)

    # Print stats
    logger.info('All execution models older than timestamp %s were deleted.', timestamp)
def get_one(self, id, output_type=None, requester_user=None):
    """
    Stream raw output data for the provided execution as a plain-text response.

    :param id: Execution id or the special string ``last`` which resolves to the most
               recently created execution.
    :param output_type: Output type to filter on (e.g. stdout / stderr); ``all`` or a
                        falsy value means no filtering.
    :param requester_user: User performing the request (used for the permission check).
    :rtype: ``Response``
    :raises ValueError: If ``id == 'last'`` and there are no executions in the database.
    """
    # Special case for id == "last" - resolve it to the newest execution id
    if id == 'last':
        execution_db = ActionExecution.query().order_by('-id').limit(1).first()

        # Bug fix: previously an empty database resulted in an AttributeError on
        # ``execution_db.id`` below; raise an explicit, catchable error instead
        if not execution_db:
            raise ValueError('No executions found in the database')

        id = str(execution_db.id)

    # Bug fix: the permission check is now performed for the resolved "last" id as well -
    # previously the "last" branch skipped _get_one_by_id (and therefore the RBAC check)
    execution_db = self._get_one_by_id(
        id=id,
        requester_user=requester_user,
        permission_type=PermissionType.EXECUTION_VIEW)

    execution_id = str(execution_db.id)

    query_filters = {}
    if output_type and output_type != 'all':
        query_filters['output_type'] = output_type

    def existing_output_iter():
        # Consume and return all of the existing lines
        # pylint: disable=no-member
        output_dbs = ActionExecutionOutput.query(execution_id=execution_id, **query_filters)

        # Note: We return all at once instead of yield line by line to avoid multiple socket
        # writes and to achieve better performance
        output = ''.join([output_db.data for output_db in output_dbs])
        yield six.binary_type(output.encode('utf-8'))

    def new_output_iter():
        def noop_gen():
            # Bug fix: ``six.binary_type('')`` raises TypeError under Python 3 (bytes()
            # requires an encoding for str arguments); a bytes literal works on both 2 and 3
            yield b''

        # Bail out if execution has already completed / been paused
        if execution_db.status in self.CLOSE_STREAM_LIVEACTION_STATES:
            return noop_gen()

        # Wait for and return any new line which may come in
        execution_ids = [execution_id]
        listener = get_listener(name='execution_output')  # pylint: disable=no-member
        gen = listener.generator(execution_ids=execution_ids)

        def format(gen):
            for pack in gen:
                if not pack:
                    continue
                else:
                    (_, model_api) = pack

                    # Note: gunicorn wsgi handler expect bytes, not unicode
                    # pylint: disable=no-member
                    if isinstance(model_api, ActionExecutionOutputAPI):
                        if output_type and model_api.output_type != output_type:
                            continue

                        yield six.binary_type(
                            model_api.data.encode('utf-8'))
                    elif isinstance(model_api, ActionExecutionAPI):
                        # Execution reached a terminal / paused state - close the stream
                        if model_api.status in self.CLOSE_STREAM_LIVEACTION_STATES:
                            yield b''
                            break
                    else:
                        LOG.debug('Unrecognized message type: %s' % (model_api))

        gen = format(gen)
        return gen

    def make_response():
        # Existing output first, then live output as it arrives
        app_iter = itertools.chain(existing_output_iter(), new_output_iter())
        res = Response(content_type='text/plain', app_iter=app_iter)
        return res

    res = make_response()
    return res
def purge_executions(logger, timestamp, action_ref=None, purge_incomplete=False):
    """
    Purge action execution and corresponding live action objects.

    :param logger: Logger instance used for progress and error reporting.
    :param timestamp: Executions older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``

    :param action_ref: Only delete executions for the provided action.
    :type action_ref: ``str``

    :param purge_incomplete: True to also delete executions which are not in a done state.
    :type purge_incomplete: ``bool``

    :raises ValueError: If no timestamp is provided.
    :raises InvalidQueryError: If one of the delete queries is malformed.
    """
    if not timestamp:
        raise ValueError('Specify a valid timestamp to purge.')

    logger.info('Purging executions older than timestamp: %s' %
                timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    filters = {}

    if purge_incomplete:
        filters['start_timestamp__lt'] = timestamp
    else:
        # Only purge executions which have finished (reached a done state)
        filters['end_timestamp__lt'] = timestamp
        filters['start_timestamp__lt'] = timestamp
        filters['status'] = {'$in': DONE_STATES}

    exec_filters = copy.copy(filters)
    if action_ref:
        exec_filters['action__ref'] = action_ref

    liveaction_filters = copy.deepcopy(filters)
    if action_ref:
        liveaction_filters['action'] = action_ref

    # TODO: Update this code to return statistics on deleted objects once we
    # upgrade to newer version of MongoDB where delete_by_query actually returns
    # some data
    try:
        ActionExecution.delete_by_query(**exec_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete execution instances: %s '
               'Please contact support.' % (exec_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        # Intentionally best-effort: log and continue with the remaining purge steps.
        # "except Exception" (instead of bare "except:") avoids swallowing
        # SystemExit / KeyboardInterrupt.
        logger.exception('Deletion of execution models failed for query with filters: %s.',
                         exec_filters)

    try:
        LiveAction.delete_by_query(**liveaction_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete liveaction instances: %s '
               'Please contact support.' % (liveaction_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        logger.exception('Deletion of liveaction models failed for query with filters: %s.',
                         liveaction_filters)

    # Report any objects which matched the filters but survived deletion
    zombie_execution_instances = len(ActionExecution.query(**exec_filters))
    zombie_liveaction_instances = len(LiveAction.query(**liveaction_filters))

    if (zombie_execution_instances > 0) or (zombie_liveaction_instances > 0):
        logger.error('Zombie execution instances left: %d.', zombie_execution_instances)
        logger.error('Zombie liveaction instances left: %s.', zombie_liveaction_instances)

    # Print stats
    logger.info('All execution models older than timestamp %s were deleted.', timestamp)
def purge_executions(logger, timestamp, action_ref=None, purge_incomplete=False):
    """
    Purge action executions and corresponding live action, execution output objects.

    :param logger: Logger instance used for progress and error reporting.
    :param timestamp: Executions older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``

    :param action_ref: Only delete executions for the provided action.
    :type action_ref: ``str``

    :param purge_incomplete: True to also delete executions which are not in a done state.
    :type purge_incomplete: ``bool``

    :raises ValueError: If no timestamp is provided.
    :raises InvalidQueryError: If one of the delete queries is malformed.
    """
    if not timestamp:
        raise ValueError('Specify a valid timestamp to purge.')

    logger.info('Purging executions older than timestamp: %s' %
                timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    filters = {}

    if purge_incomplete:
        filters['start_timestamp__lt'] = timestamp
    else:
        # Only purge executions which have finished (reached a done state)
        filters['end_timestamp__lt'] = timestamp
        filters['start_timestamp__lt'] = timestamp
        filters['status'] = {'$in': DONE_STATES}

    exec_filters = copy.copy(filters)
    if action_ref:
        exec_filters['action__ref'] = action_ref

    liveaction_filters = copy.deepcopy(filters)
    if action_ref:
        liveaction_filters['action'] = action_ref

    to_delete_execution_dbs = []

    # 1. Delete ActionExecutionDB objects
    try:
        # Note: We call list() on the query set object because it's lazily evaluated otherwise.
        # The ids are captured before deletion so the related output objects can be purged below.
        to_delete_execution_dbs = list(ActionExecution.query(only_fields=['id'],
                                                             no_dereference=True,
                                                             **exec_filters))
        deleted_count = ActionExecution.delete_by_query(**exec_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete execution instances: %s '
               'Please contact support.' % (exec_filters, six.text_type(e)))
        raise InvalidQueryError(msg)
    except Exception:
        # Intentionally best-effort: log and continue with the remaining purge steps.
        # "except Exception" (instead of bare "except:") avoids swallowing
        # SystemExit / KeyboardInterrupt.
        logger.exception('Deletion of execution models failed for query with filters: %s.',
                         exec_filters)
    else:
        logger.info('Deleted %s action execution objects' % (deleted_count))

    # 2. Delete LiveActionDB objects
    try:
        deleted_count = LiveAction.delete_by_query(**liveaction_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete liveaction instances: %s '
               'Please contact support.' % (liveaction_filters, six.text_type(e)))
        raise InvalidQueryError(msg)
    except Exception:
        logger.exception('Deletion of liveaction models failed for query with filters: %s.',
                         liveaction_filters)
    else:
        logger.info('Deleted %s liveaction objects' % (deleted_count))

    # 3. Delete ActionExecutionOutputDB objects
    to_delete_execution_ids = [str(execution_db.id) for execution_db in to_delete_execution_dbs]

    output_dbs_filters = {}
    output_dbs_filters['execution_id'] = {'$in': to_delete_execution_ids}

    try:
        deleted_count = ActionExecutionOutput.delete_by_query(**output_dbs_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete execution output instances: %s '
               'Please contact support.' % (output_dbs_filters, six.text_type(e)))
        raise InvalidQueryError(msg)
    except Exception:
        logger.exception('Deletion of execution output models failed for query with filters: %s.',
                         output_dbs_filters)
    else:
        logger.info('Deleted %s execution output objects' % (deleted_count))

    # Report any objects which matched the filters but survived deletion
    zombie_execution_instances = len(ActionExecution.query(only_fields=['id'],
                                                           no_dereference=True,
                                                           **exec_filters))
    zombie_liveaction_instances = len(LiveAction.query(only_fields=['id'],
                                                       no_dereference=True,
                                                       **liveaction_filters))

    if (zombie_execution_instances > 0) or (zombie_liveaction_instances > 0):
        logger.error('Zombie execution instances left: %d.', zombie_execution_instances)
        logger.error('Zombie liveaction instances left: %s.', zombie_liveaction_instances)

    # Print stats
    logger.info('All execution models older than timestamp %s were deleted.', timestamp)