def test_default_sort(self):
    """The history executions endpoint must return results newest-first."""
    response = self.app.get('/history/executions')
    self.assertEqual(response.status_int, 200)
    self.assertIsInstance(response.json, list)
    newest_ts = response.json[0]['execution']['start_timestamp']
    oldest_ts = response.json[-1]['execution']['start_timestamp']
    # Descending sort: the last entry parses to an earlier datetime.
    self.assertLess(isotime.parse(oldest_ts), isotime.parse(newest_ts))
def purge_executions(timestamp=None, action_ref=None, purge_incomplete=False):
    """Purge execution (and related liveaction) models older than ``timestamp``.

    :param timestamp: Cut-off time; executions older than this are deleted.
    :param action_ref: Optional action reference used to narrow the purge.
    :param purge_incomplete: When True, also purge executions which have not
        reached a terminal state (filter on start time only).
    """
    if not timestamp:
        LOG.error('Specify a valid timestamp to purge.')
        return

    LOG.info('Purging executions older than timestamp: %s' %
             timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    filters = {}
    if action_ref:
        filters['action__ref'] = action_ref

    # NOTE(review): ``timestamp`` appears to be a datetime (strftime above),
    # yet it is passed to isotime.parse() which typically expects an ISO8601
    # string -- confirm isotime.parse accepts datetimes here.
    if purge_incomplete:
        # Purge everything started before the cut-off, regardless of status.
        filters['start_timestamp__lt'] = isotime.parse(timestamp)
    else:
        # Only purge executions that both started and finished before the
        # cut-off and reached a terminal (done) state.
        filters['end_timestamp__lt'] = isotime.parse(timestamp)
        filters['start_timestamp__lt'] = isotime.parse(timestamp)
        filters['status'] = {"$in": DONE_STATES}

    # XXX: Think about paginating this call.
    executions = ActionExecution.query(**filters)
    LOG.info('#### Total number of executions to delete: %d' % len(executions))

    # Purge execution and liveaction models now
    for execution_db in executions:
        _purge_models(execution_db)

    # Print stats
    LOG.info('#### Total execution models deleted: %d' % DELETED_COUNT)
def _process_datetime_range_filters(self, filters, order_by=None):
    """Expand ``start..end`` datetime range filter values into ``__gte`` /
    ``__lte`` query filters and align the sort order with the direction of
    each range.

    Mutates ``filters`` in place; returns ``(filters, order_by_list)``.
    """
    # Only string values containing ".." are treated as datetime ranges.
    ranges = {k: v for k, v in filters.iteritems()
              if type(v) in [str, unicode] and ".." in v}

    order_by_list = copy.deepcopy(order_by) if order_by else []
    for k, v in ranges.iteritems():
        values = v.split("..")
        dt1 = isotime.parse(values[0])
        dt2 = isotime.parse(values[1])
        k__gte = "%s__gte" % k
        k__lte = "%s__lte" % k
        if dt1 < dt2:
            # Ascending range -> ascending sort on this key.
            query = {k__gte: dt1, k__lte: dt2}
            sort_key, reverse_sort_key = k, "-" + k
        else:
            # Descending range -> swap the bounds and sort descending.
            query = {k__gte: dt2, k__lte: dt1}
            sort_key, reverse_sort_key = "-" + k, k
        # Replace the raw range value with the expanded query filters.
        del filters[k]
        filters.update(query)
        if reverse_sort_key in order_by_list:
            # Flip an existing opposite-direction sort key in place.
            idx = order_by_list.index(reverse_sort_key)
            order_by_list.pop(idx)
            order_by_list.insert(idx, sort_key)
        elif sort_key not in order_by_list:
            # Range keys take precedence over pre-existing sort keys.
            order_by_list = [sort_key] + order_by_list
    return filters, order_by_list
def to_model(cls, live_action):
    """Convert a live action API instance into its DB model representation."""
    action = live_action.action

    # Timestamps arrive as ISO8601 strings; parse them only when present.
    raw_start = getattr(live_action, 'start_timestamp', None)
    start_timestamp = isotime.parse(live_action.start_timestamp) if raw_start else None

    raw_end = getattr(live_action, 'end_timestamp', None)
    end_timestamp = isotime.parse(live_action.end_timestamp) if raw_end else None

    status = getattr(live_action, 'status', None)
    parameters = getattr(live_action, 'parameters', dict())
    context = getattr(live_action, 'context', dict())
    callback = getattr(live_action, 'callback', dict())
    result = getattr(live_action, 'result', None)

    raw_notify = getattr(live_action, 'notify', None)
    notify = NotificationsHelper.to_model(live_action.notify) if raw_notify else None

    return cls.model(action=action,
                     start_timestamp=start_timestamp,
                     end_timestamp=end_timestamp,
                     status=status,
                     parameters=parameters,
                     context=context,
                     callback=callback,
                     result=result,
                     notify=notify)
def test_descending_sort(self):
    """Explicit sort_desc=True must return executions newest-first."""
    response = self.app.get('/v1/executions?sort_desc=True')
    self.assertEqual(response.status_int, 200)
    self.assertIsInstance(response.json, list)
    newest_ts = response.json[0]['start_timestamp']
    oldest_ts = response.json[-1]['start_timestamp']
    # Last item must parse to an earlier datetime than the first.
    self.assertLess(isotime.parse(oldest_ts), isotime.parse(newest_ts))
class RuleEnforcementController(resource.ResourceController):
    """REST controller exposing rule enforcement objects (read-only)."""

    model = RuleEnforcementAPI
    access = RuleEnforcement

    # ResourceController attributes
    # Newest enforcements first, tie-broken by rule reference.
    query_options = {'sort': ['-enforced_at', 'rule.ref']}
    supported_filters = SUPPORTED_FILTERS

    # enforced_at* filter values arrive as ISO8601 strings and are parsed
    # into datetimes before being used in DB queries.
    filter_transform_functions = {
        'enforced_at': lambda value: isotime.parse(value=value),
        'enforced_at_gt': lambda value: isotime.parse(value=value),
        'enforced_at_lt': lambda value: isotime.parse(value=value)
    }

    def get_all(self, sort=None, offset=0, limit=None, **raw_filters):
        """Return all rule enforcements matching the provided filters."""
        return super(RuleEnforcementController, self)._get_all(sort=sort,
                                                               offset=offset,
                                                               limit=limit,
                                                               raw_filters=raw_filters)

    def get_one(self, id, requester_user):
        """Return a single rule enforcement by its id (requires VIEW permission)."""
        return super(RuleEnforcementController, self)._get_one_by_id(
            id, requester_user=requester_user,
            permission_type=PermissionType.RULE_ENFORCEMENT_VIEW)
def decorate(*args, **kwargs):
    """Wrapper which expands ``start..end`` datetime range values found in
    keyword arguments into ``__gte``/``__lte`` query kwargs and aligns the
    sort order with the range direction before calling ``func``.
    """
    # Only string kwargs containing ".." are treated as datetime ranges.
    ranges = {
        k: v for k, v in kwargs.iteritems()
        if type(v) in [str, unicode] and '..' in v
    }
    for k, v in ranges.iteritems():
        values = v.split('..')
        dt1 = isotime.parse(values[0])
        dt2 = isotime.parse(values[1])
        order_by_list = kwargs.get('order_by', [])
        k__gte = '%s__gte' % k
        k__lte = '%s__lte' % k
        if dt1 < dt2:
            # Ascending range -> ascending sort on this key.
            query = {k__gte: dt1, k__lte: dt2}
            sort_key, reverse_sort_key = k, '-' + k
        else:
            # Descending range -> swap bounds and sort descending.
            query = {k__gte: dt2, k__lte: dt1}
            sort_key, reverse_sort_key = '-' + k, k
        # Replace the raw range kwarg with the expanded query kwargs.
        del kwargs[k]
        kwargs.update(query)
        if reverse_sort_key in order_by_list:
            # Flip an existing opposite-direction sort key in place; this
            # mutates the list already stored under kwargs['order_by'].
            idx = order_by_list.index(reverse_sort_key)
            order_by_list.pop(idx)
            order_by_list.insert(idx, sort_key)
        elif sort_key not in order_by_list:
            kwargs['order_by'] = [sort_key] + order_by_list
    return func(*args, **kwargs)
def purge_executions(timestamp=None, action_ref=None, purge_incomplete=False):
    """Purge execution and related liveaction models older than ``timestamp``.

    :param timestamp: Cut-off time; executions older than this are deleted.
    :param action_ref: Optional action reference used to narrow the purge.
    :param purge_incomplete: When True, also purge executions which have not
        reached a terminal state (filter on start time only).
    """
    if not timestamp:
        LOG.error("Specify a valid timestamp to purge.")
        return

    LOG.info("Purging executions older than timestamp: %s" %
             timestamp.strftime("%Y-%m-%dT%H:%M:%S.%fZ"))

    filters = {}
    if action_ref:
        filters["action__ref"] = action_ref

    # NOTE(review): ``timestamp`` is a datetime (strftime above) yet is passed
    # to isotime.parse() which normally expects an ISO8601 string -- confirm.
    if purge_incomplete:
        filters["start_timestamp__lt"] = isotime.parse(timestamp)
    else:
        # Only completed executions: started and finished before the cut-off.
        filters["end_timestamp__lt"] = isotime.parse(timestamp)
        filters["start_timestamp__lt"] = isotime.parse(timestamp)
        filters["status"] = {"$in": DONE_STATES}

    # XXX: Think about paginating this call.
    executions = ActionExecution.query(**filters)
    LOG.info("#### Total number of executions to delete: %d" % len(executions))

    # Purge execution and liveaction models now
    for execution_db in executions:
        _purge_models(execution_db)

    # Print stats
    LOG.info("#### Total execution models deleted: %d" % DELETED_COUNT)
class RuleEnforcementController(resource.ResourceController):
    """REST controller exposing rule enforcement objects (read-only),
    using the decorator-based (jsexpose) RBAC style."""

    model = RuleEnforcementAPI
    access = RuleEnforcement

    # ResourceController attributes
    # Newest enforcements first, tie-broken by rule reference.
    query_options = {'sort': ['-enforced_at', 'rule.ref']}
    supported_filters = SUPPORTED_FILTERS

    # enforced_at* filter values arrive as ISO8601 strings and are parsed
    # into datetimes before being used in DB queries.
    filter_transform_functions = {
        'enforced_at': lambda value: isotime.parse(value=value),
        'enforced_at_gt': lambda value: isotime.parse(value=value),
        'enforced_at_lt': lambda value: isotime.parse(value=value)
    }

    @request_user_has_permission(
        permission_type=PermissionType.RULE_ENFORCEMENT_LIST)
    @jsexpose()
    def get_all(self, **kwargs):
        """Return all rule enforcements matching the query parameters."""
        return super(RuleEnforcementController, self)._get_all(**kwargs)

    @request_user_has_resource_db_permission(
        permission_type=PermissionType.RULE_ENFORCEMENT_VIEW)
    @jsexpose(arg_types=[str])
    def get_one(self, ref_or_id):
        """Return a single rule enforcement by reference or id."""
        return super(RuleEnforcementController, self)._get_one(ref_or_id)
def to_model(cls, live_action):
    """Convert a live action API instance (with name/description) into its
    DB model representation."""
    name = getattr(live_action, 'name', None)
    description = getattr(live_action, 'description', None)
    action = live_action.action

    # Timestamps arrive as ISO8601 strings; parse them only when present.
    raw_start = getattr(live_action, 'start_timestamp', None)
    start_timestamp = isotime.parse(live_action.start_timestamp) if raw_start else None

    raw_end = getattr(live_action, 'end_timestamp', None)
    end_timestamp = isotime.parse(live_action.end_timestamp) if raw_end else None

    status = getattr(live_action, 'status', None)
    parameters = getattr(live_action, 'parameters', dict())
    context = getattr(live_action, 'context', dict())
    callback = getattr(live_action, 'callback', dict())
    result = getattr(live_action, 'result', None)

    raw_notify = getattr(live_action, 'notify', None)
    notify = NotificationsHelper.to_model(live_action.notify) if raw_notify else None

    return cls.model(name=name,
                     description=description,
                     action=action,
                     start_timestamp=start_timestamp,
                     end_timestamp=end_timestamp,
                     status=status,
                     parameters=parameters,
                     context=context,
                     callback=callback,
                     result=result,
                     notify=notify)
def _process_datetime_range_filters(self, filters, order_by=None):
    """Expand ``start..end`` datetime range filter values into ``__gte`` /
    ``__lte`` query filters and keep the sort direction consistent with each
    range's direction.

    Mutates ``filters`` in place; returns ``(filters, order_by_list)``.
    """
    # Only string values containing ".." are treated as datetime ranges.
    ranges = {k: v for k, v in filters.iteritems()
              if type(v) in [str, unicode] and '..' in v}

    order_by_list = copy.deepcopy(order_by) if order_by else []
    for k, v in ranges.iteritems():
        values = v.split('..')
        dt1 = isotime.parse(values[0])
        dt2 = isotime.parse(values[1])
        k__gte = '%s__gte' % k
        k__lte = '%s__lte' % k
        if dt1 < dt2:
            # Ascending range -> ascending sort on this key.
            query = {k__gte: dt1, k__lte: dt2}
            sort_key, reverse_sort_key = k, '-' + k
        else:
            # Descending range -> swap the bounds and sort descending.
            query = {k__gte: dt2, k__lte: dt1}
            sort_key, reverse_sort_key = '-' + k, k
        # Replace the raw range value with the expanded query filters.
        del filters[k]
        filters.update(query)
        if reverse_sort_key in order_by_list:
            # Flip an existing opposite-direction sort key in place.
            idx = order_by_list.index(reverse_sort_key)
            order_by_list.pop(idx)
            order_by_list.insert(idx, sort_key)
        elif sort_key not in order_by_list:
            # Range keys take precedence over pre-existing sort keys.
            order_by_list = [sort_key] + order_by_list
    return filters, order_by_list
def test_default_sort(self):
    """The /v1/executions endpoint must sort newest-first by default."""
    response = self.app.get("/v1/executions")
    self.assertEqual(response.status_int, 200)
    self.assertIsInstance(response.json, list)
    newest_ts = response.json[0]["start_timestamp"]
    oldest_ts = response.json[-1]["start_timestamp"]
    # Descending order: last entry parses to an earlier datetime.
    self.assertLess(isotime.parse(oldest_ts), isotime.parse(newest_ts))
def test_update_marker(self):
    """_update_marker must track the maximum end_timestamp seen in a batch
    and persist it via _write_marker_to_db."""
    executions_queue = self.get_queue()
    dumper = Dumper(queue=executions_queue,
                    export_dir='/tmp',
                    batch_size=5,
                    max_files_per_sleep=1,
                    file_prefix='st2-stuff-',
                    file_format='json')
    # Batch 1
    batch = self.execution_apis[0:5]
    new_marker = dumper._update_marker(batch)
    self.assertTrue(new_marker is not None)
    timestamps = [
        isotime.parse(execution.end_timestamp) for execution in batch
    ]
    max_timestamp = max(timestamps)
    # The marker equals the newest end_timestamp in the batch.
    self.assertEqual(new_marker, max_timestamp)
    # Batch 2
    # NOTE(review): this reuses the same slice [0:5] as batch 1 -- presumably
    # intended to verify the marker is stable across an identical batch;
    # confirm a later slice was not intended here.
    batch = self.execution_apis[0:5]
    new_marker = dumper._update_marker(batch)
    timestamps = [
        isotime.parse(execution.end_timestamp) for execution in batch
    ]
    max_timestamp = max(timestamps)
    self.assertEqual(new_marker, max_timestamp)
    # The latest marker value must have been written to the DB (mocked).
    dumper._write_marker_to_db.assert_called_with(new_marker)
def _process_datetime_range_filters(self, filters, order_by=None):
    """Expand ``start..end`` datetime range filter values into ``__gte`` /
    ``__lte`` query filters and align the sort order with the direction of
    each range (Python 2/3 compatible via six).

    Mutates ``filters`` in place; returns ``(filters, order_by_list)``.
    """
    # Only string values containing ".." are treated as datetime ranges.
    ranges = {k: v for k, v in six.iteritems(filters)
              if type(v) in [str, six.text_type] and '..' in v}

    order_by_list = copy.deepcopy(order_by) if order_by else []
    for k, v in six.iteritems(ranges):
        values = v.split('..')
        dt1 = isotime.parse(values[0])
        dt2 = isotime.parse(values[1])
        k__gte = '%s__gte' % k
        k__lte = '%s__lte' % k
        if dt1 < dt2:
            # Ascending range -> ascending sort on this key.
            query = {k__gte: dt1, k__lte: dt2}
            sort_key, reverse_sort_key = k, '-' + k
        else:
            # Descending range -> swap the bounds and sort descending.
            query = {k__gte: dt2, k__lte: dt1}
            sort_key, reverse_sort_key = '-' + k, k
        # Replace the raw range value with the expanded query filters.
        del filters[k]
        filters.update(query)
        if reverse_sort_key in order_by_list:
            # Flip an existing opposite-direction sort key in place.
            idx = order_by_list.index(reverse_sort_key)
            order_by_list.pop(idx)
            order_by_list.insert(idx, sort_key)
        elif sort_key not in order_by_list:
            # Range keys take precedence over pre-existing sort keys.
            order_by_list = [sort_key] + order_by_list
    return filters, order_by_list
def test_crud_partial(self):
    """CRUD round-trip for a partial (subtask) execution history record."""
    # Create the DB record.
    obj = ActionExecutionHistoryAPI(**copy.deepcopy(self.fake_history_subtasks[0]))
    ActionExecutionHistory.add_or_update(ActionExecutionHistoryAPI.to_model(obj))
    model = ActionExecutionHistory.get_by_id(obj.id)
    self.assertEqual(str(model.id), obj.id)
    # Fields absent from the subtask fixture default to empty dicts.
    self.assertDictEqual(model.trigger, {})
    self.assertDictEqual(model.trigger_type, {})
    self.assertDictEqual(model.trigger_instance, {})
    self.assertDictEqual(model.rule, {})
    self.assertDictEqual(model.action, self.fake_history_subtasks[0]['action'])
    self.assertDictEqual(model.runner, self.fake_history_subtasks[0]['runner'])
    # The model stores parsed datetimes, so parse the fixture's string
    # timestamps before comparing.
    doc = copy.deepcopy(self.fake_history_subtasks[0]['execution'])
    doc['start_timestamp'] = isotime.parse(doc['start_timestamp'])
    doc['end_timestamp'] = isotime.parse(doc['end_timestamp'])
    self.assertDictEqual(model.execution, doc)
    self.assertEqual(model.parent, self.fake_history_subtasks[0]['parent'])
    self.assertListEqual(model.children, [])
    # Update the DB record.
    children = [str(bson.ObjectId()), str(bson.ObjectId())]
    model.children = children
    ActionExecutionHistory.add_or_update(model)
    model = ActionExecutionHistory.get_by_id(obj.id)
    self.assertListEqual(model.children, children)
    # Delete the DB record.
    ActionExecutionHistory.delete(model)
    self.assertRaises(ValueError, ActionExecutionHistory.get_by_id, obj.id)
def _process_datetime_range_filters(self, filters, order_by=None):
    """Expand ``start..end`` datetime range filter values into ``__gte`` /
    ``__lte`` query filters and align the sort order with the direction of
    each range (Python 2/3 compatible via six).

    Mutates ``filters`` in place; returns ``(filters, order_by_list)``.
    """
    # Only string values containing ".." are treated as datetime ranges.
    ranges = {
        k: v
        for k, v in six.iteritems(filters)
        if type(v) in [str, six.text_type] and ".." in v
    }

    order_by_list = copy.deepcopy(order_by) if order_by else []
    for k, v in six.iteritems(ranges):
        values = v.split("..")
        dt1 = isotime.parse(values[0])
        dt2 = isotime.parse(values[1])
        k__gte = "%s__gte" % k
        k__lte = "%s__lte" % k
        if dt1 < dt2:
            # Ascending range -> ascending sort on this key.
            query = {k__gte: dt1, k__lte: dt2}
            sort_key, reverse_sort_key = k, "-" + k
        else:
            # Descending range -> swap the bounds and sort descending.
            query = {k__gte: dt2, k__lte: dt1}
            sort_key, reverse_sort_key = "-" + k, k
        # Replace the raw range value with the expanded query filters.
        del filters[k]
        filters.update(query)
        if reverse_sort_key in order_by_list:
            # Flip an existing opposite-direction sort key in place.
            idx = order_by_list.index(reverse_sort_key)
            order_by_list.pop(idx)
            order_by_list.insert(idx, sort_key)
        elif sort_key not in order_by_list:
            # Range keys take precedence over pre-existing sort keys.
            order_by_list = [sort_key] + order_by_list
    return filters, order_by_list
def _get_updated_action_exec_result(self, action_node, liveaction, prev_task_result):
    """Return the previous task result re-formatted with refreshed timestamps.

    ``created_at`` always comes from the previous result. ``updated_at`` is
    the liveaction's end time once it has completed; otherwise the previously
    recorded update time is kept.
    """
    # created_at was identical in both branches; hoist it out (DRY).
    created_at = isotime.parse(prev_task_result['created_at'])
    if liveaction.status in action_constants.LIVEACTION_COMPLETED_STATES:
        updated_at = liveaction.end_timestamp
    else:
        updated_at = isotime.parse(prev_task_result['updated_at'])
    return self._format_action_exec_result(action_node, liveaction,
                                           created_at, updated_at)
class TriggerInstanceController(TriggerInstanceControllerMixin, resource.ResourceController):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of TriggerInstances in the system.
    """

    supported_filters = {
        'trigger': 'trigger',
        'timestamp_gt': 'occurrence_time.gt',
        'timestamp_lt': 'occurrence_time.lt',
        'status': 'status'
    }

    # Timestamp filter values arrive as ISO8601 strings and are parsed into
    # datetimes before being used in DB queries.
    filter_transform_functions = {
        'timestamp_gt': lambda value: isotime.parse(value=value),
        'timestamp_lt': lambda value: isotime.parse(value=value)
    }

    # Newest occurrences first, tie-broken by trigger.
    query_options = {
        'sort': ['-occurrence_time', 'trigger']
    }

    def __init__(self):
        super(TriggerInstanceController, self).__init__()

    def get_one(self, instance_id):
        """
        List triggerinstance by instance_id.

        Handle:
            GET /triggerinstances/1
        """
        return self._get_one_by_id(instance_id, permission_type=None)

    def get_all(self, sort=None, offset=0, limit=None, **raw_filters):
        """
        List all triggerinstances.

        Handles requests:
            GET /triggerinstances/
        """
        trigger_instances = self._get_trigger_instances(sort=sort,
                                                        offset=offset,
                                                        limit=limit,
                                                        raw_filters=raw_filters)
        return trigger_instances

    def _get_trigger_instances(self, sort=None, offset=0, limit=None, raw_filters=None):
        # Fall back to the controller-wide default page size when unspecified.
        if limit is None:
            limit = self.default_limit
        limit = int(limit)
        LOG.debug('Retrieving all trigger instances with filters=%s', raw_filters)
        return super(TriggerInstanceController, self)._get_all(sort=sort,
                                                               offset=offset,
                                                               limit=limit,
                                                               raw_filters=raw_filters)
def test_get_all(self):
    """GET /v1/executions returns every execution, sorted newest-first."""
    self._get_actionexecution_id(self._do_post(LIVE_ACTION_1))
    self._get_actionexecution_id(self._do_post(LIVE_ACTION_2))
    resp = self.app.get("/v1/executions")
    body = resp.json
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(len(resp.json), 2,
                     "/v1/executions did not return all actionexecutions.")
    # Assert liveactions are sorted by timestamp (each entry is at least as
    # new as the one that follows it).
    for newer, older in zip(body, body[1:]):
        self.assertTrue(isotime.parse(newer["start_timestamp"]) >=
                        isotime.parse(older["start_timestamp"]))
def to_model(cls, instance):
    """Convert an action execution API object into a DB model.

    Schema-declared attributes are copied generically; timestamps are then
    parsed from their ISO8601 string form and set explicitly.
    """
    model = cls.model()
    for attr, meta in six.iteritems(cls.schema.get('properties', dict())):
        default = copy.deepcopy(meta.get('default', None))
        value = getattr(instance, attr, default)
        # Skip empty optional values so the model's own defaults apply.
        if not value and not cls.model._fields[attr].required:
            continue
        if attr not in ActionExecutionAPI.SKIP:
            setattr(model, attr, value)
    # Timestamps are set explicitly here -- presumably they are listed in
    # ActionExecutionAPI.SKIP so the generic loop does not copy the raw
    # string values; confirm against the SKIP definition.
    model.start_timestamp = isotime.parse(instance.start_timestamp)
    model.end_timestamp = isotime.parse(instance.end_timestamp)
    return model
def test_write_marker_to_db(self):
    """_write_marker_to_db must persist the marker as a string which parses
    back to the original datetime."""
    executions_queue = self.get_queue()
    dumper = Dumper(queue=executions_queue,
                    export_dir='/tmp',
                    batch_size=5,
                    max_files_per_sleep=1,
                    file_prefix='st2-stuff-',
                    file_format='json')
    timestamps = [isotime.parse(execution.end_timestamp)
                  for execution in self.execution_apis]
    max_timestamp = max(timestamps)
    marker_db = dumper._write_marker_to_db(max_timestamp)
    persisted_marker = marker_db.marker
    # The marker is stored in string (ISO8601) form ...
    self.assertTrue(isinstance(persisted_marker, six.string_types))
    # ... and round-trips back to the datetime that was written.
    self.assertEqual(isotime.parse(persisted_marker), max_timestamp)
class TriggerInstanceController(TriggerInstanceControllerMixin, resource.ResourceController):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of TriggerInstances in the system.
    """

    # Sub-controller used to re-send (re-emit) an existing trigger instance.
    re_emit = TriggerInstanceResendController()

    supported_filters = {
        'trigger': 'trigger',
        'timestamp_gt': 'occurrence_time.gt',
        'timestamp_lt': 'occurrence_time.lt',
        'status': 'status'
    }

    # Timestamp filter values arrive as ISO8601 strings and are parsed into
    # datetimes before being used in DB queries.
    filter_transform_functions = {
        'timestamp_gt': lambda value: isotime.parse(value=value),
        'timestamp_lt': lambda value: isotime.parse(value=value)
    }

    # Newest occurrences first, tie-broken by trigger.
    query_options = {
        'sort': ['-occurrence_time', 'trigger']
    }

    def __init__(self):
        super(TriggerInstanceController, self).__init__()

    @jsexpose(arg_types=[str])
    def get_one(self, instance_id):
        """
        List triggerinstance by instance_id.

        Handle:
            GET /triggerinstances/1
        """
        return self._get_one(instance_id)

    @jsexpose()
    def get_all(self, **kw):
        """
        List all triggerinstances.

        Handles requests:
            GET /triggerinstances/
        """
        trigger_instances = self._get_trigger_instances(**kw)
        return trigger_instances

    def _get_trigger_instances(self, **kw):
        # Default page size is 100 when no limit is supplied.
        kw['limit'] = int(kw.get('limit', 100))
        LOG.debug('Retrieving all trigger instances with filters=%s', kw)
        return super(TriggerInstanceController, self)._get_all(**kw)
def test_get_all(self):
    """GET /v1/executions returns both posted executions, newest-first."""
    self._get_actionexecution_id(self._do_post(LIVE_ACTION_1))
    self._get_actionexecution_id(self._do_post(LIVE_ACTION_2))
    resp = self.app.get('/v1/executions')
    body = resp.json
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(len(resp.json), 2,
                     '/v1/executions did not return all actionexecutions.')
    # Assert liveactions are sorted by timestamp: compare each adjacent pair.
    for idx, execution in enumerate(body[:-1]):
        following = body[idx + 1]
        self.assertTrue(isotime.parse(execution['start_timestamp']) >=
                        isotime.parse(following['start_timestamp']))
def test_get_all(self):
    """GET /v1/actionexecutions returns every execution, newest-first."""
    self._get_actionexecution_id(self._do_post(ACTION_EXECUTION_1))
    self._get_actionexecution_id(self._do_post(ACTION_EXECUTION_2))
    resp = self.app.get('/v1/actionexecutions')
    body = resp.json
    # Assert executions are sorted by timestamp (adjacent pairs descending).
    for newer, older in zip(body, body[1:]):
        self.assertTrue(isotime.parse(newer['start_timestamp']) >=
                        isotime.parse(older['start_timestamp']))
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(len(resp.json), 2,
                     '/v1/actionexecutions did not return all actionexecutions.')
def _purge_executions(timestamp=None, action_ref=None):
    """Purge execution models older than ``timestamp``.

    :param timestamp: Cut-off time; executions started before it are deleted.
    :param action_ref: Optional action reference used to narrow the purge.
    """
    if not timestamp:
        print('Specify a valid timestamp to purge.')
        return

    if not action_ref:
        action_ref = ''

    print('Purging executions older than timestamp: %s' %
          timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    def should_delete(execution_db):
        # When an action ref was supplied, only purge executions of that action.
        if action_ref != '':
            return (execution_db.liveaction['action'] == action_ref and
                    execution_db.start_timestamp < timestamp)
        return execution_db.start_timestamp < timestamp

    # XXX: Think about paginating this call.
    filters = {'start_timestamp__lt': isotime.parse(timestamp)}
    executions = ActionExecution.query(**filters)
    # Use a list comprehension instead of filter(): on Python 3 filter()
    # returns a lazy iterator, so the len() call below would raise TypeError.
    executions_to_delete = [execution_db for execution_db in executions
                            if should_delete(execution_db)]

    print('#### Total number of executions to delete: %d' %
          len(executions_to_delete))

    # Purge execution and liveaction models now
    for execution_db in executions_to_delete:
        _purge_action_models(execution_db)

    # Print stats
    print('#### Total execution models deleted: %d' % DELETED_COUNT)
def to_model(cls, instance):
    """Convert a trigger instance API object into its DB model."""
    # occurrence_time arrives as an ISO8601 string and must be parsed.
    parsed_occurrence_time = isotime.parse(instance.occurrence_time)
    return cls.model(trigger=instance.trigger,
                     payload=instance.payload,
                     occurrence_time=parsed_occurrence_time)
def to_model(cls, execution):
    """Convert an execution API object into its DB model, delegating common
    fields to the parent class and handling timestamps explicitly."""
    model = super(cls, cls).to_model(execution)
    model.action = execution.action

    # Timestamps arrive as ISO8601 strings; only parse the ones present.
    if getattr(execution, 'start_timestamp', None):
        model.start_timestamp = isotime.parse(execution.start_timestamp)
    if getattr(execution, 'end_timestamp', None):
        model.end_timestamp = isotime.parse(execution.end_timestamp)

    # Optional attributes fall back to sensible defaults.
    model.status = getattr(execution, 'status', None)
    model.parameters = getattr(execution, 'parameters', dict())
    model.context = getattr(execution, 'context', dict())
    model.callback = getattr(execution, 'callback', dict())
    model.result = getattr(execution, 'result', None)
    return model
def test_update_marker_out_of_order_batch(self):
    """A persisted marker newer than every timestamp in the batch must be
    rolled back to the batch maximum."""
    executions_queue = self.get_queue()
    dumper = Dumper(
        queue=executions_queue,
        export_dir="/tmp",
        batch_size=5,
        max_files_per_sleep=1,
        file_prefix="st2-stuff-",
        file_format="json",
    )
    timestamps = [
        isotime.parse(execution.end_timestamp)
        for execution in self.execution_apis
    ]
    max_timestamp = max(timestamps)
    # set dumper persisted timestamp to something less than min timestamp in the batch
    # NOTE(review): despite the comment above, this value is max + 1h, i.e.
    # NEWER than every batch timestamp -- which is what forces the rollback
    # asserted below. Confirm the comment, not the code, is what is wrong.
    test_timestamp = max_timestamp + datetime.timedelta(hours=1)
    dumper._persisted_marker = test_timestamp
    new_marker = dumper._update_marker(self.execution_apis)
    self.assertTrue(new_marker < test_timestamp)
    # Assert we rolled back the marker.
    self.assertEqual(dumper._persisted_marker, max_timestamp)
    self.assertEqual(new_marker, max_timestamp)
    # The rolled-back marker must be what gets persisted (mocked call).
    dumper._write_marker_to_db.assert_called_with(new_marker)
def to_model(cls, instance):
    """Convert an action execution API object into a DB model.

    Schema-declared attributes are copied generically; timestamps are then
    parsed from their ISO8601 string form and set explicitly.
    """
    model = cls.model()
    for attr, meta in six.iteritems(cls.schema.get('properties', dict())):
        default = copy.deepcopy(meta.get('default', None))
        value = getattr(instance, attr, default)
        # pylint: disable=no-member
        # TODO: Add plugin which lets pylint know each MongoEngine document has _fields
        # attribute
        # Skip empty optional values so the model's own defaults apply.
        if not value and not cls.model._fields[attr].required:
            continue
        if attr not in ActionExecutionAPI.SKIP:
            setattr(model, attr, value)
    # Timestamps are set explicitly -- presumably listed in
    # ActionExecutionAPI.SKIP so the loop above does not copy raw strings;
    # confirm against the SKIP definition.
    model.start_timestamp = isotime.parse(instance.start_timestamp)
    model.end_timestamp = isotime.parse(instance.end_timestamp)
    return model
def purge_trigger_instances(logger, timestamp):
    """
    :param timestamp: Trigger instances older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``
    """
    if not timestamp:
        raise ValueError('Specify a valid timestamp to purge.')

    logger.info('Purging trigger instances older than timestamp: %s' %
                timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    query_filters = {'occurrence_time__lt': isotime.parse(timestamp)}

    # TODO: Update this code to return statistics on deleted objects once we
    # upgrade to newer version of MongoDB where delete_by_query actually returns
    # some data
    try:
        TriggerInstance.delete_by_query(**query_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete trigger instances: %s'
               'Please contact support.' % (query_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; only unexpected runtime errors are logged here.
        logger.exception('Deleting instances using query_filters %s failed.',
                         query_filters)

    # Print stats
    logger.info(
        'All trigger instance models older than timestamp %s were deleted.',
        timestamp)
def test_crud_partial(self):
    """CRUD round-trip for a partial (subtask) execution history record."""
    # Create the DB record.
    obj = ActionExecutionHistoryAPI(**copy.deepcopy(self.fake_history_subtasks[0]))
    ActionExecutionHistory.add_or_update(ActionExecutionHistoryAPI.to_model(obj))
    model = ActionExecutionHistory.get_by_id(obj.id)
    self.assertEqual(str(model.id), obj.id)
    # Fields absent from the subtask fixture default to empty dicts.
    self.assertDictEqual(model.trigger, {})
    self.assertDictEqual(model.trigger_type, {})
    self.assertDictEqual(model.trigger_instance, {})
    self.assertDictEqual(model.rule, {})
    self.assertDictEqual(model.action, self.fake_history_subtasks[0]['action'])
    self.assertDictEqual(model.runner, self.fake_history_subtasks[0]['runner'])
    doc = copy.deepcopy(self.fake_history_subtasks[0]['execution'])
    # NOTE(review): only start_timestamp is parsed here; unlike sibling
    # versions of this test, end_timestamp is left as a string -- confirm
    # the fixture omits end_timestamp for partial records.
    doc['start_timestamp'] = isotime.parse(doc['start_timestamp'])
    self.assertDictEqual(model.execution, doc)
    self.assertEqual(model.parent, self.fake_history_subtasks[0]['parent'])
    self.assertListEqual(model.children, [])
    # Update the DB record.
    children = [str(bson.ObjectId()), str(bson.ObjectId())]
    model.children = children
    ActionExecutionHistory.add_or_update(model)
    model = ActionExecutionHistory.get_by_id(obj.id)
    self.assertListEqual(model.children, children)
    # Delete the DB record.
    ActionExecutionHistory.delete(model)
    self.assertRaises(ValueError, ActionExecutionHistory.get_by_id, obj.id)
def to_model(cls, token):
    """Convert a token API object into its DB model, normalizing user/token
    values to plain strings and parsing the expiry timestamp."""
    model = super(cls, cls).to_model(token)

    # Absent/falsy values become None; present ones are coerced to str.
    raw_user = token.user
    model.user = str(raw_user) if raw_user else None
    raw_token = token.token
    model.token = str(raw_token) if raw_token else None

    model.ttl = getattr(token, 'ttl', None)

    # Expiry arrives as an ISO8601 string when present.
    raw_expiry = token.expiry
    model.expiry = isotime.parse(raw_expiry) if raw_expiry else None
    return model
def test_get_all(self):
    """GET /v1/executions: sorted newest-first, with web_url and
    elapsed_seconds metadata present."""
    self._get_actionexecution_id(self._do_post(LIVE_ACTION_1))
    self._get_actionexecution_id(self._do_post(LIVE_ACTION_2))
    resp = self.app.get('/v1/executions')
    body = resp.json
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(len(resp.json), 2,
                     '/v1/executions did not return all actionexecutions.')
    # Assert liveactions are sorted by timestamp; also check per-item metadata.
    for idx, execution in enumerate(body[:-1]):
        following = body[idx + 1]
        self.assertTrue(isotime.parse(execution['start_timestamp']) >=
                        isotime.parse(following['start_timestamp']))
        self.assertTrue('web_url' in execution)
        if 'end_timestamp' in execution:
            self.assertTrue('elapsed_seconds' in execution)
def to_model(cls, instance):
    """Convert an auth token API object into its DB model."""
    # Coerce user/token to plain strings; absent values stay None.
    if instance.user:
        user = str(instance.user)
    else:
        user = None
    if instance.token:
        token = str(instance.token)
    else:
        token = None
    # Expiry arrives as an ISO8601 string when present.
    if instance.expiry:
        expiry = isotime.parse(instance.expiry)
    else:
        expiry = None
    return cls.model(user=user, token=token, expiry=expiry)
def test_write_marker_to_db_marker_exists(self):
    """Writing a marker twice must update the single existing DumperMarker
    row rather than create a second one."""
    executions_queue = self.get_queue()
    dumper = Dumper(queue=executions_queue,
                    export_dir='/tmp',
                    batch_size=5,
                    max_files_per_sleep=1,
                    file_prefix='st2-stuff-',
                    file_format='json')
    timestamps = [
        isotime.parse(execution.end_timestamp)
        for execution in self.execution_apis
    ]
    max_timestamp = max(timestamps)
    first_marker_db = dumper._write_marker_to_db(max_timestamp)
    second_marker_db = dumper._write_marker_to_db(
        max_timestamp + datetime.timedelta(hours=1))
    markers = DumperMarker.get_all()
    # Exactly one marker row exists; both writes refer to the same row.
    self.assertEqual(len(markers), 1)
    final_marker_id = markers[0].id
    self.assertEqual(first_marker_db.id, final_marker_id)
    self.assertEqual(second_marker_db.id, final_marker_id)
    # The stored value is the second (newer) marker ...
    self.assertEqual(markers[0].marker, second_marker_db.marker)
    # ... and the row's updated_at advanced across the two writes.
    self.assertTrue(second_marker_db.updated_at > first_marker_db.updated_at)
def test_format_sec_truncated(self):
    """Formatting with usec=False then parsing drops sub-second precision."""
    now = date.add_utc_tz(datetime.datetime.utcnow())
    reparsed = isotime.parse(isotime.format(now, usec=False))
    truncated = datetime.datetime(now.year, now.month, now.day,
                                  now.hour, now.minute, now.second)
    # NOTE(review): assumes utcnow() produced a non-zero microsecond value;
    # if it is exactly 0 the strict assertLess would fail -- confirm this
    # flake risk is acceptable.
    self.assertLess(reparsed, now)
    self.assertEqual(reparsed, date.add_utc_tz(truncated))
def purge_rule_enforcements(logger, timestamp):
    """
    :param timestamp: Rule enforcement instances older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``
    """
    if not timestamp:
        raise ValueError("Specify a valid timestamp to purge.")

    logger.info("Purging rule enforcements older than timestamp: %s" %
                timestamp.strftime("%Y-%m-%dT%H:%M:%S.%fZ"))

    query_filters = {"enforced_at__lt": isotime.parse(timestamp)}

    try:
        deleted_count = RuleEnforcement.delete_by_query(**query_filters)
    except InvalidQueryError as e:
        msg = ("Bad query (%s) used to delete rule enforcements: %s"
               "Please contact support." % (
                   query_filters,
                   six.text_type(e),
               ))
        raise InvalidQueryError(msg)
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; only unexpected runtime errors are logged here.
        logger.exception(
            "Deleting rule enforcements using query_filters %s failed.",
            query_filters)
    else:
        logger.info("Deleted %s rule enforcement objects" % (deleted_count))

    # Print stats
    logger.info(
        "All rule enforcement models older than timestamp %s were deleted.",
        timestamp)
def to_model(cls, rule_enforcement):
    """Convert a rule enforcement API object into its DB model."""
    trigger_instance_id = getattr(rule_enforcement, "trigger_instance_id", None)
    execution_id = getattr(rule_enforcement, "execution_id", None)
    failure_reason = getattr(rule_enforcement, "failure_reason", None)
    status = getattr(rule_enforcement, "status",
                     RULE_ENFORCEMENT_STATUS_SUCCEEDED)

    # Build the embedded rule reference document from the raw rule dict.
    rule_spec = dict(getattr(rule_enforcement, "rule", {}))
    rule = RuleReferenceSpecDB(
        ref=rule_spec["ref"],
        id=rule_spec["id"],
        uid=rule_spec["uid"],
    )

    # enforced_at arrives as an ISO8601 string; parse when present.
    enforced_at = getattr(rule_enforcement, "enforced_at", None)
    if enforced_at:
        enforced_at = isotime.parse(enforced_at)

    return cls.model(
        trigger_instance_id=trigger_instance_id,
        execution_id=execution_id,
        failure_reason=failure_reason,
        enforced_at=enforced_at,
        rule=rule,
        status=status,
    )
def purge_trigger_instances(logger, timestamp):
    """
    :param timestamp: Trigger instances older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``
    """
    if not timestamp:
        raise ValueError('Specify a valid timestamp to purge.')

    logger.info('Purging trigger instances older than timestamp: %s' %
                timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    query_filters = {'occurrence_time__lt': isotime.parse(timestamp)}

    # TODO: Update this code to return statistics on deleted objects once we
    # upgrade to newer version of MongoDB where delete_by_query actually returns
    # some data
    try:
        TriggerInstance.delete_by_query(**query_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete trigger instances: %s'
               'Please contact support.' % (query_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; only unexpected runtime errors are logged here.
        logger.exception('Deleting instances using query_filters %s failed.',
                         query_filters)

    # Print stats
    logger.info('All trigger instance models older than timestamp %s were deleted.',
                timestamp)
def purge_trigger_instances(logger, timestamp):
    """
    :param timestamp: Trigger instances older than this timestamp will be deleted.
    :type timestamp: ``datetime.datetime``
    """
    if not timestamp:
        raise ValueError('Specify a valid timestamp to purge.')

    logger.info('Purging trigger instances older than timestamp: %s' %
                timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))

    query_filters = {'occurrence_time__lt': isotime.parse(timestamp)}

    try:
        deleted_count = TriggerInstance.delete_by_query(**query_filters)
    except InvalidQueryError as e:
        msg = ('Bad query (%s) used to delete trigger instances: %s'
               'Please contact support.' % (query_filters, str(e)))
        raise InvalidQueryError(msg)
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; only unexpected runtime errors are logged here.
        logger.exception('Deleting instances using query_filters %s failed.',
                         query_filters)
    else:
        logger.info('Deleted %s trigger instance objects' % (deleted_count))

    # Print stats
    logger.info('All trigger instance models older than timestamp %s were deleted.',
                timestamp)
def test_token_post_set_ttl(self):
    """Posting an explicit ttl yields an expiry between the request time and
    request time + ttl."""
    before_request = date_utils.add_utc_tz(date_utils.get_datetime_utc_now())
    response = self.app.post_json(TOKEN_V1_PATH, {'ttl': 60},
                                  expect_errors=False)
    # Upper bound computed after the request so the expiry must fall below it.
    upper_bound = date_utils.get_datetime_utc_now() + datetime.timedelta(seconds=60)
    self.assertEqual(response.status_int, 201)
    actual_expiry = isotime.parse(response.json['expiry'])
    self.assertLess(before_request, actual_expiry)
    self.assertLess(actual_expiry, upper_bound)
def to_component_model(cls, component):
    """Build a TraceComponentDB from a raw trace component dict.

    ``updated_at`` is optional; when present it is parsed from its ISO8601
    string form into a datetime.
    """
    kwargs = {'object_id': component['object_id']}
    raw_updated_at = component.get('updated_at', None)
    if raw_updated_at:
        kwargs['updated_at'] = isotime.parse(raw_updated_at)
    return TraceComponentDB(**kwargs)
def test_create_token_ttl_ok(self):
    """A token created with an in-range ttl expires no later than now + ttl."""
    ttl = 10
    # Pass the ttl variable instead of a hard-coded 10 (the literal only
    # worked because ttl happened to equal 10; behavior is unchanged).
    token = access.create_token(USERNAME, ttl)
    self.assertTrue(token is not None)
    self.assertTrue(token.token is not None)
    self.assertEqual(token.user, USERNAME)
    expected_expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=ttl)
    expected_expiry = isotime.add_utc_tz(expected_expiry)
    self.assertLess(isotime.parse(token.expiry), expected_expiry)
def test_create_token_ttl_capped(self):
    """A requested ttl above the configured maximum must be capped."""
    ttl = cfg.CONF.auth.token_ttl + 10
    expected_expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=ttl)
    expected_expiry = isotime.add_utc_tz(expected_expiry)
    # Request the over-limit ttl. The original passed a hard-coded 10, which
    # never exercised the capping behavior this test is named for; a capped
    # expiry (<= now + token_ttl) still satisfies the assertLess below.
    token = access.create_token('manas', ttl)
    self.assertTrue(token is not None)
    self.assertTrue(token.token is not None)
    self.assertEqual(token.user, 'manas')
    self.assertLess(isotime.parse(token.expiry), expected_expiry)
def test_create_token_ttl_capped(self):
    """A requested ttl above the configured maximum must be capped."""
    ttl = cfg.CONF.auth.token_ttl + 10
    expected_expiry = date_utils.get_datetime_utc_now() + datetime.timedelta(seconds=ttl)
    expected_expiry = date_utils.add_utc_tz(expected_expiry)
    # Request the over-limit ttl. The original passed a hard-coded 10, which
    # never exercised the capping behavior this test is named for; a capped
    # expiry (<= now + token_ttl) still satisfies the assertLess below.
    token = access.create_token(USERNAME, ttl)
    self.assertTrue(token is not None)
    self.assertTrue(token.token is not None)
    self.assertEqual(token.user, USERNAME)
    self.assertLess(isotime.parse(token.expiry), expected_expiry)
def test_timestamp_lt_and_gt_filter(self):
    """Exercise timestamp_lt / timestamp_gt query filters on the executions endpoint.

    ``self.start_timestamps`` is assumed to hold the fixtures' start timestamps
    in ascending order (the index arithmetic below depends on that).
    """
    def isoformat(timestamp):
        return isotime.format(timestamp, offset=False)

    # Last (largest) timestamp, there are no executions with a greater timestamp
    timestamp = self.start_timestamps[-1]
    response = self.app.get("/v1/executions?timestamp_gt=%s" % (isoformat(timestamp)))
    self.assertEqual(len(response.json), 0)

    # First (smallest) timestamp, there are no executions with a smaller timestamp
    timestamp = self.start_timestamps[0]
    response = self.app.get("/v1/executions?timestamp_lt=%s" % (isoformat(timestamp)))
    self.assertEqual(len(response.json), 0)

    # Second last, there should be one timestamp greater than it
    timestamp = self.start_timestamps[-2]
    response = self.app.get("/v1/executions?timestamp_gt=%s" % (isoformat(timestamp)))
    self.assertEqual(len(response.json), 1)
    self.assertTrue(isotime.parse(response.json[0]["start_timestamp"]) > timestamp)

    # Second one, there should be one timestamp smaller than it
    timestamp = self.start_timestamps[1]
    response = self.app.get("/v1/executions?timestamp_lt=%s" % (isoformat(timestamp)))
    self.assertEqual(len(response.json), 1)
    self.assertTrue(isotime.parse(response.json[0]["start_timestamp"]) < timestamp)

    # All timestamps except the last one are smaller than the last one
    # (comment fixed: with index == len - 1 this is "all but one", not "half")
    index = len(self.start_timestamps) - 1
    timestamp = self.start_timestamps[index]
    response = self.app.get("/v1/executions?timestamp_lt=%s" % (isoformat(timestamp)))
    self.assertEqual(len(response.json), index)
    self.assertTrue(isotime.parse(response.json[0]["start_timestamp"]) < timestamp)

    # All timestamps except the first two are greater than the second one
    # (-index == -(len - 1) addresses element 1, so len - 2 results are expected)
    index = len(self.start_timestamps) - 1
    timestamp = self.start_timestamps[-index]
    response = self.app.get("/v1/executions?timestamp_gt=%s" % (isoformat(timestamp)))
    self.assertEqual(len(response.json), (index - 1))
    self.assertTrue(isotime.parse(response.json[0]["start_timestamp"]) > timestamp)

    # Both, lt and gt filters, should return exactly two results
    timestamp_gt = self.start_timestamps[10]
    timestamp_lt = self.start_timestamps[13]
    # Fixed: the query-string separator was corrupted to the "×" character
    # ("...?timestamp_gt=%s×tamp_lt=%s") by an HTML-entity round-trip; it
    # must be a literal "&" for the second filter to reach the API at all.
    response = self.app.get(
        "/v1/executions?timestamp_gt=%s&timestamp_lt=%s"
        % (isoformat(timestamp_gt), isoformat(timestamp_lt))
    )
    self.assertEqual(len(response.json), 2)
    self.assertTrue(isotime.parse(response.json[0]["start_timestamp"]) > timestamp_gt)
    self.assertTrue(isotime.parse(response.json[1]["start_timestamp"]) > timestamp_gt)
    self.assertTrue(isotime.parse(response.json[0]["start_timestamp"]) < timestamp_lt)
    self.assertTrue(isotime.parse(response.json[1]["start_timestamp"]) < timestamp_lt)
def _get_export_marker_from_db(self):
    """Return the persisted export marker as a datetime, or None.

    Returns None both when no marker rows exist and when the lookup itself
    fails — a missing marker simply means "no previous export position".
    """
    try:
        markers = DumperMarker.get_all()
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit / KeyboardInterrupt
        # are no longer swallowed; any DB failure still degrades to None.
        return None
    if not markers:
        return None
    # Only the first marker is meaningful; parse its ISO8601 string form.
    return isotime.parse(markers[0].marker)
def test_datetime_range(self):
    """A datetime range filter returns the slice, ordered to match the
    direction of the range (ascending range -> ascending results and
    vice versa)."""
    ascending_range = '2014-12-25T00:00:10Z..2014-12-25T00:00:19Z'
    response = self.app.get('/history/executions?timestamp=%s' % ascending_range)
    self.assertEqual(response.status_int, 200)
    self.assertIsInstance(response.json, list)
    self.assertEqual(len(response.json), 10)
    self.assertEqual(response.headers['X-Total-Count'], '10')
    first = isotime.parse(response.json[0]['execution']['start_timestamp'])
    last = isotime.parse(response.json[9]['execution']['start_timestamp'])
    self.assertLess(first, last)

    descending_range = '2014-12-25T00:00:19Z..2014-12-25T00:00:10Z'
    response = self.app.get('/history/executions?timestamp=%s' % descending_range)
    self.assertEqual(response.status_int, 200)
    self.assertIsInstance(response.json, list)
    self.assertEqual(len(response.json), 10)
    self.assertEqual(response.headers['X-Total-Count'], '10')
    first = isotime.parse(response.json[0]['execution']['start_timestamp'])
    last = isotime.parse(response.json[9]['execution']['start_timestamp'])
    self.assertLess(last, first)
def to_model(cls, execution):
    """Populate an execution DB model from the given API object."""
    model = super(cls, cls).to_model(execution)
    model.action = execution.action
    # start_timestamp arrives as an ISO8601 string; parse only when set.
    start_timestamp = getattr(execution, 'start_timestamp', None)
    if start_timestamp:
        model.start_timestamp = isotime.parse(start_timestamp)
    model.status = getattr(execution, 'status', None)
    # Collection-valued fields default to empty dicts rather than None.
    for attr in ('parameters', 'context', 'callback'):
        setattr(model, attr, getattr(execution, attr, dict()))
    model.result = getattr(execution, 'result', None)
    return model
def to_model(cls, liveaction):
    """Populate a liveaction DB model from the given API object."""
    model = super(cls, cls).to_model(liveaction)
    model.action = liveaction.action
    # Timestamps arrive as ISO8601 strings; parse each one only when set.
    for ts_attr in ('start_timestamp', 'end_timestamp'):
        raw_value = getattr(liveaction, ts_attr, None)
        if raw_value:
            setattr(model, ts_attr, isotime.parse(raw_value))
    model.status = getattr(liveaction, 'status', None)
    # Collection-valued fields default to empty dicts rather than None.
    for attr in ('parameters', 'context', 'callback'):
        setattr(model, attr, getattr(liveaction, attr, dict()))
    model.result = getattr(liveaction, 'result', None)
    if getattr(liveaction, 'notify', None):
        model.notify = NotificationsHelper.to_model(liveaction.notify)
    return model