def list_events(query, include=None):
    """Run an Elasticsearch events query and wrap the hits in a ListResult.

    :param query: Elasticsearch query body to execute.
    :param include: Optional projection of fields to fetch per event.
    :returns: Matching events plus pagination metadata.
    :rtype: ListResult
    """
    search_result = ManagerElasticsearch.search_events(body=query,
                                                       include=include)
    hits = ManagerElasticsearch.extract_search_result_values(search_result)
    return ListResult(
        hits,
        ManagerElasticsearch.build_list_result_metadata(query, search_result),
    )
def delete(self, filters=None, pagination=None, sort=None,
           range_filters=None, **kwargs):
    """Delete events/logs connected to a certain Deployment ID.

    :param filters: Must contain a ``type`` key including
        ``cloudify_event`` (and optionally ``cloudify_log``), plus a
        ``deployment_id``. A ``store_before`` flag of ``'true'`` causes
        the rows to be archived before deletion.
    :param pagination: Pagination dict echoed back in the result
        metadata; may be ``None``.
    :param range_filters: Optional timestamp range restriction applied
        to the delete queries.
    :raises manager_exceptions.BadParametersError: when the required
        filters are missing.
    :returns: A single-element list holding the deleted-row count.
    :rtype: ListResult
    """
    if not isinstance(filters, dict) or 'type' not in filters:
        raise manager_exceptions.BadParametersError(
            'Filter by type is expected')

    if 'cloudify_event' not in filters['type']:
        raise manager_exceptions.BadParametersError(
            'At least `type=cloudify_event` filter is expected')

    # Fail with a 400 instead of an unhandled KeyError (500) when the
    # caller forgot the deployment_id filter.
    if 'deployment_id' not in filters:
        raise manager_exceptions.BadParametersError(
            'Filter by deployment_id is expected')

    # Executions belonging to the requested deployment within the
    # current tenant; used as a subquery for both event and log deletes.
    executions_query = (
        db.session.query(Execution._storage_id)
        .filter(
            Execution._deployment_fk == Deployment._storage_id,
            Deployment.id == bindparam('deployment_id'),
            Execution._tenant_id == bindparam('tenant_id')
        )
    )
    params = {
        'deployment_id': filters['deployment_id'][0],
        'tenant_id': self.current_tenant.id
    }
    do_store_before = (
        'store_before' in filters and
        filters['store_before'][0].upper() == 'TRUE'
    )

    delete_event_query = Events._apply_range_filters(
        Events._build_delete_subquery(Event, executions_query, params),
        Event, range_filters)
    if do_store_before:
        self._store_log_entries(
            'events', filters['deployment_id'][0],
            delete_event_query.order_by('reported_timestamp'))
    total = delete_event_query.delete(synchronize_session=False)

    if 'cloudify_log' in filters['type']:
        delete_log_query = Events._apply_range_filters(
            Events._build_delete_subquery(Log, executions_query, params),
            Log, range_filters)
        if do_store_before:
            self._store_log_entries(
                'logs', filters['deployment_id'][0],
                delete_log_query.order_by('reported_timestamp'))
        total += delete_log_query.delete('fetch')

    # `pagination` defaults to None; dict(None, ...) raises TypeError,
    # so fall back to an empty dict.
    metadata = {'pagination': dict(pagination or {}, total=total)}

    # Commit bulk row deletions to database
    db.session.commit()

    # We don't really want to return all of the deleted events,
    # so it's a bit of a hack to return the deleted element count.
    return ListResult([total], metadata)
def delete(self, filters=None, pagination=None, sort=None,
           range_filters=None, **kwargs):
    """Delete events/logs connected to a certain Deployment ID.

    :param filters: Must contain a ``type`` key including
        ``cloudify_event`` (and optionally ``cloudify_log``), plus a
        ``deployment_id``.
    :param pagination: Pagination dict echoed back in the result
        metadata; may be ``None``.
    :raises manager_exceptions.BadParametersError: when the required
        filters are missing.
    :returns: A single-element list holding the deleted-row count.
    :rtype: ListResult
    """
    if not isinstance(filters, dict) or 'type' not in filters:
        raise manager_exceptions.BadParametersError(
            'Filter by type is expected')

    if 'cloudify_event' not in filters['type']:
        raise manager_exceptions.BadParametersError(
            'At least `type=cloudify_event` filter is expected')

    # Fail with a 400 instead of an unhandled KeyError (500) when the
    # caller forgot the deployment_id filter.
    if 'deployment_id' not in filters:
        raise manager_exceptions.BadParametersError(
            'Filter by deployment_id is expected')

    # Executions belonging to the requested deployment within the
    # current tenant; used as a subquery for both event and log deletes.
    executions_query = (
        db.session.query(Execution._storage_id)
        .filter(
            Execution._deployment_fk == Deployment._storage_id,
            Deployment.id == bindparam('deployment_id'),
            Execution._tenant_id == bindparam('tenant_id')
        )
    )
    params = {
        'deployment_id': filters['deployment_id'][0],
        'tenant_id': self.current_tenant.id
    }

    delete_event_query = (
        db.session.query(Event)
        .filter(
            Event._execution_fk.in_(executions_query),
            Event._tenant_id == bindparam('tenant_id')
        )
        .params(**params)
    )
    total = delete_event_query.delete(synchronize_session=False)

    if 'cloudify_log' in filters['type']:
        delete_log_query = (
            db.session.query(Log)
            .filter(
                Log._execution_fk.in_(executions_query),
                Log._tenant_id == bindparam('tenant_id')
            )
            .params(**params)
        )
        total += delete_log_query.delete('fetch')

    # `pagination` defaults to None; dict(None, ...) raises TypeError,
    # so fall back to an empty dict.
    metadata = {'pagination': dict(pagination or {}, total=total)}

    # Commit bulk row deletions to database
    db.session.commit()

    # We don't really want to return all of the deleted events,
    # so it's a bit of a hack to return the deleted element count.
    return ListResult([total], metadata)
def _test_include_propagation_to_model(self, expected_blueprints_list_args,
                                       expected_blueprints_list_kwargs):
    """Verify `_include` reaches the storage layer, not just the response.

    The "include" parameter should not merely filter the response fields
    after the fact; it must propagate down to the Model/storage layer so
    the database query itself only fetches the requested columns.
    """
    patch_target = ('manager_rest.storage.storage_manager'
                    '.SQLStorageManager.list')
    with mock.patch(patch_target) as mocked_list:
        empty_meta = {'pagination': {'total': 0, 'size': 0, 'offset': 0}}
        mocked_list.return_value = ListResult([], empty_meta)
        self.client.blueprints.list(_include=['id'])
        mocked_list.assert_called_once_with(
            *expected_blueprints_list_args,
            **expected_blueprints_list_kwargs)
def delete(self, filters=None, pagination=None, sort=None,
           range_filters=None, **kwargs):
    """Delete events/logs connected to a certain Deployment ID """
    search_body = self._build_query(filters=filters,
                                    pagination=pagination,
                                    sort=sort,
                                    range_filters=range_filters)
    matched = ManagerElasticsearch.search_events(body=search_body)
    result_metadata = ManagerElasticsearch.build_list_result_metadata(
        search_body, matched)
    ManagerElasticsearch.delete_events(matched)

    # We don't really want to return all of the deleted events, so it's a
    # bit of a hack to only return the number of events to delete - if any
    # of the events weren't deleted, we'd have gotten an error from the
    # method above
    return ListResult([matched['hits']['total']], result_metadata)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        range_filters=None, **kwargs):
    """List events using a SQL backend.

    :param _include:
        Projection used to get records from database (not currently used)
    :type _include: list(str)
    :param filters:
        Filter selection.

        It's used to decide if events:
            {'type': ['cloudify_event']}
        or both events and logs should be returned:
            {'type': ['cloudify_event', 'cloudify_log']}

        Also it's used to get only events for a particular execution:
            {'execution_id': '<some uuid>'}
    :type filters: dict(str, str)
    :param pagination:
        Parameters used to limit results returned in a single query.
        Expected values `size` and `offset` are mapped into SQL as `LIMIT`
        and `OFFSET`. May be ``None``, in which case defaults are used.
    :type pagination: dict(str, int)
    :param sort:
        Result sorting order. The only allowed and expected value is to
        sort by timestamp in ascending order:
            {'timestamp': 'asc'}
    :type sort: dict(str, str)
    :param range_filters:
        Apparently was used to select a timestamp interval. It's not
        currently used.
    :type range_filters: dict(str)
    :returns: Events that match the conditions passed as arguments
    :rtype: :class:`manager_rest.storage.storage_manager.ListResult`
    """
    # `pagination` defaults to None; guard before calling .get() on it.
    pagination = pagination or {}
    size = pagination.get('size', self.DEFAULT_SEARCH_SIZE)
    offset = pagination.get('offset', 0)
    params = {
        'limit': size,
        'offset': offset,
    }

    # Total match count is computed separately from the page of results.
    count_query = self._build_count_query(filters, range_filters,
                                          self.current_tenant.id)
    total = count_query.params(**params).scalar()

    select_query = self._build_select_query(filters, sort, range_filters,
                                            self.current_tenant.id)

    results = [
        self._map_event_to_dict(_include, event)
        for event in select_query.params(**params).all()
    ]

    metadata = {
        'pagination': {
            'size': size,
            'offset': offset,
            'total': total,
        }
    }
    return ListResult(results, metadata)
def delete(self, filters=None, pagination=None, all_tenants=None):
    """Delete finished executions, keeping deployment-creation history.

    The request body may contain either ``to_datetime`` (delete
    executions created before that timestamp) or ``keep_last`` (keep the
    N most recently listed executions) - but not both.

    :param filters: Storage filters; ``status`` may restrict which end
        states are targeted, otherwise all end states are used.
    :param pagination: Echoed back in the result metadata.
    :param all_tenants: Whether to span all tenants in the listing.
    :raises BadParametersError: on conflicting/invalid request params or
        a non-end-state status filter.
    :returns: A single-element list with the number of deleted executions.
    :rtype: ListResult
    """
    request_dict = get_json_and_verify_params({
        'keep_last': {'optional': True, 'type': int},
        'to_datetime': {'optional': True}
    })
    if 'keep_last' in request_dict:
        # `keep_last` and `to_datetime` are mutually exclusive.
        if 'to_datetime' in request_dict:
            raise BadParametersError(
                "Must provide either a `to_datetime` timestamp or a "
                "`keep_last` number of executions to keep"
            )
        if request_dict['keep_last'] <= 0:
            raise BadParametersError(
                "`keep_last` must be an integer greater than 0. got {} "
                "instead.".format(request_dict['keep_last'])
            )
    requested_time = None
    if 'to_datetime' in request_dict:
        requested_time = parse_datetime_multiple_formats(
            request_dict['to_datetime'])
    if 'status' in filters:
        if filters['status'] not in ExecutionState.END_STATES:
            raise BadParametersError(
                'Can\'t filter by execution status `{0}`. '
                'Allowed statuses are: {1}'.format(
                    filters['status'], ExecutionState.END_STATES)
            )
    else:
        filters['status'] = ExecutionState.END_STATES
    sm = get_storage_manager()
    executions = sm.list(models.Execution,
                         filters=filters,
                         all_tenants=all_tenants,
                         get_all_results=True)

    # Count successful deployment-environment creations per deployment so
    # that _can_delete_execution can preserve deployment history.
    dep_creation_execs = {}
    for execution in executions:
        if execution.workflow_id == 'create_deployment_environment' and \
                execution.status == 'terminated':
            dep_creation_execs[execution.deployment_id] = \
                dep_creation_execs.get(execution.deployment_id, 0) + 1

    deleted_count = 0
    if requested_time:
        for execution in executions:
            creation_time = datetime.strptime(execution.created_at,
                                              '%Y-%m-%dT%H:%M:%S.%fZ')
            if creation_time < requested_time and \
                    self._can_delete_execution(execution,
                                               dep_creation_execs):
                sm.delete(execution)
                deleted_count += 1
    else:
        max_to_delete = None
        if request_dict.get('keep_last'):
            max_to_delete = len(executions) - request_dict['keep_last']
        for execution in executions:
            # Check the quota BEFORE deleting: the old post-delete check
            # removed one execution too many when `keep_last` was >= the
            # number of listed executions (max_to_delete <= 0).
            if max_to_delete is not None and deleted_count >= max_to_delete:
                break
            if self._can_delete_execution(execution, dep_creation_execs):
                sm.delete(execution)
                deleted_count += 1
    return ListResult([{'count': deleted_count}],
                      {'pagination': pagination})