Example #1
    def lookup_logs(
        self,
        start_datetime,
        end_datetime,
        performer_name=None,
        repository_name=None,
        namespace_name=None,
        filter_kinds=None,
        page_token=None,
        max_page_count=None,
    ):
        rw_model = self.read_write_logs_model
        ro_model = self.read_only_logs_model

        page_token = page_token or {}

        new_page_token = {}
        # Serve pages from the read-write model until it is exhausted; the
        # token's "under_readonly_model" flag then switches reads over to the
        # read-only model. (page_token is always a dict here, so the old
        # "page_token is None" check was dead code.)
        if not page_token.get("under_readonly_model", False):
            rw_page_token = page_token.get("readwrite_page_token")
            rw_logs = rw_model.lookup_logs(
                start_datetime,
                end_datetime,
                performer_name,
                repository_name,
                namespace_name,
                filter_kinds,
                rw_page_token,
                max_page_count,
            )
            logs, next_page_token = rw_logs
            new_page_token["under_readonly_model"] = next_page_token is None
            new_page_token["readwrite_page_token"] = next_page_token
            return LogEntriesPage(logs, new_page_token)
        else:
            readonly_page_token = page_token.get("readonly_page_token")
            ro_logs = ro_model.lookup_logs(
                start_datetime,
                end_datetime,
                performer_name,
                repository_name,
                namespace_name,
                filter_kinds,
                readonly_page_token,
                max_page_count,
            )
            logs, next_page_token = ro_logs
            if next_page_token is None:
                return LogEntriesPage(logs, None)

            new_page_token["under_readonly_model"] = True
            new_page_token["readonly_page_token"] = next_page_token
            return LogEntriesPage(logs, new_page_token)
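
Example #1 is a combined model: it serves pages from the read-write model until that model is exhausted, then flips the token's "under_readonly_model" flag and continues from the read-only model. A minimal driver sketch of how a caller might walk the pages, assuming only that lookup_logs returns a LogEntriesPage with logs and next_page_token attributes (the model name and helper function are illustrative, not part of the original code):

    def iterate_all_log_pages(model, start_datetime, end_datetime):
        # Feed each composite token ({"under_readonly_model": ..., ...}) back
        # into the next call; a token of None marks the final page.
        page_token = None
        while True:
            page = model.lookup_logs(start_datetime, end_datetime, page_token=page_token)
            yield from page.logs
            if page.next_page_token is None:
                break
            page_token = page.next_page_token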
Example #2
    def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
                    namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
        # In-memory variant: ignore pagination and return all matches at once.
        logs = []
        for log_and_repo in self._filter_logs(start_datetime, end_datetime, performer_name,
                                              repository_name, namespace_name, filter_kinds):
            logs.append(log_and_repo.log)
        return LogEntriesPage(logs, None)
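
All of these implementations hand back a LogEntriesPage. A minimal stand-in definition, assuming it is the simple (logs, next_page_token) pair its usage here suggests:

    from collections import namedtuple

    # One page of results plus an opaque token for fetching the next page;
    # a next_page_token of None signals the final page.
    LogEntriesPage = namedtuple("LogEntriesPage", ["logs", "next_page_token"])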
Example #3
    def lookup_logs(
        self,
        start_datetime,
        end_datetime,
        performer_name=None,
        repository_name=None,
        namespace_name=None,
        filter_kinds=None,
        page_token=None,
        max_page_count=None,
    ):
        if filter_kinds is not None:
            assert all(
                isinstance(kind_name, str) for kind_name in filter_kinds)

        assert start_datetime is not None
        assert end_datetime is not None

        repository = None
        if repository_name and namespace_name:
            repository = model.repository.get_repository(
                namespace_name, repository_name)
            assert repository

        performer = None
        if performer_name:
            performer = model.user.get_user(performer_name)
            assert performer

        # Helper: run one paginated query against a single log model/table.
        def get_logs(m, page_token):
            logs_query = model.log.get_logs_query(
                start_datetime,
                end_datetime,
                performer=performer,
                repository=repository,
                namespace=namespace_name,
                ignore=filter_kinds,
                model=m,
            )

            logs, next_page_token = model.modelutil.paginate(
                logs_query,
                m,
                descending=True,
                page_token=page_token,
                limit=20,
                max_page=max_page_count,
                sort_field_name="datetime",
            )

            return logs, next_page_token

        # "tti" stores the index of the table to resume from when paging
        # across the LOG_MODELS tables in order.
        TOKEN_TABLE_ID = "tti"
        table_index = 0
        logs = []
        next_page_token = page_token or None

        # Skip empty pages (empty table)
        while len(logs) == 0 and table_index < len(LOG_MODELS) - 1:
            table_specified = (next_page_token is not None and
                               next_page_token.get(TOKEN_TABLE_ID) is not None)
            if table_specified:
                table_index = next_page_token.get(TOKEN_TABLE_ID)

            logs_result, next_page_token = get_logs(LOG_MODELS[table_index],
                                                    next_page_token)
            logs.extend(logs_result)

            if next_page_token is None and table_index < len(LOG_MODELS) - 1:
                next_page_token = {TOKEN_TABLE_ID: table_index + 1}

        return LogEntriesPage([Log.for_logentry(log) for log in logs],
                              next_page_token)
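
This table-backed variant pages across several log tables in order, storing the index of the next table under the "tti" key so a follow-up request resumes where the previous one stopped. A toy illustration of that hand-off logic in isolation (the function name is illustrative, not from the original code):

    TOKEN_TABLE_ID = "tti"

    def advance_table_token(next_page_token, table_index, table_count):
        # When the current table is exhausted (its own token is None) but more
        # tables remain, point the next request at the following table.
        if next_page_token is None and table_index < table_count - 1:
            return {TOKEN_TABLE_ID: table_index + 1}
        return next_page_token

    assert advance_table_token(None, 0, 2) == {TOKEN_TABLE_ID: 1}
    assert advance_table_token(None, 1, 2) is None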
Example #4
    def lookup_logs(
        self,
        start_datetime,
        end_datetime,
        performer_name=None,
        repository_name=None,
        namespace_name=None,
        filter_kinds=None,
        page_token=None,
        max_page_count=None,
    ):
        assert start_datetime is not None and end_datetime is not None

        # When migrating online from the combined model, unwrap its composite
        # token down to the read-write page token it carries.
        if page_token is not None and page_token.get("readwrite_page_token") is not None:
            page_token = page_token.get("readwrite_page_token")

        if page_token is not None and max_page_count is not None:
            page_number = page_token.get("page_number")
            if page_number is not None and page_number + 1 > max_page_count:
                return LogEntriesPage([], None)

        repository_id, account_id, performer_id = DocumentLogsModel._get_ids_by_names(
            repository_name, namespace_name, performer_name)

        after_datetime = None
        after_random_id = None
        if page_token is not None:
            after_datetime = parse_datetime(page_token["datetime"])
            after_random_id = page_token["random_id"]

        if after_datetime is not None:
            end_datetime = min(end_datetime, after_datetime)

        all_logs = []

        with CloseForLongOperation(config.app_config):
            for current_date in _date_range_descending(start_datetime,
                                                       end_datetime):
                try:
                    logs = self._load_logs_for_day(
                        current_date,
                        performer_id,
                        repository_id,
                        account_id,
                        filter_kinds,
                        after_datetime,
                        after_random_id,
                        size=PAGE_SIZE + 1,
                    )

                    all_logs.extend(logs)
                except NotFoundError:
                    continue

                if len(all_logs) > PAGE_SIZE:
                    break

        next_page_token = None
        all_logs = all_logs[0:PAGE_SIZE + 1]

        if len(all_logs) == PAGE_SIZE + 1:
            # The extra element fetched beyond PAGE_SIZE is only used to check
            # whether more elements exist. The second-to-last element becomes
            # the pagination token, because search_after does not include the
            # exact match and the next page therefore starts with the last
            # returned element. This keeps the behavior identical to
            # table_logs_model, so callers can rely on a non-empty pagination
            # token meaning at least one more log can be retrieved.
            next_page_token = {
                "datetime": all_logs[-2].datetime.isoformat(),
                "random_id": all_logs[-2].random_id,
                "page_number":
                page_token["page_number"] + 1 if page_token else 1,
            }

        return LogEntriesPage(
            _for_elasticsearch_logs(all_logs[:PAGE_SIZE], repository_id,
                                    account_id),
            next_page_token,
        )
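
The document-store variant keeps its cursor as a plain dict of the last-seen sort keys (datetime plus random_id) and fetches one row beyond PAGE_SIZE purely to detect whether another page exists. A minimal sketch of that token construction on its own, assuming each log exposes datetime and random_id as in the code above (the PAGE_SIZE value and function name are illustrative):

    PAGE_SIZE = 20  # illustrative; the real value comes from module configuration

    def build_next_page_token(all_logs, page_token):
        # The second-to-last row becomes the cursor because search_after-style
        # queries exclude the exact match, so the next page starts at the last
        # returned row.
        if len(all_logs) != PAGE_SIZE + 1:
            return None
        return {
            "datetime": all_logs[-2].datetime.isoformat(),
            "random_id": all_logs[-2].random_id,
            "page_number": page_token["page_number"] + 1 if page_token else 1,
        }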
                                "kind_id": [1]
                            }
                        }]
                    }
                },
            ]
        }
    },
    "size": 2,
}
SEARCH_PAGE_TOKEN = {
    "datetime": datetime(2018, 3, 8, 3, 30).isoformat(),
    "random_id": 233,
    "page_number": 1,
}
SEARCH_PAGE_START = LogEntriesPage(logs=[_log1],
                                   next_page_token=SEARCH_PAGE_TOKEN)
SEARCH_PAGE_END = LogEntriesPage(logs=[_log2], next_page_token=None)
SEARCH_PAGE_EMPTY = LogEntriesPage([], None)

AGGS_RESPONSE = _status(
    _shards({
        "hits": {
            "total": 4,
            "max_score": None,
            "hits": []
        },
        "aggregations": {
            "by_id": {
                "doc_count_error_upper_bound":
                0,
                "sum_other_doc_count":