Example #1
0
def remove_search_indexes(project_slug, version_slug=None):
    """Task-friendly entry point that delegates to ``remove_indexed_files``.

    :param project_slug: slug of the project whose indexes are removed.
    :param version_slug: optional version slug; when ``None`` the removal
        applies across versions of the project.
    """
    kwargs = {
        'model': HTMLFile,
        'project_slug': project_slug,
        'version_slug': version_slug,
    }
    remove_indexed_files(**kwargs)
Example #2
0
    def test_search_ignore(self, api_client):
        """Pages flagged with ``ignore`` must drop out of search results."""
        project = Project.objects.get(slug='docs')
        version = project.versions.all().first()

        page_index = HTMLFile.objects.get(version=version, path='index.html')
        page_guides = HTMLFile.objects.get(
            version=version,
            path='guides/index.html',
        )

        search_params = {
            'project': project.slug,
            'version': version.slug,
            'q': '"content from"',
        }

        def result_paths():
            # Run the search and return the matched paths, asserting success.
            resp = self.get_search(api_client, search_params)
            assert resp.status_code == 200
            return [hit['path'] for hit in resp.data['results']]

        def reindex():
            # Rebuild the index so ignore flags take effect.
            remove_indexed_files(HTMLFile, project.slug, version.slug)
            index_new_files(HTMLFile, version, page_index.build)

        # Neither page is ignored yet, so both should match the query.
        assert page_index.ignore is None
        assert page_guides.ignore is None
        assert result_paths() == [
            '/en/latest/index.html',
            '/en/latest/guides/index.html',
        ]

        # Ignore guides/index.html: only index.html should remain.
        page_guides.ignore = True
        page_guides.save()
        reindex()
        assert result_paths() == ['/en/latest/index.html']

        # Ignore index.html as well: no results at all.
        page_index.ignore = True
        page_index.save()
        reindex()
        assert result_paths() == []
    def test_remove_only_one_project_index(self, api_client, all_projects):
        """Removing one project's indexed files must not touch other projects."""
        target = 'kuma'

        # The target project starts with search hits for both versions.
        assert self.has_results(api_client, target, LATEST)
        assert self.has_results(api_client, target, STABLE)

        utils.remove_indexed_files(HTMLFile, project_slug=target)
        # Index deletion in ES is asynchronous; give it a moment to settle.
        time.sleep(1)

        assert self.has_results(api_client, target, LATEST) is False
        assert self.has_results(api_client, target, STABLE) is False

        # Every other project keeps its results for both versions.
        for other_project in ('pipeline', 'docs'):
            for version in (LATEST, STABLE):
                assert self.has_results(api_client, other_project, version) is True
Example #4
0
def _sync_imported_files(version, build):
    """
    Sync/Update/Delete ImportedFiles objects of this version.

    :param version: Version instance
    :param build: Build id
    """
    project = version.project

    # Push the HTMLFiles produced by this build into ElasticSearch.
    index_new_files(model=HTMLFile, version=version, build=build)

    # Drop ES documents belonging to earlier builds of this version.
    remove_indexed_files(
        model=HTMLFile,
        project_slug=project.slug,
        version_slug=version.slug,
        build_id=build,
    )

    # SphinxDomain rows from previous builds must be deleted explicitly,
    # before the ImportedFiles and not via cascade: several domains may
    # reference the same HTMLFile.
    stale_domains = SphinxDomain.objects.filter(
        project=project,
        version=version,
    ).exclude(build=build)
    stale_domains.delete()

    # Finally remove ImportedFile rows (HTMLFiles included) left over
    # from the previous build of this version.
    stale_files = ImportedFile.objects.filter(
        project=project,
        version=version,
    ).exclude(build=build)
    stale_files.delete()