Example #1
    def extract_version(cls, obj, version_obj):
        from olympia.versions.models import License, Version

        data = (
            {
                'id': version_obj.pk,
                'compatible_apps': cls.extract_compatibility_info(obj, version_obj),
                'files': [
                    {
                        'id': file_.id,
                        'created': file_.created,
                        'filename': file_.filename,
                        'hash': file_.hash,
                        'is_webextension': file_.is_webextension,
                        'is_mozilla_signed_extension': (
                            file_.is_mozilla_signed_extension
                        ),
                        'is_restart_required': file_.is_restart_required,
                        'size': file_.size,
                        'status': file_.status,
                        'strict_compatibility': file_.strict_compatibility,
                        'permissions': file_.permissions,
                        'optional_permissions': file_.optional_permissions,
                    }
                    for file_ in version_obj.all_files
                ],
                'reviewed': version_obj.reviewed,
                'version': version_obj.version,
            }
            if version_obj
            else None
        )
        if data and version_obj:
            attach_trans_dict(Version, [version_obj])
            data.update(
                cls.extract_field_api_translations(
                    version_obj, 'release_notes', db_field='release_notes_id'
                )
            )
            if version_obj.license:
                data['license'] = {
                    'id': version_obj.license.id,
                    'builtin': bool(version_obj.license.builtin),
                    'url': version_obj.license.url,
                }
                attach_trans_dict(License, [version_obj.license])
                data['license'].update(
                    cls.extract_field_api_translations(version_obj.license, 'name')
                )
        return data
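
A minimal usage sketch of the pattern these examples share, grounded in the assertions of Examples #2 and #6 below: attach_trans_dict bulk-fetches every translation for a batch of instances and stores them on each instance as a defaultdict keyed by translation id, with lists of (locale, string) tuples as values. The import paths below are assumptions for illustration; the rest follows what the examples assert.

# Sketch only; the import paths are assumptions, not confirmed by the examples above.
from olympia.addons.models import Addon
from olympia.translations.models import attach_trans_dict

addons = list(Addon.objects.all()[:50])
attach_trans_dict(Addon, addons)  # one bulk fetch for all translated fields

for addon in addons:
    # addon.translations is a defaultdict(list) keyed by translation id,
    # e.g. addon.translations[addon.name_id] == [('en-us', 'Name'), ('fr', 'Nom')]
    for locale, value in addon.translations[addon.name_id]:
        print(addon.pk, locale, value)
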
Example #2
    def test_basic(self):
        addon = addon_factory(
            name='Name',
            description='Description <script>alert(42)</script>!',
            eula='',
            summary='Summary',
            homepage='http://home.pa.ge',
            developer_comments='Developer Comments',
            privacy_policy='Policy',
            support_email='*****@*****.**',
            support_url='http://su.pport.url')
        addon.save()

        # Quick sanity checks: is description properly escaped? The underlying
        # implementation should leave localized_string un-escaped but never use
        # it for __unicode__. We depend on this behaviour later in the test.
        assert '<script>' in addon.description.localized_string
        assert '<script>' not in addon.description.localized_string_clean
        assert '<script>' not in unicode(addon.description)

        # Attach trans dict.
        attach_trans_dict(Addon, [addon])
        assert isinstance(addon.translations, collections.defaultdict)
        translations = dict(addon.translations)

        # addon.translations is a defaultdict.
        assert addon.translations['whatever'] == []

        # Non-translated fields should be absent.
        assert addon.thankyou_note_id is None
        assert None not in translations

        # Build expected translations dict.
        expected_translations = {
            addon.eula_id: [('en-us', unicode(addon.eula))],
            addon.privacy_policy_id:
            [('en-us', unicode(addon.privacy_policy))],
            addon.description_id: [('en-us', unicode(addon.description))],
            addon.developer_comments_id:
            [('en-us', unicode(addon.developer_comments))],
            addon.summary_id: [('en-us', unicode(addon.summary))],
            addon.homepage_id: [('en-us', unicode(addon.homepage))],
            addon.name_id: [('en-us', unicode(addon.name))],
            addon.support_email_id: [('en-us', unicode(addon.support_email))],
            addon.support_url_id: [('en-us', unicode(addon.support_url))]
        }
        assert translations == expected_translations
Example #3
    def test_defer_all_fields(self):
        addon = addon_factory(
            name='Name',
            description='Description <script>alert(42)</script>!',
            eula='',
            summary='Summary',
            homepage='http://home.pa.ge',
            developer_comments='Developer Comments',
            support_email='*****@*****.**',
            support_url='http://su.pport.url',
        )
        addon.save()

        addons = Addon.objects.only('id').all()
        attach_trans_dict(Addon, addons)
        addon = addons[0]
        assert isinstance(addon.translations, collections.defaultdict)
        translations = dict(addon.translations)
        assert list(translations.values()) == []
Example #4
    def test_basic(self):
        addon = addon_factory(
            name='Name', description='Description <script>alert(42)</script>!',
            eula='', summary='Summary', homepage='http://home.pa.ge',
            developer_comments='Developer Comments',
            support_email='*****@*****.**', support_url='http://su.pport.url')
        addon.save()

        # Quick sanity checks: is description properly escaped? The underlying
        # implementation should leave localized_string un-escaped but never use
        # it for __str__. We depend on this behaviour later in the test.
        assert '<script>' in addon.description.localized_string
        assert '<script>' not in addon.description.localized_string_clean
        assert '<script>' not in six.text_type(addon.description)

        # Attach trans dict.
        attach_trans_dict(Addon, [addon])
        assert isinstance(addon.translations, collections.defaultdict)
        translations = dict(addon.translations)

        # addon.translations is a defaultdict.
        assert addon.translations['whatever'] == []

        # Non-translated fields should be absent.
        assert addon.privacy_policy_id is None
        assert None not in translations

        # Build expected translations dict.
        expected_translations = {
            addon.eula_id: [('en-us', six.text_type(addon.eula))],
            addon.description_id: [
                ('en-us', six.text_type(addon.description))],
            addon.developer_comments_id:
                [('en-us', six.text_type(addon.developer_comments))],
            addon.summary_id: [('en-us', six.text_type(addon.summary))],
            addon.homepage_id: [('en-us', six.text_type(addon.homepage))],
            addon.name_id: [('en-us', six.text_type(addon.name))],
            addon.support_email_id: [
                ('en-us', six.text_type(addon.support_email))],
            addon.support_url_id: [
                ('en-us', six.text_type(addon.support_url))]
        }
        assert translations == expected_translations
Example #5
    def test_deferred_field(self):
        addon = addon_factory(
            name='Name',
            description='Description <script>alert(42)</script>!',
            eula='',
            summary='Summary',
            homepage='http://home.pa.ge',
            developer_comments='Developer Comments',
            support_email='*****@*****.**',
            support_url='http://su.pport.url',
        )
        addon.save()
        description_id = addon.description_id

        addons = Addon.objects.defer('description').all()
        attach_trans_dict(Addon, addons)
        addon = addons[0]
        assert isinstance(addon.translations, collections.defaultdict)
        translations = dict(addon.translations)
        assert translations[addon.name_id]
        assert description_id not in translations
Example #6
    def test_multiple_objects_with_multiple_translations(self):
        addon = addon_factory()
        addon.description = {
            'fr': 'French Description',
            'en-us': 'English Description'
        }
        addon.save()
        addon2 = addon_factory(description='English 2 Description')
        addon2.name = {
            'fr': 'French 2 Name',
            'en-us': 'English 2 Name',
            'es': 'Spanish 2 Name'
        }
        addon2.save()
        attach_trans_dict(Addon, [addon, addon2])
        assert set(addon.translations[addon.description_id]) == (
            set([('en-us', 'English Description'),
                 ('fr', 'French Description')]))
        assert set(addon2.translations[addon2.name_id]) == (
            set([('en-us', 'English 2 Name'),
                 ('es', 'Spanish 2 Name'),
                 ('fr', 'French 2 Name')]))
Example #7
    def extract_version(cls, obj, version_obj):
        from olympia.versions.models import License, Version

        data = {
            'id': version_obj.pk,
            'compatible_apps': cls.extract_compatibility_info(
                obj, version_obj),
            'files': [{
                'id': file_.id,
                'created': file_.created,
                'filename': file_.filename,
                'hash': file_.hash,
                'is_webextension': file_.is_webextension,
                'is_mozilla_signed_extension': (
                    file_.is_mozilla_signed_extension),
                'is_restart_required': file_.is_restart_required,
                'platform': file_.platform,
                'size': file_.size,
                'status': file_.status,
                'strict_compatibility': file_.strict_compatibility,
                'webext_permissions_list': file_.webext_permissions_list,
            } for file_ in version_obj.all_files],
            'reviewed': version_obj.reviewed,
            'version': version_obj.version,
        } if version_obj else None
        if data and version_obj:
            attach_trans_dict(Version, [version_obj])
            data.update(cls.extract_field_api_translations(
                version_obj, 'release_notes', db_field='release_notes_id'))
            if version_obj.license:
                data['license'] = {
                    'id': version_obj.license.id,
                    'builtin': version_obj.license.builtin,
                    'url': version_obj.license.url,
                }
                attach_trans_dict(License, [version_obj.license])
                data['license'].update(cls.extract_field_api_translations(
                    version_obj.license, 'name'))
        return data
Example #8
    def test_multiple_objects_with_multiple_translations(self):
        addon = addon_factory()
        addon.description = {
            'fr': 'French Description',
            'en-us': 'English Description'
        }
        addon.save()
        addon2 = addon_factory(description='English 2 Description')
        addon2.name = {
            'fr': 'French 2 Name',
            'en-us': 'English 2 Name',
            'es': 'Spanish 2 Name'
        }
        addon2.save()
        attach_trans_dict(Addon, [addon, addon2])
        assert set(addon.translations[addon.description_id]) == (set([
            ('en-us', 'English Description'), ('fr', 'French Description')
        ]))
        assert set(addon2.translations[addon2.name_id]) == (set([
            ('en-us', 'English 2 Name'), ('es', 'Spanish 2 Name'),
            ('fr', 'French 2 Name')
        ]))
Example #9
    def _locales_transformer(self, objs):
        current_versions = [
            obj.addon._current_version for obj in objs
            if obj.addon._current_version
        ]
        addons = [obj.addon for obj in objs]
        attach_trans_dict(CollectionAddon, objs)
        attach_trans_dict(Addon, addons)
        attach_trans_dict(License, [ver.license for ver in current_versions])
Example #10
    def _locales_transformer(self, objs):
        """
        Transformer to fetch all translations from objects related to CollectionAddon.

        This is necessary because the regular translation transformer only fetches
        translations for the current language, and only for the model the queryset is
        built from, not related objects.

        Only used when `lang` is not passed.
        """
        current_versions = [
            obj.addon._current_version for obj in objs if obj.addon._current_version
        ]
        addons = [obj.addon for obj in objs]
        attach_trans_dict(CollectionAddon, objs)
        attach_trans_dict(Addon, addons)
        attach_trans_dict(License, [ver.license for ver in current_versions])
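
A hedged sketch of how a transformer like this could be applied only when `lang` is not passed, as the docstring above describes. The `.transform()` queryset hook, the viewset method, and `get_collection()` are assumptions for illustration, not taken from the examples:

    def get_queryset(self):
        # Hypothetical wiring; .transform() and this viewset method are assumptions.
        qs = CollectionAddon.objects.filter(collection=self.get_collection())
        if 'lang' not in self.request.GET:
            # Without an explicit lang, fetch all translations for related
            # objects up front via the transformer defined above.
            qs = qs.transform(self._locales_transformer)
        return qs
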
Example #11
    def extract_document(cls, obj):
        """Extract indexable attributes from an add-on."""
        from olympia.addons.models import Preview

        attrs = ('id', 'average_daily_users', 'bayesian_rating',
                 'contributions', 'created', 'default_locale', 'guid',
                 'hotness', 'icon_type', 'is_disabled', 'is_experimental',
                 'last_updated', 'modified', 'public_stats',
                 'requires_payment', 'slug', 'status', 'type', 'view_source',
                 'weekly_downloads')
        data = {attr: getattr(obj, attr) for attr in attrs}

        if obj.type == amo.ADDON_PERSONA:
            # Personas are compatible with all platforms. They don't have files
            # so we have to fake the info to be consistent with the rest of the
            # add-ons stored in ES.
            data['platforms'] = [amo.PLATFORM_ALL.id]
            try:
                # Boost on popularity.
                data['boost'] = float(obj.persona.popularity**.2)
                data['has_theme_rereview'] = (
                    obj.persona.rereviewqueuetheme_set.exists())
                # 'weekly_downloads' field is used globally to sort, but
                # for themes weekly_downloads don't make much sense, use
                # popularity instead (FIXME: should be the other way around).
                data['weekly_downloads'] = obj.persona.popularity
                data['persona'] = {
                    'accentcolor': obj.persona.accentcolor,
                    'author': obj.persona.display_username,
                    'header': obj.persona.header,
                    'footer': obj.persona.footer,
                    'is_new': obj.persona.is_new(),
                    'textcolor': obj.persona.textcolor,
                }
            except ObjectDoesNotExist:
                # The instance won't have a persona while it's being created.
                pass
        else:
            if obj.current_version:
                data['platforms'] = [
                    p.id for p in obj.current_version.supported_platforms
                ]
            # Boost by the number of users on a logarithmic scale. The maximum
            # boost (11,000,000 users for adblock) is about 5x.
            data['boost'] = float(obj.average_daily_users**.2)
            data['has_theme_rereview'] = None

        data['app'] = [app.id for app in obj.compatible_apps.keys()]
        # Quadruple the boost if the add-on is public.
        if (obj.status == amo.STATUS_PUBLIC and not obj.is_experimental
                and 'boost' in data):
            data['boost'] = float(max(data['boost'], 1) * 4)
        # We can use all_categories because the indexing code goes through the
        # transformer that sets it.
        data['category'] = [cat.id for cat in obj.all_categories]
        data['current_version'] = cls.extract_version(obj, obj.current_version)
        data['current_beta_version'] = cls.extract_version(
            obj, obj.current_beta_version)
        data['listed_authors'] = [{
            'name': a.name,
            'id': a.id,
            'username': a.username,
            'is_public': a.is_public
        } for a in obj.listed_authors]

        data['is_featured'] = obj.is_featured(None, None)
        data['featured_for'] = [{
            'application': [app],
            'locales': list(locales)
        } for app, locales in obj.get_featured_by_app().items()]

        data['has_eula'] = bool(obj.eula)
        data['has_privacy_policy'] = bool(obj.privacy_policy)

        data['latest_unlisted_version'] = cls.extract_version(
            obj, obj.latest_unlisted_version)

        # We can use all_previews because the indexing code goes through the
        # transformer that sets it.
        data['previews'] = [{
            'id': preview.id,
            'modified': preview.modified
        } for preview in obj.all_previews]
        data['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_reviews,
            'text_count': obj.text_reviews_count,
        }
        # We can use tag_list because the indexing code goes through the
        # transformer that sets it (attach_tags).
        data['tags'] = getattr(obj, 'tag_list', [])

        # Handle localized fields.
        # First, deal with the 3 fields that need everything:
        for field in ('description', 'name', 'summary'):
            data.update(cls.extract_field_raw_translations(obj, field))
            data.update(cls.extract_field_search_translations(obj, field))
            data.update(cls.extract_field_analyzed_translations(obj, field))

        # Then add fields that only need to be returned to the API without
        # contributing to search relevancy.
        for field in ('developer_comments', 'homepage', 'support_email',
                      'support_url'):
            data.update(cls.extract_field_raw_translations(obj, field))
        # Also do that for preview captions, which are set on each preview
        # object.
        attach_trans_dict(Preview, obj.all_previews)
        for i, preview in enumerate(obj.all_previews):
            data['previews'][i].update(
                cls.extract_field_raw_translations(preview, 'caption'))

        # Finally, add the special sort field, coercing the current translation
        # into a unicode object first.
        data['name_sort'] = unicode(obj.name).lower()

        return data
Example #12
    def test_no_objects(self):
        # Calling attach_trans_dict on an empty list/queryset shouldn't do anything.
        attach_trans_dict(Addon, [])
        attach_trans_dict(Addon, Addon.objects.none())
Example #13
    def extract_document(cls, obj):
        """Extract indexable attributes from an add-on."""
        from olympia.addons.models import Preview

        attrs = ('id', 'average_daily_users', 'bayesian_rating',
                 'contributions', 'created',
                 'default_locale', 'guid', 'hotness', 'icon_hash', 'icon_type',
                 'is_disabled', 'is_experimental', 'last_updated',
                 'modified', 'public_stats', 'requires_payment', 'slug',
                 'status', 'type', 'view_source', 'weekly_downloads')
        data = {attr: getattr(obj, attr) for attr in attrs}

        if obj.type == amo.ADDON_PERSONA:
            # Personas are compatible with all platforms. They don't have files
            # so we have to fake the info to be consistent with the rest of the
            # add-ons stored in ES.
            data['platforms'] = [amo.PLATFORM_ALL.id]
            try:
                data['has_theme_rereview'] = (
                    obj.persona.rereviewqueuetheme_set.exists())
                # Theme popularity is roughly equivalent to average daily users
                # (the period is not the same and the methodology differs since
                # themes don't have updates, but it's good enough).
                data['average_daily_users'] = obj.persona.popularity
                # 'weekly_downloads' field is used globally to sort, but
                # for themes weekly_downloads don't make much sense, use
                # popularity instead. To keep it comparable with extensions,
                # multiply by 7. (FIXME: could we stop sorting by downloads,
                # even stop exposing downloads numbers in API/pages outside of
                # the statistic-specific pages?)
                data['weekly_downloads'] = obj.persona.popularity * 7
                data['persona'] = {
                    'accentcolor': obj.persona.accentcolor,
                    'author': obj.persona.display_username,
                    'header': obj.persona.header,
                    'footer': obj.persona.footer,
                    'is_new': obj.persona.is_new(),
                    'textcolor': obj.persona.textcolor,
                }
            except ObjectDoesNotExist:
                # The instance won't have a persona while it's being created.
                pass
        else:
            if obj.current_version:
                data['platforms'] = [p.id for p in
                                     obj.current_version.supported_platforms]
            data['has_theme_rereview'] = None

        data['app'] = [app.id for app in obj.compatible_apps.keys()]
        # Boost by the number of users on a logarithmic scale.
        data['boost'] = float(data['average_daily_users'] ** .2)
        # Quadruple the boost if the add-on is public.
        if (obj.status == amo.STATUS_PUBLIC and not obj.is_experimental and
                'boost' in data):
            data['boost'] = float(max(data['boost'], 1) * 4)
        # We can use all_categories because the indexing code goes through the
        # transformer that sets it.
        data['category'] = [cat.id for cat in obj.all_categories]
        data['current_version'] = cls.extract_version(
            obj, obj.current_version)
        data['listed_authors'] = [
            {'name': a.name, 'id': a.id, 'username': a.username,
             'is_public': a.is_public}
            for a in obj.listed_authors
        ]

        data['is_featured'] = obj.is_featured(None, None)
        data['featured_for'] = [
            {'application': [app], 'locales': list(sorted(locales))}
            for app, locales in obj.get_featured_by_app().items()]

        data['has_eula'] = bool(obj.eula)
        data['has_privacy_policy'] = bool(obj.privacy_policy)

        data['latest_unlisted_version'] = cls.extract_version(
            obj, obj.latest_unlisted_version)

        data['previews'] = [{'id': preview.id, 'modified': preview.modified,
                             'sizes': preview.sizes}
                            for preview in obj.current_previews]
        data['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }
        # We can use tag_list because the indexing code goes through the
        # transformer that sets it (attach_tags).
        data['tags'] = getattr(obj, 'tag_list', [])

        # Handle localized fields.
        # First, deal with the 3 fields that need everything:
        for field in ('description', 'name', 'summary'):
            data.update(cls.extract_field_api_translations(obj, field))
            data.update(cls.extract_field_search_translation(
                obj, field, obj.default_locale))
            data.update(cls.extract_field_analyzed_translations(obj, field))

        # Then add fields that only need to be returned to the API without
        # contributing to search relevancy.
        for field in ('developer_comments', 'homepage', 'support_email',
                      'support_url'):
            data.update(cls.extract_field_api_translations(obj, field))
        if obj.type != amo.ADDON_STATICTHEME:
            # Also do that for preview captions, which are set on each preview
            # object.
            attach_trans_dict(Preview, obj.current_previews)
            for i, preview in enumerate(obj.current_previews):
                data['previews'][i].update(
                    cls.extract_field_api_translations(preview, 'caption'))

        return data
Example #14
def attach_translations(collections):
    """Put all translations into a translations dict."""
    attach_trans_dict(Collection, collections)
Example #15
    def extract_document(cls, obj):
        """Extract indexable attributes from an add-on."""
        from olympia.addons.models import Preview

        attrs = ('id', 'average_daily_users', 'bayesian_rating',
                 'contributions', 'created',
                 'default_locale', 'guid', 'hotness', 'icon_hash', 'icon_type',
                 'is_disabled', 'is_experimental', 'last_updated',
                 'modified', 'public_stats', 'requires_payment', 'slug',
                 'status', 'type', 'view_source', 'weekly_downloads')
        data = {attr: getattr(obj, attr) for attr in attrs}

        data['colors'] = None
        if obj.type == amo.ADDON_PERSONA:
            # Personas are compatible with all platforms. They don't have files
            # so we have to fake the info to be consistent with the rest of the
            # add-ons stored in ES.
            data['platforms'] = [amo.PLATFORM_ALL.id]
            try:
                data['has_theme_rereview'] = (
                    obj.persona.rereviewqueuetheme_set.exists())
                # Theme popularity is roughly equivalent to average daily users
                # (the period is not the same and the methodology differs since
                # themes don't have updates, but it's good enough).
                data['average_daily_users'] = obj.persona.popularity
                # 'weekly_downloads' field is used globally to sort, but
                # for themes weekly_downloads don't make much sense, use
                # popularity instead. To keep it comparable with extensions,
                # multiply by 7. (FIXME: could we stop sorting by downloads,
                # even stop exposing downloads numbers in API/pages outside of
                # the statistic-specific pages?)
                data['weekly_downloads'] = obj.persona.popularity * 7
                data['persona'] = {
                    'accentcolor': obj.persona.accentcolor,
                    'author': obj.persona.display_username,
                    'header': obj.persona.header,
                    'footer': obj.persona.footer,
                    'is_new': obj.persona.is_new(),
                    'textcolor': obj.persona.textcolor,
                }
            except ObjectDoesNotExist:
                # The instance won't have a persona while it's being created.
                pass
        else:
            if obj.current_version:
                data['platforms'] = [p.id for p in
                                     obj.current_version.supported_platforms]
            data['has_theme_rereview'] = None

            # Extract dominant colors from static themes.
            if obj.type == amo.ADDON_STATICTHEME:
                first_preview = obj.current_previews.first()
                if first_preview:
                    data['colors'] = first_preview.colors

        data['app'] = [app.id for app in obj.compatible_apps.keys()]
        # Boost by the number of users on a logarithmic scale.
        data['boost'] = float(data['average_daily_users'] ** .2)
        # Quadruple the boost if the add-on is public.
        if (obj.status == amo.STATUS_PUBLIC and not obj.is_experimental and
                'boost' in data):
            data['boost'] = float(max(data['boost'], 1) * 4)
        # We can use all_categories because the indexing code goes through the
        # transformer that sets it.
        data['category'] = [cat.id for cat in obj.all_categories]
        data['current_version'] = cls.extract_version(
            obj, obj.current_version)
        data['listed_authors'] = [
            {'name': a.name, 'id': a.id, 'username': a.username,
             'is_public': a.is_public}
            for a in obj.listed_authors
        ]

        data['is_featured'] = obj.is_featured(None, None)
        data['featured_for'] = [
            {'application': [app], 'locales': list(sorted(locales))}
            for app, locales in obj.get_featured_by_app().items()]

        data['has_eula'] = bool(obj.eula)
        data['has_privacy_policy'] = bool(obj.privacy_policy)

        data['previews'] = [{'id': preview.id, 'modified': preview.modified,
                             'sizes': preview.sizes}
                            for preview in obj.current_previews]
        data['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }
        # We can use tag_list because the indexing code goes through the
        # transformer that sets it (attach_tags).
        data['tags'] = getattr(obj, 'tag_list', [])

        # Handle localized fields.
        # First, deal with the 3 fields that need everything:
        for field in ('description', 'name', 'summary'):
            data.update(cls.extract_field_api_translations(obj, field))
            data.update(cls.extract_field_search_translation(
                obj, field, obj.default_locale))
            data.update(cls.extract_field_analyzed_translations(obj, field))

        # Then add fields that only need to be returned to the API without
        # contributing to search relevancy.
        for field in ('developer_comments', 'homepage', 'support_email',
                      'support_url'):
            data.update(cls.extract_field_api_translations(obj, field))
        if obj.type != amo.ADDON_STATICTHEME:
            # Also do that for preview captions, which are set on each preview
            # object.
            attach_trans_dict(Preview, obj.current_previews)
            for i, preview in enumerate(obj.current_previews):
                data['previews'][i].update(
                    cls.extract_field_api_translations(preview, 'caption'))

        return data
Example #16
    def extract_document(cls, obj):
        """Extract indexable attributes from an add-on."""
        from olympia.addons.models import Preview

        attrs = ('id', 'average_daily_users', 'bayesian_rating', 'created',
                 'default_locale', 'guid', 'hotness', 'icon_type',
                 'is_disabled', 'is_experimental', 'is_listed', 'last_updated',
                 'modified', 'public_stats', 'slug', 'status', 'type',
                 'view_source', 'weekly_downloads')
        data = {attr: getattr(obj, attr) for attr in attrs}

        if obj.type == amo.ADDON_PERSONA:
            try:
                # Boost on popularity.
                data['boost'] = float(obj.persona.popularity ** .2)
                data['has_theme_rereview'] = (
                    obj.persona.rereviewqueuetheme_set.exists())
                # 'weekly_downloads' field is used globally to sort, but
                # for themes weekly_downloads don't make much sense, use
                # popularity instead (FIXME: should be the other way around).
                data['weekly_downloads'] = obj.persona.popularity
                data['persona'] = {
                    'accentcolor': obj.persona.accentcolor,
                    'author': obj.persona.display_username,
                    'header': obj.persona.header,
                    'footer': obj.persona.footer,
                    'is_new': obj.persona.is_new(),
                    'textcolor': obj.persona.textcolor,
                }
            except ObjectDoesNotExist:
                # The instance won't have a persona while it's being created.
                pass
        else:
            # Boost by the number of users on a logarithmic scale. The maximum
            # boost (11,000,000 users for adblock) is about 5x.
            data['boost'] = float(obj.average_daily_users ** .2)
            data['has_theme_rereview'] = None

        data['app'] = [app.id for app in obj.compatible_apps.keys()]
        data['appversion'] = {}
        for app, appver in obj.compatible_apps.items():
            if appver:
                min_, max_ = appver.min.version_int, appver.max.version_int
                min_human, max_human = appver.min.version, appver.max.version
            else:
                # Fake wide compatibility for search tools and personas.
                min_, max_ = 0, version_int('9999')
                min_human, max_human = None, None
            data['appversion'][app.id] = {
                'min': min_, 'min_human': min_human,
                'max': max_, 'max_human': max_human,
            }
        # FIXME: See issue #3120, the 'authors' property is for
        # backwards-compatibility and all code should be switched
        # to use 'listed_authors.name' instead. We needed a reindex
        # first though, which is why the 2 are present at the
        # moment.
        data['authors'] = [a.name for a in obj.listed_authors]
        # Quadruple the boost if the add-on is public.
        if (obj.status == amo.STATUS_PUBLIC and not obj.is_experimental and
                'boost' in data):
            data['boost'] = float(max(data['boost'], 1) * 4)
        # We go through attach_categories and attach_tags transformer before
        # calling this function, it sets category_ids and tag_list.
        data['category'] = getattr(obj, 'category_ids', [])
        if obj.current_version:
            data['current_version'] = {
                'id': obj.current_version.pk,
                'files': [{
                    'id': file_.id,
                    'created': file_.created,
                    'filename': file_.filename,
                    'hash': file_.hash,
                    'platform': file_.platform,
                    'size': file_.size,
                    'status': file_.status,
                } for file_ in obj.current_version.all_files],
                'reviewed': obj.current_version.reviewed,
                'version': obj.current_version.version,
            }
            data['has_version'] = True
            data['platforms'] = [p.id for p in
                                 obj.current_version.supported_platforms]
        else:
            data['has_version'] = None
        data['listed_authors'] = [
            {'name': a.name, 'id': a.id, 'username': a.username}
            for a in obj.listed_authors
        ]

        # We can use all_previews because the indexing code goes through the
        # transformer that sets it.
        data['previews'] = [{'id': preview.id, 'modified': preview.modified}
                            for preview in obj.all_previews]
        data['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_reviews,
        }
        data['tags'] = getattr(obj, 'tag_list', [])

        # Handle localized fields.
        # First, deal with the 3 fields that need everything:
        for field in ('description', 'name', 'summary'):
            data.update(cls.extract_field_raw_translations(obj, field))
            data.update(cls.extract_field_search_translations(obj, field))
            data.update(cls.extract_field_analyzed_translations(obj, field))

        # Then add fields that only need to be returned to the API without
        # contributing to search relevancy.
        for field in ('homepage', 'support_email', 'support_url'):
            data.update(cls.extract_field_raw_translations(obj, field))
        # Also do that for preview captions, which are set on each preview
        # object.
        attach_trans_dict(Preview, obj.all_previews)
        for i, preview in enumerate(obj.all_previews):
            data['previews'][i].update(
                cls.extract_field_raw_translations(preview, 'caption'))

        # Finally, add the special sort field, coercing the current translation
        # into a unicode object first.
        data['name_sort'] = unicode(obj.name).lower()

        return data
Example #17
    def extract_document(cls, obj):
        """Extract indexable attributes from an add-on."""
        from olympia.addons.models import Preview

        attrs = ('id', 'average_daily_users', 'bayesian_rating', 'created',
                 'default_locale', 'guid', 'hotness', 'icon_type',
                 'is_disabled', 'is_experimental', 'is_listed', 'last_updated',
                 'modified', 'public_stats', 'slug', 'status', 'type',
                 'view_source', 'weekly_downloads')
        data = {attr: getattr(obj, attr) for attr in attrs}

        if obj.type == amo.ADDON_PERSONA:
            try:
                # Boost on popularity.
                data['boost'] = float(obj.persona.popularity ** .2)
                data['has_theme_rereview'] = (
                    obj.persona.rereviewqueuetheme_set.exists())
                # 'weekly_downloads' field is used globally to sort, but
                # for themes weekly_downloads don't make much sense, use
                # popularity instead (FIXME: should be the other way around).
                data['weekly_downloads'] = obj.persona.popularity
                data['persona'] = {
                    'accentcolor': obj.persona.accentcolor,
                    'author': obj.persona.display_username,
                    'header': obj.persona.header,
                    'footer': obj.persona.footer,
                    'is_new': obj.persona.is_new(),
                    'textcolor': obj.persona.textcolor,
                }
            except ObjectDoesNotExist:
                # The instance won't have a persona while it's being created.
                pass
        else:
            # Boost by the number of users on a logarithmic scale. The maximum
            # boost (11,000,000 users for adblock) is about 5x.
            data['boost'] = float(obj.average_daily_users ** .2)
            data['has_theme_rereview'] = None

        data['app'] = [app.id for app in obj.compatible_apps.keys()]
        # Quadruple the boost if the add-on is public.
        if (obj.status == amo.STATUS_PUBLIC and not obj.is_experimental and
                'boost' in data):
            data['boost'] = float(max(data['boost'], 1) * 4)
        # We can use all_categories because the indexing code goes through the
        # transformer that sets it.
        data['category'] = [cat.id for cat in obj.all_categories]
        if obj.current_version:
            data['current_version'] = cls.extract_version(
                obj, obj.current_version)
            data['has_version'] = True
            data['platforms'] = [p.id for p in
                                 obj.current_version.supported_platforms]
        else:
            data['has_version'] = None
        if obj.current_beta_version:
            data['current_beta_version'] = cls.extract_version(
                obj, obj.current_beta_version)
        else:
            data['current_beta_version'] = None
        data['listed_authors'] = [
            {'name': a.name, 'id': a.id, 'username': a.username}
            for a in obj.listed_authors
        ]

        data['has_eula'] = bool(obj.eula)
        data['has_privacy_policy'] = bool(obj.privacy_policy)

        # We can use all_previews because the indexing code goes through the
        # transformer that sets it.
        data['previews'] = [{'id': preview.id, 'modified': preview.modified}
                            for preview in obj.all_previews]
        data['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_reviews,
        }
        # We can use tag_list because the indexing code goes through the
        # transformer that sets it (attach_tags).
        data['tags'] = getattr(obj, 'tag_list', [])

        # Handle localized fields.
        # First, deal with the 3 fields that need everything:
        for field in ('description', 'name', 'summary'):
            data.update(cls.extract_field_raw_translations(obj, field))
            data.update(cls.extract_field_search_translations(obj, field))
            data.update(cls.extract_field_analyzed_translations(obj, field))

        # Then add fields that only need to be returned to the API without
        # contributing to search relevancy.
        for field in ('homepage', 'support_email', 'support_url'):
            data.update(cls.extract_field_raw_translations(obj, field))
        # Also do that for preview captions, which are set on each preview
        # object.
        attach_trans_dict(Preview, obj.all_previews)
        for i, preview in enumerate(obj.all_previews):
            data['previews'][i].update(
                cls.extract_field_raw_translations(preview, 'caption'))

        # Finally, add the special sort field, coercing the current translation
        # into a unicode object first.
        data['name_sort'] = unicode(obj.name).lower()

        return data
Example #18
def attach_translations(collections):
    """Put all translations into a translations dict."""
    attach_trans_dict(Collection, collections)
Example #19
    def extract_document(cls, obj):
        """Extract indexable attributes from an add-on."""
        from olympia.addons.models import Preview

        attrs = ('id', 'average_daily_users', 'bayesian_rating',
                 'contributions', 'created', 'default_locale', 'guid',
                 'hotness', 'icon_hash', 'icon_type', 'is_disabled',
                 'is_experimental', 'is_recommended', 'last_updated',
                 'modified', 'requires_payment', 'slug', 'status', 'type',
                 'view_source', 'weekly_downloads')
        data = {attr: getattr(obj, attr) for attr in attrs}

        data['colors'] = None
        if obj.current_version:
            data['platforms'] = [
                p.id for p in obj.current_version.supported_platforms
            ]

        # Extract dominant colors from static themes.
        if obj.type == amo.ADDON_STATICTHEME:
            first_preview = obj.current_previews.first()
            if first_preview:
                data['colors'] = first_preview.colors

        data['app'] = [app.id for app in obj.compatible_apps.keys()]
        # Boost by the number of users on a logarithmic scale.
        data['boost'] = float(data['average_daily_users']**.2)
        # Quadruple the boost if the add-on is public.
        if (obj.status == amo.STATUS_APPROVED and not obj.is_experimental
                and 'boost' in data):
            data['boost'] = float(max(data['boost'], 1) * 4)
        # We can use all_categories because the indexing code goes through the
        # transformer that sets it.
        data['category'] = [cat.id for cat in obj.all_categories]
        data['current_version'] = cls.extract_version(obj, obj.current_version)
        data['listed_authors'] = [{
            'name': a.name,
            'id': a.id,
            'username': a.username,
            'is_public': a.is_public
        } for a in obj.listed_authors]

        data['has_eula'] = bool(obj.eula)
        data['has_privacy_policy'] = bool(obj.privacy_policy)

        data['previews'] = [{
            'id': preview.id,
            'modified': preview.modified,
            'sizes': preview.sizes
        } for preview in obj.current_previews]
        data['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }
        # We can use tag_list because the indexing code goes through the
        # transformer that sets it (attach_tags).
        data['tags'] = getattr(obj, 'tag_list', [])

        # Handle localized fields.
        # First, deal with the 3 fields that need everything:
        for field in ('description', 'name', 'summary'):
            data.update(cls.extract_field_api_translations(obj, field))
            data.update(
                cls.extract_field_search_translation(obj, field,
                                                     obj.default_locale))
            data.update(cls.extract_field_analyzed_translations(obj, field))

        # Then add fields that only need to be returned to the API without
        # contributing to search relevancy.
        for field in ('developer_comments', 'homepage', 'support_email',
                      'support_url'):
            data.update(cls.extract_field_api_translations(obj, field))
        if obj.type != amo.ADDON_STATICTHEME:
            # Also do that for preview captions, which are set on each preview
            # object.
            attach_trans_dict(Preview, obj.current_previews)
            for i, preview in enumerate(obj.current_previews):
                data['previews'][i].update(
                    cls.extract_field_api_translations(preview, 'caption'))

        return data