Exemplo n.º 1
0
    def extract_document(cls, pk=None, obj=None):
        """Build the Elasticsearch document dict for a single instance.

        Either ``pk`` or ``obj`` must be provided; when only ``pk`` is
        given the instance is loaded through the model's default manager.
        """
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Pre-fetch translations so extraction below does not hit the DB.
        attach_trans_dict(cls.get_model(), [obj])

        # Plain model attributes copied over verbatim.
        plain_attrs = ('created', 'default_language', 'id', 'modified',
                       'slug', 'status', 'version')
        doc = {attr: getattr(obj, attr) for attr in plain_attrs}

        # is_disabled is here for compatibility with other filters, but never
        # set to True at the moment, and not present in the model.
        doc['is_disabled'] = False
        doc['name_sort'] = unicode(obj.name).lower()
        doc['guid'] = unicode(obj.uuid)

        # Localized fields: add both the variant used for search and the one
        # carrying all translations for the API.
        for field in cls.translated_fields:
            doc.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))

        # Fields indexed with language-specific analyzers.
        for field in cls.fields_with_language_analyzers:
            doc.update(cls.extract_field_analyzed_translations(obj, field))

        return doc
Exemplo n.º 2
0
    def test_multiple_objects_with_multiple_translations(self):
        """attach_trans_dict handles several objects, each carrying several
        locales, in a single call."""
        first = FancyModel.objects.create()
        first.purified = {
            'fr': 'French Purified',
            'en-us': 'English Purified'
        }
        first.save()

        second = FancyModel.objects.create(purified='English 2 Linkified')
        second.linkified = {
            'fr': 'French 2 Linkified',
            'en-us': 'English 2 Linkified',
            'es': 'Spanish 2 Linkified'
        }
        second.save()

        attach_trans_dict(FancyModel, [first, second])

        # Compare as sets: the attachment order is not guaranteed.
        eq_(set(first.translations[first.purified_id]),
            {('en-us', 'English Purified'), ('fr', 'French Purified')})
        eq_(set(second.translations[second.linkified_id]),
            {('en-us', 'English 2 Linkified'),
             ('es', 'Spanish 2 Linkified'),
             ('fr', 'French 2 Linkified')})
Exemplo n.º 3
0
    def test_basic(self):
        """attach_trans_dict attaches a defaultdict of (locale, string)
        pairs keyed by translation id."""
        instance = self.FancyModel.objects.create(
            purified='Purified <script>alert(42)</script>!',
            linkified='Linkified <script>alert(42)</script>!')

        # Quick sanity checks: is description properly escaped? The underlying
        # implementation should leave localized_string un-escaped but never use
        # it for __unicode__. We depend on this behaviour later in the test.
        for value in (instance.purified, instance.linkified):
            ok_('<script>' in value.localized_string)
            ok_('<script>' not in value.localized_string_clean)

        # Attach trans dict.
        attach_trans_dict(self.FancyModel, [instance])
        ok_(isinstance(instance.translations, collections.defaultdict))
        snapshot = dict(instance.translations)

        # instance.translations is a defaultdict: missing keys yield [].
        eq_(instance.translations['whatever'], [])

        # Non-translated fields should be absent.
        ok_(None not in snapshot)

        # Compare against the expected translations dict.
        eq_(snapshot, {
            instance.purified_id: [('en-us', unicode(instance.purified))],
            instance.linkified_id: [('en-us', unicode(instance.linkified))],
        })
Exemplo n.º 4
0
    def test_multiple_objects_with_multiple_translations(self):
        """Translations for several objects are attached in one pass."""
        obj = FancyModel.objects.create()
        obj.purified = {
            'fr': 'French Purified',
            'en-us': 'English Purified'
        }
        obj.save()

        obj2 = FancyModel.objects.create(purified='English 2 Linkified')
        obj2.linkified = {
            'fr': 'French 2 Linkified',
            'en-us': 'English 2 Linkified',
            'es': 'Spanish 2 Linkified'
        }
        obj2.save()

        attach_trans_dict(FancyModel, [obj, obj2])

        def attached(instance, trans_id):
            # Normalize to a set: attachment order is not guaranteed.
            return set(instance.translations[trans_id])

        eq_(attached(obj, obj.purified_id),
            {('en-us', 'English Purified'), ('fr', 'French Purified')})
        eq_(attached(obj2, obj2.linkified_id),
            {('en-us', 'English 2 Linkified'),
             ('es', 'Spanish 2 Linkified'),
             ('fr', 'French 2 Linkified')})
Exemplo n.º 5
0
    def test_basic(self):
        """Single object, one locale per field: translations are attached
        as a defaultdict keyed by translation id."""
        obj = FancyModel.objects.create(
            purified='Purified <script>alert(42)</script>!',
            linkified='Linkified <script>alert(42)</script>!')

        # Quick sanity checks: is description properly escaped? The underlying
        # implementation should leave localized_string un-escaped but never use
        # it for __unicode__. We depend on this behaviour later in the test.
        for field_value in (obj.purified, obj.linkified):
            ok_('<script>' in field_value.localized_string)
            ok_('<script>' not in field_value.localized_string_clean)

        # Attach trans dict.
        attach_trans_dict(FancyModel, [obj])
        ok_(isinstance(obj.translations, collections.defaultdict))
        translations = dict(obj.translations)

        # obj.translations is a defaultdict: unknown keys yield [].
        eq_(obj.translations['whatever'], [])

        # Non-translated fields should be absent.
        ok_(None not in translations)

        # Compare against the expected translations dict.
        expected = {
            obj.purified_id: [('en-us', unicode(obj.purified))],
            obj.linkified_id: [('en-us', unicode(obj.linkified))],
        }
        eq_(translations, expected)
Exemplo n.º 6
0
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document.

        Either ``pk`` or ``obj`` must be provided; when only ``pk`` is
        given the instance is fetched from the database.
        """
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])
        attach_trans_dict(Webapp, [obj.app])

        # Supply a default: getattr() without one raises AttributeError when
        # the attribute is missing, which defeats the purpose of guarding
        # against an absent preview.
        preview = getattr(obj, 'preview', None)

        doc = {
            'id': obj.id,
            'app': obj.app_id,
            'background_color': obj.background_color,
            'created': obj.created,
            'image_hash': obj.image_hash,
            'item_type': feed.FEED_TYPE_APP,
            'preview': {'id': preview.id,
                        'thumbnail_size': preview.thumbnail_size,
                        'thumbnail_url': preview.thumbnail_url}
                       if preview else None,
            'pullquote_attribution': obj.pullquote_attribution,
            'pullquote_rating': obj.pullquote_rating,
            'search_names': list(
                set(string for _, string
                    in obj.app.translations[obj.app.name_id])),
            'slug': obj.slug,
            'type': obj.type,
        }

        # Handle localized fields.
        for field in ('description', 'pullquote_text'):
            doc.update(format_translation_es(obj, field))

        return doc
Exemplo n.º 7
0
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document.

        Either ``pk`` or ``obj`` must be provided.
        """
        if obj is None:
            # get_model() returns the model class (it is passed to
            # attach_trans_dict below), so the lookup must go through the
            # default manager -- the class itself has no .get().
            obj = cls.get_model().objects.get(pk=pk)

        attach_trans_dict(cls.get_model(), [obj])

        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'carrier': mkt.carriers.CARRIER_CHOICE_DICT[obj.carrier].slug,
            'created': obj.created,
            'image_hash': obj.image_hash,
            'image_landing_hash': obj.image_landing_hash,
            'item_type': feed.FEED_TYPE_SHELF,
            'region': mkt.regions.REGIONS_CHOICES_ID_DICT[obj.region].slug,
            'search_names': list(set(string for _, string
                                     in obj.translations[obj.name_id])),
            'slug': obj.slug,
        }

        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(format_translation_es(obj, field))

        return doc
Exemplo n.º 8
0
    def extract_document(cls, pk=None, obj=None):
        """Build the Elasticsearch document for one instance; either ``pk``
        or ``obj`` must be provided."""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Pre-fetch translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])

        # Attributes copied over verbatim from the model.
        copied = ('created', 'default_locale', 'id', 'icon_hash', 'icon_type',
                  'is_disabled', 'last_updated', 'modified', 'status')
        document = {name: getattr(obj, name) for name in copied}

        document['category'] = obj.categories or []
        document['device'] = obj.devices or []
        document['title_sort'] = unicode(obj.title).lower()
        document['region_exclusions'] = obj.region_exclusions or []

        # Add boost, popularity, trending values.
        document.update(cls.extract_popularity_trending_boost(obj))

        # Localized fields: both the variant used for search and the one
        # carrying all translations for the API.
        for field in cls.translated_fields:
            document.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))

        # Fields indexed with language-specific analyzers.
        for field in cls.fields_with_language_analyzers:
            document.update(
                cls.extract_field_analyzed_translations(obj, field))

        return document
Exemplo n.º 9
0
    def extract_document(cls, pk=None, obj=None):
        """Build the Elasticsearch document for a feed collection."""
        from mkt.feed.models import FeedCollectionMembership

        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        attach_trans_dict(cls.get_model(), [obj])

        # De-duplicated localized names used for search.
        search_names = {string for _, string in obj.translations[obj.name_id]}

        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'background_color': obj.background_color,
            'color': obj.color,
            'created': obj.created,
            'group_apps': {},  # Map of app IDs to index in group_names below.
            'group_names': [],  # List of ES-serialized group names.
            'image_hash': obj.image_hash,
            'item_type': feed.FEED_TYPE_COLL,
            'search_names': list(search_names),
            'slug': obj.slug,
            'type': obj.type,
        }

        # Grouped apps. Key off of translation, pointed to app IDs.
        memberships = obj.feedcollectionmembership_set.all()
        attach_trans_dict(FeedCollectionMembership, memberships)
        for membership in memberships:
            if not membership.group:
                continue
            group_translation = cls.extract_field_translations(
                membership, 'group')
            if group_translation not in doc['group_names']:
                doc['group_names'].append(group_translation)
            doc['group_apps'][membership.app_id] = (
                doc['group_names'].index(group_translation))

        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(cls.extract_field_translations(obj, field))

        return doc
Exemplo n.º 10
0
    def extract_document(cls, pk=None, obj=None):
        """Build the Elasticsearch document for an operator shelf.

        Either ``pk`` or ``obj`` must be provided.
        """
        from mkt.feed.models import FeedShelfMembership

        if obj is None:
            # get_model() returns the model class (it is passed to
            # attach_trans_dict below), so the lookup must go through the
            # default manager -- the class itself has no .get().
            obj = cls.get_model().objects.get(pk=pk)

        attach_trans_dict(cls.get_model(), [obj])

        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'carrier': mkt.carriers.CARRIER_CHOICE_DICT[obj.carrier].slug,
            'created': obj.created,
            'group_apps': {},  # Map of app IDs to index in group_names below.
            'group_names': [],  # List of ES-serialized group names.
            'image_hash': obj.image_hash,
            'image_landing_hash': obj.image_landing_hash,
            'item_type': feed.FEED_TYPE_SHELF,
            'region': mkt.regions.REGIONS_CHOICES_ID_DICT[obj.region].slug,
            'search_names':
            list(set(string for _, string in obj.translations[obj.name_id])),
            'slug': obj.slug,
        }

        # Grouped apps. Key off of translation, pointed to app IDs.
        memberships = obj.feedshelfmembership_set.all()
        attach_trans_dict(FeedShelfMembership, memberships)
        for member in memberships:
            if member.group:
                group_translation = cls.extract_field_translations(
                    member, 'group')
                if group_translation not in doc['group_names']:
                    doc['group_names'].append(group_translation)

                doc['group_apps'][member.app_id] = (
                    doc['group_names'].index(group_translation))

        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(cls.extract_field_translations(obj, field))

        return doc
Exemplo n.º 11
0
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document."""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])

        # Attributes copied over verbatim from the model.
        copied = ('author', 'created', 'default_language', 'icon_hash', 'id',
                  'last_updated', 'modified', 'slug', 'status')
        doc = {name: getattr(obj, name) for name in copied}

        doc['device'] = obj.devices
        doc['guid'] = unicode(obj.uuid)
        doc['is_deleted'] = obj.deleted
        doc['is_disabled'] = obj.disabled

        # Version data is only exposed once the add-on is public.
        if obj.status == STATUS_PUBLIC:
            version = obj.latest_public_version
            doc['latest_public_version'] = {
                'id': version.pk,
                'created': version.created,
                'size': version.size,
                'version': version.version,
            }
        else:
            doc['latest_public_version'] = None

        doc['name_sort'] = unicode(obj.name).lower()
        # Find the first reviewed date (used in sort).
        doc['reviewed'] = obj.versions.public().aggregate(
            Min('reviewed')).get('reviewed__min')

        # Add boost, popularity, trending values.
        doc.update(cls.extract_popularity_trending_boost(obj))

        # Localized fields: both the variant used for search and the one
        # carrying all translations for the API.
        for field in cls.translated_fields:
            doc.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))

        # Fields indexed with language-specific analyzers.
        for field in cls.fields_with_language_analyzers:
            doc.update(cls.extract_field_analyzed_translations(obj, field))

        return doc
Exemplo n.º 12
0
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document."""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])

        # Attach tags (keywords).
        attach_tags([obj])

        # Attributes copied over verbatim from the model.
        copied_attrs = ('created', 'default_locale', 'id', 'icon_hash',
                        'icon_type', 'is_disabled', 'last_updated',
                        'mobile_url', 'modified', 'promo_img_hash', 'status',
                        'tv_url', 'url')
        doc = {attr: getattr(obj, attr) for attr in copied_attrs}

        doc['category'] = obj.categories or []
        doc['device'] = obj.devices or []
        doc['name_sort'] = unicode(obj.name).lower()
        doc['preferred_regions'] = obj.preferred_regions or []
        doc['tags'] = getattr(obj, 'keywords_list', [])
        doc['tv_featured'] = obj.keywords.filter(
            tag_text='featured-tv').exists()
        doc['url_tokenized'] = cls.strip_url(obj.url)

        # For now, websites are not reviewed, since we're manually injecting
        # data, so just use last_updated.
        doc['reviewed'] = obj.last_updated

        # Add boost, popularity, trending values.
        doc.update(cls.extract_popularity_trending_boost(obj))

        # Localized fields: both the variant used for search and the one
        # carrying all translations for the API.
        for field in cls.translated_fields:
            doc.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))

        # Fields indexed with language-specific analyzers.
        for field in cls.fields_with_language_analyzers:
            doc.update(cls.extract_field_analyzed_translations(obj, field))

        return doc
Exemplo n.º 13
0
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document."""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])

        # Attach tags (keywords).
        attach_tags([obj])

        attrs = ('created', 'default_locale', 'id', 'icon_hash', 'icon_type',
                 'is_disabled', 'last_updated', 'mobile_url', 'modified',
                 'promo_img_hash', 'status', 'tv_url', 'url')
        doc = dict(zip(attrs, attrgetter(*attrs)(obj)))

        doc.update({
            'category': obj.categories or [],
            'device': obj.devices or [],
            'developer_name': (unicode(obj.developer_name)
                               if obj.developer_name else None),
            'name_sort': unicode(obj.name).lower(),
            'preferred_regions': obj.preferred_regions or [],
            'tags': getattr(obj, 'keywords_list', []),
            'tv_featured': obj.keywords.filter(
                tag_text='featured-tv').exists(),
            'url_tokenized': cls.strip_url(obj.url),
            # For now, websites are not reviewed, since we're manually
            # injecting data, so just use last_updated.
            'reviewed': obj.last_updated,
        })

        # Add boost, popularity, trending values.
        doc.update(cls.extract_popularity_trending_boost(obj))

        # Localized fields: both the variant used for search and the one
        # carrying all translations for the API.
        for field in cls.translated_fields:
            doc.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))

        # Fields indexed with language-specific analyzers.
        for field in cls.fields_with_language_analyzers:
            doc.update(cls.extract_field_analyzed_translations(obj, field))

        return doc
Exemplo n.º 14
0
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document."""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])

        attrs = ('author', 'created', 'default_language', 'icon_hash', 'id',
                 'last_updated', 'modified', 'slug', 'status')
        doc = dict(zip(attrs, attrgetter(*attrs)(obj)))

        doc['device'] = obj.devices
        doc['guid'] = unicode(obj.uuid)
        doc['is_deleted'] = obj.deleted
        doc['is_disabled'] = obj.disabled

        # Only public add-ons expose version data.
        if obj.status != STATUS_PUBLIC:
            doc['latest_public_version'] = None
        else:
            latest = obj.latest_public_version
            doc['latest_public_version'] = {
                'id': latest.pk,
                'created': latest.created,
                'size': latest.size,
                'version': latest.version,
            }

        doc['name_sort'] = unicode(obj.name).lower()
        # Find the first reviewed date (used in sort).
        doc['reviewed'] = obj.versions.public().aggregate(
            Min('reviewed')).get('reviewed__min')

        # Add boost, popularity, trending values.
        doc.update(cls.extract_popularity_trending_boost(obj))

        # Localized fields: both the variant used for search and the one
        # carrying all translations for the API.
        for field in cls.translated_fields:
            doc.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))

        # Fields indexed with language-specific analyzers.
        for field in cls.fields_with_language_analyzers:
            doc.update(cls.extract_field_analyzed_translations(obj, field))

        return doc
Exemplo n.º 15
0
    def extract_document(cls, pk=None, obj=None):
        """Build the Elasticsearch document for a feed collection."""
        from mkt.feed.models import FeedCollectionMembership

        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        attach_trans_dict(cls.get_model(), [obj])

        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'background_color': obj.background_color,
            'color': obj.color,
            'created': obj.created,
            'group_apps': {},  # Map of app IDs to index in group_names below.
            'group_names': [],  # List of ES-serialized group names.
            'image_hash': obj.image_hash,
            'item_type': feed.FEED_TYPE_COLL,
            'search_names': list({string for _, string
                                  in obj.translations[obj.name_id]}),
            'slug': obj.slug,
            'type': obj.type,
        }

        # Grouped apps. Key off of translation, pointed to app IDs.
        memberships = obj.feedcollectionmembership_set.all()
        attach_trans_dict(FeedCollectionMembership, memberships)
        for membership in memberships:
            if not membership.group:
                continue
            translation = cls.extract_field_translations(membership, 'group')
            if translation not in doc['group_names']:
                doc['group_names'].append(translation)
            doc['group_apps'][membership.app_id] = (
                doc['group_names'].index(translation))

        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(cls.extract_field_translations(obj, field))

        return doc
Exemplo n.º 16
0
    def extract_document(cls, pk=None, obj=None):
        """Build the Elasticsearch document for an operator shelf.

        Either ``pk`` or ``obj`` must be provided.
        """
        from mkt.feed.models import FeedShelfMembership

        if obj is None:
            # get_model() returns the model class (it is passed to
            # attach_trans_dict below), so the lookup must go through the
            # default manager -- the class itself has no .get().
            obj = cls.get_model().objects.get(pk=pk)

        attach_trans_dict(cls.get_model(), [obj])

        doc = {
            'id': obj.id,
            'apps': list(obj.apps().values_list('id', flat=True)),
            'carrier': mkt.carriers.CARRIER_CHOICE_DICT[obj.carrier].slug,
            'created': obj.created,
            'group_apps': {},  # Map of app IDs to index in group_names below.
            'group_names': [],  # List of ES-serialized group names.
            'image_hash': obj.image_hash,
            'image_landing_hash': obj.image_landing_hash,
            'item_type': feed.FEED_TYPE_SHELF,
            'region': mkt.regions.REGIONS_CHOICES_ID_DICT[obj.region].slug,
            'search_names': list(set(string for _, string
                                     in obj.translations[obj.name_id])),
            'slug': obj.slug,
        }

        # Grouped apps. Key off of translation, pointed to app IDs.
        memberships = obj.feedshelfmembership_set.all()
        attach_trans_dict(FeedShelfMembership, memberships)
        for member in memberships:
            if member.group:
                group_translation = cls.extract_field_translations(member,
                                                                   'group')
                if group_translation not in doc['group_names']:
                    doc['group_names'].append(group_translation)

                doc['group_apps'][member.app_id] = (
                    doc['group_names'].index(group_translation))

        # Handle localized fields.
        for field in ('description', 'name'):
            doc.update(cls.extract_field_translations(obj, field))

        return doc
Exemplo n.º 17
0
    def extract_document(cls, pk=None, obj=None):
        """Build the Elasticsearch document for a feed collection."""
        from mkt.feed.models import FeedCollectionMembership

        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        attach_trans_dict(cls.get_model(), [obj])

        # De-duplicated localized names used for search.
        names = {string for _, string in obj.translations[obj.name_id]}

        doc = {
            "id": obj.id,
            "apps": list(obj.apps().values_list("id", flat=True)),
            "background_color": obj.background_color,
            "color": obj.color,
            "created": obj.created,
            "group_apps": {},  # Map of app IDs to index in group_names below.
            "group_names": [],  # List of ES-serialized group names.
            "image_hash": obj.image_hash,
            "item_type": feed.FEED_TYPE_COLL,
            "search_names": list(names),
            "slug": obj.slug,
            "type": obj.type,
        }

        # Grouped apps. Key off of translation, pointed to app IDs.
        memberships = obj.feedcollectionmembership_set.all()
        attach_trans_dict(FeedCollectionMembership, memberships)
        for membership in memberships:
            if not membership.group:
                continue
            translation = cls.extract_field_translations(membership, "group")
            if translation not in doc["group_names"]:
                doc["group_names"].append(translation)
            doc["group_apps"][membership.app_id] = (
                doc["group_names"].index(translation))

        # Handle localized fields.
        for field in ("description", "name"):
            doc.update(cls.extract_field_translations(obj, field))

        return doc
Exemplo n.º 18
0
    def extract_document(cls, pk=None, obj=None):
        """Build the Elasticsearch document for an operator shelf.

        Either ``pk`` or ``obj`` must be provided.
        """
        from mkt.feed.models import FeedShelfMembership

        if obj is None:
            # get_model() returns the model class (it is passed to
            # attach_trans_dict below), so the lookup must go through the
            # default manager -- the class itself has no .get().
            obj = cls.get_model().objects.get(pk=pk)

        attach_trans_dict(cls.get_model(), [obj])

        doc = {
            "id": obj.id,
            "apps": list(obj.apps().values_list("id", flat=True)),
            "carrier": mkt.carriers.CARRIER_CHOICE_DICT[obj.carrier].slug,
            "created": obj.created,
            "group_apps": {},  # Map of app IDs to index in group_names below.
            "group_names": [],  # List of ES-serialized group names.
            "image_hash": obj.image_hash,
            "image_landing_hash": obj.image_landing_hash,
            "item_type": feed.FEED_TYPE_SHELF,
            "region": mkt.regions.REGIONS_CHOICES_ID_DICT[obj.region].slug,
            "search_names": list(set(string for _, string in obj.translations[obj.name_id])),
            "slug": obj.slug,
        }

        # Grouped apps. Key off of translation, pointed to app IDs.
        memberships = obj.feedshelfmembership_set.all()
        attach_trans_dict(FeedShelfMembership, memberships)
        for member in memberships:
            if member.group:
                group_translation = cls.extract_field_translations(member, "group")
                if group_translation not in doc["group_names"]:
                    doc["group_names"].append(group_translation)

                doc["group_apps"][member.app_id] = doc["group_names"].index(group_translation)

        # Handle localized fields.
        for field in ("description", "name"):
            doc.update(cls.extract_field_translations(obj, field))

        return doc
Exemplo n.º 19
0
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document.

        Either ``pk`` or ``obj`` must be provided; when only ``pk`` is
        given the instance is fetched from the database.
        """
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])
        attach_trans_dict(Webapp, [obj.app])

        # Supply a default: getattr() without one raises AttributeError when
        # the attribute is missing, which defeats the purpose of guarding
        # against an absent preview.
        preview = getattr(obj, "preview", None)

        doc = {
            "id": obj.id,
            "app": obj.app_id,
            "background_color": obj.background_color,
            "color": obj.color,
            "created": obj.created,
            "image_hash": obj.image_hash,
            "item_type": feed.FEED_TYPE_APP,
            "preview": {
                "id": preview.id,
                "thumbnail_size": preview.thumbnail_size,
                "thumbnail_url": preview.thumbnail_url,
            }
            if preview
            else None,
            "pullquote_attribution": obj.pullquote_attribution,
            "pullquote_rating": obj.pullquote_rating,
            "search_names": list(set(string for _, string in obj.app.translations[obj.app.name_id])),
            "slug": obj.slug,
            "type": obj.type,
        }

        # Handle localized fields.
        for field in ("description", "pullquote_text"):
            doc.update(cls.extract_field_translations(obj, field))

        return doc
Exemplo n.º 20
0
    def extract_document(cls, pk=None, obj=None):
        """Extracts the ElasticSearch index document for this instance."""
        from mkt.webapps.models import (AppFeatures, attach_devices,
                                        attach_prices, attach_translations,
                                        RatingDescriptors, RatingInteractives)

        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach everything we need to index apps.
        for transform in (attach_devices, attach_prices, attach_tags,
                          attach_translations):
            transform([obj])

        latest_version = obj.latest_version
        version = obj.current_version
        geodata = obj.geodata
        features = (version.features.to_dict()
                    if version else AppFeatures().to_dict())

        try:
            status = latest_version.statuses[0][1] if latest_version else None
        except IndexError:
            status = None

        attrs = ('app_slug', 'bayesian_rating', 'created', 'default_locale',
                 'guid', 'icon_hash', 'id', 'is_disabled', 'is_offline',
                 'file_size', 'last_updated', 'modified', 'premium_type',
                 'promo_img_hash', 'status', 'uses_flash')
        d = dict(zip(attrs, attrgetter(*attrs)(obj)))

        d['app_type'] = obj.app_type_id
        d['author'] = obj.developer_name
        d['banner_regions'] = geodata.banner_regions_slugs()
        d['category'] = obj.categories if obj.categories else []
        d['content_ratings'] = (obj.get_content_ratings_by_body(es=True) or
                                None)
        try:
            d['content_descriptors'] = obj.rating_descriptors.to_keys()
        except RatingDescriptors.DoesNotExist:
            d['content_descriptors'] = []
        d['current_version'] = version.version if version else None
        d['device'] = getattr(obj, 'device_ids', [])
        d['features'] = features
        d['has_public_stats'] = obj.public_stats
        try:
            d['interactive_elements'] = obj.rating_interactives.to_keys()
        except RatingInteractives.DoesNotExist:
            d['interactive_elements'] = []
        d['installs_allowed_from'] = (
            version.manifest.get('installs_allowed_from', ['*'])
            if version else ['*'])
        d['is_priority'] = obj.priority_review

        is_escalated = obj.escalationqueue_set.exists()
        d['is_escalated'] = is_escalated
        d['escalation_date'] = (obj.escalationqueue_set.get().created
                                if is_escalated else None)
        is_rereviewed = obj.rereviewqueue_set.exists()
        d['is_rereviewed'] = is_rereviewed
        d['rereview_date'] = (obj.rereviewqueue_set.get().created
                              if is_rereviewed else None)

        if latest_version:
            d['latest_version'] = {
                'status': status,
                'is_privileged': latest_version.is_privileged,
                'has_editor_comment': latest_version.has_editor_comment,
                'has_info_request': latest_version.has_info_request,
                'nomination_date': latest_version.nomination,
                'created_date': latest_version.created,
            }
        else:
            d['latest_version'] = {
                'status': None,
                'is_privileged': None,
                'has_editor_comment': None,
                'has_info_request': None,
                'nomination_date': None,
                'created_date': None,
            }
        d['manifest_url'] = obj.get_manifest_url()
        d['package_path'] = obj.get_package_path()
        d['name_sort'] = unicode(obj.name).lower()
        d['owners'] = [au.user.id for au in
                       obj.addonuser_set.filter(role=mkt.AUTHOR_ROLE_OWNER)]

        d['previews'] = [{'filetype': p.filetype, 'modified': p.modified,
                          'id': p.id, 'sizes': p.sizes}
                         for p in obj.previews.all()]
        try:
            p = obj.addonpremium.price
            d['price_tier'] = p.name
        except AddonPremium.DoesNotExist:
            d['price_tier'] = None

        d['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_reviews,
        }
        d['region_exclusions'] = obj.get_excluded_region_ids()
        d['reviewed'] = obj.versions.filter(
            deleted=False).aggregate(Min('reviewed')).get('reviewed__min')

        # The default locale of the app is considered "supported" by default.
        supported_locales = [obj.default_locale]
        other_locales = (filter(None, version.supported_locales.split(','))
                         if version else [])
        if other_locales:
            supported_locales.extend(other_locales)
        d['supported_locales'] = list(set(supported_locales))

        d['tags'] = getattr(obj, 'tags_list', [])

        if obj.upsell and obj.upsell.premium.is_published():
            upsell_obj = obj.upsell.premium
            d['upsell'] = {
                'id': upsell_obj.id,
                'app_slug': upsell_obj.app_slug,
                'icon_url': upsell_obj.get_icon_url(128),
                # TODO: Store all localizations of upsell.name.
                'name': unicode(upsell_obj.name),
                'region_exclusions': upsell_obj.get_excluded_region_ids()
            }

        d['versions'] = [dict(version=v.version,
                              resource_uri=reverse_version(v))
                         for v in obj.versions.all()]

        # Handle localized fields.
        # This adds both the field used for search and the one with
        # all translations for the API.
        for field in ('description', 'name'):
            d.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))
        # This adds only the field with all the translations for the API, we
        # don't need to search on those.
        for field in ('homepage', 'support_email', 'support_url'):
            d.update(cls.extract_field_translations(obj, field))

        if version:
            attach_trans_dict(version._meta.model, [version])
            d.update(cls.extract_field_translations(
                version, 'release_notes', db_field='releasenotes_id'))
        else:
            d['release_notes_translations'] = None
        attach_trans_dict(geodata._meta.model, [geodata])
        d.update(cls.extract_field_translations(geodata, 'banner_message'))

        # Add boost, popularity, trending values.
        d.update(cls.extract_popularity_trending_boost(obj))

        # If the app is compatible with Firefox OS, push suggestion data in the
        # index - This will be used by RocketbarView API, which is specific to
        # Firefox OS.
        if DEVICE_GAIA.id in d['device'] and obj.is_published():
            d['name_suggest'] = {
                'input': d['name'],
                'output': unicode(obj.id),  # We only care about the payload.
                'weight': int(d['boost']),
                'payload': {
                    'default_locale': d['default_locale'],
                    'icon_hash': d['icon_hash'],
                    'id': d['id'],
                    'manifest_url': d['manifest_url'],
                    'modified': d['modified'],
                    'name_translations': d['name_translations'],
                    'slug': d['app_slug'],
                }
            }

        for field in ('name', 'description'):
            d.update(cls.extract_field_analyzed_translations(obj, field))

        return d
# Exemplo n.º 21 — score: 0
    def extract_document(cls, pk=None, obj=None):
        """Extract the ElasticSearch index document for a Webapp instance.

        Either ``pk`` or a pre-fetched ``obj`` must be provided; when only
        ``pk`` is given the instance is loaded from the database, bypassing
        the cache. Returns a plain dict ready to be indexed in ES.
        """
        # Imported here (not at module level) to avoid circular imports.
        from mkt.webapps.models import (AppFeatures, attach_devices,
                                        attach_prices, attach_tags,
                                        attach_translations, Geodata,
                                        Installed, RatingDescriptors,
                                        RatingInteractives)

        if obj is None:
            obj = cls.get_model().objects.no_cache().get(pk=pk)

        # Attach everything we need to index apps (devices, prices, tags and
        # translations are set as attributes on obj by these transforms).
        for transform in (attach_devices, attach_prices, attach_tags,
                          attach_translations):
            transform([obj])

        latest_version = obj.latest_version
        version = obj.current_version
        geodata = obj.geodata
        features = (version.features.to_dict()
                    if version else AppFeatures().to_dict())

        # statuses can be an empty list for a version with no files.
        try:
            status = latest_version.statuses[0][1] if latest_version else None
        except IndexError:
            status = None

        # Install count drives the boost and popularity fields below.
        installed_ids = list(Installed.objects.filter(addon=obj)
                             .values_list('id', flat=True))

        # Simple model attributes copied straight into the document.
        attrs = ('app_slug', 'bayesian_rating', 'created', 'id', 'is_disabled',
                 'last_updated', 'modified', 'premium_type', 'status',
                 'uses_flash', 'weekly_downloads')
        d = dict(zip(attrs, attrgetter(*attrs)(obj)))

        # Boost is at least 1 so scoring never zeroes out a hit.
        d['boost'] = len(installed_ids) or 1
        d['app_type'] = obj.app_type_id
        d['author'] = obj.developer_name
        d['banner_regions'] = geodata.banner_regions_slugs()
        d['category'] = obj.categories if obj.categories else []
        # Collection membership is only indexed for published apps.
        if obj.is_published:
            d['collection'] = [{'id': cms.collection_id, 'order': cms.order}
                               for cms in obj.collectionmembership_set.all()]
        else:
            d['collection'] = []
        d['content_ratings'] = (obj.get_content_ratings_by_body(es=True) or
                                None)
        # Rating descriptors/interactives may not exist for every app.
        try:
            d['content_descriptors'] = obj.rating_descriptors.to_keys()
        except RatingDescriptors.DoesNotExist:
            d['content_descriptors'] = []
        d['current_version'] = version.version if version else None
        d['default_locale'] = obj.default_locale
        # De-duplicated description strings across all locales (search field).
        d['description'] = list(
            set(string for _, string in obj.translations[obj.description_id]))
        d['device'] = getattr(obj, 'device_ids', [])
        d['features'] = features
        d['has_public_stats'] = obj.public_stats
        d['icon_hash'] = obj.icon_hash
        try:
            d['interactive_elements'] = obj.rating_interactives.to_keys()
        except RatingInteractives.DoesNotExist:
            d['interactive_elements'] = []
        d['is_escalated'] = obj.escalationqueue_set.exists()
        d['is_offline'] = getattr(obj, 'is_offline', False)
        d['is_priority'] = obj.priority_review
        d['is_rereviewed'] = obj.rereviewqueue_set.exists()
        # Reviewer-facing metadata for the latest version; every key is
        # present even when the app has no version at all.
        if latest_version:
            d['latest_version'] = {
                'status': status,
                'is_privileged': latest_version.is_privileged,
                'has_editor_comment': latest_version.has_editor_comment,
                'has_info_request': latest_version.has_info_request,
                'nomination_date': latest_version.nomination,
                'created_date': latest_version.created,
            }
        else:
            d['latest_version'] = {
                'status': None,
                'is_privileged': None,
                'has_editor_comment': None,
                'has_info_request': None,
                'nomination_date': None,
                'created_date': None,
            }
        d['manifest_url'] = obj.get_manifest_url()
        d['package_path'] = obj.get_package_path()
        # De-duplicated name strings across all locales (search field).
        d['name'] = list(
            set(string for _, string in obj.translations[obj.name_id]))
        d['name_sort'] = unicode(obj.name).lower()
        d['owners'] = [au.user.id for au in
                       obj.addonuser_set.filter(role=amo.AUTHOR_ROLE_OWNER)]
        d['popularity'] = len(installed_ids)
        d['previews'] = [{'filetype': p.filetype, 'modified': p.modified,
                          'id': p.id, 'sizes': p.sizes}
                         for p in obj.previews.all()]
        # Not every app has an associated premium price.
        try:
            p = obj.addonpremium.price
            d['price_tier'] = p.name
        except AddonPremium.DoesNotExist:
            d['price_tier'] = None

        d['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_reviews,
        }
        d['region_exclusions'] = obj.get_excluded_region_ids()
        # Earliest review date across non-deleted versions.
        d['reviewed'] = obj.versions.filter(
            deleted=False).aggregate(Min('reviewed')).get('reviewed__min')
        if version:
            d['supported_locales'] = filter(
                None, version.supported_locales.split(','))
        else:
            d['supported_locales'] = []

        # NOTE(review): sibling implementations in this file read 'tags_list'
        # here — confirm attach_tags sets 'tag_list' in this code version.
        d['tags'] = getattr(obj, 'tag_list', [])
        # Only expose the upsell when its premium counterpart is published.
        if obj.upsell and obj.upsell.premium.is_published():
            upsell_obj = obj.upsell.premium
            d['upsell'] = {
                'id': upsell_obj.id,
                'app_slug': upsell_obj.app_slug,
                'icon_url': upsell_obj.get_icon_url(128),
                # TODO: Store all localizations of upsell.name.
                'name': unicode(upsell_obj.name),
                'region_exclusions': upsell_obj.get_excluded_region_ids()
            }

        d['versions'] = [dict(version=v.version,
                              resource_uri=reverse_version(v))
                         for v in obj.versions.all()]

        # Handle our localized fields: one '<field>_translations' list of
        # {lang, string} dicts per field, for API consumption.
        for field in ('description', 'homepage', 'name', 'support_email',
                      'support_url'):
            d['%s_translations' % field] = [
                {'lang': to_language(lang), 'string': string}
                for lang, string
                in obj.translations[getattr(obj, '%s_id' % field)]
                if string]
        if version:
            attach_trans_dict(Version, [version])
            d['release_notes_translations'] = [
                {'lang': to_language(lang), 'string': string}
                for lang, string
                in version.translations[version.releasenotes_id]]
        else:
            d['release_notes_translations'] = None
        attach_trans_dict(Geodata, [geodata])
        d['banner_message_translations'] = [
            {'lang': to_language(lang), 'string': string}
            for lang, string
            in geodata.translations[geodata.banner_message_id]]

        # Per-region popularity starts identical everywhere; presumably
        # refined elsewhere — TODO confirm.
        for region in mkt.regions.ALL_REGION_IDS:
            d['popularity_%s' % region] = d['popularity']

        # Bump the boost if the add-on is public.
        if obj.status == amo.STATUS_PUBLIC:
            d['boost'] = max(d['boost'], 1) * 4

        # If the app is compatible with Firefox OS, push suggestion data in the
        # index - This will be used by RocketbarView API, which is specific to
        # Firefox OS.
        if DEVICE_GAIA.id in d['device'] and obj.is_published():
            d['name_suggest'] = {
                'input': d['name'],
                'output': unicode(obj.id),  # We only care about the payload.
                'weight': d['boost'],
                'payload': {
                    'default_locale': d['default_locale'],
                    'icon_hash': d['icon_hash'],
                    'id': d['id'],
                    'manifest_url': d['manifest_url'],
                    'modified': d['modified'],
                    'name_translations': d['name_translations'],
                    'slug': d['app_slug'],
                }
            }

        # Indices for each language. languages is a list of locales we want to
        # index with analyzer if the string's locale matches.
        for analyzer, languages in amo.SEARCH_ANALYZER_MAP.iteritems():
            # Skip analyzers that need ES plugins we don't have installed.
            if (not settings.ES_USE_PLUGINS and
                analyzer in amo.SEARCH_ANALYZER_PLUGINS):
                continue

            d['name_' + analyzer] = list(
                set(string for locale, string in obj.translations[obj.name_id]
                    if locale.lower() in languages))
            d['description_' + analyzer] = list(
                set(string for locale, string
                    in obj.translations[obj.description_id]
                    if locale.lower() in languages))

        return d
# Exemplo n.º 22 — score: 0
    def extract_document(cls, pk=None, obj=None):
        """Extract the ElasticSearch index document for a Webapp instance.

        Either ``pk`` or a pre-fetched ``obj`` must be provided; when only
        ``pk`` is given the instance is loaded from the database. Returns a
        plain dict ready to be indexed in ES.
        """
        # Imported here (not at module level) to avoid circular imports.
        # Fix: attach_tags is used in the transform loop below but was
        # missing from this import list, which would raise NameError unless
        # a module-level import happened to provide the name.
        from mkt.webapps.models import (AppFeatures, attach_devices,
                                        attach_prices, attach_tags,
                                        attach_translations,
                                        RatingDescriptors, RatingInteractives)

        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach everything we need to index apps (devices, prices, tags and
        # translations are set as attributes on obj by these transforms).
        for transform in (attach_devices, attach_prices, attach_tags,
                          attach_translations):
            transform([obj])

        latest_version = obj.latest_version
        version = obj.current_version
        features = (version.features.to_dict()
                    if version else AppFeatures().to_dict())

        # statuses can be an empty list for a version with no files.
        try:
            status = latest_version.statuses[0][1] if latest_version else None
        except IndexError:
            status = None

        # Simple model attributes copied straight into the document.
        attrs = ('app_slug', 'bayesian_rating', 'created', 'default_locale',
                 'guid', 'hosted_url', 'icon_hash', 'id', 'is_disabled',
                 'is_offline', 'file_size', 'last_updated', 'modified',
                 'premium_type', 'promo_img_hash', 'status')
        d = dict(zip(attrs, attrgetter(*attrs)(obj)))

        d['app_type'] = obj.app_type_id
        d['author'] = obj.developer_name
        d['category'] = obj.categories if obj.categories else []
        d['content_ratings'] = (obj.get_content_ratings_by_body(es=True)
                                or None)
        # Rating descriptors/interactives may not exist for every app.
        try:
            d['content_descriptors'] = obj.rating_descriptors.to_keys()
        except RatingDescriptors.DoesNotExist:
            d['content_descriptors'] = []
        d['current_version'] = version.version if version else None
        d['device'] = getattr(obj, 'device_ids', [])
        d['features'] = features
        d['has_public_stats'] = obj.public_stats
        try:
            d['interactive_elements'] = obj.rating_interactives.to_keys()
        except RatingInteractives.DoesNotExist:
            d['interactive_elements'] = []
        # Default to the wildcard when there is no version/manifest entry.
        d['installs_allowed_from'] = (version.manifest.get(
            'installs_allowed_from', ['*']) if version else ['*'])
        d['is_priority'] = obj.priority_review

        # Escalation/re-review queue state; dates only present when queued.
        is_escalated = obj.escalationqueue_set.exists()
        d['is_escalated'] = is_escalated
        d['escalation_date'] = (obj.escalationqueue_set.get().created
                                if is_escalated else None)
        is_rereviewed = obj.rereviewqueue_set.exists()
        d['is_rereviewed'] = is_rereviewed
        d['rereview_date'] = (obj.rereviewqueue_set.get().created
                              if is_rereviewed else None)

        # Reviewer-facing metadata for the latest version; every key is
        # present even when the app has no version at all.
        if latest_version:
            d['latest_version'] = {
                'status': status,
                'is_privileged': latest_version.is_privileged,
                'has_editor_comment': latest_version.has_editor_comment,
                'has_info_request': latest_version.has_info_request,
                'nomination_date': latest_version.nomination,
                'created_date': latest_version.created,
            }
        else:
            d['latest_version'] = {
                'status': None,
                'is_privileged': None,
                'has_editor_comment': None,
                'has_info_request': None,
                'nomination_date': None,
                'created_date': None,
            }
        d['manifest_url'] = obj.get_manifest_url()
        d['package_path'] = obj.get_package_path()
        d['name_sort'] = unicode(obj.name).lower()
        d['owners'] = [
            au.user.id
            for au in obj.addonuser_set.filter(role=mkt.AUTHOR_ROLE_OWNER)
        ]

        d['previews'] = [{
            'filetype': p.filetype,
            'modified': p.modified,
            'id': p.id,
            'sizes': p.sizes
        } for p in obj.previews.all()]
        # Not every app has an associated premium price.
        try:
            p = obj.addonpremium.price
            d['price_tier'] = p.name
        except AddonPremium.DoesNotExist:
            d['price_tier'] = None

        d['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_reviews,
        }
        d['region_exclusions'] = obj.get_excluded_region_ids()
        # Earliest review date across non-deleted versions.
        d['reviewed'] = obj.versions.filter(deleted=False).aggregate(
            Min('reviewed')).get('reviewed__min')

        # The default locale of the app is considered "supported" by default.
        supported_locales = [obj.default_locale]
        other_locales = (filter(None, version.supported_locales.split(','))
                         if version else [])
        if other_locales:
            supported_locales.extend(other_locales)
        d['supported_locales'] = list(set(supported_locales))

        d['tags'] = getattr(obj, 'tags_list', [])
        d['tv_featured'] = obj.tags.filter(tag_text='featured-tv').exists()

        # Only expose the upsell when its premium counterpart is published.
        if obj.upsell and obj.upsell.premium.is_published():
            upsell_obj = obj.upsell.premium
            d['upsell'] = {
                'id': upsell_obj.id,
                'app_slug': upsell_obj.app_slug,
                'icon_url': upsell_obj.get_icon_url(128),
                # TODO: Store all localizations of upsell.name.
                'name': unicode(upsell_obj.name),
                'region_exclusions': upsell_obj.get_excluded_region_ids()
            }

        d['versions'] = [
            dict(version=v.version, resource_uri=reverse_version(v))
            for v in obj.versions.all()
        ]

        # Handle localized fields.
        # This adds both the field used for search and the one with
        # all translations for the API.
        for field in ('description', 'name'):
            d.update(
                cls.extract_field_translations(obj,
                                               field,
                                               include_field_for_search=True))
        # This adds only the field with all the translations for the API, we
        # don't need to search on those.
        for field in ('homepage', 'support_email', 'support_url'):
            d.update(cls.extract_field_translations(obj, field))

        if version:
            attach_trans_dict(version._meta.model, [version])
            d.update(
                cls.extract_field_translations(version,
                                               'release_notes',
                                               db_field='releasenotes_id'))
        else:
            d['release_notes_translations'] = None

        # Add boost, popularity, trending values.
        d.update(cls.extract_popularity_trending_boost(obj))

        # If the app is compatible with Firefox OS, push suggestion data in the
        # index - This will be used by RocketbarView API, which is specific to
        # Firefox OS.
        if DEVICE_GAIA.id in d['device'] and obj.is_published():
            d['name_suggest'] = {
                'input': d['name'],
                'output': unicode(obj.id),  # We only care about the payload.
                'weight': int(d['boost']),
                'payload': {
                    'default_locale': d['default_locale'],
                    'icon_hash': d['icon_hash'],
                    'id': d['id'],
                    'manifest_url': d['manifest_url'],
                    'modified': d['modified'],
                    'name_translations': d['name_translations'],
                    'slug': d['app_slug'],
                }
            }

        # Language-specific analyzed copies of name/description for search.
        for field in ('name', 'description'):
            d.update(cls.extract_field_analyzed_translations(obj, field))

        return d