Example #1
    def get_max_value(cls, store, attr):
        """Get the maximum value for a given attr

        On text columns, finding the max value using MAX() on postgres
        would result in some problems, like '9' being considered greater
        than '10' (because the comparison is done from left to right).

        This will 0-"pad" the values for the comparison, making them
        compare the way we want. Note that, because of that, in the
        example above it would return '09' instead of '9'.

        :param store: a store
        :param attr: the attribute to find the max value for
        :returns: the maximum value for the attr
        """
        cls.validate_attr(attr, expected_type=UnicodeCol)

        max_length = Alias(
            Select(columns=[Alias(Max(CharLength(attr)), 'max_length')],
                   tables=[cls]), '_max_length')
        # Using LPad with max_length will work around most of the cases
        # where the string comparison fails. For example, the common case
        # would consider '9' to be greater than '10'. We could compare
        # only the strings whose length equals max_length, but then '10'
        # would be excluded from the comparison entirely (even though
        # '010' would still compare greater than '001'). By doing lpad,
        # '09' is less than '10' and '001' is less than '010', working
        # around those cases.
        max_batch = store.using(cls, max_length).find(cls).max(
            LPad(attr, Field('_max_length', 'max_length'), u'0'))

        # Make the API consistent and return an empty string instead of
        # None if there's no batch registered on the database
        return max_batch or u''
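
The zero-padding trick is easiest to see outside the database. Below is a minimal plain-Python sketch (no Storm, no postgres; the values are made up) showing the same failure mode and the same fix that LPad applies above:

values = ['9', '10', '2', '100']

# max() on strings compares left to right, just like MAX() on a text
# column, so it picks the lexicographically greatest value.
print(max(values))  # '9' -- numerically wrong

# Left-pad every value with '0' to the longest length, mirroring
# LPad(attr, max_length, u'0') above, then compare.
max_length = max(len(v) for v in values)
print(max(v.rjust(max_length, '0') for v in values))  # '100'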
Example #2
def album_list():
    ltype, size, offset = map(request.args.get, ['type', 'size', 'offset'])
    try:
        size = int(size) if size else 10
        offset = int(offset) if offset else 0
    except ValueError:
        return request.error_formatter(0, 'Invalid parameter format')

    query = store.find(Folder, Track.folder_id == Folder.id)
    if ltype == 'random':
        albums = []
        count = query.count()

        if not count:
            return request.formatter({'albumList': {}})

        for _ in xrange(size):
            x = random.randrange(count)
            albums.append(query[x])

        return request.formatter({
            'albumList': {
                'album': [a.as_subsonic_child(request.user) for a in albums]
            }
        })
    elif ltype == 'newest':
        query = query.order_by(Desc(Folder.created)).config(distinct=True)
    elif ltype == 'highest':
        query = query.find(RatingFolder.rated_id == Folder.id).group_by(
            Folder.id).order_by(Desc(Avg(RatingFolder.rating)))
    elif ltype == 'frequent':
        query = query.group_by(Folder.id).order_by(Desc(Avg(Track.play_count)))
    elif ltype == 'recent':
        query = query.group_by(Folder.id).order_by(Desc(Max(Track.last_play)))
    elif ltype == 'starred':
        query = query.find(StarredFolder.starred_id == Folder.id,
                           User.id == StarredFolder.user_id,
                           User.name == request.username)
    elif ltype == 'alphabeticalByName':
        query = query.order_by(Folder.name).config(distinct=True)
    elif ltype == 'alphabeticalByArtist':
        parent = ClassAlias(Folder)
        query = query.find(Folder.parent_id == parent.id).order_by(
            parent.name, Folder.name).config(distinct=True)
    else:
        return request.error_formatter(0, 'Unknown search type')

    return request.formatter({
        'albumList': {
            'album': [
                f.as_subsonic_child(request.user)
                for f in query[offset:offset + size]
            ]
        }
    })
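
One caveat in the 'random' branch above: the loop draws each index independently, so the same album can appear more than once when size is close to count. If duplicates are unwanted, a sketch of an alternative (assuming the same query and count as above) would be:

# Sample distinct indices; min() guards against requesting more
# albums than exist.
indices = random.sample(xrange(count), min(size, count))
albums = [query[x] for x in indices]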
Example #3
def album_list_id3():
    ltype, size, offset = map(request.args.get, ['type', 'size', 'offset'])
    try:
        size = int(size) if size else 10
        offset = int(offset) if offset else 0
    except ValueError:
        return request.error_formatter(0, 'Invalid parameter format')

    query = store.find(Album)
    if ltype == 'random':
        albums = []
        count = query.count()

        if not count:
            return request.formatter({'albumList2': {}})

        for _ in xrange(size):
            x = random.randrange(count)
            albums.append(query[x])

        return request.formatter({
            'albumList2': {
                'album': [a.as_subsonic_album(request.user) for a in albums]
            }
        })
    elif ltype == 'newest':
        query = query.find(Track.album_id == Album.id).group_by(
            Album.id).order_by(Desc(Min(Track.created)))
    elif ltype == 'frequent':
        query = query.find(Track.album_id == Album.id).group_by(
            Album.id).order_by(Desc(Avg(Track.play_count)))
    elif ltype == 'recent':
        query = query.find(Track.album_id == Album.id).group_by(
            Album.id).order_by(Desc(Max(Track.last_play)))
    elif ltype == 'starred':
        query = query.find(StarredAlbum.starred_id == Album.id,
                           User.id == StarredAlbum.user_id,
                           User.name == request.username)
    elif ltype == 'alphabeticalByName':
        query = query.order_by(Album.name)
    elif ltype == 'alphabeticalByArtist':
        query = query.find(Artist.id == Album.artist_id).order_by(
            Artist.name, Album.name)
    else:
        return request.error_formatter(0, 'Unknown search type')

    return request.formatter({
        'albumList2': {
            'album': [
                f.as_subsonic_album(request.user)
                for f in query[offset:offset + size]
            ]
        }
    })
Example #4
    def recalculateCounts(self):
        """See `ITranslatedLanguage`."""
        templates = self.parent.getCurrentTemplatesCollection()
        pofiles = templates.joinOuterPOFile(self.language)
        total_count_results = list(
            pofiles.select(Coalesce(Sum(POTemplate.messagecount), 0),
                           Coalesce(Sum(POFile.currentcount), 0),
                           Coalesce(Sum(POFile.updatescount), 0),
                           Coalesce(Sum(POFile.rosettacount), 0),
                           Coalesce(Sum(POFile.unreviewed_count), 0),
                           Max(POFile.date_changed)))
        total, imported, changed, rosetta, unreviewed, date_changed = (
            total_count_results[0])
        translated = imported + rosetta
        new = rosetta - changed
        self.setCounts(total, translated, new, changed, unreviewed)

        # We have to add a timezone to the otherwise naive datetime object
        # (because we got it through the Max() aggregate function).
        if date_changed is not None:
            date_changed = date_changed.replace(tzinfo=pytz.UTC)
        self.last_changed_date = date_changed
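
The timezone handling above is worth calling out: as the comment notes, the result of the Max() aggregate comes back as a naive datetime, and Python raises TypeError when comparing naive and aware datetimes. A minimal standalone sketch of the same fix (the date is made up):

from datetime import datetime

import pytz

naive = datetime(2012, 1, 1, 12, 0)     # e.g. what Max(POFile.date_changed) yields
aware = naive.replace(tzinfo=pytz.UTC)  # attach UTC without shifting the time

print(aware.isoformat())  # 2012-01-01T12:00:00+00:00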
Example #5
    def bug_count(self):
        return IStore(Bug).find(Max(Bug.id)).one()
Example #6
    def productserieslanguages(self):
        """See `IProductSeries`."""
        store = Store.of(self)

        english = getUtility(ILaunchpadCelebrities).english

        results = []
        if self.potemplate_count == 1:
            # If there is only one POTemplate in a ProductSeries, fetch
            # Languages and corresponding POFiles with one query, along
            # with their stats, and put them into ProductSeriesLanguage
            # objects.
            origin = [Language, POFile, POTemplate]
            query = store.using(*origin).find(
                (Language, POFile),
                POFile.language == Language.id,
                Language.visible == True,
                POFile.potemplate == POTemplate.id,
                POTemplate.productseries == self,
                POTemplate.iscurrent == True,
                Language.id != english.id)

            ordered_results = query.order_by(['Language.englishname'])

            for language, pofile in ordered_results:
                psl = ProductSeriesLanguage(self, language, pofile=pofile)
                total = pofile.potemplate.messageCount()
                imported = pofile.currentCount()
                changed = pofile.updatesCount()
                rosetta = pofile.rosettaCount()
                unreviewed = pofile.unreviewedCount()
                translated = imported + rosetta
                new = rosetta - changed
                psl.setCounts(total, translated, new, changed, unreviewed)
                psl.last_changed_date = pofile.date_changed
                results.append(psl)
        else:
            # If there is more than one template, do a single
            # query to count total messages in all templates.
            query = store.find(
                Sum(POTemplate.messagecount),
                POTemplate.productseries == self,
                POTemplate.iscurrent == True)
            total, = query
            # And another query to fetch all Languages with translations
            # in this ProductSeries, along with their cumulative stats
            # for imported, changed, rosetta-provided and unreviewed
            # translations.
            query = store.find(
                (Language,
                 Sum(POFile.currentcount),
                 Sum(POFile.updatescount),
                 Sum(POFile.rosettacount),
                 Sum(POFile.unreviewed_count),
                 Max(POFile.date_changed)),
                POFile.language == Language.id,
                Language.visible == True,
                POFile.potemplate == POTemplate.id,
                POTemplate.productseries == self,
                POTemplate.iscurrent == True,
                Language.id != english.id).group_by(Language)

            ordered_results = query.order_by(['Language.englishname'])

            for (language, imported, changed, rosetta, unreviewed,
                 last_changed) in ordered_results:
                psl = ProductSeriesLanguage(self, language)
                translated = imported + rosetta
                new = rosetta - changed
                psl.setCounts(total, translated, new, changed, unreviewed)
                psl.last_changed_date = last_changed
                results.append(psl)

        return results
Example #7
    def _update(cls, distroseries, binarypackagenames, archive, log):
        """Update the package cache for a given set of `IBinaryPackageName`s.

        'log' is required; it should be a logger object able to print
        DEBUG level messages.
        'ztm' is the current transaction manager used for partial commits
        (in full batches of 100 elements).
        """
        # get the set of published binarypackagereleases
        all_details = list(
            IStore(BinaryPackageRelease).find(
                (BinaryPackageRelease.binarypackagenameID,
                 BinaryPackageRelease.summary,
                 BinaryPackageRelease.description,
                 Max(BinaryPackageRelease.datecreated)),
                BinaryPackageRelease.id ==
                BinaryPackagePublishingHistory.binarypackagereleaseID,
                BinaryPackagePublishingHistory.binarypackagenameID.is_in(
                    [bpn.id for bpn in binarypackagenames]),
                BinaryPackagePublishingHistory.distroarchseriesID.is_in(
                    Select(
                        DistroArchSeries.id,
                        tables=[DistroArchSeries],
                        where=DistroArchSeries.distroseries == distroseries)),
                BinaryPackagePublishingHistory.archive == archive,
                BinaryPackagePublishingHistory.status.is_in(
                    (PackagePublishingStatus.PENDING,
                     PackagePublishingStatus.PUBLISHED))).group_by(
                         BinaryPackageRelease.binarypackagenameID,
                         BinaryPackageRelease.summary,
                         BinaryPackageRelease.description).order_by(
                             BinaryPackageRelease.binarypackagenameID,
                             Desc(Max(BinaryPackageRelease.datecreated))))
        if not all_details:
            log.debug("No binary releases found.")
            return

        details_map = defaultdict(list)
        for (bpn_id, summary, description, datecreated) in all_details:
            bpn = IStore(BinaryPackageName).get(BinaryPackageName, bpn_id)
            details_map[bpn].append((summary, description))

        all_caches = IStore(cls).find(
            cls, cls.distroseries == distroseries, cls.archive == archive,
            cls.binarypackagenameID.is_in(
                [bpn.id for bpn in binarypackagenames]))
        cache_map = {cache.binarypackagename: cache for cache in all_caches}

        for bpn in set(binarypackagenames) - set(cache_map):
            cache_map[bpn] = cls(archive=archive,
                                 distroseries=distroseries,
                                 binarypackagename=bpn)

        for bpn in binarypackagenames:
            cache = cache_map[bpn]
            details = details_map[bpn]
            # make sure the cached name, summary and description are correct
            cache.name = bpn.name
            cache.summary = details[0][0]
            cache.description = details[0][1]

            # get the sets of binary package summaries and descriptions.
            # There is likely only one of each, but just in case...

            summaries = set()
            descriptions = set()
            for summary, description in details:
                summaries.add(summary)
                descriptions.add(description)

            # and update the caches
            cache.summaries = ' '.join(sorted(summaries))
            cache.descriptions = ' '.join(sorted(descriptions))
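
Note that the correctness of details[0] above leans entirely on the query's ORDER BY: rows arrive grouped by package name with the newest release first, so the first (summary, description) pair appended for each name is the most recent one. A minimal sketch of that pattern in isolation (package names and summaries are made up):

from collections import defaultdict

# Rows pre-sorted by name, newest first within each name, mirroring
# the order_by(binarypackagenameID, Desc(Max(datecreated))) above.
rows = [
    ('pkg-a', 'newest summary'),
    ('pkg-a', 'older summary'),
    ('pkg-b', 'only summary'),
]

details_map = defaultdict(list)
for name, summary in rows:
    details_map[name].append(summary)

print(details_map['pkg-a'][0])  # 'newest summary'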